diff --git a/.coveragerc b/.coveragerc
new file mode 100644
index 0000000000..611707ae9f
--- /dev/null
+++ b/.coveragerc
@@ -0,0 +1,11 @@
+[run]
+source=
+ ./couchpotato/
+omit =
+ ./libs/*
+ ./node_modules/*
+[report]
+omit =
+ */python?.?/*
+ ./libs/*
+ ./node_modules/*
diff --git a/.editorconfig b/.editorconfig
new file mode 100644
index 0000000000..7c1af9a31d
--- /dev/null
+++ b/.editorconfig
@@ -0,0 +1,16 @@
+# http://editorconfig.org
+root = true
+
+[*]
+indent_style = tab
+indent_size = 4
+end_of_line = lf
+charset = utf-8
+trim_trailing_whitespace = true
+insert_final_newline = true
+
+[*.py]
+indent_style = space
+
+[*.md]
+trim_trailing_whitespace = false
\ No newline at end of file
diff --git a/.github/contributing.md b/.github/contributing.md
new file mode 100644
index 0000000000..9af7d0a6ef
--- /dev/null
+++ b/.github/contributing.md
@@ -0,0 +1,40 @@
+# Contributing to CouchPotatoServer
+
+1. [Contributing](#contributing)
+2. [Submitting an Issue](#issues)
+3. [Submitting a Pull Request](#pull-requests)
+
+## Contributing
+Thank you for your interest in contributing to CouchPotato. There are several ways to help out, even if you've never worked on an open source project before.
+If you've found a bug or want to request a feature, you can report it by [posting an issue](https://github.com/CouchPotato/CouchPotatoServer/issues/new) - be sure to read the [guidelines](#issues) first!
+If you want to contribute your own work, please read the [guidelines](#pull-requests) for submitting a pull request.
+Lastly, for anything related to CouchPotato, feel free to stop by the [forum](http://couchpota.to/forum/) or the [#couchpotato](http://webchat.freenode.net/?channels=couchpotato) IRC channel at irc.freenode.net.
+
+## Issues
+Issues are intended for reporting bugs and weird behaviour or suggesting improvements to CouchPotatoServer.
+Before you submit an issue, please go through the following checklist:
+ * **FILL IN ALL THE FIELDS ASKED FOR**
+ * **POST MORE THAN A SINGLE LINE LOG**, if you do, you'd better have an easily reproducible bug
+ * Search through existing issues (*including closed issues!*) first: you might be able to get your answer there.
+ * Double check your issue manually, because it could be an external issue.
+ * Post logs with your issue: Without seeing what is going on, the developers can't reproduce the error.
+ * Check the logs yourself before submitting them. Obvious errors like permission or HTTP errors are often not related to CouchPotato.
+ * What movie and quality are you searching for?
+ * What are your settings for the specific problem?
+ * What providers are you using? (While your logs include these, scanning through hundreds of lines of logs isn't our hobby)
+ * Post the logs from the *config* directory, please do not copy paste the UI. Use pastebin to store these logs!
+ * Give a short step by step of how to reproduce the error.
+ * What hardware / OS are you using and what are its limitations? For example: NAS can be slow and maybe have a different version of python installed than when you use CP on OS X or Windows.
+ * Your issue might be marked with the "can't reproduce" tag. Don't ask why your issue was closed if it says so in the tag.
+ * If you're running on a NAS (QNAP, Asustor, Synology etc.) with pre-made packages, make sure these are set up to use our source repository (CouchPotato/CouchPotatoServer) and nothing else!
+ * Do not "bump" issues with "Any updates on this" or whatever. Yes I've seen it, you don't have to remind me of it. There will be an update when the code is done or I need information. If you feel the need to do so, you'd better have more info on the issue.
+
+The more relevant information you provide, the more likely that your issue will be resolved.
+If you don't follow any of the checks above, I'll close the issue. If you are wondering why (and ask) I'll block you from posting new issues and the repo.
+
+## Pull Requests
+Pull requests are intended for contributing code or documentation to the project. Before you submit a pull request, consider the following:
+ * Make sure your pull request is made for the *develop* branch (or relevant feature branch).
+ * Have you tested your PR? If not, why?
+ * Does your PR have any limitations I should know of?
+ * Is your PR up-to-date with the branch you're trying to push into?
diff --git a/.github/issue_template.md b/.github/issue_template.md
new file mode 100644
index 0000000000..dd24310c03
--- /dev/null
+++ b/.github/issue_template.md
@@ -0,0 +1,14 @@
+### Steps to reproduce:
+1. ..
+2. ..
+
+### Information:
+Movie(s) I have this with: ...
+Quality of the movie being searched: ...
+Providers I use: ...
+Version of CouchPotato: ...
+Running on: ...
+
+### Logs:
+```
+```
diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md
new file mode 100644
index 0000000000..fe31e264b9
--- /dev/null
+++ b/.github/pull_request_template.md
@@ -0,0 +1,5 @@
+### Description of what this fixes:
+...
+
+### Related issues:
+...
diff --git a/.gitignore b/.gitignore
index e156f873bb..6de15688c8 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,5 +1,19 @@
*.pyc
/data/
+/_env/
/_source/
.project
.pydevproject
+/node_modules/
+/.tmp/
+/.sass-cache/
+.coverage
+coverage.xml
+nosetests.xml
+
+# Visual Studio
+
+/.vs
+
+.DS_Store
+/.vscode/
diff --git a/.nosetestsrc b/.nosetestsrc
new file mode 100644
index 0000000000..df777870a0
--- /dev/null
+++ b/.nosetestsrc
@@ -0,0 +1,2 @@
+[nosetests]
+where=couchpotato
diff --git a/.travis.yml b/.travis.yml
new file mode 100644
index 0000000000..9f0c298199
--- /dev/null
+++ b/.travis.yml
@@ -0,0 +1,35 @@
+language: python
+
+# with enabled SUDO the build goes slower
+sudo: false
+
+python:
+ # - "2.6"
+ - "2.7"
+ # - "3.2"
+ # - "3.3"
+ # - "3.4"
+ # - "3.5"
+ # - "3.5-dev" # 3.5 development branch
+ # - "nightly" # currently points to 3.6-dev
+
+cache:
+ pip: true
+ directories:
+ - node_modules
+ - libs
+ - lib
+
+# command to install dependencies
+install:
+ - pip install --upgrade pip
+ - npm install
+ - pip install -r requirements-dev.txt -t ./libs
+
+# command to run tests
+script:
+ - grunt test
+ - grunt coverage
+
+after_success:
+ coveralls
\ No newline at end of file
diff --git a/.vs/CouchPotatoServer/v14/.suo b/.vs/CouchPotatoServer/v14/.suo
new file mode 100644
index 0000000000..98c6663bbe
Binary files /dev/null and b/.vs/CouchPotatoServer/v14/.suo differ
diff --git a/CouchPotato.py b/CouchPotato.py
index e777f9bf43..b4a64218d2 100755
--- a/CouchPotato.py
+++ b/CouchPotato.py
@@ -1,4 +1,5 @@
#!/usr/bin/env python
+from __future__ import print_function
from logging import handlers
from os.path import dirname
import logging
@@ -9,7 +10,6 @@
import subprocess
import sys
import traceback
-import time
# Root path
base_path = dirname(os.path.abspath(__file__))
@@ -18,7 +18,12 @@
sys.path.insert(0, os.path.join(base_path, 'libs'))
from couchpotato.environment import Env
-from couchpotato.core.helpers.variable import getDataDir
+from couchpotato.core.helpers.variable import getDataDir, removePyc
+
+
+# Remove pyc files before dynamic load (otherwise .pyc files are seen as regular .py modules)
+removePyc(base_path)
+
class Loader(object):
@@ -28,7 +33,7 @@ def __init__(self):
# Get options via arg
from couchpotato.runner import getOptions
- self.options = getOptions(base_path, sys.argv[1:])
+ self.options = getOptions(sys.argv[1:])
# Load settings
settings = Env.get('settings')
@@ -47,9 +52,9 @@ def __init__(self):
os.makedirs(self.data_dir)
# Create logging dir
- self.log_dir = os.path.join(self.data_dir, 'logs');
+ self.log_dir = os.path.join(self.data_dir, 'logs')
if not os.path.isdir(self.log_dir):
- os.mkdir(self.log_dir)
+ os.makedirs(self.log_dir)
# Logging
from couchpotato.core.logger import CPLog
@@ -66,14 +71,15 @@ def addSignals(self):
signal.signal(signal.SIGTERM, lambda signum, stack_frame: sys.exit(1))
from couchpotato.core.event import addEvent
- addEvent('app.after_shutdown', self.afterShutdown)
+ addEvent('app.do_shutdown', self.setRestart)
- def afterShutdown(self, restart):
+ def setRestart(self, restart):
self.do_restart = restart
+ return True
def onExit(self, signal, frame):
from couchpotato.core.event import fireEvent
- fireEvent('app.shutdown', single = True)
+ fireEvent('app.shutdown', single=True)
def run(self):
@@ -90,14 +96,15 @@ def restart(self):
# remove old pidfile first
try:
if self.runAsDaemon():
- try: self.daemon.stop()
- except: pass
+ try:
+ self.daemon.stop()
+ except:
+ pass
except:
self.log.critical(traceback.format_exc())
# Release log files and shutdown logger
logging.shutdown()
- time.sleep(3)
args = [sys.executable] + [os.path.join(base_path, os.path.basename(__file__))] + sys.argv[1:]
subprocess.Popen(args)
@@ -117,7 +124,7 @@ def daemonize(self):
self.log.critical(traceback.format_exc())
def runAsDaemon(self):
- return self.options.daemon and self.options.pid_file
+ return self.options.daemon and self.options.pid_file
if __name__ == '__main__':
@@ -132,14 +139,15 @@ def runAsDaemon(self):
pass
except SystemExit:
raise
- except socket.error as (nr, msg):
+ except socket.error as e:
# log when socket receives SIGINT, but continue.
# previous code would have skipped over other types of IO errors too.
+ nr, msg = e
if nr != 4:
try:
l.log.critical(traceback.format_exc())
except:
- print traceback.format_exc()
+ print(traceback.format_exc())
raise
except:
try:
@@ -148,7 +156,7 @@ def runAsDaemon(self):
if l:
l.log.critical(traceback.format_exc())
else:
- print traceback.format_exc()
+ print(traceback.format_exc())
except:
- print traceback.format_exc()
+ print(traceback.format_exc())
raise
diff --git a/CouchPotatoServer.pyproj b/CouchPotatoServer.pyproj
new file mode 100644
index 0000000000..f35ff48e3f
--- /dev/null
+++ b/CouchPotatoServer.pyproj
@@ -0,0 +1,1648 @@
+О╩©
+
+
+ Debug
+ 2.0
+ {854ac11a-81d3-4fcf-b9cb-69e38e5adc75}
+
+ CouchPotato.py
+
+ .
+ .
+ {888888a0-9f3d-457c-b088-3a5042f75d52}
+ Standard Python launcher
+
+
+
+
+
+
+ 10.0
+ $(MSBuildExtensionsPath32)\Microsoft\VisualStudio\v$(VisualStudioVersion)\Python Tools\Microsoft.PythonTools.targets
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Code
+
+
+ Code
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Code
+
+
+ Code
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/CouchPotatoServer.pyproj.user b/CouchPotatoServer.pyproj.user
new file mode 100644
index 0000000000..55f44b95fe
--- /dev/null
+++ b/CouchPotatoServer.pyproj.user
@@ -0,0 +1,6 @@
+О╩©
+
+
+ ShowAllFiles
+
+
\ No newline at end of file
diff --git a/CouchPotatoServer.sln b/CouchPotatoServer.sln
new file mode 100644
index 0000000000..53a27a87df
--- /dev/null
+++ b/CouchPotatoServer.sln
@@ -0,0 +1,20 @@
+О╩©
+Microsoft Visual Studio Solution File, Format Version 12.00
+# Visual Studio 14
+VisualStudioVersion = 14.0.25420.1
+MinimumVisualStudioVersion = 10.0.40219.1
+Project("{888888A0-9F3D-457C-B088-3A5042F75D52}") = "CouchPotatoServer", "CouchPotatoServer.pyproj", "{854AC11A-81D3-4FCF-B9CB-69E38E5ADC75}"
+EndProject
+Global
+ GlobalSection(SolutionConfigurationPlatforms) = preSolution
+ Debug|Any CPU = Debug|Any CPU
+ Release|Any CPU = Release|Any CPU
+ EndGlobalSection
+ GlobalSection(ProjectConfigurationPlatforms) = postSolution
+ {854AC11A-81D3-4FCF-B9CB-69E38E5ADC75}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {854AC11A-81D3-4FCF-B9CB-69E38E5ADC75}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ EndGlobalSection
+ GlobalSection(SolutionProperties) = preSolution
+ HideSolutionNode = FALSE
+ EndGlobalSection
+EndGlobal
diff --git a/CouchPotatoServer.v12.suo b/CouchPotatoServer.v12.suo
new file mode 100644
index 0000000000..c504008783
Binary files /dev/null and b/CouchPotatoServer.v12.suo differ
diff --git a/Gruntfile.js b/Gruntfile.js
new file mode 100644
index 0000000000..41db92e5dd
--- /dev/null
+++ b/Gruntfile.js
@@ -0,0 +1,249 @@
+'use strict';
+
+module.exports = function(grunt){
+ require('jit-grunt')(grunt);
+ require('time-grunt')(grunt);
+
+ grunt.loadNpmTasks('grunt-shell-spawn');
+
+ // Configurable paths
+ var config = {
+ python: grunt.file.exists('./_env/bin/python') ? './_env/bin/python' : 'python',
+ // colorful output on travis is not required, so disable it there, using Travis's env var :
+ colorful_tests_output: ! process.env.TRAVIS,
+ tmp: '.tmp',
+ base: 'couchpotato',
+ css_dest: 'couchpotato/static/style/combined.min.css',
+ scripts_vendor_dest: 'couchpotato/static/scripts/combined.vendor.min.js',
+ scripts_base_dest: 'couchpotato/static/scripts/combined.base.min.js',
+ scripts_plugins_dest: 'couchpotato/static/scripts/combined.plugins.min.js'
+ };
+
+ var vendor_scripts_files = [
+ 'couchpotato/static/scripts/vendor/mootools.js',
+ 'couchpotato/static/scripts/vendor/mootools_more.js',
+ 'couchpotato/static/scripts/vendor/Array.stableSort.js',
+ 'couchpotato/static/scripts/vendor/history.js',
+ 'couchpotato/static/scripts/vendor/dynamics.js',
+ 'couchpotato/static/scripts/vendor/fastclick.js',
+ 'couchpotato/static/scripts/vendor/requestAnimationFrame.js'
+ ];
+
+ var scripts_files = [
+ 'couchpotato/static/scripts/library/uniform.js',
+ 'couchpotato/static/scripts/library/question.js',
+ 'couchpotato/static/scripts/library/scrollspy.js',
+ 'couchpotato/static/scripts/couchpotato.js',
+ 'couchpotato/static/scripts/api.js',
+ 'couchpotato/static/scripts/page.js',
+ 'couchpotato/static/scripts/block.js',
+ 'couchpotato/static/scripts/block/navigation.js',
+ 'couchpotato/static/scripts/block/header.js',
+ 'couchpotato/static/scripts/block/footer.js',
+ 'couchpotato/static/scripts/block/menu.js',
+ 'couchpotato/static/scripts/page/home.js',
+ 'couchpotato/static/scripts/page/settings.js',
+ 'couchpotato/static/scripts/page/about.js',
+ 'couchpotato/static/scripts/page/login.js'
+ ];
+
+ grunt.initConfig({
+
+ // Project settings
+ config: config,
+
+ // Make sure code styles are up to par and there are no obvious mistakes
+ jshint: {
+ options: {
+ reporter: require('jshint-stylish'),
+ unused: false,
+ camelcase: false,
+ devel: true
+ },
+ all: [
+ '<%= config.base %>/{,**/}*.js',
+ '!<%= config.base %>/static/scripts/vendor/{,**/}*.js',
+ '!<%= config.base %>/static/scripts/combined.*.js'
+ ]
+ },
+
+ // Compiles Sass to CSS and generates necessary files if requested
+ sass: {
+ options: {
+ compass: true,
+ update: true,
+ sourcemap: 'none'
+ },
+ server: {
+ files: [{
+ expand: true,
+ cwd: '<%= config.base %>/',
+ src: ['**/*.scss'],
+ dest: '<%= config.tmp %>/styles/',
+ ext: '.css'
+ }]
+ }
+ },
+
+ // Empties folders to start fresh
+ clean: {
+ server: '.tmp'
+ },
+
+ // Add vendor prefixed styles
+ autoprefixer: {
+ options: {
+ browsers: ['last 2 versions'],
+ remove: false,
+ cascade: false
+ },
+ dist: {
+ files: [{
+ expand: true,
+ cwd: '<%= config.tmp %>/styles/',
+ src: '{,**/}*.css',
+ dest: '<%= config.tmp %>/styles/'
+ }]
+ }
+ },
+
+ cssmin: {
+ dist: {
+ options: {
+ keepBreaks: true
+ },
+ files: {
+ '<%= config.css_dest %>': ['<%= config.tmp %>/styles/**/*.css']
+ }
+ }
+ },
+
+ uglify: {
+ options: {
+ mangle: false,
+ compress: false,
+ beautify: true,
+ screwIE8: true
+ },
+ vendor: {
+ files: {
+ '<%= config.scripts_vendor_dest %>': vendor_scripts_files
+ }
+ },
+ base: {
+ files: {
+ '<%= config.scripts_base_dest %>': scripts_files
+ }
+ },
+ plugins: {
+ files: {
+ '<%= config.scripts_plugins_dest %>': ['<%= config.base %>/core/**/*.js']
+ }
+ }
+ },
+
+ shell: {
+ runCouchPotato: {
+ command: '<%= config.python %> CouchPotato.py',
+ options: {
+ stdout: true,
+ stderr: true
+ }
+ }
+ },
+
+ // COOL TASKS ==============================================================
+ watch: {
+ scss: {
+ files: ['<%= config.base %>/**/*.{scss,sass}'],
+ tasks: ['sass:server', 'autoprefixer', 'cssmin']
+ },
+ js: {
+ files: [
+ '<%= config.base %>/**/*.js',
+ '!<%= config.base %>/static/scripts/combined.*.js'
+ ],
+ tasks: ['uglify:base', 'uglify:plugins', 'jshint']
+ },
+ livereload: {
+ options: {
+ livereload: 35729
+ },
+ files: [
+ '<%= config.css_dest %>',
+ '<%= config.scripts_vendor_dest %>',
+ '<%= config.scripts_base_dest %>',
+ '<%= config.scripts_plugins_dest %>'
+ ]
+ }
+ },
+
+ // TEST TASKS ==============================================================
+ env: {
+ options: {
+ },
+
+ test:{
+ concat: {
+ PYTHONPATH: {
+ 'value' : './libs',
+ 'delimiter' : ':',
+ }
+ }
+ }
+ },
+
+ // for python tests
+ nose: {
+ options: {
+ verbosity: 2,
+ exe: true,
+ config: './.nosetestsrc',
+ // 'rednose' is a colored output for the nose test-runner. But we do not require colors on travis-ci
+ rednose: config.colorful_tests_output,
+ externalNose: true,
+ },
+
+ test: {
+ },
+
+ coverage: {
+ options:{
+ with_coverage: true,
+ cover_package: "couchpotato",
+ cover_branches: true,
+ cover_xml: true,
+ with_doctest: true,
+ with_xunit: true,
+ cover_tests: false,
+ cover_erase: true,
+ }
+ },
+ },
+
+ concurrent: {
+ options: {
+ logConcurrentOutput: true
+ },
+ tasks: ['shell:runCouchPotato', 'watch']
+ }
+
+ });
+
+ // testing task
+ grunt.registerTask('test', ['env:test', 'nose:test']);
+
+ // currently, coverage does not generate local html report, but it is useful and possible
+ grunt.registerTask('coverage', ['env:test', 'nose:coverage']);
+
+ grunt.registerTask('default', [
+ 'clean:server',
+ 'sass:server',
+ 'autoprefixer',
+ 'cssmin',
+ 'uglify:vendor',
+ 'uglify:base',
+ 'uglify:plugins',
+ 'concurrent'
+ ]);
+};
diff --git a/README.md b/README.md
index 8d1e5b89fc..aebf591ccc 100644
--- a/README.md
+++ b/README.md
@@ -1,42 +1,87 @@
-CouchPotato Server
+CouchPotato
=====
+[![Join the chat at https://gitter.im/CouchPotato/CouchPotatoServer](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/CouchPotato/CouchPotatoServer?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
+[![Build Status](https://travis-ci.org/CouchPotato/CouchPotatoServer.svg?branch=master)](https://travis-ci.org/CouchPotato/CouchPotatoServer)
+[![Coverage Status](https://coveralls.io/repos/CouchPotato/CouchPotatoServer/badge.svg?branch=master&service=github)](https://coveralls.io/github/CouchPotato/CouchPotatoServer?branch=master)
+
CouchPotato (CP) is an automatic NZB and torrent downloader. You can keep a "movies I want"-list and it will search for NZBs/torrents of these movies every X hours.
Once a movie is found, it will send it to SABnzbd or download the torrent to a specified directory.
## Running from Source
-CouchPotatoServer can be run from source. This will use *git* as updater, so make sure that is installed also.
+CouchPotatoServer can be run from source. This will use *git* as updater, so make sure that is installed.
-Windows, see [the CP forum](http://couchpota.to/forum/showthread.php?tid=14) for more details:
+Windows, see [the CP forum](http://couchpota.to/forum/viewtopic.php?t=14) for more details:
* Install [Python 2.7](http://www.python.org/download/releases/2.7.3/)
* Then install [PyWin32 2.7](http://sourceforge.net/projects/pywin32/files/pywin32/Build%20217/) and [GIT](http://git-scm.com/)
* If you come and ask on the forums 'why directory selection no work?', I will kill a kitten, also this is because you need PyWin32
* Open up `Git Bash` (or CMD) and go to the folder you want to install CP. Something like Program Files.
-* Run `git clone https://github.com/RuudBurger/CouchPotatoServer.git`.
+* Run `git clone https://github.com/CouchPotato/CouchPotatoServer.git`.
* You can now start CP via `CouchPotatoServer\CouchPotato.py` to start
-* Your browser should open up, but if it doesn't go to: `http://localhost:5050/`
+* Your browser should open up, but if it doesn't go to `http://localhost:5050/`
-OSx:
+OS X:
* If you're on Leopard (10.5) install Python 2.6+: [Python 2.6.5](http://www.python.org/download/releases/2.6.5/)
* Install [GIT](http://git-scm.com/)
+* Install [LXML](http://lxml.de/installation.html) for better/faster website scraping
* Open up `Terminal`
* Go to your App folder `cd /Applications`
-* Run `git clone https://github.com/RuudBurger/CouchPotatoServer.git`
+* Run `git clone https://github.com/CouchPotato/CouchPotatoServer.git`
* Then do `python CouchPotatoServer/CouchPotato.py`
-* Your browser should open up, but if it doesn't go to: `http://localhost:5050/`
+* Your browser should open up, but if it doesn't go to `http://localhost:5050/`
-Linux (ubuntu / debian):
+Linux:
-* Install [GIT](http://git-scm.com/) with `apt-get install git-core`
+* (Ubuntu / Debian) Install [GIT](http://git-scm.com/) with `apt-get install git-core`
+* (Fedora / CentOS) Install [GIT](http://git-scm.com/) with `yum install git`
+* Install [LXML](http://lxml.de/installation.html) for better/faster website scraping
* 'cd' to the folder of your choosing.
-* Run `git clone https://github.com/RuudBurger/CouchPotatoServer.git`
+* Install [PyOpenSSL](https://pypi.python.org/pypi/pyOpenSSL) with `pip install --upgrade pyopenssl`
+* Run `git clone https://github.com/CouchPotato/CouchPotatoServer.git`
* Then do `python CouchPotatoServer/CouchPotato.py` to start
-* To run on boot copy the init script. `sudo cp CouchPotatoServer/init/ubuntu /etc/init.d/couchpotato`
-* Change the paths inside the init script. `sudo nano /etc/init.d/couchpotato`
-* Make it executable. `sudo chmod +x /etc/init.d/couchpotato`
-* Add it to defaults. `sudo update-rc.d couchpotato defaults`
-* Open your browser and go to: `http://localhost:5050/`
+* (Ubuntu / Debian with upstart) To run on boot copy the init script `sudo cp CouchPotatoServer/init/ubuntu /etc/init.d/couchpotato`
+* (Ubuntu / Debian with upstart) Copy the default paths file `sudo cp CouchPotatoServer/init/ubuntu.default /etc/default/couchpotato`
+* (Ubuntu / Debian with upstart) Change the paths inside the default file `sudo nano /etc/default/couchpotato`
+* (Ubuntu / Debian with upstart) Make it executable `sudo chmod +x /etc/init.d/couchpotato`
+* (Ubuntu / Debian with upstart) Add it to defaults `sudo update-rc.d couchpotato defaults`
+* (Linux with systemd) To run on boot copy the systemd config `sudo cp CouchPotatoServer/init/couchpotato.service /etc/systemd/system/couchpotato.service`
+* (Linux with systemd) Update the systemd config file with your user and path to CouchPotato.py
+* (Linux with systemd) Enable it at boot with `sudo systemctl enable couchpotato`
+* Open your browser and go to `http://localhost:5050/`
+
+Docker:
+* You can use [linuxserver.io](https://github.com/linuxserver/docker-couchpotato) or [razorgirl's](https://github.com/razorgirl/docker-couchpotato) to quickly build your own isolated app container. It's based on the Linux instructions above. For more info about Docker check out the [official website](https://www.docker.com).
+
+FreeBSD:
+
+* Become root with `su`
+* Update your repo catalog `pkg update`
+* Install required tools `pkg install python py27-sqlite3 fpc-libcurl docbook-xml git-lite`
+* For default install location and running as root `cd /usr/local`
+* If running as root, expects python here `ln -s /usr/local/bin/python /usr/bin/python`
+* Run `git clone https://github.com/CouchPotato/CouchPotatoServer.git`
+* Copy the startup script `cp CouchPotatoServer/init/freebsd /usr/local/etc/rc.d/couchpotato`
+* Make startup script executable `chmod 555 /usr/local/etc/rc.d/couchpotato`
+* Add startup to boot `echo 'couchpotato_enable="YES"' >> /etc/rc.conf`
+* Read the options at the top of `more /usr/local/etc/rc.d/couchpotato`
+* If not default install, specify options with startup flags in `ee /etc/rc.conf`
+* Finally, `service couchpotato start`
+* Open your browser and go to: `http://server:5050/`
+
+
+## Development
+
+Be sure you're running the latest version of [Python 2.7](http://python.org/).
+
+If you're going to add styling or doing some javascript work you'll need a few tools that build and compress scss -> css and combine the javascript files. [Node/NPM](https://nodejs.org/), [Grunt](http://gruntjs.com/installing-grunt), [Compass](http://compass-style.org/install/)
+
+After you've got these tools you can install the packages using `npm install`. Once this process has finished you can start CP using the command `grunt`. This will start all the needed tools and watches any files for changes.
+You can now change css and javascript and it will reload the page when needed.
+
+By default it will combine files used in the core folder. If you're adding a new .scss or .js file, you might need to add it and then restart the grunt process for it to combine it properly.
+
+Don't forget to enable development inside the CP settings. This disables some functions and also makes sure javascript errors are pushed to console instead of the log.
diff --git a/config.rb b/config.rb
new file mode 100644
index 0000000000..a26a2ee10e
--- /dev/null
+++ b/config.rb
@@ -0,0 +1,44 @@
+# First, require any additional compass plugins installed on your system.
+# require 'zen-grids'
+# require 'susy'
+# require 'breakpoint'
+
+
+# Toggle this between :development and :production when deploying the CSS to the
+# live server. Development mode will retain comments and spacing from the
+# original Sass source and adds line numbering comments for easier debugging.
+environment = :development
+# environment = :production
+
+# In development, we can turn on the FireSass-compatible debug_info.
+firesass = false
+# firesass = true
+
+
+# Location of the your project's resources.
+
+
+# Set this to the root of your project. All resource locations above are
+# considered to be relative to this path.
+http_path = "/"
+
+# To use relative paths to assets in your compiled CSS files, set this to true.
+# relative_assets = true
+
+
+##
+## You probably don't need to edit anything below this.
+##
+sass_dir = "./couchpotato/static/style"
+css_dir = "./couchpotato/static/style"
+
+# You can select your preferred output style here (can be overridden via the command line):
+# output_style = :expanded or :nested or :compact or :compressed
+output_style = (environment == :development) ? :expanded : :compressed
+
+# To disable debugging comments that display the original location of your selectors. Uncomment:
+# line_comments = false
+
+# Pass options to sass. For development, we turn on the FireSass-compatible
+# debug_info if the firesass config variable above is true.
+sass_options = (environment == :development && firesass == true) ? {:debug_info => true} : {}
diff --git a/contributing.md b/contributing.md
deleted file mode 100644
index 572dd3325a..0000000000
--- a/contributing.md
+++ /dev/null
@@ -1,15 +0,0 @@
-#So you feel like posting a bug, sending me a pull request or just telling me how awesome I am. No problem!
-
-##Just make sure you think of the following things:
-
- * Search through the existing (and closed) issues first. See if you can get your answer there.
- * Double check the result manually, because it could be an external issue.
- * Post logs! Without seeing what is going on, I can't reproduce the error.
- * What is the movie + quality you are searching for.
- * What are you settings for the specific problem.
- * What providers are you using. (While your logs include these, scanning through hundred of lines of log isn't my hobby).
- * Give me a short step by step of how to reproduce.
- * What hardware / OS are you using and what are the limits? NAS can be slow and maybe have a different python installed then when you use CP on OSX or Windows for example.
- * I will mark issues with the "can't reproduce" tag. Don't go asking me "why closed" if it clearly says the issue in the tag ;)
-
-**If I don't get enough info, the change of the issue getting closed is a lot bigger ;)**
\ No newline at end of file
diff --git a/couchpotato/__init__.py b/couchpotato/__init__.py
index 38b3617405..9a47120197 100644
--- a/couchpotato/__init__.py
+++ b/couchpotato/__init__.py
@@ -1,83 +1,205 @@
-from couchpotato.api import api_docs, api_docs_missing
-from couchpotato.core.auth import requires_auth
+import os
+import time
+import traceback
+
+from couchpotato.api import api_docs, api_docs_missing, api
from couchpotato.core.event import fireEvent
-from couchpotato.core.helpers.request import getParams, jsonified
-from couchpotato.core.helpers.variable import md5
+from couchpotato.core.helpers.encoding import sp
+from couchpotato.core.helpers.variable import md5, tryInt
from couchpotato.core.logger import CPLog
from couchpotato.environment import Env
-from flask.app import Flask
-from flask.blueprints import Blueprint
-from flask.globals import request
-from flask.helpers import url_for
-from flask.templating import render_template
-from sqlalchemy.engine import create_engine
-from sqlalchemy.orm import scoped_session
-from sqlalchemy.orm.session import sessionmaker
-from werkzeug.utils import redirect
-import os
-import time
+from tornado import template
+from tornado.web import RequestHandler, authenticated
+
log = CPLog(__name__)
-app = Flask(__name__, static_folder = 'nope')
-web = Blueprint('web', __name__)
+views = {}
+template_loader = template.Loader(os.path.join(os.path.dirname(__file__), 'templates'))
+
+
+class BaseHandler(RequestHandler):
+
+ def get_current_user(self):
+ username = Env.setting('username')
+ password = Env.setting('password')
+
+ if username and password:
+ return self.get_secure_cookie('user')
+ else: # Login when no username or password are set
+ return True
+
+
+# Main web handler
+class WebHandler(BaseHandler):
+
+ @authenticated
+ def get(self, route, *args, **kwargs):
+ route = route.strip('/')
+ if not views.get(route):
+ page_not_found(self)
+ return
+
+ try:
+ self.write(views[route](self))
+ except:
+ log.error("Failed doing web request '%s': %s", (route, traceback.format_exc()))
+ self.write({'success': False, 'error': 'Failed returning results'})
+
+
+def addView(route, func):
+ views[route] = func
+
+
+def get_db():
+ return Env.get('db')
+
+
+# Web view
+def index(*args):
+ return template_loader.load('index.html').generate(sep = os.sep, fireEvent = fireEvent, Env = Env)
+addView('', index)
+
+
+# Web view
+def robots(handler):
+ handler.set_header('Content-Type', 'text/plain')
+
+ return 'User-agent: * \n' \
+ 'Disallow: /'
+addView('robots.txt', robots)
+
+
+# HTML5 cache-manifest view (served as text/cache-manifest)
+def manifest(handler):
+ web_base = Env.get('web_base')
+ static_base = Env.get('static_path')
+
+ lines = [
+ 'CACHE MANIFEST',
+ '# %s theme' % ('dark' if Env.setting('dark_theme') else 'light'),
+ '',
+ 'CACHE:',
+ ''
+ ]
+
+ if not Env.get('dev'):
+ # CSS
+ for url in fireEvent('clientscript.get_styles', single = True):
+ lines.append(web_base + url)
+ # Scripts
+ for url in fireEvent('clientscript.get_scripts', single = True):
+ lines.append(web_base + url)
-def get_session(engine = None):
- return Env.getSession(engine)
+ # Favicon
+ lines.append(static_base + 'images/favicon.ico')
-def addView(route, func, static = False):
- web.add_url_rule(route + ('' if static else '/'), endpoint = route if route else 'index', view_func = func)
+ # Fonts
+ font_folder = sp(os.path.join(Env.get('app_dir'), 'couchpotato', 'static', 'fonts'))
+ for subfolder, dirs, files in os.walk(font_folder, topdown = False):
+ for file in files:
+ if '.woff' in file:
+ lines.append(static_base + 'fonts/' + file + ('?%s' % os.path.getmtime(os.path.join(font_folder, file))))
+ else:
+ lines.append('# Not caching anything in dev mode')
+
+ # End lines
+ lines.extend(['',
+ 'NETWORK: ',
+ '*'])
+
+ handler.set_header('Content-Type', 'text/cache-manifest')
+ return '\n'.join(lines)
+
+addView('couchpotato.appcache', manifest)
-""" Web view """
-@web.route('/')
-@requires_auth
-def index():
- return render_template('index.html', sep = os.sep, fireEvent = fireEvent, env = Env)
-""" Api view """
-@web.route('docs/')
-@requires_auth
-def apiDocs():
- from couchpotato import app
- routes = []
- for route, x in sorted(app.view_functions.iteritems()):
- if route[0:4] == 'api.':
- routes += [route[4:].replace('::', '.')]
+# API docs
+def apiDocs(*args):
+ routes = list(api.keys())
if api_docs.get(''):
del api_docs['']
del api_docs_missing['']
- return render_template('api.html', fireEvent = fireEvent, routes = sorted(routes), api_docs = api_docs, api_docs_missing = sorted(api_docs_missing))
-@web.route('getkey/')
-def getApiKey():
+ return template_loader.load('api.html').generate(fireEvent = fireEvent, routes = sorted(routes), api_docs = api_docs, api_docs_missing = sorted(api_docs_missing), Env = Env)
- api = None
- params = getParams()
- username = Env.setting('username')
- password = Env.setting('password')
+addView('docs', apiDocs)
- if (params.get('u') == md5(username) or not username) and (params.get('p') == password or not password):
- api = Env.setting('api_key')
- return jsonified({
- 'success': api is not None,
- 'api_key': api
- })
+# Database debug manager
+def databaseManage(*args):
+ return template_loader.load('database.html').generate(fireEvent = fireEvent, Env = Env)
-@app.errorhandler(404)
-def page_not_found(error):
- index_url = url_for('web.index')
- url = request.path[len(index_url):]
+addView('database', databaseManage)
- if url[:3] != 'api':
- if request.path != '/':
- r = request.url.replace(request.path, index_url + '#' + url)
+
+# Non-basic-auth handler for retrieving the api key
+class KeyHandler(RequestHandler):
+
+ def get(self, *args, **kwargs):
+ api_key = None
+
+ try:
+ username = Env.setting('username')
+ password = Env.setting('password')
+
+ if (self.get_argument('u') == md5(username) or not username) and (self.get_argument('p') == password or not password):
+ api_key = Env.setting('api_key')
+
+ self.write({
+ 'success': api_key is not None,
+ 'api_key': api_key
+ })
+ except:
+ log.error('Failed doing key request: %s', (traceback.format_exc()))
+ self.write({'success': False, 'error': 'Failed returning results'})
+
+
+class LoginHandler(BaseHandler):
+
+ def get(self, *args, **kwargs):
+
+ if self.get_current_user():
+ self.redirect(Env.get('web_base'))
else:
- r = '%s%s' % (request.url.rstrip('/'), index_url + '#' + url)
- return redirect(r)
+ self.write(template_loader.load('login.html').generate(sep = os.sep, fireEvent = fireEvent, Env = Env))
+
+ def post(self, *args, **kwargs):
+
+ api_key = None
+
+ username = Env.setting('username')
+ password = Env.setting('password')
+
+ if (self.get_argument('username') == username or not username) and (md5(self.get_argument('password')) == password or not password):
+ api_key = Env.setting('api_key')
+
+ if api_key:
+ remember_me = tryInt(self.get_argument('remember_me', default = 0))
+ self.set_secure_cookie('user', api_key, expires_days = 30 if remember_me > 0 else None)
+
+ self.redirect(Env.get('web_base'))
+
+
+class LogoutHandler(BaseHandler):
+
+ def get(self, *args, **kwargs):
+ self.clear_cookie('user')
+ self.redirect('%slogin/' % Env.get('web_base'))
+
+
+def page_not_found(rh):
+ index_url = Env.get('web_base')
+ url = rh.request.uri[len(index_url):]
+
+ if url[:3] != 'api':
+ r = index_url + '#' + url.lstrip('/')
+ rh.redirect(r)
else:
- time.sleep(0.1)
- return 'Wrong API key used', 404
+ if not Env.get('dev'):
+ time.sleep(0.1)
+ rh.set_status(404)
+ rh.write('Wrong API key used')
diff --git a/couchpotato/api.py b/couchpotato/api.py
index 718527c937..b5754d8209 100644
--- a/couchpotato/api.py
+++ b/couchpotato/api.py
@@ -1,61 +1,170 @@
-from flask.blueprints import Blueprint
-from flask.helpers import url_for
+from functools import wraps
+from threading import Thread
+import json
+import threading
+import traceback
+import urllib
+
+from couchpotato.core.helpers.request import getParams
+from couchpotato.core.logger import CPLog
+from tornado.ioloop import IOLoop
from tornado.web import RequestHandler, asynchronous
-from werkzeug.utils import redirect
-api = Blueprint('api', __name__)
+
+log = CPLog(__name__)
+
+
+api = {}
+api_locks = {}
+api_nonblock = {}
+
api_docs = {}
api_docs_missing = []
-api_nonblock = {}
+def run_async(func):
+ @wraps(func)
+ def async_func(*args, **kwargs):
+ func_hl = Thread(target = func, args = args, kwargs = kwargs)
+ func_hl.start()
+
+ return async_func
+
+@run_async
+def run_handler(route, kwargs, callback = None):
+ try:
+ res = api[route](**kwargs)
+ callback(res, route)
+ except:
+ log.error('Failed doing api request "%s": %s', (route, traceback.format_exc()))
+ callback({'success': False, 'error': 'Failed returning results'}, route)
+
+
+# NonBlock API handler
class NonBlockHandler(RequestHandler):
- def __init__(self, application, request, **kwargs):
- cls = NonBlockHandler
- cls.stoppers = []
- super(NonBlockHandler, self).__init__(application, request, **kwargs)
+ stopper = None
@asynchronous
- def get(self, route):
- cls = NonBlockHandler
+ def get(self, route, *args, **kwargs):
+ route = route.strip('/')
start, stop = api_nonblock[route]
- cls.stoppers.append(stop)
+ self.stopper = stop
- start(self.onNewMessage, last_id = self.get_argument("last_id", None))
+ start(self.sendData, last_id = self.get_argument('last_id', None))
- def onNewMessage(self, response):
- if self.request.connection.stream.closed():
- return
- self.finish(response)
+ def sendData(self, response):
+ if not self.request.connection.stream.closed():
+ try:
+ self.finish(response)
+ except:
+ log.debug('Failed doing nonblock request, probably already closed: %s', (traceback.format_exc()))
+ try: self.finish({'success': False, 'error': 'Failed returning results'})
+ except: pass
- def on_connection_close(self):
- cls = NonBlockHandler
+ self.removeStopper()
- for stop in cls.stoppers:
- stop(self.onNewMessage)
+ def removeStopper(self):
+ if self.stopper:
+ self.stopper(self.sendData)
- cls.stoppers = []
+ self.stopper = None
-def addApiView(route, func, static = False, docs = None, **kwargs):
- api.add_url_rule(route + ('' if static else '/'), endpoint = route.replace('.', '::') if route else 'index', view_func = func, **kwargs)
+def addNonBlockApiView(route, func_tuple, docs = None, **kwargs):
+ api_nonblock[route] = func_tuple
+
if docs:
api_docs[route[4:] if route[0:4] == 'api.' else route] = docs
else:
api_docs_missing.append(route)
-def addNonBlockApiView(route, func_tuple, docs = None, **kwargs):
- api_nonblock[route] = func_tuple
+
+# Blocking API handler
+class ApiHandler(RequestHandler):
+ route = None
+
+ @asynchronous
+ def get(self, route, *args, **kwargs):
+ self.route = route = route.strip('/')
+ if not api.get(route):
+ self.write('API call doesn\'t seem to exist')
+ self.finish()
+ return
+
+ # Create lock if it doesn't exist
+ if route in api_locks and not api_locks.get(route):
+ api_locks[route] = threading.Lock()
+
+ api_locks[route].acquire()
+
+ try:
+
+ kwargs = {}
+ for x in self.request.arguments:
+ kwargs[x] = urllib.unquote(self.get_argument(x))
+
+ # Split array arguments
+ kwargs = getParams(kwargs)
+ kwargs['_request'] = self
+
+        # Remove the 't' cache-busting argument added by the client
+ try: del kwargs['t']
+ except: pass
+
+ # Add async callback handler
+ run_handler(route, kwargs, callback = self.taskFinished)
+
+ except:
+ log.error('Failed doing api request "%s": %s', (route, traceback.format_exc()))
+ try:
+ self.write({'success': False, 'error': 'Failed returning results'})
+ self.finish()
+ except:
+ log.error('Failed write error "%s": %s', (route, traceback.format_exc()))
+
+ self.unlock()
+
+ post = get
+
+ def taskFinished(self, result, route):
+ IOLoop.current().add_callback(self.sendData, result, route)
+ self.unlock()
+
+ def sendData(self, result, route):
+
+ if not self.request.connection.stream.closed():
+ try:
+ # Check JSONP callback
+ jsonp_callback = self.get_argument('callback_func', default = None)
+
+ if jsonp_callback:
+ self.set_header('Content-Type', 'text/javascript')
+ self.finish(str(jsonp_callback) + '(' + json.dumps(result) + ')')
+ elif isinstance(result, tuple) and result[0] == 'redirect':
+ self.redirect(result[1])
+ else:
+ self.finish(result)
+ except UnicodeDecodeError:
+ log.error('Failed proper encode: %s', traceback.format_exc())
+ except:
+ log.debug('Failed doing request, probably already closed: %s', (traceback.format_exc()))
+ try: self.finish({'success': False, 'error': 'Failed returning results'})
+ except: pass
+
+ def unlock(self):
+ try: api_locks[self.route].release()
+ except: pass
+
+
+def addApiView(route, func, static = False, docs = None, **kwargs):
+
+ if static: func(route)
+ else:
+ api[route] = func
+ api_locks[route] = threading.Lock()
if docs:
api_docs[route[4:] if route[0:4] == 'api.' else route] = docs
else:
api_docs_missing.append(route)
-
-""" Api view """
-def index():
- index_url = url_for('web.index')
- return redirect(index_url + 'docs/')
-
-addApiView('', index)
diff --git a/couchpotato/core/_base/_core.py b/couchpotato/core/_base/_core.py
new file mode 100644
index 0000000000..47c8bb4e68
--- /dev/null
+++ b/couchpotato/core/_base/_core.py
@@ -0,0 +1,382 @@
+from uuid import uuid4
+import os
+import platform
+import signal
+import time
+import traceback
+import webbrowser
+import sys
+
+from couchpotato.api import addApiView
+from couchpotato.core.event import fireEvent, addEvent
+from couchpotato.core.helpers.variable import cleanHost, md5, isSubFolder, compareVersions
+from couchpotato.core.logger import CPLog
+from couchpotato.core.plugins.base import Plugin
+from couchpotato.environment import Env
+from tornado.ioloop import IOLoop
+
+
+log = CPLog(__name__)
+
+autoload = 'Core'
+
+
+class Core(Plugin):
+
+ ignore_restart = [
+ 'Core.restart', 'Core.shutdown',
+ 'Updater.check', 'Updater.autoUpdate',
+ ]
+ shutdown_started = False
+
+ def __init__(self):
+ addApiView('app.shutdown', self.shutdown, docs = {
+ 'desc': 'Shutdown the app.',
+ 'return': {'type': 'string: shutdown'}
+ })
+ addApiView('app.restart', self.restart, docs = {
+ 'desc': 'Restart the app.',
+ 'return': {'type': 'string: restart'}
+ })
+ addApiView('app.available', self.available, docs = {
+ 'desc': 'Check if app available.'
+ })
+ addApiView('app.version', self.versionView, docs = {
+ 'desc': 'Get version.'
+ })
+
+ addEvent('app.shutdown', self.shutdown)
+ addEvent('app.restart', self.restart)
+ addEvent('app.load', self.launchBrowser, priority = 1)
+ addEvent('app.base_url', self.createBaseUrl)
+ addEvent('app.api_url', self.createApiUrl)
+ addEvent('app.version', self.version)
+ addEvent('app.load', self.checkDataDir)
+ addEvent('app.load', self.cleanUpFolders)
+ addEvent('app.load.after', self.dependencies)
+
+ addEvent('setting.save.core.password', self.md5Password)
+ addEvent('setting.save.core.api_key', self.checkApikey)
+
+        # Make sure we can shut down properly with ctrl+c
+ if not Env.get('desktop'):
+ self.signalHandler()
+
+ # Set default urlopen timeout
+ import socket
+ socket.setdefaulttimeout(30)
+
+        # Don't verify ssl certificates by default (Python >= 2.7.9)
+ try:
+ if sys.version_info >= (2, 7, 9):
+ import ssl
+ ssl._create_default_https_context = ssl._create_unverified_context
+ except:
+ log.debug('Failed setting default ssl context: %s', traceback.format_exc())
+
+ def dependencies(self):
+
+ # Check if lxml is available
+ try: from lxml import etree
+ except: log.error('LXML not available, please install for better/faster scraping support: `http://lxml.de/installation.html`')
+
+ try:
+ import OpenSSL
+ v = OpenSSL.__version__
+ v_needed = '0.15'
+ if compareVersions(OpenSSL.__version__, v_needed) < 0:
+ log.error('OpenSSL installed but %s is needed while %s is installed. Run `pip install pyopenssl --upgrade`', (v_needed, v))
+
+ try:
+ import ssl
+ log.debug('OpenSSL detected: pyopenssl (%s) using OpenSSL (%s)', (v, ssl.OPENSSL_VERSION))
+ except:
+ pass
+ except:
+ log.error('OpenSSL not available, please install for better requests validation: `https://pyopenssl.readthedocs.org/en/latest/install.html`: %s', traceback.format_exc())
+
+ def md5Password(self, value):
+ return md5(value) if value else ''
+
+ def checkApikey(self, value):
+ return value if value and len(value) > 3 else uuid4().hex
+
+ def checkDataDir(self):
+ if isSubFolder(Env.get('data_dir'), Env.get('app_dir')):
+ log.error('You should NOT use your CouchPotato directory to save your settings in. Files will get overwritten or be deleted.')
+
+ return True
+
+ def cleanUpFolders(self):
+ only_clean = ['couchpotato', 'libs', 'init']
+ self.deleteEmptyFolder(Env.get('app_dir'), show_error = False, only_clean = only_clean)
+
+ def available(self, **kwargs):
+ return {
+ 'success': True
+ }
+
+ def shutdown(self, **kwargs):
+ if self.shutdown_started:
+ return False
+
+ def shutdown():
+ self.initShutdown()
+
+ if IOLoop.current()._closing:
+ shutdown()
+ else:
+ IOLoop.current().add_callback(shutdown)
+
+ return 'shutdown'
+
+ def restart(self, **kwargs):
+ if self.shutdown_started:
+ return False
+
+ def restart():
+ self.initShutdown(restart = True)
+ IOLoop.current().add_callback(restart)
+
+ return 'restarting'
+
+ def initShutdown(self, restart = False):
+ if self.shutdown_started:
+ log.info('Already shutting down')
+ return
+
+ log.info('Shutting down' if not restart else 'Restarting')
+
+ self.shutdown_started = True
+
+ fireEvent('app.do_shutdown', restart = restart)
+ log.debug('Every plugin got shutdown event')
+
+ loop = True
+ starttime = time.time()
+ while loop:
+ log.debug('Asking who is running')
+ still_running = fireEvent('plugin.running', merge = True)
+ log.debug('Still running: %s', still_running)
+
+ if len(still_running) == 0:
+ break
+ elif starttime < time.time() - 30: # Always force break after 30s wait
+ break
+
+ running = list(set(still_running) - set(self.ignore_restart))
+ if len(running) > 0:
+ log.info('Waiting on plugins to finish: %s', running)
+ else:
+ loop = False
+
+ time.sleep(1)
+
+ log.debug('Safe to shutdown/restart')
+
+ loop = IOLoop.current()
+
+ try:
+ if not loop._closing:
+ loop.stop()
+ except RuntimeError:
+ pass
+ except:
+ log.error('Failed shutting down the server: %s', traceback.format_exc())
+
+ fireEvent('app.after_shutdown', restart = restart)
+
+ def launchBrowser(self):
+
+ if Env.setting('launch_browser'):
+ log.info('Launching browser')
+
+ url = self.createBaseUrl()
+ try:
+ webbrowser.open(url, 2, 1)
+ except:
+ try:
+ webbrowser.open(url, 1, 1)
+ except:
+ log.error('Could not launch a browser.')
+
+ def createBaseUrl(self):
+ host = Env.setting('host')
+ if host == '0.0.0.0' or host == '':
+ host = 'localhost'
+ port = Env.setting('port')
+ ssl = Env.setting('ssl_cert') and Env.setting('ssl_key')
+
+ return '%s:%d%s' % (cleanHost(host, ssl = ssl).rstrip('/'), int(port), Env.get('web_base'))
+
+ def createApiUrl(self):
+ return '%sapi/%s' % (self.createBaseUrl(), Env.setting('api_key'))
+
+ def version(self):
+ ver = fireEvent('updater.info', single = True) or {'version': {}}
+
+ if os.name == 'nt': platf = 'windows'
+ elif 'Darwin' in platform.platform(): platf = 'osx'
+ else: platf = 'linux'
+
+ return '%s - %s-%s - v2' % (platf, ver.get('version').get('type') or 'unknown', ver.get('version').get('hash') or 'unknown')
+
+ def versionView(self, **kwargs):
+ return {
+ 'version': self.version()
+ }
+
+ def signalHandler(self):
+ if Env.get('daemonized'): return
+
+ def signal_handler(*args, **kwargs):
+ fireEvent('app.shutdown', single = True)
+
+ signal.signal(signal.SIGINT, signal_handler)
+ signal.signal(signal.SIGTERM, signal_handler)
+
+
+config = [{
+ 'name': 'core',
+ 'order': 1,
+ 'groups': [
+ {
+ 'tab': 'general',
+ 'name': 'basics',
+ 'description': 'Needs restart before changes take effect.',
+ 'wizard': True,
+ 'options': [
+ {
+ 'name': 'username',
+ 'default': '',
+ 'ui-meta' : 'rw',
+ },
+ {
+ 'name': 'password',
+ 'default': '',
+ 'type': 'password',
+ },
+ {
+ 'name': 'port',
+ 'default': 5050,
+ 'type': 'int',
+ 'description': 'The port I should listen to.',
+ },
+ {
+ 'name': 'languages',
+ 'default': 'fr, en',
+ 'ui-meta' : 'rw',
+ 'description': 'Langue pour le titre des films',
+ },
+ {
+ 'name': 'ipv6',
+ 'default': 0,
+ 'type': 'bool',
+ 'description': 'Also bind the WebUI to ipv6 address',
+ },
+ {
+ 'name': 'ssl_cert',
+ 'description': 'Path to SSL server.crt',
+ 'advanced': True,
+ },
+ {
+ 'name': 'ssl_key',
+ 'description': 'Path to SSL server.key',
+ 'advanced': True,
+ },
+ {
+ 'name': 'launch_browser',
+ 'default': True,
+ 'type': 'bool',
+ 'description': 'Launch the browser when I start.',
+ 'wizard': True,
+ },
+ {
+ 'name': 'dark_theme',
+ 'default': False,
+ 'type': 'bool',
+ 'description': 'For people with sensitive skin',
+ 'wizard': True,
+ },
+
+ ],
+ },
+ {
+ 'tab': 'general',
+ 'name': 'advanced',
+ 'description': "For those who know what they're doing",
+ 'advanced': True,
+ 'options': [
+ {
+ 'name': 'api_key',
+ 'default': uuid4().hex,
+ 'ui-meta' : 'ro',
+ 'description': 'Let 3rd party app do stuff. Docs ',
+ },
+ {
+ 'name': 'dereferer',
+ 'default': 'http://www.nullrefer.com/?',
+ 'description': 'Derefer links to external sites, keep empty for no dereferer. Example: http://www.dereferer.org/? or http://www.nullrefer.com/?.',
+ },
+ {
+ 'name': 'use_proxy',
+ 'default': 0,
+ 'type': 'bool',
+ 'description': 'Route outbound connections via proxy. Currently, only HTTP(S) proxies are supported. ',
+ },
+ {
+ 'name': 'proxy_server',
+ 'description': 'Override system default proxy server. Currently, only HTTP(S) proxies are supported. Ex. \"127.0.0.1:8080\" . Keep empty to use system default proxy server.',
+ },
+ {
+ 'name': 'proxy_username',
+ 'description': 'Only HTTP Basic Auth is supported. Leave blank to disable authentication.',
+ },
+ {
+ 'name': 'proxy_password',
+ 'type': 'password',
+ 'description': 'Leave blank for no password.',
+ },
+ {
+ 'name': 'bookmarklet_host',
+ 'description': 'Override default bookmarklet host. This can be useful in a reverse proxy environment. For example: "http://username:password@customHost:1020". Requires restart to take effect.',
+ 'advanced': True,
+ },
+ {
+ 'name': 'debug',
+ 'default': 0,
+ 'type': 'bool',
+ 'description': 'Enable debugging.',
+ },
+ {
+ 'name': 'development',
+ 'default': 0,
+ 'type': 'bool',
+ 'description': 'Enable this if you\'re developing, and NOT in any other case, thanks.',
+ },
+ {
+ 'name': 'data_dir',
+ 'type': 'directory',
+ 'description': 'Where cache/logs/etc are stored. Keep empty for defaults.',
+ },
+ {
+ 'name': 'url_base',
+ 'default': '',
+ 'description': 'When using mod_proxy use this to append the url with this.',
+ },
+ {
+ 'name': 'permission_folder',
+ 'default': '0755',
+ 'label': 'Folder CHMOD',
+ 'description': 'Can be either decimal (493) or octal (leading zero: 0755). Calculate the correct value ',
+ },
+ {
+ 'name': 'permission_file',
+ 'default': '0644',
+ 'label': 'File CHMOD',
+ 'description': 'See Folder CHMOD description, but for files',
+ },
+ ],
+ },
+ ],
+}]
diff --git a/couchpotato/core/_base/_core/__init__.py b/couchpotato/core/_base/_core/__init__.py
deleted file mode 100644
index c8c3fda68f..0000000000
--- a/couchpotato/core/_base/_core/__init__.py
+++ /dev/null
@@ -1,100 +0,0 @@
-from .main import Core
-from uuid import uuid4
-
-def start():
- return Core()
-
-config = [{
- 'name': 'core',
- 'order': 1,
- 'groups': [
- {
- 'tab': 'general',
- 'name': 'basics',
- 'description': 'Needs restart before changes take effect.',
- 'wizard': True,
- 'options': [
- {
- 'name': 'username',
- 'default': '',
- },
- {
- 'name': 'password',
- 'default': '',
- 'type': 'password',
- },
- {
- 'name': 'port',
- 'default': 5050,
- 'type': 'int',
- 'description': 'The port I should listen to.',
- },
- {
- 'name': 'ssl_cert',
- 'description': 'Path to SSL server.crt',
- 'advanced': True,
- },
- {
- 'name': 'ssl_key',
- 'description': 'Path to SSL server.key',
- 'advanced': True,
- },
- {
- 'name': 'launch_browser',
- 'default': True,
- 'type': 'bool',
- 'description': 'Launch the browser when I start.',
- 'wizard': True,
- },
- ],
- },
- {
- 'tab': 'general',
- 'name': 'advanced',
- 'description': "For those who know what they're doing",
- 'advanced': True,
- 'options': [
- {
- 'name': 'api_key',
- 'default': uuid4().hex,
- 'readonly': 1,
- 'description': 'Let 3rd party app do stuff. Docs ',
- },
- {
- 'name': 'debug',
- 'default': 0,
- 'type': 'bool',
- 'description': 'Enable debugging.',
- },
- {
- 'name': 'development',
- 'default': 0,
- 'type': 'bool',
- 'description': 'Disables some checks/downloads for faster reloading.',
- },
- {
- 'name': 'data_dir',
- 'type': 'directory',
- 'description': 'Where cache/logs/etc are stored. Keep empty for defaults.',
- },
- {
- 'name': 'url_base',
- 'default': '',
- 'description': 'When using mod_proxy use this to append the url with this.',
- },
- {
- 'name': 'permission_folder',
- 'default': '0755',
- 'label': 'Folder CHMOD',
- 'description': 'Can be either decimal (493) or octal (leading zero: 0755)',
- },
- {
- 'name': 'permission_file',
- 'default': '0755',
- 'label': 'File CHMOD',
- 'description': 'Same as Folder CHMOD but for files',
- },
- ],
- },
- ],
-}]
diff --git a/couchpotato/core/_base/_core/main.py b/couchpotato/core/_base/_core/main.py
deleted file mode 100644
index c91140fab3..0000000000
--- a/couchpotato/core/_base/_core/main.py
+++ /dev/null
@@ -1,185 +0,0 @@
-from couchpotato.api import addApiView
-from couchpotato.core.event import fireEvent, addEvent
-from couchpotato.core.helpers.request import jsonified
-from couchpotato.core.helpers.variable import cleanHost, md5
-from couchpotato.core.logger import CPLog
-from couchpotato.core.plugins.base import Plugin
-from couchpotato.environment import Env
-from tornado.ioloop import IOLoop
-from uuid import uuid4
-import os
-import platform
-import signal
-import time
-import traceback
-import webbrowser
-
-log = CPLog(__name__)
-
-
-class Core(Plugin):
-
- ignore_restart = [
- 'Core.restart', 'Core.shutdown',
- 'Updater.check', 'Updater.autoUpdate',
- ]
- shutdown_started = False
-
- def __init__(self):
- addApiView('app.shutdown', self.shutdown, docs = {
- 'desc': 'Shutdown the app.',
- 'return': {'type': 'string: shutdown'}
- })
- addApiView('app.restart', self.restart, docs = {
- 'desc': 'Restart the app.',
- 'return': {'type': 'string: restart'}
- })
- addApiView('app.available', self.available, docs = {
- 'desc': 'Check if app available.'
- })
- addApiView('app.version', self.versionView, docs = {
- 'desc': 'Get version.'
- })
-
- addEvent('app.shutdown', self.shutdown)
- addEvent('app.restart', self.restart)
- addEvent('app.load', self.launchBrowser, priority = 1)
- addEvent('app.base_url', self.createBaseUrl)
- addEvent('app.api_url', self.createApiUrl)
- addEvent('app.version', self.version)
- addEvent('app.load', self.checkDataDir)
-
- addEvent('setting.save.core.password', self.md5Password)
- addEvent('setting.save.core.api_key', self.checkApikey)
-
- # Make sure we can close-down with ctrl+c properly
- if not Env.get('desktop'):
- self.signalHandler()
-
- def md5Password(self, value):
- return md5(value.encode(Env.get('encoding'))) if value else ''
-
- def checkApikey(self, value):
- return value if value and len(value) > 3 else uuid4().hex
-
- def checkDataDir(self):
- if Env.get('app_dir') in Env.get('data_dir'):
- log.error('You should NOT use your CouchPotato directory to save your settings in. Files will get overwritten or be deleted.')
-
- return True
-
- def available(self):
- return jsonified({
- 'success': True
- })
-
- def shutdown(self):
- if self.shutdown_started:
- return False
-
- def shutdown():
- self.initShutdown()
- IOLoop.instance().add_callback(shutdown)
-
- return 'shutdown'
-
- def restart(self):
- if self.shutdown_started:
- return False
-
- def restart():
- self.initShutdown(restart = True)
- IOLoop.instance().add_callback(restart)
-
- return 'restarting'
-
- def initShutdown(self, restart = False):
- if self.shutdown_started:
- log.info('Already shutting down')
- return
-
- log.info('Shutting down' if not restart else 'Restarting')
-
- self.shutdown_started = True
-
- fireEvent('app.do_shutdown')
- log.debug('Every plugin got shutdown event')
-
- loop = True
- starttime = time.time()
- while loop:
- log.debug('Asking who is running')
- still_running = fireEvent('plugin.running', merge = True)
- log.debug('Still running: %s', still_running)
-
- if len(still_running) == 0:
- break
- elif starttime < time.time() - 30: # Always force break after 30s wait
- break
-
- running = list(set(still_running) - set(self.ignore_restart))
- if len(running) > 0:
- log.info('Waiting on plugins to finish: %s', running)
- else:
- loop = False
-
- time.sleep(1)
-
- log.debug('Save to shutdown/restart')
-
- try:
- IOLoop.instance().stop()
- except RuntimeError:
- pass
- except:
- log.error('Failed shutting down the server: %s', traceback.format_exc())
-
- fireEvent('app.after_shutdown', restart = restart)
-
- def launchBrowser(self):
-
- if Env.setting('launch_browser'):
- log.info('Launching browser')
-
- url = self.createBaseUrl()
- try:
- webbrowser.open(url, 2, 1)
- except:
- try:
- webbrowser.open(url, 1, 1)
- except:
- log.error('Could not launch a browser.')
-
- def createBaseUrl(self):
- host = Env.setting('host')
- if host == '0.0.0.0' or host == '':
- host = 'localhost'
- port = Env.setting('port')
-
- return '%s:%d%s' % (cleanHost(host).rstrip('/'), int(port), '/' + Env.setting('url_base').lstrip('/') if Env.setting('url_base') else '')
-
- def createApiUrl(self):
- return '%s/api/%s' % (self.createBaseUrl(), Env.setting('api_key'))
-
- def version(self):
- ver = fireEvent('updater.info', single = True)
-
- if os.name == 'nt': platf = 'windows'
- elif 'Darwin' in platform.platform(): platf = 'osx'
- else: platf = 'linux'
-
- return '%s - %s-%s - v2' % (platf, ver.get('version')['type'], ver.get('version')['hash'])
-
- def versionView(self):
- return jsonified({
- 'version': self.version()
- })
-
- def signalHandler(self):
- if Env.get('daemonized'): return
-
- def signal_handler(signal, frame):
- fireEvent('app.shutdown', single = True)
-
- signal.signal(signal.SIGINT, signal_handler)
- signal.signal(signal.SIGTERM, signal_handler)
diff --git a/couchpotato/core/_base/clientscript.py b/couchpotato/core/_base/clientscript.py
new file mode 100644
index 0000000000..ab52003755
--- /dev/null
+++ b/couchpotato/core/_base/clientscript.py
@@ -0,0 +1,57 @@
+import os
+
+from couchpotato.core.event import addEvent
+from couchpotato.core.helpers.variable import tryInt
+from couchpotato.core.logger import CPLog
+from couchpotato.core.plugins.base import Plugin
+from couchpotato.environment import Env
+
+
+log = CPLog(__name__)
+
+autoload = 'ClientScript'
+
+
+class ClientScript(Plugin):
+
+ paths = {
+ 'style': [
+ 'style/combined.min.css',
+ ],
+ 'script': [
+ 'scripts/combined.vendor.min.js',
+ 'scripts/combined.base.min.js',
+ 'scripts/combined.plugins.min.js',
+ ],
+ }
+
+ def __init__(self):
+ addEvent('clientscript.get_styles', self.getStyles)
+ addEvent('clientscript.get_scripts', self.getScripts)
+
+ self.makeRelative()
+
+ def makeRelative(self):
+
+ for static_type in self.paths:
+
+ updates_paths = []
+ for rel_path in self.paths.get(static_type):
+ file_path = os.path.join(Env.get('app_dir'), 'couchpotato', 'static', rel_path)
+ core_url = 'static/%s?%d' % (rel_path, tryInt(os.path.getmtime(file_path)))
+
+ updates_paths.append(core_url)
+
+ self.paths[static_type] = updates_paths
+
+ def getStyles(self, *args, **kwargs):
+ return self.get('style', *args, **kwargs)
+
+ def getScripts(self, *args, **kwargs):
+ return self.get('script', *args, **kwargs)
+
+ def get(self, type):
+ if type in self.paths:
+ return self.paths[type]
+
+ return []
diff --git a/couchpotato/core/_base/clientscript/__init__.py b/couchpotato/core/_base/clientscript/__init__.py
deleted file mode 100644
index 8490eae7e8..0000000000
--- a/couchpotato/core/_base/clientscript/__init__.py
+++ /dev/null
@@ -1,6 +0,0 @@
-from .main import ClientScript
-
-def start():
- return ClientScript()
-
-config = []
diff --git a/couchpotato/core/_base/clientscript/main.py b/couchpotato/core/_base/clientscript/main.py
deleted file mode 100644
index f2a30f6f67..0000000000
--- a/couchpotato/core/_base/clientscript/main.py
+++ /dev/null
@@ -1,169 +0,0 @@
-from couchpotato.core.event import addEvent
-from couchpotato.core.helpers.variable import tryInt
-from couchpotato.core.logger import CPLog
-from couchpotato.core.plugins.base import Plugin
-from couchpotato.environment import Env
-from minify.cssmin import cssmin
-from minify.jsmin import jsmin
-import os
-import traceback
-
-log = CPLog(__name__)
-
-
-class ClientScript(Plugin):
-
- core_static = {
- 'style': [
- 'style/main.css',
- 'style/uniform.generic.css',
- 'style/uniform.css',
- 'style/settings.css',
- ],
- 'script': [
- 'scripts/library/mootools.js',
- 'scripts/library/mootools_more.js',
- 'scripts/library/prefix_free.js',
- 'scripts/library/uniform.js',
- 'scripts/library/form_replacement/form_check.js',
- 'scripts/library/form_replacement/form_radio.js',
- 'scripts/library/form_replacement/form_dropdown.js',
- 'scripts/library/form_replacement/form_selectoption.js',
- 'scripts/library/question.js',
- 'scripts/library/scrollspy.js',
- 'scripts/library/spin.js',
- 'scripts/couchpotato.js',
- 'scripts/api.js',
- 'scripts/library/history.js',
- 'scripts/page.js',
- 'scripts/block.js',
- 'scripts/block/navigation.js',
- 'scripts/block/footer.js',
- 'scripts/block/menu.js',
- 'scripts/page/home.js',
- 'scripts/page/wanted.js',
- 'scripts/page/settings.js',
- 'scripts/page/about.js',
- 'scripts/page/manage.js',
- ],
- }
-
-
- urls = {'style': {}, 'script': {}, }
- minified = {'style': {}, 'script': {}, }
- paths = {'style': {}, 'script': {}, }
- comment = {
- 'style': '/*** %s:%d ***/\n',
- 'script': '// %s:%d\n'
- }
-
- html = {
- 'style': ' ',
- 'script': '',
- }
-
- def __init__(self):
- addEvent('register_style', self.registerStyle)
- addEvent('register_script', self.registerScript)
-
- addEvent('clientscript.get_styles', self.getStyles)
- addEvent('clientscript.get_scripts', self.getScripts)
-
- addEvent('app.load', self.minify)
-
- self.addCore()
-
- def addCore(self):
-
- for static_type in self.core_static:
- for rel_path in self.core_static.get(static_type):
- file_path = os.path.join(Env.get('app_dir'), 'couchpotato', 'static', rel_path)
- core_url = 'api/%s/static/%s?%s' % (Env.setting('api_key'), rel_path, tryInt(os.path.getmtime(file_path)))
-
- if static_type == 'script':
- self.registerScript(core_url, file_path, position = 'front')
- else:
- self.registerStyle(core_url, file_path, position = 'front')
-
-
- def minify(self):
-
- for file_type in ['style', 'script']:
- ext = 'js' if file_type is 'script' else 'css'
- positions = self.paths.get(file_type, {})
- for position in positions:
- files = positions.get(position)
- self._minify(file_type, files, position, position + '.' + ext)
-
- def _minify(self, file_type, files, position, out):
-
- cache = Env.get('cache_dir')
- out_name = 'minified_' + out
- out = os.path.join(cache, out_name)
-
- raw = []
- for file_path in files:
- f = open(file_path, 'r').read()
-
- if file_type == 'script':
- data = jsmin(f)
- else:
- data = cssmin(f)
- data = data.replace('../images/', '../static/images/')
-
- raw.append({'file': file_path, 'date': int(os.path.getmtime(file_path)), 'data': data})
-
- # Combine all files together with some comments
- data = ''
- for r in raw:
- data += self.comment.get(file_type) % (r.get('file'), r.get('date'))
- data += r.get('data') + '\n\n'
-
- self.createFile(out, data.strip())
-
- if not self.minified.get(file_type):
- self.minified[file_type] = {}
- if not self.minified[file_type].get(position):
- self.minified[file_type][position] = []
-
- minified_url = 'api/%s/file.cache/%s?%s' % (Env.setting('api_key'), out_name, tryInt(os.path.getmtime(out)))
- self.minified[file_type][position].append(minified_url)
-
- def getStyles(self, *args, **kwargs):
- return self.get('style', *args, **kwargs)
-
- def getScripts(self, *args, **kwargs):
- return self.get('script', *args, **kwargs)
-
- def get(self, type, as_html = False, location = 'head'):
-
- data = '' if as_html else []
-
- try:
- try:
- if not Env.get('dev'):
- return self.minified[type][location]
- except:
- pass
-
- return self.urls[type][location]
- except:
- log.error('Error getting minified %s, %s: %s', (type, location, traceback.format_exc()))
-
- return data
-
- def registerStyle(self, api_path, file_path, position = 'head'):
- self.register(api_path, file_path, 'style', position)
-
- def registerScript(self, api_path, file_path, position = 'head'):
- self.register(api_path, file_path, 'script', position)
-
- def register(self, api_path, file_path, type, location):
-
- if not self.urls[type].get(location):
- self.urls[type][location] = []
- self.urls[type][location].append(api_path)
-
- if not self.paths[type].get(location):
- self.paths[type][location] = []
- self.paths[type][location].append(file_path)
diff --git a/couchpotato/core/_base/desktop.py b/couchpotato/core/_base/desktop.py
new file mode 100644
index 0000000000..9a3656362f
--- /dev/null
+++ b/couchpotato/core/_base/desktop.py
@@ -0,0 +1,39 @@
+from couchpotato.core.event import fireEvent, addEvent
+from couchpotato.core.logger import CPLog
+from couchpotato.core.plugins.base import Plugin
+from couchpotato.environment import Env
+
+log = CPLog(__name__)
+
+autoload = 'Desktop'
+
+
+if Env.get('desktop'):
+
+ class Desktop(Plugin):
+
+ def __init__(self):
+
+ desktop = Env.get('desktop')
+ desktop.setSettings({
+ 'base_url': fireEvent('app.base_url', single = True),
+ 'api_url': fireEvent('app.api_url', single = True),
+ 'api': Env.setting('api'),
+ })
+
+ # Events from desktop
+ desktop.addEvents({
+ 'onClose': self.onClose,
+ })
+
+ # Events to desktop
+ addEvent('app.after_shutdown', desktop.afterShutdown)
+ addEvent('app.load', desktop.onAppLoad, priority = 110)
+
+ def onClose(self, event):
+ return fireEvent('app.shutdown', single = True)
+
+else:
+
+ class Desktop(Plugin):
+ pass
diff --git a/couchpotato/core/_base/desktop/__init__.py b/couchpotato/core/_base/desktop/__init__.py
deleted file mode 100644
index 064492f2e0..0000000000
--- a/couchpotato/core/_base/desktop/__init__.py
+++ /dev/null
@@ -1,6 +0,0 @@
-from .main import Desktop
-
-def start():
- return Desktop()
-
-config = []
diff --git a/couchpotato/core/_base/desktop/main.py b/couchpotato/core/_base/desktop/main.py
deleted file mode 100644
index c3beff17e7..0000000000
--- a/couchpotato/core/_base/desktop/main.py
+++ /dev/null
@@ -1,36 +0,0 @@
-from couchpotato.core.event import fireEvent, addEvent
-from couchpotato.core.logger import CPLog
-from couchpotato.core.plugins.base import Plugin
-from couchpotato.environment import Env
-
-log = CPLog(__name__)
-
-if Env.get('desktop'):
-
- class Desktop(Plugin):
-
- def __init__(self):
-
- desktop = Env.get('desktop')
- desktop.setSettings({
- 'base_url': fireEvent('app.base_url', single = True),
- 'api_url': fireEvent('app.api_url', single = True),
- 'api': Env.setting('api'),
- })
-
- # Events from desktop
- desktop.addEvents({
- 'onClose': self.onClose,
- })
-
- # Events to desktop
- addEvent('app.after_shutdown', desktop.afterShutdown)
- addEvent('app.load', desktop.onAppLoad, priority = 110)
-
- def onClose(self, event):
- return fireEvent('app.shutdown', single = True)
-
-else:
-
- class Desktop(Plugin):
- pass
diff --git a/couchpotato/core/_base/downloader/__init__.py b/couchpotato/core/_base/downloader/__init__.py
new file mode 100644
index 0000000000..0b9201bdbf
--- /dev/null
+++ b/couchpotato/core/_base/downloader/__init__.py
@@ -0,0 +1,20 @@
+from .main import Downloader
+
+
+def autoload():
+ return Downloader()
+
+
+config = [{
+ 'name': 'download_providers',
+ 'groups': [
+ {
+ 'label': 'Downloaders',
+ 'description': 'You can select different downloaders for each type (usenet / torrent)',
+ 'type': 'list',
+ 'name': 'download_providers',
+ 'tab': 'downloaders',
+ 'options': [],
+ },
+ ],
+}]
diff --git a/couchpotato/core/_base/downloader/main.py b/couchpotato/core/_base/downloader/main.py
new file mode 100644
index 0000000000..70126a883c
--- /dev/null
+++ b/couchpotato/core/_base/downloader/main.py
@@ -0,0 +1,230 @@
+from base64 import b32decode, b16encode
+import random
+import re
+
+from couchpotato.api import addApiView
+from couchpotato.core.event import addEvent
+from couchpotato.core.helpers.variable import mergeDicts
+from couchpotato.core.logger import CPLog
+from couchpotato.core.media._base.providers.base import Provider
+from couchpotato.core.plugins.base import Plugin
+
+
+log = CPLog(__name__)
+
+
+## This is here to load the static files
+class Downloader(Plugin):
+ pass
+
+
+class DownloaderBase(Provider):
+
+ protocol = []
+ http_time_between_calls = 0
+ status_support = True
+
+ torrent_sources = [
+ 'https://torcache.net/torrent/%s.torrent',
+ ]
+
+ torrent_trackers = [
+ 'udp://tracker.istole.it:80/announce',
+ 'http://tracker.istole.it/announce',
+ 'udp://fr33domtracker.h33t.com:3310/announce',
+ 'http://tracker.publicbt.com/announce',
+ 'udp://tracker.publicbt.com:80/announce',
+ 'http://tracker.ccc.de/announce',
+ 'udp://tracker.ccc.de:80/announce',
+ 'http://exodus.desync.com/announce',
+ 'http://exodus.desync.com:6969/announce',
+ 'http://tracker.publichd.eu/announce',
+ 'udp://tracker.publichd.eu:80/announce',
+ 'http://tracker.openbittorrent.com/announce',
+ 'udp://tracker.openbittorrent.com/announce',
+ 'udp://tracker.openbittorrent.com:80/announce',
+ 'udp://open.demonii.com:1337/announce',
+ ]
+
+ def __init__(self):
+ addEvent('download', self._download)
+ addEvent('download.enabled', self._isEnabled)
+ addEvent('download.enabled_protocols', self.getEnabledProtocol)
+ addEvent('download.status', self._getAllDownloadStatus)
+ addEvent('download.remove_failed', self._removeFailed)
+ addEvent('download.pause', self._pause)
+ addEvent('download.process_complete', self._processComplete)
+ addApiView('download.%s.test' % self.getName().lower(), self._test)
+
+ def getEnabledProtocol(self):
+ for download_protocol in self.protocol:
+ if self.isEnabled(manual = True, data = {'protocol': download_protocol}):
+ return self.protocol
+
+ return []
+
+ def _download(self, data = None, media = None, manual = False, filedata = None):
+ if not media: media = {}
+ if not data: data = {}
+
+ if self.isDisabled(manual, data):
+ return
+ return self.download(data = data, media = media, filedata = filedata)
+
+ def download(self, *args, **kwargs):
+ return False
+
+ def _getAllDownloadStatus(self, download_ids):
+ if self.isDisabled(manual = True, data = {}):
+ return
+
+ ids = [download_id['id'] for download_id in download_ids if download_id['downloader'] == self.getName()]
+
+ if ids:
+ return self.getAllDownloadStatus(ids)
+ else:
+ return
+
+ def getAllDownloadStatus(self, ids):
+ return []
+
+ def _removeFailed(self, release_download):
+ if self.isDisabled(manual = True, data = {}):
+ return
+
+ if release_download and release_download.get('downloader') == self.getName():
+ if self.conf('delete_failed'):
+ return self.removeFailed(release_download)
+
+ return False
+ return
+
+ def removeFailed(self, release_download):
+ return
+
+ def _processComplete(self, release_download):
+ if self.isDisabled(manual = True, data = {}):
+ return
+
+ if release_download and release_download.get('downloader') == self.getName():
+ if self.conf('remove_complete', default = False):
+ return self.processComplete(release_download = release_download, delete_files = self.conf('delete_files', default = False))
+
+ return False
+ return
+
+ def processComplete(self, release_download, delete_files):
+ return
+
+ def isCorrectProtocol(self, protocol):
+ is_correct = protocol in self.protocol
+
+ if not is_correct:
+ log.debug("Downloader doesn't support this protocol")
+
+ return is_correct
+
+ def magnetToTorrent(self, magnet_link):
+ torrent_hash = re.findall('urn:btih:([\w]{32,40})', magnet_link)[0].upper()
+
+ # Convert base 32 to hex
+ if len(torrent_hash) == 32:
+ torrent_hash = b16encode(b32decode(torrent_hash))
+
+ sources = self.torrent_sources
+ random.shuffle(sources)
+
+ for source in sources:
+ try:
+ filedata = self.urlopen(source % torrent_hash, headers = {'Referer': source % torrent_hash}, show_error = False)
+ if 'torcache' in filedata and 'file not found' in filedata.lower():
+ continue
+
+ return filedata
+ except:
+ log.debug('Torrent hash "%s" wasn\'t found on: %s', (torrent_hash, source))
+
+ log.error('Failed converting magnet url to torrent: %s', torrent_hash)
+ return False
+
+ def downloadReturnId(self, download_id):
+ return {
+ 'downloader': self.getName(),
+ 'status_support': self.status_support,
+ 'id': download_id
+ }
+
+ def isDisabled(self, manual = False, data = None):
+ if not data: data = {}
+
+ return not self.isEnabled(manual, data)
+
+ def _isEnabled(self, manual, data = None):
+ if not data: data = {}
+
+ if not self.isEnabled(manual, data):
+ return
+ return True
+
+ def isEnabled(self, manual = False, data = None):
+ if not data: data = {}
+
+ d_manual = self.conf('manual', default = False)
+ return super(DownloaderBase, self).isEnabled() and \
+ (d_manual and manual or d_manual is False) and \
+ (not data or self.isCorrectProtocol(data.get('protocol')))
+
+ def _test(self, **kwargs):
+ t = self.test()
+ if isinstance(t, tuple):
+ return {'success': t[0], 'msg': t[1]}
+ return {'success': t}
+
+ def test(self):
+ return False
+
+ def _pause(self, release_download, pause = True):
+ if self.isDisabled(manual = True, data = {}):
+ return
+
+ if release_download and release_download.get('downloader') == self.getName():
+ self.pause(release_download, pause)
+ return True
+
+ return False
+
+ def pause(self, release_download, pause):
+ return
+
+
+class ReleaseDownloadList(list):
+
+ provider = None
+
+ def __init__(self, provider, **kwargs):
+
+ self.provider = provider
+ self.kwargs = kwargs
+
+ super(ReleaseDownloadList, self).__init__()
+
+ def extend(self, results):
+ for r in results:
+ self.append(r)
+
+ def append(self, result):
+ new_result = self.fillResult(result)
+ super(ReleaseDownloadList, self).append(new_result)
+
+ def fillResult(self, result):
+
+ defaults = {
+ 'id': 0,
+ 'status': 'busy',
+ 'downloader': self.provider.getName(),
+ 'folder': '',
+ 'files': [],
+ }
+
+ return mergeDicts(defaults, result)
+
diff --git a/couchpotato/core/_base/downloader/static/downloaders.js b/couchpotato/core/_base/downloader/static/downloaders.js
new file mode 100644
index 0000000000..fd6185dfbd
--- /dev/null
+++ b/couchpotato/core/_base/downloader/static/downloaders.js
@@ -0,0 +1,76 @@
+var DownloadersBase = new Class({
+
+ Implements: [Events],
+
+ initialize: function(){
+ var self = this;
+
+ // Add test buttons to settings page
+ App.addEvent('loadSettings', self.addTestButtons.bind(self));
+
+ },
+
+ // Downloaders setting tests
+ addTestButtons: function(){
+ var self = this;
+
+ var setting_page = App.getPage('Settings');
+ setting_page.addEvent('create', function(){
+ Object.each(setting_page.tabs.downloaders.groups, self.addTestButton.bind(self));
+ });
+
+ },
+
+ addTestButton: function(fieldset, plugin_name){
+ var self = this,
+ button_name = self.testButtonName(fieldset);
+
+ if(button_name.contains('Downloaders')) return;
+
+ new Element('.ctrlHolder.test_button').grab(
+ new Element('a.button', {
+ 'text': button_name,
+ 'events': {
+ 'click': function(){
+ var button = fieldset.getElement('.test_button .button');
+ button.set('text', 'Connecting...');
+
+ Api.request('download.'+plugin_name+'.test', {
+ 'onComplete': function(json){
+
+ button.set('text', button_name);
+
+ var message;
+ if(json.success){
+ message = new Element('span.success', {
+ 'text': 'Connection successful'
+ }).inject(button, 'after');
+ }
+ else {
+ var msg_text = 'Connection failed. Check logs for details.';
+ if(json.hasOwnProperty('msg')) msg_text = json.msg;
+ message = new Element('span.failed', {
+ 'text': msg_text
+ }).inject(button, 'after');
+ }
+
+ requestTimeout(function(){
+ message.destroy();
+ }, 3000);
+ }
+ });
+ }
+ }
+ })
+ ).inject(fieldset);
+
+ },
+
+ testButtonName: function(fieldset){
+ var name = fieldset.getElement('h2 .group_label').get('text');
+ return 'Test '+name;
+ }
+
+});
+
+var Downloaders = new DownloadersBase();
diff --git a/couchpotato/core/_base/scheduler.py b/couchpotato/core/_base/scheduler.py
new file mode 100644
index 0000000000..271a2d8187
--- /dev/null
+++ b/couchpotato/core/_base/scheduler.py
@@ -0,0 +1,82 @@
+from apscheduler.scheduler import Scheduler as Sched
+from couchpotato.core.event import addEvent
+from couchpotato.core.logger import CPLog
+from couchpotato.core.plugins.base import Plugin
+
+log = CPLog(__name__)
+
+autoload = 'Scheduler'
+
+
+class Scheduler(Plugin):
+
+ crons = {}
+ intervals = {}
+ started = False
+
+ def __init__(self):
+
+ addEvent('schedule.cron', self.cron)
+ addEvent('schedule.interval', self.interval)
+ addEvent('schedule.remove', self.remove)
+ addEvent('schedule.queue', self.queue)
+
+ self.sched = Sched(misfire_grace_time = 60)
+ self.sched.start()
+ self.started = True
+
+ def remove(self, identifier):
+ for cron_type in ['intervals', 'crons']:
+ try:
+ self.sched.unschedule_job(getattr(self, cron_type)[identifier]['job'])
+ log.debug('%s unscheduled %s', (cron_type.capitalize(), identifier))
+ except:
+ pass
+
+ def doShutdown(self, *args, **kwargs):
+ self.stop()
+ return super(Scheduler, self).doShutdown(*args, **kwargs)
+
+ def stop(self):
+ if self.started:
+ log.debug('Stopping scheduler')
+ self.sched.shutdown(wait = False)
+ log.debug('Scheduler stopped')
+ self.started = False
+
+ def cron(self, identifier = '', handle = None, day = '*', hour = '*', minute = '*'):
+ log.info('Scheduling "%s", cron: day = %s, hour = %s, minute = %s', (identifier, day, hour, minute))
+
+ self.remove(identifier)
+ self.crons[identifier] = {
+ 'handle': handle,
+ 'day': day,
+ 'hour': hour,
+ 'minute': minute,
+ 'job': self.sched.add_cron_job(handle, day = day, hour = hour, minute = minute)
+ }
+
+ def interval(self, identifier = '', handle = None, hours = 0, minutes = 0, seconds = 0):
+ log.info('Scheduling %s, interval: hours = %s, minutes = %s, seconds = %s', (identifier, hours, minutes, seconds))
+
+ self.remove(identifier)
+ self.intervals[identifier] = {
+ 'handle': handle,
+ 'hours': hours,
+ 'minutes': minutes,
+ 'seconds': seconds,
+ 'job': self.sched.add_interval_job(handle, hours = hours, minutes = minutes, seconds = seconds)
+ }
+
+ return True
+
+ def queue(self, handlers = None):
+ if not handlers: handlers = []
+
+ for h in handlers:
+ h()
+
+ if self.shuttingDown():
+ break
+
+ return True
diff --git a/couchpotato/core/_base/scheduler/__init__.py b/couchpotato/core/_base/scheduler/__init__.py
deleted file mode 100644
index aa1c5c90b3..0000000000
--- a/couchpotato/core/_base/scheduler/__init__.py
+++ /dev/null
@@ -1,6 +0,0 @@
-from .main import Scheduler
-
-def start():
- return Scheduler()
-
-config = []
diff --git a/couchpotato/core/_base/scheduler/main.py b/couchpotato/core/_base/scheduler/main.py
deleted file mode 100644
index 4102552e4f..0000000000
--- a/couchpotato/core/_base/scheduler/main.py
+++ /dev/null
@@ -1,96 +0,0 @@
-from apscheduler.scheduler import Scheduler as Sched
-from couchpotato.core.event import addEvent
-from couchpotato.core.logger import CPLog
-from couchpotato.core.plugins.base import Plugin
-
-log = CPLog(__name__)
-
-
-class Scheduler(Plugin):
-
- crons = {}
- intervals = {}
- started = False
-
- def __init__(self):
-
- addEvent('schedule.cron', self.cron)
- addEvent('schedule.interval', self.interval)
- addEvent('schedule.start', self.start)
- addEvent('schedule.restart', self.start)
-
- addEvent('app.load', self.start)
-
- self.sched = Sched(misfire_grace_time = 60)
-
- def remove(self, identifier):
- for type in ['interval', 'cron']:
- try:
- self.sched.unschedule_job(getattr(self, type)[identifier]['job'])
- log.debug('%s unscheduled %s', (type.capitalize(), identifier))
- except:
- pass
-
- def start(self):
-
- # Stop all running
- self.stop()
-
- # Crons
- for identifier in self.crons:
- try:
- self.remove(identifier)
- cron = self.crons[identifier]
- job = self.sched.add_cron_job(cron['handle'], day = cron['day'], hour = cron['hour'], minute = cron['minute'])
- cron['job'] = job
- except ValueError, e:
- log.error('Failed adding cronjob: %s', e)
-
- # Intervals
- for identifier in self.intervals:
- try:
- self.remove(identifier)
- interval = self.intervals[identifier]
- job = self.sched.add_interval_job(interval['handle'], hours = interval['hours'], minutes = interval['minutes'], seconds = interval['seconds'])
- interval['job'] = job
- except ValueError, e:
- log.error('Failed adding interval cronjob: %s', e)
-
- # Start it
- log.debug('Starting scheduler')
- self.sched.start()
- self.started = True
- log.debug('Scheduler started')
-
- def doShutdown(self):
- super(Scheduler, self).doShutdown()
- self.stop()
-
- def stop(self):
- if self.started:
- log.debug('Stopping scheduler')
- self.sched.shutdown()
- log.debug('Scheduler stopped')
- self.started = False
-
- def cron(self, identifier = '', handle = None, day = '*', hour = '*', minute = '*'):
- log.info('Scheduling "%s", cron: day = %s, hour = %s, minute = %s', (identifier, day, hour, minute))
-
- self.remove(identifier)
- self.crons[identifier] = {
- 'handle': handle,
- 'day': day,
- 'hour': hour,
- 'minute': minute,
- }
-
- def interval(self, identifier = '', handle = None, hours = 0, minutes = 0, seconds = 0):
- log.info('Scheduling %s, interval: hours = %s, minutes = %s, seconds = %s', (identifier, hours, minutes, seconds))
-
- self.remove(identifier)
- self.intervals[identifier] = {
- 'handle': handle,
- 'hours': hours,
- 'minutes': minutes,
- 'seconds': seconds,
- }
diff --git a/couchpotato/core/_base/updater/__init__.py b/couchpotato/core/_base/updater/__init__.py
index a304f9e782..16e08b8193 100644
--- a/couchpotato/core/_base/updater/__init__.py
+++ b/couchpotato/core/_base/updater/__init__.py
@@ -1,8 +1,10 @@
+import os
+
from .main import Updater
from couchpotato.environment import Env
-import os
-def start():
+
+def autoload():
return Updater()
config = [{
diff --git a/couchpotato/core/_base/updater/main.py b/couchpotato/core/_base/updater/main.py
index 18d2c3034a..89788836eb 100644
--- a/couchpotato/core/_base/updater/main.py
+++ b/couchpotato/core/_base/updater/main.py
@@ -1,20 +1,26 @@
+import json
+import os
+import shutil
+import tarfile
+import time
+import traceback
+import zipfile
+from datetime import datetime
+from threading import RLock
+import re
+
from couchpotato.api import addApiView
from couchpotato.core.event import addEvent, fireEvent, fireEventAsync
-from couchpotato.core.helpers.encoding import ss
-from couchpotato.core.helpers.request import jsonified
+from couchpotato.core.helpers.encoding import sp
+from couchpotato.core.helpers.variable import removePyc, tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
from couchpotato.environment import Env
-from datetime import datetime
from dateutil.parser import parse
from git.repository import LocalRepository
-import json
-import os
-import shutil
-import tarfile
-import time
-import traceback
import version
+from six.moves import filter
+
log = CPLog(__name__)
@@ -22,21 +28,26 @@
class Updater(Plugin):
available_notified = False
+ _lock = RLock()
+ last_check = 'updater.last_checked'
def __init__(self):
if Env.get('desktop'):
self.updater = DesktopUpdater()
elif os.path.isdir(os.path.join(Env.get('app_dir'), '.git')):
- self.updater = GitUpdater(self.conf('git_command', default = 'git'))
+ git_default = 'git'
+ git_command = self.conf('git_command', default = git_default)
+ git_command = git_command if git_command != git_default and (os.path.isfile(git_command) or re.match('^[a-zA-Z0-9_/\.\-]+$', git_command)) else git_default
+ self.updater = GitUpdater(git_command)
else:
self.updater = SourceUpdater()
- fireEvent('schedule.interval', 'updater.check', self.autoUpdate, hours = 6)
- addEvent('app.load', self.autoUpdate)
+ addEvent('app.load', self.logVersion, priority = 10000)
+ addEvent('app.load', self.setCrons)
addEvent('updater.info', self.info)
- addApiView('updater.info', self.getInfo, docs = {
+ addApiView('updater.info', self.info, docs = {
'desc': 'Get updater information',
'return': {
'type': 'object',
@@ -52,8 +63,34 @@ def __init__(self):
'return': {'type': 'see updater.info'}
})
+ addEvent('setting.save.updater.enabled.after', self.setCrons)
+
+ def logVersion(self):
+ info = self.info()
+ log.info('=== VERSION %s, using %s ===', (info.get('version', {}).get('repr', 'UNKNOWN'), self.updater.getName()))
+
+ def setCrons(self):
+
+ fireEvent('schedule.remove', 'updater.check', single = True)
+ if self.isEnabled():
+ fireEvent('schedule.interval', 'updater.check', self.autoUpdate, hours = 24)
+ self.autoUpdate() # Check after enabling
+
def autoUpdate(self):
- if self.check() and self.conf('automatic') and not self.updater.update_failed:
+ do_check = True
+
+ try:
+ last_check = tryInt(Env.prop(self.last_check, default = 0))
+ now = tryInt(time.time())
+ do_check = last_check < now - 43200
+
+ if do_check:
+ Env.prop(self.last_check, value = now)
+ except:
+ log.error('Failed checking last time to update: %s', traceback.format_exc())
+
+ if do_check and self.isEnabled() and self.check() and self.conf('automatic') and not self.updater.update_failed:
+
if self.updater.doUpdate():
# Notify before restarting
@@ -61,7 +98,7 @@ def autoUpdate(self):
if self.conf('notification'):
info = self.updater.info()
version_date = datetime.fromtimestamp(info['update_version']['date'])
- fireEvent('updater.updated', 'Updated to a new version with hash "%s", this version is from %s' % (info['update_version']['hash'], version_date), data = info)
+ fireEvent('updater.updated', 'CouchPotato: Updated to a new version with hash "%s", this version is from %s' % (info['update_version']['hash'], version_date), data = info)
except:
log.error('Failed notifying for update: %s', traceback.format_exc())
@@ -71,31 +108,40 @@ def autoUpdate(self):
return False
- def check(self):
- if self.isDisabled():
+ def check(self, force = False):
+ if not force and self.isDisabled():
return
if self.updater.check():
if not self.available_notified and self.conf('notification') and not self.conf('automatic'):
- fireEvent('updater.available', message = 'A new update is available', data = self.updater.info())
+ info = self.updater.info()
+ version_date = datetime.fromtimestamp(info['update_version']['date'])
+ fireEvent('updater.available', message = 'A new update with hash "%s" is available, this version is from %s' % (info['update_version']['hash'], version_date), data = info)
self.available_notified = True
return True
return False
- def info(self):
- return self.updater.info()
+ def info(self, **kwargs):
+ self._lock.acquire()
+
+ info = {}
+ try:
+ info = self.updater.info()
+ except:
+ log.error('Failed getting updater info: %s', traceback.format_exc())
+
+ self._lock.release()
- def getInfo(self):
- return jsonified(self.updater.info())
+ return info
- def checkView(self):
- return jsonified({
- 'update_available': self.check(),
+ def checkView(self, **kwargs):
+ return {
+ 'update_available': self.check(force = True),
'info': self.updater.info()
- })
+ }
- def doUpdateView(self):
+ def doUpdateView(self, **kwargs):
self.check()
if not self.updater.update_version:
@@ -110,14 +156,20 @@ def doUpdateView(self):
if not success:
success = True
- return jsonified({
+ return {
'success': success
- })
+ }
+
+ def doShutdown(self, *args, **kwargs):
+ if not Env.get('dev') and not Env.get('desktop'):
+ removePyc(Env.get('app_dir'), show_logs = False)
+
+ return super(Updater, self).doShutdown(*args, **kwargs)
class BaseUpdater(Plugin):
- repo_user = 'RuudBurger'
+ repo_user = 'cyberden'
repo_name = 'CouchPotatoServer'
branch = version.BRANCH
@@ -129,64 +181,46 @@ class BaseUpdater(Plugin):
def doUpdate(self):
pass
- def getInfo(self):
- return jsonified(self.info())
-
def info(self):
+
+ current_version = self.getVersion()
+
return {
'last_check': self.last_check,
'update_version': self.update_version,
- 'version': self.getVersion(),
+ 'version': current_version,
'repo_name': '%s/%s' % (self.repo_user, self.repo_name),
- 'branch': self.branch,
+ 'branch': current_version.get('branch', self.branch),
}
- def check(self):
+ def getVersion(self):
pass
- def deletePyc(self, only_excess = True):
-
- for root, dirs, files in os.walk(ss(Env.get('app_dir'))):
-
- pyc_files = filter(lambda filename: filename.endswith('.pyc'), files)
- py_files = set(filter(lambda filename: filename.endswith('.py'), files))
- excess_pyc_files = filter(lambda pyc_filename: pyc_filename[:-1] not in py_files, pyc_files) if only_excess else pyc_files
-
- for excess_pyc_file in excess_pyc_files:
- full_path = os.path.join(root, excess_pyc_file)
- log.debug('Removing old PYC file: %s', full_path)
- try:
- os.remove(full_path)
- except:
- log.error('Couldn\'t remove %s: %s', (full_path, traceback.format_exc()))
-
- for dir_name in dirs:
- full_path = os.path.join(root, dir_name)
- if len(os.listdir(full_path)) == 0:
- try:
- os.rmdir(full_path)
- except:
- log.error('Couldn\'t remove empty directory %s: %s', (full_path, traceback.format_exc()))
-
+ def check(self):
+ pass
class GitUpdater(BaseUpdater):
+ old_repo = 'cyberden/CouchPotatoServer'
+ new_repo = 'cyberden/CouchPotatoServer'
+
def __init__(self, git_command):
self.repo = LocalRepository(Env.get('app_dir'), command = git_command)
+ remote_name = 'origin'
+ remote = self.repo.getRemoteByName(remote_name)
+ if self.old_repo in remote.url:
+ log.info('Changing repo to new github organization: %s -> %s', (self.old_repo, self.new_repo))
+ new_url = remote.url.replace(self.old_repo, self.new_repo)
+ self.repo._executeGitCommandAssertSuccess("remote set-url %s %s" % (remote_name, new_url))
+
def doUpdate(self):
try:
- log.debug('Stashing local changes')
- self.repo.saveStash()
-
log.info('Updating to latest version')
self.repo.pull()
- # Delete leftover .pyc files
- self.deletePyc()
-
return True
except:
log.error('Failed updating via GIT: %s', traceback.format_exc())
@@ -198,17 +232,28 @@ def doUpdate(self):
def getVersion(self):
if not self.version:
+
+ hash = None
+ date = None
+ branch = self.branch
+
try:
- output = self.repo.getHead() # Yes, please
+ output = self.repo.getHead() # Yes, please
log.debug('Git version output: %s', output.hash)
- self.version = {
- 'hash': output.hash[:8],
- 'date': output.getDate(),
- 'type': 'git',
- }
- except Exception, e:
+
+ hash = output.hash[:8]
+ date = output.getDate()
+ branch = self.repo.getCurrentBranch().name
+ except Exception as e:
log.error('Failed using GIT updater, running from source, you need to have GIT installed. %s', e)
- return 'No GIT'
+
+ self.version = {
+ 'repr': 'git:(%s:%s % s) %s (%s)' % (self.repo_user, self.repo_name, branch, hash or 'unknown_hash', datetime.fromtimestamp(date) if date else 'unknown_date'),
+ 'hash': hash,
+ 'date': date,
+ 'type': 'git',
+ 'branch': branch
+ }
return self.version
@@ -229,7 +274,7 @@ def check(self):
local = self.repo.getHead()
remote = branch.getHead()
- log.info('Versions, local:%s, remote:%s', (local.hash[:8], remote.hash[:8]))
+ log.debug('Versions, local:%s, remote:%s', (local.hash[:8], remote.hash[:8]))
if local.getDate() < remote.getDate():
self.update_version = {
@@ -242,7 +287,6 @@ def check(self):
return False
-
class SourceUpdater(BaseUpdater):
def __init__(self):
@@ -255,11 +299,11 @@ def __init__(self):
def doUpdate(self):
try:
- url = 'https://github.com/%s/%s/tarball/%s' % (self.repo_user, self.repo_name, self.branch)
- destination = os.path.join(Env.get('cache_dir'), self.update_version.get('hash') + '.tar.gz')
- extracted_path = os.path.join(Env.get('cache_dir'), 'temp_updater')
+ download_data = fireEvent('cp.source_url', repo = self.repo_user, repo_name = self.repo_name, branch = self.branch, single = True)
+ destination = os.path.join(Env.get('cache_dir'), self.update_version.get('hash')) + '.' + download_data.get('type')
- destination = fireEvent('file.download', url = url, dest = destination, single = True)
+ extracted_path = os.path.join(Env.get('cache_dir'), 'temp_updater')
+ destination = fireEvent('file.download', url = download_data.get('url'), dest = destination, single = True)
# Cleanup leftover from last time
if os.path.isdir(extracted_path):
@@ -267,9 +311,15 @@ def doUpdate(self):
self.makeDir(extracted_path)
# Extract
- tar = tarfile.open(destination)
- tar.extractall(path = extracted_path)
- tar.close()
+ if download_data.get('type') == 'zip':
+ zip_file = zipfile.ZipFile(destination)
+ zip_file.extractall(extracted_path)
+ zip_file.close()
+ else:
+ tar = tarfile.open(destination)
+ tar.extractall(path = extracted_path)
+ tar.close()
+
os.remove(destination)
if self.replaceWith(os.path.join(extracted_path, os.listdir(extracted_path)[0])):
@@ -286,10 +336,12 @@ def doUpdate(self):
return False
def replaceWith(self, path):
- app_dir = ss(Env.get('app_dir'))
+ path = sp(path)
+ app_dir = Env.get('app_dir')
+ data_dir = Env.get('data_dir')
# Get list of files we want to overwrite
- self.deletePyc()
+ removePyc(app_dir)
existing_files = []
for root, subfiles, filenames in os.walk(app_dir):
for filename in filenames:
@@ -318,22 +370,24 @@ def replaceWith(self, path):
log.error('Failed overwriting file "%s": %s', (tofile, traceback.format_exc()))
return False
- if Env.get('app_dir') not in Env.get('data_dir'):
- for still_exists in existing_files:
- try:
- os.remove(still_exists)
- except:
- log.error('Failed removing non-used file: %s', traceback.format_exc())
+ for still_exists in existing_files:
- return True
+ if data_dir in still_exists:
+ continue
+
+ try:
+ os.remove(still_exists)
+ except:
+ log.error('Failed removing non-used file: %s', traceback.format_exc())
+ return True
def removeDir(self, path):
try:
if os.path.isdir(path):
shutil.rmtree(path)
- except OSError, inst:
- os.chmod(inst.filename, 0777)
+ except OSError as inst:
+ os.chmod(inst.filename, 0o777)
self.removeDir(path)
def getVersion(self):
@@ -347,7 +401,8 @@ def getVersion(self):
log.debug('Source version output: %s', output)
self.version = output
self.version['type'] = 'source'
- except Exception, e:
+ self.version['repr'] = 'source:(%s:%s:%s) %s (%s)' % (self.repo_user, self.repo_name, self.branch, output.get('hash', '')[:8], datetime.fromtimestamp(output.get('date', 0)))
+ except Exception as e:
log.error('Failed using source updater. %s', e)
return {}
@@ -377,7 +432,7 @@ def latestCommit(self):
return {
'hash': commit['sha'],
- 'date': int(time.mktime(parse(commit['commit']['committer']['date']).timetuple())),
+ 'date': int(time.mktime(parse(commit['commit']['committer']['date']).timetuple())),
}
except:
log.error('Failed getting latest request from github: %s', traceback.format_exc())
@@ -422,7 +477,7 @@ def check(self):
if latest and latest != current_version.get('hash'):
self.update_version = {
'hash': latest,
- 'date': None,
+ 'date': None,
'changelog': self.desktop._changelogURL,
}
@@ -434,6 +489,7 @@ def check(self):
def getVersion(self):
return {
+ 'repr': 'desktop: %s' % self.desktop._esky.active_version,
'hash': self.desktop._esky.active_version,
'date': None,
'type': 'desktop',
diff --git a/couchpotato/core/_base/updater/static/updater.js b/couchpotato/core/_base/updater/static/updater.js
index cc17be5579..158c8d9c4a 100644
--- a/couchpotato/core/_base/updater/static/updater.js
+++ b/couchpotato/core/_base/updater/static/updater.js
@@ -5,10 +5,10 @@ var UpdaterBase = new Class({
initialize: function(){
var self = this;
- App.addEvent('load', self.info.bind(self, 1000))
+ App.addEvent('load', self.info.bind(self, 2000));
App.addEvent('unload', function(){
if(self.timer)
- clearTimeout(self.timer);
+ clearRequestTimeout(self.timer);
});
},
@@ -24,19 +24,19 @@ var UpdaterBase = new Class({
self.doUpdate();
else {
App.unBlockPage();
- App.fireEvent('message', 'No updates available');
+ App.trigger('message', ['No updates available']);
}
}
- })
+ });
},
info: function(timeout){
var self = this;
- if(self.timer) clearTimeout(self.timer);
+ if(self.timer) clearRequestTimeout(self.timer);
- self.timer = setTimeout(function(){
+ self.timer = requestTimeout(function(){
Api.request('updater.info', {
'onComplete': function(json){
self.json = json;
@@ -50,8 +50,8 @@ var UpdaterBase = new Class({
self.message.destroy();
}
}
- })
- }, (timeout || 0))
+ });
+ }, (timeout || 0));
},
@@ -66,7 +66,7 @@ var UpdaterBase = new Class({
var changelog = 'https://github.com/'+data.repo_name+'/compare/'+data.version.hash+'...'+data.branch;
if(data.update_version.changelog)
- changelog = data.update_version.changelog + '#' + data.version.hash+'...'+data.update_version.hash
+ changelog = data.update_version.changelog + '#' + data.version.hash+'...'+data.update_version.hash;
self.message = new Element('div.message.update').adopt(
new Element('span', {
@@ -84,7 +84,7 @@ var UpdaterBase = new Class({
'click': self.doUpdate.bind(self)
}
})
- ).inject($(document.body).getElement('.header'))
+ ).inject(App.getBlock('footer'));
},
doUpdate: function(){
@@ -96,15 +96,17 @@ var UpdaterBase = new Class({
if(json.success)
self.updating();
else
- App.unBlockPage()
+ App.unBlockPage();
}
});
},
updating: function(){
- App.checkAvailable.delay(500, App, [1000, function(){
- window.location.reload();
- }]);
+ requestTimeout(function(){
+ App.checkAvailable(1000, function(){
+ window.location.reload();
+ });
+ }, 500);
if(self.message)
self.message.destroy();
}
diff --git a/couchpotato/core/_base/updater/static/updater.scss b/couchpotato/core/_base/updater/static/updater.scss
new file mode 100644
index 0000000000..b83c1bb337
--- /dev/null
+++ b/couchpotato/core/_base/updater/static/updater.scss
@@ -0,0 +1,17 @@
+@import "_mixins";
+
+.update.message {
+ @include theme(background, background);
+ padding: $padding;
+ text-align: center;
+ font-size: 1.25em;
+
+ @include media-tablet {
+ font-size: 1em;
+ }
+
+ a {
+ @include theme(color, primary);
+ padding: $padding/4;
+ }
+}
diff --git a/couchpotato/core/auth.py b/couchpotato/core/auth.py
deleted file mode 100644
index 0111b9abdf..0000000000
--- a/couchpotato/core/auth.py
+++ /dev/null
@@ -1,26 +0,0 @@
-from couchpotato.core.helpers.variable import md5
-from couchpotato.environment import Env
-from flask import request, Response
-from functools import wraps
-
-def check_auth(username, password):
- return username == Env.setting('username') and password == Env.setting('password')
-
-def authenticate():
- return Response(
- 'This is not the page you are looking for. *waves hand*', 401,
- {'WWW-Authenticate': 'Basic realm="CouchPotato Login"'}
- )
-
-def requires_auth(f):
-
- @wraps(f)
- def decorated(*args, **kwargs):
- auth = getattr(request, 'authorization')
- if Env.setting('username') and Env.setting('password'):
- if (not auth or not check_auth(auth.username.decode('latin1'), md5(auth.password.decode('latin1').encode(Env.get('encoding'))))):
- return authenticate()
-
- return f(*args, **kwargs)
-
- return decorated
diff --git a/couchpotato/core/database.py b/couchpotato/core/database.py
new file mode 100644
index 0000000000..bed427e979
--- /dev/null
+++ b/couchpotato/core/database.py
@@ -0,0 +1,639 @@
+import json
+import os
+import time
+import traceback
+from sqlite3 import OperationalError
+
+from CodernityDB.database import RecordNotFound
+from CodernityDB.index import IndexException, IndexNotFoundException, IndexConflict
+from couchpotato import CPLog
+from couchpotato.api import addApiView
+from couchpotato.core.event import addEvent, fireEvent, fireEventAsync
+from couchpotato.core.helpers.encoding import toUnicode, sp
+from couchpotato.core.helpers.variable import getImdb, tryInt, randomString
+
+
+log = CPLog(__name__)
+
+
+class Database(object):
+
+ indexes = None
+ db = None
+
+ def __init__(self):
+
+ self.indexes = {}
+
+ addApiView('database.list_documents', self.listDocuments)
+ addApiView('database.reindex', self.reindex)
+ addApiView('database.compact', self.compact)
+ addApiView('database.document.update', self.updateDocument)
+ addApiView('database.document.delete', self.deleteDocument)
+
+ addEvent('database.setup.after', self.startup_compact)
+ addEvent('database.setup_index', self.setupIndex)
+ addEvent('database.delete_corrupted', self.deleteCorrupted)
+
+ addEvent('app.migrate', self.migrate)
+ addEvent('app.after_shutdown', self.close)
+
+ def getDB(self):
+
+ if not self.db:
+ from couchpotato import get_db
+ self.db = get_db()
+
+ return self.db
+
+ def close(self, **kwargs):
+ self.getDB().close()
+
+ def setupIndex(self, index_name, klass):
+
+ self.indexes[index_name] = klass
+
+ db = self.getDB()
+
+ # Category index
+ index_instance = klass(db.path, index_name)
+ try:
+
+ # Make sure store and bucket don't exist
+ exists = []
+ for x in ['buck', 'stor']:
+ full_path = os.path.join(db.path, '%s_%s' % (index_name, x))
+ if os.path.exists(full_path):
+ exists.append(full_path)
+
+ if index_name not in db.indexes_names:
+
+ # Remove existing buckets if index isn't there
+ for x in exists:
+ os.unlink(x)
+
+ # Add index (will restore buckets)
+ db.add_index(index_instance)
+ db.reindex_index(index_name)
+ else:
+ # Previous info
+ previous = db.indexes_names[index_name]
+ previous_version = previous._version
+ current_version = klass._version
+
+ # Only edit index if versions are different
+ if previous_version < current_version:
+ log.debug('Index "%s" already exists, updating and reindexing', index_name)
+ db.destroy_index(previous)
+ db.add_index(index_instance)
+ db.reindex_index(index_name)
+
+ except:
+ log.error('Failed adding index %s: %s', (index_name, traceback.format_exc()))
+
+ def deleteDocument(self, **kwargs):
+
+ db = self.getDB()
+
+ try:
+
+ document_id = kwargs.get('_request').get_argument('id')
+ document = db.get('id', document_id)
+ db.delete(document)
+
+ return {
+ 'success': True
+ }
+ except:
+ return {
+ 'success': False,
+ 'error': traceback.format_exc()
+ }
+
+ def updateDocument(self, **kwargs):
+
+ db = self.getDB()
+
+ try:
+
+ document = json.loads(kwargs.get('_request').get_argument('document'))
+ d = db.update(document)
+ document.update(d)
+
+ return {
+ 'success': True,
+ 'document': document
+ }
+ except:
+ return {
+ 'success': False,
+ 'error': traceback.format_exc()
+ }
+
+ def listDocuments(self, **kwargs):
+ db = self.getDB()
+
+ results = {
+ 'unknown': []
+ }
+
+ for document in db.all('id'):
+ key = document.get('_t', 'unknown')
+
+ if kwargs.get('show') and key != kwargs.get('show'):
+ continue
+
+ if not results.get(key):
+ results[key] = []
+ results[key].append(document)
+
+ return results
+
+ def deleteCorrupted(self, _id, traceback_error = ''):
+
+ db = self.getDB()
+
+ try:
+ log.debug('Deleted corrupted document "%s": %s', (_id, traceback_error))
+ corrupted = db.get('id', _id, with_storage = False)
+ db._delete_id_index(corrupted.get('_id'), corrupted.get('_rev'), None)
+ except:
+ log.debug('Failed deleting corrupted: %s', traceback.format_exc())
+
+ def reindex(self, **kwargs):
+
+ success = True
+ try:
+ db = self.getDB()
+ db.reindex()
+ except:
+ log.error('Failed index: %s', traceback.format_exc())
+ success = False
+
+ return {
+ 'success': success
+ }
+
+ def compact(self, try_repair = True, **kwargs):
+
+ success = False
+ db = self.getDB()
+
+ # Removing left over compact files
+ db_path = sp(db.path)
+ for f in os.listdir(sp(db.path)):
+ for x in ['_compact_buck', '_compact_stor']:
+ if f[-len(x):] == x:
+ os.unlink(os.path.join(db_path, f))
+
+ try:
+ start = time.time()
+ size = float(db.get_db_details().get('size', 0))
+ log.debug('Compacting database, current size: %sMB', round(size/1048576, 2))
+
+ db.compact()
+ new_size = float(db.get_db_details().get('size', 0))
+ log.debug('Done compacting database in %ss, new size: %sMB, saved: %sMB', (round(time.time()-start, 2), round(new_size/1048576, 2), round((size-new_size)/1048576, 2)))
+ success = True
+ except (IndexException, AttributeError):
+ if try_repair:
+ log.error('Something wrong with indexes, trying repair')
+
+ # Remove all indexes
+ old_indexes = self.indexes.keys()
+ for index_name in old_indexes:
+ try:
+ db.destroy_index(index_name)
+ except IndexNotFoundException:
+ pass
+ except:
+ log.error('Failed removing old index %s', index_name)
+
+ # Add them again
+ for index_name in self.indexes:
+ klass = self.indexes[index_name]
+
+ # Category index
+ index_instance = klass(db.path, index_name)
+ try:
+ db.add_index(index_instance)
+ db.reindex_index(index_name)
+ except IndexConflict:
+ pass
+ except:
+ log.error('Failed adding index %s', index_name)
+ raise
+
+ self.compact(try_repair = False)
+ else:
+ log.error('Failed compact: %s', traceback.format_exc())
+
+ except:
+ log.error('Failed compact: %s', traceback.format_exc())
+
+ return {
+ 'success': success
+ }
+
+ # Compact on start
+ def startup_compact(self):
+ from couchpotato import Env
+
+ db = self.getDB()
+
+ # Try fix for migration failures on desktop
+ if Env.get('desktop'):
+ try:
+ list(db.all('profile', with_doc = True))
+ except RecordNotFound:
+
+ failed_location = '%s_failed' % db.path
+ old_db = os.path.join(Env.get('data_dir'), 'couchpotato.db.old')
+
+ if not os.path.isdir(failed_location) and os.path.isfile(old_db):
+ log.error('Corrupt database, trying migrate again')
+ db.close()
+
+ # Rename database folder
+ os.rename(db.path, '%s_failed' % db.path)
+
+ # Rename .old database to try another migrate
+ os.rename(old_db, old_db[:-4])
+
+ fireEventAsync('app.restart')
+ else:
+ log.error('Migration failed and couldn\'t recover database. Please report on GitHub, with this message.')
+ db.reindex()
+
+ return
+
+ # Check size and compact if needed
+ size = db.get_db_details().get('size')
+ prop_name = 'last_db_compact'
+ last_check = int(Env.prop(prop_name, default = 0))
+
+ if last_check < time.time()-604800: # 7 days
+ self.compact()
+ Env.prop(prop_name, value = int(time.time()))
+
+ def migrate(self):
+
+ from couchpotato import Env
+ old_db = os.path.join(Env.get('data_dir'), 'couchpotato.db')
+ if not os.path.isfile(old_db): return
+
+ log.info('=' * 30)
+ log.info('Migrating database, hold on..')
+ time.sleep(1)
+
+ if os.path.isfile(old_db):
+
+ migrate_start = time.time()
+
+ import sqlite3
+ conn = sqlite3.connect(old_db)
+
+ migrate_list = {
+ 'category': ['id', 'label', 'order', 'required', 'preferred', 'ignored', 'destination'],
+ 'profile': ['id', 'label', 'order', 'core', 'hide'],
+ 'profiletype': ['id', 'order', 'finish', 'wait_for', 'quality_id', 'profile_id'],
+ 'quality': ['id', 'identifier', 'order', 'size_min', 'size_max'],
+ 'movie': ['id', 'last_edit', 'library_id', 'status_id', 'profile_id', 'category_id'],
+ 'library': ['id', 'identifier', 'info'],
+ 'librarytitle': ['id', 'title', 'default', 'libraries_id'],
+ 'library_files__file_library': ['library_id', 'file_id'],
+ 'release': ['id', 'identifier', 'movie_id', 'status_id', 'quality_id', 'last_edit'],
+ 'releaseinfo': ['id', 'identifier', 'value', 'release_id'],
+ 'release_files__file_release': ['release_id', 'file_id'],
+ 'status': ['id', 'identifier'],
+ 'properties': ['id', 'identifier', 'value'],
+ 'file': ['id', 'path', 'type_id'],
+ 'filetype': ['identifier', 'id']
+ }
+
+ migrate_data = {}
+ rename_old = False
+
+ try:
+
+ c = conn.cursor()
+
+ for ml in migrate_list:
+ migrate_data[ml] = {}
+ rows = migrate_list[ml]
+
+ try:
+ c.execute('SELECT %s FROM `%s`' % ('`' + '`,`'.join(rows) + '`', ml))
+ except:
+ # ignore faulty destination_id database
+ if ml == 'category':
+ migrate_data[ml] = {}
+ else:
+ rename_old = True
+ raise
+
+ for p in c.fetchall():
+ columns = {}
+ for row in migrate_list[ml]:
+ columns[row] = p[rows.index(row)]
+
+ if not migrate_data[ml].get(p[0]):
+ migrate_data[ml][p[0]] = columns
+ else:
+ if not isinstance(migrate_data[ml][p[0]], list):
+ migrate_data[ml][p[0]] = [migrate_data[ml][p[0]]]
+ migrate_data[ml][p[0]].append(columns)
+
+ conn.close()
+
+ log.info('Getting data took %s', time.time() - migrate_start)
+
+ db = self.getDB()
+ if not db.opened:
+ return
+
+ # Use properties
+ properties = migrate_data['properties']
+ log.info('Importing %s properties', len(properties))
+ for x in properties:
+ property = properties[x]
+ Env.prop(property.get('identifier'), property.get('value'))
+
+ # Categories
+ categories = migrate_data.get('category', [])
+ log.info('Importing %s categories', len(categories))
+ category_link = {}
+ for x in categories:
+ c = categories[x]
+
+ new_c = db.insert({
+ '_t': 'category',
+ 'order': c.get('order', 999),
+ 'label': toUnicode(c.get('label', '')),
+ 'ignored': toUnicode(c.get('ignored', '')),
+ 'preferred': toUnicode(c.get('preferred', '')),
+ 'required': toUnicode(c.get('required', '')),
+ 'destination': toUnicode(c.get('destination', '')),
+ })
+
+ category_link[x] = new_c.get('_id')
+
+ # Profiles
+ log.info('Importing profiles')
+ new_profiles = db.all('profile', with_doc = True)
+ new_profiles_by_label = {}
+ for x in new_profiles:
+
+ # Remove default non core profiles
+ if not x['doc'].get('core'):
+ db.delete(x['doc'])
+ else:
+ new_profiles_by_label[x['doc']['label']] = x['_id']
+
+ profiles = migrate_data['profile']
+ profile_link = {}
+ for x in profiles:
+ p = profiles[x]
+
+ exists = new_profiles_by_label.get(p.get('label'))
+
+ # Update existing with order only
+ if exists and p.get('core'):
+ profile = db.get('id', exists)
+ profile['order'] = tryInt(p.get('order'))
+ profile['hide'] = p.get('hide') in [1, True, 'true', 'True']
+ db.update(profile)
+
+ profile_link[x] = profile.get('_id')
+ else:
+
+ new_profile = {
+ '_t': 'profile',
+ 'label': p.get('label'),
+ 'order': int(p.get('order', 999)),
+ 'core': p.get('core', False),
+ 'qualities': [],
+ 'wait_for': [],
+ 'finish': []
+ }
+
+ types = migrate_data['profiletype']
+ for profile_type in types:
+ p_type = types[profile_type]
+ if types[profile_type]['profile_id'] == p['id']:
+ if p_type['quality_id']:
+ new_profile['finish'].append(p_type['finish'])
+ new_profile['wait_for'].append(p_type['wait_for'])
+ new_profile['qualities'].append(migrate_data['quality'][p_type['quality_id']]['identifier'])
+
+ if len(new_profile['qualities']) > 0:
+ new_profile.update(db.insert(new_profile))
+ profile_link[x] = new_profile.get('_id')
+ else:
+ log.error('Corrupt profile list for "%s", using default.', p.get('label'))
+
+ # Qualities
+ log.info('Importing quality sizes')
+ new_qualities = db.all('quality', with_doc = True)
+ new_qualities_by_identifier = {}
+ for x in new_qualities:
+ new_qualities_by_identifier[x['doc']['identifier']] = x['_id']
+
+ qualities = migrate_data['quality']
+ quality_link = {}
+ for x in qualities:
+ q = qualities[x]
+ q_id = new_qualities_by_identifier[q.get('identifier')]
+
+ quality = db.get('id', q_id)
+ quality['order'] = q.get('order')
+ quality['size_min'] = tryInt(q.get('size_min'))
+ quality['size_max'] = tryInt(q.get('size_max'))
+ db.update(quality)
+
+ quality_link[x] = quality
+
+ # Titles
+ titles = migrate_data['librarytitle']
+ titles_by_library = {}
+ for x in titles:
+ title = titles[x]
+ if title.get('default'):
+ titles_by_library[title.get('libraries_id')] = title.get('title')
+
+ # Releases
+ releaseinfos = migrate_data['releaseinfo']
+ for x in releaseinfos:
+ info = releaseinfos[x]
+
+ # Skip if release doesn't exist for this info
+ if not migrate_data['release'].get(info.get('release_id')):
+ continue
+
+ if not migrate_data['release'][info.get('release_id')].get('info'):
+ migrate_data['release'][info.get('release_id')]['info'] = {}
+
+ migrate_data['release'][info.get('release_id')]['info'][info.get('identifier')] = info.get('value')
+
+ releases = migrate_data['release']
+ releases_by_media = {}
+ for x in releases:
+ release = releases[x]
+ if not releases_by_media.get(release.get('movie_id')):
+ releases_by_media[release.get('movie_id')] = []
+
+ releases_by_media[release.get('movie_id')].append(release)
+
+ # Type ids
+ types = migrate_data['filetype']
+ type_by_id = {}
+ for t in types:
+ type = types[t]
+ type_by_id[type.get('id')] = type
+
+ # Media
+ log.info('Importing %s media items', len(migrate_data['movie']))
+ statuses = migrate_data['status']
+ libraries = migrate_data['library']
+ library_files = migrate_data['library_files__file_library']
+ releases_files = migrate_data['release_files__file_release']
+ all_files = migrate_data['file']
+ poster_type = migrate_data['filetype']['poster']
+ medias = migrate_data['movie']
+ for x in medias:
+ m = medias[x]
+
+ status = statuses.get(m['status_id']).get('identifier')
+ l = libraries.get(m['library_id'])
+
+ # Only migrate wanted movies, Skip if no identifier present
+ if not l or not getImdb(l.get('identifier')): continue
+
+ profile_id = profile_link.get(m['profile_id'])
+ category_id = category_link.get(m['category_id'])
+ title = titles_by_library.get(m['library_id'])
+ releases = releases_by_media.get(x, [])
+ info = json.loads(l.get('info', ''))
+
+ files = library_files.get(m['library_id'], [])
+ if not isinstance(files, list):
+ files = [files]
+
+ added_media = fireEvent('movie.add', {
+ 'info': info,
+ 'identifier': l.get('identifier'),
+ 'profile_id': profile_id,
+ 'category_id': category_id,
+ 'title': title
+ }, force_readd = False, search_after = False, update_after = False, notify_after = False, status = status, single = True)
+
+ if not added_media:
+ log.error('Failed adding media %s: %s', (l.get('identifier'), info))
+ continue
+
+ added_media['files'] = added_media.get('files', {})
+ for f in files:
+ ffile = all_files[f.get('file_id')]
+
+ # Only migrate posters
+ if ffile.get('type_id') == poster_type.get('id'):
+ if ffile.get('path') not in added_media['files'].get('image_poster', []) and os.path.isfile(ffile.get('path')):
+ added_media['files']['image_poster'] = [ffile.get('path')]
+ break
+
+ if 'image_poster' in added_media['files']:
+ db.update(added_media)
+
+ for rel in releases:
+
+ empty_info = False
+ if not rel.get('info'):
+ empty_info = True
+ rel['info'] = {}
+
+ quality = quality_link.get(rel.get('quality_id'))
+ if not quality:
+ continue
+
+ release_status = statuses.get(rel.get('status_id')).get('identifier')
+
+ if rel['info'].get('download_id'):
+ status_support = rel['info'].get('download_status_support', False) in [True, 'true', 'True']
+ rel['info']['download_info'] = {
+ 'id': rel['info'].get('download_id'),
+ 'downloader': rel['info'].get('download_downloader'),
+ 'status_support': status_support,
+ }
+
+ # Add status to keys
+ rel['info']['status'] = release_status
+ if not empty_info:
+ fireEvent('release.create_from_search', [rel['info']], added_media, quality, single = True)
+ else:
+ release = {
+ '_t': 'release',
+ 'identifier': rel.get('identifier'),
+ 'media_id': added_media.get('_id'),
+ 'quality': quality.get('identifier'),
+ 'status': release_status,
+ 'last_edit': int(time.time()),
+ 'files': {}
+ }
+
+ # Add downloader info if provided
+ try:
+ release['download_info'] = rel['info']['download_info']
+ del rel['download_info']
+ except:
+ pass
+
+ # Add files
+ release_files = releases_files.get(rel.get('id'), [])
+ if not isinstance(release_files, list):
+ release_files = [release_files]
+
+ if len(release_files) == 0:
+ continue
+
+ for f in release_files:
+ rfile = all_files.get(f.get('file_id'))
+ if not rfile:
+ continue
+
+ file_type = type_by_id.get(rfile.get('type_id')).get('identifier')
+
+ if not release['files'].get(file_type):
+ release['files'][file_type] = []
+
+ release['files'][file_type].append(rfile.get('path'))
+
+ try:
+ rls = db.get('release_identifier', rel.get('identifier'), with_doc = True)['doc']
+ rls.update(release)
+ db.update(rls)
+ except:
+ db.insert(release)
+
+ log.info('Total migration took %s', time.time() - migrate_start)
+ log.info('=' * 30)
+
+ rename_old = True
+
+ except OperationalError:
+ log.error('Migrating from faulty database, probably a (too) old version: %s', traceback.format_exc())
+
+ rename_old = True
+ except:
+ log.error('Migration failed: %s', traceback.format_exc())
+
+
+ # rename old database
+ if rename_old:
+ random = randomString()
+ log.info('Renaming old database to %s ', '%s.%s_old' % (old_db, random))
+ os.rename(old_db, '%s.%s_old' % (old_db, random))
+
+ if os.path.isfile(old_db + '-wal'):
+ os.rename(old_db + '-wal', '%s-wal.%s_old' % (old_db, random))
+ if os.path.isfile(old_db + '-shm'):
+ os.rename(old_db + '-shm', '%s-shm.%s_old' % (old_db, random))
diff --git a/couchpotato/core/downloaders/__init__.py b/couchpotato/core/downloaders/__init__.py
index 5fb7125fb1..e69de29bb2 100644
--- a/couchpotato/core/downloaders/__init__.py
+++ b/couchpotato/core/downloaders/__init__.py
@@ -1,13 +0,0 @@
-config = {
- 'name': 'download_providers',
- 'groups': [
- {
- 'label': 'Downloaders',
- 'description': 'You can select different downloaders for each type (usenet / torrent)',
- 'type': 'list',
- 'name': 'download_providers',
- 'tab': 'downloaders',
- 'options': [],
- },
- ],
-}
diff --git a/couchpotato/core/downloaders/base.py b/couchpotato/core/downloaders/base.py
deleted file mode 100644
index 70500dc09d..0000000000
--- a/couchpotato/core/downloaders/base.py
+++ /dev/null
@@ -1,118 +0,0 @@
-from base64 import b32decode, b16encode
-from couchpotato.core.event import addEvent
-from couchpotato.core.logger import CPLog
-from couchpotato.core.providers.base import Provider
-import random
-import re
-
-log = CPLog(__name__)
-
-
-class Downloader(Provider):
-
- type = []
- http_time_between_calls = 0
-
- torrent_sources = [
- 'http://torrage.com/torrent/%s.torrent',
- 'http://torcache.net/torrent/%s.torrent',
- ]
-
- torrent_trackers = [
- 'http://tracker.publicbt.com/announce',
- 'udp://tracker.istole.it:80/announce',
- 'udp://fr33domtracker.h33t.com:3310/announce',
- 'http://tracker.istole.it/announce',
- 'http://tracker.ccc.de/announce',
- 'udp://tracker.publicbt.com:80/announce',
- 'udp://tracker.ccc.de:80/announce',
- 'http://exodus.desync.com/announce',
- 'http://exodus.desync.com:6969/announce',
- 'http://tracker.publichd.eu/announce',
- 'http://tracker.openbittorrent.com/announce',
- ]
-
- def __init__(self):
- addEvent('download', self._download)
- addEvent('download.enabled', self._isEnabled)
- addEvent('download.enabled_types', self.getEnabledDownloadType)
- addEvent('download.status', self._getAllDownloadStatus)
- addEvent('download.remove_failed', self._removeFailed)
-
- def getEnabledDownloadType(self):
- for download_type in self.type:
- if self.isEnabled(manual = True, data = {'type': download_type}):
- return self.type
-
- return []
-
- def _download(self, data = {}, movie = {}, manual = False, filedata = None):
- if self.isDisabled(manual, data):
- return
- return self.download(data = data, movie = movie, filedata = filedata)
-
- def _getAllDownloadStatus(self):
- if self.isDisabled(manual = True, data = {}):
- return
-
- return self.getAllDownloadStatus()
-
- def getAllDownloadStatus(self):
- return
-
- def _removeFailed(self, item):
- if self.isDisabled(manual = True, data = {}):
- return
-
- if self.conf('delete_failed', default = True):
- return self.removeFailed(item)
-
- return False
-
- def removeFailed(self, item):
- return
-
- def isCorrectType(self, item_type):
- is_correct = item_type in self.type
-
- if not is_correct:
- log.debug("Downloader doesn't support this type")
-
- return is_correct
-
- def magnetToTorrent(self, magnet_link):
- torrent_hash = re.findall('urn:btih:([\w]{32,40})', magnet_link)[0].upper()
-
- # Convert base 32 to hex
- if len(torrent_hash) == 32:
- torrent_hash = b16encode(b32decode(torrent_hash))
-
- sources = self.torrent_sources
- random.shuffle(sources)
-
- for source in sources:
- try:
- filedata = self.urlopen(source % torrent_hash, headers = {'Referer': ''}, show_error = False)
- if 'torcache' in filedata and 'file not found' in filedata.lower():
- continue
-
- return filedata
- except:
- log.debug('Torrent hash "%s" wasn\'t found on: %s', (torrent_hash, source))
-
- log.error('Failed converting magnet url to torrent: %s', (torrent_hash))
- return False
-
- def isDisabled(self, manual, data):
- return not self.isEnabled(manual, data)
-
- def _isEnabled(self, manual, data = {}):
- if not self.isEnabled(manual, data):
- return
- return True
-
- def isEnabled(self, manual, data = {}):
- d_manual = self.conf('manual', default = False)
- return super(Downloader, self).isEnabled() and \
- ((d_manual and manual) or (d_manual is False)) and \
- (not data or self.isCorrectType(data.get('type')))
diff --git a/couchpotato/core/downloaders/blackhole.py b/couchpotato/core/downloaders/blackhole.py
new file mode 100644
index 0000000000..e9853f4e53
--- /dev/null
+++ b/couchpotato/core/downloaders/blackhole.py
@@ -0,0 +1,205 @@
+from __future__ import with_statement
+import os
+import traceback
+
+from couchpotato.core._base.downloader.main import DownloaderBase
+from couchpotato.core.helpers.encoding import sp
+from couchpotato.core.helpers.variable import getDownloadDir
+from couchpotato.core.logger import CPLog
+from couchpotato.environment import Env
+
+
+log = CPLog(__name__)
+
+autoload = 'Blackhole'
+
+
+class Blackhole(DownloaderBase):
+
+ protocol = ['nzb', 'torrent', 'torrent_magnet']
+ status_support = False
+
+ def download(self, data = None, media = None, filedata = None):
+ """ Send a torrent/nzb file to the downloader
+
+ :param data: dict returned from provider
+ Contains the release information
+ :param media: media dict with information
+ Used for creating the filename when possible
+ :param filedata: downloaded torrent/nzb filedata
+ The file gets downloaded in the searcher and send to this function
+ This is done to have failed checking before using the downloader, so the downloader
+ doesn't need to worry about that
+ :return: boolean
+ On failure returns false, but the downloader should log its own errors
+ """
+
+ if not media: media = {}
+ if not data: data = {}
+
+ directory = self.conf('directory')
+
+ # The folder needs to exist
+ if not directory or not os.path.isdir(directory):
+ log.error('No directory set for blackhole %s download.', data.get('protocol'))
+ else:
+ try:
+ # Filedata can be empty, which probably means it a magnet link
+ if not filedata or len(filedata) < 50:
+ try:
+ if data.get('protocol') == 'torrent_magnet':
+ filedata = self.magnetToTorrent(data.get('url'))
+ data['protocol'] = 'torrent'
+ except:
+ log.error('Failed download torrent via magnet url: %s', traceback.format_exc())
+
+ # If it's still empty, either write the magnet link to a .magnet file, or error out.
+ if not filedata or len(filedata) < 50:
+ if self.conf('magnet_file'):
+ filedata = data.get('url') + '\n'
+ data['protocol'] = 'magnet'
+ else:
+ log.error('No nzb/torrent available: %s', data.get('url'))
+ return False
+
+ # Create filename with imdb id and other nice stuff
+ file_name = self.createFileName(data, filedata, media)
+ full_path = os.path.join(directory, file_name)
+
+ # People want things nice and tidy, create a subdir
+ if self.conf('create_subdir'):
+ try:
+ new_path = os.path.splitext(full_path)[0]
+ if not os.path.exists(new_path):
+ os.makedirs(new_path)
+ full_path = os.path.join(new_path, file_name)
+ except:
+ log.error('Couldnt create sub dir, reverting to old one: %s', full_path)
+
+ try:
+
+ # Make sure the file doesn't exist yet, no need in overwriting it
+ if not os.path.isfile(full_path):
+ log.info('Downloading %s to %s.', (data.get('protocol'), full_path))
+ with open(full_path, 'wb') as f:
+ f.write(filedata)
+ os.chmod(full_path, Env.getPermission('file'))
+ return self.downloadReturnId('')
+ else:
+ log.info('File %s already exists.', full_path)
+ return self.downloadReturnId('')
+
+ except:
+ log.error('Failed to download to blackhole %s', traceback.format_exc())
+ pass
+
+ except:
+ log.info('Failed to download file %s: %s', (data.get('name'), traceback.format_exc()))
+ return False
+
+ return False
+
+ def test(self):
+ """ Test and see if the directory is writable
+ :return: boolean
+ """
+
+ directory = self.conf('directory')
+ if directory and os.path.isdir(directory):
+
+ test_file = sp(os.path.join(directory, 'couchpotato_test.txt'))
+
+ # Check if folder is writable
+ self.createFile(test_file, 'This is a test file')
+ if os.path.isfile(test_file):
+ os.remove(test_file)
+ return True
+
+ return False
+
+ def getEnabledProtocol(self):
+ """ What protocols this downloader is used for
+ :return: list with protocols
+ """
+
+ if self.conf('use_for') == 'both':
+ return super(Blackhole, self).getEnabledProtocol()
+ elif self.conf('use_for') == 'torrent':
+ return ['torrent', 'torrent_magnet']
+ else:
+ return ['nzb']
+
+ def isEnabled(self, manual = False, data = None):
+ """ Check if protocol is used (and enabled)
+ :param manual: The user has clicked to download a link through the webUI
+ :param data: dict returned from provider
+ Contains the release information
+ :return: boolean
+ """
+ if not data: data = {}
+ for_protocol = ['both']
+ if data and 'torrent' in data.get('protocol'):
+ for_protocol.append('torrent')
+ elif data:
+ for_protocol.append(data.get('protocol'))
+
+ return super(Blackhole, self).isEnabled(manual, data) and \
+ ((self.conf('use_for') in for_protocol))
+
+
+config = [{
+ 'name': 'blackhole',
+ 'order': 30,
+ 'groups': [
+ {
+ 'tab': 'downloaders',
+ 'list': 'download_providers',
+ 'name': 'blackhole',
+ 'label': 'Black hole',
+ 'description': 'Download the NZB/Torrent to a specific folder. Note: Seeding and copying/linking features do not work with Black hole .',
+ 'wizard': True,
+ 'options': [
+ {
+ 'name': 'enabled',
+ 'default': True,
+ 'type': 'enabler',
+ 'radio_group': 'nzb,torrent',
+ },
+ {
+ 'name': 'directory',
+ 'type': 'directory',
+ 'description': 'Directory where the .nzb (or .torrent) file is saved to.',
+ 'default': getDownloadDir()
+ },
+ {
+ 'name': 'use_for',
+ 'label': 'Use for',
+ 'default': 'both',
+ 'type': 'dropdown',
+ 'values': [('usenet & torrents', 'both'), ('usenet', 'nzb'), ('torrent', 'torrent')],
+ },
+ {
+ 'name': 'create_subdir',
+ 'default': 0,
+ 'type': 'bool',
+ 'advanced': True,
+ 'description': 'Create a sub directory when saving the .nzb (or .torrent).',
+ },
+ {
+ 'name': 'manual',
+ 'default': 0,
+ 'type': 'bool',
+ 'advanced': True,
+ 'description': 'Disable this downloader for automated searches, but use it when I manually send a release.',
+ },
+ {
+ 'name': 'magnet_file',
+ 'default': 0,
+ 'type': 'bool',
+ 'advanced': True,
+ 'description': 'If magnet file conversion fails, write down the magnet link in a .magnet file instead.',
+ },
+ ],
+ }
+ ],
+}]
diff --git a/couchpotato/core/downloaders/blackhole/__init__.py b/couchpotato/core/downloaders/blackhole/__init__.py
deleted file mode 100644
index 290e8d43dd..0000000000
--- a/couchpotato/core/downloaders/blackhole/__init__.py
+++ /dev/null
@@ -1,48 +0,0 @@
-from .main import Blackhole
-from couchpotato.core.helpers.variable import getDownloadDir
-
-def start():
- return Blackhole()
-
-config = [{
- 'name': 'blackhole',
- 'order': 30,
- 'groups': [
- {
- 'tab': 'downloaders',
- 'list': 'download_providers',
- 'name': 'blackhole',
- 'label': 'Black hole',
- 'description': 'Download the NZB/Torrent to a specific folder.',
- 'wizard': True,
- 'options': [
- {
- 'name': 'enabled',
- 'default': True,
- 'type': 'enabler',
- 'radio_group': 'nzb,torrent',
- },
- {
- 'name': 'directory',
- 'type': 'directory',
- 'description': 'Directory where the .nzb (or .torrent) file is saved to.',
- 'default': getDownloadDir()
- },
- {
- 'name': 'use_for',
- 'label': 'Use for',
- 'default': 'both',
- 'type': 'dropdown',
- 'values': [('usenet & torrents', 'both'), ('usenet', 'nzb'), ('torrent', 'torrent')],
- },
- {
- 'name': 'manual',
- 'default': 0,
- 'type': 'bool',
- 'advanced': True,
- 'description': 'Disable this downloader for automated searches, but use it when I manually send a release.',
- },
- ],
- }
- ],
-}]
diff --git a/couchpotato/core/downloaders/blackhole/main.py b/couchpotato/core/downloaders/blackhole/main.py
deleted file mode 100644
index aad9ea7fc5..0000000000
--- a/couchpotato/core/downloaders/blackhole/main.py
+++ /dev/null
@@ -1,72 +0,0 @@
-from __future__ import with_statement
-from couchpotato.core.downloaders.base import Downloader
-from couchpotato.core.logger import CPLog
-from couchpotato.environment import Env
-import os
-import traceback
-
-log = CPLog(__name__)
-
-class Blackhole(Downloader):
-
- type = ['nzb', 'torrent', 'torrent_magnet']
-
- def download(self, data = {}, movie = {}, filedata = None):
-
- directory = self.conf('directory')
- if not directory or not os.path.isdir(directory):
- log.error('No directory set for blackhole %s download.', data.get('type'))
- else:
- try:
- if not filedata or len(filedata) < 50:
- try:
- if data.get('type') == 'torrent_magnet':
- filedata = self.magnetToTorrent(data.get('url'))
- data['type'] = 'torrent'
- except:
- log.error('Failed download torrent via magnet url: %s', traceback.format_exc())
-
- if not filedata or len(filedata) < 50:
- log.error('No nzb/torrent available: %s', data.get('url'))
- return False
-
- fullPath = os.path.join(directory, self.createFileName(data, filedata, movie))
-
- try:
- if not os.path.isfile(fullPath):
- log.info('Downloading %s to %s.', (data.get('type'), fullPath))
- with open(fullPath, 'wb') as f:
- f.write(filedata)
- os.chmod(fullPath, Env.getPermission('file'))
- return True
- else:
- log.info('File %s already exists.', fullPath)
- return True
-
- except:
- log.error('Failed to download to blackhole %s', traceback.format_exc())
- pass
-
- except:
- log.info('Failed to download file %s: %s', (data.get('name'), traceback.format_exc()))
- return False
-
- return False
-
- def getEnabledDownloadType(self):
- if self.conf('use_for') == 'both':
- return super(Blackhole, self).getEnabledDownloadType()
- elif self.conf('use_for') == 'torrent':
- return ['torrent', 'torrent_magnet']
- else:
- return ['nzb']
-
- def isEnabled(self, manual, data = {}):
- for_type = ['both']
- if data and 'torrent' in data.get('type'):
- for_type.append('torrent')
- elif data:
- for_type.append(data.get('type'))
-
- return super(Blackhole, self).isEnabled(manual, data) and \
- ((self.conf('use_for') in for_type))
diff --git a/couchpotato/core/downloaders/deluge.py b/couchpotato/core/downloaders/deluge.py
new file mode 100644
index 0000000000..aaca40e663
--- /dev/null
+++ b/couchpotato/core/downloaders/deluge.py
@@ -0,0 +1,421 @@
+from base64 import b64encode, b16encode, b32decode
+from datetime import timedelta
+from hashlib import sha1
+import os.path
+import re
+import traceback
+
+from bencode import bencode as benc, bdecode
+from couchpotato.core._base.downloader.main import DownloaderBase, ReleaseDownloadList
+from couchpotato.core.helpers.encoding import isInt, sp
+from couchpotato.core.helpers.variable import tryFloat, cleanHost
+from couchpotato.core.logger import CPLog
+from synchronousdeluge import DelugeClient
+
+
+log = CPLog(__name__)
+
+autoload = 'Deluge'
+
+
+class Deluge(DownloaderBase):
+
+ protocol = ['torrent', 'torrent_magnet']
+ log = CPLog(__name__)
+ drpc = None
+
+ def connect(self, reconnect = False):
+ """ Connect to the delugeRPC, re-use connection when already available
+ :param reconnect: force reconnect
+ :return: DelugeRPC instance
+ """
+
+ # Load host from config and split out port.
+ host = cleanHost(self.conf('host'), protocol = False).split(':')
+
+ # Force host assignment
+ if len(host) == 1:
+ host.append(80)
+
+ if not isInt(host[1]):
+ log.error('Config properties are not filled in correctly, port is missing.')
+ return False
+
+ if not self.drpc or reconnect:
+ self.drpc = DelugeRPC(host[0], port = host[1], username = self.conf('username'), password = self.conf('password'))
+
+ return self.drpc
+
+ def download(self, data = None, media = None, filedata = None):
+ """ Send a torrent/nzb file to the downloader
+
+ :param data: dict returned from provider
+ Contains the release information
+ :param media: media dict with information
+ Used for creating the filename when possible
+ :param filedata: downloaded torrent/nzb filedata
+ The file gets downloaded in the searcher and sent to this function
+ This is done to have failed checking before using the downloader, so the downloader
+ doesn't need to worry about that
+ :return: boolean
+ One failure returns false, but the downloader should log its own errors
+ """
+
+ if not media: media = {}
+ if not data: data = {}
+
+ log.info('Sending "%s" (%s) to Deluge.', (data.get('name'), data.get('protocol')))
+
+ if not self.connect():
+ return False
+
+ if not filedata and data.get('protocol') == 'torrent':
+ log.error('Failed sending torrent, no data')
+ return False
+
+ # Set parameters for Deluge
+ options = {
+ 'add_paused': self.conf('paused', default = 0),
+ 'label': self.conf('label')
+ }
+
+ if self.conf('directory'):
+ if os.path.isdir(self.conf('directory')):
+ options['download_location'] = self.conf('directory')
+ else:
+ log.error('Download directory from Deluge settings: %s doesn\'t exist', self.conf('directory'))
+
+ if self.conf('completed_directory'):
+ if os.path.isdir(self.conf('completed_directory')):
+ options['move_completed'] = 1
+ options['move_completed_path'] = self.conf('completed_directory')
+ else:
+ log.error('Completed download directory from Deluge settings: %s doesn\'t exist', self.conf('completed_directory'))
+
+ if data.get('seed_ratio'):
+ options['stop_at_ratio'] = 1
+ options['stop_ratio'] = tryFloat(data.get('seed_ratio'))
+
+# Deluge only has seed time as a global option. Might be added in
+# in a future API release.
+# if data.get('seed_time'):
+
+ # Send request to Deluge
+ if data.get('protocol') == 'torrent_magnet':
+ remote_torrent = self.drpc.add_torrent_magnet(data.get('url'), options)
+ else:
+ filename = self.createFileName(data, filedata, media)
+ remote_torrent = self.drpc.add_torrent_file(filename, filedata, options)
+
+ if not remote_torrent:
+ log.error('Failed sending torrent to Deluge')
+ return False
+
+ log.info('Torrent sent to Deluge successfully.')
+ return self.downloadReturnId(remote_torrent)
+
+ def test(self):
+ """ Check if connection works
+ :return: bool
+ """
+ if self.connect(True) and self.drpc.test():
+ return True
+ return False
+
+ def getAllDownloadStatus(self, ids):
+ """ Get status of all active downloads
+
+ :param ids: list of (mixed) downloader ids
+ Used to match the releases for this downloader as there could be
+ other downloaders active that it should ignore
+ :return: list of releases
+ """
+
+ log.debug('Checking Deluge download status.')
+
+ if not self.connect():
+ return []
+
+ release_downloads = ReleaseDownloadList(self)
+
+ queue = self.drpc.get_alltorrents(ids)
+
+ if not queue:
+ log.debug('Nothing in queue or error')
+ return []
+
+ for torrent_id in queue:
+ torrent = queue[torrent_id]
+
+ if not 'hash' in torrent:
+ # When given a list of ids, deluge will return an empty item for a non-existent torrent.
+ continue
+
+ log.debug('name=%s / id=%s / save_path=%s / move_on_completed=%s / move_completed_path=%s / hash=%s / progress=%s / state=%s / eta=%s / ratio=%s / stop_ratio=%s / is_seed=%s / is_finished=%s / paused=%s', (torrent['name'], torrent['hash'], torrent['save_path'], torrent['move_on_completed'], torrent['move_completed_path'], torrent['hash'], torrent['progress'], torrent['state'], torrent['eta'], torrent['ratio'], torrent['stop_ratio'], torrent['is_seed'], torrent['is_finished'], torrent['paused']))
+
+ # Deluge has no easy way to work out if a torrent is stalled or failing.
+ #status = 'failed'
+ status = 'busy'
+ # If a user opts to seed a torrent forever (usually associated with private tracker usage), stop_ratio will be 0 or -1 (depending on Deluge version).
+ # In this scenario the status of the torrent would never change from BUSY to SEEDING.
+ # The last check takes care of this case.
+ if torrent['is_seed'] and ((tryFloat(torrent['ratio']) < tryFloat(torrent['stop_ratio'])) or (tryFloat(torrent['stop_ratio']) < 0)):
+ # We have torrent['seeding_time'] to work out what the seeding time is, but we do not
+ # have access to the downloader seed_time, as with deluge we have no way to pass it
+ # when the torrent is added. So Deluge will only look at the ratio.
+ # See above comment in download().
+ status = 'seeding'
+ elif torrent['is_seed'] and torrent['is_finished'] and torrent['paused'] and torrent['state'] == 'Paused':
+ status = 'completed'
+
+ download_dir = sp(torrent['save_path'])
+ if torrent['move_on_completed']:
+ download_dir = torrent['move_completed_path']
+
+ torrent_files = []
+ for file_item in torrent['files']:
+ torrent_files.append(sp(os.path.join(download_dir, file_item['path'])))
+
+ release_downloads.append({
+ 'id': torrent['hash'],
+ 'name': torrent['name'],
+ 'status': status,
+ 'original_status': torrent['state'],
+ 'seed_ratio': torrent['ratio'],
+ 'timeleft': str(timedelta(seconds = torrent['eta'])),
+ 'folder': sp(download_dir if len(torrent_files) == 1 else os.path.join(download_dir, torrent['name'])),
+ 'files': torrent_files,
+ })
+
+ return release_downloads
+
+ def pause(self, release_download, pause = True):
+ if pause:
+ return self.drpc.pause_torrent([release_download['id']])
+ else:
+ return self.drpc.resume_torrent([release_download['id']])
+
+ def removeFailed(self, release_download):
+ log.info('%s failed downloading, deleting...', release_download['name'])
+ return self.drpc.remove_torrent(release_download['id'], True)
+
+ def processComplete(self, release_download, delete_files = False):
+ log.debug('Requesting Deluge to remove the torrent %s%s.', (release_download['name'], ' and cleanup the downloaded files' if delete_files else ''))
+ return self.drpc.remove_torrent(release_download['id'], remove_local_data = delete_files)
+
+
+class DelugeRPC(object):
+
+ host = 'localhost'
+ port = 58846
+ username = None
+ password = None
+ client = None
+
+ def __init__(self, host = 'localhost', port = 58846, username = None, password = None):
+ super(DelugeRPC, self).__init__()
+
+ self.host = host
+ self.port = port
+ self.username = username
+ self.password = password
+
+ def connect(self):
+ self.client = DelugeClient()
+ self.client.connect(self.host, int(self.port), self.username, self.password)
+
+ def test(self):
+ try:
+ self.connect()
+ except:
+ return False
+ return True
+
+ def add_torrent_magnet(self, torrent, options):
+ torrent_id = False
+ try:
+ self.connect()
+ torrent_id = self.client.core.add_torrent_magnet(torrent, options).get()
+ if not torrent_id:
+ torrent_id = self._check_torrent(True, torrent)
+
+ if torrent_id and options['label']:
+ self.client.label.set_torrent(torrent_id, options['label']).get()
+ except Exception as err:
+ log.error('Failed to add torrent magnet %s: %s %s', (torrent, err, traceback.format_exc()))
+ finally:
+ if self.client:
+ self.disconnect()
+
+ return torrent_id
+
+ def add_torrent_file(self, filename, torrent, options):
+ torrent_id = False
+ try:
+ self.connect()
+ torrent_id = self.client.core.add_torrent_file(filename, b64encode(torrent), options).get()
+ if not torrent_id:
+ torrent_id = self._check_torrent(False, torrent)
+
+ if torrent_id and options['label']:
+ self.client.label.set_torrent(torrent_id, options['label']).get()
+ except Exception as err:
+ log.error('Failed to add torrent file %s: %s %s', (filename, err, traceback.format_exc()))
+ finally:
+ if self.client:
+ self.disconnect()
+
+ return torrent_id
+
+ def get_alltorrents(self, ids):
+ ret = False
+ try:
+ self.connect()
+ ret = self.client.core.get_torrents_status({'id': ids}, ('name', 'hash', 'save_path', 'move_completed_path', 'progress', 'state', 'eta', 'ratio', 'stop_ratio', 'is_seed', 'is_finished', 'paused', 'move_on_completed', 'files')).get()
+ except Exception as err:
+ log.error('Failed to get all torrents: %s %s', (err, traceback.format_exc()))
+ finally:
+ if self.client:
+ self.disconnect()
+ return ret
+
+ def pause_torrent(self, torrent_ids):
+ try:
+ self.connect()
+ self.client.core.pause_torrent(torrent_ids).get()
+ except Exception as err:
+ log.error('Failed to pause torrent: %s %s', (err, traceback.format_exc()))
+ finally:
+ if self.client:
+ self.disconnect()
+
+ def resume_torrent(self, torrent_ids):
+ try:
+ self.connect()
+ self.client.core.resume_torrent(torrent_ids).get()
+ except Exception as err:
+ log.error('Failed to resume torrent: %s %s', (err, traceback.format_exc()))
+ finally:
+ if self.client:
+ self.disconnect()
+
+ def remove_torrent(self, torrent_id, remove_local_data):
+ ret = False
+ try:
+ self.connect()
+ ret = self.client.core.remove_torrent(torrent_id, remove_local_data).get()
+ except Exception as err:
+ log.error('Failed to remove torrent: %s %s', (err, traceback.format_exc()))
+ finally:
+ if self.client:
+ self.disconnect()
+ return ret
+
+ def disconnect(self):
+ self.client.disconnect()
+
+ def _check_torrent(self, magnet, torrent):
+ # Torrent not added, check if it already existed.
+ if magnet:
+ torrent_hash = re.findall('urn:btih:([\w]{32,40})', torrent)[0]
+ else:
+ info = bdecode(torrent)["info"]
+ torrent_hash = sha1(benc(info)).hexdigest()
+
+ # Convert base 32 to hex
+ if len(torrent_hash) == 32:
+ torrent_hash = b16encode(b32decode(torrent_hash))
+
+ torrent_hash = torrent_hash.lower()
+ torrent_check = self.client.core.get_torrent_status(torrent_hash, {}).get()
+ if torrent_check['hash']:
+ return torrent_hash
+
+ return False
+
+
+config = [{
+ 'name': 'deluge',
+ 'groups': [
+ {
+ 'tab': 'downloaders',
+ 'list': 'download_providers',
+ 'name': 'deluge',
+ 'label': 'Deluge',
+ 'description': 'Use Deluge to download torrents.',
+ 'wizard': True,
+ 'options': [
+ {
+ 'name': 'enabled',
+ 'default': 0,
+ 'type': 'enabler',
+ 'radio_group': 'torrent',
+ },
+ {
+ 'name': 'host',
+ 'default': 'localhost:58846',
+ 'description': 'Hostname with port. Usually localhost:58846 ',
+ },
+ {
+ 'name': 'username',
+ },
+ {
+ 'name': 'password',
+ 'type': 'password',
+ },
+ {
+ 'name': 'directory',
+ 'type': 'directory',
+ 'description': 'Download to this directory. Keep empty for default Deluge download directory.',
+ },
+ {
+ 'name': 'completed_directory',
+ 'type': 'directory',
+ 'description': 'Move completed torrent to this directory. Keep empty for default Deluge options.',
+ 'advanced': True,
+ },
+ {
+ 'name': 'label',
+ 'description': 'Label to add to torrents in the Deluge UI.',
+ },
+ {
+ 'name': 'remove_complete',
+ 'label': 'Remove torrent',
+ 'type': 'bool',
+ 'default': True,
+ 'advanced': True,
+ 'description': 'Remove the torrent from Deluge after it has finished seeding.',
+ },
+ {
+ 'name': 'delete_files',
+ 'label': 'Remove files',
+ 'default': True,
+ 'type': 'bool',
+ 'advanced': True,
+ 'description': 'Also remove the leftover files.',
+ },
+ {
+ 'name': 'paused',
+ 'type': 'bool',
+ 'advanced': True,
+ 'default': False,
+ 'description': 'Add the torrent paused.',
+ },
+ {
+ 'name': 'manual',
+ 'default': 0,
+ 'type': 'bool',
+ 'advanced': True,
+ 'description': 'Disable this downloader for automated searches, but use it when I manually send a release.',
+ },
+ {
+ 'name': 'delete_failed',
+ 'default': True,
+ 'advanced': True,
+ 'type': 'bool',
+ 'description': 'Delete a release after the download has failed.',
+ },
+ ],
+ }
+ ],
+}]
diff --git a/couchpotato/core/downloaders/hadouken.py b/couchpotato/core/downloaders/hadouken.py
new file mode 100644
index 0000000000..f96a374e10
--- /dev/null
+++ b/couchpotato/core/downloaders/hadouken.py
@@ -0,0 +1,590 @@
+from base64 import b16encode, b32decode, b64encode
+from distutils.version import LooseVersion
+from hashlib import sha1
+import httplib
+import json
+import os
+import re
+import urllib2
+
+from couchpotato.core._base.downloader.main import DownloaderBase, ReleaseDownloadList
+from couchpotato.core.helpers.encoding import isInt, sp
+from couchpotato.core.helpers.variable import cleanHost
+from couchpotato.core.logger import CPLog
+from bencode import bencode as benc, bdecode
+
+
+log = CPLog(__name__)
+
+autoload = 'Hadouken'
+
+
+class Hadouken(DownloaderBase):
+ protocol = ['torrent', 'torrent_magnet']
+ hadouken_api = None
+
+ def connect(self):
+ # Load host from config and split out port.
+ host = cleanHost(self.conf('host'), protocol = False).split(':')
+
+ if not isInt(host[1]):
+ log.error('Config properties are not filled in correctly, port is missing.')
+ return False
+
+ # This is where v4 and v5 begin to differ
+ if(self.conf('version') == 'v4'):
+ if not self.conf('api_key'):
+ log.error('Config properties are not filled in correctly, API key is missing.')
+ return False
+
+ url = 'http://' + str(host[0]) + ':' + str(host[1]) + '/jsonrpc'
+ client = JsonRpcClient(url, 'Token ' + self.conf('api_key'))
+ self.hadouken_api = HadoukenAPIv4(client)
+
+ return True
+ else:
+ auth_type = self.conf('auth_type')
+ header = None
+
+ if auth_type == 'api_key':
+ header = 'Token ' + self.conf('api_key')
+ elif auth_type == 'user_pass':
+ header = 'Basic ' + b64encode(self.conf('auth_user') + ':' + self.conf('auth_pass'))
+
+ url = 'http://' + str(host[0]) + ':' + str(host[1]) + '/api'
+ client = JsonRpcClient(url, header)
+ self.hadouken_api = HadoukenAPIv5(client)
+
+ return True
+
+ return False
+
+ def download(self, data = None, media = None, filedata = None):
+ """ Send a torrent/nzb file to the downloader
+
+ :param data: dict returned from provider
+ Contains the release information
+ :param media: media dict with information
+ Used for creating the filename when possible
+ :param filedata: downloaded torrent/nzb filedata
+ The file gets downloaded in the searcher and sent to this function
+ This is done to have failed checking before using the downloader, so the downloader
+ doesn't need to worry about that
+ :return: boolean
+ One failure returns false, but the downloader should log its own errors
+ """
+
+ if not media: media = {}
+ if not data: data = {}
+
+ log.debug("Sending '%s' (%s) to Hadouken.", (data.get('name'), data.get('protocol')))
+
+ if not self.connect():
+ return False
+
+ torrent_params = {}
+
+ if self.conf('label'):
+ torrent_params['label'] = self.conf('label')
+ # Set the tags array since that is what v5 expects.
+ torrent_params['tags'] = [self.conf('label')]
+
+ torrent_filename = self.createFileName(data, filedata, media)
+
+ if data.get('protocol') == 'torrent_magnet':
+ torrent_hash = re.findall('urn:btih:([\w]{32,40})', data.get('url'))[0].upper()
+ torrent_params['trackers'] = self.torrent_trackers
+ torrent_params['name'] = torrent_filename
+ else:
+ info = bdecode(filedata)['info']
+ torrent_hash = sha1(benc(info)).hexdigest().upper()
+
+ # Convert base 32 to hex
+ if len(torrent_hash) == 32:
+ torrent_hash = b16encode(b32decode(torrent_hash))
+
+ # Send request to Hadouken
+ if data.get('protocol') == 'torrent_magnet':
+ self.hadouken_api.add_magnet_link(data.get('url'), torrent_params)
+ else:
+ self.hadouken_api.add_file(filedata, torrent_params)
+
+ return self.downloadReturnId(torrent_hash)
+
+ def test(self):
+ """ Tests the given host:port and API key """
+
+ if not self.connect():
+ return False
+
+ version = self.hadouken_api.get_version()
+
+ if not version:
+ log.error('Could not get Hadouken version.')
+ return False
+
+ # The minimum required version of Hadouken is 4.5.6.
+ if LooseVersion(version) >= LooseVersion('4.5.6'):
+ return True
+
+ log.error('Hadouken v4.5.6 (or newer) required. Found v%s', version)
+ return False
+
+ def getAllDownloadStatus(self, ids):
+ """ Get status of all active downloads
+
+ :param ids: list of (mixed) downloader ids
+ Used to match the releases for this downloader as there could be
+ other downloaders active that it should ignore
+ :return: list of releases
+ """
+
+ log.debug('Checking Hadouken download status.')
+
+ if not self.connect():
+ return []
+
+ release_downloads = ReleaseDownloadList(self)
+ queue = self.hadouken_api.get_by_hash_list(ids)
+
+ if not queue:
+ return []
+
+ for torrent in queue:
+ if torrent is None:
+ continue
+
+ torrent_filelist = self.hadouken_api.get_files_by_hash(torrent.info_hash)
+ torrent_files = []
+
+ for file_item in torrent_filelist:
+ torrent_files.append(sp(os.path.join(torrent.save_path, file_item)))
+
+ release_downloads.append({
+ 'id': torrent.info_hash.upper(),
+ 'name': torrent.name,
+ 'status': torrent.get_status(),
+ 'seed_ratio': torrent.get_seed_ratio(),
+ 'original_status': torrent.state,
+ 'timeleft': -1,
+ 'folder': sp(torrent.save_path if len(torrent_files) == 1 else os.path.join(torrent.save_path, torrent.name)),
+ 'files': torrent_files
+ })
+
+ return release_downloads
+
+ def pause(self, release_download, pause = True):
+ """ Pauses or resumes the torrent specified by the ID field
+ in release_download.
+
+ Keyword arguments:
+ release_download -- The CouchPotato release_download to pause/resume.
+ pause -- Boolean indicating whether to pause or resume.
+ """
+
+ if not self.connect():
+ return False
+
+ return self.hadouken_api.pause(release_download['id'], pause)
+
+ def removeFailed(self, release_download):
+ """ Removes a failed torrent and also remove the data associated with it.
+
+ Keyword arguments:
+ release_download -- The CouchPotato release_download to remove.
+ """
+
+ log.info('%s failed downloading, deleting...', release_download['name'])
+
+ if not self.connect():
+ return False
+
+ return self.hadouken_api.remove(release_download['id'], remove_data = True)
+
+ def processComplete(self, release_download, delete_files = False):
+ """ Removes the completed torrent from Hadouken and optionally removes the data
+ associated with it.
+
+ Keyword arguments:
+ release_download -- The CouchPotato release_download to remove.
+ delete_files: Boolean indicating whether to remove the associated data.
+ """
+
+ log.debug('Requesting Hadouken to remove the torrent %s%s.',
+ (release_download['name'], ' and cleanup the downloaded files' if delete_files else ''))
+
+ if not self.connect():
+ return False
+
+ return self.hadouken_api.remove(release_download['id'], remove_data = delete_files)
+
+
+class JsonRpcClient(object):
+ def __init__(self, url, auth_header = None):
+ self.url = url
+ self.requestId = 0
+
+ self.opener = urllib2.build_opener()
+ self.opener.addheaders = [
+ ('User-Agent', 'couchpotato-hadouken-client/1.0'),
+ ('Accept', 'application/json'),
+ ('Content-Type', 'application/json')
+ ]
+
+ if auth_header:
+ self.opener.addheaders.append(('Authorization', auth_header))
+
+ def invoke(self, method, params):
+ self.requestId += 1
+
+ data = {
+ 'jsonrpc': '2.0',
+ 'id': self.requestId,
+ 'method': method,
+ 'params': params
+ }
+
+ request = urllib2.Request(self.url, data = json.dumps(data))
+
+ try:
+ f = self.opener.open(request)
+ response = f.read()
+ f.close()
+
+ obj = json.loads(response)
+
+ if 'error' in obj.keys():
+ log.error('JSONRPC error, %s: %s', (obj['error']['code'], obj['error']['message']))
+ return False
+
+ if 'result' in obj.keys():
+ return obj['result']
+
+ return True
+ except httplib.InvalidURL as err:
+ log.error('Invalid Hadouken host, check your config %s', err)
+ except urllib2.HTTPError as err:
+ if err.code == 401:
+ log.error('Could not authenticate, check your config')
+ else:
+ log.error('Hadouken HTTPError: %s', err)
+ except urllib2.URLError as err:
+ log.error('Unable to connect to Hadouken %s', err)
+
+ return False
+
+
+class HadoukenAPI(object):
+ def __init__(self, rpc_client):
+ self.rpc = rpc_client
+
+ if not rpc_client:
+ log.error('No JSONRPC client specified.')
+
+ def add_file(self, data, params):
+ """ Add a file to Hadouken with the specified parameters.
+
+ Keyword arguments:
+ filedata -- The binary torrent data.
+ torrent_params -- Additional parameters for the file.
+ """
+ pass
+
+ def add_magnet_link(self, link, params):
+ """ Add a magnet link to Hadouken with the specified parameters.
+
+ Keyword arguments:
+ magnetLink -- The magnet link to send.
+ torrent_params -- Additional parameters for the magnet link.
+ """
+ pass
+
+ def get_by_hash_list(self, infoHashList):
+ """ Gets a list of torrents filtered by the given info hash list.
+
+ Keyword arguments:
+ infoHashList -- A list of info hashes.
+ """
+ pass
+
+ def get_files_by_hash(self, infoHash):
+ """ Gets a list of files for the torrent identified by the
+ given info hash.
+
+ Keyword arguments:
+ infoHash -- The info hash of the torrent to return files for.
+ """
+ pass
+
+ def get_version(self):
+ """ Gets the version, commitish and build date of Hadouken. """
+ pass
+
+ def pause(self, infoHash, pause):
+ """ Pauses/unpauses the torrent identified by the given info hash.
+
+ Keyword arguments:
+ infoHash -- The info hash of the torrent to operate on.
+ pause -- If true, pauses the torrent. Otherwise resumes.
+ """
+ pass
+
+ def remove(self, infoHash, remove_data = False):
+ """ Removes the torrent identified by the given info hash and
+ optionally removes the data as well.
+
+ Keyword arguments:
+ infoHash -- The info hash of the torrent to remove.
+ remove_data -- If true, removes the data associated with the torrent.
+ """
+ pass
+
+
+class TorrentItem(object):
+ @property
+ def info_hash(self):
+ pass
+
+ @property
+ def save_path(self):
+ pass
+
+ @property
+ def name(self):
+ pass
+
+ @property
+ def state(self):
+ pass
+
+ def get_status(self):
+ """ Returns the CouchPotato status for a given torrent."""
+ pass
+
+ def get_seed_ratio(self):
+ """ Returns the seed ratio for a given torrent."""
+ pass
+
+
+class TorrentItemv5(TorrentItem):
+ def __init__(self, obj):
+ self.obj = obj
+
+ def info_hash(self):
+ return self.obj[0]
+
+ def save_path(self):
+ return self.obj[26]
+
+ def name(self):
+ return self.obj[2]
+
+ def state(self):
+ return self.obj[1]
+
+ def get_status(self):
+ if self.obj[1] == 32:
+ return 'completed'
+
+ if self.obj[1] == 1:
+ return 'seeding'
+
+ return 'busy'
+
+ def get_seed_ratio(self):
+ up = self.obj[6]
+ down = self.obj[5]
+
+ if up > 0 and down > 0:
+ return up / down
+
+ return 0
+
+
+class HadoukenAPIv5(HadoukenAPI):
+
+ def add_file(self, data, params):
+ return self.rpc.invoke('webui.addTorrent', ['file', b64encode(data), params])
+
+ def add_magnet_link(self, link, params):
+ return self.rpc.invoke('webui.addTorrent', ['url', link, params])
+
+ def get_by_hash_list(self, infoHashList):
+ torrents = self.rpc.invoke('webui.list', None)
+ result = []
+
+ for torrent in torrents['torrents']:
+ if torrent[0] in infoHashList:
+ result.append(TorrentItemv5(torrent))
+
+ return result
+
+ def get_files_by_hash(self, infoHash):
+ files = self.rpc.invoke('webui.getFiles', [infoHash])
+ result = []
+
+ for file in files['files'][1]:
+ result.append(file[0])
+
+ return result
+
+ def get_version(self):
+ result = self.rpc.invoke('core.getSystemInfo', None)
+
+ if not result:
+ return False
+
+ return result['versions']['hadouken']
+
+ def pause(self, infoHash, pause):
+ if pause:
+ return self.rpc.invoke('webui.perform', ['pause', infoHash])
+
+ return self.rpc.invoke('webui.perform', ['resume', infoHash])
+
+ def remove(self, infoHash, remove_data=False):
+ if remove_data:
+ return self.rpc.invoke('webui.perform', ['removedata', infoHash])
+
+ return self.rpc.invoke('webui.perform', ['remove', infoHash])
+
+
+class TorrentItemv4(TorrentItem):
+ def __init__(self, obj):
+ self.obj = obj
+
+ def info_hash(self):
+ return self.obj['InfoHash']
+
+ def save_path(self):
+ return self.obj['SavePath']
+
+ def name(self):
+ return self.obj['Name']
+
+ def state(self):
+ return self.obj['State']
+
+ def get_status(self):
+ if self.obj['IsSeeding'] and self.obj['IsFinished'] and self.obj['Paused']:
+ return 'completed'
+
+ if self.obj['IsSeeding']:
+ return 'seeding'
+
+ return 'busy'
+
+ def get_seed_ratio(self):
+ up = self.obj['TotalUploadedBytes']
+ down = self.obj['TotalDownloadedBytes']
+
+ if up > 0 and down > 0:
+ return up / down
+
+ return 0
+
+
+class HadoukenAPIv4(HadoukenAPI):
+ def add_file(self, data, params):
+ return self.rpc.invoke('torrents.addFile', [b64encode(data), params])
+
+ def add_magnet_link(self, link, params):
+ return self.rpc.invoke('torrents.addUrl', [link, params])
+
+ def get_by_hash_list(self, infoHashList):
+ torrents = self.rpc.invoke('torrents.getByInfoHashList', [infoHashList])
+ result = []
+
+ for torrent in torrents:
+ result.append(TorrentItemv4(torrent))
+
+ return result
+
+ def get_files_by_hash(self, infoHash):
+ files = self.rpc.invoke('torrents.getFiles', [infoHash])
+ result = []
+
+ for file in files:
+ result.append(file['Path'])
+
+ return result
+
+ def get_version(self):
+ result = self.rpc.invoke('core.getVersion', None)
+
+ if not result:
+ return False
+
+ return result['Version']
+
+ def pause(self, infoHash, pause):
+ if pause:
+ return self.rpc.invoke('torrents.pause', [infoHash])
+
+ return self.rpc.invoke('torrents.resume', [infoHash])
+
+ def remove(self, infoHash, remove_data = False):
+ return self.rpc.invoke('torrents.remove', [infoHash, remove_data])
+
+
+config = [{
+ 'name': 'hadouken',
+ 'groups': [
+ {
+ 'tab': 'downloaders',
+ 'list': 'download_providers',
+ 'name': 'hadouken',
+ 'label': 'Hadouken',
+ 'description': 'Use Hadouken (>= v4.5.6) to download torrents.',
+ 'wizard': True,
+ 'options': [
+ {
+ 'name': 'enabled',
+ 'default': 0,
+ 'type': 'enabler',
+ 'radio_group': 'torrent'
+ },
+ {
+ 'name': 'version',
+ 'label': 'Version',
+ 'type': 'dropdown',
+ 'default': 'v4',
+ 'values': [('v4.x', 'v4'), ('v5.x', 'v5')],
+ 'description': 'Hadouken version.',
+ },
+ {
+ 'name': 'host',
+ 'default': 'localhost:7890'
+ },
+ {
+ 'name': 'auth_type',
+ 'label': 'Auth. type',
+ 'type': 'dropdown',
+ 'default': 'api_key',
+ 'values': [('None', 'none'), ('API key/Token', 'api_key'), ('Username/Password', 'user_pass')],
+ 'description': 'Type of authentication',
+ },
+ {
+ 'name': 'api_key',
+ 'label': 'API key (v4)/Token (v5)',
+ 'type': 'password'
+ },
+ {
+ 'name': 'auth_user',
+ 'label': 'Username',
+ 'description': '(only for v5)'
+ },
+ {
+ 'name': 'auth_pass',
+ 'label': 'Password',
+ 'type': 'password',
+ 'description': '(only for v5)'
+ },
+ {
+ 'name': 'label',
+ 'description': 'Label to add torrent as.'
+ }
+ ]
+ }
+ ]
+}]
diff --git a/couchpotato/core/downloaders/nzbget.py b/couchpotato/core/downloaders/nzbget.py
new file mode 100644
index 0000000000..35a71850aa
--- /dev/null
+++ b/couchpotato/core/downloaders/nzbget.py
@@ -0,0 +1,318 @@
+from base64 import standard_b64encode
+from datetime import timedelta
+import re
+import shutil
+import socket
+import traceback
+import xmlrpclib
+
+from couchpotato.core._base.downloader.main import DownloaderBase, ReleaseDownloadList
+from couchpotato.core.helpers.encoding import ss, sp
+from couchpotato.core.helpers.variable import tryInt, md5, cleanHost
+from couchpotato.core.logger import CPLog
+
+
+log = CPLog(__name__)
+
+autoload = 'NZBGet'
+
+
+class NZBGet(DownloaderBase):
+
+ protocol = ['nzb']
+ rpc = 'xmlrpc'
+
+ def download(self, data = None, media = None, filedata = None):
+ """ Send a torrent/nzb file to the downloader
+
+ :param data: dict returned from provider
+ Contains the release information
+ :param media: media dict with information
+ Used for creating the filename when possible
+ :param filedata: downloaded torrent/nzb filedata
+            The file gets downloaded in the searcher and sent to this function
+ This is done to have failed checking before using the downloader, so the downloader
+ doesn't need to worry about that
+ :return: boolean
+            One failure returns false, but the downloader should log its own errors
+ """
+
+ if not media: media = {}
+ if not data: data = {}
+
+ if not filedata:
+ log.error('Unable to get NZB file: %s', traceback.format_exc())
+ return False
+
+ log.info('Sending "%s" to NZBGet.', data.get('name'))
+
+ nzb_name = ss('%s.nzb' % self.createNzbName(data, media))
+
+ rpc = self.getRPC()
+
+ try:
+ if rpc.writelog('INFO', 'CouchPotato connected to drop off %s.' % nzb_name):
+ log.debug('Successfully connected to NZBGet')
+ else:
+ log.info('Successfully connected to NZBGet, but unable to send a message')
+ except socket.error:
+ log.error('NZBGet is not responding. Please ensure that NZBGet is running and host setting is correct.')
+ return False
+ except xmlrpclib.ProtocolError as e:
+ if e.errcode == 401:
+ log.error('Password is incorrect.')
+ else:
+ log.error('Protocol Error: %s', e)
+ return False
+
+ if re.search(r"^0", rpc.version()):
+ xml_response = rpc.append(nzb_name, self.conf('category'), False, standard_b64encode(filedata.strip()))
+ else:
+ xml_response = rpc.append(nzb_name, self.conf('category'), tryInt(self.conf('priority')), False, standard_b64encode(filedata.strip()))
+
+ if xml_response:
+ log.info('NZB sent successfully to NZBGet')
+ nzb_id = md5(data['url']) # about as unique as they come ;)
+ couchpotato_id = "couchpotato=" + nzb_id
+ groups = rpc.listgroups()
+ file_id = [item['LastID'] for item in groups if item['NZBFilename'] == nzb_name]
+ confirmed = rpc.editqueue("GroupSetParameter", 0, couchpotato_id, file_id)
+ if confirmed:
+ log.debug('couchpotato parameter set in nzbget download')
+ return self.downloadReturnId(nzb_id)
+ else:
+ log.error('NZBGet could not add %s to the queue.', nzb_name)
+ return False
+
+ def test(self):
+ """ Check if connection works
+ :return: bool
+ """
+
+ rpc = self.getRPC()
+
+ try:
+ if rpc.writelog('INFO', 'CouchPotato connected to test connection'):
+ log.debug('Successfully connected to NZBGet')
+ else:
+ log.info('Successfully connected to NZBGet, but unable to send a message')
+ except socket.error:
+ log.error('NZBGet is not responding. Please ensure that NZBGet is running and host setting is correct.')
+ return False
+ except xmlrpclib.ProtocolError as e:
+ if e.errcode == 401:
+ log.error('Password is incorrect.')
+ else:
+ log.error('Protocol Error: %s', e)
+ return False
+
+ return True
+
+ def getAllDownloadStatus(self, ids):
+ """ Get status of all active downloads
+
+ :param ids: list of (mixed) downloader ids
+ Used to match the releases for this downloader as there could be
+ other downloaders active that it should ignore
+ :return: list of releases
+ """
+
+ log.debug('Checking NZBGet download status.')
+
+ rpc = self.getRPC()
+
+ try:
+ if rpc.writelog('DETAIL', 'CouchPotato connected to check status'):
+ log.debug('Successfully connected to NZBGet')
+ else:
+ log.info('Successfully connected to NZBGet, but unable to send a message')
+ except socket.error:
+ log.error('NZBGet is not responding. Please ensure that NZBGet is running and host setting is correct.')
+ return []
+ except xmlrpclib.ProtocolError as e:
+ if e.errcode == 401:
+ log.error('Password is incorrect.')
+ else:
+ log.error('Protocol Error: %s', e)
+ return []
+
+ # Get NZBGet data
+ try:
+ status = rpc.status()
+ groups = rpc.listgroups()
+ queue = rpc.postqueue(0)
+ history = rpc.history()
+ except:
+ log.error('Failed getting data: %s', traceback.format_exc(1))
+ return []
+
+ release_downloads = ReleaseDownloadList(self)
+
+ for nzb in groups:
+ try:
+ nzb_id = [param['Value'] for param in nzb['Parameters'] if param['Name'] == 'couchpotato'][0]
+ except:
+ nzb_id = nzb['NZBID']
+
+ if nzb_id in ids:
+ log.debug('Found %s in NZBGet download queue', nzb['NZBFilename'])
+ timeleft = -1
+ try:
+ if nzb['ActiveDownloads'] > 0 and nzb['DownloadRate'] > 0 and not (status['DownloadPaused'] or status['Download2Paused']):
+ timeleft = str(timedelta(seconds = nzb['RemainingSizeMB'] / status['DownloadRate'] * 2 ^ 20))
+ except:
+ pass
+
+ release_downloads.append({
+ 'id': nzb_id,
+ 'name': nzb['NZBFilename'],
+ 'original_status': 'DOWNLOADING' if nzb['ActiveDownloads'] > 0 else 'QUEUED',
+ # Seems to have no native API function for time left. This will return the time left after NZBGet started downloading this item
+ 'timeleft': timeleft,
+ })
+
+ for nzb in queue: # 'Parameters' is not passed in rpc.postqueue
+ if nzb['NZBID'] in ids:
+ log.debug('Found %s in NZBGet postprocessing queue', nzb['NZBFilename'])
+ release_downloads.append({
+ 'id': nzb['NZBID'],
+ 'name': nzb['NZBFilename'],
+ 'original_status': nzb['Stage'],
+ 'timeleft': str(timedelta(seconds = 0)) if not status['PostPaused'] else -1,
+ })
+
+ for nzb in history:
+ try:
+ nzb_id = [param['Value'] for param in nzb['Parameters'] if param['Name'] == 'couchpotato'][0]
+ except:
+ nzb_id = nzb['NZBID']
+
+ if nzb_id in ids:
+ log.debug('Found %s in NZBGet history. TotalStatus: %s, ParStatus: %s, ScriptStatus: %s, Log: %s', (nzb['NZBFilename'] , nzb['Status'], nzb['ParStatus'], nzb['ScriptStatus'] , nzb['Log']))
+ release_downloads.append({
+ 'id': nzb_id,
+ 'name': nzb['NZBFilename'],
+ 'status': 'completed' if 'SUCCESS' in nzb['Status'] else 'failed',
+ 'original_status': nzb['Status'],
+ 'timeleft': str(timedelta(seconds = 0)),
+ 'folder': sp(nzb['DestDir'])
+ })
+
+ return release_downloads
+
+ def removeFailed(self, release_download):
+
+ log.info('%s failed downloading, deleting...', release_download['name'])
+
+ rpc = self.getRPC()
+
+ try:
+ if rpc.writelog('INFO', 'CouchPotato connected to delete some history'):
+ log.debug('Successfully connected to NZBGet')
+ else:
+ log.info('Successfully connected to NZBGet, but unable to send a message')
+ except socket.error:
+ log.error('NZBGet is not responding. Please ensure that NZBGet is running and host setting is correct.')
+ return False
+ except xmlrpclib.ProtocolError as e:
+ if e.errcode == 401:
+ log.error('Password is incorrect.')
+ else:
+ log.error('Protocol Error: %s', e)
+ return False
+
+ try:
+ history = rpc.history()
+ nzb_id = None
+ path = None
+
+ for hist in history:
+ for param in hist['Parameters']:
+ if param['Name'] == 'couchpotato' and param['Value'] == release_download['id']:
+ nzb_id = hist['ID']
+ path = hist['DestDir']
+
+ if nzb_id and path and rpc.editqueue('HistoryDelete', 0, "", [tryInt(nzb_id)]):
+ shutil.rmtree(path, True)
+ except:
+ log.error('Failed deleting: %s', traceback.format_exc(0))
+ return False
+
+ return True
+
+ def getRPC(self):
+ url = cleanHost(host = self.conf('host'), ssl = self.conf('ssl'), username = self.conf('username'), password = self.conf('password')) + self.rpc
+ return xmlrpclib.ServerProxy(url)
+
+
+config = [{
+ 'name': 'nzbget',
+ 'groups': [
+ {
+ 'tab': 'downloaders',
+ 'list': 'download_providers',
+ 'name': 'nzbget',
+ 'label': 'NZBGet',
+ 'description': 'Use NZBGet to download NZBs.',
+ 'wizard': True,
+ 'options': [
+ {
+ 'name': 'enabled',
+ 'default': 0,
+ 'type': 'enabler',
+ 'radio_group': 'nzb',
+ },
+ {
+ 'name': 'host',
+ 'default': 'localhost:6789',
+ 'description': 'Hostname with port. Usually localhost:6789 ',
+ },
+ {
+ 'name': 'ssl',
+ 'default': 0,
+ 'type': 'bool',
+ 'advanced': True,
+ 'description': 'Use HyperText Transfer Protocol Secure, or https ',
+ },
+ {
+ 'name': 'username',
+ 'default': 'nzbget',
+ 'advanced': True,
+ 'description': 'Set a different username to connect. Default: nzbget',
+ },
+ {
+ 'name': 'password',
+ 'type': 'password',
+ 'description': 'Default NZBGet password is tegbzn6789 ',
+ },
+ {
+ 'name': 'category',
+ 'default': 'Movies',
+ 'description': 'The category CP places the nzb in. Like movies or couchpotato ',
+ },
+ {
+ 'name': 'priority',
+ 'advanced': True,
+ 'default': '0',
+ 'type': 'dropdown',
+ 'values': [('Very Low', -100), ('Low', -50), ('Normal', 0), ('High', 50), ('Very High', 100), ('Forced', 900)],
+ 'description': 'Only change this if you are using NZBget 13.0 or higher',
+ },
+ {
+ 'name': 'manual',
+ 'default': 0,
+ 'type': 'bool',
+ 'advanced': True,
+ 'description': 'Disable this downloader for automated searches, but use it when I manually send a release.',
+ },
+ {
+ 'name': 'delete_failed',
+ 'default': True,
+ 'advanced': True,
+ 'type': 'bool',
+ 'description': 'Delete a release after the download has failed.',
+ },
+ ],
+ }
+ ],
+}]
diff --git a/couchpotato/core/downloaders/nzbget/__init__.py b/couchpotato/core/downloaders/nzbget/__init__.py
deleted file mode 100644
index 403a7e7d10..0000000000
--- a/couchpotato/core/downloaders/nzbget/__init__.py
+++ /dev/null
@@ -1,54 +0,0 @@
-from .main import NZBGet
-
-def start():
- return NZBGet()
-
-config = [{
- 'name': 'nzbget',
- 'groups': [
- {
- 'tab': 'downloaders',
- 'list': 'download_providers',
- 'name': 'nzbget',
- 'label': 'NZBGet',
- 'description': 'Use NZBGet to download NZBs.',
- 'options': [
- {
- 'name': 'enabled',
- 'default': 0,
- 'type': 'enabler',
- 'radio_group': 'nzb',
- },
- {
- 'name': 'host',
- 'default': 'localhost:6789',
- 'description': 'Hostname with port. Usually localhost:6789 ',
- },
- {
- 'name': 'password',
- 'type': 'password',
- 'description': 'Default NZBGet password is tegbzn6789 ',
- },
- {
- 'name': 'category',
- 'default': 'Movies',
- 'description': 'The category CP places the nzb in. Like movies or couchpotato ',
- },
- {
- 'name': 'priority',
- 'default': '0',
- 'type': 'dropdown',
- 'values': [('Very Low', -100), ('Low', -50), ('Normal', 0), ('High', 50), ('Very High', 100)],
- 'description': 'Only change this if you are using NZBget 9.0 or higher',
- },
- {
- 'name': 'manual',
- 'default': 0,
- 'type': 'bool',
- 'advanced': True,
- 'description': 'Disable this downloader for automated searches, but use it when I manually send a release.',
- },
- ],
- }
- ],
-}]
diff --git a/couchpotato/core/downloaders/nzbget/main.py b/couchpotato/core/downloaders/nzbget/main.py
deleted file mode 100644
index 82d8a3634e..0000000000
--- a/couchpotato/core/downloaders/nzbget/main.py
+++ /dev/null
@@ -1,56 +0,0 @@
-from base64 import standard_b64encode
-from couchpotato.core.downloaders.base import Downloader
-from couchpotato.core.helpers.encoding import ss
-from couchpotato.core.helpers.variable import tryInt
-from couchpotato.core.logger import CPLog
-import re
-import socket
-import traceback
-import xmlrpclib
-
-log = CPLog(__name__)
-
-class NZBGet(Downloader):
-
- type = ['nzb']
-
- url = 'http://nzbget:%(password)s@%(host)s/xmlrpc'
-
- def download(self, data = {}, movie = {}, filedata = None):
-
- if not filedata:
- log.error('Unable to get NZB file: %s', traceback.format_exc())
- return False
-
- log.info('Sending "%s" to NZBGet.', data.get('name'))
-
- url = self.url % {'host': self.conf('host'), 'password': self.conf('password')}
- nzb_name = ss('%s.nzb' % self.createNzbName(data, movie))
-
- rpc = xmlrpclib.ServerProxy(url)
- try:
- if rpc.writelog('INFO', 'CouchPotato connected to drop off %s.' % nzb_name):
- log.info('Successfully connected to NZBGet')
- else:
- log.info('Successfully connected to NZBGet, but unable to send a message')
- except socket.error:
- log.error('NZBGet is not responding. Please ensure that NZBGet is running and host setting is correct.')
- return False
- except xmlrpclib.ProtocolError, e:
- if e.errcode == 401:
- log.error('Password is incorrect.')
- else:
- log.error('Protocol Error: %s', e)
- return False
-
- if re.search(r"^0", rpc.version()):
- xml_response = rpc.append(nzb_name, self.conf('category'), False, standard_b64encode(filedata.strip()))
- else:
- xml_response = rpc.append(nzb_name, self.conf('category'), tryInt(self.conf('priority')), False, standard_b64encode(filedata.strip()))
-
- if xml_response:
- log.info('NZB sent successfully to NZBGet')
- return True
- else:
- log.error('NZBGet could not add %s to the queue.', nzb_name)
- return False
diff --git a/couchpotato/core/downloaders/nzbvortex.py b/couchpotato/core/downloaders/nzbvortex.py
new file mode 100644
index 0000000000..5b1fc843dc
--- /dev/null
+++ b/couchpotato/core/downloaders/nzbvortex.py
@@ -0,0 +1,245 @@
+from base64 import b64encode
+import os
+from uuid import uuid4
+import hashlib
+import traceback
+
+from requests import HTTPError
+
+from couchpotato.core._base.downloader.main import DownloaderBase, ReleaseDownloadList
+from couchpotato.core.helpers.encoding import tryUrlencode, sp
+from couchpotato.core.helpers.variable import cleanHost
+from couchpotato.core.logger import CPLog
+
+
+log = CPLog(__name__)
+
+autoload = 'NZBVortex'
+
+
+class NZBVortex(DownloaderBase):
+
+ protocol = ['nzb']
+ api_level = None
+ session_id = None
+
+ def download(self, data = None, media = None, filedata = None):
+ """ Send a torrent/nzb file to the downloader
+
+ :param data: dict returned from provider
+ Contains the release information
+ :param media: media dict with information
+ Used for creating the filename when possible
+ :param filedata: downloaded torrent/nzb filedata
+            The file gets downloaded in the searcher and sent to this function
+ This is done to have failed checking before using the downloader, so the downloader
+ doesn't need to worry about that
+ :return: boolean
+            One failure returns false, but the downloader should log its own errors
+ """
+
+ if not media: media = {}
+ if not data: data = {}
+
+ # Send the nzb
+ try:
+ nzb_filename = self.createFileName(data, filedata, media, unique_tag = True)
+ response = self.call('nzb/add', files = {'file': (nzb_filename, filedata, 'application/octet-stream')}, parameters = {
+ 'name': nzb_filename,
+ 'groupname': self.conf('group')
+ })
+
+ if response and response.get('result', '').lower() == 'ok':
+ return self.downloadReturnId(nzb_filename)
+
+ log.error('Something went wrong sending the NZB file. Response: %s', response)
+ return False
+ except:
+ log.error('Something went wrong sending the NZB file: %s', traceback.format_exc())
+ return False
+
+ def test(self):
+ """ Check if connection works
+ :return: bool
+ """
+
+ try:
+ login_result = self.login()
+ except:
+ return False
+
+ return login_result
+
+ def getAllDownloadStatus(self, ids):
+ """ Get status of all active downloads
+
+ :param ids: list of (mixed) downloader ids
+ Used to match the releases for this downloader as there could be
+ other downloaders active that it should ignore
+ :return: list of releases
+ """
+
+ raw_statuses = self.call('nzb')
+
+ release_downloads = ReleaseDownloadList(self)
+ for nzb in raw_statuses.get('nzbs', []):
+ nzb_id = os.path.basename(nzb['nzbFileName'])
+ if nzb_id in ids:
+
+ # Check status
+ status = 'busy'
+ if nzb['state'] == 20:
+ status = 'completed'
+ elif nzb['state'] in [21, 22, 24]:
+ status = 'failed'
+
+ release_downloads.append({
+ 'temp_id': nzb['id'],
+ 'id': nzb_id,
+ 'name': nzb['uiTitle'],
+ 'status': status,
+ 'original_status': nzb['state'],
+ 'timeleft': -1,
+ 'folder': sp(nzb['destinationPath']),
+ })
+
+ return release_downloads
+
+ def removeFailed(self, release_download):
+
+ log.info('%s failed downloading, deleting...', release_download['name'])
+
+ try:
+ self.call('nzb/%s/cancel' % release_download['temp_id'])
+ except:
+ log.error('Failed deleting: %s', traceback.format_exc(0))
+ return False
+
+ return True
+
+ def login(self):
+
+ nonce = self.call('auth/nonce', auth = False).get('authNonce')
+ cnonce = uuid4().hex
+ hashed = b64encode(hashlib.sha256('%s:%s:%s' % (nonce, cnonce, self.conf('api_key'))).digest())
+
+ params = {
+ 'nonce': nonce,
+ 'cnonce': cnonce,
+ 'hash': hashed
+ }
+
+ login_data = self.call('auth/login', parameters = params, auth = False)
+
+ # Save for later
+ if login_data.get('loginResult') == 'successful':
+ self.session_id = login_data.get('sessionID')
+ return True
+
+ log.error('Login failed, please check you api-key')
+ return False
+
+ def call(self, call, parameters = None, is_repeat = False, auth = True, *args, **kwargs):
+
+ # Login first
+ if not parameters: parameters = {}
+ if not self.session_id and auth:
+ self.login()
+
+ # Always add session id to request
+ if self.session_id:
+ parameters['sessionid'] = self.session_id
+
+ params = tryUrlencode(parameters)
+
+ url = cleanHost(self.conf('host')) + 'api/' + call
+
+ try:
+ data = self.getJsonData('%s%s' % (url, '?' + params if params else ''), *args, cache_timeout = 0, show_error = False, **kwargs)
+
+ if data:
+ return data
+ except HTTPError as e:
+ sc = e.response.status_code
+ if sc == 403:
+ # Try login and do again
+ if not is_repeat:
+ self.login()
+ return self.call(call, parameters = parameters, is_repeat = True, **kwargs)
+
+ log.error('Failed to parsing %s: %s', (self.getName(), traceback.format_exc()))
+ except:
+ log.error('Failed to parsing %s: %s', (self.getName(), traceback.format_exc()))
+
+ return {}
+
+ def getApiLevel(self):
+
+ if not self.api_level:
+
+ try:
+ data = self.call('app/apilevel', auth = False)
+ self.api_level = float(data.get('apilevel'))
+ except HTTPError as e:
+ sc = e.response.status_code
+ if sc == 403:
+ log.error('This version of NZBVortex isn\'t supported. Please update to 2.8.6 or higher')
+ else:
+ log.error('NZBVortex doesn\'t seem to be running or maybe the remote option isn\'t enabled yet: %s', traceback.format_exc(1))
+
+ return self.api_level
+
+ def isEnabled(self, manual = False, data = None):
+ if not data: data = {}
+ return super(NZBVortex, self).isEnabled(manual, data) and self.getApiLevel()
+
+
+config = [{
+ 'name': 'nzbvortex',
+ 'groups': [
+ {
+ 'tab': 'downloaders',
+ 'list': 'download_providers',
+ 'name': 'nzbvortex',
+ 'label': 'NZBVortex',
+ 'description': 'Use NZBVortex to download NZBs.',
+ 'wizard': True,
+ 'options': [
+ {
+ 'name': 'enabled',
+ 'default': 0,
+ 'type': 'enabler',
+ 'radio_group': 'nzb',
+ },
+ {
+ 'name': 'host',
+ 'default': 'https://localhost:4321',
+ 'description': 'Hostname with port. Usually https://localhost:4321 ',
+ },
+ {
+ 'name': 'api_key',
+ 'label': 'Api Key',
+ },
+ {
+ 'name': 'group',
+ 'label': 'Group',
+ 'description': 'The group CP places the nzb in. Make sure to create it in NZBVortex.',
+ },
+ {
+ 'name': 'manual',
+ 'default': False,
+ 'type': 'bool',
+ 'advanced': True,
+ 'description': 'Disable this downloader for automated searches, but use it when I manually send a release.',
+ },
+ {
+ 'name': 'delete_failed',
+ 'default': True,
+ 'advanced': True,
+ 'type': 'bool',
+ 'description': 'Delete a release after the download has failed.',
+ },
+ ],
+ }
+ ],
+}]
diff --git a/couchpotato/core/downloaders/nzbvortex/__init__.py b/couchpotato/core/downloaders/nzbvortex/__init__.py
deleted file mode 100644
index f1604ea84b..0000000000
--- a/couchpotato/core/downloaders/nzbvortex/__init__.py
+++ /dev/null
@@ -1,47 +0,0 @@
-from .main import NZBVortex
-
-def start():
- return NZBVortex()
-
-config = [{
- 'name': 'nzbvortex',
- 'groups': [
- {
- 'tab': 'downloaders',
- 'list': 'download_providers',
- 'name': 'nzbvortex',
- 'label': 'NZBVortex',
- 'description': 'Use NZBVortex to download NZBs.',
- 'wizard': True,
- 'options': [
- {
- 'name': 'enabled',
- 'default': 0,
- 'type': 'enabler',
- 'radio_group': 'nzb',
- },
- {
- 'name': 'host',
- 'default': 'https://localhost:4321',
- },
- {
- 'name': 'api_key',
- 'label': 'Api Key',
- },
- {
- 'name': 'manual',
- 'default': False,
- 'type': 'bool',
- 'advanced': True,
- 'description': 'Disable this downloader for automated searches, but use it when I manually send a release.',
- },
- {
- 'name': 'delete_failed',
- 'default': True,
- 'type': 'bool',
- 'description': 'Delete a release after the download has failed.',
- },
- ],
- }
- ],
-}]
diff --git a/couchpotato/core/downloaders/nzbvortex/main.py b/couchpotato/core/downloaders/nzbvortex/main.py
deleted file mode 100644
index 1462c6782b..0000000000
--- a/couchpotato/core/downloaders/nzbvortex/main.py
+++ /dev/null
@@ -1,170 +0,0 @@
-from base64 import b64encode
-from couchpotato.core.downloaders.base import Downloader
-from couchpotato.core.helpers.encoding import tryUrlencode, ss
-from couchpotato.core.helpers.variable import cleanHost
-from couchpotato.core.logger import CPLog
-from urllib2 import URLError
-from uuid import uuid4
-import hashlib
-import httplib
-import json
-import socket
-import ssl
-import sys
-import traceback
-import urllib2
-
-log = CPLog(__name__)
-
-class NZBVortex(Downloader):
-
- type = ['nzb']
- api_level = None
- session_id = None
-
- def download(self, data = {}, movie = {}, filedata = None):
-
- # Send the nzb
- try:
- nzb_filename = self.createFileName(data, filedata, movie)
- self.call('nzb/add', params = {'file': (ss(nzb_filename), filedata)}, multipart = True)
-
- return True
- except:
- log.error('Something went wrong sending the NZB file: %s', traceback.format_exc())
- return False
-
- def getAllDownloadStatus(self):
-
- raw_statuses = self.call('nzb')
-
- statuses = []
- for item in raw_statuses.get('nzbs', []):
-
- # Check status
- status = 'busy'
- if item['state'] == 20:
- status = 'completed'
- elif item['state'] in [21, 22, 24]:
- status = 'failed'
-
- statuses.append({
- 'id': item['id'],
- 'name': item['uiTitle'],
- 'status': status,
- 'original_status': item['state'],
- 'timeleft':-1,
- })
-
- return statuses
-
- def removeFailed(self, item):
-
- log.info('%s failed downloading, deleting...', item['name'])
-
- try:
- self.call('nzb/%s/cancel' % item['id'])
- except:
- log.error('Failed deleting: %s', traceback.format_exc(0))
- return False
-
- return True
-
- def login(self):
-
- nonce = self.call('auth/nonce', auth = False).get('authNonce')
- cnonce = uuid4().hex
- hashed = b64encode(hashlib.sha256('%s:%s:%s' % (nonce, cnonce, self.conf('api_key'))).digest())
-
- params = {
- 'nonce': nonce,
- 'cnonce': cnonce,
- 'hash': hashed
- }
-
- login_data = self.call('auth/login', parameters = params, auth = False)
-
- # Save for later
- if login_data.get('loginResult') == 'successful':
- self.session_id = login_data.get('sessionID')
- return True
-
- log.error('Login failed, please check you api-key')
- return False
-
-
- def call(self, call, parameters = {}, repeat = False, auth = True, *args, **kwargs):
-
- # Login first
- if not self.session_id and auth:
- self.login()
-
- # Always add session id to request
- if self.session_id:
- parameters['sessionid'] = self.session_id
-
- params = tryUrlencode(parameters)
-
- url = cleanHost(self.conf('host')) + 'api/' + call
- url_opener = urllib2.build_opener(HTTPSHandler())
-
- try:
- data = self.urlopen('%s?%s' % (url, params), opener = url_opener, *args, **kwargs)
-
- if data:
- return json.loads(data)
- except URLError, e:
- if hasattr(e, 'code') and e.code == 403:
- # Try login and do again
- if not repeat:
- self.login()
- return self.call(call, parameters = parameters, repeat = True, *args, **kwargs)
-
- log.error('Failed to parsing %s: %s', (self.getName(), traceback.format_exc()))
- except:
- log.error('Failed to parsing %s: %s', (self.getName(), traceback.format_exc()))
-
- return {}
-
- def getApiLevel(self):
-
- if not self.api_level:
-
- url = cleanHost(self.conf('host')) + 'api/app/apilevel'
- url_opener = urllib2.build_opener(HTTPSHandler())
-
- try:
- data = self.urlopen(url, opener = url_opener, show_error = False)
- self.api_level = float(json.loads(data).get('apilevel'))
- except URLError, e:
- if hasattr(e, 'code') and e.code == 403:
- log.error('This version of NZBVortex isn\'t supported. Please update to 2.8.6 or higher')
- else:
- log.error('NZBVortex doesn\'t seem to be running or maybe the remote option isn\'t enabled yet: %s', traceback.format_exc(1))
-
- return self.api_level
-
- def isEnabled(self, manual, data):
- return super(NZBVortex, self).isEnabled(manual, data) and self.getApiLevel()
-
-
-class HTTPSConnection(httplib.HTTPSConnection):
- def __init__(self, *args, **kwargs):
- httplib.HTTPSConnection.__init__(self, *args, **kwargs)
-
- def connect(self):
- sock = socket.create_connection((self.host, self.port), self.timeout)
- if sys.version_info < (2, 6, 7):
- if hasattr(self, '_tunnel_host'):
- self.sock = sock
- self._tunnel()
- else:
- if self._tunnel_host:
- self.sock = sock
- self._tunnel()
-
- self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file, ssl_version = ssl.PROTOCOL_TLSv1)
-
-class HTTPSHandler(urllib2.HTTPSHandler):
- def https_open(self, req):
- return self.do_open(HTTPSConnection, req)
diff --git a/couchpotato/core/downloaders/pneumatic.py b/couchpotato/core/downloaders/pneumatic.py
new file mode 100644
index 0000000000..4ad32bdb64
--- /dev/null
+++ b/couchpotato/core/downloaders/pneumatic.py
@@ -0,0 +1,129 @@
+from __future__ import with_statement
+import os
+import traceback
+
+from couchpotato.core._base.downloader.main import DownloaderBase
+from couchpotato.core.helpers.encoding import sp
+from couchpotato.core.logger import CPLog
+
+
+log = CPLog(__name__)
+
+autoload = 'Pneumatic'
+
+
+class Pneumatic(DownloaderBase):
+
+ protocol = ['nzb']
+ strm_syntax = 'plugin://plugin.program.pneumatic/?mode=strm&type=add_file&nzb=%s&nzbname=%s'
+ status_support = False
+
+ def download(self, data = None, media = None, filedata = None):
+ """ Send a torrent/nzb file to the downloader
+
+ :param data: dict returned from provider
+ Contains the release information
+ :param media: media dict with information
+ Used for creating the filename when possible
+ :param filedata: downloaded torrent/nzb filedata
+            The file gets downloaded in the searcher and sent to this function
+ This is done to have failed checking before using the downloader, so the downloader
+ doesn't need to worry about that
+ :return: boolean
+            One failure returns false, but the downloader should log its own errors
+ """
+
+ if not media: media = {}
+ if not data: data = {}
+
+ directory = self.conf('directory')
+ if not directory or not os.path.isdir(directory):
+ log.error('No directory set for .strm downloads.')
+ else:
+ try:
+ if not filedata or len(filedata) < 50:
+ log.error('No nzb available!')
+ return False
+
+ full_path = os.path.join(directory, self.createFileName(data, filedata, media))
+
+ try:
+ if not os.path.isfile(full_path):
+ log.info('Downloading %s to %s.', (data.get('protocol'), full_path))
+ with open(full_path, 'wb') as f:
+ f.write(filedata)
+
+ nzb_name = self.createNzbName(data, media)
+ strm_path = os.path.join(directory, nzb_name)
+
+ strm_file = open(strm_path + '.strm', 'wb')
+ strmContent = self.strm_syntax % (full_path, nzb_name)
+ strm_file.write(strmContent)
+ strm_file.close()
+
+ return self.downloadReturnId('')
+
+ else:
+ log.info('File %s already exists.', full_path)
+ return self.downloadReturnId('')
+
+ except:
+ log.error('Failed to download .strm: %s', traceback.format_exc())
+ pass
+
+ except:
+ log.info('Failed to download file %s: %s', (data.get('name'), traceback.format_exc()))
+ return False
+ return False
+
+ def test(self):
+ """ Check if connection works
+ :return: bool
+ """
+
+ directory = self.conf('directory')
+ if directory and os.path.isdir(directory):
+
+ test_file = sp(os.path.join(directory, 'couchpotato_test.txt'))
+
+ # Check if folder is writable
+ self.createFile(test_file, 'This is a test file')
+ if os.path.isfile(test_file):
+ os.remove(test_file)
+ return True
+
+ return False
+
+
+config = [{
+ 'name': 'pneumatic',
+ 'order': 30,
+ 'groups': [
+ {
+ 'tab': 'downloaders',
+ 'list': 'download_providers',
+ 'name': 'pneumatic',
+ 'label': 'Pneumatic',
+ 'description': 'Use Pneumatic to download .strm files.',
+ 'options': [
+ {
+ 'name': 'enabled',
+ 'default': 0,
+ 'type': 'enabler',
+ },
+ {
+ 'name': 'directory',
+ 'type': 'directory',
+ 'description': 'Directory where the .strm file is saved to.',
+ },
+ {
+ 'name': 'manual',
+ 'default': 0,
+ 'type': 'bool',
+ 'advanced': True,
+ 'description': 'Disable this downloader for automated searches, but use it when I manually send a release.',
+ },
+ ],
+ }
+ ],
+}]
diff --git a/couchpotato/core/downloaders/pneumatic/__init__.py b/couchpotato/core/downloaders/pneumatic/__init__.py
deleted file mode 100644
index 96574a7a9e..0000000000
--- a/couchpotato/core/downloaders/pneumatic/__init__.py
+++ /dev/null
@@ -1,37 +0,0 @@
-from .main import Pneumatic
-
-def start():
- return Pneumatic()
-
-config = [{
- 'name': 'pneumatic',
- 'order': 30,
- 'groups': [
- {
- 'tab': 'downloaders',
- 'list': 'download_providers',
- 'name': 'pneumatic',
- 'label': 'Pneumatic',
- 'description': 'Use Pneumatic to download .strm files.',
- 'options': [
- {
- 'name': 'enabled',
- 'default': 0,
- 'type': 'enabler',
- },
- {
- 'name': 'directory',
- 'type': 'directory',
- 'description': 'Directory where the .strm file is saved to.',
- },
- {
- 'name': 'manual',
- 'default': 0,
- 'type': 'bool',
- 'advanced': True,
- 'description': 'Disable this downloader for automated searches, but use it when I manually send a release.',
- },
- ],
- }
- ],
-}]
diff --git a/couchpotato/core/downloaders/pneumatic/main.py b/couchpotato/core/downloaders/pneumatic/main.py
deleted file mode 100644
index 5e2b78547d..0000000000
--- a/couchpotato/core/downloaders/pneumatic/main.py
+++ /dev/null
@@ -1,54 +0,0 @@
-from __future__ import with_statement
-from couchpotato.core.downloaders.base import Downloader
-from couchpotato.core.logger import CPLog
-import os
-import traceback
-
-log = CPLog(__name__)
-
-class Pneumatic(Downloader):
-
- type = ['nzb']
- strm_syntax = 'plugin://plugin.program.pneumatic/?mode=strm&type=add_file&nzb=%s&nzbname=%s'
-
- def download(self, data = {}, movie = {}, filedata = None):
-
- directory = self.conf('directory')
- if not directory or not os.path.isdir(directory):
- log.error('No directory set for .strm downloads.')
- else:
- try:
- if not filedata or len(filedata) < 50:
- log.error('No nzb available!')
- return False
-
- fullPath = os.path.join(directory, self.createFileName(data, filedata, movie))
-
- try:
- if not os.path.isfile(fullPath):
- log.info('Downloading %s to %s.', (data.get('type'), fullPath))
- with open(fullPath, 'wb') as f:
- f.write(filedata)
-
- nzb_name = self.createNzbName(data, movie)
- strm_path = os.path.join(directory, nzb_name)
-
- strm_file = open(strm_path + '.strm', 'wb')
- strmContent = self.strm_syntax % (fullPath, nzb_name)
- strm_file.write(strmContent)
- strm_file.close()
-
- return True
-
- else:
- log.info('File %s already exists.', fullPath)
- return True
-
- except:
- log.error('Failed to download .strm: %s', traceback.format_exc())
- pass
-
- except:
- log.info('Failed to download file %s: %s', (data.get('name'), traceback.format_exc()))
- return False
- return False
diff --git a/couchpotato/core/downloaders/putio/__init__.py b/couchpotato/core/downloaders/putio/__init__.py
new file mode 100644
index 0000000000..0f3654a12b
--- /dev/null
+++ b/couchpotato/core/downloaders/putio/__init__.py
@@ -0,0 +1,74 @@
+from .main import PutIO
+
+
+def autoload():
+ return PutIO()
+
+
+config = [{
+ 'name': 'putio',
+ 'groups': [
+ {
+ 'tab': 'downloaders',
+ 'list': 'download_providers',
+ 'name': 'putio',
+ 'label': 'Put.io',
+ 'description': 'This will start a torrent download on Put.io.',
+ 'wizard': True,
+ 'options': [
+ {
+ 'name': 'enabled',
+ 'default': 0,
+ 'type': 'enabler',
+ 'radio_group': 'torrent',
+ },
+ {
+ 'name': 'oauth_token',
+ 'label': 'oauth_token',
+ 'description': 'This is the OAUTH_TOKEN from your putio API',
+ 'advanced': True,
+ },
+ {
+ 'name': 'folder',
+ 'description': ('The folder on putio where you want the upload to go','Will find the first folder that matches this name'),
+ 'default': 0,
+ },
+ {
+ 'name': 'https',
+ 'description': 'Set to true if your callback host accepts https instead of http',
+ 'type': 'bool',
+ 'default': 0,
+ },
+ {
+ 'name': 'callback_host',
+ 'description': 'Externally reachable url to CP so put.io can do its thing',
+ },
+ {
+ 'name': 'download',
+ 'description': 'Set this to have CouchPotato download the file from Put.io',
+ 'type': 'bool',
+ 'default': 0,
+ },
+ {
+ 'name': 'delete_file',
+ 'description': ('Set this to remove the file from putio after successful download','Does nothing if you don\'t select download'),
+ 'type': 'bool',
+ 'default': 0,
+ },
+ {
+ 'name': 'download_dir',
+ 'type': 'directory',
+ 'label': 'Download Directory',
+ 'description': 'The Directory to download files to, does nothing if you don\'t select download',
+ },
+ {
+ 'name': 'manual',
+ 'default': 0,
+ 'type': 'bool',
+ 'advanced': True,
+ 'description': 'Disable this downloader for automated searches, but use it when I manually send a release.',
+ },
+ ],
+ }
+ ],
+}]
diff --git a/couchpotato/core/downloaders/putio/main.py b/couchpotato/core/downloaders/putio/main.py
new file mode 100644
index 0000000000..a49f870f85
--- /dev/null
+++ b/couchpotato/core/downloaders/putio/main.py
@@ -0,0 +1,185 @@
+from couchpotato.api import addApiView
+from couchpotato.core.event import addEvent, fireEventAsync
+from couchpotato.core._base.downloader.main import DownloaderBase, ReleaseDownloadList
+from couchpotato.core.helpers.variable import cleanHost
+from couchpotato.core.logger import CPLog
+from couchpotato.environment import Env
+from pio import api as pio
+import datetime
+
+log = CPLog(__name__)
+
+autoload = 'Putiodownload'
+
+
+class PutIO(DownloaderBase):
+
+ protocol = ['torrent', 'torrent_magnet']
+ downloading_list = []
+ oauth_authenticate = 'https://api.couchpota.to/authorize/putio/'
+
+ def __init__(self):
+ addApiView('downloader.putio.getfrom', self.getFromPutio, docs = {
+ 'desc': 'Allows you to download a file from Put.io',
+ })
+ addApiView('downloader.putio.auth_url', self.getAuthorizationUrl)
+ addApiView('downloader.putio.credentials', self.getCredentials)
+ addEvent('putio.download', self.putioDownloader)
+
+ return super(PutIO, self).__init__()
+
+ # This is a recursive function to check for the folders
+ def recursionFolder(self, client, folder = 0, tfolder = ''):
+ files = client.File.list(folder)
+ for f in files:
+ if f.content_type == 'application/x-directory':
+ if f.name == tfolder:
+ return f.id
+ else:
+ result = self.recursionFolder(client, f.id, tfolder)
+ if result != 0:
+ return result
+ return 0
+
+ # This will check the root for the folder, and kick off recursively checking sub folders
+ def convertFolder(self, client, folder):
+ if folder == 0:
+ return 0
+ else:
+ return self.recursionFolder(client, 0, folder)
+
+ def download(self, data = None, media = None, filedata = None):
+ if not media: media = {}
+ if not data: data = {}
+
+ log.info('Sending "%s" to put.io', data.get('name'))
+ url = data.get('url')
+ client = pio.Client(self.conf('oauth_token'))
+ putioFolder = self.convertFolder(client, self.conf('folder'))
+ log.debug('putioFolder ID is %s', putioFolder)
+ # It might be possible to call getFromPutio from the renamer; if we can, then we don't need to do this.
+ # Note callback_host is NOT our address, it's the internet host that putio can call too
+ callbackurl = None
+ if self.conf('download'):
+ pre = 'http://'
+ if self.conf('https'):
+ pre = 'https://'
+ callbackurl = pre + self.conf('callback_host') + '%sdownloader.putio.getfrom/' % Env.get('api_base').lstrip('/')
+ log.debug('callbackurl is %s', callbackurl)
+ resp = client.Transfer.add_url(url, callback_url = callbackurl, parent_id = putioFolder)
+ log.debug('resp is %s', resp.id)
+ return self.downloadReturnId(resp.id)
+
+ def test(self):
+ try:
+ client = pio.Client(self.conf('oauth_token'))
+ if client.File.list():
+ return True
+ except:
+ log.info('Failed to get file listing, check OAUTH_TOKEN')
+ return False
+
+ def getAuthorizationUrl(self, host = None, **kwargs):
+
+ callback_url = cleanHost(host) + '%sdownloader.putio.credentials/' % (Env.get('api_base').lstrip('/'))
+ log.debug('callback_url is %s', callback_url)
+
+ target_url = self.oauth_authenticate + "?target=" + callback_url
+ log.debug('target_url is %s', target_url)
+
+ return {
+ 'success': True,
+ 'url': target_url,
+ }
+
+ def getCredentials(self, **kwargs):
+ try:
+ oauth_token = kwargs.get('oauth')
+ except:
+ return 'redirect', Env.get('web_base') + 'settings/downloaders/'
+ log.debug('oauth_token is: %s', oauth_token)
+ self.conf('oauth_token', value = oauth_token);
+ return 'redirect', Env.get('web_base') + 'settings/downloaders/'
+
+ def getAllDownloadStatus(self, ids):
+
+ log.debug('Checking putio download status.')
+ client = pio.Client(self.conf('oauth_token'))
+
+ transfers = client.Transfer.list()
+
+ log.debug(transfers);
+ release_downloads = ReleaseDownloadList(self)
+ for t in transfers:
+ if t.id in ids:
+
+ log.debug('downloading list is %s', self.downloading_list)
+ if t.status == "COMPLETED" and self.conf('download') == False :
+ status = 'completed'
+
+ # So check if we are trying to download something
+ elif t.status == "COMPLETED" and self.conf('download') == True:
+ # Assume we are done
+ status = 'completed'
+ if not self.downloading_list:
+ now = datetime.datetime.utcnow()
+ date_time = datetime.datetime.strptime(t.finished_at,"%Y-%m-%dT%H:%M:%S")
+ # We need to make sure a race condition didn't happen
+ if (now - date_time) < datetime.timedelta(minutes=5):
+ # 5 minutes haven't passed so we wait
+ status = 'busy'
+ else:
+ # If we have the file_id in the downloading_list mark it as busy
+ if str(t.file_id) in self.downloading_list:
+ status = 'busy'
+ else:
+ status = 'busy'
+ release_downloads.append({
+ 'id' : t.id,
+ 'name': t.name,
+ 'status': status,
+ 'timeleft': t.estimated_time,
+ })
+
+ return release_downloads
+
+ def putioDownloader(self, fid):
+
+ log.info('Put.io Real downloader called with file_id: %s',fid)
+ client = pio.Client(self.conf('oauth_token'))
+
+ log.debug('About to get file List')
+ putioFolder = self.convertFolder(client, self.conf('folder'))
+ log.debug('PutioFolderID is %s', putioFolder)
+ files = client.File.list(parent_id=putioFolder)
+ downloaddir = self.conf('download_dir')
+
+ for f in files:
+ if str(f.id) == str(fid):
+ client.File.download(f, dest = downloaddir, delete_after_download = self.conf('delete_file'))
+ # Once the download is complete we need to remove it from the running list.
+ self.downloading_list.remove(fid)
+
+ return True
+
+ def getFromPutio(self, **kwargs):
+
+ try:
+ file_id = str(kwargs.get('file_id'))
+ except:
+ return {
+ 'success' : False,
+ }
+
+ log.info('Put.io Download has been called file_id is %s', file_id)
+ if file_id not in self.downloading_list:
+ self.downloading_list.append(file_id)
+ fireEventAsync('putio.download',fid = file_id)
+ return {
+ 'success': True,
+ }
+
+ return {
+ 'success': False,
+ }
+
diff --git a/couchpotato/core/downloaders/putio/static/putio.js b/couchpotato/core/downloaders/putio/static/putio.js
new file mode 100644
index 0000000000..438348f805
--- /dev/null
+++ b/couchpotato/core/downloaders/putio/static/putio.js
@@ -0,0 +1,68 @@
+var PutIODownloader = new Class({
+
+ initialize: function(){
+ var self = this;
+
+ App.addEvent('loadSettings', self.addRegisterButton.bind(self));
+ },
+
+ addRegisterButton: function(){
+ var self = this;
+
+ var setting_page = App.getPage('Settings');
+ setting_page.addEvent('create', function(){
+
+ var fieldset = setting_page.tabs.downloaders.groups.putio,
+ l = window.location;
+
+ var putio_set = 0;
+ fieldset.getElements('input[type=text]').each(function(el){
+ putio_set += +(el.get('value') !== '');
+ });
+
+ new Element('.ctrlHolder').adopt(
+
+ // Unregister button
+ (putio_set > 0) ?
+ [
+ self.unregister = new Element('a.button.red', {
+ 'text': 'Unregister "'+fieldset.getElement('input[name*=oauth_token]').get('value')+'"',
+ 'events': {
+ 'click': function(){
+ fieldset.getElements('input[name*=oauth_token]').set('value', '').fireEvent('change');
+
+ self.unregister.destroy();
+ self.unregister_or.destroy();
+ }
+ }
+ }),
+ self.unregister_or = new Element('span[text=or]')
+ ]
+ : null,
+
+ // Register button
+ new Element('a.button', {
+ 'text': putio_set > 0 ? 'Register a different account' : 'Register your put.io account',
+ 'events': {
+ 'click': function(){
+ Api.request('downloader.putio.auth_url', {
+ 'data': {
+ 'host': l.protocol + '//' + l.hostname + (l.port ? ':' + l.port : '')
+ },
+ 'onComplete': function(json){
+ window.location = json.url;
+ }
+ });
+ }
+ }
+ })
+ ).inject(fieldset.getElement('.test_button'), 'before');
+ });
+
+ }
+
+});
+
+window.addEvent('domready', function(){
+ new PutIODownloader();
+});
diff --git a/couchpotato/core/downloaders/qbittorrent_.py b/couchpotato/core/downloaders/qbittorrent_.py
new file mode 100644
index 0000000000..f36b6e4fcb
--- /dev/null
+++ b/couchpotato/core/downloaders/qbittorrent_.py
@@ -0,0 +1,274 @@
+from base64 import b16encode, b32decode
+from hashlib import sha1
+from datetime import timedelta
+import os
+import re
+
+from bencode import bencode, bdecode
+from couchpotato.core._base.downloader.main import DownloaderBase, ReleaseDownloadList
+from couchpotato.core.helpers.encoding import sp
+from couchpotato.core.helpers.variable import cleanHost
+from couchpotato.core.logger import CPLog
+from qbittorrent.client import QBittorrentClient
+
+
+log = CPLog(__name__)
+
+autoload = 'qBittorrent'
+
+
+class qBittorrent(DownloaderBase):
+
+ protocol = ['torrent', 'torrent_magnet']
+ qb = None
+
+ def __init__(self):
+ super(qBittorrent, self).__init__()
+
+ def connect(self):
+ if self.qb is not None:
+ self.qb.logout()
+
+ url = cleanHost(self.conf('host'), protocol = True, ssl = False)
+
+ if self.conf('username') and self.conf('password'):
+ self.qb = QBittorrentClient(url)
+ self.qb.login(username=self.conf('username'), password=self.conf('password'))
+ else:
+ self.qb = QBittorrentClient(url)
+
+ return self.qb._is_authenticated
+
+ def test(self):
+ """ Check if connection works
+ :return: bool
+ """
+ return self.connect()
+
+ def download(self, data = None, media = None, filedata = None):
+ """ Send a torrent/nzb file to the downloader
+
+ :param data: dict returned from provider
+ Contains the release information
+ :param media: media dict with information
+ Used for creating the filename when possible
+ :param filedata: downloaded torrent/nzb filedata
+ The file gets downloaded in the searcher and sent to this function
+ This is done to have failed checking before using the downloader, so the downloader
+ doesn't need to worry about that
+ :return: boolean
+ One failure returns false, but the downloader should log its own errors
+ """
+
+ if not media: media = {}
+ if not data: data = {}
+
+ log.debug('Sending "%s" to qBittorrent.', (data.get('name')))
+
+ if not self.connect():
+ return False
+
+ if not filedata and data.get('protocol') == 'torrent':
+ log.error('Failed sending torrent, no data')
+ return False
+
+ if data.get('protocol') == 'torrent_magnet':
+ # Send request to qBittorrent directly as a magnet
+ try:
+ self.qb.download_from_link(data.get('url'), label=self.conf('label'))
+ torrent_hash = re.findall('urn:btih:([\w]{32,40})', data.get('url'))[0].upper()
+ log.info('Torrent [magnet] sent to QBittorrent successfully.')
+ return self.downloadReturnId(torrent_hash)
+
+ except Exception as e:
+ log.error('Failed to send torrent to qBittorrent: %s', e)
+ return False
+
+ if data.get('protocol') == 'torrent':
+ info = bdecode(filedata)["info"]
+ torrent_hash = sha1(bencode(info)).hexdigest()
+
+ # Convert base 32 to hex
+ if len(torrent_hash) == 32:
+ torrent_hash = b16encode(b32decode(torrent_hash))
+
+ # Send request to qBittorrent
+ try:
+ self.qb.download_from_file(filedata, label=self.conf('label'))
+ log.info('Torrent [file] sent to QBittorrent successfully.')
+ return self.downloadReturnId(torrent_hash)
+ except Exception as e:
+ log.error('Failed to send torrent to qBittorrent: %s', e)
+ return False
+
+ def getTorrentStatus(self, torrent):
+
+ if torrent['state'] in ('uploading', 'queuedUP', 'stalledUP'):
+ return 'seeding'
+
+ if torrent['progress'] == 1:
+ return 'completed'
+
+ return 'busy'
+
+ def getAllDownloadStatus(self, ids):
+ """ Get status of all active downloads
+
+ :param ids: list of (mixed) downloader ids
+ Used to match the releases for this downloader as there could be
+ other downloaders active that it should ignore
+ :return: list of releases
+ """
+
+ log.debug('Checking qBittorrent download status.')
+
+ if not self.connect():
+ return []
+
+ try:
+ torrents = self.qb.torrents(status='all', label=self.conf('label'))
+
+ release_downloads = ReleaseDownloadList(self)
+
+ for torrent in torrents:
+ if torrent['hash'] in ids:
+ torrent_filelist = self.qb.get_torrent_files(torrent['hash'])
+
+ torrent_files = []
+ torrent_dir = os.path.join(torrent['save_path'], torrent['name'])
+
+ if os.path.isdir(torrent_dir):
+ torrent['save_path'] = torrent_dir
+
+ if len(torrent_filelist) > 1 and os.path.isdir(torrent_dir): # multi file torrent, path.isdir check makes sure we're not in the root download folder
+ for root, _, files in os.walk(torrent['save_path']):
+ for f in files:
+ torrent_files.append(sp(os.path.join(root, f)))
+
+ else: # multi or single file placed directly in torrent.save_path
+ for f in torrent_filelist:
+ file_path = os.path.join(torrent['save_path'], f['name'])
+ if os.path.isfile(file_path):
+ torrent_files.append(sp(file_path))
+
+ release_downloads.append({
+ 'id': torrent['hash'],
+ 'name': torrent['name'],
+ 'status': self.getTorrentStatus(torrent),
+ 'seed_ratio': torrent['ratio'],
+ 'original_status': torrent['state'],
+ 'timeleft': str(timedelta(seconds = torrent['eta'])),
+ 'folder': sp(torrent['save_path']),
+ 'files': torrent_files
+ })
+
+ return release_downloads
+
+ except Exception as e:
+ log.error('Failed to get status from qBittorrent: %s', e)
+ return []
+
+ def pause(self, release_download, pause = True):
+ if not self.connect():
+ return False
+
+ torrent = self.qb.get_torrent(release_download['id'])
+ if torrent is None:
+ return False
+
+ if pause:
+ return self.qb.pause(release_download['id'])
+ return self.qb.resume(release_download['id'])
+
+ def removeFailed(self, release_download):
+ log.info('%s failed downloading, deleting...', release_download['name'])
+ return self.processComplete(release_download, delete_files = True)
+
+ def processComplete(self, release_download, delete_files):
+ log.debug('Requesting qBittorrent to remove the torrent %s%s.',
+ (release_download['name'], ' and cleanup the downloaded files' if delete_files else ''))
+
+ if not self.connect():
+ return False
+
+ torrent = self.qb.get_torrent(release_download['id'])
+
+ if torrent is None:
+ return False
+
+ if delete_files:
+ self.qb.delete_permanently(release_download['id']) # deletes torrent with data
+ else:
+ self.qb.delete(release_download['id']) # just removes the torrent, doesn't delete data
+
+ return True
+
+
+config = [{
+ 'name': 'qbittorrent',
+ 'groups': [
+ {
+ 'tab': 'downloaders',
+ 'list': 'download_providers',
+ 'name': 'qbittorrent',
+ 'label': 'qBittorrent',
+ 'description': 'Use qBittorrent to download torrents.',
+ 'wizard': True,
+ 'options': [
+ {
+ 'name': 'enabled',
+ 'default': 0,
+ 'type': 'enabler',
+ 'radio_group': 'torrent',
+ },
+ {
+ 'name': 'host',
+ 'default': 'http://localhost:8080/',
+ 'description': 'RPC Communication URI. Usually http://localhost:8080/ '
+ },
+ {
+ 'name': 'username',
+ },
+ {
+ 'name': 'password',
+ 'type': 'password',
+ },
+ {
+ 'name': 'label',
+ 'label': 'Torrent Label',
+ 'default': 'couchpotato',
+ },
+ {
+ 'name': 'remove_complete',
+ 'label': 'Remove torrent',
+ 'default': False,
+ 'advanced': True,
+ 'type': 'bool',
+ 'description': 'Remove the torrent after it finishes seeding.',
+ },
+ {
+ 'name': 'delete_files',
+ 'label': 'Remove files',
+ 'default': True,
+ 'type': 'bool',
+ 'advanced': True,
+ 'description': 'Also remove the leftover files.',
+ },
+ {
+ 'name': 'paused',
+ 'type': 'bool',
+ 'advanced': True,
+ 'default': False,
+ 'description': 'Add the torrent paused.',
+ },
+ {
+ 'name': 'manual',
+ 'default': 0,
+ 'type': 'bool',
+ 'advanced': True,
+ 'description': 'Disable this downloader for automated searches, but use it when I manually send a release.',
+ },
+ ],
+ }
+ ],
+}]
diff --git a/couchpotato/core/downloaders/rtorrent_.py b/couchpotato/core/downloaders/rtorrent_.py
new file mode 100644
index 0000000000..4902cff044
--- /dev/null
+++ b/couchpotato/core/downloaders/rtorrent_.py
@@ -0,0 +1,442 @@
+from base64 import b16encode, b32decode
+from datetime import timedelta
+from hashlib import sha1
+from urlparse import urlparse
+import os
+import re
+
+from couchpotato.core._base.downloader.main import DownloaderBase, ReleaseDownloadList
+from couchpotato.core.event import addEvent
+from couchpotato.core.helpers.encoding import sp
+from couchpotato.core.helpers.variable import cleanHost, splitString
+from couchpotato.core.logger import CPLog
+from bencode import bencode, bdecode
+from rtorrent import RTorrent
+
+
+log = CPLog(__name__)
+
+autoload = 'rTorrent'
+
+
+class rTorrent(DownloaderBase):
+
+ protocol = ['torrent', 'torrent_magnet']
+ rt = None
+ error_msg = ''
+
+ # Migration url to host options
+ def __init__(self):
+ super(rTorrent, self).__init__()
+
+ addEvent('app.load', self.migrate)
+ addEvent('setting.save.rtorrent.*.after', self.settingsChanged)
+
+ def migrate(self):
+
+ url = self.conf('url')
+ if url:
+ host_split = splitString(url.split('://')[-1], split_on = '/')
+
+ self.conf('ssl', value = url.startswith('https'))
+ self.conf('host', value = host_split[0].strip())
+ self.conf('rpc_url', value = '/'.join(host_split[1:]))
+
+ self.deleteConf('url')
+
+ def settingsChanged(self):
+ # Reset active connection if settings have changed
+ if self.rt:
+ log.debug('Settings have changed, closing active connection')
+
+ self.rt = None
+ return True
+
+ def getAuth(self):
+ if not self.conf('username') or not self.conf('password'):
+ # Missing username or password parameter
+ return None
+
+ # Build authentication tuple
+ return (
+ self.conf('authentication'),
+ self.conf('username'),
+ self.conf('password')
+ )
+
+ def getVerifySsl(self):
+ # Ensure verification has been enabled
+ if not self.conf('ssl_verify'):
+ return False
+
+ # Use ca bundle if defined
+ ca_bundle = self.conf('ssl_ca_bundle')
+
+ if ca_bundle and os.path.exists(ca_bundle):
+ return ca_bundle
+
+ # Use default ssl verification
+ return True
+
+ def connect(self, reconnect = False):
+ # Already connected?
+ if not reconnect and self.rt is not None:
+ return self.rt
+
+ url = cleanHost(self.conf('host'), protocol = True, ssl = self.conf('ssl'))
+
+ # Automatically add '+https' to 'httprpc' protocol if SSL is enabled
+ if self.conf('ssl') and url.startswith('httprpc://'):
+ url = url.replace('httprpc://', 'httprpc+https://')
+
+ parsed = urlparse(url)
+
+ # rpc_url is only used on http/https scgi pass-through
+ if parsed.scheme in ['http', 'https']:
+ url += self.conf('rpc_url')
+
+ # Construct client
+ self.rt = RTorrent(
+ url, self.getAuth(),
+ verify_ssl=self.getVerifySsl()
+ )
+
+ self.error_msg = ''
+ try:
+ self.rt.connection.verify()
+ except AssertionError as e:
+ self.error_msg = e.message
+ self.rt = None
+
+ return self.rt
+
+ def test(self):
+ """ Check if connection works
+ :return: bool
+ """
+
+ if self.connect(True):
+ return True
+
+ if self.error_msg:
+ return False, 'Connection failed: ' + self.error_msg
+
+ return False
+
+
+ def download(self, data = None, media = None, filedata = None):
+ """ Send a torrent/nzb file to the downloader
+
+ :param data: dict returned from provider
+ Contains the release information
+ :param media: media dict with information
+ Used for creating the filename when possible
+ :param filedata: downloaded torrent/nzb filedata
+ The file gets downloaded in the searcher and sent to this function
+ This is done to have failed checking before using the downloader, so the downloader
+ doesn't need to worry about that
+ :return: boolean
+ One failure returns false, but the downloader should log its own errors
+ """
+
+ if not media: media = {}
+ if not data: data = {}
+
+ log.debug('Sending "%s" to rTorrent.', (data.get('name')))
+
+ if not self.connect():
+ return False
+
+ torrent_hash = 0
+ torrent_params = {}
+ if self.conf('label'):
+ torrent_params['label'] = self.conf('label')
+
+ if not filedata and data.get('protocol') == 'torrent':
+ log.error('Failed sending torrent, no data')
+ return False
+
+ # Try download magnet torrents
+ if data.get('protocol') == 'torrent_magnet':
+ # Send magnet to rTorrent
+ torrent_hash = re.findall('urn:btih:([\w]{32,40})', data.get('url'))[0].upper()
+ # Send request to rTorrent
+ try:
+ torrent = self.rt.load_magnet(data.get('url'), torrent_hash)
+
+ if not torrent:
+ log.error('Unable to find the torrent, did it fail to load?')
+ return False
+
+ except Exception as err:
+ log.error('Failed to send magnet to rTorrent: %s', err)
+ return False
+
+ if data.get('protocol') == 'torrent':
+ info = bdecode(filedata)["info"]
+ torrent_hash = sha1(bencode(info)).hexdigest().upper()
+
+ # Convert base 32 to hex
+ if len(torrent_hash) == 32:
+ torrent_hash = b16encode(b32decode(torrent_hash))
+
+ # Send request to rTorrent
+ try:
+ # Send torrent to rTorrent
+ torrent = self.rt.load_torrent(filedata, verify_retries=10)
+
+ if not torrent:
+ log.error('Unable to find the torrent, did it fail to load?')
+ return False
+
+ except Exception as err:
+ log.error('Failed to send torrent to rTorrent: %s', err)
+ return False
+
+ try:
+ # Set label
+ if self.conf('label'):
+ torrent.set_custom(1, self.conf('label'))
+
+ if self.conf('directory'):
+ torrent.set_directory(self.conf('directory'))
+
+ # Start torrent
+ if not self.conf('paused', default = 0):
+ torrent.start()
+
+ return self.downloadReturnId(torrent_hash)
+
+ except Exception as err:
+ log.error('Failed to send torrent to rTorrent: %s', err)
+ return False
+
+
+ def getTorrentStatus(self, torrent):
+ if not torrent.complete:
+ return 'busy'
+
+ if torrent.open:
+ return 'seeding'
+
+ return 'completed'
+
+ def getAllDownloadStatus(self, ids):
+ """ Get status of all active downloads
+
+ :param ids: list of (mixed) downloader ids
+ Used to match the releases for this downloader as there could be
+ other downloaders active that it should ignore
+ :return: list of releases
+ """
+
+ log.debug('Checking rTorrent download status.')
+
+ if not self.connect():
+ return []
+
+ try:
+ torrents = self.rt.get_torrents()
+
+ release_downloads = ReleaseDownloadList(self)
+
+ for torrent in torrents:
+ if torrent.info_hash in ids:
+ torrent_directory = os.path.normpath(torrent.directory)
+ torrent_files = []
+
+ for file in torrent.get_files():
+ if not os.path.normpath(file.path).startswith(torrent_directory):
+ file_path = os.path.join(torrent_directory, file.path.lstrip('/'))
+ else:
+ file_path = file.path
+
+ torrent_files.append(sp(file_path))
+
+ release_downloads.append({
+ 'id': torrent.info_hash,
+ 'name': torrent.name,
+ 'status': self.getTorrentStatus(torrent),
+ 'seed_ratio': torrent.ratio,
+ 'original_status': torrent.state,
+ 'timeleft': str(timedelta(seconds = float(torrent.left_bytes) / torrent.down_rate)) if torrent.down_rate > 0 else -1,
+ 'folder': sp(torrent.directory),
+ 'files': torrent_files
+ })
+
+ return release_downloads
+
+ except Exception as err:
+ log.error('Failed to get status from rTorrent: %s', err)
+ return []
+
+ def pause(self, release_download, pause = True):
+ if not self.connect():
+ return False
+
+ torrent = self.rt.find_torrent(release_download['id'])
+ if torrent is None:
+ return False
+
+ if pause:
+ return torrent.pause()
+ return torrent.resume()
+
+ def removeFailed(self, release_download):
+ log.info('%s failed downloading, deleting...', release_download['name'])
+ return self.processComplete(release_download, delete_files = True)
+
+ def processComplete(self, release_download, delete_files):
+ log.debug('Requesting rTorrent to remove the torrent %s%s.',
+ (release_download['name'], ' and cleanup the downloaded files' if delete_files else ''))
+
+ if not self.connect():
+ return False
+
+ torrent = self.rt.find_torrent(release_download['id'])
+
+ if torrent is None:
+ return False
+
+ if delete_files:
+ for file_item in torrent.get_files(): # will only delete files, not dir/sub-dir
+ os.unlink(os.path.join(torrent.directory, file_item.path))
+
+ if torrent.is_multi_file() and torrent.directory.endswith(torrent.name):
+ # Remove empty directories bottom up
+ try:
+ for path, _, _ in os.walk(sp(torrent.directory), topdown = False):
+ os.rmdir(path)
+ except OSError:
+ log.info('Directory "%s" contains extra files, unable to remove', torrent.directory)
+
+ torrent.erase() # just removes the torrent, doesn't delete data
+
+ return True
+
+
+config = [{
+ 'name': 'rtorrent',
+ 'groups': [
+ {
+ 'tab': 'downloaders',
+ 'list': 'download_providers',
+ 'name': 'rtorrent',
+ 'label': 'rTorrent',
+ 'description': 'Use rTorrent to download torrents.',
+ 'wizard': True,
+ 'options': [
+ {
+ 'name': 'enabled',
+ 'default': 0,
+ 'type': 'enabler',
+ 'radio_group': 'torrent',
+ },
+ {
+ 'name': 'ssl',
+ 'label': 'SSL Enabled',
+ 'order': 1,
+ 'default': 0,
+ 'type': 'bool',
+ 'advanced': True,
+ 'description': 'Use HyperText Transfer Protocol Secure, or https ',
+ },
+ {
+ 'name': 'ssl_verify',
+ 'label': 'SSL Verify',
+ 'order': 2,
+ 'default': 1,
+ 'type': 'bool',
+ 'advanced': True,
+ 'description': 'Verify SSL certificate on https connections',
+ },
+ {
+ 'name': 'ssl_ca_bundle',
+ 'label': 'SSL CA Bundle',
+ 'order': 3,
+ 'type': 'string',
+ 'advanced': True,
+ 'description': 'Path to a directory (or file) containing trusted certificate authorities',
+ },
+ {
+ 'name': 'host',
+ 'order': 4,
+ 'default': 'localhost:80',
+ 'description': 'RPC Communication URI. Usually scgi://localhost:5000 , '
+ 'httprpc://localhost/rutorrent or localhost:80 ',
+ },
+ {
+ 'name': 'rpc_url',
+ 'order': 5,
+ 'default': 'RPC2',
+ 'type': 'string',
+ 'advanced': True,
+ 'description': 'Change if your RPC mount is at a different path.',
+ },
+ {
+ 'name': 'authentication',
+ 'order': 6,
+ 'default': 'basic',
+ 'type': 'dropdown',
+ 'advanced': True,
+ 'values': [('Basic', 'basic'), ('Digest', 'digest')],
+ 'description': 'Authentication method used for http(s) connections',
+ },
+ {
+ 'name': 'username',
+ 'order': 7,
+ },
+ {
+ 'name': 'password',
+ 'order': 8,
+ 'type': 'password',
+ },
+ {
+ 'name': 'label',
+ 'order': 9,
+ 'description': 'Label to apply on added torrents.',
+ },
+ {
+ 'name': 'directory',
+ 'order': 10,
+ 'type': 'directory',
+ 'description': 'Download to this directory. Keep empty for default rTorrent download directory.',
+ },
+ {
+ 'name': 'remove_complete',
+ 'label': 'Remove torrent',
+ 'order': 11,
+ 'default': False,
+ 'type': 'bool',
+ 'advanced': True,
+ 'description': 'Remove the torrent after it finishes seeding.',
+ },
+ {
+ 'name': 'delete_files',
+ 'label': 'Remove files',
+ 'order': 12,
+ 'default': True,
+ 'type': 'bool',
+ 'advanced': True,
+ 'description': 'Also remove the leftover files.',
+ },
+ {
+ 'name': 'paused',
+ 'order': 13,
+ 'type': 'bool',
+ 'advanced': True,
+ 'default': False,
+ 'description': 'Add the torrent paused.',
+ },
+ {
+ 'name': 'manual',
+ 'order': 14,
+ 'default': 0,
+ 'type': 'bool',
+ 'advanced': True,
+ 'description': 'Disable this downloader for automated searches, but use it when I manually send a release.',
+ },
+ ],
+ }
+ ],
+}]
diff --git a/couchpotato/core/downloaders/sabnzbd.py b/couchpotato/core/downloaders/sabnzbd.py
new file mode 100644
index 0000000000..47c94ada9c
--- /dev/null
+++ b/couchpotato/core/downloaders/sabnzbd.py
@@ -0,0 +1,309 @@
+from datetime import timedelta
+from urllib2 import URLError
+import json
+import os
+import traceback
+
+from couchpotato.core._base.downloader.main import DownloaderBase, ReleaseDownloadList
+from couchpotato.core.helpers.encoding import tryUrlencode, ss, sp
+from couchpotato.core.helpers.variable import cleanHost, mergeDicts
+from couchpotato.core.logger import CPLog
+from couchpotato.environment import Env
+
+
+log = CPLog(__name__)
+
+autoload = 'Sabnzbd'
+
+
+class Sabnzbd(DownloaderBase):
+
+ protocol = ['nzb']
+
+ def download(self, data = None, media = None, filedata = None):
+ """
+ Send a torrent/nzb file to the downloader
+
+ :param data: dict returned from provider
+ Contains the release information
+ :param media: media dict with information
+ Used for creating the filename when possible
+ :param filedata: downloaded torrent/nzb filedata
+ The file gets downloaded in the searcher and send to this function
+ This is done to have failed checking before using the downloader, so the downloader
+ doesn't need to worry about that
+ :return: boolean
+ One faile returns false, but the downloaded should log his own errors
+ """
+
+ if not media: media = {}
+ if not data: data = {}
+
+ log.info('Sending "%s" to SABnzbd.', data.get('name'))
+
+ req_params = {
+ 'cat': self.conf('category'),
+ 'mode': 'addurl',
+ 'nzbname': self.createNzbName(data, media),
+ 'priority': self.conf('priority'),
+ }
+
+ nzb_filename = None
+ if filedata:
+ if len(filedata) < 50:
+ log.error('No proper nzb available: %s', filedata)
+ return False
+
+ # If it's a .rar, it adds the .rar extension, otherwise it stays .nzb
+ nzb_filename = self.createFileName(data, filedata, media)
+ req_params['mode'] = 'addfile'
+ else:
+ req_params['name'] = data.get('url')
+
+ try:
+ if nzb_filename and req_params.get('mode') is 'addfile':
+ sab_data = self.call(req_params, files = {'nzbfile': (ss(nzb_filename), filedata)})
+ else:
+ sab_data = self.call(req_params)
+ except URLError:
+ log.error('Failed sending release, probably wrong HOST: %s', traceback.format_exc(0))
+ return False
+ except:
+ log.error('Failed sending release, use API key, NOT the NZB key: %s', traceback.format_exc(0))
+ return False
+
+ log.debug('Result from SAB: %s', sab_data)
+ nzo_ids = sab_data.get('nzo_ids', [])
+ if sab_data.get('status') and not sab_data.get('error') and isinstance(nzo_ids, list) and len(nzo_ids) > 0:
+ log.info('NZB sent to SAB successfully.')
+ if filedata:
+ return self.downloadReturnId(nzo_ids[0])
+ else:
+ return True
+ else:
+ log.error('Error getting data from SABNZBd: %s', sab_data)
+ return False
+
+ def test(self):
+ """ Check if connection works
+ Return message if an old version of SAB is used
+ :return: bool
+ """
+
+ try:
+ sab_data = self.call({
+ 'mode': 'version',
+ })
+ v = sab_data.split('.')
+ if sab_data != 'develop' and int(v[0]) == 0 and int(v[1]) < 7:
+ return False, 'Your Sabnzbd client is too old, please update to newest version.'
+
+ # the version check will work even with wrong api key, so we need the next check as well
+ sab_data = self.call({
+ 'mode': 'queue',
+ })
+ if not sab_data:
+ return False
+ except:
+ return False
+
+ return True
+
+ def getAllDownloadStatus(self, ids):
+ """ Get status of all active downloads
+
+ :param ids: list of (mixed) downloader ids
+ Used to match the releases for this downloader as there could be
+ other downloaders active that it should ignore
+ :return: list of releases
+ """
+
+ log.debug('Checking SABnzbd download status.')
+
+ # Go through Queue
+ try:
+ queue = self.call({
+ 'mode': 'queue',
+ })
+ except:
+ log.error('Failed getting queue: %s', traceback.format_exc(1))
+ return []
+
+ # Go through history items
+ try:
+ history = self.call({
+ 'mode': 'history',
+ 'limit': 15,
+ })
+ except:
+ log.error('Failed getting history json: %s', traceback.format_exc(1))
+ return []
+
+ release_downloads = ReleaseDownloadList(self)
+
+ # Get busy releases
+ for nzb in queue.get('slots', []):
+ if nzb['nzo_id'] in ids:
+ status = 'busy'
+ if 'ENCRYPTED / ' in nzb['filename']:
+ status = 'failed'
+
+ release_downloads.append({
+ 'id': nzb['nzo_id'],
+ 'name': nzb['filename'],
+ 'status': status,
+ 'original_status': nzb['status'],
+ 'timeleft': nzb['timeleft'] if not queue['paused'] else -1,
+ })
+
+ # Get old releases
+ for nzb in history.get('slots', []):
+ if nzb['nzo_id'] in ids:
+ status = 'busy'
+ if nzb['status'] == 'Failed' or (nzb['status'] == 'Completed' and nzb['fail_message'].strip()):
+ status = 'failed'
+ elif nzb['status'] == 'Completed':
+ status = 'completed'
+
+ release_downloads.append({
+ 'id': nzb['nzo_id'],
+ 'name': nzb['name'],
+ 'status': status,
+ 'original_status': nzb['status'],
+ 'timeleft': str(timedelta(seconds = 0)),
+ 'folder': sp(os.path.dirname(nzb['storage']) if os.path.isfile(nzb['storage']) else nzb['storage']),
+ })
+
+ return release_downloads
+
+ def removeFailed(self, release_download):
+
+ log.info('%s failed downloading, deleting...', release_download['name'])
+
+ try:
+ self.call({
+ 'mode': 'queue',
+ 'name': 'delete',
+ 'del_files': '1',
+ 'value': release_download['id']
+ }, use_json = False)
+ self.call({
+ 'mode': 'history',
+ 'name': 'delete',
+ 'del_files': '1',
+ 'value': release_download['id']
+ }, use_json = False)
+ except:
+ log.error('Failed deleting: %s', traceback.format_exc(0))
+ return False
+
+ return True
+
+ def processComplete(self, release_download, delete_files = False):
+ log.debug('Requesting SabNZBd to remove the NZB %s.', release_download['name'])
+
+ try:
+ self.call({
+ 'mode': 'history',
+ 'name': 'delete',
+ 'del_files': '0',
+ 'value': release_download['id']
+ }, use_json = False)
+ except:
+ log.error('Failed removing: %s', traceback.format_exc(0))
+ return False
+
+ return True
+
+ def call(self, request_params, use_json = True, **kwargs):
+
+ url = cleanHost(self.conf('host'), ssl = self.conf('ssl')) + 'api?' + tryUrlencode(mergeDicts(request_params, {
+ 'apikey': self.conf('api_key'),
+ 'output': 'json'
+ }))
+
+ data = self.urlopen(url, timeout = 60, show_error = False, headers = {'User-Agent': Env.getIdentifier()}, **kwargs)
+ if use_json:
+ d = json.loads(data)
+ if d.get('error'):
+ log.error('Error getting data from SABNZBd: %s', d.get('error'))
+ return {}
+
+ return d.get(request_params['mode']) or d
+ else:
+ return data
+
+
+# Settings metadata for the SABnzbd downloader: rendered in the 'downloaders'
+# tab ('wizard': True also exposes it in the setup wizard).
+config = [{
+    'name': 'sabnzbd',
+    'groups': [
+        {
+            'tab': 'downloaders',
+            'list': 'download_providers',
+            'name': 'sabnzbd',
+            'label': 'Sabnzbd',
+            'description': 'Use SABnzbd (0.7+) to download NZBs.',
+            'wizard': True,
+            'options': [
+                {
+                    'name': 'enabled',
+                    'default': 0,
+                    'type': 'enabler',
+                    'radio_group': 'nzb',
+                },
+                {
+                    'name': 'host',
+                    'default': 'localhost:8080',
+                },
+                {
+                    'name': 'ssl',
+                    'default': 0,
+                    'type': 'bool',
+                    'advanced': True,
+                    'description': 'Use HyperText Transfer Protocol Secure, or https ',
+                },
+                {
+                    'name': 'api_key',
+                    'label': 'Api Key',
+                    'description': 'Used for all calls to Sabnzbd.',
+                },
+                {
+                    'name': 'category',
+                    'label': 'Category',
+                    'description': 'The category CP places the nzb in. Like movies or couchpotato ',
+                },
+                {
+                    'name': 'priority',
+                    'label': 'Priority',
+                    'type': 'dropdown',
+                    'default': '0',
+                    'advanced': True,
+                    'values': [('Paused', -2), ('Low', -1), ('Normal', 0), ('High', 1), ('Forced', 2)],
+                    'description': 'Add to the queue with this priority.',
+                },
+                {
+                    'name': 'manual',
+                    'default': False,
+                    'type': 'bool',
+                    'advanced': True,
+                    'description': 'Disable this downloader for automated searches, but use it when I manually send a release.',
+                },
+                {
+                    'name': 'remove_complete',
+                    'advanced': True,
+                    'label': 'Remove NZB',
+                    'default': False,
+                    'type': 'bool',
+                    'description': 'Remove the NZB from history after it completed.',
+                },
+                {
+                    'name': 'delete_failed',
+                    'default': True,
+                    'advanced': True,
+                    'type': 'bool',
+                    'description': 'Delete a release after the download has failed.',
+                },
+            ],
+        }
+    ],
+}]
diff --git a/couchpotato/core/downloaders/sabnzbd/__init__.py b/couchpotato/core/downloaders/sabnzbd/__init__.py
deleted file mode 100644
index 6c976f1e49..0000000000
--- a/couchpotato/core/downloaders/sabnzbd/__init__.py
+++ /dev/null
@@ -1,53 +0,0 @@
-from .main import Sabnzbd
-
-def start():
- return Sabnzbd()
-
-config = [{
- 'name': 'sabnzbd',
- 'groups': [
- {
- 'tab': 'downloaders',
- 'list': 'download_providers',
- 'name': 'sabnzbd',
- 'label': 'Sabnzbd',
- 'description': 'Use SABnzbd to download NZBs.',
- 'wizard': True,
- 'options': [
- {
- 'name': 'enabled',
- 'default': 0,
- 'type': 'enabler',
- 'radio_group': 'nzb',
- },
- {
- 'name': 'host',
- 'default': 'localhost:8080',
- },
- {
- 'name': 'api_key',
- 'label': 'Api Key',
- 'description': 'Used for all calls to Sabnzbd.',
- },
- {
- 'name': 'category',
- 'label': 'Category',
- 'description': 'The category CP places the nzb in. Like movies or couchpotato ',
- },
- {
- 'name': 'manual',
- 'default': False,
- 'type': 'bool',
- 'advanced': True,
- 'description': 'Disable this downloader for automated searches, but use it when I manually send a release.',
- },
- {
- 'name': 'delete_failed',
- 'default': True,
- 'type': 'bool',
- 'description': 'Delete a release after the download has failed.',
- },
- ],
- }
- ],
-}]
diff --git a/couchpotato/core/downloaders/sabnzbd/main.py b/couchpotato/core/downloaders/sabnzbd/main.py
deleted file mode 100644
index a287f119ff..0000000000
--- a/couchpotato/core/downloaders/sabnzbd/main.py
+++ /dev/null
@@ -1,153 +0,0 @@
-from couchpotato.core.downloaders.base import Downloader
-from couchpotato.core.helpers.encoding import tryUrlencode, ss
-from couchpotato.core.helpers.variable import cleanHost, mergeDicts
-from couchpotato.core.logger import CPLog
-from couchpotato.environment import Env
-from urllib2 import URLError
-import json
-import traceback
-
-log = CPLog(__name__)
-
-class Sabnzbd(Downloader):
-
- type = ['nzb']
-
- def download(self, data = {}, movie = {}, filedata = None):
-
- log.info('Sending "%s" to SABnzbd.', data.get('name'))
-
- params = {
- 'apikey': self.conf('api_key'),
- 'cat': self.conf('category'),
- 'mode': 'addurl',
- 'nzbname': self.createNzbName(data, movie),
- }
-
- if filedata:
- if len(filedata) < 50:
- log.error('No proper nzb available: %s', (filedata))
- return False
-
- # If it's a .rar, it adds the .rar extension, otherwise it stays .nzb
- nzb_filename = self.createFileName(data, filedata, movie)
- params['mode'] = 'addfile'
- else:
- params['name'] = data.get('url')
-
- url = cleanHost(self.conf('host')) + 'api?' + tryUrlencode(params)
-
- try:
- if params.get('mode') is 'addfile':
- sab = self.urlopen(url, timeout = 60, params = {'nzbfile': (ss(nzb_filename), filedata)}, multipart = True, show_error = False, headers = {'User-Agent': Env.getIdentifier()})
- else:
- sab = self.urlopen(url, timeout = 60, show_error = False, headers = {'User-Agent': Env.getIdentifier()})
- except URLError:
- log.error('Failed sending release, probably wrong HOST: %s', traceback.format_exc(0))
- return False
- except:
- log.error('Failed sending release, use API key, NOT the NZB key: %s', traceback.format_exc(0))
- return False
-
- result = sab.strip()
- if not result:
- log.error('SABnzbd didn\'t return anything.')
- return False
-
- log.debug('Result text from SAB: %s', result[:40])
- if result[:2] == 'ok':
- log.info('NZB sent to SAB successfully.')
- return True
- else:
- log.error(result[:40])
- return False
-
- def getAllDownloadStatus(self):
-
- log.debug('Checking SABnzbd download status.')
-
- # Go through Queue
- try:
- queue = self.call({
- 'mode': 'queue',
- })
- except:
- log.error('Failed getting queue: %s', traceback.format_exc(1))
- return False
-
- # Go through history items
- try:
- history = self.call({
- 'mode': 'history',
- 'limit': 15,
- })
- except:
- log.error('Failed getting history json: %s', traceback.format_exc(1))
- return False
-
- statuses = []
-
- # Get busy releases
- for item in queue.get('slots', []):
- statuses.append({
- 'id': item['nzo_id'],
- 'name': item['filename'],
- 'status': 'busy',
- 'original_status': item['status'],
- 'timeleft': item['timeleft'] if not queue['paused'] else -1,
- })
-
- # Get old releases
- for item in history.get('slots', []):
-
- status = 'busy'
- if item['status'] == 'Failed' or (item['status'] == 'Completed' and item['fail_message'].strip()):
- status = 'failed'
- elif item['status'] == 'Completed':
- status = 'completed'
-
- statuses.append({
- 'id': item['nzo_id'],
- 'name': item['name'],
- 'status': status,
- 'original_status': item['status'],
- 'timeleft': 0,
- })
-
- return statuses
-
- def removeFailed(self, item):
-
- log.info('%s failed downloading, deleting...', item['name'])
-
- try:
- self.call({
- 'mode': 'history',
- 'name': 'delete',
- 'del_files': '1',
- 'value': item['id']
- }, use_json = False)
- except:
- log.error('Failed deleting: %s', traceback.format_exc(0))
- return False
-
- return True
-
- def call(self, params, use_json = True):
-
- url = cleanHost(self.conf('host')) + 'api?' + tryUrlencode(mergeDicts(params, {
- 'apikey': self.conf('api_key'),
- 'output': 'json'
- }))
-
- data = self.urlopen(url, timeout = 60, show_error = False, headers = {'User-Agent': Env.getIdentifier()})
- if use_json:
- d = json.loads(data)
- if d.get('error'):
- log.error('Error getting data from SABNZBd: %s', d.get('error'))
- return {}
-
- return d[params['mode']]
- else:
- return data
-
diff --git a/couchpotato/core/downloaders/synology.py b/couchpotato/core/downloaders/synology.py
new file mode 100644
index 0000000000..4a9b9d7353
--- /dev/null
+++ b/couchpotato/core/downloaders/synology.py
@@ -0,0 +1,260 @@
+import json
+import traceback
+
+from couchpotato.core._base.downloader.main import DownloaderBase
+from couchpotato.core.helpers.encoding import isInt
+from couchpotato.core.helpers.variable import cleanHost
+from couchpotato.core.logger import CPLog
+import requests
+
+
+log = CPLog(__name__)
+
+autoload = 'Synology'
+
+
+class Synology(DownloaderBase):
+
+ protocol = ['nzb', 'torrent', 'torrent_magnet']
+ status_support = False
+
+ def download(self, data = None, media = None, filedata = None):
+ """
+ Send a torrent/nzb file to the downloader
+
+ :param data: dict returned from provider
+ Contains the release information
+ :param media: media dict with information
+ Used for creating the filename when possible
+ :param filedata: downloaded torrent/nzb filedata
+ The file gets downloaded in the searcher and send to this function
+ This is done to have fail checking before using the downloader, so the downloader
+ doesn't need to worry about that
+ :return: boolean
+ One fail returns false, but the downloader should log his own errors
+ """
+
+ if not media: media = {}
+ if not data: data = {}
+
+ response = False
+ log.info('Sending "%s" (%s) to Synology.', (data['name'], data['protocol']))
+
+ # Load host from config and split out port.
+ host = cleanHost(self.conf('host'), protocol = False).split(':')
+ if not isInt(host[1]):
+ log.error('Config properties are not filled in correctly, port is missing.')
+ return False
+
+ try:
+ # Send request to Synology
+ srpc = SynologyRPC(host[0], host[1], self.conf('username'), self.conf('password'), self.conf('destination'))
+ if data['protocol'] == 'torrent_magnet':
+ log.info('Adding torrent URL %s', data['url'])
+ response = srpc.create_task(url = data['url'])
+ elif data['protocol'] in ['nzb', 'torrent']:
+ log.info('Adding %s' % data['protocol'])
+ if not filedata:
+ log.error('No %s data found', data['protocol'])
+ else:
+ filename = data['name'] + '.' + data['protocol']
+ response = srpc.create_task(filename = filename, filedata = filedata)
+ except:
+ log.error('Exception while adding torrent: %s', traceback.format_exc())
+ finally:
+ return self.downloadReturnId('') if response else False
+
+ def test(self):
+ """ Check if connection works
+ :return: bool
+ """
+
+ host = cleanHost(self.conf('host'), protocol = False).split(':')
+ try:
+ srpc = SynologyRPC(host[0], host[1], self.conf('username'), self.conf('password'))
+ test_result = srpc.test()
+ except:
+ return False
+
+ return test_result
+
+ def getEnabledProtocol(self):
+ if self.conf('use_for') == 'both':
+ return super(Synology, self).getEnabledProtocol()
+ elif self.conf('use_for') == 'torrent':
+ return ['torrent', 'torrent_magnet']
+ else:
+ return ['nzb']
+
+ def isEnabled(self, manual = False, data = None):
+ if not data: data = {}
+
+ for_protocol = ['both']
+ if data and 'torrent' in data.get('protocol'):
+ for_protocol.append('torrent')
+ elif data:
+ for_protocol.append(data.get('protocol'))
+
+ return super(Synology, self).isEnabled(manual, data) and\
+ ((self.conf('use_for') in for_protocol))
+
+
+class SynologyRPC(object):
+
+ """SynologyRPC lite library"""
+
+ def __init__(self, host = 'localhost', port = 5000, username = None, password = None, destination = None):
+
+ super(SynologyRPC, self).__init__()
+
+ self.download_url = 'http://%s:%s/webapi/DownloadStation/task.cgi' % (host, port)
+ self.auth_url = 'http://%s:%s/webapi/auth.cgi' % (host, port)
+ self.sid = None
+ self.username = username
+ self.password = password
+ self.destination = destination
+ self.session_name = 'DownloadStation'
+
+ def _login(self):
+ if self.username and self.password:
+ args = {'api': 'SYNO.API.Auth', 'account': self.username, 'passwd': self.password, 'version': 2,
+ 'method': 'login', 'session': self.session_name, 'format': 'sid'}
+ response = self._req(self.auth_url, args)
+ if response['success']:
+ self.sid = response['data']['sid']
+ log.debug('sid=%s', self.sid)
+ else:
+ log.error('Couldn\'t log into Synology, %s', response)
+ return response['success']
+ else:
+ log.error('User or password missing, not using authentication.')
+ return False
+
+ def _logout(self):
+ args = {'api':'SYNO.API.Auth', 'version':1, 'method':'logout', 'session':self.session_name, '_sid':self.sid}
+ return self._req(self.auth_url, args)
+
+ def _req(self, url, args, files = None):
+ response = {'success': False}
+ try:
+ req = requests.post(url, data = args, files = files, verify = False)
+ req.raise_for_status()
+ response = json.loads(req.text)
+ if response['success']:
+ log.info('Synology action successfull')
+ return response
+ except requests.ConnectionError as err:
+ log.error('Synology connection error, check your config %s', err)
+ except requests.HTTPError as err:
+ log.error('SynologyRPC HTTPError: %s', err)
+ except Exception as err:
+ log.error('Exception: %s', err)
+ finally:
+ return response
+
+ def create_task(self, url = None, filename = None, filedata = None):
+ """ Creates new download task in Synology DownloadStation. Either specify
+ url or pair (filename, filedata).
+
+ Returns True if task was created, False otherwise
+ """
+ result = False
+ # login
+ if self._login():
+ args = {'api': 'SYNO.DownloadStation.Task',
+ 'version': '1',
+ 'method': 'create',
+ '_sid': self.sid}
+
+ if self.destination and len(self.destination) > 0:
+ args['destination'] = self.destination
+
+ if url:
+ log.info('Login success, adding torrent URI')
+ args['uri'] = url
+ response = self._req(self.download_url, args = args)
+ if response['success']:
+ log.info('Response: %s', response)
+ else:
+ log.error('Response: %s', response)
+ synoerrortype = {
+ 400 : 'File upload failed',
+ 401 : 'Max number of tasks reached',
+ 402 : 'Destination denied',
+ 403 : 'Destination does not exist',
+ 404 : 'Invalid task id',
+ 405 : 'Invalid task action',
+ 406 : 'No default destination',
+ 407 : 'Set destination failed',
+ 408 : 'File does not exist'
+ }
+ log.error('DownloadStation returned the following error : %s', synoerrortype[response['error']['code']])
+ result = response['success']
+ elif filename and filedata:
+ log.info('Login success, adding torrent')
+ files = {'file': (filename, filedata)}
+ response = self._req(self.download_url, args = args, files = files)
+ log.info('Response: %s', response)
+ result = response['success']
+ else:
+ log.error('Invalid use of SynologyRPC.create_task: either url or filename+filedata must be specified')
+ self._logout()
+
+ return result
+
+ def test(self):
+ return bool(self._login())
+
+
+# Settings metadata for the Synology downloader: rendered in the 'downloaders'
+# tab ('wizard': True also exposes it in the setup wizard).
+config = [{
+    'name': 'synology',
+    'groups': [
+        {
+            'tab': 'downloaders',
+            'list': 'download_providers',
+            'name': 'synology',
+            'label': 'Synology',
+            'description': 'Use Synology Download Station to download.',
+            'wizard': True,
+            'options': [
+                {
+                    'name': 'enabled',
+                    'default': 0,
+                    'type': 'enabler',
+                    'radio_group': 'nzb,torrent',
+                },
+                {
+                    'name': 'host',
+                    'default': 'localhost:5000',
+                    'description': 'Hostname with port. Usually localhost:5000 ',
+                },
+                {
+                    'name': 'username',
+                },
+                {
+                    'name': 'password',
+                    'type': 'password',
+                },
+                {
+                    'name': 'destination',
+                    'description': 'Specify existing destination share to where your files will be downloaded, usually Downloads ',
+                    'advanced': True,
+                },
+                {
+                    'name': 'use_for',
+                    'label': 'Use for',
+                    'default': 'both',
+                    'type': 'dropdown',
+                    'values': [('usenet & torrents', 'both'), ('usenet', 'nzb'), ('torrent', 'torrent')],
+                },
+                {
+                    'name': 'manual',
+                    'default': 0,
+                    'type': 'bool',
+                    'advanced': True,
+                    'description': 'Disable this downloader for automated searches, but use it when I manually send a release.',
+                },
+            ],
+        }
+    ],
+}]
diff --git a/couchpotato/core/downloaders/synology/__init__.py b/couchpotato/core/downloaders/synology/__init__.py
deleted file mode 100644
index 00a135d407..0000000000
--- a/couchpotato/core/downloaders/synology/__init__.py
+++ /dev/null
@@ -1,45 +0,0 @@
-from .main import Synology
-
-def start():
- return Synology()
-
-config = [{
- 'name': 'synology',
- 'groups': [
- {
- 'tab': 'downloaders',
- 'list': 'download_providers',
- 'name': 'synology',
- 'label': 'Synology',
- 'description': 'Use Synology Download Station to download.',
- 'wizard': True,
- 'options': [
- {
- 'name': 'enabled',
- 'default': 0,
- 'type': 'enabler',
- 'radio_group': 'torrent',
- },
- {
- 'name': 'host',
- 'default': 'localhost:5000',
- 'description': 'Hostname with port. Usually localhost:5000 ',
- },
- {
- 'name': 'username',
- },
- {
- 'name': 'password',
- 'type': 'password',
- },
- {
- 'name': 'manual',
- 'default': 0,
- 'type': 'bool',
- 'advanced': True,
- 'description': 'Disable this downloader for automated searches, but use it when I manually send a release.',
- },
- ],
- }
- ],
-}]
diff --git a/couchpotato/core/downloaders/synology/main.py b/couchpotato/core/downloaders/synology/main.py
deleted file mode 100644
index 6e4059807f..0000000000
--- a/couchpotato/core/downloaders/synology/main.py
+++ /dev/null
@@ -1,105 +0,0 @@
-from couchpotato.core.downloaders.base import Downloader
-from couchpotato.core.helpers.encoding import isInt
-from couchpotato.core.logger import CPLog
-import httplib
-import json
-import urllib
-import urllib2
-
-
-log = CPLog(__name__)
-
-class Synology(Downloader):
-
- type = ['torrent_magnet']
- log = CPLog(__name__)
-
- def download(self, data, movie, filedata = None):
-
- log.error('Sending "%s" (%s) to Synology.', (data.get('name'), data.get('type')))
-
- # Load host from config and split out port.
- host = self.conf('host').split(':')
- if not isInt(host[1]):
- log.error('Config properties are not filled in correctly, port is missing.')
- return False
-
- if data.get('type') == 'torrent':
- log.error('Can\'t add binary torrent file')
- return False
-
- try:
- # Send request to Transmission
- srpc = SynologyRPC(host[0], host[1], self.conf('username'), self.conf('password'))
- remote_torrent = srpc.add_torrent_uri(data.get('url'))
- log.info('Response: %s', remote_torrent)
- return remote_torrent['success']
- except Exception, err:
- log.error('Exception while adding torrent: %s', err)
- return False
-
-
-class SynologyRPC(object):
-
- '''SynologyRPC lite library'''
-
- def __init__(self, host = 'localhost', port = 5000, username = None, password = None):
-
- super(SynologyRPC, self).__init__()
-
- self.download_url = 'http://%s:%s/webapi/DownloadStation/task.cgi' % (host, port)
- self.auth_url = 'http://%s:%s/webapi/auth.cgi' % (host, port)
- self.username = username
- self.password = password
- self.session_name = 'DownloadStation'
-
- def _login(self):
- if self.username and self.password:
- args = {'api': 'SYNO.API.Auth', 'account': self.username, 'passwd': self.password, 'version': 2,
- 'method': 'login', 'session': self.session_name, 'format': 'sid'}
- response = self._req(self.auth_url, args)
- if response['success'] == True:
- self.sid = response['data']['sid']
- log.debug('Sid=%s', self.sid)
- return response
- elif self.username or self.password:
- log.error('User or password missing, not using authentication.')
- return False
-
- def _logout(self):
- args = {'api':'SYNO.API.Auth', 'version':1, 'method':'logout', 'session':self.session_name, '_sid':self.sid}
- return self._req(self.auth_url, args)
-
- def _req(self, url, args):
- req_url = url + '?' + urllib.urlencode(args)
- try:
- req_open = urllib2.urlopen(req_url)
- response = json.loads(req_open.read())
- if response['success'] == True:
- log.info('Synology action successfull')
- return response
- except httplib.InvalidURL, err:
- log.error('Invalid Transmission host, check your config %s', err)
- return False
- except urllib2.HTTPError, err:
- log.error('SynologyRPC HTTPError: %s', err)
- return False
- except urllib2.URLError, err:
- log.error('Unable to connect to Synology %s', err)
- return False
-
- def add_torrent_uri(self, torrent):
- log.info('Adding torrent URL %s', torrent)
- response = {}
- # login
- login = self._login()
- if len(login) > 0 and login['success'] == True:
- log.info('Login success, adding torrent')
- args = {'api':'SYNO.DownloadStation.Task', 'version':1, 'method':'create', 'uri':torrent, '_sid':self.sid}
- response = self._req(self.download_url, args)
- self._logout()
- else:
- log.error('Couldn\'t login to Synology, %s', login)
- return response
-
-
diff --git a/couchpotato/core/downloaders/transmission.py b/couchpotato/core/downloaders/transmission.py
new file mode 100644
index 0000000000..2059044aa4
--- /dev/null
+++ b/couchpotato/core/downloaders/transmission.py
@@ -0,0 +1,386 @@
+from base64 import b64encode
+from datetime import timedelta
+import httplib
+import json
+import os.path
+import re
+import urllib2
+
+from couchpotato.core._base.downloader.main import DownloaderBase, ReleaseDownloadList
+from couchpotato.core.helpers.encoding import isInt, sp
+from couchpotato.core.helpers.variable import tryInt, tryFloat, cleanHost
+from couchpotato.core.logger import CPLog
+
+
+log = CPLog(__name__)
+
+autoload = 'Transmission'
+
+
+class Transmission(DownloaderBase):
+
+ protocol = ['torrent', 'torrent_magnet']
+ log = CPLog(__name__)
+ trpc = None
+
+ def connect(self):
+ # Load host from config and split out port.
+ host = cleanHost(self.conf('host')).rstrip('/').rsplit(':', 1)
+ if not isInt(host[1]):
+ log.error('Config properties are not filled in correctly, port is missing.')
+ return False
+
+ self.trpc = TransmissionRPC(host[0], port = host[1], rpc_url = self.conf('rpc_url').strip('/ '), username = self.conf('username'), password = self.conf('password'))
+ return self.trpc
+
+ def download(self, data = None, media = None, filedata = None):
+ """
+ Send a torrent/nzb file to the downloader
+
+ :param data: dict returned from provider
+ Contains the release information
+ :param media: media dict with information
+ Used for creating the filename when possible
+ :param filedata: downloaded torrent/nzb filedata
+ The file gets downloaded in the searcher and sent to this function
+ This is done to have failed checking before using the downloader, so the downloader
+ doesn't need to worry about that
+ :return: boolean
+ One failed download returns false, but the downloader should log its own errors
+ """
+
+ if not media: media = {}
+ if not data: data = {}
+
+ log.info('Sending "%s" (%s) to Transmission.', (data.get('name'), data.get('protocol')))
+
+ if not self.connect():
+ return False
+
+ if not filedata and data.get('protocol') == 'torrent':
+ log.error('Failed sending torrent, no data')
+ return False
+
+ # Set parameters for adding torrent
+ params = {
+ 'paused': self.conf('paused', default = False)
+ }
+
+ if self.conf('directory'):
+ host = cleanHost(self.conf('host')).rstrip('/').rsplit(':', 1)
+ if os.path.isdir(self.conf('directory')) or not (host[0] == '127.0.0.1' or host[0] == 'localhost'):
+ params['download-dir'] = self.conf('directory').rstrip(os.path.sep)
+ else:
+ log.error('Download directory from Transmission settings: %s doesn\'t exist', self.conf('directory'))
+
+ # Change parameters of torrent
+ torrent_params = {}
+ if data.get('seed_ratio'):
+ torrent_params['seedRatioLimit'] = tryFloat(data.get('seed_ratio'))
+ torrent_params['seedRatioMode'] = 1
+
+ if data.get('seed_time'):
+ torrent_params['seedIdleLimit'] = tryInt(data.get('seed_time')) * 60
+ torrent_params['seedIdleMode'] = 1
+
+ # Send request to Transmission
+ if data.get('protocol') == 'torrent_magnet':
+ remote_torrent = self.trpc.add_torrent_uri(data.get('url'), arguments = params)
+ torrent_params['trackerAdd'] = self.torrent_trackers
+ else:
+ remote_torrent = self.trpc.add_torrent_file(b64encode(filedata), arguments = params)
+
+ if not remote_torrent:
+ log.error('Failed sending torrent to Transmission')
+ return False
+
+ data = remote_torrent.get('torrent-added') or remote_torrent.get('torrent-duplicate')
+
+ # Change settings of added torrents
+ if torrent_params:
+ self.trpc.set_torrent(data['hashString'], torrent_params)
+
+ log.info('Torrent sent to Transmission successfully.')
+ return self.downloadReturnId(data['hashString'])
+
+ def test(self):
+ """ Check if connection works
+ :return: bool
+ """
+
+ if self.connect() and self.trpc.get_session():
+ return True
+ return False
+
+ def getAllDownloadStatus(self, ids):
+ """ Get status of all active downloads
+
+ :param ids: list of (mixed) downloader ids
+ Used to match the releases for this downloader as there could be
+ other downloaders active that it should ignore
+ :return: list of releases
+ """
+
+ log.debug('Checking Transmission download status.')
+
+ if not self.connect():
+ return []
+
+ release_downloads = ReleaseDownloadList(self)
+
+ return_params = {
+ 'fields': ['id', 'name', 'hashString', 'percentDone', 'status', 'eta', 'isStalled', 'isFinished', 'downloadDir', 'uploadRatio', 'secondsSeeding', 'seedIdleLimit', 'files']
+ }
+
+ session = self.trpc.get_session()
+ queue = self.trpc.get_alltorrents(return_params)
+ if not (queue and queue.get('torrents')):
+ log.debug('Nothing in queue or error')
+ return []
+
+ for torrent in queue['torrents']:
+ if torrent['hashString'] in ids:
+ log.debug('name=%s / id=%s / downloadDir=%s / hashString=%s / percentDone=%s / status=%s / isStalled=%s / eta=%s / uploadRatio=%s / isFinished=%s / incomplete-dir-enabled=%s / incomplete-dir=%s',
+ (torrent['name'], torrent['id'], torrent['downloadDir'], torrent['hashString'], torrent['percentDone'], torrent['status'], torrent.get('isStalled', 'N/A'), torrent['eta'], torrent['uploadRatio'], torrent['isFinished'], session['incomplete-dir-enabled'], session['incomplete-dir']))
+
+ """
+ https://trac.transmissionbt.com/browser/branches/2.8x/libtransmission/transmission.h#L1853
+ 0 = Torrent is stopped
+ 1 = Queued to check files
+ 2 = Checking files
+ 3 = Queued to download
+ 4 = Downloading
+ 5 = Queued to seed
+ 6 = Seeding
+ """
+
+ status = 'busy'
+ if torrent.get('isStalled') and not torrent['percentDone'] == 1 and self.conf('stalled_as_failed'):
+ status = 'failed'
+ elif torrent['status'] == 0 and torrent['percentDone'] == 1 and torrent['isFinished']:
+ status = 'completed'
+ elif torrent['status'] in [5, 6]:
+ status = 'seeding'
+
+ if session['incomplete-dir-enabled'] and status == 'busy':
+ torrent_folder = session['incomplete-dir']
+ else:
+ torrent_folder = torrent['downloadDir']
+
+ torrent_files = []
+ for file_item in torrent['files']:
+ torrent_files.append(sp(os.path.join(torrent_folder, file_item['name'])))
+
+ release_downloads.append({
+ 'id': torrent['hashString'],
+ 'name': torrent['name'],
+ 'status': status,
+ 'original_status': torrent['status'],
+ 'seed_ratio': torrent['uploadRatio'],
+ 'timeleft': str(timedelta(seconds = torrent['eta'])),
+ 'folder': sp(torrent_folder if len(torrent_files) == 1 else os.path.join(torrent_folder, torrent['name'])),
+ 'files': torrent_files
+ })
+
+ return release_downloads
+
+ def pause(self, release_download, pause = True):
+ if pause:
+ return self.trpc.stop_torrent(release_download['id'])
+ else:
+ return self.trpc.start_torrent(release_download['id'])
+
+ def removeFailed(self, release_download):
+ log.info('%s failed downloading, deleting...', release_download['name'])
+ return self.trpc.remove_torrent(release_download['id'], True)
+
+ def processComplete(self, release_download, delete_files = False):
+ log.debug('Requesting Transmission to remove the torrent %s%s.', (release_download['name'], ' and cleanup the downloaded files' if delete_files else ''))
+ return self.trpc.remove_torrent(release_download['id'], delete_files)
+
+
+class TransmissionRPC(object):
+
+ """TransmissionRPC lite library"""
+ def __init__(self, host = 'http://localhost', port = 9091, rpc_url = 'transmission', username = None, password = None):
+
+ super(TransmissionRPC, self).__init__()
+
+ self.url = host + ':' + str(port) + '/' + rpc_url + '/rpc'
+ self.tag = 0
+ self.session_id = 0
+ self.session = {}
+ if username and password:
+ password_manager = urllib2.HTTPPasswordMgrWithDefaultRealm()
+ password_manager.add_password(realm = None, uri = self.url, user = username, passwd = password)
+ opener = urllib2.build_opener(urllib2.HTTPBasicAuthHandler(password_manager))
+ opener.addheaders = [('User-agent', 'couchpotato-transmission-client/1.0')]
+ urllib2.install_opener(opener)
+ elif username or password:
+ log.debug('User or password missing, not using authentication.')
+ self.session = self.get_session()
+
+ def _request(self, ojson):
+ self.tag += 1
+ headers = {'x-transmission-session-id': str(self.session_id)}
+ request = urllib2.Request(self.url, json.dumps(ojson).encode('utf-8'), headers)
+ try:
+ open_request = urllib2.urlopen(request)
+ response = json.loads(open_request.read())
+ log.debug('request: %s', json.dumps(ojson))
+ log.debug('response: %s', json.dumps(response))
+ if response['result'] == 'success':
+ log.debug('Transmission action successful')
+ return response['arguments']
+ else:
+ log.debug('Unknown failure sending command to Transmission. Return text is: %s', response['result'])
+ return False
+ except httplib.InvalidURL as err:
+ log.error('Invalid Transmission host, check your config %s', err)
+ return False
+ except urllib2.HTTPError as err:
+ if err.code == 401:
+ log.error('Invalid Transmission Username or Password, check your config')
+ return False
+ elif err.code == 409:
+ msg = str(err.read())
+ try:
+ self.session_id = \
+ re.search('X-Transmission-Session-Id:\s*(\w+)', msg).group(1)
+ log.debug('X-Transmission-Session-Id: %s', self.session_id)
+
+ # resend request with the updated header
+
+ return self._request(ojson)
+ except:
+ log.error('Unable to get Transmission Session-Id %s', err)
+ else:
+ log.error('TransmissionRPC HTTPError: %s', err)
+ except urllib2.URLError as err:
+ log.error('Unable to connect to Transmission %s', err)
+
+ def get_session(self):
+ post_data = {'method': 'session-get', 'tag': self.tag}
+ return self._request(post_data)
+
+ def add_torrent_uri(self, torrent, arguments):
+ arguments['filename'] = torrent
+ post_data = {'arguments': arguments, 'method': 'torrent-add', 'tag': self.tag}
+ return self._request(post_data)
+
+ def add_torrent_file(self, torrent, arguments):
+ arguments['metainfo'] = torrent
+ post_data = {'arguments': arguments, 'method': 'torrent-add', 'tag': self.tag}
+ return self._request(post_data)
+
+ def set_torrent(self, torrent_id, arguments):
+ arguments['ids'] = torrent_id
+ post_data = {'arguments': arguments, 'method': 'torrent-set', 'tag': self.tag}
+ return self._request(post_data)
+
+ def get_alltorrents(self, arguments):
+ post_data = {'arguments': arguments, 'method': 'torrent-get', 'tag': self.tag}
+ return self._request(post_data)
+
+ def stop_torrent(self, torrent_id):
+ post_data = {'arguments': {'ids': torrent_id}, 'method': 'torrent-stop', 'tag': self.tag}
+ return self._request(post_data)
+
+ def start_torrent(self, torrent_id):
+ post_data = {'arguments': {'ids': torrent_id}, 'method': 'torrent-start', 'tag': self.tag}
+ return self._request(post_data)
+
+ def remove_torrent(self, torrent_id, delete_local_data):
+ post_data = {'arguments': {'ids': torrent_id, 'delete-local-data': delete_local_data}, 'method': 'torrent-remove', 'tag': self.tag}
+ return self._request(post_data)
+
+
+config = [{
+ 'name': 'transmission',
+ 'groups': [
+ {
+ 'tab': 'downloaders',
+ 'list': 'download_providers',
+ 'name': 'transmission',
+ 'label': 'Transmission',
+ 'description': 'Use Transmission to download torrents.',
+ 'wizard': True,
+ 'options': [
+ {
+ 'name': 'enabled',
+ 'default': 0,
+ 'type': 'enabler',
+ 'radio_group': 'torrent',
+ },
+ {
+ 'name': 'host',
+ 'default': 'http://localhost:9091',
+ 'description': 'Hostname with port. Usually http://localhost:9091 ',
+ },
+ {
+ 'name': 'rpc_url',
+ 'type': 'string',
+ 'default': 'transmission',
+ 'advanced': True,
+ 'description': 'Change if you don\'t run Transmission RPC at the default url.',
+ },
+ {
+ 'name': 'username',
+ },
+ {
+ 'name': 'password',
+ 'type': 'password',
+ },
+ {
+ 'name': 'directory',
+ 'type': 'directory',
+ 'description': 'Download to this directory. Keep empty for default Transmission download directory.',
+ },
+ {
+ 'name': 'remove_complete',
+ 'label': 'Remove torrent',
+ 'default': True,
+ 'advanced': True,
+ 'type': 'bool',
+ 'description': 'Remove the torrent from Transmission after it finished seeding.',
+ },
+ {
+ 'name': 'delete_files',
+ 'label': 'Remove files',
+ 'default': True,
+ 'type': 'bool',
+ 'advanced': True,
+ 'description': 'Also remove the leftover files.',
+ },
+ {
+ 'name': 'paused',
+ 'type': 'bool',
+ 'advanced': True,
+ 'default': False,
+ 'description': 'Add the torrent paused.',
+ },
+ {
+ 'name': 'manual',
+ 'default': 0,
+ 'type': 'bool',
+ 'advanced': True,
+ 'description': 'Disable this downloader for automated searches, but use it when I manually send a release.',
+ },
+ {
+ 'name': 'stalled_as_failed',
+ 'default': True,
+ 'advanced': True,
+ 'type': 'bool',
+ 'description': 'Consider a stalled torrent as failed',
+ },
+ {
+ 'name': 'delete_failed',
+ 'default': True,
+ 'advanced': True,
+ 'type': 'bool',
+ 'description': 'Delete a release after the download has failed.',
+ },
+ ],
+ }
+ ],
+}]
diff --git a/couchpotato/core/downloaders/transmission/__init__.py b/couchpotato/core/downloaders/transmission/__init__.py
deleted file mode 100644
index 210a0d9e46..0000000000
--- a/couchpotato/core/downloaders/transmission/__init__.py
+++ /dev/null
@@ -1,63 +0,0 @@
-from .main import Transmission
-
-def start():
- return Transmission()
-
-config = [{
- 'name': 'transmission',
- 'groups': [
- {
- 'tab': 'downloaders',
- 'list': 'download_providers',
- 'name': 'transmission',
- 'label': 'Transmission',
- 'description': 'Use Transmission to download torrents.',
- 'wizard': True,
- 'options': [
- {
- 'name': 'enabled',
- 'default': 0,
- 'type': 'enabler',
- 'radio_group': 'torrent',
- },
- {
- 'name': 'host',
- 'default': 'localhost:9091',
- 'description': 'Hostname with port. Usually localhost:9091 ',
- },
- {
- 'name': 'username',
- },
- {
- 'name': 'password',
- 'type': 'password',
- },
- {
- 'name': 'paused',
- 'type': 'bool',
- 'default': False,
- 'description': 'Add the torrent paused.',
- },
- {
- 'name': 'directory',
- 'type': 'directory',
- 'description': 'Where should Transmission saved the downloaded files?',
- },
- {
- 'name': 'ratio',
- 'default': 10,
- 'type': 'int',
- 'advanced': True,
- 'description': 'Stop transfer when reaching ratio',
- },
- {
- 'name': 'manual',
- 'default': 0,
- 'type': 'bool',
- 'advanced': True,
- 'description': 'Disable this downloader for automated searches, but use it when I manually send a release.',
- },
- ],
- }
- ],
-}]
diff --git a/couchpotato/core/downloaders/transmission/main.py b/couchpotato/core/downloaders/transmission/main.py
deleted file mode 100644
index 5c13af6e1c..0000000000
--- a/couchpotato/core/downloaders/transmission/main.py
+++ /dev/null
@@ -1,148 +0,0 @@
-from base64 import b64encode
-from couchpotato.core.downloaders.base import Downloader
-from couchpotato.core.helpers.encoding import isInt
-from couchpotato.core.logger import CPLog
-import httplib
-import json
-import os.path
-import re
-import urllib2
-
-log = CPLog(__name__)
-
-
-class Transmission(Downloader):
-
- type = ['torrent', 'torrent_magnet']
- log = CPLog(__name__)
-
- def download(self, data, movie, filedata = None):
-
- log.debug('Sending "%s" (%s) to Transmission.', (data.get('name'), data.get('type')))
-
- # Load host from config and split out port.
- host = self.conf('host').split(':')
- if not isInt(host[1]):
- log.error('Config properties are not filled in correctly, port is missing.')
- return False
-
- # Set parameters for Transmission
- folder_name = self.createFileName(data, filedata, movie)[:-len(data.get('type')) - 1]
- folder_path = os.path.join(self.conf('directory', default = ''), folder_name).rstrip(os.path.sep)
-
- # Create the empty folder to download too
- self.makeDir(folder_path)
-
- params = {
- 'paused': self.conf('paused', default = 0),
- 'download-dir': folder_path
- }
-
- torrent_params = {}
- if self.conf('ratio'):
- torrent_params = {
- 'seedRatioLimit': self.conf('ratio'),
- 'seedRatioMode': self.conf('ratio')
- }
-
- if not filedata and data.get('type') == 'torrent':
- log.error('Failed sending torrent, no data')
- return False
-
- # Send request to Transmission
- try:
- trpc = TransmissionRPC(host[0], port = host[1], username = self.conf('username'), password = self.conf('password'))
- if data.get('type') == 'torrent_magnet':
- remote_torrent = trpc.add_torrent_uri(data.get('url'), arguments = params)
- torrent_params['trackerAdd'] = self.torrent_trackers
- else:
- remote_torrent = trpc.add_torrent_file(b64encode(filedata), arguments = params)
-
- # Change settings of added torrents
- if torrent_params:
- trpc.set_torrent(remote_torrent['torrent-added']['hashString'], torrent_params)
-
- return True
- except Exception, err:
- log.error('Failed to change settings for transfer: %s', err)
- return False
-
-
-class TransmissionRPC(object):
-
- """TransmissionRPC lite library"""
-
- def __init__(self, host = 'localhost', port = 9091, username = None, password = None):
-
- super(TransmissionRPC, self).__init__()
-
- self.url = 'http://' + host + ':' + str(port) + '/transmission/rpc'
- self.tag = 0
- self.session_id = 0
- self.session = {}
- if username and password:
- password_manager = urllib2.HTTPPasswordMgrWithDefaultRealm()
- password_manager.add_password(realm = None, uri = self.url, user = username, passwd = password)
- opener = urllib2.build_opener(urllib2.HTTPBasicAuthHandler(password_manager), urllib2.HTTPDigestAuthHandler(password_manager))
- opener.addheaders = [('User-agent', 'couchpotato-transmission-client/1.0')]
- urllib2.install_opener(opener)
- elif username or password:
- log.debug('User or password missing, not using authentication.')
- self.session = self.get_session()
-
- def _request(self, ojson):
- self.tag += 1
- headers = {'x-transmission-session-id': str(self.session_id)}
- request = urllib2.Request(self.url, json.dumps(ojson).encode('utf-8'), headers)
- try:
- open_request = urllib2.urlopen(request)
- response = json.loads(open_request.read())
- log.debug('response: %s', json.dumps(response))
- if response['result'] == 'success':
- log.debug('Transmission action successfull')
- return response['arguments']
- else:
- log.debug('Unknown failure sending command to Transmission. Return text is: %s', response['result'])
- return False
- except httplib.InvalidURL, err:
- log.error('Invalid Transmission host, check your config %s', err)
- return False
- except urllib2.HTTPError, err:
- if err.code == 401:
- log.error('Invalid Transmission Username or Password, check your config')
- return False
- elif err.code == 409:
- msg = str(err.read())
- try:
- self.session_id = \
- re.search('X-Transmission-Session-Id:\s*(\w+)', msg).group(1)
- log.debug('X-Transmission-Session-Id: %s', self.session_id)
-
- # #resend request with the updated header
-
- return self._request(ojson)
- except:
- log.error('Unable to get Transmission Session-Id %s', err)
- else:
- log.error('TransmissionRPC HTTPError: %s', err)
- except urllib2.URLError, err:
- log.error('Unable to connect to Transmission %s', err)
-
- def get_session(self):
- post_data = {'method': 'session-get', 'tag': self.tag}
- return self._request(post_data)
-
- def add_torrent_uri(self, torrent, arguments):
- arguments['filename'] = torrent
- post_data = {'arguments': arguments, 'method': 'torrent-add', 'tag': self.tag}
- return self._request(post_data)
-
- def add_torrent_file(self, torrent, arguments):
- arguments['metainfo'] = torrent
- post_data = {'arguments': arguments, 'method': 'torrent-add', 'tag': self.tag}
- return self._request(post_data)
-
- def set_torrent(self, torrent_id, arguments):
- arguments['ids'] = torrent_id
- post_data = {'arguments': arguments, 'method': 'torrent-set', 'tag': self.tag}
- return self._request(post_data)
diff --git a/couchpotato/core/downloaders/utorrent.py b/couchpotato/core/downloaders/utorrent.py
new file mode 100644
index 0000000000..264e4965f3
--- /dev/null
+++ b/couchpotato/core/downloaders/utorrent.py
@@ -0,0 +1,429 @@
+from base64 import b16encode, b32decode
+from datetime import timedelta
+from hashlib import sha1
+import cookielib
+import httplib
+import json
+import os
+import re
+import stat
+import time
+import urllib
+import urllib2
+
+from bencode import bencode as benc, bdecode
+from couchpotato.core._base.downloader.main import DownloaderBase, ReleaseDownloadList
+from couchpotato.core.helpers.encoding import isInt, ss, sp
+from couchpotato.core.helpers.variable import tryInt, tryFloat, cleanHost
+from couchpotato.core.logger import CPLog
+from multipartpost import MultipartPostHandler
+
+
+log = CPLog(__name__)
+
+autoload = 'uTorrent'
+
+
+class uTorrent(DownloaderBase):
+
+ protocol = ['torrent', 'torrent_magnet']
+ utorrent_api = None
+ status_flags = {
+ 'STARTED': 1,
+ 'CHECKING': 2,
+ 'CHECK-START': 4,
+ 'CHECKED': 8,
+ 'ERROR': 16,
+ 'PAUSED': 32,
+ 'QUEUED': 64,
+ 'LOADED': 128
+ }
+
+ def connect(self):
+ # Load host from config and split out port.
+ host = cleanHost(self.conf('host'), protocol = False).split(':')
+ if not isInt(host[1]):
+ log.error('Config properties are not filled in correctly, port is missing.')
+ return False
+
+ self.utorrent_api = uTorrentAPI(host[0], port = host[1], username = self.conf('username'), password = self.conf('password'))
+
+ return self.utorrent_api
+
+ def download(self, data = None, media = None, filedata = None):
+ """
+ Send a torrent/nzb file to the downloader
+
+ :param data: dict returned from provider
+ Contains the release information
+ :param media: media dict with information
+ Used for creating the filename when possible
+ :param filedata: downloaded torrent/nzb filedata
+ The file gets downloaded in the searcher and sent to this function
+ This is done to have failed checking before using the downloader, so the downloader
+ doesn't need to worry about that
+ :return: boolean
+ One failed download returns false, but the downloader should log its own errors
+ """
+
+ if not media: media = {}
+ if not data: data = {}
+
+ log.debug("Sending '%s' (%s) to uTorrent.", (data.get('name'), data.get('protocol')))
+
+ if not self.connect():
+ return False
+
+ torrent_params = {}
+ if self.conf('label'):
+ torrent_params['label'] = self.conf('label')
+
+ if not filedata and data.get('protocol') == 'torrent':
+ log.error('Failed sending torrent, no data')
+ return False
+
+ if data.get('protocol') == 'torrent_magnet':
+ torrent_hash = re.findall('urn:btih:([\w]{32,40})', data.get('url'))[0].upper()
+ torrent_params['trackers'] = '%0D%0A%0D%0A'.join(self.torrent_trackers)
+ else:
+ info = bdecode(filedata)['info']
+ torrent_hash = sha1(benc(info)).hexdigest().upper()
+
+ torrent_filename = self.createFileName(data, filedata, media)
+
+ if data.get('seed_ratio'):
+ torrent_params['seed_override'] = 1
+ torrent_params['seed_ratio'] = tryInt(tryFloat(data['seed_ratio']) * 1000)
+
+ if data.get('seed_time'):
+ torrent_params['seed_override'] = 1
+ torrent_params['seed_time'] = tryInt(data['seed_time']) * 3600
+
+ # Convert base 32 to hex
+ if len(torrent_hash) == 32:
+ torrent_hash = b16encode(b32decode(torrent_hash))
+
+ # Send request to uTorrent
+ if data.get('protocol') == 'torrent_magnet':
+ self.utorrent_api.add_torrent_uri(torrent_filename, data.get('url'))
+ else:
+ self.utorrent_api.add_torrent_file(torrent_filename, filedata)
+
+ # Change settings of added torrent
+ self.utorrent_api.set_torrent(torrent_hash, torrent_params)
+ if self.conf('paused', default = 0):
+ self.utorrent_api.pause_torrent(torrent_hash)
+
+ return self.downloadReturnId(torrent_hash)
+
+ def test(self):
+ """ Check if connection works
+ :return: bool
+ """
+
+ if self.connect():
+ build_version = self.utorrent_api.get_build()
+ if not build_version:
+ return False
+ if build_version < 25406: # This build corresponds to version 3.0.0 stable
+ return False, 'Your uTorrent client is too old, please update to newest version.'
+ return True
+
+ return False
+
+ def getAllDownloadStatus(self, ids):
+ """ Get status of all active downloads
+
+ :param ids: list of (mixed) downloader ids
+ Used to match the releases for this downloader as there could be
+ other downloaders active that it should ignore
+ :return: list of releases
+ """
+
+ log.debug('Checking uTorrent download status.')
+
+ if not self.connect():
+ return []
+
+ release_downloads = ReleaseDownloadList(self)
+
+ data = self.utorrent_api.get_status()
+ if not data:
+ log.error('Error getting data from uTorrent')
+ return []
+
+ queue = json.loads(data)
+ if queue.get('error'):
+ log.error('Error getting data from uTorrent: %s', queue.get('error'))
+ return []
+
+ if not queue.get('torrents'):
+ log.debug('Nothing in queue')
+ return []
+
+ # Get torrents
+ for torrent in queue['torrents']:
+ if torrent[0] in ids:
+
+ #Get files of the torrent
+ torrent_files = []
+ try:
+ torrent_files = json.loads(self.utorrent_api.get_files(torrent[0]))
+ torrent_files = [sp(os.path.join(torrent[26], torrent_file[0])) for torrent_file in torrent_files['files'][1]]
+ except:
+ log.debug('Failed getting files from torrent: %s', torrent[2])
+
+ status = 'busy'
+ if (torrent[1] & self.status_flags['STARTED'] or torrent[1] & self.status_flags['QUEUED']) and torrent[4] == 1000:
+ status = 'seeding'
+ elif torrent[1] & self.status_flags['ERROR'] and 'There is not enough space on the disk' not in torrent[21]:
+ status = 'failed'
+ elif torrent[4] == 1000:
+ status = 'completed'
+
+ if not status == 'busy':
+ self.removeReadOnly(torrent_files)
+
+ release_downloads.append({
+ 'id': torrent[0],
+ 'name': torrent[2],
+ 'status': status,
+ 'seed_ratio': float(torrent[7]) / 1000,
+ 'original_status': torrent[1],
+ 'timeleft': str(timedelta(seconds = torrent[10])),
+ 'folder': sp(torrent[26]),
+ 'files': torrent_files
+ })
+
+ return release_downloads
+
+ def pause(self, release_download, pause = True):
+ if not self.connect():
+ return False
+ return self.utorrent_api.pause_torrent(release_download['id'], pause)
+
+ def removeFailed(self, release_download):
+ log.info('%s failed downloading, deleting...', release_download['name'])
+ if not self.connect():
+ return False
+ return self.utorrent_api.remove_torrent(release_download['id'], remove_data = True)
+
+ def processComplete(self, release_download, delete_files = False):
+ log.debug('Requesting uTorrent to remove the torrent %s%s.', (release_download['name'], ' and cleanup the downloaded files' if delete_files else ''))
+ if not self.connect():
+ return False
+ return self.utorrent_api.remove_torrent(release_download['id'], remove_data = delete_files)
+
+ def removeReadOnly(self, files):
+ # Removes all read-only flags on all given files
+ for filepath in files:
+ if os.path.isfile(filepath):
+ #Windows only needs S_IWRITE, but we bitwise-or with current perms to preserve other permission bits on Linux
+ os.chmod(filepath, stat.S_IWRITE | os.stat(filepath).st_mode)
+
+class uTorrentAPI(object):
+
+ def __init__(self, host = 'localhost', port = 8000, username = None, password = None):
+
+ super(uTorrentAPI, self).__init__()
+
+ self.url = 'http://' + str(host) + ':' + str(port) + '/gui/'
+ self.token = ''
+ self.last_time = time.time()
+ cookies = cookielib.CookieJar()
+ self.opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookies), MultipartPostHandler)
+ self.opener.addheaders = [('User-agent', 'couchpotato-utorrent-client/1.0')]
+ if username and password:
+ password_manager = urllib2.HTTPPasswordMgrWithDefaultRealm()
+ password_manager.add_password(realm = None, uri = self.url, user = username, passwd = password)
+ self.opener.add_handler(urllib2.HTTPBasicAuthHandler(password_manager))
+ elif username or password:
+ log.debug('User or password missing, not using authentication.')
+ self.token = self.get_token()
+
+ def _request(self, action, data = None):
+ if time.time() > self.last_time + 1800:
+ self.last_time = time.time()
+ self.token = self.get_token()
+ request = urllib2.Request(self.url + '?token=' + self.token + '&' + action, data)
+ try:
+ open_request = self.opener.open(request)
+ response = open_request.read()
+ if response:
+ return response
+ else:
+ log.debug('Unknown failure sending command to uTorrent. Return text is: %s', response)
+ except httplib.InvalidURL as err:
+ log.error('Invalid uTorrent host, check your config %s', err)
+ except urllib2.HTTPError as err:
+ if err.code == 401:
+ log.error('Invalid uTorrent Username or Password, check your config')
+ else:
+ log.error('uTorrent HTTPError: %s', err)
+ except urllib2.URLError as err:
+ log.error('Unable to connect to uTorrent %s', err)
+ return False
+
+ def get_token(self):
+ request = self.opener.open(self.url + 'token.html')
+ token = re.findall('<div.*?>(.*?)</', request.read())[0]
+ return token
+
+ def add_torrent_uri(self, filename, torrent, add_folder = False):
+ action = 'action=add-url&s=%s' % urllib.quote(torrent)
+ if add_folder:
+ action += '&path=%s' % urllib.quote(filename)
+ return self._request(action)
+
+ def add_torrent_file(self, filename, filedata, add_folder = False):
+ action = 'action=add-file'
+ if add_folder:
+ action += '&path=%s' % urllib.quote(filename)
+ return self._request(action, {'torrent_file': (ss(filename), filedata)})
+
+ def set_torrent(self, hash, params):
+ action = 'action=setprops&hash=%s' % hash
+ for k, v in params.items():
+ action += '&s=%s&v=%s' % (k, v)
+ return self._request(action)
+
+ def pause_torrent(self, hash, pause = True):
+ if pause:
+ action = 'action=pause&hash=%s' % hash
+ else:
+ action = 'action=unpause&hash=%s' % hash
+ return self._request(action)
+
+ def stop_torrent(self, hash):
+ action = 'action=stop&hash=%s' % hash
+ return self._request(action)
+
+ def remove_torrent(self, hash, remove_data = False):
+ if remove_data:
+ action = 'action=removedata&hash=%s' % hash
+ else:
+ action = 'action=remove&hash=%s' % hash
+ return self._request(action)
+
+ def get_status(self):
+ action = 'list=1'
+ return self._request(action)
+
+ def get_settings(self):
+ action = 'action=getsettings'
+ settings_dict = {}
+ try:
+ utorrent_settings = json.loads(self._request(action))
+
+ # Create settings dict
+ for setting in utorrent_settings['settings']:
+ if setting[1] == 0: # int
+ settings_dict[setting[0]] = int(setting[2] if not setting[2].strip() == '' else '0')
+ elif setting[1] == 1: # bool
+ settings_dict[setting[0]] = True if setting[2] == 'true' else False
+ elif setting[1] == 2: # string
+ settings_dict[setting[0]] = setting[2]
+
+ #log.debug('uTorrent settings: %s', settings_dict)
+
+ except Exception as err:
+ log.error('Failed to get settings from uTorrent: %s', err)
+
+ return settings_dict
+
+ def set_settings(self, settings_dict = None):
+ if not settings_dict: settings_dict = {}
+
+ for key in settings_dict:
+ if isinstance(settings_dict[key], bool):
+ settings_dict[key] = 1 if settings_dict[key] else 0
+
+ action = 'action=setsetting' + ''.join(['&s=%s&v=%s' % (key, value) for (key, value) in settings_dict.items()])
+ return self._request(action)
+
+ def get_files(self, hash):
+ action = 'action=getfiles&hash=%s' % hash
+ return self._request(action)
+
+ def get_build(self):
+ data = self._request('')
+ if not data:
+ return False
+ response = json.loads(data)
+ return int(response.get('build'))
+
+
+config = [{
+ 'name': 'utorrent',
+ 'groups': [
+ {
+ 'tab': 'downloaders',
+ 'list': 'download_providers',
+ 'name': 'utorrent',
+ 'label': 'uTorrent',
+ 'description': 'Use uTorrent (3.0+) to download torrents.',
+ 'wizard': True,
+ 'options': [
+ {
+ 'name': 'enabled',
+ 'default': 0,
+ 'type': 'enabler',
+ 'radio_group': 'torrent',
+ },
+ {
+ 'name': 'host',
+ 'default': 'localhost:8000',
+ 'description': 'Port can be found in settings when enabling WebUI.',
+ },
+ {
+ 'name': 'username',
+ },
+ {
+ 'name': 'password',
+ 'type': 'password',
+ },
+ {
+ 'name': 'label',
+ 'description': 'Label to add torrent as.',
+ },
+ {
+ 'name': 'remove_complete',
+ 'label': 'Remove torrent',
+ 'default': True,
+ 'advanced': True,
+ 'type': 'bool',
+ 'description': 'Remove the torrent from uTorrent after it finished seeding.',
+ },
+ {
+ 'name': 'delete_files',
+ 'label': 'Remove files',
+ 'default': True,
+ 'type': 'bool',
+ 'advanced': True,
+ 'description': 'Also remove the leftover files.',
+ },
+ {
+ 'name': 'paused',
+ 'type': 'bool',
+ 'advanced': True,
+ 'default': False,
+ 'description': 'Add the torrent paused.',
+ },
+ {
+ 'name': 'manual',
+ 'default': 0,
+ 'type': 'bool',
+ 'advanced': True,
+ 'description': 'Disable this downloader for automated searches, but use it when I manually send a release.',
+ },
+ {
+ 'name': 'delete_failed',
+ 'default': True,
+ 'advanced': True,
+ 'type': 'bool',
+ 'description': 'Delete a release after the download has failed.',
+ },
+ ],
+ }
+ ],
+}]
diff --git a/couchpotato/core/downloaders/utorrent/__init__.py b/couchpotato/core/downloaders/utorrent/__init__.py
deleted file mode 100644
index 2c494eb208..0000000000
--- a/couchpotato/core/downloaders/utorrent/__init__.py
+++ /dev/null
@@ -1,55 +0,0 @@
-from .main import uTorrent
-
-def start():
- return uTorrent()
-
-config = [{
- 'name': 'utorrent',
- 'groups': [
- {
- 'tab': 'downloaders',
- 'list': 'download_providers',
- 'name': 'utorrent',
- 'label': 'uTorrent',
- 'description': 'Use uTorrent to download torrents.',
- 'wizard': True,
- 'options': [
- {
- 'name': 'enabled',
- 'default': 0,
- 'type': 'enabler',
- 'radio_group': 'torrent',
- },
- {
- 'name': 'host',
- 'default': 'localhost:8000',
- 'description': 'Hostname with port. Usually localhost:8000 ',
- },
- {
- 'name': 'username',
- },
- {
- 'name': 'password',
- 'type': 'password',
- },
- {
- 'name': 'label',
- 'description': 'Label to add torrent as.',
- },
- {
- 'name': 'paused',
- 'type': 'bool',
- 'default': False,
- 'description': 'Add the torrent paused.',
- },
- {
- 'name': 'manual',
- 'default': 0,
- 'type': 'bool',
- 'advanced': True,
- 'description': 'Disable this downloader for automated searches, but use it when I manually send a release.',
- },
- ],
- }
- ],
-}]
diff --git a/couchpotato/core/downloaders/utorrent/main.py b/couchpotato/core/downloaders/utorrent/main.py
deleted file mode 100644
index 5953b11790..0000000000
--- a/couchpotato/core/downloaders/utorrent/main.py
+++ /dev/null
@@ -1,197 +0,0 @@
-from base64 import b16encode, b32decode
-from bencode import bencode, bdecode
-from couchpotato.core.downloaders.base import Downloader
-from couchpotato.core.helpers.encoding import isInt, ss
-from couchpotato.core.logger import CPLog
-from hashlib import sha1
-from multipartpost import MultipartPostHandler
-import cookielib
-import httplib
-import json
-import re
-import time
-import urllib
-import urllib2
-
-
-log = CPLog(__name__)
-
-
-class uTorrent(Downloader):
-
- type = ['torrent', 'torrent_magnet']
- utorrent_api = None
-
- def download(self, data, movie, filedata = None):
-
- log.debug('Sending "%s" (%s) to uTorrent.', (data.get('name'), data.get('type')))
-
- # Load host from config and split out port.
- host = self.conf('host').split(':')
- if not isInt(host[1]):
- log.error('Config properties are not filled in correctly, port is missing.')
- return False
-
- torrent_params = {}
- if self.conf('label'):
- torrent_params['label'] = self.conf('label')
-
- if not filedata and data.get('type') == 'torrent':
- log.error('Failed sending torrent, no data')
- return False
-
- if data.get('type') == 'torrent_magnet':
- torrent_hash = re.findall('urn:btih:([\w]{32,40})', data.get('url'))[0].upper()
- torrent_params['trackers'] = '%0D%0A%0D%0A'.join(self.torrent_trackers)
- else:
- info = bdecode(filedata)["info"]
- torrent_hash = sha1(bencode(info)).hexdigest().upper()
- torrent_filename = self.createFileName(data, filedata, movie)
-
- # Convert base 32 to hex
- if len(torrent_hash) == 32:
- torrent_hash = b16encode(b32decode(torrent_hash))
-
- # Send request to uTorrent
- try:
- if not self.utorrent_api:
- self.utorrent_api = uTorrentAPI(host[0], port = host[1], username = self.conf('username'), password = self.conf('password'))
-
- if data.get('type') == 'torrent_magnet':
- self.utorrent_api.add_torrent_uri(data.get('url'))
- else:
- self.utorrent_api.add_torrent_file(torrent_filename, filedata)
-
- # Change settings of added torrents
- self.utorrent_api.set_torrent(torrent_hash, torrent_params)
- if self.conf('paused', default = 0):
- self.utorrent_api.pause_torrent(torrent_hash)
- return True
- except Exception, err:
- log.error('Failed to send torrent to uTorrent: %s', err)
- return False
-
- def getAllDownloadStatus(self):
-
- log.debug('Checking uTorrent download status.')
-
- # Load host from config and split out port.
- host = self.conf('host').split(':')
- if not isInt(host[1]):
- log.error('Config properties are not filled in correctly, port is missing.')
- return False
-
- try:
- self.utorrent_api = uTorrentAPI(host[0], port = host[1], username = self.conf('username'), password = self.conf('password'))
- except Exception, err:
- log.error('Failed to get uTorrent object: %s', err)
- return False
-
- data = ''
- try:
- data = self.utorrent_api.get_status()
- queue = json.loads(data)
- if queue.get('error'):
- log.error('Error getting data from uTorrent: %s', queue.get('error'))
- return False
-
- except Exception, err:
- log.error('Failed to get status from uTorrent: %s', err)
- return False
-
- if queue.get('torrents', []) == []:
- log.debug('Nothing in queue')
- return False
-
- statuses = []
-
- # Get torrents
- for item in queue.get('torrents', []):
-
- # item[21] = Paused | Downloading | Seeding | Finished
- status = 'busy'
- if item[21] == 'Finished' or item[21] == 'Seeding':
- status = 'completed'
-
- statuses.append({
- 'id': item[0],
- 'name': item[2],
- 'status': status,
- 'original_status': item[1],
- 'timeleft': item[10],
- })
-
- return statuses
-
-
-
-class uTorrentAPI(object):
-
- def __init__(self, host = 'localhost', port = 8000, username = None, password = None):
-
- super(uTorrentAPI, self).__init__()
-
- self.url = 'http://' + str(host) + ':' + str(port) + '/gui/'
- self.token = ''
- self.last_time = time.time()
- cookies = cookielib.CookieJar()
- self.opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookies), MultipartPostHandler)
- self.opener.addheaders = [('User-agent', 'couchpotato-utorrent-client/1.0')]
- if username and password:
- password_manager = urllib2.HTTPPasswordMgrWithDefaultRealm()
- password_manager.add_password(realm = None, uri = self.url, user = username, passwd = password)
- self.opener.add_handler(urllib2.HTTPBasicAuthHandler(password_manager))
- self.opener.add_handler(urllib2.HTTPDigestAuthHandler(password_manager))
- elif username or password:
- log.debug('User or password missing, not using authentication.')
- self.token = self.get_token()
-
- def _request(self, action, data = None):
- if time.time() > self.last_time + 1800:
- self.last_time = time.time()
- self.token = self.get_token()
- request = urllib2.Request(self.url + "?token=" + self.token + "&" + action, data)
- try:
- open_request = self.opener.open(request)
- response = open_request.read()
- if response:
- return response
- else:
- log.debug('Unknown failure sending command to uTorrent. Return text is: %s', response)
- except httplib.InvalidURL, err:
- log.error('Invalid uTorrent host, check your config %s', err)
- except urllib2.HTTPError, err:
- if err.code == 401:
- log.error('Invalid uTorrent Username or Password, check your config')
- else:
- log.error('uTorrent HTTPError: %s', err)
- except urllib2.URLError, err:
- log.error('Unable to connect to uTorrent %s', err)
- return False
-
- def get_token(self):
- request = self.opener.open(self.url + "token.html")
-        token = re.findall("<div.*?>(.*?)</", request.read())[0]
- return token
-
- def add_torrent_uri(self, torrent):
- action = "action=add-url&s=%s" % urllib.quote(torrent)
- return self._request(action)
-
- def add_torrent_file(self, filename, filedata):
- action = "action=add-file"
- return self._request(action, {"torrent_file": (ss(filename), filedata)})
-
- def set_torrent(self, hash, params):
- action = "action=setprops&hash=%s" % hash
- for k, v in params.iteritems():
- action += "&s=%s&v=%s" % (k, v)
- return self._request(action)
-
- def pause_torrent(self, hash):
- action = "action=pause&hash=%s" % hash
- return self._request(action)
-
- def get_status(self):
- action = "list=1"
- return self._request(action)
diff --git a/couchpotato/core/event.py b/couchpotato/core/event.py
index aa05ce0fde..35818e7edf 100644
--- a/couchpotato/core/event.py
+++ b/couchpotato/core/event.py
@@ -1,12 +1,15 @@
-from axl.axel import Event
-from couchpotato.core.helpers.variable import mergeDicts, natcmp
-from couchpotato.core.logger import CPLog
import threading
import traceback
+from axl.axel import Event
+from couchpotato.core.helpers.variable import mergeDicts, natsortKey
+from couchpotato.core.logger import CPLog
+
+
log = CPLog(__name__)
events = {}
+
def runHandler(name, handler, *args, **kwargs):
try:
return handler(*args, **kwargs)
@@ -14,44 +17,54 @@ def runHandler(name, handler, *args, **kwargs):
from couchpotato.environment import Env
log.error('Error in event "%s", that wasn\'t caught: %s%s', (name, traceback.format_exc(), Env.all() if not Env.get('dev') else ''))
+
def addEvent(name, handler, priority = 100):
- if events.get(name):
- e = events[name]
- else:
- e = events[name] = Event(name = name, threads = 10, exc_info = True, traceback = True, lock = threading.RLock())
+ if not events.get(name):
+ events[name] = []
def createHandle(*args, **kwargs):
+ h = None
try:
- parent = handler.im_self
- bc = hasattr(parent, 'beforeCall')
- if bc: parent.beforeCall(handler)
+ # Open handler
+ has_parent = hasattr(handler, 'im_self')
+ parent = None
+ if has_parent:
+ parent = handler.__self__
+ bc = hasattr(parent, 'beforeCall')
+ if bc: parent.beforeCall(handler)
+
+ # Main event
h = runHandler(name, handler, *args, **kwargs)
- ac = hasattr(parent, 'afterCall')
- if ac: parent.afterCall(handler)
+
+ # Close handler
+ if parent and has_parent:
+ ac = hasattr(parent, 'afterCall')
+ if ac: parent.afterCall(handler)
except:
- h = runHandler(name, handler, *args, **kwargs)
+ log.error('Failed creating handler %s %s: %s', (name, handler, traceback.format_exc()))
return h
- e.handle(createHandle, priority = priority)
+ events[name].append({
+ 'handler': createHandle,
+ 'priority': priority,
+ })
-def removeEvent(name, handler):
- e = events[name]
- e -= handler
def fireEvent(name, *args, **kwargs):
- if not events.get(name): return
+ if name not in events: return
+
#log.debug('Firing event %s', name)
try:
options = {
- 'is_after_event': False, # Fire after event
- 'on_complete': False, # onComplete event
- 'single': False, # Return single handler
- 'merge': False, # Merge items
- 'in_order': False, # Fire them in specific order, waits for the other to finish
+ 'is_after_event': False, # Fire after event
+ 'on_complete': False, # onComplete event
+ 'single': False, # Return single handler
+ 'merge': False, # Merge items
+ 'in_order': False, # Fire them in specific order, waits for the other to finish
}
# Do options
@@ -62,28 +75,41 @@ def fireEvent(name, *args, **kwargs):
options[x] = val
except: pass
- e = events[name]
+ if len(events[name]) == 1:
- # Lock this event
- e.lock.acquire()
+ single = None
+ try:
+ single = events[name][0]['handler'](*args, **kwargs)
+ except:
+ log.error('Failed running single event: %s', traceback.format_exc())
+
+ # Don't load thread for single event
+ result = {
+ 'single': (single is not None, single),
+ }
+
+ else:
+
+ e = Event(name = name, threads = 10, exc_info = True, traceback = True)
- e.asynchronous = False
+ for event in events[name]:
+ e.handle(event['handler'], priority = event['priority'])
- # Make sure only 1 event is fired at a time when order is wanted
- kwargs['event_order_lock'] = threading.RLock() if options['in_order'] or options['single'] else None
- kwargs['event_return_on_result'] = options['single']
+ # Make sure only 1 event is fired at a time when order is wanted
+ kwargs['event_order_lock'] = threading.RLock() if options['in_order'] or options['single'] else None
+ kwargs['event_return_on_result'] = options['single']
- # Fire
- result = e(*args, **kwargs)
+ # Fire
+ result = e(*args, **kwargs)
- # Release lock for this event
- e.lock.release()
+ result_keys = result.keys()
+ result_keys.sort(key = natsortKey)
if options['single'] and not options['merge']:
results = None
# Loop over results, stop when first not None result is found.
- for r_key in sorted(result.iterkeys(), cmp = natcmp):
+ for r_key in result_keys:
r = result[r_key]
if r[0] is True and r[1] is not None:
results = r[1]
@@ -95,7 +121,7 @@ def fireEvent(name, *args, **kwargs):
else:
results = []
- for r_key in sorted(result.iterkeys(), cmp = natcmp):
+ for r_key in result_keys:
r = result[r_key]
if r[0] == True and r[1]:
results.append(r[1])
@@ -104,11 +130,14 @@ def fireEvent(name, *args, **kwargs):
# Merge
if options['merge'] and len(results) > 0:
+
# Dict
if isinstance(results[0], dict):
+ results.reverse()
+
merged = {}
for result in results:
- merged = mergeDicts(merged, result)
+ merged = mergeDicts(merged, result, prepend_list = True)
results = merged
# Lists
@@ -132,23 +161,24 @@ def fireEvent(name, *args, **kwargs):
options['on_complete']()
return results
- except KeyError, e:
- pass
except Exception:
log.error('%s: %s', (name, traceback.format_exc()))
+
def fireEventAsync(*args, **kwargs):
try:
- my_thread = threading.Thread(target = fireEvent, args = args, kwargs = kwargs)
- my_thread.setDaemon(True)
- my_thread.start()
+ t = threading.Thread(target = fireEvent, args = args, kwargs = kwargs)
+ t.setDaemon(True)
+ t.start()
return True
- except Exception, e:
+ except Exception as e:
log.error('%s: %s', (args[0], e))
+
def errorHandler(error):
etype, value, tb = error
log.error(''.join(traceback.format_exception(etype, value, tb)))
+
def getEvent(name):
return events[name]
diff --git a/couchpotato/core/helpers/encoding.py b/couchpotato/core/helpers/encoding.py
index a11dd88bcc..f99953eebc 100644
--- a/couchpotato/core/helpers/encoding.py
+++ b/couchpotato/core/helpers/encoding.py
@@ -1,17 +1,24 @@
-from couchpotato.core.logger import CPLog
from string import ascii_letters, digits
from urllib import quote_plus
+import os
import re
import traceback
import unicodedata
+from chardet import detect
+from couchpotato.core.logger import CPLog
+import six
+
+
log = CPLog(__name__)
def toSafeString(original):
valid_chars = "-_.() %s%s" % (ascii_letters, digits)
- cleanedFilename = unicodedata.normalize('NFKD', toUnicode(original)).encode('ASCII', 'ignore')
- return ''.join(c for c in cleanedFilename if c in valid_chars)
+ cleaned_filename = unicodedata.normalize('NFKD', toUnicode(original)).encode('ASCII', 'ignore')
+ valid_string = ''.join(c for c in cleaned_filename if c in valid_chars)
+ return ' '.join(valid_string.split())
+
def simplifyString(original):
string = stripAccents(original.lower())
@@ -19,37 +26,86 @@ def simplifyString(original):
split = re.split('\W+|_', string.lower())
return toUnicode(' '.join(split))
+
def toUnicode(original, *args):
try:
if isinstance(original, unicode):
return original
else:
try:
- return unicode(original, *args)
+ return six.text_type(original, *args)
except:
try:
- return ek(original, *args)
+ from couchpotato.environment import Env
+ return original.decode(Env.get("encoding"))
except:
- raise
+ try:
+ detected = detect(original)
+ try:
+ if detected.get('confidence') > 0.8:
+ return original.decode(detected.get('encoding'))
+ except:
+ pass
+
+ return ek(original, *args)
+ except:
+ raise
except:
log.error('Unable to decode value "%s..." : %s ', (repr(original)[:20], traceback.format_exc()))
- ascii_text = str(original).encode('string_escape')
- return toUnicode(ascii_text)
+ return 'ERROR DECODING STRING'
+
def ss(original, *args):
- from couchpotato.environment import Env
- return toUnicode(original, *args).encode(Env.get('encoding'))
+
+ u_original = toUnicode(original, *args)
+ try:
+ from couchpotato.environment import Env
+ return u_original.encode(Env.get('encoding'))
+ except Exception as e:
+ log.debug('Failed ss encoding char, force UTF8: %s', e)
+ try:
+ return u_original.encode(Env.get('encoding'), 'replace')
+ except:
+ return u_original.encode('utf-8', 'replace')
+
+
+def sp(path, *args):
+
+ # Standardise encoding, normalise case, path and strip trailing '/' or '\'
+ if not path or len(path) == 0:
+ return path
+
+ # convert windows path (from remote box) to *nix path
+ if os.path.sep == '/' and '\\' in path:
+ path = '/' + path.replace(':', '').replace('\\', '/')
+
+ path = os.path.normpath(ss(path, *args))
+
+ # Remove any trailing path separators
+ if path != os.path.sep:
+ path = path.rstrip(os.path.sep)
+
+ # Add a trailing separator in case it is a root folder on windows (crashes guessit)
+ if len(path) == 2 and path[1] == ':':
+ path = path + os.path.sep
+
+ # Replace *NIX ambiguous '//' at the beginning of a path with '/' (crashes guessit)
+ path = re.sub('^//', '/', path)
+
+ return path
+
def ek(original, *args):
if isinstance(original, (str, unicode)):
try:
from couchpotato.environment import Env
- return original.decode(Env.get('encoding'))
+ return original.decode(Env.get('encoding'), 'ignore')
except UnicodeDecodeError:
raise
return original
+
def isInt(value):
try:
int(value)
@@ -57,14 +113,16 @@ def isInt(value):
except ValueError:
return False
+
def stripAccents(s):
return ''.join((c for c in unicodedata.normalize('NFD', toUnicode(s)) if unicodedata.category(c) != 'Mn'))
+
def tryUrlencode(s):
- new = u''
- if isinstance(s, (dict)):
- for key, value in s.iteritems():
- new += u'&%s=%s' % (key, tryUrlencode(value))
+ new = six.u('')
+ if isinstance(s, dict):
+ for key, value in s.items():
+ new += six.u('&%s=%s') % (key, tryUrlencode(value))
return new[1:]
else:
diff --git a/couchpotato/core/helpers/namer_check.py b/couchpotato/core/helpers/namer_check.py
new file mode 100644
index 0000000000..1d18b60c6c
--- /dev/null
+++ b/couchpotato/core/helpers/namer_check.py
@@ -0,0 +1,127 @@
+#Namer Check routine by sarakha63
+from xml.dom.minidom import parseString
+from xml.dom.minidom import Node
+import cookielib
+import urllib
+import urllib2
+import re
+import time
+from datetime import datetime
+from bs4 import BeautifulSoup
+from couchpotato.core.helpers.variable import getTitle, tryInt
+from couchpotato.core.logger import CPLog
+from couchpotato.core.helpers.encoding import simplifyString, tryUrlencode, toUnicode
+from couchpotato.core.helpers.variable import getTitle, mergeDicts
+from couchpotato.core.media._base.providers.torrent.base import TorrentProvider
+from dateutil.parser import parse
+from guessit import guess_movie_info
+from couchpotato.core.event import fireEvent
+
+log = CPLog(__name__)
+
+clean = '[ _\,\.\(\)\[\]\-](extended.cut|directors.cut|french|by|ioaw|swedisch|danish|dutch|swesub|spanish|german|ac3|dts|custom|dc|divx|divx5|dsr|dsrip|dutch|dvd|dvdr|dvdrip|dvdscr|dvdscreener|screener|dvdivx|cam|fragment|fs|hdtv|hdrip|hdtvrip|internal|limited|multisubs|vost|vostfr|multi|ntsc|ogg|ogm|pal|pdtv|proper|repack|rerip|retail|r3|r5|bd5|se|svcd|swedish|german|read.nfo|nfofix|unrated|ws|telesync|ts|telecine|tc|brrip|bdrip|video_ts|audio_ts|480p|480i|576p|576i|720p|720i|1080p|1080i|hrhd|hrhdtv|hddvd|full|multi|bluray|x264|h264|xvid|xvidvd|xxx|www.www|cd[1-9]|\[.*\])([ _\,\.\(\)\[\]\-]|$)'
+multipart_regex = [
+ '[ _\.-]+cd[ _\.-]*([0-9a-d]+)', #*cd1
+ '[ _\.-]+dvd[ _\.-]*([0-9a-d]+)', #*dvd1
+ '[ _\.-]+part[ _\.-]*([0-9a-d]+)', #*part1
+ '[ _\.-]+dis[ck][ _\.-]*([0-9a-d]+)', #*disk1
+ 'cd[ _\.-]*([0-9a-d]+)$', #cd1.ext
+ 'dvd[ _\.-]*([0-9a-d]+)$', #dvd1.ext
+ 'part[ _\.-]*([0-9a-d]+)$', #part1.mkv
+ 'dis[ck][ _\.-]*([0-9a-d]+)$', #disk1.mkv
+ '()[ _\.-]+([0-9]*[abcd]+)(\.....?)$',
+ '([a-z])([0-9]+)(\.....?)$',
+ '()([ab])(\.....?)$' #*a.mkv
+ ]
+
+def correctName(check_name, movie):
+ MovieTitles = movie['info']['titles']
+ result=0
+ for movietitle in MovieTitles:
+ check_names = [simplifyString(check_name)]
+
+ # Match names between "
+ try: check_names.append(re.search(r'([\'"])[^\1]*\1', check_name).group(0))
+ except: pass
+
+ # Match longest name between []
+ try: check_names.append(max(check_name.split('['), key = len))
+ except: pass
+
+ for check_name in list(set(check_names)):
+ check_movie = getReleaseNameYear(check_name)
+
+ try:
+ check_words = filter(None, re.split('\W+', simplifyString(check_movie.get('name', ''))))
+ movie_words = filter(None, re.split('\W+', simplifyString(movietitle)))
+ if len(check_words) > 0 and len(movie_words) > 0 and len(list(set(check_words) - set(movie_words))) == 0 and len(list(set(movie_words) - set(check_words))) == 0:
+ result+=1
+ return result
+ except:
+ pass
+
+ result+=0
+ return result
+
+def getReleaseNameYear(release_name, file_name = None):
+
+ # Use guessit first
+ guess = {}
+ if release_name:
+ release_name = re.sub(clean, ' ', release_name.lower())
+ try:
+ guess = guess_movie_info(toUnicode(release_name))
+ if guess.get('title') and guess.get('year'):
+ guess = {
+ 'name': guess.get('title'),
+ 'year': guess.get('year'),
+ }
+ elif guess.get('title'):
+ guess = {
+ 'name': guess.get('title'),
+ 'year': 0,
+ }
+ except:
+ log.debug('Could not detect via guessit "%s": %s', (file_name, traceback.format_exc()))
+
+ # Backup to simple
+ cleaned = ' '.join(re.split('\W+', simplifyString(release_name)))
+ for i in range(1,4):
+ cleaned = re.sub(clean, ' ', cleaned)
+ cleaned = re.sub(clean, ' ', cleaned)
+ year = findYear(cleaned)
+ cp_guess = {}
+
+ if year: # Split name on year
+ try:
+ movie_name = cleaned.split(year).pop(0).strip()
+ cp_guess = {
+ 'name': movie_name,
+ 'year': int(year),
+ }
+ except:
+ pass
+ else: # Split name on multiple spaces
+ try:
+ movie_name = cleaned.split(' ').pop(0).strip()
+ cp_guess = {
+ 'name': movie_name,
+ 'year': 0,
+ }
+ except:
+ pass
+
+ if cp_guess.get('year') == guess.get('year') and len(cp_guess.get('name', '')) > len(guess.get('name', '')):
+ return guess
+ elif guess == {}:
+ return cp_guess
+ if cp_guess.get('year') == guess.get('year') and len(cp_guess.get('name', '')) < len(guess.get('name', '')):
+ return cp_guess
+ return guess
+
+def findYear(text):
+    matches = re.search('(?P<year>19[0-9]{2}|20[0-9]{2})', text)
+ if matches:
+ return matches.group('year')
+
+ return ''
\ No newline at end of file
diff --git a/couchpotato/core/helpers/request.py b/couchpotato/core/helpers/request.py
index 3c6558b134..4c0add187f 100644
--- a/couchpotato/core/helpers/request.py
+++ b/couchpotato/core/helpers/request.py
@@ -1,19 +1,21 @@
-from couchpotato.core.helpers.encoding import toUnicode
-from couchpotato.core.helpers.variable import natcmp
-from flask.globals import current_app
-from flask.helpers import json, make_response
from urllib import unquote
-from werkzeug.urls import url_decode
-import flask
import re
-def getParams():
+from couchpotato.core.helpers.encoding import toUnicode
+from couchpotato.core.helpers.variable import natsortKey
+
+
+def getParams(params):
- params = url_decode(getattr(flask.request, 'environ').get('QUERY_STRING', ''))
reg = re.compile('^[a-z0-9_\.]+$')
- current = temp = {}
- for param, value in sorted(params.iteritems()):
+ # Sort keys
+ param_keys = params.keys()
+ param_keys.sort(key = natsortKey)
+
+ temp = {}
+ for param in param_keys:
+ value = params[param]
nest = re.split("([\[\]]+)", param)
if len(nest) > 1:
@@ -36,16 +38,31 @@ def getParams():
current = current[item]
else:
temp[param] = toUnicode(unquote(value))
+ if temp[param].lower() in ['true', 'false']:
+ temp[param] = temp[param].lower() != 'false'
return dictToList(temp)
+non_decimal = re.compile(r'[^\d.]+')
+
def dictToList(params):
if type(params) is dict:
new = {}
- for x, value in params.iteritems():
+ for x, value in params.items():
try:
- new_value = [dictToList(value[k]) for k in sorted(value.iterkeys(), cmp = natcmp)]
+ convert = lambda text: int(text) if text.isdigit() else text.lower()
+ alphanum_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key)]
+ sorted_keys = sorted(value.keys(), key = alphanum_key)
+
+ all_ints = 0
+ for pnr in sorted_keys:
+ all_ints += 1 if non_decimal.sub('', pnr) == pnr else 0
+
+ if all_ints == len(sorted_keys):
+ new_value = [dictToList(value[k]) for k in sorted_keys]
+ else:
+ new_value = value
except:
new_value = value
@@ -54,29 +71,3 @@ def dictToList(params):
new = params
return new
-
-def getParam(attr, default = None):
- try:
- return getParams().get(attr, default)
- except:
- return default
-
-def padded_jsonify(callback, *args, **kwargs):
- content = str(callback) + '(' + json.dumps(dict(*args, **kwargs)) + ')'
- return getattr(current_app, 'response_class')(content, mimetype = 'text/javascript')
-
-def jsonify(mimetype, *args, **kwargs):
- content = json.dumps(dict(*args, **kwargs))
- return getattr(current_app, 'response_class')(content, mimetype = mimetype)
-
-def jsonified(*args, **kwargs):
- callback = getParam('callback_func', None)
- if callback:
- content = padded_jsonify(callback, *args, **kwargs)
- else:
- content = jsonify('application/json', *args, **kwargs)
-
- response = make_response(content)
- response.cache_control.no_cache = True
-
- return response
diff --git a/couchpotato/core/helpers/rss.py b/couchpotato/core/helpers/rss.py
index d88fdb5399..f455007e89 100644
--- a/couchpotato/core/helpers/rss.py
+++ b/couchpotato/core/helpers/rss.py
@@ -1,12 +1,15 @@
-from couchpotato.core.logger import CPLog
import xml.etree.ElementTree as XMLTree
+from couchpotato.core.logger import CPLog
+
+
log = CPLog(__name__)
+
class RSS(object):
def getTextElements(self, xml, path):
- ''' Find elements and return tree'''
+ """ Find elements and return tree"""
textelements = []
try:
@@ -28,7 +31,7 @@ def getElements(self, xml, path):
return elements
def getElement(self, xml, path):
- ''' Find element and return text'''
+ """ Find element and return text"""
try:
return xml.find(path)
@@ -36,7 +39,7 @@ def getElement(self, xml, path):
return
def getTextElement(self, xml, path):
- ''' Find element and return text'''
+ """ Find element and return text"""
try:
return xml.find(path).text
@@ -46,6 +49,6 @@ def getTextElement(self, xml, path):
def getItems(self, data, path = 'channel/item'):
try:
return XMLTree.parse(data).findall(path)
- except Exception, e:
+ except Exception as e:
log.error('Error parsing RSS. %s', e)
return []
diff --git a/couchpotato/core/helpers/variable.py b/couchpotato/core/helpers/variable.py
old mode 100644
new mode 100755
index 82bf88f725..b1dd966e0b
--- a/couchpotato/core/helpers/variable.py
+++ b/couchpotato/core/helpers/variable.py
@@ -1,23 +1,53 @@
-from couchpotato.core.helpers.encoding import simplifyString, toSafeString
-from couchpotato.core.logger import CPLog
+import collections
+import ctypes
import hashlib
-import os.path
+import os
import platform
import random
import re
import string
import sys
+import traceback
+
+from couchpotato.core.helpers.encoding import simplifyString, toSafeString, ss, sp, toUnicode
+from couchpotato.core.logger import CPLog
+import six
+from six.moves import map, zip, filter
+
log = CPLog(__name__)
+
+def fnEscape(pattern):
+ return pattern.replace('[', '[[').replace(']', '[]]').replace('[[', '[[]')
+
+
+def link(src, dst):
+ if os.name == 'nt':
+ import ctypes
+ if ctypes.windll.kernel32.CreateHardLinkW(toUnicode(dst), toUnicode(src), 0) == 0: raise ctypes.WinError()
+ else:
+ os.link(toUnicode(src), toUnicode(dst))
+
+
+def symlink(src, dst):
+ if os.name == 'nt':
+ import ctypes
+ if ctypes.windll.kernel32.CreateSymbolicLinkW(toUnicode(dst), toUnicode(src), 1 if os.path.isdir(src) else 0) in [0, 1280]: raise ctypes.WinError()
+ else:
+ os.symlink(toUnicode(src), toUnicode(dst))
+
+
def getUserDir():
try:
import pwd
- os.environ['HOME'] = pwd.getpwuid(os.geteuid()).pw_dir
+ if not os.environ['HOME']:
+ os.environ['HOME'] = sp(pwd.getpwuid(os.geteuid()).pw_dir)
except:
pass
- return os.path.expanduser('~')
+ return sp(os.path.expanduser('~'))
+
def getDownloadDir():
user_dir = getUserDir()
@@ -31,6 +61,7 @@ def getDownloadDir():
return user_dir
+
def getDataDir():
# Windows
@@ -50,10 +81,12 @@ def getDataDir():
# Linux
return os.path.join(user_dir, '.couchpotato')
-def isDict(object):
- return isinstance(object, dict)
-def mergeDicts(a, b):
+def isDict(obj):
+ return isinstance(obj, dict)
+
+
+def mergeDicts(a, b, prepend_list = False):
assert isDict(a), isDict(b)
dst = a.copy()
@@ -67,12 +100,13 @@ def mergeDicts(a, b):
if isDict(current_src[key]) and isDict(current_dst[key]):
stack.append((current_dst[key], current_src[key]))
elif isinstance(current_src[key], list) and isinstance(current_dst[key], list):
- current_dst[key].extend(current_src[key])
+ current_dst[key] = current_src[key] + current_dst[key] if prepend_list else current_dst[key] + current_src[key]
current_dst[key] = removeListDuplicates(current_dst[key])
else:
current_dst[key] = current_src[key]
return dst
+
def removeListDuplicates(seq):
checked = []
for e in seq:
@@ -80,31 +114,79 @@ def removeListDuplicates(seq):
checked.append(e)
return checked
+
def flattenList(l):
if isinstance(l, list):
return sum(map(flattenList, l))
else:
return l
+
def md5(text):
- return hashlib.md5(text).hexdigest()
+ return hashlib.md5(ss(text)).hexdigest()
+
def sha1(text):
return hashlib.sha1(text).hexdigest()
+
+def isLocalIP(ip):
+ ip = ip.lstrip('htps:/')
+ regex = '/(^127\.)|(^192\.168\.)|(^10\.)|(^172\.1[6-9]\.)|(^172\.2[0-9]\.)|(^172\.3[0-1]\.)|(^::1)$/'
+ return re.search(regex, ip) is not None or 'localhost' in ip or ip[:4] == '127.'
+
+
def getExt(filename):
return os.path.splitext(filename)[1][1:]
-def cleanHost(host):
- if not host.startswith(('http://', 'https://')):
- host = 'http://' + host
- if not host.endswith('/'):
+def cleanHost(host, protocol = True, ssl = False, username = None, password = None):
+ """Return a cleaned up host with given url options set
+
+ Changes protocol to https if ssl is set to True and http if ssl is set to false.
+ >>> cleanHost("localhost:80", ssl=True)
+ 'https://localhost:80/'
+ >>> cleanHost("localhost:80", ssl=False)
+ 'http://localhost:80/'
+
+ Username and password is managed with the username and password variables
+ >>> cleanHost("localhost:80", username="user", password="passwd")
+ 'http://user:passwd@localhost:80/'
+
+ Output without scheme (protocol) can be forced with protocol=False
+ >>> cleanHost("localhost:80", protocol=False)
+ 'localhost:80'
+ """
+
+ if not '://' in host and protocol:
+ host = ('https://' if ssl else 'http://') + host
+
+ if not protocol:
+ host = host.split('://', 1)[-1]
+
+ if protocol and username and password:
+ try:
+ auth = re.findall('^(?:.+?//)(.+?):(.+?)@(?:.+)$', host)
+ if auth:
+ log.error('Cleanhost error: auth already defined in url: %s, please remove BasicAuth from url.', host)
+ else:
+ host = host.replace('://', '://%s:%s@' % (username, password), 1)
+ except:
+ pass
+
+ host = host.rstrip('/ ')
+ if protocol:
host += '/'
return host
-def getImdb(txt, check_inside = True, multiple = False):
+
+def getImdb(txt, check_inside = False, multiple = False):
+
+ if not check_inside:
+ txt = simplifyString(txt)
+ else:
+ txt = ss(txt)
if check_inside and os.path.isfile(txt):
output = open(txt, 'r')
@@ -112,60 +194,446 @@ def getImdb(txt, check_inside = True, multiple = False):
output.close()
try:
- ids = re.findall('(tt\d{7})', txt)
+ ids = re.findall('(tt\d{4,8})', txt)
+
if multiple:
- return ids if len(ids) > 0 else []
- return ids[0]
+ return removeDuplicate(['tt%07d' % tryInt(x[2:]) for x in ids]) if len(ids) > 0 else []
+
+ return 'tt%07d' % tryInt(ids[0][2:])
except IndexError:
pass
return False
-def tryInt(s):
+
+def tryInt(s, default = 0):
try: return int(s)
- except: return 0
+ except: return default
+
def tryFloat(s):
- try: return float(s) if '.' in s else tryInt(s)
+ try:
+ if isinstance(s, str):
+ return float(s) if '.' in s else tryInt(s)
+ else:
+ return float(s)
except: return 0
-def natsortKey(s):
- return map(tryInt, re.findall(r'(\d+|\D+)', s))
-def natcmp(a, b):
- return cmp(natsortKey(a), natsortKey(b))
+def natsortKey(string_):
+ """See http://www.codinghorror.com/blog/archives/001018.html"""
+ return [int(s) if s.isdigit() else s for s in re.split(r'(\d+)', string_)]
+
+
+def toIterable(value):
+ if isinstance(value, collections.Iterable):
+ return value
+ return [value]
+
-def getTitle(library_dict):
+def getIdentifier(media):
+ return media.get('identifier') or media.get('identifiers', {}).get('imdb')
+
+
+def getTitle(media_dict):
try:
try:
- return library_dict['titles'][0]['title']
+ return media_dict['title']
except:
try:
- for title in library_dict.titles:
- if title.default:
- return title.title
+ return media_dict['titles'][0]
except:
- log.error('Could not get title for %s', library_dict.identifier)
- return None
-
- log.error('Could not get title for %s', library_dict['identifier'])
- return None
+ try:
+ return media_dict['info']['titles'][0]
+ except:
+ try:
+ return media_dict['media']['info']['titles'][0]
+ except:
+ log.error('Could not get title for %s', getIdentifier(media_dict))
+ return None
except:
- log.error('Could not get title for library item: %s', library_dict)
+ log.error('Could not get title for library item: %s', media_dict)
return None
+
def possibleTitles(raw_title):
- titles = []
+ titles = [
+ toSafeString(raw_title).lower(),
+ raw_title.lower(),
+ simplifyString(raw_title)
+ ]
+
+ # replace some chars
+ new_title = raw_title.replace('&', 'and')
+ titles.append(simplifyString(new_title))
- titles.append(toSafeString(raw_title).lower())
- titles.append(raw_title.lower())
- titles.append(simplifyString(raw_title))
+ return removeDuplicate(titles)
- return list(set(titles))
def randomString(size = 8, chars = string.ascii_uppercase + string.digits):
return ''.join(random.choice(chars) for x in range(size))
-def splitString(str, split_on = ','):
- return [x.strip() for x in str.split(split_on)] if str else []
+def getAllLanguages():
+ return [
+ ('aa', 'Afar'),
+ ('ab', 'Abkhazian'),
+ ('af', 'Afrikaans'),
+ ('ak', 'Akan'),
+ ('sq', 'Albanian'),
+ ('am', 'Amharic'),
+ ('ar', 'Arabic'),
+ ('an', 'Aragonese'),
+ ('hy', 'Armenian'),
+ ('as', 'Assamese'),
+ ('av', 'Avaric'),
+ ('ae', 'Avestan'),
+ ('ay', 'Aymara'),
+ ('az', 'Azerbaijani'),
+ ('ba', 'Bashkir'),
+ ('bm', 'Bambara'),
+ ('eu', 'Basque'),
+ ('be', 'Belarusian'),
+ ('bn', 'Bengali'),
+ ('bh', 'Bihari languages'),
+ ('bi', 'Bislama'),
+ ('bo', 'Tibetan'),
+ ('bs', 'Bosnian'),
+ ('br', 'Breton'),
+ ('bg', 'Bulgarian'),
+ ('my', 'Burmese'),
+ ('ca', 'Catalan; Valencian'),
+ ('cs', 'Czech'),
+ ('ch', 'Chamorro'),
+ ('ce', 'Chechen'),
+ ('zh', 'Chinese'),
+ ('cu', 'Church Slavic; Old Slavonic; Church Slavonic; Old Bulgarian; Old Church Slavonic'),
+ ('cv', 'Chuvash'),
+ ('kw', 'Cornish'),
+ ('co', 'Corsican'),
+ ('cr', 'Cree'),
+ ('cy', 'Welsh'),
+ ('cs', 'Czech'),
+ ('da', 'Danish'),
+ ('de', 'German'),
+ ('dv', 'Divehi; Dhivehi; Maldivian'),
+ ('nl', 'Dutch; Flemish'),
+ ('dz', 'Dzongkha'),
+ ('el', 'Greek, Modern (1453-)'),
+ ('en', 'English'),
+ ('eo', 'Esperanto'),
+ ('et', 'Estonian'),
+ ('eu', 'Basque'),
+ ('ee', 'Ewe'),
+ ('fo', 'Faroese'),
+ ('fa', 'Persian'),
+ ('fj', 'Fijian'),
+ ('fi', 'Finnish'),
+ ('fr', 'French'),
+ ('fr', 'Truefrench'),
+ ('fy', 'Western Frisian'),
+ ('ff', 'Fulah'),
+ ('ka', 'Georgian'),
+ ('de', 'German'),
+ ('gd', 'Gaelic; Scottish Gaelic'),
+ ('ga', 'Irish'),
+ ('gl', 'Galician'),
+ ('gv', 'Manx'),
+ ('el', 'Greek, Modern (1453-)'),
+ ('gn', 'Guarani'),
+ ('gu', 'Gujarati'),
+ ('ht', 'Haitian; Haitian Creole'),
+ ('ha', 'Hausa'),
+ ('he', 'Hebrew'),
+ ('hz', 'Herero'),
+ ('hi', 'Hindi'),
+ ('ho', 'Hiri Motu'),
+ ('hr', 'Croatian'),
+ ('hu', 'Hungarian'),
+ ('hy', 'Armenian'),
+ ('ig', 'Igbo'),
+ ('is', 'Icelandic'),
+ ('io', 'Ido'),
+ ('ii', 'Sichuan Yi; Nuosu'),
+ ('iu', 'Inuktitut'),
+ ('ie', 'Interlingue; Occidental'),
+ ('ia', 'Interlingua (International Auxiliary Language Association)'),
+ ('id', 'Indonesian'),
+ ('ik', 'Inupiaq'),
+ ('is', 'Icelandic'),
+ ('it', 'Italian'),
+ ('jv', 'Javanese'),
+ ('ja', 'Japanese'),
+ ('kl', 'Kalaallisut; Greenlandic'),
+ ('kn', 'Kannada'),
+ ('ks', 'Kashmiri'),
+ ('ka', 'Georgian'),
+ ('kr', 'Kanuri'),
+ ('kk', 'Kazakh'),
+ ('km', 'Central Khmer'),
+ ('ki', 'Kikuyu; Gikuyu'),
+ ('rw', 'Kinyarwanda'),
+ ('ky', 'Kirghiz; Kyrgyz'),
+ ('kv', 'Komi'),
+ ('kg', 'Kongo'),
+ ('ko', 'Korean'),
+ ('kj', 'Kuanyama; Kwanyama'),
+ ('ku', 'Kurdish'),
+ ('lo', 'Lao'),
+ ('la', 'Latin'),
+ ('lv', 'Latvian'),
+ ('li', 'Limburgan; Limburger; Limburgish'),
+ ('ln', 'Lingala'),
+ ('lt', 'Lithuanian'),
+ ('lb', 'Luxembourgish; Letzeburgesch'),
+ ('lu', 'Luba-Katanga'),
+ ('lg', 'Ganda'),
+ ('mk', 'Macedonian'),
+ ('mh', 'Marshallese'),
+ ('ml', 'Malayalam'),
+ ('mi', 'Maori'),
+ ('mr', 'Marathi'),
+ ('ms', 'Malay'),
+ ('mic', 'Micmac'),
+ ('mk', 'Macedonian'),
+ ('mg', 'Malagasy'),
+ ('mt', 'Maltese'),
+ ('mn', 'Mongolian'),
+ ('mi', 'Maori'),
+ ('ms', 'Malay'),
+ ('my', 'Burmese'),
+ ('na', 'Nauru'),
+ ('nv', 'Navajo; Navaho'),
+ ('nr', 'Ndebele, South; South Ndebele'),
+ ('nd', 'Ndebele, North; North Ndebele'),
+ ('ng', 'Ndonga'),
+ ('ne', 'Nepali'),
+ ('nl', 'Dutch; Flemish'),
+ ('nn', 'Norwegian Nynorsk; Nynorsk, Norwegian'),
+ ('nb', 'Bokmal, Norwegian; Norwegian Bokmal'),
+ ('no', 'Norwegian'),
+ ('oc', 'Occitan (post 1500)'),
+ ('oj', 'Ojibwa'),
+ ('or', 'Oriya'),
+ ('om', 'Oromo'),
+ ('os', 'Ossetian; Ossetic'),
+ ('pa', 'Panjabi; Punjabi'),
+ ('fa', 'Persian'),
+ ('pi', 'Pali'),
+ ('pl', 'Polish'),
+ ('pt', 'Portuguese'),
+ ('ps', 'Pushto; Pashto'),
+ ('qu', 'Quechua'),
+ ('rm', 'Romansh'),
+ ('ro', 'Romanian; Moldavian; Moldovan'),
+ ('rn', 'Rundi'),
+ ('ru', 'Russian'),
+ ('sg', 'Sango'),
+ ('sa', 'Sanskrit'),
+ ('si', 'Sinhala; Sinhalese'),
+ ('sk', 'Slovak'),
+ ('sk', 'Slovak'),
+ ('sl', 'Slovenian'),
+ ('se', 'Northern Sami'),
+ ('sm', 'Samoan'),
+ ('sn', 'Shona'),
+ ('sd', 'Sindhi'),
+ ('so', 'Somali'),
+ ('st', 'Sotho, Southern'),
+ ('es', 'Spanish; Castilian'),
+ ('sq', 'Albanian'),
+ ('sc', 'Sardinian'),
+ ('sr', 'Serbian'),
+ ('ss', 'Swati'),
+ ('su', 'Sundanese'),
+ ('sw', 'Swahili'),
+ ('sv', 'Swedish'),
+ ('ty', 'Tahitian'),
+ ('ta', 'Tamil'),
+ ('tt', 'Tatar'),
+ ('te', 'Telugu'),
+ ('tg', 'Tajik'),
+ ('tl', 'Tagalog'),
+ ('th', 'Thai'),
+ ('bo', 'Tibetan'),
+ ('ti', 'Tigrinya'),
+ ('to', 'Tonga (Tonga Islands)'),
+ ('tn', 'Tswana'),
+ ('ts', 'Tsonga'),
+ ('tk', 'Turkmen'),
+ ('tr', 'Turkish'),
+ ('tw', 'Twi'),
+ ('ug', 'Uighur; Uyghur'),
+ ('uk', 'Ukrainian'),
+ ('ur', 'Urdu'),
+ ('uz', 'Uzbek'),
+ ('ve', 'Venda'),
+ ('vi', 'Vietnamese'),
+ ('vo', 'Volapuk'),
+ ('cy', 'Welsh'),
+ ('wa', 'Walloon'),
+ ('wo', 'Wolof'),
+ ('xh', 'Xhosa'),
+ ('yi', 'Yiddish'),
+ ('yo', 'Yoruba'),
+ ('za', 'Zhuang; Chuang'),
+ ('zh', 'Chinese'),
+ ('zu', 'Zulu')]
+
+def fillingLanguages(languages):
+ allLanguages = getAllLanguages()
+
+ languagesToAppend = []
+
+ for currentLanguage in languages:
+ matchingTuples = [item for item in allLanguages if item[0].upper() == currentLanguage.upper()]
+ if matchingTuples and any(matchingTuples):
+ languagesToAppend.append(matchingTuples[0][1].upper())
+
+ if currentLanguage == 'FR' or currentLanguage == 'FRENCH':
+ languagesToAppend.append('TRUEFRENCH')
+
+ return languages + languagesToAppend
+
+def splitString(str, split_on = ',', clean = True):
+ l = [x.strip() for x in str.split(split_on)] if str else []
+ return removeEmpty(l) if clean else l
+
+
+def removeEmpty(l):
+ return list(filter(None, l))
+
+
+def removeDuplicate(l):
+ seen = set()
+ return [x for x in l if x not in seen and not seen.add(x)]
+
+
+def dictIsSubset(a, b):
+ return all([k in b and b[k] == v for k, v in a.items()])
+
+
+# Returns True if sub_folder is the same as or inside base_folder
+def isSubFolder(sub_folder, base_folder):
+ if base_folder and sub_folder:
+ base = sp(os.path.realpath(base_folder)) + os.path.sep
+ subfolder = sp(os.path.realpath(sub_folder)) + os.path.sep
+ return os.path.commonprefix([subfolder, base]) == base
+
+ return False
+
+
+# From SABNZBD
+re_password = [re.compile(r'(.+){{([^{}]+)}}$'), re.compile(r'(.+)\s+password\s*=\s*(.+)$', re.I)]
+
+
+def scanForPassword(name):
+ m = None
+ for reg in re_password:
+ m = reg.search(name)
+ if m: break
+
+ if m:
+ return m.group(1).strip('. '), m.group(2).strip()
+
+
+under_pat = re.compile(r'_([a-z])')
+
+def underscoreToCamel(name):
+ return under_pat.sub(lambda x: x.group(1).upper(), name)
+
+
+def removePyc(folder, only_excess = True, show_logs = True):
+
+ folder = sp(folder)
+
+ for root, dirs, files in os.walk(folder):
+
+ pyc_files = filter(lambda filename: filename.endswith('.pyc'), files)
+ py_files = set(filter(lambda filename: filename.endswith('.py'), files))
+ excess_pyc_files = filter(lambda pyc_filename: pyc_filename[:-1] not in py_files, pyc_files) if only_excess else pyc_files
+
+ for excess_pyc_file in excess_pyc_files:
+ full_path = os.path.join(root, excess_pyc_file)
+ if show_logs: log.debug('Removing old PYC file: %s', full_path)
+ try:
+ os.remove(full_path)
+ except:
+ log.error('Couldn\'t remove %s: %s', (full_path, traceback.format_exc()))
+
+ for dir_name in dirs:
+ full_path = os.path.join(root, dir_name)
+ if len(os.listdir(full_path)) == 0:
+ try:
+ os.rmdir(full_path)
+ except:
+ log.error('Couldn\'t remove empty directory %s: %s', (full_path, traceback.format_exc()))
+
+
+def getFreeSpace(directories):
+
+ single = not isinstance(directories, (tuple, list))
+ if single:
+ directories = [directories]
+
+ free_space = {}
+ for folder in directories:
+
+ size = None
+ if os.path.isdir(folder):
+ if os.name == 'nt':
+ _, total, free = ctypes.c_ulonglong(), ctypes.c_ulonglong(), \
+ ctypes.c_ulonglong()
+ if sys.version_info >= (3,) or isinstance(folder, unicode):
+ fun = ctypes.windll.kernel32.GetDiskFreeSpaceExW #@UndefinedVariable
+ else:
+ fun = ctypes.windll.kernel32.GetDiskFreeSpaceExA #@UndefinedVariable
+ ret = fun(folder, ctypes.byref(_), ctypes.byref(total), ctypes.byref(free))
+ if ret == 0:
+ raise ctypes.WinError()
+ size = [total.value / (1024 * 1024), free.value / (1024 * 1024)]
+ else:
+ s = os.statvfs(folder)
+ size = [s.f_blocks * s.f_frsize / (1024 * 1024), (s.f_bavail * s.f_frsize) / (1024 * 1024)]
+
+ if single: return size
+
+ free_space[folder] = size
+
+ return free_space
+
+
+def getSize(paths):
+
+ single = not isinstance(paths, (tuple, list))
+ if single:
+ paths = [paths]
+
+ total_size = 0
+ for path in paths:
+ path = sp(path)
+
+ if os.path.isdir(path):
+ # NOTE: do not reset total_size here; sizes must accumulate across all given paths
+ for dirpath, _, filenames in os.walk(path):
+ for f in filenames:
+ total_size += os.path.getsize(sp(os.path.join(dirpath, f)))
+
+ elif os.path.isfile(path):
+ total_size += os.path.getsize(path)
+
+ return total_size / 1048576 # MB
+
+
+def find(func, iterable):
+ for item in iterable:
+ if func(item):
+ return item
+
+ return None
+
+
+def compareVersions(version1, version2):
+ def normalize(v):
+ return [int(x) for x in re.sub(r'(\.0+)*$','', v).split(".")]
+ return cmp(normalize(version1), normalize(version2))
diff --git a/couchpotato/core/loader.py b/couchpotato/core/loader.py
index a97437a211..5b0f6fca3d 100644
--- a/couchpotato/core/loader.py
+++ b/couchpotato/core/loader.py
@@ -1,59 +1,71 @@
-from couchpotato.core.event import fireEvent
-from couchpotato.core.logger import CPLog
-import glob
import os
+import sys
import traceback
+from couchpotato.core.event import fireEvent
+from couchpotato.core.logger import CPLog
+from importhelper import import_module
+import six
+
+
log = CPLog(__name__)
-class Loader(object):
- plugins = {}
- providers = {}
+class Loader(object):
- modules = {}
+ def __init__(self):
+ self.plugins = {}
+ self.providers = {}
+ self.modules = {}
+ self.paths = {}
def preload(self, root = ''):
-
core = os.path.join(root, 'couchpotato', 'core')
- self.paths = {
+ self.paths.update({
'core': (0, 'couchpotato.core._base', os.path.join(core, '_base')),
'plugin': (1, 'couchpotato.core.plugins', os.path.join(core, 'plugins')),
'notifications': (20, 'couchpotato.core.notifications', os.path.join(core, 'notifications')),
'downloaders': (20, 'couchpotato.core.downloaders', os.path.join(core, 'downloaders')),
- }
+ })
- # Add providers to loader
- provider_dir = os.path.join(root, 'couchpotato', 'core', 'providers')
- for provider in os.listdir(provider_dir):
- path = os.path.join(provider_dir, provider)
- if os.path.isdir(path):
- self.paths[provider + '_provider'] = (25, 'couchpotato.core.providers.' + provider, path)
+ # Add media to loader
+ self.addPath(root, ['couchpotato', 'core', 'media'], 25, recursive = True)
+ # Add custom plugin folder
+ from couchpotato.environment import Env
+ custom_plugin_dir = os.path.join(Env.get('data_dir'), 'custom_plugins')
+ if os.path.isdir(custom_plugin_dir):
+ sys.path.insert(0, custom_plugin_dir)
+ self.paths['custom_plugins'] = (30, '', custom_plugin_dir)
- for plugin_type, plugin_tuple in self.paths.iteritems():
+ # Loop over all paths and add to module list
+ for plugin_type, plugin_tuple in self.paths.items():
priority, module, dir_name = plugin_tuple
self.addFromDir(plugin_type, priority, module, dir_name)
def run(self):
did_save = 0
- for priority in self.modules:
- for module_name, plugin in sorted(self.modules[priority].iteritems()):
+ for priority in sorted(self.modules):
+ for module_name, plugin in sorted(self.modules[priority].items()):
+
# Load module
try:
- m = getattr(self.loadModule(module_name), plugin.get('name'))
+ if plugin.get('name')[:2] == '__':
+ continue
- log.info('Loading %s: %s', (plugin['type'], plugin['name']))
+ m = self.loadModule(module_name)
+ if m is None:
+ continue
# Save default settings for plugin/provider
did_save += self.loadSettings(m, module_name, save = False)
- self.loadPlugins(m, plugin.get('name'))
+ self.loadPlugins(m, plugin.get('type'), plugin.get('name'))
except ImportError as e:
# todo:: subclass ImportError for missing requirements.
- if (e.message.lower().startswith("missing")):
+ if e.message.lower().startswith("missing"):
log.error(e.message)
pass
# todo:: this needs to be more descriptive.
@@ -65,27 +77,45 @@ def run(self):
if did_save:
fireEvent('settings.save')
+ def addPath(self, root, base_path, priority, recursive = False):
+ root_path = os.path.join(root, *base_path)
+ for filename in os.listdir(root_path):
+ path = os.path.join(root_path, filename)
+ if os.path.isdir(path) and filename[:2] != '__':
+ if six.u('__init__.py') in os.listdir(path):
+ new_base_path = ''.join(s + '.' for s in base_path) + filename
+ self.paths[new_base_path.replace('.', '_')] = (priority, new_base_path, path)
+
+ if recursive:
+ self.addPath(root, base_path + [filename], priority, recursive = True)
+
def addFromDir(self, plugin_type, priority, module, dir_name):
# Load dir module
- try:
- m = __import__(module)
- splitted = module.split('.')
- for sub in splitted[1:]:
- m = getattr(m, sub)
+ if module and len(module) > 0:
+ self.addModule(priority, plugin_type, module, os.path.basename(dir_name))
- if hasattr(m, 'config'):
- fireEvent('settings.options', splitted[-1] + '_config', getattr(m, 'config'))
- except:
- raise
+ for name in os.listdir(dir_name):
+ path = os.path.join(dir_name, name)
+ ext = os.path.splitext(path)[1]
+ ext_length = len(ext)
+
+ # SKIP test files:
+ if path.endswith('_test.py'):
+ continue
- for cur_file in glob.glob(os.path.join(dir_name, '*')):
- name = os.path.basename(cur_file)
- if os.path.isdir(os.path.join(dir_name, name)):
+ if name != 'static' and ((os.path.isdir(path) and os.path.isfile(os.path.join(path, '__init__.py')))
+ or (os.path.isfile(path) and ext == '.py')):
+ name = name[:-ext_length] if ext_length > 0 else name
module_name = '%s.%s' % (module, name)
self.addModule(priority, plugin_type, module_name, name)
def loadSettings(self, module, name, save = True):
+
+ if not hasattr(module, 'config'):
+ #log.debug('Skip loading settings for plugin %s as it has no config section' % module.__file__)
+ return False
+
try:
for section in module.config:
fireEvent('settings.options', section['name'], section)
@@ -99,16 +129,22 @@ def loadSettings(self, module, name, save = True):
log.debug('Failed loading settings for "%s": %s', (name, traceback.format_exc()))
return False
- def loadPlugins(self, module, name):
- try:
- klass = module.start()
- klass.registerPlugin()
-
- if klass and getattr(klass, 'auto_register_static'):
- klass.registerStatic(module.__file__)
+ def loadPlugins(self, module, type, name):
+ if not hasattr(module, 'autoload'):
+ #log.debug('Skip startup for plugin %s as it has no start section' % module.__file__)
+ return False
+ try:
+ # Load single file plugin
+ if isinstance(module.autoload, (str, unicode)):
+ getattr(module, module.autoload)()
+ # Load folder plugin
+ else:
+ module.autoload()
+
+ log.info('Loaded %s: %s', (type, name))
return True
- except Exception, e:
+ except:
log.error('Failed loading plugin "%s": %s', (module.__file__, traceback.format_exc()))
return False
@@ -117,6 +153,10 @@ def addModule(self, priority, plugin_type, module, name):
if not self.modules.get(priority):
self.modules[priority] = {}
+ module = module.lstrip('.')
+ if plugin_type.startswith('couchpotato_core'):
+ plugin_type = plugin_type[17:]
+
self.modules[priority][module] = {
'priority': priority,
'module': module,
@@ -126,10 +166,9 @@ def addModule(self, priority, plugin_type, module, name):
def loadModule(self, name):
try:
- m = __import__(name)
- splitted = name.split('.')
- for sub in splitted[1:-1]:
- m = getattr(m, sub)
- return m
+ return import_module(name)
+ except ImportError:
+ log.debug('Skip loading module plugin %s: %s', (name, traceback.format_exc()))
+ return None
except:
raise
diff --git a/couchpotato/core/logger.py b/couchpotato/core/logger.py
index 3ad33aa036..ce99d682dc 100644
--- a/couchpotato/core/logger.py
+++ b/couchpotato/core/logger.py
@@ -1,11 +1,14 @@
import logging
import re
-import traceback
+
class CPLog(object):
context = ''
- replace_private = ['api', 'apikey', 'api_key', 'password', 'username', 'h', 'uid', 'key']
+ replace_private = ['api', 'apikey', 'api_key', 'password', 'username', 'h', 'uid', 'key', 'passkey']
+
+ Env = None
+ is_develop = False
def __init__(self, context = ''):
if context.endswith('.main'):
@@ -14,6 +17,20 @@ def __init__(self, context = ''):
self.context = context
self.logger = logging.getLogger()
+ def setup(self):
+
+ if not self.Env:
+ from couchpotato.environment import Env
+
+ self.Env = Env
+ self.is_develop = Env.get('dev')
+
+ from couchpotato.core.event import addEvent
+ addEvent('app.after_shutdown', self.close)
+
+ def close(self, *args, **kwargs):
+ logging.shutdown()
+
def info(self, msg, replace_tuple = ()):
self.logger.info(self.addContext(msg, replace_tuple))
@@ -37,23 +54,22 @@ def addContext(self, msg, replace_tuple = ()):
def safeMessage(self, msg, replace_tuple = ()):
- from couchpotato.environment import Env
- from couchpotato.core.helpers.encoding import ss
+ from couchpotato.core.helpers.encoding import ss, toUnicode
msg = ss(msg)
try:
- msg = msg % replace_tuple
- except:
- try:
- if isinstance(replace_tuple, tuple):
- msg = msg % tuple([ss(x) for x in list(replace_tuple)])
- else:
- msg = msg % ss(replace_tuple)
- except:
- self.logger.error(u'Failed encoding stuff to log: %s' % traceback.format_exc())
-
- if not Env.get('dev'):
+ if isinstance(replace_tuple, tuple):
+ msg = msg % tuple([ss(x) if not isinstance(x, (int, float)) else x for x in list(replace_tuple)])
+ elif isinstance(replace_tuple, dict):
+ msg = msg % dict((k, ss(v) if not isinstance(v, (int, float)) else v) for k, v in replace_tuple.iteritems())
+ else:
+ msg = msg % ss(replace_tuple)
+ except Exception as e:
+ self.logger.error('Failed encoding stuff to log "%s": %s' % (msg, e))
+
+ self.setup()
+ if not self.is_develop:
for replace in self.replace_private:
msg = re.sub('(\?%s=)[^\&]+' % replace, '?%s=xxx' % replace, msg)
@@ -61,10 +77,10 @@ def safeMessage(self, msg, replace_tuple = ()):
# Replace api key
try:
- api_key = Env.setting('api_key')
+ api_key = self.Env.setting('api_key')
if api_key:
msg = msg.replace(api_key, 'API_KEY')
except:
pass
- return msg
+ return toUnicode(msg)
diff --git a/couchpotato/core/media/__init__.py b/couchpotato/core/media/__init__.py
new file mode 100755
index 0000000000..17494ef160
--- /dev/null
+++ b/couchpotato/core/media/__init__.py
@@ -0,0 +1,115 @@
+import os
+import traceback
+
+from couchpotato import CPLog, md5
+from couchpotato.core.event import addEvent, fireEvent, fireEventAsync
+from couchpotato.core.helpers.encoding import toUnicode
+from couchpotato.core.helpers.variable import getExt
+from couchpotato.core.plugins.base import Plugin
+import six
+
+
+log = CPLog(__name__)
+
+
+class MediaBase(Plugin):
+
+ _type = None
+
+ def initType(self):
+ addEvent('media.types', self.getType)
+
+ def getType(self):
+ return self._type
+
+ def createOnComplete(self, media_id):
+
+ def onComplete():
+ try:
+ media = fireEvent('media.get', media_id, single = True)
+ if media:
+ event_name = '%s.searcher.single' % media.get('type')
+ fireEventAsync(event_name, media, on_complete = self.createNotifyFront(media_id), manual = True)
+ except:
+ log.error('Failed creating onComplete: %s', traceback.format_exc())
+
+ return onComplete
+
+ def createNotifyFront(self, media_id):
+
+ def notifyFront():
+ try:
+ media = fireEvent('media.get', media_id, single = True)
+ if media:
+ event_name = '%s.update' % media.get('type')
+ fireEvent('notify.frontend', type = event_name, data = media)
+ except:
+ log.error('Failed creating onComplete: %s', traceback.format_exc())
+
+ return notifyFront
+
+ def getDefaultTitle(self, info, default_title = None):
+
+ # Set default title
+ default_title = default_title if default_title else toUnicode(info.get('title'))
+ titles = info.get('titles', [])
+ counter = 0
+ def_title = None
+ for title in titles:
+ if (len(default_title) == 0 and counter == 0) or len(titles) == 1 or title.lower() == toUnicode(default_title.lower()) or (toUnicode(default_title) == six.u('') and toUnicode(titles[0]) == title):
+ def_title = toUnicode(title)
+ break
+ counter += 1
+
+ if not def_title and titles and len(titles) > 0:
+ def_title = toUnicode(titles[0])
+
+ return def_title or 'UNKNOWN'
+
+ def getPoster(self, media, image_urls):
+ if 'files' not in media:
+ media['files'] = {}
+
+ existing_files = media['files']
+
+ image_type = 'poster'
+ file_type = 'image_%s' % image_type
+
+ # Make existing unique
+ unique_files = list(set(existing_files.get(file_type, [])))
+
+ # Remove files that can't be found
+ for ef in unique_files:
+ if not os.path.isfile(ef):
+ unique_files.remove(ef)
+
+ # Replace new files list
+ existing_files[file_type] = unique_files
+ if len(existing_files) == 0:
+ del existing_files[file_type]
+
+ images = image_urls.get(image_type, [])
+ for y in ['SX300', 'tmdb']:
+ initially_try = [x for x in images if y in x]
+ images[:-1] = initially_try
+
+ # Loop over type
+ for image in images:
+ if not isinstance(image, (str, unicode)):
+ continue
+
+ # Check if it has top image
+ filename = '%s.%s' % (md5(image), getExt(image))
+ existing = existing_files.get(file_type, [])
+ has_latest = False
+ for x in existing:
+ if filename in x:
+ has_latest = True
+
+ if not has_latest or file_type not in existing_files or len(existing_files.get(file_type, [])) == 0:
+ file_path = fireEvent('file.download', url = image, single = True)
+ if file_path:
+ existing_files[file_type] = [toUnicode(file_path)]
+ break
+ else:
+ break
diff --git a/couchpotato/core/migration/__init__.py b/couchpotato/core/media/_base/__init__.py
old mode 100755
new mode 100644
similarity index 100%
rename from couchpotato/core/migration/__init__.py
rename to couchpotato/core/media/_base/__init__.py
diff --git a/couchpotato/core/media/_base/library/__init__.py b/couchpotato/core/media/_base/library/__init__.py
new file mode 100644
index 0000000000..a404f81c0f
--- /dev/null
+++ b/couchpotato/core/media/_base/library/__init__.py
@@ -0,0 +1,7 @@
+from .main import Library
+
+
+def autoload():
+ return Library()
+
+config = []
diff --git a/couchpotato/core/media/_base/library/base.py b/couchpotato/core/media/_base/library/base.py
new file mode 100644
index 0000000000..553eff5a50
--- /dev/null
+++ b/couchpotato/core/media/_base/library/base.py
@@ -0,0 +1,13 @@
+from couchpotato.core.event import addEvent
+from couchpotato.core.plugins.base import Plugin
+
+
+class LibraryBase(Plugin):
+
+ _type = None
+
+ def initType(self):
+ addEvent('library.types', self.getType)
+
+ def getType(self):
+ return self._type
diff --git a/couchpotato/core/media/_base/library/main.py b/couchpotato/core/media/_base/library/main.py
new file mode 100755
index 0000000000..9e614fb4b2
--- /dev/null
+++ b/couchpotato/core/media/_base/library/main.py
@@ -0,0 +1,128 @@
+from couchpotato import get_db
+from couchpotato.api import addApiView
+from couchpotato.core.event import addEvent, fireEvent
+from couchpotato.core.logger import CPLog
+from couchpotato.core.media._base.library.base import LibraryBase
+
+log = CPLog(__name__)
+
+
+class Library(LibraryBase):
+ def __init__(self):
+ addEvent('library.title', self.title)
+ addEvent('library.related', self.related)
+ addEvent('library.tree', self.tree)
+
+ addEvent('library.root', self.root)
+
+ addApiView('library.query', self.queryView)
+ addApiView('library.related', self.relatedView)
+ addApiView('library.tree', self.treeView)
+
+ def queryView(self, media_id, **kwargs):
+ db = get_db()
+ media = db.get('id', media_id)
+
+ return {
+ 'result': fireEvent('library.query', media, single = True)
+ }
+
+ def relatedView(self, media_id, **kwargs):
+ db = get_db()
+ media = db.get('id', media_id)
+
+ return {
+ 'result': fireEvent('library.related', media, single = True)
+ }
+
+ def treeView(self, media_id, **kwargs):
+ db = get_db()
+ media = db.get('id', media_id)
+
+ return {
+ 'result': fireEvent('library.tree', media, single = True)
+ }
+
+ def title(self, library):
+ return fireEvent(
+ 'library.query',
+ library,
+
+ condense = False,
+ include_year = False,
+ include_identifier = False,
+ single = True
+ )
+
+ def related(self, media):
+ result = {self.key(media['type']): media}
+
+ db = get_db()
+ cur = media
+
+ while cur and cur.get('parent_id'):
+ cur = db.get('id', cur['parent_id'])
+
+ result[self.key(cur['type'])] = cur
+
+ children = db.get_many('media_children', media['_id'], with_doc = True)
+
+ for item in children:
+ key = self.key(item['doc']['type']) + 's'
+
+ if key not in result:
+ result[key] = []
+
+ result[key].append(item['doc'])
+
+ return result
+
+ def root(self, media):
+ db = get_db()
+ cur = media
+
+ while cur and cur.get('parent_id'):
+ cur = db.get('id', cur['parent_id'])
+
+ return cur
+
+ def tree(self, media = None, media_id = None):
+ db = get_db()
+
+ if media:
+ result = media
+ elif media_id:
+ result = db.get('id', media_id, with_doc = True)
+ else:
+ return None
+
+ # Find children
+ items = db.get_many('media_children', result['_id'], with_doc = True)
+ keys = []
+
+ # Build children arrays
+ for item in items:
+ key = self.key(item['doc']['type']) + 's'
+
+ if key not in result:
+ result[key] = {}
+ elif type(result[key]) is not dict:
+ result[key] = {}
+
+ if key not in keys:
+ keys.append(key)
+
+ result[key][item['_id']] = fireEvent('library.tree', item['doc'], single = True)
+
+ # Unique children
+ for key in keys:
+ result[key] = result[key].values()
+
+ # Include releases
+ result['releases'] = fireEvent('release.for_media', result['_id'], single = True)
+
+ return result
+
+ def key(self, media_type):
+ parts = media_type.split('.')
+ return parts[-1]
diff --git a/couchpotato/core/media/_base/matcher/__init__.py b/couchpotato/core/media/_base/matcher/__init__.py
new file mode 100644
index 0000000000..c8b1e82197
--- /dev/null
+++ b/couchpotato/core/media/_base/matcher/__init__.py
@@ -0,0 +1,7 @@
+from .main import Matcher
+
+
+def autoload():
+ return Matcher()
+
+config = []
diff --git a/couchpotato/core/media/_base/matcher/base.py b/couchpotato/core/media/_base/matcher/base.py
new file mode 100644
index 0000000000..8651126314
--- /dev/null
+++ b/couchpotato/core/media/_base/matcher/base.py
@@ -0,0 +1,84 @@
+from couchpotato.core.event import addEvent
+from couchpotato.core.helpers.encoding import simplifyString
+from couchpotato.core.logger import CPLog
+from couchpotato.core.plugins.base import Plugin
+
+log = CPLog(__name__)
+
+
+class MatcherBase(Plugin):
+ type = None
+
+ def __init__(self):
+ if self.type:
+ addEvent('%s.matcher.correct' % self.type, self.correct)
+
+ def correct(self, chain, release, media, quality):
+ raise NotImplementedError()
+
+ def flattenInfo(self, info):
+ # Flatten dictionary of matches (chain info)
+ if isinstance(info, dict):
+ return dict([(key, self.flattenInfo(value)) for key, value in info.items()])
+
+ # Flatten matches
+ result = None
+
+ for match in info:
+ if isinstance(match, dict):
+ if result is None:
+ result = {}
+
+ for key, value in match.items():
+ if key not in result:
+ result[key] = []
+
+ result[key].append(value)
+ else:
+ if result is None:
+ result = []
+
+ result.append(match)
+
+ return result
+
+ def constructFromRaw(self, match):
+ if not match:
+ return None
+
+ parts = [
+ ''.join([
+ y for y in x[1:] if y
+ ]) for x in match
+ ]
+
+ return ''.join(parts)[:-1].strip()
+
+ def simplifyValue(self, value):
+ if not value:
+ return value
+
+ if isinstance(value, basestring):
+ return simplifyString(value)
+
+ if isinstance(value, list):
+ return [self.simplifyValue(x) for x in value]
+
+ raise ValueError("Unsupported value type")
+
+ def chainMatch(self, chain, group, tags):
+ info = self.flattenInfo(chain.info[group])
+
+ found_tags = []
+ for tag, accepted in tags.items():
+ values = [self.simplifyValue(x) for x in info.get(tag, [None])]
+
+ if any([val in accepted for val in values]):
+ found_tags.append(tag)
+
+ log.debug('tags found: %s, required: %s' % (found_tags, tags.keys()))
+
+ if set(tags.keys()) == set(found_tags):
+ return True
+
+ return all([key in found_tags for key, value in tags.items()])
diff --git a/couchpotato/core/media/_base/matcher/main.py b/couchpotato/core/media/_base/matcher/main.py
new file mode 100644
index 0000000000..64e13ae619
--- /dev/null
+++ b/couchpotato/core/media/_base/matcher/main.py
@@ -0,0 +1,89 @@
+from couchpotato.core.event import addEvent, fireEvent
+from couchpotato.core.helpers.variable import possibleTitles
+from couchpotato.core.logger import CPLog
+from couchpotato.core.media._base.matcher.base import MatcherBase
+from caper import Caper
+
+log = CPLog(__name__)
+
+
+class Matcher(MatcherBase):
+
+ def __init__(self):
+ super(Matcher, self).__init__()
+
+ self.caper = Caper()
+
+ addEvent('matcher.parse', self.parse)
+ addEvent('matcher.match', self.match)
+
+ addEvent('matcher.flatten_info', self.flattenInfo)
+ addEvent('matcher.construct_from_raw', self.constructFromRaw)
+
+ addEvent('matcher.correct_title', self.correctTitle)
+ addEvent('matcher.correct_quality', self.correctQuality)
+
+ def parse(self, name, parser='scene'):
+ return self.caper.parse(name, parser)
+
+ def match(self, release, media, quality):
+ match = fireEvent('matcher.parse', release['name'], single = True)
+
+ if len(match.chains) < 1:
+ log.info2('Wrong: %s, unable to parse release name (no chains)', release['name'])
+ return False
+
+ for chain in match.chains:
+ if fireEvent('%s.matcher.correct' % media['type'], chain, release, media, quality, single = True):
+ return chain
+
+ return False
+
+ def correctTitle(self, chain, media):
+ root = fireEvent('library.root', media, single = True)
+
+ if 'show_name' not in chain.info or not len(chain.info['show_name']):
+ log.info('Wrong: missing show name in parsed result')
+ return False
+
+ # Get the lower-case parsed show name from the chain
+ chain_words = [x.lower() for x in chain.info['show_name']]
+
+ # Build a list of possible titles of the media we are searching for
+ titles = root['info']['titles']
+
+ # Add year suffix titles (will result in ['', ' ', '', ...])
+ suffixes = [None, root['info']['year']]
+
+ titles = [
+ title + ((' %s' % suffix) if suffix else '')
+ for title in titles
+ for suffix in suffixes
+ ]
+
+ # Check show titles match
+ # TODO check xem names
+ for title in titles:
+ for valid_words in [x.split(' ') for x in possibleTitles(title)]:
+
+ if valid_words == chain_words:
+ return True
+
+ return False
+
+ def correctQuality(self, chain, quality, quality_map):
+ if quality['identifier'] not in quality_map:
+ log.info2('Wrong: unknown preferred quality %s', quality['identifier'])
+ return False
+
+ if 'video' not in chain.info:
+ log.info2('Wrong: no video tags found')
+ return False
+
+ video_tags = quality_map[quality['identifier']]
+
+ if not self.chainMatch(chain, 'video', video_tags):
+ log.info2('Wrong: %s tags not in chain', video_tags)
+ return False
+
+ return True
diff --git a/couchpotato/core/media/_base/media/__init__.py b/couchpotato/core/media/_base/media/__init__.py
new file mode 100644
index 0000000000..b1cde097fc
--- /dev/null
+++ b/couchpotato/core/media/_base/media/__init__.py
@@ -0,0 +1,5 @@
+from .main import MediaPlugin
+
+
+def autoload():
+ return MediaPlugin()
diff --git a/couchpotato/core/media/_base/media/index.py b/couchpotato/core/media/_base/media/index.py
new file mode 100644
index 0000000000..b40e162be9
--- /dev/null
+++ b/couchpotato/core/media/_base/media/index.py
@@ -0,0 +1,199 @@
+from string import ascii_letters
+from hashlib import md5
+
+from CodernityDB.tree_index import MultiTreeBasedIndex, TreeBasedIndex
+from couchpotato.core.helpers.encoding import toUnicode, simplifyString
+
+
+class MediaIndex(MultiTreeBasedIndex):
+ _version = 3
+
+ custom_header = """from CodernityDB.tree_index import MultiTreeBasedIndex"""
+
+ def __init__(self, *args, **kwargs):
+ kwargs['key_format'] = '32s'
+ super(MediaIndex, self).__init__(*args, **kwargs)
+
+ def make_key(self, key):
+ return md5(key).hexdigest()
+
+ def make_key_value(self, data):
+ if data.get('_t') == 'media' and (data.get('identifier') or data.get('identifiers')):
+
+ identifiers = data.get('identifiers', {})
+ if data.get('identifier') and 'imdb' not in identifiers:
+ identifiers['imdb'] = data.get('identifier')
+
+ ids = []
+ for x in identifiers:
+ ids.append(md5('%s-%s' % (x, identifiers[x])).hexdigest())
+
+ return ids, None
+
+
+class MediaStatusIndex(TreeBasedIndex):
+ _version = 1
+
+ def __init__(self, *args, **kwargs):
+ kwargs['key_format'] = '32s'
+ super(MediaStatusIndex, self).__init__(*args, **kwargs)
+
+ def make_key(self, key):
+ return md5(key).hexdigest()
+
+ def make_key_value(self, data):
+ if data.get('_t') == 'media' and data.get('status'):
+ return md5(data.get('status')).hexdigest(), None
+
+
+class MediaTypeIndex(TreeBasedIndex):
+ _version = 1
+
+ def __init__(self, *args, **kwargs):
+ kwargs['key_format'] = '32s'
+ super(MediaTypeIndex, self).__init__(*args, **kwargs)
+
+ def make_key(self, key):
+ return md5(key).hexdigest()
+
+ def make_key_value(self, data):
+ if data.get('_t') == 'media' and data.get('type'):
+ return md5(data.get('type')).hexdigest(), None
+
+
+class TitleSearchIndex(MultiTreeBasedIndex):
+ _version = 1
+
+ custom_header = """from CodernityDB.tree_index import MultiTreeBasedIndex
+from itertools import izip
+from couchpotato.core.helpers.encoding import simplifyString"""
+
+ def __init__(self, *args, **kwargs):
+ kwargs['key_format'] = '32s'
+ super(TitleSearchIndex, self).__init__(*args, **kwargs)
+ self.__l = kwargs.get('w_len', 2)
+
+ def make_key_value(self, data):
+
+ if data.get('_t') == 'media' and len(data.get('title', '')) > 0:
+
+ out = set()
+ title = str(simplifyString(data.get('title').lower()))
+ l = self.__l
+ title_split = title.split()
+
+ for x in range(len(title_split)):
+ combo = ' '.join(title_split[x:])[:32].strip()
+ out.add(combo.rjust(32, '_'))
+ combo_range = max(l, min(len(combo), 32))
+
+ for cx in range(1, combo_range):
+ ccombo = combo[:-cx].strip()
+ if len(ccombo) > l:
+ out.add(ccombo.rjust(32, '_'))
+
+ return out, None
+
+ def make_key(self, key):
+ return key.rjust(32, '_').lower()
+
+
+class TitleIndex(TreeBasedIndex):
+ _version = 4
+
+ custom_header = """from CodernityDB.tree_index import TreeBasedIndex
+from string import ascii_letters
+from couchpotato.core.helpers.encoding import toUnicode, simplifyString"""
+
+ def __init__(self, *args, **kwargs):
+ kwargs['key_format'] = '32s'
+ super(TitleIndex, self).__init__(*args, **kwargs)
+
+ def make_key(self, key):
+ return self.simplify(key)
+
+ def make_key_value(self, data):
+ if data.get('_t') == 'media' and data.get('title') is not None and len(data.get('title')) > 0:
+ return self.simplify(data['title']), None
+
+ def simplify(self, title):
+
+ title = toUnicode(title)
+
+ nr_prefix = '' if title and len(title) > 0 and title[0] in ascii_letters else '#'
+ title = simplifyString(title)
+
+ for prefix in ['the ', 'an ', 'a ']:
+ if prefix == title[:len(prefix)]:
+ title = title[len(prefix):]
+ break
+
+ return str(nr_prefix + title).ljust(32, ' ')[:32]
+
+
+class StartsWithIndex(TreeBasedIndex):
+ _version = 3
+
+ custom_header = """from CodernityDB.tree_index import TreeBasedIndex
+from string import ascii_letters
+from couchpotato.core.helpers.encoding import toUnicode, simplifyString"""
+
+ def __init__(self, *args, **kwargs):
+ kwargs['key_format'] = '1s'
+ super(StartsWithIndex, self).__init__(*args, **kwargs)
+
+ def make_key(self, key):
+ return self.first(key)
+
+ def make_key_value(self, data):
+ if data.get('_t') == 'media' and data.get('title') is not None:
+ return self.first(data['title']), None
+
+ def first(self, title):
+ title = toUnicode(title)
+ title = simplifyString(title)
+
+ for prefix in ['the ', 'an ', 'a ']:
+ if prefix == title[:len(prefix)]:
+ title = title[len(prefix):]
+ break
+
+ return str(title[0] if title and len(title) > 0 and title[0] in ascii_letters else '#').lower()
+
+
+
+class MediaChildrenIndex(TreeBasedIndex):
+ _version = 1
+
+ def __init__(self, *args, **kwargs):
+ kwargs['key_format'] = '32s'
+ super(MediaChildrenIndex, self).__init__(*args, **kwargs)
+
+ def make_key(self, key):
+ return key
+
+ def make_key_value(self, data):
+ if data.get('_t') == 'media' and data.get('parent_id'):
+ return data.get('parent_id'), None
+
+
+class MediaTagIndex(MultiTreeBasedIndex):
+ _version = 2
+
+ custom_header = """from CodernityDB.tree_index import MultiTreeBasedIndex"""
+
+ def __init__(self, *args, **kwargs):
+ kwargs['key_format'] = '32s'
+ super(MediaTagIndex, self).__init__(*args, **kwargs)
+
+ def make_key_value(self, data):
+ if data.get('_t') == 'media' and data.get('tags') and len(data.get('tags', [])) > 0:
+
+ tags = set()
+ for tag in data.get('tags', []):
+ tags.add(self.make_key(tag))
+
+ return list(tags), None
+
+ def make_key(self, key):
+ return md5(key).hexdigest()
diff --git a/couchpotato/core/media/_base/media/main.py b/couchpotato/core/media/_base/media/main.py
new file mode 100755
index 0000000000..01dc0f14be
--- /dev/null
+++ b/couchpotato/core/media/_base/media/main.py
@@ -0,0 +1,584 @@
+from datetime import timedelta
+import time
+import traceback
+from string import ascii_lowercase
+
+from CodernityDB.database import RecordNotFound, RecordDeleted
+from couchpotato import tryInt, get_db
+from couchpotato.api import addApiView
+from couchpotato.core.event import fireEvent, fireEventAsync, addEvent
+from couchpotato.core.helpers.encoding import toUnicode
+from couchpotato.core.helpers.variable import splitString, getImdb, getTitle
+from couchpotato.core.logger import CPLog
+from couchpotato.core.media import MediaBase
+from .index import MediaIndex, MediaStatusIndex, MediaTypeIndex, TitleSearchIndex, TitleIndex, StartsWithIndex, MediaChildrenIndex, MediaTagIndex
+
+
+log = CPLog(__name__)
+
+
+class MediaPlugin(MediaBase):
+
+ _database = {
+ 'media': MediaIndex,
+ 'media_search_title': TitleSearchIndex,
+ 'media_status': MediaStatusIndex,
+ 'media_tag': MediaTagIndex,
+ 'media_by_type': MediaTypeIndex,
+ 'media_title': TitleIndex,
+ 'media_startswith': StartsWithIndex,
+ 'media_children': MediaChildrenIndex,
+ }
+
+ def __init__(self):
+
+ addApiView('media.refresh', self.refresh, docs = {
+ 'desc': 'Refresh any media type by ID',
+ 'params': {
+ 'id': {'desc': 'Movie, Show, Season or Episode ID(s) you want to refresh.', 'type': 'int (comma separated)'},
+ }
+ })
+
+ addApiView('media.list', self.listView, docs = {
+ 'desc': 'List media',
+ 'params': {
+ 'type': {'type': 'string', 'desc': 'Media type to filter on.'},
+ 'status': {'type': 'array or csv', 'desc': 'Filter media by status. Example:"active,done"'},
+ 'release_status': {'type': 'array or csv', 'desc': 'Filter media by status of its releases. Example:"snatched,available"'},
+ 'limit_offset': {'desc': 'Limit and offset the media list. Examples: "50" or "50,30"'},
+ 'starts_with': {'desc': 'Starts with these characters. Example: "a" returns all media starting with the letter "a"'},
+ 'search': {'desc': 'Search media title'},
+ },
+ 'return': {'type': 'object', 'example': """{
+ 'success': True,
+ 'empty': bool, any media returned or not,
+ 'media': array, media found,
+}"""}
+ })
+
+ addApiView('media.get', self.getView, docs = {
+ 'desc': 'Get media by id',
+ 'params': {
+ 'id': {'desc': 'The id of the media'},
+ }
+ })
+
+ addApiView('media.delete', self.deleteView, docs = {
+ 'desc': 'Delete a media from the wanted list',
+ 'params': {
+ 'id': {'desc': 'Media ID(s) you want to delete.', 'type': 'int (comma separated)'},
+ 'delete_from': {'desc': 'Delete media from this page', 'type': 'string: all (default), wanted, manage'},
+ }
+ })
+
+ addApiView('media.available_chars', self.charView)
+
+ addEvent('app.load', self.addSingleRefreshView, priority = 100)
+ addEvent('app.load', self.addSingleListView, priority = 100)
+ addEvent('app.load', self.addSingleCharView, priority = 100)
+ addEvent('app.load', self.addSingleDeleteView, priority = 100)
+ addEvent('app.load', self.cleanupFaults)
+
+ addEvent('media.get', self.get)
+ addEvent('media.with_status', self.withStatus)
+ addEvent('media.with_identifiers', self.withIdentifiers)
+ addEvent('media.list', self.list)
+ addEvent('media.delete', self.delete)
+ addEvent('media.restatus', self.restatus)
+ addEvent('media.tag', self.tag)
+ addEvent('media.untag', self.unTag)
+
+ # Wrongly tagged media files
+ def cleanupFaults(self):
+ medias = fireEvent('media.with_status', 'ignored', single = True) or []
+
+ db = get_db()
+ for media in medias:
+ try:
+ media['status'] = 'done'
+ db.update(media)
+ except:
+ pass
+
+ def refresh(self, id = '', **kwargs):
+ handlers = []
+ ids = splitString(id)
+
+ for x in ids:
+
+ refresh_handler = self.createRefreshHandler(x)
+ if refresh_handler:
+ handlers.append(refresh_handler)
+
+ fireEvent('notify.frontend', type = 'media.busy', data = {'_id': ids})
+ fireEventAsync('schedule.queue', handlers = handlers)
+
+ return {
+ 'success': True,
+ }
+
+ def createRefreshHandler(self, media_id):
+
+ try:
+ media = get_db().get('id', media_id)
+ event = '%s.update' % media.get('type')
+
+ def handler():
+ fireEvent(event, media_id = media_id, on_complete = self.createOnComplete(media_id))
+
+ return handler
+
+ except:
+ log.error('Refresh handler for non existing media: %s', traceback.format_exc())
+
+ def addSingleRefreshView(self):
+
+ for media_type in fireEvent('media.types', merge = True):
+ addApiView('%s.refresh' % media_type, self.refresh)
+
+ def get(self, media_id):
+
+ try:
+ db = get_db()
+
+ imdb_id = getImdb(str(media_id))
+
+ if imdb_id:
+ media = db.get('media', 'imdb-%s' % imdb_id, with_doc = True)['doc']
+ else:
+ media = db.get('id', media_id)
+
+ if media:
+
+ # Attach category
+ try: media['category'] = db.get('id', media.get('category_id'))
+ except: pass
+
+ media['releases'] = fireEvent('release.for_media', media['_id'], single = True)
+
+ return media
+
+ except (RecordNotFound, RecordDeleted):
+ log.error('Media with id "%s" not found', media_id)
+ except:
+ raise
+
+ def getView(self, id = None, **kwargs):
+
+ media = self.get(id) if id else None
+
+ return {
+ 'success': media is not None,
+ 'media': media,
+ }
+
+ def withStatus(self, status, types = None, with_doc = True):
+
+ db = get_db()
+
+ if types and not isinstance(types, (list, tuple)):
+ types = [types]
+
+ status = list(status if isinstance(status, (list, tuple)) else [status])
+
+ for s in status:
+ for ms in db.get_many('media_status', s):
+ if with_doc:
+ try:
+ doc = db.get('id', ms['_id'])
+
+ if types and doc.get('type') not in types:
+ continue
+
+ yield doc
+ except (RecordDeleted, RecordNotFound):
+ log.debug('Record not found, skipping: %s', ms['_id'])
+ except (ValueError, EOFError):
+ fireEvent('database.delete_corrupted', ms.get('_id'), traceback_error = traceback.format_exc(0))
+ else:
+ yield ms
+
+ def withIdentifiers(self, identifiers, with_doc = False):
+ db = get_db()
+
+ for x in identifiers:
+ try:
+ return db.get('media', '%s-%s' % (x, identifiers[x]), with_doc = with_doc)
+ except:
+ pass
+
+ log.debug('No media found with identifiers: %s', identifiers)
+ return False
+
+ def list(self, types = None, status = None, release_status = None, status_or = False, limit_offset = None, with_tags = None, starts_with = None, search = None):
+
+ db = get_db()
+
+ # Make a list from string
+ if status and not isinstance(status, (list, tuple)):
+ status = [status]
+ if release_status and not isinstance(release_status, (list, tuple)):
+ release_status = [release_status]
+ if types and not isinstance(types, (list, tuple)):
+ types = [types]
+ if with_tags and not isinstance(with_tags, (list, tuple)):
+ with_tags = [with_tags]
+
+ # query media ids
+ if types:
+ all_media_ids = set()
+ for media_type in types:
+ all_media_ids = all_media_ids.union(set([x['_id'] for x in db.get_many('media_by_type', media_type)]))
+ else:
+ all_media_ids = set([x['_id'] for x in db.all('media')])
+
+ media_ids = list(all_media_ids)
+ filter_by = {}
+
+ # Filter on movie status
+ if status and len(status) > 0:
+ filter_by['media_status'] = set()
+ for media_status in fireEvent('media.with_status', status, with_doc = False, single = True):
+ filter_by['media_status'].add(media_status.get('_id'))
+
+ # Filter on release status
+ if release_status and len(release_status) > 0:
+ filter_by['release_status'] = set()
+ for release_status in fireEvent('release.with_status', release_status, with_doc = False, single = True):
+ filter_by['release_status'].add(release_status.get('media_id'))
+
+ # Add search filters
+ if starts_with:
+ starts_with = toUnicode(starts_with.lower())[0]
+ starts_with = starts_with if starts_with in ascii_lowercase else '#'
+ filter_by['starts_with'] = [x['_id'] for x in db.get_many('media_startswith', starts_with)]
+
+ # Add tag filter
+ if with_tags:
+ filter_by['with_tags'] = set()
+ for tag in with_tags:
+ for x in db.get_many('media_tag', tag):
+ filter_by['with_tags'].add(x['_id'])
+
+ # Filter with search query
+ if search:
+ filter_by['search'] = [x['_id'] for x in db.get_many('media_search_title', search)]
+
+ if status_or and 'media_status' in filter_by and 'release_status' in filter_by:
+ filter_by['status'] = list(filter_by['media_status']) + list(filter_by['release_status'])
+ del filter_by['media_status']
+ del filter_by['release_status']
+
+ # Filter by combining ids
+ for x in filter_by:
+ media_ids = [n for n in media_ids if n in filter_by[x]]
+
+ total_count = len(media_ids)
+ if total_count == 0:
+ return 0, []
+
+ offset = 0
+ limit = -1
+ if limit_offset:
+ splt = splitString(limit_offset) if isinstance(limit_offset, (str, unicode)) else limit_offset
+ limit = tryInt(splt[0])
+ offset = tryInt(0 if len(splt) is 1 else splt[1])
+
+ # List movies based on title order
+ medias = []
+ for m in db.all('media_title'):
+ media_id = m['_id']
+ if media_id not in media_ids: continue
+ if offset > 0:
+ offset -= 1
+ continue
+
+ media = fireEvent('media.get', media_id, single = True)
+
+ # Skip if no media has been found
+ if not media:
+ continue
+
+ # Merge releases with movie dict
+ medias.append(media)
+
+ # remove from media ids
+ media_ids.remove(media_id)
+ if len(media_ids) == 0 or len(medias) == limit: break
+
+ return total_count, medias
+
+ def listView(self, **kwargs):
+
+ total_movies, movies = self.list(
+ types = splitString(kwargs.get('type')),
+ status = splitString(kwargs.get('status')),
+ release_status = splitString(kwargs.get('release_status')),
+ status_or = kwargs.get('status_or') is not None,
+ limit_offset = kwargs.get('limit_offset'),
+ with_tags = splitString(kwargs.get('with_tags')),
+ starts_with = kwargs.get('starts_with'),
+ search = kwargs.get('search')
+ )
+
+ return {
+ 'success': True,
+ 'empty': len(movies) == 0,
+ 'total': total_movies,
+ 'movies': movies,
+ }
+
+ def addSingleListView(self):
+
+ for media_type in fireEvent('media.types', merge = True):
+ tempList = lambda *args, **kwargs : self.listView(type = media_type, **kwargs)
+ addApiView('%s.list' % media_type, tempList, docs = {
+ 'desc': 'List media',
+ 'params': {
+ 'status': {'type': 'array or csv', 'desc': 'Filter ' + media_type + ' by status. Example:"active,done"'},
+ 'release_status': {'type': 'array or csv', 'desc': 'Filter ' + media_type + ' by status of its releases. Example:"snatched,available"'},
+ 'limit_offset': {'desc': 'Limit and offset the ' + media_type + ' list. Examples: "50" or "50,30"'},
+ 'starts_with': {'desc': 'Starts with these characters. Example: "a" returns all ' + media_type + 's starting with the letter "a"'},
+ 'search': {'desc': 'Search ' + media_type + ' title'},
+ },
+ 'return': {'type': 'object', 'example': """{
+ 'success': True,
+ 'empty': bool, any """ + media_type + """s returned or not,
+ 'media': array, media found,
+ }"""}
+ })
+
+ def availableChars(self, types = None, status = None, release_status = None):
+
+ db = get_db()
+
+ # Make a list from string
+ if status and not isinstance(status, (list, tuple)):
+ status = [status]
+ if release_status and not isinstance(release_status, (list, tuple)):
+ release_status = [release_status]
+ if types and not isinstance(types, (list, tuple)):
+ types = [types]
+
+ # query media ids
+ if types:
+ all_media_ids = set()
+ for media_type in types:
+ all_media_ids = all_media_ids.union(set([x['_id'] for x in db.get_many('media_by_type', media_type)]))
+ else:
+ all_media_ids = set([x['_id'] for x in db.all('media')])
+
+ media_ids = all_media_ids
+ filter_by = {}
+
+ # Filter on movie status
+ if status and len(status) > 0:
+ filter_by['media_status'] = set()
+ for media_status in fireEvent('media.with_status', status, with_doc = False, single = True):
+ filter_by['media_status'].add(media_status.get('_id'))
+
+ # Filter on release status
+ if release_status and len(release_status) > 0:
+ filter_by['release_status'] = set()
+ for release_status in fireEvent('release.with_status', release_status, with_doc = False, single = True):
+ filter_by['release_status'].add(release_status.get('media_id'))
+
+ # Filter by combining ids
+ for x in filter_by:
+ media_ids = [n for n in media_ids if n in filter_by[x]]
+
+ chars = set()
+ for x in db.all('media_startswith'):
+ if x['_id'] in media_ids:
+ chars.add(x['key'])
+
+ if len(chars) == 27:
+ break
+
+ return list(chars)
+
+ def charView(self, **kwargs):
+
+ type = splitString(kwargs.get('type', 'movie'))
+ status = splitString(kwargs.get('status', None))
+ release_status = splitString(kwargs.get('release_status', None))
+ chars = self.availableChars(type, status, release_status)
+
+ return {
+ 'success': True,
+ 'empty': len(chars) == 0,
+ 'chars': chars,
+ }
+
+ def addSingleCharView(self):
+
+ for media_type in fireEvent('media.types', merge = True):
+ tempChar = lambda *args, **kwargs : self.charView(type = media_type, **kwargs)
+ addApiView('%s.available_chars' % media_type, tempChar)
+
+ def delete(self, media_id, delete_from = None):
+
+ try:
+ db = get_db()
+
+ media = db.get('id', media_id)
+ if media:
+ deleted = False
+
+ media_releases = fireEvent('release.for_media', media['_id'], single = True)
+
+ if delete_from == 'all':
+ # Delete connected releases
+ for release in media_releases:
+ db.delete(release)
+
+ db.delete(media)
+ deleted = True
+ else:
+
+ total_releases = len(media_releases)
+ total_deleted = 0
+ new_media_status = None
+
+ for release in media_releases:
+ if delete_from in ['wanted', 'snatched', 'late']:
+ if release.get('status') != 'done':
+ db.delete(release)
+ total_deleted += 1
+ new_media_status = 'done'
+ elif delete_from == 'manage':
+ if release.get('status') == 'done' or media.get('status') == 'done':
+ db.delete(release)
+ total_deleted += 1
+
+ if (total_releases == total_deleted) or (total_releases == 0 and not new_media_status) or (not new_media_status and delete_from == 'late'):
+ db.delete(media)
+ deleted = True
+ elif new_media_status:
+ media['status'] = new_media_status
+
+ # Remove profile (no use for in manage)
+ if new_media_status == 'done':
+ media['profile_id'] = None
+
+ db.update(media)
+
+ fireEvent('media.untag', media['_id'], 'recent', single = True)
+ else:
+ fireEvent('media.restatus', media.get('_id'), single = True)
+
+ if deleted:
+ fireEvent('notify.frontend', type = 'media.deleted', data = media)
+ except:
+ log.error('Failed deleting media: %s', traceback.format_exc())
+
+ return True
+
+ def deleteView(self, id = '', **kwargs):
+
+ ids = splitString(id)
+ for media_id in ids:
+ self.delete(media_id, delete_from = kwargs.get('delete_from', 'all'))
+
+ return {
+ 'success': True,
+ }
+
+ def addSingleDeleteView(self):
+
+ for media_type in fireEvent('media.types', merge = True):
+ tempDelete = lambda *args, **kwargs : self.deleteView(type = media_type, **kwargs)
+ addApiView('%s.delete' % media_type, tempDelete, docs = {
+ 'desc': 'Delete a ' + media_type + ' from the wanted list',
+ 'params': {
+ 'id': {'desc': 'Media ID(s) you want to delete.', 'type': 'int (comma separated)'},
+ 'delete_from': {'desc': 'Delete ' + media_type + ' from this page', 'type': 'string: all (default), wanted, manage'},
+ }
+ })
+
+ def restatus(self, media_id, tag_recent = True, allowed_restatus = None):
+
+ try:
+ db = get_db()
+
+ m = db.get('id', media_id)
+ previous_status = m['status']
+
+ log.debug('Changing status for %s', getTitle(m))
+ if not m['profile_id']:
+ m['status'] = 'done'
+ else:
+ m['status'] = 'active'
+
+ try:
+ profile = db.get('id', m['profile_id'])
+ media_releases = fireEvent('release.for_media', m['_id'], single = True)
+ done_releases = [release for release in media_releases if release.get('status') == 'done']
+
+ if done_releases:
+
+ # Check if we are finished with the media
+ for release in done_releases:
+ if fireEvent('quality.isfinish', {'identifier': release['quality'], 'is_3d': release.get('is_3d', False)}, profile, timedelta(seconds = time.time() - release['last_edit']).days, single = True):
+ m['status'] = 'done'
+ break
+
+ elif previous_status == 'done':
+ m['status'] = 'done'
+
+ except RecordNotFound:
+ log.debug('Failed restatus, keeping previous: %s', traceback.format_exc())
+ m['status'] = previous_status
+
+ # Only update when status has changed
+ if previous_status != m['status'] and (not allowed_restatus or m['status'] in allowed_restatus):
+ db.update(m)
+
+ # Tag media as recent
+ if tag_recent:
+ self.tag(media_id, 'recent', update_edited = True)
+
+ return m['status']
+ except:
+ log.error('Failed restatus: %s', traceback.format_exc())
+
+ def tag(self, media_id, tag, update_edited = False):
+
+ try:
+ db = get_db()
+ m = db.get('id', media_id)
+
+ if update_edited:
+ m['last_edit'] = int(time.time())
+
+ tags = m.get('tags') or []
+ if tag not in tags:
+ tags.append(tag)
+ m['tags'] = tags
+ db.update(m)
+
+ return True
+ except:
+ log.error('Failed tagging: %s', traceback.format_exc())
+
+ return False
+
+ def unTag(self, media_id, tag):
+
+ try:
+ db = get_db()
+ m = db.get('id', media_id)
+
+ tags = m.get('tags') or []
+ if tag in tags:
+ new_tags = list(set(tags))
+ new_tags.remove(tag)
+
+ m['tags'] = new_tags
+ db.update(m)
+
+ return True
+ except:
+ log.error('Failed untagging: %s', traceback.format_exc())
+
+ return False
diff --git a/couchpotato/core/providers/__init__.py b/couchpotato/core/media/_base/providers/__init__.py
similarity index 100%
rename from couchpotato/core/providers/__init__.py
rename to couchpotato/core/media/_base/providers/__init__.py
diff --git a/couchpotato/core/providers/metadata/__init__.py b/couchpotato/core/media/_base/providers/automation/__init__.py
similarity index 100%
rename from couchpotato/core/providers/metadata/__init__.py
rename to couchpotato/core/media/_base/providers/automation/__init__.py
diff --git a/couchpotato/core/media/_base/providers/automation/base.py b/couchpotato/core/media/_base/providers/automation/base.py
new file mode 100644
index 0000000000..21d205aeac
--- /dev/null
+++ b/couchpotato/core/media/_base/providers/automation/base.py
@@ -0,0 +1,8 @@
+from couchpotato.core.logger import CPLog
+from couchpotato.core.media._base.providers.base import Provider
+
+log = CPLog(__name__)
+
+
+class AutomationBase(Provider):
+ pass
diff --git a/couchpotato/core/media/_base/providers/base.py b/couchpotato/core/media/_base/providers/base.py
new file mode 100644
index 0000000000..9ff11f4749
--- /dev/null
+++ b/couchpotato/core/media/_base/providers/base.py
@@ -0,0 +1,377 @@
+from urlparse import urlparse
+import json
+import re
+from requests import HTTPError
+import time
+import traceback
+import xml.etree.ElementTree as XMLTree
+
+try:
+ from xml.etree.ElementTree import ParseError as XmlParseError
+except ImportError:
+ from xml.parsers.expat import ExpatError as XmlParseError
+
+from couchpotato.core.event import addEvent, fireEvent
+from couchpotato.core.helpers.encoding import ss
+from couchpotato.core.helpers.variable import tryFloat, mergeDicts, md5, \
+ possibleTitles
+from couchpotato.core.logger import CPLog
+from couchpotato.core.plugins.base import Plugin
+from couchpotato.environment import Env
+
+
+log = CPLog(__name__)
+
+
+class MultiProvider(Plugin):
+
+ def __init__(self):
+ self._classes = []
+
+ for Type in self.getTypes():
+ klass = Type()
+
+ # Overwrite name so logger knows what we're talking about
+ klass.setName('%s:%s' % (self.getName(), klass.getName()))
+
+ self._classes.append(klass)
+
+ def getTypes(self):
+ return []
+
+ def getClasses(self):
+ return self._classes
+
+
+class Provider(Plugin):
+
+ type = None # movie, show, subtitle, trailer, ...
+ http_time_between_calls = 10 # Default timeout for url requests
+
+ last_available_check = {}
+ is_available = {}
+
+ def isAvailable(self, test_url):
+
+ if Env.get('dev'): return True
+
+ now = time.time()
+ host = urlparse(test_url).hostname
+
+ if self.last_available_check.get(host) < now - 900:
+ self.last_available_check[host] = now
+
+ try:
+ self.urlopen(test_url, 30)
+ self.is_available[host] = True
+ except:
+ log.error('"%s" unavailable, trying again in 15 minutes.', host)
+ self.is_available[host] = False
+
+ return self.is_available.get(host, False)
+
+ def getJsonData(self, url, decode_from = None, **kwargs):
+
+ cache_key = md5(url)
+ data = self.getCache(cache_key, url, **kwargs)
+
+ if data:
+ try:
+ data = data.strip()
+ if decode_from:
+ data = data.decode(decode_from)
+
+ return json.loads(data)
+ except:
+ log.error('Failed parsing %s: %s', (self.getName(), traceback.format_exc()))
+
+ return []
+
+ def getRSSData(self, url, item_path = 'channel/item', **kwargs):
+
+ cache_key = md5(url)
+ data = self.getCache(cache_key, url, **kwargs)
+
+ if data and len(data) > 0:
+ try:
+ data = XMLTree.fromstring(data)
+ return self.getElements(data, item_path)
+ except:
+ try:
+ data = XMLTree.fromstring(ss(data))
+ return self.getElements(data, item_path)
+ except XmlParseError:
+ log.error('Invalid XML returned, check "%s" manually for issues', url)
+ except:
+ log.error('Failed parsing %s: %s', (self.getName(), traceback.format_exc()))
+
+ return []
+
+ def getHTMLData(self, url, **kwargs):
+
+ cache_key = md5(url)
+ return self.getCache(cache_key, url, **kwargs)
+
+
+class YarrProvider(Provider):
+
+ protocol = None # nzb, torrent, torrent_magnet
+
+ cat_ids = {}
+ cat_backup_id = None
+
+ size_gb = ['gb', 'gib','go']
+ size_mb = ['mb', 'mib','mo']
+ size_kb = ['kb', 'kib','ko']
+
+ last_login_check = None
+ login_failures = 0
+
+ login_fail_msg = None
+
+ def __init__(self):
+ addEvent('provider.enabled_protocols', self.getEnabledProtocol)
+ addEvent('provider.belongs_to', self.belongsTo)
+ addEvent('provider.search.%s.%s' % (self.protocol, self.type), self.search)
+
+ def getEnabledProtocol(self):
+ if self.isEnabled():
+ return self.protocol
+ else:
+ return []
+
+ def buildUrl(self, *args, **kwargs):
+ pass
+
+ def login(self):
+
+ # Check if we are still logged in every hour
+ now = time.time()
+ if self.last_login_check and self.last_login_check < (now - 3600):
+ try:
+ output = self.urlopen(self.urls['login_check'])
+ if self.loginCheckSuccess(output):
+ self.last_login_check = now
+ return True
+ except: pass
+ self.last_login_check = None
+
+ if self.last_login_check:
+ return True
+
+ log.info('Session expired, attempting a new login.')
+
+ try:
+ output = self.urlopen(self.urls['login'], data = self.getLoginParams())
+
+ if self.loginSuccess(output):
+ self.last_login_check = now
+ self.login_failures = 0
+ return True
+
+ error = 'unknown'
+ except Exception as e:
+ if isinstance(e, HTTPError):
+ if e.response.status_code >= 400 and e.response.status_code < 500:
+ self.login_failures += 1
+ if self.login_failures >= 3:
+ self.disableAccount()
+ error = traceback.format_exc()
+
+ self.last_login_check = None
+
+ if self.login_fail_msg and self.login_fail_msg in output:
+ error = "Login credentials rejected."
+ self.disableAccount()
+
+ log.error('Failed to login %s: %s', (self.getName(), error))
+ return False
+
+ def loginSuccess(self, output):
+ return True
+
+ def loginCheckSuccess(self, output):
+ return True
+
+ def loginDownload(self, url = '', nzb_id = ''):
+ try:
+ if not self.login():
+ log.error('Failed downloading from %s', self.getName())
+ return self.urlopen(url)
+ except:
+ log.error('Failed downloading from %s: %s', (self.getName(), traceback.format_exc()))
+
+ def getLoginParams(self):
+ return {}
+
+ def download(self, url = '', nzb_id = ''):
+ try:
+ return self.urlopen(url, headers = {'User-Agent': Env.getIdentifier()}, show_error = False)
+ except:
+ log.error('Failed getting release from %s: %s', (self.getName(), traceback.format_exc()))
+
+ return 'try_next'
+
+ def search(self, media, quality):
+
+ if self.isDisabled():
+ return []
+
+ # Login if needed
+ if self.urls.get('login') and not self.login():
+ log.error('Failed to login to: %s', self.getName())
+ return []
+
+ # Create result container
+ imdb_results = hasattr(self, '_search')
+ results = ResultList(self, media, quality, imdb_results = imdb_results)
+
+ # Do search based on imdb id
+ if imdb_results:
+ self._search(media, quality, results)
+ # Search possible titles
+ else:
+ media_title = fireEvent('library.query', media, include_year = False, single = True)
+
+ for title in possibleTitles(media_title):
+ self._searchOnTitle(title, media, quality, results)
+
+ return results
+
+ def belongsTo(self, url, provider = None, host = None):
+ try:
+ if provider and provider == self.getName():
+ return self
+
+ hostname = urlparse(url).hostname
+ if host and hostname in host:
+ return self
+ else:
+ for url_type in self.urls:
+ download_url = self.urls[url_type]
+ if hostname in download_url:
+ return self
+ except:
+ log.debug('Url %s doesn\'t belong to %s', (url, self.getName()))
+
+ return
+
+ def parseSize(self, size):
+
+ size_raw = size.lower()
+ size = tryFloat(re.sub(r'[^0-9.]', '', size).strip())
+
+ for s in self.size_gb:
+ if s in size_raw:
+ return size * 1024
+
+ for s in self.size_mb:
+ if s in size_raw:
+ return size
+
+ for s in self.size_kb:
+ if s in size_raw:
+ return size / 1024
+
+ return 0
+
+ def getCatId(self, quality = None):
+ if not quality: quality = {}
+ identifier = quality.get('identifier')
+
+ want_3d = False
+ if quality.get('custom'):
+ want_3d = quality['custom'].get('3d')
+
+ for ids, qualities in self.cat_ids:
+ if identifier in qualities or (want_3d and '3d' in qualities):
+ return ids
+
+ if self.cat_backup_id:
+ return [self.cat_backup_id]
+
+ return []
+
+ def disableAccount(self):
+ log.error("Failed %s login, disabling provider. "
+ "Please check the configuration. Re-enabling the "
+ "provider without fixing the problem may result "
+ "in an IP ban, depending on the site.", self.getName())
+ self.conf(self.enabled_option, False)
+ self.login_failures = 0
+
+
+class ResultList(list):
+
+ result_ids = None
+ provider = None
+ media = None
+ quality = None
+
+ def __init__(self, provider, media, quality, **kwargs):
+
+ self.result_ids = []
+ self.provider = provider
+ self.media = media
+ self.quality = quality
+ self.kwargs = kwargs
+
+ super(ResultList, self).__init__()
+
+ def extend(self, results):
+ for r in results:
+ self.append(r)
+
+ def append(self, result):
+
+ new_result = self.fillResult(result)
+
+ is_correct = fireEvent('searcher.correct_release', new_result, self.media, self.quality,
+ imdb_results = self.kwargs.get('imdb_results', False), single = True)
+
+ if is_correct and new_result['id'] not in self.result_ids:
+ is_correct_weight = float(is_correct)
+
+ new_result['score'] += fireEvent('score.calculate', new_result, self.media, single = True)
+
+ old_score = new_result['score']
+ new_result['score'] = int(old_score * is_correct_weight)
+
+ log.info2('Found correct release with weight %.02f, old_score(%d) now scaled to score(%d)', (
+ is_correct_weight,
+ old_score,
+ new_result['score']
+ ))
+
+ self.found(new_result)
+ self.result_ids.append(result['id'])
+
+ super(ResultList, self).append(new_result)
+
+ def fillResult(self, result):
+
+ defaults = {
+ 'id': 0,
+ 'protocol': self.provider.protocol,
+ 'type': self.provider.type,
+ 'provider': self.provider.getName(),
+ 'download': self.provider.loginDownload if self.provider.urls.get('login') else self.provider.download,
+ 'seed_ratio': Env.setting('seed_ratio', section = self.provider.getName().lower(), default = ''),
+ 'seed_time': Env.setting('seed_time', section = self.provider.getName().lower(), default = ''),
+ 'url': '',
+ 'name': '',
+ 'age': 0,
+ 'size': 0,
+ 'description': '',
+ 'score': 0
+ }
+
+ return mergeDicts(defaults, result)
+
+ def found(self, new_result):
+ if not new_result.get('provider_extra'):
+ new_result['provider_extra'] = ''
+ else:
+ new_result['provider_extra'] = ', %s' % new_result['provider_extra']
+
+ log.info('Found: score(%(score)s) on %(provider)s%(provider_extra)s: %(name)s', new_result)
diff --git a/couchpotato/core/providers/movie/__init__.py b/couchpotato/core/media/_base/providers/info/__init__.py
similarity index 100%
rename from couchpotato/core/providers/movie/__init__.py
rename to couchpotato/core/media/_base/providers/info/__init__.py
diff --git a/couchpotato/core/media/_base/providers/info/base.py b/couchpotato/core/media/_base/providers/info/base.py
new file mode 100644
index 0000000000..90a9153ca7
--- /dev/null
+++ b/couchpotato/core/media/_base/providers/info/base.py
@@ -0,0 +1,5 @@
+from couchpotato.core.media._base.providers.base import Provider
+
+
+class BaseInfoProvider(Provider):
+    """Common base for info providers; subclasses override `type`."""
+    type = 'unknown'
diff --git a/couchpotato/core/providers/trailer/__init__.py b/couchpotato/core/media/_base/providers/metadata/__init__.py
similarity index 100%
rename from couchpotato/core/providers/trailer/__init__.py
rename to couchpotato/core/media/_base/providers/metadata/__init__.py
diff --git a/couchpotato/core/media/_base/providers/metadata/base.py b/couchpotato/core/media/_base/providers/metadata/base.py
new file mode 100644
index 0000000000..2a8c5cfe6d
--- /dev/null
+++ b/couchpotato/core/media/_base/providers/metadata/base.py
@@ -0,0 +1,8 @@
+from couchpotato.core.logger import CPLog
+from couchpotato.core.plugins.base import Plugin
+
+log = CPLog(__name__)
+
+
+class MetaDataBase(Plugin):
+    """Common base class for metadata provider plugins (marker only)."""
+    pass
diff --git a/couchpotato/core/media/_base/providers/nzb/__init__.py b/couchpotato/core/media/_base/providers/nzb/__init__.py
new file mode 100644
index 0000000000..88d9865d9a
--- /dev/null
+++ b/couchpotato/core/media/_base/providers/nzb/__init__.py
@@ -0,0 +1,14 @@
+# Settings-UI definition: a 'list' group under the "searcher" tab; individual
+# usenet provider configs attach themselves via 'list': 'nzb_providers'.
+config = [{
+    'name': 'nzb_providers',
+    'groups': [
+        {
+            'label': 'Usenet Providers',
+            'description': 'Providers searching usenet for new releases',
+            'wizard': True,
+            'type': 'list',
+            'name': 'nzb_providers',
+            'tab': 'searcher',
+            'options': [],
+        },
+    ],
+}]
diff --git a/couchpotato/core/media/_base/providers/nzb/base.py b/couchpotato/core/media/_base/providers/nzb/base.py
new file mode 100644
index 0000000000..5e19e5246f
--- /dev/null
+++ b/couchpotato/core/media/_base/providers/nzb/base.py
@@ -0,0 +1,11 @@
+import time
+
+from couchpotato.core.media._base.providers.base import YarrProvider
+
+
+class NZBProvider(YarrProvider):
+    """Base class for usenet (NZB) search providers."""
+
+    # Protocol identifier attached to results from these providers.
+    protocol = 'nzb'
+
+    def calculateAge(self, unix):
+        # Whole days elapsed since the given unix timestamp (integer division).
+        return int(time.time() - unix) / 24 / 60 / 60
diff --git a/couchpotato/core/providers/userscript/__init__.py b/couchpotato/core/media/_base/providers/nzb/binnewz/__init__.py
similarity index 100%
rename from couchpotato/core/providers/userscript/__init__.py
rename to couchpotato/core/media/_base/providers/nzb/binnewz/__init__.py
diff --git a/couchpotato/core/media/_base/providers/nzb/binnewz/binsearch.py b/couchpotato/core/media/_base/providers/nzb/binnewz/binsearch.py
new file mode 100644
index 0000000000..3308a29073
--- /dev/null
+++ b/couchpotato/core/media/_base/providers/nzb/binnewz/binsearch.py
@@ -0,0 +1,87 @@
+import urllib
+from bs4 import BeautifulSoup
+from couchpotato.core.logger import CPLog
+import re
+from nzbdownloader import NZBDownloader
+from nzbdownloader import NZBPostURLSearchResult
+from couchpotato.core.helpers.variable import tryInt, tryFloat
+log = CPLog(__name__)
+
+class BinSearch(NZBDownloader):
+
+ def search(self, filename, minSize, newsgroup=None):
+ filename=filename.encode('utf8')
+ if newsgroup != None:
+ binSearchURLs = [ urllib.urlencode({'server' : 1, 'max': '250', 'adv_g' : newsgroup, 'q' : filename}), urllib.urlencode({'server' : 2, 'max': '250', 'adv_g' : newsgroup, 'q' : filename})]
+ else:
+ binSearchURLs = [ urllib.urlencode({'server' : 1, 'max': '250', 'q' : filename}), urllib.urlencode({'server' : 2, 'max': '250', 'q' : filename})]
+
+ for suffixURL in binSearchURLs:
+ binSearchURL = "https://binsearch.info/?adv_age=&" + suffixURL
+
+ binSearchSoup = BeautifulSoup( self.open(binSearchURL) )
+
+ foundName = None
+ sizeInMegs = None
+ main_table = binSearchSoup.find('table', attrs = {'id': 'r2'})
+ if not main_table:
+ return
+
+ items = main_table.find_all('tr')
+ for row in items:
+ title = row.find('span', attrs = {'class': 's'})
+
+ if not title: continue
+
+ nzb_id = row.find('input', attrs = {'type': 'checkbox'})['name']
+ info = row.find('span', attrs = {'class':'d'})
+ try:
+ size_match = re.search('size:.(?P[0-9\.]+.[GMB]+)', info.text)
+ except:
+ continue
+ age = 0
+ try: age = re.search('(?P\d+d)', row.find_all('td')[-1:][0].text).group('size')[:-1]
+ except: pass
+
+ parts = re.search('available:.(?P\d+)./.(?P\d+)', info.text)
+ total = float(tryInt(parts.group('total')))
+ parts = float(tryInt(parts.group('parts')))
+
+ if (total / parts) < 1 and ((total / parts) < 0.95 or ((total / parts) >= 0.95 and not ('par2' in info.text.lower() or 'pa3' in info.text.lower()))):
+ log.info2('Wrong: \'%s\', not complete: %s out of %s', (title, parts, total))
+ continue
+
+ if 'requires password' in info.text.lower():
+ log.info2('Wrong: \'%s\', passworded', (title))
+ continue
+ sizeInMegs=self.parseSize(size_match.group('size'))
+ if sizeInMegs < minSize:
+ continue
+ postData = title
+ nzbURL = 'https://www.binsearch.info/fcgi/nzb.fcgi?q=' + nzb_id
+ nzbid=nzb_id
+ age=tryInt(age)
+ return NZBPostURLSearchResult( self, nzbURL, postData, sizeInMegs, binSearchURL, age, nzbid )
+
+ def parseSize(self, size):
+ size_gb = ['gb', 'gib','go']
+ size_mb = ['mb', 'mib','mo']
+ size_kb = ['kb', 'kib','ko']
+ size_raw = size.lower()
+ size = tryFloat(re.sub(r'[^0-9.]', '', size).strip())
+
+ for s in size_gb:
+ if s in size_raw:
+ return size * 1024
+
+ for s in size_mb:
+ if s in size_raw:
+ return size
+
+ for s in size_kb:
+ if s in size_raw:
+ return size / 1024
+
+ return 0
+
+
diff --git a/couchpotato/core/media/_base/providers/nzb/binnewz/main.py b/couchpotato/core/media/_base/providers/nzb/binnewz/main.py
new file mode 100644
index 0000000000..55e2802e54
--- /dev/null
+++ b/couchpotato/core/media/_base/providers/nzb/binnewz/main.py
@@ -0,0 +1,392 @@
+from binsearch import BinSearch
+from nzbindex import NZBIndex
+
+from bs4 import BeautifulSoup
+from couchpotato.core.helpers.variable import getTitle, splitString, tryInt
+from couchpotato.core.helpers.encoding import simplifyString
+from couchpotato.environment import Env
+from couchpotato.core.logger import CPLog
+from couchpotato.core.helpers import namer_check
+from couchpotato.core.media._base.providers.nzb.base import NZBProvider
+
+log = CPLog(__name__)
+import re
+import urllib
+import urllib2
+import traceback
+class Base(NZBProvider):
+
+ urls = {
+ 'download': 'http://www.binnews.in/',
+ 'detail': 'http://www.binnews.in/',
+ 'search': 'http://www.binnews.in/_bin/search2.php',
+ }
+
+ http_time_between_calls = 4 # Seconds
+ cat_backup_id = None
+
+ def _search(self, movie, quality, results):
+ nzbDownloaders = [BinSearch(), NZBIndex()]
+ MovieTitles = movie['info']['titles']
+ moviequality = simplifyString(quality['identifier'])
+ movieyear = movie['info']['year']
+ if quality['custom']['3d']==1:
+ threeD= True
+ else:
+ threeD=False
+ if moviequality in ("720p","1080p","bd50","2160p"):
+ cat1='39'
+ cat2='49'
+ minSize = 2000
+ elif moviequality in ("dvdr"):
+ cat1='23'
+ cat2='48'
+ minSize = 3000
+ else:
+ cat1='6'
+ cat2='27'
+ minSize = 500
+
+ for MovieTitle in MovieTitles:
+ try:
+ TitleStringReal = str(MovieTitle.encode("latin-1").replace('-',' '))
+ except:
+ continue
+ if threeD:
+ TitleStringReal = TitleStringReal + ' 3d'
+ data = 'chkInit=1&edTitre='+simplifyString(unicode(TitleStringReal,"latin-1"))+'&chkTitre=on&chkFichier=on&chkCat=on&cats%5B%5D='+cat1+'&cats%5B%5D='+cat2+'&edAge=&edYear='
+ try:
+ soup = BeautifulSoup( urllib2.urlopen(self.urls['search'], data) )
+ except Exception, e:
+ log.error(u"Error trying to load BinNewz response: "+e)
+ return []
+
+ tables = soup.findAll("table", id="tabliste")
+ for table in tables:
+
+ rows = table.findAll("tr")
+ for row in rows:
+
+ cells = row.select("> td")
+ if (len(cells) < 11):
+ continue
+
+ name = cells[2].text.strip()
+ testname=namer_check.correctName(name,movie)
+ if testname==0:
+ continue
+ language = cells[3].find("img").get("src")
+
+ if not "_fr" in language and not "_frq" in language:
+ continue
+
+ detectedlang=''
+
+ if "_fr" in language:
+ detectedlang=' truefrench '
+ else:
+ detectedlang=' french '
+
+
+ # blacklist_groups = [ "alt.binaries.multimedia" ]
+ blacklist_groups = []
+
+ newgroupLink = cells[4].find("a")
+ newsgroup = None
+ if newgroupLink.contents:
+ newsgroup = newgroupLink.contents[0]
+ if newsgroup == "abmulti":
+ newsgroup = "alt.binaries.multimedia"
+ elif newsgroup == "ab.moovee":
+ newsgroup = "alt.binaries.moovee"
+ elif newsgroup == "abtvseries":
+ newsgroup = "alt.binaries.tvseries"
+ elif newsgroup == "abtv":
+ newsgroup = "alt.binaries.tv"
+ elif newsgroup == "a.b.teevee":
+ newsgroup = "alt.binaries.teevee"
+ elif newsgroup == "abstvdivxf":
+ newsgroup = "alt.binaries.series.tv.divx.french"
+ elif newsgroup == "abhdtvx264fr":
+ newsgroup = "alt.binaries.hdtv.x264.french"
+ elif newsgroup == "abmom":
+ newsgroup = "alt.binaries.mom"
+ elif newsgroup == "abhdtv":
+ newsgroup = "alt.binaries.hdtv"
+ elif newsgroup == "abboneless":
+ newsgroup = "alt.binaries.boneless"
+ elif newsgroup == "abhdtvf":
+ newsgroup = "alt.binaries.hdtv.french"
+ elif newsgroup == "abhdtvx264":
+ newsgroup = "alt.binaries.hdtv.x264"
+ elif newsgroup == "absuperman":
+ newsgroup = "alt.binaries.superman"
+ elif newsgroup == "abechangeweb":
+ newsgroup = "alt.binaries.echange-web"
+ elif newsgroup == "abmdfvost":
+ newsgroup = "alt.binaries.movies.divx.french.vost"
+ elif newsgroup == "abdvdr":
+ newsgroup = "alt.binaries.dvdr"
+ elif newsgroup == "abmzeromov":
+ newsgroup = "alt.binaries.movies.zeromovies"
+ elif newsgroup == "abcfaf":
+ newsgroup = "alt.binaries.cartoons.french.animes-fansub"
+ elif newsgroup == "abcfrench":
+ newsgroup = "alt.binaries.cartoons.french"
+ elif newsgroup == "abgougouland":
+ newsgroup = "alt.binaries.gougouland"
+ elif newsgroup == "abroger":
+ newsgroup = "alt.binaries.roger"
+ elif newsgroup == "abtatu":
+ newsgroup = "alt.binaries.tatu"
+ elif newsgroup =="abstvf":
+ newsgroup = "alt.binaries.series.tv.french"
+ elif newsgroup =="abmdfreposts":
+ newsgroup="alt.binaries.movies.divx.french.reposts"
+ elif newsgroup =="abmdf":
+ newsgroup="alt.binaries.movies.french"
+ elif newsgroup =="abhdtvfrepost":
+ newsgroup="alt.binaries.hdtv.french.repost"
+ elif newsgroup == "abmmkv":
+ newsgroup = "alt.binaries.movies.mkv"
+ elif newsgroup == "abf-tv":
+ newsgroup = "alt.binaries.french-tv"
+ elif newsgroup == "abmdfo":
+ newsgroup = "alt.binaries.movies.divx.french.old"
+ elif newsgroup == "abmf":
+ newsgroup = "alt.binaries.movies.french"
+ elif newsgroup == "ab.movies":
+ newsgroup = "alt.binaries.movies"
+ elif newsgroup == "a.b.french":
+ newsgroup = "alt.binaries.french"
+ elif newsgroup == "a.b.3d":
+ newsgroup = "alt.binaries.3d"
+ elif newsgroup == "ab.dvdrip":
+ newsgroup = "alt.binaries.dvdrip"
+ elif newsgroup == "ab.welovelori":
+ newsgroup = "alt.binaries.welovelori"
+ elif newsgroup == "abblu-ray":
+ newsgroup = "alt.binaries.blu-ray"
+ elif newsgroup == "ab.bloaf":
+ newsgroup = "alt.binaries.bloaf"
+ elif newsgroup == "ab.hdtv.german":
+ newsgroup = "alt.binaries.hdtv.german"
+ elif newsgroup == "abmd":
+ newsgroup = "alt.binaries.movies.divx"
+ elif newsgroup == "ab.ath":
+ newsgroup = "alt.binaries.ath"
+ elif newsgroup == "a.b.town":
+ newsgroup = "alt.binaries.town"
+ elif newsgroup == "a.b.u-4all":
+ newsgroup = "alt.binaries.u-4all"
+ elif newsgroup == "ab.amazing":
+ newsgroup = "alt.binaries.amazing"
+ elif newsgroup == "ab.astronomy":
+ newsgroup = "alt.binaries.astronomy"
+ elif newsgroup == "ab.nospam.cheer":
+ newsgroup = "alt.binaries.nospam.cheerleaders"
+ elif newsgroup == "ab.worms":
+ newsgroup = "alt.binaries.worms"
+ elif newsgroup == "abcores":
+ newsgroup = "alt.binaries.cores"
+ elif newsgroup == "abdvdclassics":
+ newsgroup = "alt.binaries.dvd.classics"
+ elif newsgroup == "abdvdf":
+ newsgroup = "alt.binaries.dvd.french"
+ elif newsgroup == "abdvds":
+ newsgroup = "alt.binaries.dvds"
+ elif newsgroup == "abmdfrance":
+ newsgroup = "alt.binaries.movies.divx.france"
+ elif newsgroup == "abmisc":
+ newsgroup = "alt.binaries.misc"
+ elif newsgroup == "abnl":
+ newsgroup = "alt.binaries.nl"
+ elif newsgroup == "abx":
+ newsgroup = "alt.binaries.x"
+ elif newsgroup == "ab.new-movies":
+ newsgroup = "alt.binaries.new-movies"
+ elif newsgroup == "ab.triballs":
+ newsgroup = "alt.binaries.triballs"
+ elif newsgroup == "abdivxf":
+ newsgroup = "alt.binaries.divx.french"
+ elif newsgroup == "ab.solar-xl":
+ newsgroup = "alt.binaries.solar-xl"
+ elif newsgroup == "abbig":
+ newsgroup = "alt.binaries.big"
+ elif newsgroup == "ab.insiderz":
+ newsgroup = "alt.binaries.insiderz"
+ elif newsgroup == "abwarez":
+ newsgroup = "alt.binaries.warez"
+ elif newsgroup == "abdvd":
+ newsgroup = "alt.binaries.dvd"
+ elif newsgroup == "abdvd9":
+ newsgroup = "alt.binaries.dvd9"
+ elif newsgroup == "absvcdf":
+ newsgroup = "alt.binaries.svcd.french"
+ elif newsgroup == "ab.ftd":
+ newsgroup = "alt.binaries.ftd"
+ elif newsgroup == "ab.u-4all":
+ newsgroup = "alt.binaries.u-4all"
+ elif newsgroup == "a.b.u4all":
+ newsgroup = "alt.binaries.u-4all"
+ else:
+ log.error(u"Unknown binnewz newsgroup: " + newsgroup)
+ continue
+
+ if newsgroup in blacklist_groups:
+ log.error(u"Ignoring result, newsgroup is blacklisted: " + newsgroup)
+ continue
+
+ filename = cells[5].contents[0]
+
+ m = re.search("^(.+)\s+{(.*)}$", name)
+ qualityStr = ""
+ if m:
+ name = m.group(1)
+ qualityStr = m.group(2)
+
+ m = re.search("^(.+)\s+\[(.*)\]$", name)
+ source = None
+ if m:
+ name = m.group(1)
+ source = m.group(2)
+
+ m = re.search("(.+)\(([0-9]{4})\)", name)
+ year = ""
+ if m:
+ name = m.group(1)
+ year = m.group(2)
+ if int(year) > movieyear + 1 or int(year) < movieyear - 1:
+ continue
+
+ m = re.search("(.+)\((\d{2}/\d{2}/\d{4})\)", name)
+ dateStr = ""
+ if m:
+ name = m.group(1)
+ dateStr = m.group(2)
+ year = dateStr[-5:].strip(")").strip("/")
+
+ m = re.search("(.+)\s+S(\d{2})\s+E(\d{2})(.*)", name)
+ if m:
+ name = m.group(1) + " S" + m.group(2) + "E" + m.group(3) + m.group(4)
+
+ m = re.search("(.+)\s+S(\d{2})\s+Ep(\d{2})(.*)", name)
+ if m:
+ name = m.group(1) + " S" + m.group(2) + "E" + m.group(3) + m.group(4)
+
+ filenameLower = filename.lower()
+ searchItems = []
+ if qualityStr=="":
+ if source in ("Blu Ray-Rip", "HD DVD-Rip"):
+ qualityStr="brrip"
+ elif source =="Blu Ray-Rip 4K":
+ qualityStr="2160p"
+ elif source =="DVDRip":
+ qualityStr="dvdrip"
+ elif source == "TS":
+ qualityStr ="ts"
+ elif source == "DVDSCR":
+ qualityStr ="scr"
+ elif source == "CAM":
+ qualityStr ="cam"
+ elif moviequality == "dvdr":
+ qualityStr ="dvdr"
+ if year =='':
+ year = '1900'
+ if len(searchItems) == 0 and qualityStr == str(moviequality):
+ searchItems.append( filename )
+ for searchItem in searchItems:
+ resultno=1
+ for downloader in nzbDownloaders:
+
+ log.info("Searching for download : " + name + ", search string = "+ searchItem + " on " + downloader.__class__.__name__)
+ try:
+ binsearch_result = downloader.search(searchItem, minSize, newsgroup )
+ if binsearch_result:
+ new={}
+
+ def extra_check(item):
+ return True
+ qualitytag=''
+ if qualityStr.lower() in ['720p','1080p','2160p']:
+ qualitytag=' hd x264 h264 '
+ elif qualityStr.lower() in ['dvdrip']:
+ qualitytag=' dvd xvid '
+ elif qualityStr.lower() in ['brrip']:
+ qualitytag=' hdrip '
+ elif qualityStr.lower() in ['ts']:
+ qualitytag=' webrip '
+ elif qualityStr.lower() in ['scr']:
+ qualitytag=''
+ elif qualityStr.lower() in ['dvdr']:
+ qualitytag=' pal video_ts '
+ new['id'] = binsearch_result.nzbid
+ new['name'] = name + detectedlang + qualityStr + qualitytag + downloader.__class__.__name__
+ new['url'] = binsearch_result.nzburl
+ new['detail_url'] = binsearch_result.refererURL
+ new['size'] = binsearch_result.sizeInMegs
+ new['age'] = binsearch_result.age
+ new['extra_check'] = extra_check
+
+ results.append(new)
+
+ resultno=resultno+1
+ log.info("Found : " + searchItem + " on " + downloader.__class__.__name__)
+ if resultno==3:
+ break
+ except Exception, e:
+ log.error("Searching from " + downloader.__class__.__name__ + " failed : " + str(e) + traceback.format_exc())
+
+ def download(self, url = '', nzb_id = ''):
+ if 'binsearch' in url:
+ data = {
+ 'action': 'nzb',
+ nzb_id: 'on'
+ }
+ try:
+ return self.urlopen(url, data = data, show_error = False)
+ except:
+ log.error('Failed getting nzb from %s: %s', (self.getName(), traceback.format_exc()))
+ return 'try_next'
+ else:
+ values = {
+ 'url' : '/'
+ }
+ data_tmp = urllib.urlencode(values)
+ req = urllib2.Request(url, data_tmp )
+
+ try:
+ #log.error('Failed downloading from %s', self.getName())
+ return urllib2.urlopen(req).read()
+ except:
+ log.error('Failed downloading from %s: %s', (self.getName(), traceback.format_exc()))
+
+ return 'try_next'
+config = [{
+ 'name': 'binnewz',
+ 'groups': [
+ {
+ 'tab': 'searcher',
+ 'list': 'nzb_providers',
+ 'name': 'binnewz',
+ 'description': 'Free provider, lots of french nzbs. See binnewz ',
+ 'wizard': True,
+ 'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAABHNCSVQICAgIfAhkiAAAAgRJREFUOI1t009rVFcYx/HPuffOTGYmMcZoEmNUkiJRSZRAC1ropuimuy6KuHHhShe+EF+CL8AX4LpQCgoiohhMMKKMqHRTtaJJ5k8nudfFnBkjzoEf5zk8PN/zO3+egFGMYX+MS9hFG604d/A/ulG7yFFkqOGgcuUuSJK32q0NPMMaNrE9RC10UxzCedX6767cqDu2MGV8YlFz62ed9iWVkYvy/IyimEUSFaKD3QwV7ENwapmlHymVU5126tNHVh9MW3s8bfXhOW8b16TpliR5otW8jm6GHiSEYOYoF076Zjx6x29/8OHfssZzNp6Ou3XzF8zicxYtZWBislfUKL4CFgIvd5mcYuowed7PjKOSGTYWwiAsij6srChmJI058Q6qyIYD9jgIIQzWxXygPtZPpUj6gGJv/V4HGoViPsLWt77bK9P7FDtg8zPr21RrX48wT3g11OcA0MG2oii8aXB4jiInK5FmSAcOGBUawwFvtFuJO7dpbLBynuM/UK0Jn0YolXtqNfn4vl/bRZ7pfcsXdrqX3f/rhgd/L+m0J8zMdZ1eKTn7U7C4zNg+yhX+ed2/syZ2AkZQ12umSRyI8wpOqdaXdTszRmocOR5Mz2bu/ZnL81/xIsTnyFCOsKpeg9ViPBo1jxMq1UVpEjS3r+K/Pe81aJQ0qhShlQiuxPxOtL+J1heOZZ0e63LUQAAAAABJRU5ErkJggg==',
+ 'options': [
+ {
+ 'name': 'enabled',
+ 'type': 'enabler',
+ 'default': False,
+ },
+ {
+ 'name': 'extra_score',
+ 'advanced': True,
+ 'label': 'Extra Score',
+ 'type': 'int',
+ 'default': 0,
+ 'description': 'Starting score for each release found via this provider.',
+ }
+ ],
+ },
+ ],
+}]
diff --git a/couchpotato/core/media/_base/providers/nzb/binnewz/nzbclub.py b/couchpotato/core/media/_base/providers/nzb/binnewz/nzbclub.py
new file mode 100644
index 0000000000..dcad4e77e9
--- /dev/null
+++ b/couchpotato/core/media/_base/providers/nzb/binnewz/nzbclub.py
@@ -0,0 +1,64 @@
+from bs4 import BeautifulSoup
+from nzbdownloader import NZBDownloader
+from nzbdownloader import NZBGetURLSearchResult
+from couchpotato.core.helpers.encoding import toUnicode,tryUrlencode
+from couchpotato.core.helpers.variable import tryInt
+from couchpotato.core.logger import CPLog
+from couchpotato.core.helpers.rss import RSS
+from couchpotato.core.media._base.providers.nzb.base import NZBProvider
+from dateutil.parser import parse
+import time
+log = CPLog(__name__)
+class NZBClub(NZBDownloader, NZBProvider, RSS):
+    """NZBClub RSS search; wraps the first feed entry in a
+    NZBGetURLSearchResult."""
+
+    urls = {
+        'search': 'https://www.nzbclub.com/nzbrss.aspx?%s',
+    }
+
+    http_time_between_calls = 4 #seconds
+
+    def search(self, filename, minSize, newsgroup=None):
+
+        q = filename
+
+        params = tryUrlencode({
+            'q': q,
+            'qq': newsgroup,
+            'ig': 1,
+            'rpp': 200,
+            'st': 5,
+            'sp': 1,
+            'ns': 1,
+        })
+
+        nzbs = self.getRSSData(self.urls['search'] % params)
+
+        # NOTE(review): the `return` below sits inside the loop body, so only
+        # the first RSS entry is ever considered.
+        for nzb in nzbs:
+
+            nzbclub_id = tryInt(self.getTextElement(nzb, "link").split('/nzb_view/')[1].split('/')[0])
+            enclosure = self.getElement(nzb, "enclosure").attrib
+            size = enclosure['length']
+            date = self.getTextElement(nzb, "pubDate")
+
+            # NOTE(review): dead code - defined but never attached to the
+            # returned result, and its rejection logic is commented out.
+            def extra_check(item):
+                full_description = self.getCache('nzbclub.%s' % nzbclub_id, item['detail_url'], cache_timeout = 25920000)
+
+                for ignored in ['ARCHIVE inside ARCHIVE', 'Incomplete', 'repair impossible']:
+                    if ignored in full_description:
+                        log.info('Wrong: Seems to be passworded or corrupted files: %s', item['name'])
+                        # return False
+
+                #return True
+            nzbid = nzbclub_id
+            #'name': toUnicode(self.getTextElement(nzb, "title")),
+            # Age in days from pubDate; enclosure length is bytes -> MB.
+            age = self.calculateAge(int(time.mktime(parse(date).timetuple())))
+            sizeInMegs = (tryInt(size)/1024/1024)
+            downloadUrl = enclosure['url'].replace(' ', '_')
+            nzbClubURL = self.getTextElement(nzb, "link")
+            #'get_more_info': self.getMoreInfo,
+            #'extra_check': extra_check
+
+
+            return NZBGetURLSearchResult( self, downloadUrl, sizeInMegs, nzbClubURL, age, nzbid)
+
+
\ No newline at end of file
diff --git a/couchpotato/core/media/_base/providers/nzb/binnewz/nzbdownloader.py b/couchpotato/core/media/_base/providers/nzb/binnewz/nzbdownloader.py
new file mode 100644
index 0000000000..687f8e058e
--- /dev/null
+++ b/couchpotato/core/media/_base/providers/nzb/binnewz/nzbdownloader.py
@@ -0,0 +1,83 @@
+import urllib2
+from StringIO import StringIO
+import gzip
+import cookielib
+import time
+
+class NZBDownloader(object):
+
+ def __init__( self ):
+ headers = [
+ ('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.57 Safari/537.17')
+ ]
+ self.cj = cookielib.CookieJar()
+ self.opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.cj),urllib2.HTTPSHandler(), urllib2.HTTPHandler())
+ self.opener.addheaders = headers
+ self.lastRequestTime = None
+
+ def waitBeforeNextRequest(self):
+ if self.lastRequestTime and self.lastRequestTime > ( time.mktime(time.localtime()) - 10):
+ time.sleep( 10 )
+ self.lastRequestTime = time.gmtime()
+
+ def open(self, request):
+ self.waitBeforeNextRequest()
+ return self.opener.open(request)
+
+class NZBSearchResult(object):
+    """Base class for a single NZB search hit; fetches the payload through
+    the downloader that produced it."""
+
+    def __init__(self, downloader, sizeInMegs, refererURL, age, nzbid):
+        self.downloader = downloader
+        self.refererURL = refererURL
+        self.sizeInMegs = sizeInMegs
+        self.age = age
+        self.nzbid = nzbid
+
+    def readRequest(self, request):
+        # Issue the request with a Referer and gzip support, transparently
+        # decompressing a gzipped response body.
+        request.add_header('Accept-encoding', 'gzip')
+        request.add_header('Referer', self.refererURL)
+        request.add_header('Accept-Encoding', 'gzip')
+        request.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.57 Safari/537.17')
+
+        response = self.downloader.open(request)
+        if response.info().get('Content-Encoding') == 'gzip':
+            buf = StringIO( response.read())
+            f = gzip.GzipFile(fileobj=buf)
+            return f.read()
+        else:
+            return response.read()
+
+    def getNZB(self):
+        # Overridden by subclasses to return the NZB payload.
+        pass
+
+class NZBGetURLSearchResult( NZBSearchResult ):
+    """Search result whose NZB is fetched with a plain GET of `nzburl`."""
+
+    def __init__(self, downloader, nzburl, sizeInMegs, refererURL, age, nzbid):
+        NZBSearchResult.__init__(self, downloader, sizeInMegs, refererURL, age, nzbid)
+        self.nzburl = nzburl
+
+    def getNZB(self):
+        request = urllib2.Request( self.nzburl )
+        self.nzbdata = NZBSearchResult.readRequest( self, request )
+        return self.nzbdata
+
+class NZBPostURLSearchResult( NZBSearchResult ):
+    """Search result whose NZB is fetched by POSTing `postData` to `nzburl`."""
+
+    def __init__(self, downloader, nzburl, postData, sizeInMegs, refererURL, age, nzbid):
+        NZBSearchResult.__init__(self, downloader, sizeInMegs, refererURL, age, nzbid)
+        self.nzburl = nzburl
+        self.postData = postData
+
+    def getNZB(self):
+        # Passing a body to urllib2.Request makes this a POST.
+        request = urllib2.Request( self.nzburl, self.postData )
+        self.nzbdata = NZBSearchResult.readRequest( self, request )
+        return self.nzbdata
+
+class NZBDataSearchResult( NZBSearchResult ):
+
+ def __init__(self, nzbdata, sizeInMegs, refererURL, age, nzbid):
+ NZBSearchResult.__init__(self, None, refererURL, age, nzbid)
+ self.nzbdata = nzbdata
+
+ def getNZB(self):
+ return self.nzbdata
+
\ No newline at end of file
diff --git a/couchpotato/core/media/_base/providers/nzb/binnewz/nzbindex.py b/couchpotato/core/media/_base/providers/nzb/binnewz/nzbindex.py
new file mode 100644
index 0000000000..ea72c00af1
--- /dev/null
+++ b/couchpotato/core/media/_base/providers/nzb/binnewz/nzbindex.py
@@ -0,0 +1,51 @@
+from bs4 import BeautifulSoup
+from nzbdownloader import NZBDownloader, NZBGetURLSearchResult
+from couchpotato.core.helpers.rss import RSS
+from couchpotato.core.helpers.encoding import toUnicode, tryUrlencode
+from couchpotato.core.helpers.variable import tryInt
+from couchpotato.core.logger import CPLog
+from couchpotato.core.media._base.providers.nzb.base import NZBProvider
+from couchpotato.environment import Env
+from dateutil.parser import parse
+import urllib
+import time
+log = CPLog(__name__)
+
+class NZBIndex(NZBDownloader,NZBProvider, RSS):
+    """NZBIndex RSS search; wraps a feed entry in a NZBGetURLSearchResult."""
+
+    urls = {
+        'download': 'https://www.nzbindex.nl/download/',
+        'search': 'http://www.nzbindex.com/rss/?%s',
+    }
+
+    http_time_between_calls = 5 # Seconds
+
+    def search(self, filename, minSize, newsgroup=None):
+
+        q = filename
+        arguments = tryUrlencode({
+            'q': q,
+            'age': Env.setting('retention', 'nzb'),
+            'sort': 'agedesc',
+            'minsize': minSize,
+            'rating': 1,
+            'max': 250,
+            'more': 1,
+            'complete': 1,
+        })
+        nzbs = self.getRSSData(self.urls['search'] % arguments)
+        nzbid = None
+        # NOTE(review): each iteration overwrites the previous values, so the
+        # result returned after the loop is built from the *last* feed entry.
+        for nzb in nzbs:
+
+            enclosure = self.getElement(nzb, 'enclosure').attrib
+            nzbindex_id = int(self.getTextElement(nzb, "link").split('/')[4])
+
+
+            nzbid = nzbindex_id
+            # Age in days from pubDate; enclosure length is bytes -> MB.
+            age = self.calculateAge(int(time.mktime(parse(self.getTextElement(nzb, "pubDate")).timetuple())))
+            sizeInMegs = tryInt(enclosure['length']) / 1024 / 1024
+            downloadUrl = enclosure['url']
+            detailURL = enclosure['url'].replace('/download/', '/release/')
+
+        if nzbid:
+            return NZBGetURLSearchResult(self, downloadUrl, sizeInMegs, detailURL, age, nzbid)
diff --git a/couchpotato/core/media/_base/providers/nzb/binsearch.py b/couchpotato/core/media/_base/providers/nzb/binsearch.py
new file mode 100644
index 0000000000..6b798840c1
--- /dev/null
+++ b/couchpotato/core/media/_base/providers/nzb/binsearch.py
@@ -0,0 +1,120 @@
+import re
+import traceback
+
+from bs4 import BeautifulSoup
+from couchpotato.core.helpers.variable import tryInt, simplifyString
+from couchpotato.core.logger import CPLog
+from couchpotato.core.media._base.providers.nzb.base import NZBProvider
+
+
+log = CPLog(__name__)
+
+
+class Base(NZBProvider):
+
+ urls = {
+ 'download': 'https://www.binsearch.info/fcgi/nzb.fcgi?q=%s',
+ 'detail': 'https://www.binsearch.info%s',
+ 'search': 'https://www.binsearch.info/index.php?%s',
+ }
+
+ http_time_between_calls = 4 # Seconds
+
+ def _search(self, media, quality, results):
+
+ data = self.getHTMLData(self.urls['search'] % self.buildUrl(media, quality))
+
+ if data:
+ try:
+
+ html = BeautifulSoup(data)
+ main_table = html.find('table', attrs = {'id': 'r2'})
+
+ if not main_table:
+ return
+
+ items = main_table.find_all('tr')
+
+ for row in items:
+ title = row.find('span', attrs = {'class': 's'})
+
+ if not title: continue
+
+ nzb_id = row.find('input', attrs = {'type': 'checkbox'})['name']
+ info = row.find('span', attrs = {'class':'d'})
+ size_match = re.search('size:.(?P[0-9\.]+.[GMB]+)', info.text)
+
+ age = 0
+ try: age = re.search('(?P\d+d)', row.find_all('td')[-1:][0].text).group('size')[:-1]
+ except: pass
+
+ def extra_check(item):
+ parts = re.search('available:.(?P\d+)./.(?P\d+)', info.text)
+ total = float(tryInt(parts.group('total')))
+ parts = float(tryInt(parts.group('parts')))
+
+ if (total / parts) < 1 and ((total / parts) < 0.95 or ((total / parts) >= 0.95 and not ('par2' in info.text.lower() or 'pa3' in info.text.lower()))):
+ log.info2('Wrong: \'%s\', not complete: %s out of %s', (item['name'], parts, total))
+ return False
+
+ if 'requires password' in info.text.lower():
+ log.info2('Wrong: \'%s\', passworded', (item['name']))
+ return False
+
+ return True
+
+ results.append({
+ 'id': nzb_id,
+ 'name': simplifyString(title.text),
+ 'age': tryInt(age),
+ 'size': self.parseSize(size_match.group('size')),
+ 'url': self.urls['download'] % nzb_id,
+ 'detail_url': self.urls['detail'] % info.find('a')['href'],
+ 'extra_check': extra_check
+ })
+
+ except:
+ log.error('Failed to parse HTML response from BinSearch: %s', traceback.format_exc())
+
+ def download(self, url = '', nzb_id = ''):
+
+ data = {
+ 'action': 'nzb',
+ nzb_id: 'on'
+ }
+
+ try:
+ return self.urlopen(url, data = data, show_error = False)
+ except:
+ log.error('Failed getting nzb from %s: %s', (self.getName(), traceback.format_exc()))
+
+ return 'try_next'
+
+
+config = [{
+ 'name': 'binsearch',
+ 'groups': [
+ {
+ 'tab': 'searcher',
+ 'list': 'nzb_providers',
+ 'name': 'binsearch',
+ 'description': 'Free provider, less accurate. See BinSearch ',
+ 'wizard': True,
+ 'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAAAAAA6mKC9AAAATklEQVQY02NwQAMMWAXOnz+PKvD//3/CAvM//z+fgiwAAs+RBab4PP//vwbFjPlAffgEChzOo2r5fBuIfRAC5w8D+QUofkkp8MHjOWQAAM3Sbogztg2wAAAAAElFTkSuQmCC',
+ 'options': [
+ {
+ 'name': 'enabled',
+ 'type': 'enabler',
+ },
+ {
+ 'name': 'extra_score',
+ 'advanced': True,
+ 'label': 'Extra Score',
+ 'type': 'int',
+ 'default': 0,
+ 'description': 'Starting score for each release found via this provider.',
+ }
+ ],
+ },
+ ],
+}]
diff --git a/couchpotato/core/media/_base/providers/nzb/newznab.py b/couchpotato/core/media/_base/providers/nzb/newznab.py
new file mode 100644
index 0000000000..0f28db8b65
--- /dev/null
+++ b/couchpotato/core/media/_base/providers/nzb/newznab.py
@@ -0,0 +1,291 @@
+from urlparse import urlparse
+import time
+import traceback
+import re
+
+from couchpotato.core.helpers.encoding import tryUrlencode, toUnicode
+from couchpotato.core.helpers.rss import RSS
+from couchpotato.core.helpers.variable import cleanHost, splitString, tryInt
+from couchpotato.core.logger import CPLog
+from couchpotato.core.media._base.providers.base import ResultList
+from couchpotato.core.media._base.providers.nzb.base import NZBProvider
+from couchpotato.environment import Env
+from dateutil.parser import parse
+from requests import HTTPError
+
+
+log = CPLog(__name__)
+
+
class Base(NZBProvider, RSS):
    """Base for Newznab-API compatible NZB providers.

    One CouchPotato "newznab" entry fans out over several user-configured
    hosts; each host is queried through its RSS API and the results are
    merged into a single ResultList.
    """

    urls = {
        'detail': 'details/%s',
        'download': 't=get&id=%s'
    }

    # Keywords (English/Dutch) used to find passwords in spot descriptions.
    passwords_regex = 'password|wachtwoord'

    # hostname -> time.time() when its API/download limit was hit,
    # reset to False once a download succeeds again.
    limits_reached = {}

    http_time_between_calls = 2  # Seconds

    def search(self, media, quality):
        """Search every enabled host and return the merged result list."""
        hosts = self.getHosts()

        results = ResultList(self, media, quality, imdb_results = True)

        for host in hosts:
            if self.isDisabled(host):
                continue

            self._searchOnHost(host, media, quality, results)

        return results

    def _searchOnHost(self, host, media, quality, results):
        """Query one host's API and append parsed releases to `results`."""

        query = self.buildUrl(media, host)
        url = '%s%s' % (self.getUrl(host['host']), query)
        nzbs = self.getRSSData(url, cache_timeout = 1800, headers = {'User-Agent': Env.getIdentifier()})

        for nzb in nzbs:

            date = None
            spotter = None
            for item in nzb:
                if date and spotter:
                    break
                if item.attrib.get('name') == 'usenetdate':
                    date = item.attrib.get('value')
                    break

                # Get the name of the person who posts the spot
                if item.attrib.get('name') == 'poster':
                    if "@spot.net" in item.attrib.get('value'):
                        spotter = item.attrib.get('value').split("@")[0]
                        continue

            if not date:
                date = self.getTextElement(nzb, 'pubDate')

            name = self.getTextElement(nzb, 'title')
            detail_url = self.getTextElement(nzb, 'guid')
            nzb_id = detail_url.split('/')[-1:].pop()

            try:
                link = self.getElement(nzb, 'enclosure').attrib['url']
            except:
                link = self.getTextElement(nzb, 'link')

            # Some hosts only return a bare id in <guid>; rebuild full urls.
            if '://' not in detail_url:
                detail_url = (cleanHost(host['host']) + self.urls['detail']) % tryUrlencode(nzb_id)

            if not link:
                link = ((self.getUrl(host['host']) + self.urls['download']) % tryUrlencode(nzb_id)) + self.getApiExt(host)

            if not name:
                continue

            name_extra = ''
            if spotter:
                name_extra = spotter

            description = ''
            if "@spot.net" in nzb_id:
                try:
                    # Get details for extended description to retrieve passwords
                    query = self.buildDetailsUrl(nzb_id, host['api_key'])
                    url = '%s%s' % (self.getUrl(host['host']), query)
                    nzb_details = self.getRSSData(url, cache_timeout = 1800, headers = {'User-Agent': Env.getIdentifier()})[0]

                    description = self.getTextElement(nzb_details, 'description')

                    # Extract a password from the description. The terminator
                    # is wrapped in a non-capturing group: with a bare
                    # `(.*?)\ |\n|$` the top-level alternation made the regex
                    # match any newline in the description first, so the
                    # password group was almost never captured.
                    password = re.search('(?:' + self.passwords_regex + ')(?: *)(?:\:|\=)(?: *)(.*?)(?:\ |\n|$)', description, flags = re.I).group(1)
                    if password:
                        name += ' {{%s}}' % password.strip()
                except:
                    log.debug('Error getting details of "%s": %s', (name, traceback.format_exc()))

            results.append({
                'id': nzb_id,
                'provider_extra': urlparse(host['host']).hostname or host['host'],
                'name': toUnicode(name),
                'name_extra': name_extra,
                'age': self.calculateAge(int(time.mktime(parse(date).timetuple()))),
                'size': int(self.getElement(nzb, 'enclosure').attrib['length']) / 1024 / 1024,
                'url': link,
                'detail_url': detail_url,
                'content': self.getTextElement(nzb, 'description'),
                'description': description,
                'score': host['extra_score'],
            })

    def getHosts(self):
        """Build one settings dict per configured host.

        All per-host settings are stored as comma-separated, position-aligned
        strings; missing entries fall back to safe defaults.
        """

        uses = splitString(str(self.conf('use')), clean = False)
        hosts = splitString(self.conf('host'), clean = False)
        api_keys = splitString(self.conf('api_key'), clean = False)
        extra_score = splitString(self.conf('extra_score'), clean = False)
        custom_tags = splitString(self.conf('custom_tag'), clean = False)
        custom_categories = splitString(self.conf('custom_categories'), clean = False)

        # Renamed from `list` to avoid shadowing the builtin.
        host_list = []
        for nr in range(len(hosts)):

            try: key = api_keys[nr]
            except: key = ''

            try: host = hosts[nr]
            except: host = ''

            try: score = tryInt(extra_score[nr])
            except: score = 0

            try: custom_tag = custom_tags[nr]
            except: custom_tag = ''

            try: custom_category = custom_categories[nr].replace(" ", ",")
            except: custom_category = ''

            host_list.append({
                'use': uses[nr],
                'host': host,
                'api_key': key,
                'extra_score': score,
                'custom_tag': custom_tag,
                'custom_category' : custom_category
            })

        return host_list

    def belongsTo(self, url, provider = None, host = None):
        """Return truthy when `url` belongs to any configured host."""

        hosts = self.getHosts()

        for host in hosts:
            result = super(Base, self).belongsTo(url, host = host['host'], provider = provider)
            if result:
                return result

    def getUrl(self, host):
        """API base url; Spotweb installs use '?page=newznabapi' instead of '/api?'."""
        if '?page=newznabapi' in host:
            return cleanHost(host)[:-1] + '&'

        return cleanHost(host) + 'api?'

    def isDisabled(self, host = None):
        return not self.isEnabled(host)

    def isEnabled(self, host = None):
        """Check whether a specific host (or, when None, any host) is usable."""

        # Return true if at least one is enabled and no host is given
        if host is None:
            for host in self.getHosts():
                if self.isEnabled(host):
                    return True
            return False

        return NZBProvider.isEnabled(self) and host['host'] and host['api_key'] and int(host['use'])

    def getApiExt(self, host):
        """Query-string suffix carrying the host's api key."""
        return '&apikey=%s' % host['api_key']

    def download(self, url = '', nzb_id = ''):
        """Download an NZB, backing off for 3 hours once a host hits its limit."""
        host = urlparse(url).hostname

        if self.limits_reached.get(host):
            # Try again in 3 hours
            if self.limits_reached[host] > time.time() - 10800:
                return 'try_next'

        try:
            data = self.urlopen(url, show_error = False, headers = {'User-Agent': Env.getIdentifier()})
            self.limits_reached[host] = False
            return data
        except HTTPError as e:
            sc = e.response.status_code
            if sc in [503, 429]:
                # requests' HTTPError has no read(); inspect the response body
                # via e.response (the old `e.read()` raised AttributeError
                # here, which escaped this handler entirely).
                response = e.response.text.lower()
                if sc == 429 or 'maximum api' in response or 'download limit' in response:
                    if not self.limits_reached.get(host):
                        log.error('Limit reached / too many requests for newznab provider: %s', host)
                    self.limits_reached[host] = time.time()
                    return 'try_next'

            log.error('Failed download from %s: %s', (host, traceback.format_exc()))

        return 'try_next'

    def buildDetailsUrl(self, nzb_id, api_key):
        """Query string for the 't=details' API call of a single spot."""
        query = tryUrlencode({
            't': 'details',
            'id': nzb_id,
            'apikey': api_key,
        })
        return query
+
+
+
# Settings UI definition. One "newznab" entry drives several hosts: every
# per-host field below ('use', 'host', 'api_key', ...) is a comma-separated,
# position-aligned list combined into a single widget via 'combined'.
config = [{
    'name': 'newznab',
    'groups': [
        {
            'tab': 'searcher',
            'list': 'nzb_providers',
            'name': 'newznab',
            'order': 10,
            'description': 'Enable NewzNab such as NZB.su , \
                        NZBs.org , DOGnzb.cr , \
                        Spotweb , NZBGeek , \
                        NZBFinder , Usenet-Crawler ',
            'wizard': True,
            'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQAgMAAABinRfyAAAACVBMVEVjhwD///86aRovd/sBAAAAMklEQVQI12NgAIPQUCCRmQkjssDEShiRuRIqwZqZGcDAGBrqANUhGgIkWAOABKMDxCAA24UK50b26SAAAAAASUVORK5CYII=',
            'options': [
                {
                    'name': 'enabled',
                    'type': 'enabler',
                    'default': True,
                },
                {
                    'name': 'use',
                    'default': '0,0,0,0,0,0'
                },
                {
                    'name': 'host',
                    'default': 'api.nzb.su,api.dognzb.cr,nzbs.org,https://api.nzbgeek.info,https://www.nzbfinder.ws,https://www.usenet-crawler.com',
                    'description': 'The hostname of your newznab provider',
                },
                {
                    'name': 'extra_score',
                    'advanced': True,
                    'label': 'Extra Score',
                    'default': '0,0,0,0,0,0',
                    'description': 'Starting score for each release found via this provider.',
                },
                {
                    'name': 'custom_tag',
                    'advanced': True,
                    'label': 'Custom tag',
                    'default': ',,,,,',
                    'description': 'Add custom tags, for example add rls=1 to get only scene releases from nzbs.org',
                },
                {
                    'name': 'custom_categories',
                    'advanced': True,
                    'label': 'Custom Categories',
                    'default': '2000,2000,2000,2000,2000,2000',
                    'description': 'Specify categories to search in seperated by a single space, defaults to all movies. EG: "2030 2040 2060" would only search in HD, SD, and 3D movie categories',
                },
                {
                    'name': 'api_key',
                    'default': ',,,,,',
                    'label': 'Api Key',
                    'description': 'Can be found on your profile page',
                    'type': 'combined',
                    'combine': ['use', 'host', 'api_key', 'extra_score', 'custom_tag'],
                },
            ],
        },
    ],
}]
diff --git a/couchpotato/core/media/_base/providers/nzb/nzbclub.py b/couchpotato/core/media/_base/providers/nzb/nzbclub.py
new file mode 100644
index 0000000000..4e1c843544
--- /dev/null
+++ b/couchpotato/core/media/_base/providers/nzb/nzbclub.py
@@ -0,0 +1,100 @@
+import time
+
+from bs4 import BeautifulSoup
+from couchpotato.core.helpers.encoding import toUnicode
+from couchpotato.core.helpers.rss import RSS
+from couchpotato.core.helpers.variable import tryInt
+from couchpotato.core.logger import CPLog
+from couchpotato.core.media._base.providers.nzb.base import NZBProvider
+from dateutil.parser import parse
+
+
+log = CPLog(__name__)
+
+
class Base(NZBProvider, RSS):
    """RSS search provider for NZBClub (free binary-search site, no account).

    Results come from nzbclub.com's RSS endpoint; quality checks against the
    detail page are performed lazily via the 'extra_check' callback.
    """

    urls = {
        'search': 'https://www.nzbclub.com/nzbrss.aspx?%s',
    }

    http_time_between_calls = 4 # seconds

    def _search(self, media, quality, results):
        """Parse the search RSS feed and append one dict per release."""

        nzbs = self.getRSSData(self.urls['search'] % self.buildUrl(media))

        for nzb in nzbs:

            # The release id is embedded in the detail link path right after
            # '/nzb_view/'.
            nzbclub_id = tryInt(self.getTextElement(nzb, "link").split('/nzb_view/')[1].split('/')[0])
            enclosure = self.getElement(nzb, "enclosure").attrib
            size = enclosure['length']
            date = self.getTextElement(nzb, "pubDate")

            def extra_check(item):
                # Deferred filter run by the searcher: fetch the (long-cached)
                # detail page and reject posts that look corrupt or passworded.
                full_description = self.getCache('nzbclub.%s' % nzbclub_id, item['detail_url'], cache_timeout = 25920000)

                for ignored in ['ARCHIVE inside ARCHIVE', 'Incomplete', 'repair impossible']:
                    if ignored in full_description:
                        log.info('Wrong: Seems to be passworded or corrupted files: %s', item['name'])
                        return False

                return True

            results.append({
                'id': nzbclub_id,
                'name': toUnicode(self.getTextElement(nzb, "title")),
                'age': self.calculateAge(int(time.mktime(parse(date).timetuple()))),
                'size': tryInt(size) / 1024 / 1024,  # bytes -> MB (integer division)
                'url': enclosure['url'].replace(' ', '_'),
                'detail_url': self.getTextElement(nzb, "link"),
                'get_more_info': self.getMoreInfo,
                'extra_check': extra_check
            })

    def getMoreInfo(self, item):
        """Fill item['description'] with the NFO text scraped from the detail page."""
        full_description = self.getCache('nzbclub.%s' % item['id'], item['detail_url'], cache_timeout = 25920000)
        html = BeautifulSoup(full_description)
        nfo_pre = html.find('pre', attrs = {'class': 'nfo'})
        description = toUnicode(nfo_pre.text) if nfo_pre else ''

        item['description'] = description
        return item

    def extraCheck(self, item):
        # NOTE(review): appears superseded by the inner extra_check above,
        # which also rejects 'Incomplete' / 'repair impossible'; confirm
        # whether anything still calls this before removing it.
        full_description = self.getCache('nzbclub.%s' % item['id'], item['detail_url'], cache_timeout = 25920000)

        if 'ARCHIVE inside ARCHIVE' in full_description:
            log.info('Wrong: Seems to be passworded files: %s', item['name'])
            return False

        return True
+
+
# Settings UI definition: registers NZBClub as a free NZB provider on the
# "searcher" tab (enable toggle + extra score only).
config = [{
    'name': 'nzbclub',
    'groups': [
        {
            'tab': 'searcher',
            'list': 'nzb_providers',
            'name': 'NZBClub',
            'description': 'Free provider, less accurate. See NZBClub ',
            'wizard': True,
            'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAACEUlEQVQ4y3VSMWgUQRR9/8/s7OzeJSdnTsVGghLEYBNQjBpQiRBFhIB2EcHG1kbs0murhZAmVocExEZQ0c7CxkLINYcJJpoYj9wZcnu72fF21uJSXMzuhyne58/j/fcf4b+KokgBIOSU53lxP5b9oNVqDT36dH+5UjoiKvIwPFEEgWBshGZ3E7/NOupL9fMjx0e+ZhKsrq+c/FPZKJi0w4FsQXMBDEJsd7BNW9h2tuyP9vfTALIJkMIu1hYRtINM+dpzcWc0sbkreK4fUEogyraAmKGF3+7vcT/wtR9QwkCabSAzQQuvk0uglAo5YaQ5DASGYjfMXcHVOqKu6NmR7iehlKAdHWUqWPv1c3i+9uwVdRlEBGaGEAJCCrDo9ShhvF6qPq8tL57bp+DbRn2sHtUuCY9YphLMu5921VhrwYJ5tbt0tt6sjQP4vEfB2Ikz7/ytwbeR6ljHkXCUA6UcOLtPOg4MYhtH8ZcLw5er+xQMDAwEURRNl96X596Y6oxFwsw9fmtTOAr2Ik19nL365FZpsLSdnQPPM8aYewc+lDcX4rkHqbQMAGTJXulOLzycmr1bKBTi3DOGYagajcahiaOT89fbM0/dxEsUu3aidfPljWO3HzebzYNBELi5Z5RSJlrrHd/3w8lT114MrVTWOn875fHRiYVisRhorWMpZXdvNnLKGCOstb0AMlulVJI19w/+nceU4D0aCwAAAABJRU5ErkJggg==',
            'options': [
                {
                    'name': 'enabled',
                    'type': 'enabler',
                },
                {
                    'name': 'extra_score',
                    'advanced': True,
                    'label': 'Extra Score',
                    'type': 'int',
                    'default': 0,
                    'description': 'Starting score for each release found via this provider.',
                }
            ],
        },
    ],
}]
diff --git a/couchpotato/core/media/_base/providers/nzb/omgwtfnzbs.py b/couchpotato/core/media/_base/providers/nzb/omgwtfnzbs.py
new file mode 100644
index 0000000000..6d4d0a28fc
--- /dev/null
+++ b/couchpotato/core/media/_base/providers/nzb/omgwtfnzbs.py
@@ -0,0 +1,99 @@
+from couchpotato.core.event import fireEvent
+from couchpotato.core.helpers.encoding import toUnicode, tryUrlencode
+from couchpotato.core.helpers.rss import RSS
+from couchpotato.core.helpers.variable import tryInt
+from couchpotato.core.logger import CPLog
+from couchpotato.core.media._base.providers.nzb.base import NZBProvider
+
+
+log = CPLog(__name__)
+
+
class Base(NZBProvider, RSS):
    """JSON API provider for OMGWTFNZBs (account-based NZB indexer)."""

    urls = {
        'search': 'https://api.omgwtfnzbs.me/json/?%s',
    }

    http_time_between_calls = 1  # Seconds

    # (site category ids, CouchPotato quality identifiers) pairs consumed
    # by getCatId(); cat_backup_id is the fallback when nothing matches.
    cat_ids = [
        ([15], ['dvdrip', 'scr', 'r5', 'tc', 'ts', 'cam']),
        ([15, 16], ['brrip']),
        ([16], ['720p', '1080p', 'bd50']),
        ([17], ['dvdr']),
    ]
    cat_backup_id = 'movie'

    def _searchOnTitle(self, title, movie, quality, results):
        """Query the JSON search endpoint for one title and append any hits."""

        search_query = '%s %s' % (title, movie['info']['year'])
        query_string = tryUrlencode({
            'search': search_query,
            'catid': ','.join([str(x) for x in self.getCatId(quality)]),
            'user': self.conf('username', default = ''),
            'api': self.conf('api_key', default = ''),
        })

        # Users may append raw extra querystring parameters (e.g. catid=18).
        custom_tag = self.conf('custom_tag')
        if len(custom_tag) > 0:
            query_string = '%s&%s' % (query_string, custom_tag)

        nzbs = self.getJsonData(self.urls['search'] % query_string)

        # The API returns a JSON list on success; anything else means no results.
        if not isinstance(nzbs, list):
            return

        for nzb in nzbs:
            results.append({
                'id': nzb.get('nzbid'),
                'name': toUnicode(nzb.get('release')),
                'age': self.calculateAge(tryInt(nzb.get('usenetage'))),
                'size': tryInt(nzb.get('sizebytes')) / 1024 / 1024,
                'url': nzb.get('getnzb'),
                'detail_url': nzb.get('details'),
                'description': nzb.get('weblink')
            })
+
+
# Settings UI definition: OMGWTFNZBs needs a username + api key; a raw
# 'custom_tag' querystring fragment can be appended to every search.
config = [{
    'name': 'omgwtfnzbs',
    'groups': [
        {
            'tab': 'searcher',
            'list': 'nzb_providers',
            'name': 'OMGWTFNZBs',
            'description': 'See OMGWTFNZBs ',
            'wizard': True,
            'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQEAIAAADAAbR1AAADbElEQVR4AZ2UW0ybZRiAy/OvdHaLYvB0YTRIFi7GkM44zRLmIfNixkWdiRMyYoxRE8/TC7MYvXCGEBmr3mxLwVMwY0wYA7e6Wso4lB6h/U9taSlMGIfBXLYlJMyo0S///2dJI5lxN8/F2/f9nu9737e/jYmXr6KTbN9BGG9HE/NotQ76UWziNzrXFiETk/5ARUNH+7+0kW7fSgTl0VKGOLZzidOkmuuIo7q2oTArNLPIzhdIkqXkerFOm2CaD/5bcKrjIL2c3fkhPxOq93Kcb91v46fV9TQKF4TgV/TbUsQtzfCaK6jMOd5DJrguSIIhexmqqVxN0FXbRR8/ND/LYTTj6J7nl2gnL47OkDW4KJhnQHCa6JpKVNJGA3OC58nwBJoZ//ebbIyKpBxjrr0o1q1FMRkrKXZnHWF85VvxMrJxibwhGyd0f5bLnKzqJs1k0Sfo+EU8hdAUvkbcwKEgs2D0OiV4jmmD1zb+Tp6er0JMMvDxPo5xev9zTBF683NS+N56n1YiB95B5crr93KRuKhKI0tb0Kw2mgLLqTjLEWO8424i9IvURaYeOckwf3+/yCC9e3bQQ/MuD+Monk0k+XFXMUfx7z5EEP+XlXi5tLlMxH8zLppw7idJrugcus30kC86gc7UrQqjLIukM8zWHOACeU+TiMxXN6ExVOkgz4lvPEzice1GIVhxhG4CrZvpl6TH55giKWqXGLy9hZh5aUtgDSew/msSyCKpl+DDNfxJc8NBIsxUxUnz14O/oONu+IIIvso9TLBQ1SY5rUhuSzUhAqJ2mRXBLDOCeUtgUZXsaObT8BffhUJPqWgiV+3zKKzYH0ClvTRLhD77HIqVkyh5jThnivehoG+qJctIRSPn6bxvO4FCgTl9c1DmbpjLajbQFE8aW5SU3rg+zOPGUjTUF9NFpLEbH2c/KmGYlY69/GQJVtGMSUcEp9eCbB1nctbxHTLRdTUkGDf+B02uGWRG3OvpJ/zSMwzif+oxVBID3cQKBavLCiPmB2PM2UuSCUPgrX4VDb97AwEG67bh4+KTOlncvu3M31BwA5rLHbCfEjwkNDky9e/SSbSxnD46Pg0RJtpXRvhmBSZHpRjWtKwFybjuQeXaKxto4WjLZZZvVmC17pZLJFkwxm5++PS2Mrwc7nyIMYZe/IzoP5d6QgEybqTXAAAAAElFTkSuQmCC',
            'options': [
                {
                    'name': 'enabled',
                    'type': 'enabler',
                },
                {
                    'name': 'username',
                    'default': '',
                },
                {
                    'name': 'api_key',
                    'label': 'Api Key',
                    'default': '',
                },
                {
                    'name': 'custom_tag',
                    'advanced': True,
                    'label': 'Custom tag',
                    'default': '',
                    'description': 'Add custom parameters, for example add catid=18 to get foreign (non-english) movies',
                },
                {
                    'name': 'extra_score',
                    'advanced': True,
                    'label': 'Extra Score',
                    'default': 20,
                    'type': 'int',
                    'description': 'Starting score for each release found via this provider.',
                }
            ],
        },
    ],
}]
diff --git a/couchpotato/core/media/_base/providers/torrent/__init__.py b/couchpotato/core/media/_base/providers/torrent/__init__.py
new file mode 100644
index 0000000000..12dda708d0
--- /dev/null
+++ b/couchpotato/core/media/_base/providers/torrent/__init__.py
@@ -0,0 +1,14 @@
# Declares the (initially empty) "torrent_providers" list group on the
# searcher tab; each torrent provider module registers itself into it.
config = [{
    'name': 'torrent_providers',
    'groups': [
        {
            'label': 'Torrent Providers',
            'description': 'Providers searching torrent sites for new releases',
            'wizard': True,
            'type': 'list',
            'name': 'torrent_providers',
            'tab': 'searcher',
            'options': [],
        },
    ],
}]
diff --git a/couchpotato/core/media/_base/providers/torrent/abnormal.py b/couchpotato/core/media/_base/providers/torrent/abnormal.py
new file mode 100644
index 0000000000..a9c089dff0
--- /dev/null
+++ b/couchpotato/core/media/_base/providers/torrent/abnormal.py
@@ -0,0 +1,151 @@
+import traceback
+import urlparse
+import urllib
+import re
+import unicodedata
+
+from datetime import datetime
+from bs4 import BeautifulSoup
+from couchpotato.core.helpers.encoding import toUnicode, simplifyString, tryUrlencode
+from couchpotato.core.helpers.variable import getTitle, tryInt
+from couchpotato.core.logger import CPLog
+from couchpotato.core.media._base.providers.torrent.base import TorrentProvider
+import re
+
+log = CPLog(__name__)
+
+
class Base(TorrentProvider):
    """Scraper for the French private tracker Abnormal (abnormal.ws)."""

    urls = {
        'login' : 'https://abnormal.ws/login.php',
        'detail' : 'https://abnormal.ws/torrents.php?id=%s',
        'search' : 'https://abnormal.ws/torrents.php?',
        'home' : 'https://abnormal.ws/%s',
    }

    http_time_between_calls = 1 #seconds

    def _search(self, media, quality, results):
        """Search every known title of the movie and scrape the result table."""

        #urllib.urlencode( {'name': getTitle(media['info']) })
        for title in media['info']['titles']:
            try:
                # NOTE(review): TitleStringReal is never used below; its only
                # effect is that titles failing latin-1 encoding raise here and
                # are silently skipped by the outer except -- confirm whether
                # that filtering is intentional.
                TitleStringReal = str(title.encode("latin-1").replace('-',' '))

                # All movie categories are pinned in the query; the title is
                # ASCII-folded because the site search chokes on accents.
                url = self.urls['search'] + 'cat[]=MOVIE|DVDR&cat[]=MOVIE|DVDRIP&cat[]=MOVIE|BDRIP&cat[]=MOVIE|VOSTFR&cat[]=MOVIE|HD|720p&cat[]=MOVIE|HD|1080p&cat[]=MOVIE|REMUXBR&cat[]=MOVIE|FULLBR&cat[]=ANIME&' + urllib.urlencode( {'search': unicodedata.normalize('NFKD', title).encode('ASCII', 'ignore').replace('\'', ' ') }) + '&order=Time&way=desc'

                data = self.getHTMLData(url)

                if data:
                    html = BeautifulSoup(data)
                    try:
                        #Get first entry in table
                        torrentTable = html.find('table', class_ = 'torrent_table cats')

                        if torrentTable:
                            # Result rows alternate between no class and
                            # 'tablerow-lightgrey'; collect both.
                            torrents = torrentTable.find_all('tr', class_=None)
                            torrents += torrentTable.find_all('tr', class_='tablerow-lightgrey')

                            for torrentRow in torrents:

                                nameCell = torrentRow.find_all('td')[1]
                                downloadCell = torrentRow.find_all('td')[3]
                                sizeCell = torrentRow.find_all('td')[4]
                                seedersCell = torrentRow.find_all('td')[5]
                                leechersCell = torrentRow.find_all('td')[6]

                                name = nameCell.find_all('a')[0].get_text().upper()

                                # Split on the '.YYYY.' year token to isolate the
                                # title part of the release name.
                                splittedReleaseName = re.split('(\.[0-9]{4}\.)', name, flags=re.IGNORECASE)

                                if len(splittedReleaseName) > 1:
                                    cleanedReleaseName = ''.join(splittedReleaseName[0:-2])

                                    # Compare accent-folded, alphanumeric-only forms
                                    # of the release title and the wanted title.
                                    match = re.compile(ur"[\w]+", re.UNICODE)
                                    nameSplit = ''.join(match.findall(unicodedata.normalize('NFKD', cleanedReleaseName).encode('ASCII','ignore')))
                                    titleSplit = ''.join(match.findall(unicodedata.normalize('NFKD', title.upper()).encode('ASCII','ignore')))

                                    if titleSplit == nameSplit:
                                        downloadUrl = downloadCell.find('a')['href']
                                        parsed = urlparse.urlparse(downloadUrl)
                                        # NOTE(review): parse_qs returns lists, so
                                        # torrent_id is ['<id>'] rather than a scalar
                                        # -- confirm downstream consumers accept that.
                                        torrent_id = urlparse.parse_qs(parsed.query)['id']

                                        new = {}
                                        new['id'] = torrent_id
                                        new['name'] = name
                                        new['url'] = self.urls['home'] % (downloadUrl)
                                        new['detail_url'] = self.urls['home'] % (nameCell.find('a')['href'])
                                        new['size'] = self.parseSize(sizeCell.get_text())
                                        new['seeders'] = tryInt(seedersCell.get_text())
                                        new['leechers'] = tryInt(leechersCell.get_text())

                                        results.append(new)
                    except:
                        log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))
            except:
                # Any failure for this title (encoding, network, parsing)
                # silently moves on to the next title.
                continue

    def getLoginParams(self):
        """Form fields POSTed to login.php."""
        return {
            'username': self.conf('username'),
            'password': self.conf('password'),
            'Login': '',
        }

    def loginSuccess(self, output):
        # A logout link is only rendered for authenticated sessions.
        return 'logout.php' in output.lower()

    loginCheckSuccess = loginSuccess
+
+
# Settings UI definition: Abnormal needs credentials and, being a private
# tracker, exposes seed ratio/time requirements.
config = [{
    'name': 'abnormal',
    'groups': [
        {
            'tab': 'searcher',
            'list': 'torrent_providers',
            'name': 'abnormal',
            'description': 'See Abnormal ',
            'wizard': True,
            'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAIAAACQkWg2AAABzElEQVR4nJWQW08TURSF97nMzJlbO6XtFNuCKQzRcjFFSkCixkSCCSa++Qv9E0QTgw8+mGhqFFQQCS2lthRb6AxzKzPjgyYKosJ623vl2ytrA1xS5M8VQqh066GeM1p7WxcC8oWJq2MlgcnH5qFtdc+4+OwBQq/duI0xAYTyI1MA6D9A4XpZUjSEUBQGABAfGPwXQDmhWLoLACd9/9jsBH2vfOcRJvSvHabK99V46qBVc2xLFJXh0SLGmBDablbPSWCSakzMrVdWP1RW4wmdSbLn2kk9v7D4mDH5nITJ8uKJ76+9fuY6lqQm0pkhbSDDM1FgMiJkd3vtVIIox1J6buP9yzAMAeDrzkeeyYBQFEVRFE7PPZAU7RRwc2GZEH6/sf1j/NauN+tbURiGQeB7ruOYkzP3fgExTadUfLHy5PcHrL95bpldq9fxPZsSWppfUuPpnx0SqeFety2pWjprZHKGnh3VsyO7X94NFYpj47NKLEk5ARDieGFn8y0WmMpRHnOCHEsxUUkkB43izPT8EgBUXj3FmCKMEAACMMZnteQVwnMsCMO+7/qOyQQBYeTZ5sF+ba/6ybGOXMfqdVqN+majutGsfT46bMNl9R01bKCKBrRO8wAAAABJRU5ErkJggg==',
            'options': [
                {
                    'name': 'enabled',
                    'type': 'enabler',
                    'default': False,
                },
                {
                    'name': 'username',
                    'default': '',
                },
                {
                    'name': 'password',
                    'default': '',
                    'type': 'password',
                },
                {
                    'name': 'seed_ratio',
                    'label': 'Seed ratio',
                    'type': 'float',
                    'default': 1,
                    'description': 'Will not be (re)moved until this seed ratio is met.',
                },
                {
                    'name': 'seed_time',
                    'label': 'Seed time',
                    'type': 'int',
                    'default': 40,
                    'description': 'Will not be (re)moved until this seed time (in hours) is met.',
                },
                {
                    'name': 'extra_score',
                    'advanced': True,
                    'label': 'Extra Score',
                    'type': 'int',
                    'default': 20,
                    'description': 'Starting score for each release found via this provider.',
                }
            ],
        },
    ],
}]
\ No newline at end of file
diff --git a/couchpotato/core/media/_base/providers/torrent/addict.py b/couchpotato/core/media/_base/providers/torrent/addict.py
new file mode 100644
index 0000000000..7f373d1f4d
--- /dev/null
+++ b/couchpotato/core/media/_base/providers/torrent/addict.py
@@ -0,0 +1,256 @@
+from bs4 import BeautifulSoup
+from couchpotato.core.helpers.variable import tryInt
+from couchpotato.core.logger import CPLog
+from couchpotato.core.helpers.encoding import simplifyString
+from couchpotato.core.media._base.providers.torrent.base import TorrentProvider
+from couchpotato.core.helpers import namer_check
+import cookielib
+import re
+import urllib2
+import urllib
+from StringIO import StringIO
+import gzip
+import time
+import datetime
+log = CPLog(__name__)
+
+
class Base(TorrentProvider):
    """Scraper for the French private tracker Addict (addict-to.net).

    Uses its own cookielib/urllib2 session instead of the base provider's
    HTTP layer, because the site requires a cookie-based login.
    """

    urls = {
        'test': 'https://addict-to.net/',
        'detail': 'https://addict-to.net/index.php?page=torrent-details&id=%s',
        'search': 'https://addict-to.net/index.php?page=torrents&',
    }

    http_time_between_calls = 1 #seconds
    cat_backup_id = None
    # Shared cookie jar + opener: one logged-in session per process.
    cj = cookielib.CookieJar()
    opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))

    class NotLoggedInHTTPError(urllib2.HTTPError):
        # Raised when the site redirects to the login page, i.e. the session
        # cookie is missing or expired.
        def __init__(self, url, code, msg, headers, fp):
            urllib2.HTTPError.__init__(self, url, code, msg, headers, fp)

    class PTPHTTPRedirectHandler(urllib2.HTTPRedirectHandler):
        def http_error_302(self, req, fp, code, msg, headers):
            # Follow normal redirects, but turn a redirect to login.php into
            # NotLoggedInHTTPError so callers can re-authenticate.
            log.debug("302 detected; redirected to %s" % headers['Location'])
            if (headers['Location'] != 'login.php'):
                return urllib2.HTTPRedirectHandler.http_error_302(self, req, fp, code, msg, headers)
            else:
                raise Base.NotLoggedInHTTPError(req.get_full_url(), code, msg, headers, fp)

    def getSearchParams(self, movie, quality):
        """Build one urlencoded search query per known movie title.

        The site category is chosen from the 3D flag first, then genre,
        then the wanted quality identifier.
        """
        results = []
        MovieTitles = movie['info']['titles']
        moviequality = simplifyString(quality['identifier'])
        moviegenre = movie['info']['genres']
        if quality['custom']['3d']==1:
            category=13
        elif 'Animation' in moviegenre:
            category=25
        elif 'Documentaire' in moviegenre or 'Documentary' in moviegenre:
            category=48
        else:

            if moviequality in ['720p']:
                category=15
            elif moviequality in ['1080p']:
                category=14
            elif moviequality in ['dvd-r']:
                category=11
            elif moviequality in ['br-disk']:
                category=49
            elif moviequality in ['bdrip']:
                category=17
            elif moviequality in ['brrip']:
                category=18
            else:
                category=16


        for MovieTitle in MovieTitles:
            try:
                # Titles that cannot be encoded as latin-1 are skipped.
                TitleStringReal = str(MovieTitle.encode("latin-1").replace('-',' '))
            except:
                continue
            try:
                results.append(urllib.urlencode( {'search': TitleStringReal, 'category' : category, 'page' : 'torrents', 'options' : 0, 'active' : 0}))
            except:
                continue

        return results

    def _search(self, movie, quality, results):
        """Run every search query (throttled to one per 10s) and scrape rows."""

        # Cookie login
        if not self.last_login_check and not self.login():
            return
        searchStrings= self.getSearchParams(movie,quality)
        lastsearch=0
        for searchString in searchStrings:
            # Throttle: never hit the site more than once per 10 seconds.
            actualtime=int(time.time())
            if actualtime-lastsearch<10:
                timetosleep= 10-(actualtime-lastsearch)
                time.sleep(timetosleep)
            URL = self.urls['search']+searchString
            r = self.opener.open(URL)
            soupfull = BeautifulSoup(r)
            #hack to avoid dummy parsing css and else
            # NOTE(review): the two lines below look garbled in this copy --
            # str(soupfull.prettify) stringifies the *method* (missing call
            # parentheses), and split('') / find('') use empty separators
            # (split('') raises ValueError). The original marker strings were
            # probably lost; confirm against the upstream source.
            delbegin=str(soupfull.prettify).split('')[1]
            restable=delbegin[delbegin.find('')+6]
            soup=BeautifulSoup(restable)
            resultsTable = soup.find("table")
            if resultsTable:

                rows = resultsTable.findAll("tr")
                x=0
                for row in rows:
                    x=x+1
                    if (x > 1):
                        #bypass first row because title only
                        #bypass date lines
                        if 'Liste des torrents' in str(row) :
                            continue
                        link = row.findAll('td')[1].find("a", href=re.compile("torrent-details"))
                        if link:
                            new={}
                            title = link.text
                            # Reject rows whose name doesn't match the movie.
                            testname=namer_check.correctName(title,movie)
                            if testname==0:
                                continue
                            downloadURL = self.urls['test'] + "/" + row.find("a",href=re.compile("\.torrent"))['href']
                            size= row.findAll('td')[9].text
                            leecher=row.findAll('td')[7].text
                            seeder=row.findAll('td')[6].text
                            date=row.findAll('td')[5].text
                            detail=self.urls['test'] + "/" + row.find("a",href=re.compile("torrent-details"))['href']

                            def extra_check(item):
                                return True

                            new['id'] = detail[detail.rfind('=')+1:]
                            new['name'] = title
                            new['url'] = downloadURL
                            new['detail_url'] = detail
                            new['size'] = self.parseSize(size)
                            new['age'] = self.ageToDays(date)
                            new['seeders'] = tryInt(seeder)
                            new['leechers'] = tryInt(leecher)
                            new['extra_check'] = extra_check
                            new['download'] = self.download

                            results.append(new)

    def ageToDays(self, age_str):
        """Convert the site's date column text into an age in days.

        Tries day-first then month-first parsing, mirroring the locale
        ambiguity of both the site and time.strftime('%x').
        """
        try:
            from_dt = datetime.datetime.strptime(age_str[9:11]+'-'+age_str[12:14]+'-'+age_str[15:], "%d-%m-%Y")
        except:
            from_dt = datetime.datetime.strptime(age_str[9:11]+'-'+age_str[12:14]+'-'+age_str[15:], "%m-%d-%Y")
        try:
            to_dt = datetime.datetime.strptime(time.strftime("%x"), "%d/%m/%Y")
        except:
            to_dt = datetime.datetime.strptime(time.strftime("%x"), "%m/%d/%Y")
        timedelta = to_dt - from_dt
        diff_day = timedelta.days
        return tryInt(diff_day)

    def login(self):
        """POST credentials and verify that the 'xbtitFM' session cookie was set."""

        self. opener.addheaders = [
            ('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko)'),
            ('Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'),
            ('Accept-Language', 'fr-fr,fr;q=0.5'),
            ('Accept-Charset', 'ISO-8859-1,utf-8;q=0.7,*;q=0.7'),
            ('Keep-Alive', '115'),
            ('Connection', 'keep-alive'),
            ('Cache-Control', 'max-age=0'),
        ]

        data = urllib.urlencode({'uid': self.conf('username'), 'pwd' : self.conf('password'), 'submit' : 'Envoyer'})


        r = self.opener.open('https://addict-to.net/index.php?page=login',data)

        # NOTE(review): login_done is only assigned inside the loop; if the
        # 'xbtitFM' cookie is absent (failed login) the 'if not login_done'
        # test below raises NameError instead of returning False. Needs
        # 'login_done = False' before the loop.
        for index, cookie in enumerate(self.cj):
            if (cookie.name == "xbtitFM"): login_done = True

        if not login_done:
            log.error('Login to Addict failed')
            return False

        if login_done:
            log.debug('Login HTTP Addict status 200; seems successful')
            self.last_login_check = self.opener
            return True

    def download(self, url = '', nzb_id = ''):
        """Download a .torrent using the logged-in opener, gunzipping if needed."""
        if not self.last_login_check and not self.login():
            return
        try:
            request = urllib2.Request(url)

            response = self.last_login_check.open(request)
            # unzip if needed
            if response.info().get('Content-Encoding') == 'gzip':
                buf = StringIO(response.read())
                f = gzip.GzipFile(fileobj = buf)
                data = f.read()
                f.close()
            else:
                data = response.read()
            response.close()
            return data
        except:
            # Soft failure: let the searcher try another provider.
            return 'try_next'
# Settings UI definition: Addict needs credentials and, being a private
# tracker, exposes seed ratio/time requirements.
config = [{
    'name': 'addict',
    'groups': [
        {
            'tab': 'searcher',
            'list': 'torrent_providers',
            'name': 'addict',
            'description': 'See Addict ',
            'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAABHNCSVQICAgIfAhkiAAAArZJREFUOI2NkktPE2EUht/5vmlH2oyIBAeKICUIMfUCUtuSSDTGaDckbkzcmLgx0Y0iCxe60sSVxhh/gDsNmhBjwMBCkwaiAblIQqhWqIptp1OmU3qZdjq003FHUEF9lue859mcF9gaxuVyXXW73Re32W9Atxr237pzOxkN+/Rypb5eENoSicTkfwvGfpjXNKbmPtHk1mJGiSlraWtLS0tnPB6f+Kfg6YJ5Y3HqyyOWqwW111rUyHSdWcGatJqscjpb2iVJer+tIPDNvDodmH1c0dehpRUsLwSwz9NnI3p6j7omfs5k822CINQqijLzh6D/2u2BH3HmMWNQ5FMSPs0Oo91zFk0dPbDV7a3SUyttSjz6zjDRy3GcXVXVeQAVAKBer/dSIhE+QXRp/7pO2ZXlKbR7/di1uxm5pAS+xgG9lOfKhURXQoyMgqEejuN2apr2EYBJ7Xb7saJe4kvrhVVD+y7s6ThZ5WjqRDYpgiUWBCdHoJcL8J27QuWvi95ENBwg1NJqtVobXC7XPFUUZV4QhC5FSZUJIWlqZOsYUm3bwe5E6OMYtHIGnjOXwVpqUO88gtxquEuOLi0aJtktiiIoAFOW5YnGxkZfLCYSTU0ulwtiay6b2wEOcJ+6BC2TgqEXQVkO+eIaIcTskKXYXLFYHNn4gizLAYfD0anmtaZMShpnWbX74PELlClRlAt5qGkFHwKDONzbB1tt3dD021d3AYR/6UEqlRrneb7BBOlZjUdH02LIx1c3A2UGc5MvcdDjR+zr5+fPHvYPAIhs2US/3z8TCoWqWQvXLUuRN2p6pTubSZMDR0+b4rfgi6Ent24CiG5b5WAwaGqaNme1WgXKWpxKMjLPstjHENvr4cF7A5uPAYD5XbAJwvP8dcOodJRKRaZUMh4AWPpLfksYSul5AIe2C/wE9XA/rBqvYMsAAAAASUVORK5CYII=',
            'wizard': True,
            'options': [
                {
                    'name': 'enabled',
                    'type': 'enabler',
                    'default': False,
                },
                {
                    'name': 'username',
                    'default': '',
                },
                {
                    'name': 'password',
                    'default': '',
                    'type': 'password',
                },
                {
                    'name': 'seed_ratio',
                    'label': 'Seed ratio',
                    'type': 'float',
                    'default': 1,
                    'description': 'Will not be (re)moved until this seed ratio is met.',
                },
                {
                    'name': 'seed_time',
                    'label': 'Seed time',
                    'type': 'int',
                    'default': 40,
                    'description': 'Will not be (re)moved until this seed time (in hours) is met.',
                },
                {
                    'name': 'extra_score',
                    'advanced': True,
                    'label': 'Extra Score',
                    'type': 'int',
                    'default': 0,
                    'description': 'Starting score for each release found via this provider.',
                }
            ],
        },
    ],
}]
diff --git a/couchpotato/core/media/_base/providers/torrent/alpharatio.py b/couchpotato/core/media/_base/providers/torrent/alpharatio.py
new file mode 100644
index 0000000000..96d91dedf6
--- /dev/null
+++ b/couchpotato/core/media/_base/providers/torrent/alpharatio.py
@@ -0,0 +1,134 @@
+import traceback
+
+from bs4 import BeautifulSoup
+from couchpotato.core.helpers.variable import tryInt
+from couchpotato.core.logger import CPLog
+from couchpotato.core.media._base.providers.torrent.base import TorrentProvider
+import six
+
+
+log = CPLog(__name__)
+
+
class Base(TorrentProvider):
    """Provider base for the AlphaRatio private torrent tracker.

    Searches the site's advanced-search HTML page and parses the result
    table. Concrete media-type subclasses supply buildUrl(), which must
    return the (searchstr, scene, category) tuple for the search URL.
    """

    urls = {
        'test': 'https://alpharatio.cc/',
        'login': 'https://alpharatio.cc/login.php',
        'login_check': 'https://alpharatio.cc/inbox.php',
        'detail': 'https://alpharatio.cc/torrents.php?torrentid=%s',
        'search': 'https://alpharatio.cc/torrents.php?action=advanced&searchstr=%s&scene=%s&filter_cat[%d]=1',
        'download': 'https://alpharatio.cc/%s',
    }

    http_time_between_calls = 1  # Seconds
    # Substring of the site's failed-login page, used by the base class.
    login_fail_msg = ' attempts remaining.'

    def _search(self, media, quality, results):
        """Run a search for `media` and append parsed releases to `results`."""

        url = self.urls['search'] % self.buildUrl(media, quality)
        # The site rejects percent-encoded colons in the search string.
        cleaned_url = url.replace('%3A', '')
        data = self.getHTMLData(cleaned_url)

        if not data:
            return

        html = BeautifulSoup(data)

        try:
            result_table = html.find('table', attrs = {'id': 'torrent_table'})
            if not result_table:
                return

            entries = result_table.find_all('tr', attrs = {'class': 'torrent'})
            for result in entries:

                link = result.find('a', attrs = {'dir': 'ltr'})
                url = result.find('a', attrs = {'title': 'Download'})
                tds = result.find_all('td')

                results.append({
                    'id': link['href'].replace('torrents.php?id=', '').split('&')[0],
                    'name': link.contents[0],
                    'url': self.urls['download'] % url['href'],
                    'detail_url': self.urls['download'] % link['href'],
                    # Size, seeders and leechers are the 4th, 2nd and last
                    # columns counted from the end of the row.
                    'size': self.parseSize(tds[-4].string),
                    'seeders': tryInt(tds[-2].string),
                    'leechers': tryInt(tds[-1].string),
                })
        except:
            # Fixed message grammar (was 'Failed to parsing').
            log.error('Failed parsing %s: %s', (self.getName(), traceback.format_exc()))

    def getLoginParams(self):
        """Return the POST payload the base class submits to the login URL."""
        return {
            'username': self.conf('username'),
            'password': self.conf('password'),
            'keeplogged': '1',
            'login': 'Login',
        }

    def loginSuccess(self, output):
        # A logout link is only rendered for authenticated sessions.
        return 'logout.php' in output.lower()

    loginCheckSuccess = loginSuccess

    def getSceneOnly(self):
        """Return '1' when the scene-only filter is enabled, else ''."""
        return '1' if self.conf('scene_only') else ''
+
+
+config = [{
+ 'name': 'alpharatio',
+ 'groups': [
+ {
+ 'tab': 'searcher',
+ 'list': 'torrent_providers',
+ 'name': 'AlphaRatio',
+ 'description': 'AlphaRatio ',
+ 'wizard': True,
+ 'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAACX0lEQVQ4jbWTX0hTURzHv+fu3umdV9GtOZ3pcllGBomJ9RCmkiWIEJUQET2EMqF86aFeegqLHgoio1ICScoieugPiBlFFmpROUjNIub+NKeba2rqvdvuPKeXDIcsgugHB378fj8+X37fcw5hjOFfgvtTc8o7mdveHWv0+YJ5iWb45SQWi2kc7olCnteoHCGUMqbpejBkO99rPDlW5rjV3FjZkmXU+3SiKK8EkOUVxj2+9bZOe8ebhZxSRTCIQmAES1oLQADKp4EIc8gRFr3t+/SNe0oLelatYM0zO56dqS3fmh4eXkoxIrWvAwXegLta8bymYyak9lyGR7d57eHHtOt7aNaQ0AORU8OEqlg0HURTnXi96cCaK0AYEW0l+MAoQoIp48PHke0JAYwyBkYhameUQ3vz7lTt3NRdKH0ajxgqQMJzAMdBkRVdYgAAEA71G2Z6MnOyvSmSJB/bFblN5DHEsosghf3zZduK+1fdQhyEcKitr+r0B2dMAyPOcmd02oxiC2jUjJaSwbPZpoLJhAA1Ci3hGURRlO0Of8nN9/MNUUXSkrQsFQ4meNORG6/G2O/jGXdZ044OKzg3z3r77TUre81tL1pxirLMWnsoMB00LtfjPLh67/OJH3xRMgiHb96JOCVbxbobRONBQNqScffJ6JE4E2VZFvv6BirbXpkboGcA4eGaDOV73G4LAFBKSWRhNsmqfnHCosG159Lxt++GdgC/XuLD3sH60/fdFxjJBNMDAAVZ8CNfVJxPLzbs/uqa2Lj/0stHkWSDFlwS4FIhRKei3a3VNeS//sa/iZ/B6hMIr7Fq4QAAAABJRU5ErkJggg==',
+ 'options': [
+ {
+ 'name': 'enabled',
+ 'type': 'enabler',
+ 'default': False,
+ },
+ {
+ 'name': 'username',
+ 'default': '',
+ },
+ {
+ 'name': 'password',
+ 'default': '',
+ 'type': 'password',
+ },
+ {
+ 'name': 'seed_ratio',
+ 'label': 'Seed ratio',
+ 'type': 'float',
+ 'default': 1,
+ 'description': 'Will not be (re)moved until this seed ratio is met.',
+ },
+ {
+ 'name': 'seed_time',
+ 'label': 'Seed time',
+ 'type': 'int',
+ 'default': 40,
+ 'description': 'Will not be (re)moved until this seed time (in hours) is met.',
+ },
+ {
+ 'name': 'scene_only',
+ 'type': 'bool',
+ 'default': False,
+ 'description': 'Only allow scene releases.'
+ },
+ {
+ 'name': 'extra_score',
+ 'advanced': True,
+ 'label': 'Extra Score',
+ 'type': 'int',
+ 'default': 0,
+ 'description': 'Starting score for each release found via this provider.',
+ }
+ ],
+ },
+ ],
+}]
diff --git a/couchpotato/core/media/_base/providers/torrent/awesomehd.py b/couchpotato/core/media/_base/providers/torrent/awesomehd.py
new file mode 100644
index 0000000000..5a24f517b5
--- /dev/null
+++ b/couchpotato/core/media/_base/providers/torrent/awesomehd.py
@@ -0,0 +1,154 @@
+import re
+import traceback
+
+from bs4 import BeautifulSoup
+from couchpotato.core.helpers.variable import tryInt, getIdentifier
+from couchpotato.core.logger import CPLog
+from couchpotato.core.media._base.providers.torrent.base import TorrentProvider
+
+
+log = CPLog(__name__)
+
+
class Base(TorrentProvider):
    """Provider base for the Awesome-HD private tracker.

    Uses the site's searchapi endpoint, which returns XML that is parsed
    here with BeautifulSoup. Authentication is passkey based; there is no
    interactive login.
    """

    urls = {
        'test': 'https://awesome-hd.me/',
        'detail': 'https://awesome-hd.me/torrents.php?torrentid=%s',
        'search': 'https://awesome-hd.me/searchapi.php?action=imdbsearch&passkey=%s&imdb=%s&internal=%s',
        'download': 'https://awesome-hd.me/torrents.php?action=download&id=%s&authkey=%s&torrent_pass=%s',
    }
    http_time_between_calls = 1
    # Error text returned by the API when the passkey is rejected.
    login_fail_msg = 'Please check that you provided a valid API Key, username, and action.'

    def _search(self, movie, quality, results):
        """Search by IMDB id and append scored releases to `results`."""

        data = self.getHTMLData(self.urls['search'] % (self.conf('passkey'), getIdentifier(movie), self.conf('only_internal')))

        if data:
            # Invalid credentials: disable the provider until reconfigured.
            if self.login_fail_msg in data:
                self.disableAccount()
                return

            try:
                soup = BeautifulSoup(data)

                # The API wraps failures in an <error> element.
                if soup.find('error'):
                    log.info(soup.find('error').get_text())
                    return

                # Per-session auth key needed to build download URLs.
                authkey = soup.find('authkey').get_text()
                entries = soup.find_all('torrent')

                for entry in entries:

                    torrentscore = 0
                    torrent_id = entry.find('id').get_text()
                    name = entry.find('name').get_text()
                    year = entry.find('year').get_text()
                    releasegroup = entry.find('releasegroup').get_text()
                    resolution = entry.find('resolution').get_text()
                    encoding = entry.find('encoding').get_text()
                    freeleech = entry.find('freeleech').get_text()
                    media = entry.find('media').get_text()
                    audioformat = entry.find('audioformat').get_text()

                    # skip audio channel only releases
                    if resolution == '':
                        continue

                    torrent_desc = '%s.%s.%s.%s-%s' % (resolution, media, audioformat, encoding, releasegroup)

                    # NOTE(review): freeleech values '0.25'/'0.50' appear to
                    # mark internal releases here -- confirm against the
                    # site's API docs.
                    if self.conf('prefer_internal') and freeleech in ['0.25', '0.50']:
                        torrentscore += 200

                    # Boost encodes and/or remuxes per the 'favor' setting.
                    if encoding == 'x264' and self.conf('favor') in ['encode', 'both']:
                        torrentscore += 200
                    elif re.search('Remux', encoding) and self.conf('favor') in ['remux', 'both']:
                        torrentscore += 200

                    # Normalize the name to dot-separated words.
                    name = re.sub(r'\W', '.', name)
                    name = re.sub(r'\.+', '.', name)
                    results.append({
                        'id': torrent_id,
                        'name': '%s.%s.%s' % (name, year, torrent_desc),
                        'url': self.urls['download'] % (torrent_id, authkey, self.conf('passkey')),
                        'detail_url': self.urls['detail'] % torrent_id,
                        # Size converted to MB (API value presumably bytes).
                        'size': tryInt(entry.find('size').get_text()) / 1048576,
                        'seeders': tryInt(entry.find('seeders').get_text()),
                        'leechers': tryInt(entry.find('leechers').get_text()),
                        'score': torrentscore
                    })

            except:
                log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))
+
+
+config = [{
+ 'name': 'awesomehd',
+ 'groups': [
+ {
+ 'tab': 'searcher',
+ 'list': 'torrent_providers',
+ 'name': 'Awesome-HD',
+ 'description': 'AHD ',
+ 'wizard': True,
+ 'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAC+UlEQVR4AV1SO0y6dxQ9H4g8CoIoohZ5NA0aR2UgkYpNB5uocTSaLlrDblMH09Gt8d90r3YpJkanxjA4GGkbO7RNxSABq8jDGnkpD+UD5NV7Bxvbk9wvv+/3uPece66A/yEWi42FQqHVfD7/cbPZtIEglUpjOp3uZHR0dBvAn3gDIRqNgjE4OKj0+Xzf3NzcfD4wMCCjf5TLZbTbbajVatzf3+Pu7q5uNpt35ufnvwBQAScQRREEldfr9RWLxan+/n5YrVa+jFarhVfQQyQSCU4EhULhX15engEgSrjC0dHRVqlUmjQYDBgaGgKtuTqz4mTgIoVCASaTCX19fajVapOHh4dbFJBks9mxcDi8qtFoJEajkfVyJWi1WkxMTMDhcIAT8x6D7/Dd6+vr1fHx8TGp2+3+iqo5+YCzBwIBToK5ubl/mQwPDyMSibAs2Gw2UHNRrValz8/PDUk8Hv9EqVRCr9fj4uICTNflcqFer+Pg4AB7e3uoVCq8x9Rxfn6O7u5uqFQq8FspZXxHTekggByA3W4Hr9PpNDeRL3I1cMhkMrBrnZ2dyGQyvNYIs7OzVbJNPjIyAraLwYdcjR8wXl5eIJfLwRIFQQDLYkm3t7c1CdGPPT4+cpOImp4PODMeaK+n10As2jBbrHifHOjS6qAguVFimkqlwAMmIQnHV1dX4NDQhVwuhyZTV6pgIktzDzkkk0lEwhEEzs7ASQr5Ai4vL1nuccfCwsLO/v6+p9FoyJhF6ekJro/cPCzIZLNQa7rQoK77/SdgWWpKkCaJ5EB9aWnpe6nH40nRMBnJV4f5gw+FX3/5GX/8/htXRZdOzzqhJWn6nl6YbTZqqhrhULD16fT0d8FgcFtYW1vD5uamfGVl5cd4IjldKhZACdkJvKfWUANrxEaJV4hiGVaL1b+7653hXzwRZQr2X76xsfG1xWIRaZzbNPv/CdrjEL9cX/+WXFBSgEPgzxuwG3Yans9OT0+naBZMIJDNfzudzp8WFxd/APAX3uAf9WOTxOPLdosAAAAASUVORK5CYII=',
+ 'options': [
+ {
+ 'name': 'enabled',
+ 'type': 'enabler',
+ 'default': False,
+ },
+ {
+ 'name': 'passkey',
+ 'default': '',
+ },
+ {
+ 'name': 'seed_ratio',
+ 'label': 'Seed ratio',
+ 'type': 'float',
+ 'default': 1,
+ 'description': 'Will not be (re)moved until this seed ratio is met.',
+ },
+ {
+ 'name': 'seed_time',
+ 'label': 'Seed time',
+ 'type': 'int',
+ 'default': 40,
+ 'description': 'Will not be (re)moved until this seed time (in hours) is met.',
+ },
+ {
+ 'name': 'only_internal',
+ 'advanced': True,
+ 'type': 'bool',
+ 'default': 1,
+ 'description': 'Only search for internal releases.'
+ },
+ {
+ 'name': 'prefer_internal',
+ 'advanced': True,
+ 'type': 'bool',
+ 'default': 1,
+ 'description': 'Favors internal releases over non-internal releases.'
+ },
+ {
+ 'name': 'favor',
+ 'advanced': True,
+ 'default': 'both',
+ 'type': 'dropdown',
+ 'values': [('Encodes & Remuxes', 'both'), ('Encodes', 'encode'), ('Remuxes', 'remux'), ('None', 'none')],
+ 'description': 'Give extra scoring to encodes or remuxes.'
+ },
+ {
+ 'name': 'extra_score',
+ 'advanced': True,
+ 'type': 'int',
+ 'default': 20,
+ 'description': 'Starting score for each release found via this provider.',
+ },
+ ],
+ },
+ ],
+}]
+
diff --git a/couchpotato/core/media/_base/providers/torrent/base.py b/couchpotato/core/media/_base/providers/torrent/base.py
new file mode 100644
index 0000000000..9f5f289067
--- /dev/null
+++ b/couchpotato/core/media/_base/providers/torrent/base.py
@@ -0,0 +1,78 @@
+import time
+import traceback
+
+from couchpotato.core.helpers.variable import getImdb, md5, cleanHost
+from couchpotato.core.logger import CPLog
+from couchpotato.core.media._base.providers.base import YarrProvider
+from couchpotato.environment import Env
+
+
+log = CPLog(__name__)
+
+
class TorrentProvider(YarrProvider):
    """Base class for torrent providers, adding proxy-mirror selection
    and IMDB-id matching on top of YarrProvider."""

    protocol = 'torrent'

    # Currently selected mirror (class-level cache); candidates in proxy_list.
    proxy_domain = None
    proxy_list = []

    def imdbMatch(self, url, imdbId):
        """Return True when `url` (or the page it points to) contains `imdbId`.

        First tries to extract an IMDB id from the URL string itself; if the
        URL is an http(s) link, falls back to fetching (cached) page content
        and scanning that.
        """
        if getImdb(url) == imdbId:
            return True

        if url[:4] == 'http':
            try:
                cache_key = md5(url)
                data = self.getCache(cache_key, url)
            except IOError:
                log.error('Failed to open %s.', url)
                return False

            return getImdb(data) == imdbId

        return False

    def getDomain(self, url = ''):
        """Return the working domain joined with `url`, or None.

        A user-configured domain always wins. Otherwise the candidate
        proxies are probed (at most once per 24h each, tracked via
        Env.prop) until one passes correctProxy().
        """

        forced_domain = self.conf('domain')
        if forced_domain:
            return cleanHost(forced_domain).rstrip('/') + url

        if not self.proxy_domain:
            for proxy in self.proxy_list:

                prop_name = 'proxy.%s' % proxy
                last_check = float(Env.prop(prop_name, default = 0))

                # Skip proxies that failed a probe within the last day.
                if last_check > time.time() - 86400:
                    continue

                data = ''
                try:
                    data = self.urlopen(proxy, timeout = 3, show_error = False)
                except:
                    log.debug('Failed %s proxy %s: %s', (self.getName(), proxy, traceback.format_exc()))

                if self.correctProxy(data):
                    log.debug('Using proxy for %s: %s', (self.getName(), proxy))
                    self.proxy_domain = proxy
                    break

                # NOTE(review): the failure timestamp is only recorded when
                # the proxy is rejected (break skips it) -- confirm intended.
                Env.prop(prop_name, time.time())

        if not self.proxy_domain:
            log.error('No %s proxies left, please add one in settings, or let us know which one to add on the forum.', self.getName())
            return None

        return cleanHost(self.proxy_domain).rstrip('/') + url

    def correctProxy(self, data):
        # Default: accept any response; subclasses override to validate.
        return True
+
+
class TorrentMagnetProvider(TorrentProvider):
    """Torrent provider variant whose releases are magnet links."""

    protocol = 'torrent_magnet'

    # Magnet links need no .torrent fetch; disable the inherited download.
    download = None
diff --git a/couchpotato/core/media/_base/providers/torrent/bithdtv.py b/couchpotato/core/media/_base/providers/torrent/bithdtv.py
new file mode 100644
index 0000000000..a3eb1d9f02
--- /dev/null
+++ b/couchpotato/core/media/_base/providers/torrent/bithdtv.py
@@ -0,0 +1,155 @@
+import traceback
+
+from bs4 import BeautifulSoup
+from couchpotato.core.helpers.encoding import toUnicode
+from couchpotato.core.helpers.variable import tryInt
+from couchpotato.core.logger import CPLog
+from couchpotato.core.media._base.providers.torrent.base import TorrentProvider
+
+
+log = CPLog(__name__)
+
+
class Base(TorrentProvider):
    """Provider base for the BiT-HDTV tracker.

    Authentication is cookie based: the user copies the h_sl / h_sp / h_su
    session cookie values into the settings; no login form is submitted.
    """

    urls = {
        'detail': 'https://www.bit-hdtv.com/details.php?id=%s',
        'search': 'https://www.bit-hdtv.com/torrents.php?',
        'download': 'https://www.bit-hdtv.com/download.php?id=%s',
    }

    # Searches for movies only - BiT-HDTV's subcategory and resolution search filters appear to be broken
    http_time_between_calls = 1  # Seconds
    login_fail_msg = 'Username or password incorrect.'

    def _search(self, media, quality, results):
        """Run a search and append parsed result rows to `results`."""

        search_url = "%s&%s" % (self.urls['search'], self.buildUrl(media, quality))
        data = self.getHTMLData(search_url, headers = self.getRequestHeaders())
        if not data:
            return

        # Remove BiT-HDTV's output garbage so outdated BS4 versions successfully parse the HTML
        head, _, tail = data.partition('-->')
        if '## SELECT COUNT(' in head:
            data = tail

        html = BeautifulSoup(data, 'html.parser')

        try:
            result_tables = html.find_all('table', attrs = {'width': '800', 'class': ''})
            if result_tables is None:
                return

            # Take first result
            result_table = result_tables[0]
            if result_table is None:
                return

            # First row is the header; skip it.
            for row in result_table.find_all('tr')[1:]:

                cells = row.find_all('td')
                details_link = cells[2].find('a')
                torrent_id = details_link['href'].split('id=')[1]

                results.append({
                    'id': torrent_id,
                    'name': details_link.contents[0].get_text(),
                    'url': self.urls['download'] % torrent_id,
                    'detail_url': self.urls['detail'] % torrent_id,
                    'size': self.parseSize(cells[6].get_text()),
                    'seeders': tryInt(cells[8].string),
                    'leechers': tryInt(cells[9].string),
                    'get_more_info': self.getMoreInfo,
                })

        except:
            log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))

    def getRequestHeaders(self):
        """Build the Cookie header from the three configured session values."""
        session_values = (
            ('h_sl', self.conf('cookiesettingsl')),
            ('h_sp', self.conf('cookiesettingsp')),
            ('h_su', self.conf('cookiesettingsu')),
        )
        cookies = ';'.join('%s=%s' % (key, value or '') for key, value in session_values)
        return {
            'Cookie': cookies
        }

    def getMoreInfo(self, item):
        """Fetch the (long-cached) detail page and attach its NFO text."""
        page = self.getCache('bithdtv.%s' % item['id'], item['detail_url'], cache_timeout = 25920000)
        detail_table = BeautifulSoup(page).find('table', attrs = {'class': 'detail'})
        item['description'] = toUnicode(detail_table.text) if detail_table else ''
        return item

    def download(self, url = '', nzb_id = ''):
        """Download with session cookies; return 'try_next' on failure."""
        try:
            return self.urlopen(url, headers = self.getRequestHeaders())
        except:
            log.error('Failed getting release from %s: %s', (self.getName(), traceback.format_exc()))

        return 'try_next'
+
+config = [{
+ 'name': 'bithdtv',
+ 'groups': [
+ {
+ 'tab': 'searcher',
+ 'list': 'torrent_providers',
+ 'name': 'BiT-HDTV',
+ 'description': 'BiT-HDTV ',
+ 'wizard': True,
+ 'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAIAAACQkWg2AAAABnRSTlMAAAAAAABupgeRAAABMklEQVR4AZ3Qu0ojcQCF8W9MJcQbJNgEEQUbQVIqWgnaWfkIvoCgggixEAmIhRtY2GV3w7KwU61B0EYIxmiw0YCik84ipaCuc0nmP5dcjIUgOjqDvxf4OAdf9mnMLcUJyPyGSCP+YRdC+Kp8iagJKhuS+InYRhTGgDbeV2uEMand4ZRxizjXHQEimxhraAnUr73BNqQxMiNeV2SwcjTLEVtb4Zl10mXutvOWm2otw5Sxz6TGTbdd6ncuYvVLXAXrvM+ruyBpy1S3JLGDfUQ1O6jn5vTsrJXvqSt4UNfj6vxTRPxBHER5QeSirhLGk/5rWN+ffB1XZuxjnDy1q87m7TS+xOGA+Iv4gfkbaw+nOMXHDHnITGEk0VfRFnn4Po4vNYm6RGukmggR0L08+l+e4HMeASo/i6AJUjLgAAAAAElFTkSuQmCC',
+ 'options': [
+ {
+ 'name': 'enabled',
+ 'type': 'enabler',
+ 'default': False,
+ },
+ {
+ 'name': 'cookiesettingsl',
+ 'label': 'Cookies (h_sl)',
+ 'default': '',
+ 'description': 'Cookie h_sl from session',
+ },
+ {
+ 'name': 'cookiesettingsp',
+ 'label': 'Cookies (h_sp)',
+ 'default': '',
+ 'description': 'Cookie h_sp from session',
+ },
+ {
+ 'name': 'cookiesettingsu',
+ 'label': 'Cookies (h_su)',
+ 'default': '',
+ 'description': 'Cookie h_su from session',
+ },
+ {
+ 'name': 'seed_ratio',
+ 'label': 'Seed ratio',
+ 'type': 'float',
+ 'default': 1,
+ 'description': 'Will not be (re)moved until this seed ratio is met.',
+ },
+ {
+ 'name': 'seed_time',
+ 'label': 'Seed time',
+ 'type': 'int',
+ 'default': 40,
+ 'description': 'Will not be (re)moved until this seed time (in hours) is met.',
+ },
+ {
+ 'name': 'extra_score',
+ 'advanced': True,
+ 'label': 'Extra Score',
+ 'type': 'int',
+ 'default': 20,
+ 'description': 'Starting score for each release found via this provider.',
+ }
+ ],
+ },
+ ],
+}]
diff --git a/couchpotato/core/media/_base/providers/torrent/bitsoup.py b/couchpotato/core/media/_base/providers/torrent/bitsoup.py
new file mode 100644
index 0000000000..3736f107ed
--- /dev/null
+++ b/couchpotato/core/media/_base/providers/torrent/bitsoup.py
@@ -0,0 +1,138 @@
+import traceback
+
+from bs4 import BeautifulSoup, SoupStrainer
+from couchpotato.core.helpers.variable import tryInt
+from couchpotato.core.logger import CPLog
+from couchpotato.core.media._base.providers.torrent.base import TorrentProvider
+
+
+log = CPLog(__name__)
+
+
class Base(TorrentProvider):
    """Provider base for the Bitsoup private tracker.

    Searches the browse page and parses the HTML result table; login is
    form based and handled by the base class via getLoginParams().
    """

    urls = {
        'test': 'https://www.bitsoup.me/',
        'login': 'https://www.bitsoup.me/takelogin.php',
        'login_check': 'https://www.bitsoup.me/my.php',
        'search': 'https://www.bitsoup.me/browse.php?%s',
        'baseurl': 'https://www.bitsoup.me/%s',
    }

    http_time_between_calls = 1  # Seconds
    login_fail_msg = 'Login failed!'
    # Only parse <table> elements; the rest of the page is irrelevant.
    only_tables_tags = SoupStrainer('table')

    # Column indexes of the name and download links in a result row.
    torrent_name_cell = 1
    torrent_download_cell = 2

    def _searchOnTitle(self, title, movie, quality, results):
        """Search for `title` and append parsed releases to `results`."""

        data = self.getHTMLData(self.urls['search'] % self.buildUrl(title, movie, quality))
        if not data:
            return

        html = BeautifulSoup(data, 'html.parser', parse_only = self.only_tables_tags)

        try:
            table = html.find('table', attrs = {'class': 'koptekst'})
            if not table or 'nothing found!' in data.lower():
                return

            # First row is the header; skip it.
            for row in table.find_all('tr')[1:]:

                cells = row.find_all('td')
                name_link = cells[self.torrent_name_cell].find('a')
                download_link = cells[self.torrent_download_cell].find('a')

                # The details href carries the id plus a hit counter.
                torrent_id = name_link['href'].replace('details.php?id=', '').replace('&hit=1', '')

                results.append({
                    'id': torrent_id,
                    'name': name_link.getText(),
                    'size': self.parseSize(cells[8].getText()),
                    'seeders': tryInt(cells[10].getText()),
                    'leechers': tryInt(cells[11].getText()),
                    'url': self.urls['baseurl'] % download_link['href'],
                    'detail_url': self.urls['baseurl'] % name_link['href'],
                })

        except:
            log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))

    def getLoginParams(self):
        """Return the POST payload submitted to the login URL."""
        return {
            'username': self.conf('username'),
            'password': self.conf('password'),
            'ssl': 'yes',
        }

    def loginSuccess(self, output):
        # A logout link is only shown to authenticated sessions.
        return 'logout.php' in output.lower()

    loginCheckSuccess = loginSuccess
+
+
+config = [{
+ 'name': 'bitsoup',
+ 'groups': [
+ {
+ 'tab': 'searcher',
+ 'list': 'torrent_providers',
+ 'name': 'Bitsoup',
+ 'description': 'Bitsoup ',
+ 'wizard': True,
+ 'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAIAAACQkWg2AAAB8ElEQVR4AbWSS2sTURiGz3euk0mswaE37HhNhIrajQheFgF3rgR/lAt/gOBCXNZlo6AbqfUWRVCxi04wqUnTRibpJLaJzdzOOZ6WUumyC5/VHOb9eN/FA91uFx0FjI4IPfgiGLTWH73tn348GKmN7ijD0d2b41fO5qJEaX24AWNIUrVQCTTJ3Llx6vbV6Vtzk7Gi9+ebi996guFDDYAQAVj4FExP5qdOZB49W62t/zH3hECcwsPnbWeMXz6Xi2K1f0ApeK3hMCHHbP5gvvoriBgFAAQJEAxhjJ4u+YWTNsVI6b1JgtPWZkoIefKy4fcii2OTw2BABs7wj3bYDlLL4rvjGWOdTser1j5Xf7c3Q/MbHQYApxItvnm31mhQQ71eX2vUB76/vsWB2hg0QuogrMwLIG8P3InM2/eVGXeDViqVwWB79vRU2lgJYmdHcgXCTAXQFJTN5HguvDCR2Hxsxe8EvT54nlcul5vNpqDIEgwRQanAhAAABgRIyiQcjpIkkTOuWyqVoN/vSylX67XXH74uV1vHRUyxxFqbLBCSmBpiXSq6xcL5QrGYzWZ3XQIAwdlOJB+/aL764ucdmncYs0WsCI7kvTnn+qyDMEnTVCn1Tz5KsBFg6fvWcmsUAcnYNC/g2hnromvvqbHvxv+39S+MX+bWkFXwAgAAAABJRU5ErkJggg==',
+ 'options': [
+ {
+ 'name': 'enabled',
+ 'type': 'enabler',
+ 'default': False,
+ },
+ {
+ 'name': 'username',
+ 'default': '',
+ },
+ {
+ 'name': 'password',
+ 'default': '',
+ 'type': 'password',
+ },
+ {
+ 'name': 'seed_ratio',
+ 'label': 'Seed ratio',
+ 'type': 'float',
+ 'default': 1,
+ 'description': 'Will not be (re)moved until this seed ratio is met.',
+ },
+ {
+ 'name': 'seed_time',
+ 'label': 'Seed time',
+ 'type': 'int',
+ 'default': 40,
+ 'description': 'Will not be (re)moved until this seed time (in hours) is met.',
+ },
+ {
+ 'name': 'extra_score',
+ 'advanced': True,
+ 'label': 'Extra Score',
+ 'type': 'int',
+ 'default': 20,
+ 'description': 'Starting score for each release found via this provider.',
+ }
+ ],
+ },
+ ],
+}]
diff --git a/couchpotato/core/media/_base/providers/torrent/cpasbien.py b/couchpotato/core/media/_base/providers/torrent/cpasbien.py
new file mode 100644
index 0000000000..104ff708e6
--- /dev/null
+++ b/couchpotato/core/media/_base/providers/torrent/cpasbien.py
@@ -0,0 +1,261 @@
+from bs4 import BeautifulSoup
+from couchpotato.core.helpers.variable import getTitle, tryInt
+from couchpotato.core.logger import CPLog
+from couchpotato.core.helpers.encoding import simplifyString, tryUrlencode
+from couchpotato.core.media._base.providers.torrent.base import TorrentProvider
+import cookielib
+import re
+import traceback
+import urllib
+import urllib2
+import unicodedata
+from couchpotato.core.helpers import namer_check
+
+log = CPLog(__name__)
+
+
class Base(TorrentProvider):
    """Provider base for the French public tracker cPASbien (Python 2).

    NOTE(review): the module mixes two domains -- searches go to
    www.cpasbien.ch while login and downloads use www.cpasbien.cm;
    confirm which mirror is actually intended.
    """

    urls = {
        'test': 'http://www.cpasbien.ch/',
        'search': 'http://www.cpasbien.ch/recherche/',
    }

    http_time_between_calls = 1 #seconds
    cat_backup_id = None

    class NotLoggedInHTTPError(urllib2.HTTPError):
        # Raised by PTPHTTPRedirectHandler when the site redirects to the
        # login page instead of serving the requested resource.
        def __init__(self, url, code, msg, headers, fp):
            urllib2.HTTPError.__init__(self, url, code, msg, headers, fp)

    class PTPHTTPRedirectHandler(urllib2.HTTPRedirectHandler):
        # Follows ordinary 302 redirects, but converts a redirect to
        # 'login.php' into NotLoggedInHTTPError so callers can re-login.
        def http_error_302(self, req, fp, code, msg, headers):
            log.debug("302 detected; redirected to %s" % headers['Location'])
            if (headers['Location'] != 'login.php'):
                return urllib2.HTTPRedirectHandler.http_error_302(self, req, fp, code, msg, headers)
            else:
                raise Base.NotLoggedInHTTPError(req.get_full_url(), code, msg, headers, fp)

    def _search(self, movie, quality, results):
        """POST a title search to the site and append parsed rows to `results`."""

        # Cookie login
        if not self.last_login_check and not self.login():
            return

        # Title + quality identifier, dashes flattened to spaces.
        # NOTE(review): the chained replace(' ', ' ') calls are no-ops as
        # written -- they presumably collapsed runs of spaces originally;
        # verify against upstream.
        TitleStringReal = (getTitle(movie['info']) + ' ' + simplifyString(quality['identifier'] )).replace('-',' ').replace(' ',' ').replace(' ',' ').replace(' ',' ').encode("utf8")

        # Strip accents, then percent-encode keeping URL delimiters.
        URL = (self.urls['search']).encode('UTF8')
        URL=unicodedata.normalize('NFD',unicode(URL,"utf8","replace"))
        URL=URL.encode('ascii','ignore')
        URL = urllib2.quote(URL.encode('utf8'), ":/?=")

        values = {
            'champ_recherche' : TitleStringReal
        }

        data_tmp = urllib.urlencode(values)
        req = urllib2.Request(URL, data_tmp, headers={'User-Agent' : "Mozilla/5.0"} )

        data = urllib2.urlopen(req )

        # Synthetic release id; the site does not expose one.
        id = 1000

        if data:

            try:
                html = BeautifulSoup(data)
                # Result rows carry classes ligne0, ligne1, ...; collect
                # consecutive rows until a class index is missing.
                lin=0
                erlin=0
                resultdiv=[]
                while erlin==0:
                    try:
                        classlin='ligne'+str(lin)
                        resultlin=html.findAll(attrs = {'class' : [classlin]})
                        if resultlin:
                            for ele in resultlin:
                                resultdiv.append(ele)
                            lin+=1
                        else:
                            erlin=1
                    except:
                        erlin=1
                for result in resultdiv:

                    try:

                        new = {}
                        name = result.findAll(attrs = {'class' : ["titre"]})[0].text
                        # Skip releases whose name does not match the movie.
                        testname=namer_check.correctName(name,movie)
                        if testname==0:
                            continue
                        detail_url = result.find("a")['href']
                        # The .torrent lives under /telechargement/ with the
                        # detail-page slug; note the .cm domain here.
                        tmp = detail_url.split('/')[-1].replace('.html','.torrent')
                        url_download = ('http://www.cpasbien.cm/telechargement/%s' % tmp)
                        size = result.findAll(attrs = {'class' : ["poid"]})[0].text
                        seeder = result.findAll(attrs = {'class' : ["seed_ok"]})[0].text
                        leecher = result.findAll(attrs = {'class' : ["down"]})[0].text
                        # The site does not expose an age; assume 1 day.
                        age = '1'

                        # Require every word of the movie title to appear in
                        # the release name.
                        verify = getTitle(movie['info']).split(' ')

                        add = 1

                        for verify_unit in verify:
                            if (name.lower().find(verify_unit.lower()) == -1) :
                                add = 0

                        def extra_check(item):
                            return True

                        if add == 1:

                            new['id'] = id
                            new['name'] = name.strip()
                            new['url'] = url_download
                            new['detail_url'] = detail_url

                            new['size'] = self.parseSize(size)
                            new['age'] = self.ageToDays(age)
                            new['seeders'] = tryInt(seeder)
                            new['leechers'] = tryInt(leecher)
                            new['extra_check'] = extra_check
                            # Downloads must go through the cookie session.
                            new['download'] = self.loginDownload

                            results.append(new)

                            id = id+1

                    except:
                        log.error('Failed parsing cPASbien: %s', traceback.format_exc())

            except AttributeError:
                log.debug('No search results found.')
        else:
            log.debug('No search results found.')

    def ageToDays(self, age_str):
        """Convert a French age string ('2 jours', '3 mois', ...) to days."""
        age = 0
        # NOTE(review): no-op replace as written -- likely normalized a
        # non-breaking space originally; verify against upstream.
        age_str = age_str.replace(' ', ' ')

        regex = '(\d*.?\d+).(sec|heure|jour|semaine|mois|ans)+'
        matches = re.findall(regex, age_str)
        for match in matches:
            nr, size = match
            # Default multiplier covers sec/heure/jour (all treated as days).
            mult = 1
            if size == 'semaine':
                mult = 7
            elif size == 'mois':
                mult = 30.5
            elif size == 'ans':
                mult = 365

            age += tryInt(nr) * mult

        return tryInt(age)

    def login(self):
        """Open the site once to obtain session cookies; keep the opener.

        On success the configured opener (with its cookie jar) is stored in
        self.last_login_check, which doubles as the logged-in flag.
        """

        cookieprocessor = urllib2.HTTPCookieProcessor(cookielib.CookieJar())
        opener = urllib2.build_opener(cookieprocessor, Base.PTPHTTPRedirectHandler())
        opener.addheaders = [
            ('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko)'),
            ('Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'),
            ('Accept-Language', 'fr-fr,fr;q=0.5'),
            ('Accept-Charset', 'ISO-8859-1,utf-8;q=0.7,*;q=0.7'),
            ('Keep-Alive', '115'),
            ('Connection', 'keep-alive'),
            ('Cache-Control', 'max-age=0'),
        ]

        try:
            response = opener.open('http://www.cpasbien.cm', tryUrlencode({'url': '/'}))
        except urllib2.URLError as e:
            log.error('Login to cPASbien failed: %s' % e)
            return False

        if response.getcode() == 200:
            log.debug('Login HTTP cPASbien status 200; seems successful')
            self.last_login_check = opener
            return True
        else:
            log.error('Login to cPASbien failed: returned code %d' % response.getcode())
            return False

    def loginDownload(self, url = '', nzb_id = ''):
        """Download a .torrent, logging in first if needed."""
        values = {
            'url' : '/'
        }
        data_tmp = urllib.urlencode(values)
        req = urllib2.Request(url, data_tmp, headers={'User-Agent' : "Mozilla/5.0"} )

        try:
            if not self.last_login_check and not self.login():
                log.error('Failed downloading from %s', self.getName())
            # NOTE(review): the download is still attempted even when the
            # login above failed -- confirm whether an early return was
            # intended here.
            return urllib2.urlopen(req).read()
        except:
            log.error('Failed downloading from %s: %s', (self.getName(), traceback.format_exc()))

    def download(self, url = '', nzb_id = ''):
        """Like loginDownload, but bails out when the login fails."""

        if not self.last_login_check and not self.login():
            return

        values = {
            'url' : '/'
        }
        data_tmp = urllib.urlencode(values)
        req = urllib2.Request(url, data_tmp, headers={'User-Agent' : "Mozilla/5.0"} )

        try:
            return urllib2.urlopen(req).read()
        except:
            log.error('Failed downloading from %s: %s', (self.getName(), traceback.format_exc()))
+config = [{
+ 'name': 'cpasbien',
+ 'groups': [
+ {
+ 'tab': 'searcher',
+ 'list': 'torrent_providers',
+ 'name': 'cpasbien',
+ 'description': 'See cPASbien ',
+ 'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAABHNCSVQICAgIfAhkiAAAAgZJREFUOI2lkj9oE2EYxn93l/Quf440gXg4lBoEMd2MDuLSkk0R6hCnuqjUoR0c7FDo4Ca0CDo7uRRBqEMDXSLUUqRDiZM1NMEI1VKTlDZpUppccvc5nJp/KooPfMPH+z3P+zzv+8F/Quq8XIVEEOY0kASIzpoLlBKUV+CuCblfCjyF/P3V1Qi6jrCs7k4eD/X1dS5NTy9tQaJD2MFDkA23W8UwQFGQRJcB0DS0cBg/DPY4a0OVZcHeHihKf1ifD6pVfGD/VmBAUeDwEGQZLAskCVQV6nVYW+M4lSLQo9stoKpQLoNtO2QhYHsbkkmOczm+AP5eBy/BfwRDn8GHJLkpFp3utRpkMpDLwckJvlCIM9Uqg6YZeAAj58E1CVlXCaaigcCjsWhU8Xq9UCo5lisVx4FhODFkGbdpMtlqXa4IsVUHYkLcVlbg3ddGo3AzErl2emLCGaCmwcAAuL4ntCxoNpFsG8O2odlkXojF17CgAK2PsJna2Xk/ViyOh0dHXWhaewaW1T6mSb5a5V6rtbAMU4D5c18FyCzu7i5fyWZvDMfjOh4PNBpd5A/5vLheq93ZhMc/eF0Lr0NhaX8/eS6djo/EYqfQdUekUuHNxsZR4uDg1id40f9J+qE/CwTeitlZIWZmxKtQqOSFi39D7IQy5/c/fxIMpoGhfyUDMAwXzsL4n958A9jfxsJ8X4WQAAAAAElFTkSuQmCC',
+ 'wizard': True,
+ 'options': [
+ {
+ 'name': 'enabled',
+ 'type': 'enabler',
+ 'default': False,
+ },
+ {
+ 'name': 'seed_ratio',
+ 'label': 'Seed ratio',
+ 'type': 'float',
+ 'default': 1,
+ 'description': 'Will not be (re)moved until this seed ratio is met.',
+ },
+ {
+ 'name': 'seed_time',
+ 'label': 'Seed time',
+ 'type': 'int',
+ 'default': 40,
+ 'description': 'Will not be (re)moved until this seed time (in hours) is met.',
+ },
+ {
+ 'name': 'extra_score',
+ 'advanced': True,
+ 'label': 'Extra Score',
+ 'type': 'int',
+ 'default': 0,
+ 'description': 'Starting score for each release found via this provider.',
+ }
+ ],
+ },
+ ],
+}]
diff --git a/couchpotato/core/media/_base/providers/torrent/hd4free.py b/couchpotato/core/media/_base/providers/torrent/hd4free.py
new file mode 100644
index 0000000000..dbffba58f5
--- /dev/null
+++ b/couchpotato/core/media/_base/providers/torrent/hd4free.py
@@ -0,0 +1,140 @@
+
+import re
+import json
+import traceback
+
+from couchpotato.core.helpers.variable import tryInt, getIdentifier
+from couchpotato.core.logger import CPLog
+from couchpotato.core.media._base.providers.torrent.base import TorrentProvider
+
+
+log = CPLog(__name__)
+
+
class Base(TorrentProvider):
    """Provider base for the HD4Free tracker (JSON search API, Python 2)."""

    urls = {
        'test': 'https://hd4free.xyz/',
        'detail': 'https://hd4free.xyz/details.php?id=%s',
        'search': 'https://hd4free.xyz/searchapi.php?apikey=%s&username=%s&imdbid=%s&internal=%s',
        'download': 'https://hd4free.xyz/download.php?torrent=%s&torrent_pass=%s',
    }

    http_time_between_calls = 1  # Seconds
    # Error text returned by the API when the apikey is rejected.
    login_fail_msg = 'Your apikey is not valid! Go to HD4Free and reset your apikey.'

    def _search(self, movie, quality, results):
        """Search by IMDB id and append scored releases to `results`."""

        data = self.getJsonData(self.urls['search'] % (self.conf('apikey'), self.conf('username'), getIdentifier(movie), self.conf('internal_only')))

        if data:
            if 'error' in data:
                if self.login_fail_msg in data['error']:  # Check for login failure
                    self.disableAccount()
                else:
                    log.error('%s returned an error (possible rate limit): %s', (self.getName(), data['error']))
                return

            try:
                # The response maps keys to result objects (py2 iteritems).
                for key, result in data.iteritems():
                    # NOTE(review): total_results is checked per entry and
                    # aborts the whole loop -- presumably every entry carries
                    # the same count; confirm against the API.
                    if tryInt(result['total_results']) == 0:
                        return
                    torrentscore = self.conf('extra_score')
                    releasegroup = result['releasegroup']
                    resolution = result['resolution']
                    encoding = result['encoding']
                    freeleech = tryInt(result['freeleech'])
                    seeders = tryInt(result['seeders'])
                    torrent_desc = '/ %s / %s / %s / %s seeders' % (releasegroup, resolution, encoding, seeders)

                    # Freeleech flags internal releases; boost when preferred.
                    if freeleech > 0 and self.conf('prefer_internal'):
                        torrent_desc += '/ Internal'
                        torrentscore += 200

                    # Dead torrents score zero.
                    if seeders == 0:
                        torrentscore = 0

                    name = result['release_name']
                    year = tryInt(result['year'])

                    results.append({
                        'id': tryInt(result['torrentid']),
                        # Strip characters the renamer cannot handle.
                        'name': re.sub('[^A-Za-z0-9\-_ \(\).]+', '', '%s (%s) %s' % (name, year, torrent_desc)),
                        'url': self.urls['download'] % (result['torrentid'], result['torrentpass']),
                        'detail_url': self.urls['detail'] % result['torrentid'],
                        'size': tryInt(result['size']),
                        'seeders': tryInt(result['seeders']),
                        'leechers': tryInt(result['leechers']),
                        'age': tryInt(result['age']),
                        'score': torrentscore
                    })
            except:
                log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))
+config = [{
+ 'name': 'hd4free',
+ 'groups': [
+ {
+ 'tab': 'searcher',
+ 'list': 'torrent_providers',
+ 'name': 'HD4Free',
+ 'wizard': True,
+ 'description': 'HD4Free ',
+ 'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAMAAAAoLQ9TAAABX1BMVEUF6nsH33cJ03EJ1XIJ1nMKzXIKz28Lym4MxGsMxWsMx2wNvmgNv2kNwGkNwWwOuGgOuWYOuWcOumcOu2cOvmgPtWQPtmUPt2UPt2YQr2IQsGIQsGMQsmMQs2QRqmARq2ARrmERrmISpV4SpmASp14SqF8ToFsToFwToVwTo10TpV0UnFoUn1sVllcVmFgWkFUWklYXjVQXjlMXkFUYh1EYilIYi1MZhlEafk0af04agE4agU4beEobeUsbe0wcdUkeaUQebUYfZEMfZ0QgX0AgYEAgYUEhWj4iVz0iWD0jTzkkSzcmQTMmQzQnPTInPjInPzIoNy8oOC8oODAoOTAoOjApMi0pNC4pNS4qLCoqLSsqLisqMCwrJygrKCgrKCkrKSkrKikrKiorKyosIyYsIycsJCcsJScsJigtHyUuGCIuGiMuGyMuHCMuHCQvEyAvFSEvFiEvFyE0ABU0ABY5lYz4AAAA3ElEQVR4AWNIQAMMiYmJCYkIkMCQnpKWkZ4KBGlARlpaLEOor194kI+Pj6+PT0CET0AYg46Alr22NDeHkBinnq6SkitDrolDgYtaapajdpGppoFfGkMhv2GxE0uuPwNfsk6mhHMOQ54isxmbUJKCtWx+tIZQcDpDtqSol7qIMqsRu3dIhJxxFkOBoF2JG5O7lSqjh5S/tkkWQ5SBTbqnfkymv2WGLa95YCSDhZiMvKIwj4GJCpesuDivK0N6VFRUYlRyfHJUchQQJDMkxsfHJcTHAxEIxMVj+BZDAACjwkqhYgsTAAAAAABJRU5ErkJggg==',
+ 'options': [
+ {
+ 'name': 'enabled',
+ 'type': 'enabler',
+ 'default': False,
+ },
+ {
+ 'name': 'username',
+ 'default': '',
+ 'description': 'Enter your site username.',
+ },
+ {
+ 'name': 'apikey',
+ 'default': '',
+ 'label': 'API Key',
+ 'description': 'Enter your site api key. This can be found on Profile Security ',
+ },
+ {
+ 'name': 'seed_ratio',
+ 'label': 'Seed ratio',
+ 'type': 'float',
+ 'default': 0,
+ 'description': 'Will not be (re)moved until this seed ratio is met. HD4Free minimum is 1:1.',
+ },
+ {
+ 'name': 'seed_time',
+ 'label': 'Seed time',
+ 'type': 'int',
+ 'default': 0,
+ 'description': 'Will not be (re)moved until this seed time (in hours) is met. HD4Free minimum is 72 hours.',
+ },
+ {
+ 'name': 'prefer_internal',
+ 'advanced': True,
+ 'type': 'bool',
+ 'default': 1,
+ 'description': 'Favors internal releases over non-internal releases.',
+ },
+ {
+ 'name': 'internal_only',
+ 'advanced': True,
+ 'label': 'Internal Only',
+ 'type': 'bool',
+ 'default': False,
+ 'description': 'Only download releases marked as HD4Free internal',
+ },
+ {
+ 'name': 'extra_score',
+ 'advanced': True,
+ 'label': 'Extra Score',
+ 'type': 'int',
+ 'default': 0,
+ 'description': 'Starting score for each release found via this provider.',
+ }
+ ],
+ },
+ ],
+}]
diff --git a/couchpotato/core/media/_base/providers/torrent/hdbits.py b/couchpotato/core/media/_base/providers/torrent/hdbits.py
new file mode 100644
index 0000000000..ccb429329e
--- /dev/null
+++ b/couchpotato/core/media/_base/providers/torrent/hdbits.py
@@ -0,0 +1,131 @@
+import re
+import json
+import traceback
+
+from couchpotato.core.helpers.variable import tryInt, getIdentifier
+from couchpotato.core.logger import CPLog
+from couchpotato.core.media._base.providers.torrent.base import TorrentProvider
+
+
+log = CPLog(__name__)
+
+
+class Base(TorrentProvider):
+
+ urls = {
+ 'test': 'https://hdbits.org/',
+ 'detail': 'https://hdbits.org/details.php?id=%s',
+ 'download': 'https://hdbits.org/download.php?id=%s&passkey=%s',
+ 'api': 'https://hdbits.org/api/torrents'
+ }
+
+ http_time_between_calls = 1 # Seconds
+ login_fail_msg = 'Invalid authentication credentials'
+
+ def _post_query(self, **params):
+
+ post_data = {
+ 'username': self.conf('username'),
+ 'passkey': self.conf('passkey')
+ }
+ post_data.update(params)
+
+ if self.conf('internal_only'):
+ post_data.update({'origin': [1]})
+
+ try:
+ result = self.getJsonData(self.urls['api'], data = json.dumps(post_data))
+
+ if result:
+ if result['status'] != 0:
+ if self.login_fail_msg in result['message']: # Check for login failure
+ self.disableAccount()
+ return
+ log.error('Error searching hdbits: %s' % result['message'])
+ else:
+ return result['data']
+ except:
+ pass
+
+ return None
+
+ def _search(self, movie, quality, results):
+
+ match = re.match(r'tt(\d{7})', getIdentifier(movie))
+
+ data = self._post_query(imdb = {'id': match.group(1)})
+
+ if data:
+ try:
+ for result in data:
+ results.append({
+ 'id': result['id'],
+ 'name': result['name'],
+ 'url': self.urls['download'] % (result['id'], self.conf('passkey')),
+ 'detail_url': self.urls['detail'] % result['id'],
+ 'size': tryInt(result['size']) / 1024 / 1024,
+ 'seeders': tryInt(result['seeders']),
+ 'leechers': tryInt(result['leechers'])
+ })
+ except:
+ log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))
+
+
+config = [{
+ 'name': 'hdbits',
+ 'groups': [
+ {
+ 'tab': 'searcher',
+ 'list': 'torrent_providers',
+ 'name': 'HDBits',
+ 'wizard': True,
+ 'description': 'HDBits ',
+ 'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAIAAACQkWg2AAABi0lEQVR4AZWSzUsbQRjGdyabTcvSNPTSHlpQQeMHJApC8CJRvHgQQU969+LJP8G7f4N3DwpeFRQvRr0EKaUl0ATSpkigUNFsMl/r9NmZLCEHA/nNO5PfvMPDm0DI6fV3ZxiolEICe1oZCBVCCmBPKwOh2ErKBHGE4KYEXBpSLkUlqO4LcM7f+6nVhRnOhSkOz/hexk+tL+YL0yPF2YmN4tynD++4gTLGkNNac9YFLoREBR1+cnF3dFY6v/m6PD+FaXiNJtgA4xYbABxiGrz6+6HWaI5/+Qh37YS0/3Znc8UxwNGBIIBX22z+/ZdJ+4wzyjpR4PEpODg8tgUXBv2iWUzSpa12B0IR6n6lvt8Aek2lZHb084+fdRNgrwY8z81PjhVy2d2ttUrtV/lbBa+JXGEpDMPnoF2tN1QYRqVUtf6nFbThb7wk7le395elcqhASLb39okDiHY00VCtCTEHwSiH4AI0lkOiT1dwMeSfT3SRxiQWNO7Zwj1egkoVIQFMKvSiC3bcjXq9Jf8DcDIRT3hh10kAAAAASUVORK5CYII=',
+ 'options': [
+ {
+ 'name': 'enabled',
+ 'type': 'enabler',
+ 'default': False,
+ },
+ {
+ 'name': 'username',
+ 'default': '',
+ },
+ {
+ 'name': 'passkey',
+ 'default': '',
+ },
+ {
+ 'name': 'seed_ratio',
+ 'label': 'Seed ratio',
+ 'type': 'float',
+ 'default': 1,
+ 'description': 'Will not be (re)moved until this seed ratio is met.',
+ },
+ {
+ 'name': 'seed_time',
+ 'label': 'Seed time',
+ 'type': 'int',
+ 'default': 40,
+ 'description': 'Will not be (re)moved until this seed time (in hours) is met.',
+ },
+ {
+ 'name': 'extra_score',
+ 'advanced': True,
+ 'label': 'Extra Score',
+ 'type': 'int',
+ 'default': 0,
+ 'description': 'Starting score for each release found via this provider.',
+ },
+ {
+ 'name': 'internal_only',
+ 'advanced': True,
+ 'label': 'Internal Only',
+ 'type': 'bool',
+ 'default': False,
+ 'description': 'Only download releases marked as HDBits internal'
+ }
+ ],
+ },
+ ],
+}]
diff --git a/couchpotato/core/media/_base/providers/torrent/hdonly.py b/couchpotato/core/media/_base/providers/torrent/hdonly.py
new file mode 100644
index 0000000000..b069b3a705
--- /dev/null
+++ b/couchpotato/core/media/_base/providers/torrent/hdonly.py
@@ -0,0 +1,181 @@
+import htmlentitydefs
+import json
+import re
+import unicodedata
+import urllib
+import time
+import traceback
+
+from couchpotato.core.helpers.encoding import tryUrlencode
+from couchpotato.core.helpers.variable import getTitle, tryInt, mergeDicts, getIdentifier
+from couchpotato.core.logger import CPLog
+from couchpotato.core.media._base.providers.torrent.base import TorrentProvider
+from dateutil.parser import parse
+import six
+from HTMLParser import HTMLParser
+
+
+log = CPLog(__name__)
+
+
+class Base(TorrentProvider):
+
+ urls = {
+ 'domain': 'https://hd-only.org',
+ 'detail': 'https://hd-only.org/ajax.php?action=torrent&id=%s',
+ 'detailLink': 'https://hd-only.org/torrents.php?id=%s&torrentid=%s',
+ 'torrent': 'https://hd-only.org/torrents.php?action=download&id=%s&authkey=%s&torrent_pass=%s',
+ 'login': 'https://hd-only.org/login.php',
+ 'login_check': 'https://hd-only.org/login.php',
+ 'search': 'https://hd-only.org/ajax.php?action=browse&searchstr=%s',
+ 'index': 'https://hd-only.org/ajax.php?action=index'
+ }
+
+ http_time_between_calls = 2
+
+ def _search(self, media, quality, results):
+
+ h = HTMLParser()
+
+ indexResponse = self.getJsonData(self.urls['index'])
+
+ authkey = indexResponse['response']['authkey']
+ passkey = indexResponse['response']['passkey']
+
+ title = media['title']
+
+ TitleStringReal = str(title.encode("latin-1").replace('-',' '))
+
+ frTitle = self.getFrenchTitle(TitleStringReal)
+ if frTitle is None:
+ frTitle = TitleStringReal
+
+ url = self.urls['search'] % tryUrlencode(frTitle)
+ data = self.getJsonData(url)
+
+ if data['status'] == 'success' and len(data['response']['results']) > 0:
+ groupId = data[u'response'][u'results'][0][u'groupId']
+ name = data['response']['results'][0]['groupName'].upper()
+ splittedReleaseName = re.split('(\.[0-9]{4}\.)', name, flags=re.IGNORECASE)
+ cleanedReleaseName = ''.join(splittedReleaseName)
+
+ match = re.compile(ur"[\w]+", re.UNICODE)
+ nameSplit = ''.join(match.findall(cleanedReleaseName))
+ titleSplit = ''.join(match.findall(frTitle.upper()))
+
+ if titleSplit == nameSplit: # and self.matchLanguage(media['info']['languages'], re.split('[\. ]', splittedReleaseName[-1])):
+ for torrent in data['response']['results'][0]['torrents']:
+
+ detail_url = self.urls['detail'] % torrent['torrentId']
+ if not self.getJsonData(detail_url)['response']['torrent']['filePath']:
+ detail = self.getJsonData(detail_url)['response']['torrent']['fileList'].lower()
+ else:
+ detail = self.getJsonData(detail_url)['response']['torrent']['filePath'].lower()
+
+ detailName = h.unescape(detail)
+
+ results.append({
+ 'id': torrent['torrentId'],
+ 'name': detailName, #name + '.' + torrent['encoding'] + '.' + torrent['media'] + '.' + torrent['format'],
+ 'Source': torrent['media'],
+ 'Resolution': torrent['encoding'],
+ 'url': self.urls['torrent'] % (torrent['torrentId'], authkey, passkey),
+ 'detail_url': self.urls['detailLink'] % (groupId, torrent['torrentId']),
+ 'date': tryInt(time.mktime(parse(torrent['time']).timetuple())),
+ 'size': tryInt(torrent['size']) / 1024 / 1024,
+ 'seeders': tryInt(torrent['seeders']),
+ 'leechers': tryInt(torrent['leechers']),
+ })
+
+ def getLoginParams(self):
+ return {
+ 'username': self.conf('username'),
+ 'password': self.conf('password'),
+ 'keeplogged': '1',
+ 'login': tryUrlencode('M\'identifier')
+ }
+
+ def getFrenchTitle(self, title):
+ """
+ This function uses TMDB API to get the French movie title of the given title.
+ """
+
+ url = "https://api.themoviedb.org/3/search/movie?api_key=0f3094295d96461eb7a672626c54574d&language=fr&query=%s" % title
+ log.debug('#### Looking on TMDB for French title of : ' + title)
+ #data = self.getJsonData(url, decode_from = 'utf8')
+ data = self.getJsonData(url)
+ try:
+ if data['results'] != None:
+ for res in data['results']:
+ #frTitle = res['title'].lower().replace(':','').replace(' ',' ').replace('-','')
+ frTitle = res['title'].lower().replace(':','').replace(' ',' ')
+ if frTitle == title:
+ log.debug('#### TMDB report identical FR and original title')
+ return None
+ else:
+ log.debug(u'#### TMDB API found a french title : ' + frTitle)
+ return frTitle
+ else:
+ log.debug('#### TMDB could not find a movie corresponding to : ' + title)
+ return None
+ except:
+ log.error('#### Failed to parse TMDB API: %s' % (traceback.format_exc()))
+
+
+ def loginSuccess(self, output):
+ return 'logout' in output.lower()
+
+ loginCheckSuccess = loginSuccess
+
+
+config = [{
+ 'name': 'hdonly',
+ 'groups': [
+ {
+ 'tab': 'searcher',
+ 'list': 'torrent_providers',
+ 'name': 'hdonly',
+ 'description': 'HD-Only.org ',
+ 'wizard': True,
+ 'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAIAAACQkWg2AAACf0lEQVR4nLXSz07TYAAA8O9bW9Ztbcfc2EZEHVu3GDc6wGgwgoGTXow3jUaDIgcv+AK+g/Hgn4MX7ibuICpiUBKBAeLI5sZIgEEGhnXZGPvabmv7dfUZPPh7hh8A/xuM9cVvTz69OTY0s7ByffjScjofDvRUTyQDQF8nk98/HImf/7S4fmt06P3XxcT0a3hvfDISCWd/Z4W4kMvmQnxILIkOxgEAkGXF7/ft7OzGYtF0OiMIfbncJnz55m2xuO/xeI6rx16fFyHJ5/MqsmICwDCMKJY4jhPFstvtrlQq/m4fea6nm6Ygx3V63S6Oc2KsuzpdRtsAAHZ0UG4XRxKEy8k67PZTTtbp5MjP899binLudPfW9q6NYWkrrek6be2gafrh/bv1Ono13y8eAQBIA3J3Yi9gIpFASG62WrWTWqg3QFiI2S9z5bL4eOKRjvHct2Sq/qyn8WSgPzqzPdXltZMLP5YMjNumCQEsiWWMcWFvLz4w+OHjrNFurteeAwIPXbm8urbGMvsHB2eJIB+pVKuB3kAqldIxVlXNztjVltpQW5retjbe1eCNenFaEC78LI6SUCHCPE+R1MHhH4qiQLttGgbWsa5puqrmN3NXh0eOtcEjdWyrfBFjcEabgg/GJ5qNBklRBjZomxVCC8sypgkAMCGEkiSZptlqtkwAgGmSFGlhHA6E6nabDaET2kpLCEFgkWVJlhUIIEKS1UrXEeJYpo4Qy7CEJDdCIT6ZXA6HI6urKx5PV35rU9V0SUK7hT2OY3+lNvhQcCm5Eg7zy8kkHL42upHOxIX+TCYdjcYKhR2v168oMgCAcThK5XIoGMzmcnFBSGfSA3Hhn7f+Ba/6N2aE1SAhAAAAAElFTkSuQmCC',
+ 'options': [
+ {
+ 'name': 'enabled',
+ 'type': 'enabler',
+ 'default': False
+ },
+ {
+ 'name': 'username',
+ 'default': '',
+ },
+ {
+ 'name': 'password',
+ 'default': '',
+ 'type': 'password',
+ },
+ {
+ 'name': 'seed_ratio',
+ 'label': 'Seed ratio',
+ 'type': 'float',
+ 'default': 1,
+ 'description': 'Will not be (re)moved until this seed ratio is met.',
+ },
+ {
+ 'name': 'seed_time',
+ 'label': 'Seed time',
+ 'type': 'int',
+ 'default': 40,
+ 'description': 'Will not be (re)moved until this seed time (in hours) is met.',
+ },
+ {
+ 'name': 'extra_score',
+ 'advanced': True,
+ 'label': 'Extra Score',
+ 'type': 'int',
+ 'default': 20,
+ 'description': 'Starting score for each release found via this provider.',
+ }
+ ],
+ }
+ ]
+}]
diff --git a/couchpotato/core/media/_base/providers/torrent/hdtorrents.py b/couchpotato/core/media/_base/providers/torrent/hdtorrents.py
new file mode 100644
index 0000000000..c6e88270b1
--- /dev/null
+++ b/couchpotato/core/media/_base/providers/torrent/hdtorrents.py
@@ -0,0 +1,179 @@
+import traceback
+
+from datetime import datetime
+from bs4 import BeautifulSoup
+from couchpotato.core.helpers.encoding import toUnicode
+from couchpotato.core.helpers.variable import tryInt
+from couchpotato.core.logger import CPLog
+from couchpotato.core.media._base.providers.torrent.base import TorrentProvider
+import re
+
+log = CPLog(__name__)
+
+
+class Base(TorrentProvider):
+
+ urls = {
+ 'login' : 'https://www.hdts.ru/login.php',
+ 'detail' : 'https://www.hdts.ru/details.php?id=%s',
+ 'search' : 'https://www.hdts.ru/torrents.php?search=%s&active=1',
+ 'home' : 'https://www.hdts.ru/%s',
+ }
+
+ http_time_between_calls = 1 #seconds
+
+ def _search(self, media, quality, results):
+
+ url = self.urls['search'] % (media['identifiers']['imdb'])#, cats[0])
+ data = self.getHTMLData(url)
+
+ if data:
+
+ # Remove HDTorrents NEW list
+ split_data = data.partition('\n\n\n\n')
+ data = split_data[2]
+
+ html = BeautifulSoup(data)
+ try:
+ #Get first entry in table
+ entries = html.find_all('td', attrs={'align' : 'center'})
+
+ if len(entries) < 21:
+ return
+
+ base = 21
+ extend = 0
+
+ try:
+ torrent_id = entries[base].find('div')['id']
+ except:
+ extend = 2
+ torrent_id = entries[base + extend].find('div')['id']
+
+ torrent_age = datetime.now() - datetime.strptime(entries[15 + extend].get_text()[:8] + ' ' + entries[15 + extend].get_text()[-10::], '%H:%M:%S %d/%m/%Y')
+
+ results.append({
+ 'id': torrent_id,
+ 'name': entries[20 + extend].find('a')['title'].strip('History - ').replace('Blu-ray', 'bd50'),
+ 'url': self.urls['home'] % entries[13 + extend].find('a')['href'],
+ 'detail_url': self.urls['detail'] % torrent_id,
+ 'size': self.parseSize(entries[16 + extend].get_text()),
+ 'age': torrent_age.days,
+ 'seeders': tryInt(entries[18 + extend].get_text()),
+ 'leechers': tryInt(entries[19 + extend].get_text()),
+ 'get_more_info': self.getMoreInfo,
+ })
+
+ #Now attempt to get any others
+ result_table = html.find('table', attrs = {'class' : 'mainblockcontenttt'})
+
+ if not result_table:
+ return
+
+ entries = result_table.find_all('td', attrs={'align' : 'center', 'class' : 'listas'})
+
+ if not entries:
+ return
+
+ for result in entries:
+ block2 = result.find_parent('tr').find_next_sibling('tr')
+ if not block2:
+ continue
+ cells = block2.find_all('td')
+ try:
+ extend = 0
+ detail = cells[1 + extend].find('a')['href']
+ except:
+ extend = 1
+ detail = cells[1 + extend].find('a')['href']
+ torrent_id = detail.replace('details.php?id=', '')
+ torrent_age = datetime.now() - datetime.strptime(cells[5 + extend].get_text(), '%H:%M:%S %d/%m/%Y')
+
+ results.append({
+ 'id': torrent_id,
+ 'name': cells[1 + extend].find('b').get_text().strip('\t ').replace('Blu-ray', 'bd50'),
+ 'url': self.urls['home'] % cells[3 + extend].find('a')['href'],
+ 'detail_url': self.urls['home'] % cells[1 + extend].find('a')['href'],
+ 'size': self.parseSize(cells[6 + extend].get_text()),
+ 'age': torrent_age.days,
+ 'seeders': tryInt(cells[8 + extend].get_text()),
+ 'leechers': tryInt(cells[9 + extend].get_text()),
+ 'get_more_info': self.getMoreInfo,
+ })
+
+ except:
+ log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))
+
+ def getMoreInfo(self, item):
+ full_description = self.getCache('hdtorrents.%s' % item['id'], item['detail_url'], cache_timeout = 25920000)
+ html = BeautifulSoup(full_description)
+ nfo_pre = html.find('div', attrs = {'id':'details_table'})
+ description = toUnicode(nfo_pre.text) if nfo_pre else ''
+
+ item['description'] = description
+ return item
+
+ def getLoginParams(self):
+ return {
+ 'uid': self.conf('username'),
+ 'pwd': self.conf('password'),
+ 'Login': 'submit',
+ }
+
+ def loginSuccess(self, output):
+ return "if your browser doesn\'t have javascript enabled" in output.lower() or 'logout.php' in output.lower()
+
+ loginCheckSuccess = loginSuccess
+
+
+config = [{
+ 'name': 'hdtorrents',
+ 'groups': [
+ {
+ 'tab': 'searcher',
+ 'list': 'torrent_providers',
+ 'name': 'HDTorrents',
+ 'description': 'See HDTorrents ',
+ 'wizard': True,
+ 'icon' : 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAIAAACQkWg2AAABfElEQVR4nM2SO47CMBCGx47zUhJeAiHRIp4NRSo6kCi4Aj0NBZwDUXMJLoI4AAVFCiQeBYIghMBxMPYWYVlg65X27zyebzz6fwP8O6HXg2VZpmlKKQFAfgshRCkNguATKBaL5XL5dDopisI555wHQSCEUFXVtm3P81ar1c9sRVEajQZCCGMMAAghAEgmk9lsFgAwxs1mM7oiEaCqqu/7uq4PBoPRaNTpdOLxuOu6lNLNZjMcDu/3OyEkDEP82AwhwzAwxplMxrZty7ISicRsNuv3+6lUynXd8/kcdb4BjLFarTYej9vt9uFw4JwDwHQ6TafTl8slMgO/uqTruud5vV5vMplIKY/HIwDkcrntdht1vwGMMSHEer2mlO73e9/38/l8t9tljM3nc03TngwAACGk1WohhGKxWPSUYRiFQqFUKkUL1+v1h4FPplKpVKvV3W5HCLndblLKMAwBQNM0x3EWi8VyufxM2nEc0zSFEFHSzzql9Hq9/volf6QvVr6n2OEjGOYAAAAASUVORK5CYII=',
+ 'options': [
+ {
+ 'name': 'enabled',
+ 'type': 'enabler',
+ 'default': False,
+ },
+ {
+ 'name': 'username',
+ 'default': '',
+ },
+ {
+ 'name': 'password',
+ 'default': '',
+ 'type': 'password',
+ },
+ {
+ 'name': 'seed_ratio',
+ 'label': 'Seed ratio',
+ 'type': 'float',
+ 'default': 1,
+ 'description': 'Will not be (re)moved until this seed ratio is met.',
+ },
+ {
+ 'name': 'seed_time',
+ 'label': 'Seed time',
+ 'type': 'int',
+ 'default': 40,
+ 'description': 'Will not be (re)moved until this seed time (in hours) is met.',
+ },
+ {
+ 'name': 'extra_score',
+ 'advanced': True,
+ 'label': 'Extra Score',
+ 'type': 'int',
+ 'default': 20,
+ 'description': 'Starting score for each release found via this provider.',
+ }
+ ],
+ },
+ ],
+}]
\ No newline at end of file
diff --git a/couchpotato/core/media/_base/providers/torrent/ilovetorrents.py b/couchpotato/core/media/_base/providers/torrent/ilovetorrents.py
new file mode 100644
index 0000000000..f9ec22dd8d
--- /dev/null
+++ b/couchpotato/core/media/_base/providers/torrent/ilovetorrents.py
@@ -0,0 +1,198 @@
+import re
+import traceback
+
+from bs4 import BeautifulSoup
+from couchpotato.core.helpers.encoding import toUnicode, tryUrlencode
+from couchpotato.core.helpers.variable import tryInt, splitString
+from couchpotato.core.logger import CPLog
+from couchpotato.core.media._base.providers.torrent.base import TorrentProvider
+
+
+log = CPLog(__name__)
+
+
+class Base(TorrentProvider):
+
+ urls = {
+ 'download': 'https://www.ilovetorrents.me/%s',
+ 'detail': 'https://www.ilovetorrents.me/%s',
+ 'search': 'https://www.ilovetorrents.me/browse.php?search=%s&page=%s&cat=%s',
+ 'test': 'https://www.ilovetorrents.me/',
+ 'login': 'https://www.ilovetorrents.me/takelogin.php',
+ 'login_check': 'https://www.ilovetorrents.me'
+ }
+
+ login_fail_msg = 'Login failed!'
+
+ cat_ids = [
+ (['80'], ['720p', '1080p']),
+ (['41'], ['brrip']),
+ (['19'], ['cam', 'ts', 'dvdrip', 'tc', 'r5', 'scr']),
+ (['20'], ['dvdr'])
+ ]
+
+ cat_backup_id = 200
+ disable_provider = False
+ http_time_between_calls = 1
+
+ def _searchOnTitle(self, title, movie, quality, results):
+
+ page = 0
+ total_pages = 1
+ cats = self.getCatId(quality)
+
+ while page < total_pages:
+
+ movieTitle = tryUrlencode('"%s" %s' % (title, movie['info']['year']))
+ search_url = self.urls['search'] % (movieTitle, page, cats[0])
+ page += 1
+
+ data = self.getHTMLData(search_url)
+ if data:
+ try:
+
+ results_table = None
+
+ data_split = splitString(data, '<table')
+ soup = None
+ for x in data_split:
+ soup = BeautifulSoup(x)
+ results_table = soup.find('table', attrs = {'class': 'koptekst'})
+ if results_table:
+ break
+
+ if results_table is None:
+ return
+
+ try:
+ pagelinks = soup.find_all(href = re.compile('page'))
+ page_numbers = [int(re.search('page=(?P<page_number>.+)&', i['href']).group('page_number')) for i in pagelinks]
+ total_pages = max(page_numbers)
+ except:
+ pass
+ total_pages = max(page_numbers)
+ except:
+ pass
+
+ entries = results_table.find_all('tr')
+
+ for result in entries[1:]:
+ prelink = result.find(href = re.compile('details.php'))
+ link = prelink['href']
+ download = result.find('a', href = re.compile('download.php'))['href']
+
+ if link and download:
+
+ def extra_score(item):
+ trusted = (0, 10)[result.find('img', alt = re.compile('Trusted')) is not None]
+ vip = (0, 20)[result.find('img', alt = re.compile('VIP')) is not None]
+ confirmed = (0, 30)[result.find('img', alt = re.compile('Helpers')) is not None]
+ moderated = (0, 50)[result.find('img', alt = re.compile('Moderator')) is not None]
+
+ return confirmed + trusted + vip + moderated
+
+ id = re.search('id=(?P<id>\d+)&', link).group('id')
+ url = self.urls['download'] % download
+
+ fileSize = self.parseSize(result.select('td.rowhead')[8].text)
+ results.append({
+ 'id': id,
+ 'name': toUnicode(prelink.find('b').text),
+ 'url': url,
+ 'detail_url': self.urls['detail'] % link,
+ 'size': fileSize,
+ 'seeders': tryInt(result.find_all('td')[2].string),
+ 'leechers': tryInt(result.find_all('td')[3].string),
+ 'extra_score': extra_score,
+ 'get_more_info': self.getMoreInfo
+ })
+
+ except:
+ log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))
+
+ def getLoginParams(self):
+ return {
+ 'username': self.conf('username'),
+ 'password': self.conf('password'),
+ 'submit': 'Welcome to ILT',
+ }
+
+ def getMoreInfo(self, item):
+ cache_key = 'ilt.%s' % item['id']
+ description = self.getCache(cache_key)
+
+ if not description:
+
+ try:
+ full_description = self.getHTMLData(item['detail_url'])
+ html = BeautifulSoup(full_description)
+ nfo_pre = html.find('td', attrs = {'class': 'main'}).findAll('table')[1]
+ description = toUnicode(nfo_pre.text) if nfo_pre else ''
+ except:
+ log.error('Failed getting more info for %s', item['name'])
+ description = ''
+
+ self.setCache(cache_key, description, timeout = 25920000)
+
+ item['description'] = description
+ return item
+
+ def loginSuccess(self, output):
+ return 'logout.php' in output.lower()
+
+ loginCheckSuccess = loginSuccess
+
+
+config = [{
+ 'name': 'ilovetorrents',
+ 'groups': [
+ {
+ 'tab': 'searcher',
+ 'list': 'torrent_providers',
+ 'name': 'ILoveTorrents',
+ 'description': 'Where the Love of Torrents is Born. ILoveTorrents ',
+ 'wizard': True,
+ 'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAIAAACQkWg2AAACPUlEQVR4AYWM0U9SbxjH3+v266I/oNvWZTfd2J1d0ZqbZEFwWrUImOKs4YwtumFKZvvlJJADR2TCQQlMPKg5NmpREgaekAPnBATKgmK1LqQlx6awHnZWF1Tr2Xfvvs+7z+dB0mlO7StpAh+M4S/2jbo3w8+xvJvlnSneEt+10zwer5ujNUOoChjALWFw5XOwdCAk/P57cGvPl+Oht0W7VJHN5NC1uW1BON4hGjXbwpVWMZhsy9v7sEIXAsDNYBXgdkEoIKyWD2CF8ut/aOXTZc/fBSgLWw1BgA4BDHOV0GkT90cBQpXahU5TFomsb38XhJC5/Tbh1P8c6rJlBeGfAeyMhUFwNVcs9lxV9Ot0dwmyd+mrNvRtbJ2fSPC6Z3Vsvub2z3sDFACAAYzk0+kUyxEkyfN7PopqNBro55A+P6yPKIrL5zF1HwjdeBJJCObIsZO79bo3sHhWhglo5WMV3mazuVPb4fLvSL8/FAkB1hK6rXQPwYhMyROK8VK5LAiH/jsMt0HQjxiN4/ePdoilllcqDyt3Mkg8mRBNbIhMb8RERkowQA/p76g0/UDDdCoNmDminM0qSK5vlpE5kugCHhNPxntwWmJPYTMZtYcFR6ABHQsVRlYLukVORaaULvqKI46keFSCv77kSPS6kxrPptLNDHgz16fWBtyxe6v5h08LUy+KI8ushqTPWWIX8Sg6b45IrGtyW6zXFb/hpQf9m3oqfWuB0fpSw0uZ4WB69En69uOk2rmO2V52PXj+A/mI4ESKpb2HAAAAAElFTkSuQmCC',
+ 'options': [
+ {
+ 'name': 'enabled',
+ 'type': 'enabler',
+ 'default': False
+ },
+ {
+ 'name': 'username',
+ 'label': 'Username',
+ 'type': 'string',
+ 'default': '',
+ 'description': 'The user name for your ILT account',
+ },
+ {
+ 'name': 'password',
+ 'label': 'Password',
+ 'type': 'password',
+ 'default': '',
+ 'description': 'The password for your ILT account.',
+ },
+ {
+ 'name': 'seed_ratio',
+ 'label': 'Seed ratio',
+ 'type': 'float',
+ 'default': 1,
+ 'description': 'Will not be (re)moved until this seed ratio is met.',
+ },
+ {
+ 'name': 'seed_time',
+ 'label': 'Seed time',
+ 'type': 'int',
+ 'default': 40,
+ 'description': 'Will not be (re)moved until this seed time (in hours) is met.',
+ },
+ {
+ 'name': 'extra_score',
+ 'advanced': True,
+ 'label': 'Extra Score',
+ 'type': 'int',
+ 'default': 0,
+ 'description': 'Starting score for each release found via this provider.',
+ }
+ ],
+ }
+ ]
+}]
diff --git a/couchpotato/core/media/_base/providers/torrent/iptorrents.py b/couchpotato/core/media/_base/providers/torrent/iptorrents.py
new file mode 100644
index 0000000000..e3331efccc
--- /dev/null
+++ b/couchpotato/core/media/_base/providers/torrent/iptorrents.py
@@ -0,0 +1,175 @@
+import traceback
+
+from bs4 import BeautifulSoup
+from couchpotato.core.helpers.encoding import tryUrlencode
+from couchpotato.core.helpers.variable import tryInt
+from couchpotato.core.logger import CPLog
+from couchpotato.core.media._base.providers.torrent.base import TorrentProvider
+import six
+
+
+log = CPLog(__name__)
+
+
+class Base(TorrentProvider):
+
+ urls = {
+ 'test': 'https://iptorrents.com/',
+ 'base_url': 'https://iptorrents.com',
+ 'login': 'https://iptorrents.com/take_login.php',
+ 'login_check': 'https://iptorrents.com/oldinbox.php',
+ 'search': 'https://iptorrents.com/t?%s%%s&q=%s&qf=ti#torrents&p=%%d',
+ }
+
+ http_time_between_calls = 1 # Seconds
+ login_fail_msg = 'Invalid username and password combination'
+ cat_backup_id = None
+
+ def buildUrl(self, title, media, quality):
+ return self._buildUrl(title.replace(':', ''), quality)
+
+ def _buildUrl(self, query, quality):
+
+ cat_ids = self.getCatId(quality)
+
+ if not cat_ids:
+ log.warning('Unable to find category ids for identifier "%s"', quality.get('identifier'))
+ return None
+
+ query = query.replace('"', '')
+
+ return self.urls['search'] % ("&".join(("%d=" % x) for x in cat_ids), tryUrlencode(query).replace('%', '%%'))
+
+ def _searchOnTitle(self, title, media, quality, results):
+
+ freeleech = '' if not self.conf('freeleech') else '&free=on'
+
+ base_url = self.buildUrl(title, media, quality)
+ if not base_url: return
+
+ pages = 1
+ current_page = 1
+ while current_page <= pages and not self.shuttingDown():
+ data = self.getHTMLData(base_url % (freeleech, current_page))
+
+ if data:
+ html = BeautifulSoup(data)
+
+ try:
+ page_nav = html.find('span', attrs = {'class': 'page_nav'})
+ if page_nav:
+ next_link = page_nav.find("a", text = "Next")
+ if next_link:
+ final_page_link = next_link.previous_sibling.previous_sibling
+ pages = int(final_page_link.string)
+
+ result_table = html.find('table', attrs={'id': 'torrents'})
+
+ if not result_table or 'nothing found!' in data.lower():
+ return
+
+ entries = result_table.find_all('tr')
+
+ for result in entries[1:]:
+
+ torrent = result.find_all('td')
+ if len(torrent) <= 1:
+ break
+
+ torrent = torrent[1].find('a')
+
+ torrent_id = torrent['href'].replace('/details.php?id=', '')
+ torrent_name = six.text_type(torrent.string)
+ torrent_download_url = self.urls['base_url'] + (result.find_all('td')[3].find('a'))['href'].replace(' ', '.')
+ torrent_details_url = self.urls['base_url'] + torrent['href']
+ torrent_size = self.parseSize(result.find_all('td')[5].string)
+ torrent_seeders = tryInt(result.find('td', attrs = {'class': 'ac t_seeders'}).string)
+ torrent_leechers = tryInt(result.find('td', attrs = {'class': 'ac t_leechers'}).string)
+
+ results.append({
+ 'id': torrent_id,
+ 'name': torrent_name,
+ 'url': torrent_download_url,
+ 'detail_url': torrent_details_url,
+ 'size': torrent_size,
+ 'seeders': torrent_seeders,
+ 'leechers': torrent_leechers,
+ })
+
+ except:
+ log.error('Failed to parsing %s: %s', (self.getName(), traceback.format_exc()))
+ break
+
+ current_page += 1
+
+ def getLoginParams(self):
+ return {
+ 'username': self.conf('username'),
+ 'password': self.conf('password'),
+ 'login': 'submit',
+ }
+
+ def loginSuccess(self, output):
+ return 'don\'t have an account' not in output.lower()
+
+ def loginCheckSuccess(self, output):
+ return '/logout.php' in output.lower()
+
+
+config = [{
+ 'name': 'iptorrents',
+ 'groups': [
+ {
+ 'tab': 'searcher',
+ 'list': 'torrent_providers',
+ 'name': 'IPTorrents',
+ 'description': 'IPTorrents ',
+ 'wizard': True,
+ 'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAABRklEQVR42qWQO0vDUBiG8zeKY3EqQUtNO7g0J6ZJ1+ifKIIFQXAqDYKCyaaYxM3udrZLHdRFhXrZ6liCW6mubfk874EESgqaeOCF7/Y8hEh41aq6yZi2nyZgBGya9XKtZs4No05pAkZV2YbEmyMMsoSxLQeC46wCTdPPY4HruPQyGIhF97qLWsS78Miydn4XdK46NJ9OsQAYBzMIMf8MQ9wtCnTdWCaIDx/u7uljOIQEe0hiIWPamSTLay3+RxOCSPI9+RJAo7Er9r2bnqjBFAqyK+VyK4f5/Cr5ni8OFKVCz49PFI5GdNvvU7ttE1M1zMU+8AMqFksEhrMnQsBDzqmDAwzx2ehRLwT7yyCI+vSC99c3mozH1NxrJgWWtR1BOECfEJSVCm6WCzJGCA7+IWhBsM4zywDPwEp4vCjx2DzBH2ODAfsDb33Ps6dQwJgAAAAASUVORK5CYII=',
+ 'options': [
+ {
+ 'name': 'enabled',
+ 'type': 'enabler',
+ 'default': False,
+ },
+ {
+ 'name': 'username',
+ 'default': '',
+ },
+ {
+ 'name': 'password',
+ 'default': '',
+ 'type': 'password',
+ },
+ {
+ 'name': 'freeleech',
+ 'default': 0,
+ 'type': 'bool',
+ 'description': 'Only search for [FreeLeech] torrents.',
+ },
+ {
+ 'name': 'seed_ratio',
+ 'label': 'Seed ratio',
+ 'type': 'float',
+ 'default': 1,
+ 'description': 'Will not be (re)moved until this seed ratio is met.',
+ },
+ {
+ 'name': 'seed_time',
+ 'label': 'Seed time',
+ 'type': 'int',
+ 'default': 40,
+ 'description': 'Will not be (re)moved until this seed time (in hours) is met.',
+ },
+ {
+ 'name': 'extra_score',
+ 'advanced': True,
+ 'label': 'Extra Score',
+ 'type': 'int',
+ 'default': 0,
+ 'description': 'Starting score for each release found via this provider.',
+ }
+ ],
+ },
+ ],
+}]
diff --git a/couchpotato/core/media/_base/providers/torrent/kickasstorrents.py b/couchpotato/core/media/_base/providers/torrent/kickasstorrents.py
new file mode 100644
index 0000000000..791286a4ec
--- /dev/null
+++ b/couchpotato/core/media/_base/providers/torrent/kickasstorrents.py
@@ -0,0 +1,196 @@
+import re
+import traceback
+
+from bs4 import BeautifulSoup
+from couchpotato.core.helpers.variable import tryInt, getIdentifier
+from couchpotato.core.logger import CPLog
+from couchpotato.core.media._base.providers.torrent.base import TorrentMagnetProvider
+
+
+log = CPLog(__name__)
+
+
+class Base(TorrentMagnetProvider):
+
+ urls = {
+ 'detail': '%s/%s',
+ 'search': '%s/%s-i%s/',
+ }
+
+ cat_ids = [
+ (['cam'], ['cam']),
+ (['telesync'], ['ts', 'tc']),
+ (['screener', 'tvrip'], ['screener']),
+ (['x264', '720p', '1080p', 'blu-ray', 'hdrip'], ['bd50', '1080p', '720p', 'brrip']),
+ (['dvdrip'], ['dvdrip']),
+ (['dvd'], ['dvdr']),
+ ]
+
+ http_time_between_calls = 1 # Seconds
+ cat_backup_id = None
+
+ proxy_list = [
+ 'http://flowtorrent.com',
+ 'http://katcr.to/span',
+ 'http://dx-torrente.com',
+ 'https://kickass.unblocked.vip',
+ 'https://katcr.co',
+ 'https://kat.how',
+ 'https://kickass.cd',
+ 'https://kickass.unlockproject.online',
+ 'https://kickasstorrents.video',
+ 'https://kat.al',
+ 'https://katproxy.al',
+ 'https://kattor.xyz',
+ 'https://kickass.unblocked.video',
+ 'https://kickass.unblocked.rocks',
+ 'https://kickass.immunicity.live',
+ 'https://kickass.immunicity.red',
+ 'https://kickass.immunicity.video',
+ 'https://kickass.bypassed.live',
+ 'https://kickass.bypassed.video',
+ 'https://kickass.bypassed.red',
+ 'https://kickass.unblocked.pw',
+ 'https://katproxy.com'
+ ]
+
+ def _search(self, media, quality, results):
+
+ data = self.getHTMLData(self.urls['search'] % (self.getDomain(), 'm', getIdentifier(media).replace('tt', '')))
+
+ if data:
+
+ cat_ids = self.getCatId(quality)
+ table_order = ['name', 'size', None, 'age', 'seeds', 'leechers']
+
+ try:
+ html = BeautifulSoup(data)
+ resultdiv = html.find('div', attrs = {'class': 'tabs'})
+ for result in resultdiv.find_all('div', recursive = False):
+ if result.get('id').lower().strip('tab-') not in cat_ids:
+ continue
+
+ try:
+ for temp in result.find_all('tr'):
+ if temp['class'] == 'firstr' or not temp.get('id'):
+ continue
+
+ new = {}
+
+ nr = 0
+ for td in temp.find_all('td'):
+ column_name = table_order[nr]
+ if column_name:
+
+ if column_name == 'name':
+ link = td.find('div', {'class': 'torrentname'}).find_all('a')[2]
+ new['id'] = temp.get('id')[-7:]
+ new['name'] = link.text
+ new['url'] = td.find('a', {'href': re.compile('magnet:*')})['href']
+ new['detail_url'] = self.urls['detail'] % (self.getDomain(), link['href'][1:])
+ new['verified'] = True if td.find('i', {'class': re.compile('verify')}) else False
+ new['score'] = 100 if new['verified'] else 0
+ elif column_name == 'size':
+ new['size'] = self.parseSize(td.text)
+ elif column_name == 'age':
+ new['age'] = self.ageToDays(td.text)
+ elif column_name == 'seeds':
+ new['seeders'] = tryInt(td.text)
+ elif column_name == 'leechers':
+ new['leechers'] = tryInt(td.text)
+
+ nr += 1
+
+ # Only store verified torrents
+ if self.conf('only_verified') and not new['verified']:
+ continue
+
+ results.append(new)
+ except:
+ log.error('Failed parsing KickAssTorrents: %s', traceback.format_exc())
+
+ except AttributeError:
+ log.debug('No search results found.')
+
+ def ageToDays(self, age_str):
+ age = 0
+ age_str = age_str.replace(' ', ' ')
+
+ regex = '(\d*.?\d+).(sec|hour|day|week|month|year)+'
+ matches = re.findall(regex, age_str)
+ for match in matches:
+ nr, size = match
+ mult = 1
+ if size == 'week':
+ mult = 7
+ elif size == 'month':
+ mult = 30.5
+ elif size == 'year':
+ mult = 365
+
+ age += tryInt(nr) * mult
+
+ return tryInt(age)
+
+ def isEnabled(self):
+ return super(Base, self).isEnabled() and self.getDomain()
+
+ def correctProxy(self, data):
+ return 'search query' in data.lower()
+
+
+config = [{
+ 'name': 'kickasstorrents',
+ 'groups': [
+ {
+ 'tab': 'searcher',
+ 'list': 'torrent_providers',
+ 'name': 'KickAssTorrents',
+ 'description': 'KickAssTorrents ',
+ 'wizard': True,
+ 'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAACD0lEQVR42pXK20uTcRjA8d/fsJsuap0orBuFlm3hir3JJvQOVmuwllN20Lb2isI2nVHKjBqrCWYaNnNuBrkSWxglhDVJOkBdSWUOq5FgoiOrMdRJ2xPPxW+8OUf1ge/FcyCUSVe2qedK5U/OxNTTXRNXEQ52Glb4O6dNEfK1auJkvRY7+/zxnQbA/D596laXcY3OWOiaIX2393SGznUmxkUo/YkDgqHemuzobQ7+NV+reo5Q1mqp68GABdY3+/EloO+JeN4tEqiFU8f3CwhyWo9E7wfMgI0ELTDx0AvjIxcgvZoC9P7NMN7yMmrFeoKa68rfDfmrARsNN0Ihr55cx59ctZWSiwS5bLKpwW4dYJH+M/B6/CYszE0BFZ+egG+Ln+HRoBN/cpl1pV6COIMkOnBVA/w+fXgGKJVM4LxhumMleoL06hJ3wKcCfl+/TAKKx17gnFePRwkqxR4BQSpFkbCrrQJueI7mWpyfATQ9OQY43+uv/+PutBycJ3y2qn2x7jY50GJvnwLKZjOwspyE5I8F4N+1yr1uwqcs3ym63Hwo29EiAyzUWQVr6WVAS4lZCPutQG/2GtES2YiW3d3XflYKtL72kzAcdEDHeSa3czeIMyyz/TApRKvcFfE0isHbJMnrHCf6xTLb1ORvWNlWo91cvHrJUQo0o6ZoRi7dIiT/g2WEDi27Iyov21xMCvgNfXvtwIACfHwAAAAASUVORK5CYII=',
+ 'options': [
+ {
+ 'name': 'enabled',
+ 'type': 'enabler',
+ 'default': True,
+ },
+ {
+ 'name': 'domain',
+ 'advanced': True,
+ 'label': 'Proxy server',
+ 'description': 'Domain for requests, keep empty to let CouchPotato pick.',
+ },
+ {
+ 'name': 'seed_ratio',
+ 'label': 'Seed ratio',
+ 'type': 'float',
+ 'default': 1,
+ 'description': 'Will not be (re)moved until this seed ratio is met.',
+ },
+ {
+ 'name': 'seed_time',
+ 'label': 'Seed time',
+ 'type': 'int',
+ 'default': 40,
+ 'description': 'Will not be (re)moved until this seed time (in hours) is met.',
+ },
+ {
+ 'name': 'only_verified',
+ 'advanced': True,
+ 'type': 'bool',
+ 'default': False,
+ 'description': 'Only search for verified releases.'
+ },
+ {
+ 'name': 'extra_score',
+ 'advanced': True,
+ 'label': 'Extra Score',
+ 'type': 'int',
+ 'default': 0,
+ 'description': 'Starting score for each release found via this provider.',
+ }
+ ],
+ },
+ ],
+}]
diff --git a/couchpotato/core/media/_base/providers/torrent/magnetdl.py b/couchpotato/core/media/_base/providers/torrent/magnetdl.py
new file mode 100755
index 0000000000..f2209dbd96
--- /dev/null
+++ b/couchpotato/core/media/_base/providers/torrent/magnetdl.py
@@ -0,0 +1,143 @@
+import re
+import traceback
+
+from bs4 import BeautifulSoup
+from couchpotato.core.helpers.encoding import toUnicode, tryUrlencode
+from couchpotato.core.helpers.variable import tryInt
+from couchpotato.core.logger import CPLog
+from couchpotato.core.media._base.providers.torrent.base import TorrentMagnetProvider
+import six
+
+
+log = CPLog(__name__)
+
class Base(TorrentMagnetProvider):
    """Magnet provider scraping magnetdl.com search pages.

    Results are paginated; the `max_pages` setting bounds how many pages
    are fetched per title.  All HTTP access goes through the project's
    rate-limited `getHTMLData` helper.
    """

    urls = {
        'search': 'http://www.magnetdl.com/%s/%s/se/desc/%s/',  # first-letter / slug / page
        'detail': 'http://www.magnetdl.com/%s'
    }

    http_time_between_calls = 1  # Seconds

    def _searchOnTitle(self, title, movie, quality, results):
        """Scrape search result pages for `title` and append release dicts to `results`."""

        # Site slugs look like "some-title-2015": colons dropped, spaces dashed.
        movieTitle = tryUrlencode('%s-%s' % (title.replace(':', '').replace(' ', '-'), movie['info']['year']))

        next_page = True
        current_page = 1
        max_page = self.conf('max_pages')
        while next_page and current_page <= max_page and not self.shuttingDown():

            next_page = False
            url = self.urls['search'] % (movieTitle[:1], movieTitle, current_page)
            data = self.getHTMLData(url)

            if data:
                html = BeautifulSoup(data)

                try:
                    result_table = html.find('table', attrs = {'class': 'download'})
                    if not result_table:
                        return

                    entries = result_table.find_all('tr')
                    for result in entries:

                        if result.find('td', attrs = {'class': 'n'}):
                            # Regular result row.
                            link = result.find('td', attrs = {'class': 'n'}).find('a')
                            url = result.find('td', attrs = {'class': 'm'}).find('a')
                            tds = result.find_all('td')
                            size = tds[5].contents[0].strip('\n ')
                            age = tds[2].contents[0].strip('\n ')

                            results.append({
                                'id': link['href'].split('/')[2],
                                'name': link['title'],
                                'url': url['href'],
                                'detail_url': self.urls['detail'] % link['href'],
                                'size': self.parseSize(size),
                                'age': self.ageToDays(age),
                                'seeders': tryInt(tds[-2].string),
                                'leechers': tryInt(tds[-1].string),
                            })
                        elif result.find('td', attrs = {'id': 'pages'}):
                            # Pagination row: keep looping only while the site
                            # advertises a link to the next page.
                            page_td = result.find('td', attrs = {'id': 'pages'})
                            next_title = 'Downloads | Page %s' % (current_page + 1)
                            if page_td.find('a', attrs = {'title': next_title}):
                                next_page = True

                except Exception:
                    # Narrowed from a bare `except:` so KeyboardInterrupt and
                    # SystemExit are no longer swallowed; also fixed the garbled
                    # "Failed to parsing" wording.
                    log.error('Failed parsing %s: %s', (self.getName(), traceback.format_exc()))

            current_page += 1

    def ageToDays(self, age_str):
        """Convert an age string such as '2 weeks' or '3 months' to whole days."""
        age = 0
        # NOTE(review): this replace looks like a mangled '&nbsp;' -> ' '
        # substitution; kept as-is pending confirmation against the site HTML.
        age_str = age_str.replace(' ', ' ')

        regex = '(\d*.?\d+).(sec|hour|day|week|month|year)+'
        matches = re.findall(regex, age_str)
        for match in matches:
            nr, size = match
            mult = 1
            if size == 'week':
                mult = 7
            elif size == 'month':
                mult = 30.5  # average month length
            elif size == 'year':
                mult = 365

            age += tryInt(nr) * mult

        return tryInt(age)
+
# CouchPotato settings schema for the MagnetDL provider; rendered in the
# "Searcher" settings tab under the torrent-providers list.
config = [{
    'name': 'magnetdl',
    'groups': [
        {
            'tab': 'searcher',
            'list': 'torrent_providers',
            'name': 'MagnetDL',
            'description': 'MagnetDL ',
            'wizard': True,
            'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAAXNSR0IArs4c6QAAAhBJREFUOBGFU89rE0EYfbObpk2qbpqY0ItV1NarFy1CqIeeehF68p6bP1Dx4Mn/QEQ8aDWHnEW8CLZo0ZMopQQtHiyWoqAgCdGNaxP3x8zOOjPJDBUW+2D4vtnvfW/mfcwSDNFoNO6L9MJwu1Sr1S7qmo7/5dTr9aTX66klc920O6ZxMprAGEO73VZbmachjWMEKKXwPE/1yTwNaRwjkFt/i1dRpPqcjWZaP3LNtUhwsrLofHinyEagtLqChfy2alxf3UoVKL14hoXxL+AxR/P5pi9JRiAGAQsH3mWehjghWRaE4NyG5hgBJubOooGAzNOgOEEETkagOUZAKtK9bjDkcELMDSx9UgzE1KdgAQW3LDwGbF2TUeyziW2rOouoEBjACNAErcBnysZY5SB2SoVzQ44KXtFZzE1WVD3oi4MEXxaMAE+s5e6OmIOwcfzsLMQ0rj4oOucfTkxMyZjY1qNjc6dU3fViMQeyLAXMuO8VCidz+0ffz0wC+UNHYJ04ja2Xr9H/6WK8VMT0fBV8cw29b1/x6TsHjaPpS53f28bnShC05jMjB/6EOJMPu7B9D4fnqjhanUV5qgJ/4w36ovlzJ4Efxjcv//Ce/nMDuZG4WyzcHs1Y18v7Ejhj4qEIk4wDv8Sz6fQJQpbcuuZ2bwzYuyzoDzLeEXZAiPy1F8UqC58tofEkQ8jSFdf9KDkafwGzPw7miJh+wQAAAABJRU5ErkJggg==',
            'options': [
                {
                    'name': 'enabled',
                    'type': 'enabler',
                    'default': False,
                },
                {
                    'name': 'seed_ratio',
                    'label': 'Seed ratio',
                    'type': 'float',
                    'default': 1,
                    'description': 'Will not be (re)moved until this seed ratio is met.',
                },
                {
                    'name': 'seed_time',
                    'label': 'Seed time',
                    'type': 'int',
                    'default': 40,
                    'description': 'Will not be (re)moved until this seed time (in hours) is met.',
                },
                {
                    'name': 'max_pages',
                    'label': 'Max Pages',
                    'type': 'int',
                    'default': 3,
                    'description': 'Maximum number of pages to scan.',
                },
                {
                    'name': 'extra_score',
                    'advanced': True,
                    'label': 'Extra Score',
                    'type': 'int',
                    'default': 0,
                    'description': 'Starting score for each release found via this provider.',
                }
            ],
        },
    ],
}]
diff --git a/couchpotato/core/media/_base/providers/torrent/morethantv.py b/couchpotato/core/media/_base/providers/torrent/morethantv.py
new file mode 100755
index 0000000000..2b8179c1e6
--- /dev/null
+++ b/couchpotato/core/media/_base/providers/torrent/morethantv.py
@@ -0,0 +1,135 @@
+import traceback
+
+from bs4 import BeautifulSoup
+from couchpotato.core.helpers.encoding import toUnicode, tryUrlencode
+from couchpotato.core.helpers.variable import tryInt
+from couchpotato.core.logger import CPLog
+from couchpotato.core.media._base.providers.torrent.base import TorrentProvider
+import six
+
+
+log = CPLog(__name__)
+
+
class Base(TorrentProvider):
    """Torrent provider for the private tracker MoreThanTV.

    Requires a username/password login; results are scraped from the
    site's torrents.php listing.
    """

    urls = {
        'test': 'https://www.morethan.tv/',
        'login': 'https://www.morethan.tv/login.php',
        'login_check': 'https://www.morethan.tv/inbox.php',
        'detail': 'https://www.morethan.tv/torrents.php?torrentid=%s',
        'search': 'https://www.morethan.tv/torrents.php?%s&filter_cat%%5B1%%5D=1&action=advanced&searchstr=%s',
        'download': 'https://www.morethan.tv/%s',
    }

    http_time_between_calls = 1  # Seconds
    login_fail_msg = 'You entered an invalid password.'

    def _searchOnTitle(self, title, movie, quality, results):
        """Scrape the search listing for `title` and append release dicts to `results`."""

        movieTitle = tryUrlencode('%s %s' % (title.replace(':', ''), movie['info']['year']))
        url = self.urls['search'] % (self.getSceneOnly(), movieTitle)
        data = self.getHTMLData(url)

        if data:
            html = BeautifulSoup(data)

            try:
                result_table = html.find('table', attrs = {'id': 'torrent_table'})
                if not result_table:
                    return

                entries = result_table.find_all('tr', attrs = {'class': 'torrent'})
                for result in entries:

                    link = result.find('a', attrs = {'dir': 'ltr'})
                    # The download anchor is the parent of the "Download" icon span.
                    url = result.find('span', attrs = {'title': 'Download'}).parent
                    tds = result.find_all('td')
                    size = tds[5].contents[0].strip('\n ')

                    results.append({
                        'id': link['href'].replace('torrents.php?id=', '').split('&')[0],
                        'name': link.contents[0],
                        'url': self.urls['download'] % url['href'],
                        'detail_url': self.urls['download'] % link['href'],
                        'size': self.parseSize(size),
                        'seeders': tryInt(tds[-2].string),
                        'leechers': tryInt(tds[-1].string),
                    })
            except Exception:
                # Narrowed from a bare `except:`; fixed the garbled
                # "Failed to parsing" wording.
                log.error('Failed parsing %s: %s', (self.getName(), traceback.format_exc()))

    def getLoginParams(self):
        """POST form fields for login.php."""
        return {
            'username': self.conf('username'),
            'password': self.conf('password'),
            'login': 'Log in',
        }

    def loginSuccess(self, output):
        # A logout link only appears for authenticated sessions.
        return 'logout.php' in output.lower()

    loginCheckSuccess = loginSuccess

    def getSceneOnly(self):
        """Extra query fragment restricting results to scene releases."""
        return 'releasetype=24' if self.conf('scene_only') else ''
+
+
# CouchPotato settings schema for the MoreThanTV provider (private tracker;
# needs account credentials).
config = [{
    'name': 'morethantv',
    'groups': [
        {
            'tab': 'searcher',
            'list': 'torrent_providers',
            'name': 'MoreThanTV',
            'description': 'MoreThanTV ',
            'wizard': True,
            'icon': 'AAABAAEAEBAAAAEAIABoBAAAFgAAACgAAAAQAAAAIAAAAAEAIAAAAAAAQAQAABMLAAATCwAAAAAAAAAAAAAiHaEEIh2hYCIdoaEiHaGaIh2hmCIdoZgiHaGYIh2hmCIdoZgiHaGYIh2hlyIdoZUiHaHAIh2htiIdoUEAAAAAIh2hJyIdoW0iHaFsIh2hbCIdoWsiHaFrIh2hayIdoWsiHaFrIh2hayIdoWoiHaFbIh2hsyIdof8iHaH7Ih2hQSIdoQciHaEDAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAiHaG8Ih2h/yIdoZgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAIh2hoSIdof8iHaGeAAAAAAAAAAAAAAAAIh2hIiIdoZkiHaGZIh2hIiIdoSIiHaGZIh2hiAAAAAAAAAAAAAAAACIdoaEiHaH/Ih2hngAAAAAAAAAAAAAAACIdoaoiHaH/Ih2h/yIdoUQiHaF3Ih2h/yIdof8iHaFEAAAAAAAAAAAiHaGiIh2h/yIdoZ4AAAAAAAAAAAAAAAAiHaG7Ih2h/yIdoREAAAAAIh2h7iIdof8iHaH/Ih2hqgAAAAAAAAAAIh2hoiIdof8iHaGeAAAAAAAAAAAAAAAAIh2huyIdof8AAAAAIh2hVSIdof8iHaGZIh2hzCIdof8iHaERAAAAACIdoaEiHaH/Ih2hngAAAAAAAAAAIh2hZiIdod0iHaH/Ih2hmSIdobsiHaH/Ih2hVSIdoXciHaH/Ih2hdwAAAAAiHaGhIh2h/yIdoZ4AAAAAAAAAACIdoZkiHaH/Ih2h/yIdof8iHaH/Ih2h7gAAAAAiHaEzIh2h/yIdobsAAAAAIh2hoSIdof8iHaGeAAAAAAAAAAAAAAAAIh2huyIdof8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACIdoaEiHaH/Ih2hngAAAAAAAAAAAAAAACIdobsiHaH/AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAiHaGhIh2h/yIdoZ4AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAIh2hoSIdof8iHaGeIh2hCyIdoQYAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACIdocUiHaH/Ih2hlSIdoSMiHaFwIh2hfSIdoXEiHaF3Ih2heiIdoXkiHaF5Ih2heSIdoXoiHaFzIh2hYiIdocIiHaH/Ih2h5yIdoS4AAAAAIh2hLyIdoXoiHaGMIh2hcyIdoXMiHaFzIh2hcyIdoXMiHaFyIh2heSIdoY0iHaFsIh2hSSIdoQoAAAAAAAEgNgAAb2Q/+CA1//hTdOA4cGngGCA54hhHZeQIaW7ACG50wIgAUOf4Q0Xn+E9S//hFVj/4PTYAAFJPgAFTUw==',
            'options': [
                {
                    'name': 'enabled',
                    'type': 'enabler',
                    'default': False,
                },
                {
                    'name': 'username',
                    'default': '',
                },
                {
                    'name': 'password',
                    'default': '',
                    'type': 'password',
                },
                {
                    'name': 'seed_ratio',
                    'label': 'Seed ratio',
                    'type': 'float',
                    'default': 1,
                    'description': 'Will not be (re)moved until this seed ratio is met.',
                },
                {
                    'name': 'seed_time',
                    'label': 'Seed time',
                    'type': 'int',
                    'default': 40,
                    'description': 'Will not be (re)moved until this seed time (in hours) is met.',
                },
                {
                    'name': 'scene_only',
                    'type': 'bool',
                    'default': False,
                    'description': 'Only allow scene releases.'
                },
                {
                    'name': 'extra_score',
                    'advanced': True,
                    'label': 'Extra Score',
                    'type': 'int',
                    'default': 0,
                    'description': 'Starting score for each release found via this provider.',
                }
            ],
        },
    ],
}]
diff --git a/couchpotato/core/media/_base/providers/torrent/nextorrent.py b/couchpotato/core/media/_base/providers/torrent/nextorrent.py
new file mode 100644
index 0000000000..99f22dd6d9
--- /dev/null
+++ b/couchpotato/core/media/_base/providers/torrent/nextorrent.py
@@ -0,0 +1,277 @@
+from bs4 import BeautifulSoup
+from couchpotato.core.helpers.variable import getTitle, tryInt
+from couchpotato.core.logger import CPLog
+from couchpotato.core.helpers.encoding import simplifyString, tryUrlencode
+from couchpotato.core.media._base.providers.torrent.base import TorrentProvider
+import cookielib
+import re
+import traceback
+import urllib
+import urllib2
+import ssl
+import unicodedata
+from couchpotato.core.helpers import namer_check
+from StringIO import StringIO
+import gzip
+log = CPLog(__name__)
+
+
class Base(TorrentProvider):
    """Scraper for the French tracker nextorrent.net.

    Pre-dates the project's HTTP helpers: talks straight to urllib2 with a
    shared cookie jar, and fetches each candidate's detail page to read
    the real release name and download link (Python 2 only code).
    """

    urls = {
        'test': 'https://www.nextorrent.net',
        'search': 'https://www.nextorrent.net/torrents/recherche/',
    }

    http_time_between_calls = 1 #seconds
    cat_backup_id = None
    # One cookie jar / opener shared by every request from this provider.
    cj = cookielib.CookieJar()
    opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))

    class NotLoggedInHTTPError(urllib2.HTTPError):
        """Raised when the site redirects a request to its login page."""
        def __init__(self, url, code, msg, headers, fp):
            urllib2.HTTPError.__init__(self, url, code, msg, headers, fp)

    class PTPHTTPRedirectHandler(urllib2.HTTPRedirectHandler):
        def http_error_302(self, req, fp, code, msg, headers):
            """Follow 302s normally, but flag redirects to login.php as not-logged-in."""
            log.debug("302 detected; redirected to %s" % headers['Location'])
            if (headers['Location'] != 'login.php'):
                return urllib2.HTTPRedirectHandler.http_error_302(self, req, fp, code, msg, headers)
            else:
                raise Base.NotLoggedInHTTPError(req.get_full_url(), code, msg, headers, fp)

    def _search(self, movie, quality, results):
        """Search the site for `movie` at `quality`; append result dicts to `results`."""

        # Cookie login
        if not self.last_login_check and not self.login():
            return


        # Title + quality identifier, dashes removed and space runs collapsed.
        TitleStringReal = (getTitle(movie['info']) + ' ' + simplifyString(quality['identifier'] )).replace('-',' ').replace(' ',' ').replace(' ',' ').replace(' ',' ').encode("utf8")

        # Normalize the search URL to plain ASCII before quoting it.
        URL = (self.urls['search']).encode('UTF8')
        URL=unicodedata.normalize('NFD',unicode(URL,"utf8","replace"))
        URL=URL.encode('ascii','ignore')


        URL = urllib2.quote(URL.encode('utf8'), ":/?=")
        URL = URL + TitleStringReal
        values = { }
        URLTST = (self.urls['test']).encode('UTF8')

        data_tmp = urllib.urlencode(values)


        # Empty POST body: urllib2 issues a POST when data is not None.
        req = urllib2.Request(URL, data_tmp, headers={'User-Agent' : "Mozilla/5.0"} )

        data = urllib2.urlopen(req)

        # Synthetic, provider-local release ids start at 1000.
        id = 1000

        if data:

            try:
                html = BeautifulSoup(data)
                erlin=0
                resultdiv=[]
                # Single-pass "loop": collect all result rows; any parse
                # failure just leaves resultdiv empty.
                while erlin==0:
                    try:
                        resultContent = html.findAll(attrs={'class': ["listing-torrent"]})[0]
                        if resultContent:
                            resultlin = resultContent.findAll(attrs={'class': ['table-hover']})[0].find('tbody')
                            if resultlin:
                                trList= resultlin.findAll("tr");
                                for tr in trList:
                                    resultdiv.append(tr)
                        erlin=1
                    except:
                        erlin=1
                nbrResult = 0
                for result in resultdiv:

                    try:
                        new = {}
                        firstTd = result.findAll("td")[0]
                        # Rows containing a <center> cell are headers/ads, not releases.
                        nothing = firstTd.findAll("center")
                        if nothing:
                            continue
                        name = firstTd.findAll("a")[1]['title'];
                        testname = namer_check.correctName(name,movie)
                        if testname == 0 and nbrResult < 5:
                            # Listing title did not match: fall back to the name
                            # shown on the detail page (first 5 results only).
                            values_sec = {}
                            url_sec = result.findAll("a")[1]['href'];
                            req_sec = urllib2.Request(URLTST+url_sec, values_sec, headers={'User-Agent': "Mozilla/5.0"})
                            data_sec = urllib2.urlopen(req_sec)
                            if data_sec:
                                html_sec = BeautifulSoup(data_sec)
                                classlin_sec = 'torrentsdesc'
                                resultlin_sec = html_sec.findAll(attrs={'id': [classlin_sec]})[0]
                                name = resultlin_sec.find("div").text
                                name = name.replace(".", " ")
                                testname = namer_check.correctName(name, movie)
                        if testname == 0:
                            continue
                        nbrResult += 1
                        # Fetch the detail page (again) for the download link.
                        values_sec = {}
                        detail_url = result.findAll("a")[1]['href'];
                        req_sec = urllib2.Request(URLTST+detail_url, values_sec, headers={'User-Agent': "Mozilla/5.0"})
                        data_sec = urllib2.urlopen(req_sec)
                        html_sec = BeautifulSoup(data_sec)
                        classlin_sec = 'download'
                        resultlin_sec = html_sec.findAll(attrs={'class': [classlin_sec]})[0]
                        url_download = resultlin_sec.findAll("a")[0]['href']
                        size = result.findAll("td")[1].text
                        seeder = result.findAll("td")[2].text
                        leecher = result.findAll("td")[3].text
                        age = '1'

                        # Require every word of the movie title in the release name.
                        verify = getTitle(movie['info']).split(' ')

                        add = 1

                        for verify_unit in verify:
                            if (name.lower().find(verify_unit.lower()) == -1) :
                                add = 0

                        def extra_check(item):
                            return True

                        if add == 1:

                            new['id'] = id
                            new['name'] = name.strip() + ' french'
                            new['url'] = url_download
                            new['detail_url'] = detail_url
                            new['size'] = self.parseSize(size)
                            new['age'] = 10
                            new['seeders'] = tryInt(seeder)
                            new['leechers'] = tryInt(leecher)
                            new['extra_check'] = extra_check
                            # Downloads must reuse the cookie-aware opener.
                            new['download'] = self.loginDownload

                            #new['score'] = fireEvent('score.calculate', new, movie, single = True)

                            #log.error('score')
                            #log.error(new['score'])

                            results.append(new)

                            id = id+1


                    except:
                        # NOTE(review): message names 'zetorrents' -- looks like
                        # a copy/paste from another provider module; confirm.
                        log.error('Failed parsing zetorrents: %s', traceback.format_exc())

            except AttributeError:
                log.debug('No search results found.')
        else:
            log.debug('No search results found.')

    def ageToDays(self, age_str):
        """Convert a French age string ('2 semaines', '3 mois') to whole days."""
        age = 0
        # NOTE(review): looks like a mangled '&nbsp;' -> ' ' replacement; confirm.
        age_str = age_str.replace(' ', ' ')

        regex = '(\d*.?\d+).(sec|heure|jour|semaine|mois|ans)+'
        matches = re.findall(regex, age_str)
        for match in matches:
            nr, size = match
            mult = 1
            if size == 'semaine':
                mult = 7
            elif size == 'mois':
                mult = 30.5
            elif size == 'ans':
                mult = 365

            age += tryInt(nr) * mult

        return tryInt(age)

    def login(self):
        # This site needs no authentication; report "logged in" unconditionally.
        return True



    def loginDownload(self, url = '', nzb_id = ''):
        """Download a torrent via the shared cookie opener; returns raw bytes or None."""
        try:
            URLTST = (self.urls['test']).encode('UTF8')
            request_headers = {
                'User-Agent': 'Mozilla/5.0',
                'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
                'Accept-Language': 'fr,fr-FR;q=0.8,en-US;q=0.5,en;q=0.3',
                'Accept-Encoding': 'gzip, deflate, br',
                'Referer': 'https://www.nextorrent.net/torrent/3183/beaut-cache',
                'Connection': 'keep-alive',
                'Upgrade-Insecure-Requests': '1'
            }
            request = urllib2.Request(URLTST+url, headers=request_headers)
            response = self.opener.open(request)
            # Transparently decompress gzip-encoded responses.
            if response.info().get('Content-Encoding') == 'gzip':
                buf = StringIO(response.read())
                f = gzip.GzipFile(fileobj=buf)
                data = f.read()
                f.close()
            else:
                data = response.read()
            response.close()
            return data
        except:
            log.error('Failed downloading from %s: %s', (self.getName(), traceback.format_exc()))

    def download(self, url = '', nzb_id = ''):
        """Plain (cookie-less) download fallback; returns raw bytes or None."""

        if not self.last_login_check and not self.login():
            return

        values = {
            'url' : '/'
        }
        data_tmp = urllib.urlencode(values)
        req = urllib2.Request(url, data_tmp, headers={'User-Agent' : "Mozilla/5.0"} )

        try:
            return urllib2.urlopen(req).read()
        except:
            log.error('Failed downloading from %s: %s', (self.getName(), traceback.format_exc()))
# CouchPotato settings schema for the nextorrent provider.
config = [{
    'name': 'nextorrent',
    'groups': [
        {
            'tab': 'searcher',
            'list': 'torrent_providers',
            'name': 'nextorrent',
            'description': 'See nextorrent ',
            'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAACXBIWXMAAAsTAAALEwEAmpwYAAAAIGNIUk0AAHolAACAgwAA+f8AAIDpAAB1MAAA6mAAADqYAAAXb5JfxUYAAAI5SURBVHjabJM/T+NAEMV/u57YsQ05pBS00EQiJFKIoOGTUFFDQY0QfAFo4FNQI0FDg+iogPTuafJHCiaOUbzra7DPubuVRlqtZt68eW9W+b7/sbGxsaK1BsBaS5ZlKKXKyPO8vBd5P7lforX+1ev1gna7XQIMBgPe398REUQEpRRpmrK1tcXu7i6e55FlGa+vr444jmP29vY4ODjAGEOtViOKIm5ubnh5eSEIAkSE7+9vWq0Wh4eHrK6ukiQJs9nM6CrtxWLBfD6n1WpxcnJCv99nNpthjEEpVeYVYa3lz0A/J89zkiSh0+lwenpKv98njmOMMfzv6DzPl4q11ogIcRzT6XQ4Ozuj2+0ynU5LkGqNLlQuipMkIY5jgiBgMpnQ7XY5Pz+n3W7z+fmJMWbJCV21yPM8hsMht7e3RFFEs9lkNBrR6/W4uLhgZ2cHYwzW2hJAqpQcx8FxHJ6enhgMBlxdXbG+vs54PGZ/f5/t7W2UUkt6aAClVDmbiNBoNHh+fuby8pLhcMja2hrz+Rzf96nVav9q8LcLIkIYhjw+PnJ9fc1oNCIMQ7IsK/UqGkv1ocrG8zwcx+H+/p56vc7x8TGNRoM0TZcZK6UQETzPK0NrjbWWMAwBuLu7Q2vN0dERzWaTxWJR6iXWWt7e3siyDBFhMpkwHo9xXZc8z6nX66RpysPDQ7mlhRNRFKF8359tbm4Ghbd5ni8tTEG36Oq6bvU3Jsp13Q+l1EpVmOqiFCCFVksOaP31ewAjgDxHOfDVqAAAAABJRU5ErkJggg==',
            'wizard': True,
            'options': [
                {
                    'name': 'enabled',
                    'type': 'enabler',
                    'default': False,
                },
                {
                    'name': 'seed_ratio',
                    'label': 'Seed ratio',
                    'type': 'float',
                    'default': 1,
                    'description': 'Will not be (re)moved until this seed ratio is met.',
                },
                {
                    'name': 'seed_time',
                    'label': 'Seed time',
                    'type': 'int',
                    'default': 40,
                    'description': 'Will not be (re)moved until this seed time (in hours) is met.',
                },
                {
                    'name': 'extra_score',
                    'advanced': True,
                    'label': 'Extra Score',
                    'type': 'int',
                    'default': 0,
                    'description': 'Starting score for each release found via this provider.',
                }
            ],
        },
    ],
}]
diff --git a/couchpotato/core/media/_base/providers/torrent/passthepopcorn.py b/couchpotato/core/media/_base/providers/torrent/passthepopcorn.py
new file mode 100644
index 0000000000..69aa07125a
--- /dev/null
+++ b/couchpotato/core/media/_base/providers/torrent/passthepopcorn.py
@@ -0,0 +1,304 @@
+import htmlentitydefs
+import json
+import re
+import time
+import traceback
+
+from couchpotato.core.helpers.encoding import tryUrlencode
+from couchpotato.core.helpers.variable import getTitle, tryInt, mergeDicts, getIdentifier
+from couchpotato.core.logger import CPLog
+from couchpotato.core.media._base.providers.torrent.base import TorrentProvider
+from dateutil.parser import parse
+import six
+
+
+log = CPLog(__name__)
+
+
class Base(TorrentProvider):
    """Torrent provider for the private tracker PassThePopcorn.

    Uses the site's JSON search API (torrents.php?json=noredirect).  The
    per-quality dicts `quality_search_params` and `post_search_filters`
    referenced below are expected to be supplied by a subclass.
    """

    urls = {
        'domain': 'https://passthepopcorn.me',
        'detail': 'https://passthepopcorn.me/torrents.php?torrentid=%s',
        'torrent': 'https://passthepopcorn.me/torrents.php',
        'login': 'https://passthepopcorn.me/ajax.php?action=login',
        'login_check': 'https://passthepopcorn.me/ajax.php?action=login',
        'search': 'https://passthepopcorn.me/search/%s/0/7/%d'
    }

    login_errors = 0  # consecutive failed logins; provider disables itself at 3
    http_time_between_calls = 2

    def _search(self, media, quality, results):
        """Query the JSON API for `media` and append scored result dicts to `results`."""

        movie_title = getTitle(media)
        quality_id = quality['identifier']

        params = mergeDicts(self.quality_search_params[quality_id].copy(), {
            'order_by': 'relevance',
            'order_way': 'descending',
            'searchstr': getIdentifier(media)  # search by IMDB id
        })

        url = '%s?json=noredirect&%s' % (self.urls['torrent'], tryUrlencode(params))
        res = self.getJsonData(url)

        try:
            if not 'Movies' in res:
                return

            # Auth tokens needed to build authenticated download URLs.
            authkey = res['AuthKey']
            passkey = res['PassKey']

            for ptpmovie in res['Movies']:
                if not 'Torrents' in ptpmovie:
                    log.debug('Movie %s (%s) has NO torrents', (ptpmovie['Title'], ptpmovie['Year']))
                    continue

                log.debug('Movie %s (%s) has %d torrents', (ptpmovie['Title'], ptpmovie['Year'], len(ptpmovie['Torrents'])))
                for torrent in ptpmovie['Torrents']:
                    torrent_id = tryInt(torrent['Id'])
                    torrentdesc = ''
                    torrentscore = 0

                    # Score boosts/penalties from the user's preferences.
                    if 'GoldenPopcorn' in torrent and torrent['GoldenPopcorn']:
                        torrentdesc += ' HQ'
                        if self.conf('prefer_golden'):
                            torrentscore += 5000
                    if 'FreeleechType' in torrent:
                        torrentdesc += ' Freeleech'
                        if self.conf('prefer_freeleech'):
                            torrentscore += 7000
                    if 'Scene' in torrent and torrent['Scene']:
                        torrentdesc += ' Scene'
                        if self.conf('prefer_scene'):
                            torrentscore += 2000
                        if self.conf('no_scene'):
                            torrentscore -= 2000
                    if 'RemasterTitle' in torrent and torrent['RemasterTitle']:
                        torrentdesc += self.htmlToASCII(' %s' % torrent['RemasterTitle'])

                    torrent_name = torrent['ReleaseName'] + ' - %s' % torrentdesc

                    def extra_check(item):
                        return self.torrentMeetsQualitySpec(item, quality_id)

                    results.append({
                        'id': torrent_id,
                        'name': torrent_name,
                        'Source': torrent['Source'],
                        'Checked': 'true' if torrent['Checked'] else 'false',
                        'Resolution': torrent['Resolution'],
                        'url': '%s?action=download&id=%d&authkey=%s&torrent_pass=%s' % (self.urls['torrent'], torrent_id, authkey, passkey),
                        'detail_url': self.urls['detail'] % torrent_id,
                        'date': tryInt(time.mktime(parse(torrent['UploadTime']).timetuple())),
                        'size': tryInt(torrent['Size']) / (1024 * 1024),  # bytes -> MB
                        'seeders': tryInt(torrent['Seeders']),
                        'leechers': tryInt(torrent['Leechers']),
                        'score': torrentscore,
                        'extra_check': extra_check,
                    })

        except Exception:
            # Narrowed from a bare `except:`.
            log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))

    def torrentMeetsQualitySpec(self, torrent, quality):
        """Apply the post-search field filters for `quality` to one result.

        Specs starting with '!' are negative rules (field must not equal the
        rest of the spec); other specs are positive alternatives (the field
        must equal at least one of them).
        """

        if not quality in self.post_search_filters:
            return True

        reqs = self.post_search_filters[quality].copy()

        if self.conf('require_approval'):
            log.debug('Config: Require staff-approval activated')
            reqs['Checked'] = ['true']

        for field, specs in reqs.items():
            matches_one = False
            seen_one = False

            if not field in torrent:
                log.debug('Torrent with ID %s has no field "%s"; cannot apply post-search-filter for quality "%s"', (torrent['id'], field, quality))
                continue

            for spec in specs:
                if len(spec) > 0 and spec[0] == '!':
                    # a negative rule; if the field matches, return False
                    if torrent[field] == spec[1:]:
                        return False
                else:
                    # a positive rule; if any of the possible positive values match the field, return True
                    log.debug('Checking if torrents field %s equals %s' % (field, spec))
                    seen_one = True
                    if torrent[field] == spec:
                        log.debug('Torrent satisfied %s == %s' % (field, spec))
                        matches_one = True

            if seen_one and not matches_one:
                log.debug('Torrent did not satisfy requirements, ignoring')
                return False

        return True

    def htmlToUnicode(self, text):
        """Decode HTML character references and named entities in `text`.

        Fixed: the ampersands of the numeric-reference prefixes and of the
        substitution pattern had been stripped, which left an invalid regex
        (leading '?') and made the numeric branch unreachable.
        """
        def fixup(m):
            txt = m.group(0)
            if txt[:2] == "&#":
                # numeric character reference
                try:
                    if txt[:3] == "&#x":
                        return unichr(int(txt[3:-1], 16))
                    else:
                        return unichr(int(txt[2:-1]))
                except ValueError:
                    pass
            else:
                # named entity
                try:
                    txt = unichr(htmlentitydefs.name2codepoint[txt[1:-1]])
                except KeyError:
                    pass
            return txt  # leave as is
        return re.sub("&#?\w+;", fixup, six.u('%s') % text)

    def unicodeToASCII(self, text):
        """Strip combining marks, reducing `text` to its base characters."""
        import unicodedata
        return ''.join(c for c in unicodedata.normalize('NFKD', text) if unicodedata.category(c) != 'Mn')

    def htmlToASCII(self, text):
        """Entity-decode, then ASCII-fold `text`."""
        return self.unicodeToASCII(self.htmlToUnicode(text))

    def getLoginParams(self):
        """POST body for the ajax login endpoint."""
        return {
            'username': self.conf('username'),
            'password': self.conf('password'),
            'passkey': self.conf('passkey'),
            'keeplogged': '1',
            'login': 'Login'
        }

    def loginSuccess(self, output):
        """Parse the ajax login response; auto-disable after 3 consecutive failures."""
        log.info('PTP Login response : %s', output)
        try:
            if json.loads(output).get('Result', '').lower() == 'ok':
                self.login_errors = 0
                return True
        except Exception:
            pass

        self.login_errors += 1
        if self.login_errors >= 3:
            # Repeated failed logins risk an IP ban, so turn ourselves off.
            log.error('Disabling PTP provider after repeated failed logins. '
                      'Please check your configuration. Re-enabling without '
                      'solving the problem may cause an IP ban. response=%s',
                      output)
            self.conf('enabled', value=False)
            self.login_errors = 0

        return False

    loginCheckSuccess = loginSuccess
+
+
# CouchPotato settings schema for the PassThePopcorn provider (private
# tracker; needs username/password/passkey plus scoring preferences).
config = [{
    'name': 'passthepopcorn',
    'groups': [
        {
            'tab': 'searcher',
            'list': 'torrent_providers',
            'name': 'PassThePopcorn',
            'description': 'PassThePopcorn.me ',
            'wizard': True,
            'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAIAAACQkWg2AAAARklEQVQoz2NgIAP8BwMiGWRpIN1JNWn/t6T9f5'
                    '32+W8GkNt7vzz9UkfarZVpb68BuWlbnqW1nU7L2DMx7eCoBlpqGOppCQB83zIgIg+wWQAAAABJRU5ErkJggg==',
            'options': [
                {
                    'name': 'enabled',
                    'type': 'enabler',
                    'default': False
                },
                {
                    'name': 'domain',
                    'advanced': True,
                    'label': 'Proxy server',
                    'description': 'Domain for requests (HTTPS only!), keep empty to use default (passthepopcorn.me).',
                },
                {
                    'name': 'username',
                    'default': '',
                },
                {
                    'name': 'password',
                    'default': '',
                    'type': 'password',
                },
                {
                    'name': 'passkey',
                    'default': '',
                },
                {
                    'name': 'prefer_golden',
                    'advanced': True,
                    'type': 'bool',
                    'label': 'Prefer golden',
                    'default': 1,
                    'description': 'Favors Golden Popcorn-releases over all other releases.'
                },
                {
                    'name': 'prefer_freeleech',
                    'advanced': True,
                    'type': 'bool',
                    'label': 'Prefer Freeleech',
                    'default': 1,
                    'description': 'Favors torrents marked as freeleech over all other releases.'
                },
                {
                    'name': 'prefer_scene',
                    'advanced': True,
                    'type': 'bool',
                    'label': 'Prefer scene',
                    'default': 0,
                    'description': 'Favors scene-releases over non-scene releases.'
                },
                {
                    'name': 'no_scene',
                    'advanced': True,
                    'type': 'bool',
                    'label': 'Reject scene',
                    'default': 0,
                    'description': 'Reject scene-releases over non-scene releases.'
                },
                {
                    'name': 'require_approval',
                    'advanced': True,
                    'type': 'bool',
                    'label': 'Require approval',
                    'default': 0,
                    'description': 'Require staff-approval for releases to be accepted.'
                },
                {
                    'name': 'seed_ratio',
                    'label': 'Seed ratio',
                    'type': 'float',
                    'default': 2,
                    'description': 'Will not be (re)moved until this seed ratio is met.',
                },
                {
                    'name': 'seed_time',
                    'label': 'Seed time',
                    'type': 'int',
                    'default': 96,
                    'description': 'Will not be (re)moved until this seed time (in hours) is met.',
                },
                {
                    'name': 'extra_score',
                    'advanced': True,
                    'label': 'Extra Score',
                    'type': 'int',
                    'default': 20,
                    'description': 'Starting score for each release found via this provider.',
                }
            ],
        }
    ]
}]
\ No newline at end of file
diff --git a/couchpotato/core/media/_base/providers/torrent/rarbg.py b/couchpotato/core/media/_base/providers/torrent/rarbg.py
new file mode 100644
index 0000000000..ace33dec3e
--- /dev/null
+++ b/couchpotato/core/media/_base/providers/torrent/rarbg.py
@@ -0,0 +1,230 @@
+import re
+import traceback
+import random
+from datetime import datetime
+
+from couchpotato import fireEvent
+from couchpotato.core.helpers.variable import tryInt, getIdentifier
+from couchpotato.core.logger import CPLog
+from couchpotato.core.media._base.providers.torrent.base import TorrentMagnetProvider
+
+log = CPLog(__name__)
+
class Base(TorrentMagnetProvider):
    """Magnet provider backed by the Rarbg torrentapi JSON service.

    The API requires a short-lived token (fetched and cached by `getToken`)
    and rate-limits callers; `http_time_between_calls` keeps requests spaced.
    """

    urls = {
        'test': 'https://torrentapi.org/pubapi_v2.php?app_id=couchpotato',
        'token': 'https://torrentapi.org/pubapi_v2.php?get_token=get_token&app_id=couchpotato',
        'search': 'https://torrentapi.org/pubapi_v2.php?token=%s&mode=search&search_imdb=%s&min_seeders=%s&min_leechers'
                  '=%s&ranked=%s&category=movies&format=json_extended&app_id=couchpotato',
    }

    http_time_between_calls = 2  # Seconds
    _token = 0  # 0 = no token fetched yet

    def _search(self, movie, quality, results):
        """Search the API by IMDB id and append result dicts to `results`."""
        hasresults = 0
        curryear = datetime.now().year
        movieid = getIdentifier(movie)

        try:
            movieyear = movie['info']['year']
        except Exception:
            log.error('RARBG: Couldn\'t get movie year')
            movieyear = 0

        self.getToken()

        # Skip future years: releases that don't exist yet aren't indexed.
        if (self._token != 0) and (movieyear == 0 or movieyear <= curryear):
            data = self.getJsonData(self.urls['search'] % (self._token, movieid, self.conf('min_seeders'),
                                    self.conf('min_leechers'), self.conf('ranked_only')), headers = self.getRequestHeaders())

            if data:
                if 'error_code' in data:
                    # Use .get(): some error responses omit the 'error' text,
                    # which previously raised KeyError here.
                    if data.get('error') == 'No results found':
                        log.debug('RARBG: No results returned from Rarbg')
                    else:
                        if data['error_code'] == 10:
                            # Error 10: the IMDB id is unknown to the API.
                            log.error(data.get('error'), movieid)
                        else:
                            log.error('RARBG: There is an error in the returned JSON: %s', data.get('error'))
                else:
                    hasresults = 1

                try:
                    if hasresults:
                        for result in data['torrent_results']:
                            name = result['title']
                            titlesplit = re.split('-', name)
                            releasegroup = titlesplit[len(titlesplit)-1]  # text after the last '-'

                            xtrainfo = self.find_info(name)
                            encoding = xtrainfo[0]
                            resolution = xtrainfo[1]
                            # source = xtrainfo[2]
                            pubdate = result['pubdate']  # .strip(' +0000')
                            try:
                                pubdate = datetime.strptime(pubdate, '%Y-%m-%d %H:%M:%S +0000')
                                now = datetime.utcnow()
                                age = (now - pubdate).days
                            except ValueError:
                                log.debug('RARBG: Bad pubdate')
                                age = 0

                            torrentscore = self.conf('extra_score')
                            seeders = tryInt(result['seeders'])
                            torrent_desc = '/ %s / %s / %s / %s seeders' % (releasegroup, resolution, encoding, seeders)

                            # Dead torrents get no score bonus at all.
                            if seeders == 0:
                                torrentscore = 0

                            sliceyear = result['pubdate'][0:4]
                            year = tryInt(sliceyear)

                            results.append({
                                'id': random.randint(100, 9999),  # API has no stable id; synthesize one
                                'name': re.sub('[^A-Za-z0-9\-_ \(\).]+', '', '%s (%s) %s' % (name, year, torrent_desc)),
                                'url': result['download'],
                                'detail_url': result['info_page'],
                                'size': tryInt(result['size']/1048576),  # rarbg sends in bytes
                                'seeders': tryInt(result['seeders']),
                                'leechers': tryInt(result['leechers']),
                                'age': tryInt(age),
                                'score': torrentscore
                            })

                except Exception:
                    # Widened from `except RuntimeError:` so malformed entries
                    # (KeyError/ValueError) are logged instead of aborting the
                    # whole search.
                    log.error('RARBG: Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))

    def getToken(self):
        """Fetch the API token (cached 15 min) and store it in `self._token`."""
        tokendata = self.getJsonData(self.urls['token'], cache_timeout = 900, headers = self.getRequestHeaders())
        if tokendata:
            try:
                token = tokendata['token']
                if self._token != token:
                    log.debug('RARBG: GOT TOKEN: %s', token)
                    self._token = token
            except Exception:
                log.error('RARBG: Failed getting token from Rarbg: %s', traceback.format_exc())
                self._token = 0

    def getRequestHeaders(self):
        """torrentapi requires an identifying User-Agent on every call."""
        return {
            'User-Agent': fireEvent('app.version', single = True)
        }

    @staticmethod
    def find_info(filename):
        """Guess [codec, resolution, source] from a release name; later patterns win."""
        # CODEC #
        codec = 'x264'
        if re.search('(?i)(x265|h265|h\.265)', filename):
            codec = 'x265'
        if re.search('(?i)(xvid)', filename):
            codec = 'xvid'

        # RESOLUTION #
        resolution = 'SD'
        if re.search('(?i)(720p)', filename):
            resolution = '720p'
        if re.search('(?i)(1080p)', filename):
            resolution = '1080p'
        if re.search('(?i)(2160p)', filename):
            resolution = '2160p'

        # SOURCE #
        source = 'HD-Rip'
        if re.search('(?i)(WEB-DL|WEB_DL|WEB\.DL)', filename):
            source = 'WEB-DL'
        if re.search('(?i)(WEBRIP)', filename):
            source = 'WEBRIP'
        if re.search('(?i)(DVDR|DVDRip|DVD-Rip)', filename):
            source = 'DVD-R'
        if re.search('(?i)(BRRIP|BDRIP|BluRay)', filename):
            source = 'BR-Rip'
        if re.search('(?i)BluRay(.*)REMUX', filename):
            source = 'BluRay-Remux'
        if re.search('(?i)BluRay(.*)\.(AVC|VC-1)\.', filename):
            source = 'BluRay-Full'

        return [codec, resolution, source]
+
# CouchPotato settings schema for the RARBG provider.  Fixed the user-facing
# "Minium" typo in two option descriptions.
config = [{
    'name': 'rarbg',
    'groups': [
        {
            'tab': 'searcher',
            'list': 'torrent_providers',
            'name': 'RARBG',
            'wizard': True,
            'description': 'RARBG ',
            'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAB+UlEQVQ4jYXTP2hcRxDH8c8JJZjbYNy8V7gIr0qhg5AiFnETX'
                    '+PmVAtSmKDaUhUiFyGxjXFlp0hhHy5cqFd9lSGcU55cBU6EEMIj5dsmMewSjNGmOJ3852wysMyww37n94OdXimlh49xDR/hxGr'
                    '8hZ/xx0qnlHK5lPKk/H/8U0r5oZTyQSmltzzr+AKfT+ed8UFLeHNAH1UVbA2r88NBfQcX8O2yv74sUqKNWT+T01sy2+zpUbS/w'
                    '/awvo7H+O0NQEA/LPKlQWXrSgUmR9HxcZQwmbZGw/pc4MsVAIT+IjcNw80aTjaaem1vPCNlGakj1C6uWFiqeDtyTvoyqAKhBn++E7CkxC6Zzjop57XpUSenpIuMhpXAc/zyHkAicRSjw6fHZ1ewPdqwszWAB2hXACln8+NWSlld9zX9YN7GhajQXz5+joPXR66de'
                    'U1J27Zi7FzaqE0OdmwNGzF2Ymzt3j+E8/gJH64AFlozKS4+Be7tjwyaIKVsOpnavX0II9x8ByDLKco5SwvjL0MI/z64tyOcwsf'
                    'jQw8PJvAdvsb6GSBlxI7UyTnD37i7OWhe3NrflvOit3djbDKdwR181SulXMXdrkubbdvKaOpK09S/4jP8iG9m8zmJjCoEg0HzO'
                    '77vna7zp7ju1TqfYIyZxT7dwCd4eWr7BR7h2X8S6gShJlbKYQAAAABJRU5ErkJggg==',
            'options': [
                {
                    'name': 'enabled',
                    'type': 'enabler',
                    'default': False,
                },
                {
                    'name': 'ranked_only',
                    'advanced': True,
                    'label': 'Ranked Only',
                    'type': 'int',
                    'default': 1,
                    'description': 'Only ranked torrents (internal), scene releases, rarbg releases. '
                                   'Enter 1 (true) or 0 (false)',
                },
                {
                    'name': 'min_seeders',
                    'advanced': True,
                    'label': 'Minimum Seeders',
                    'type': 'int',
                    'default': 10,
                    'description': 'Minimum amount of seeders the release must have.',
                },
                {
                    'name': 'min_leechers',
                    'advanced': True,
                    'label': 'Minimum leechers',
                    'type': 'int',
                    'default': 0,
                    'description': 'Minimum amount of leechers the release must have.',
                },
                {
                    'name': 'extra_score',
                    'advanced': True,
                    'label': 'Extra Score',
                    'type': 'int',
                    'default': 0,
                    'description': 'Starting score for each release found via this provider.',
                }
            ],
        },
    ],
}]
diff --git a/couchpotato/core/media/_base/providers/torrent/sceneaccess.py b/couchpotato/core/media/_base/providers/torrent/sceneaccess.py
new file mode 100644
index 0000000000..9db63f7c02
--- /dev/null
+++ b/couchpotato/core/media/_base/providers/torrent/sceneaccess.py
@@ -0,0 +1,137 @@
+import traceback
+
+from bs4 import BeautifulSoup
+from couchpotato.core.helpers.encoding import toUnicode
+from couchpotato.core.helpers.variable import tryInt
+from couchpotato.core.logger import CPLog
+from couchpotato.core.media._base.providers.torrent.base import TorrentProvider
+
+
+log = CPLog(__name__)
+
+
+class Base(TorrentProvider):
+
+ urls = {
+ 'test': 'https://www.sceneaccess.eu/',
+ 'login': 'https://www.sceneaccess.eu/login',
+ 'login_check': 'https://www.sceneaccess.eu/inbox',
+ 'detail': 'https://www.sceneaccess.eu/details?id=%s',
+ 'search': 'https://www.sceneaccess.eu/browse?c%d=%d',
+ 'archive': 'https://www.sceneaccess.eu/archive?&c%d=%d',
+ 'download': 'https://www.sceneaccess.eu/%s',
+ }
+
+ http_time_between_calls = 1 # Seconds
+ login_fail_msg = 'Username or password incorrect'
+
+ def _searchOnTitle(self, title, media, quality, results):
+
+ url = self.buildUrl(title, media, quality)
+ data = self.getHTMLData(url)
+
+ if data:
+ html = BeautifulSoup(data)
+
+ try:
+ resultsTable = html.find('table', attrs = {'id': 'torrents-table'})
+ if resultsTable is None:
+ return
+
+ entries = resultsTable.find_all('tr', attrs = {'class': 'tt_row'})
+ for result in entries:
+
+ link = result.find('td', attrs = {'class': 'ttr_name'}).find('a')
+ url = result.find('td', attrs = {'class': 'td_dl'}).find('a')
+ seeders = result.find('td', attrs = {'class': 'ttr_seeders'}).find('a')
+ leechers = result.find('td', attrs = {'class': 'ttr_leechers'}).find('a')
+ torrent_id = link['href'].replace('details?id=', '')
+
+ results.append({
+ 'id': torrent_id,
+ 'name': link['title'],
+ 'url': self.urls['download'] % url['href'],
+ 'detail_url': self.urls['detail'] % torrent_id,
+ 'size': self.parseSize(result.find('td', attrs = {'class': 'ttr_size'}).contents[0]),
+ 'seeders': tryInt(seeders.string) if seeders else 0,
+ 'leechers': tryInt(leechers.string) if leechers else 0,
+ 'get_more_info': self.getMoreInfo,
+ })
+
+ except:
+ log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))
+
+ def getMoreInfo(self, item):
+ full_description = self.getCache('sceneaccess.%s' % item['id'], item['detail_url'], cache_timeout = 25920000)
+ html = BeautifulSoup(full_description)
+ nfo_pre = html.find('div', attrs = {'id': 'details_table'})
+ description = toUnicode(nfo_pre.text) if nfo_pre else ''
+
+ item['description'] = description
+ return item
+
+ # Login
+ def getLoginParams(self):
+ return {
+ 'username': self.conf('username'),
+ 'password': self.conf('password'),
+ 'submit': 'come on in',
+ }
+
+ def loginSuccess(self, output):
+ return '/inbox' in output.lower()
+
+ loginCheckSuccess = loginSuccess
+
+
+config = [{
+ 'name': 'sceneaccess',
+ 'groups': [
+ {
+ 'tab': 'searcher',
+ 'list': 'torrent_providers',
+ 'name': 'SceneAccess',
+ 'description': 'SceneAccess ',
+ 'wizard': True,
+ 'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAIAAACQkWg2AAAABnRSTlMAAAAAAABupgeRAAACT0lEQVR4AYVQS0sbURidO3OTmajJ5FElTTOkPmZ01GhHrIq0aoWAj1Vc+A/cuRMXbl24V9SlCGqrLhVFCrooEhCp2BAx0mobTY2kaR7qmOm87EXL1EWxh29xL+c7nPMdgGHYO5bF/gdbefnr6WlbWRnxluMwAB4Z0uEgXa7nwaDL7+/RNPzxbYvb/XJ0FBYVfd/ayh0fQ4qCGEHcm0KLRZUk7Pb2YRJPRwcsKMidnKD3t9VVT3s7BDh+z5FOZ3Vfn3h+Hltfx00mRRSRWFcUmmVNhYVqPn8dj3va2oh+txvcQRVF9ebm1fi4k+dRFbosY5rm4Hk7xxULQnJnx93S4g0EIEEQRoDLo6PrWEw8Pc0eHLwYGopMTDirqlJ7eyhYYGHhfgfHCcKYksZGVB/NcXI2mw6HhZERqrjYTNPHi4tFPh8aJIYIhgPlcCRDoZLW1s75+Z/7+59nZ/OJhLWigqAoKZX6Mjf3dXkZ3pydGYLc4aEoCCkInzQ1fRobS2xuvllaonkedfArnY5OTdGVldBkOADgqq2Nr6z8CIWaJietDHOhKB+HhwFKC6Gnq4ukKJvP9zcSbjYDXbeVlkKzuZBhnnV3e3t6UOmaJO0ODibW1hB1GYkg8R/gup7Z3TVZLJ5AILW9LcZiVpYtYBhw16O3t7cauckyeF9Tgz0ATpL2+nopmWycmbnY2LiKRjFk6/d7+/vRJfl4HGzV1T0UIM43MGBvaIBWK/YvwM5w+IMgGH8tkyEgvIpE7M3Nt6qqZrNyOq1kMmouh455Ggz+BhKY4GEc2CfwAAAAAElFTkSuQmCC',
+ 'options': [
+ {
+ 'name': 'enabled',
+ 'type': 'enabler',
+ 'default': False,
+ },
+ {
+ 'name': 'username',
+ 'default': '',
+ },
+ {
+ 'name': 'password',
+ 'default': '',
+ 'type': 'password',
+ },
+ {
+ 'name': 'seed_ratio',
+ 'label': 'Seed ratio',
+ 'type': 'float',
+ 'default': 1,
+ 'description': 'Will not be (re)moved until this seed ratio is met.',
+ },
+ {
+ 'name': 'seed_time',
+ 'label': 'Seed time',
+ 'type': 'int',
+ 'default': 40,
+ 'description': 'Will not be (re)moved until this seed time (in hours) is met.',
+ },
+ {
+ 'name': 'extra_score',
+ 'advanced': True,
+ 'label': 'Extra Score',
+ 'type': 'int',
+ 'default': 20,
+ 'description': 'Starting score for each release found via this provider.',
+ }
+ ],
+ },
+ ],
+}]
diff --git a/couchpotato/core/media/_base/providers/torrent/scenetime.py b/couchpotato/core/media/_base/providers/torrent/scenetime.py
new file mode 100644
index 0000000000..6c10cc27a7
--- /dev/null
+++ b/couchpotato/core/media/_base/providers/torrent/scenetime.py
@@ -0,0 +1,139 @@
+import traceback
+
+from bs4 import BeautifulSoup
+from couchpotato.core.helpers.encoding import tryUrlencode, toUnicode
+from couchpotato.core.helpers.variable import tryInt
+from couchpotato.core.logger import CPLog
+from couchpotato.core.media._base.providers.torrent.base import TorrentProvider
+
+
+log = CPLog(__name__)
+
+
+class Base(TorrentProvider):
+
+ urls = {
+ 'test': 'https://www.scenetime.com/',
+ 'login': 'https://www.scenetime.com/takelogin.php',
+ 'login_check': 'https://www.scenetime.com/inbox.php',
+ 'detail': 'https://www.scenetime.com/details.php?id=%s',
+ 'search': 'https://www.scenetime.com/browse.php?search=%s&cat=%d',
+ 'download': 'https://www.scenetime.com/download.php/%s/%s',
+ }
+
+ cat_ids = [
+ ([59], ['720p', '1080p']),
+ ([81], ['brrip']),
+ ([102], ['bd50']),
+ ([3], ['dvdrip']),
+ ]
+
+ http_time_between_calls = 1 # Seconds
+ login_fail_msg = 'Username or password incorrect'
+ cat_backup_id = None
+
+ def _searchOnTitle(self, title, movie, quality, results):
+
+ url = self.urls['search'] % (tryUrlencode('%s %s' % (title.replace(':', ''), movie['info']['year'])), self.getCatId(quality)[0])
+ data = self.getHTMLData(url)
+
+ if data:
+ html = BeautifulSoup(data)
+
+ try:
+ result_table = html.find(attrs = {'id': 'torrenttable'})
+
+ if not result_table:
+ log.error('failed to generate result_table')
+ return
+
+ entries = result_table.find_all('tr')
+
+ for result in entries[1:]:
+ cells = result.find_all('td')
+ link = result.find('a', attrs = {'class': 'index'})
+ torrent_id = link['href'].replace('download.php/','').split('/')[0]
+ torrent_file = link['href'].replace('download.php/','').split('/')[1]
+ size = self.parseSize(cells[5].contents[0] + cells[5].contents[2])
+ name_row = cells[1].contents[0]
+ name = name_row.getText()
+ seeders_row = cells[6].contents[0]
+ seeders = tryInt(seeders_row.getText())
+
+
+ results.append({
+ 'id': torrent_id,
+ 'name': name,
+ 'url': self.urls['download'] % (torrent_id,torrent_file),
+ 'detail_url': self.urls['detail'] % torrent_id,
+ 'size': size,
+ 'seeders': seeders,
+ })
+
+ except:
+ log.error('Failed parsing %s: %s', (self.getName(), traceback.format_exc()))
+
+ def getLoginParams(self):
+ return {
+ 'login': 'submit',
+ 'username': self.conf('username'),
+ 'password': self.conf('password'),
+ }
+
+ def loginSuccess(self, output):
+ return 'logout.php' in output.lower() or 'welcome' in output.lower()
+
+ loginCheckSuccess = loginSuccess
+
+
+config = [{
+ 'name': 'scenetime',
+ 'groups': [
+ {
+ 'tab': 'searcher',
+ 'list': 'torrent_providers',
+ 'name': 'SceneTime',
+ 'description': 'SceneTime ',
+ 'wizard': True,
+ 'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAAJcEhZcwAADsMAAA7DAcdvqGQAAAAYdEVYdFNvZnR3YXJlAHBhaW50Lm5ldCA0LjAuNWWFMmUAAAIwSURBVDhPZZFbSBRRGMePs7Mzjma7+9AWWxpeYrXLkrcIfUwIpIeK3tO1hWhfltKwhyJMFIqgCz2EpdHWRun2oGG02O2hlYyypY21CygrlbhRIYHizO6/mdk5szPtB785hzm//zeXj7Q89q4I4QaQBx6ZHQY84Efq4Rrbg4rxVmx61AJ2pFY/twzvhP1hU4ZwIQ8K7mw1wdzdhrrxQ7g8E0Q09R6flubw+mcM7tHWPJcwt91ghuTQUDWYW8rejbrRA3i1OA0xLYGWJO8bxw6q50YIc70CRoQbNbj2MQgpkwsrpTYI7ze5CoS5UgYjpTd3YWphWg1l1CuwLC4jufQNtaG9JleBWM67YKR6oBlzf+bVoPIOUiaNwVgIzcF9sF3aknMvZFfCnnNCp9eJqqsNSKQ+qw2USssNzrzoh9Dnynmaq6yEPe2AkfX9lXjy5akWz9ZkcgqVFz0mj0KsJ0tgROh2oCfSJ3/3ihaHPA0Rh+/7UNhtN7kKhAsI+J+a3u2If49r8WxFZiawtsuR5xLumBUU3s/B2bkOm0+V4V3yrTwFOgcg8SMBe8CmuxTC+SygFB3l8TzxDLOpWYiSqEWzFf0ahc2/RncphPcSUIqPWPFhPqZFcrUqraLzXkA+Z3WXQvh2eaNR3MHmNVB+YPjNMMqPb9Q9I6YGRR0WTMQj6hOV+f/++wuDLwfg7iqH4GVMQQrh28w3Nvgd2H22Hk09jag6UYoSH4/C9gKTo9NG8A8MPUM4DJp74gAAAABJRU5ErkJggg==',
+ 'options': [
+ {
+ 'name': 'enabled',
+ 'type': 'enabler',
+ 'default': False,
+ },
+ {
+ 'name': 'username',
+ 'default': '',
+ },
+ {
+ 'name': 'password',
+ 'default': '',
+ 'type': 'password',
+ },
+ {
+ 'name': 'seed_ratio',
+ 'label': 'Seed ratio',
+ 'type': 'float',
+ 'default': 1,
+ 'description': 'Will not be (re)moved until this seed ratio is met.',
+ },
+ {
+ 'name': 'seed_time',
+ 'label': 'Seed time',
+ 'type': 'int',
+ 'default': 40,
+ 'description': 'Will not be (re)moved until this seed time (in hours) is met.',
+ },
+ {
+ 'name': 'extra_score',
+ 'advanced': True,
+ 'label': 'Extra Score',
+ 'type': 'int',
+ 'default': 20,
+ 'description': 'Starting score for each release found via this provider.',
+ }
+ ],
+ },
+ ],
+}]
diff --git a/couchpotato/core/media/_base/providers/torrent/t411.py b/couchpotato/core/media/_base/providers/torrent/t411.py
new file mode 100644
index 0000000000..c6531bb9ca
--- /dev/null
+++ b/couchpotato/core/media/_base/providers/torrent/t411.py
@@ -0,0 +1,316 @@
+from bs4 import BeautifulSoup
+from couchpotato.core.helpers.variable import tryInt
+from couchpotato.core.logger import CPLog
+from couchpotato.core.helpers.encoding import simplifyString, tryUrlencode
+from couchpotato.core.media._base.providers.torrent.base import TorrentProvider
+from couchpotato.core.helpers import namer_check
+import json
+import re
+import unicodedata
+import traceback
+import urllib2
+import sys
+import urllib
+
+log = CPLog(__name__)
+
+import ast
+import operator
+
+_binOps = {
+ ast.Add: operator.add,
+ ast.Sub: operator.sub,
+ ast.Mult: operator.mul,
+ ast.Div: operator.div,
+ ast.Mod: operator.mod
+}
+
+def _arithmeticEval(s):
+ """
+ A safe eval supporting basic arithmetic operations.
+
+ :param s: expression to evaluate
+ :return: value
+ """
+ node = ast.parse(s, mode='eval')
+
+ def _eval(node):
+ if isinstance(node, ast.Expression):
+ return _eval(node.body)
+ elif isinstance(node, ast.Str):
+ return node.s
+ elif isinstance(node, ast.Num):
+ return node.n
+ elif isinstance(node, ast.BinOp):
+ return _binOps[type(node.op)](_eval(node.left), _eval(node.right))
+ else:
+ raise Exception('Unsupported type {}'.format(node))
+
+ return _eval(node.body)
+
+class Base(TorrentProvider):
+
+
+ urls = {
+ 'test' : 'https://www.t411.al',
+ 'login' : 'https://www.t411.al/users/login/',
+ 'login_check': 'https://www.t411.al',
+ 'detail': 'https://www.t411.al/torrents/?id=%s',
+ 'search': 'https://www.t411.al/torrents/search/?search=%s %s',
+ 'download' : 'http://www.t411.al/torrents/download/?id=%s',
+ }
+
+ http_time_between_calls = 1 #seconds
+ cat_backup_id = None
+
+ def _searchOnTitle(self, title, movie, quality, results):
+
+ # test the new title and search for it if valid
+ newTitle = self.getFrenchTitle(title, str(movie['info']['year']))
+ request = ''
+ if isinstance(title, str):
+ title = title.decode('utf8')
+ if newTitle is not None:
+ request = (u'(' + title + u')|(' + newTitle + u')').replace(':', '')
+ else:
+ request = title.replace(':', '')
+ request = urllib2.quote(request.encode('iso-8859-1'))
+
+ log.debug('Looking on T411 for movie named %s or %s' % (title, newTitle))
+ url = self.urls['search'] % (request, acceptableQualityTerms(quality))
+ data = self.getHTMLData(url)
+
+ log.debug('Received data from T411')
+ if data:
+ log.debug('Data is valid from T411')
+ html = BeautifulSoup(data)
+
+ try:
+ result_table = html.find('table', attrs = {'class':'results'})
+ if not result_table:
+ log.debug('No table results from T411')
+ return
+
+ torrents = result_table.find('tbody').findAll('tr')
+ for result in torrents:
+ idt = result.findAll('td')[2].findAll('a')[0]['href'][1:].replace('torrents/nfo/?id=','')
+ release_name = result.findAll('td')[1].findAll('a')[0]['title']
+ words = title.lower().replace(':',' ').split()
+ if self.conf('ignore_year'):
+ index = release_name.lower().find(words[-1] if words[-1] != 'the' else words[-2]) + len(words[-1] if words[-1] != 'the' else words[-2]) +1
+ index2 = index + 7
+ if not str(movie['info']['year']) in release_name[index:index2]:
+ release_name = release_name[0:index] + '(' + str(movie['info']['year']) + ').' + release_name[index:]
+ #if 'the' not in release_name.lower() and (words[-1] == 'the' or words[0] == 'the'):
+ # release_name = 'the.' + release_name
+ if 'multi' in release_name.lower():
+ release_name = release_name.lower().replace('truefrench','').replace('french','')
+ age = result.findAll('td')[4].text
+ log.debug('result : name=%s, detail_url=%s' % (replaceTitle(release_name, title, newTitle), (self.urls['detail'] % idt)))
+ results.append({
+ 'id': idt,
+ 'name': replaceTitle(release_name, title, newTitle),
+ 'url': self.urls['download'] % idt,
+ 'detail_url': self.urls['detail'] % idt,
+ 'age' : age,
+ 'size': self.parseSize(str(result.findAll('td')[5].text)),
+ 'seeders': result.findAll('td')[7].text,
+ 'leechers': result.findAll('td')[8].text,
+ })
+
+ except:
+ log.error('Failed to parse T411: %s' % (traceback.format_exc()))
+
+ def getLoginParams(self):
+ log.debug('Getting login params for T411')
+ return {
+ 'login': self.conf('username'),
+ 'password': self.conf('password'),
+ 'remember': '1',
+ 'url': '/'
+ }
+
+ def loginSuccess(self, output):
+ log.debug('Checking login success for T411: %s' % ('True' if ('logout' in output.lower()) else 'False'))
+
+ if 'confirmer le captcha' in output.lower():
+ log.debug('Too many login attempts. A captcha is displayed.')
+ output = self._solveCaptcha(output)
+
+ return 'logout' in output.lower()
+
+ def _solveCaptcha(self, output):
+ """
+ When trying to connect too many times with wrong password, a captcha can be requested.
+ This captcha is really simple and can be solved by the provider.
+ 204 + 65 =
+
+
+
+
+ :param output: initial login output
+ :return: output after captcha resolution
+ """
+ html = BeautifulSoup(output)
+
+ query = html.find('input', {'name': 'captchaQuery'})
+ token = html.find('input', {'name': 'captchaToken'})
+ if not query or not token:
+ log.error('Unable to solve login captcha.')
+ return output
+
+ query_expr = query.attrs['value'].strip('= ')
+ log.debug(u'Captcha query: ' + query_expr)
+ answer = _arithmeticEval(query_expr)
+
+ log.debug(u'Captcha answer: %s' % answer)
+
+ login_params = self.getLoginParams()
+
+ login_params['captchaAnswer'] = answer
+ login_params['captchaQuery'] = query.attrs['value']
+ login_params['captchaToken'] = token.attrs['value']
+
+ return self.urlopen(self.urls['login'], data = login_params)
+
+ loginCheckSuccess = loginSuccess
+
+ def getFrenchTitle(self, title, year):
+ """
+ This function uses TMDB API to get the French movie title of the given title.
+ """
+
+ url = "https://api.themoviedb.org/3/search/movie?api_key=0f3094295d96461eb7a672626c54574d&language=fr&query=%s" % title
+ log.debug('Looking on TMDB for French title of : ' + title)
+ #data = self.getJsonData(url, decode_from = 'utf8')
+ data = self.getJsonData(url)
+ try:
+ if data['results'] != None:
+ for res in data['results']:
+ yearI = res['release_date']
+ if year in yearI:
+ break
+ frTitle = res['title'].lower()
+ if frTitle == title:
+ log.debug('TMDB report identical FR and original title')
+ return None
+ else:
+ log.debug(u'L\'API TMDB a trouve un titre francais => ' + frTitle)
+ return frTitle
+ else:
+ log.debug('TMDB could not find a movie corresponding to : ' + title)
+ return None
+ except:
+ log.error('Failed to parse TMDB API: %s' % (traceback.format_exc()))
+
+def acceptableQualityTerms(quality):
+ """
+ This function retrieves all the acceptable terms for a quality (e.g. hdrip and bdrip for brrip)
+ Then it creates regex accepted by t411 to search for one of this term
+ t411 have to handle alternatives as OR and then the regex is firstAlternative|secondAlternative
+
+ In alternatives, there can be "doubled terms" as "br rip" or "bd rip" for brrip
+ These doubled terms have to be handled as AND and are then (firstBit&secondBit)
+ """
+ alternatives = quality.get('alternative', [])
+ # first acceptable term is the identifier itself
+ acceptableTerms = [quality['identifier']]
+ log.debug('Requesting alternative quality terms for : ' + str(acceptableTerms) )
+ # handle single terms
+ acceptableTerms.extend([ term for term in alternatives if type(term) == type('') ])
+ # handle doubled terms (such as 'dvd rip')
+ doubledTerms = [ term for term in alternatives if type(term) == type(('', '')) ]
+ acceptableTerms.extend([ '('+first+'%26'+second+')' for (first,second) in doubledTerms ])
+ # join everything and return
+ log.debug('Found alternative quality terms : ' + str(acceptableTerms).replace('%26', ' '))
+ return '|'.join(acceptableTerms)
+
+def replaceTitle(releaseNameI, titleI, newTitleI):
+ """
+ This function replaces the title in the release name with the original one,
+ so that couchpotato recognises it as a valid release.
+ """
+
+ if newTitleI is None: # if the newTitle is empty, do nothing
+ return releaseNameI
+ else:
+ # input as lower case
+ releaseName = releaseNameI.lower()
+ title = titleI.lower()
+ newTitle = newTitleI.lower()
+ log.debug('Replacing -- ' + newTitle + ' -- in the release -- ' + releaseName + ' -- by the original title -- ' + title)
+ separatedWords = []
+ for s in releaseName.split(' '):
+ separatedWords.extend(s.split('.'))
+ # test how far the release name corresponds to the original title
+ index = 0
+ while separatedWords[index] in title.split(' '):
+ index += 1
+ # test how far the release name corresponds to the new title
+ newIndex = 0
+ while separatedWords[newIndex] in newTitle.split(' '):
+ newIndex += 1
+ # then determine if it corresponds to the new title or old title
+ if index >= newIndex:
+ # the release name corresponds to the original title. SO no change needed
+ log.debug('The release name is already corresponding. Changed nothing.')
+ return releaseNameI
+ else:
+ # otherwise, we replace the french title by the original title
+ finalName = [title]
+ finalName.extend(separatedWords[newIndex:])
+ newReleaseName = ' '.join(finalName)
+ log.debug('The new release name is : ' + newReleaseName)
+ return newReleaseName
+
+config = [{
+ 'name': 't411',
+ 'groups': [
+ {
+ 'tab': 'searcher',
+ 'list': 'torrent_providers',
+ 'name': 't411',
+ 'description': 'See T411 ',
+ 'icon' : 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAIAAACQkWg2AAAAA3NCSVQICAjb4U/gAAACdklEQVQokW2RX0hTcRTHz+/+cbvz3m1srbv8M6Ws6SbK1hRTkUoKIui5jIJ8sz9vQQTRQxDRexCkIGgmSC+B1YNWNCIrRQ3Z2PyTf5pb2/S2ud2/2723hyIt/b4cDud7+H4OB2CXrpOW+wYLYPju0R66DTABEAWYB7i6lwHtbEYAKi5crPE36Wa6QGKQyYylk1cePPwX4FqPquSSiZVHAN+Gh/JihpezUpGXinmxkBN5Lvjm5U4/1hzwS5JsJIkzkWnmZDtSZF2WQZZ0SSoIgiSJXq+37VjLNhLL7h/ofUzg0Dceutl1ejHOoa0fScUQW1rouXQWw3ANULXbt8cNJ7pudPrcd/pmLp8PBNpa344HDYTqYc2Ls58G+59sI/0uTgBTKj78OQIdTb6W5gKg+PpKaPprUoLB/mBHY/v/CacARru7ucaG6NCrj5vp2rpDWvmBDa83PzDwdJVOl5Zo8S+JQhoD7E/CGMBEKLyYTNWjLKNl6KkP5OsXbE1leGqdNFoBd3K034jbcJzYfqfPTpUZjOHkmkmS+SpzinXYlxdGM+4I5ezkoyHSUcIjHXHY3wWPqM9SOg2ataFMlvQ6YWs5FIvaKxxgmzEfrWYOazanXuAxAGBwGALoNcWePxtx8cKR4wGuBFZo05TI2gXViE3SaiyVn3bQRgU0DABuVdHn7na6iuSMAOk2X6WnrqLcMVlqTVQ5lHw2VaQURtNN+7YoD7L4cQCQKGo9GJsUEGC6bNPfzc1xpZAjWuH7+3u+xHy+BuFLLkYsx7la0yrCAeqdZg0h1kDQFkpVlSyvrG1krM5mNbtK/9wM0wddjF6UNywElpWVX6HUDxDMdBkmAAAAAElFTkSuQmCC',
+ 'wizard': True,
+ 'options': [
+ {
+ 'name': 'enabled',
+ 'type': 'enabler',
+ 'default': False,
+ },
+ {
+ 'name': 'username',
+ 'default': '',
+ },
+ {
+ 'name': 'password',
+ 'default': '',
+ 'type': 'password',
+ },
+ {
+ 'name': 'seed_ratio',
+ 'label': 'Seed ratio',
+ 'type': 'float',
+ 'default': 1,
+ 'description': 'Will not be (re)moved until this seed ratio is met.',
+ },
+ {
+ 'name': 'seed_time',
+ 'label': 'Seed time',
+ 'type': 'int',
+ 'default': 40,
+ 'description': 'Will not be (re)moved until this seed time (in hours) is met.',
+ },
+ {
+ 'name': 'extra_score',
+ 'advanced': True,
+ 'label': 'Extra Score',
+ 'type': 'int',
+ 'default': 0,
+ 'description': 'Starting score for each release found via this provider.',
+ }
+ ],
+ },
+ ],
+}]
diff --git a/couchpotato/core/media/_base/providers/torrent/t411api.py b/couchpotato/core/media/_base/providers/torrent/t411api.py
new file mode 100644
index 0000000000..41ec5f47fb
--- /dev/null
+++ b/couchpotato/core/media/_base/providers/torrent/t411api.py
@@ -0,0 +1,149 @@
+from couchpotato.core.helpers.variable import tryInt
+from couchpotato.core.logger import CPLog
+from couchpotato.core.helpers.encoding import simplifyString, tryUrlencode
+from couchpotato.core.media._base.providers.torrent.base import TorrentProvider
+from couchpotato.core.helpers import namer_check
+import json, traceback
+import re
+import unicodedata
+
+log = CPLog(__name__)
+
+
+class Base(TorrentProvider):
+
+ urls = {
+ 'test': 'https://www.t411.al/',
+ 'torrent': 'https://www.t411.al/torrents/%s',
+ 'login': 'https://api.t411.al/auth',
+ 'detail': 'https://www.t411.al/torrents/?id=%s',
+ 'search': 'https://api.t411.al/torrents/search/%s',
+ 'download': 'https://api.t411.al/torrents/download/%s',
+ }
+
+ http_time_between_calls = 1 #seconds
+ auth_token = ''
+
+ def _search(self, movie, quality, results):
+ headers = {}
+ headers['Authorization'] = self.auth_token
+
+ for title in movie['info']['titles']:
+ try:
+ TitleStringReal = str(title.encode("latin-1").replace('-',' '))
+
+ url = self.urls['search'] % TitleStringReal
+ url = url + '?cat=631&limit=100'
+ data = self.getJsonData(url, None, headers = headers)
+
+ for currentresult in data['torrents']:
+ if currentresult['categoryname'] in ['Film', 'Animation']:
+ name = currentresult['name']
+ splittedReleaseName = re.split('(?:\(|\.|\s)([0-9]{4})(?:\)|\.|\s)', name, flags=re.IGNORECASE)
+
+ if len(splittedReleaseName) > 1:
+ cleanedReleaseName = ''.join(splittedReleaseName[0:-2])
+
+ match = re.compile(ur"[\w]+", re.UNICODE)
+ nameSplit = ''.join(match.findall(unicodedata.normalize('NFKD', cleanedReleaseName.upper()).encode('ASCII','ignore')))
+ titleSplit = ''.join(match.findall(unicodedata.normalize('NFKD', title.upper()).encode('ASCII','ignore')))
+
+ if titleSplit == nameSplit:
+ new = {}
+ new['id'] = currentresult['id']
+ new['name'] = name
+ new['url'] = self.urls['download'] % (currentresult['id'])
+ new['detail_url'] = self.urls['torrent'] % (currentresult['rewritename'])
+ new['size'] = tryInt(currentresult['size']) / 1024 / 1024
+ new['seeders'] = tryInt(currentresult['seeders'])
+ new['leechers'] = tryInt(currentresult['leechers'])
+ new['authtoken'] = self.auth_token
+ new['download'] = self.loginDownload
+
+ results.append(new)
+ except:
+ continue
+
+ return
+
+ def getLoginParams(self):
+ return {
+ 'username': self.conf('username'),
+ 'password': self.conf('password')
+ }
+
+ def loginSuccess(self, output):
+ try:
+ jsonData = json.loads(output)
+ if jsonData.get('uid', '') != '':
+ self.auth_token = jsonData.get('token', '')
+ return True
+ except:
+ pass
+
+ return False
+
+ loginCheckSuccess = loginSuccess
+
+ def loginDownload(self, url = '', nzb_id = ''):
+ try:
+ if not self.login():
+ log.error('Failed downloading from %s', self.getName())
+
+ headers = {}
+ headers['Authorization'] = self.auth_token
+ return self.urlopen(url, None, headers = headers)
+ except:
+ log.error('Failed downloading from %s: %s', (self.getName(), traceback.format_exc()))
+
+config = [{
+ 'name': 't411api',
+ 'groups': [
+ {
+ 'tab': 'searcher',
+ 'list': 'torrent_providers',
+ 'name': 't411 api version',
+ 'description': 'See T411 ',
+ 'icon' : 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAIAAACQkWg2AAAAA3NCSVQICAjb4U/gAAACdklEQVQokW2RX0hTcRTHz+/+cbvz3m1srbv8M6Ws6SbK1hRTkUoKIui5jIJ8sz9vQQTRQxDRexCkIGgmSC+B1YNWNCIrRQ3Z2PyTf5pb2/S2ud2/2723hyIt/b4cDud7+H4OB2CXrpOW+wYLYPju0R66DTABEAWYB7i6lwHtbEYAKi5crPE36Wa6QGKQyYylk1cePPwX4FqPquSSiZVHAN+Gh/JihpezUpGXinmxkBN5Lvjm5U4/1hzwS5JsJIkzkWnmZDtSZF2WQZZ0SSoIgiSJXq+37VjLNhLL7h/ofUzg0Dceutl1ejHOoa0fScUQW1rouXQWw3ANULXbt8cNJ7pudPrcd/pmLp8PBNpa344HDYTqYc2Ls58G+59sI/0uTgBTKj78OQIdTb6W5gKg+PpKaPprUoLB/mBHY/v/CacARru7ucaG6NCrj5vp2rpDWvmBDa83PzDwdJVOl5Zo8S+JQhoD7E/CGMBEKLyYTNWjLKNl6KkP5OsXbE1leGqdNFoBd3K034jbcJzYfqfPTpUZjOHkmkmS+SpzinXYlxdGM+4I5ezkoyHSUcIjHXHY3wWPqM9SOg2ataFMlvQ6YWs5FIvaKxxgmzEfrWYOazanXuAxAGBwGALoNcWePxtx8cKR4wGuBFZo05TI2gXViE3SaiyVn3bQRgU0DABuVdHn7na6iuSMAOk2X6WnrqLcMVlqTVQ5lHw2VaQURtNN+7YoD7L4cQCQKGo9GJsUEGC6bNPfzc1xpZAjWuH7+3u+xHy+BuFLLkYsx7la0yrCAeqdZg0h1kDQFkpVlSyvrG1krM5mNbtK/9wM0wddjF6UNywElpWVX6HUDxDMdBkmAAAAAElFTkSuQmCC',
+ 'wizard': True,
+ 'options': [
+ {
+ 'name': 'enabled',
+ 'type': 'enabler',
+ 'default': False,
+ },
+ {
+ 'name': 'username',
+ 'default': '',
+ },
+ {
+ 'name': 'password',
+ 'default': '',
+ 'type': 'password',
+ },
+ {
+ 'name': 'seed_ratio',
+ 'label': 'Seed ratio',
+ 'type': 'float',
+ 'default': 1,
+ 'description': 'Will not be (re)moved until this seed ratio is met.',
+ },
+ {
+ 'name': 'seed_time',
+ 'label': 'Seed time',
+ 'type': 'int',
+ 'default': 40,
+ 'description': 'Will not be (re)moved until this seed time (in hours) is met.',
+ },
+ {
+ 'name': 'extra_score',
+ 'advanced': True,
+ 'label': 'Extra Score',
+ 'type': 'int',
+ 'default': 0,
+ 'description': 'Starting score for each release found via this provider.',
+ }
+ ],
+ },
+ ],
+}]
diff --git a/couchpotato/core/media/_base/providers/torrent/thepiratebay.py b/couchpotato/core/media/_base/providers/torrent/thepiratebay.py
new file mode 100644
index 0000000000..4e84ceb013
--- /dev/null
+++ b/couchpotato/core/media/_base/providers/torrent/thepiratebay.py
@@ -0,0 +1,205 @@
+import re
+import traceback
+
+from bs4 import BeautifulSoup
+from couchpotato.core.event import addEvent
+from couchpotato.core.helpers.encoding import toUnicode
+from couchpotato.core.helpers.variable import tryInt
+from couchpotato.core.logger import CPLog
+from couchpotato.core.media._base.providers.torrent.base import TorrentMagnetProvider
+import six
+
+
+log = CPLog(__name__)
+
+
+class Base(TorrentMagnetProvider):
+
+ urls = {
+ 'detail': '%s/torrent/%s',
+ 'search': '%s/search/%%s/%%s/7/%%s'
+ }
+
+ cat_backup_id = 200
+ disable_provider = False
+ http_time_between_calls = 0
+
+ proxy_list = [
+ 'https://pirateproxy.cat',
+ 'https://pirateproxy.wf',
+ 'https://pirateproxy.tf',
+ 'https://urbanproxy.eu',
+ 'https://piratebays.co',
+ 'https://pirateproxy.yt',
+ 'https://thepiratebay.uk.net',
+ 'https://thebay.tv',
+ 'https://thepirateproxy.co',
+ 'https://theproxypirate.pw',
+ 'https://arrr.xyz',
+ 'https://tpb.dashitz.com'
+ ]
+
+ def __init__(self):
+ super(Base, self).__init__()
+
+ addEvent('app.test', self.doTest)
+
+ def _search(self, media, quality, results):
+
+ page = 0
+ total_pages = 1
+ cats = self.getCatId(quality)
+
+ base_search_url = self.urls['search'] % self.getDomain()
+
+ while page < total_pages:
+
+ search_url = base_search_url % self.buildUrl(media, page, cats)
+
+ page += 1
+
+ data = self.getHTMLData(search_url)
+
+ if data:
+ try:
+ soup = BeautifulSoup(data)
+ results_table = soup.find('table', attrs = {'id': 'searchResult'})
+
+ if not results_table:
+ return
+
+ try:
+ total_pages = len(soup.find('div', attrs = {'align': 'center'}).find_all('a'))
+ except:
+ pass
+
+ entries = results_table.find_all('tr')
+ for result in entries[1:]:
+ link = result.find(href = re.compile('torrent\/\d+\/'))
+ download = result.find(href = re.compile('magnet:'))
+
+ try:
+ size = re.search('Size (?P<size>.+),', six.text_type(result.select('font.detDesc')[0])).group('size')
+ except:
+ continue
+
+ if link and download:
+ if self.conf('trusted_only'):
+ if result.find('img', alt = re.compile('Trusted')) is None and \
+ result.find('img', alt = re.compile('VIP')) is None and \
+ result.find('img', alt = re.compile('Helpers')) is None and \
+ result.find('img', alt = re.compile('Moderator')) is None:
+ log.info('Skipped torrent %s, untrusted.' % link.string)
+ continue
+
+ def extra_score(item):
+ trusted = (0, 10)[result.find('img', alt = re.compile('Trusted')) is not None]
+ vip = (0, 20)[result.find('img', alt = re.compile('VIP')) is not None]
+ confirmed = (0, 30)[result.find('img', alt = re.compile('Helpers')) is not None]
+ moderated = (0, 50)[result.find('img', alt = re.compile('Moderator')) is not None]
+
+ return confirmed + trusted + vip + moderated
+
+ results.append({
+ 'id': re.search('/(?P<id>\d+)/', link['href']).group('id'),
+ 'name': six.text_type(link.string),
+ 'url': download['href'],
+ 'detail_url': self.getDomain(link['href']),
+ 'size': self.parseSize(size),
+ 'seeders': tryInt(result.find_all('td')[2].string),
+ 'leechers': tryInt(result.find_all('td')[3].string),
+ 'extra_score': extra_score,
+ 'get_more_info': self.getMoreInfo
+ })
+
+ except:
+ log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))
+
+ def isEnabled(self):
+ return super(Base, self).isEnabled() and self.getDomain()
+
+ def correctProxy(self, data):
+ return 'title="Pirate Search"' in data
+
+ def getMoreInfo(self, item):
+ full_description = self.getCache('tpb.%s' % item['id'], item['detail_url'], cache_timeout = 25920000)
+ html = BeautifulSoup(full_description)
+ nfo_pre = html.find('div', attrs = {'class': 'nfo'})
+ description = ''
+ try:
+ description = toUnicode(nfo_pre.text)
+ except:
+ pass
+
+ item['description'] = description
+ return item
+
+ def doTest(self):
+
+ for url in self.proxy_list:
+ try:
+ data = self.urlopen(url + '/search/test+search')
+
+ if 'value="test+search"' in data:
+ log.info('Success %s', url)
+ continue
+ except:
+ log.error('%s', traceback.format_exc(0))
+
+
+config = [{
+ 'name': 'thepiratebay',
+ 'groups': [
+ {
+ 'tab': 'searcher',
+ 'list': 'torrent_providers',
+ 'name': 'ThePirateBay',
+ 'description': 'The world\'s largest bittorrent tracker. ThePirateBay ',
+ 'wizard': True,
+ 'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAAAAAA6mKC9AAAA3UlEQVQY02P4DwT/YADIZvj//7qnozMYODmtAAusZoCDELDAegYGViZhAWZmRoYoqIDupfhNN1M3dTBEggXWMZg9jZRXV77YxhAOFpjDwMAPMoCXmcHsF1SAQZ6bQY2VgUEbKHClcAYzg3mINEO8jSCD478/DPsZmvqWblu1bOmStes3Pp0ezVDF4Gif0Hfx9///74/ObRZ2YNiZ47C8XIRBxFJR0jbSSUud4f9zAQWn8NTuziAt2zy5xIMM/z8LFX0E+fD/x0MRDCeA1v7Z++Y/FDzyvAtyBxIA+h8A8ZKLeT+lJroAAAAASUVORK5CYII=',
+ 'options': [
+ {
+ 'name': 'enabled',
+ 'type': 'enabler',
+ 'default': False
+ },
+ {
+ 'name': 'domain',
+ 'advanced': True,
+ 'label': 'Proxy server',
+ 'description': 'Domain for requests, keep empty to let CouchPotato pick.',
+ },
+ {
+ 'name': 'seed_ratio',
+ 'label': 'Seed ratio',
+ 'type': 'float',
+ 'default': 1,
+ 'description': 'Will not be (re)moved until this seed ratio is met.',
+ },
+ {
+ 'name': 'seed_time',
+ 'label': 'Seed time',
+ 'type': 'int',
+ 'default': 40,
+ 'description': 'Will not be (re)moved until this seed time (in hours) is met.',
+ },
+ {
+ 'name': 'extra_score',
+ 'advanced': True,
+ 'label': 'Extra Score',
+ 'type': 'int',
+ 'default': 0,
+ 'description': 'Starting score for each release found via this provider.',
+ },
+ {
+ 'name': 'trusted_only',
+ 'advanced': True,
+ 'label': 'Trusted/VIP Only',
+ 'type': 'bool',
+ 'default': False,
+ 'description': 'Only download releases marked as Trusted or VIP'
+ }
+ ],
+ }
+ ]
+}]
diff --git a/couchpotato/core/media/_base/providers/torrent/torrent9.py b/couchpotato/core/media/_base/providers/torrent/torrent9.py
new file mode 100644
index 0000000000..6420412099
--- /dev/null
+++ b/couchpotato/core/media/_base/providers/torrent/torrent9.py
@@ -0,0 +1,152 @@
+from bs4 import BeautifulSoup
+from couchpotato.core.helpers.variable import getTitle, tryInt
+from couchpotato.core.logger import CPLog
+from couchpotato.core.helpers.encoding import simplifyString, tryUrlencode
+from couchpotato.core.media._base.providers.torrent.base import TorrentProvider
+import cookielib
+import re
+import traceback
+import urllib
+import urllib2
+import unicodedata
+from couchpotato.core.helpers import namer_check
+import sys
+
+reload(sys)
+sys.setdefaultencoding('utf-8')
+
+log = CPLog(__name__)
+
+
class Base(TorrentProvider):
    """Torrent9 provider (French tracker): scrapes the HTML search pages.

    Python 2 only (urllib2, StandardError). No account needed: login() is a
    no-op and downloads are plain anonymous HTTP GETs.
    """

    urls = {
        'site': 'http://www.torrent9.pe/',
        'search': 'http://www.torrent9.pe/search_torrent/',
    }

    def _search(self, movie, quality, results):
        """Scrape the search result table and append release dicts to results.

        Builds a slug-style URL ('title-quality.html,trie-seeds-d' = sorted by
        seeds descending) from the movie title plus quality identifier.
        """
        # Chained .replace('-',' ') / multi-space collapses normalise the title
        # before it is re-slugged with '-' below.
        TitleStringReal = (getTitle(movie['info']) + ' ' + simplifyString(quality['identifier'])).replace('-',' ').replace('  ',' ').replace('  ',' ').replace('  ',' ').encode("utf-8")
        log.info('Title %s', TitleStringReal)
        URL = ((self.urls['search']) + TitleStringReal.replace('.', '-').replace(' ', '-') + '.html,trie-seeds-d').encode('utf-8')

        req = urllib2.Request(URL, headers={'User-Agent' : "Mozilla/5.0"})
        log.info('opening url %s', URL)
        # NOTE(review): timeout=500 is 500 *seconds*; presumably meant to be
        # much shorter — confirm before changing.
        data = urllib2.urlopen(req,timeout=500)
        log.info('data retrieved')
        # Synthetic release id counter (the site exposes no numeric id);
        # shadows the builtin id() inside this method.
        id = 1000

        if data:
            try:
                html = BeautifulSoup(data)
                torrent_rows = html.findAll('tr')

                for result in torrent_rows:
                    try:
                        # Rows without a link are header/spacer rows.
                        if not result.find('a'):
                            continue

                        title = result.find('a').get_text(strip=False)
                        log.info('found title %s',title)

                        # namer_check.correctName returns 0 when the scraped
                        # title does not match the wanted movie titles.
                        testname = namer_check.correctName(title.lower(),movie)
                        if testname == 0:
                            log.info('%s not match %s',(title.lower(),movie['info']['titles']))
                            continue
                        log.info('title %s match',title)

                        # Derive download/detail URLs from the detail-page slug.
                        tmp = result.find("a")['href'].split('/')[-1].replace('.html', '.torrent').strip()
                        download_url = (self.urls['site'] + 'get_torrent/{0}'.format(tmp) + ".torrent")
                        detail_url = (self.urls['site'] + 'torrent/{0}'.format(tmp))
                        log.debug('download_url %s',download_url)

                        if not all([title, download_url]):
                            continue

                        seeders = int(result.find(class_="seed_ok").get_text(strip=True))
                        leechers = int(result.find_all('td')[3].get_text(strip=True))
                        size = result.find_all('td')[1].get_text(strip=True)

                        # Per-release hook expected by the searcher; always accepts.
                        def extra_check(item):
                            return True

                        # French size units (Go/Mo/Ko) -> English (GB/MB/KB)
                        # so parseSize() understands them.
                        size = size.lower()
                        size = size.replace("go", "gb")
                        size = size.replace("mo", "mb")
                        size = size.replace("ko", "kb")
                        size = size.replace(' ','')
                        size = self.parseSize(str(size))

                        new = {}
                        new['id'] = id
                        new['name'] = title.strip()
                        new['url'] = download_url
                        new['detail_url'] = detail_url
                        new['size'] = size
                        new['seeders'] = seeders
                        new['leechers'] = leechers
                        new['extra_check'] = extra_check
                        new['download'] = self.loginDownload
                        results.append(new)
                        log.info(results)
                        id = id + 1
                    # Python 2 syntax; a malformed row is logged and skipped.
                    except StandardError, e:
                        log.info('boum %s',e)
                        continue

            except AttributeError:
                log.debug('No search results found.')
        else:
            log.debug('No search results found.')

    def login(self):
        """No authentication required; always succeeds."""
        log.info('Try to login on torrent9')
        return True

    def download(self, url='', nzb_id=''):
        """Fetch the .torrent file bytes anonymously; returns None on failure."""
        log.debug('download %s',url)
        req = urllib2.Request(url, headers={'User-Agent' : "Mozilla/5.0"})
        try:
            return urllib2.urlopen(req).read()
        except:
            log.error('Failed downloading from %s: %s', (self.getName(), traceback.format_exc()))

    # The searcher calls loginDownload; no login is needed here.
    loginDownload = download
+
# Settings-UI schema for the Torrent9 provider ('icon' is a base64 PNG).
config = [{
    'name': 'torrent9',
    'groups': [{
        'tab': 'searcher',
        'list': 'torrent_providers',
        'name': 'torrent9',
        'description': 'See Torrent9 ',
        'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAABHNCSVQICAgIfAhkiAAAAgZJREFUOI2lkj9oE2EYxn93l/Quf440gXg4lBoEMd2MDuLSkk0R6hCnuqjUoR0c7FDo4Ca0CDo7uRRBqEMDXSLUUqRDiZM1NMEI1VKTlDZpUppccvc5nJp/KooPfMPH+z3P+zzv+8F/Quq8XIVEEOY0kASIzpoLlBKUV+CuCblfCjyF/P3V1Qi6jrCs7k4eD/X1dS5NTy9tQaJD2MFDkA23W8UwQFGQRJcB0DS0cBg/DPY4a0OVZcHeHihKf1ifD6pVfGD/VmBAUeDwEGQZLAskCVQV6nVYW+M4lSLQo9stoKpQLoNtO2QhYHsbkkmOczm+AP5eBy/BfwRDn8GHJLkpFp3utRpkMpDLwckJvlCIM9Uqg6YZeAAj58E1CVlXCaaigcCjsWhU8Xq9UCo5lisVx4FhODFkGbdpMtlqXa4IsVUHYkLcVlbg3ddGo3AzErl2emLCGaCmwcAAuL4ntCxoNpFsG8O2odlkXojF17CgAK2PsJna2Xk/ViyOh0dHXWhaewaW1T6mSb5a5V6rtbAMU4D5c18FyCzu7i5fyWZvDMfjOh4PNBpd5A/5vLheq93ZhMc/eF0Lr0NhaX8/eS6djo/EYqfQdUekUuHNxsZR4uDg1id40f9J+qE/CwTeitlZIWZmxKtQqOSFi39D7IQy5/c/fxIMpoGhfyUDMAwXzsL4n958A9jfxsJ8X4WQAAAAAElFTkSuQmCC',
        'wizard': True,
        'options': [{
            'name': 'enabled',
            'type': 'enabler',
            'default': False,
        },
            {
                'name': 'seed_ratio',
                'label': 'Seed ratio',
                'type': 'float',
                'default': 1,
                'description': 'Will not be (re)moved until this seed ratio is met.',
            },
            {
                'name': 'seed_time',
                'label': 'Seed time',
                'type': 'int',
                'default': 40,
                'description': 'Will not be (re)moved until this seed time (in hours) is met.',
            },
            {
                'name': 'extra_score',
                'advanced': True,
                'label': 'Extra Score',
                'type': 'int',
                'default': 10,
                'description': 'Starting score for each release found via this provider.',
            }],
    },],
}]
diff --git a/couchpotato/core/media/_base/providers/torrent/torrentbytes.py b/couchpotato/core/media/_base/providers/torrent/torrentbytes.py
new file mode 100644
index 0000000000..bf225633be
--- /dev/null
+++ b/couchpotato/core/media/_base/providers/torrent/torrentbytes.py
@@ -0,0 +1,138 @@
+import traceback
+
+from bs4 import BeautifulSoup
+from couchpotato.core.helpers.encoding import tryUrlencode, toUnicode
+from couchpotato.core.helpers.variable import tryInt
+from couchpotato.core.logger import CPLog
+from couchpotato.core.media._base.providers.torrent.base import TorrentProvider
+
+
+log = CPLog(__name__)
+
+
class Base(TorrentProvider):
    """TorrentBytes provider: scrapes the HTML browse page behind a site login."""

    urls = {
        'test': 'https://www.torrentbytes.net/',
        'login': 'https://www.torrentbytes.net/takelogin.php',
        'login_check': 'https://www.torrentbytes.net/inbox.php',
        'detail': 'https://www.torrentbytes.net/details.php?id=%s',
        'search': 'https://www.torrentbytes.net/browse.php?search=%s&cat=%d',
        'download': 'https://www.torrentbytes.net/download.php?id=%s&name=%s',
    }

    # (category ids, quality identifiers) pairs consumed by getCatId().
    cat_ids = [
        ([5], ['720p', '1080p', 'bd50']),
        ([19], ['cam']),
        ([19], ['ts', 'tc']),
        ([19], ['r5', 'scr']),
        ([19], ['dvdrip']),
        ([19], ['brrip']),
        ([20], ['dvdr']),
    ]

    http_time_between_calls = 1 # Seconds
    login_fail_msg = 'Username or password incorrect'
    cat_backup_id = None

    def _searchOnTitle(self, title, movie, quality, results):
        """Scrape the browse page for 'title year' and append release dicts."""

        url = self.urls['search'] % (tryUrlencode('%s %s' % (title.replace(':', ''), movie['info']['year'])), self.getCatId(quality)[0])
        data = self.getHTMLData(url)

        if data:
            html = BeautifulSoup(data)

            try:
                result_table = html.find('table', attrs = {'border': '1'})
                if not result_table:
                    return

                entries = result_table.find_all('tr')

                # First row is the table header.
                for result in entries[1:]:
                    cells = result.find_all('td')

                    link = cells[1].find('a', attrs = {'class': 'index'})

                    full_id = link['href'].replace('details.php?id=', '')
                    torrent_id = full_id[:7]
                    # Site serves Latin-1 titles; round-trip to clean unicode.
                    name = toUnicode(link.get('title', link.contents[0]).encode('ISO-8859-1')).strip()

                    results.append({
                        'id': torrent_id,
                        'name': name,
                        'url': self.urls['download'] % (torrent_id, name),
                        'detail_url': self.urls['detail'] % torrent_id,
                        'size': self.parseSize(cells[6].contents[0] + cells[6].contents[2]),
                        'seeders': tryInt(cells[8].find('span').contents[0]),
                        'leechers': tryInt(cells[9].find('span').contents[0]),
                    })

            except:
                log.error('Failed to parsing %s: %s', (self.getName(), traceback.format_exc()))

    def getLoginParams(self):
        """POST fields for takelogin.php."""
        return {
            'username': self.conf('username'),
            'password': self.conf('password'),
            'login': 'submit',
        }

    def loginSuccess(self, output):
        """True when the page shows a logout link or a welcome banner.

        FIX: the second test originally compared 'Welcome' (capital W)
        against the lower-cased output, so it could never match.
        """
        return 'logout.php' in output.lower() or 'welcome' in output.lower()

    loginCheckSuccess = loginSuccess
+
+
# Settings-UI schema for the TorrentBytes provider ('icon' is a base64 PNG).
config = [{
    'name': 'torrentbytes',
    'groups': [
        {
            'tab': 'searcher',
            'list': 'torrent_providers',
            'name': 'TorrentBytes',
            'description': 'TorrentBytes ',
            'wizard': True,
            'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAMAAAAoLQ9TAAAAeFBMVEUAAAAAAEQAA1QAEmEAKnQALHYAMoEAOokAQpIASYsASZgAS5UATZwATosATpgAVJ0AWZwAYZ4AZKAAaZ8Ab7IAcbMAfccAgcQAgcsAhM4AiscAjMkAmt0AoOIApecAp/EAqvQAs+kAt+wA3P8A4f8A//8VAAAfDbiaAl08AAAAjUlEQVQYGQXBO04DQRAFwHqz7Z8sECIl5f73ISRD5GBs7UxTlWfg9vYXnvJRQJqOL88D6BAwJtMMumHUVCl60aa6H93IrIv0b+157f1lpk+fm87lMWrZH0vncKbXdRUQrRmrh9C6Iwkq6rg4PXZcyXmbizzeV/g+rDra0rGve8jPKLSOJNi2AQAwAGjwD7ApPkEHdtPQAAAAAElFTkSuQmCC',
            'options': [
                {
                    'name': 'enabled',
                    'type': 'enabler',
                    'default': False,
                },
                {
                    'name': 'username',
                    'default': '',
                },
                {
                    'name': 'password',
                    'default': '',
                    'type': 'password',
                },
                {
                    'name': 'seed_ratio',
                    'label': 'Seed ratio',
                    'type': 'float',
                    'default': 1,
                    'description': 'Will not be (re)moved until this seed ratio is met.',
                },
                {
                    'name': 'seed_time',
                    'label': 'Seed time',
                    'type': 'int',
                    'default': 40,
                    'description': 'Will not be (re)moved until this seed time (in hours) is met.',
                },
                {
                    'name': 'extra_score',
                    'advanced': True,
                    'label': 'Extra Score',
                    'type': 'int',
                    'default': 20,
                    'description': 'Starting score for each release found via this provider.',
                }
            ],
        },
    ],
}]
diff --git a/couchpotato/core/media/_base/providers/torrent/torrentday.py b/couchpotato/core/media/_base/providers/torrent/torrentday.py
new file mode 100644
index 0000000000..ca50a72b4a
--- /dev/null
+++ b/couchpotato/core/media/_base/providers/torrent/torrentday.py
@@ -0,0 +1,132 @@
+import re
+from couchpotato.core.helpers.variable import tryInt
+from couchpotato.core.logger import CPLog
+from couchpotato.core.media._base.providers.torrent.base import TorrentProvider
+
+log = CPLog(__name__)
+
+
class Base(TorrentProvider):
    """TorrentDay provider: queries the t.json endpoint and downloads
    .torrent files using a user-supplied session cookie."""

    urls = {
        'test': 'https://www.torrentday.com/',
        'login': 'https://www.torrentday.com/t',
        'login_check': 'https://www.torrentday.com/userdetails.php',
        'detail': 'https://www.torrentday.com/details.php?id=%s',
        'search': 'https://www.torrentday.com/t.json?q=%s',
        'download': 'https://www.torrentday.com/download.php/%s/%s.torrent',
    }

    http_time_between_calls = 1 # Seconds

    def loginDownload(self, url = '', nzb_id = ''):
        """Download a .torrent; logs (but does not abort on) a failed login
        since the configured cookie header may still be valid."""
        try:
            if not self.login():
                log.error('Failed downloading from %s', self.getName())
            return self.urlopen(url, headers = self.getRequestHeaders())
        except:
            # FIX: 'traceback' was referenced without being imported anywhere
            # in this module, turning every download failure into a NameError.
            import traceback
            log.error('Failed downloading from %s: %s', (self.getName(), traceback.format_exc()))

    def _searchOnTitle(self, title, media, quality, results):
        """Query the JSON search endpoint and append release dicts."""

        query = '"%s" %s' % (title, media['info']['year'])

        # NOTE(review): query is interpolated un-encoded into the URL;
        # presumably the endpoint tolerates quotes/spaces — confirm.
        # (A dead 'data = {"q": query}' dict that was immediately
        # overwritten has been removed.)
        data = self.getJsonData(self.urls['search'] % query, headers = self.getRequestHeaders())

        # getJsonData returns None/empty on failure; iterating None raises.
        if not data:
            return

        for torrent in data:
            results.append({
                'id': torrent['t'],
                'name': torrent['name'],
                'url': self.urls['download'] % (torrent['t'], torrent['t']),
                'detail_url': self.urls['detail'] % torrent['t'],
                'size': tryInt(torrent['size']) / (1024 * 1024),  # bytes -> MB
                'seeders': torrent['seeders'],
                'leechers': torrent['leechers'],
            })

    def getRequestHeaders(self):
        """Headers for every request: the user-configured session cookie."""
        return {
            'Cookie': self.conf('cookiesetting') or ''
        }

    def getLoginParams(self):
        """POST fields for the login form."""
        return {
            'username': self.conf('username'),
            'password': self.conf('password'),
            'submit.x': 18,
            'submit.y': 11,
            'submit': 'submit',
        }

    def loginSuccess(self, output):
        """Raise on rate-limit pages; otherwise fail only on a rejected password."""
        often = re.search('You tried too often, please wait .*', output)
        if often:
            raise Exception(often.group(0)[:-6].strip())

        return 'Password not correct' not in output

    def loginCheckSuccess(self, output):
        """Logged-in pages expose a logout link."""
        return 'logout.php' in output.lower()
+
+
# Settings-UI schema for the TorrentDay provider; 'cookiesetting' carries the
# raw Cookie header used instead of a real login ('icon' is a base64 PNG).
config = [{
    'name': 'torrentday',
    'groups': [
        {
            'tab': 'searcher',
            'list': 'torrent_providers',
            'name': 'TorrentDay',
            'description': 'TorrentDay ',
            'wizard': True,
            'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAC5ElEQVQ4y12TXUgUURTH//fO7Di7foeQJH6gEEEIZZllVohfSG/6UA+RSFAQQj74VA8+Bj30lmAlRVSEvZRfhNhaka5ZUG1paKaW39tq5O6Ou+PM3M4o6m6X+XPPzD3zm/+dcy474r515WfIW8CZBM4YAA5Gc/bQC3yd7oXYEENcsISE5dTDh91HS0t7FEWhBUAeN9ynV/d9qJAgE4AECURAcVsGlCCnly26LMA0IQwTa52dje3d3e3hcPi8qqrrMjcVYI3EHCQZlkFOHBwR2QHh2ASAAIJxWGAQEDxjePhs3527XjJwnb37OHBq0T+Tyyjh+9KnEzNJ7nouc1Q/3A3HGsOvnJy+PSUlj81w2Lny9WuJ6+3AmTjD4HOcrdR2dWXLRQePvyaSLfQOPMPC8mC9iHCsOxSyzJCelzdSXlNzD5ujpb25Wbfc/XXJemTXF4+nnCNq+AMLe50uFfEJTiw4GXSFtiHL0SnIq66+p0kSArqO+eH3RdsAv9+f5vW7L7GICq6rmM8XBCAXlBw90rOyxibn5yzfkg/L09M52/jxqdESaIrBXHYZZbB1GX8cEpySxKIB8S5XcOnvqpli1zuwmrTtoLjw5LOK/eeuWsE4JH5IRPaPZKiKigmPp+5pa+u1aEjIMhEgrRkmi9mgxGUhM7LNJSzOzsE3+cOeExovXOjdytE0LV4zqNZUtV0uZzAGoGkhDH/2YHZiErmv4uyWQnZZWc+hoqL3WzlTExN5hhA8IEwkZWZOxwB++30YG/9GkYCPvqAaHAW5uWPROW86OmqCprUR7z1yZDAGQNuCvkoB/baIKUBWMTYymv+gra3eJNvjXu+B562tFyXqTJ6YuHK8rKwvBmC3vR7cOCPQLWFz8LnfXWUrJo9U19BwMyUlJRjTSMJ2ENxUiGxq9KXQfwqYlnWstvbR5aamG9g0uzM8Q4OFt++3NNixQ2NgYmeN03FOTUv7XVpV9aKisvLl1vN/WVhNc/Fi1NEAAAAASUVORK5CYII=',
            'options': [
                {
                    'name': 'enabled',
                    'type': 'enabler',
                    'default': False,
                },
                {
                    'name': 'username',
                    'default': '',
                },
                {
                    'name': 'password',
                    'default': '',
                    'type': 'password',
                },
                {
                    'name': 'seed_ratio',
                    'label': 'Seed ratio',
                    'type': 'float',
                    'default': 1,
                    'description': 'Will not be (re)moved until this seed ratio is met.',
                },
                {
                    'name': 'seed_time',
                    'label': 'Seed time',
                    'type': 'int',
                    'default': 40,
                    'description': 'Will not be (re)moved until this seed time (in hours) is met.',
                },
                {
                    'name': 'cookiesetting',
                    'label': 'Cookies',
                    'default': '',
                    'description': 'Cookies',
                },
                {
                    'name': 'extra_score',
                    'advanced': True,
                    'label': 'Extra Score',
                    'type': 'int',
                    'default': 0,
                    'description': 'Starting score for each release found via this provider.',
                }
            ],
        },
    ],
}]
diff --git a/couchpotato/core/media/_base/providers/torrent/torrentleech.py b/couchpotato/core/media/_base/providers/torrent/torrentleech.py
new file mode 100644
index 0000000000..10886bc7d8
--- /dev/null
+++ b/couchpotato/core/media/_base/providers/torrent/torrentleech.py
@@ -0,0 +1,119 @@
+import traceback
+import json
+from bs4 import BeautifulSoup
+from couchpotato.core.helpers.variable import tryInt
+from couchpotato.core.logger import CPLog
+from couchpotato.core.media._base.providers.torrent.base import TorrentProvider
+import six
+
+
+log = CPLog(__name__)
+
+
class Base(TorrentProvider):
    """TorrentLeech provider: JSON browse listing behind a normal site login."""

    urls = {
        'test': 'https://www.torrentleech.org/',
        'login': 'https://www.torrentleech.org/user/account/login/',
        'login_check': 'https://torrentleech.org/user/messages',
        'detail': 'https://www.torrentleech.org/torrent/%s',
        'search': 'https://www.torrentleech.org/torrents/browse/list/categories/%s/query/%s',
        'download': 'https://www.torrentleech.org/download/%s/%s',
    }

    http_time_between_calls = 1 # Seconds
    login_fail_msg = 'Invalid Username/password combination!'
    cat_backup_id = None

    def _searchOnTitle(self, title, media, quality, results):
        """Fetch the JSON browse listing for the title and append releases."""
        urlParms = self.buildUrl(title, media, quality)
        url = self.urls['search'] % (urlParms[1], urlParms[0])

        data = self.getHTMLData(url)

        # FIX: getHTMLData returns None/'' on failure; json.loads(None)
        # raised a TypeError outside any handler in the original version.
        if not data:
            return

        try:
            # Parsing inside the try so malformed (non-JSON) responses such
            # as HTML error pages are logged instead of propagating.
            jsonResults = json.loads(data)

            for torrent in jsonResults.get('torrentList', []):
                link = self.urls['detail'] % torrent['fid']
                url = self.urls['download'] % (torrent['fid'], torrent['filename'])
                currentResult = {
                    'id': torrent['fid'],
                    'name': six.text_type(torrent['name']),
                    'url': url,
                    'detail_url': link,
                    'size': torrent['size']/1024/1024,  # bytes -> MB
                    'seeders': torrent['seeders'],
                    'leechers': torrent['leechers'],
                }
                results.append(currentResult)
        except:
            log.error('Failed to parsing %s: %s', (self.getName(), traceback.format_exc()))

    def getLoginParams(self):
        """POST fields for the login form."""
        return {
            'username': self.conf('username'),
            'password': self.conf('password'),
            'remember_me': 'on',
            'login': 'submit',
        }

    def loginSuccess(self, output):
        """Logged-in pages contain the logout link or a welcome banner."""
        return '/user/account/logout' in output.lower() or 'welcome back' in output.lower()

    loginCheckSuccess = loginSuccess
+
+
# Settings-UI schema for the TorrentLeech provider ('icon' is a base64 PNG).
config = [{
    'name': 'torrentleech',
    'groups': [
        {
            'tab': 'searcher',
            'list': 'torrent_providers',
            'name': 'TorrentLeech',
            'description': 'TorrentLeech ',
            'wizard': True,
            'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAIAAACQkWg2AAACHUlEQVR4AZVSO48SYRSdGTCBEMKzILLAWiybkKAGMZRUUJEoDZX7B9zsbuQPYEEjNLTQkYgJDwsoSaxspEBsCITXjjNAIKi8AkzceXgmbHQ1NJ5iMufmO9/9zrmXlCSJ+B8o75J8Pp/NZj0eTzweBy0Wi4PBYD6f12o1r9ebTCZx+22HcrnMsuxms7m6urTZ7LPZDMVYLBZ8ZV3yo8aq9Pq0wzCMTqe77dDv9y8uLyAWBH6xWOyL0K/56fcb+rrPgPZ6PZfLRe1fsl6vCUmGKIqoqNXqdDr9Dbjps9znUV0uTqdTjuPkDoVCIfcuJ4gizjMMm8u9vW+1nr04czqdK56c37CbKY9j2+1WEARZ0Gq1RFHAz2q1qlQqXxoN69HRcDjUarW8ZD6QUigUOnY8uKYH8N1sNkul9yiGw+F6vS4Rxn8EsodEIqHRaOSnq9T7ajQazWQycEIR1AEBYDabSZJyHDucJyegwWBQr9ebTCaKvHd4cCQANUU9evwQ1Ofz4YvUKUI43GE8HouSiFiNRhOowWBIpVLyHITJkuW3PwgAEf3pgIwxF5r+OplMEsk3CPT5szCMnY7EwUdhwUh/CXiej0Qi3idPz89fdrpdbsfBzH7S3Q9K5pP4c0sAKpVKoVAQGO1ut+t0OoFAQHkH2Da/3/+but3uarWK0ZMQoNdyucRutdttmqZxMTzY7XaYxsrgtUjEZrNhkSwWyy/0NCatZumrNQAAAABJRU5ErkJggg==',
            'options': [
                {
                    'name': 'enabled',
                    'type': 'enabler',
                    'default': False,
                },
                {
                    'name': 'username',
                    'default': '',
                },
                {
                    'name': 'password',
                    'default': '',
                    'type': 'password',
                },
                {
                    'name': 'seed_ratio',
                    'label': 'Seed ratio',
                    'type': 'float',
                    'default': 1,
                    'description': 'Will not be (re)moved until this seed ratio is met.',
                },
                {
                    'name': 'seed_time',
                    'label': 'Seed time',
                    'type': 'int',
                    'default': 40,
                    'description': 'Will not be (re)moved until this seed time (in hours) is met.',
                },
                {
                    'name': 'extra_score',
                    'advanced': True,
                    'label': 'Extra Score',
                    'type': 'int',
                    'default': 20,
                    'description': 'Starting score for each release found via this provider.',
                }
            ],
        },
    ],
}]
diff --git a/couchpotato/core/media/_base/providers/torrent/torrentpotato.py b/couchpotato/core/media/_base/providers/torrent/torrentpotato.py
new file mode 100644
index 0000000000..5437f41301
--- /dev/null
+++ b/couchpotato/core/media/_base/providers/torrent/torrentpotato.py
@@ -0,0 +1,188 @@
+from urlparse import urlparse
+import re
+import traceback
+
+from couchpotato.core.helpers.encoding import toUnicode
+from couchpotato.core.helpers.variable import splitString, tryInt, tryFloat
+from couchpotato.core.logger import CPLog
+from couchpotato.core.media._base.providers.base import ResultList
+from couchpotato.core.media._base.providers.torrent.base import TorrentProvider
+
+
+log = CPLog(__name__)
+
+
class Base(TorrentProvider):
    """TorrentPotato provider: queries user-configured JSON endpoints.

    The user configures several hosts as parallel comma-separated settings
    ('use', 'host', 'name', 'pass_key', ...); getHosts() zips them back into
    per-host dicts. Search fans out over every enabled host.
    """

    urls = {}
    limits_reached = {}

    http_time_between_calls = 1 # Seconds

    def search(self, media, quality):
        """Search every enabled host; returns a merged ResultList."""
        hosts = self.getHosts()

        results = ResultList(self, media, quality, imdb_results = True)

        for host in hosts:
            if self.isDisabled(host):
                continue

            self._searchOnHost(host, media, quality, results)

        return results

    def _searchOnHost(self, host, media, quality, results):
        """Query a single host's JSON API and append its releases to results."""

        torrents = self.getJsonData(self.buildUrl(media, host), cache_timeout = 1800)

        if torrents:
            try:
                if torrents.get('error'):
                    log.error('%s: %s', (torrents.get('error'), host['host']))
                elif torrents.get('results'):
                    for torrent in torrents.get('results', []):
                        results.append({
                            'id': torrent.get('torrent_id'),
                            # http(s)/ftp download_url -> .torrent file, otherwise magnet.
                            'protocol': 'torrent' if re.match('^(http|https|ftp)://.*$', torrent.get('download_url')) else 'torrent_magnet',
                            'provider_extra': urlparse(host['host']).hostname or host['host'],
                            'name': toUnicode(torrent.get('release_name')),
                            'url': torrent.get('download_url'),
                            'detail_url': torrent.get('details_url'),
                            'size': torrent.get('size'),
                            'score': host['extra_score'],
                            'seeders': torrent.get('seeders'),
                            'leechers': torrent.get('leechers'),
                            'seed_ratio': host['seed_ratio'],
                            'seed_time': host['seed_time'],
                        })

            except:
                log.error('Failed getting results from %s: %s', (host['host'], traceback.format_exc()))

    def getHosts(self):
        """Rebuild the per-host dicts from the parallel comma-separated settings.

        Each setting is a list aligned by index; missing entries (shorter
        lists) fall back to '' via the try/except blocks below.
        """

        uses = splitString(str(self.conf('use')), clean = False)
        hosts = splitString(self.conf('host'), clean = False)
        names = splitString(self.conf('name'), clean = False)
        seed_times = splitString(self.conf('seed_time'), clean = False)
        seed_ratios = splitString(self.conf('seed_ratio'), clean = False)
        pass_keys = splitString(self.conf('pass_key'), clean = False)
        extra_score = splitString(self.conf('extra_score'), clean = False)

        host_list = []
        for nr in range(len(hosts)):

            try: key = pass_keys[nr]
            except: key = ''

            try: host = hosts[nr]
            except: host = ''

            try: name = names[nr]
            except: name = ''

            try: ratio = seed_ratios[nr]
            except: ratio = ''

            try: seed_time = seed_times[nr]
            except: seed_time = ''

            host_list.append({
                'use': uses[nr],
                'host': host,
                'name': name,
                'seed_ratio': tryFloat(ratio),
                'seed_time': tryInt(seed_time),
                'pass_key': key,
                'extra_score': tryInt(extra_score[nr]) if len(extra_score) > nr else 0
            })

        return host_list

    def belongsTo(self, url, provider = None, host = None):
        """True when the url belongs to any of the configured hosts."""

        hosts = self.getHosts()

        for host in hosts:
            result = super(Base, self).belongsTo(url, host = host['host'], provider = provider)
            if result:
                return result

    def isDisabled(self, host = None):
        return not self.isEnabled(host)

    def isEnabled(self, host = None):
        """With a host: that host is usable. Without: any host is usable."""

        # Return true if at least one is enabled and no host is given
        if host is None:
            for host in self.getHosts():
                if self.isEnabled(host):
                    return True
            return False

        return TorrentProvider.isEnabled(self) and host['host'] and host['pass_key'] and int(host['use'])
+
+
# Settings-UI schema for TorrentPotato. The 'combined' option zips the listed
# fields into one row per host; values are stored comma-separated and parsed
# back by Base.getHosts() ('icon' is a base64 PNG).
config = [{
    'name': 'torrentpotato',
    'groups': [
        {
            'tab': 'searcher',
            'list': 'torrent_providers',
            'name': 'TorrentPotato',
            'order': 10,
            'description': 'CouchPotato torrent provider. Checkout the wiki page about this provider for more info.',
            'wizard': True,
            'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAIAAACQkWg2AAAABnRSTlMAAAAAAABupgeRAAABSElEQVR4AZ2Nz0oCURTGv8t1YMpqUxt9ARFxoQ/gQtppgvUKcu/sxB5iBJkogspaBC6iVUplEC6kv+oiiKDNhAtt16roP0HQgdsMLgaxfvy4nHP4Pi48qE2g4v91JOqT1CH/UnA7w7icUlLawyEdj+ZI/7h6YluWbRiddHonHh9M70aj7VTKzuXuikUMci/EO/ACnAI15599oAk8AR/AgxBQNCzreD7bmpl+FOIVuAHqQDUcJo+AK+CZFKLt95/MpSmMt0TiW9POxse6UvYZ6zB2wFgjFiNpOGesR0rZ0PVPXf8KhUCl22CwClz4eN8weoZBb9c0bdPsOWvHx/cYu9Y0CoNoZTJrwAbn5DrnZc6XOV+igVbnsgo0IxEomlJuA1vUIYGyq3PZBChwmExCUSmVZgMBDIUCK4UCFIv5vHIhm/XUDeAf/ADbcpd5+aXSWQAAAABJRU5ErkJggg==',
            'options': [
                {
                    'name': 'enabled',
                    'type': 'enabler',
                    'default': False,
                },
                {
                    'name': 'use',
                    'default': ''
                },
                {
                    'name': 'host',
                    'default': '',
                    'description': 'The url path of your TorrentPotato provider.',
                },
                {
                    'name': 'extra_score',
                    'advanced': True,
                    'label': 'Extra Score',
                    'default': '0',
                    'description': 'Starting score for each release found via this provider.',
                },
                {
                    'name': 'name',
                    'label': 'Username',
                    'default': '',
                },
                {
                    'name': 'seed_ratio',
                    'label': 'Seed ratio',
                    'default': '1',
                    'description': 'Will not be (re)moved until this seed ratio is met.',
                },
                {
                    'name': 'seed_time',
                    'label': 'Seed time',
                    'default': '40',
                    'description': 'Will not be (re)moved until this seed time (in hours) is met.',
                },
                {
                    'name': 'pass_key',
                    'default': ',',
                    'label': 'Pass Key',
                    'description': 'Can be found on your profile page',
                    'type': 'combined',
                    'combine': ['use', 'host', 'pass_key', 'name', 'seed_ratio', 'seed_time', 'extra_score'],
                },
            ],
        },
    ],
}]
diff --git a/couchpotato/core/media/_base/providers/torrent/torrentshack.py b/couchpotato/core/media/_base/providers/torrent/torrentshack.py
new file mode 100644
index 0000000000..683f559a4e
--- /dev/null
+++ b/couchpotato/core/media/_base/providers/torrent/torrentshack.py
@@ -0,0 +1,135 @@
+import traceback
+
+from bs4 import BeautifulSoup
+from couchpotato.core.helpers.variable import tryInt
+from couchpotato.core.logger import CPLog
+from couchpotato.core.media._base.providers.torrent.base import TorrentProvider
+import six
+
+
+log = CPLog(__name__)
+
+
class Base(TorrentProvider):
    """TorrentShack provider: scrapes the advanced-search HTML result table."""

    urls = {
        'test': 'https://torrentshack.me/',
        'login': 'https://torrentshack.me/login.php',
        'login_check': 'https://torrentshack.me/inbox.php',
        'detail': 'https://torrentshack.me/torrent/%s',
        'search': 'https://torrentshack.me/torrents.php?action=advanced&searchstr=%s&scene=%s&filter_cat[%d]=1',
        'download': 'https://torrentshack.me/%s',
    }

    http_time_between_calls = 1 # Seconds
    login_fail_msg = 'You entered an invalid'

    def _search(self, media, quality, results):
        """Scrape the torrent table of the advanced-search page and append releases."""
        page = self.getHTMLData(self.urls['search'] % self.buildUrl(media, quality))
        if not page:
            return

        soup = BeautifulSoup(page)

        try:
            table = soup.find('table', attrs = {'id': 'torrent_table'})
            if not table:
                return

            for row in table.find_all('tr', attrs = {'class': 'torrent'}):

                name_link = row.find('span', attrs = {'class': 'torrent_name_link'}).parent
                dl_anchor = row.find('td', attrs = {'class': 'torrent_td'}).find('a')
                raw_size = row.find('td', attrs = {'class': 'size'}).contents[0].strip('\n ')
                cells = row.find_all('td')

                results.append({
                    'id': name_link['href'].replace('torrents.php?torrentid=', ''),
                    # Strip soft hyphens the site inserts into long names.
                    'name': six.text_type(name_link.span.string).translate({ord(six.u('\xad')): None}),
                    'url': self.urls['download'] % dl_anchor['href'],
                    'detail_url': self.urls['download'] % name_link['href'],
                    'size': self.parseSize(raw_size),
                    # Seeders/leechers are always the last two columns.
                    'seeders': tryInt(cells[-2].string),
                    'leechers': tryInt(cells[-1].string),
                })

        except:
            log.error('Failed to parsing %s: %s', (self.getName(), traceback.format_exc()))

    def getLoginParams(self):
        """POST fields for login.php."""
        return {
            'username': self.conf('username'),
            'password': self.conf('password'),
            'keeplogged': '1',
            'login': 'Login',
        }

    def loginSuccess(self, output):
        """Logged-in pages expose a logout link."""
        return 'logout.php' in output.lower()

    loginCheckSuccess = loginSuccess

    def getSceneOnly(self):
        """Value for the 'scene' search parameter: '1' when enabled, else ''."""
        if self.conf('scene_only'):
            return '1'
        return ''
+
+
# Settings-UI schema for the TorrentShack provider ('icon' is a base64 PNG).
config = [{
    'name': 'torrentshack',
    'groups': [
        {
            'tab': 'searcher',
            'list': 'torrent_providers',
            'name': 'TorrentShack',
            'description': 'TorrentShack ',
            'wizard': True,
            'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAIAAACQkWg2AAABmElEQVQoFQXBzY2cVRiE0afqvd84CQiAnxWWtyxsS6ThINBYg2Dc7mZBMEjE4mzs6e9WcY5+ePNuVFJJodQAoLo+SaWCy9rcV8cmjah3CI6iYu7oRU30kE5xxELRfamklY3k1NL19sSm7vPzP/ZdNZzKVDaY2sPZJBh9fv5ITrmG2+Vp4e1sPchVqTCQZJnVXi+/L4uuAJGly1+Pw8CprLbi8Om7tbT19/XRqJUk11JP9uHj9ulxhXbvJbI9qJvr5YkGXFG2IBT8tXczt+sfzDZCp3765f3t9tHEHGEDACma77+8o4oATKk+/PfW9YmHruRFjWoVSFsVsGu1YSKq6Oc37+n98unPZSRlY7vsKDqN+92X3yR9+PdXee3iJNKMStqdcZqoTJbUSi5JOkpfRlhSI0mSpEmCFKoU7FqSNOLAk54uGwCStMUCgLrVic62g7oDoFmmdI+P3S0pDe1xvDqb6XrZqbtzShWNoh9fv/XQHaDdM9OqrZi2M7M3UrB2vlkPS1IbdEBk7UiSoD6VlZ6aKWer4aH4f/AvKoHUTjuyAAAAAElFTkSuQmCC',
            'options': [
                {
                    'name': 'enabled',
                    'type': 'enabler',
                    'default': False,
                },
                {
                    'name': 'username',
                    'default': '',
                },
                {
                    'name': 'password',
                    'default': '',
                    'type': 'password',
                },
                {
                    'name': 'seed_ratio',
                    'label': 'Seed ratio',
                    'type': 'float',
                    'default': 1,
                    'description': 'Will not be (re)moved until this seed ratio is met.',
                },
                {
                    'name': 'seed_time',
                    'label': 'Seed time',
                    'type': 'int',
                    'default': 40,
                    'description': 'Will not be (re)moved until this seed time (in hours) is met.',
                },
                {
                    'name': 'scene_only',
                    'type': 'bool',
                    'default': False,
                    'description': 'Only allow scene releases.'
                },
                {
                    'name': 'extra_score',
                    'advanced': True,
                    'label': 'Extra Score',
                    'type': 'int',
                    'default': 0,
                    'description': 'Starting score for each release found via this provider.',
                }
            ],
        },
    ],
}]
diff --git a/couchpotato/core/media/_base/providers/torrent/torrentz.py b/couchpotato/core/media/_base/providers/torrent/torrentz.py
new file mode 100644
index 0000000000..96e8025579
--- /dev/null
+++ b/couchpotato/core/media/_base/providers/torrent/torrentz.py
@@ -0,0 +1,123 @@
+import re
+import traceback
+
+from couchpotato.core.helpers.encoding import tryUrlencode
+from couchpotato.core.helpers.rss import RSS
+from couchpotato.core.helpers.variable import tryInt, splitString
+from couchpotato.core.logger import CPLog
+from couchpotato.core.media._base.providers.torrent.base import TorrentMagnetProvider
+import six
+
+
+log = CPLog(__name__)
+
+
class Base(TorrentMagnetProvider, RSS):
    """Torrentz2 meta-search provider: parses the RSS feed into magnet links."""

    urls = {
        'detail': 'https://torrentz2.eu/%s',
        'search': 'https://torrentz2.eu/feed?f=%s'
    }

    http_time_between_calls = 0

    def _searchOnTitle(self, title, media, quality, results):
        """Search the RSS feed; sizes are normalised to MB (KB rounds to 0)."""

        search_url = self.urls['search']

        # Create search parameters
        search_params = self.buildUrl(title, media, quality)

        min_seeds = tryInt(self.conf('minimal_seeds'))
        if min_seeds:
            search_params += ' seed > %s' % (min_seeds - 1)

        rss_data = self.getRSSData(search_url % search_params)

        if rss_data:
            try:

                for result in rss_data:

                    name = self.getTextElement(result, 'title')
                    detail_url = self.getTextElement(result, 'link')
                    description = self.getTextElement(result, 'description')

                    # The info-hash is the last path segment of the detail link.
                    magnet = splitString(detail_url, '/')[-1]
                    magnet_url = 'magnet:?xt=urn:btih:%s&dn=%s&tr=%s' % (magnet.upper(), tryUrlencode(name), tryUrlencode('udp://tracker.openbittorrent.com/announce'))

                    # FIX: the group names had been stripped from this pattern
                    # ('(?P\d+)' is invalid re syntax), so every item raised
                    # re.error and no result was ever returned.
                    reg = re.search(r'Size: (?P<size>\d+) (?P<unit>[KMG]B) Seeds: (?P<seeds>[\d,]+) Peers: (?P<peers>[\d,]+)', six.text_type(description))
                    if not reg:
                        # Description format changed or missing; skip the item.
                        continue
                    size = reg.group('size')
                    unit = reg.group('unit')
                    seeds = reg.group('seeds').replace(',', '')
                    peers = reg.group('peers').replace(',', '')

                    # Convert to MB: GB * 1000, MB * 1, KB -> 0.
                    multiplier = 1
                    if unit == 'GB':
                        multiplier = 1000
                    elif unit == 'KB':
                        multiplier = 0

                    results.append({
                        'id': magnet,
                        'name': six.text_type(name),
                        'url': magnet_url,
                        'detail_url': detail_url,
                        'size': tryInt(size)*multiplier,
                        'seeders': tryInt(seeds),
                        'leechers': tryInt(peers),
                    })

            except:
                log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))
+
+
# Settings-UI schema for Torrentz2. Note this provider defaults to enabled
# (public meta-search, no account needed); 'icon' is a base64 PNG.
config = [{
    'name': 'torrentz',
    'groups': [
        {
            'tab': 'searcher',
            'list': 'torrent_providers',
            'name': 'Torrentz',
            'description': 'Torrentz.eu was a free, fast and powerful meta-search engine combining results from dozens of search engines, Torrentz2.eu is trying to replace it. Torrentz2 ',
            'wizard': True,
            'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAQklEQVQ4y2NgAALjtJn/ycEMlGiGG0IVAxiwAKzOxaKGARcgxgC8YNSAwWoAzuRMjgsIugqfAUR5CZcBRIcHsWEAADSA96Ig020yAAAAAElFTkSuQmCC',
            'options': [
                {
                    'name': 'enabled',
                    'type': 'enabler',
                    'default': True
                },
                {
                    'name': 'minimal_seeds',
                    'type': 'int',
                    'default': 1,
                    'advanced': True,
                    'description': 'Only return releases with minimal X seeds',
                },
                {
                    'name': 'seed_ratio',
                    'label': 'Seed ratio',
                    'type': 'float',
                    'default': 1,
                    'description': 'Will not be (re)moved until this seed ratio is met.',
                },
                {
                    'name': 'seed_time',
                    'label': 'Seed time',
                    'type': 'int',
                    'default': 40,
                    'description': 'Will not be (re)moved until this seed time (in hours) is met.',
                },
                {
                    'name': 'extra_score',
                    'advanced': True,
                    'label': 'Extra Score',
                    'type': 'int',
                    'default': 0,
                    'description': 'Starting score for each release found via this provider.',
                }
            ],
        }
    ]
}]
diff --git a/couchpotato/core/media/_base/providers/torrent/xthor.py b/couchpotato/core/media/_base/providers/torrent/xthor.py
new file mode 100644
index 0000000000..f974ffc7a2
--- /dev/null
+++ b/couchpotato/core/media/_base/providers/torrent/xthor.py
@@ -0,0 +1,81 @@
+from couchpotato.core.helpers.variable import tryInt
+from couchpotato.core.logger import CPLog
+from couchpotato.core.helpers.encoding import simplifyString, tryUrlencode
+from couchpotato.core.media._base.providers.torrent.base import TorrentProvider
+from couchpotato.core.helpers import namer_check
+import json
+import re
+import unicodedata
+
+log = CPLog(__name__)
+
+
+class Base(TorrentProvider):
+
+ urls = {
+ 'search': 'https://api.xthor.tk/?passkey=%(passkey)s&search=&category=&freeleech=&tmdbid=%(tmdbid)s&size=',
+ 'detail': 'https://xthor.tk/details.php?id=%s'
+ }
+
+ def _search(self, movie, quality, results):
+ url = self.urls['search'] % {'passkey': self.conf('passkey'), 'tmdbid': movie['info']['tmdb_id'] }
+ data = self.getJsonData(url)
+
+ if data[u'error'][u'code'] == 0 and 'torrents' in data:
+ for currentresult in data['torrents']:
+ new = {}
+
+ new['id'] = currentresult['id']
+ new['name'] = currentresult['name']
+ new['url'] = currentresult['download_link']
+ new['detail_url'] = self.urls['detail'] % currentresult['id']
+ new['size'] = tryInt(currentresult['size']) / 1024 / 1024
+ new['seeders'] = tryInt(currentresult['seeders'])
+ new['leechers'] = tryInt(currentresult['leechers'])
+
+ results.append(new)
+ return
+
+
+config = [{
+ 'name': 'xthor',
+ 'groups': [{
+ 'tab': 'searcher',
+ 'list': 'torrent_providers',
+ 'name': 'xthor',
+ 'description': 'See xthor ',
+ 'icon' : 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAIAAACQkWg2AAAACXBIWXMAAAsTAAALEwEAmpwYAAACRUlEQVR4nEWSS2/TQBSF752nHT+TJmmTtKVICIkFP4H/L7FBLBArpD5U+nIetWPH45m5wyIFzuboSN/ZfQh/wxgbJXk2mWfjWVpMIwEY3Prx9uH+tq7rEMIRw2NJKWfzVVIsWJyhTlk0Et5gGBKFksOvn9/v766PHw4AWuvlchlnSyw+AlNhfEXJGSBjQg6EZvc0mc6dte2+BgDOGFutzrWOgRcQFbD8jO++iLjEqKD2mZAHJoau0aPk0NR2MLwcl8X4EgBB51Cc8lGm2xvZPYj2jgVHfe0GQ0OHiDI9ada/2XS2xGQJagL5CoNVZlMuztI8jrDLLz8oKUHGgQKZLkqmaZYznZQkBWRTSCZMJ1GWyrQYXXzSk5XKptFswRiDeA5uYH0vVMq4kMA15mdifCmoD2ZnPPYWQnlhQHngqFIYtoAY3ADAGTJkSqBKpHnW6QQoeFU6YOHkyucr1+2DiECMACQAC+7AXLcbaSldTfU9E4pHZbj5SsTtvnM331zbBO9BJMBEoM57wzHQyeki1sp5G0wt8gXrqtBUrroeHn7YwZInQA3tsx36qrrnxpgyicbTuVAjaiu/uwUiiKeBSdtunWnB9PB6E1xfVXeHw4ETUd/tZ+OiHE9QJdS+2G7ruq3vm9BVfmihfQLf1fV6s1m/qTEMw+u2KrOoPHvPi/PgjTetbZ7soQ6HV3L9ZlNtNmsiejsAQN/3z48Pbl9FodMCOBKQPexf9/Wuql6apjnS/219G4hKKSEEIiPy1lrn3D+xj/kDN/1GOELQrVcAAAAASUVORK5CYII=',
+ 'wizard': True,
+ 'options': [{
+ 'name': 'enabled',
+ 'type': 'enabler',
+ 'default': False,
+ },
+ {
+ 'name': 'passkey',
+ 'default': '',
+ },
+ {
+ 'name': 'seed_ratio',
+ 'label': 'Seed ratio',
+ 'type': 'float',
+ 'default': 1,
+ 'description': 'Will not be (re)moved until this seed ratio is met.',
+ },
+ {
+ 'name': 'seed_time',
+ 'label': 'Seed time',
+ 'type': 'int',
+ 'default': 40,
+ 'description': 'Will not be (re)moved until this seed time (in hours) is met.',
+ },
+ {
+ 'name': 'extra_score',
+ 'advanced': True,
+ 'label': 'Extra Score',
+ 'type': 'int',
+ 'default': 0,
+ 'description': 'Starting score for each release found via this provider.',
+ }],
+ },],
+}]
diff --git a/couchpotato/core/media/_base/providers/torrent/yts.py b/couchpotato/core/media/_base/providers/torrent/yts.py
new file mode 100644
index 0000000000..674adc3cdf
--- /dev/null
+++ b/couchpotato/core/media/_base/providers/torrent/yts.py
@@ -0,0 +1,130 @@
+from datetime import datetime
+from couchpotato.core.helpers.variable import tryInt, getIdentifier
+from couchpotato.core.logger import CPLog
+from couchpotato.core.media._base.providers.torrent.base import TorrentMagnetProvider
+import random
+
+log = CPLog(__name__)
+
+
+class Base(TorrentMagnetProvider):
+ # Only qualities allowed: 720p/1080p/3D - the rest will fail.
+ # All YTS.ag torrents are verified
+ urls = {
+ 'detail': 'https://yts.am/api#list_movies',
+ 'search': 'https://yts.am/api/v2/list_movies.json?query_term=%s&limit=%s&page=%s'
+ }
+
+ def _search(self, movie, quality, results):
+ limit = 10
+ page = 1
+ data = self.getJsonData(self.urls['search'] % (getIdentifier(movie), limit, page))
+
+ if data:
+ movie_count = tryInt(data['data']['movie_count'])
+
+ if movie_count == 0:
+ log.debug('%s - found no results', (self.getName()))
+ else:
+
+ movie_results = data['data']['movies']
+ for i in range(0,len(movie_results)):
+ result = data['data']['movies'][i]
+ name = result['title']
+ year = result['year']
+ detail_url = result['url']
+
+ for torrent in result['torrents']:
+ t_quality = torrent['quality']
+
+ if t_quality in quality['label']:
+ hash = torrent['hash']
+ size = tryInt(torrent['size_bytes'] / 1048576)
+ seeders = tryInt(torrent['seeds'])
+ leechers = tryInt(torrent['peers'])
+ pubdate = torrent['date_uploaded'] # format: 2017-02-17 18:40:03
+ pubdate = datetime.strptime(pubdate, '%Y-%m-%d %H:%M:%S')
+ age = (datetime.now() - pubdate).days
+
+ results.append({
+ 'id': random.randint(100, 9999),
+ 'name': '%s (%s) %s %s %s' % (name, year, 'YTS', t_quality, 'BR-Rip'),
+ 'url': self.make_magnet(hash, name),
+ 'size': size,
+ 'seeders': seeders,
+ 'leechers': leechers,
+ 'age': age,
+ 'detail_url': detail_url,
+ 'score': 1
+ })
+
+ return
+
+ def make_magnet(self, hash, name):
+ url_encoded_trackers = 'udp%3A%2F%2Fopen.demonii.com%3A1337%2Fannounce&tr=%0Audp%3A%2F%2Ftracker.openbittorr' \
+ 'ent.com%3A80&tr=%0Audp%3A%2F%2Ftracker.coppersurfer.tk%3A6969&tr=%0Audp%3A%2F%2Fglot' \
+ 'orrents.pw%3A6969%2Fannounce&tr=%0Audp%3A%2F%2Ftracker.opentrackr.org%3A1337%2Fannou' \
+ 'nce&tr=%0Audp%3A%2F%2Ftorrent.gresille.org%3A80%2Fannounce&tr=%0Audp%3A%2F%2Fp4p.are' \
+                               'nabg.com%3A1337&tr=%0Audp%3A%2F%2Ftracker.leechers-paradise.org%3A6969'
+
+ return 'magnet:?xt=urn:btih:%s&dn=%s&tr=%s' % (hash, name.replace(' ', '+'), url_encoded_trackers)
+
+
+config = [{
+ 'name': 'yts',
+ 'groups': [
+ {
+ 'tab': 'searcher',
+ 'list': 'torrent_providers',
+ 'name': 'YTS',
+ 'description': 'YTS ',
+ 'wizard': True,
+ 'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAIAAACQkWg2AAACL0lEQVR4AS1SPW/UQBAd23fxne/Ld2dvzvHuzPocEBAKokCBqG'
+ 'iQ6IgACYmvUKRBFEQgKKGg4BAlUoggggYUEQpSHOI7CIEoQs/fYcbLaU/efTvvvZlnA1qydoxU5kcxX0CkgmQZtPy0hCUjvK+W'
+ 'gEByOZ5dns1O5bzna8fRVkgsxH8B0YouIvBhdD5T11NiVOoKrsttyUcpRW0InUrFnwe9HzuP2uaQZYhF2LQ76TTXw2RVMTK8mY'
+ 'Ybjfh+zNquMVCrqn93aArLSixPxnafdGDLaz1tjY5rmNa8z5BczEQOxQfCl1GyoqoWxYRN1bkh7ELw3q/vhP6HIL4TG9Kumpjg'
+ 'vwuyM7OsjSj98E/vszMfZ7xvPtMaWxGO5crwIumKCR5HxDtJ0AWKGG204RfUd/3smJYqwem/Q7BTS1ZGfM4LNpVwuKAz6cMeRO'
+ 'st0S2EwNE7GjTehO2H3dxqIpdkydat15G3F8SXBi4GlpBNlSz012L/k2+W0CLLk/jbcf13rf41yJeMQ8QWUZiHCfCA9ad+81nE'
+ 'KPtoS9mJOf9v0NmMJHgUT6xayheK9EIK7JJeU/AF4scDF7Y5SPlJrRcxJ+um4ibNEdObxLiIwJim+eT2AL5D9CIcnZ5zvSJi9e'
+ 'IlNHVVtZ831dk5svPgvjPWTq+ktWkd/kD0qtm71x+sDQe3kt6DXnM7Ct+GajmTxKlkAokWljyAKSm5oWa2w+BH4P2UuVub7eTy'
+ 'iGOQYapY/wEztHduSDYz5gAAAABJRU5ErkJggg==',
+
+ 'options': [
+ {
+ 'name': 'enabled',
+ 'type': 'enabler',
+ 'default': False
+ },
+ {
+ 'name': 'seed_ratio',
+ 'label': 'Seed ratio',
+ 'type': 'float',
+ 'default': 1,
+ 'description': 'Will not be (re)moved until this seed ratio is met.',
+ },
+ {
+ 'name': 'seed_time',
+ 'label': 'Seed time',
+ 'type': 'int',
+ 'default': 40,
+ 'description': 'Will not be (re)moved until this seed time (in hours) is met.',
+ },
+ {
+ 'name': 'info',
+ 'label': 'Info',
+ 'type':'bool',
+                    'default': False,
+ 'description': 'YTS will only work if you set the minimum size for 720p to 500 and 1080p to 800',
+ },
+ {
+ 'name': 'extra_score',
+ 'advanced': True,
+ 'label': 'Extra Score',
+ 'type': 'int',
+ 'default': 0,
+ 'description': 'Starting score for each release found via this provider.',
+ }
+ ],
+ }
+ ]
+}]
diff --git a/libs/jinja2/testsuite/res/__init__.py b/couchpotato/core/media/_base/providers/userscript/__init__.py
similarity index 100%
rename from libs/jinja2/testsuite/res/__init__.py
rename to couchpotato/core/media/_base/providers/userscript/__init__.py
diff --git a/couchpotato/core/media/_base/providers/userscript/base.py b/couchpotato/core/media/_base/providers/userscript/base.py
new file mode 100644
index 0000000000..3692294650
--- /dev/null
+++ b/couchpotato/core/media/_base/providers/userscript/base.py
@@ -0,0 +1,71 @@
+from urlparse import urlparse
+
+from couchpotato.core.event import addEvent, fireEvent
+from couchpotato.core.helpers.encoding import simplifyString
+from couchpotato.core.helpers.variable import getImdb, md5
+from couchpotato.core.logger import CPLog
+from couchpotato.core.media._base.providers.base import Provider
+
+
+log = CPLog(__name__)
+
+
+class UserscriptBase(Provider):
+
+ type = 'userscript'
+
+ version = 1
+ http_time_between_calls = 0
+
+ includes = []
+ excludes = []
+
+ def __init__(self):
+ addEvent('userscript.get_includes', self.getInclude)
+ addEvent('userscript.get_excludes', self.getExclude)
+ addEvent('userscript.get_provider_version', self.getVersion)
+ addEvent('userscript.get_movie_via_url', self.belongsTo)
+
+ def search(self, name, year = None):
+ result = fireEvent('movie.search', q = '%s %s' % (name, year), limit = 1, merge = True)
+
+ if len(result) > 0:
+ movie = fireEvent('movie.info', identifier = result[0].get('imdb'), extended = False, merge = True)
+ return movie
+ else:
+ return None
+
+ def belongsTo(self, url):
+
+ host = urlparse(url).hostname
+ host_split = host.split('.')
+ if len(host_split) > 2:
+ host = host[len(host_split[0]):]
+
+ for include in self.includes:
+ if host in include:
+ return self.getMovie(url)
+
+ return
+
+ def getUrl(self, url):
+ return self.getCache(md5(simplifyString(url)), url = url)
+
+ def getMovie(self, url):
+ try:
+ data = self.getUrl(url)
+ except:
+ data = ''
+ return self.getInfo(getImdb(data))
+
+ def getInfo(self, identifier):
+ return fireEvent('movie.info', identifier = identifier, extended = False, merge = True)
+
+ def getInclude(self):
+ return self.includes
+
+ def getExclude(self):
+ return self.excludes
+
+ def getVersion(self):
+ return self.version
diff --git a/couchpotato/core/media/_base/search/__init__.py b/couchpotato/core/media/_base/search/__init__.py
new file mode 100644
index 0000000000..c23fdb7290
--- /dev/null
+++ b/couchpotato/core/media/_base/search/__init__.py
@@ -0,0 +1,5 @@
+from .main import Search
+
+
+def autoload():
+ return Search()
diff --git a/couchpotato/core/media/_base/search/main.py b/couchpotato/core/media/_base/search/main.py
new file mode 100644
index 0000000000..1d0603cb6a
--- /dev/null
+++ b/couchpotato/core/media/_base/search/main.py
@@ -0,0 +1,68 @@
+from couchpotato.api import addApiView
+from couchpotato.core.event import fireEvent, addEvent
+from couchpotato.core.helpers.variable import mergeDicts, getImdb
+from couchpotato.core.logger import CPLog
+from couchpotato.core.plugins.base import Plugin
+
+log = CPLog(__name__)
+
+
+class Search(Plugin):
+
+ def __init__(self):
+
+ addApiView('search', self.search, docs = {
+ 'desc': 'Search the info in providers for a movie',
+ 'params': {
+ 'q': {'desc': 'The (partial) movie name you want to search for'},
+ 'type': {'desc': 'Search for a specific media type. Leave empty to search all.'},
+ },
+ 'return': {'type': 'object', 'example': """{
+ 'success': True,
+ 'movies': array,
+ 'show': array,
+ etc
+}"""}
+ })
+
+ addEvent('app.load', self.addSingleSearches)
+
+ def search(self, q = '', types = None, **kwargs):
+
+ # Make sure types is the correct instance
+ if isinstance(types, (str, unicode)):
+ types = [types]
+ elif isinstance(types, (list, tuple, set)):
+ types = list(types)
+
+ imdb_identifier = getImdb(q)
+
+ if not types:
+ if imdb_identifier:
+ result = fireEvent('movie.info', identifier = imdb_identifier, merge = True)
+ result = {result['type']: [result]}
+ else:
+ result = fireEvent('info.search', q = q, merge = True)
+ else:
+ result = {}
+ for media_type in types:
+ if imdb_identifier:
+ result[media_type] = fireEvent('%s.info' % media_type, identifier = imdb_identifier)
+ else:
+ result[media_type] = fireEvent('%s.search' % media_type, q = q)
+
+ return mergeDicts({
+ 'success': True,
+ }, result)
+
+ def createSingleSearch(self, media_type):
+
+ def singleSearch(q, **kwargs):
+ return self.search(q, type = media_type, **kwargs)
+
+ return singleSearch
+
+ def addSingleSearches(self):
+
+ for media_type in fireEvent('media.types', merge = True):
+ addApiView('%s.search' % media_type, self.createSingleSearch(media_type))
diff --git a/couchpotato/core/media/_base/search/static/search.js b/couchpotato/core/media/_base/search/static/search.js
new file mode 100644
index 0000000000..e0aa23ddc5
--- /dev/null
+++ b/couchpotato/core/media/_base/search/static/search.js
@@ -0,0 +1,211 @@
+var BlockSearch = new Class({
+
+ Extends: BlockBase,
+
+ options: {
+ 'animate': true
+ },
+
+ cache: {},
+
+ create: function(){
+ var self = this;
+
+ var focus_timer = 0;
+ self.el = new Element('div.search_form').adopt(
+ new Element('a.icon-search', {
+ 'events': {
+ 'click': self.clear.bind(self)
+ }
+ }),
+ self.wrapper = new Element('div.wrapper').adopt(
+ self.result_container = new Element('div.results_container', {
+ 'events': {
+ 'mousewheel': function(e){
+ (e).stopPropagation();
+ }
+ }
+ }).grab(
+ self.results = new Element('div.results')
+ ),
+ new Element('div.input').grab(
+ self.input = new Element('input', {
+ 'placeholder': 'Search & add a new media',
+ 'events': {
+ 'input': self.keyup.bind(self),
+ 'paste': self.keyup.bind(self),
+ 'change': self.keyup.bind(self),
+ 'keyup': self.keyup.bind(self),
+ 'focus': function(){
+ if(focus_timer) clearRequestTimeout(focus_timer);
+ if(this.get('value'))
+ self.hideResults(false);
+ },
+ 'blur': function(){
+ focus_timer = requestTimeout(function(){
+ self.el.removeClass('focused');
+ self.last_q = null;
+ }, 100);
+ }
+ }
+ })
+ )
+ )
+ );
+
+ self.mask = new Element('div.mask').inject(self.result_container);
+
+ },
+
+ clear: function(e){
+ var self = this;
+ (e).preventDefault();
+
+ if(self.last_q === ''){
+ self.input.blur();
+ self.last_q = null;
+ }
+ else {
+
+ self.last_q = '';
+ self.input.set('value', '');
+ self.el.addClass('focused');
+ self.input.focus();
+
+ self.media = {};
+ self.results.empty();
+ self.el.removeClass('filled');
+
+ // Animate in
+ if(self.options.animate){
+
+ dynamics.css(self.wrapper, {
+ opacity: 0,
+ scale: 0.1
+ });
+
+ dynamics.animate(self.wrapper, {
+ opacity: 1,
+ scale: 1
+ }, {
+ type: dynamics.spring,
+ frequency: 200,
+ friction: 270,
+ duration: 800
+ });
+
+ }
+
+ }
+ },
+
+ hideResults: function(bool){
+ var self = this;
+
+ if(self.hidden == bool) return;
+
+ self.el[bool ? 'removeClass' : 'addClass']('shown');
+
+ if(bool){
+ History.removeEvent('change', self.hideResults.bind(self, !bool));
+ self.el.removeEvent('outerClick', self.hideResults.bind(self, !bool));
+ }
+ else {
+ History.addEvent('change', self.hideResults.bind(self, !bool));
+ self.el.addEvent('outerClick', self.hideResults.bind(self, !bool));
+ }
+
+ self.hidden = bool;
+ },
+
+ keyup: function(){
+ var self = this;
+
+ self.el[self.q() ? 'addClass' : 'removeClass']('filled');
+
+ if(self.q() != self.last_q){
+ if(self.api_request && self.api_request.isRunning())
+ self.api_request.cancel();
+
+ if(self.autocomplete_timer) clearRequestTimeout(self.autocomplete_timer);
+ self.autocomplete_timer = requestTimeout(self.autocomplete.bind(self), 300);
+ }
+
+ },
+
+ autocomplete: function(){
+ var self = this;
+
+ if(!self.q()){
+ self.hideResults(true);
+ return;
+ }
+
+ self.list();
+ },
+
+ list: function(){
+ var self = this,
+ q = self.q(),
+ cache = self.cache[q];
+
+ self.hideResults(false);
+
+ if(!cache){
+ requestTimeout(function(){
+ self.mask.addClass('show');
+ }, 10);
+
+ if(!self.spinner)
+ self.spinner = createSpinner(self.mask);
+
+ self.api_request = Api.request('search', {
+ 'data': {
+ 'q': q
+ },
+ 'onComplete': self.fill.bind(self, q)
+ });
+ }
+ else
+ self.fill(q, cache);
+
+ self.last_q = q;
+
+ },
+
+ fill: function(q, json){
+ var self = this;
+
+ self.cache[q] = json;
+
+ self.media = {};
+ self.results.empty();
+
+ Object.each(json, function(media){
+ if(typeOf(media) == 'array'){
+ Object.each(media, function(me){
+
+ var m = new window['BlockSearch' + me.type.capitalize() + 'Item'](me);
+ $(m).inject(self.results);
+ self.media[m.imdb || 'r-'+Math.floor(Math.random()*10000)] = m;
+
+ if(q == m.imdb)
+ m.showOptions();
+
+ });
+ }
+ });
+
+ self.mask.removeClass('show');
+
+ },
+
+ loading: function(bool){
+ this.el[bool ? 'addClass' : 'removeClass']('loading');
+ },
+
+ q: function(){
+ return this.input.get('value').trim();
+ }
+
+});
diff --git a/couchpotato/core/media/_base/search/static/search.scss b/couchpotato/core/media/_base/search/static/search.scss
new file mode 100644
index 0000000000..b9876b16c9
--- /dev/null
+++ b/couchpotato/core/media/_base/search/static/search.scss
@@ -0,0 +1,535 @@
+@import "_mixins";
+
+.search_form {
+ display: inline-block;
+ z-index: 11;
+ width: 44px;
+ position: relative;
+
+ * {
+ transform: rotateZ(360deg); // Mobile IE redraw fix
+ }
+
+ .icon-search {
+ position: absolute;
+ z-index: 2;
+ top: 50%;
+ left: 0;
+ height: 100%;
+ text-align: center;
+ color: #FFF;
+ font-size: 20px;
+ transform: translateY(-50%);
+
+ &:hover {
+ @include theme(background, menu_off);
+ }
+ }
+
+ .wrapper {
+ position: absolute;
+ left: 44px;
+ bottom: 0;
+ @include theme(background, primary);
+ border-radius: $border_radius 0 0 $border_radius;
+ display: none;
+ box-shadow: 0 0 15px 2px rgba(0,0,0,.15);
+
+ @include theme-dark {
+ box-shadow: 0 5px 15px 2px rgba(0,0,0,.4);
+ }
+
+ &:before {
+ transform: rotate(45deg);
+ content: '';
+ display: block;
+ position: absolute;
+ height: 10px;
+ width: 10px;
+ @include theme(background, primary);
+ left: -6px;
+ bottom: 16px;
+ z-index: 1;
+ }
+ }
+
+ .input {
+ @include theme(background, background);
+ border-radius: $border_radius 0 0 $border_radius;
+ position: relative;
+ left: 4px;
+ height: 44px;
+ overflow: hidden;
+ width: 100%;
+
+ input {
+ position: absolute;
+ top: 0;
+ left: 0;
+ height: 100%;
+ width: 100%;
+ z-index: 1;
+
+ &::-ms-clear {
+ width : 0;
+ height: 0;
+ }
+
+ &:focus {
+ background: rgba(255,255,255, .2);
+
+ @include theme-dark {
+ background: rgba(0,0,0, .2);
+ }
+
+ &::-webkit-input-placeholder {
+ opacity: .7;
+ }
+ &::-moz-placeholder {
+ opacity: .7;
+ }
+ &:-ms-input-placeholder {
+ opacity: .7;
+ }
+ }
+ }
+ }
+
+ &.filled {
+ &.focused .icon-search:before,
+ .page.home & .icon-search:before {
+ content: '\e80e';
+ }
+
+ .input input {
+ background: rgba(255,255,255,.3);
+
+ @include theme-dark {
+ background: rgba(0,0,0,.3);
+ }
+ }
+ }
+
+ &.focused,
+ &.shown,
+ .page.home & {
+ border-color: #04bce6;
+
+ .wrapper {
+ display: block;
+ width: 380px;
+ transform-origin: 0 90%;
+
+ @include media-phablet {
+ width: 260px;
+ }
+ }
+
+ .input {
+
+ input {
+ opacity: 1;
+ }
+ }
+ }
+
+ .results_container {
+ min-height: 50px;
+ text-align: left;
+ position: relative;
+ left: 4px;
+ display: none;
+ @include theme(background, background);
+ border-radius: $border_radius 0 0 0;
+ overflow: hidden;
+
+ .results {
+ max-height: 280px;
+ overflow-x: hidden;
+
+ .media_result {
+ overflow: hidden;
+ height: 50px;
+ position: relative;
+
+ @include media-phablet {
+ font-size: 12px;
+ }
+
+ .options {
+ position: absolute;
+ height: 100%;
+ top: 0;
+ left: 30px;
+ right: 0;
+ display: flex;
+ align-items: center;
+ background: get-theme(off);
+
+ @include theme-dark {
+ background: get-theme-dark(off);
+ }
+
+ @include media-phablet {
+ left: 0;
+ }
+
+ > .in_library_wanted {
+ margin-top: -7px;
+ }
+
+ > div {
+ border: 0;
+ display: flex;
+ padding: 10px;
+ align-items: stretch;
+ justify-content: space-between;
+
+ @include media-phablet {
+ padding: 3px;
+ }
+ }
+
+ select {
+ display: block;
+ height: 100%;
+ width: 100%;
+
+ @include media-phablet {
+ min-width: 0;
+ margin-right: 2px;
+ }
+ }
+
+ .title {
+ margin-right: 5px;
+ width: 210px;
+
+ @include media-phablet {
+ width: 140px;
+ margin-right: 2px;
+ }
+ }
+
+ .profile, .category {
+ margin: 0 5px 0 0;
+
+ @include media-phablet {
+ margin-right: 2px;
+ }
+ }
+
+ .add {
+ width: 42px;
+ flex: 1 auto;
+
+ a {
+ color: #FFF;
+ }
+ }
+
+ .button {
+ display: block;
+ @include theme(background, primary);
+ text-align: center;
+ margin: 0;
+ }
+
+ .message {
+ font-size: 20px;
+ color: #fff;
+ }
+
+ }
+
+ .thumbnail {
+ width: 30px;
+ min-height: 100%;
+ display: block;
+ margin: 0;
+ vertical-align: top;
+
+ @include media-phablet {
+ display: none;
+ }
+ }
+
+ .data {
+ position: absolute;
+ height: 100%;
+ top: 0;
+ left: 30px;
+ right: 0;
+ cursor: pointer;
+ border-top: 1px solid rgba(255,255,255, 0.08);
+ transition: all .4s cubic-bezier(0.9,0,0.1,1);
+ will-change: transform;
+ transform: translateX(0) rotateZ(360deg);
+ @include theme(background, background);
+
+ @include theme-dark {
+ border-color: rgba(255,255,255, 0.08);
+ }
+
+ @include media-phablet {
+ left: 0;
+ }
+
+ &:hover {
+ transform: translateX(2%) rotateZ(360deg);
+ }
+
+ &.open {
+ transform: translateX(100%) rotateZ(360deg);
+ }
+
+ .info {
+ position: absolute;
+ top: 20%;
+ left: 15px;
+ right: 7px;
+ vertical-align: middle;
+
+ h2 {
+ margin: 0;
+ font-weight: 300;
+ font-size: 1.25em;
+ padding: 0;
+ position: absolute;
+ width: 100%;
+ display: flex;
+
+ .title {
+ display: inline-block;
+ margin: 0;
+ text-overflow: ellipsis;
+ overflow: hidden;
+ white-space: nowrap;
+ flex: 1 auto;
+ }
+
+ .year {
+ opacity: .4;
+ padding: 0 5px;
+ width: auto;
+ }
+
+ .in_wanted,
+ .in_library {
+ position: absolute;
+ top: 15px;
+ left: 0;
+ font-size: 11px;
+ @include theme(color, primary);
+ }
+
+ &.in_library_wanted {
+ .title {
+ margin-top: -7px;
+ }
+ }
+ }
+ }
+ }
+
+ &:hover .info h2 .year {
+ display: inline-block;
+ }
+
+ &:last-child .data {
+ border-bottom: 0;
+ }
+ }
+
+ }
+ }
+
+ &.focused.filled,
+ &.shown.filled {
+ .results_container {
+ display: block;
+ }
+
+ .input {
+ border-radius: 0 0 0 $border_radius;
+ }
+ }
+
+ .page.home & {
+ $input_height: 66px;
+ $input_height_mobile: 44px;
+
+ display: block;
+ padding: $padding;
+ width: 100%;
+ max-width: 500px;
+ margin: 0 auto;
+ height: $input_height + 2*$padding;
+ position: relative;
+ margin-top: $padding;
+
+ @include media-phablet {
+ margin-top: $padding/2;
+ height: $input_height_mobile + $padding;
+ }
+
+ .icon-search {
+ display: block;
+ @include theme(color, text);
+ right: $padding;
+ top: $padding;
+ width: $input_height;
+ height: $input_height;
+ line-height: $input_height;
+ left: auto;
+ transform: none;
+ font-size: 2em;
+ opacity: .5;
+
+ &:hover {
+ background: none;
+ }
+
+ @include media-phablet {
+ right: $padding/2;
+ width: $input_height_mobile;
+ height: $input_height_mobile;
+ line-height: $input_height_mobile;
+ right: $padding/2;
+ top: $padding/2;
+ font-size: 1.5em;
+ }
+ }
+
+ .wrapper {
+ border-radius: 0;
+ box-shadow: none;
+ bottom: auto;
+ top: $padding;
+ left: $padding;
+ right: $padding;
+ position: absolute;
+ width: auto;
+
+ @include media-phablet {
+ right: $padding/2;
+ top: $padding/2;
+ left: $padding/2;
+ }
+
+ &:before {
+ display: none;
+ }
+
+ .input {
+ border-radius: 0;
+ left: 0;
+ position: absolute;
+ top: 0;
+ height: $input_height;
+
+ @include media-phablet {
+ height: $input_height_mobile;
+ }
+
+ input {
+ box-shadow: 0;
+ font-size: 2em;
+ font-weight: 400;
+ padding-right: $input_height;
+ @include theme(background, background);
+
+ @include media-phablet {
+ padding-right: $input_height_mobile;
+ font-size: 1em;
+ }
+ }
+ }
+
+ .results_container {
+ min-height: $input_height;
+ position: absolute;
+ top: $input_height;
+ left: 0;
+ right: 0;
+ border: 1px solid get-theme(off);
+ border-top: 0;
+
+ @include theme-dark {
+ border-color: get-theme-dark(off);
+ }
+
+ @include media-phablet {
+ top: $input_height_mobile;
+ min-height: $input_height_mobile;
+ }
+
+
+ @include media-phablet-and-up {
+ .results {
+ max-height: 400px;
+
+ .media_result {
+ height: $input_height;
+
+
+ @include media-phablet {
+ height: $input_height_mobile;
+ }
+
+ .thumbnail {
+ width: 40px;
+ }
+
+ .options {
+ left: 40px;
+
+ .title {
+ margin-right: 5px;
+ width: 320px;
+
+ @include media-phablet {
+ width: 140px;
+ margin-right: 2px;
+ }
+ }
+ }
+
+ .data {
+ left: 40px;
+ }
+ }
+ }
+ }
+
+
+ @include media-phablet {
+ .results {
+ .media_result {
+ height: $input_height_mobile;
+
+ .options {
+
+ .title {
+
+ width: 140px;
+ margin-right: 2px;
+ }
+
+ }
+
+ }
+ }
+ }
+ }
+
+ }
+
+
+ }
+
+}
+
+.big_search {
+ @include theme(background, off);
+}
diff --git a/couchpotato/core/media/_base/searcher/__init__.py b/couchpotato/core/media/_base/searcher/__init__.py
new file mode 100644
index 0000000000..2bf06ebc18
--- /dev/null
+++ b/couchpotato/core/media/_base/searcher/__init__.py
@@ -0,0 +1,103 @@
+from .main import Searcher
+
+
+def autoload():
+ return Searcher()
+
+config = [{
+ 'name': 'searcher',
+ 'order': 20,
+ 'groups': [
+ {
+ 'tab': 'searcher',
+ 'name': 'searcher',
+ 'label': 'Basics',
+ 'description': 'General search options',
+ 'options': [
+ {
+ 'name': 'preferred_method',
+ 'label': 'First search',
+ 'description': 'Which of the methods do you prefer',
+ 'default': 'both',
+ 'type': 'dropdown',
+ 'values': [('usenet & torrents', 'both'), ('usenet', 'nzb'), ('torrents', 'torrent')],
+ },
+ ],
+ }, {
+ 'tab': 'searcher',
+ 'subtab': 'category',
+ 'subtab_label': 'Categories',
+ 'name': 'filter',
+ 'label': 'Global filters',
+ 'description': 'Prefer, ignore & required words in release names',
+ 'options': [
+ {
+ 'name': 'preferred_words',
+ 'label': 'Preferred',
+ 'default': '',
+ 'placeholder': 'Example: CtrlHD, Amiable, Wiki',
+ 'description': 'Words that give the releases a higher score.'
+ },
+ {
+ 'name': 'required_words',
+ 'label': 'Required',
+ 'default': '',
+ 'placeholder': 'Example: DTS, AC3 & English',
+ 'description': 'Release should contain at least one set of words. Sets are separated by "," and each word within a set must be separated with "&"'
+ },
+ {
+ 'name': 'ignored_words',
+ 'label': 'Ignored',
+ 'default': 'german, dutch, french, truefrench, danish, swedish, spanish, italian, korean, dubbed, swesub, korsub, dksubs, vain, HC',
+ 'description': 'Ignores releases that match any of these sets. (Works like explained above)'
+ },
+ {
+ 'name': 'dubbed_version',
+ 'label': 'Dubbed Version',
+ 'type': 'bool',
+ 'default': False,
+ 'description': 'Requests that the releases are with a french audio track (dubbed for international movies)'
+ },
+ ],
+ },
+ ],
+}, {
+ 'name': 'nzb',
+ 'groups': [
+ {
+ 'tab': 'searcher',
+ 'name': 'searcher',
+ 'label': 'NZB',
+ 'wizard': True,
+ 'options': [
+ {
+ 'name': 'retention',
+ 'label': 'Usenet Retention',
+ 'default': 1500,
+ 'type': 'int',
+ 'unit': 'days'
+ },
+ ],
+ },
+ ],
+}, {
+ 'name': 'torrent',
+ 'groups': [
+ {
+ 'tab': 'searcher',
+ 'name': 'searcher',
+ 'wizard': True,
+ 'options': [
+ {
+ 'name': 'minimum_seeders',
+ 'advanced': True,
+ 'label': 'Minimum seeders',
+ 'description': 'Ignore torrents with seeders below this number',
+ 'default': 1,
+ 'type': 'int',
+ 'unit': 'seeders'
+ },
+ ],
+ },
+ ],
+}]
diff --git a/couchpotato/core/media/_base/searcher/base.py b/couchpotato/core/media/_base/searcher/base.py
new file mode 100644
index 0000000000..5322d8505b
--- /dev/null
+++ b/couchpotato/core/media/_base/searcher/base.py
@@ -0,0 +1,43 @@
+from couchpotato.core.event import addEvent, fireEvent
+from couchpotato.core.logger import CPLog
+from couchpotato.core.plugins.base import Plugin
+
+log = CPLog(__name__)
+
+
+class SearcherBase(Plugin):
+
+ in_progress = False
+
+ def __init__(self):
+ super(SearcherBase, self).__init__()
+
+ addEvent('searcher.progress', self.getProgress)
+ addEvent('%s.searcher.progress' % self.getType(), self.getProgress)
+
+ self.initCron()
+
+ def initCron(self):
+ """ Set the searcher cronjob
+ Make sure to reset cronjob after setting has changed
+ """
+
+ _type = self.getType()
+
+ def setCrons():
+ fireEvent('schedule.cron', '%s.searcher.all' % _type, self.searchAll,
+ day = self.conf('cron_day'), hour = self.conf('cron_hour'), minute = self.conf('cron_minute'))
+
+ addEvent('app.load', setCrons)
+ addEvent('setting.save.%s_searcher.cron_day.after' % _type, setCrons)
+ addEvent('setting.save.%s_searcher.cron_hour.after' % _type, setCrons)
+ addEvent('setting.save.%s_searcher.cron_minute.after' % _type, setCrons)
+
+ def getProgress(self, **kwargs):
+ """ Return progress of current searcher"""
+
+ progress = {
+ self.getType(): self.in_progress
+ }
+
+ return progress
diff --git a/couchpotato/core/media/_base/searcher/main.py b/couchpotato/core/media/_base/searcher/main.py
new file mode 100644
index 0000000000..16c5cd27f9
--- /dev/null
+++ b/couchpotato/core/media/_base/searcher/main.py
@@ -0,0 +1,266 @@
+import datetime
+import re
+
+from couchpotato.api import addApiView
+from couchpotato.core.event import addEvent, fireEvent
+from couchpotato.core.helpers.encoding import simplifyString
+from couchpotato.core.helpers.variable import splitString, removeEmpty, removeDuplicate, getAllLanguages
+from couchpotato.core.logger import CPLog
+from couchpotato.core.media._base.searcher.base import SearcherBase
+
+
+log = CPLog(__name__)
+
+
+class Searcher(SearcherBase):
+
+ # noinspection PyMissingConstructor
+ def __init__(self):
+ addEvent('searcher.protocols', self.getSearchProtocols)
+ addEvent('searcher.contains_other_quality', self.containsOtherQuality)
+ addEvent('searcher.correct_3d', self.correct3D)
+ addEvent('searcher.correct_year', self.correctYear)
+ addEvent('searcher.correct_name', self.correctName)
+ addEvent('searcher.correct_words', self.correctWords)
+ addEvent('searcher.correct_language', self.correctLanguage)
+ addEvent('searcher.search', self.search)
+
+ addApiView('searcher.full_search', self.searchAllView, docs = {
+ 'desc': 'Starts a full search for all media',
+ })
+
+ addApiView('searcher.progress', self.getProgressForAll, docs = {
+ 'desc': 'Get the progress of all media searches',
+ 'return': {'type': 'object', 'example': """{
+ 'movie': False || object, total & to_go,
+ 'show': False || object, total & to_go,
+}"""},
+ })
+
+ def searchAllView(self):
+
+ results = {}
+ for _type in fireEvent('media.types'):
+ results[_type] = fireEvent('%s.searcher.all_view' % _type)
+
+ return results
+
+ def getProgressForAll(self):
+ progress = fireEvent('searcher.progress', merge = True)
+ return progress
+
+ def search(self, protocols, media, quality):
+ results = []
+
+ for search_protocol in protocols:
+ protocol_results = fireEvent('provider.search.%s.%s' % (search_protocol, media.get('type')), media, quality, merge = True)
+ if protocol_results:
+ results += protocol_results
+
+ sorted_results = sorted(results, key = lambda k: k['score'], reverse = True)
+
+ download_preference = self.conf('preferred_method', section = 'searcher')
+ if download_preference != 'both':
+ sorted_results = sorted(sorted_results, key = lambda k: k['protocol'][:3], reverse = (download_preference == 'torrent'))
+
+ return sorted_results
+
+ def getSearchProtocols(self):
+
+ download_protocols = fireEvent('download.enabled_protocols', merge = True)
+ provider_protocols = fireEvent('provider.enabled_protocols', merge = True)
+
+ if download_protocols and len(list(set(provider_protocols) & set(download_protocols))) == 0:
+ log.error('There aren\'t any providers enabled for your downloader (%s). Check your settings.', ','.join(download_protocols))
+ return []
+
+ for useless_provider in list(set(provider_protocols) - set(download_protocols)):
+ log.debug('Provider for "%s" enabled, but no downloader.', useless_provider)
+
+ search_protocols = download_protocols
+
+ if len(search_protocols) == 0:
+ log.error('There aren\'t any downloaders enabled. Please pick one in settings.')
+ return []
+
+ return search_protocols
+
+ def containsOtherQuality(self, nzb, movie_year = None, preferred_quality = None):
+ if not preferred_quality: preferred_quality = {}
+
+ found = {}
+
+ # Try guessing via quality tags
+ guess = fireEvent('quality.guess', files = [nzb.get('name')], size = nzb.get('size', None), single = True)
+ if guess:
+ found[guess['identifier']] = True
+
+ # Hack for older movies that don't contain quality tag
+ name = nzb['name']
+ size = nzb.get('size', 0)
+
+ year_name = fireEvent('scanner.name_year', name, single = True)
+ if len(found) == 0 and movie_year < datetime.datetime.now().year - 3 and not year_name.get('year', None):
+ if size > 20000: # Assume bd50
+ log.info('Quality was missing in name, assuming it\'s a BR-Disk based on the size: %s', size)
+ found['bd50'] = True
+ elif size > 3000: # Assume dvdr
+ log.info('Quality was missing in name, assuming it\'s a DVD-R based on the size: %s', size)
+ found['dvdr'] = True
+ else: # Assume dvdrip
+ log.info('Quality was missing in name, assuming it\'s a DVD-Rip based on the size: %s', size)
+ found['dvdrip'] = True
+
+ # Allow other qualities
+ for allowed in preferred_quality.get('allow'):
+ if found.get(allowed):
+ del found[allowed]
+
+ if found.get(preferred_quality['identifier']) and len(found) == 1:
+ return False
+
+ return found
+
+ def correct3D(self, nzb, preferred_quality = None):
+ if not preferred_quality: preferred_quality = {}
+ if not preferred_quality.get('custom'): return
+
+ threed = preferred_quality['custom'].get('3d')
+
+ # Try guessing via quality tags
+ guess = fireEvent('quality.guess', [nzb.get('name')], single = True)
+
+ if guess:
+ return threed == guess.get('is_3d')
+ # If no quality guess, assume not 3d
+ else:
+ return threed == False
+
+ def correctYear(self, haystack, year, year_range):
+
+ if not isinstance(haystack, (list, tuple, set)):
+ haystack = [haystack]
+
+ year_name = {}
+ for string in haystack:
+
+ year_name = fireEvent('scanner.name_year', string, single = True)
+
+ if year_name and ((year - year_range) <= year_name.get('year') <= (year + year_range)):
+ log.debug('Movie year matches range: %s looking for %s', (year_name.get('year'), year))
+ return True
+
+        log.debug('Movie year doesn\'t match range: %s looking for %s', (year_name.get('year'), year))
+ return False
+
+ def correctName(self, check_name, movie_name):
+
+ check_names = [check_name]
+
+ # Match names between "
+ try: check_names.append(re.search(r'([\'"])[^\1]*\1', check_name).group(0))
+ except: pass
+
+ # Match longest name between []
+ try: check_names.append(max(re.findall(r'[^[]*\[([^]]*)\]', check_name), key = len).strip())
+ except: pass
+
+ for check_name in removeDuplicate(check_names):
+ check_movie = fireEvent('scanner.name_year', check_name, single = True)
+
+ try:
+ check_words = removeEmpty(re.split('\W+', check_movie.get('name', '')))
+ movie_words = removeEmpty(re.split('\W+', simplifyString(movie_name)))
+
+ if len(check_words) > 0 and len(movie_words) > 0 and len(list(set(check_words) - set(movie_words))) == 0:
+ return True
+ except:
+ pass
+
+ return False
+
+ def containsWords(self, rel_name, rel_words, conf, media):
+
+ # Make sure it has required words
+ words = splitString(self.conf('%s_words' % conf, section = 'searcher').lower())
+ try: words = removeDuplicate(words + splitString(media['category'][conf].lower()))
+ except: pass
+
+ req_match = 0
+ for req_set in words:
+ if len(req_set) >= 2 and (req_set[:1] + req_set[-1:]) == '//':
+ if re.search(req_set[1:-1], rel_name):
+ log.debug('Regex match: %s', req_set[1:-1])
+ req_match += 1
+ else:
+ req = splitString(req_set, '&')
+ req_match += len(list(set(rel_words) & set(req))) == len(req)
+
+ return words, req_match > 0
+
+ def correctWords(self, rel_name, media):
+ media_title = fireEvent('searcher.get_search_title', media, single = True)
+ media_words = re.split('\W+', simplifyString(media_title))
+
+ rel_name = simplifyString(rel_name)
+ rel_words = re.split('\W+', rel_name)
+
+ required_words, contains_required = self.containsWords(rel_name, rel_words, 'required', media)
+ if len(required_words) > 0 and not contains_required:
+ log.info2('Wrong: Required word missing: %s', rel_name)
+ return False
+
+ ignored_words, contains_ignored = self.containsWords(rel_name, rel_words, 'ignored', media)
+ if len(ignored_words) > 0 and contains_ignored:
+ log.info2("Wrong: '%s' contains 'ignored words'", rel_name)
+ return False
+
+ # Ignore porn stuff
+ pron_tags = ['xxx', 'sex', 'anal', 'tits', 'fuck', 'porn', 'orgy', 'milf', 'boobs', 'erotica', 'erotic', 'cock', 'dick']
+ pron_words = list(set(rel_words) & set(pron_tags) - set(media_words))
+ if pron_words:
+ log.info('Wrong: %s, probably pr0n', rel_name)
+ return False
+
+ return True
+
+ def correctLanguage(self, rel_name, media):
+ # retrieving the base configuration
+ dubbedVersion = self.conf('dubbed_version', section = 'searcher')
+
+ # retrieving the category configuration
+ try: dubbedVersion = media['category']['dubbed_version']
+ except: pass
+
+        # Use .get() with a default so a missing 'languages' key doesn't leave
+        releaseMetaDatas = media['info'].get('languages', [])
+
+ rel_name = simplifyString(rel_name)
+ rel_words = re.split('\W+', rel_name)
+ upper_rel_words = [x.upper() for x in rel_words]
+
+ languageWordFound = False;
+ for word in upper_rel_words:
+ matchingTuples = [item for item in getAllLanguages() if item[1].upper() == word]
+ if matchingTuples and any(matchingTuples):
+ languageWordFound = True;
+
+ if dubbedVersion:
+ if 'FRENCH' in upper_rel_words or 'TRUEFRENCH' in upper_rel_words or 'MULTI' in upper_rel_words:
+ return True;
+
+ if languageWordFound == False and 'FRENCH' in releaseMetaDatas:
+ return True;
+ else:
+ if any(l for l in upper_rel_words if l.upper() in releaseMetaDatas) or 'MULTI' in upper_rel_words:
+ return True;
+
+ if languageWordFound == False:
+ return True;
+ else:
+ return True;
+
+ return False
+
+class SearchSetupError(Exception):
+ pass
diff --git a/couchpotato/core/media/movie/__init__.py b/couchpotato/core/media/movie/__init__.py
new file mode 100644
index 0000000000..898529c17d
--- /dev/null
+++ b/couchpotato/core/media/movie/__init__.py
@@ -0,0 +1,6 @@
+from couchpotato.core.media import MediaBase
+
+
+class MovieTypeBase(MediaBase):
+
+ _type = 'movie'
diff --git a/couchpotato/core/media/movie/_base/__init__.py b/couchpotato/core/media/movie/_base/__init__.py
new file mode 100644
index 0000000000..14720463ef
--- /dev/null
+++ b/couchpotato/core/media/movie/_base/__init__.py
@@ -0,0 +1,5 @@
+from .main import MovieBase
+
+
+def autoload():
+ return MovieBase()
diff --git a/couchpotato/core/media/movie/_base/main.py b/couchpotato/core/media/movie/_base/main.py
new file mode 100755
index 0000000000..6c4298c423
--- /dev/null
+++ b/couchpotato/core/media/movie/_base/main.py
@@ -0,0 +1,347 @@
+import traceback
+import time
+
+from CodernityDB.database import RecordNotFound
+from couchpotato import get_db
+from couchpotato.api import addApiView
+from couchpotato.core.event import fireEvent, fireEventAsync, addEvent
+from couchpotato.core.helpers.encoding import toUnicode
+from couchpotato.core.helpers.variable import splitString, getTitle, getImdb, getIdentifier
+from couchpotato.core.logger import CPLog
+from couchpotato.core.media.movie import MovieTypeBase
+import six
+
+
+log = CPLog(__name__)
+
+
+class MovieBase(MovieTypeBase):
+
+ _type = 'movie'
+
+ def __init__(self):
+
+ # Initialize this type
+ super(MovieBase, self).__init__()
+ self.initType()
+
+ addApiView('movie.add', self.addView, docs = {
+ 'desc': 'Add new movie to the wanted list',
+ 'return': {'type': 'object', 'example': """{
+ 'success': True,
+ 'movie': object
+}"""},
+ 'params': {
+ 'identifier': {'desc': 'IMDB id of the movie your want to add.'},
+ 'profile_id': {'desc': 'ID of quality profile you want the add the movie in. If empty will use the default profile.'},
+ 'force_readd': {'desc': 'Force re-add even if movie already in wanted or manage. Default: True'},
+ 'category_id': {'desc': 'ID of category you want the add the movie in. If empty will use no category.'},
+ 'title': {'desc': 'Movie title to use for searches. Has to be one of the titles returned by movie.search.'},
+ }
+ })
+ addApiView('movie.edit', self.edit, docs = {
+ 'desc': 'Add new movie to the wanted list',
+ 'params': {
+ 'id': {'desc': 'Movie ID(s) you want to edit.', 'type': 'int (comma separated)'},
+ 'profile_id': {'desc': 'ID of quality profile you want the edit the movie to.'},
+ 'category_id': {'desc': 'ID of category you want the add the movie in. If empty will use no category.'},
+ 'default_title': {'desc': 'Movie title to use for searches. Has to be one of the titles returned by movie.search.'},
+ }
+ })
+
+ addEvent('movie.add', self.add)
+ addEvent('movie.update', self.update)
+ addEvent('movie.update_release_dates', self.updateReleaseDate)
+
+ def add(self, params = None, force_readd = True, search_after = True, update_after = True, notify_after = True, status = None):
+ if not params: params = {}
+
+ # Make sure it's a correct zero filled imdb id
+ params['identifier'] = getImdb(params.get('identifier', ''))
+
+ if not params.get('identifier'):
+ msg = 'Can\'t add movie without imdb identifier.'
+ log.error(msg)
+ fireEvent('notify.frontend', type = 'movie.is_tvshow', message = msg)
+ return False
+ elif not params.get('info'):
+ try:
+ is_movie = fireEvent('movie.is_movie', identifier = params.get('identifier'), adding = True, single = True)
+ if not is_movie:
+ msg = 'Can\'t add movie, seems to be a TV show.'
+ log.error(msg)
+ fireEvent('notify.frontend', type = 'movie.is_tvshow', message = msg)
+ return False
+ except:
+ pass
+
+ info = params.get('info')
+ if not info or (info and len(info.get('titles', [])) == 0):
+ info = fireEvent('movie.info', merge = True, extended = False, identifier = params.get('identifier'))
+
+ # Allow force re-add overwrite from param
+ if 'force_readd' in params:
+ fra = params.get('force_readd')
+ force_readd = fra.lower() not in ['0', '-1'] if not isinstance(fra, bool) else fra
+
+ # Set default title
+ def_title = self.getDefaultTitle(info)
+
+ # Default profile and category
+ default_profile = {}
+ if (not params.get('profile_id') and status != 'done') or params.get('ignore_previous', False):
+ default_profile = fireEvent('profile.default', single = True)
+ cat_id = params.get('category_id')
+
+ try:
+ db = get_db()
+
+ media = {
+ '_t': 'media',
+ 'type': 'movie',
+ 'title': def_title,
+ 'identifiers': {
+ 'imdb': params.get('identifier')
+ },
+ 'status': status if status else 'active',
+ 'profile_id': params.get('profile_id') or default_profile.get('_id'),
+ 'category_id': cat_id if cat_id is not None and len(cat_id) > 0 and cat_id != '-1' else None,
+ }
+
+ # Update movie info
+ try: del info['in_wanted']
+ except: pass
+ try: del info['in_library']
+ except: pass
+ media['info'] = info
+
+ new = False
+ previous_profile = None
+ try:
+ m = db.get('media', 'imdb-%s' % params.get('identifier'), with_doc = True)['doc']
+
+ try:
+ db.get('id', m.get('profile_id'))
+ previous_profile = m.get('profile_id')
+ except RecordNotFound:
+ pass
+ except:
+ log.error('Failed getting previous profile: %s', traceback.format_exc())
+ except:
+ new = True
+ m = db.insert(media)
+
+ # Update dict to be usable
+ m.update(media)
+
+ added = True
+ do_search = False
+ search_after = search_after and self.conf('search_on_add', section = 'moviesearcher')
+ onComplete = None
+
+ if new:
+ if search_after:
+ onComplete = self.createOnComplete(m['_id'])
+ search_after = False
+ elif force_readd:
+
+ # Clean snatched history
+ for release in fireEvent('release.for_media', m['_id'], single = True):
+ if release.get('status') in ['downloaded', 'snatched', 'seeding', 'done']:
+ if params.get('ignore_previous', False):
+ fireEvent('release.update_status', release['_id'], status = 'ignored')
+ else:
+ fireEvent('release.delete', release['_id'], single = True)
+
+ m['profile_id'] = (params.get('profile_id') or default_profile.get('_id')) if not previous_profile else previous_profile
+ m['category_id'] = cat_id if cat_id is not None and len(cat_id) > 0 else (m.get('category_id') or None)
+ m['last_edit'] = int(time.time())
+ m['tags'] = []
+
+ do_search = True
+ db.update(m)
+ else:
+ try: del params['info']
+ except: pass
+ log.debug('Movie already exists, not updating: %s', params)
+ added = False
+
+ # Trigger update info
+ if added and update_after:
+ # Do full update to get images etc
+ fireEventAsync('movie.update', m['_id'], default_title = params.get('title'), on_complete = onComplete)
+
+ # Remove releases
+ for rel in fireEvent('release.for_media', m['_id'], single = True):
+                if rel['status'] == 'available':
+ db.delete(rel)
+
+ movie_dict = fireEvent('media.get', m['_id'], single = True)
+ if not movie_dict:
+ log.debug('Failed adding media, can\'t find it anymore')
+ return False
+
+ if do_search and search_after:
+ onComplete = self.createOnComplete(m['_id'])
+ onComplete()
+
+ if added and notify_after:
+
+ if params.get('title'):
+ message = 'Successfully added "%s" to your wanted list.' % params.get('title', '')
+ else:
+ title = getTitle(m)
+ if title:
+ message = 'Successfully added "%s" to your wanted list.' % title
+ else:
+ message = 'Successfully added to your wanted list.'
+ fireEvent('notify.frontend', type = 'movie.added', data = movie_dict, message = message)
+
+ return movie_dict
+ except:
+ log.error('Failed adding media: %s', traceback.format_exc())
+
+ def addView(self, **kwargs):
+ add_dict = self.add(params = kwargs)
+
+ return {
+ 'success': True if add_dict else False,
+ 'movie': add_dict,
+ }
+
+ def edit(self, id = '', **kwargs):
+
+ try:
+ db = get_db()
+
+ ids = splitString(id)
+ for media_id in ids:
+
+ try:
+ m = db.get('id', media_id)
+ m['profile_id'] = kwargs.get('profile_id') or m['profile_id']
+
+ cat_id = kwargs.get('category_id')
+ if cat_id is not None:
+ m['category_id'] = cat_id if len(cat_id) > 0 else m['category_id']
+
+ # Remove releases
+ for rel in fireEvent('release.for_media', m['_id'], single = True):
+                        if rel['status'] == 'available':
+ db.delete(rel)
+
+ # Default title
+ if kwargs.get('default_title'):
+ m['title'] = kwargs.get('default_title')
+
+ db.update(m)
+
+ fireEvent('media.restatus', m['_id'], single = True)
+
+ m = db.get('id', media_id)
+
+ movie_dict = fireEvent('media.get', m['_id'], single = True)
+ fireEventAsync('movie.searcher.single', movie_dict, on_complete = self.createNotifyFront(media_id))
+
+ except:
+ print traceback.format_exc()
+ log.error('Can\'t edit non-existing media')
+
+ return {
+ 'success': True,
+ }
+ except:
+ log.error('Failed editing media: %s', traceback.format_exc())
+
+ return {
+ 'success': False,
+ }
+
+ def update(self, media_id = None, identifier = None, default_title = None, extended = False):
+ """
+ Update movie information inside media['doc']['info']
+
+ @param media_id: document id
+ @param default_title: default title, if empty, use first one or existing one
+ @param extended: update with extended info (parses more info, actors, images from some info providers)
+ @return: dict, with media
+ """
+
+ if self.shuttingDown():
+ return
+
+ lock_key = 'media.get.%s' % media_id if media_id else identifier
+ self.acquireLock(lock_key)
+
+ media = {}
+ try:
+ db = get_db()
+
+ if media_id:
+ media = db.get('id', media_id)
+ else:
+ media = db.get('media', 'imdb-%s' % identifier, with_doc = True)['doc']
+
+ info = fireEvent('movie.info', merge = True, extended = extended, identifier = getIdentifier(media))
+
+ # Don't need those here
+ try: del info['in_wanted']
+ except: pass
+ try: del info['in_library']
+ except: pass
+
+ if not info or len(info) == 0:
+ log.error('Could not update, no movie info to work with: %s', identifier)
+ return False
+
+ # Update basic info
+ media['info'] = info
+
+ titles = info.get('titles', [])
+ log.debug('Adding titles: %s', titles)
+
+ # Define default title
+ if default_title or media.get('title') == 'UNKNOWN' or len(media.get('title', '')) == 0:
+ media['title'] = self.getDefaultTitle(info, default_title)
+
+ # Files
+ image_urls = info.get('images', [])
+
+ self.getPoster(media, image_urls)
+
+ db.update(media)
+ except:
+ log.error('Failed update media: %s', traceback.format_exc())
+
+ self.releaseLock(lock_key)
+ return media
+
+ def updateReleaseDate(self, media_id):
+ """
+ Update release_date (eta) info only
+
+ @param media_id: document id
+ @return: dict, with dates dvd, theater, bluray, expires
+ """
+
+ try:
+ db = get_db()
+
+ media = db.get('id', media_id)
+
+ if not media.get('info'):
+ media = self.update(media_id)
+ dates = media.get('info', {}).get('release_date')
+ else:
+ dates = media.get('info').get('release_date')
+
+ if dates and (dates.get('expires', 0) < time.time() or dates.get('expires', 0) > time.time() + (604800 * 4)) or not dates:
+ dates = fireEvent('movie.info.release_date', identifier = getIdentifier(media), merge = True)
+ media['info'].update({'release_date': dates})
+ db.update(media)
+
+ return dates
+ except:
+ log.error('Failed updating release dates: %s', traceback.format_exc())
+
+ return {}
diff --git a/couchpotato/core/media/movie/_base/static/details.js b/couchpotato/core/media/movie/_base/static/details.js
new file mode 100644
index 0000000000..dd156baee8
--- /dev/null
+++ b/couchpotato/core/media/movie/_base/static/details.js
@@ -0,0 +1,174 @@
+var MovieDetails = new Class({
+
+ Extends: BlockBase,
+
+ sections: null,
+ buttons: null,
+
+ initialize: function(parent, options){
+ var self = this;
+
+ self.sections = {};
+
+ var category = parent.get('category');
+
+ self.el = new Element('div',{
+ 'class': 'page active movie_details level_' + (options.level || 0)
+ }).adopt(
+ self.overlay = new Element('div.overlay', {
+ 'events': {
+ 'click': self.close.bind(self)
+ }
+ }).grab(
+ new Element('a.close.icon-left-arrow')
+ ),
+ self.content = new Element('div.scroll_content').grab(
+ new Element('div.head').adopt(
+ new Element('h1').grab(
+ self.title_dropdown = new BlockMenu(self, {
+ 'class': 'title',
+ 'button_text': parent.getTitle() + (parent.get('year') ? ' (' + parent.get('year') + ')' : ''),
+ 'button_class': 'icon-dropdown'
+ })
+ ),
+ self.buttons = new Element('div.buttons')
+ )
+ )
+ );
+
+ var eta_date = parent.getETA('%b %Y') ;
+ self.addSection('description', new Element('div').adopt(
+ new Element('div', {
+ 'text': parent.get('plot')
+ }),
+ new Element('div.meta', {
+ 'html':
+ (eta_date ? ('ETA:' + eta_date + ' ') : '') +
+ '' + (parent.get('genres') || []).join(', ') + ' '
+ })
+ ));
+
+
+ // Title dropdown
+ var titles = parent.get('info').titles;
+ $(self.title_dropdown).addEvents({
+ 'click:relay(li a)': function(e, el){
+ (e).stopPropagation();
+
+ // Update category
+ Api.request('movie.edit', {
+ 'data': {
+ 'id': parent.get('_id'),
+ 'default_title': el.get('text')
+ }
+ });
+
+ $(self.title_dropdown).getElements('.icon-ok').removeClass('icon-ok');
+ el.addClass('icon-ok');
+
+ self.title_dropdown.button.set('text', el.get('text') + (parent.get('year') ? ' (' + parent.get('year') + ')' : ''));
+
+ }
+ });
+
+ titles.each(function(t){
+ self.title_dropdown.addLink(new Element('a', {
+ 'text': t,
+ 'class': parent.get('title') == t ? 'icon-ok' : ''
+ }));
+ });
+ },
+
+ addSection: function(name, section_el){
+ var self = this;
+ name = name.toLowerCase();
+
+ self.content.grab(
+ self.sections[name] = new Element('div', {
+ 'class': 'section section_' + name
+ }).grab(section_el)
+ );
+ },
+
+ addButton: function(button){
+ var self = this;
+
+ self.buttons.grab(button);
+ },
+
+ open: function(){
+ var self = this;
+
+ self.el.addClass('show');
+ document.onkeyup = self.keyup.bind(self);
+ //if(!App.mobile_screen){
+ // $(self.content).getElements('> .head, > .section').each(function(section, nr){
+ // dynamics.css(section, {
+ // opacity: 0,
+ // translateY: 100
+ // });
+ //
+ // dynamics.animate(section, {
+ // opacity: 1,
+ // translateY: 0
+ // }, {
+ // type: dynamics.spring,
+ // frequency: 200,
+ // friction: 300,
+ // duration: 1200,
+ // delay: 500 + (nr * 100)
+ // });
+ // });
+ //}
+
+ self.outer_click = function(){
+ self.close();
+ };
+
+ App.addEvent('history.push', self.outer_click);
+
+ },
+
+ keyup: function(e) {
+ if (e.keyCode == 27 /* Esc */) {
+ this.close();
+ }
+ },
+
+ close: function(){
+ var self = this;
+
+ var ended = function() {
+ self.el.dispose();
+ self.overlay.removeEventListener('transitionend', ended);
+ document.onkeyup = null;
+ };
+ self.overlay.addEventListener('transitionend', ended, false);
+
+ // animate out
+ //if(!App.mobile_screen){
+ // $(self.content).getElements('> .head, > .section').reverse().each(function(section, nr){
+ // dynamics.animate(section, {
+ // opacity: 0
+ // }, {
+ // type: dynamics.spring,
+ // frequency: 200,
+ // friction: 300,
+ // duration: 1200,
+ // delay: (nr * 50)
+ // });
+ // });
+ //
+ // dynamics.setTimeout(function(){
+ // self.el.removeClass('show');
+ // }, 200);
+ //}
+ //else {
+ // self.el.removeClass('show');
+ //}
+
+ self.el.removeClass('show');
+
+ App.removeEvent('history.push', self.outer_click);
+ }
+});
diff --git a/couchpotato/core/media/movie/_base/static/list.js b/couchpotato/core/media/movie/_base/static/list.js
new file mode 100644
index 0000000000..c5777a24f0
--- /dev/null
+++ b/couchpotato/core/media/movie/_base/static/list.js
@@ -0,0 +1,681 @@
+var MovieList = new Class({
+
+ Implements: [Events, Options],
+
+ options: {
+ api_call: 'media.list',
+ navigation: true,
+ limit: 50,
+ load_more: true,
+ loader: true,
+ menu: [],
+ add_new: false,
+ force_view: false
+ },
+
+ available_views: ['thumb', 'list'],
+ movies: [],
+ movies_added: {},
+ total_movies: 0,
+ letters: {},
+ filter: null,
+
+ initialize: function(options){
+ var self = this;
+ self.setOptions(options);
+
+ self.offset = 0;
+ self.filter = self.options.filter || {
+ 'starts_with': null,
+ 'search': null
+ };
+
+ self.el = new Element('div.movies').adopt(
+ self.title = self.options.title ? new Element('h2', {
+ 'text': self.options.title,
+ 'styles': {'display': 'none'}
+ }) : null,
+ self.description = self.options.description ? new Element('div.description', {
+ 'html': self.options.description,
+ 'styles': {'display': 'none'}
+ }) : null,
+ self.movie_list = new Element('div', {
+ 'events': {
+ 'click:relay(.movie)': function(e, el){
+ el.retrieve('klass').onClick(e);
+ },
+ 'mouseenter:relay(.movie)': function(e, el){
+ (e).stopPropagation();
+ el.retrieve('klass').onMouseenter(e);
+ },
+ 'change:relay(.movie input)': function(e, el){
+ (e).stopPropagation();
+ el = el.getParent('.movie');
+ var klass = el.retrieve('klass');
+ klass.fireEvent('select');
+ klass.select(klass.select_checkbox.get('checked'));
+ }
+ }
+ }),
+ self.load_more = self.options.load_more ? new Element('a.load_more', {
+ 'events': {
+ 'click': self.loadMore.bind(self)
+ }
+ }) : null
+ );
+
+ self.changeView(self.getSavedView() || self.options.view || 'thumb');
+
+ // Create the alphabet nav
+ if(self.options.navigation)
+ self.createNavigation();
+
+ if(self.options.api_call)
+ self.getMovies();
+
+ App.on('movie.added', self.movieAdded.bind(self));
+ App.on('movie.deleted', self.movieDeleted.bind(self));
+ },
+
+ movieDeleted: function(notification){
+ var self = this;
+
+ if(self.movies_added[notification.data._id]){
+ self.movies.each(function(movie){
+ if(movie.get('_id') == notification.data._id){
+ movie.destroy();
+ delete self.movies_added[notification.data._id];
+ self.setCounter(self.counter_count-1);
+ self.total_movies--;
+ }
+ });
+ }
+
+ self.checkIfEmpty();
+ },
+
+ movieAdded: function(notification){
+ var self = this;
+
+ self.fireEvent('movieAdded', notification);
+ if(self.options.add_new && !self.movies_added[notification.data._id] && notification.data.status == self.options.status){
+ window.scroll(0,0);
+ self.createMovie(notification.data, 'top');
+ self.setCounter(self.counter_count+1);
+
+ self.checkIfEmpty();
+ }
+ },
+
+ create: function(){
+ var self = this;
+
+ if(self.options.load_more){
+ self.scrollspy = new ScrollSpy({
+ container: self.el.getParent(),
+ min: function(){
+ return self.load_more.getCoordinates().top;
+ },
+ onEnter: self.loadMore.bind(self)
+ });
+ }
+
+ self.created = true;
+ },
+
+ addMovies: function(movies, total){
+ var self = this;
+
+
+ if(!self.created) self.create();
+
+ // do scrollspy
+ if(movies.length < self.options.limit && self.scrollspy){
+ self.load_more.hide();
+ self.scrollspy.stop();
+ }
+
+ self.createMovie(movies, 'bottom');
+
+ self.total_movies += total;
+ self.setCounter(total);
+
+ self.calculateSelected();
+ },
+
+ setCounter: function(count){
+ var self = this;
+
+ if(!self.navigation_counter) return;
+
+ self.counter_count = count;
+ self.navigation_counter.set('text', count === 1 ? '1 movie' : (count || 0) + ' movies');
+
+ if (self.empty_message) {
+ self.empty_message.destroy();
+ self.empty_message = null;
+ }
+
+ if(self.total_movies && count === 0 && !self.empty_message){
+ var message = (self.filter.search ? 'for "'+self.filter.search+'"' : '') +
+ (self.filter.starts_with ? ' in '+self.filter.starts_with+' ' : '');
+
+ self.empty_message = new Element('.message', {
+ 'html': 'No movies found ' + message + '. '
+ }).grab(
+ new Element('a', {
+ 'text': 'Reset filter',
+ 'events': {
+ 'click': function(){
+ self.filter = {
+ 'starts_with': null,
+ 'search': null
+ };
+ self.navigation_search_input.set('value', '');
+ self.reset();
+ self.activateLetter();
+ self.getMovies(true);
+ self.last_search_value = '';
+ }
+ }
+ })
+ ).inject(self.movie_list);
+
+ }
+
+ },
+
+ createMovie: function(movie, inject_at, nr){
+ var self = this,
+ movies = Array.isArray(movie) ? movie : [movie],
+ movie_els = [];
+ inject_at = inject_at || 'bottom';
+
+ movies.each(function(movie, nr){
+
+ var m = new Movie(self, {
+ 'actions': self.options.actions,
+ 'view': self.current_view,
+ 'onSelect': self.calculateSelected.bind(self)
+ }, movie);
+
+ var el = $(m);
+
+ if(inject_at === 'bottom'){
+ movie_els.push(el);
+ }
+ else {
+ el.inject(self.movie_list, inject_at);
+ }
+
+ self.movies.include(m);
+ self.movies_added[movie._id] = true;
+ });
+
+ if(movie_els.length > 0){
+ $(self.movie_list).adopt(movie_els);
+ }
+
+ },
+
+ createNavigation: function(){
+ var self = this;
+ var chars = '#ABCDEFGHIJKLMNOPQRSTUVWXYZ';
+
+ self.el.addClass('with_navigation');
+
+ self.navigation = new Element('div.alph_nav').adopt(
+ self.mass_edit_form = new Element('div.mass_edit_form').adopt(
+ new Element('span.select').adopt(
+ self.mass_edit_select = new Element('input[type=checkbox]', {
+ 'events': {
+ 'change': self.massEditToggleAll.bind(self)
+ }
+ }),
+ self.mass_edit_selected = new Element('span.count', {'text': 0}),
+ self.mass_edit_selected_label = new Element('span', {'text': 'selected'})
+ ),
+ new Element('div.quality').adopt(
+ self.mass_edit_quality = new Element('select'),
+ new Element('a.button.orange', {
+ 'text': 'Change quality',
+ 'events': {
+ 'click': self.changeQualitySelected.bind(self)
+ }
+ })
+ ),
+ new Element('div.delete').adopt(
+ new Element('span[text=or]'),
+ new Element('a.button.red', {
+ 'text': 'Delete',
+ 'events': {
+ 'click': self.deleteSelected.bind(self)
+ }
+ })
+ ),
+ new Element('div.refresh').adopt(
+ new Element('span[text=or]'),
+ new Element('a.button.green', {
+ 'text': 'Refresh',
+ 'events': {
+ 'click': self.refreshSelected.bind(self)
+ }
+ })
+ )
+ ),
+ new Element('div.menus').adopt(
+ self.navigation_counter = new Element('span.counter[title=Total]'),
+ self.filter_menu = new BlockMenu(self, {
+ 'class': 'filter',
+ 'button_class': 'icon-filter'
+ }),
+ self.navigation_actions = new Element('div.actions', {
+ 'events': {
+ 'click': function(e, el){
+ (e).preventDefault();
+
+ var new_view = self.current_view == 'list' ? 'thumb' : 'list';
+
+ var a = 'active';
+ self.navigation_actions.getElements('.'+a).removeClass(a);
+ self.changeView(new_view);
+
+ self.navigation_actions.getElement('[data-view='+new_view+']')
+ .addClass(a);
+
+ }
+ }
+ }),
+ self.navigation_menu = new BlockMenu(self, {
+ 'class': 'extra',
+ 'button_class': 'icon-dots'
+ })
+ )
+ );
+
+ // Mass edit
+ Quality.getActiveProfiles().each(function(profile){
+ new Element('option', {
+ 'value': profile.get('_id'),
+ 'text': profile.get('label')
+ }).inject(self.mass_edit_quality);
+ });
+
+ self.filter_menu.addLink(
+ self.navigation_search_input = new Element('input', {
+ 'title': 'Search through ' + self.options.identifier,
+ 'placeholder': 'Search through ' + self.options.identifier,
+ 'events': {
+ 'keyup': self.search.bind(self),
+ 'change': self.search.bind(self)
+ }
+ })
+ ).addClass('search icon-search');
+
+ var available_chars;
+ self.filter_menu.addEvent('open', function(){
+ self.navigation_search_input.focus();
+
+ // Get available chars and highlight
+ if(!available_chars && (self.navigation.isDisplayed() || self.navigation.isVisible()))
+ Api.request('media.available_chars', {
+ 'data': Object.merge({
+ 'status': self.options.status
+ }, self.filter),
+ 'onSuccess': function(json){
+ available_chars = json.chars;
+
+ available_chars.each(function(c){
+ self.letters[c.capitalize()].addClass('available');
+ });
+
+ }
+ });
+ });
+
+ self.filter_menu.addLink(
+ self.navigation_alpha = new Element('ul.numbers', {
+ 'events': {
+ 'click:relay(li.available)': function(e, el){
+ self.activateLetter(el.get('data-letter'));
+ self.getMovies(true);
+ }
+ }
+ })
+ );
+
+ // Actions
+ ['thumb', 'list'].each(function(view){
+ var current = self.current_view == view;
+ new Element('a', {
+ 'class': 'icon-' + view + (current ? ' active ' : ''),
+ 'data-view': view
+ }).inject(self.navigation_actions, current ? 'top' : 'bottom');
+ });
+
+ // All
+ self.letters.all = new Element('li.letter_all.available.active', {
+ 'text': 'ALL'
+ }).inject(self.navigation_alpha);
+
+ // Chars
+ chars.split('').each(function(c){
+ self.letters[c] = new Element('li', {
+ 'text': c,
+ 'class': 'letter_'+c,
+ 'data-letter': c
+ }).inject(self.navigation_alpha);
+ });
+
+ // Add menu or hide
+ if (self.options.menu.length > 0)
+ self.options.menu.each(function(menu_item){
+ self.navigation_menu.addLink(menu_item);
+ });
+ else
+ self.navigation_menu.hide();
+
+ },
+
+ calculateSelected: function(){
+ var self = this;
+
+ var selected = 0,
+ movies = self.movies.length;
+ self.movies.each(function(movie){
+ selected += movie.isSelected() ? 1 : 0;
+ });
+
+ var indeterminate = selected > 0 && selected < movies,
+ checked = selected == movies && selected > 0;
+
+ document.body[selected > 0 ? 'addClass' : 'removeClass']('mass_editing');
+
+ if(self.mass_edit_select){
+ self.mass_edit_select.set('checked', checked);
+ self.mass_edit_select.indeterminate = indeterminate;
+
+ self.mass_edit_selected.set('text', selected);
+ }
+ },
+
+ deleteSelected: function(){
+ var self = this,
+ ids = self.getSelectedMovies(),
+ help_msg = self.identifier == 'wanted' ? 'If you do, you won\'t be able to watch them, as they won\'t get downloaded!' : 'Your files will be safe, this will only delete the references in CouchPotato';
+
+ var qObj = new Question('Are you sure you want to delete '+ids.length+' movie'+ (ids.length != 1 ? 's' : '') +'?', help_msg, [{
+ 'text': 'Yes, delete '+(ids.length != 1 ? 'them' : 'it'),
+ 'class': 'delete',
+ 'events': {
+ 'click': function(e){
+ (e).preventDefault();
+ this.set('text', 'Deleting..');
+ Api.request('media.delete', {
+ 'method': 'post',
+ 'data': {
+ 'id': ids.join(','),
+ 'delete_from': self.options.identifier
+ },
+ 'onSuccess': function(){
+ qObj.close();
+
+ var erase_movies = [];
+ self.movies.each(function(movie){
+ if (movie.isSelected()){
+ $(movie).destroy();
+ erase_movies.include(movie);
+ }
+ });
+
+ erase_movies.each(function(movie){
+ self.movies.erase(movie);
+ movie.destroy();
+ self.setCounter(self.counter_count-1);
+ self.total_movies--;
+ });
+
+ self.calculateSelected();
+ }
+ });
+
+ }
+ }
+ }, {
+ 'text': 'Cancel',
+ 'cancel': true
+ }]);
+
+ },
+
+ changeQualitySelected: function(){
+ var self = this;
+ var ids = self.getSelectedMovies();
+
+ Api.request('movie.edit', {
+ 'method': 'post',
+ 'data': {
+ 'id': ids.join(','),
+ 'profile_id': self.mass_edit_quality.get('value')
+ },
+ 'onSuccess': self.search.bind(self)
+ });
+ },
+
+ refreshSelected: function(){
+ var self = this;
+ var ids = self.getSelectedMovies();
+
+ Api.request('media.refresh', {
+ 'method': 'post',
+ 'data': {
+ 'id': ids.join(',')
+ }
+ });
+ },
+
+ getSelectedMovies: function(){
+ var self = this;
+
+ var ids = [];
+ self.movies.each(function(movie){
+ if (movie.isSelected())
+ ids.include(movie.get('_id'));
+ });
+
+ return ids;
+ },
+
+ massEditToggleAll: function(){
+ var self = this;
+
+ var select = self.mass_edit_select.get('checked');
+
+ self.movies.each(function(movie){
+ movie.select(select);
+ });
+
+ self.calculateSelected();
+ },
+
+ reset: function(){
+ var self = this;
+
+ self.movies = [];
+ if(self.mass_edit_select)
+ self.calculateSelected();
+ if(self.navigation_alpha)
+ self.navigation_alpha.getElements('.active').removeClass('active');
+
+ self.offset = 0;
+ if(self.scrollspy){
+ //self.load_more.show();
+ self.scrollspy.start();
+ }
+ },
+
+ activateLetter: function(letter){
+ var self = this;
+
+ self.reset();
+
+ self.letters[letter || 'all'].addClass('active');
+ self.filter.starts_with = letter;
+
+ },
+
+ changeView: function(new_view){
+ var self = this;
+
+ if(self.available_views.indexOf(new_view) == -1)
+ new_view = 'thumb';
+
+ self.el
+ .removeClass(self.current_view+'_list')
+ .addClass(new_view+'_list');
+
+ self.current_view = new_view;
+ Cookie.write(self.options.identifier+'_view', new_view, {duration: 1000});
+ },
+
+ getSavedView: function(){
+ var self = this;
+ return self.options.force_view ? self.options.view : Cookie.read(self.options.identifier+'_view');
+ },
+
+ search: function(){
+ var self = this;
+
+ if(self.search_timer) clearRequestTimeout(self.search_timer);
+ self.search_timer = requestTimeout(function(){
+ var search_value = self.navigation_search_input.get('value');
+ if (search_value == self.last_search_value) return;
+
+ self.reset();
+
+ self.activateLetter();
+ self.filter.search = search_value;
+
+ self.getMovies(true);
+
+ self.last_search_value = search_value;
+
+ }, 250);
+
+ },
+
+ update: function(){
+ var self = this;
+
+ self.reset();
+ self.getMovies(true);
+ },
+
+ getMovies: function(reset){
+ var self = this;
+
+ if(self.scrollspy){
+ self.scrollspy.stop();
+ self.load_more.set('text', 'loading...');
+ }
+
+ var loader_timeout;
+ if(self.movies.length === 0 && self.options.loader){
+
+ self.loader_first = new Element('div.mask.loading.with_message').grab(
+ new Element('div.message', {'text': self.options.title ? 'Loading \'' + self.options.title + '\'' : 'Loading...'})
+ ).inject(self.el, 'top');
+ createSpinner(self.loader_first);
+
+ var lfc = self.loader_first;
+ loader_timeout = requestTimeout(function(){
+ lfc.addClass('show');
+ }, 10);
+
+ self.el.setStyle('min-height', 220);
+
+ }
+
+ Api.request(self.options.api_call, {
+ 'data': Object.merge({
+ 'type': self.options.type || 'movie',
+ 'status': self.options.status,
+ 'limit_offset': self.options.limit ? self.options.limit + ',' + self.offset : null
+ }, self.filter),
+ 'onSuccess': function(json){
+
+ if(reset)
+ self.movie_list.empty();
+
+ if(loader_timeout) clearRequestTimeout(loader_timeout);
+ if(self.loader_first){
+ var lf = self.loader_first;
+ self.loader_first = null;
+ lf.removeClass('show');
+
+ requestTimeout(function(){
+ lf.destroy();
+ }, 1000);
+ self.el.setStyle('min-height', null);
+ }
+
+ self.store(json.movies);
+ self.addMovies(json.movies, json.total || json.movies.length);
+ if(self.scrollspy) {
+ self.load_more.set('text', 'load more movies');
+ self.scrollspy.start();
+ }
+
+ self.checkIfEmpty();
+ self.fireEvent('loaded');
+ }
+ });
+ },
+
+ loadMore: function(){
+ var self = this;
+ if(self.offset >= self.options.limit)
+ self.getMovies();
+ },
+
+ store: function(movies){
+ var self = this;
+
+ self.offset += movies.length;
+
+ },
+
+ checkIfEmpty: function(){
+ var self = this;
+
+ var is_empty = self.movies.length === 0 && (self.total_movies === 0 || self.total_movies === undefined);
+
+ if(self.title)
+ self.title[is_empty ? 'hide' : 'show']();
+
+ if(self.description)
+ self.description.setStyle('display', [is_empty ? 'none' : '']);
+
+ if(is_empty && self.options.on_empty_element){
+ var ee = typeOf(self.options.on_empty_element) == 'function' ? self.options.on_empty_element() : self.options.on_empty_element;
+ ee.inject(self.loader_first || self.title || self.movie_list, 'after');
+
+ if(self.navigation)
+ self.navigation.hide();
+
+ self.empty_element = ee;
+ }
+ else if(self.empty_element){
+ self.empty_element.destroy();
+
+ if(self.navigation)
+ self.navigation.show();
+ }
+
+ },
+
+ toElement: function(){
+ return this.el;
+ }
+
+});
diff --git a/couchpotato/core/media/movie/_base/static/manage.js b/couchpotato/core/media/movie/_base/static/manage.js
new file mode 100644
index 0000000000..9d379ad74b
--- /dev/null
+++ b/couchpotato/core/media/movie/_base/static/manage.js
@@ -0,0 +1,148 @@
+var MoviesManage = new Class({
+
+ Extends: PageBase,
+
+ order: 20,
+ name: 'manage',
+ title: 'Do stuff to your existing movies!',
+
+ indexAction: function(){
+ var self = this;
+
+ if(!self.list){
+ self.refresh_button = new Element('a', {
+ 'title': 'Rescan your library for new movies',
+ 'text': 'Full library refresh',
+ 'events':{
+ 'click': self.refresh.bind(self, true)
+ }
+ });
+
+ self.refresh_quick = new Element('a', {
+ 'title': 'Just scan for recently changed',
+ 'text': 'Quick library scan',
+ 'events':{
+ 'click': self.refresh.bind(self, false)
+ }
+ });
+
+ self.list = new MovieList({
+ 'identifier': 'manage',
+ 'filter': {
+ 'status': 'done',
+ 'release_status': 'done',
+ 'status_or': 1
+ },
+ 'actions': [MA.IMDB, MA.Files, MA.Trailer, MA.Readd, MA.Delete],
+ 'menu': [self.refresh_button, self.refresh_quick],
+ 'on_empty_element': new Element('div.empty_manage').adopt(
+ new Element('div', {
+ 'text': 'Seems like you don\'t have anything in your library yet. Add your existing movie folders in '
+ }).grab(
+ new Element('a', {
+ 'text': 'Settings > Manage',
+ 'href': App.createUrl('settings/manage')
+ })
+ ),
+ new Element('div.after_manage', {
+ 'text': 'When you\'ve done that, hit this button → '
+ }).grab(
+ new Element('a.button.green', {
+ 'text': 'Hit me, but not too hard',
+ 'events':{
+ 'click': self.refresh.bind(self, true)
+ }
+ })
+ )
+ )
+ });
+ $(self.list).inject(self.content);
+
+ // Check if search is in progress
+ self.startProgressInterval();
+ }
+
+ },
+
+ refresh: function(full){
+ var self = this;
+
+ if(!self.update_in_progress){
+
+ Api.request('manage.update', {
+ 'data': {
+ 'full': +full
+ }
+ });
+
+ self.startProgressInterval();
+
+ }
+
+ },
+
+ startProgressInterval: function(){
+ var self = this;
+
+ self.progress_interval = requestInterval(function(){
+
+ if(self.progress_request && self.progress_request.running)
+ return;
+
+ self.update_in_progress = true;
+ self.progress_request = Api.request('manage.progress', {
+ 'onComplete': function(json){
+
+ if(!json || !json.progress){
+ clearRequestInterval(self.progress_interval);
+ self.update_in_progress = false;
+ if(self.progress_container){
+ self.progress_container.destroy();
+ self.list.update();
+ }
+ }
+ else {
+ // Capture progress so we can use it in our *each* closure
+ var progress = json.progress;
+
+ // Don't add loader when page is loading still
+ if(!self.list.navigation)
+ return;
+
+ if(!self.progress_container)
+ self.progress_container = new Element('div.progress')
+ .inject(self.list, 'top');
+
+ self.progress_container.empty();
+
+ var sorted_table = self.parseProgress(json.progress);
+
+ sorted_table.each(function(folder){
+ var folder_progress = progress[folder];
+ new Element('div').adopt(
+ new Element('span.folder', {'text': folder +
+ (folder_progress.eta > 0 ? ', ' + new Date ().increment('second', folder_progress.eta).timeDiffInWords().replace('from now', 'to go') : '')
+ }),
+ new Element('span.percentage', {'text': folder_progress.total ? Math.round(((folder_progress.total-folder_progress.to_go)/folder_progress.total)*100) + '%' : '0%'})
+ ).inject(self.progress_container);
+ });
+
+ }
+ }
+ });
+
+ }, 1000);
+ },
+
+ parseProgress: function (progress_object) {
+ var folder, temp_array = [];
+
+ for (folder in progress_object) {
+ if (progress_object.hasOwnProperty(folder)) {
+ temp_array.push(folder);
+ }
+ }
+ return temp_array.stableSort();
+ }
+
+});
diff --git a/couchpotato/core/media/movie/_base/static/manage.js~HEAD b/couchpotato/core/media/movie/_base/static/manage.js~HEAD
new file mode 100644
index 0000000000..9d379ad74b
--- /dev/null
+++ b/couchpotato/core/media/movie/_base/static/manage.js~HEAD
@@ -0,0 +1,148 @@
+var MoviesManage = new Class({
+
+ Extends: PageBase,
+
+ order: 20,
+ name: 'manage',
+ title: 'Do stuff to your existing movies!',
+
+ indexAction: function(){
+ var self = this;
+
+ if(!self.list){
+ self.refresh_button = new Element('a', {
+ 'title': 'Rescan your library for new movies',
+ 'text': 'Full library refresh',
+ 'events':{
+ 'click': self.refresh.bind(self, true)
+ }
+ });
+
+ self.refresh_quick = new Element('a', {
+ 'title': 'Just scan for recently changed',
+ 'text': 'Quick library scan',
+ 'events':{
+ 'click': self.refresh.bind(self, false)
+ }
+ });
+
+ self.list = new MovieList({
+ 'identifier': 'manage',
+ 'filter': {
+ 'status': 'done',
+ 'release_status': 'done',
+ 'status_or': 1
+ },
+ 'actions': [MA.IMDB, MA.Files, MA.Trailer, MA.Readd, MA.Delete],
+ 'menu': [self.refresh_button, self.refresh_quick],
+ 'on_empty_element': new Element('div.empty_manage').adopt(
+ new Element('div', {
+ 'text': 'Seems like you don\'t have anything in your library yet. Add your existing movie folders in '
+ }).grab(
+ new Element('a', {
+ 'text': 'Settings > Manage',
+ 'href': App.createUrl('settings/manage')
+ })
+ ),
+ new Element('div.after_manage', {
+ 'text': 'When you\'ve done that, hit this button → '
+ }).grab(
+ new Element('a.button.green', {
+ 'text': 'Hit me, but not too hard',
+ 'events':{
+ 'click': self.refresh.bind(self, true)
+ }
+ })
+ )
+ )
+ });
+ $(self.list).inject(self.content);
+
+ // Check if search is in progress
+ self.startProgressInterval();
+ }
+
+ },
+
+ refresh: function(full){
+ var self = this;
+
+ if(!self.update_in_progress){
+
+ Api.request('manage.update', {
+ 'data': {
+ 'full': +full
+ }
+ });
+
+ self.startProgressInterval();
+
+ }
+
+ },
+
+ startProgressInterval: function(){
+ var self = this;
+
+ self.progress_interval = requestInterval(function(){
+
+ if(self.progress_request && self.progress_request.running)
+ return;
+
+ self.update_in_progress = true;
+ self.progress_request = Api.request('manage.progress', {
+ 'onComplete': function(json){
+
+ if(!json || !json.progress){
+ clearRequestInterval(self.progress_interval);
+ self.update_in_progress = false;
+ if(self.progress_container){
+ self.progress_container.destroy();
+ self.list.update();
+ }
+ }
+ else {
+ // Capture progress so we can use it in our *each* closure
+ var progress = json.progress;
+
+ // Don't add loader when page is loading still
+ if(!self.list.navigation)
+ return;
+
+ if(!self.progress_container)
+ self.progress_container = new Element('div.progress')
+ .inject(self.list, 'top');
+
+ self.progress_container.empty();
+
+ var sorted_table = self.parseProgress(json.progress);
+
+ sorted_table.each(function(folder){
+ var folder_progress = progress[folder];
+ new Element('div').adopt(
+ new Element('span.folder', {'text': folder +
+ (folder_progress.eta > 0 ? ', ' + new Date ().increment('second', folder_progress.eta).timeDiffInWords().replace('from now', 'to go') : '')
+ }),
+ new Element('span.percentage', {'text': folder_progress.total ? Math.round(((folder_progress.total-folder_progress.to_go)/folder_progress.total)*100) + '%' : '0%'})
+ ).inject(self.progress_container);
+ });
+
+ }
+ }
+ });
+
+ }, 1000);
+ },
+
+ parseProgress: function (progress_object) {
+ var folder, temp_array = [];
+
+ for (folder in progress_object) {
+ if (progress_object.hasOwnProperty(folder)) {
+ temp_array.push(folder);
+ }
+ }
+ return temp_array.stableSort();
+ }
+
+});
diff --git a/couchpotato/core/media/movie/_base/static/manage.js~b839b971765cf032c05b2f3d2627afc41fed332c b/couchpotato/core/media/movie/_base/static/manage.js~b839b971765cf032c05b2f3d2627afc41fed332c
new file mode 100644
index 0000000000..e8618999b0
--- /dev/null
+++ b/couchpotato/core/media/movie/_base/static/manage.js~b839b971765cf032c05b2f3d2627afc41fed332c
@@ -0,0 +1,150 @@
+Page.Manage = new Class({
+
+ Extends: PageBase,
+
+ order: 20,
+ name: 'manage',
+ title: 'Do stuff to your existing movies!',
+
+ indexAction: function(){
+ var self = this;
+
+ if(!self.list){
+ self.refresh_button = new Element('a', {
+ 'title': 'Rescan your library for new movies',
+ 'text': 'Full library refresh',
+ 'events':{
+ 'click': self.refresh.bind(self, true)
+ }
+ });
+
+ self.refresh_quick = new Element('a', {
+ 'title': 'Just scan for recently changed',
+ 'text': 'Quick library scan',
+ 'events':{
+ 'click': self.refresh.bind(self, false)
+ }
+ });
+
+ self.list = new MovieList({
+ 'identifier': 'manage',
+ 'filter': {
+ 'status': 'done',
+ 'release_status': 'done',
+ 'status_or': 1
+ },
+ 'actions': [MA.IMDB, MA.Trailer, MA.Files, MA.Readd, MA.Edit, MA.Delete],
+ 'menu': [self.refresh_button, self.refresh_quick],
+ 'on_empty_element': new Element('div.empty_manage').adopt(
+ new Element('div', {
+ 'text': 'Seems like you don\'t have anything in your library yet.'
+ }),
+ new Element('div', {
+ 'text': 'Add your existing movie folders in '
+ }).adopt(
+ new Element('a', {
+ 'text': 'Settings > Manage',
+ 'href': App.createUrl('settings/manage')
+ })
+ ),
+ new Element('div.after_manage', {
+ 'text': 'When you\'ve done that, hit this button → '
+ }).adopt(
+ new Element('a.button.green', {
+ 'text': 'Hit me, but not too hard',
+ 'events':{
+ 'click': self.refresh.bind(self, true)
+ }
+ })
+ )
+ )
+ });
+ $(self.list).inject(self.el);
+
+ // Check if search is in progress
+ self.startProgressInterval();
+ }
+
+ },
+
+ refresh: function(full){
+ var self = this;
+
+ if(!self.update_in_progress){
+
+ Api.request('manage.update', {
+ 'data': {
+ 'full': +full
+ }
+ });
+
+ self.startProgressInterval();
+
+ }
+
+ },
+
+ startProgressInterval: function(){
+ var self = this;
+
+ self.progress_interval = setInterval(function(){
+
+ if(self.progress_request && self.progress_request.running)
+ return;
+
+ self.update_in_progress = true;
+ self.progress_request = Api.request('manage.progress', {
+ 'onComplete': function(json){
+
+ if(!json || !json.progress){
+ clearInterval(self.progress_interval);
+ self.update_in_progress = false;
+ if(self.progress_container){
+ self.progress_container.destroy();
+ self.list.update();
+ }
+ }
+ else {
+ // Capture progress so we can use it in our *each* closure
+ var progress = json.progress;
+
+ // Don't add loader when page is loading still
+ if(!self.list.navigation)
+ return;
+
+ if(!self.progress_container)
+ self.progress_container = new Element('div.progress').inject(self.list.navigation, 'after');
+
+ self.progress_container.empty();
+
+ var sorted_table = self.parseProgress(json.progress);
+
+ sorted_table.each(function(folder){
+ var folder_progress = progress[folder];
+ new Element('div').adopt(
+ new Element('span.folder', {'text': folder +
+ (folder_progress.eta > 0 ? ', ' + new Date ().increment('second', folder_progress.eta).timeDiffInWords().replace('from now', 'to go') : '')
+ }),
+ new Element('span.percentage', {'text': folder_progress.total ? Math.round(((folder_progress.total-folder_progress.to_go)/folder_progress.total)*100) + '%' : '0%'})
+ ).inject(self.progress_container)
+ });
+
+ }
+ }
+ })
+
+ }, 1000);
+ },
+
+ parseProgress: function (progress_object) {
+ var folder, temp_array = [];
+
+ for (folder in progress_object) {
+ if (progress_object.hasOwnProperty(folder)) {
+ temp_array.push(folder)
+ }
+ }
+ return temp_array.stableSort()
+ }
+
+});
diff --git a/couchpotato/core/media/movie/_base/static/movie.actions.js b/couchpotato/core/media/movie/_base/static/movie.actions.js
new file mode 100644
index 0000000000..e5d3eaef86
--- /dev/null
+++ b/couchpotato/core/media/movie/_base/static/movie.actions.js
@@ -0,0 +1,987 @@
+var MovieAction = new Class({
+
+ Implements: [Options],
+
+ class_name: 'action',
+ label: 'UNKNOWN',
+ icon: null,
+ button: null,
+ details: null,
+ detail_button: null,
+
+ initialize: function(movie, options){
+ var self = this;
+ self.setOptions(options);
+
+ self.movie = movie;
+
+ self.create();
+
+ if(self.button){
+ var wrapper = new Element('div', {
+ 'class': self.class_name
+ });
+ self.button.inject(wrapper);
+
+ self.button = wrapper;
+ }
+ },
+
+ create: function(){},
+
+ getButton: function(){
+ return this.button || null;
+ },
+
+ getDetails: function(){
+ return this.details || null;
+ },
+
+ getDetailButton: function(){
+ return this.detail_button || null;
+ },
+
+ getLabel: function(){
+ return this.label;
+ },
+
+ disable: function(){
+ if(this.el)
+ this.el.addClass('disable');
+ },
+
+ enable: function(){
+ if(this.el)
+ this.el.removeClass('disable');
+ },
+
+ getTitle: function(){
+ var self = this;
+
+ try {
+ return self.movie.getTitle(true);
+ }
+ catch(e){
+ try {
+ return self.movie.original_title ? self.movie.original_title : self.movie.titles[0];
+ }
+ catch(e2){
+ return 'Unknown';
+ }
+ }
+ },
+
+ get: function(key){
+ var self = this;
+ try {
+ return self.movie.get(key);
+ }
+ catch(e){
+ return self.movie[key];
+ }
+ },
+
+ createMask: function(){
+ var self = this;
+ self.mask = new Element('div.mask', {
+ 'styles': {
+ 'z-index': '1'
+ }
+ }).inject(self.movie, 'top').fade('hide');
+ },
+
+ toElement: function(){
+ return this.el || null;
+ }
+
+});
+
+var MA = {};
+
+MA.IMDB = new Class({
+
+ Extends: MovieAction,
+ id: null,
+
+ create: function(){
+ var self = this;
+
+ self.id = self.movie.getIdentifier ? self.movie.getIdentifier() : self.get('imdb');
+
+ self.button = self.createButton();
+ self.detail_button = self.createButton();
+
+ if(!self.id) self.disable();
+ },
+
+ createButton: function(){
+ var self = this;
+
+ return new Element('a.imdb', {
+ 'text': 'IMDB',
+ 'title': 'Go to the IMDB page of ' + self.getTitle(),
+ 'href': 'http://www.imdb.com/title/'+self.id+'/',
+ 'target': '_blank'
+ });
+ },
+
+});
+
+MA.Release = new Class({
+
+ Extends: MovieAction,
+ label: 'Releases',
+
+ create: function(){
+ var self = this;
+
+ App.on('movie.searcher.ended', function(notification){
+ if(self.movie.data._id != notification.data._id) return;
+
+ self.releases = null;
+ if(self.options_container){
+ // Releases are currently displayed
+ if(self.options_container.isDisplayed()){
+ self.options_container.destroy();
+ self.getDetails();
+ }
+ else {
+ self.options_container.destroy();
+ self.options_container = null;
+ }
+ }
+ });
+
+ },
+
+ getDetails: function(refresh){
+ var self = this;
+ if(!self.movie.data.releases || self.movie.data.releases.length === 0) return;
+
+ if(!self.options_container || refresh){
+ self.options_container = new Element('div.options').grab(
+ self.release_container = new Element('div.releases.table')
+ );
+
+ // Header
+ new Element('div.item.head').adopt(
+ new Element('span.name', {'text': 'Release name'}),
+ new Element('span.status', {'text': 'Status'}),
+ new Element('span.quality', {'text': 'Quality'}),
+ new Element('span.size', {'text': 'Size'}),
+ new Element('span.age', {'text': 'Age'}),
+ new Element('span.score', {'text': 'Score'}),
+ new Element('span.provider', {'text': 'Provider'}),
+ new Element('span.actions')
+ ).inject(self.release_container);
+
+ if(self.movie.data.releases)
+ self.movie.data.releases.each(function(release){
+
+ var quality = Quality.getQuality(release.quality) || {},
+ info = release.info || {},
+ provider = self.get(release, 'provider') + (info.provider_extra ? self.get(release, 'provider_extra') : '');
+
+ var release_name = self.get(release, 'name');
+ if(release.files && release.files.length > 0){
+ try {
+ var movie_file = release.files.filter(function(file){
+ var type = File.Type.get(file.type_id);
+ return type && type.identifier == 'movie';
+ }).pick();
+ release_name = movie_file.path.split(Api.getOption('path_sep')).getLast();
+ }
+ catch(e){}
+ }
+
+ var size = info.size ? Math.floor(self.get(release, 'size')) : 0;
+ size = size ? ((size < 1000) ? size + 'MB' : Math.round(size*10/1024)/10 + 'GB') : 'n/a';
+
+ // Create release
+ release.el = new Element('div', {
+ 'class': 'item '+release.status,
+ 'id': 'release_'+release._id
+ }).adopt(
+ new Element('span.name', {'text': release_name, 'title': release_name}),
+ new Element('span.status', {'text': release.status, 'class': 'status '+release.status}),
+ new Element('span.quality', {'text': quality.label + (release.is_3d ? ' 3D' : '') || 'n/a'}),
+ new Element('span.size', {'text': size}),
+ new Element('span.age', {'text': self.get(release, 'age')}),
+ new Element('span.score', {'text': self.get(release, 'score')}),
+ new Element('span.provider', { 'text': provider, 'title': provider }),
+ new Element('span.actions').adopt(
+ info.detail_url ? new Element('a.icon-info', {
+ 'href': info.detail_url,
+ 'target': '_blank'
+ }) : new Element('a'),
+ new Element('a.icon-download', {
+ 'events': {
+ 'click': function(e){
+ (e).stopPropagation();
+ if(!this.hasClass('completed'))
+ self.download(release);
+ }
+ }
+ }),
+ new Element('a', {
+ 'class': release.status == 'ignored' ? 'icon-redo' : 'icon-cancel',
+ 'events': {
+ 'click': function(e){
+ (e).stopPropagation();
+ self.ignore(release);
+
+ this.toggleClass('icon-redo');
+ this.toggleClass('icon-cancel');
+ }
+ }
+ })
+ )
+ ).inject(self.release_container);
+
+ if(release.status == 'ignored' || release.status == 'failed' || release.status == 'snatched'){
+ if(!self.last_release || (self.last_release && self.last_release.status != 'snatched' && release.status == 'snatched'))
+ self.last_release = release;
+ }
+ else if(!self.next_release && release.status == 'available'){
+ self.next_release = release;
+ }
+
+ var update_handle = function(notification) {
+ if(notification.data._id != release._id) return;
+
+ var q = self.movie.quality.getElement('.q_' + release.quality),
+ new_status = notification.data.status;
+
+ release.el.set('class', 'item ' + new_status);
+
+ var status_el = release.el.getElement('.status');
+ status_el.set('class', 'status ' + new_status);
+ status_el.set('text', new_status);
+
+ if(!q && (new_status == 'snatched' || new_status == 'seeding' || new_status == 'done'))
+ q = self.addQuality(release.quality_id);
+
+ if(q && !q.hasClass(new_status)) {
+ q.removeClass(release.status).addClass(new_status);
+ q.set('title', q.get('title').replace(release.status, new_status));
+ }
+ };
+
+ App.on('release.update_status', update_handle);
+
+ });
+
+ if(self.last_release)
+ self.release_container.getElements('#release_'+self.last_release._id).addClass('last_release');
+
+ if(self.next_release)
+ self.release_container.getElements('#release_'+self.next_release._id).addClass('next_release');
+
+ if(self.next_release || (self.last_release && ['ignored', 'failed'].indexOf(self.last_release.status) === false)){
+
+ self.trynext_container = new Element('div.buttons.try_container').inject(self.release_container, 'top');
+
+ var nr = self.next_release,
+ lr = self.last_release;
+
+ self.trynext_container.adopt(
+ new Element('span.or', {
+ 'text': 'If anything went wrong, download '
+ }),
+ lr ? new Element('a.orange', {
+ 'text': 'the same release again',
+ 'events': {
+ 'click': function(){
+ self.download(lr);
+ }
+ }
+ }) : null,
+ nr && lr ? new Element('span.or', {
+ 'text': ', '
+ }) : null,
+ nr ? [new Element('a.green', {
+ 'text': lr ? 'another release' : 'the best release',
+ 'events': {
+ 'click': function(){
+ self.download(nr);
+ }
+ }
+ }),
+ new Element('span.or', {
+ 'text': ' or pick one below'
+ })] : null
+ );
+ }
+
+ self.last_release = null;
+ self.next_release = null;
+
+ }
+
+ return self.options_container;
+
+ },
+
+ get: function(release, type){
+ return (release.info && release.info[type] !== undefined) ? release.info[type] : 'n/a';
+ },
+
+ download: function(release){
+ var self = this;
+
+ var release_el = self.release_container.getElement('#release_'+release._id),
+ icon = release_el.getElement('.icon-download');
+
+ if(icon)
+ icon.addClass('icon spinner').removeClass('download');
+
+ Api.request('release.manual_download', {
+ 'data': {
+ 'id': release._id
+ },
+ 'onComplete': function(json){
+ if(icon)
+ icon.removeClass('icon spinner');
+
+ if(json.success){
+ if(icon)
+ icon.addClass('completed');
+ release_el.getElement('.status').set('text', 'snatched');
+ }
+ else
+ if(icon)
+ icon.addClass('attention').set('title', 'Something went wrong when downloading, please check logs.');
+ }
+ });
+ },
+
+ ignore: function(release){
+
+ Api.request('release.ignore', {
+ 'data': {
+ 'id': release._id
+ }
+ });
+
+ }
+
+});
+
+MA.Trailer = new Class({
+
+ Extends: MovieAction,
+ id: null,
+ label: 'Trailer',
+
+ getDetails: function(){
+ var self = this,
+ data_url = 'https://www.googleapis.com/youtube/v3/search?q="{title}" {year} trailer&maxResults=1&type=video&videoDefinition=high&videoEmbeddable=true&part=snippet&key=AIzaSyAT3li1KjfLidaL6Vt8T92MRU7n4VOrjYk';
+
+ if(!self.player_container){
+ self.id = 'trailer-'+randomString();
+
+ self.container = new Element('div.trailer_container').adopt(
+ self.player_container = new Element('div.icon-play[id='+self.id+']', {
+ 'events': {
+ 'click': self.watch.bind(self)
+ }
+ }).adopt(
+ new Element('span[text="watch"]'),
+ new Element('span[text="trailer"]')
+ ),
+ self.background = new Element('div.background')
+ );
+
+ requestTimeout(function(){
+
+ var url = data_url.substitute({
+ 'title': encodeURI(self.getTitle()),
+ 'year': self.get('year')
+ });
+
+ new Request.JSONP({
+ 'url': url,
+ 'onComplete': function(json){
+ if(json.items.length > 0){
+ self.video_id = json.items[0].id.videoId;
+ self.background.setStyle('background-image', 'url('+json.items[0].snippet.thumbnails.high.url+')');
+ self.background.addClass('visible');
+ }
+ else {
+ self.container.getParent('.section').addClass('no_trailer');
+ }
+ }
+ }).send();
+
+ }, 1000);
+ }
+
+ return self.container;
+
+ },
+
+ watch: function(){
+ var self = this;
+
+ new Element('iframe', {
+ 'src': 'https://www.youtube-nocookie.com/embed/'+self.video_id+'?rel=0&showinfo=0&autoplay=1&showsearch=0&iv_load_policy=3&vq=hd720',
+ 'allowfullscreen': 'true'
+ }).inject(self.container);
+ }
+
+
+});
+
+
+MA.Category = new Class({
+
+ Extends: MovieAction,
+
+ create: function(){
+ var self = this;
+
+ var category = self.movie.get('category');
+
+ self.detail_button = new BlockMenu(self, {
+ 'class': 'category',
+ 'button_text': category ? category.label : 'No category',
+ 'button_class': 'icon-dropdown'
+ });
+
+ var categories = CategoryList.getAll();
+ if(categories.length > 0){
+
+ $(self.detail_button).addEvents({
+ 'click:relay(li a)': function(e, el){
+ (e).stopPropagation();
+
+ // Update category
+ Api.request('movie.edit', {
+ 'data': {
+ 'id': self.movie.get('_id'),
+ 'category_id': el.get('data-id')
+ }
+ });
+
+ $(self.detail_button).getElements('.icon-ok').removeClass('icon-ok');
+ el.addClass('icon-ok');
+
+ self.detail_button.button.set('text', el.get('text'));
+
+ }
+ });
+
+ self.detail_button.addLink(new Element('a[text=No category]', {
+ 'class': !category ? 'icon-ok' : '',
+ 'data-id': ''
+ }));
+ categories.each(function(c){
+ self.detail_button.addLink(new Element('a', {
+ 'text': c.get('label'),
+ 'class': category && category._id == c.get('_id') ? 'icon-ok' : '',
+ 'data-id': c.get('_id')
+ }));
+ });
+ }
+ else {
+ $(self.detail_button).hide();
+ }
+
+ }
+
+});
+
+
+MA.Profile = new Class({
+
+ Extends: MovieAction,
+
+ create: function(){
+ var self = this;
+
+ var profile = self.movie.profile;
+
+ self.detail_button = new BlockMenu(self, {
+ 'class': 'profile',
+ 'button_text': profile ? profile.get('label') : 'No profile',
+ 'button_class': 'icon-dropdown'
+ });
+
+ var profiles = Quality.getActiveProfiles();
+ if(profiles.length > 0){
+
+ $(self.detail_button).addEvents({
+ 'click:relay(li a)': function(e, el){
+ (e).stopPropagation();
+
+ // Update category
+ Api.request('movie.edit', {
+ 'data': {
+ 'id': self.movie.get('_id'),
+ 'profile_id': el.get('data-id')
+ }
+ });
+
+ $(self.detail_button).getElements('.icon-ok').removeClass('icon-ok');
+ el.addClass('icon-ok');
+
+ self.detail_button.button.set('text', el.get('text'));
+
+ }
+ });
+
+ profiles.each(function(pr){
+ self.detail_button.addLink(new Element('a', {
+ 'text': pr.get('label'),
+ 'class': profile && profile.get('_id') == pr.get('_id') ? 'icon-ok' : '',
+ 'data-id': pr.get('_id')
+ }));
+ });
+ }
+ else {
+ $(self.detail_button).hide();
+ }
+
+ }
+
+});
+
+MA.Refresh = new Class({
+
+ Extends: MovieAction,
+ icon: 'refresh',
+
+ create: function(){
+ var self = this;
+
+ self.button = self.createButton();
+ self.detail_button = self.createButton();
+
+ },
+
+ createButton: function(){
+ var self = this;
+ return new Element('a.refresh', {
+ 'text': 'Refresh',
+ 'title': 'Refresh the movie info and do a forced search',
+ 'events': {
+ 'click': self.doRefresh.bind(self)
+ }
+ });
+ },
+
+ doRefresh: function(e){
+ var self = this;
+ (e).stop();
+
+ Api.request('media.refresh', {
+ 'data': {
+ 'id': self.movie.get('_id')
+ }
+ });
+ }
+
+});
+
+var SuggestBase = new Class({
+
+ Extends: MovieAction,
+
+ getIMDB: function(){
+ return this.movie.data.info.imdb;
+ },
+
+ refresh: function(json){
+ var self = this;
+
+ if(json && json.movie){
+ self.movie.list.addMovies([json.movie], 1);
+
+ var last_added = self.movie.list.movies[self.movie.list.movies.length-1];
+ $(last_added).inject(self.movie, 'before');
+ }
+
+ self.movie.destroy();
+ }
+
+});
+
+MA.Add = new Class({
+
+ Extends: SuggestBase,
+ label: 'Add',
+ icon: 'plus',
+
+ create: function() {
+ var self = this;
+
+ self.button = new Element('a.add', {
+ 'text': 'Add',
+ 'title': 'Re-add the movie and mark all previous snatched/downloaded as ignored',
+ 'events': {
+ 'click': function(){
+ self.movie.openDetails();
+ }
+ }
+ });
+
+ },
+
+ getDetails: function(){
+ var self = this;
+
+ var m = new BlockSearchMovieItem(self.movie.data.info, {
+ 'onAdded': self.movie.data.status == 'suggested' ? function(){
+
+ Api.request('suggestion.ignore', {
+ 'data': {
+ 'imdb': self.movie.data.info.imdb,
+ 'remove_only': true
+ },
+ 'onComplete': self.refresh.bind(self)
+ });
+
+ } : function(){
+ self.movie.destroy();
+ }
+ });
+ m.showOptions();
+
+ return m;
+ }
+
+});
+
+MA.SuggestSeen = new Class({
+
+ Extends: SuggestBase,
+ icon: 'eye',
+
+ create: function() {
+ var self = this;
+
+ self.button = self.createButton();
+ self.detail_button = self.createButton();
+ },
+
+ createButton: function(){
+ var self = this;
+
+ return new Element('a.seen', {
+ 'text': 'Already seen',
+ 'title': 'Already seen it!',
+ 'events': {
+ 'click': self.markAsSeen.bind(self)
+ }
+ });
+
+ },
+
+ markAsSeen: function(e){
+ var self = this;
+ (e).stopPropagation();
+
+ Api.request('suggestion.ignore', {
+ 'data': {
+ 'imdb': self.getIMDB(),
+ 'mark_seen': 1
+ },
+ 'onComplete': function(json){
+ self.refresh(json);
+ if(self.movie.details){
+ self.movie.details.close();
+ }
+ }
+ });
+ }
+
+});
+
+MA.SuggestIgnore = new Class({
+
+ Extends: SuggestBase,
+ icon: 'error',
+
+ create: function() {
+ var self = this;
+
+ self.button = self.createButton();
+ self.detail_button = self.createButton();
+ },
+
+ createButton: function(){
+ var self = this;
+
+ return new Element('a.ignore', {
+ 'text': 'Ignore',
+ 'title': 'Don\'t suggest this movie anymore',
+ 'events': {
+ 'click': self.markAsIgnored.bind(self)
+ }
+ });
+
+ },
+
+ markAsIgnored: function(e){
+ var self = this;
+ (e).stopPropagation();
+
+ Api.request('suggestion.ignore', {
+ 'data': {
+ 'imdb': self.getIMDB()
+ },
+ 'onComplete': function(json){
+ self.refresh(json);
+ if(self.movie.details){
+ self.movie.details.close();
+ }
+ }
+ });
+ }
+
+});
+
+
+MA.ChartIgnore = new Class({
+
+ Extends: SuggestBase,
+ icon: 'error',
+
+ create: function() {
+ var self = this;
+
+ self.button = self.createButton();
+ self.detail_button = self.createButton();
+ },
+
+ createButton: function(){
+ var self = this;
+
+ return new Element('a.ignore', {
+ 'text': 'Hide',
+ 'title': 'Don\'t show this movie in charts',
+ 'events': {
+ 'click': self.markAsHidden.bind(self)
+ }
+ });
+
+ },
+
+ markAsHidden: function(e){
+ var self = this;
+ (e).stopPropagation();
+
+ Api.request('charts.ignore', {
+ 'data': {
+ 'imdb': self.getIMDB()
+ },
+ 'onComplete': function(json){
+ if(self.movie.details){
+ self.movie.details.close();
+ }
+ self.movie.destroy();
+ }
+ });
+ }
+
+});
+
+MA.Readd = new Class({
+
+ Extends: MovieAction,
+
+ create: function(){
+ var self = this,
+ movie_done = self.movie.data.status == 'done',
+ snatched;
+
+ if(self.movie.data.releases && !movie_done)
+ snatched = self.movie.data.releases.filter(function(release){
+ return release.status && (release.status == 'snatched' || release.status == 'seeding' || release.status == 'downloaded' || release.status == 'done');
+ }).length;
+
+ if(movie_done || snatched && snatched > 0)
+ self.el = new Element('a.readd', {
+ 'title': 'Re-add the movie and mark all previous snatched/downloaded as ignored',
+ 'events': {
+ 'click': self.doReadd.bind(self)
+ }
+ });
+
+ },
+
+ doReadd: function(e){
+ var self = this;
+ (e).stopPropagation();
+
+ Api.request('movie.add', {
+ 'data': {
+ 'identifier': self.movie.getIdentifier(),
+ 'ignore_previous': 1
+ }
+ });
+ }
+
+});
+
+MA.Delete = new Class({
+
+ Extends: MovieAction,
+
+ Implements: [Chain],
+
+ create: function(){
+ var self = this;
+
+ self.button = self.createButton();
+ self.detail_button = self.createButton();
+
+ },
+
+ createButton: function(){
+ var self = this;
+ return new Element('a.delete', {
+ 'text': 'Delete',
+ 'title': 'Remove the movie from this CP list',
+ 'events': {
+ 'click': self.showConfirm.bind(self)
+ }
+ });
+ },
+
+ showConfirm: function(e){
+ var self = this;
+ (e).stopPropagation();
+
+ self.question = new Question('Are you sure you want to delete ' + self.getTitle() + ' ?', '', [{
+ 'text': 'Yes, delete '+self.getTitle(),
+ 'class': 'delete',
+ 'events': {
+ 'click': function(e){
+ e.target.set('text', 'Deleting...');
+
+ self.del();
+ }
+ }
+ }, {
+ 'text': 'Cancel',
+ 'cancel': true
+ }]);
+
+ },
+
+ del: function(){
+ var self = this;
+
+ var movie = $(self.movie);
+
+ Api.request('media.delete', {
+ 'data': {
+ 'id': self.movie.get('_id'),
+ 'delete_from': self.movie.list.options.identifier
+ },
+ 'onComplete': function(){
+ if(self.question)
+ self.question.close();
+
+ dynamics.animate(movie, {
+ opacity: 0,
+ scale: 0
+ }, {
+ type: dynamics.bezier,
+ points: [{'x':0,'y':0,'cp':[{'x':0.876,'y':0}]},{'x':1,'y':1,'cp':[{'x':0.145,'y':1}]}],
+ duration: 400,
+ complete: function(){
+ self.movie.destroy();
+ }
+ });
+ }
+ });
+
+ }
+
+});
+
+MA.Files = new Class({
+
+ Extends: MovieAction,
+ label: 'Files',
+
+ getDetails: function(){
+ var self = this;
+
+ if(!self.movie.data.releases || self.movie.data.releases.length === 0)
+ return;
+
+ if(!self.files_container){
+ self.files_container = new Element('div.files.table');
+
+ // Header
+ new Element('div.item.head').adopt(
+ new Element('span.name', {'text': 'File'}),
+ new Element('span.type', {'text': 'Type'})
+ ).inject(self.files_container);
+
+ if(self.movie.data.releases)
+ Array.each(self.movie.data.releases, function(release){
+ var rel = new Element('div.release').inject(self.files_container);
+
+ Object.each(release.files, function(files, type){
+ Array.each(files, function(file){
+ new Element('div.file.item').adopt(
+ new Element('span.name', {'text': file}),
+ new Element('span.type', {'text': type})
+ ).inject(rel);
+ });
+ });
+ });
+
+ }
+
+ return self.files_container;
+ }
+
+});
+
+
+MA.MarkAsDone = new Class({
+
+ Extends: MovieAction,
+
+ create: function(){
+ var self = this;
+
+ self.button = self.createButton();
+ self.detail_button = self.createButton();
+
+ },
+
+ createButton: function(){
+ var self = this;
+ if(!self.movie.data.releases || self.movie.data.releases.length === 0) return;
+
+ return new Element('a.mark_as_done', {
+ 'text': 'Mark as done',
+ 'title': 'Remove from available list and move to managed movies',
+ 'events': {
+ 'click': self.markMovieDone.bind(self)
+ }
+ });
+ },
+
+ markMovieDone: function(){
+ var self = this;
+
+ Api.request('media.delete', {
+ 'data': {
+ 'id': self.movie.get('_id'),
+ 'delete_from': 'wanted'
+ },
+ 'onComplete': function(){
+ self.movie.destroy();
+ }
+ });
+
+ }
+
+});
diff --git a/couchpotato/core/media/movie/_base/static/movie.js b/couchpotato/core/media/movie/_base/static/movie.js
new file mode 100644
index 0000000000..4801184d7f
--- /dev/null
+++ b/couchpotato/core/media/movie/_base/static/movie.js
@@ -0,0 +1,458 @@
+var Movie = new Class({
+
+ Extends: BlockBase,
+ Implements: [Options, Events],
+
+ actions: null,
+ details: null,
+
+ initialize: function(list, options, data){
+ var self = this;
+
+ self.actions = [];
+ self.data = data;
+ self.list = list;
+
+ self.buttons = [];
+
+ self.el = new Element('a.movie').grab(
+ self.inner = new Element('div.inner')
+ );
+ self.el.store('klass', self);
+
+ self.profile = Quality.getProfile(data.profile_id) || {};
+ self.category = CategoryList.getCategory(data.category_id) || {};
+ self.parent(self, options);
+
+ self.addEvents();
+
+ //if(data.identifiers.imdb == 'tt3181822'){
+ // self.el.fireEvent('mouseenter');
+ // self.openDetails();
+ //}
+ },
+
+ openDetails: function(){
+ var self = this;
+
+ if(!self.details){
+ self.details = new MovieDetails(self, {
+ 'level': 3
+ });
+
+ // Add action items
+ self.actions.each(function(action, nr){
+ var details = action.getDetails();
+ if(details){
+ self.details.addSection(action.getLabel(), details);
+ }
+ else {
+ var button = action.getDetailButton();
+ if(button){
+ self.details.addButton(button);
+ }
+ }
+ });
+ }
+
+ App.getPageContainer().grab(self.details);
+
+ requestTimeout(self.details.open.bind(self.details), 20);
+ },
+
+ addEvents: function(){
+ var self = this;
+
+ self.global_events = {};
+
+ // Do refresh with new data
+ self.global_events['movie.update'] = function(notification){
+ if(self.data._id != notification.data._id) return;
+
+ self.busy(false);
+ requestTimeout(function(){
+ self.update(notification);
+ }, 2000);
+ };
+ App.on('movie.update', self.global_events['movie.update']);
+
+ // Add spinner on load / search
+ ['media.busy', 'movie.searcher.started'].each(function(listener){
+ self.global_events[listener] = function(notification){
+ if(notification.data && (self.data._id == notification.data._id || (typeOf(notification.data._id) == 'array' && notification.data._id.indexOf(self.data._id) > -1)))
+ self.busy(true);
+ };
+ App.on(listener, self.global_events[listener]);
+ });
+
+ // Remove spinner
+ self.global_events['movie.searcher.ended'] = function(notification){
+ if(notification.data && self.data._id == notification.data._id)
+ self.busy(false);
+ };
+ App.on('movie.searcher.ended', self.global_events['movie.searcher.ended']);
+
+ // Reload when releases have updated
+ self.global_events['release.update_status'] = function(notification){
+ var data = notification.data;
+ if(data && self.data._id == data.media_id){
+
+ if(!self.data.releases)
+ self.data.releases = [];
+
+ var updated = false;
+ self.data.releases.each(function(release){
+ if(release._id == data._id){
+ release.status = data.status;
+ updated = true;
+ }
+ });
+
+ if(updated)
+ self.updateReleases();
+ }
+ };
+
+ App.on('release.update_status', self.global_events['release.update_status']);
+
+ },
+
+ destroy: function(){
+ var self = this;
+
+ self.el.destroy();
+ delete self.list.movies_added[self.get('id')];
+ self.list.movies.erase(self);
+
+ self.list.checkIfEmpty();
+
+ if(self.details)
+ self.details.close();
+
+ // Remove events
+ Object.each(self.global_events, function(handle, listener){
+ App.off(listener, handle);
+ });
+ },
+
+ busy: function(set_busy, timeout){
+ var self = this;
+
+ if(!set_busy){
+ requestTimeout(function(){
+ if(self.spinner){
+ self.mask.fade('out');
+ requestTimeout(function(){
+ if(self.mask)
+ self.mask.destroy();
+ if(self.spinner)
+ self.spinner.destroy();
+ self.spinner = null;
+ self.mask = null;
+ }, timeout || 400);
+ }
+ }, timeout || 1000);
+ }
+ else if(!self.spinner) {
+ self.createMask();
+ self.spinner = createSpinner(self.mask);
+ self.mask.fade('in');
+ }
+ },
+
+ createMask: function(){
+ var self = this;
+ self.mask = new Element('div.mask', {
+ 'styles': {
+ 'z-index': 4
+ }
+ }).inject(self.el, 'top').fade('hide');
+ },
+
+ update: function(notification){
+ var self = this;
+
+ self.actions = [];
+ self.data = notification.data;
+ self.inner.empty();
+
+ self.profile = Quality.getProfile(self.data.profile_id) || {};
+ self.category = CategoryList.getCategory(self.data.category_id) || {};
+ self.create();
+
+ self.select(self.select_checkbox.get('checked'));
+
+ self.busy(false);
+ },
+
+ create: function(){
+ var self = this;
+
+ self.el.addClass('status_'+self.get('status'));
+
+ var eta_date = self.getETA();
+
+ var rating, stars;
+ if(['suggested','chart'].indexOf(self.data.status) > -1 && self.data.info && self.data.info.rating && self.data.info.rating.imdb){
+ rating = Array.prototype.slice.call(self.data.info.rating.imdb);
+
+ stars = [];
+
+ var half_rating = rating[0]/2;
+ for(var i = 1; i <= 5; i++){
+ if(half_rating >= 1)
+ stars.push(new Element('span.icon-star'));
+ else if(half_rating > 0)
+ stars.push(new Element('span.icon-star-half'));
+ else
+ stars.push(new Element('span.icon-star-empty'));
+
+ half_rating -= 1;
+ }
+ }
+
+ var thumbnail = new Element('div.poster');
+
+ if(self.data.files && self.data.files.image_poster && self.data.files.image_poster.length > 0){
+ thumbnail = new Element('div', {
+ 'class': 'type_image poster',
+ 'styles': {
+ 'background-image': 'url(' + Api.createUrl('file.cache') + self.data.files.image_poster[0].split(Api.getOption('path_sep')).pop() +')'
+ }
+ });
+ }
+ else if(self.data.info && self.data.info.images && self.data.info.images.poster && self.data.info.images.poster.length > 0){
+ thumbnail = new Element('div', {
+ 'class': 'type_image poster',
+ 'styles': {
+ 'background-image': 'url(' + self.data.info.images.poster[0] +')'
+ }
+ });
+ }
+
+ self.inner.adopt(
+ self.select_checkbox = new Element('input[type=checkbox]'),
+ new Element('div.poster_container').adopt(
+ thumbnail,
+ self.actions_el = new Element('div.actions')
+ ),
+ new Element('div.info').adopt(
+ new Element('div.title').adopt(
+ new Element('span', {
+ 'text': self.getTitle() || 'n/a'
+ }),
+ new Element('div.year', {
+ 'text': self.data.info.year || 'n/a'
+ })
+ ),
+ eta_date ? new Element('div.eta', {
+ 'text': eta_date,
+ 'title': 'ETA'
+ }) : null,
+ self.quality = new Element('div.quality'),
+ rating ? new Element('div.rating[title='+rating[0]+']').adopt(
+ stars,
+ new Element('span.votes[text=('+rating.join(' / ')+')][title=Votes]')
+ ) : null
+ )
+ );
+
+ if(!thumbnail)
+ self.el.addClass('no_thumbnail');
+
+ // Add profile
+ if(self.profile.data)
+ self.profile.getTypes().each(function(type){
+
+ var q = self.addQuality(type.get('quality'), type.get('3d'));
+ if((type.finish === true || type.get('finish')) && !q.hasClass('finish')){
+ q.addClass('finish');
+ q.set('title', q.get('title') + ' Will finish searching for this movie if this quality is found.');
+ }
+
+ });
+
+ // Add releases
+ self.updateReleases();
+
+ },
+
+
+ onClick: function(e){
+ var self = this;
+
+ if(e.target.getParents('.actions').length === 0 && e.target != self.select_checkbox){
+ (e).stopPropagation();
+ self.addActions();
+ self.openDetails();
+ }
+ },
+
+ addActions: function(){
+ var self = this;
+
+ if(self.actions.length <= 0){
+ self.options.actions.each(function(a){
+ var action = new a(self),
+ button = action.getButton();
+ if(button){
+ self.actions_el.grab(button);
+ self.buttons.push(button);
+ }
+
+ self.actions.push(action);
+ });
+ }
+ },
+
+ onMouseenter: function(){
+ var self = this;
+
+ if(App.mobile_screen) return;
+ self.addActions();
+
+ if(self.list.current_view == 'thumb'){
+ self.el.addClass('hover_start');
+ requestTimeout(function(){
+ self.el.removeClass('hover_start');
+ }, 300);
+
+ dynamics.css(self.inner, {
+ scale: 1
+ });
+
+ dynamics.animate(self.inner, {
+ scale: 0.9
+ }, { type: dynamics.bounce });
+
+ self.buttons.each(function(el, nr){
+
+ dynamics.css(el, {
+ opacity: 0,
+ translateY: 50
+ });
+
+ dynamics.animate(el, {
+ opacity: 1,
+ translateY: 0
+ }, {
+ type: dynamics.spring,
+ frequency: 200,
+ friction: 300,
+ duration: 800,
+ delay: 100 + (nr * 40)
+ });
+
+ });
+ }
+ },
+
+ updateReleases: function(){
+ var self = this;
+ if(!self.data.releases || self.data.releases.length === 0) return;
+
+ self.data.releases.each(function(release){
+
+ var q = self.quality.getElement('.q_'+ release.quality+(release.is_3d ? '.is_3d' : ':not(.is_3d)')),
+ status = release.status;
+
+ if(!q && (status == 'snatched' || status == 'seeding' || status == 'done'))
+ q = self.addQuality(release.quality, release.is_3d || false);
+
+ if (q && !q.hasClass(status)){
+ q.addClass(status);
+ q.set('title', (q.get('title') ? q.get('title') : '') + ' status: '+ status);
+ }
+
+ });
+ },
+
+ addQuality: function(quality, is_3d){
+ var self = this;
+
+ var q = Quality.getQuality(quality);
+ return new Element('span', {
+ 'text': q.label + (is_3d ? ' 3D' : ''),
+ 'class': 'q_'+q.identifier + (is_3d ? ' is_3d' : ''),
+ 'title': ''
+ }).inject(self.quality);
+
+ },
+
+ getTitle: function(prefixed){
+ var self = this;
+
+ if(self.data.title)
+ return prefixed ? self.data.title : self.getUnprefixedTitle(self.data.title);
+ else if(self.data.info && self.data.info.titles && self.data.info.titles.length > 0)
+ return prefixed ? self.data.info.titles[0] : self.getUnprefixedTitle(self.data.info.titles[0]);
+
+ return 'Unknown movie';
+ },
+
+ getUnprefixedTitle: function(t){
+ if(t.substr(0, 4).toLowerCase() == 'the ')
+ t = t.substr(4) + ', The';
+ else if(t.substr(0, 3).toLowerCase() == 'an ')
+ t = t.substr(3) + ', An';
+ else if(t.substr(0, 2).toLowerCase() == 'a ')
+ t = t.substr(2) + ', A';
+ return t;
+ },
+
+ getIdentifier: function(){
+ var self = this;
+
+ try {
+ return self.get('identifiers').imdb;
+ }
+ catch (e){ }
+
+ return self.get('imdb');
+ },
+
+ getETA: function(format){
+ var self = this,
+ d = new Date(),
+ now = Math.round(+d/1000),
+ eta = null,
+ eta_date = '';
+
+ if(self.data.info.release_date)
+ [self.data.info.release_date.dvd, self.data.info.release_date.theater].each(function(timestamp){
+ if (timestamp > 0 && (eta === null || Math.abs(timestamp - now) < Math.abs(eta - now)))
+ eta = timestamp;
+ });
+
+ if(eta){
+ eta_date = new Date(eta * 1000);
+ if(+eta_date/1000 < now){
+ eta_date = null;
+ }
+ else {
+ eta_date = format ? eta_date.format(format) : (eta_date.format('%b') + (d.getFullYear() != eta_date.getFullYear() ? ' ' + eta_date.getFullYear() : ''));
+ }
+ }
+
+ return (now+8035200 > eta) ? eta_date : '';
+ },
+
+ get: function(attr){
+ return this.data[attr] || this.data.info[attr];
+ },
+
+ select: function(select){
+ var self = this;
+ self.select_checkbox.set('checked', select);
+ self.el[self.select_checkbox.get('checked') ? 'addClass' : 'removeClass']('checked');
+ },
+
+ isSelected: function(){
+ return this.select_checkbox.get('checked');
+ },
+
+ toElement: function(){
+ return this.el;
+ }
+
+});
diff --git a/couchpotato/core/media/movie/_base/static/movie.scss b/couchpotato/core/media/movie/_base/static/movie.scss
new file mode 100644
index 0000000000..108ca2bfdd
--- /dev/null
+++ b/couchpotato/core/media/movie/_base/static/movie.scss
@@ -0,0 +1,1412 @@
+@import "_mixins";
+
+$mass_edit_height: 44px;
+
+.page.movies {
+ bottom: auto;
+ z-index: 21;
+ height: $header_height;
+
+ .scroll_content {
+ display: none;
+ }
+
+ @include media-phablet {
+ height: $header_width_mobile;
+ }
+}
+
+.page.movies_wanted,
+.page.movies_manage {
+ top: $header_height;
+ padding: 0;
+ will-change: top;
+ transition: top 300ms $cubic;
+
+ @include media-phablet {
+ top: $header_width_mobile;
+ }
+
+ .mass_editing & {
+ top: $header_height + $mass_edit_height;
+ }
+
+ .load_more {
+ text-align: center;
+ padding: $padding;
+ font-size: 2em;
+ display: block;
+ }
+
+ .empty_manage {
+ padding: $padding;
+
+ .after_manage {
+ margin-top: $padding;
+ }
+ }
+}
+
+.movie {
+
+ .ripple {
+ display: none;
+ }
+
+ input[type=checkbox] {
+ display: none;
+ }
+
+ .with_navigation & {
+ input[type=checkbox] {
+ display: inline-block;
+ position: absolute;
+ will-change: opacity;
+ transition: opacity 200ms;
+ opacity: 0;
+ z-index: 2;
+ cursor: pointer;
+
+ @include media-phablet {
+ display: none;
+ }
+
+ &:hover {
+ opacity: 1 !important;
+ }
+ }
+
+ &:hover input[type=checkbox] {
+ opacity: .5;
+ }
+
+ &.checked input[type=checkbox] {
+ opacity: 1;
+ }
+ }
+
+ .quality {
+ font-weight: 400;
+
+ span {
+ display: inline-block;
+ background: get-theme(off);
+ border: 1px solid transparent;
+ color: rgba(get-theme(text), .5);
+ border-radius: 1px;
+ padding: 1px 3px;
+
+ @include theme-dark {
+ color: rgba(get-theme-dark(text), .5);
+ background: get-theme-dark(off);
+ }
+
+ &.failed { background: #993619; color: #FFF; }
+ &.available { color: #009902; border-color: #009902; background: get-theme(background);}
+ &.snatched { background: #548399; color: #FFF }
+ &.downloaded, &.done { background: #009902; color: #FFF }
+
+ @include theme-dark {
+ background: none;
+ &.available { border-color: transparent; background: none;}
+ &.snatched { background: #548399; }
+ &.downloaded, &.done { background: #009902; color: #FFF; }
+ }
+ }
+ }
+
+ .rating {
+ .votes {
+ opacity: .7;
+ margin-left: 4px;
+ }
+ }
+
+ &.status_suggested {
+ .quality {
+ display: none;
+ }
+ }
+
+}
+
+.movies {
+ position: relative;
+
+ .no_movies {
+ display: block;
+ padding: $padding;
+
+ @include media-tablet {
+ padding: $padding/2;
+ }
+
+ a {
+ @include theme(color, primary);
+ }
+ }
+
+ .message {
+ padding: $padding 0;
+ text-align: center;
+
+ a {
+ @include theme(color, primary);
+ }
+ }
+
+ &.movies > h2 {
+ padding: 0 $padding;
+ line-height: $header_height;
+
+ @include media-phablet {
+ line-height: $header_width_mobile;
+ padding: 0 $padding/2;
+ }
+ }
+
+ > .description {
+ position: absolute;
+ top: 0;
+ right: $padding;
+ width: auto;
+ line-height: $header_height;
+ opacity: .7;
+
+ @include media-tablet {
+ display: none;
+ }
+
+ a {
+ @include theme(color, primary);
+ display: inline;
+
+ &:hover {
+ text-decoration: underline;
+ }
+ }
+ }
+
+ > .loading {
+ @include theme(background, background);
+
+ .message {
+ @include theme(color, text);
+ }
+
+ .spinner {
+ @include theme(background-color, background);
+ }
+ }
+
+ .movie .actions {
+ will-change: transform, opacity;
+ transform: rotateZ(360deg);
+
+ @include media-phablet {
+ pointer-events: none;
+ }
+ }
+
+ .progress {
+
+ div {
+ width: 50%;
+ padding: $padding/4 $padding/2;
+ display: flex;
+
+ @include media-tablet {
+ width: 100%;
+ }
+
+ .folder {
+ flex: 1 auto;
+ overflow: hidden;
+ text-overflow: ellipsis;
+ white-space: nowrap;
+ margin-right: $padding/2;
+ }
+
+ .percentage {
+ font-weight: bold;
+ }
+ }
+ }
+}
+
+.list_list {
+ font-weight: 300;
+
+ .movie {
+ display: block;
+ border-bottom: 1px solid transparent;
+ @include theme(border-color, off);
+
+ position: relative;
+ cursor: pointer;
+
+ &:last-child {
+ border-bottom: none;
+ }
+
+ &:hover {
+ background: get-theme(off);
+
+ @include theme-dark {
+ background: get-theme-dark(off);
+ }
+ }
+
+ input[type=checkbox] {
+ left: $padding;
+ top: 50%;
+ transform: translateY(-50%);
+ }
+
+ .poster {
+ display: none;
+ }
+
+ .info {
+ padding: $padding/2 $padding;
+
+ display: flex;
+ flex-flow: row nowrap;
+ align-items: center;
+
+ @include media-tablet {
+ display: block;
+ padding: $padding/2;
+ }
+
+ .title {
+ flex: 1 auto;
+
+ @include media-tablet {
+ display: flex;
+ flex-flow: row nowrap;
+ }
+
+ span {
+ transition: margin 200ms $cubic;
+ overflow: hidden;
+ text-overflow: ellipsis;
+ white-space: nowrap;
+
+ @include media-tablet {
+ width: 100%;
+ }
+
+ }
+
+ .year {
+ display: inline-block;
+ margin: 0 10px;
+ opacity: .5;
+ }
+ }
+
+ .eta {
+ font-size: .8em;
+ opacity: .5;
+ margin-right: 4px;
+
+ @include media-phablet {
+ display: none;
+ }
+ }
+
+ .quality {
+ clear: both;
+ overflow: hidden;
+
+ span {
+ float: left;
+ font-size: .7em;
+ margin: 2px 0 0 2px;
+
+ @include media-tablet {
+ margin: 2px 2px 0 0;
+ }
+ }
+ }
+
+ .rating .vote {
+ display: inline-block;
+ min-width: 60px;
+ text-align: right;
+ }
+ }
+
+ .actions {
+ position: absolute;
+ right: $padding/2;
+ top: 0;
+ bottom: 0;
+ display: none;
+ z-index: 10;
+
+ .action {
+ display: inline-block;
+ }
+
+ a {
+ height: 100%;
+ display: block;
+ @include theme(background, background);
+ @include theme(color, primary);
+ padding: $padding / 2;
+ width: auto;
+ float: right;
+ @include theme(text, text);
+
+ &:before {
+ display: none;
+ }
+
+ &:hover {
+ @include theme(background, off);
+ @include theme(color, text);
+ }
+
+ .icon {
+ display: none;
+ }
+ }
+ }
+
+ &:hover .actions {
+ display: block;
+
+ @include media-tablet {
+ display: none;
+ }
+ }
+ }
+
+ &.with_navigation .movie {
+ &:hover, &.checked {
+ .info .title span {
+ margin-left: $padding;
+
+ @include media-tablet {
+ margin-left: 0;
+ }
+ }
+ }
+ }
+}
+
+.thumb_list {
+
+ $max-split: 20;
+ $split-jump: 225px;
+
+ padding: 0 $padding/4;
+
+ > div:last-child {
+ padding: 0 ($padding/2)+2px;
+ @include media-phablet {
+ padding: 0 $padding/6;
+ }
+ }
+
+ .movie {
+ overflow: visible;
+ display: inline-block;
+ vertical-align: top;
+ margin-bottom: $padding;
+ position: relative;
+ cursor: pointer;
+ width: 150px;
+ border: 0 solid transparent;
+ border-width: 0 $padding/3;
+
+ .inner {
+ will-change: transform;
+ transform: rotateZ(360deg);
+ }
+
+ @while $max-split > 0 {
+ @media (min-width : $split-jump * ($max-split - 1)) and (max-width : $split-jump * $max-split) {
+ width: 100% / $max-split;
+ }
+ $max-split: $max-split - 1;
+ }
+
+ @include media-tablet {
+ width: 33.333%;
+ border-width: 0 $padding/4;
+ }
+
+ @include media-phablet {
+ width: 50%;
+ border-width: 0 $padding/5;
+ }
+
+ input[type=checkbox] {
+ top: $padding/2;
+ left: $padding/2;
+ }
+
+ .poster_container {
+ border-radius: $border_radius;
+ position: relative;
+ width: 100%;
+ padding-bottom: 150%;
+ overflow: hidden;
+ }
+
+ .poster {
+ position: absolute;
+ background: center no-repeat;
+ @include theme(background-color, off);
+ background-size: cover;
+ overflow: hidden;
+ height: 100%;
+ width: 100%;
+ }
+
+ .info {
+ clear: both;
+ font-size: .9em;
+
+ .title {
+ display: flex;
+ padding: 3px 0;
+ font-weight: 400;
+
+ span {
+ flex: 1 auto;
+ white-space: nowrap;
+ overflow: hidden;
+ text-overflow: ellipsis;
+ }
+
+ .year {
+ display: inline-block;
+ margin-left: 5px;
+ opacity: .5;
+ }
+ }
+
+ .eta {
+ opacity: .5;
+ float: right;
+ margin-left: 4px;
+ }
+
+ .quality {
+ white-space: nowrap;
+ overflow: hidden;
+ font-size: .9em;
+
+ span {
+ font-size: .8em;
+ margin-right: 2px;
+ }
+ }
+ }
+
+ .actions {
+ background-image: linear-gradient(25deg, rgba(get-theme(primary),.3) 0%, rgba(get-theme(primary),1) 80%);
+ @include theme-dark {
+ background-image: linear-gradient(25deg, rgba(get-theme-dark(primary),.3) 0%, rgba(get-theme-dark(primary),1) 80%);
+ }
+
+ will-change: opacity, visibility;
+ transition: all 400ms;
+ transition-property: opacity, visibility;
+ opacity: 0;
+ visibility: hidden;
+ position: absolute;
+ top: 0;
+ right: 0;
+ bottom: 0;
+ left: 0;
+ text-align: right;
+
+ .action {
+ position: relative;
+ margin-right: $padding/2;
+ float: right;
+ clear: both;
+
+ &:first-child {
+ margin-top: $padding/2;
+ }
+
+ a {
+ transition: all 150ms $cubic;
+ will-change: color, background;
+ transition-property: color, background;
+ display: block;
+ width: auto;
+ padding: $padding / 3;
+ color: #FFF;
+ border-radius: $border_radius - 1px;
+ font-weight: 400;
+
+ &:hover {
+ @include theme(background, background);
+ @include theme(color, primary);
+
+ @include theme-dark {
+ color: #FFF;
+ }
+ }
+ }
+ }
+ }
+
+ &:hover .actions {
+ opacity: 1;
+ visibility: visible;
+
+ @include media-phablet {
+ display: none;
+ }
+ }
+
+ &.hover_start .actions {
+ pointer-events: none;
+ }
+
+ .mask {
+ bottom: 44px;
+ border-radius: $border_radius;
+ will-change: opacity;
+ transition: opacity 30ms;
+ }
+ }
+
+}
+
+.page.movie_details {
+ pointer-events: none;
+ $gab-width: $header_width/3;
+
+ @include media-phablet {
+ left: 0;
+ }
+
+ .overlay {
+ position: fixed;
+ top: 0;
+ bottom: 0;
+ right: 0;
+ left: $header_width;
+ background: rgba(0,0,0,.6);
+ border-radius: $border_radius 0 0 $border_radius;
+ opacity: 0;
+ will-change: opacity;
+ transform: rotateZ(360deg);
+ transition: opacity 300ms ease 400ms;
+ z-index: 1;
+
+ .ripple {
+ background: #FFF;
+ }
+
+ @include media-phablet {
+ left: 0;
+ border-radius: 0;
+ transition: none;
+ }
+
+ .close {
+ display: inline-block;
+ text-align: center;
+ font-size: 60px;
+ line-height: $header_height;
+ color: #FFF;
+ width: 100%;
+ height: 100%;
+ opacity: 0;
+ will-change: opacity;
+ transition: opacity 300ms ease 200ms;
+
+ &:before {
+ display: block;
+ width: $gab-width;
+ }
+
+ @include media-phablet {
+ width: $header_width_mobile;
+ }
+ }
+ }
+
+ .scroll_content {
+ position: fixed;
+ z-index: 2;
+ top: 0;
+ bottom: 0;
+ right: 0;
+ left: $header_width + $gab-width;
+ @include theme(background, background);
+ border-radius: $border_radius 0 0 $border_radius;
+ overflow-y: auto;
+ will-change: transform;
+ transform: translateX(100%) rotateZ(360deg);
+ transition: transform 450ms $cubic;
+
+ @include media-phablet {
+ left: $header_width_mobile;
+ }
+
+ > .head {
+ display: flex;
+ flex-flow: row wrap;
+ padding: 0 $padding 0 $padding/2;
+ position: relative;
+ z-index: 2;
+ will-change: transform, opacity;
+ transform: rotateZ(360deg);
+
+ @include media-phablet {
+ padding: 0;
+ line-height: $header_width_mobile;
+ }
+
+ h1 {
+ flex: 1 auto;
+ margin: 0;
+ font-size: 24px;
+ font-weight: 300;
+ max-width: 100%;
+
+ @include media-phablet {
+ min-width: 100%;
+ line-height: $header_width_mobile;
+
+ .more_menu {
+ width: 100%;
+ }
+ }
+
+ .more_menu {
+ a {
+ @include theme(color, text);
+ }
+
+ .icon-dropdown {
+ padding-right: $padding*1.5;
+
+ @include media-phablet {
+ &:before {
+ right: $padding/2;
+ }
+ }
+ }
+ }
+ }
+
+ .more_menu {
+ display: inline-block;
+ vertical-align: top;
+ max-width: 100%;
+ margin-bottom: 0;
+
+ &.title .wrapper {
+ transform-origin: 0 0;
+ }
+
+ > a {
+ float: left;
+ line-height: $header_height;
+ @include theme(color, primary);
+
+ &:hover {
+ @include theme(color, text);
+ }
+
+ @include media-phablet {
+ line-height: $header_width_mobile;
+ }
+ }
+
+ .icon-dropdown {
+ position: relative;
+ padding: 0 $padding*1.25 0 $padding/2;
+
+ &:before {
+ position: absolute;
+ right: $padding/2;
+ top: -2px;
+ opacity: .2;
+ }
+
+ &:hover:before {
+ opacity: 1;
+ }
+ }
+
+ .wrapper {
+ top: $header_height - 10px;
+ padding-top: 4px;
+ border-radius: $border_radius $border_radius 0 0;
+ font-size: 14px;
+
+ @include media-phablet {
+ top: 25px;
+ }
+
+ &:before {
+ top: 0;
+ left: auto;
+ right: 22px;
+ }
+
+ ul {
+ border-radius: $border_radius $border_radius 0 0;
+ max-height: 215px;
+ overflow-y: auto;
+ }
+
+ a {
+ padding-right: $padding * 1.5;
+
+ &:before {
+ position: absolute;
+ right: $padding/2;
+ }
+
+ &:hover, &.icon-ok {
+ @include theme(color, primary);
+ }
+ }
+ }
+
+ &.title {
+ > a {
+ display: inline-block;
+ text-overflow: ellipsis;
+ overflow: hidden;
+ white-space: nowrap;
+ width: 100%;
+ }
+
+ .wrapper {
+ left: 0;
+ right: auto;
+
+ @include media-phablet {
+ top: 30px;
+ max-width: 240px;
+ }
+
+ &:before {
+ left: 22px;
+ right: auto;
+ }
+ }
+ }
+
+ }
+
+ .buttons {
+ display: flex;
+ flex-wrap: wrap;
+
+ @include media-phablet {
+ margin: 0;
+ }
+
+ > a {
+ display: inline-block;
+ padding: 0 10px;
+ @include theme(color, primary);
+ line-height: $header_height;
+
+ @include media-phablet {
+ line-height: $header_width_mobile;
+ }
+
+ &:hover {
+ @include theme(background, off);
+ @include theme(color, text);
+ }
+ }
+
+ }
+ }
+
+ .section {
+ padding: $padding;
+ border-top: 1px solid rgba(0,0,0,.1);
+ will-change: transform, opacity;
+ transform: rotateZ(360deg);
+
+ @include theme-dark {
+ border-color: rgba(255,255,255,.1);
+ }
+
+ @include media-phablet {
+ padding: $padding/2;
+ }
+ }
+ }
+
+ &.show {
+ pointer-events: auto;
+
+ .overlay {
+ opacity: 1;
+ transition-delay: 0s;
+
+ .close {
+ opacity: 1;
+ transition-delay: 300ms;
+ }
+ }
+
+ .scroll_content {
+ transition-delay: 50ms;
+ transform: translateX(0) rotateZ(360deg);
+ }
+ }
+
+ .section_description {
+ .meta {
+ text-align: right;
+ font-style: italic;
+ font-size: .90em;
+
+ span {
+ display: inline-block;
+ margin: $padding/2 $padding/2 0;
+
+ &:last-child {
+ margin-right: 0;
+ }
+ }
+ }
+ }
+
+ .section_add {
+ @include theme(background, off);
+
+ .options > div {
+ display: flex;
+ align-items: center;
+
+ select {
+ display: block;
+ width: 100%;
+ }
+
+ .title {
+ min-width: 75px;
+ width: 2000px;
+ margin: 0 10px 0 0;
+ }
+
+ .profile, .category {
+ width: 200px;
+ min-width: 50px;
+ margin: 0 10px 0 0;
+ }
+
+ .add {
+ width: 200px;
+
+ .button {
+ @include theme(background, background);
+ flex: 1 auto;
+ display: block;
+ text-align: center;
+ width: 100%;
+ margin: 0;
+
+ &:hover {
+ @include theme(background, primary);
+ }
+ }
+
+ }
+
+ }
+
+ .thumbnail,
+ .data {
+ display: none;
+ }
+ }
+
+ .files {
+ span {
+ overflow: hidden;
+ white-space: nowrap;
+ text-overflow: ellipsis;
+ text-align: center;
+ padding: $padding/3 0;
+ }
+
+ .name {
+ text-align: left;
+ flex: 1 1 auto;
+ }
+
+ .type {
+ min-width: 80px;
+ }
+ }
+
+ .releases {
+
+ .buttons {
+ margin-bottom: $padding/2;
+
+ a {
+ display: inline;
+ @include theme(color, primary);
+
+ &:hover {
+ text-decoration: underline;
+ }
+ }
+ }
+
+ .item {
+ @include media-phablet {
+ display: block;
+ }
+
+ &:not(.head):hover {
+ @include theme(background, off);
+ @include theme(text, text);
+ }
+
+ span {
+ overflow: hidden;
+ white-space: nowrap;
+ text-overflow: ellipsis;
+ text-align: center;
+ padding: $padding/3 0;
+
+ &:before {
+ display: none;
+ font-weight: bold;
+ opacity: .8;
+ margin-right: 3px;
+ width: 100%;
+ font-size: .9em;
+
+ @include media-phablet {
+ display: inline-block;
+ }
+ }
+
+ @include media-phablet {
+ vertical-align: top;
+ white-space: normal;
+ display: inline-block;
+ width: 50%;
+ padding: 0;
+ min-width: 0;
+ max-width: none;
+ text-align: left;
+ margin-top: 3px;
+ }
+ }
+
+ .name {
+ flex: 1 auto;
+ text-align: left;
+
+ @include media-phablet {
+ width: 100%;
+ font-weight: bold;
+ }
+ }
+
+ &.head {
+
+ @include media-phablet {
+ display: none;
+ }
+ }
+
+ &.ignored {
+ span:not(.actions) {
+ opacity: .3;
+ }
+
+ .name {
+ text-decoration: line-through;
+ }
+ }
+
+ .actions {
+ padding: 0;
+
+ @include media-phablet {
+ width: 100%;
+ text-align: center;
+ }
+
+ a {
+ display: inline-block;
+ vertical-align: top;
+ padding: $padding/3;
+ min-width: 26px;
+ @include theme(color, text);
+
+ &:hover {
+ @include theme(color, primary);
+ }
+
+ @include media-phablet {
+ text-align: center;
+ }
+
+ &:after {
+ margin-left: 3px;
+ font-size: .9em;
+ }
+
+ @include media-phablet {
+ &.icon-info:after { content: "more info"; }
+ &.icon-download:after { content: "download"; }
+ &.icon-cancel:after { content: "ignore"; }
+ }
+ }
+ }
+ }
+
+ .status { min-width: 70px; max-width: 70px; &:before { content: "Status:"; } }
+ .quality { min-width: 60px; max-width: 60px; &:before { content: "Quality:"; } }
+ .size { min-width: 50px; max-width: 50px; &:before { content: "Size:"; } }
+ .age { min-width: 40px; max-width: 40px; &:before { content: "Age:"; } }
+ .score { min-width: 45px; max-width: 45px; &:before { content: "Score:"; } }
+ .provider { min-width: 110px; max-width: 110px; &:before { content: "Provider:"; } }
+ .actions { min-width: 80px; max-width: 80px; }
+
+ }
+
+ .section_trailer.section_trailer {
+ $max_height: 450px;
+ $max_width: $max_height * (16/9);
+
+ padding: 0;
+ @include theme(background, menu);
+ max-height: $max_height;
+ overflow: hidden;
+
+ @include media-phablet {
+ max-height: $max_height;
+ }
+
+ &.no_trailer {
+ display: none;
+ }
+
+ .trailer_container {
+ $play-size: 110px;
+
+ max-height: $max_height;
+ position: relative;
+ overflow: hidden;
+ max-width: $max_width;
+ margin: 0 auto;
+ cursor: pointer;
+
+ @include media-phablet {
+ margin-bottom: $padding/2;
+ }
+
+ .background {
+ opacity: 0;
+ background: no-repeat center;
+ background-size: cover;
+ position: relative;
+ z-index: 1;
+ max-height: $max_height;
+ padding-bottom: 56.25%;
+ will-change: opacity;
+ transition: opacity 1000ms;
+
+ &.visible {
+ opacity: .4;
+ }
+ }
+
+ .icon-play {
+ opacity: 0.9;
+ position: absolute;
+ z-index: 2;
+ text-align: center;
+ width: 100%;
+ top: 50%;
+ transform: translateY(-50%);
+ will-change: opacity;
+ transition: all 300ms;
+ color: #FFF;
+ font-size: $play-size;
+
+ @include media-desktop {
+ font-size: $play-size/2;
+ }
+
+ @include media-phablet {
+ font-size: $play-size/3.5;
+ }
+
+ span {
+ transition: all 300ms;
+ opacity: 0.9;
+ position: absolute;
+ font-size: 1em;
+ top: 50%;
+ left: 50%;
+ margin-left: $play-size/2;
+ transform: translateY(-54%);
+ will-change: opacity;
+
+ @include media-desktop {
+ margin-left: $play-size/4;
+ }
+
+ @include media-phablet {
+ margin-left: $play-size/7;
+ }
+
+ &:first-child {
+ margin-left: -($play-size/2);
+ transform: translate(-100%, -54%);
+
+ @include media-desktop {
+ margin-left: -($play-size/4);
+ }
+
+ @include media-phablet {
+ margin-left: -($play-size/7);
+ }
+ }
+ }
+ }
+
+ &:hover {
+ @include theme(color, primary);
+
+ .icon-play {
+ opacity: 1;
+
+ span {
+ opacity: 1;
+ }
+ }
+ }
+
+ iframe {
+ position: absolute;
+ width: 100%;
+ height: 100%;
+ border: 0;
+ top: 0;
+ left: 0;
+ max-height: $max_height;
+ z-index: 10;
+ }
+ }
+ }
+
+}
+
+
// Alphabetical navigation bar above the movie list: mass-edit form,
// counter, filter (search + letter grid) and the overflow menu.
.alph_nav {
	position: relative;

	// Slide-down bar shown while mass-editing; animated via max-height
	// because height can't be transitioned from/to auto.
	.mass_edit_form {
		display: flex;
		@include theme(background, background);
		position: fixed;
		top: $header_height;
		right: 0;
		left: $header_width;
		flex-flow: row nowrap;
		align-items: center;
		will-change: max-height;
		transition: max-height 300ms $cubic;
		max-height: 0;
		overflow: hidden;

		// Expanded state, toggled by the .mass_editing class on an ancestor
		.mass_editing & {
			max-height: $mass_edit_height;
		}

		> * {
			display: flex;
			align-items: center;
		}

		.select {
			margin: 0 $padding/2 0 $padding;

			@include media-phablet {
				margin: 0 $padding/4 0 $padding/2;
			}

			input, .count {
				margin-right: $padding/4;
			}
		}

	}

	.menus {

		.button {
			padding: 0 $padding/2;
			line-height: $header_height;
		}

		// Shared layout for the three right-hand controls
		.counter, .more_menu, .actions {
			float: left;

			.wrapper {
				transform-origin: 92% 0;
				right: -7px;
			}

			> a {
				display: inline-block;
				width: 30px;
				line-height: $header_height;
				text-align: center;
				float: left;

				&:hover {
					@include theme(background, off);
				}

				@include media-tablet {
					line-height: $header_width_mobile;
				}
			}
		}

		// Movie count label; hidden on small screens
		.counter {
			line-height: $header_height;
			padding: 0 $padding/2;

			@include media-tablet {
				display: none;
			}
		}

		.actions {
			a {
				display: inline-block;
			}

			.active {
				display: none;
			}

		}

		// Filter dropdown: free-text search plus the A-Z starting-letter grid
		.filter {
			.wrapper {
				width: 320px;

				@include media-phablet {
					right: -70px;
					transform-origin: 75% 0;

					&:before {
						right: 83px !important;
					}
				}
			}

			.button {
				margin-top: -2px;
			}

			.search {
				position: relative;

				// Magnifier icon overlaid on the input
				&:before {
					position: absolute;
					height: 100%;
					line-height: 38px;
					padding-left: $padding/2;
					font-size: 16px;
					opacity: .5;
				}

				input {
					width: 100%;
					padding: $padding/2 $padding/2 $padding/2 $padding*1.5;
					@include theme(background, background);
					border: none;
					border-bottom: 1px solid transparent;
					@include theme(border-color, off);

					@include media-phablet {
						font-size: 1.2em;
					}
				}
			}

			// Letter grid; letters without matches stay dimmed and inert
			.numbers {
				padding: $padding/2;

				li {
					float: left;
					width: 10%;
					height: 30px;
					line-height: 30px;
					text-align: center;
					opacity: .2;
					cursor: default;
					border: 0;

					&.active {
						@include theme(background, off);
					}

					&.available {
						opacity: 1;
						cursor: pointer;

						&:hover {
							@include theme(background, off);
						}
					}
				}
			}
		}

		.more_menu {

			//&.show .button {
			//	color: rgba(0, 0, 0, 1);
			//}

			.wrapper {
				top: $header_height - 10px;
				padding-top: 4px;
				border-radius: $border_radius $border_radius 0 0;
				min-width: 140px;

				@include media-phablet {
					top: $header_width_mobile;
				}

				// Dropdown pointer arrow, pinned near the right edge
				&:before {
					top: 0;
					left: auto;
					right: 22px;
				}

				ul {
					border-radius: $border_radius $border_radius 0 0;
				}
			}
		}
	}

}
diff --git a/couchpotato/core/media/movie/_base/static/page.js b/couchpotato/core/media/movie/_base/static/page.js
new file mode 100644
index 0000000000..98b5083199
--- /dev/null
+++ b/couchpotato/core/media/movie/_base/static/page.js
@@ -0,0 +1,50 @@
Page.Movies = new Class({

	Extends: PageBase,

	name: 'movies',
	icon: 'movie',
	sub_pages: ['Wanted', 'Manage'],
	default_page: 'Wanted',
	current_page: null,

	initialize: function(parent, options){
		var self = this;
		self.parent(parent, options);

		// Navigation bar shared by the sub pages (Wanted / Manage)
		self.navigation = new BlockNavigation();
		$(self.navigation).inject(self.el);

	},

	// Route every /movies/... action to the matching sub page, swapping
	// the previous one (and its list navigation) out first.
	defaultAction: function(action, params){
		var self = this,
			previous = self.current_page;

		// Tear down the page we're navigating away from
		if(previous){
			previous.hide();

			if(previous.list && previous.list.navigation)
				previous.list.navigation.dispose();
		}

		// Resolve the route into a sub-page name ('index' means default)
		var route = new Route();
		route.parse(action);

		var target_name = route.getPage() == 'index' ? self.default_page : route.getPage().capitalize();

		var next_page = self.sub_pages.filter(function(sub_page){
			return sub_page.name == target_name;
		}).pick()['class'];

		next_page.open(route.getAction() || 'index', params);
		next_page.show();

		// Hook the new page's list navigation into the shared bar
		if(next_page.list && next_page.list.navigation)
			next_page.list.navigation.inject(self.navigation);

		self.current_page = next_page;
		self.navigation.activate(target_name.toLowerCase());

	}

});
diff --git a/couchpotato/core/media/movie/_base/static/search.js b/couchpotato/core/media/movie/_base/static/search.js
new file mode 100644
index 0000000000..734f4647a9
--- /dev/null
+++ b/couchpotato/core/media/movie/_base/static/search.js
@@ -0,0 +1,240 @@
var BlockSearchMovieItem = new Class({

	Implements: [Options, Events],

	// One result row in the "add new movie" search results. Clicking the
	// row reveals title/profile/category selects plus an Add button.
	initialize: function(info, options){
		var self = this;
		self.setOptions(options);

		self.info = info;
		self.alternative_titles = [];

		self.create();
	},

	// Build the result element from the raw search info.
	create: function(){
		var self = this,
			info = self.info;

		// Qualities of releases already downloaded, used to warn the user
		// that the movie is in the library (undefined when it isn't).
		var in_library;
		if(info.in_library){
			in_library = [];
			(info.in_library.releases || []).each(function(release){
				in_library.include(release.quality);
			});
		}

		// Keep it on the instance: createOptions() used to reference the
		// local `in_library` from its own scope, throwing a ReferenceError.
		self.in_library = in_library;

		self.el = new Element('div.media_result', {
			'id': info.imdb,
			'events': {
				'click': self.showOptions.bind(self)
			}
		}).adopt(
			self.thumbnail = info.images && info.images.poster.length > 0 ? new Element('img.thumbnail', {
				'src': info.images.poster[0],
				'height': null,
				'width': null
			}) : null,
			self.options_el = new Element('div.options'),
			self.data_container = new Element('div.data').grab(
				self.info_container = new Element('div.info').grab(
					new Element('h2', {
						'class': info.in_wanted && info.in_wanted.profile_id || in_library ? 'in_library_wanted' : '',
						'title': self.getTitle()
					}).adopt(
						self.title = new Element('span.title', {
							'text': self.getTitle()
						}),
						self.year = info.year ? new Element('span.year', {
							'text': info.year
						}) : null,
						info.in_wanted && info.in_wanted.profile_id ? new Element('span.in_wanted', {
							'text': 'Already in wanted list: ' + Quality.getProfile(info.in_wanted.profile_id).get('label')
						}) : (in_library ? new Element('span.in_library', {
							'text': 'Already in library: ' + in_library.join(', ')
						}) : null)
					)
				)
			)
		);

		if(info.titles)
			info.titles.each(function(title){
				self.alternativeTitle({
					'title': title
				});
			});
	},

	alternativeTitle: function(alternative){
		var self = this;

		self.alternative_titles.include(alternative);
	},

	// Original title when available, first alternative otherwise.
	getTitle: function(){
		var self = this;
		try {
			return self.info.original_title ? self.info.original_title : self.info.titles[0];
		}
		catch(e){
			return 'Unknown';
		}
	},

	get: function(key){
		return this.info[key];
	},

	showOptions: function(){
		var self = this;

		self.createOptions();

		self.data_container.addClass('open');
		self.el.addEvent('outerClick', self.closeOptions.bind(self));

	},

	closeOptions: function(){
		var self = this;

		self.data_container.removeClass('open');
		self.el.removeEvents('outerClick');
	},

	// Submit the movie with the currently selected title/profile/category.
	add: function(e){
		var self = this;

		if(e)
			(e).preventDefault();

		self.loadingMask();

		Api.request('movie.add', {
			'data': {
				'identifier': self.info.imdb,
				'title': self.title_select.get('value'),
				'profile_id': self.profile_select.get('value'),
				'category_id': self.category_select.get('value')
			},
			'onComplete': function(json){
				self.options_el.empty();
				self.options_el.grab(
					new Element('div.message', {
						'text': json.success ? 'Movie successfully added.' : 'Movie didn\'t add properly. Check logs'
					})
				);
				self.mask.fade('out');

				self.fireEvent('added');
			},
			'onFailure': function(){
				self.options_el.empty();
				self.options_el.grab(
					new Element('div.message', {
						'text': 'Something went wrong, check the logs for more info.'
					})
				);
				self.mask.fade('out');
			}
		});
	},

	// Lazily build the title/profile/category selects; auto-add when there
	// is nothing for the user to choose from.
	createOptions: function(){
		var self = this,
			info = self.info;

		if(!self.options_el.hasClass('set')){

			self.options_el.grab(
				new Element('div').adopt(
					new Element('div.title').grab(
						self.title_select = new Element('select', {
							'name': 'title'
						})
					),
					new Element('div.profile').grab(
						self.profile_select = new Element('select', {
							'name': 'profile'
						})
					),
					self.category_select_container = new Element('div.category').grab(
						self.category_select = new Element('select', {
							'name': 'category'
						}).grab(
							new Element('option', {'value': -1, 'text': 'None'})
						)
					),
					new Element('div.add').grab(
						self.add_button = new Element('a.button', {
							'text': 'Add',
							'events': {
								'click': self.add.bind(self)
							}
						})
					)
				)
			);

			Array.each(self.alternative_titles, function(alt){
				new Element('option', {
					'text': alt.title
				}).inject(self.title_select);
			});


			// Fill categories
			var categories = CategoryList.getAll();

			if(categories.length === 0)
				self.category_select_container.hide();
			else {
				self.category_select_container.show();
				categories.each(function(category){
					new Element('option', {
						'value': category.data._id,
						'text': category.data.label
					}).inject(self.category_select);
				});
			}

			// Fill profiles
			var profiles = Quality.getActiveProfiles();
			if(profiles.length == 1)
				self.profile_select.hide();

			profiles.each(function(profile){
				new Element('option', {
					'value': profile.get('_id'),
					'text': profile.get('label')
				}).inject(self.profile_select);
			});

			self.options_el.addClass('set');

			// Fix: this condition previously read the undeclared local
			// `in_library` (a create()-scope variable), which raised a
			// ReferenceError; use the value stored on the instance instead.
			if(categories.length === 0 && self.title_select.getElements('option').length == 1 && profiles.length == 1 &&
				!(self.info.in_wanted && self.info.in_wanted.profile_id || self.in_library))
				self.add();

		}

	},

	loadingMask: function(){
		var self = this;

		self.mask = new Element('div.mask').inject(self.el).fade('hide');

		createSpinner(self.mask);
		self.mask.fade('in');

	},

	toElement: function(){
		return this.el;
	}

});
diff --git a/couchpotato/core/media/movie/_base/static/wanted.js b/couchpotato/core/media/movie/_base/static/wanted.js
new file mode 100644
index 0000000000..094a70b1ce
--- /dev/null
+++ b/couchpotato/core/media/movie/_base/static/wanted.js
@@ -0,0 +1,142 @@
var MoviesWanted = new Class({

	Extends: PageBase,

	order: 10,
	name: 'wanted',
	title: 'Gimme gimme gimme!',
	// Lazily created folder-scan dialog (Option.Directory), see scanFolder()
	folder_browser: null,

	// Build the wanted-movie list (only once) when the page is opened.
	indexAction: function(){
		var self = this;

		if(!self.list){

			// Menu entry: force a search over the whole wanted list
			self.manual_search = new Element('a', {
				'title': 'Force a search for the full wanted list',
				'text': 'Search all wanted',
				'events':{
					'click': self.doFullSearch.bind(self, true)
				}
			});

			// Menu entry: open the manual folder scan dialog
			self.scan_folder = new Element('a', {
				'title': 'Scan a folder and rename all movies in it',
				'text': 'Manual folder scan',
				'events':{
					'click': self.scanFolder.bind(self)
				}
			});

			// Wanted movies
			self.list = new MovieList({
				'identifier': 'wanted',
				'status': 'active',
				'actions': [MA.MarkAsDone, MA.IMDB, MA.Release, MA.Trailer, MA.Refresh, MA.Readd, MA.Delete, MA.Category, MA.Profile],
				'add_new': true,
				'menu': [self.manual_search, self.scan_folder],
				'on_empty_element': function(){
					return new Element('div.empty_wanted').adopt(
						new Element('div.no_movies', {
							'text': 'Seems like you don\'t have any movies yet.. Maybe add some via search or the extension.'
						}),
						App.createUserscriptButtons()
					);
				}
			});
			$(self.list).inject(self.content);

			// Check if search is in progress
			requestTimeout(self.startProgressInterval.bind(self), 4000);
		}

	},

	// Kick off a full search for all wanted movies; no-op while one runs.
	doFullSearch: function(){
		var self = this;

		if(!self.search_in_progress){

			Api.request('movie.searcher.full_search');
			self.startProgressInterval();

		}

	},

	// Poll movie.searcher.progress every second and mirror the percentage
	// in the "Search all wanted" menu entry until the search finishes.
	startProgressInterval: function(){
		var self = this;

		var start_text = self.manual_search.get('text');
		self.progress_interval = requestInterval(function(){
			// Don't stack requests if the previous poll is still running
			if(self.search_progress && self.search_progress.running) return;
			self.search_progress = Api.request('movie.searcher.progress', {
				'onComplete': function(json){
					self.search_in_progress = true;
					if(!json.movie){
						// No movie in progress anymore: stop polling, restore label
						clearRequestInterval(self.progress_interval);
						self.search_in_progress = false;
						self.manual_search.set('text', start_text);
					}
					else {
						var progress = json.movie;
						self.manual_search.set('text', 'Searching.. (' + Math.round(((progress.total-progress.to_go)/progress.total)*100) + '%)');
					}
				}
			});
		}, 1000);

	},

	// Open (and on first use, build) the folder browser and trigger a
	// renamer.scan on the chosen folder.
	scanFolder: function(e) {
		(e).stop();

		var self = this;
		var options = {
			'name': 'Scan_folder'
		};

		if(!self.folder_browser){
			// Reuse the settings directory widget as a standalone picker
			self.folder_browser = new Option.Directory("Scan", "folder", "", options);

			// "Save" here means: start a renamer scan on the selected folder
			self.folder_browser.save = function() {
				var folder = self.folder_browser.getValue();
				Api.request('renamer.scan', {
					'data': {
						'base_folder': folder
					}
				});
			};

			self.folder_browser.inject(self.content, 'top');
			self.folder_browser.fireEvent('injected');

			// Hide the settings box
			self.folder_browser.directory_inlay.hide();
			self.folder_browser.el.removeChild(self.folder_browser.el.firstChild);

			self.folder_browser.showBrowser();

			// Make adjustments to the browser
			self.folder_browser.browser.getElements('.clear.button').hide();
			self.folder_browser.save_button.text = "Select";
			self.folder_browser.browser.setStyles({
				'z-index': 1000,
				'right': 20,
				'top': 0,
				'margin': 0
			});

			self.folder_browser.pointer.setStyles({
				'right': 20
			});

		}
		else{
			// Already built: just show it again
			self.folder_browser.showBrowser();
		}

		self.list.navigation_menu.hide();
	}

});
diff --git a/couchpotato/core/media/movie/charts/__init__.py b/couchpotato/core/media/movie/charts/__init__.py
new file mode 100644
index 0000000000..0b89eaf448
--- /dev/null
+++ b/couchpotato/core/media/movie/charts/__init__.py
@@ -0,0 +1,20 @@
from .main import Charts


def autoload():
    """Plugin entry point: instantiate the Charts API plugin."""
    return Charts()


# Settings metadata shown on the "display" tab; the empty options list is
# populated at runtime by the individual chart providers.
config = [{
    'name': 'charts',
    'groups': [
        {
            'label': 'Charts',
            'description': 'Displays selected charts on the home page',
            'type': 'list',
            'name': 'charts_providers',
            'tab': 'display',
            'options': [],
        },
    ],
}]
diff --git a/couchpotato/core/media/movie/charts/main.py b/couchpotato/core/media/movie/charts/main.py
new file mode 100644
index 0000000000..d42c29a033
--- /dev/null
+++ b/couchpotato/core/media/movie/charts/main.py
@@ -0,0 +1,84 @@
+from CodernityDB.database import RecordNotFound
+from couchpotato import Env, get_db
+from couchpotato.core.helpers.variable import getTitle, splitString
+
+from couchpotato.core.logger import CPLog
+from couchpotato.api import addApiView
+from couchpotato.core.event import fireEvent
+from couchpotato.core.plugins.base import Plugin
+
+
+log = CPLog(__name__)
+
+
class Charts(Plugin):
    """API endpoints for the home-page movie charts.

    Chart data comes from the automation providers via the
    'automation.get_chart_list' event; this plugin filters out ignored
    and in-library movies and reshapes the rest for movie/list.js.
    """

    def __init__(self):
        addApiView('charts.view', self.automationView)
        addApiView('charts.ignore', self.ignoreView)

    def automationView(self, force_update = False, **kwargs):
        """Return all enabled charts, minus ignored/in-library movies.

        force_update is accepted for API compatibility; it is currently
        unused.
        """

        db = get_db()

        # fireEvent returns None when no chart provider is enabled; guard
        # so iteration and len() below don't raise TypeError.
        charts = fireEvent('automation.get_chart_list', merge = True) or []
        ignored = splitString(Env.prop('charts_ignore', default = ''))

        # Create a list the movie/list.js can use
        for chart in charts:
            medias = []
            for media in chart.get('list', []):

                identifier = media.get('imdb')
                if identifier in ignored:
                    continue

                try:
                    # Skip movies that are already in the library
                    try:
                        in_library = db.get('media', 'imdb-%s' % identifier)
                        if in_library:
                            continue
                    except RecordNotFound:
                        pass
                except:
                    # Best effort only; a failed lookup must not hide the
                    # whole chart, but don't swallow it silently either.
                    log.debug('Library lookup failed for %s', identifier)

                # Cache poster, preferring TMDB-hosted images when available
                posters = media.get('images', {}).get('poster', [])
                poster = [x for x in posters if 'tmdb' in x]
                posters = poster if len(poster) > 0 else posters

                cached_poster = fireEvent('file.download', url = posters[0], single = True) if len(posters) > 0 else False
                files = {'image_poster': [cached_poster] } if cached_poster else {}

                medias.append({
                    'status': 'chart',
                    'title': getTitle(media),
                    'type': 'movie',
                    'info': media,
                    'files': files,
                    'identifiers': {
                        'imdb': identifier
                    }
                })

            chart['list'] = medias

        return {
            'success': True,
            'count': len(charts),
            'charts': charts,
            'ignored': ignored,
        }

    def ignoreView(self, imdb = None, **kwargs):
        """Add an IMDB id to the persistent charts ignore list."""

        ignored = splitString(Env.prop('charts_ignore', default = ''))

        if imdb:
            ignored.append(imdb)
            # set() keeps the stored comma-separated list free of duplicates
            Env.prop('charts_ignore', ','.join(set(ignored)))

        return {
            'result': True
        }
diff --git a/couchpotato/core/media/movie/charts/static/charts.js b/couchpotato/core/media/movie/charts/static/charts.js
new file mode 100644
index 0000000000..80b2314d98
--- /dev/null
+++ b/couchpotato/core/media/movie/charts/static/charts.js
@@ -0,0 +1,93 @@
var Charts = new Class({

	Implements: [Options, Events],

	// Charts are fetched from the API only the first time show() runs
	shown_once: false,

	initialize: function(options){
		var self = this;
		self.setOptions(options);

		self.create();
	},

	// Build the container with its "Refreshing charts..." placeholder.
	create: function(){
		var self = this;

		self.el = new Element('div.charts').grab(
			self.el_refresh_container = new Element('div.refresh').grab(
				self.el_refreshing_text = new Element('span.refreshing', {
					'text': 'Refreshing charts...'
				})
			)
		);

		self.show();

		// Fire 'created' after the current call stack so listeners attached
		// right after construction still receive it
		requestTimeout(function(){
			self.fireEvent('created');
		}, 0);
	},

	// Render the charts returned by the charts.view API call.
	fill: function(json){

		var self = this;

		self.el_refreshing_text.hide();

		if(json && json.count > 0){
			// Providers supply an 'order' value to control chart ordering
			json.charts.sort(function(a, b) {
				return a.order - b.order;
			});

			Object.each(json.charts, function(chart){

				var chart_list = new MovieList({
					'navigation': false,
					'identifier': chart.name.toLowerCase().replace(/[^a-z0-9]+/g, '_'),
					'title': chart.name,
					'description': 'See source ',
					'actions': [MA.Add, MA.ChartIgnore, MA.IMDB, MA.Trailer],
					'load_more': false,
					'view': 'thumb',
					'force_view': true,
					'api_call': null
				});

				// Load movies in manually
				chart_list.store(chart.list);
				chart_list.addMovies(chart.list, chart.list.length);
				chart_list.checkIfEmpty();
				chart_list.fireEvent('loaded');

				$(chart_list).inject(self.el);

			});

		}

		self.fireEvent('loaded');

	},

	// Show the element; trigger the one-time API request on first show.
	show: function(){
		var self = this;

		self.el.show();

		if(!self.shown_once){
			requestTimeout(function(){
				self.api_request = Api.request('charts.view', {
					'onComplete': self.fill.bind(self)
				});
			}, 100);

			self.shown_once = true;
		}
	},

	toElement: function(){
		return this.el;
	}

});
diff --git a/couchpotato/core/media/movie/library.py b/couchpotato/core/media/movie/library.py
new file mode 100644
index 0000000000..28cb1b46ed
--- /dev/null
+++ b/couchpotato/core/media/movie/library.py
@@ -0,0 +1,32 @@
+from couchpotato.core.event import addEvent
+from couchpotato.core.helpers.variable import getTitle
+from couchpotato.core.logger import CPLog
+from couchpotato.core.media._base.library.base import LibraryBase
+
+
+log = CPLog(__name__)
+
+autoload = 'MovieLibraryPlugin'
+
+
class MovieLibraryPlugin(LibraryBase):
    """Builds searchable title strings ('Title 2014') for movie media."""

    def __init__(self):
        addEvent('library.query', self.query)

    def query(self, media, first = True, include_year = True, **kwargs):
        """Return the search title(s) for a movie.

        Returns the first title when first is True, otherwise the full
        list. Non-movie media is ignored (returns None).
        """
        if media.get('type') != 'movie':
            return

        default_title = getTitle(media)

        # Build a fresh list: the previous insert(0, ...) mutated the
        # media's own 'titles' list, growing it by one entry per call.
        titles = [default_title] + list(media['info'].get('titles', []))

        # Add year identifier to titles
        if include_year:
            titles = [title + (' %s' % str(media['info']['year'])) for title in titles]

        if first:
            return titles[0] if titles else None

        return titles
diff --git a/libs/migrate/versioning/templates/__init__.py b/couchpotato/core/media/movie/providers/__init__.py
similarity index 100%
rename from libs/migrate/versioning/templates/__init__.py
rename to couchpotato/core/media/movie/providers/__init__.py
diff --git a/couchpotato/core/media/movie/providers/automation/__init__.py b/couchpotato/core/media/movie/providers/automation/__init__.py
new file mode 100644
index 0000000000..93f6c10a40
--- /dev/null
+++ b/couchpotato/core/media/movie/providers/automation/__init__.py
@@ -0,0 +1,21 @@
# Settings metadata for the "automation" tab: two provider groups whose
# empty options lists are filled at load time by the individual providers.
config = [{
    'name': 'automation_providers',
    'groups': [
        {
            'label': 'Watchlists',
            'description': 'Check watchlists for new movies',
            'type': 'list',
            'name': 'watchlist_providers',
            'tab': 'automation',
            'options': [],
        },
        {
            'label': 'Automated',
            'description': 'Uses minimal requirements',
            'type': 'list',
            'name': 'automation_providers',
            'tab': 'automation',
            'options': [],
        },
    ],
}]
diff --git a/couchpotato/core/media/movie/providers/automation/base.py b/couchpotato/core/media/movie/providers/automation/base.py
new file mode 100644
index 0000000000..ee19649a35
--- /dev/null
+++ b/couchpotato/core/media/movie/providers/automation/base.py
@@ -0,0 +1,117 @@
+import time
+import unicodedata
+
+from couchpotato.core.event import addEvent, fireEvent
+from couchpotato.core.logger import CPLog
+from couchpotato.core.media._base.providers.automation.base import AutomationBase
+from couchpotato.environment import Env
+from couchpotato.core.helpers.variable import splitString
+
+
+log = CPLog(__name__)
+
+
class Automation(AutomationBase):
    """Base class for movie automation providers (watchlists and charts).

    Subclasses override getIMDBids() and/or getChartList(); this class
    wires them to the 'automation.get_movies' and
    'automation.get_chart_list' events and applies the shared minimum
    requirements (year/rating/votes/genres).
    """

    enabled_option = 'automation_enabled'
    chart_enabled_option = 'chart_display_enabled'
    # Minimum seconds between HTTP requests to the provider
    http_time_between_calls = 2

    # Minimum seconds between two full checks of this provider
    interval = 1800
    last_checked = 0

    def __init__(self):
        addEvent('automation.get_movies', self._getMovies)
        addEvent('automation.get_chart_list', self._getChartList)

    def _getMovies(self):
        """Event handler: return new IMDB ids, honouring the check interval.

        Returns None when the provider is disabled, [] when it was checked
        too recently.
        """

        if self.isDisabled():
            return

        if not self.canCheck():
            log.debug('Just checked, skipping %s', self.getName())
            return []

        self.last_checked = time.time()

        return self.getIMDBids()

    def _getChartList(self):
        """Event handler: return this provider's chart(s) when enabled."""

        # A never-set option (None) counts as enabled by default
        if not (self.conf(self.chart_enabled_option) or self.conf(self.chart_enabled_option) is None):
            return

        return self.getChartList()

    def search(self, name, year = None, imdb_only = False):
        """Search for a movie by name (and optional year).

        Returns the full info dict, just the imdb id when imdb_only is
        True (cached via Env.prop), or None when nothing is found.
        """

        # Normalise the name to ASCII so it is safe as a property key
        try:
            cache_name = name.decode('utf-8').encode('ascii', 'ignore')
        except UnicodeEncodeError:
            cache_name = unicodedata.normalize('NFKD', name).encode('ascii','ignore')

        prop_name = 'automation.cached.%s.%s' % (cache_name, year)
        cached_imdb = Env.prop(prop_name, default = False)
        if cached_imdb and imdb_only:
            return cached_imdb

        result = fireEvent('movie.search', q = '%s %s' % (name, year if year else ''), limit = 1, merge = True)

        if len(result) > 0:
            if imdb_only and result[0].get('imdb'):
                Env.prop(prop_name, result[0].get('imdb'))

            return result[0].get('imdb') if imdb_only else result[0]
        else:
            return None

    def isMinimalMovie(self, movie):
        """Check a movie info dict against the configured minimums.

        Tests year/rating/votes thresholds plus required and ignored
        genres; '&' joins genres that must all match within one set.
        """
        if not movie.get('rating'):
            log.info('ignoring %s as no rating is available for.', (movie['original_title']))
            return False

        # Flatten the imdb rating tuple into plain rating/votes fields
        if movie['rating'] and movie['rating'].get('imdb'):
            movie['votes'] = movie['rating']['imdb'][1]
            movie['rating'] = movie['rating']['imdb'][0]

        for minimal_type in ['year', 'rating', 'votes']:
            type_value = movie.get(minimal_type, 0)
            type_min = self.getMinimal(minimal_type)
            if type_value < type_min:
                log.info('%s too low for %s, need %s has %s', (minimal_type, movie['original_title'], type_min, type_value))
                return False

        movie_genres = [genre.lower() for genre in movie['genres']]
        required_genres = splitString(self.getMinimal('required_genres').lower())
        ignored_genres = splitString(self.getMinimal('ignored_genres').lower())

        # A required set matches only when all of its '&'-joined genres match
        req_match = 0
        for req_set in required_genres:
            req = splitString(req_set, '&')
            req_match += len(list(set(movie_genres) & set(req))) == len(req)

        if self.getMinimal('required_genres') and req_match == 0:
            log.info2('Required genre(s) missing for %s', movie['original_title'])
            return False

        for ign_set in ignored_genres:
            ign = splitString(ign_set, '&')
            if len(list(set(movie_genres) & set(ign))) == len(ign):
                log.info2('%s has blacklisted genre(s): %s', (movie['original_title'], ign))
                return False

        return True

    def getMinimal(self, min_type):
        # Thresholds live in the 'automation' settings section
        return Env.setting(min_type, 'automation')

    def getIMDBids(self):
        """Override: return a list of IMDB ids to add to the wanted list."""
        return []

    def getChartList(self):
        # Example return: [ {'name': 'Display name of list', 'url': 'http://example.com/', 'order': 1, 'list': []} ]
        return

    def canCheck(self):
        # True once 'interval' seconds have passed since the last check
        return time.time() > self.last_checked + self.interval
diff --git a/couchpotato/core/media/movie/providers/automation/bluray.py b/couchpotato/core/media/movie/providers/automation/bluray.py
new file mode 100644
index 0000000000..3cd6fd63bd
--- /dev/null
+++ b/couchpotato/core/media/movie/providers/automation/bluray.py
@@ -0,0 +1,192 @@
+import traceback
+
+from bs4 import BeautifulSoup
+from couchpotato import fireEvent
+from couchpotato.core.helpers.rss import RSS
+from couchpotato.core.helpers.variable import tryInt
+from couchpotato.core.logger import CPLog
+from couchpotato.core.media.movie.providers.automation.base import Automation
+
+
+log = CPLog(__name__)
+
+autoload = 'Bluray'
+
+
+class Bluray(Automation, RSS):
+
+ interval = 1800
+ rss_url = 'http://www.blu-ray.com/rss/newreleasesfeed.xml'
+ backlog_url = 'http://www.blu-ray.com/movies/movies.php?show=newreleases&page=%s'
+ display_url = 'http://www.blu-ray.com/movies/movies.php?show=newreleases'
+ chart_order = 1
+
+ def getIMDBids(self):
+
+ movies = []
+
+ if self.conf('backlog'):
+
+ cookie = {'Cookie': 'listlayout_7=full'}
+ page = 0
+ while True:
+ page += 1
+
+ url = self.backlog_url % page
+ data = self.getHTMLData(url, headers = cookie)
+ soup = BeautifulSoup(data)
+
+ try:
+ # Stop if the release year is before the minimal year
+ brk = False
+ h3s = soup.body.find_all('h3')
+ for h3 in h3s:
+ if h3.parent.name != 'a':
+
+ try:
+ page_year = tryInt(h3.get_text()[-4:])
+ if page_year > 0 and page_year < self.getMinimal('year'):
+ brk = True
+ except:
+ log.error('Failed determining page year: %s', traceback.format_exc())
+ brk = True
+ break
+
+ if brk:
+ break
+
+ for h3 in h3s:
+ try:
+ if h3.parent.name == 'a':
+ name = h3.get_text().lower().split('blu-ray')[0].strip()
+
+ if not name.find('/') == -1: # make sure it is not a double movie release
+ continue
+
+ if not h3.parent.parent.small: # ignore non-movie tables
+ continue
+
+ year = h3.parent.parent.small.get_text().split('|')[1].strip()
+
+ if tryInt(year) < self.getMinimal('year'):
+ continue
+
+ imdb = self.search(name, year)
+
+ if imdb:
+ if self.isMinimalMovie(imdb):
+ movies.append(imdb['imdb'])
+ except:
+ log.debug('Error parsing movie html: %s', traceback.format_exc())
+ break
+ except:
+ log.debug('Error loading page %s: %s', (page, traceback.format_exc()))
+ break
+
+ self.conf('backlog', value = False)
+
+ rss_movies = self.getRSSData(self.rss_url)
+
+ for movie in rss_movies:
+ name = self.getTextElement(movie, 'title').lower().split('blu-ray')[0].strip('(').rstrip()
+ year = self.getTextElement(movie, 'description').split('|')[1].strip('(').strip()
+
+ if not name.find('/') == -1: # make sure it is not a double movie release
+ continue
+
+ if tryInt(year) < self.getMinimal('year'):
+ continue
+
+ imdb = self.search(name, year)
+
+ if imdb:
+ if self.isMinimalMovie(imdb):
+ movies.append(imdb['imdb'])
+
+ return movies
+
+ def getChartList(self):
+ cache_key = 'bluray.charts'
+ movie_list = {
+ 'name': 'Blu-ray.com - New Releases',
+ 'url': self.display_url,
+ 'order': self.chart_order,
+ 'list': self.getCache(cache_key) or []
+ }
+
+ if not movie_list['list']:
+ movie_ids = []
+ max_items = 10
+ rss_movies = self.getRSSData(self.rss_url)
+
+ for movie in rss_movies:
+ name = self.getTextElement(movie, 'title').lower().split('blu-ray')[0].strip('(').rstrip()
+ year = self.getTextElement(movie, 'description').split('|')[1].strip('(').strip()
+
+ if not name.find('/') == -1: # make sure it is not a double movie release
+ continue
+
+ movie = self.search(name, year)
+
+ if movie:
+
+ if movie.get('imdb') in movie_ids:
+ continue
+
+ is_movie = fireEvent('movie.is_movie', identifier = movie.get('imdb'), single = True)
+ if not is_movie:
+ continue
+
+ movie_ids.append(movie.get('imdb'))
+ movie_list['list'].append( movie )
+ if len(movie_list['list']) >= max_items:
+ break
+
+ if not movie_list['list']:
+ return
+
+ self.setCache(cache_key, movie_list['list'], timeout = 259200)
+
+ return [movie_list]
+
+
+config = [{
+ 'name': 'bluray',
+ 'groups': [
+ {
+ 'tab': 'automation',
+ 'list': 'automation_providers',
+ 'name': 'bluray_automation',
+ 'label': 'Blu-ray.com',
+ 'description': 'Imports movies from blu-ray.com.',
+ 'options': [
+ {
+ 'name': 'automation_enabled',
+ 'default': False,
+ 'type': 'enabler',
+ },
+ {
+ 'name': 'backlog',
+ 'advanced': True,
+ 'description': ('Parses the history until the minimum movie year is reached. (Takes a while)', 'Will be disabled once it has completed'),
+ 'default': False,
+ 'type': 'bool',
+ },
+ ],
+ },
+ {
+ 'tab': 'display',
+ 'list': 'charts_providers',
+ 'name': 'bluray_charts_display',
+ 'label': 'Blu-ray.com',
+ 'description': 'Display new releases from Blu-ray.com',
+ 'options': [
+ {
+ 'name': 'chart_display_enabled',
+ 'default': True,
+ 'type': 'enabler',
+ },
+ ],
+ },
+ ],
+}]
diff --git a/couchpotato/core/media/movie/providers/automation/crowdai.py b/couchpotato/core/media/movie/providers/automation/crowdai.py
new file mode 100644
index 0000000000..574310792f
--- /dev/null
+++ b/couchpotato/core/media/movie/providers/automation/crowdai.py
@@ -0,0 +1,90 @@
+import re
+
+from couchpotato.core.helpers.rss import RSS
+from couchpotato.core.helpers.variable import tryInt, splitString
+from couchpotato.core.logger import CPLog
+from couchpotato.core.media.movie.providers.automation.base import Automation
+
+
+log = CPLog(__name__)
+
+autoload = 'CrowdAI'
+
+
+class CrowdAI(Automation, RSS):
+
+ interval = 1800
+
+ def getIMDBids(self):
+
+ movies = []
+
+ urls = dict(zip(splitString(self.conf('automation_urls')), [tryInt(x) for x in splitString(self.conf('automation_urls_use'))]))
+
+ for url in urls:
+
+ if not urls[url]:
+ continue
+
+ rss_movies = self.getRSSData(url)
+
+ for movie in rss_movies:
+
+ description = self.getTextElement(movie, 'description')
+ grabs = 0
+
+ for item in movie:
+ if item.attrib.get('name') == 'grabs':
+ grabs = item.attrib.get('value')
+ break
+
+ if int(grabs) > tryInt(self.conf('number_grabs')):
+ title = re.match(r'.*Title: .a href.*/">(.*) \(\d{4}\).*', description).group(1)
+ log.info2('%s grabs for movie: %s, enqueue...', (grabs, title))
+ year = re.match(r'.*Year: (\d{4}).*', description).group(1)
+ imdb = self.search(title, year)
+
+ if imdb and self.isMinimalMovie(imdb):
+ movies.append(imdb['imdb'])
+
+ return movies
+
+
+config = [{
+ 'name': 'crowdai',
+ 'groups': [
+ {
+ 'tab': 'automation',
+ 'list': 'automation_providers',
+ 'name': 'crowdai_automation',
+ 'label': 'CrowdAI',
+        'description': ('Imports from any newznab-powered NZB provider\'s RSS feed depending on the number of grabs per movie.',
+                        'Go to your newznab site and find the RSS section. Then copy and paste the link under "Movies > x264 feed" here.'),
+ 'options': [
+ {
+ 'name': 'automation_enabled',
+ 'default': False,
+ 'type': 'enabler',
+ },
+ {
+ 'name': 'automation_urls_use',
+ 'label': 'Use',
+ 'default': '1',
+ },
+ {
+ 'name': 'automation_urls',
+ 'label': 'url',
+ 'type': 'combined',
+ 'combine': ['automation_urls_use', 'automation_urls'],
+ 'default': 'http://YOUR_PROVIDER/rss?t=THE_MOVIE_CATEGORY&i=YOUR_USER_ID&r=YOUR_API_KEY&res=2&rls=2&num=100',
+ },
+ {
+ 'name': 'number_grabs',
+ 'default': '500',
+ 'label': 'Grab threshold',
+ 'description': 'Number of grabs required',
+ },
+ ],
+ },
+ ],
+}]
diff --git a/couchpotato/core/media/movie/providers/automation/flixster.py b/couchpotato/core/media/movie/providers/automation/flixster.py
new file mode 100644
index 0000000000..ab03c93186
--- /dev/null
+++ b/couchpotato/core/media/movie/providers/automation/flixster.py
@@ -0,0 +1,83 @@
+from couchpotato.core.helpers.variable import tryInt, splitString
+from couchpotato.core.logger import CPLog
+from couchpotato.core.media.movie.providers.automation.base import Automation
+
+log = CPLog(__name__)
+
+autoload = 'Flixster'
+
+
+class Flixster(Automation):
+
+ url = 'http://www.flixster.com/api/users/%s/movies/ratings?scoreTypes=wts'
+
+ interval = 60
+
+ def getIMDBids(self):
+
+ ids = splitString(self.conf('automation_ids'))
+
+ if len(ids) == 0:
+ return []
+
+ movies = []
+
+ for movie in self.getWatchlist():
+ imdb_id = self.search(movie.get('title'), movie.get('year'), imdb_only = True)
+ movies.append(imdb_id)
+
+ return movies
+
+ def getWatchlist(self):
+
+ enablers = [tryInt(x) for x in splitString(self.conf('automation_ids_use'))]
+ ids = splitString(self.conf('automation_ids'))
+
+ index = -1
+ movies = []
+ for user_id in ids:
+
+ index += 1
+ if not enablers[index]:
+ continue
+
+ data = self.getJsonData(self.url % user_id, decode_from = 'iso-8859-1')
+
+ for movie in data:
+ movies.append({
+ 'title': movie['movie']['title'],
+ 'year': movie['movie']['year']
+ })
+
+ return movies
+
+
+config = [{
+ 'name': 'flixster',
+ 'groups': [
+ {
+ 'tab': 'automation',
+ 'list': 'watchlist_providers',
+ 'name': 'flixster_automation',
+ 'label': 'Flixster',
+ 'description': 'Import movies from any public Flixster watchlist',
+ 'options': [
+ {
+ 'name': 'automation_enabled',
+ 'default': False,
+ 'type': 'enabler',
+ },
+ {
+ 'name': 'automation_ids_use',
+ 'label': 'Use',
+ },
+ {
+ 'name': 'automation_ids',
+ 'label': 'User ID',
+ 'type': 'combined',
+ 'combine': ['automation_ids_use', 'automation_ids'],
+ },
+ ],
+ },
+ ],
+}]
diff --git a/couchpotato/core/media/movie/providers/automation/goodfilms.py b/couchpotato/core/media/movie/providers/automation/goodfilms.py
new file mode 100644
index 0000000000..37a5a75a77
--- /dev/null
+++ b/couchpotato/core/media/movie/providers/automation/goodfilms.py
@@ -0,0 +1,84 @@
+from bs4 import BeautifulSoup
+from couchpotato.core.logger import CPLog
+from couchpotato.core.media.movie.providers.automation.base import Automation
+
+log = CPLog(__name__)
+
+autoload = 'Goodfilms'
+
+
+class Goodfilms(Automation):
+
+ url = 'https://goodfil.ms/%s/queue?page=%d&without_layout=1'
+
+ interval = 1800
+
+ def getIMDBids(self):
+
+ if not self.conf('automation_username'):
+ log.error('Please fill in your username')
+ return []
+
+ movies = []
+
+ for movie in self.getWatchlist():
+ imdb_id = self.search(movie.get('title'), movie.get('year'), imdb_only = True)
+ movies.append(imdb_id)
+
+ return movies
+
+ def getWatchlist(self):
+
+ movies = []
+ page = 1
+
+ while True:
+ url = self.url % (self.conf('automation_username'), page)
+ data = self.getHTMLData(url)
+ soup = BeautifulSoup(data)
+
+ this_watch_list = soup.find_all('div', attrs = {
+ 'class': 'movie',
+ 'data-film-title': True
+ })
+
+ if not this_watch_list: # No Movies
+ break
+
+ for movie in this_watch_list:
+ movies.append({
+ 'title': movie['data-film-title'],
+ 'year': movie['data-film-year']
+ })
+
+ if not 'next page' in data.lower():
+ break
+
+ page += 1
+
+ return movies
+
+
+config = [{
+ 'name': 'goodfilms',
+ 'groups': [
+ {
+ 'tab': 'automation',
+ 'list': 'watchlist_providers',
+ 'name': 'goodfilms_automation',
+ 'label': 'Goodfilms',
+ 'description': 'import movies from your Goodfilms queue',
+ 'options': [
+ {
+ 'name': 'automation_enabled',
+ 'default': False,
+ 'type': 'enabler',
+ },
+ {
+ 'name': 'automation_username',
+ 'label': 'Username',
+ },
+ ],
+ },
+ ],
+}]
diff --git a/couchpotato/core/media/movie/providers/automation/hummingbird.py b/couchpotato/core/media/movie/providers/automation/hummingbird.py
new file mode 100644
index 0000000000..188185877f
--- /dev/null
+++ b/couchpotato/core/media/movie/providers/automation/hummingbird.py
@@ -0,0 +1,104 @@
+from couchpotato.core.logger import CPLog
+from couchpotato.core.media.movie.providers.automation.base import Automation
+
+
+log = CPLog(__name__)
+
+autoload = 'Hummingbird'
+
+
+class Hummingbird(Automation):
+
+ def getIMDBids(self):
+ movies = []
+ for movie in self.getWatchlist():
+ imdb = self.search(movie[0], movie[1])
+ if imdb:
+ movies.append(imdb['imdb'])
+ return movies
+
+ def getWatchlist(self):
+ if not self.conf('automation_username'):
+ log.error('You need to fill in a username')
+ return []
+
+ url = "http://hummingbird.me/api/v1/users/%s/library" % self.conf('automation_username')
+ data = self.getJsonData(url)
+
+ chosen_filter = {
+ 'automation_list_current': 'currently-watching',
+ 'automation_list_plan': 'plan-to-watch',
+ 'automation_list_completed': 'completed',
+ 'automation_list_hold': 'on-hold',
+ 'automation_list_dropped': 'dropped',
+ }
+
+ chosen_lists = []
+ for x in chosen_filter:
+ if self.conf(x):
+ chosen_lists.append(chosen_filter[x])
+
+ entries = []
+ for item in data:
+ if item['anime']['show_type'] != 'Movie' or item['status'] not in chosen_lists:
+ continue
+ title = item['anime']['title']
+ year = item['anime']['started_airing']
+ if year:
+ year = year[:4]
+ entries.append([title, year])
+ return entries
+
+config = [{
+ 'name': 'hummingbird',
+ 'groups': [
+ {
+ 'tab': 'automation',
+ 'list': 'watchlist_providers',
+ 'name': 'hummingbird_automation',
+ 'label': 'Hummingbird',
+ 'description': 'Import movies from your Hummingbird.me lists',
+ 'options': [
+ {
+ 'name': 'automation_enabled',
+ 'default': False,
+ 'type': 'enabler',
+ },
+ {
+ 'name': 'automation_username',
+ 'label': 'Username',
+ },
+ {
+ 'name': 'automation_list_current',
+ 'type': 'bool',
+ 'label': 'Currently Watching',
+ 'default': False,
+ },
+ {
+ 'name': 'automation_list_plan',
+ 'type': 'bool',
+ 'label': 'Plan to Watch',
+ 'default': True,
+ },
+ {
+ 'name': 'automation_list_completed',
+ 'type': 'bool',
+ 'label': 'Completed',
+ 'default': False,
+ },
+ {
+ 'name': 'automation_list_hold',
+ 'type': 'bool',
+ 'label': 'On Hold',
+ 'default': False,
+ },
+ {
+ 'name': 'automation_list_dropped',
+ 'type': 'bool',
+ 'label': 'Dropped',
+ 'default': False,
+ },
+ ],
+ },
+ ],
+}]
diff --git a/couchpotato/core/media/movie/providers/automation/imdb.py b/couchpotato/core/media/movie/providers/automation/imdb.py
new file mode 100644
index 0000000000..41974c44a1
--- /dev/null
+++ b/couchpotato/core/media/movie/providers/automation/imdb.py
@@ -0,0 +1,318 @@
+import traceback
+import re
+
+from bs4 import BeautifulSoup
+from couchpotato import fireEvent
+from couchpotato.core.helpers.encoding import ss
+from couchpotato.core.helpers.rss import RSS
+from couchpotato.core.helpers.variable import getImdb, splitString, tryInt
+from couchpotato.core.logger import CPLog
+from couchpotato.core.media._base.providers.base import MultiProvider
+from couchpotato.core.media.movie.providers.automation.base import Automation
+
+
+log = CPLog(__name__)
+
+autoload = 'IMDB'
+
+
+class IMDB(MultiProvider):
+
+ def getTypes(self):
+ return [IMDBWatchlist, IMDBAutomation, IMDBCharts]
+
+
+class IMDBBase(Automation, RSS):
+
+ interval = 1800
+
+ charts = {
+ 'theater': {
+ 'order': 1,
+ 'name': 'IMDB - Movies in Theaters',
+ 'url': 'http://www.imdb.com/movies-in-theaters/',
+ },
+ 'boxoffice': {
+ 'order': 2,
+ 'name': 'IMDB - Box Office',
+ 'url': 'http://www.imdb.com/boxoffice/',
+ },
+ 'top250': {
+ 'order': 3,
+ 'name': 'IMDB - Top 250 Movies',
+ 'url': 'http://www.imdb.com/chart/top',
+ },
+ }
+
+ def getInfo(self, imdb_id):
+ return fireEvent('movie.info', identifier = imdb_id, extended = False, adding = False, merge = True)
+
+ def getFromURL(self, url):
+ log.debug('Getting IMDBs from: %s', url)
+ html = self.getHTMLData(url)
+
+ try:
+ split = splitString(html, split_on = "