diff --git a/.coveragerc b/.coveragerc new file mode 100644 index 0000000000..611707ae9f --- /dev/null +++ b/.coveragerc @@ -0,0 +1,11 @@ +[run] +source= + ./couchpotato/ +omit = + ./libs/* + ./node_modules/* +[report] +omit = + */python?.?/* + ./libs/* + ./node_modules/* diff --git a/.editorconfig b/.editorconfig new file mode 100644 index 0000000000..7c1af9a31d --- /dev/null +++ b/.editorconfig @@ -0,0 +1,16 @@ +# http://editorconfig.org +root = true + +[*] +indent_style = tab +indent_size = 4 +end_of_line = lf +charset = utf-8 +trim_trailing_whitespace = true +insert_final_newline = true + +[*.py] +indent_style = space + +[*.md] +trim_trailing_whitespace = false \ No newline at end of file diff --git a/.github/contributing.md b/.github/contributing.md new file mode 100644 index 0000000000..9af7d0a6ef --- /dev/null +++ b/.github/contributing.md @@ -0,0 +1,40 @@ +# Contributing to CouchPotatoServer + +1. [Contributing](#contributing) +2. [Submitting an Issue](#issues) +3. [Submitting a Pull Request](#pull-requests) + +## Contributing +Thank you for your interest in contributing to CouchPotato. There are several ways to help out, even if you've never worked on an open source project before. +If you've found a bug or want to request a feature, you can report it by [posting an issue](https://github.com/CouchPotato/CouchPotatoServer/issues/new) - be sure to read the [guidelines](#issues) first! +If you want to contribute your own work, please read the [guidelines](#pull-requests) for submitting a pull request. +Lastly, for anything related to CouchPotato, feel free to stop by the [forum](http://couchpota.to/forum/) or the [#couchpotato](http://webchat.freenode.net/?channels=couchpotato) IRC channel at irc.freenode.net. + +## Issues +Issues are intended for reporting bugs and weird behaviour or suggesting improvements to CouchPotatoServer. 
+Before you submit an issue, please go through the following checklist: + * **FILL IN ALL THE FIELDS ASKED FOR** + * **POST MORE THAN A SINGLE LINE LOG**, if you do, you'd better have a easy reproducable bug + * Search through existing issues (*including closed issues!*) first: you might be able to get your answer there. + * Double check your issue manually, because it could be an external issue. + * Post logs with your issue: Without seeing what is going on, the developers can't reproduce the error. + * Check the logs yourself before submitting them. Obvious errors like permission or HTTP errors are often not related to CouchPotato. + * What movie and quality are you searching for? + * What are your settings for the specific problem? + * What providers are you using? (While your logs include these, scanning through hundreds of lines of logs isn't our hobby) + * Post the logs from the *config* directory, please do not copy paste the UI. Use pastebin to store these logs! + * Give a short step by step of how to reproduce the error. + * What hardware / OS are you using and what are its limitations? For example: NAS can be slow and maybe have a different version of python installed than when you use CP on OS X or Windows. + * Your issue might be marked with the "can't reproduce" tag. Don't ask why your issue was closed if it says so in the tag. + * If you're running on a NAS (QNAP, Austor, Synology etc.) with pre-made packages, make sure these are set up to use our source repository (CouchPotato/CouchPotatoServer) and nothing else! + * Do not "bump" issues with "Any updates on this" or whatever. Yes I've seen it, you don't have to remind me of it. There will be an update when the code is done or I need information. If you feel the need to do so, you'd better have more info on the issue. + +The more relevant information you provide, the more likely that your issue will be resolved. +If you don't follow any of the checks above, I'll close the issue. 
If you are wondering why (and ask) I'll block you from posting new issues and the repo. + +## Pull Requests +Pull requests are intended for contributing code or documentation to the project. Before you submit a pull request, consider the following: + * Make sure your pull request is made for the *develop* branch (or relevant feature branch). + * Have you tested your PR? If not, why? + * Does your PR have any limitations I should know of? + * Is your PR up-to-date with the branch you're trying to push into? diff --git a/.github/issue_template.md b/.github/issue_template.md new file mode 100644 index 0000000000..dd24310c03 --- /dev/null +++ b/.github/issue_template.md @@ -0,0 +1,14 @@ +### Steps to reproduce: +1. .. +2. .. + +### Information: +Movie(s) I have this with: ... +Quality of the movie being searched: ... +Providers I use: ... +Version of CouchPotato: ... +Running on: ... + +### Logs: +``` +``` diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md new file mode 100644 index 0000000000..fe31e264b9 --- /dev/null +++ b/.github/pull_request_template.md @@ -0,0 +1,5 @@ +### Description of what this fixes: +... + +### Related issues: +... 
diff --git a/.gitignore b/.gitignore index e156f873bb..6de15688c8 100644 --- a/.gitignore +++ b/.gitignore @@ -1,5 +1,19 @@ *.pyc /data/ +/_env/ /_source/ .project .pydevproject +/node_modules/ +/.tmp/ +/.sass-cache/ +.coverage +coverage.xml +nosetests.xml + +# Visual Studio + +/.vs + +.DS_Store +/.vscode/ diff --git a/.nosetestsrc b/.nosetestsrc new file mode 100644 index 0000000000..df777870a0 --- /dev/null +++ b/.nosetestsrc @@ -0,0 +1,2 @@ +[nosetests] +where=couchpotato diff --git a/.travis.yml b/.travis.yml new file mode 100644 index 0000000000..9f0c298199 --- /dev/null +++ b/.travis.yml @@ -0,0 +1,35 @@ +language: python + +# with enabled SUDO the build goes slower +sudo: false + +python: + # - "2.6" + - "2.7" + # - "3.2" + # - "3.3" + # - "3.4" + # - "3.5" + # - "3.5-dev" # 3.5 development branch + # - "nightly" # currently points to 3.6-dev + +cache: + pip: true + directories: + - node_modules + - libs + - lib + +# command to install dependencies +install: + - pip install --upgrade pip + - npm install + - pip install -r requirements-dev.txt -t ./libs + +# command to run tests +script: + - grunt test + - grunt coverage + +after_success: + coveralls \ No newline at end of file diff --git a/.vs/CouchPotatoServer/v14/.suo b/.vs/CouchPotatoServer/v14/.suo new file mode 100644 index 0000000000..98c6663bbe Binary files /dev/null and b/.vs/CouchPotatoServer/v14/.suo differ diff --git a/CouchPotato.py b/CouchPotato.py index e777f9bf43..b4a64218d2 100755 --- a/CouchPotato.py +++ b/CouchPotato.py @@ -1,4 +1,5 @@ #!/usr/bin/env python +from __future__ import print_function from logging import handlers from os.path import dirname import logging @@ -9,7 +10,6 @@ import subprocess import sys import traceback -import time # Root path base_path = dirname(os.path.abspath(__file__)) @@ -18,7 +18,12 @@ sys.path.insert(0, os.path.join(base_path, 'libs')) from couchpotato.environment import Env -from couchpotato.core.helpers.variable import getDataDir +from 
couchpotato.core.helpers.variable import getDataDir, removePyc + + +# Remove pyc files before dynamic load (sees .pyc files regular .py modules) +removePyc(base_path) + class Loader(object): @@ -28,7 +33,7 @@ def __init__(self): # Get options via arg from couchpotato.runner import getOptions - self.options = getOptions(base_path, sys.argv[1:]) + self.options = getOptions(sys.argv[1:]) # Load settings settings = Env.get('settings') @@ -47,9 +52,9 @@ def __init__(self): os.makedirs(self.data_dir) # Create logging dir - self.log_dir = os.path.join(self.data_dir, 'logs'); + self.log_dir = os.path.join(self.data_dir, 'logs') if not os.path.isdir(self.log_dir): - os.mkdir(self.log_dir) + os.makedirs(self.log_dir) # Logging from couchpotato.core.logger import CPLog @@ -66,14 +71,15 @@ def addSignals(self): signal.signal(signal.SIGTERM, lambda signum, stack_frame: sys.exit(1)) from couchpotato.core.event import addEvent - addEvent('app.after_shutdown', self.afterShutdown) + addEvent('app.do_shutdown', self.setRestart) - def afterShutdown(self, restart): + def setRestart(self, restart): self.do_restart = restart + return True def onExit(self, signal, frame): from couchpotato.core.event import fireEvent - fireEvent('app.shutdown', single = True) + fireEvent('app.shutdown', single=True) def run(self): @@ -90,14 +96,15 @@ def restart(self): # remove old pidfile first try: if self.runAsDaemon(): - try: self.daemon.stop() - except: pass + try: + self.daemon.stop() + except: + pass except: self.log.critical(traceback.format_exc()) # Release log files and shutdown logger logging.shutdown() - time.sleep(3) args = [sys.executable] + [os.path.join(base_path, os.path.basename(__file__))] + sys.argv[1:] subprocess.Popen(args) @@ -117,7 +124,7 @@ def daemonize(self): self.log.critical(traceback.format_exc()) def runAsDaemon(self): - return self.options.daemon and self.options.pid_file + return self.options.daemon and self.options.pid_file if __name__ == '__main__': @@ -132,14 +139,15 @@ 
def runAsDaemon(self): pass except SystemExit: raise - except socket.error as (nr, msg): + except socket.error as e: # log when socket receives SIGINT, but continue. # previous code would have skipped over other types of IO errors too. + nr, msg = e if nr != 4: try: l.log.critical(traceback.format_exc()) except: - print traceback.format_exc() + print(traceback.format_exc()) raise except: try: @@ -148,7 +156,7 @@ def runAsDaemon(self): if l: l.log.critical(traceback.format_exc()) else: - print traceback.format_exc() + print(traceback.format_exc()) except: - print traceback.format_exc() + print(traceback.format_exc()) raise diff --git a/CouchPotatoServer.pyproj b/CouchPotatoServer.pyproj new file mode 100644 index 0000000000..f35ff48e3f --- /dev/null +++ b/CouchPotatoServer.pyproj @@ -0,0 +1,1648 @@ +О╩© + + + Debug + 2.0 + {854ac11a-81d3-4fcf-b9cb-69e38e5adc75} + + CouchPotato.py + + . + . + {888888a0-9f3d-457c-b088-3a5042f75d52} + Standard Python launcher + + + + + + + 10.0 + $(MSBuildExtensionsPath32)\Microsoft\VisualStudio\v$(VisualStudioVersion)\Python Tools\Microsoft.PythonTools.targets + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Code + + + Code + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Code + + + Code + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/CouchPotatoServer.pyproj.user b/CouchPotatoServer.pyproj.user new file mode 100644 index 0000000000..55f44b95fe --- /dev/null +++ b/CouchPotatoServer.pyproj.user @@ -0,0 +1,6 @@ +О╩© + + + ShowAllFiles + + \ No newline at end of file diff --git a/CouchPotatoServer.sln b/CouchPotatoServer.sln new file mode 100644 index 0000000000..53a27a87df --- /dev/null +++ b/CouchPotatoServer.sln @@ -0,0 +1,20 @@ +О╩© +Microsoft Visual Studio Solution File, Format Version 12.00 +# Visual Studio 14 +VisualStudioVersion = 14.0.25420.1 +MinimumVisualStudioVersion = 10.0.40219.1 +Project("{888888A0-9F3D-457C-B088-3A5042F75D52}") = "CouchPotatoServer", "CouchPotatoServer.pyproj", "{854AC11A-81D3-4FCF-B9CB-69E38E5ADC75}" +EndProject +Global + GlobalSection(SolutionConfigurationPlatforms) = preSolution + Debug|Any CPU = Debug|Any CPU + Release|Any CPU = Release|Any CPU + EndGlobalSection + GlobalSection(ProjectConfigurationPlatforms) = postSolution + {854AC11A-81D3-4FCF-B9CB-69E38E5ADC75}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {854AC11A-81D3-4FCF-B9CB-69E38E5ADC75}.Release|Any CPU.ActiveCfg = Release|Any CPU + EndGlobalSection + GlobalSection(SolutionProperties) = preSolution + HideSolutionNode = FALSE + EndGlobalSection +EndGlobal diff --git a/CouchPotatoServer.v12.suo b/CouchPotatoServer.v12.suo new file mode 100644 index 0000000000..c504008783 Binary files /dev/null and b/CouchPotatoServer.v12.suo differ diff --git a/Gruntfile.js b/Gruntfile.js new file mode 100644 index 0000000000..41db92e5dd --- /dev/null +++ b/Gruntfile.js @@ -0,0 +1,249 @@ +'use strict'; + 
+module.exports = function(grunt){ + require('jit-grunt')(grunt); + require('time-grunt')(grunt); + + grunt.loadNpmTasks('grunt-shell-spawn'); + + // Configurable paths + var config = { + python: grunt.file.exists('./_env/bin/python') ? './_env/bin/python' : 'python', + // colorful output on travis is not required, so disable it there, using travic'es env var : + colorful_tests_output: ! process.env.TRAVIS, + tmp: '.tmp', + base: 'couchpotato', + css_dest: 'couchpotato/static/style/combined.min.css', + scripts_vendor_dest: 'couchpotato/static/scripts/combined.vendor.min.js', + scripts_base_dest: 'couchpotato/static/scripts/combined.base.min.js', + scripts_plugins_dest: 'couchpotato/static/scripts/combined.plugins.min.js' + }; + + var vendor_scripts_files = [ + 'couchpotato/static/scripts/vendor/mootools.js', + 'couchpotato/static/scripts/vendor/mootools_more.js', + 'couchpotato/static/scripts/vendor/Array.stableSort.js', + 'couchpotato/static/scripts/vendor/history.js', + 'couchpotato/static/scripts/vendor/dynamics.js', + 'couchpotato/static/scripts/vendor/fastclick.js', + 'couchpotato/static/scripts/vendor/requestAnimationFrame.js' + ]; + + var scripts_files = [ + 'couchpotato/static/scripts/library/uniform.js', + 'couchpotato/static/scripts/library/question.js', + 'couchpotato/static/scripts/library/scrollspy.js', + 'couchpotato/static/scripts/couchpotato.js', + 'couchpotato/static/scripts/api.js', + 'couchpotato/static/scripts/page.js', + 'couchpotato/static/scripts/block.js', + 'couchpotato/static/scripts/block/navigation.js', + 'couchpotato/static/scripts/block/header.js', + 'couchpotato/static/scripts/block/footer.js', + 'couchpotato/static/scripts/block/menu.js', + 'couchpotato/static/scripts/page/home.js', + 'couchpotato/static/scripts/page/settings.js', + 'couchpotato/static/scripts/page/about.js', + 'couchpotato/static/scripts/page/login.js' + ]; + + grunt.initConfig({ + + // Project settings + config: config, + + // Make sure code styles are up to par 
and there are no obvious mistakes + jshint: { + options: { + reporter: require('jshint-stylish'), + unused: false, + camelcase: false, + devel: true + }, + all: [ + '<%= config.base %>/{,**/}*.js', + '!<%= config.base %>/static/scripts/vendor/{,**/}*.js', + '!<%= config.base %>/static/scripts/combined.*.js' + ] + }, + + // Compiles Sass to CSS and generates necessary files if requested + sass: { + options: { + compass: true, + update: true, + sourcemap: 'none' + }, + server: { + files: [{ + expand: true, + cwd: '<%= config.base %>/', + src: ['**/*.scss'], + dest: '<%= config.tmp %>/styles/', + ext: '.css' + }] + } + }, + + // Empties folders to start fresh + clean: { + server: '.tmp' + }, + + // Add vendor prefixed styles + autoprefixer: { + options: { + browsers: ['last 2 versions'], + remove: false, + cascade: false + }, + dist: { + files: [{ + expand: true, + cwd: '<%= config.tmp %>/styles/', + src: '{,**/}*.css', + dest: '<%= config.tmp %>/styles/' + }] + } + }, + + cssmin: { + dist: { + options: { + keepBreaks: true + }, + files: { + '<%= config.css_dest %>': ['<%= config.tmp %>/styles/**/*.css'] + } + } + }, + + uglify: { + options: { + mangle: false, + compress: false, + beautify: true, + screwIE8: true + }, + vendor: { + files: { + '<%= config.scripts_vendor_dest %>': vendor_scripts_files + } + }, + base: { + files: { + '<%= config.scripts_base_dest %>': scripts_files + } + }, + plugins: { + files: { + '<%= config.scripts_plugins_dest %>': ['<%= config.base %>/core/**/*.js'] + } + } + }, + + shell: { + runCouchPotato: { + command: '<%= config.python %> CouchPotato.py', + options: { + stdout: true, + stderr: true + } + } + }, + + // COOL TASKS ============================================================== + watch: { + scss: { + files: ['<%= config.base %>/**/*.{scss,sass}'], + tasks: ['sass:server', 'autoprefixer', 'cssmin'] + }, + js: { + files: [ + '<%= config.base %>/**/*.js', + '!<%= config.base %>/static/scripts/combined.*.js' + ], + tasks: 
['uglify:base', 'uglify:plugins', 'jshint'] + }, + livereload: { + options: { + livereload: 35729 + }, + files: [ + '<%= config.css_dest %>', + '<%= config.scripts_vendor_dest %>', + '<%= config.scripts_base_dest %>', + '<%= config.scripts_plugins_dest %>' + ] + } + }, + + // TEST TASKS ============================================================== + env: { + options: { + }, + + test:{ + concat: { + PYTHONPATH: { + 'value' : './libs', + 'delimiter' : ':', + } + } + } + }, + + // for python tests + nose: { + options: { + verbosity: 2, + exe: true, + config: './.nosetestsrc', + // 'rednose' is a colored output for nose test-runner. But we do not requre colors on travis-ci + rednose: config.colorful_tests_output, + externalNose: true, + }, + + test: { + }, + + coverage: { + options:{ + with_coverage: true, + cover_package: "couchpotato", + cover_branches: true, + cover_xml: true, + with_doctest: true, + with_xunit: true, + cover_tests: false, + cover_erase: true, + } + }, + }, + + concurrent: { + options: { + logConcurrentOutput: true + }, + tasks: ['shell:runCouchPotato', 'watch'] + } + + }); + + // testing task + grunt.registerTask('test', ['env:test', 'nose:test']); + + // currently, coverage does not generate local html report, but it is useful and possible + grunt.registerTask('coverage', ['env:test', 'nose:coverage']); + + grunt.registerTask('default', [ + 'clean:server', + 'sass:server', + 'autoprefixer', + 'cssmin', + 'uglify:vendor', + 'uglify:base', + 'uglify:plugins', + 'concurrent' + ]); +}; diff --git a/README.md b/README.md index 8d1e5b89fc..aebf591ccc 100644 --- a/README.md +++ b/README.md @@ -1,42 +1,87 @@ -CouchPotato Server +CouchPotato ===== +[![Join the chat at https://gitter.im/CouchPotato/CouchPotatoServer](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/CouchPotato/CouchPotatoServer?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) +[![Build 
Status](https://travis-ci.org/CouchPotato/CouchPotatoServer.svg?branch=master)](https://travis-ci.org/CouchPotato/CouchPotatoServer) +[![Coverage Status](https://coveralls.io/repos/CouchPotato/CouchPotatoServer/badge.svg?branch=master&service=github)](https://coveralls.io/github/CouchPotato/CouchPotatoServer?branch=master) + CouchPotato (CP) is an automatic NZB and torrent downloader. You can keep a "movies I want"-list and it will search for NZBs/torrents of these movies every X hours. Once a movie is found, it will send it to SABnzbd or download the torrent to a specified directory. ## Running from Source -CouchPotatoServer can be run from source. This will use *git* as updater, so make sure that is installed also. +CouchPotatoServer can be run from source. This will use *git* as updater, so make sure that is installed. -Windows, see [the CP forum](http://couchpota.to/forum/showthread.php?tid=14) for more details: +Windows, see [the CP forum](http://couchpota.to/forum/viewtopic.php?t=14) for more details: * Install [Python 2.7](http://www.python.org/download/releases/2.7.3/) * Then install [PyWin32 2.7](http://sourceforge.net/projects/pywin32/files/pywin32/Build%20217/) and [GIT](http://git-scm.com/) * If you come and ask on the forums 'why directory selection no work?', I will kill a kitten, also this is because you need PyWin32 * Open up `Git Bash` (or CMD) and go to the folder you want to install CP. Something like Program Files. -* Run `git clone https://github.com/RuudBurger/CouchPotatoServer.git`. +* Run `git clone https://github.com/CouchPotato/CouchPotatoServer.git`. 
* You can now start CP via `CouchPotatoServer\CouchPotato.py` to start -* Your browser should open up, but if it doesn't go to: `http://localhost:5050/` +* Your browser should open up, but if it doesn't go to `http://localhost:5050/` -OSx: +OS X: * If you're on Leopard (10.5) install Python 2.6+: [Python 2.6.5](http://www.python.org/download/releases/2.6.5/) * Install [GIT](http://git-scm.com/) +* Install [LXML](http://lxml.de/installation.html) for better/faster website scraping * Open up `Terminal` * Go to your App folder `cd /Applications` -* Run `git clone https://github.com/RuudBurger/CouchPotatoServer.git` +* Run `git clone https://github.com/CouchPotato/CouchPotatoServer.git` * Then do `python CouchPotatoServer/CouchPotato.py` -* Your browser should open up, but if it doesn't go to: `http://localhost:5050/` +* Your browser should open up, but if it doesn't go to `http://localhost:5050/` -Linux (ubuntu / debian): +Linux: -* Install [GIT](http://git-scm.com/) with `apt-get install git-core` +* (Ubuntu / Debian) Install [GIT](http://git-scm.com/) with `apt-get install git-core` +* (Fedora / CentOS) Install [GIT](http://git-scm.com/) with `yum install git` +* Install [LXML](http://lxml.de/installation.html) for better/faster website scraping * 'cd' to the folder of your choosing. -* Run `git clone https://github.com/RuudBurger/CouchPotatoServer.git` +* Install [PyOpenSSL](https://pypi.python.org/pypi/pyOpenSSL) with `pip install --upgrade pyopenssl` +* Run `git clone https://github.com/CouchPotato/CouchPotatoServer.git` * Then do `python CouchPotatoServer/CouchPotato.py` to start -* To run on boot copy the init script. `sudo cp CouchPotatoServer/init/ubuntu /etc/init.d/couchpotato` -* Change the paths inside the init script. `sudo nano /etc/init.d/couchpotato` -* Make it executable. `sudo chmod +x /etc/init.d/couchpotato` -* Add it to defaults. 
`sudo update-rc.d couchpotato defaults` -* Open your browser and go to: `http://localhost:5050/` +* (Ubuntu / Debian with upstart) To run on boot copy the init script `sudo cp CouchPotatoServer/init/ubuntu /etc/init.d/couchpotato` +* (Ubuntu / Debian with upstart) Copy the default paths file `sudo cp CouchPotatoServer/init/ubuntu.default /etc/default/couchpotato` +* (Ubuntu / Debian with upstart) Change the paths inside the default file `sudo nano /etc/default/couchpotato` +* (Ubuntu / Debian with upstart) Make it executable `sudo chmod +x /etc/init.d/couchpotato` +* (Ubuntu / Debian with upstart) Add it to defaults `sudo update-rc.d couchpotato defaults` +* (Linux with systemd) To run on boot copy the systemd config `sudo cp CouchPotatoServer/init/couchpotato.service /etc/systemd/system/couchpotato.service` +* (Linux with systemd) Update the systemd config file with your user and path to CouchPotato.py +* (Linux with systemd) Enable it at boot with `sudo systemctl enable couchpotato` +* Open your browser and go to `http://localhost:5050/` + +Docker: +* You can use [linuxserver.io](https://github.com/linuxserver/docker-couchpotato) or [razorgirl's](https://github.com/razorgirl/docker-couchpotato) to quickly build your own isolated app container. It's based on the Linux instructions above. For more info about Docker check out the [official website](https://www.docker.com). 
+ +FreeBSD: + +* Become root with `su` +* Update your repo catalog `pkg update` +* Install required tools `pkg install python py27-sqlite3 fpc-libcurl docbook-xml git-lite` +* For default install location and running as root `cd /usr/local` +* If running as root, expects python here `ln -s /usr/local/bin/python /usr/bin/python` +* Run `git clone https://github.com/CouchPotato/CouchPotatoServer.git` +* Copy the startup script `cp CouchPotatoServer/init/freebsd /usr/local/etc/rc.d/couchpotato` +* Make startup script executable `chmod 555 /usr/local/etc/rc.d/couchpotato` +* Add startup to boot `echo 'couchpotato_enable="YES"' >> /etc/rc.conf` +* Read the options at the top of `more /usr/local/etc/rc.d/couchpotato` +* If not default install, specify options with startup flags in `ee /etc/rc.conf` +* Finally, `service couchpotato start` +* Open your browser and go to: `http://server:5050/` + + +## Development + +Be sure you're running the latest version of [Python 2.7](http://python.org/). + +If you're going to add styling or doing some javascript work you'll need a few tools that build and compress scss -> css and combine the javascript files. [Node/NPM](https://nodejs.org/), [Grunt](http://gruntjs.com/installing-grunt), [Compass](http://compass-style.org/install/) + +After you've got these tools you can install the packages using `npm install`. Once this process has finished you can start CP using the command `grunt`. This will start all the needed tools and watches any files for changes. +You can now change css and javascript and it wil reload the page when needed. + +By default it will combine files used in the core folder. If you're adding a new .scss or .js file, you might need to add it and then restart the grunt process for it to combine it properly. + +Don't forget to enable development inside the CP settings. This disables some functions and also makes sure javascript errors are pushed to console instead of the log. 
diff --git a/config.rb b/config.rb new file mode 100644 index 0000000000..a26a2ee10e --- /dev/null +++ b/config.rb @@ -0,0 +1,44 @@ +# First, require any additional compass plugins installed on your system. +# require 'zen-grids' +# require 'susy' +# require 'breakpoint' + + +# Toggle this between :development and :production when deploying the CSS to the +# live server. Development mode will retain comments and spacing from the +# original Sass source and adds line numbering comments for easier debugging. +environment = :development +# environment = :development + +# In development, we can turn on the FireSass-compatible debug_info. +firesass = false +# firesass = true + + +# Location of the your project's resources. + + +# Set this to the root of your project. All resource locations above are +# considered to be relative to this path. +http_path = "/" + +# To use relative paths to assets in your compiled CSS files, set this to true. +# relative_assets = true + + +## +## You probably don't need to edit anything below this. +## +sass_dir = "./couchpotato/static/style" +css_dir = "./couchpotato/static/style" + +# You can select your preferred output style here (can be overridden via the command line): +# output_style = :expanded or :nested or :compact or :compressed +output_style = (environment == :development) ? :expanded : :compressed + +# To disable debugging comments that display the original location of your selectors. Uncomment: +# line_comments = false + +# Pass options to sass. For development, we turn on the FireSass-compatible +# debug_info if the firesass config variable above is true. +sass_options = (environment == :development && firesass == true) ? {:debug_info => true} : {} diff --git a/contributing.md b/contributing.md deleted file mode 100644 index 572dd3325a..0000000000 --- a/contributing.md +++ /dev/null @@ -1,15 +0,0 @@ -#So you feel like posting a bug, sending me a pull request or just telling me how awesome I am. No problem! 
- -##Just make sure you think of the following things: - - * Search through the existing (and closed) issues first. See if you can get your answer there. - * Double check the result manually, because it could be an external issue. - * Post logs! Without seeing what is going on, I can't reproduce the error. - * What is the movie + quality you are searching for. - * What are you settings for the specific problem. - * What providers are you using. (While your logs include these, scanning through hundred of lines of log isn't my hobby). - * Give me a short step by step of how to reproduce. - * What hardware / OS are you using and what are the limits? NAS can be slow and maybe have a different python installed then when you use CP on OSX or Windows for example. - * I will mark issues with the "can't reproduce" tag. Don't go asking me "why closed" if it clearly says the issue in the tag ;) - -**If I don't get enough info, the change of the issue getting closed is a lot bigger ;)** \ No newline at end of file diff --git a/couchpotato/__init__.py b/couchpotato/__init__.py index 38b3617405..9a47120197 100644 --- a/couchpotato/__init__.py +++ b/couchpotato/__init__.py @@ -1,83 +1,205 @@ -from couchpotato.api import api_docs, api_docs_missing -from couchpotato.core.auth import requires_auth +import os +import time +import traceback + +from couchpotato.api import api_docs, api_docs_missing, api from couchpotato.core.event import fireEvent -from couchpotato.core.helpers.request import getParams, jsonified -from couchpotato.core.helpers.variable import md5 +from couchpotato.core.helpers.encoding import sp +from couchpotato.core.helpers.variable import md5, tryInt from couchpotato.core.logger import CPLog from couchpotato.environment import Env -from flask.app import Flask -from flask.blueprints import Blueprint -from flask.globals import request -from flask.helpers import url_for -from flask.templating import render_template -from sqlalchemy.engine import create_engine -from 
sqlalchemy.orm import scoped_session -from sqlalchemy.orm.session import sessionmaker -from werkzeug.utils import redirect -import os -import time +from tornado import template +from tornado.web import RequestHandler, authenticated + log = CPLog(__name__) -app = Flask(__name__, static_folder = 'nope') -web = Blueprint('web', __name__) +views = {} +template_loader = template.Loader(os.path.join(os.path.dirname(__file__), 'templates')) + + +class BaseHandler(RequestHandler): + + def get_current_user(self): + username = Env.setting('username') + password = Env.setting('password') + + if username and password: + return self.get_secure_cookie('user') + else: # Login when no username or password are set + return True + + +# Main web handler +class WebHandler(BaseHandler): + + @authenticated + def get(self, route, *args, **kwargs): + route = route.strip('/') + if not views.get(route): + page_not_found(self) + return + + try: + self.write(views[route](self)) + except: + log.error("Failed doing web request '%s': %s", (route, traceback.format_exc())) + self.write({'success': False, 'error': 'Failed returning results'}) + + +def addView(route, func): + views[route] = func + + +def get_db(): + return Env.get('db') + + +# Web view +def index(*args): + return template_loader.load('index.html').generate(sep = os.sep, fireEvent = fireEvent, Env = Env) +addView('', index) + + +# Web view +def robots(handler): + handler.set_header('Content-Type', 'text/plain') + + return 'User-agent: * \n' \ + 'Disallow: /' +addView('robots.txt', robots) + + +# Manifest +def manifest(handler): + web_base = Env.get('web_base') + static_base = Env.get('static_path') + + lines = [ + 'CACHE MANIFEST', + '# %s theme' % ('dark' if Env.setting('dark_theme') else 'light'), + '', + 'CACHE:', + '' + ] + + if not Env.get('dev'): + # CSS + for url in fireEvent('clientscript.get_styles', single = True): + lines.append(web_base + url) + # Scripts + for url in fireEvent('clientscript.get_scripts', single = True): 
+ lines.append(web_base + url) -def get_session(engine = None): - return Env.getSession(engine) + # Favicon + lines.append(static_base + 'images/favicon.ico') -def addView(route, func, static = False): - web.add_url_rule(route + ('' if static else '/'), endpoint = route if route else 'index', view_func = func) + # Fonts + font_folder = sp(os.path.join(Env.get('app_dir'), 'couchpotato', 'static', 'fonts')) + for subfolder, dirs, files in os.walk(font_folder, topdown = False): + for file in files: + if '.woff' in file: + lines.append(static_base + 'fonts/' + file + ('?%s' % os.path.getmtime(os.path.join(font_folder, file)))) + else: + lines.append('# Not caching anything in dev mode') + + # End lines + lines.extend(['', + 'NETWORK: ', + '*']) + + handler.set_header('Content-Type', 'text/cache-manifest') + return '\n'.join(lines) + +addView('couchpotato.appcache', manifest) -""" Web view """ -@web.route('/') -@requires_auth -def index(): - return render_template('index.html', sep = os.sep, fireEvent = fireEvent, env = Env) -""" Api view """ -@web.route('docs/') -@requires_auth -def apiDocs(): - from couchpotato import app - routes = [] - for route, x in sorted(app.view_functions.iteritems()): - if route[0:4] == 'api.': - routes += [route[4:].replace('::', '.')] +# API docs +def apiDocs(*args): + routes = list(api.keys()) if api_docs.get(''): del api_docs[''] del api_docs_missing[''] - return render_template('api.html', fireEvent = fireEvent, routes = sorted(routes), api_docs = api_docs, api_docs_missing = sorted(api_docs_missing)) -@web.route('getkey/') -def getApiKey(): + return template_loader.load('api.html').generate(fireEvent = fireEvent, routes = sorted(routes), api_docs = api_docs, api_docs_missing = sorted(api_docs_missing), Env = Env) - api = None - params = getParams() - username = Env.setting('username') - password = Env.setting('password') +addView('docs', apiDocs) - if (params.get('u') == md5(username) or not username) and (params.get('p') == password or 
not password): - api = Env.setting('api_key') - return jsonified({ - 'success': api is not None, - 'api_key': api - }) +# Database debug manager +def databaseManage(*args): + return template_loader.load('database.html').generate(fireEvent = fireEvent, Env = Env) -@app.errorhandler(404) -def page_not_found(error): - index_url = url_for('web.index') - url = request.path[len(index_url):] +addView('database', databaseManage) - if url[:3] != 'api': - if request.path != '/': - r = request.url.replace(request.path, index_url + '#' + url) + +# Make non basic auth option to get api key +class KeyHandler(RequestHandler): + + def get(self, *args, **kwargs): + api_key = None + + try: + username = Env.setting('username') + password = Env.setting('password') + + if (self.get_argument('u') == md5(username) or not username) and (self.get_argument('p') == password or not password): + api_key = Env.setting('api_key') + + self.write({ + 'success': api_key is not None, + 'api_key': api_key + }) + except: + log.error('Failed doing key request: %s', (traceback.format_exc())) + self.write({'success': False, 'error': 'Failed returning results'}) + + +class LoginHandler(BaseHandler): + + def get(self, *args, **kwargs): + + if self.get_current_user(): + self.redirect(Env.get('web_base')) else: - r = '%s%s' % (request.url.rstrip('/'), index_url + '#' + url) - return redirect(r) + self.write(template_loader.load('login.html').generate(sep = os.sep, fireEvent = fireEvent, Env = Env)) + + def post(self, *args, **kwargs): + + api_key = None + + username = Env.setting('username') + password = Env.setting('password') + + if (self.get_argument('username') == username or not username) and (md5(self.get_argument('password')) == password or not password): + api_key = Env.setting('api_key') + + if api_key: + remember_me = tryInt(self.get_argument('remember_me', default = 0)) + self.set_secure_cookie('user', api_key, expires_days = 30 if remember_me > 0 else None) + + self.redirect(Env.get('web_base')) 
+ + +class LogoutHandler(BaseHandler): + + def get(self, *args, **kwargs): + self.clear_cookie('user') + self.redirect('%slogin/' % Env.get('web_base')) + + +def page_not_found(rh): + index_url = Env.get('web_base') + url = rh.request.uri[len(index_url):] + + if url[:3] != 'api': + r = index_url + '#' + url.lstrip('/') + rh.redirect(r) else: - time.sleep(0.1) - return 'Wrong API key used', 404 + if not Env.get('dev'): + time.sleep(0.1) + rh.set_status(404) + rh.write('Wrong API key used') diff --git a/couchpotato/api.py b/couchpotato/api.py index 718527c937..b5754d8209 100644 --- a/couchpotato/api.py +++ b/couchpotato/api.py @@ -1,61 +1,170 @@ -from flask.blueprints import Blueprint -from flask.helpers import url_for +from functools import wraps +from threading import Thread +import json +import threading +import traceback +import urllib + +from couchpotato.core.helpers.request import getParams +from couchpotato.core.logger import CPLog +from tornado.ioloop import IOLoop from tornado.web import RequestHandler, asynchronous -from werkzeug.utils import redirect -api = Blueprint('api', __name__) + +log = CPLog(__name__) + + +api = {} +api_locks = {} +api_nonblock = {} + api_docs = {} api_docs_missing = [] -api_nonblock = {} +def run_async(func): + @wraps(func) + def async_func(*args, **kwargs): + func_hl = Thread(target = func, args = args, kwargs = kwargs) + func_hl.start() + + return async_func + +@run_async +def run_handler(route, kwargs, callback = None): + try: + res = api[route](**kwargs) + callback(res, route) + except: + log.error('Failed doing api request "%s": %s', (route, traceback.format_exc())) + callback({'success': False, 'error': 'Failed returning results'}, route) + + +# NonBlock API handler class NonBlockHandler(RequestHandler): - def __init__(self, application, request, **kwargs): - cls = NonBlockHandler - cls.stoppers = [] - super(NonBlockHandler, self).__init__(application, request, **kwargs) + stopper = None @asynchronous - def get(self, route): 
- cls = NonBlockHandler + def get(self, route, *args, **kwargs): + route = route.strip('/') start, stop = api_nonblock[route] - cls.stoppers.append(stop) + self.stopper = stop - start(self.onNewMessage, last_id = self.get_argument("last_id", None)) + start(self.sendData, last_id = self.get_argument('last_id', None)) - def onNewMessage(self, response): - if self.request.connection.stream.closed(): - return - self.finish(response) + def sendData(self, response): + if not self.request.connection.stream.closed(): + try: + self.finish(response) + except: + log.debug('Failed doing nonblock request, probably already closed: %s', (traceback.format_exc())) + try: self.finish({'success': False, 'error': 'Failed returning results'}) + except: pass - def on_connection_close(self): - cls = NonBlockHandler + self.removeStopper() - for stop in cls.stoppers: - stop(self.onNewMessage) + def removeStopper(self): + if self.stopper: + self.stopper(self.sendData) - cls.stoppers = [] + self.stopper = None -def addApiView(route, func, static = False, docs = None, **kwargs): - api.add_url_rule(route + ('' if static else '/'), endpoint = route.replace('.', '::') if route else 'index', view_func = func, **kwargs) +def addNonBlockApiView(route, func_tuple, docs = None, **kwargs): + api_nonblock[route] = func_tuple + if docs: api_docs[route[4:] if route[0:4] == 'api.' 
else route] = docs else: api_docs_missing.append(route) -def addNonBlockApiView(route, func_tuple, docs = None, **kwargs): - api_nonblock[route] = func_tuple + +# Blocking API handler +class ApiHandler(RequestHandler): + route = None + + @asynchronous + def get(self, route, *args, **kwargs): + self.route = route = route.strip('/') + if not api.get(route): + self.write('API call doesn\'t seem to exist') + self.finish() + return + + # Create lock if it doesn't exist + if route in api_locks and not api_locks.get(route): + api_locks[route] = threading.Lock() + + api_locks[route].acquire() + + try: + + kwargs = {} + for x in self.request.arguments: + kwargs[x] = urllib.unquote(self.get_argument(x)) + + # Split array arguments + kwargs = getParams(kwargs) + kwargs['_request'] = self + + # Remove t random string + try: del kwargs['t'] + except: pass + + # Add async callback handler + run_handler(route, kwargs, callback = self.taskFinished) + + except: + log.error('Failed doing api request "%s": %s', (route, traceback.format_exc())) + try: + self.write({'success': False, 'error': 'Failed returning results'}) + self.finish() + except: + log.error('Failed write error "%s": %s', (route, traceback.format_exc())) + + self.unlock() + + post = get + + def taskFinished(self, result, route): + IOLoop.current().add_callback(self.sendData, result, route) + self.unlock() + + def sendData(self, result, route): + + if not self.request.connection.stream.closed(): + try: + # Check JSONP callback + jsonp_callback = self.get_argument('callback_func', default = None) + + if jsonp_callback: + self.set_header('Content-Type', 'text/javascript') + self.finish(str(jsonp_callback) + '(' + json.dumps(result) + ')') + elif isinstance(result, tuple) and result[0] == 'redirect': + self.redirect(result[1]) + else: + self.finish(result) + except UnicodeDecodeError: + log.error('Failed proper encode: %s', traceback.format_exc()) + except: + log.debug('Failed doing request, probably already closed: %s', 
(traceback.format_exc())) + try: self.finish({'success': False, 'error': 'Failed returning results'}) + except: pass + + def unlock(self): + try: api_locks[self.route].release() + except: pass + + +def addApiView(route, func, static = False, docs = None, **kwargs): + + if static: func(route) + else: + api[route] = func + api_locks[route] = threading.Lock() if docs: api_docs[route[4:] if route[0:4] == 'api.' else route] = docs else: api_docs_missing.append(route) - -""" Api view """ -def index(): - index_url = url_for('web.index') - return redirect(index_url + 'docs/') - -addApiView('', index) diff --git a/couchpotato/core/_base/_core.py b/couchpotato/core/_base/_core.py new file mode 100644 index 0000000000..47c8bb4e68 --- /dev/null +++ b/couchpotato/core/_base/_core.py @@ -0,0 +1,382 @@ +from uuid import uuid4 +import os +import platform +import signal +import time +import traceback +import webbrowser +import sys + +from couchpotato.api import addApiView +from couchpotato.core.event import fireEvent, addEvent +from couchpotato.core.helpers.variable import cleanHost, md5, isSubFolder, compareVersions +from couchpotato.core.logger import CPLog +from couchpotato.core.plugins.base import Plugin +from couchpotato.environment import Env +from tornado.ioloop import IOLoop + + +log = CPLog(__name__) + +autoload = 'Core' + + +class Core(Plugin): + + ignore_restart = [ + 'Core.restart', 'Core.shutdown', + 'Updater.check', 'Updater.autoUpdate', + ] + shutdown_started = False + + def __init__(self): + addApiView('app.shutdown', self.shutdown, docs = { + 'desc': 'Shutdown the app.', + 'return': {'type': 'string: shutdown'} + }) + addApiView('app.restart', self.restart, docs = { + 'desc': 'Restart the app.', + 'return': {'type': 'string: restart'} + }) + addApiView('app.available', self.available, docs = { + 'desc': 'Check if app available.' + }) + addApiView('app.version', self.versionView, docs = { + 'desc': 'Get version.' 
+ }) + + addEvent('app.shutdown', self.shutdown) + addEvent('app.restart', self.restart) + addEvent('app.load', self.launchBrowser, priority = 1) + addEvent('app.base_url', self.createBaseUrl) + addEvent('app.api_url', self.createApiUrl) + addEvent('app.version', self.version) + addEvent('app.load', self.checkDataDir) + addEvent('app.load', self.cleanUpFolders) + addEvent('app.load.after', self.dependencies) + + addEvent('setting.save.core.password', self.md5Password) + addEvent('setting.save.core.api_key', self.checkApikey) + + # Make sure we can close-down with ctrl+c properly + if not Env.get('desktop'): + self.signalHandler() + + # Set default urlopen timeout + import socket + socket.setdefaulttimeout(30) + + # Don't check ssl by default + try: + if sys.version_info >= (2, 7, 9): + import ssl + ssl._create_default_https_context = ssl._create_unverified_context + except: + log.debug('Failed setting default ssl context: %s', traceback.format_exc()) + + def dependencies(self): + + # Check if lxml is available + try: from lxml import etree + except: log.error('LXML not available, please install for better/faster scraping support: `http://lxml.de/installation.html`') + + try: + import OpenSSL + v = OpenSSL.__version__ + v_needed = '0.15' + if compareVersions(OpenSSL.__version__, v_needed) < 0: + log.error('OpenSSL installed but %s is needed while %s is installed. 
Run `pip install pyopenssl --upgrade`', (v_needed, v)) + + try: + import ssl + log.debug('OpenSSL detected: pyopenssl (%s) using OpenSSL (%s)', (v, ssl.OPENSSL_VERSION)) + except: + pass + except: + log.error('OpenSSL not available, please install for better requests validation: `https://pyopenssl.readthedocs.org/en/latest/install.html`: %s', traceback.format_exc()) + + def md5Password(self, value): + return md5(value) if value else '' + + def checkApikey(self, value): + return value if value and len(value) > 3 else uuid4().hex + + def checkDataDir(self): + if isSubFolder(Env.get('data_dir'), Env.get('app_dir')): + log.error('You should NOT use your CouchPotato directory to save your settings in. Files will get overwritten or be deleted.') + + return True + + def cleanUpFolders(self): + only_clean = ['couchpotato', 'libs', 'init'] + self.deleteEmptyFolder(Env.get('app_dir'), show_error = False, only_clean = only_clean) + + def available(self, **kwargs): + return { + 'success': True + } + + def shutdown(self, **kwargs): + if self.shutdown_started: + return False + + def shutdown(): + self.initShutdown() + + if IOLoop.current()._closing: + shutdown() + else: + IOLoop.current().add_callback(shutdown) + + return 'shutdown' + + def restart(self, **kwargs): + if self.shutdown_started: + return False + + def restart(): + self.initShutdown(restart = True) + IOLoop.current().add_callback(restart) + + return 'restarting' + + def initShutdown(self, restart = False): + if self.shutdown_started: + log.info('Already shutting down') + return + + log.info('Shutting down' if not restart else 'Restarting') + + self.shutdown_started = True + + fireEvent('app.do_shutdown', restart = restart) + log.debug('Every plugin got shutdown event') + + loop = True + starttime = time.time() + while loop: + log.debug('Asking who is running') + still_running = fireEvent('plugin.running', merge = True) + log.debug('Still running: %s', still_running) + + if len(still_running) == 0: + break + elif 
starttime < time.time() - 30: # Always force break after 30s wait + break + + running = list(set(still_running) - set(self.ignore_restart)) + if len(running) > 0: + log.info('Waiting on plugins to finish: %s', running) + else: + loop = False + + time.sleep(1) + + log.debug('Safe to shutdown/restart') + + loop = IOLoop.current() + + try: + if not loop._closing: + loop.stop() + except RuntimeError: + pass + except: + log.error('Failed shutting down the server: %s', traceback.format_exc()) + + fireEvent('app.after_shutdown', restart = restart) + + def launchBrowser(self): + + if Env.setting('launch_browser'): + log.info('Launching browser') + + url = self.createBaseUrl() + try: + webbrowser.open(url, 2, 1) + except: + try: + webbrowser.open(url, 1, 1) + except: + log.error('Could not launch a browser.') + + def createBaseUrl(self): + host = Env.setting('host') + if host == '0.0.0.0' or host == '': + host = 'localhost' + port = Env.setting('port') + ssl = Env.setting('ssl_cert') and Env.setting('ssl_key') + + return '%s:%d%s' % (cleanHost(host, ssl = ssl).rstrip('/'), int(port), Env.get('web_base')) + + def createApiUrl(self): + return '%sapi/%s' % (self.createBaseUrl(), Env.setting('api_key')) + + def version(self): + ver = fireEvent('updater.info', single = True) or {'version': {}} + + if os.name == 'nt': platf = 'windows' + elif 'Darwin' in platform.platform(): platf = 'osx' + else: platf = 'linux' + + return '%s - %s-%s - v2' % (platf, ver.get('version').get('type') or 'unknown', ver.get('version').get('hash') or 'unknown') + + def versionView(self, **kwargs): + return { + 'version': self.version() + } + + def signalHandler(self): + if Env.get('daemonized'): return + + def signal_handler(*args, **kwargs): + fireEvent('app.shutdown', single = True) + + signal.signal(signal.SIGINT, signal_handler) + signal.signal(signal.SIGTERM, signal_handler) + + +config = [{ + 'name': 'core', + 'order': 1, + 'groups': [ + { + 'tab': 'general', + 'name': 'basics', + 'description': 
'Needs restart before changes take effect.', + 'wizard': True, + 'options': [ + { + 'name': 'username', + 'default': '', + 'ui-meta' : 'rw', + }, + { + 'name': 'password', + 'default': '', + 'type': 'password', + }, + { + 'name': 'port', + 'default': 5050, + 'type': 'int', + 'description': 'The port I should listen to.', + }, + { + 'name': 'languages', + 'default': 'fr, en', + 'ui-meta' : 'rw', + 'description': 'Langue pour le titre des films', + }, + { + 'name': 'ipv6', + 'default': 0, + 'type': 'bool', + 'description': 'Also bind the WebUI to ipv6 address', + }, + { + 'name': 'ssl_cert', + 'description': 'Path to SSL server.crt', + 'advanced': True, + }, + { + 'name': 'ssl_key', + 'description': 'Path to SSL server.key', + 'advanced': True, + }, + { + 'name': 'launch_browser', + 'default': True, + 'type': 'bool', + 'description': 'Launch the browser when I start.', + 'wizard': True, + }, + { + 'name': 'dark_theme', + 'default': False, + 'type': 'bool', + 'description': 'For people with sensitive skin', + 'wizard': True, + }, + + ], + }, + { + 'tab': 'general', + 'name': 'advanced', + 'description': "For those who know what they're doing", + 'advanced': True, + 'options': [ + { + 'name': 'api_key', + 'default': uuid4().hex, + 'ui-meta' : 'ro', + 'description': 'Let 3rd party app do stuff. Docs', + }, + { + 'name': 'dereferer', + 'default': 'http://www.nullrefer.com/?', + 'description': 'Derefer links to external sites, keep empty for no dereferer. Example: http://www.dereferer.org/? or http://www.nullrefer.com/?.', + }, + { + 'name': 'use_proxy', + 'default': 0, + 'type': 'bool', + 'description': 'Route outbound connections via proxy. Currently, only HTTP(S) proxies are supported. ', + }, + { + 'name': 'proxy_server', + 'description': 'Override system default proxy server. Currently, only HTTP(S) proxies are supported. Ex. \"127.0.0.1:8080\". 
Keep empty to use system default proxy server.', + }, + { + 'name': 'proxy_username', + 'description': 'Only HTTP Basic Auth is supported. Leave blank to disable authentication.', + }, + { + 'name': 'proxy_password', + 'type': 'password', + 'description': 'Leave blank for no password.', + }, + { + 'name': 'bookmarklet_host', + 'description': 'Override default bookmarklet host. This can be useful in a reverse proxy environment. For example: "http://username:password@customHost:1020". Requires restart to take effect.', + 'advanced': True, + }, + { + 'name': 'debug', + 'default': 0, + 'type': 'bool', + 'description': 'Enable debugging.', + }, + { + 'name': 'development', + 'default': 0, + 'type': 'bool', + 'description': 'Enable this if you\'re developing, and NOT in any other case, thanks.', + }, + { + 'name': 'data_dir', + 'type': 'directory', + 'description': 'Where cache/logs/etc are stored. Keep empty for defaults.', + }, + { + 'name': 'url_base', + 'default': '', + 'description': 'When using mod_proxy use this to append the url with this.', + }, + { + 'name': 'permission_folder', + 'default': '0755', + 'label': 'Folder CHMOD', + 'description': 'Can be either decimal (493) or octal (leading zero: 0755). 
Calculate the correct value', + }, + { + 'name': 'permission_file', + 'default': '0644', + 'label': 'File CHMOD', + 'description': 'See Folder CHMOD description, but for files', + }, + ], + }, + ], +}] diff --git a/couchpotato/core/_base/_core/__init__.py b/couchpotato/core/_base/_core/__init__.py deleted file mode 100644 index c8c3fda68f..0000000000 --- a/couchpotato/core/_base/_core/__init__.py +++ /dev/null @@ -1,100 +0,0 @@ -from .main import Core -from uuid import uuid4 - -def start(): - return Core() - -config = [{ - 'name': 'core', - 'order': 1, - 'groups': [ - { - 'tab': 'general', - 'name': 'basics', - 'description': 'Needs restart before changes take effect.', - 'wizard': True, - 'options': [ - { - 'name': 'username', - 'default': '', - }, - { - 'name': 'password', - 'default': '', - 'type': 'password', - }, - { - 'name': 'port', - 'default': 5050, - 'type': 'int', - 'description': 'The port I should listen to.', - }, - { - 'name': 'ssl_cert', - 'description': 'Path to SSL server.crt', - 'advanced': True, - }, - { - 'name': 'ssl_key', - 'description': 'Path to SSL server.key', - 'advanced': True, - }, - { - 'name': 'launch_browser', - 'default': True, - 'type': 'bool', - 'description': 'Launch the browser when I start.', - 'wizard': True, - }, - ], - }, - { - 'tab': 'general', - 'name': 'advanced', - 'description': "For those who know what they're doing", - 'advanced': True, - 'options': [ - { - 'name': 'api_key', - 'default': uuid4().hex, - 'readonly': 1, - 'description': 'Let 3rd party app do stuff. Docs', - }, - { - 'name': 'debug', - 'default': 0, - 'type': 'bool', - 'description': 'Enable debugging.', - }, - { - 'name': 'development', - 'default': 0, - 'type': 'bool', - 'description': 'Disables some checks/downloads for faster reloading.', - }, - { - 'name': 'data_dir', - 'type': 'directory', - 'description': 'Where cache/logs/etc are stored. 
Keep empty for defaults.', - }, - { - 'name': 'url_base', - 'default': '', - 'description': 'When using mod_proxy use this to append the url with this.', - }, - { - 'name': 'permission_folder', - 'default': '0755', - 'label': 'Folder CHMOD', - 'description': 'Can be either decimal (493) or octal (leading zero: 0755)', - }, - { - 'name': 'permission_file', - 'default': '0755', - 'label': 'File CHMOD', - 'description': 'Same as Folder CHMOD but for files', - }, - ], - }, - ], -}] diff --git a/couchpotato/core/_base/_core/main.py b/couchpotato/core/_base/_core/main.py deleted file mode 100644 index c91140fab3..0000000000 --- a/couchpotato/core/_base/_core/main.py +++ /dev/null @@ -1,185 +0,0 @@ -from couchpotato.api import addApiView -from couchpotato.core.event import fireEvent, addEvent -from couchpotato.core.helpers.request import jsonified -from couchpotato.core.helpers.variable import cleanHost, md5 -from couchpotato.core.logger import CPLog -from couchpotato.core.plugins.base import Plugin -from couchpotato.environment import Env -from tornado.ioloop import IOLoop -from uuid import uuid4 -import os -import platform -import signal -import time -import traceback -import webbrowser - -log = CPLog(__name__) - - -class Core(Plugin): - - ignore_restart = [ - 'Core.restart', 'Core.shutdown', - 'Updater.check', 'Updater.autoUpdate', - ] - shutdown_started = False - - def __init__(self): - addApiView('app.shutdown', self.shutdown, docs = { - 'desc': 'Shutdown the app.', - 'return': {'type': 'string: shutdown'} - }) - addApiView('app.restart', self.restart, docs = { - 'desc': 'Restart the app.', - 'return': {'type': 'string: restart'} - }) - addApiView('app.available', self.available, docs = { - 'desc': 'Check if app available.' - }) - addApiView('app.version', self.versionView, docs = { - 'desc': 'Get version.' 
- }) - - addEvent('app.shutdown', self.shutdown) - addEvent('app.restart', self.restart) - addEvent('app.load', self.launchBrowser, priority = 1) - addEvent('app.base_url', self.createBaseUrl) - addEvent('app.api_url', self.createApiUrl) - addEvent('app.version', self.version) - addEvent('app.load', self.checkDataDir) - - addEvent('setting.save.core.password', self.md5Password) - addEvent('setting.save.core.api_key', self.checkApikey) - - # Make sure we can close-down with ctrl+c properly - if not Env.get('desktop'): - self.signalHandler() - - def md5Password(self, value): - return md5(value.encode(Env.get('encoding'))) if value else '' - - def checkApikey(self, value): - return value if value and len(value) > 3 else uuid4().hex - - def checkDataDir(self): - if Env.get('app_dir') in Env.get('data_dir'): - log.error('You should NOT use your CouchPotato directory to save your settings in. Files will get overwritten or be deleted.') - - return True - - def available(self): - return jsonified({ - 'success': True - }) - - def shutdown(self): - if self.shutdown_started: - return False - - def shutdown(): - self.initShutdown() - IOLoop.instance().add_callback(shutdown) - - return 'shutdown' - - def restart(self): - if self.shutdown_started: - return False - - def restart(): - self.initShutdown(restart = True) - IOLoop.instance().add_callback(restart) - - return 'restarting' - - def initShutdown(self, restart = False): - if self.shutdown_started: - log.info('Already shutting down') - return - - log.info('Shutting down' if not restart else 'Restarting') - - self.shutdown_started = True - - fireEvent('app.do_shutdown') - log.debug('Every plugin got shutdown event') - - loop = True - starttime = time.time() - while loop: - log.debug('Asking who is running') - still_running = fireEvent('plugin.running', merge = True) - log.debug('Still running: %s', still_running) - - if len(still_running) == 0: - break - elif starttime < time.time() - 30: # Always force break after 30s wait - 
break - - running = list(set(still_running) - set(self.ignore_restart)) - if len(running) > 0: - log.info('Waiting on plugins to finish: %s', running) - else: - loop = False - - time.sleep(1) - - log.debug('Save to shutdown/restart') - - try: - IOLoop.instance().stop() - except RuntimeError: - pass - except: - log.error('Failed shutting down the server: %s', traceback.format_exc()) - - fireEvent('app.after_shutdown', restart = restart) - - def launchBrowser(self): - - if Env.setting('launch_browser'): - log.info('Launching browser') - - url = self.createBaseUrl() - try: - webbrowser.open(url, 2, 1) - except: - try: - webbrowser.open(url, 1, 1) - except: - log.error('Could not launch a browser.') - - def createBaseUrl(self): - host = Env.setting('host') - if host == '0.0.0.0' or host == '': - host = 'localhost' - port = Env.setting('port') - - return '%s:%d%s' % (cleanHost(host).rstrip('/'), int(port), '/' + Env.setting('url_base').lstrip('/') if Env.setting('url_base') else '') - - def createApiUrl(self): - return '%s/api/%s' % (self.createBaseUrl(), Env.setting('api_key')) - - def version(self): - ver = fireEvent('updater.info', single = True) - - if os.name == 'nt': platf = 'windows' - elif 'Darwin' in platform.platform(): platf = 'osx' - else: platf = 'linux' - - return '%s - %s-%s - v2' % (platf, ver.get('version')['type'], ver.get('version')['hash']) - - def versionView(self): - return jsonified({ - 'version': self.version() - }) - - def signalHandler(self): - if Env.get('daemonized'): return - - def signal_handler(signal, frame): - fireEvent('app.shutdown', single = True) - - signal.signal(signal.SIGINT, signal_handler) - signal.signal(signal.SIGTERM, signal_handler) diff --git a/couchpotato/core/_base/clientscript.py b/couchpotato/core/_base/clientscript.py new file mode 100644 index 0000000000..ab52003755 --- /dev/null +++ b/couchpotato/core/_base/clientscript.py @@ -0,0 +1,57 @@ +import os + +from couchpotato.core.event import addEvent +from 
couchpotato.core.helpers.variable import tryInt +from couchpotato.core.logger import CPLog +from couchpotato.core.plugins.base import Plugin +from couchpotato.environment import Env + + +log = CPLog(__name__) + +autoload = 'ClientScript' + + +class ClientScript(Plugin): + + paths = { + 'style': [ + 'style/combined.min.css', + ], + 'script': [ + 'scripts/combined.vendor.min.js', + 'scripts/combined.base.min.js', + 'scripts/combined.plugins.min.js', + ], + } + + def __init__(self): + addEvent('clientscript.get_styles', self.getStyles) + addEvent('clientscript.get_scripts', self.getScripts) + + self.makeRelative() + + def makeRelative(self): + + for static_type in self.paths: + + updates_paths = [] + for rel_path in self.paths.get(static_type): + file_path = os.path.join(Env.get('app_dir'), 'couchpotato', 'static', rel_path) + core_url = 'static/%s?%d' % (rel_path, tryInt(os.path.getmtime(file_path))) + + updates_paths.append(core_url) + + self.paths[static_type] = updates_paths + + def getStyles(self, *args, **kwargs): + return self.get('style', *args, **kwargs) + + def getScripts(self, *args, **kwargs): + return self.get('script', *args, **kwargs) + + def get(self, type): + if type in self.paths: + return self.paths[type] + + return [] diff --git a/couchpotato/core/_base/clientscript/__init__.py b/couchpotato/core/_base/clientscript/__init__.py deleted file mode 100644 index 8490eae7e8..0000000000 --- a/couchpotato/core/_base/clientscript/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -from .main import ClientScript - -def start(): - return ClientScript() - -config = [] diff --git a/couchpotato/core/_base/clientscript/main.py b/couchpotato/core/_base/clientscript/main.py deleted file mode 100644 index f2a30f6f67..0000000000 --- a/couchpotato/core/_base/clientscript/main.py +++ /dev/null @@ -1,169 +0,0 @@ -from couchpotato.core.event import addEvent -from couchpotato.core.helpers.variable import tryInt -from couchpotato.core.logger import CPLog -from 
couchpotato.core.plugins.base import Plugin -from couchpotato.environment import Env -from minify.cssmin import cssmin -from minify.jsmin import jsmin -import os -import traceback - -log = CPLog(__name__) - - -class ClientScript(Plugin): - - core_static = { - 'style': [ - 'style/main.css', - 'style/uniform.generic.css', - 'style/uniform.css', - 'style/settings.css', - ], - 'script': [ - 'scripts/library/mootools.js', - 'scripts/library/mootools_more.js', - 'scripts/library/prefix_free.js', - 'scripts/library/uniform.js', - 'scripts/library/form_replacement/form_check.js', - 'scripts/library/form_replacement/form_radio.js', - 'scripts/library/form_replacement/form_dropdown.js', - 'scripts/library/form_replacement/form_selectoption.js', - 'scripts/library/question.js', - 'scripts/library/scrollspy.js', - 'scripts/library/spin.js', - 'scripts/couchpotato.js', - 'scripts/api.js', - 'scripts/library/history.js', - 'scripts/page.js', - 'scripts/block.js', - 'scripts/block/navigation.js', - 'scripts/block/footer.js', - 'scripts/block/menu.js', - 'scripts/page/home.js', - 'scripts/page/wanted.js', - 'scripts/page/settings.js', - 'scripts/page/about.js', - 'scripts/page/manage.js', - ], - } - - - urls = {'style': {}, 'script': {}, } - minified = {'style': {}, 'script': {}, } - paths = {'style': {}, 'script': {}, } - comment = { - 'style': '/*** %s:%d ***/\n', - 'script': '// %s:%d\n' - } - - html = { - 'style': '', - 'script': '', - } - - def __init__(self): - addEvent('register_style', self.registerStyle) - addEvent('register_script', self.registerScript) - - addEvent('clientscript.get_styles', self.getStyles) - addEvent('clientscript.get_scripts', self.getScripts) - - addEvent('app.load', self.minify) - - self.addCore() - - def addCore(self): - - for static_type in self.core_static: - for rel_path in self.core_static.get(static_type): - file_path = os.path.join(Env.get('app_dir'), 'couchpotato', 'static', rel_path) - core_url = 'api/%s/static/%s?%s' % 
(Env.setting('api_key'), rel_path, tryInt(os.path.getmtime(file_path))) - - if static_type == 'script': - self.registerScript(core_url, file_path, position = 'front') - else: - self.registerStyle(core_url, file_path, position = 'front') - - - def minify(self): - - for file_type in ['style', 'script']: - ext = 'js' if file_type is 'script' else 'css' - positions = self.paths.get(file_type, {}) - for position in positions: - files = positions.get(position) - self._minify(file_type, files, position, position + '.' + ext) - - def _minify(self, file_type, files, position, out): - - cache = Env.get('cache_dir') - out_name = 'minified_' + out - out = os.path.join(cache, out_name) - - raw = [] - for file_path in files: - f = open(file_path, 'r').read() - - if file_type == 'script': - data = jsmin(f) - else: - data = cssmin(f) - data = data.replace('../images/', '../static/images/') - - raw.append({'file': file_path, 'date': int(os.path.getmtime(file_path)), 'data': data}) - - # Combine all files together with some comments - data = '' - for r in raw: - data += self.comment.get(file_type) % (r.get('file'), r.get('date')) - data += r.get('data') + '\n\n' - - self.createFile(out, data.strip()) - - if not self.minified.get(file_type): - self.minified[file_type] = {} - if not self.minified[file_type].get(position): - self.minified[file_type][position] = [] - - minified_url = 'api/%s/file.cache/%s?%s' % (Env.setting('api_key'), out_name, tryInt(os.path.getmtime(out))) - self.minified[file_type][position].append(minified_url) - - def getStyles(self, *args, **kwargs): - return self.get('style', *args, **kwargs) - - def getScripts(self, *args, **kwargs): - return self.get('script', *args, **kwargs) - - def get(self, type, as_html = False, location = 'head'): - - data = '' if as_html else [] - - try: - try: - if not Env.get('dev'): - return self.minified[type][location] - except: - pass - - return self.urls[type][location] - except: - log.error('Error getting minified %s, %s: %s', 
(type, location, traceback.format_exc())) - - return data - - def registerStyle(self, api_path, file_path, position = 'head'): - self.register(api_path, file_path, 'style', position) - - def registerScript(self, api_path, file_path, position = 'head'): - self.register(api_path, file_path, 'script', position) - - def register(self, api_path, file_path, type, location): - - if not self.urls[type].get(location): - self.urls[type][location] = [] - self.urls[type][location].append(api_path) - - if not self.paths[type].get(location): - self.paths[type][location] = [] - self.paths[type][location].append(file_path) diff --git a/couchpotato/core/_base/desktop.py b/couchpotato/core/_base/desktop.py new file mode 100644 index 0000000000..9a3656362f --- /dev/null +++ b/couchpotato/core/_base/desktop.py @@ -0,0 +1,39 @@ +from couchpotato.core.event import fireEvent, addEvent +from couchpotato.core.logger import CPLog +from couchpotato.core.plugins.base import Plugin +from couchpotato.environment import Env + +log = CPLog(__name__) + +autoload = 'Desktop' + + +if Env.get('desktop'): + + class Desktop(Plugin): + + def __init__(self): + + desktop = Env.get('desktop') + desktop.setSettings({ + 'base_url': fireEvent('app.base_url', single = True), + 'api_url': fireEvent('app.api_url', single = True), + 'api': Env.setting('api'), + }) + + # Events from desktop + desktop.addEvents({ + 'onClose': self.onClose, + }) + + # Events to desktop + addEvent('app.after_shutdown', desktop.afterShutdown) + addEvent('app.load', desktop.onAppLoad, priority = 110) + + def onClose(self, event): + return fireEvent('app.shutdown', single = True) + +else: + + class Desktop(Plugin): + pass diff --git a/couchpotato/core/_base/desktop/__init__.py b/couchpotato/core/_base/desktop/__init__.py deleted file mode 100644 index 064492f2e0..0000000000 --- a/couchpotato/core/_base/desktop/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -from .main import Desktop - -def start(): - return Desktop() - -config = [] diff 
--git a/couchpotato/core/_base/desktop/main.py b/couchpotato/core/_base/desktop/main.py deleted file mode 100644 index c3beff17e7..0000000000 --- a/couchpotato/core/_base/desktop/main.py +++ /dev/null @@ -1,36 +0,0 @@ -from couchpotato.core.event import fireEvent, addEvent -from couchpotato.core.logger import CPLog -from couchpotato.core.plugins.base import Plugin -from couchpotato.environment import Env - -log = CPLog(__name__) - -if Env.get('desktop'): - - class Desktop(Plugin): - - def __init__(self): - - desktop = Env.get('desktop') - desktop.setSettings({ - 'base_url': fireEvent('app.base_url', single = True), - 'api_url': fireEvent('app.api_url', single = True), - 'api': Env.setting('api'), - }) - - # Events from desktop - desktop.addEvents({ - 'onClose': self.onClose, - }) - - # Events to desktop - addEvent('app.after_shutdown', desktop.afterShutdown) - addEvent('app.load', desktop.onAppLoad, priority = 110) - - def onClose(self, event): - return fireEvent('app.shutdown', single = True) - -else: - - class Desktop(Plugin): - pass diff --git a/couchpotato/core/_base/downloader/__init__.py b/couchpotato/core/_base/downloader/__init__.py new file mode 100644 index 0000000000..0b9201bdbf --- /dev/null +++ b/couchpotato/core/_base/downloader/__init__.py @@ -0,0 +1,20 @@ +from .main import Downloader + + +def autoload(): + return Downloader() + + +config = [{ + 'name': 'download_providers', + 'groups': [ + { + 'label': 'Downloaders', + 'description': 'You can select different downloaders for each type (usenet / torrent)', + 'type': 'list', + 'name': 'download_providers', + 'tab': 'downloaders', + 'options': [], + }, + ], +}] diff --git a/couchpotato/core/_base/downloader/main.py b/couchpotato/core/_base/downloader/main.py new file mode 100644 index 0000000000..70126a883c --- /dev/null +++ b/couchpotato/core/_base/downloader/main.py @@ -0,0 +1,230 @@ +from base64 import b32decode, b16encode +import random +import re + +from couchpotato.api import addApiView +from 
couchpotato.core.event import addEvent +from couchpotato.core.helpers.variable import mergeDicts +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.base import Provider +from couchpotato.core.plugins.base import Plugin + + +log = CPLog(__name__) + + +## This is here to load the static files +class Downloader(Plugin): + pass + + +class DownloaderBase(Provider): + + protocol = [] + http_time_between_calls = 0 + status_support = True + + torrent_sources = [ + 'https://torcache.net/torrent/%s.torrent', + ] + + torrent_trackers = [ + 'udp://tracker.istole.it:80/announce', + 'http://tracker.istole.it/announce', + 'udp://fr33domtracker.h33t.com:3310/announce', + 'http://tracker.publicbt.com/announce', + 'udp://tracker.publicbt.com:80/announce', + 'http://tracker.ccc.de/announce', + 'udp://tracker.ccc.de:80/announce', + 'http://exodus.desync.com/announce', + 'http://exodus.desync.com:6969/announce', + 'http://tracker.publichd.eu/announce', + 'udp://tracker.publichd.eu:80/announce', + 'http://tracker.openbittorrent.com/announce', + 'udp://tracker.openbittorrent.com/announce', + 'udp://tracker.openbittorrent.com:80/announce', + 'udp://open.demonii.com:1337/announce', + ] + + def __init__(self): + addEvent('download', self._download) + addEvent('download.enabled', self._isEnabled) + addEvent('download.enabled_protocols', self.getEnabledProtocol) + addEvent('download.status', self._getAllDownloadStatus) + addEvent('download.remove_failed', self._removeFailed) + addEvent('download.pause', self._pause) + addEvent('download.process_complete', self._processComplete) + addApiView('download.%s.test' % self.getName().lower(), self._test) + + def getEnabledProtocol(self): + for download_protocol in self.protocol: + if self.isEnabled(manual = True, data = {'protocol': download_protocol}): + return self.protocol + + return [] + + def _download(self, data = None, media = None, manual = False, filedata = None): + if not media: media = {} + if not data: 
data = {} + + if self.isDisabled(manual, data): + return + return self.download(data = data, media = media, filedata = filedata) + + def download(self, *args, **kwargs): + return False + + def _getAllDownloadStatus(self, download_ids): + if self.isDisabled(manual = True, data = {}): + return + + ids = [download_id['id'] for download_id in download_ids if download_id['downloader'] == self.getName()] + + if ids: + return self.getAllDownloadStatus(ids) + else: + return + + def getAllDownloadStatus(self, ids): + return [] + + def _removeFailed(self, release_download): + if self.isDisabled(manual = True, data = {}): + return + + if release_download and release_download.get('downloader') == self.getName(): + if self.conf('delete_failed'): + return self.removeFailed(release_download) + + return False + return + + def removeFailed(self, release_download): + return + + def _processComplete(self, release_download): + if self.isDisabled(manual = True, data = {}): + return + + if release_download and release_download.get('downloader') == self.getName(): + if self.conf('remove_complete', default = False): + return self.processComplete(release_download = release_download, delete_files = self.conf('delete_files', default = False)) + + return False + return + + def processComplete(self, release_download, delete_files): + return + + def isCorrectProtocol(self, protocol): + is_correct = protocol in self.protocol + + if not is_correct: + log.debug("Downloader doesn't support this protocol") + + return is_correct + + def magnetToTorrent(self, magnet_link): + torrent_hash = re.findall('urn:btih:([\w]{32,40})', magnet_link)[0].upper() + + # Convert base 32 to hex + if len(torrent_hash) == 32: + torrent_hash = b16encode(b32decode(torrent_hash)) + + sources = self.torrent_sources + random.shuffle(sources) + + for source in sources: + try: + filedata = self.urlopen(source % torrent_hash, headers = {'Referer': source % torrent_hash}, show_error = False) + if 'torcache' in filedata and 'file 
not found' in filedata.lower(): + continue + + return filedata + except: + log.debug('Torrent hash "%s" wasn\'t found on: %s', (torrent_hash, source)) + + log.error('Failed converting magnet url to torrent: %s', torrent_hash) + return False + + def downloadReturnId(self, download_id): + return { + 'downloader': self.getName(), + 'status_support': self.status_support, + 'id': download_id + } + + def isDisabled(self, manual = False, data = None): + if not data: data = {} + + return not self.isEnabled(manual, data) + + def _isEnabled(self, manual, data = None): + if not data: data = {} + + if not self.isEnabled(manual, data): + return + return True + + def isEnabled(self, manual = False, data = None): + if not data: data = {} + + d_manual = self.conf('manual', default = False) + return super(DownloaderBase, self).isEnabled() and \ + (d_manual and manual or d_manual is False) and \ + (not data or self.isCorrectProtocol(data.get('protocol'))) + + def _test(self, **kwargs): + t = self.test() + if isinstance(t, tuple): + return {'success': t[0], 'msg': t[1]} + return {'success': t} + + def test(self): + return False + + def _pause(self, release_download, pause = True): + if self.isDisabled(manual = True, data = {}): + return + + if release_download and release_download.get('downloader') == self.getName(): + self.pause(release_download, pause) + return True + + return False + + def pause(self, release_download, pause): + return + + +class ReleaseDownloadList(list): + + provider = None + + def __init__(self, provider, **kwargs): + + self.provider = provider + self.kwargs = kwargs + + super(ReleaseDownloadList, self).__init__() + + def extend(self, results): + for r in results: + self.append(r) + + def append(self, result): + new_result = self.fillResult(result) + super(ReleaseDownloadList, self).append(new_result) + + def fillResult(self, result): + + defaults = { + 'id': 0, + 'status': 'busy', + 'downloader': self.provider.getName(), + 'folder': '', + 'files': [], + } + + 
return mergeDicts(defaults, result) + diff --git a/couchpotato/core/_base/downloader/static/downloaders.js b/couchpotato/core/_base/downloader/static/downloaders.js new file mode 100644 index 0000000000..fd6185dfbd --- /dev/null +++ b/couchpotato/core/_base/downloader/static/downloaders.js @@ -0,0 +1,76 @@ +var DownloadersBase = new Class({ + + Implements: [Events], + + initialize: function(){ + var self = this; + + // Add test buttons to settings page + App.addEvent('loadSettings', self.addTestButtons.bind(self)); + + }, + + // Downloaders setting tests + addTestButtons: function(){ + var self = this; + + var setting_page = App.getPage('Settings'); + setting_page.addEvent('create', function(){ + Object.each(setting_page.tabs.downloaders.groups, self.addTestButton.bind(self)); + }); + + }, + + addTestButton: function(fieldset, plugin_name){ + var self = this, + button_name = self.testButtonName(fieldset); + + if(button_name.contains('Downloaders')) return; + + new Element('.ctrlHolder.test_button').grab( + new Element('a.button', { + 'text': button_name, + 'events': { + 'click': function(){ + var button = fieldset.getElement('.test_button .button'); + button.set('text', 'Connecting...'); + + Api.request('download.'+plugin_name+'.test', { + 'onComplete': function(json){ + + button.set('text', button_name); + + var message; + if(json.success){ + message = new Element('span.success', { + 'text': 'Connection successful' + }).inject(button, 'after'); + } + else { + var msg_text = 'Connection failed. 
Check logs for details.'; + if(json.hasOwnProperty('msg')) msg_text = json.msg; + message = new Element('span.failed', { + 'text': msg_text + }).inject(button, 'after'); + } + + requestTimeout(function(){ + message.destroy(); + }, 3000); + } + }); + } + } + }) + ).inject(fieldset); + + }, + + testButtonName: function(fieldset){ + var name = fieldset.getElement('h2 .group_label').get('text'); + return 'Test '+name; + } + +}); + +var Downloaders = new DownloadersBase(); diff --git a/couchpotato/core/_base/scheduler.py b/couchpotato/core/_base/scheduler.py new file mode 100644 index 0000000000..271a2d8187 --- /dev/null +++ b/couchpotato/core/_base/scheduler.py @@ -0,0 +1,82 @@ +from apscheduler.scheduler import Scheduler as Sched +from couchpotato.core.event import addEvent +from couchpotato.core.logger import CPLog +from couchpotato.core.plugins.base import Plugin + +log = CPLog(__name__) + +autoload = 'Scheduler' + + +class Scheduler(Plugin): + + crons = {} + intervals = {} + started = False + + def __init__(self): + + addEvent('schedule.cron', self.cron) + addEvent('schedule.interval', self.interval) + addEvent('schedule.remove', self.remove) + addEvent('schedule.queue', self.queue) + + self.sched = Sched(misfire_grace_time = 60) + self.sched.start() + self.started = True + + def remove(self, identifier): + for cron_type in ['intervals', 'crons']: + try: + self.sched.unschedule_job(getattr(self, cron_type)[identifier]['job']) + log.debug('%s unscheduled %s', (cron_type.capitalize(), identifier)) + except: + pass + + def doShutdown(self, *args, **kwargs): + self.stop() + return super(Scheduler, self).doShutdown(*args, **kwargs) + + def stop(self): + if self.started: + log.debug('Stopping scheduler') + self.sched.shutdown(wait = False) + log.debug('Scheduler stopped') + self.started = False + + def cron(self, identifier = '', handle = None, day = '*', hour = '*', minute = '*'): + log.info('Scheduling "%s", cron: day = %s, hour = %s, minute = %s', (identifier, day, 
hour, minute)) + + self.remove(identifier) + self.crons[identifier] = { + 'handle': handle, + 'day': day, + 'hour': hour, + 'minute': minute, + 'job': self.sched.add_cron_job(handle, day = day, hour = hour, minute = minute) + } + + def interval(self, identifier = '', handle = None, hours = 0, minutes = 0, seconds = 0): + log.info('Scheduling %s, interval: hours = %s, minutes = %s, seconds = %s', (identifier, hours, minutes, seconds)) + + self.remove(identifier) + self.intervals[identifier] = { + 'handle': handle, + 'hours': hours, + 'minutes': minutes, + 'seconds': seconds, + 'job': self.sched.add_interval_job(handle, hours = hours, minutes = minutes, seconds = seconds) + } + + return True + + def queue(self, handlers = None): + if not handlers: handlers = [] + + for h in handlers: + h() + + if self.shuttingDown(): + break + + return True diff --git a/couchpotato/core/_base/scheduler/__init__.py b/couchpotato/core/_base/scheduler/__init__.py deleted file mode 100644 index aa1c5c90b3..0000000000 --- a/couchpotato/core/_base/scheduler/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -from .main import Scheduler - -def start(): - return Scheduler() - -config = [] diff --git a/couchpotato/core/_base/scheduler/main.py b/couchpotato/core/_base/scheduler/main.py deleted file mode 100644 index 4102552e4f..0000000000 --- a/couchpotato/core/_base/scheduler/main.py +++ /dev/null @@ -1,96 +0,0 @@ -from apscheduler.scheduler import Scheduler as Sched -from couchpotato.core.event import addEvent -from couchpotato.core.logger import CPLog -from couchpotato.core.plugins.base import Plugin - -log = CPLog(__name__) - - -class Scheduler(Plugin): - - crons = {} - intervals = {} - started = False - - def __init__(self): - - addEvent('schedule.cron', self.cron) - addEvent('schedule.interval', self.interval) - addEvent('schedule.start', self.start) - addEvent('schedule.restart', self.start) - - addEvent('app.load', self.start) - - self.sched = Sched(misfire_grace_time = 60) - - def remove(self, 
identifier): - for type in ['interval', 'cron']: - try: - self.sched.unschedule_job(getattr(self, type)[identifier]['job']) - log.debug('%s unscheduled %s', (type.capitalize(), identifier)) - except: - pass - - def start(self): - - # Stop all running - self.stop() - - # Crons - for identifier in self.crons: - try: - self.remove(identifier) - cron = self.crons[identifier] - job = self.sched.add_cron_job(cron['handle'], day = cron['day'], hour = cron['hour'], minute = cron['minute']) - cron['job'] = job - except ValueError, e: - log.error('Failed adding cronjob: %s', e) - - # Intervals - for identifier in self.intervals: - try: - self.remove(identifier) - interval = self.intervals[identifier] - job = self.sched.add_interval_job(interval['handle'], hours = interval['hours'], minutes = interval['minutes'], seconds = interval['seconds']) - interval['job'] = job - except ValueError, e: - log.error('Failed adding interval cronjob: %s', e) - - # Start it - log.debug('Starting scheduler') - self.sched.start() - self.started = True - log.debug('Scheduler started') - - def doShutdown(self): - super(Scheduler, self).doShutdown() - self.stop() - - def stop(self): - if self.started: - log.debug('Stopping scheduler') - self.sched.shutdown() - log.debug('Scheduler stopped') - self.started = False - - def cron(self, identifier = '', handle = None, day = '*', hour = '*', minute = '*'): - log.info('Scheduling "%s", cron: day = %s, hour = %s, minute = %s', (identifier, day, hour, minute)) - - self.remove(identifier) - self.crons[identifier] = { - 'handle': handle, - 'day': day, - 'hour': hour, - 'minute': minute, - } - - def interval(self, identifier = '', handle = None, hours = 0, minutes = 0, seconds = 0): - log.info('Scheduling %s, interval: hours = %s, minutes = %s, seconds = %s', (identifier, hours, minutes, seconds)) - - self.remove(identifier) - self.intervals[identifier] = { - 'handle': handle, - 'hours': hours, - 'minutes': minutes, - 'seconds': seconds, - } diff --git 
a/couchpotato/core/_base/updater/__init__.py b/couchpotato/core/_base/updater/__init__.py index a304f9e782..16e08b8193 100644 --- a/couchpotato/core/_base/updater/__init__.py +++ b/couchpotato/core/_base/updater/__init__.py @@ -1,8 +1,10 @@ +import os + from .main import Updater from couchpotato.environment import Env -import os -def start(): + +def autoload(): return Updater() config = [{ diff --git a/couchpotato/core/_base/updater/main.py b/couchpotato/core/_base/updater/main.py index 18d2c3034a..89788836eb 100644 --- a/couchpotato/core/_base/updater/main.py +++ b/couchpotato/core/_base/updater/main.py @@ -1,20 +1,26 @@ +import json +import os +import shutil +import tarfile +import time +import traceback +import zipfile +from datetime import datetime +from threading import RLock +import re + from couchpotato.api import addApiView from couchpotato.core.event import addEvent, fireEvent, fireEventAsync -from couchpotato.core.helpers.encoding import ss -from couchpotato.core.helpers.request import jsonified +from couchpotato.core.helpers.encoding import sp +from couchpotato.core.helpers.variable import removePyc, tryInt from couchpotato.core.logger import CPLog from couchpotato.core.plugins.base import Plugin from couchpotato.environment import Env -from datetime import datetime from dateutil.parser import parse from git.repository import LocalRepository -import json -import os -import shutil -import tarfile -import time -import traceback import version +from six.moves import filter + log = CPLog(__name__) @@ -22,21 +28,26 @@ class Updater(Plugin): available_notified = False + _lock = RLock() + last_check = 'updater.last_checked' def __init__(self): if Env.get('desktop'): self.updater = DesktopUpdater() elif os.path.isdir(os.path.join(Env.get('app_dir'), '.git')): - self.updater = GitUpdater(self.conf('git_command', default = 'git')) + git_default = 'git' + git_command = self.conf('git_command', default = git_default) + git_command = git_command if git_command != 
git_default and (os.path.isfile(git_command) or re.match('^[a-zA-Z0-9_/\.\-]+$', git_command)) else git_default + self.updater = GitUpdater(git_command) else: self.updater = SourceUpdater() - fireEvent('schedule.interval', 'updater.check', self.autoUpdate, hours = 6) - addEvent('app.load', self.autoUpdate) + addEvent('app.load', self.logVersion, priority = 10000) + addEvent('app.load', self.setCrons) addEvent('updater.info', self.info) - addApiView('updater.info', self.getInfo, docs = { + addApiView('updater.info', self.info, docs = { 'desc': 'Get updater information', 'return': { 'type': 'object', @@ -52,8 +63,34 @@ def __init__(self): 'return': {'type': 'see updater.info'} }) + addEvent('setting.save.updater.enabled.after', self.setCrons) + + def logVersion(self): + info = self.info() + log.info('=== VERSION %s, using %s ===', (info.get('version', {}).get('repr', 'UNKNOWN'), self.updater.getName())) + + def setCrons(self): + + fireEvent('schedule.remove', 'updater.check', single = True) + if self.isEnabled(): + fireEvent('schedule.interval', 'updater.check', self.autoUpdate, hours = 24) + self.autoUpdate() # Check after enabling + def autoUpdate(self): - if self.check() and self.conf('automatic') and not self.updater.update_failed: + do_check = True + + try: + last_check = tryInt(Env.prop(self.last_check, default = 0)) + now = tryInt(time.time()) + do_check = last_check < now - 43200 + + if do_check: + Env.prop(self.last_check, value = now) + except: + log.error('Failed checking last time to update: %s', traceback.format_exc()) + + if do_check and self.isEnabled() and self.check() and self.conf('automatic') and not self.updater.update_failed: + if self.updater.doUpdate(): # Notify before restarting @@ -61,7 +98,7 @@ def autoUpdate(self): if self.conf('notification'): info = self.updater.info() version_date = datetime.fromtimestamp(info['update_version']['date']) - fireEvent('updater.updated', 'Updated to a new version with hash "%s", this version is from %s' % 
(info['update_version']['hash'], version_date), data = info) + fireEvent('updater.updated', 'CouchPotato: Updated to a new version with hash "%s", this version is from %s' % (info['update_version']['hash'], version_date), data = info) except: log.error('Failed notifying for update: %s', traceback.format_exc()) @@ -71,31 +108,40 @@ def autoUpdate(self): return False - def check(self): - if self.isDisabled(): + def check(self, force = False): + if not force and self.isDisabled(): return if self.updater.check(): if not self.available_notified and self.conf('notification') and not self.conf('automatic'): - fireEvent('updater.available', message = 'A new update is available', data = self.updater.info()) + info = self.updater.info() + version_date = datetime.fromtimestamp(info['update_version']['date']) + fireEvent('updater.available', message = 'A new update with hash "%s" is available, this version is from %s' % (info['update_version']['hash'], version_date), data = info) self.available_notified = True return True return False - def info(self): - return self.updater.info() + def info(self, **kwargs): + self._lock.acquire() + + info = {} + try: + info = self.updater.info() + except: + log.error('Failed getting updater info: %s', traceback.format_exc()) + + self._lock.release() - def getInfo(self): - return jsonified(self.updater.info()) + return info - def checkView(self): - return jsonified({ - 'update_available': self.check(), + def checkView(self, **kwargs): + return { + 'update_available': self.check(force = True), 'info': self.updater.info() - }) + } - def doUpdateView(self): + def doUpdateView(self, **kwargs): self.check() if not self.updater.update_version: @@ -110,14 +156,20 @@ def doUpdateView(self): if not success: success = True - return jsonified({ + return { 'success': success - }) + } + + def doShutdown(self, *args, **kwargs): + if not Env.get('dev') and not Env.get('desktop'): + removePyc(Env.get('app_dir'), show_logs = False) + + return super(Updater, 
self).doShutdown(*args, **kwargs) class BaseUpdater(Plugin): - repo_user = 'RuudBurger' + repo_user = 'cyberden' repo_name = 'CouchPotatoServer' branch = version.BRANCH @@ -129,64 +181,46 @@ class BaseUpdater(Plugin): def doUpdate(self): pass - def getInfo(self): - return jsonified(self.info()) - def info(self): + + current_version = self.getVersion() + return { 'last_check': self.last_check, 'update_version': self.update_version, - 'version': self.getVersion(), + 'version': current_version, 'repo_name': '%s/%s' % (self.repo_user, self.repo_name), - 'branch': self.branch, + 'branch': current_version.get('branch', self.branch), } - def check(self): + def getVersion(self): pass - def deletePyc(self, only_excess = True): - - for root, dirs, files in os.walk(ss(Env.get('app_dir'))): - - pyc_files = filter(lambda filename: filename.endswith('.pyc'), files) - py_files = set(filter(lambda filename: filename.endswith('.py'), files)) - excess_pyc_files = filter(lambda pyc_filename: pyc_filename[:-1] not in py_files, pyc_files) if only_excess else pyc_files - - for excess_pyc_file in excess_pyc_files: - full_path = os.path.join(root, excess_pyc_file) - log.debug('Removing old PYC file: %s', full_path) - try: - os.remove(full_path) - except: - log.error('Couldn\'t remove %s: %s', (full_path, traceback.format_exc())) - - for dir_name in dirs: - full_path = os.path.join(root, dir_name) - if len(os.listdir(full_path)) == 0: - try: - os.rmdir(full_path) - except: - log.error('Couldn\'t remove empty directory %s: %s', (full_path, traceback.format_exc())) - + def check(self): + pass class GitUpdater(BaseUpdater): + old_repo = 'cyberden/CouchPotatoServer' + new_repo = 'cyberden/CouchPotatoServer' + def __init__(self, git_command): self.repo = LocalRepository(Env.get('app_dir'), command = git_command) + remote_name = 'origin' + remote = self.repo.getRemoteByName(remote_name) + if self.old_repo in remote.url: + log.info('Changing repo to new github organization: %s -> %s', 
(self.old_repo, self.new_repo)) + new_url = remote.url.replace(self.old_repo, self.new_repo) + self.repo._executeGitCommandAssertSuccess("remote set-url %s %s" % (remote_name, new_url)) + def doUpdate(self): try: - log.debug('Stashing local changes') - self.repo.saveStash() - log.info('Updating to latest version') self.repo.pull() - # Delete leftover .pyc files - self.deletePyc() - return True except: log.error('Failed updating via GIT: %s', traceback.format_exc()) @@ -198,17 +232,28 @@ def doUpdate(self): def getVersion(self): if not self.version: + + hash = None + date = None + branch = self.branch + try: - output = self.repo.getHead() # Yes, please + output = self.repo.getHead() # Yes, please log.debug('Git version output: %s', output.hash) - self.version = { - 'hash': output.hash[:8], - 'date': output.getDate(), - 'type': 'git', - } - except Exception, e: + + hash = output.hash[:8] + date = output.getDate() + branch = self.repo.getCurrentBranch().name + except Exception as e: log.error('Failed using GIT updater, running from source, you need to have GIT installed. 
%s', e) - return 'No GIT' + + self.version = { + 'repr': 'git:(%s:%s % s) %s (%s)' % (self.repo_user, self.repo_name, branch, hash or 'unknown_hash', datetime.fromtimestamp(date) if date else 'unknown_date'), + 'hash': hash, + 'date': date, + 'type': 'git', + 'branch': branch + } return self.version @@ -229,7 +274,7 @@ def check(self): local = self.repo.getHead() remote = branch.getHead() - log.info('Versions, local:%s, remote:%s', (local.hash[:8], remote.hash[:8])) + log.debug('Versions, local:%s, remote:%s', (local.hash[:8], remote.hash[:8])) if local.getDate() < remote.getDate(): self.update_version = { @@ -242,7 +287,6 @@ def check(self): return False - class SourceUpdater(BaseUpdater): def __init__(self): @@ -255,11 +299,11 @@ def __init__(self): def doUpdate(self): try: - url = 'https://github.com/%s/%s/tarball/%s' % (self.repo_user, self.repo_name, self.branch) - destination = os.path.join(Env.get('cache_dir'), self.update_version.get('hash') + '.tar.gz') - extracted_path = os.path.join(Env.get('cache_dir'), 'temp_updater') + download_data = fireEvent('cp.source_url', repo = self.repo_user, repo_name = self.repo_name, branch = self.branch, single = True) + destination = os.path.join(Env.get('cache_dir'), self.update_version.get('hash')) + '.' 
+ download_data.get('type') - destination = fireEvent('file.download', url = url, dest = destination, single = True) + extracted_path = os.path.join(Env.get('cache_dir'), 'temp_updater') + destination = fireEvent('file.download', url = download_data.get('url'), dest = destination, single = True) # Cleanup leftover from last time if os.path.isdir(extracted_path): @@ -267,9 +311,15 @@ def doUpdate(self): self.makeDir(extracted_path) # Extract - tar = tarfile.open(destination) - tar.extractall(path = extracted_path) - tar.close() + if download_data.get('type') == 'zip': + zip_file = zipfile.ZipFile(destination) + zip_file.extractall(extracted_path) + zip_file.close() + else: + tar = tarfile.open(destination) + tar.extractall(path = extracted_path) + tar.close() + os.remove(destination) if self.replaceWith(os.path.join(extracted_path, os.listdir(extracted_path)[0])): @@ -286,10 +336,12 @@ def doUpdate(self): return False def replaceWith(self, path): - app_dir = ss(Env.get('app_dir')) + path = sp(path) + app_dir = Env.get('app_dir') + data_dir = Env.get('data_dir') # Get list of files we want to overwrite - self.deletePyc() + removePyc(app_dir) existing_files = [] for root, subfiles, filenames in os.walk(app_dir): for filename in filenames: @@ -318,22 +370,24 @@ def replaceWith(self, path): log.error('Failed overwriting file "%s": %s', (tofile, traceback.format_exc())) return False - if Env.get('app_dir') not in Env.get('data_dir'): - for still_exists in existing_files: - try: - os.remove(still_exists) - except: - log.error('Failed removing non-used file: %s', traceback.format_exc()) + for still_exists in existing_files: - return True + if data_dir in still_exists: + continue + + try: + os.remove(still_exists) + except: + log.error('Failed removing non-used file: %s', traceback.format_exc()) + return True def removeDir(self, path): try: if os.path.isdir(path): shutil.rmtree(path) - except OSError, inst: - os.chmod(inst.filename, 0777) + except OSError as inst: + 
os.chmod(inst.filename, 0o777) self.removeDir(path) def getVersion(self): @@ -347,7 +401,8 @@ def getVersion(self): log.debug('Source version output: %s', output) self.version = output self.version['type'] = 'source' - except Exception, e: + self.version['repr'] = 'source:(%s:%s % s) %s (%s)' % (self.repo_user, self.repo_name, self.branch, output.get('hash', '')[:8], datetime.fromtimestamp(output.get('date', 0))) + except Exception as e: log.error('Failed using source updater. %s', e) return {} @@ -377,7 +432,7 @@ def latestCommit(self): return { 'hash': commit['sha'], - 'date': int(time.mktime(parse(commit['commit']['committer']['date']).timetuple())), + 'date': int(time.mktime(parse(commit['commit']['committer']['date']).timetuple())), } except: log.error('Failed getting latest request from github: %s', traceback.format_exc()) @@ -422,7 +477,7 @@ def check(self): if latest and latest != current_version.get('hash'): self.update_version = { 'hash': latest, - 'date': None, + 'date': None, 'changelog': self.desktop._changelogURL, } @@ -434,6 +489,7 @@ def check(self): def getVersion(self): return { + 'repr': 'desktop: %s' % self.desktop._esky.active_version, 'hash': self.desktop._esky.active_version, 'date': None, 'type': 'desktop', diff --git a/couchpotato/core/_base/updater/static/updater.js b/couchpotato/core/_base/updater/static/updater.js index cc17be5579..158c8d9c4a 100644 --- a/couchpotato/core/_base/updater/static/updater.js +++ b/couchpotato/core/_base/updater/static/updater.js @@ -5,10 +5,10 @@ var UpdaterBase = new Class({ initialize: function(){ var self = this; - App.addEvent('load', self.info.bind(self, 1000)) + App.addEvent('load', self.info.bind(self, 2000)); App.addEvent('unload', function(){ if(self.timer) - clearTimeout(self.timer); + clearRequestTimeout(self.timer); }); }, @@ -24,19 +24,19 @@ var UpdaterBase = new Class({ self.doUpdate(); else { App.unBlockPage(); - App.fireEvent('message', 'No updates available'); + App.trigger('message', ['No 
updates available']); } } - }) + }); }, info: function(timeout){ var self = this; - if(self.timer) clearTimeout(self.timer); + if(self.timer) clearRequestTimeout(self.timer); - self.timer = setTimeout(function(){ + self.timer = requestTimeout(function(){ Api.request('updater.info', { 'onComplete': function(json){ self.json = json; @@ -50,8 +50,8 @@ var UpdaterBase = new Class({ self.message.destroy(); } } - }) - }, (timeout || 0)) + }); + }, (timeout || 0)); }, @@ -66,7 +66,7 @@ var UpdaterBase = new Class({ var changelog = 'https://github.com/'+data.repo_name+'/compare/'+data.version.hash+'...'+data.branch; if(data.update_version.changelog) - changelog = data.update_version.changelog + '#' + data.version.hash+'...'+data.update_version.hash + changelog = data.update_version.changelog + '#' + data.version.hash+'...'+data.update_version.hash; self.message = new Element('div.message.update').adopt( new Element('span', { @@ -84,7 +84,7 @@ var UpdaterBase = new Class({ 'click': self.doUpdate.bind(self) } }) - ).inject($(document.body).getElement('.header')) + ).inject(App.getBlock('footer')); }, doUpdate: function(){ @@ -96,15 +96,17 @@ var UpdaterBase = new Class({ if(json.success) self.updating(); else - App.unBlockPage() + App.unBlockPage(); } }); }, updating: function(){ - App.checkAvailable.delay(500, App, [1000, function(){ - window.location.reload(); - }]); + requestTimeout(function(){ + App.checkAvailable(1000, function(){ + window.location.reload(); + }); + }, 500); if(self.message) self.message.destroy(); } diff --git a/couchpotato/core/_base/updater/static/updater.scss b/couchpotato/core/_base/updater/static/updater.scss new file mode 100644 index 0000000000..b83c1bb337 --- /dev/null +++ b/couchpotato/core/_base/updater/static/updater.scss @@ -0,0 +1,17 @@ +@import "_mixins"; + +.update.message { + @include theme(background, background); + padding: $padding; + text-align: center; + font-size: 1.25em; + + @include media-tablet { + font-size: 1em; + } + + a { + 
@include theme(color, primary); + padding: $padding/4; + } +} diff --git a/couchpotato/core/auth.py b/couchpotato/core/auth.py deleted file mode 100644 index 0111b9abdf..0000000000 --- a/couchpotato/core/auth.py +++ /dev/null @@ -1,26 +0,0 @@ -from couchpotato.core.helpers.variable import md5 -from couchpotato.environment import Env -from flask import request, Response -from functools import wraps - -def check_auth(username, password): - return username == Env.setting('username') and password == Env.setting('password') - -def authenticate(): - return Response( - 'This is not the page you are looking for. *waves hand*', 401, - {'WWW-Authenticate': 'Basic realm="CouchPotato Login"'} - ) - -def requires_auth(f): - - @wraps(f) - def decorated(*args, **kwargs): - auth = getattr(request, 'authorization') - if Env.setting('username') and Env.setting('password'): - if (not auth or not check_auth(auth.username.decode('latin1'), md5(auth.password.decode('latin1').encode(Env.get('encoding'))))): - return authenticate() - - return f(*args, **kwargs) - - return decorated diff --git a/couchpotato/core/database.py b/couchpotato/core/database.py new file mode 100644 index 0000000000..bed427e979 --- /dev/null +++ b/couchpotato/core/database.py @@ -0,0 +1,639 @@ +import json +import os +import time +import traceback +from sqlite3 import OperationalError + +from CodernityDB.database import RecordNotFound +from CodernityDB.index import IndexException, IndexNotFoundException, IndexConflict +from couchpotato import CPLog +from couchpotato.api import addApiView +from couchpotato.core.event import addEvent, fireEvent, fireEventAsync +from couchpotato.core.helpers.encoding import toUnicode, sp +from couchpotato.core.helpers.variable import getImdb, tryInt, randomString + + +log = CPLog(__name__) + + +class Database(object): + + indexes = None + db = None + + def __init__(self): + + self.indexes = {} + + addApiView('database.list_documents', self.listDocuments) + 
addApiView('database.reindex', self.reindex) + addApiView('database.compact', self.compact) + addApiView('database.document.update', self.updateDocument) + addApiView('database.document.delete', self.deleteDocument) + + addEvent('database.setup.after', self.startup_compact) + addEvent('database.setup_index', self.setupIndex) + addEvent('database.delete_corrupted', self.deleteCorrupted) + + addEvent('app.migrate', self.migrate) + addEvent('app.after_shutdown', self.close) + + def getDB(self): + + if not self.db: + from couchpotato import get_db + self.db = get_db() + + return self.db + + def close(self, **kwargs): + self.getDB().close() + + def setupIndex(self, index_name, klass): + + self.indexes[index_name] = klass + + db = self.getDB() + + # Category index + index_instance = klass(db.path, index_name) + try: + + # Make sure store and bucket don't exist + exists = [] + for x in ['buck', 'stor']: + full_path = os.path.join(db.path, '%s_%s' % (index_name, x)) + if os.path.exists(full_path): + exists.append(full_path) + + if index_name not in db.indexes_names: + + # Remove existing buckets if index isn't there + for x in exists: + os.unlink(x) + + # Add index (will restore buckets) + db.add_index(index_instance) + db.reindex_index(index_name) + else: + # Previous info + previous = db.indexes_names[index_name] + previous_version = previous._version + current_version = klass._version + + # Only edit index if versions are different + if previous_version < current_version: + log.debug('Index "%s" already exists, updating and reindexing', index_name) + db.destroy_index(previous) + db.add_index(index_instance) + db.reindex_index(index_name) + + except: + log.error('Failed adding index %s: %s', (index_name, traceback.format_exc())) + + def deleteDocument(self, **kwargs): + + db = self.getDB() + + try: + + document_id = kwargs.get('_request').get_argument('id') + document = db.get('id', document_id) + db.delete(document) + + return { + 'success': True + } + except: + return 
{ + 'success': False, + 'error': traceback.format_exc() + } + + def updateDocument(self, **kwargs): + + db = self.getDB() + + try: + + document = json.loads(kwargs.get('_request').get_argument('document')) + d = db.update(document) + document.update(d) + + return { + 'success': True, + 'document': document + } + except: + return { + 'success': False, + 'error': traceback.format_exc() + } + + def listDocuments(self, **kwargs): + db = self.getDB() + + results = { + 'unknown': [] + } + + for document in db.all('id'): + key = document.get('_t', 'unknown') + + if kwargs.get('show') and key != kwargs.get('show'): + continue + + if not results.get(key): + results[key] = [] + results[key].append(document) + + return results + + def deleteCorrupted(self, _id, traceback_error = ''): + + db = self.getDB() + + try: + log.debug('Deleted corrupted document "%s": %s', (_id, traceback_error)) + corrupted = db.get('id', _id, with_storage = False) + db._delete_id_index(corrupted.get('_id'), corrupted.get('_rev'), None) + except: + log.debug('Failed deleting corrupted: %s', traceback.format_exc()) + + def reindex(self, **kwargs): + + success = True + try: + db = self.getDB() + db.reindex() + except: + log.error('Failed index: %s', traceback.format_exc()) + success = False + + return { + 'success': success + } + + def compact(self, try_repair = True, **kwargs): + + success = False + db = self.getDB() + + # Removing left over compact files + db_path = sp(db.path) + for f in os.listdir(sp(db.path)): + for x in ['_compact_buck', '_compact_stor']: + if f[-len(x):] == x: + os.unlink(os.path.join(db_path, f)) + + try: + start = time.time() + size = float(db.get_db_details().get('size', 0)) + log.debug('Compacting database, current size: %sMB', round(size/1048576, 2)) + + db.compact() + new_size = float(db.get_db_details().get('size', 0)) + log.debug('Done compacting database in %ss, new size: %sMB, saved: %sMB', (round(time.time()-start, 2), round(new_size/1048576, 2), 
round((size-new_size)/1048576, 2))) + success = True + except (IndexException, AttributeError): + if try_repair: + log.error('Something wrong with indexes, trying repair') + + # Remove all indexes + old_indexes = self.indexes.keys() + for index_name in old_indexes: + try: + db.destroy_index(index_name) + except IndexNotFoundException: + pass + except: + log.error('Failed removing old index %s', index_name) + + # Add them again + for index_name in self.indexes: + klass = self.indexes[index_name] + + # Category index + index_instance = klass(db.path, index_name) + try: + db.add_index(index_instance) + db.reindex_index(index_name) + except IndexConflict: + pass + except: + log.error('Failed adding index %s', index_name) + raise + + self.compact(try_repair = False) + else: + log.error('Failed compact: %s', traceback.format_exc()) + + except: + log.error('Failed compact: %s', traceback.format_exc()) + + return { + 'success': success + } + + # Compact on start + def startup_compact(self): + from couchpotato import Env + + db = self.getDB() + + # Try fix for migration failures on desktop + if Env.get('desktop'): + try: + list(db.all('profile', with_doc = True)) + except RecordNotFound: + + failed_location = '%s_failed' % db.path + old_db = os.path.join(Env.get('data_dir'), 'couchpotato.db.old') + + if not os.path.isdir(failed_location) and os.path.isfile(old_db): + log.error('Corrupt database, trying migrate again') + db.close() + + # Rename database folder + os.rename(db.path, '%s_failed' % db.path) + + # Rename .old database to try another migrate + os.rename(old_db, old_db[:-4]) + + fireEventAsync('app.restart') + else: + log.error('Migration failed and couldn\'t recover database. 
Please report on GitHub, with this message.') + db.reindex() + + return + + # Check size and compact if needed + size = db.get_db_details().get('size') + prop_name = 'last_db_compact' + last_check = int(Env.prop(prop_name, default = 0)) + + if last_check < time.time()-604800: # 7 days + self.compact() + Env.prop(prop_name, value = int(time.time())) + + def migrate(self): + + from couchpotato import Env + old_db = os.path.join(Env.get('data_dir'), 'couchpotato.db') + if not os.path.isfile(old_db): return + + log.info('=' * 30) + log.info('Migrating database, hold on..') + time.sleep(1) + + if os.path.isfile(old_db): + + migrate_start = time.time() + + import sqlite3 + conn = sqlite3.connect(old_db) + + migrate_list = { + 'category': ['id', 'label', 'order', 'required', 'preferred', 'ignored', 'destination'], + 'profile': ['id', 'label', 'order', 'core', 'hide'], + 'profiletype': ['id', 'order', 'finish', 'wait_for', 'quality_id', 'profile_id'], + 'quality': ['id', 'identifier', 'order', 'size_min', 'size_max'], + 'movie': ['id', 'last_edit', 'library_id', 'status_id', 'profile_id', 'category_id'], + 'library': ['id', 'identifier', 'info'], + 'librarytitle': ['id', 'title', 'default', 'libraries_id'], + 'library_files__file_library': ['library_id', 'file_id'], + 'release': ['id', 'identifier', 'movie_id', 'status_id', 'quality_id', 'last_edit'], + 'releaseinfo': ['id', 'identifier', 'value', 'release_id'], + 'release_files__file_release': ['release_id', 'file_id'], + 'status': ['id', 'identifier'], + 'properties': ['id', 'identifier', 'value'], + 'file': ['id', 'path', 'type_id'], + 'filetype': ['identifier', 'id'] + } + + migrate_data = {} + rename_old = False + + try: + + c = conn.cursor() + + for ml in migrate_list: + migrate_data[ml] = {} + rows = migrate_list[ml] + + try: + c.execute('SELECT %s FROM `%s`' % ('`' + '`,`'.join(rows) + '`', ml)) + except: + # ignore faulty destination_id database + if ml == 'category': + migrate_data[ml] = {} + else: + rename_old = 
True + raise + + for p in c.fetchall(): + columns = {} + for row in migrate_list[ml]: + columns[row] = p[rows.index(row)] + + if not migrate_data[ml].get(p[0]): + migrate_data[ml][p[0]] = columns + else: + if not isinstance(migrate_data[ml][p[0]], list): + migrate_data[ml][p[0]] = [migrate_data[ml][p[0]]] + migrate_data[ml][p[0]].append(columns) + + conn.close() + + log.info('Getting data took %s', time.time() - migrate_start) + + db = self.getDB() + if not db.opened: + return + + # Use properties + properties = migrate_data['properties'] + log.info('Importing %s properties', len(properties)) + for x in properties: + property = properties[x] + Env.prop(property.get('identifier'), property.get('value')) + + # Categories + categories = migrate_data.get('category', []) + log.info('Importing %s categories', len(categories)) + category_link = {} + for x in categories: + c = categories[x] + + new_c = db.insert({ + '_t': 'category', + 'order': c.get('order', 999), + 'label': toUnicode(c.get('label', '')), + 'ignored': toUnicode(c.get('ignored', '')), + 'preferred': toUnicode(c.get('preferred', '')), + 'required': toUnicode(c.get('required', '')), + 'destination': toUnicode(c.get('destination', '')), + }) + + category_link[x] = new_c.get('_id') + + # Profiles + log.info('Importing profiles') + new_profiles = db.all('profile', with_doc = True) + new_profiles_by_label = {} + for x in new_profiles: + + # Remove default non core profiles + if not x['doc'].get('core'): + db.delete(x['doc']) + else: + new_profiles_by_label[x['doc']['label']] = x['_id'] + + profiles = migrate_data['profile'] + profile_link = {} + for x in profiles: + p = profiles[x] + + exists = new_profiles_by_label.get(p.get('label')) + + # Update existing with order only + if exists and p.get('core'): + profile = db.get('id', exists) + profile['order'] = tryInt(p.get('order')) + profile['hide'] = p.get('hide') in [1, True, 'true', 'True'] + db.update(profile) + + profile_link[x] = profile.get('_id') + else: + 
+ new_profile = { + '_t': 'profile', + 'label': p.get('label'), + 'order': int(p.get('order', 999)), + 'core': p.get('core', False), + 'qualities': [], + 'wait_for': [], + 'finish': [] + } + + types = migrate_data['profiletype'] + for profile_type in types: + p_type = types[profile_type] + if types[profile_type]['profile_id'] == p['id']: + if p_type['quality_id']: + new_profile['finish'].append(p_type['finish']) + new_profile['wait_for'].append(p_type['wait_for']) + new_profile['qualities'].append(migrate_data['quality'][p_type['quality_id']]['identifier']) + + if len(new_profile['qualities']) > 0: + new_profile.update(db.insert(new_profile)) + profile_link[x] = new_profile.get('_id') + else: + log.error('Corrupt profile list for "%s", using default.', p.get('label')) + + # Qualities + log.info('Importing quality sizes') + new_qualities = db.all('quality', with_doc = True) + new_qualities_by_identifier = {} + for x in new_qualities: + new_qualities_by_identifier[x['doc']['identifier']] = x['_id'] + + qualities = migrate_data['quality'] + quality_link = {} + for x in qualities: + q = qualities[x] + q_id = new_qualities_by_identifier[q.get('identifier')] + + quality = db.get('id', q_id) + quality['order'] = q.get('order') + quality['size_min'] = tryInt(q.get('size_min')) + quality['size_max'] = tryInt(q.get('size_max')) + db.update(quality) + + quality_link[x] = quality + + # Titles + titles = migrate_data['librarytitle'] + titles_by_library = {} + for x in titles: + title = titles[x] + if title.get('default'): + titles_by_library[title.get('libraries_id')] = title.get('title') + + # Releases + releaseinfos = migrate_data['releaseinfo'] + for x in releaseinfos: + info = releaseinfos[x] + + # Skip if release doesn't exist for this info + if not migrate_data['release'].get(info.get('release_id')): + continue + + if not migrate_data['release'][info.get('release_id')].get('info'): + migrate_data['release'][info.get('release_id')]['info'] = {} + + 
migrate_data['release'][info.get('release_id')]['info'][info.get('identifier')] = info.get('value') + + releases = migrate_data['release'] + releases_by_media = {} + for x in releases: + release = releases[x] + if not releases_by_media.get(release.get('movie_id')): + releases_by_media[release.get('movie_id')] = [] + + releases_by_media[release.get('movie_id')].append(release) + + # Type ids + types = migrate_data['filetype'] + type_by_id = {} + for t in types: + type = types[t] + type_by_id[type.get('id')] = type + + # Media + log.info('Importing %s media items', len(migrate_data['movie'])) + statuses = migrate_data['status'] + libraries = migrate_data['library'] + library_files = migrate_data['library_files__file_library'] + releases_files = migrate_data['release_files__file_release'] + all_files = migrate_data['file'] + poster_type = migrate_data['filetype']['poster'] + medias = migrate_data['movie'] + for x in medias: + m = medias[x] + + status = statuses.get(m['status_id']).get('identifier') + l = libraries.get(m['library_id']) + + # Only migrate wanted movies, Skip if no identifier present + if not l or not getImdb(l.get('identifier')): continue + + profile_id = profile_link.get(m['profile_id']) + category_id = category_link.get(m['category_id']) + title = titles_by_library.get(m['library_id']) + releases = releases_by_media.get(x, []) + info = json.loads(l.get('info', '')) + + files = library_files.get(m['library_id'], []) + if not isinstance(files, list): + files = [files] + + added_media = fireEvent('movie.add', { + 'info': info, + 'identifier': l.get('identifier'), + 'profile_id': profile_id, + 'category_id': category_id, + 'title': title + }, force_readd = False, search_after = False, update_after = False, notify_after = False, status = status, single = True) + + if not added_media: + log.error('Failed adding media %s: %s', (l.get('identifier'), info)) + continue + + added_media['files'] = added_media.get('files', {}) + for f in files: + ffile = 
all_files[f.get('file_id')] + + # Only migrate posters + if ffile.get('type_id') == poster_type.get('id'): + if ffile.get('path') not in added_media['files'].get('image_poster', []) and os.path.isfile(ffile.get('path')): + added_media['files']['image_poster'] = [ffile.get('path')] + break + + if 'image_poster' in added_media['files']: + db.update(added_media) + + for rel in releases: + + empty_info = False + if not rel.get('info'): + empty_info = True + rel['info'] = {} + + quality = quality_link.get(rel.get('quality_id')) + if not quality: + continue + + release_status = statuses.get(rel.get('status_id')).get('identifier') + + if rel['info'].get('download_id'): + status_support = rel['info'].get('download_status_support', False) in [True, 'true', 'True'] + rel['info']['download_info'] = { + 'id': rel['info'].get('download_id'), + 'downloader': rel['info'].get('download_downloader'), + 'status_support': status_support, + } + + # Add status to keys + rel['info']['status'] = release_status + if not empty_info: + fireEvent('release.create_from_search', [rel['info']], added_media, quality, single = True) + else: + release = { + '_t': 'release', + 'identifier': rel.get('identifier'), + 'media_id': added_media.get('_id'), + 'quality': quality.get('identifier'), + 'status': release_status, + 'last_edit': int(time.time()), + 'files': {} + } + + # Add downloader info if provided + try: + release['download_info'] = rel['info']['download_info'] + del rel['download_info'] + except: + pass + + # Add files + release_files = releases_files.get(rel.get('id'), []) + if not isinstance(release_files, list): + release_files = [release_files] + + if len(release_files) == 0: + continue + + for f in release_files: + rfile = all_files.get(f.get('file_id')) + if not rfile: + continue + + file_type = type_by_id.get(rfile.get('type_id')).get('identifier') + + if not release['files'].get(file_type): + release['files'][file_type] = [] + + release['files'][file_type].append(rfile.get('path')) + 
+ try: + rls = db.get('release_identifier', rel.get('identifier'), with_doc = True)['doc'] + rls.update(release) + db.update(rls) + except: + db.insert(release) + + log.info('Total migration took %s', time.time() - migrate_start) + log.info('=' * 30) + + rename_old = True + + except OperationalError: + log.error('Migrating from faulty database, probably a (too) old version: %s', traceback.format_exc()) + + rename_old = True + except: + log.error('Migration failed: %s', traceback.format_exc()) + + + # rename old database + if rename_old: + random = randomString() + log.info('Renaming old database to %s ', '%s.%s_old' % (old_db, random)) + os.rename(old_db, '%s.%s_old' % (old_db, random)) + + if os.path.isfile(old_db + '-wal'): + os.rename(old_db + '-wal', '%s-wal.%s_old' % (old_db, random)) + if os.path.isfile(old_db + '-shm'): + os.rename(old_db + '-shm', '%s-shm.%s_old' % (old_db, random)) diff --git a/couchpotato/core/downloaders/__init__.py b/couchpotato/core/downloaders/__init__.py index 5fb7125fb1..e69de29bb2 100644 --- a/couchpotato/core/downloaders/__init__.py +++ b/couchpotato/core/downloaders/__init__.py @@ -1,13 +0,0 @@ -config = { - 'name': 'download_providers', - 'groups': [ - { - 'label': 'Downloaders', - 'description': 'You can select different downloaders for each type (usenet / torrent)', - 'type': 'list', - 'name': 'download_providers', - 'tab': 'downloaders', - 'options': [], - }, - ], -} diff --git a/couchpotato/core/downloaders/base.py b/couchpotato/core/downloaders/base.py deleted file mode 100644 index 70500dc09d..0000000000 --- a/couchpotato/core/downloaders/base.py +++ /dev/null @@ -1,118 +0,0 @@ -from base64 import b32decode, b16encode -from couchpotato.core.event import addEvent -from couchpotato.core.logger import CPLog -from couchpotato.core.providers.base import Provider -import random -import re - -log = CPLog(__name__) - - -class Downloader(Provider): - - type = [] - http_time_between_calls = 0 - - torrent_sources = [ - 
'http://torrage.com/torrent/%s.torrent', - 'http://torcache.net/torrent/%s.torrent', - ] - - torrent_trackers = [ - 'http://tracker.publicbt.com/announce', - 'udp://tracker.istole.it:80/announce', - 'udp://fr33domtracker.h33t.com:3310/announce', - 'http://tracker.istole.it/announce', - 'http://tracker.ccc.de/announce', - 'udp://tracker.publicbt.com:80/announce', - 'udp://tracker.ccc.de:80/announce', - 'http://exodus.desync.com/announce', - 'http://exodus.desync.com:6969/announce', - 'http://tracker.publichd.eu/announce', - 'http://tracker.openbittorrent.com/announce', - ] - - def __init__(self): - addEvent('download', self._download) - addEvent('download.enabled', self._isEnabled) - addEvent('download.enabled_types', self.getEnabledDownloadType) - addEvent('download.status', self._getAllDownloadStatus) - addEvent('download.remove_failed', self._removeFailed) - - def getEnabledDownloadType(self): - for download_type in self.type: - if self.isEnabled(manual = True, data = {'type': download_type}): - return self.type - - return [] - - def _download(self, data = {}, movie = {}, manual = False, filedata = None): - if self.isDisabled(manual, data): - return - return self.download(data = data, movie = movie, filedata = filedata) - - def _getAllDownloadStatus(self): - if self.isDisabled(manual = True, data = {}): - return - - return self.getAllDownloadStatus() - - def getAllDownloadStatus(self): - return - - def _removeFailed(self, item): - if self.isDisabled(manual = True, data = {}): - return - - if self.conf('delete_failed', default = True): - return self.removeFailed(item) - - return False - - def removeFailed(self, item): - return - - def isCorrectType(self, item_type): - is_correct = item_type in self.type - - if not is_correct: - log.debug("Downloader doesn't support this type") - - return is_correct - - def magnetToTorrent(self, magnet_link): - torrent_hash = re.findall('urn:btih:([\w]{32,40})', magnet_link)[0].upper() - - # Convert base 32 to hex - if 
len(torrent_hash) == 32: - torrent_hash = b16encode(b32decode(torrent_hash)) - - sources = self.torrent_sources - random.shuffle(sources) - - for source in sources: - try: - filedata = self.urlopen(source % torrent_hash, headers = {'Referer': ''}, show_error = False) - if 'torcache' in filedata and 'file not found' in filedata.lower(): - continue - - return filedata - except: - log.debug('Torrent hash "%s" wasn\'t found on: %s', (torrent_hash, source)) - - log.error('Failed converting magnet url to torrent: %s', (torrent_hash)) - return False - - def isDisabled(self, manual, data): - return not self.isEnabled(manual, data) - - def _isEnabled(self, manual, data = {}): - if not self.isEnabled(manual, data): - return - return True - - def isEnabled(self, manual, data = {}): - d_manual = self.conf('manual', default = False) - return super(Downloader, self).isEnabled() and \ - ((d_manual and manual) or (d_manual is False)) and \ - (not data or self.isCorrectType(data.get('type'))) diff --git a/couchpotato/core/downloaders/blackhole.py b/couchpotato/core/downloaders/blackhole.py new file mode 100644 index 0000000000..e9853f4e53 --- /dev/null +++ b/couchpotato/core/downloaders/blackhole.py @@ -0,0 +1,205 @@ +from __future__ import with_statement +import os +import traceback + +from couchpotato.core._base.downloader.main import DownloaderBase +from couchpotato.core.helpers.encoding import sp +from couchpotato.core.helpers.variable import getDownloadDir +from couchpotato.core.logger import CPLog +from couchpotato.environment import Env + + +log = CPLog(__name__) + +autoload = 'Blackhole' + + +class Blackhole(DownloaderBase): + + protocol = ['nzb', 'torrent', 'torrent_magnet'] + status_support = False + + def download(self, data = None, media = None, filedata = None): + """ Send a torrent/nzb file to the downloader + + :param data: dict returned from provider + Contains the release information + :param media: media dict with information + Used for creating the filename when 
possible + :param filedata: downloaded torrent/nzb filedata + The file gets downloaded in the searcher and send to this function + This is done to have failed checking before using the downloader, so the downloader + doesn't need to worry about that + :return: boolean + One faile returns false, but the downloaded should log his own errors + """ + + if not media: media = {} + if not data: data = {} + + directory = self.conf('directory') + + # The folder needs to exist + if not directory or not os.path.isdir(directory): + log.error('No directory set for blackhole %s download.', data.get('protocol')) + else: + try: + # Filedata can be empty, which probably means it a magnet link + if not filedata or len(filedata) < 50: + try: + if data.get('protocol') == 'torrent_magnet': + filedata = self.magnetToTorrent(data.get('url')) + data['protocol'] = 'torrent' + except: + log.error('Failed download torrent via magnet url: %s', traceback.format_exc()) + + # If it's still empty, either write the magnet link to a .magnet file, or error out. 
+ if not filedata or len(filedata) < 50: + if self.conf('magnet_file'): + filedata = data.get('url') + '\n' + data['protocol'] = 'magnet' + else: + log.error('No nzb/torrent available: %s', data.get('url')) + return False + + # Create filename with imdb id and other nice stuff + file_name = self.createFileName(data, filedata, media) + full_path = os.path.join(directory, file_name) + + # People want thinks nice and tidy, create a subdir + if self.conf('create_subdir'): + try: + new_path = os.path.splitext(full_path)[0] + if not os.path.exists(new_path): + os.makedirs(new_path) + full_path = os.path.join(new_path, file_name) + except: + log.error('Couldnt create sub dir, reverting to old one: %s', full_path) + + try: + + # Make sure the file doesn't exist yet, no need in overwriting it + if not os.path.isfile(full_path): + log.info('Downloading %s to %s.', (data.get('protocol'), full_path)) + with open(full_path, 'wb') as f: + f.write(filedata) + os.chmod(full_path, Env.getPermission('file')) + return self.downloadReturnId('') + else: + log.info('File %s already exists.', full_path) + return self.downloadReturnId('') + + except: + log.error('Failed to download to blackhole %s', traceback.format_exc()) + pass + + except: + log.info('Failed to download file %s: %s', (data.get('name'), traceback.format_exc())) + return False + + return False + + def test(self): + """ Test and see if the directory is writable + :return: boolean + """ + + directory = self.conf('directory') + if directory and os.path.isdir(directory): + + test_file = sp(os.path.join(directory, 'couchpotato_test.txt')) + + # Check if folder is writable + self.createFile(test_file, 'This is a test file') + if os.path.isfile(test_file): + os.remove(test_file) + return True + + return False + + def getEnabledProtocol(self): + """ What protocols is this downloaded used for + :return: list with protocols + """ + + if self.conf('use_for') == 'both': + return super(Blackhole, self).getEnabledProtocol() + elif 
self.conf('use_for') == 'torrent': + return ['torrent', 'torrent_magnet'] + else: + return ['nzb'] + + def isEnabled(self, manual = False, data = None): + """ Check if protocol is used (and enabled) + :param manual: The user has clicked to download a link through the webUI + :param data: dict returned from provider + Contains the release information + :return: boolean + """ + if not data: data = {} + for_protocol = ['both'] + if data and 'torrent' in data.get('protocol'): + for_protocol.append('torrent') + elif data: + for_protocol.append(data.get('protocol')) + + return super(Blackhole, self).isEnabled(manual, data) and \ + ((self.conf('use_for') in for_protocol)) + + +config = [{ + 'name': 'blackhole', + 'order': 30, + 'groups': [ + { + 'tab': 'downloaders', + 'list': 'download_providers', + 'name': 'blackhole', + 'label': 'Black hole', + 'description': 'Download the NZB/Torrent to a specific folder. Note: Seeding and copying/linking features do not work with Black hole.', + 'wizard': True, + 'options': [ + { + 'name': 'enabled', + 'default': True, + 'type': 'enabler', + 'radio_group': 'nzb,torrent', + }, + { + 'name': 'directory', + 'type': 'directory', + 'description': 'Directory where the .nzb (or .torrent) file is saved to.', + 'default': getDownloadDir() + }, + { + 'name': 'use_for', + 'label': 'Use for', + 'default': 'both', + 'type': 'dropdown', + 'values': [('usenet & torrents', 'both'), ('usenet', 'nzb'), ('torrent', 'torrent')], + }, + { + 'name': 'create_subdir', + 'default': 0, + 'type': 'bool', + 'advanced': True, + 'description': 'Create a sub directory when saving the .nzb (or .torrent).', + }, + { + 'name': 'manual', + 'default': 0, + 'type': 'bool', + 'advanced': True, + 'description': 'Disable this downloader for automated searches, but use it when I manually send a release.', + }, + { + 'name': 'magnet_file', + 'default': 0, + 'type': 'bool', + 'advanced': True, + 'description': 'If magnet file conversion fails, write down the magnet link in a 
.magnet file instead.', + }, + ], + } + ], +}] diff --git a/couchpotato/core/downloaders/blackhole/__init__.py b/couchpotato/core/downloaders/blackhole/__init__.py deleted file mode 100644 index 290e8d43dd..0000000000 --- a/couchpotato/core/downloaders/blackhole/__init__.py +++ /dev/null @@ -1,48 +0,0 @@ -from .main import Blackhole -from couchpotato.core.helpers.variable import getDownloadDir - -def start(): - return Blackhole() - -config = [{ - 'name': 'blackhole', - 'order': 30, - 'groups': [ - { - 'tab': 'downloaders', - 'list': 'download_providers', - 'name': 'blackhole', - 'label': 'Black hole', - 'description': 'Download the NZB/Torrent to a specific folder.', - 'wizard': True, - 'options': [ - { - 'name': 'enabled', - 'default': True, - 'type': 'enabler', - 'radio_group': 'nzb,torrent', - }, - { - 'name': 'directory', - 'type': 'directory', - 'description': 'Directory where the .nzb (or .torrent) file is saved to.', - 'default': getDownloadDir() - }, - { - 'name': 'use_for', - 'label': 'Use for', - 'default': 'both', - 'type': 'dropdown', - 'values': [('usenet & torrents', 'both'), ('usenet', 'nzb'), ('torrent', 'torrent')], - }, - { - 'name': 'manual', - 'default': 0, - 'type': 'bool', - 'advanced': True, - 'description': 'Disable this downloader for automated searches, but use it when I manually send a release.', - }, - ], - } - ], -}] diff --git a/couchpotato/core/downloaders/blackhole/main.py b/couchpotato/core/downloaders/blackhole/main.py deleted file mode 100644 index aad9ea7fc5..0000000000 --- a/couchpotato/core/downloaders/blackhole/main.py +++ /dev/null @@ -1,72 +0,0 @@ -from __future__ import with_statement -from couchpotato.core.downloaders.base import Downloader -from couchpotato.core.logger import CPLog -from couchpotato.environment import Env -import os -import traceback - -log = CPLog(__name__) - -class Blackhole(Downloader): - - type = ['nzb', 'torrent', 'torrent_magnet'] - - def download(self, data = {}, movie = {}, filedata = None): - - 
directory = self.conf('directory') - if not directory or not os.path.isdir(directory): - log.error('No directory set for blackhole %s download.', data.get('type')) - else: - try: - if not filedata or len(filedata) < 50: - try: - if data.get('type') == 'torrent_magnet': - filedata = self.magnetToTorrent(data.get('url')) - data['type'] = 'torrent' - except: - log.error('Failed download torrent via magnet url: %s', traceback.format_exc()) - - if not filedata or len(filedata) < 50: - log.error('No nzb/torrent available: %s', data.get('url')) - return False - - fullPath = os.path.join(directory, self.createFileName(data, filedata, movie)) - - try: - if not os.path.isfile(fullPath): - log.info('Downloading %s to %s.', (data.get('type'), fullPath)) - with open(fullPath, 'wb') as f: - f.write(filedata) - os.chmod(fullPath, Env.getPermission('file')) - return True - else: - log.info('File %s already exists.', fullPath) - return True - - except: - log.error('Failed to download to blackhole %s', traceback.format_exc()) - pass - - except: - log.info('Failed to download file %s: %s', (data.get('name'), traceback.format_exc())) - return False - - return False - - def getEnabledDownloadType(self): - if self.conf('use_for') == 'both': - return super(Blackhole, self).getEnabledDownloadType() - elif self.conf('use_for') == 'torrent': - return ['torrent', 'torrent_magnet'] - else: - return ['nzb'] - - def isEnabled(self, manual, data = {}): - for_type = ['both'] - if data and 'torrent' in data.get('type'): - for_type.append('torrent') - elif data: - for_type.append(data.get('type')) - - return super(Blackhole, self).isEnabled(manual, data) and \ - ((self.conf('use_for') in for_type)) diff --git a/couchpotato/core/downloaders/deluge.py b/couchpotato/core/downloaders/deluge.py new file mode 100644 index 0000000000..aaca40e663 --- /dev/null +++ b/couchpotato/core/downloaders/deluge.py @@ -0,0 +1,421 @@ +from base64 import b64encode, b16encode, b32decode +from datetime import timedelta 
+from hashlib import sha1 +import os.path +import re +import traceback + +from bencode import bencode as benc, bdecode +from couchpotato.core._base.downloader.main import DownloaderBase, ReleaseDownloadList +from couchpotato.core.helpers.encoding import isInt, sp +from couchpotato.core.helpers.variable import tryFloat, cleanHost +from couchpotato.core.logger import CPLog +from synchronousdeluge import DelugeClient + + +log = CPLog(__name__) + +autoload = 'Deluge' + + +class Deluge(DownloaderBase): + + protocol = ['torrent', 'torrent_magnet'] + log = CPLog(__name__) + drpc = None + + def connect(self, reconnect = False): + """ Connect to the delugeRPC, re-use connection when already available + :param reconnect: force reconnect + :return: DelugeRPC instance + """ + + # Load host from config and split out port. + host = cleanHost(self.conf('host'), protocol = False).split(':') + + # Force host assignment + if len(host) == 1: + host.append(80) + + if not isInt(host[1]): + log.error('Config properties are not filled in correctly, port is missing.') + return False + + if not self.drpc or reconnect: + self.drpc = DelugeRPC(host[0], port = host[1], username = self.conf('username'), password = self.conf('password')) + + return self.drpc + + def download(self, data = None, media = None, filedata = None): + """ Send a torrent/nzb file to the downloader + + :param data: dict returned from provider + Contains the release information + :param media: media dict with information + Used for creating the filename when possible + :param filedata: downloaded torrent/nzb filedata + The file gets downloaded in the searcher and send to this function + This is done to have failed checking before using the downloader, so the downloader + doesn't need to worry about that + :return: boolean + One faile returns false, but the downloaded should log his own errors + """ + + if not media: media = {} + if not data: data = {} + + log.info('Sending "%s" (%s) to Deluge.', (data.get('name'), 
data.get('protocol'))) + + if not self.connect(): + return False + + if not filedata and data.get('protocol') == 'torrent': + log.error('Failed sending torrent, no data') + return False + + # Set parameters for Deluge + options = { + 'add_paused': self.conf('paused', default = 0), + 'label': self.conf('label') + } + + if self.conf('directory'): + if os.path.isdir(self.conf('directory')): + options['download_location'] = self.conf('directory') + else: + log.error('Download directory from Deluge settings: %s doesn\'t exist', self.conf('directory')) + + if self.conf('completed_directory'): + if os.path.isdir(self.conf('completed_directory')): + options['move_completed'] = 1 + options['move_completed_path'] = self.conf('completed_directory') + else: + log.error('Download directory from Deluge settings: %s doesn\'t exist', self.conf('directory')) + + if data.get('seed_ratio'): + options['stop_at_ratio'] = 1 + options['stop_ratio'] = tryFloat(data.get('seed_ratio')) + +# Deluge only has seed time as a global option. Might be added in +# in a future API release. 
+# if data.get('seed_time'): + + # Send request to Deluge + if data.get('protocol') == 'torrent_magnet': + remote_torrent = self.drpc.add_torrent_magnet(data.get('url'), options) + else: + filename = self.createFileName(data, filedata, media) + remote_torrent = self.drpc.add_torrent_file(filename, filedata, options) + + if not remote_torrent: + log.error('Failed sending torrent to Deluge') + return False + + log.info('Torrent sent to Deluge successfully.') + return self.downloadReturnId(remote_torrent) + + def test(self): + """ Check if connection works + :return: bool + """ + if self.connect(True) and self.drpc.test(): + return True + return False + + def getAllDownloadStatus(self, ids): + """ Get status of all active downloads + + :param ids: list of (mixed) downloader ids + Used to match the releases for this downloader as there could be + other downloaders active that it should ignore + :return: list of releases + """ + + log.debug('Checking Deluge download status.') + + if not self.connect(): + return [] + + release_downloads = ReleaseDownloadList(self) + + queue = self.drpc.get_alltorrents(ids) + + if not queue: + log.debug('Nothing in queue or error') + return [] + + for torrent_id in queue: + torrent = queue[torrent_id] + + if not 'hash' in torrent: + # When given a list of ids, deluge will return an empty item for a non-existant torrent. + continue + + log.debug('name=%s / id=%s / save_path=%s / move_on_completed=%s / move_completed_path=%s / hash=%s / progress=%s / state=%s / eta=%s / ratio=%s / stop_ratio=%s / is_seed=%s / is_finished=%s / paused=%s', (torrent['name'], torrent['hash'], torrent['save_path'], torrent['move_on_completed'], torrent['move_completed_path'], torrent['hash'], torrent['progress'], torrent['state'], torrent['eta'], torrent['ratio'], torrent['stop_ratio'], torrent['is_seed'], torrent['is_finished'], torrent['paused'])) + + # Deluge has no easy way to work out if a torrent is stalled or failing. 
+ #status = 'failed' + status = 'busy' + # If an user opts to seed a torrent forever (usually associated to private trackers usage), stop_ratio will be 0 or -1 (depending on Deluge version). + # In this scenario the status of the torrent would never change from BUSY to SEEDING. + # The last check takes care of this case. + if torrent['is_seed'] and ((tryFloat(torrent['ratio']) < tryFloat(torrent['stop_ratio'])) or (tryFloat(torrent['stop_ratio']) < 0)): + # We have torrent['seeding_time'] to work out what the seeding time is, but we do not + # have access to the downloader seed_time, as with deluge we have no way to pass it + # when the torrent is added. So Deluge will only look at the ratio. + # See above comment in download(). + status = 'seeding' + elif torrent['is_seed'] and torrent['is_finished'] and torrent['paused'] and torrent['state'] == 'Paused': + status = 'completed' + + download_dir = sp(torrent['save_path']) + if torrent['move_on_completed']: + download_dir = torrent['move_completed_path'] + + torrent_files = [] + for file_item in torrent['files']: + torrent_files.append(sp(os.path.join(download_dir, file_item['path']))) + + release_downloads.append({ + 'id': torrent['hash'], + 'name': torrent['name'], + 'status': status, + 'original_status': torrent['state'], + 'seed_ratio': torrent['ratio'], + 'timeleft': str(timedelta(seconds = torrent['eta'])), + 'folder': sp(download_dir if len(torrent_files) == 1 else os.path.join(download_dir, torrent['name'])), + 'files': torrent_files, + }) + + return release_downloads + + def pause(self, release_download, pause = True): + if pause: + return self.drpc.pause_torrent([release_download['id']]) + else: + return self.drpc.resume_torrent([release_download['id']]) + + def removeFailed(self, release_download): + log.info('%s failed downloading, deleting...', release_download['name']) + return self.drpc.remove_torrent(release_download['id'], True) + + def processComplete(self, release_download, delete_files = False): 
+ log.debug('Requesting Deluge to remove the torrent %s%s.', (release_download['name'], ' and cleanup the downloaded files' if delete_files else '')) + return self.drpc.remove_torrent(release_download['id'], remove_local_data = delete_files) + + +class DelugeRPC(object): + + host = 'localhost' + port = 58846 + username = None + password = None + client = None + + def __init__(self, host = 'localhost', port = 58846, username = None, password = None): + super(DelugeRPC, self).__init__() + + self.host = host + self.port = port + self.username = username + self.password = password + + def connect(self): + self.client = DelugeClient() + self.client.connect(self.host, int(self.port), self.username, self.password) + + def test(self): + try: + self.connect() + except: + return False + return True + + def add_torrent_magnet(self, torrent, options): + torrent_id = False + try: + self.connect() + torrent_id = self.client.core.add_torrent_magnet(torrent, options).get() + if not torrent_id: + torrent_id = self._check_torrent(True, torrent) + + if torrent_id and options['label']: + self.client.label.set_torrent(torrent_id, options['label']).get() + except Exception as err: + log.error('Failed to add torrent magnet %s: %s %s', (torrent, err, traceback.format_exc())) + finally: + if self.client: + self.disconnect() + + return torrent_id + + def add_torrent_file(self, filename, torrent, options): + torrent_id = False + try: + self.connect() + torrent_id = self.client.core.add_torrent_file(filename, b64encode(torrent), options).get() + if not torrent_id: + torrent_id = self._check_torrent(False, torrent) + + if torrent_id and options['label']: + self.client.label.set_torrent(torrent_id, options['label']).get() + except Exception as err: + log.error('Failed to add torrent file %s: %s %s', (filename, err, traceback.format_exc())) + finally: + if self.client: + self.disconnect() + + return torrent_id + + def get_alltorrents(self, ids): + ret = False + try: + self.connect() + ret = 
self.client.core.get_torrents_status({'id': ids}, ('name', 'hash', 'save_path', 'move_completed_path', 'progress', 'state', 'eta', 'ratio', 'stop_ratio', 'is_seed', 'is_finished', 'paused', 'move_on_completed', 'files')).get() + except Exception as err: + log.error('Failed to get all torrents: %s %s', (err, traceback.format_exc())) + finally: + if self.client: + self.disconnect() + return ret + + def pause_torrent(self, torrent_ids): + try: + self.connect() + self.client.core.pause_torrent(torrent_ids).get() + except Exception as err: + log.error('Failed to pause torrent: %s %s', (err, traceback.format_exc())) + finally: + if self.client: + self.disconnect() + + def resume_torrent(self, torrent_ids): + try: + self.connect() + self.client.core.resume_torrent(torrent_ids).get() + except Exception as err: + log.error('Failed to resume torrent: %s %s', (err, traceback.format_exc())) + finally: + if self.client: + self.disconnect() + + def remove_torrent(self, torrent_id, remove_local_data): + ret = False + try: + self.connect() + ret = self.client.core.remove_torrent(torrent_id, remove_local_data).get() + except Exception as err: + log.error('Failed to remove torrent: %s %s', (err, traceback.format_exc())) + finally: + if self.client: + self.disconnect() + return ret + + def disconnect(self): + self.client.disconnect() + + def _check_torrent(self, magnet, torrent): + # Torrent not added, check if it already existed. 
+ if magnet: + torrent_hash = re.findall('urn:btih:([\w]{32,40})', torrent)[0] + else: + info = bdecode(torrent)["info"] + torrent_hash = sha1(benc(info)).hexdigest() + + # Convert base 32 to hex + if len(torrent_hash) == 32: + torrent_hash = b16encode(b32decode(torrent_hash)) + + torrent_hash = torrent_hash.lower() + torrent_check = self.client.core.get_torrent_status(torrent_hash, {}).get() + if torrent_check['hash']: + return torrent_hash + + return False + + +config = [{ + 'name': 'deluge', + 'groups': [ + { + 'tab': 'downloaders', + 'list': 'download_providers', + 'name': 'deluge', + 'label': 'Deluge', + 'description': 'Use Deluge to download torrents.', + 'wizard': True, + 'options': [ + { + 'name': 'enabled', + 'default': 0, + 'type': 'enabler', + 'radio_group': 'torrent', + }, + { + 'name': 'host', + 'default': 'localhost:58846', + 'description': 'Hostname with port. Usually localhost:58846', + }, + { + 'name': 'username', + }, + { + 'name': 'password', + 'type': 'password', + }, + { + 'name': 'directory', + 'type': 'directory', + 'description': 'Download to this directory. Keep empty for default Deluge download directory.', + }, + { + 'name': 'completed_directory', + 'type': 'directory', + 'description': 'Move completed torrent to this directory. 
Keep empty for default Deluge options.', + 'advanced': True, + }, + { + 'name': 'label', + 'description': 'Label to add to torrents in the Deluge UI.', + }, + { + 'name': 'remove_complete', + 'label': 'Remove torrent', + 'type': 'bool', + 'default': True, + 'advanced': True, + 'description': 'Remove the torrent from Deluge after it has finished seeding.', + }, + { + 'name': 'delete_files', + 'label': 'Remove files', + 'default': True, + 'type': 'bool', + 'advanced': True, + 'description': 'Also remove the leftover files.', + }, + { + 'name': 'paused', + 'type': 'bool', + 'advanced': True, + 'default': False, + 'description': 'Add the torrent paused.', + }, + { + 'name': 'manual', + 'default': 0, + 'type': 'bool', + 'advanced': True, + 'description': 'Disable this downloader for automated searches, but use it when I manually send a release.', + }, + { + 'name': 'delete_failed', + 'default': True, + 'advanced': True, + 'type': 'bool', + 'description': 'Delete a release after the download has failed.', + }, + ], + } + ], +}] diff --git a/couchpotato/core/downloaders/hadouken.py b/couchpotato/core/downloaders/hadouken.py new file mode 100644 index 0000000000..f96a374e10 --- /dev/null +++ b/couchpotato/core/downloaders/hadouken.py @@ -0,0 +1,590 @@ +from base64 import b16encode, b32decode, b64encode +from distutils.version import LooseVersion +from hashlib import sha1 +import httplib +import json +import os +import re +import urllib2 + +from couchpotato.core._base.downloader.main import DownloaderBase, ReleaseDownloadList +from couchpotato.core.helpers.encoding import isInt, sp +from couchpotato.core.helpers.variable import cleanHost +from couchpotato.core.logger import CPLog +from bencode import bencode as benc, bdecode + + +log = CPLog(__name__) + +autoload = 'Hadouken' + + +class Hadouken(DownloaderBase): + protocol = ['torrent', 'torrent_magnet'] + hadouken_api = None + + def connect(self): + # Load host from config and split out port. 
+ host = cleanHost(self.conf('host'), protocol = False).split(':') + + if not isInt(host[1]): + log.error('Config properties are not filled in correctly, port is missing.') + return False + + # This is where v4 and v5 begin to differ + if(self.conf('version') == 'v4'): + if not self.conf('api_key'): + log.error('Config properties are not filled in correctly, API key is missing.') + return False + + url = 'http://' + str(host[0]) + ':' + str(host[1]) + '/jsonrpc' + client = JsonRpcClient(url, 'Token ' + self.conf('api_key')) + self.hadouken_api = HadoukenAPIv4(client) + + return True + else: + auth_type = self.conf('auth_type') + header = None + + if auth_type == 'api_key': + header = 'Token ' + self.conf('api_key') + elif auth_type == 'user_pass': + header = 'Basic ' + b64encode(self.conf('auth_user') + ':' + self.conf('auth_pass')) + + url = 'http://' + str(host[0]) + ':' + str(host[1]) + '/api' + client = JsonRpcClient(url, header) + self.hadouken_api = HadoukenAPIv5(client) + + return True + + return False + + def download(self, data = None, media = None, filedata = None): + """ Send a torrent/nzb file to the downloader + + :param data: dict returned from provider + Contains the release information + :param media: media dict with information + Used for creating the filename when possible + :param filedata: downloaded torrent/nzb filedata + The file gets downloaded in the searcher and send to this function + This is done to have failed checking before using the downloader, so the downloader + doesn't need to worry about that + :return: boolean + One faile returns false, but the downloaded should log his own errors + """ + + if not media: media = {} + if not data: data = {} + + log.debug("Sending '%s' (%s) to Hadouken.", (data.get('name'), data.get('protocol'))) + + if not self.connect(): + return False + + torrent_params = {} + + if self.conf('label'): + torrent_params['label'] = self.conf('label') + # Set the tags array since that is what v5 expects. 
+ torrent_params['tags'] = [self.conf('label')] + + torrent_filename = self.createFileName(data, filedata, media) + + if data.get('protocol') == 'torrent_magnet': + torrent_hash = re.findall('urn:btih:([\w]{32,40})', data.get('url'))[0].upper() + torrent_params['trackers'] = self.torrent_trackers + torrent_params['name'] = torrent_filename + else: + info = bdecode(filedata)['info'] + torrent_hash = sha1(benc(info)).hexdigest().upper() + + # Convert base 32 to hex + if len(torrent_hash) == 32: + torrent_hash = b16encode(b32decode(torrent_hash)) + + # Send request to Hadouken + if data.get('protocol') == 'torrent_magnet': + self.hadouken_api.add_magnet_link(data.get('url'), torrent_params) + else: + self.hadouken_api.add_file(filedata, torrent_params) + + return self.downloadReturnId(torrent_hash) + + def test(self): + """ Tests the given host:port and API key """ + + if not self.connect(): + return False + + version = self.hadouken_api.get_version() + + if not version: + log.error('Could not get Hadouken version.') + return False + + # The minimum required version of Hadouken is 4.5.6. + if LooseVersion(version) >= LooseVersion('4.5.6'): + return True + + log.error('Hadouken v4.5.6 (or newer) required. 
Found v%s', version) + return False + + def getAllDownloadStatus(self, ids): + """ Get status of all active downloads + + :param ids: list of (mixed) downloader ids + Used to match the releases for this downloader as there could be + other downloaders active that it should ignore + :return: list of releases + """ + + log.debug('Checking Hadouken download status.') + + if not self.connect(): + return [] + + release_downloads = ReleaseDownloadList(self) + queue = self.hadouken_api.get_by_hash_list(ids) + + if not queue: + return [] + + for torrent in queue: + if torrent is None: + continue + + torrent_filelist = self.hadouken_api.get_files_by_hash(torrent.info_hash) + torrent_files = [] + + for file_item in torrent_filelist: + torrent_files.append(sp(os.path.join(torrent.save_path, file_item))) + + release_downloads.append({ + 'id': torrent.info_hash.upper(), + 'name': torrent.name, + 'status': torrent.get_status(), + 'seed_ratio': torrent.get_seed_ratio(), + 'original_status': torrent.state, + 'timeleft': -1, + 'folder': sp(torrent.save_path if len(torrent_files == 1) else os.path.join(torrent.save_path, torrent.name)), + 'files': torrent_files + }) + + return release_downloads + + def pause(self, release_download, pause = True): + """ Pauses or resumes the torrent specified by the ID field + in release_download. + + Keyword arguments: + release_download -- The CouchPotato release_download to pause/resume. + pause -- Boolean indicating whether to pause or resume. + """ + + if not self.connect(): + return False + + return self.hadouken_api.pause(release_download['id'], pause) + + def removeFailed(self, release_download): + """ Removes a failed torrent and also remove the data associated with it. + + Keyword arguments: + release_download -- The CouchPotato release_download to remove. 
+ """ + + log.info('%s failed downloading, deleting...', release_download['name']) + + if not self.connect(): + return False + + return self.hadouken_api.remove(release_download['id'], remove_data = True) + + def processComplete(self, release_download, delete_files = False): + """ Removes the completed torrent from Hadouken and optionally removes the data + associated with it. + + Keyword arguments: + release_download -- The CouchPotato release_download to remove. + delete_files: Boolean indicating whether to remove the associated data. + """ + + log.debug('Requesting Hadouken to remove the torrent %s%s.', + (release_download['name'], ' and cleanup the downloaded files' if delete_files else '')) + + if not self.connect(): + return False + + return self.hadouken_api.remove(release_download['id'], remove_data = delete_files) + + +class JsonRpcClient(object): + def __init__(self, url, auth_header = None): + self.url = url + self.requestId = 0 + + self.opener = urllib2.build_opener() + self.opener.addheaders = [ + ('User-Agent', 'couchpotato-hadouken-client/1.0'), + ('Accept', 'application/json'), + ('Content-Type', 'application/json') + ] + + if auth_header: + self.opener.addheaders.append(('Authorization', auth_header)) + + def invoke(self, method, params): + self.requestId += 1 + + data = { + 'jsonrpc': '2.0', + 'id': self.requestId, + 'method': method, + 'params': params + } + + request = urllib2.Request(self.url, data = json.dumps(data)) + + try: + f = self.opener.open(request) + response = f.read() + f.close() + + obj = json.loads(response) + + if 'error' in obj.keys(): + log.error('JSONRPC error, %s: %s', (obj['error']['code'], obj['error']['message'])) + return False + + if 'result' in obj.keys(): + return obj['result'] + + return True + except httplib.InvalidURL as err: + log.error('Invalid Hadouken host, check your config %s', err) + except urllib2.HTTPError as err: + if err.code == 401: + log.error('Could not authenticate, check your config') + else: + 
log.error('Hadouken HTTPError: %s', err) + except urllib2.URLError as err: + log.error('Unable to connect to Hadouken %s', err) + + return False + + +class HadoukenAPI(object): + def __init__(self, rpc_client): + self.rpc = rpc_client + + if not rpc_client: + log.error('No JSONRPC client specified.') + + def add_file(self, data, params): + """ Add a file to Hadouken with the specified parameters. + + Keyword arguments: + filedata -- The binary torrent data. + torrent_params -- Additional parameters for the file. + """ + pass + + def add_magnet_link(self, link, params): + """ Add a magnet link to Hadouken with the specified parameters. + + Keyword arguments: + magnetLink -- The magnet link to send. + torrent_params -- Additional parameters for the magnet link. + """ + pass + + def get_by_hash_list(self, infoHashList): + """ Gets a list of torrents filtered by the given info hash list. + + Keyword arguments: + infoHashList -- A list of info hashes. + """ + pass + + def get_files_by_hash(self, infoHash): + """ Gets a list of files for the torrent identified by the + given info hash. + + Keyword arguments: + infoHash -- The info hash of the torrent to return files for. + """ + pass + + def get_version(self): + """ Gets the version, commitish and build date of Hadouken. """ + pass + + def pause(self, infoHash, pause): + """ Pauses/unpauses the torrent identified by the given info hash. + + Keyword arguments: + infoHash -- The info hash of the torrent to operate on. + pause -- If true, pauses the torrent. Otherwise resumes. + """ + pass + + def remove(self, infoHash, remove_data = False): + """ Removes the torrent identified by the given info hash and + optionally removes the data as well. + + Keyword arguments: + infoHash -- The info hash of the torrent to remove. + remove_data -- If true, removes the data associated with the torrent. 
+ """ + pass + + +class TorrentItem(object): + @property + def info_hash(self): + pass + + @property + def save_path(self): + pass + + @property + def name(self): + pass + + @property + def state(self): + pass + + def get_status(self): + """ Returns the CouchPotato status for a given torrent.""" + pass + + def get_seed_ratio(self): + """ Returns the seed ratio for a given torrent.""" + pass + + +class TorrentItemv5(TorrentItem): + def __init__(self, obj): + self.obj = obj + + def info_hash(self): + return self.obj[0] + + def save_path(self): + return self.obj[26] + + def name(self): + return self.obj[2] + + def state(self): + return self.obj[1] + + def get_status(self): + if self.obj[1] == 32: + return 'completed' + + if self.obj[1] == 1: + return 'seeding' + + return 'busy' + + def get_seed_ratio(self): + up = self.obj[6] + down = self.obj[5] + + if up > 0 and down > 0: + return up / down + + return 0 + + +class HadoukenAPIv5(HadoukenAPI): + + def add_file(self, data, params): + return self.rpc.invoke('webui.addTorrent', ['file', b64encode(data), params]) + + def add_magnet_link(self, link, params): + return self.rpc.invoke('webui.addTorrent', ['url', link, params]) + + def get_by_hash_list(self, infoHashList): + torrents = self.rpc.invoke('webui.list', None) + result = [] + + for torrent in torrents['torrents']: + if torrent[0] in infoHashList: + result.append(TorrentItemv5(torrent)) + + return result + + def get_files_by_hash(self, infoHash): + files = self.rpc.invoke('webui.getFiles', [infoHash]) + result = [] + + for file in files['files'][1]: + result.append(file[0]) + + return result + + def get_version(self): + result = self.rpc.invoke('core.getSystemInfo', None) + + if not result: + return False + + return result['versions']['hadouken'] + + def pause(self, infoHash, pause): + if pause: + return self.rpc.invoke('webui.perform', ['pause', infoHash]) + + return self.rpc.invoke('webui.perform', ['resume', infoHash]) + + def remove(self, infoHash, 
remove_data=False): + if remove_data: + return self.rpc.invoke('webui.perform', ['removedata', infoHash]) + + return self.rpc.invoke('webui.perform', ['remove', infoHash]) + + +class TorrentItemv4(TorrentItem): + def __init__(self, obj): + self.obj = obj + + def info_hash(self): + return self.obj['InfoHash'] + + def save_path(self): + return self.obj['SavePath'] + + def name(self): + return self.obj['Name'] + + def state(self): + return self.obj['State'] + + def get_status(self): + if self.obj['IsSeeding'] and self.obj['IsFinished'] and self.obj['Paused']: + return 'completed' + + if self.obj['IsSeeding']: + return 'seeding' + + return 'busy' + + def get_seed_ratio(self): + up = self.obj['TotalUploadedBytes'] + down = self.obj['TotalDownloadedBytes'] + + if up > 0 and down > 0: + return up / down + + return 0 + + +class HadoukenAPIv4(object): + def add_file(self, data, params): + return self.rpc.invoke('torrents.addFile', [b64encode(data), params]) + + def add_magnet_link(self, link, params): + return self.rpc.invoke('torrents.addUrl', [link, params]) + + def get_by_hash_list(self, infoHashList): + torrents = self.rpc.invoke('torrents.getByInfoHashList', [infoHashList]) + result = [] + + for torrent in torrents: + result.append(TorrentItemv4(torrent)) + + return result + + def get_files_by_hash(self, infoHash): + files = self.rpc.invoke('torrents.getFiles', [infoHash]) + result = [] + + for file in files: + result.append(file['Path']) + + return result + + def get_version(self): + result = self.rpc.invoke('core.getVersion', None) + + if not result: + return False + + return result['Version'] + + def pause(self, infoHash, pause): + if pause: + return self.rpc.invoke('torrents.pause', [infoHash]) + + return self.rpc.invoke('torrents.resume', [infoHash]) + + def remove(self, infoHash, remove_data = False): + return self.rpc.invoke('torrents.remove', [infoHash, remove_data]) + + +config = [{ + 'name': 'hadouken', + 'groups': [ + { + 'tab': 'downloaders', + 'list': 
'download_providers', + 'name': 'hadouken', + 'label': 'Hadouken', + 'description': 'Use Hadouken (>= v4.5.6) to download torrents.', + 'wizard': True, + 'options': [ + { + 'name': 'enabled', + 'default': 0, + 'type': 'enabler', + 'radio_group': 'torrent' + }, + { + 'name': 'version', + 'label': 'Version', + 'type': 'dropdown', + 'default': 'v4', + 'values': [('v4.x', 'v4'), ('v5.x', 'v5')], + 'description': 'Hadouken version.', + }, + { + 'name': 'host', + 'default': 'localhost:7890' + }, + { + 'name': 'auth_type', + 'label': 'Auth. type', + 'type': 'dropdown', + 'default': 'api_key', + 'values': [('None', 'none'), ('API key/Token', 'api_key'), ('Username/Password', 'user_pass')], + 'description': 'Type of authentication', + }, + { + 'name': 'api_key', + 'label': 'API key (v4)/Token (v5)', + 'type': 'password' + }, + { + 'name': 'auth_user', + 'label': 'Username', + 'description': '(only for v5)' + }, + { + 'name': 'auth_pass', + 'label': 'Password', + 'type': 'password', + 'description': '(only for v5)' + }, + { + 'name': 'label', + 'description': 'Label to add torrent as.' 
+ } + ] + } + ] +}] diff --git a/couchpotato/core/downloaders/nzbget.py b/couchpotato/core/downloaders/nzbget.py new file mode 100644 index 0000000000..35a71850aa --- /dev/null +++ b/couchpotato/core/downloaders/nzbget.py @@ -0,0 +1,318 @@ +from base64 import standard_b64encode +from datetime import timedelta +import re +import shutil +import socket +import traceback +import xmlrpclib + +from couchpotato.core._base.downloader.main import DownloaderBase, ReleaseDownloadList +from couchpotato.core.helpers.encoding import ss, sp +from couchpotato.core.helpers.variable import tryInt, md5, cleanHost +from couchpotato.core.logger import CPLog + + +log = CPLog(__name__) + +autoload = 'NZBGet' + + +class NZBGet(DownloaderBase): + + protocol = ['nzb'] + rpc = 'xmlrpc' + + def download(self, data = None, media = None, filedata = None): + """ Send a torrent/nzb file to the downloader + + :param data: dict returned from provider + Contains the release information + :param media: media dict with information + Used for creating the filename when possible + :param filedata: downloaded torrent/nzb filedata + The file gets downloaded in the searcher and send to this function + This is done to have failed checking before using the downloader, so the downloader + doesn't need to worry about that + :return: boolean + One faile returns false, but the downloaded should log his own errors + """ + + if not media: media = {} + if not data: data = {} + + if not filedata: + log.error('Unable to get NZB file: %s', traceback.format_exc()) + return False + + log.info('Sending "%s" to NZBGet.', data.get('name')) + + nzb_name = ss('%s.nzb' % self.createNzbName(data, media)) + + rpc = self.getRPC() + + try: + if rpc.writelog('INFO', 'CouchPotato connected to drop off %s.' % nzb_name): + log.debug('Successfully connected to NZBGet') + else: + log.info('Successfully connected to NZBGet, but unable to send a message') + except socket.error: + log.error('NZBGet is not responding. 
Please ensure that NZBGet is running and host setting is correct.') + return False + except xmlrpclib.ProtocolError as e: + if e.errcode == 401: + log.error('Password is incorrect.') + else: + log.error('Protocol Error: %s', e) + return False + + if re.search(r"^0", rpc.version()): + xml_response = rpc.append(nzb_name, self.conf('category'), False, standard_b64encode(filedata.strip())) + else: + xml_response = rpc.append(nzb_name, self.conf('category'), tryInt(self.conf('priority')), False, standard_b64encode(filedata.strip())) + + if xml_response: + log.info('NZB sent successfully to NZBGet') + nzb_id = md5(data['url']) # about as unique as they come ;) + couchpotato_id = "couchpotato=" + nzb_id + groups = rpc.listgroups() + file_id = [item['LastID'] for item in groups if item['NZBFilename'] == nzb_name] + confirmed = rpc.editqueue("GroupSetParameter", 0, couchpotato_id, file_id) + if confirmed: + log.debug('couchpotato parameter set in nzbget download') + return self.downloadReturnId(nzb_id) + else: + log.error('NZBGet could not add %s to the queue.', nzb_name) + return False + + def test(self): + """ Check if connection works + :return: bool + """ + + rpc = self.getRPC() + + try: + if rpc.writelog('INFO', 'CouchPotato connected to test connection'): + log.debug('Successfully connected to NZBGet') + else: + log.info('Successfully connected to NZBGet, but unable to send a message') + except socket.error: + log.error('NZBGet is not responding. 
Please ensure that NZBGet is running and host setting is correct.') + return False + except xmlrpclib.ProtocolError as e: + if e.errcode == 401: + log.error('Password is incorrect.') + else: + log.error('Protocol Error: %s', e) + return False + + return True + + def getAllDownloadStatus(self, ids): + """ Get status of all active downloads + + :param ids: list of (mixed) downloader ids + Used to match the releases for this downloader as there could be + other downloaders active that it should ignore + :return: list of releases + """ + + log.debug('Checking NZBGet download status.') + + rpc = self.getRPC() + + try: + if rpc.writelog('DETAIL', 'CouchPotato connected to check status'): + log.debug('Successfully connected to NZBGet') + else: + log.info('Successfully connected to NZBGet, but unable to send a message') + except socket.error: + log.error('NZBGet is not responding. Please ensure that NZBGet is running and host setting is correct.') + return [] + except xmlrpclib.ProtocolError as e: + if e.errcode == 401: + log.error('Password is incorrect.') + else: + log.error('Protocol Error: %s', e) + return [] + + # Get NZBGet data + try: + status = rpc.status() + groups = rpc.listgroups() + queue = rpc.postqueue(0) + history = rpc.history() + except: + log.error('Failed getting data: %s', traceback.format_exc(1)) + return [] + + release_downloads = ReleaseDownloadList(self) + + for nzb in groups: + try: + nzb_id = [param['Value'] for param in nzb['Parameters'] if param['Name'] == 'couchpotato'][0] + except: + nzb_id = nzb['NZBID'] + + if nzb_id in ids: + log.debug('Found %s in NZBGet download queue', nzb['NZBFilename']) + timeleft = -1 + try: + if nzb['ActiveDownloads'] > 0 and nzb['DownloadRate'] > 0 and not (status['DownloadPaused'] or status['Download2Paused']): + timeleft = str(timedelta(seconds = nzb['RemainingSizeMB'] / status['DownloadRate'] * 2 ^ 20)) + except: + pass + + release_downloads.append({ + 'id': nzb_id, + 'name': nzb['NZBFilename'], + 
'original_status': 'DOWNLOADING' if nzb['ActiveDownloads'] > 0 else 'QUEUED', + # Seems to have no native API function for time left. This will return the time left after NZBGet started downloading this item + 'timeleft': timeleft, + }) + + for nzb in queue: # 'Parameters' is not passed in rpc.postqueue + if nzb['NZBID'] in ids: + log.debug('Found %s in NZBGet postprocessing queue', nzb['NZBFilename']) + release_downloads.append({ + 'id': nzb['NZBID'], + 'name': nzb['NZBFilename'], + 'original_status': nzb['Stage'], + 'timeleft': str(timedelta(seconds = 0)) if not status['PostPaused'] else -1, + }) + + for nzb in history: + try: + nzb_id = [param['Value'] for param in nzb['Parameters'] if param['Name'] == 'couchpotato'][0] + except: + nzb_id = nzb['NZBID'] + + if nzb_id in ids: + log.debug('Found %s in NZBGet history. TotalStatus: %s, ParStatus: %s, ScriptStatus: %s, Log: %s', (nzb['NZBFilename'] , nzb['Status'], nzb['ParStatus'], nzb['ScriptStatus'] , nzb['Log'])) + release_downloads.append({ + 'id': nzb_id, + 'name': nzb['NZBFilename'], + 'status': 'completed' if 'SUCCESS' in nzb['Status'] else 'failed', + 'original_status': nzb['Status'], + 'timeleft': str(timedelta(seconds = 0)), + 'folder': sp(nzb['DestDir']) + }) + + return release_downloads + + def removeFailed(self, release_download): + + log.info('%s failed downloading, deleting...', release_download['name']) + + rpc = self.getRPC() + + try: + if rpc.writelog('INFO', 'CouchPotato connected to delete some history'): + log.debug('Successfully connected to NZBGet') + else: + log.info('Successfully connected to NZBGet, but unable to send a message') + except socket.error: + log.error('NZBGet is not responding. 
Please ensure that NZBGet is running and host setting is correct.') + return False + except xmlrpclib.ProtocolError as e: + if e.errcode == 401: + log.error('Password is incorrect.') + else: + log.error('Protocol Error: %s', e) + return False + + try: + history = rpc.history() + nzb_id = None + path = None + + for hist in history: + for param in hist['Parameters']: + if param['Name'] == 'couchpotato' and param['Value'] == release_download['id']: + nzb_id = hist['ID'] + path = hist['DestDir'] + + if nzb_id and path and rpc.editqueue('HistoryDelete', 0, "", [tryInt(nzb_id)]): + shutil.rmtree(path, True) + except: + log.error('Failed deleting: %s', traceback.format_exc(0)) + return False + + return True + + def getRPC(self): + url = cleanHost(host = self.conf('host'), ssl = self.conf('ssl'), username = self.conf('username'), password = self.conf('password')) + self.rpc + return xmlrpclib.ServerProxy(url) + + +config = [{ + 'name': 'nzbget', + 'groups': [ + { + 'tab': 'downloaders', + 'list': 'download_providers', + 'name': 'nzbget', + 'label': 'NZBGet', + 'description': 'Use NZBGet to download NZBs.', + 'wizard': True, + 'options': [ + { + 'name': 'enabled', + 'default': 0, + 'type': 'enabler', + 'radio_group': 'nzb', + }, + { + 'name': 'host', + 'default': 'localhost:6789', + 'description': 'Hostname with port. Usually localhost:6789', + }, + { + 'name': 'ssl', + 'default': 0, + 'type': 'bool', + 'advanced': True, + 'description': 'Use HyperText Transfer Protocol Secure, or https', + }, + { + 'name': 'username', + 'default': 'nzbget', + 'advanced': True, + 'description': 'Set a different username to connect. Default: nzbget', + }, + { + 'name': 'password', + 'type': 'password', + 'description': 'Default NZBGet password is tegbzn6789', + }, + { + 'name': 'category', + 'default': 'Movies', + 'description': 'The category CP places the nzb in. 
Like movies or couchpotato', + }, + { + 'name': 'priority', + 'advanced': True, + 'default': '0', + 'type': 'dropdown', + 'values': [('Very Low', -100), ('Low', -50), ('Normal', 0), ('High', 50), ('Very High', 100), ('Forced', 900)], + 'description': 'Only change this if you are using NZBget 13.0 or higher', + }, + { + 'name': 'manual', + 'default': 0, + 'type': 'bool', + 'advanced': True, + 'description': 'Disable this downloader for automated searches, but use it when I manually send a release.', + }, + { + 'name': 'delete_failed', + 'default': True, + 'advanced': True, + 'type': 'bool', + 'description': 'Delete a release after the download has failed.', + }, + ], + } + ], +}] diff --git a/couchpotato/core/downloaders/nzbget/__init__.py b/couchpotato/core/downloaders/nzbget/__init__.py deleted file mode 100644 index 403a7e7d10..0000000000 --- a/couchpotato/core/downloaders/nzbget/__init__.py +++ /dev/null @@ -1,54 +0,0 @@ -from .main import NZBGet - -def start(): - return NZBGet() - -config = [{ - 'name': 'nzbget', - 'groups': [ - { - 'tab': 'downloaders', - 'list': 'download_providers', - 'name': 'nzbget', - 'label': 'NZBGet', - 'description': 'Use NZBGet to download NZBs.', - 'options': [ - { - 'name': 'enabled', - 'default': 0, - 'type': 'enabler', - 'radio_group': 'nzb', - }, - { - 'name': 'host', - 'default': 'localhost:6789', - 'description': 'Hostname with port. Usually localhost:6789', - }, - { - 'name': 'password', - 'type': 'password', - 'description': 'Default NZBGet password is tegbzn6789', - }, - { - 'name': 'category', - 'default': 'Movies', - 'description': 'The category CP places the nzb in. 
Like movies or couchpotato', - }, - { - 'name': 'priority', - 'default': '0', - 'type': 'dropdown', - 'values': [('Very Low', -100), ('Low', -50), ('Normal', 0), ('High', 50), ('Very High', 100)], - 'description': 'Only change this if you are using NZBget 9.0 or higher', - }, - { - 'name': 'manual', - 'default': 0, - 'type': 'bool', - 'advanced': True, - 'description': 'Disable this downloader for automated searches, but use it when I manually send a release.', - }, - ], - } - ], -}] diff --git a/couchpotato/core/downloaders/nzbget/main.py b/couchpotato/core/downloaders/nzbget/main.py deleted file mode 100644 index 82d8a3634e..0000000000 --- a/couchpotato/core/downloaders/nzbget/main.py +++ /dev/null @@ -1,56 +0,0 @@ -from base64 import standard_b64encode -from couchpotato.core.downloaders.base import Downloader -from couchpotato.core.helpers.encoding import ss -from couchpotato.core.helpers.variable import tryInt -from couchpotato.core.logger import CPLog -import re -import socket -import traceback -import xmlrpclib - -log = CPLog(__name__) - -class NZBGet(Downloader): - - type = ['nzb'] - - url = 'http://nzbget:%(password)s@%(host)s/xmlrpc' - - def download(self, data = {}, movie = {}, filedata = None): - - if not filedata: - log.error('Unable to get NZB file: %s', traceback.format_exc()) - return False - - log.info('Sending "%s" to NZBGet.', data.get('name')) - - url = self.url % {'host': self.conf('host'), 'password': self.conf('password')} - nzb_name = ss('%s.nzb' % self.createNzbName(data, movie)) - - rpc = xmlrpclib.ServerProxy(url) - try: - if rpc.writelog('INFO', 'CouchPotato connected to drop off %s.' % nzb_name): - log.info('Successfully connected to NZBGet') - else: - log.info('Successfully connected to NZBGet, but unable to send a message') - except socket.error: - log.error('NZBGet is not responding. 
Please ensure that NZBGet is running and host setting is correct.') - return False - except xmlrpclib.ProtocolError, e: - if e.errcode == 401: - log.error('Password is incorrect.') - else: - log.error('Protocol Error: %s', e) - return False - - if re.search(r"^0", rpc.version()): - xml_response = rpc.append(nzb_name, self.conf('category'), False, standard_b64encode(filedata.strip())) - else: - xml_response = rpc.append(nzb_name, self.conf('category'), tryInt(self.conf('priority')), False, standard_b64encode(filedata.strip())) - - if xml_response: - log.info('NZB sent successfully to NZBGet') - return True - else: - log.error('NZBGet could not add %s to the queue.', nzb_name) - return False diff --git a/couchpotato/core/downloaders/nzbvortex.py b/couchpotato/core/downloaders/nzbvortex.py new file mode 100644 index 0000000000..5b1fc843dc --- /dev/null +++ b/couchpotato/core/downloaders/nzbvortex.py @@ -0,0 +1,245 @@ +from base64 import b64encode +import os +from uuid import uuid4 +import hashlib +import traceback + +from requests import HTTPError + +from couchpotato.core._base.downloader.main import DownloaderBase, ReleaseDownloadList +from couchpotato.core.helpers.encoding import tryUrlencode, sp +from couchpotato.core.helpers.variable import cleanHost +from couchpotato.core.logger import CPLog + + +log = CPLog(__name__) + +autoload = 'NZBVortex' + + +class NZBVortex(DownloaderBase): + + protocol = ['nzb'] + api_level = None + session_id = None + + def download(self, data = None, media = None, filedata = None): + """ Send a torrent/nzb file to the downloader + + :param data: dict returned from provider + Contains the release information + :param media: media dict with information + Used for creating the filename when possible + :param filedata: downloaded torrent/nzb filedata + The file gets downloaded in the searcher and send to this function + This is done to have failed checking before using the downloader, so the downloader + doesn't need to worry about that + 
:return: boolean + One faile returns false, but the downloaded should log his own errors + """ + + if not media: media = {} + if not data: data = {} + + # Send the nzb + try: + nzb_filename = self.createFileName(data, filedata, media, unique_tag = True) + response = self.call('nzb/add', files = {'file': (nzb_filename, filedata, 'application/octet-stream')}, parameters = { + 'name': nzb_filename, + 'groupname': self.conf('group') + }) + + if response and response.get('result', '').lower() == 'ok': + return self.downloadReturnId(nzb_filename) + + log.error('Something went wrong sending the NZB file. Response: %s', response) + return False + except: + log.error('Something went wrong sending the NZB file: %s', traceback.format_exc()) + return False + + def test(self): + """ Check if connection works + :return: bool + """ + + try: + login_result = self.login() + except: + return False + + return login_result + + def getAllDownloadStatus(self, ids): + """ Get status of all active downloads + + :param ids: list of (mixed) downloader ids + Used to match the releases for this downloader as there could be + other downloaders active that it should ignore + :return: list of releases + """ + + raw_statuses = self.call('nzb') + + release_downloads = ReleaseDownloadList(self) + for nzb in raw_statuses.get('nzbs', []): + nzb_id = os.path.basename(nzb['nzbFileName']) + if nzb_id in ids: + + # Check status + status = 'busy' + if nzb['state'] == 20: + status = 'completed' + elif nzb['state'] in [21, 22, 24]: + status = 'failed' + + release_downloads.append({ + 'temp_id': nzb['id'], + 'id': nzb_id, + 'name': nzb['uiTitle'], + 'status': status, + 'original_status': nzb['state'], + 'timeleft': -1, + 'folder': sp(nzb['destinationPath']), + }) + + return release_downloads + + def removeFailed(self, release_download): + + log.info('%s failed downloading, deleting...', release_download['name']) + + try: + self.call('nzb/%s/cancel' % release_download['temp_id']) + except: + log.error('Failed 
deleting: %s', traceback.format_exc(0)) + return False + + return True + + def login(self): + + nonce = self.call('auth/nonce', auth = False).get('authNonce') + cnonce = uuid4().hex + hashed = b64encode(hashlib.sha256('%s:%s:%s' % (nonce, cnonce, self.conf('api_key'))).digest()) + + params = { + 'nonce': nonce, + 'cnonce': cnonce, + 'hash': hashed + } + + login_data = self.call('auth/login', parameters = params, auth = False) + + # Save for later + if login_data.get('loginResult') == 'successful': + self.session_id = login_data.get('sessionID') + return True + + log.error('Login failed, please check you api-key') + return False + + def call(self, call, parameters = None, is_repeat = False, auth = True, *args, **kwargs): + + # Login first + if not parameters: parameters = {} + if not self.session_id and auth: + self.login() + + # Always add session id to request + if self.session_id: + parameters['sessionid'] = self.session_id + + params = tryUrlencode(parameters) + + url = cleanHost(self.conf('host')) + 'api/' + call + + try: + data = self.getJsonData('%s%s' % (url, '?' + params if params else ''), *args, cache_timeout = 0, show_error = False, **kwargs) + + if data: + return data + except HTTPError as e: + sc = e.response.status_code + if sc == 403: + # Try login and do again + if not is_repeat: + self.login() + return self.call(call, parameters = parameters, is_repeat = True, **kwargs) + + log.error('Failed to parsing %s: %s', (self.getName(), traceback.format_exc())) + except: + log.error('Failed to parsing %s: %s', (self.getName(), traceback.format_exc())) + + return {} + + def getApiLevel(self): + + if not self.api_level: + + try: + data = self.call('app/apilevel', auth = False) + self.api_level = float(data.get('apilevel')) + except HTTPError as e: + sc = e.response.status_code + if sc == 403: + log.error('This version of NZBVortex isn\'t supported. 
Please update to 2.8.6 or higher') + else: + log.error('NZBVortex doesn\'t seem to be running or maybe the remote option isn\'t enabled yet: %s', traceback.format_exc(1)) + + return self.api_level + + def isEnabled(self, manual = False, data = None): + if not data: data = {} + return super(NZBVortex, self).isEnabled(manual, data) and self.getApiLevel() + + +config = [{ + 'name': 'nzbvortex', + 'groups': [ + { + 'tab': 'downloaders', + 'list': 'download_providers', + 'name': 'nzbvortex', + 'label': 'NZBVortex', + 'description': 'Use NZBVortex to download NZBs.', + 'wizard': True, + 'options': [ + { + 'name': 'enabled', + 'default': 0, + 'type': 'enabler', + 'radio_group': 'nzb', + }, + { + 'name': 'host', + 'default': 'https://localhost:4321', + 'description': 'Hostname with port. Usually https://localhost:4321', + }, + { + 'name': 'api_key', + 'label': 'Api Key', + }, + { + 'name': 'group', + 'label': 'Group', + 'description': 'The group CP places the nzb in. Make sure to create it in NZBVortex.', + }, + { + 'name': 'manual', + 'default': False, + 'type': 'bool', + 'advanced': True, + 'description': 'Disable this downloader for automated searches, but use it when I manually send a release.', + }, + { + 'name': 'delete_failed', + 'default': True, + 'advanced': True, + 'type': 'bool', + 'description': 'Delete a release after the download has failed.', + }, + ], + } + ], +}] diff --git a/couchpotato/core/downloaders/nzbvortex/__init__.py b/couchpotato/core/downloaders/nzbvortex/__init__.py deleted file mode 100644 index f1604ea84b..0000000000 --- a/couchpotato/core/downloaders/nzbvortex/__init__.py +++ /dev/null @@ -1,47 +0,0 @@ -from .main import NZBVortex - -def start(): - return NZBVortex() - -config = [{ - 'name': 'nzbvortex', - 'groups': [ - { - 'tab': 'downloaders', - 'list': 'download_providers', - 'name': 'nzbvortex', - 'label': 'NZBVortex', - 'description': 'Use NZBVortex to download NZBs.', - 'wizard': True, - 'options': [ - { - 'name': 'enabled', - 
'default': 0, - 'type': 'enabler', - 'radio_group': 'nzb', - }, - { - 'name': 'host', - 'default': 'https://localhost:4321', - }, - { - 'name': 'api_key', - 'label': 'Api Key', - }, - { - 'name': 'manual', - 'default': False, - 'type': 'bool', - 'advanced': True, - 'description': 'Disable this downloader for automated searches, but use it when I manually send a release.', - }, - { - 'name': 'delete_failed', - 'default': True, - 'type': 'bool', - 'description': 'Delete a release after the download has failed.', - }, - ], - } - ], -}] diff --git a/couchpotato/core/downloaders/nzbvortex/main.py b/couchpotato/core/downloaders/nzbvortex/main.py deleted file mode 100644 index 1462c6782b..0000000000 --- a/couchpotato/core/downloaders/nzbvortex/main.py +++ /dev/null @@ -1,170 +0,0 @@ -from base64 import b64encode -from couchpotato.core.downloaders.base import Downloader -from couchpotato.core.helpers.encoding import tryUrlencode, ss -from couchpotato.core.helpers.variable import cleanHost -from couchpotato.core.logger import CPLog -from urllib2 import URLError -from uuid import uuid4 -import hashlib -import httplib -import json -import socket -import ssl -import sys -import traceback -import urllib2 - -log = CPLog(__name__) - -class NZBVortex(Downloader): - - type = ['nzb'] - api_level = None - session_id = None - - def download(self, data = {}, movie = {}, filedata = None): - - # Send the nzb - try: - nzb_filename = self.createFileName(data, filedata, movie) - self.call('nzb/add', params = {'file': (ss(nzb_filename), filedata)}, multipart = True) - - return True - except: - log.error('Something went wrong sending the NZB file: %s', traceback.format_exc()) - return False - - def getAllDownloadStatus(self): - - raw_statuses = self.call('nzb') - - statuses = [] - for item in raw_statuses.get('nzbs', []): - - # Check status - status = 'busy' - if item['state'] == 20: - status = 'completed' - elif item['state'] in [21, 22, 24]: - status = 'failed' - - statuses.append({ - 'id': 
item['id'], - 'name': item['uiTitle'], - 'status': status, - 'original_status': item['state'], - 'timeleft':-1, - }) - - return statuses - - def removeFailed(self, item): - - log.info('%s failed downloading, deleting...', item['name']) - - try: - self.call('nzb/%s/cancel' % item['id']) - except: - log.error('Failed deleting: %s', traceback.format_exc(0)) - return False - - return True - - def login(self): - - nonce = self.call('auth/nonce', auth = False).get('authNonce') - cnonce = uuid4().hex - hashed = b64encode(hashlib.sha256('%s:%s:%s' % (nonce, cnonce, self.conf('api_key'))).digest()) - - params = { - 'nonce': nonce, - 'cnonce': cnonce, - 'hash': hashed - } - - login_data = self.call('auth/login', parameters = params, auth = False) - - # Save for later - if login_data.get('loginResult') == 'successful': - self.session_id = login_data.get('sessionID') - return True - - log.error('Login failed, please check you api-key') - return False - - - def call(self, call, parameters = {}, repeat = False, auth = True, *args, **kwargs): - - # Login first - if not self.session_id and auth: - self.login() - - # Always add session id to request - if self.session_id: - parameters['sessionid'] = self.session_id - - params = tryUrlencode(parameters) - - url = cleanHost(self.conf('host')) + 'api/' + call - url_opener = urllib2.build_opener(HTTPSHandler()) - - try: - data = self.urlopen('%s?%s' % (url, params), opener = url_opener, *args, **kwargs) - - if data: - return json.loads(data) - except URLError, e: - if hasattr(e, 'code') and e.code == 403: - # Try login and do again - if not repeat: - self.login() - return self.call(call, parameters = parameters, repeat = True, *args, **kwargs) - - log.error('Failed to parsing %s: %s', (self.getName(), traceback.format_exc())) - except: - log.error('Failed to parsing %s: %s', (self.getName(), traceback.format_exc())) - - return {} - - def getApiLevel(self): - - if not self.api_level: - - url = cleanHost(self.conf('host')) + 
'api/app/apilevel' - url_opener = urllib2.build_opener(HTTPSHandler()) - - try: - data = self.urlopen(url, opener = url_opener, show_error = False) - self.api_level = float(json.loads(data).get('apilevel')) - except URLError, e: - if hasattr(e, 'code') and e.code == 403: - log.error('This version of NZBVortex isn\'t supported. Please update to 2.8.6 or higher') - else: - log.error('NZBVortex doesn\'t seem to be running or maybe the remote option isn\'t enabled yet: %s', traceback.format_exc(1)) - - return self.api_level - - def isEnabled(self, manual, data): - return super(NZBVortex, self).isEnabled(manual, data) and self.getApiLevel() - - -class HTTPSConnection(httplib.HTTPSConnection): - def __init__(self, *args, **kwargs): - httplib.HTTPSConnection.__init__(self, *args, **kwargs) - - def connect(self): - sock = socket.create_connection((self.host, self.port), self.timeout) - if sys.version_info < (2, 6, 7): - if hasattr(self, '_tunnel_host'): - self.sock = sock - self._tunnel() - else: - if self._tunnel_host: - self.sock = sock - self._tunnel() - - self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file, ssl_version = ssl.PROTOCOL_TLSv1) - -class HTTPSHandler(urllib2.HTTPSHandler): - def https_open(self, req): - return self.do_open(HTTPSConnection, req) diff --git a/couchpotato/core/downloaders/pneumatic.py b/couchpotato/core/downloaders/pneumatic.py new file mode 100644 index 0000000000..4ad32bdb64 --- /dev/null +++ b/couchpotato/core/downloaders/pneumatic.py @@ -0,0 +1,129 @@ +from __future__ import with_statement +import os +import traceback + +from couchpotato.core._base.downloader.main import DownloaderBase +from couchpotato.core.helpers.encoding import sp +from couchpotato.core.logger import CPLog + + +log = CPLog(__name__) + +autoload = 'Pneumatic' + + +class Pneumatic(DownloaderBase): + + protocol = ['nzb'] + strm_syntax = 'plugin://plugin.program.pneumatic/?mode=strm&type=add_file&nzb=%s&nzbname=%s' + status_support = False + + def 
download(self, data = None, media = None, filedata = None): + """ Send a torrent/nzb file to the downloader + + :param data: dict returned from provider + Contains the release information + :param media: media dict with information + Used for creating the filename when possible + :param filedata: downloaded torrent/nzb filedata + The file gets downloaded in the searcher and send to this function + This is done to have failed checking before using the downloader, so the downloader + doesn't need to worry about that + :return: boolean + One faile returns false, but the downloaded should log his own errors + """ + + if not media: media = {} + if not data: data = {} + + directory = self.conf('directory') + if not directory or not os.path.isdir(directory): + log.error('No directory set for .strm downloads.') + else: + try: + if not filedata or len(filedata) < 50: + log.error('No nzb available!') + return False + + full_path = os.path.join(directory, self.createFileName(data, filedata, media)) + + try: + if not os.path.isfile(full_path): + log.info('Downloading %s to %s.', (data.get('protocol'), full_path)) + with open(full_path, 'wb') as f: + f.write(filedata) + + nzb_name = self.createNzbName(data, media) + strm_path = os.path.join(directory, nzb_name) + + strm_file = open(strm_path + '.strm', 'wb') + strmContent = self.strm_syntax % (full_path, nzb_name) + strm_file.write(strmContent) + strm_file.close() + + return self.downloadReturnId('') + + else: + log.info('File %s already exists.', full_path) + return self.downloadReturnId('') + + except: + log.error('Failed to download .strm: %s', traceback.format_exc()) + pass + + except: + log.info('Failed to download file %s: %s', (data.get('name'), traceback.format_exc())) + return False + return False + + def test(self): + """ Check if connection works + :return: bool + """ + + directory = self.conf('directory') + if directory and os.path.isdir(directory): + + test_file = sp(os.path.join(directory, 'couchpotato_test.txt')) 
+ + # Check if folder is writable + self.createFile(test_file, 'This is a test file') + if os.path.isfile(test_file): + os.remove(test_file) + return True + + return False + + +config = [{ + 'name': 'pneumatic', + 'order': 30, + 'groups': [ + { + 'tab': 'downloaders', + 'list': 'download_providers', + 'name': 'pneumatic', + 'label': 'Pneumatic', + 'description': 'Use Pneumatic to download .strm files.', + 'options': [ + { + 'name': 'enabled', + 'default': 0, + 'type': 'enabler', + }, + { + 'name': 'directory', + 'type': 'directory', + 'description': 'Directory where the .strm file is saved to.', + }, + { + 'name': 'manual', + 'default': 0, + 'type': 'bool', + 'advanced': True, + 'description': 'Disable this downloader for automated searches, but use it when I manually send a release.', + }, + ], + } + ], +}] diff --git a/couchpotato/core/downloaders/pneumatic/__init__.py b/couchpotato/core/downloaders/pneumatic/__init__.py deleted file mode 100644 index 96574a7a9e..0000000000 --- a/couchpotato/core/downloaders/pneumatic/__init__.py +++ /dev/null @@ -1,37 +0,0 @@ -from .main import Pneumatic - -def start(): - return Pneumatic() - -config = [{ - 'name': 'pneumatic', - 'order': 30, - 'groups': [ - { - 'tab': 'downloaders', - 'list': 'download_providers', - 'name': 'pneumatic', - 'label': 'Pneumatic', - 'description': 'Use Pneumatic to download .strm files.', - 'options': [ - { - 'name': 'enabled', - 'default': 0, - 'type': 'enabler', - }, - { - 'name': 'directory', - 'type': 'directory', - 'description': 'Directory where the .strm file is saved to.', - }, - { - 'name': 'manual', - 'default': 0, - 'type': 'bool', - 'advanced': True, - 'description': 'Disable this downloader for automated searches, but use it when I manually send a release.', - }, - ], - } - ], -}] diff --git a/couchpotato/core/downloaders/pneumatic/main.py b/couchpotato/core/downloaders/pneumatic/main.py deleted file mode 100644 index 5e2b78547d..0000000000 --- 
a/couchpotato/core/downloaders/pneumatic/main.py +++ /dev/null @@ -1,54 +0,0 @@ -from __future__ import with_statement -from couchpotato.core.downloaders.base import Downloader -from couchpotato.core.logger import CPLog -import os -import traceback - -log = CPLog(__name__) - -class Pneumatic(Downloader): - - type = ['nzb'] - strm_syntax = 'plugin://plugin.program.pneumatic/?mode=strm&type=add_file&nzb=%s&nzbname=%s' - - def download(self, data = {}, movie = {}, filedata = None): - - directory = self.conf('directory') - if not directory or not os.path.isdir(directory): - log.error('No directory set for .strm downloads.') - else: - try: - if not filedata or len(filedata) < 50: - log.error('No nzb available!') - return False - - fullPath = os.path.join(directory, self.createFileName(data, filedata, movie)) - - try: - if not os.path.isfile(fullPath): - log.info('Downloading %s to %s.', (data.get('type'), fullPath)) - with open(fullPath, 'wb') as f: - f.write(filedata) - - nzb_name = self.createNzbName(data, movie) - strm_path = os.path.join(directory, nzb_name) - - strm_file = open(strm_path + '.strm', 'wb') - strmContent = self.strm_syntax % (fullPath, nzb_name) - strm_file.write(strmContent) - strm_file.close() - - return True - - else: - log.info('File %s already exists.', fullPath) - return True - - except: - log.error('Failed to download .strm: %s', traceback.format_exc()) - pass - - except: - log.info('Failed to download file %s: %s', (data.get('name'), traceback.format_exc())) - return False - return False diff --git a/couchpotato/core/downloaders/putio/__init__.py b/couchpotato/core/downloaders/putio/__init__.py new file mode 100644 index 0000000000..0f3654a12b --- /dev/null +++ b/couchpotato/core/downloaders/putio/__init__.py @@ -0,0 +1,74 @@ +from .main import PutIO + + +def autoload(): + return PutIO() + + +config = [{ + 'name': 'putio', + 'groups': [ + { + 'tab': 'downloaders', + 'list': 'download_providers', + 'name': 'putio', + 'label': 'Put.io', + 
'description': 'This will start a torrent download on Put.io.', + 'wizard': True, + 'options': [ + { + 'name': 'enabled', + 'default': 0, + 'type': 'enabler', + 'radio_group': 'torrent', + }, + { + 'name': 'oauth_token', + 'label': 'oauth_token', + 'description': 'This is the OAUTH_TOKEN from your putio API', + 'advanced': True, + }, + { + 'name': 'folder', + 'description': ('The folder on putio where you want the upload to go','Will find the first first folder that matches this name'), + 'default': 0, + }, + { + 'name': 'https', + 'description': 'Set to true if your callback host accepts https instead of http', + 'type': 'bool', + 'default': 0, + }, + { + 'name': 'callback_host', + 'description': 'External reachable url to CP so put.io can do it\'s thing', + }, + { + 'name': 'download', + 'description': 'Set this to have CouchPotato download the file from Put.io', + 'type': 'bool', + 'default': 0, + }, + { + 'name': 'delete_file', + 'description': ('Set this to remove the file from putio after sucessful download','Does nothing if you don\'t select download'), + 'type': 'bool', + 'default': 0, + }, + { + 'name': 'download_dir', + 'type': 'directory', + 'label': 'Download Directory', + 'description': 'The Directory to download files to, does nothing if you don\'t select download', + }, + { + 'name': 'manual', + 'default': 0, + 'type': 'bool', + 'advanced': True, + 'description': 'Disable this downloader for automated searches, but use it when I manually send a release.', + }, + ], + } + ], +}] diff --git a/couchpotato/core/downloaders/putio/main.py b/couchpotato/core/downloaders/putio/main.py new file mode 100644 index 0000000000..a49f870f85 --- /dev/null +++ b/couchpotato/core/downloaders/putio/main.py @@ -0,0 +1,185 @@ +from couchpotato.api import addApiView +from couchpotato.core.event import addEvent, fireEventAsync +from couchpotato.core._base.downloader.main import DownloaderBase, ReleaseDownloadList +from couchpotato.core.helpers.variable import cleanHost 
+from couchpotato.core.logger import CPLog +from couchpotato.environment import Env +from pio import api as pio +import datetime + +log = CPLog(__name__) + +autoload = 'Putiodownload' + + +class PutIO(DownloaderBase): + + protocol = ['torrent', 'torrent_magnet'] + downloading_list = [] + oauth_authenticate = 'https://api.couchpota.to/authorize/putio/' + + def __init__(self): + addApiView('downloader.putio.getfrom', self.getFromPutio, docs = { + 'desc': 'Allows you to download file from prom Put.io', + }) + addApiView('downloader.putio.auth_url', self.getAuthorizationUrl) + addApiView('downloader.putio.credentials', self.getCredentials) + addEvent('putio.download', self.putioDownloader) + + return super(PutIO, self).__init__() + + # This is a recusive function to check for the folders + def recursionFolder(self, client, folder = 0, tfolder = ''): + files = client.File.list(folder) + for f in files: + if f.content_type == 'application/x-directory': + if f.name == tfolder: + return f.id + else: + result = self.recursionFolder(client, f.id, tfolder) + if result != 0: + return result + return 0 + + # This will check the root for the folder, and kick of recusively checking sub folder + def convertFolder(self, client, folder): + if folder == 0: + return 0 + else: + return self.recursionFolder(client, 0, folder) + + def download(self, data = None, media = None, filedata = None): + if not media: media = {} + if not data: data = {} + + log.info('Sending "%s" to put.io', data.get('name')) + url = data.get('url') + client = pio.Client(self.conf('oauth_token')) + putioFolder = self.convertFolder(client, self.conf('folder')) + log.debug('putioFolder ID is %s', putioFolder) + # It might be possible to call getFromPutio from the renamer if we can then we don't need to do this. 
+ # Note callback_host is NOT our address, it's the internet host that putio can call too + callbackurl = None + if self.conf('download'): + pre = 'http://' + if self.conf('https'): + pre = 'https://' + callbackurl = pre + self.conf('callback_host') + '%sdownloader.putio.getfrom/' %Env.get('api_base'.strip('/')) + log.debug('callbackurl is %s', callbackurl) + resp = client.Transfer.add_url(url, callback_url = callbackurl, parent_id = putioFolder) + log.debug('resp is %s', resp.id) + return self.downloadReturnId(resp.id) + + def test(self): + try: + client = pio.Client(self.conf('oauth_token')) + if client.File.list(): + return True + except: + log.info('Failed to get file listing, check OAUTH_TOKEN') + return False + + def getAuthorizationUrl(self, host = None, **kwargs): + + callback_url = cleanHost(host) + '%sdownloader.putio.credentials/' % (Env.get('api_base').lstrip('/')) + log.debug('callback_url is %s', callback_url) + + target_url = self.oauth_authenticate + "?target=" + callback_url + log.debug('target_url is %s', target_url) + + return { + 'success': True, + 'url': target_url, + } + + def getCredentials(self, **kwargs): + try: + oauth_token = kwargs.get('oauth') + except: + return 'redirect', Env.get('web_base') + 'settings/downloaders/' + log.debug('oauth_token is: %s', oauth_token) + self.conf('oauth_token', value = oauth_token); + return 'redirect', Env.get('web_base') + 'settings/downloaders/' + + def getAllDownloadStatus(self, ids): + + log.debug('Checking putio download status.') + client = pio.Client(self.conf('oauth_token')) + + transfers = client.Transfer.list() + + log.debug(transfers); + release_downloads = ReleaseDownloadList(self) + for t in transfers: + if t.id in ids: + + log.debug('downloading list is %s', self.downloading_list) + if t.status == "COMPLETED" and self.conf('download') == False : + status = 'completed' + + # So check if we are trying to download something + elif t.status == "COMPLETED" and self.conf('download') == True: + # 
Assume we are done + status = 'completed' + if not self.downloading_list: + now = datetime.datetime.utcnow() + date_time = datetime.datetime.strptime(t.finished_at,"%Y-%m-%dT%H:%M:%S") + # We need to make sure a race condition didn't happen + if (now - date_time) < datetime.timedelta(minutes=5): + # 5 minutes haven't passed so we wait + status = 'busy' + else: + # If we have the file_id in the downloading_list mark it as busy + if str(t.file_id) in self.downloading_list: + status = 'busy' + else: + status = 'busy' + release_downloads.append({ + 'id' : t.id, + 'name': t.name, + 'status': status, + 'timeleft': t.estimated_time, + }) + + return release_downloads + + def putioDownloader(self, fid): + + log.info('Put.io Real downloader called with file_id: %s',fid) + client = pio.Client(self.conf('oauth_token')) + + log.debug('About to get file List') + putioFolder = self.convertFolder(client, self.conf('folder')) + log.debug('PutioFolderID is %s', putioFolder) + files = client.File.list(parent_id=putioFolder) + downloaddir = self.conf('download_dir') + + for f in files: + if str(f.id) == str(fid): + client.File.download(f, dest = downloaddir, delete_after_download = self.conf('delete_file')) + # Once the download is complete we need to remove it from the running list. 
+ self.downloading_list.remove(fid) + + return True + + def getFromPutio(self, **kwargs): + + try: + file_id = str(kwargs.get('file_id')) + except: + return { + 'success' : False, + } + + log.info('Put.io Download has been called file_id is %s', file_id) + if file_id not in self.downloading_list: + self.downloading_list.append(file_id) + fireEventAsync('putio.download',fid = file_id) + return { + 'success': True, + } + + return { + 'success': False, + } + diff --git a/couchpotato/core/downloaders/putio/static/putio.js b/couchpotato/core/downloaders/putio/static/putio.js new file mode 100644 index 0000000000..438348f805 --- /dev/null +++ b/couchpotato/core/downloaders/putio/static/putio.js @@ -0,0 +1,68 @@ +var PutIODownloader = new Class({ + + initialize: function(){ + var self = this; + + App.addEvent('loadSettings', self.addRegisterButton.bind(self)); + }, + + addRegisterButton: function(){ + var self = this; + + var setting_page = App.getPage('Settings'); + setting_page.addEvent('create', function(){ + + var fieldset = setting_page.tabs.downloaders.groups.putio, + l = window.location; + + var putio_set = 0; + fieldset.getElements('input[type=text]').each(function(el){ + putio_set += +(el.get('value') !== ''); + }); + + new Element('.ctrlHolder').adopt( + + // Unregister button + (putio_set > 0) ? + [ + self.unregister = new Element('a.button.red', { + 'text': 'Unregister "'+fieldset.getElement('input[name*=oauth_token]').get('value')+'"', + 'events': { + 'click': function(){ + fieldset.getElements('input[name*=oauth_token]').set('value', '').fireEvent('change'); + + self.unregister.destroy(); + self.unregister_or.destroy(); + } + } + }), + self.unregister_or = new Element('span[text=or]') + ] + : null, + + // Register button + new Element('a.button', { + 'text': putio_set > 0 ? 
'Register a different account' : 'Register your put.io account', + 'events': { + 'click': function(){ + Api.request('downloader.putio.auth_url', { + 'data': { + 'host': l.protocol + '//' + l.hostname + (l.port ? ':' + l.port : '') + }, + 'onComplete': function(json){ + window.location = json.url; + } + }); + } + } + }) + ).inject(fieldset.getElement('.test_button'), 'before'); + }); + + } + +}); + +window.addEvent('domready', function(){ + new PutIODownloader(); +}); diff --git a/couchpotato/core/downloaders/qbittorrent_.py b/couchpotato/core/downloaders/qbittorrent_.py new file mode 100644 index 0000000000..f36b6e4fcb --- /dev/null +++ b/couchpotato/core/downloaders/qbittorrent_.py @@ -0,0 +1,274 @@ +from base64 import b16encode, b32decode +from hashlib import sha1 +from datetime import timedelta +import os +import re + +from bencode import bencode, bdecode +from couchpotato.core._base.downloader.main import DownloaderBase, ReleaseDownloadList +from couchpotato.core.helpers.encoding import sp +from couchpotato.core.helpers.variable import cleanHost +from couchpotato.core.logger import CPLog +from qbittorrent.client import QBittorrentClient + + +log = CPLog(__name__) + +autoload = 'qBittorrent' + + +class qBittorrent(DownloaderBase): + + protocol = ['torrent', 'torrent_magnet'] + qb = None + + def __init__(self): + super(qBittorrent, self).__init__() + + def connect(self): + if self.qb is not None: + self.qb.logout() + + url = cleanHost(self.conf('host'), protocol = True, ssl = False) + + if self.conf('username') and self.conf('password'): + self.qb = QBittorrentClient(url) + self.qb.login(username=self.conf('username'), password=self.conf('password')) + else: + self.qb = QBittorrentClient(url) + + return self.qb._is_authenticated + + def test(self): + """ Check if connection works + :return: bool + """ + return self.connect() + + def download(self, data = None, media = None, filedata = None): + """ Send a torrent/nzb file to the downloader + + :param data: dict 
returned from provider + Contains the release information + :param media: media dict with information + Used for creating the filename when possible + :param filedata: downloaded torrent/nzb filedata + The file gets downloaded in the searcher and send to this function + This is done to have failed checking before using the downloader, so the downloader + doesn't need to worry about that + :return: boolean + One faile returns false, but the downloaded should log his own errors + """ + + if not media: media = {} + if not data: data = {} + + log.debug('Sending "%s" to qBittorrent.', (data.get('name'))) + + if not self.connect(): + return False + + if not filedata and data.get('protocol') == 'torrent': + log.error('Failed sending torrent, no data') + return False + + if data.get('protocol') == 'torrent_magnet': + # Send request to qBittorrent directly as a magnet + try: + self.qb.download_from_link(data.get('url'), label=self.conf('label')) + torrent_hash = re.findall('urn:btih:([\w]{32,40})', data.get('url'))[0].upper() + log.info('Torrent [magnet] sent to QBittorrent successfully.') + return self.downloadReturnId(torrent_hash) + + except Exception as e: + log.error('Failed to send torrent to qBittorrent: %s', e) + return False + + if data.get('protocol') == 'torrent': + info = bdecode(filedata)["info"] + torrent_hash = sha1(bencode(info)).hexdigest() + + # Convert base 32 to hex + if len(torrent_hash) == 32: + torrent_hash = b16encode(b32decode(torrent_hash)) + + # Send request to qBittorrent + try: + self.qb.download_from_file(filedata, label=self.conf('label')) + log.info('Torrent [file] sent to QBittorrent successfully.') + return self.downloadReturnId(torrent_hash) + except Exception as e: + log.error('Failed to send torrent to qBittorrent: %s', e) + return False + + def getTorrentStatus(self, torrent): + + if torrent['state'] in ('uploading', 'queuedUP', 'stalledUP'): + return 'seeding' + + if torrent['progress'] == 1: + return 'completed' + + return 'busy' + + 
def getAllDownloadStatus(self, ids): + """ Get status of all active downloads + + :param ids: list of (mixed) downloader ids + Used to match the releases for this downloader as there could be + other downloaders active that it should ignore + :return: list of releases + """ + + log.debug('Checking qBittorrent download status.') + + if not self.connect(): + return [] + + try: + torrents = self.qb.torrents(status='all', label=self.conf('label')) + + release_downloads = ReleaseDownloadList(self) + + for torrent in torrents: + if torrent['hash'] in ids: + torrent_filelist = self.qb.get_torrent_files(torrent['hash']) + + torrent_files = [] + torrent_dir = os.path.join(torrent['save_path'], torrent['name']) + + if os.path.isdir(torrent_dir): + torrent['save_path'] = torrent_dir + + if len(torrent_filelist) > 1 and os.path.isdir(torrent_dir): # multi file torrent, path.isdir check makes sure we're not in the root download folder + for root, _, files in os.walk(torrent['save_path']): + for f in files: + torrent_files.append(sp(os.path.join(root, f))) + + else: # multi or single file placed directly in torrent.save_path + for f in torrent_filelist: + file_path = os.path.join(torrent['save_path'], f['name']) + if os.path.isfile(file_path): + torrent_files.append(sp(file_path)) + + release_downloads.append({ + 'id': torrent['hash'], + 'name': torrent['name'], + 'status': self.getTorrentStatus(torrent), + 'seed_ratio': torrent['ratio'], + 'original_status': torrent['state'], + 'timeleft': str(timedelta(seconds = torrent['eta'])), + 'folder': sp(torrent['save_path']), + 'files': torrent_files + }) + + return release_downloads + + except Exception as e: + log.error('Failed to get status from qBittorrent: %s', e) + return [] + + def pause(self, release_download, pause = True): + if not self.connect(): + return False + + torrent = self.qb.get_torrent(release_download['id']) + if torrent is None: + return False + + if pause: + return self.qb.pause(release_download['id']) + return 
self.qb.resume(release_download['id']) + + def removeFailed(self, release_download): + log.info('%s failed downloading, deleting...', release_download['name']) + return self.processComplete(release_download, delete_files = True) + + def processComplete(self, release_download, delete_files): + log.debug('Requesting qBittorrent to remove the torrent %s%s.', + (release_download['name'], ' and cleanup the downloaded files' if delete_files else '')) + + if not self.connect(): + return False + + torrent = self.qb.get_torrent(release_download['id']) + + if torrent is None: + return False + + if delete_files: + self.qb.delete_permanently(release_download['id']) # deletes torrent with data + else: + self.qb.delete(release_download['id']) # just removes the torrent, doesn't delete data + + return True + + +config = [{ + 'name': 'qbittorrent', + 'groups': [ + { + 'tab': 'downloaders', + 'list': 'download_providers', + 'name': 'qbittorrent', + 'label': 'qBittorrent', + 'description': 'Use qBittorrent to download torrents.', + 'wizard': True, + 'options': [ + { + 'name': 'enabled', + 'default': 0, + 'type': 'enabler', + 'radio_group': 'torrent', + }, + { + 'name': 'host', + 'default': 'http://localhost:8080/', + 'description': 'RPC Communication URI. 
Usually http://localhost:8080/' + }, + { + 'name': 'username', + }, + { + 'name': 'password', + 'type': 'password', + }, + { + 'name': 'label', + 'label': 'Torrent Label', + 'default': 'couchpotato', + }, + { + 'name': 'remove_complete', + 'label': 'Remove torrent', + 'default': False, + 'advanced': True, + 'type': 'bool', + 'description': 'Remove the torrent after it finishes seeding.', + }, + { + 'name': 'delete_files', + 'label': 'Remove files', + 'default': True, + 'type': 'bool', + 'advanced': True, + 'description': 'Also remove the leftover files.', + }, + { + 'name': 'paused', + 'type': 'bool', + 'advanced': True, + 'default': False, + 'description': 'Add the torrent paused.', + }, + { + 'name': 'manual', + 'default': 0, + 'type': 'bool', + 'advanced': True, + 'description': 'Disable this downloader for automated searches, but use it when I manually send a release.', + }, + ], + } + ], +}] diff --git a/couchpotato/core/downloaders/rtorrent_.py b/couchpotato/core/downloaders/rtorrent_.py new file mode 100644 index 0000000000..4902cff044 --- /dev/null +++ b/couchpotato/core/downloaders/rtorrent_.py @@ -0,0 +1,442 @@ +from base64 import b16encode, b32decode +from datetime import timedelta +from hashlib import sha1 +from urlparse import urlparse +import os +import re + +from couchpotato.core._base.downloader.main import DownloaderBase, ReleaseDownloadList +from couchpotato.core.event import addEvent +from couchpotato.core.helpers.encoding import sp +from couchpotato.core.helpers.variable import cleanHost, splitString +from couchpotato.core.logger import CPLog +from bencode import bencode, bdecode +from rtorrent import RTorrent + + +log = CPLog(__name__) + +autoload = 'rTorrent' + + +class rTorrent(DownloaderBase): + + protocol = ['torrent', 'torrent_magnet'] + rt = None + error_msg = '' + + # Migration url to host options + def __init__(self): + super(rTorrent, self).__init__() + + addEvent('app.load', self.migrate) + addEvent('setting.save.rtorrent.*.after', 
self.settingsChanged) + + def migrate(self): + + url = self.conf('url') + if url: + host_split = splitString(url.split('://')[-1], split_on = '/') + + self.conf('ssl', value = url.startswith('https')) + self.conf('host', value = host_split[0].strip()) + self.conf('rpc_url', value = '/'.join(host_split[1:])) + + self.deleteConf('url') + + def settingsChanged(self): + # Reset active connection if settings have changed + if self.rt: + log.debug('Settings have changed, closing active connection') + + self.rt = None + return True + + def getAuth(self): + if not self.conf('username') or not self.conf('password'): + # Missing username or password parameter + return None + + # Build authentication tuple + return ( + self.conf('authentication'), + self.conf('username'), + self.conf('password') + ) + + def getVerifySsl(self): + # Ensure verification has been enabled + if not self.conf('ssl_verify'): + return False + + # Use ca bundle if defined + ca_bundle = self.conf('ssl_ca_bundle') + + if ca_bundle and os.path.exists(ca_bundle): + return ca_bundle + + # Use default ssl verification + return True + + def connect(self, reconnect = False): + # Already connected? 
+ if not reconnect and self.rt is not None: + return self.rt + + url = cleanHost(self.conf('host'), protocol = True, ssl = self.conf('ssl')) + + # Automatically add '+https' to 'httprpc' protocol if SSL is enabled + if self.conf('ssl') and url.startswith('httprpc://'): + url = url.replace('httprpc://', 'httprpc+https://') + + parsed = urlparse(url) + + # rpc_url is only used on http/https scgi pass-through + if parsed.scheme in ['http', 'https']: + url += self.conf('rpc_url') + + # Construct client + self.rt = RTorrent( + url, self.getAuth(), + verify_ssl=self.getVerifySsl() + ) + + self.error_msg = '' + try: + self.rt.connection.verify() + except AssertionError as e: + self.error_msg = e.message + self.rt = None + + return self.rt + + def test(self): + """ Check if connection works + :return: bool + """ + + if self.connect(True): + return True + + if self.error_msg: + return False, 'Connection failed: ' + self.error_msg + + return False + + + def download(self, data = None, media = None, filedata = None): + """ Send a torrent/nzb file to the downloader + + :param data: dict returned from provider + Contains the release information + :param media: media dict with information + Used for creating the filename when possible + :param filedata: downloaded torrent/nzb filedata + The file gets downloaded in the searcher and send to this function + This is done to have failed checking before using the downloader, so the downloader + doesn't need to worry about that + :return: boolean + One faile returns false, but the downloaded should log his own errors + """ + + if not media: media = {} + if not data: data = {} + + log.debug('Sending "%s" to rTorrent.', (data.get('name'))) + + if not self.connect(): + return False + + torrent_hash = 0 + torrent_params = {} + if self.conf('label'): + torrent_params['label'] = self.conf('label') + + if not filedata and data.get('protocol') == 'torrent': + log.error('Failed sending torrent, no data') + return False + + # Try download magnet 
torrents + if data.get('protocol') == 'torrent_magnet': + # Send magnet to rTorrent + torrent_hash = re.findall('urn:btih:([\w]{32,40})', data.get('url'))[0].upper() + # Send request to rTorrent + try: + torrent = self.rt.load_magnet(data.get('url'), torrent_hash) + + if not torrent: + log.error('Unable to find the torrent, did it fail to load?') + return False + + except Exception as err: + log.error('Failed to send magnet to rTorrent: %s', err) + return False + + if data.get('protocol') == 'torrent': + info = bdecode(filedata)["info"] + torrent_hash = sha1(bencode(info)).hexdigest().upper() + + # Convert base 32 to hex + if len(torrent_hash) == 32: + torrent_hash = b16encode(b32decode(torrent_hash)) + + # Send request to rTorrent + try: + # Send torrent to rTorrent + torrent = self.rt.load_torrent(filedata, verify_retries=10) + + if not torrent: + log.error('Unable to find the torrent, did it fail to load?') + return False + + except Exception as err: + log.error('Failed to send torrent to rTorrent: %s', err) + return False + + try: + # Set label + if self.conf('label'): + torrent.set_custom(1, self.conf('label')) + + if self.conf('directory'): + torrent.set_directory(self.conf('directory')) + + # Start torrent + if not self.conf('paused', default = 0): + torrent.start() + + return self.downloadReturnId(torrent_hash) + + except Exception as err: + log.error('Failed to send torrent to rTorrent: %s', err) + return False + + + def getTorrentStatus(self, torrent): + if not torrent.complete: + return 'busy' + + if torrent.open: + return 'seeding' + + return 'completed' + + def getAllDownloadStatus(self, ids): + """ Get status of all active downloads + + :param ids: list of (mixed) downloader ids + Used to match the releases for this downloader as there could be + other downloaders active that it should ignore + :return: list of releases + """ + + log.debug('Checking rTorrent download status.') + + if not self.connect(): + return [] + + try: + torrents = 
self.rt.get_torrents() + + release_downloads = ReleaseDownloadList(self) + + for torrent in torrents: + if torrent.info_hash in ids: + torrent_directory = os.path.normpath(torrent.directory) + torrent_files = [] + + for file in torrent.get_files(): + if not os.path.normpath(file.path).startswith(torrent_directory): + file_path = os.path.join(torrent_directory, file.path.lstrip('/')) + else: + file_path = file.path + + torrent_files.append(sp(file_path)) + + release_downloads.append({ + 'id': torrent.info_hash, + 'name': torrent.name, + 'status': self.getTorrentStatus(torrent), + 'seed_ratio': torrent.ratio, + 'original_status': torrent.state, + 'timeleft': str(timedelta(seconds = float(torrent.left_bytes) / torrent.down_rate)) if torrent.down_rate > 0 else -1, + 'folder': sp(torrent.directory), + 'files': torrent_files + }) + + return release_downloads + + except Exception as err: + log.error('Failed to get status from rTorrent: %s', err) + return [] + + def pause(self, release_download, pause = True): + if not self.connect(): + return False + + torrent = self.rt.find_torrent(release_download['id']) + if torrent is None: + return False + + if pause: + return torrent.pause() + return torrent.resume() + + def removeFailed(self, release_download): + log.info('%s failed downloading, deleting...', release_download['name']) + return self.processComplete(release_download, delete_files = True) + + def processComplete(self, release_download, delete_files): + log.debug('Requesting rTorrent to remove the torrent %s%s.', + (release_download['name'], ' and cleanup the downloaded files' if delete_files else '')) + + if not self.connect(): + return False + + torrent = self.rt.find_torrent(release_download['id']) + + if torrent is None: + return False + + if delete_files: + for file_item in torrent.get_files(): # will only delete files, not dir/sub-dir + os.unlink(os.path.join(torrent.directory, file_item.path)) + + if torrent.is_multi_file() and 
torrent.directory.endswith(torrent.name): + # Remove empty directories bottom up + try: + for path, _, _ in os.walk(sp(torrent.directory), topdown = False): + os.rmdir(path) + except OSError: + log.info('Directory "%s" contains extra files, unable to remove', torrent.directory) + + torrent.erase() # just removes the torrent, doesn't delete data + + return True + + +config = [{ + 'name': 'rtorrent', + 'groups': [ + { + 'tab': 'downloaders', + 'list': 'download_providers', + 'name': 'rtorrent', + 'label': 'rTorrent', + 'description': 'Use rTorrent to download torrents.', + 'wizard': True, + 'options': [ + { + 'name': 'enabled', + 'default': 0, + 'type': 'enabler', + 'radio_group': 'torrent', + }, + { + 'name': 'ssl', + 'label': 'SSL Enabled', + 'order': 1, + 'default': 0, + 'type': 'bool', + 'advanced': True, + 'description': 'Use HyperText Transfer Protocol Secure, or https', + }, + { + 'name': 'ssl_verify', + 'label': 'SSL Verify', + 'order': 2, + 'default': 1, + 'type': 'bool', + 'advanced': True, + 'description': 'Verify SSL certificate on https connections', + }, + { + 'name': 'ssl_ca_bundle', + 'label': 'SSL CA Bundle', + 'order': 3, + 'type': 'string', + 'advanced': True, + 'description': 'Path to a directory (or file) containing trusted certificate authorities', + }, + { + 'name': 'host', + 'order': 4, + 'default': 'localhost:80', + 'description': 'RPC Communication URI. 
Usually scgi://localhost:5000, ' + 'httprpc://localhost/rutorrent or localhost:80', + }, + { + 'name': 'rpc_url', + 'order': 5, + 'default': 'RPC2', + 'type': 'string', + 'advanced': True, + 'description': 'Change if your RPC mount is at a different path.', + }, + { + 'name': 'authentication', + 'order': 6, + 'default': 'basic', + 'type': 'dropdown', + 'advanced': True, + 'values': [('Basic', 'basic'), ('Digest', 'digest')], + 'description': 'Authentication method used for http(s) connections', + }, + { + 'name': 'username', + 'order': 7, + }, + { + 'name': 'password', + 'order': 8, + 'type': 'password', + }, + { + 'name': 'label', + 'order': 9, + 'description': 'Label to apply on added torrents.', + }, + { + 'name': 'directory', + 'order': 10, + 'type': 'directory', + 'description': 'Download to this directory. Keep empty for default rTorrent download directory.', + }, + { + 'name': 'remove_complete', + 'label': 'Remove torrent', + 'order': 11, + 'default': False, + 'type': 'bool', + 'advanced': True, + 'description': 'Remove the torrent after it finishes seeding.', + }, + { + 'name': 'delete_files', + 'label': 'Remove files', + 'order': 12, + 'default': True, + 'type': 'bool', + 'advanced': True, + 'description': 'Also remove the leftover files.', + }, + { + 'name': 'paused', + 'order': 13, + 'type': 'bool', + 'advanced': True, + 'default': False, + 'description': 'Add the torrent paused.', + }, + { + 'name': 'manual', + 'order': 14, + 'default': 0, + 'type': 'bool', + 'advanced': True, + 'description': 'Disable this downloader for automated searches, but use it when I manually send a release.', + }, + ], + } + ], +}] diff --git a/couchpotato/core/downloaders/sabnzbd.py b/couchpotato/core/downloaders/sabnzbd.py new file mode 100644 index 0000000000..47c94ada9c --- /dev/null +++ b/couchpotato/core/downloaders/sabnzbd.py @@ -0,0 +1,309 @@ +from datetime import timedelta +from urllib2 import URLError +import json +import os +import traceback + +from 
couchpotato.core._base.downloader.main import DownloaderBase, ReleaseDownloadList +from couchpotato.core.helpers.encoding import tryUrlencode, ss, sp +from couchpotato.core.helpers.variable import cleanHost, mergeDicts +from couchpotato.core.logger import CPLog +from couchpotato.environment import Env + + +log = CPLog(__name__) + +autoload = 'Sabnzbd' + + +class Sabnzbd(DownloaderBase): + + protocol = ['nzb'] + + def download(self, data = None, media = None, filedata = None): + """ + Send a torrent/nzb file to the downloader + + :param data: dict returned from provider + Contains the release information + :param media: media dict with information + Used for creating the filename when possible + :param filedata: downloaded torrent/nzb filedata + The file gets downloaded in the searcher and send to this function + This is done to have failed checking before using the downloader, so the downloader + doesn't need to worry about that + :return: boolean + One faile returns false, but the downloaded should log his own errors + """ + + if not media: media = {} + if not data: data = {} + + log.info('Sending "%s" to SABnzbd.', data.get('name')) + + req_params = { + 'cat': self.conf('category'), + 'mode': 'addurl', + 'nzbname': self.createNzbName(data, media), + 'priority': self.conf('priority'), + } + + nzb_filename = None + if filedata: + if len(filedata) < 50: + log.error('No proper nzb available: %s', filedata) + return False + + # If it's a .rar, it adds the .rar extension, otherwise it stays .nzb + nzb_filename = self.createFileName(data, filedata, media) + req_params['mode'] = 'addfile' + else: + req_params['name'] = data.get('url') + + try: + if nzb_filename and req_params.get('mode') is 'addfile': + sab_data = self.call(req_params, files = {'nzbfile': (ss(nzb_filename), filedata)}) + else: + sab_data = self.call(req_params) + except URLError: + log.error('Failed sending release, probably wrong HOST: %s', traceback.format_exc(0)) + return False + except: + 
log.error('Failed sending release, use API key, NOT the NZB key: %s', traceback.format_exc(0)) + return False + + log.debug('Result from SAB: %s', sab_data) + nzo_ids = sab_data.get('nzo_ids', []) + if sab_data.get('status') and not sab_data.get('error') and isinstance(nzo_ids, list) and len(nzo_ids) > 0: + log.info('NZB sent to SAB successfully.') + if filedata: + return self.downloadReturnId(nzo_ids[0]) + else: + return True + else: + log.error('Error getting data from SABNZBd: %s', sab_data) + return False + + def test(self): + """ Check if connection works + Return message if an old version of SAB is used + :return: bool + """ + + try: + sab_data = self.call({ + 'mode': 'version', + }) + v = sab_data.split('.') + if sab_data != 'develop' and int(v[0]) == 0 and int(v[1]) < 7: + return False, 'Your Sabnzbd client is too old, please update to newest version.' + + # the version check will work even with wrong api key, so we need the next check as well + sab_data = self.call({ + 'mode': 'queue', + }) + if not sab_data: + return False + except: + return False + + return True + + def getAllDownloadStatus(self, ids): + """ Get status of all active downloads + + :param ids: list of (mixed) downloader ids + Used to match the releases for this downloader as there could be + other downloaders active that it should ignore + :return: list of releases + """ + + log.debug('Checking SABnzbd download status.') + + # Go through Queue + try: + queue = self.call({ + 'mode': 'queue', + }) + except: + log.error('Failed getting queue: %s', traceback.format_exc(1)) + return [] + + # Go through history items + try: + history = self.call({ + 'mode': 'history', + 'limit': 15, + }) + except: + log.error('Failed getting history json: %s', traceback.format_exc(1)) + return [] + + release_downloads = ReleaseDownloadList(self) + + # Get busy releases + for nzb in queue.get('slots', []): + if nzb['nzo_id'] in ids: + status = 'busy' + if 'ENCRYPTED / ' in nzb['filename']: + status = 'failed' + + 
release_downloads.append({ + 'id': nzb['nzo_id'], + 'name': nzb['filename'], + 'status': status, + 'original_status': nzb['status'], + 'timeleft': nzb['timeleft'] if not queue['paused'] else -1, + }) + + # Get old releases + for nzb in history.get('slots', []): + if nzb['nzo_id'] in ids: + status = 'busy' + if nzb['status'] == 'Failed' or (nzb['status'] == 'Completed' and nzb['fail_message'].strip()): + status = 'failed' + elif nzb['status'] == 'Completed': + status = 'completed' + + release_downloads.append({ + 'id': nzb['nzo_id'], + 'name': nzb['name'], + 'status': status, + 'original_status': nzb['status'], + 'timeleft': str(timedelta(seconds = 0)), + 'folder': sp(os.path.dirname(nzb['storage']) if os.path.isfile(nzb['storage']) else nzb['storage']), + }) + + return release_downloads + + def removeFailed(self, release_download): + + log.info('%s failed downloading, deleting...', release_download['name']) + + try: + self.call({ + 'mode': 'queue', + 'name': 'delete', + 'del_files': '1', + 'value': release_download['id'] + }, use_json = False) + self.call({ + 'mode': 'history', + 'name': 'delete', + 'del_files': '1', + 'value': release_download['id'] + }, use_json = False) + except: + log.error('Failed deleting: %s', traceback.format_exc(0)) + return False + + return True + + def processComplete(self, release_download, delete_files = False): + log.debug('Requesting SabNZBd to remove the NZB %s.', release_download['name']) + + try: + self.call({ + 'mode': 'history', + 'name': 'delete', + 'del_files': '0', + 'value': release_download['id'] + }, use_json = False) + except: + log.error('Failed removing: %s', traceback.format_exc(0)) + return False + + return True + + def call(self, request_params, use_json = True, **kwargs): + + url = cleanHost(self.conf('host'), ssl = self.conf('ssl')) + 'api?' 
+ tryUrlencode(mergeDicts(request_params, { + 'apikey': self.conf('api_key'), + 'output': 'json' + })) + + data = self.urlopen(url, timeout = 60, show_error = False, headers = {'User-Agent': Env.getIdentifier()}, **kwargs) + if use_json: + d = json.loads(data) + if d.get('error'): + log.error('Error getting data from SABNZBd: %s', d.get('error')) + return {} + + return d.get(request_params['mode']) or d + else: + return data + + +config = [{ + 'name': 'sabnzbd', + 'groups': [ + { + 'tab': 'downloaders', + 'list': 'download_providers', + 'name': 'sabnzbd', + 'label': 'Sabnzbd', + 'description': 'Use SABnzbd (0.7+) to download NZBs.', + 'wizard': True, + 'options': [ + { + 'name': 'enabled', + 'default': 0, + 'type': 'enabler', + 'radio_group': 'nzb', + }, + { + 'name': 'host', + 'default': 'localhost:8080', + }, + { + 'name': 'ssl', + 'default': 0, + 'type': 'bool', + 'advanced': True, + 'description': 'Use HyperText Transfer Protocol Secure, or https', + }, + { + 'name': 'api_key', + 'label': 'Api Key', + 'description': 'Used for all calls to Sabnzbd.', + }, + { + 'name': 'category', + 'label': 'Category', + 'description': 'The category CP places the nzb in. 
Like movies or couchpotato', + }, + { + 'name': 'priority', + 'label': 'Priority', + 'type': 'dropdown', + 'default': '0', + 'advanced': True, + 'values': [('Paused', -2), ('Low', -1), ('Normal', 0), ('High', 1), ('Forced', 2)], + 'description': 'Add to the queue with this priority.', + }, + { + 'name': 'manual', + 'default': False, + 'type': 'bool', + 'advanced': True, + 'description': 'Disable this downloader for automated searches, but use it when I manually send a release.', + }, + { + 'name': 'remove_complete', + 'advanced': True, + 'label': 'Remove NZB', + 'default': False, + 'type': 'bool', + 'description': 'Remove the NZB from history after it completed.', + }, + { + 'name': 'delete_failed', + 'default': True, + 'advanced': True, + 'type': 'bool', + 'description': 'Delete a release after the download has failed.', + }, + ], + } + ], +}] diff --git a/couchpotato/core/downloaders/sabnzbd/__init__.py b/couchpotato/core/downloaders/sabnzbd/__init__.py deleted file mode 100644 index 6c976f1e49..0000000000 --- a/couchpotato/core/downloaders/sabnzbd/__init__.py +++ /dev/null @@ -1,53 +0,0 @@ -from .main import Sabnzbd - -def start(): - return Sabnzbd() - -config = [{ - 'name': 'sabnzbd', - 'groups': [ - { - 'tab': 'downloaders', - 'list': 'download_providers', - 'name': 'sabnzbd', - 'label': 'Sabnzbd', - 'description': 'Use SABnzbd to download NZBs.', - 'wizard': True, - 'options': [ - { - 'name': 'enabled', - 'default': 0, - 'type': 'enabler', - 'radio_group': 'nzb', - }, - { - 'name': 'host', - 'default': 'localhost:8080', - }, - { - 'name': 'api_key', - 'label': 'Api Key', - 'description': 'Used for all calls to Sabnzbd.', - }, - { - 'name': 'category', - 'label': 'Category', - 'description': 'The category CP places the nzb in. 
Like movies or couchpotato', - }, - { - 'name': 'manual', - 'default': False, - 'type': 'bool', - 'advanced': True, - 'description': 'Disable this downloader for automated searches, but use it when I manually send a release.', - }, - { - 'name': 'delete_failed', - 'default': True, - 'type': 'bool', - 'description': 'Delete a release after the download has failed.', - }, - ], - } - ], -}] diff --git a/couchpotato/core/downloaders/sabnzbd/main.py b/couchpotato/core/downloaders/sabnzbd/main.py deleted file mode 100644 index a287f119ff..0000000000 --- a/couchpotato/core/downloaders/sabnzbd/main.py +++ /dev/null @@ -1,153 +0,0 @@ -from couchpotato.core.downloaders.base import Downloader -from couchpotato.core.helpers.encoding import tryUrlencode, ss -from couchpotato.core.helpers.variable import cleanHost, mergeDicts -from couchpotato.core.logger import CPLog -from couchpotato.environment import Env -from urllib2 import URLError -import json -import traceback - -log = CPLog(__name__) - -class Sabnzbd(Downloader): - - type = ['nzb'] - - def download(self, data = {}, movie = {}, filedata = None): - - log.info('Sending "%s" to SABnzbd.', data.get('name')) - - params = { - 'apikey': self.conf('api_key'), - 'cat': self.conf('category'), - 'mode': 'addurl', - 'nzbname': self.createNzbName(data, movie), - } - - if filedata: - if len(filedata) < 50: - log.error('No proper nzb available: %s', (filedata)) - return False - - # If it's a .rar, it adds the .rar extension, otherwise it stays .nzb - nzb_filename = self.createFileName(data, filedata, movie) - params['mode'] = 'addfile' - else: - params['name'] = data.get('url') - - url = cleanHost(self.conf('host')) + 'api?' 
+ tryUrlencode(params) - - try: - if params.get('mode') is 'addfile': - sab = self.urlopen(url, timeout = 60, params = {'nzbfile': (ss(nzb_filename), filedata)}, multipart = True, show_error = False, headers = {'User-Agent': Env.getIdentifier()}) - else: - sab = self.urlopen(url, timeout = 60, show_error = False, headers = {'User-Agent': Env.getIdentifier()}) - except URLError: - log.error('Failed sending release, probably wrong HOST: %s', traceback.format_exc(0)) - return False - except: - log.error('Failed sending release, use API key, NOT the NZB key: %s', traceback.format_exc(0)) - return False - - result = sab.strip() - if not result: - log.error('SABnzbd didn\'t return anything.') - return False - - log.debug('Result text from SAB: %s', result[:40]) - if result[:2] == 'ok': - log.info('NZB sent to SAB successfully.') - return True - else: - log.error(result[:40]) - return False - - def getAllDownloadStatus(self): - - log.debug('Checking SABnzbd download status.') - - # Go through Queue - try: - queue = self.call({ - 'mode': 'queue', - }) - except: - log.error('Failed getting queue: %s', traceback.format_exc(1)) - return False - - # Go through history items - try: - history = self.call({ - 'mode': 'history', - 'limit': 15, - }) - except: - log.error('Failed getting history json: %s', traceback.format_exc(1)) - return False - - statuses = [] - - # Get busy releases - for item in queue.get('slots', []): - statuses.append({ - 'id': item['nzo_id'], - 'name': item['filename'], - 'status': 'busy', - 'original_status': item['status'], - 'timeleft': item['timeleft'] if not queue['paused'] else -1, - }) - - # Get old releases - for item in history.get('slots', []): - - status = 'busy' - if item['status'] == 'Failed' or (item['status'] == 'Completed' and item['fail_message'].strip()): - status = 'failed' - elif item['status'] == 'Completed': - status = 'completed' - - statuses.append({ - 'id': item['nzo_id'], - 'name': item['name'], - 'status': status, - 
'original_status': item['status'], - 'timeleft': 0, - }) - - return statuses - - def removeFailed(self, item): - - log.info('%s failed downloading, deleting...', item['name']) - - try: - self.call({ - 'mode': 'history', - 'name': 'delete', - 'del_files': '1', - 'value': item['id'] - }, use_json = False) - except: - log.error('Failed deleting: %s', traceback.format_exc(0)) - return False - - return True - - def call(self, params, use_json = True): - - url = cleanHost(self.conf('host')) + 'api?' + tryUrlencode(mergeDicts(params, { - 'apikey': self.conf('api_key'), - 'output': 'json' - })) - - data = self.urlopen(url, timeout = 60, show_error = False, headers = {'User-Agent': Env.getIdentifier()}) - if use_json: - d = json.loads(data) - if d.get('error'): - log.error('Error getting data from SABNZBd: %s', d.get('error')) - return {} - - return d[params['mode']] - else: - return data - diff --git a/couchpotato/core/downloaders/synology.py b/couchpotato/core/downloaders/synology.py new file mode 100644 index 0000000000..4a9b9d7353 --- /dev/null +++ b/couchpotato/core/downloaders/synology.py @@ -0,0 +1,260 @@ +import json +import traceback + +from couchpotato.core._base.downloader.main import DownloaderBase +from couchpotato.core.helpers.encoding import isInt +from couchpotato.core.helpers.variable import cleanHost +from couchpotato.core.logger import CPLog +import requests + + +log = CPLog(__name__) + +autoload = 'Synology' + + +class Synology(DownloaderBase): + + protocol = ['nzb', 'torrent', 'torrent_magnet'] + status_support = False + + def download(self, data = None, media = None, filedata = None): + """ + Send a torrent/nzb file to the downloader + + :param data: dict returned from provider + Contains the release information + :param media: media dict with information + Used for creating the filename when possible + :param filedata: downloaded torrent/nzb filedata + The file gets downloaded in the searcher and send to this function + This is done to have fail 
checking before using the downloader, so the downloader + doesn't need to worry about that + :return: boolean + One fail returns false, but the downloader should log his own errors + """ + + if not media: media = {} + if not data: data = {} + + response = False + log.info('Sending "%s" (%s) to Synology.', (data['name'], data['protocol'])) + + # Load host from config and split out port. + host = cleanHost(self.conf('host'), protocol = False).split(':') + if not isInt(host[1]): + log.error('Config properties are not filled in correctly, port is missing.') + return False + + try: + # Send request to Synology + srpc = SynologyRPC(host[0], host[1], self.conf('username'), self.conf('password'), self.conf('destination')) + if data['protocol'] == 'torrent_magnet': + log.info('Adding torrent URL %s', data['url']) + response = srpc.create_task(url = data['url']) + elif data['protocol'] in ['nzb', 'torrent']: + log.info('Adding %s' % data['protocol']) + if not filedata: + log.error('No %s data found', data['protocol']) + else: + filename = data['name'] + '.' 
+ data['protocol'] + response = srpc.create_task(filename = filename, filedata = filedata) + except: + log.error('Exception while adding torrent: %s', traceback.format_exc()) + finally: + return self.downloadReturnId('') if response else False + + def test(self): + """ Check if connection works + :return: bool + """ + + host = cleanHost(self.conf('host'), protocol = False).split(':') + try: + srpc = SynologyRPC(host[0], host[1], self.conf('username'), self.conf('password')) + test_result = srpc.test() + except: + return False + + return test_result + + def getEnabledProtocol(self): + if self.conf('use_for') == 'both': + return super(Synology, self).getEnabledProtocol() + elif self.conf('use_for') == 'torrent': + return ['torrent', 'torrent_magnet'] + else: + return ['nzb'] + + def isEnabled(self, manual = False, data = None): + if not data: data = {} + + for_protocol = ['both'] + if data and 'torrent' in data.get('protocol'): + for_protocol.append('torrent') + elif data: + for_protocol.append(data.get('protocol')) + + return super(Synology, self).isEnabled(manual, data) and\ + ((self.conf('use_for') in for_protocol)) + + +class SynologyRPC(object): + + """SynologyRPC lite library""" + + def __init__(self, host = 'localhost', port = 5000, username = None, password = None, destination = None): + + super(SynologyRPC, self).__init__() + + self.download_url = 'http://%s:%s/webapi/DownloadStation/task.cgi' % (host, port) + self.auth_url = 'http://%s:%s/webapi/auth.cgi' % (host, port) + self.sid = None + self.username = username + self.password = password + self.destination = destination + self.session_name = 'DownloadStation' + + def _login(self): + if self.username and self.password: + args = {'api': 'SYNO.API.Auth', 'account': self.username, 'passwd': self.password, 'version': 2, + 'method': 'login', 'session': self.session_name, 'format': 'sid'} + response = self._req(self.auth_url, args) + if response['success']: + self.sid = response['data']['sid'] + 
log.debug('sid=%s', self.sid) + else: + log.error('Couldn\'t log into Synology, %s', response) + return response['success'] + else: + log.error('User or password missing, not using authentication.') + return False + + def _logout(self): + args = {'api':'SYNO.API.Auth', 'version':1, 'method':'logout', 'session':self.session_name, '_sid':self.sid} + return self._req(self.auth_url, args) + + def _req(self, url, args, files = None): + response = {'success': False} + try: + req = requests.post(url, data = args, files = files, verify = False) + req.raise_for_status() + response = json.loads(req.text) + if response['success']: + log.info('Synology action successfull') + return response + except requests.ConnectionError as err: + log.error('Synology connection error, check your config %s', err) + except requests.HTTPError as err: + log.error('SynologyRPC HTTPError: %s', err) + except Exception as err: + log.error('Exception: %s', err) + finally: + return response + + def create_task(self, url = None, filename = None, filedata = None): + """ Creates new download task in Synology DownloadStation. Either specify + url or pair (filename, filedata). 
+ + Returns True if task was created, False otherwise + """ + result = False + # login + if self._login(): + args = {'api': 'SYNO.DownloadStation.Task', + 'version': '1', + 'method': 'create', + '_sid': self.sid} + + if self.destination and len(self.destination) > 0: + args['destination'] = self.destination + + if url: + log.info('Login success, adding torrent URI') + args['uri'] = url + response = self._req(self.download_url, args = args) + if response['success']: + log.info('Response: %s', response) + else: + log.error('Response: %s', response) + synoerrortype = { + 400 : 'File upload failed', + 401 : 'Max number of tasks reached', + 402 : 'Destination denied', + 403 : 'Destination does not exist', + 404 : 'Invalid task id', + 405 : 'Invalid task action', + 406 : 'No default destination', + 407 : 'Set destination failed', + 408 : 'File does not exist' + } + log.error('DownloadStation returned the following error : %s', synoerrortype[response['error']['code']]) + result = response['success'] + elif filename and filedata: + log.info('Login success, adding torrent') + files = {'file': (filename, filedata)} + response = self._req(self.download_url, args = args, files = files) + log.info('Response: %s', response) + result = response['success'] + else: + log.error('Invalid use of SynologyRPC.create_task: either url or filename+filedata must be specified') + self._logout() + + return result + + def test(self): + return bool(self._login()) + + +config = [{ + 'name': 'synology', + 'groups': [ + { + 'tab': 'downloaders', + 'list': 'download_providers', + 'name': 'synology', + 'label': 'Synology', + 'description': 'Use Synology Download Station to download.', + 'wizard': True, + 'options': [ + { + 'name': 'enabled', + 'default': 0, + 'type': 'enabler', + 'radio_group': 'nzb,torrent', + }, + { + 'name': 'host', + 'default': 'localhost:5000', + 'description': 'Hostname with port. 
Usually localhost:5000', + }, + { + 'name': 'username', + }, + { + 'name': 'password', + 'type': 'password', + }, + { + 'name': 'destination', + 'description': 'Specify existing destination share to where your files will be downloaded, usually Downloads', + 'advanced': True, + }, + { + 'name': 'use_for', + 'label': 'Use for', + 'default': 'both', + 'type': 'dropdown', + 'values': [('usenet & torrents', 'both'), ('usenet', 'nzb'), ('torrent', 'torrent')], + }, + { + 'name': 'manual', + 'default': 0, + 'type': 'bool', + 'advanced': True, + 'description': 'Disable this downloader for automated searches, but use it when I manually send a release.', + }, + ], + } + ], +}] diff --git a/couchpotato/core/downloaders/synology/__init__.py b/couchpotato/core/downloaders/synology/__init__.py deleted file mode 100644 index 00a135d407..0000000000 --- a/couchpotato/core/downloaders/synology/__init__.py +++ /dev/null @@ -1,45 +0,0 @@ -from .main import Synology - -def start(): - return Synology() - -config = [{ - 'name': 'synology', - 'groups': [ - { - 'tab': 'downloaders', - 'list': 'download_providers', - 'name': 'synology', - 'label': 'Synology', - 'description': 'Use Synology Download Station to download.', - 'wizard': True, - 'options': [ - { - 'name': 'enabled', - 'default': 0, - 'type': 'enabler', - 'radio_group': 'torrent', - }, - { - 'name': 'host', - 'default': 'localhost:5000', - 'description': 'Hostname with port. 
Usually localhost:5000', - }, - { - 'name': 'username', - }, - { - 'name': 'password', - 'type': 'password', - }, - { - 'name': 'manual', - 'default': 0, - 'type': 'bool', - 'advanced': True, - 'description': 'Disable this downloader for automated searches, but use it when I manually send a release.', - }, - ], - } - ], -}] diff --git a/couchpotato/core/downloaders/synology/main.py b/couchpotato/core/downloaders/synology/main.py deleted file mode 100644 index 6e4059807f..0000000000 --- a/couchpotato/core/downloaders/synology/main.py +++ /dev/null @@ -1,105 +0,0 @@ -from couchpotato.core.downloaders.base import Downloader -from couchpotato.core.helpers.encoding import isInt -from couchpotato.core.logger import CPLog -import httplib -import json -import urllib -import urllib2 - - -log = CPLog(__name__) - -class Synology(Downloader): - - type = ['torrent_magnet'] - log = CPLog(__name__) - - def download(self, data, movie, filedata = None): - - log.error('Sending "%s" (%s) to Synology.', (data.get('name'), data.get('type'))) - - # Load host from config and split out port. 
- host = self.conf('host').split(':') - if not isInt(host[1]): - log.error('Config properties are not filled in correctly, port is missing.') - return False - - if data.get('type') == 'torrent': - log.error('Can\'t add binary torrent file') - return False - - try: - # Send request to Transmission - srpc = SynologyRPC(host[0], host[1], self.conf('username'), self.conf('password')) - remote_torrent = srpc.add_torrent_uri(data.get('url')) - log.info('Response: %s', remote_torrent) - return remote_torrent['success'] - except Exception, err: - log.error('Exception while adding torrent: %s', err) - return False - - -class SynologyRPC(object): - - '''SynologyRPC lite library''' - - def __init__(self, host = 'localhost', port = 5000, username = None, password = None): - - super(SynologyRPC, self).__init__() - - self.download_url = 'http://%s:%s/webapi/DownloadStation/task.cgi' % (host, port) - self.auth_url = 'http://%s:%s/webapi/auth.cgi' % (host, port) - self.username = username - self.password = password - self.session_name = 'DownloadStation' - - def _login(self): - if self.username and self.password: - args = {'api': 'SYNO.API.Auth', 'account': self.username, 'passwd': self.password, 'version': 2, - 'method': 'login', 'session': self.session_name, 'format': 'sid'} - response = self._req(self.auth_url, args) - if response['success'] == True: - self.sid = response['data']['sid'] - log.debug('Sid=%s', self.sid) - return response - elif self.username or self.password: - log.error('User or password missing, not using authentication.') - return False - - def _logout(self): - args = {'api':'SYNO.API.Auth', 'version':1, 'method':'logout', 'session':self.session_name, '_sid':self.sid} - return self._req(self.auth_url, args) - - def _req(self, url, args): - req_url = url + '?' 
+ urllib.urlencode(args) - try: - req_open = urllib2.urlopen(req_url) - response = json.loads(req_open.read()) - if response['success'] == True: - log.info('Synology action successfull') - return response - except httplib.InvalidURL, err: - log.error('Invalid Transmission host, check your config %s', err) - return False - except urllib2.HTTPError, err: - log.error('SynologyRPC HTTPError: %s', err) - return False - except urllib2.URLError, err: - log.error('Unable to connect to Synology %s', err) - return False - - def add_torrent_uri(self, torrent): - log.info('Adding torrent URL %s', torrent) - response = {} - # login - login = self._login() - if len(login) > 0 and login['success'] == True: - log.info('Login success, adding torrent') - args = {'api':'SYNO.DownloadStation.Task', 'version':1, 'method':'create', 'uri':torrent, '_sid':self.sid} - response = self._req(self.download_url, args) - self._logout() - else: - log.error('Couldn\'t login to Synology, %s', login) - return response - - diff --git a/couchpotato/core/downloaders/transmission.py b/couchpotato/core/downloaders/transmission.py new file mode 100644 index 0000000000..2059044aa4 --- /dev/null +++ b/couchpotato/core/downloaders/transmission.py @@ -0,0 +1,386 @@ +from base64 import b64encode +from datetime import timedelta +import httplib +import json +import os.path +import re +import urllib2 + +from couchpotato.core._base.downloader.main import DownloaderBase, ReleaseDownloadList +from couchpotato.core.helpers.encoding import isInt, sp +from couchpotato.core.helpers.variable import tryInt, tryFloat, cleanHost +from couchpotato.core.logger import CPLog + + +log = CPLog(__name__) + +autoload = 'Transmission' + + +class Transmission(DownloaderBase): + + protocol = ['torrent', 'torrent_magnet'] + log = CPLog(__name__) + trpc = None + + def connect(self): + # Load host from config and split out port. 
+ host = cleanHost(self.conf('host')).rstrip('/').rsplit(':', 1) + if not isInt(host[1]): + log.error('Config properties are not filled in correctly, port is missing.') + return False + + self.trpc = TransmissionRPC(host[0], port = host[1], rpc_url = self.conf('rpc_url').strip('/ '), username = self.conf('username'), password = self.conf('password')) + return self.trpc + + def download(self, data = None, media = None, filedata = None): + """ + Send a torrent/nzb file to the downloader + + :param data: dict returned from provider + Contains the release information + :param media: media dict with information + Used for creating the filename when possible + :param filedata: downloaded torrent/nzb filedata + The file gets downloaded in the searcher and send to this function + This is done to have failed checking before using the downloader, so the downloader + doesn't need to worry about that + :return: boolean + One faile returns false, but the downloaded should log his own errors + """ + + if not media: media = {} + if not data: data = {} + + log.info('Sending "%s" (%s) to Transmission.', (data.get('name'), data.get('protocol'))) + + if not self.connect(): + return False + + if not filedata and data.get('protocol') == 'torrent': + log.error('Failed sending torrent, no data') + return False + + # Set parameters for adding torrent + params = { + 'paused': self.conf('paused', default = False) + } + + if self.conf('directory'): + host = cleanHost(self.conf('host')).rstrip('/').rsplit(':', 1) + if os.path.isdir(self.conf('directory')) or not (host[0] == '127.0.0.1' or host[0] == 'localhost'): + params['download-dir'] = self.conf('directory').rstrip(os.path.sep) + else: + log.error('Download directory from Transmission settings: %s doesn\'t exist', self.conf('directory')) + + # Change parameters of torrent + torrent_params = {} + if data.get('seed_ratio'): + torrent_params['seedRatioLimit'] = tryFloat(data.get('seed_ratio')) + torrent_params['seedRatioMode'] = 1 + + if 
data.get('seed_time'): + torrent_params['seedIdleLimit'] = tryInt(data.get('seed_time')) * 60 + torrent_params['seedIdleMode'] = 1 + + # Send request to Transmission + if data.get('protocol') == 'torrent_magnet': + remote_torrent = self.trpc.add_torrent_uri(data.get('url'), arguments = params) + torrent_params['trackerAdd'] = self.torrent_trackers + else: + remote_torrent = self.trpc.add_torrent_file(b64encode(filedata), arguments = params) + + if not remote_torrent: + log.error('Failed sending torrent to Transmission') + return False + + data = remote_torrent.get('torrent-added') or remote_torrent.get('torrent-duplicate') + + # Change settings of added torrents + if torrent_params: + self.trpc.set_torrent(data['hashString'], torrent_params) + + log.info('Torrent sent to Transmission successfully.') + return self.downloadReturnId(data['hashString']) + + def test(self): + """ Check if connection works + :return: bool + """ + + if self.connect() and self.trpc.get_session(): + return True + return False + + def getAllDownloadStatus(self, ids): + """ Get status of all active downloads + + :param ids: list of (mixed) downloader ids + Used to match the releases for this downloader as there could be + other downloaders active that it should ignore + :return: list of releases + """ + + log.debug('Checking Transmission download status.') + + if not self.connect(): + return [] + + release_downloads = ReleaseDownloadList(self) + + return_params = { + 'fields': ['id', 'name', 'hashString', 'percentDone', 'status', 'eta', 'isStalled', 'isFinished', 'downloadDir', 'uploadRatio', 'secondsSeeding', 'seedIdleLimit', 'files'] + } + + session = self.trpc.get_session() + queue = self.trpc.get_alltorrents(return_params) + if not (queue and queue.get('torrents')): + log.debug('Nothing in queue or error') + return [] + + for torrent in queue['torrents']: + if torrent['hashString'] in ids: + log.debug('name=%s / id=%s / downloadDir=%s / hashString=%s / percentDone=%s / status=%s / 
isStalled=%s / eta=%s / uploadRatio=%s / isFinished=%s / incomplete-dir-enabled=%s / incomplete-dir=%s', + (torrent['name'], torrent['id'], torrent['downloadDir'], torrent['hashString'], torrent['percentDone'], torrent['status'], torrent.get('isStalled', 'N/A'), torrent['eta'], torrent['uploadRatio'], torrent['isFinished'], session['incomplete-dir-enabled'], session['incomplete-dir'])) + + """ + https://trac.transmissionbt.com/browser/branches/2.8x/libtransmission/transmission.h#L1853 + 0 = Torrent is stopped + 1 = Queued to check files + 2 = Checking files + 3 = Queued to download + 4 = Downloading + 5 = Queued to seed + 6 = Seeding + """ + + status = 'busy' + if torrent.get('isStalled') and not torrent['percentDone'] == 1 and self.conf('stalled_as_failed'): + status = 'failed' + elif torrent['status'] == 0 and torrent['percentDone'] == 1 and torrent['isFinished']: + status = 'completed' + elif torrent['status'] in [5, 6]: + status = 'seeding' + + if session['incomplete-dir-enabled'] and status == 'busy': + torrent_folder = session['incomplete-dir'] + else: + torrent_folder = torrent['downloadDir'] + + torrent_files = [] + for file_item in torrent['files']: + torrent_files.append(sp(os.path.join(torrent_folder, file_item['name']))) + + release_downloads.append({ + 'id': torrent['hashString'], + 'name': torrent['name'], + 'status': status, + 'original_status': torrent['status'], + 'seed_ratio': torrent['uploadRatio'], + 'timeleft': str(timedelta(seconds = torrent['eta'])), + 'folder': sp(torrent_folder if len(torrent_files) == 1 else os.path.join(torrent_folder, torrent['name'])), + 'files': torrent_files + }) + + return release_downloads + + def pause(self, release_download, pause = True): + if pause: + return self.trpc.stop_torrent(release_download['id']) + else: + return self.trpc.start_torrent(release_download['id']) + + def removeFailed(self, release_download): + log.info('%s failed downloading, deleting...', release_download['name']) + return 
self.trpc.remove_torrent(release_download['id'], True) + + def processComplete(self, release_download, delete_files = False): + log.debug('Requesting Transmission to remove the torrent %s%s.', (release_download['name'], ' and cleanup the downloaded files' if delete_files else '')) + return self.trpc.remove_torrent(release_download['id'], delete_files) + + +class TransmissionRPC(object): + + """TransmissionRPC lite library""" + def __init__(self, host = 'http://localhost', port = 9091, rpc_url = 'transmission', username = None, password = None): + + super(TransmissionRPC, self).__init__() + + self.url = host + ':' + str(port) + '/' + rpc_url + '/rpc' + self.tag = 0 + self.session_id = 0 + self.session = {} + if username and password: + password_manager = urllib2.HTTPPasswordMgrWithDefaultRealm() + password_manager.add_password(realm = None, uri = self.url, user = username, passwd = password) + opener = urllib2.build_opener(urllib2.HTTPBasicAuthHandler(password_manager)) + opener.addheaders = [('User-agent', 'couchpotato-transmission-client/1.0')] + urllib2.install_opener(opener) + elif username or password: + log.debug('User or password missing, not using authentication.') + self.session = self.get_session() + + def _request(self, ojson): + self.tag += 1 + headers = {'x-transmission-session-id': str(self.session_id)} + request = urllib2.Request(self.url, json.dumps(ojson).encode('utf-8'), headers) + try: + open_request = urllib2.urlopen(request) + response = json.loads(open_request.read()) + log.debug('request: %s', json.dumps(ojson)) + log.debug('response: %s', json.dumps(response)) + if response['result'] == 'success': + log.debug('Transmission action successful') + return response['arguments'] + else: + log.debug('Unknown failure sending command to Transmission. 
Return text is: %s', response['result']) + return False + except httplib.InvalidURL as err: + log.error('Invalid Transmission host, check your config %s', err) + return False + except urllib2.HTTPError as err: + if err.code == 401: + log.error('Invalid Transmission Username or Password, check your config') + return False + elif err.code == 409: + msg = str(err.read()) + try: + self.session_id = \ + re.search('X-Transmission-Session-Id:\s*(\w+)', msg).group(1) + log.debug('X-Transmission-Session-Id: %s', self.session_id) + + # #resend request with the updated header + + return self._request(ojson) + except: + log.error('Unable to get Transmission Session-Id %s', err) + else: + log.error('TransmissionRPC HTTPError: %s', err) + except urllib2.URLError as err: + log.error('Unable to connect to Transmission %s', err) + + def get_session(self): + post_data = {'method': 'session-get', 'tag': self.tag} + return self._request(post_data) + + def add_torrent_uri(self, torrent, arguments): + arguments['filename'] = torrent + post_data = {'arguments': arguments, 'method': 'torrent-add', 'tag': self.tag} + return self._request(post_data) + + def add_torrent_file(self, torrent, arguments): + arguments['metainfo'] = torrent + post_data = {'arguments': arguments, 'method': 'torrent-add', 'tag': self.tag} + return self._request(post_data) + + def set_torrent(self, torrent_id, arguments): + arguments['ids'] = torrent_id + post_data = {'arguments': arguments, 'method': 'torrent-set', 'tag': self.tag} + return self._request(post_data) + + def get_alltorrents(self, arguments): + post_data = {'arguments': arguments, 'method': 'torrent-get', 'tag': self.tag} + return self._request(post_data) + + def stop_torrent(self, torrent_id): + post_data = {'arguments': {'ids': torrent_id}, 'method': 'torrent-stop', 'tag': self.tag} + return self._request(post_data) + + def start_torrent(self, torrent_id): + post_data = {'arguments': {'ids': torrent_id}, 'method': 'torrent-start', 'tag': self.tag} + 
return self._request(post_data) + + def remove_torrent(self, torrent_id, delete_local_data): + post_data = {'arguments': {'ids': torrent_id, 'delete-local-data': delete_local_data}, 'method': 'torrent-remove', 'tag': self.tag} + return self._request(post_data) + + +config = [{ + 'name': 'transmission', + 'groups': [ + { + 'tab': 'downloaders', + 'list': 'download_providers', + 'name': 'transmission', + 'label': 'Transmission', + 'description': 'Use Transmission to download torrents.', + 'wizard': True, + 'options': [ + { + 'name': 'enabled', + 'default': 0, + 'type': 'enabler', + 'radio_group': 'torrent', + }, + { + 'name': 'host', + 'default': 'http://localhost:9091', + 'description': 'Hostname with port. Usually http://localhost:9091', + }, + { + 'name': 'rpc_url', + 'type': 'string', + 'default': 'transmission', + 'advanced': True, + 'description': 'Change if you don\'t run Transmission RPC at the default url.', + }, + { + 'name': 'username', + }, + { + 'name': 'password', + 'type': 'password', + }, + { + 'name': 'directory', + 'type': 'directory', + 'description': 'Download to this directory. 
Keep empty for default Transmission download directory.', + }, + { + 'name': 'remove_complete', + 'label': 'Remove torrent', + 'default': True, + 'advanced': True, + 'type': 'bool', + 'description': 'Remove the torrent from Transmission after it finished seeding.', + }, + { + 'name': 'delete_files', + 'label': 'Remove files', + 'default': True, + 'type': 'bool', + 'advanced': True, + 'description': 'Also remove the leftover files.', + }, + { + 'name': 'paused', + 'type': 'bool', + 'advanced': True, + 'default': False, + 'description': 'Add the torrent paused.', + }, + { + 'name': 'manual', + 'default': 0, + 'type': 'bool', + 'advanced': True, + 'description': 'Disable this downloader for automated searches, but use it when I manually send a release.', + }, + { + 'name': 'stalled_as_failed', + 'default': True, + 'advanced': True, + 'type': 'bool', + 'description': 'Consider a stalled torrent as failed', + }, + { + 'name': 'delete_failed', + 'default': True, + 'advanced': True, + 'type': 'bool', + 'description': 'Delete a release after the download has failed.', + }, + ], + } + ], +}] diff --git a/couchpotato/core/downloaders/transmission/__init__.py b/couchpotato/core/downloaders/transmission/__init__.py deleted file mode 100644 index 210a0d9e46..0000000000 --- a/couchpotato/core/downloaders/transmission/__init__.py +++ /dev/null @@ -1,63 +0,0 @@ -from .main import Transmission - -def start(): - return Transmission() - -config = [{ - 'name': 'transmission', - 'groups': [ - { - 'tab': 'downloaders', - 'list': 'download_providers', - 'name': 'transmission', - 'label': 'Transmission', - 'description': 'Use Transmission to download torrents.', - 'wizard': True, - 'options': [ - { - 'name': 'enabled', - 'default': 0, - 'type': 'enabler', - 'radio_group': 'torrent', - }, - { - 'name': 'host', - 'default': 'localhost:9091', - 'description': 'Hostname with port. 
Usually localhost:9091', - }, - { - 'name': 'username', - }, - { - 'name': 'password', - 'type': 'password', - }, - { - 'name': 'paused', - 'type': 'bool', - 'default': False, - 'description': 'Add the torrent paused.', - }, - { - 'name': 'directory', - 'type': 'directory', - 'description': 'Where should Transmission saved the downloaded files?', - }, - { - 'name': 'ratio', - 'default': 10, - 'type': 'int', - 'advanced': True, - 'description': 'Stop transfer when reaching ratio', - }, - { - 'name': 'manual', - 'default': 0, - 'type': 'bool', - 'advanced': True, - 'description': 'Disable this downloader for automated searches, but use it when I manually send a release.', - }, - ], - } - ], -}] diff --git a/couchpotato/core/downloaders/transmission/main.py b/couchpotato/core/downloaders/transmission/main.py deleted file mode 100644 index 5c13af6e1c..0000000000 --- a/couchpotato/core/downloaders/transmission/main.py +++ /dev/null @@ -1,148 +0,0 @@ -from base64 import b64encode -from couchpotato.core.downloaders.base import Downloader -from couchpotato.core.helpers.encoding import isInt -from couchpotato.core.logger import CPLog -import httplib -import json -import os.path -import re -import urllib2 - -log = CPLog(__name__) - - -class Transmission(Downloader): - - type = ['torrent', 'torrent_magnet'] - log = CPLog(__name__) - - def download(self, data, movie, filedata = None): - - log.debug('Sending "%s" (%s) to Transmission.', (data.get('name'), data.get('type'))) - - # Load host from config and split out port. 
- host = self.conf('host').split(':') - if not isInt(host[1]): - log.error('Config properties are not filled in correctly, port is missing.') - return False - - # Set parameters for Transmission - folder_name = self.createFileName(data, filedata, movie)[:-len(data.get('type')) - 1] - folder_path = os.path.join(self.conf('directory', default = ''), folder_name).rstrip(os.path.sep) - - # Create the empty folder to download too - self.makeDir(folder_path) - - params = { - 'paused': self.conf('paused', default = 0), - 'download-dir': folder_path - } - - torrent_params = {} - if self.conf('ratio'): - torrent_params = { - 'seedRatioLimit': self.conf('ratio'), - 'seedRatioMode': self.conf('ratio') - } - - if not filedata and data.get('type') == 'torrent': - log.error('Failed sending torrent, no data') - return False - - # Send request to Transmission - try: - trpc = TransmissionRPC(host[0], port = host[1], username = self.conf('username'), password = self.conf('password')) - if data.get('type') == 'torrent_magnet': - remote_torrent = trpc.add_torrent_uri(data.get('url'), arguments = params) - torrent_params['trackerAdd'] = self.torrent_trackers - else: - remote_torrent = trpc.add_torrent_file(b64encode(filedata), arguments = params) - - # Change settings of added torrents - if torrent_params: - trpc.set_torrent(remote_torrent['torrent-added']['hashString'], torrent_params) - - return True - except Exception, err: - log.error('Failed to change settings for transfer: %s', err) - return False - - -class TransmissionRPC(object): - - """TransmissionRPC lite library""" - - def __init__(self, host = 'localhost', port = 9091, username = None, password = None): - - super(TransmissionRPC, self).__init__() - - self.url = 'http://' + host + ':' + str(port) + '/transmission/rpc' - self.tag = 0 - self.session_id = 0 - self.session = {} - if username and password: - password_manager = urllib2.HTTPPasswordMgrWithDefaultRealm() - password_manager.add_password(realm = None, uri = self.url, 
user = username, passwd = password) - opener = urllib2.build_opener(urllib2.HTTPBasicAuthHandler(password_manager), urllib2.HTTPDigestAuthHandler(password_manager)) - opener.addheaders = [('User-agent', 'couchpotato-transmission-client/1.0')] - urllib2.install_opener(opener) - elif username or password: - log.debug('User or password missing, not using authentication.') - self.session = self.get_session() - - def _request(self, ojson): - self.tag += 1 - headers = {'x-transmission-session-id': str(self.session_id)} - request = urllib2.Request(self.url, json.dumps(ojson).encode('utf-8'), headers) - try: - open_request = urllib2.urlopen(request) - response = json.loads(open_request.read()) - log.debug('response: %s', json.dumps(response)) - if response['result'] == 'success': - log.debug('Transmission action successfull') - return response['arguments'] - else: - log.debug('Unknown failure sending command to Transmission. Return text is: %s', response['result']) - return False - except httplib.InvalidURL, err: - log.error('Invalid Transmission host, check your config %s', err) - return False - except urllib2.HTTPError, err: - if err.code == 401: - log.error('Invalid Transmission Username or Password, check your config') - return False - elif err.code == 409: - msg = str(err.read()) - try: - self.session_id = \ - re.search('X-Transmission-Session-Id:\s*(\w+)', msg).group(1) - log.debug('X-Transmission-Session-Id: %s', self.session_id) - - # #resend request with the updated header - - return self._request(ojson) - except: - log.error('Unable to get Transmission Session-Id %s', err) - else: - log.error('TransmissionRPC HTTPError: %s', err) - except urllib2.URLError, err: - log.error('Unable to connect to Transmission %s', err) - - def get_session(self): - post_data = {'method': 'session-get', 'tag': self.tag} - return self._request(post_data) - - def add_torrent_uri(self, torrent, arguments): - arguments['filename'] = torrent - post_data = {'arguments': arguments, 
'method': 'torrent-add', 'tag': self.tag} - return self._request(post_data) - - def add_torrent_file(self, torrent, arguments): - arguments['metainfo'] = torrent - post_data = {'arguments': arguments, 'method': 'torrent-add', 'tag': self.tag} - return self._request(post_data) - - def set_torrent(self, torrent_id, arguments): - arguments['ids'] = torrent_id - post_data = {'arguments': arguments, 'method': 'torrent-set', 'tag': self.tag} - return self._request(post_data) diff --git a/couchpotato/core/downloaders/utorrent.py b/couchpotato/core/downloaders/utorrent.py new file mode 100644 index 0000000000..264e4965f3 --- /dev/null +++ b/couchpotato/core/downloaders/utorrent.py @@ -0,0 +1,429 @@ +О╩©from base64 import b16encode, b32decode +from datetime import timedelta +from hashlib import sha1 +import cookielib +import httplib +import json +import os +import re +import stat +import time +import urllib +import urllib2 + +from bencode import bencode as benc, bdecode +from couchpotato.core._base.downloader.main import DownloaderBase, ReleaseDownloadList +from couchpotato.core.helpers.encoding import isInt, ss, sp +from couchpotato.core.helpers.variable import tryInt, tryFloat, cleanHost +from couchpotato.core.logger import CPLog +from multipartpost import MultipartPostHandler + + +log = CPLog(__name__) + +autoload = 'uTorrent' + + +class uTorrent(DownloaderBase): + + protocol = ['torrent', 'torrent_magnet'] + utorrent_api = None + status_flags = { + 'STARTED': 1, + 'CHECKING': 2, + 'CHECK-START': 4, + 'CHECKED': 8, + 'ERROR': 16, + 'PAUSED': 32, + 'QUEUED': 64, + 'LOADED': 128 + } + + def connect(self): + # Load host from config and split out port. 
+ host = cleanHost(self.conf('host'), protocol = False).split(':') + if not isInt(host[1]): + log.error('Config properties are not filled in correctly, port is missing.') + return False + + self.utorrent_api = uTorrentAPI(host[0], port = host[1], username = self.conf('username'), password = self.conf('password')) + + return self.utorrent_api + + def download(self, data = None, media = None, filedata = None): + """ + Send a torrent/nzb file to the downloader + + :param data: dict returned from provider + Contains the release information + :param media: media dict with information + Used for creating the filename when possible + :param filedata: downloaded torrent/nzb filedata + The file gets downloaded in the searcher and send to this function + This is done to have failed checking before using the downloader, so the downloader + doesn't need to worry about that + :return: boolean + One faile returns false, but the downloaded should log his own errors + """ + + if not media: media = {} + if not data: data = {} + + log.debug("Sending '%s' (%s) to uTorrent.", (data.get('name'), data.get('protocol'))) + + if not self.connect(): + return False + + torrent_params = {} + if self.conf('label'): + torrent_params['label'] = self.conf('label') + + if not filedata and data.get('protocol') == 'torrent': + log.error('Failed sending torrent, no data') + return False + + if data.get('protocol') == 'torrent_magnet': + torrent_hash = re.findall('urn:btih:([\w]{32,40})', data.get('url'))[0].upper() + torrent_params['trackers'] = '%0D%0A%0D%0A'.join(self.torrent_trackers) + else: + info = bdecode(filedata)['info'] + torrent_hash = sha1(benc(info)).hexdigest().upper() + + torrent_filename = self.createFileName(data, filedata, media) + + if data.get('seed_ratio'): + torrent_params['seed_override'] = 1 + torrent_params['seed_ratio'] = tryInt(tryFloat(data['seed_ratio']) * 1000) + + if data.get('seed_time'): + torrent_params['seed_override'] = 1 + torrent_params['seed_time'] = 
tryInt(data['seed_time']) * 3600 + + # Convert base 32 to hex + if len(torrent_hash) == 32: + torrent_hash = b16encode(b32decode(torrent_hash)) + + # Send request to uTorrent + if data.get('protocol') == 'torrent_magnet': + self.utorrent_api.add_torrent_uri(torrent_filename, data.get('url')) + else: + self.utorrent_api.add_torrent_file(torrent_filename, filedata) + + # Change settings of added torrent + self.utorrent_api.set_torrent(torrent_hash, torrent_params) + if self.conf('paused', default = 0): + self.utorrent_api.pause_torrent(torrent_hash) + + return self.downloadReturnId(torrent_hash) + + def test(self): + """ Check if connection works + :return: bool + """ + + if self.connect(): + build_version = self.utorrent_api.get_build() + if not build_version: + return False + if build_version < 25406: # This build corresponds to version 3.0.0 stable + return False, 'Your uTorrent client is too old, please update to newest version.' + return True + + return False + + def getAllDownloadStatus(self, ids): + """ Get status of all active downloads + + :param ids: list of (mixed) downloader ids + Used to match the releases for this downloader as there could be + other downloaders active that it should ignore + :return: list of releases + """ + + log.debug('Checking uTorrent download status.') + + if not self.connect(): + return [] + + release_downloads = ReleaseDownloadList(self) + + data = self.utorrent_api.get_status() + if not data: + log.error('Error getting data from uTorrent') + return [] + + queue = json.loads(data) + if queue.get('error'): + log.error('Error getting data from uTorrent: %s', queue.get('error')) + return [] + + if not queue.get('torrents'): + log.debug('Nothing in queue') + return [] + + # Get torrents + for torrent in queue['torrents']: + if torrent[0] in ids: + + #Get files of the torrent + torrent_files = [] + try: + torrent_files = json.loads(self.utorrent_api.get_files(torrent[0])) + torrent_files = [sp(os.path.join(torrent[26], 
torrent_file[0])) for torrent_file in torrent_files['files'][1]] + except: + log.debug('Failed getting files from torrent: %s', torrent[2]) + + status = 'busy' + if (torrent[1] & self.status_flags['STARTED'] or torrent[1] & self.status_flags['QUEUED']) and torrent[4] == 1000: + status = 'seeding' + elif torrent[1] & self.status_flags['ERROR'] and 'There is not enough space on the disk' not in torrent[21]: + status = 'failed' + elif torrent[4] == 1000: + status = 'completed' + + if not status == 'busy': + self.removeReadOnly(torrent_files) + + release_downloads.append({ + 'id': torrent[0], + 'name': torrent[2], + 'status': status, + 'seed_ratio': float(torrent[7]) / 1000, + 'original_status': torrent[1], + 'timeleft': str(timedelta(seconds = torrent[10])), + 'folder': sp(torrent[26]), + 'files': torrent_files + }) + + return release_downloads + + def pause(self, release_download, pause = True): + if not self.connect(): + return False + return self.utorrent_api.pause_torrent(release_download['id'], pause) + + def removeFailed(self, release_download): + log.info('%s failed downloading, deleting...', release_download['name']) + if not self.connect(): + return False + return self.utorrent_api.remove_torrent(release_download['id'], remove_data = True) + + def processComplete(self, release_download, delete_files = False): + log.debug('Requesting uTorrent to remove the torrent %s%s.', (release_download['name'], ' and cleanup the downloaded files' if delete_files else '')) + if not self.connect(): + return False + return self.utorrent_api.remove_torrent(release_download['id'], remove_data = delete_files) + + def removeReadOnly(self, files): + #Removes all read-on ly flags in a for all files + for filepath in files: + if os.path.isfile(filepath): + #Windows only needs S_IWRITE, but we bitwise-or with current perms to preserve other permission bits on Linux + os.chmod(filepath, stat.S_IWRITE | os.stat(filepath).st_mode) + +class uTorrentAPI(object): + + def __init__(self, 
host = 'localhost', port = 8000, username = None, password = None): + + super(uTorrentAPI, self).__init__() + + self.url = 'http://' + str(host) + ':' + str(port) + '/gui/' + self.token = '' + self.last_time = time.time() + cookies = cookielib.CookieJar() + self.opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookies), MultipartPostHandler) + self.opener.addheaders = [('User-agent', 'couchpotato-utorrent-client/1.0')] + if username and password: + password_manager = urllib2.HTTPPasswordMgrWithDefaultRealm() + password_manager.add_password(realm = None, uri = self.url, user = username, passwd = password) + self.opener.add_handler(urllib2.HTTPBasicAuthHandler(password_manager)) + elif username or password: + log.debug('User or password missing, not using authentication.') + self.token = self.get_token() + + def _request(self, action, data = None): + if time.time() > self.last_time + 1800: + self.last_time = time.time() + self.token = self.get_token() + request = urllib2.Request(self.url + '?token=' + self.token + '&' + action, data) + try: + open_request = self.opener.open(request) + response = open_request.read() + if response: + return response + else: + log.debug('Unknown failure sending command to uTorrent. 
Return text is: %s', response) + except httplib.InvalidURL as err: + log.error('Invalid uTorrent host, check your config %s', err) + except urllib2.HTTPError as err: + if err.code == 401: + log.error('Invalid uTorrent Username or Password, check your config') + else: + log.error('uTorrent HTTPError: %s', err) + except urllib2.URLError as err: + log.error('Unable to connect to uTorrent %s', err) + return False + + def get_token(self): + request = self.opener.open(self.url + 'token.html') + token = re.findall('(.*?)uTorrent (3.0+) to download torrents.', + 'wizard': True, + 'options': [ + { + 'name': 'enabled', + 'default': 0, + 'type': 'enabler', + 'radio_group': 'torrent', + }, + { + 'name': 'host', + 'default': 'localhost:8000', + 'description': 'Port can be found in settings when enabling WebUI.', + }, + { + 'name': 'username', + }, + { + 'name': 'password', + 'type': 'password', + }, + { + 'name': 'label', + 'description': 'Label to add torrent as.', + }, + { + 'name': 'remove_complete', + 'label': 'Remove torrent', + 'default': True, + 'advanced': True, + 'type': 'bool', + 'description': 'Remove the torrent from uTorrent after it finished seeding.', + }, + { + 'name': 'delete_files', + 'label': 'Remove files', + 'default': True, + 'type': 'bool', + 'advanced': True, + 'description': 'Also remove the leftover files.', + }, + { + 'name': 'paused', + 'type': 'bool', + 'advanced': True, + 'default': False, + 'description': 'Add the torrent paused.', + }, + { + 'name': 'manual', + 'default': 0, + 'type': 'bool', + 'advanced': True, + 'description': 'Disable this downloader for automated searches, but use it when I manually send a release.', + }, + { + 'name': 'delete_failed', + 'default': True, + 'advanced': True, + 'type': 'bool', + 'description': 'Delete a release after the download has failed.', + }, + ], + } + ], +}] diff --git a/couchpotato/core/downloaders/utorrent/__init__.py b/couchpotato/core/downloaders/utorrent/__init__.py deleted file mode 100644 index 
2c494eb208..0000000000 --- a/couchpotato/core/downloaders/utorrent/__init__.py +++ /dev/null @@ -1,55 +0,0 @@ -from .main import uTorrent - -def start(): - return uTorrent() - -config = [{ - 'name': 'utorrent', - 'groups': [ - { - 'tab': 'downloaders', - 'list': 'download_providers', - 'name': 'utorrent', - 'label': 'uTorrent', - 'description': 'Use uTorrent to download torrents.', - 'wizard': True, - 'options': [ - { - 'name': 'enabled', - 'default': 0, - 'type': 'enabler', - 'radio_group': 'torrent', - }, - { - 'name': 'host', - 'default': 'localhost:8000', - 'description': 'Hostname with port. Usually localhost:8000', - }, - { - 'name': 'username', - }, - { - 'name': 'password', - 'type': 'password', - }, - { - 'name': 'label', - 'description': 'Label to add torrent as.', - }, - { - 'name': 'paused', - 'type': 'bool', - 'default': False, - 'description': 'Add the torrent paused.', - }, - { - 'name': 'manual', - 'default': 0, - 'type': 'bool', - 'advanced': True, - 'description': 'Disable this downloader for automated searches, but use it when I manually send a release.', - }, - ], - } - ], -}] diff --git a/couchpotato/core/downloaders/utorrent/main.py b/couchpotato/core/downloaders/utorrent/main.py deleted file mode 100644 index 5953b11790..0000000000 --- a/couchpotato/core/downloaders/utorrent/main.py +++ /dev/null @@ -1,197 +0,0 @@ -from base64 import b16encode, b32decode -from bencode import bencode, bdecode -from couchpotato.core.downloaders.base import Downloader -from couchpotato.core.helpers.encoding import isInt, ss -from couchpotato.core.logger import CPLog -from hashlib import sha1 -from multipartpost import MultipartPostHandler -import cookielib -import httplib -import json -import re -import time -import urllib -import urllib2 - - -log = CPLog(__name__) - - -class uTorrent(Downloader): - - type = ['torrent', 'torrent_magnet'] - utorrent_api = None - - def download(self, data, movie, filedata = None): - - log.debug('Sending "%s" (%s) to uTorrent.', 
(data.get('name'), data.get('type'))) - - # Load host from config and split out port. - host = self.conf('host').split(':') - if not isInt(host[1]): - log.error('Config properties are not filled in correctly, port is missing.') - return False - - torrent_params = {} - if self.conf('label'): - torrent_params['label'] = self.conf('label') - - if not filedata and data.get('type') == 'torrent': - log.error('Failed sending torrent, no data') - return False - - if data.get('type') == 'torrent_magnet': - torrent_hash = re.findall('urn:btih:([\w]{32,40})', data.get('url'))[0].upper() - torrent_params['trackers'] = '%0D%0A%0D%0A'.join(self.torrent_trackers) - else: - info = bdecode(filedata)["info"] - torrent_hash = sha1(bencode(info)).hexdigest().upper() - torrent_filename = self.createFileName(data, filedata, movie) - - # Convert base 32 to hex - if len(torrent_hash) == 32: - torrent_hash = b16encode(b32decode(torrent_hash)) - - # Send request to uTorrent - try: - if not self.utorrent_api: - self.utorrent_api = uTorrentAPI(host[0], port = host[1], username = self.conf('username'), password = self.conf('password')) - - if data.get('type') == 'torrent_magnet': - self.utorrent_api.add_torrent_uri(data.get('url')) - else: - self.utorrent_api.add_torrent_file(torrent_filename, filedata) - - # Change settings of added torrents - self.utorrent_api.set_torrent(torrent_hash, torrent_params) - if self.conf('paused', default = 0): - self.utorrent_api.pause_torrent(torrent_hash) - return True - except Exception, err: - log.error('Failed to send torrent to uTorrent: %s', err) - return False - - def getAllDownloadStatus(self): - - log.debug('Checking uTorrent download status.') - - # Load host from config and split out port. 
- host = self.conf('host').split(':') - if not isInt(host[1]): - log.error('Config properties are not filled in correctly, port is missing.') - return False - - try: - self.utorrent_api = uTorrentAPI(host[0], port = host[1], username = self.conf('username'), password = self.conf('password')) - except Exception, err: - log.error('Failed to get uTorrent object: %s', err) - return False - - data = '' - try: - data = self.utorrent_api.get_status() - queue = json.loads(data) - if queue.get('error'): - log.error('Error getting data from uTorrent: %s', queue.get('error')) - return False - - except Exception, err: - log.error('Failed to get status from uTorrent: %s', err) - return False - - if queue.get('torrents', []) == []: - log.debug('Nothing in queue') - return False - - statuses = [] - - # Get torrents - for item in queue.get('torrents', []): - - # item[21] = Paused | Downloading | Seeding | Finished - status = 'busy' - if item[21] == 'Finished' or item[21] == 'Seeding': - status = 'completed' - - statuses.append({ - 'id': item[0], - 'name': item[2], - 'status': status, - 'original_status': item[1], - 'timeleft': item[10], - }) - - return statuses - - - -class uTorrentAPI(object): - - def __init__(self, host = 'localhost', port = 8000, username = None, password = None): - - super(uTorrentAPI, self).__init__() - - self.url = 'http://' + str(host) + ':' + str(port) + '/gui/' - self.token = '' - self.last_time = time.time() - cookies = cookielib.CookieJar() - self.opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookies), MultipartPostHandler) - self.opener.addheaders = [('User-agent', 'couchpotato-utorrent-client/1.0')] - if username and password: - password_manager = urllib2.HTTPPasswordMgrWithDefaultRealm() - password_manager.add_password(realm = None, uri = self.url, user = username, passwd = password) - self.opener.add_handler(urllib2.HTTPBasicAuthHandler(password_manager)) - self.opener.add_handler(urllib2.HTTPDigestAuthHandler(password_manager)) - elif 
username or password: - log.debug('User or password missing, not using authentication.') - self.token = self.get_token() - - def _request(self, action, data = None): - if time.time() > self.last_time + 1800: - self.last_time = time.time() - self.token = self.get_token() - request = urllib2.Request(self.url + "?token=" + self.token + "&" + action, data) - try: - open_request = self.opener.open(request) - response = open_request.read() - if response: - return response - else: - log.debug('Unknown failure sending command to uTorrent. Return text is: %s', response) - except httplib.InvalidURL, err: - log.error('Invalid uTorrent host, check your config %s', err) - except urllib2.HTTPError, err: - if err.code == 401: - log.error('Invalid uTorrent Username or Password, check your config') - else: - log.error('uTorrent HTTPError: %s', err) - except urllib2.URLError, err: - log.error('Unable to connect to uTorrent %s', err) - return False - - def get_token(self): - request = self.opener.open(self.url + "token.html") - token = re.findall("(.*?) 
0: + # Dict if isinstance(results[0], dict): + results.reverse() + merged = {} for result in results: - merged = mergeDicts(merged, result) + merged = mergeDicts(merged, result, prepend_list = True) results = merged # Lists @@ -132,23 +161,24 @@ def fireEvent(name, *args, **kwargs): options['on_complete']() return results - except KeyError, e: - pass except Exception: log.error('%s: %s', (name, traceback.format_exc())) + def fireEventAsync(*args, **kwargs): try: - my_thread = threading.Thread(target = fireEvent, args = args, kwargs = kwargs) - my_thread.setDaemon(True) - my_thread.start() + t = threading.Thread(target = fireEvent, args = args, kwargs = kwargs) + t.setDaemon(True) + t.start() return True - except Exception, e: + except Exception as e: log.error('%s: %s', (args[0], e)) + def errorHandler(error): etype, value, tb = error log.error(''.join(traceback.format_exception(etype, value, tb))) + def getEvent(name): return events[name] diff --git a/couchpotato/core/helpers/encoding.py b/couchpotato/core/helpers/encoding.py index a11dd88bcc..f99953eebc 100644 --- a/couchpotato/core/helpers/encoding.py +++ b/couchpotato/core/helpers/encoding.py @@ -1,17 +1,24 @@ -from couchpotato.core.logger import CPLog from string import ascii_letters, digits from urllib import quote_plus +import os import re import traceback import unicodedata +from chardet import detect +from couchpotato.core.logger import CPLog +import six + + log = CPLog(__name__) def toSafeString(original): valid_chars = "-_.() %s%s" % (ascii_letters, digits) - cleanedFilename = unicodedata.normalize('NFKD', toUnicode(original)).encode('ASCII', 'ignore') - return ''.join(c for c in cleanedFilename if c in valid_chars) + cleaned_filename = unicodedata.normalize('NFKD', toUnicode(original)).encode('ASCII', 'ignore') + valid_string = ''.join(c for c in cleaned_filename if c in valid_chars) + return ' '.join(valid_string.split()) + def simplifyString(original): string = stripAccents(original.lower()) @@ -19,37 
+26,86 @@ def simplifyString(original): split = re.split('\W+|_', string.lower()) return toUnicode(' '.join(split)) + def toUnicode(original, *args): try: if isinstance(original, unicode): return original else: try: - return unicode(original, *args) + return six.text_type(original, *args) except: try: - return ek(original, *args) + from couchpotato.environment import Env + return original.decode(Env.get("encoding")) except: - raise + try: + detected = detect(original) + try: + if detected.get('confidence') > 0.8: + return original.decode(detected.get('encoding')) + except: + pass + + return ek(original, *args) + except: + raise except: log.error('Unable to decode value "%s..." : %s ', (repr(original)[:20], traceback.format_exc())) - ascii_text = str(original).encode('string_escape') - return toUnicode(ascii_text) + return 'ERROR DECODING STRING' + def ss(original, *args): - from couchpotato.environment import Env - return toUnicode(original, *args).encode(Env.get('encoding')) + + u_original = toUnicode(original, *args) + try: + from couchpotato.environment import Env + return u_original.encode(Env.get('encoding')) + except Exception as e: + log.debug('Failed ss encoding char, force UTF8: %s', e) + try: + return u_original.encode(Env.get('encoding'), 'replace') + except: + return u_original.encode('utf-8', 'replace') + + +def sp(path, *args): + + # Standardise encoding, normalise case, path and strip trailing '/' or '\' + if not path or len(path) == 0: + return path + + # convert windows path (from remote box) to *nix path + if os.path.sep == '/' and '\\' in path: + path = '/' + path.replace(':', '').replace('\\', '/') + + path = os.path.normpath(ss(path, *args)) + + # Remove any trailing path separators + if path != os.path.sep: + path = path.rstrip(os.path.sep) + + # Add a trailing separator in case it is a root folder on windows (crashes guessit) + if len(path) == 2 and path[1] == ':': + path = path + os.path.sep + + # Replace *NIX ambiguous '//' at the beginning 
of a path with '/' (crashes guessit) + path = re.sub('^//', '/', path) + + return path + def ek(original, *args): if isinstance(original, (str, unicode)): try: from couchpotato.environment import Env - return original.decode(Env.get('encoding')) + return original.decode(Env.get('encoding'), 'ignore') except UnicodeDecodeError: raise return original + def isInt(value): try: int(value) @@ -57,14 +113,16 @@ def isInt(value): except ValueError: return False + def stripAccents(s): return ''.join((c for c in unicodedata.normalize('NFD', toUnicode(s)) if unicodedata.category(c) != 'Mn')) + def tryUrlencode(s): - new = u'' - if isinstance(s, (dict)): - for key, value in s.iteritems(): - new += u'&%s=%s' % (key, tryUrlencode(value)) + new = six.u('') + if isinstance(s, dict): + for key, value in s.items(): + new += six.u('&%s=%s') % (key, tryUrlencode(value)) return new[1:] else: diff --git a/couchpotato/core/helpers/namer_check.py b/couchpotato/core/helpers/namer_check.py new file mode 100644 index 0000000000..1d18b60c6c --- /dev/null +++ b/couchpotato/core/helpers/namer_check.py @@ -0,0 +1,127 @@ +#Namer Check routine by sarakha63 +from xml.dom.minidom import parseString +from xml.dom.minidom import Node +import cookielib +import urllib +import urllib2 +import re +import time +from datetime import datetime +from bs4 import BeautifulSoup +from couchpotato.core.helpers.variable import getTitle, tryInt +from couchpotato.core.logger import CPLog +from couchpotato.core.helpers.encoding import simplifyString, tryUrlencode, toUnicode +from couchpotato.core.helpers.variable import getTitle, mergeDicts +from couchpotato.core.media._base.providers.torrent.base import TorrentProvider +from dateutil.parser import parse +from guessit import guess_movie_info +from couchpotato.core.event import fireEvent + +log = CPLog(__name__) + +clean = '[ 
_\,\.\(\)\[\]\-](extended.cut|directors.cut|french|by|ioaw|swedisch|danish|dutch|swesub|spanish|german|ac3|dts|custom|dc|divx|divx5|dsr|dsrip|dutch|dvd|dvdr|dvdrip|dvdscr|dvdscreener|screener|dvdivx|cam|fragment|fs|hdtv|hdrip|hdtvrip|internal|limited|multisubs|vost|vostfr|multi|ntsc|ogg|ogm|pal|pdtv|proper|repack|rerip|retail|r3|r5|bd5|se|svcd|swedish|german|read.nfo|nfofix|unrated|ws|telesync|ts|telecine|tc|brrip|bdrip|video_ts|audio_ts|480p|480i|576p|576i|720p|720i|1080p|1080i|hrhd|hrhdtv|hddvd|full|multi|bluray|x264|h264|xvid|xvidvd|xxx|www.www|cd[1-9]|\[.*\])([ _\,\.\(\)\[\]\-]|$)' +multipart_regex = [ + '[ _\.-]+cd[ _\.-]*([0-9a-d]+)', #*cd1 + '[ _\.-]+dvd[ _\.-]*([0-9a-d]+)', #*dvd1 + '[ _\.-]+part[ _\.-]*([0-9a-d]+)', #*part1 + '[ _\.-]+dis[ck][ _\.-]*([0-9a-d]+)', #*disk1 + 'cd[ _\.-]*([0-9a-d]+)$', #cd1.ext + 'dvd[ _\.-]*([0-9a-d]+)$', #dvd1.ext + 'part[ _\.-]*([0-9a-d]+)$', #part1.mkv + 'dis[ck][ _\.-]*([0-9a-d]+)$', #disk1.mkv + '()[ _\.-]+([0-9]*[abcd]+)(\.....?)$', + '([a-z])([0-9]+)(\.....?)$', + '()([ab])(\.....?)$' #*a.mkv + ] + +def correctName(check_name, movie): + MovieTitles = movie['info']['titles'] + result=0 + for movietitle in MovieTitles: + check_names = [simplifyString(check_name)] + + # Match names between " + try: check_names.append(re.search(r'([\'"])[^\1]*\1', check_name).group(0)) + except: pass + + # Match longest name between [] + try: check_names.append(max(check_name.split('['), key = len)) + except: pass + + for check_name in list(set(check_names)): + check_movie = getReleaseNameYear(check_name) + + try: + check_words = filter(None, re.split('\W+', simplifyString(check_movie.get('name', '')))) + movie_words = filter(None, re.split('\W+', simplifyString(movietitle))) + if len(check_words) > 0 and len(movie_words) > 0 and len(list(set(check_words) - set(movie_words))) == 0 and len(list(set(movie_words) - set(check_words))) == 0: + result+=1 + return result + except: + pass + + result+=0 + return result + +def 
getReleaseNameYear(release_name, file_name = None): + + # Use guessit first + guess = {} + if release_name: + release_name = re.sub(clean, ' ', release_name.lower()) + try: + guess = guess_movie_info(toUnicode(release_name)) + if guess.get('title') and guess.get('year'): + guess = { + 'name': guess.get('title'), + 'year': guess.get('year'), + } + elif guess.get('title'): + guess = { + 'name': guess.get('title'), + 'year': 0, + } + except: + log.debug('Could not detect via guessit "%s": %s', (file_name, traceback.format_exc())) + + # Backup to simple + cleaned = ' '.join(re.split('\W+', simplifyString(release_name))) + for i in range(1,4): + cleaned = re.sub(clean, ' ', cleaned) + cleaned = re.sub(clean, ' ', cleaned) + year = findYear(cleaned) + cp_guess = {} + + if year: # Split name on year + try: + movie_name = cleaned.split(year).pop(0).strip() + cp_guess = { + 'name': movie_name, + 'year': int(year), + } + except: + pass + else: # Split name on multiple spaces + try: + movie_name = cleaned.split(' ').pop(0).strip() + cp_guess = { + 'name': movie_name, + 'year': 0, + } + except: + pass + + if cp_guess.get('year') == guess.get('year') and len(cp_guess.get('name', '')) > len(guess.get('name', '')): + return guess + elif guess == {}: + return cp_guess + if cp_guess.get('year') == guess.get('year') and len(cp_guess.get('name', '')) < len(guess.get('name', '')): + return cp_guess + return guess + +def findYear(text): + matches = re.search('(?P19[0-9]{2}|20[0-9]{2})', text) + if matches: + return matches.group('year') + + return '' \ No newline at end of file diff --git a/couchpotato/core/helpers/request.py b/couchpotato/core/helpers/request.py index 3c6558b134..4c0add187f 100644 --- a/couchpotato/core/helpers/request.py +++ b/couchpotato/core/helpers/request.py @@ -1,19 +1,21 @@ -from couchpotato.core.helpers.encoding import toUnicode -from couchpotato.core.helpers.variable import natcmp -from flask.globals import current_app -from flask.helpers import json, 
make_response from urllib import unquote -from werkzeug.urls import url_decode -import flask import re -def getParams(): +from couchpotato.core.helpers.encoding import toUnicode +from couchpotato.core.helpers.variable import natsortKey + + +def getParams(params): - params = url_decode(getattr(flask.request, 'environ').get('QUERY_STRING', '')) reg = re.compile('^[a-z0-9_\.]+$') - current = temp = {} - for param, value in sorted(params.iteritems()): + # Sort keys + param_keys = params.keys() + param_keys.sort(key = natsortKey) + + temp = {} + for param in param_keys: + value = params[param] nest = re.split("([\[\]]+)", param) if len(nest) > 1: @@ -36,16 +38,31 @@ def getParams(): current = current[item] else: temp[param] = toUnicode(unquote(value)) + if temp[param].lower() in ['true', 'false']: + temp[param] = temp[param].lower() != 'false' return dictToList(temp) +non_decimal = re.compile(r'[^\d.]+') + def dictToList(params): if type(params) is dict: new = {} - for x, value in params.iteritems(): + for x, value in params.items(): try: - new_value = [dictToList(value[k]) for k in sorted(value.iterkeys(), cmp = natcmp)] + convert = lambda text: int(text) if text.isdigit() else text.lower() + alphanum_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key)] + sorted_keys = sorted(value.keys(), key = alphanum_key) + + all_ints = 0 + for pnr in sorted_keys: + all_ints += 1 if non_decimal.sub('', pnr) == pnr else 0 + + if all_ints == len(sorted_keys): + new_value = [dictToList(value[k]) for k in sorted_keys] + else: + new_value = value except: new_value = value @@ -54,29 +71,3 @@ def dictToList(params): new = params return new - -def getParam(attr, default = None): - try: - return getParams().get(attr, default) - except: - return default - -def padded_jsonify(callback, *args, **kwargs): - content = str(callback) + '(' + json.dumps(dict(*args, **kwargs)) + ')' - return getattr(current_app, 'response_class')(content, mimetype = 'text/javascript') - -def 
jsonify(mimetype, *args, **kwargs): - content = json.dumps(dict(*args, **kwargs)) - return getattr(current_app, 'response_class')(content, mimetype = mimetype) - -def jsonified(*args, **kwargs): - callback = getParam('callback_func', None) - if callback: - content = padded_jsonify(callback, *args, **kwargs) - else: - content = jsonify('application/json', *args, **kwargs) - - response = make_response(content) - response.cache_control.no_cache = True - - return response diff --git a/couchpotato/core/helpers/rss.py b/couchpotato/core/helpers/rss.py index d88fdb5399..f455007e89 100644 --- a/couchpotato/core/helpers/rss.py +++ b/couchpotato/core/helpers/rss.py @@ -1,12 +1,15 @@ -from couchpotato.core.logger import CPLog import xml.etree.ElementTree as XMLTree +from couchpotato.core.logger import CPLog + + log = CPLog(__name__) + class RSS(object): def getTextElements(self, xml, path): - ''' Find elements and return tree''' + """ Find elements and return tree""" textelements = [] try: @@ -28,7 +31,7 @@ def getElements(self, xml, path): return elements def getElement(self, xml, path): - ''' Find element and return text''' + """ Find element and return text""" try: return xml.find(path) @@ -36,7 +39,7 @@ def getElement(self, xml, path): return def getTextElement(self, xml, path): - ''' Find element and return text''' + """ Find element and return text""" try: return xml.find(path).text @@ -46,6 +49,6 @@ def getTextElement(self, xml, path): def getItems(self, data, path = 'channel/item'): try: return XMLTree.parse(data).findall(path) - except Exception, e: + except Exception as e: log.error('Error parsing RSS. 
%s', e) return [] diff --git a/couchpotato/core/helpers/variable.py b/couchpotato/core/helpers/variable.py old mode 100644 new mode 100755 index 82bf88f725..b1dd966e0b --- a/couchpotato/core/helpers/variable.py +++ b/couchpotato/core/helpers/variable.py @@ -1,23 +1,53 @@ -from couchpotato.core.helpers.encoding import simplifyString, toSafeString -from couchpotato.core.logger import CPLog +О╩©import collections +import ctypes import hashlib -import os.path +import os import platform import random import re import string import sys +import traceback + +from couchpotato.core.helpers.encoding import simplifyString, toSafeString, ss, sp, toUnicode +from couchpotato.core.logger import CPLog +import six +from six.moves import map, zip, filter + log = CPLog(__name__) + +def fnEscape(pattern): + return pattern.replace('[', '[[').replace(']', '[]]').replace('[[', '[[]') + + +def link(src, dst): + if os.name == 'nt': + import ctypes + if ctypes.windll.kernel32.CreateHardLinkW(toUnicode(dst), toUnicode(src), 0) == 0: raise ctypes.WinError() + else: + os.link(toUnicode(src), toUnicode(dst)) + + +def symlink(src, dst): + if os.name == 'nt': + import ctypes + if ctypes.windll.kernel32.CreateSymbolicLinkW(toUnicode(dst), toUnicode(src), 1 if os.path.isdir(src) else 0) in [0, 1280]: raise ctypes.WinError() + else: + os.symlink(toUnicode(src), toUnicode(dst)) + + def getUserDir(): try: import pwd - os.environ['HOME'] = pwd.getpwuid(os.geteuid()).pw_dir + if not os.environ['HOME']: + os.environ['HOME'] = sp(pwd.getpwuid(os.geteuid()).pw_dir) except: pass - return os.path.expanduser('~') + return sp(os.path.expanduser('~')) + def getDownloadDir(): user_dir = getUserDir() @@ -31,6 +61,7 @@ def getDownloadDir(): return user_dir + def getDataDir(): # Windows @@ -50,10 +81,12 @@ def getDataDir(): # Linux return os.path.join(user_dir, '.couchpotato') -def isDict(object): - return isinstance(object, dict) -def mergeDicts(a, b): +def isDict(obj): + return isinstance(obj, dict) + + +def 
mergeDicts(a, b, prepend_list = False): assert isDict(a), isDict(b) dst = a.copy() @@ -67,12 +100,13 @@ def mergeDicts(a, b): if isDict(current_src[key]) and isDict(current_dst[key]): stack.append((current_dst[key], current_src[key])) elif isinstance(current_src[key], list) and isinstance(current_dst[key], list): - current_dst[key].extend(current_src[key]) + current_dst[key] = current_src[key] + current_dst[key] if prepend_list else current_dst[key] + current_src[key] current_dst[key] = removeListDuplicates(current_dst[key]) else: current_dst[key] = current_src[key] return dst + def removeListDuplicates(seq): checked = [] for e in seq: @@ -80,31 +114,79 @@ def removeListDuplicates(seq): checked.append(e) return checked + def flattenList(l): if isinstance(l, list): return sum(map(flattenList, l)) else: return l + def md5(text): - return hashlib.md5(text).hexdigest() + return hashlib.md5(ss(text)).hexdigest() + def sha1(text): return hashlib.sha1(text).hexdigest() + +def isLocalIP(ip): + ip = ip.lstrip('htps:/') + regex = '/(^127\.)|(^192\.168\.)|(^10\.)|(^172\.1[6-9]\.)|(^172\.2[0-9]\.)|(^172\.3[0-1]\.)|(^::1)$/' + return re.search(regex, ip) is not None or 'localhost' in ip or ip[:4] == '127.' + + def getExt(filename): return os.path.splitext(filename)[1][1:] -def cleanHost(host): - if not host.startswith(('http://', 'https://')): - host = 'http://' + host - if not host.endswith('/'): +def cleanHost(host, protocol = True, ssl = False, username = None, password = None): + """Return a cleaned up host with given url options set + + Changes protocol to https if ssl is set to True and http if ssl is set to false. 
+ >>> cleanHost("localhost:80", ssl=True) + 'https://localhost:80/' + >>> cleanHost("localhost:80", ssl=False) + 'http://localhost:80/' + + Username and password is managed with the username and password variables + >>> cleanHost("localhost:80", username="user", password="passwd") + 'http://user:passwd@localhost:80/' + + Output without scheme (protocol) can be forced with protocol=False + >>> cleanHost("localhost:80", protocol=False) + 'localhost:80' + """ + + if not '://' in host and protocol: + host = ('https://' if ssl else 'http://') + host + + if not protocol: + host = host.split('://', 1)[-1] + + if protocol and username and password: + try: + auth = re.findall('^(?:.+?//)(.+?):(.+?)@(?:.+)$', host) + if auth: + log.error('Cleanhost error: auth already defined in url: %s, please remove BasicAuth from url.', host) + else: + host = host.replace('://', '://%s:%s@' % (username, password), 1) + except: + pass + + host = host.rstrip('/ ') + if protocol: host += '/' return host -def getImdb(txt, check_inside = True, multiple = False): + +def getImdb(txt, check_inside = False, multiple = False): + + if not check_inside: + txt = simplifyString(txt) + else: + txt = ss(txt) if check_inside and os.path.isfile(txt): output = open(txt, 'r') @@ -112,60 +194,446 @@ def getImdb(txt, check_inside = True, multiple = False): output.close() try: - ids = re.findall('(tt\d{7})', txt) + ids = re.findall('(tt\d{4,8})', txt) + if multiple: - return ids if len(ids) > 0 else [] - return ids[0] + return removeDuplicate(['tt%07d' % tryInt(x[2:]) for x in ids]) if len(ids) > 0 else [] + + return 'tt%07d' % tryInt(ids[0][2:]) except IndexError: pass return False -def tryInt(s): + +def tryInt(s, default = 0): try: return int(s) - except: return 0 + except: return default + def tryFloat(s): - try: return float(s) if '.' in s else tryInt(s) + try: + if isinstance(s, str): + return float(s) if '.' 
in s else tryInt(s) + else: + return float(s) except: return 0 -def natsortKey(s): - return map(tryInt, re.findall(r'(\d+|\D+)', s)) -def natcmp(a, b): - return cmp(natsortKey(a), natsortKey(b)) +def natsortKey(string_): + """See http://www.codinghorror.com/blog/archives/001018.html""" + return [int(s) if s.isdigit() else s for s in re.split(r'(\d+)', string_)] + + +def toIterable(value): + if isinstance(value, collections.Iterable): + return value + return [value] + -def getTitle(library_dict): +def getIdentifier(media): + return media.get('identifier') or media.get('identifiers', {}).get('imdb') + + +def getTitle(media_dict): try: try: - return library_dict['titles'][0]['title'] + return media_dict['title'] except: try: - for title in library_dict.titles: - if title.default: - return title.title + return media_dict['titles'][0] except: - log.error('Could not get title for %s', library_dict.identifier) - return None - - log.error('Could not get title for %s', library_dict['identifier']) - return None + try: + return media_dict['info']['titles'][0] + except: + try: + return media_dict['media']['info']['titles'][0] + except: + log.error('Could not get title for %s', getIdentifier(media_dict)) + return None except: - log.error('Could not get title for library item: %s', library_dict) + log.error('Could not get title for library item: %s', media_dict) return None + def possibleTitles(raw_title): - titles = [] + titles = [ + toSafeString(raw_title).lower(), + raw_title.lower(), + simplifyString(raw_title) + ] + + # replace some chars + new_title = raw_title.replace('&', 'and') + titles.append(simplifyString(new_title)) - titles.append(toSafeString(raw_title).lower()) - titles.append(raw_title.lower()) - titles.append(simplifyString(raw_title)) + return removeDuplicate(titles) - return list(set(titles)) def randomString(size = 8, chars = string.ascii_uppercase + string.digits): return ''.join(random.choice(chars) for x in range(size)) -def splitString(str, split_on = 
','): - return [x.strip() for x in str.split(split_on)] if str else [] +def getAllLanguages(): + return [ + ('aa', 'Afar'), + ('ab', 'Abkhazian'), + ('af', 'Afrikaans'), + ('ak', 'Akan'), + ('sq', 'Albanian'), + ('am', 'Amharic'), + ('ar', 'Arabic'), + ('an', 'Aragonese'), + ('hy', 'Armenian'), + ('as', 'Assamese'), + ('av', 'Avaric'), + ('ae', 'Avestan'), + ('ay', 'Aymara'), + ('az', 'Azerbaijani'), + ('ba', 'Bashkir'), + ('bm', 'Bambara'), + ('eu', 'Basque'), + ('be', 'Belarusian'), + ('bn', 'Bengali'), + ('bh', 'Bihari languages'), + ('bi', 'Bislama'), + ('bo', 'Tibetan'), + ('bs', 'Bosnian'), + ('br', 'Breton'), + ('bg', 'Bulgarian'), + ('my', 'Burmese'), + ('ca', 'Catalan; Valencian'), + ('cs', 'Czech'), + ('ch', 'Chamorro'), + ('ce', 'Chechen'), + ('zh', 'Chinese'), + ('cu', 'Church Slavic; Old Slavonic; Church Slavonic; Old Bulgarian; Old Church Slavonic'), + ('cv', 'Chuvash'), + ('kw', 'Cornish'), + ('co', 'Corsican'), + ('cr', 'Cree'), + ('cy', 'Welsh'), + ('cs', 'Czech'), + ('da', 'Danish'), + ('de', 'German'), + ('dv', 'Divehi; Dhivehi; Maldivian'), + ('nl', 'Dutch; Flemish'), + ('dz', 'Dzongkha'), + ('el', 'Greek, Modern (1453-)'), + ('en', 'English'), + ('eo', 'Esperanto'), + ('et', 'Estonian'), + ('eu', 'Basque'), + ('ee', 'Ewe'), + ('fo', 'Faroese'), + ('fa', 'Persian'), + ('fj', 'Fijian'), + ('fi', 'Finnish'), + ('fr', 'French'), + ('fr', 'Truefrench'), + ('fy', 'Western Frisian'), + ('ff', 'Fulah'), + ('Ga', 'Georgian'), + ('de', 'German'), + ('gd', 'Gaelic; Scottish Gaelic'), + ('ga', 'Irish'), + ('gl', 'Galician'), + ('gv', 'Manx'), + ('el', 'Greek, Modern (1453-)'), + ('gn', 'Guarani'), + ('gu', 'Gujarati'), + ('ht', 'Haitian; Haitian Creole'), + ('ha', 'Hausa'), + ('he', 'Hebrew'), + ('hz', 'Herero'), + ('hi', 'Hindi'), + ('ho', 'Hiri Motu'), + ('hr', 'Croatian'), + ('hu', 'Hungarian'), + ('hy', 'Armenian'), + ('ig', 'Igbo'), + ('is', 'Icelandic'), + ('io', 'Ido'), + ('ii', 'Sichuan Yi; Nuosu'), + ('iu', 'Inuktitut'), + ('ie', 'Interlingue; 
Occidental'), + ('ia', 'Interlingua (International Auxiliary Language Association)'), + ('id', 'Indonesian'), + ('ik', 'Inupiaq'), + ('is', 'Icelandic'), + ('it', 'Italian'), + ('jv', 'Javanese'), + ('ja', 'Japanese'), + ('kl', 'Kalaallisut; Greenlandic'), + ('kn', 'Kannada'), + ('ks', 'Kashmiri'), + ('ka', 'Georgian'), + ('kr', 'Kanuri'), + ('kk', 'Kazakh'), + ('km', 'Central Khmer'), + ('ki', 'Kikuyu; Gikuyu'), + ('rw', 'Kinyarwanda'), + ('ky', 'Kirghiz; Kyrgyz'), + ('kv', 'Komi'), + ('kg', 'Kongo'), + ('ko', 'Korean'), + ('kj', 'Kuanyama; Kwanyama'), + ('ku', 'Kurdish'), + ('lo', 'Lao'), + ('la', 'Latin'), + ('lv', 'Latvian'), + ('li', 'Limburgan; Limburger; Limburgish'), + ('ln', 'Lingala'), + ('lt', 'Lithuanian'), + ('lb', 'Luxembourgish; Letzeburgesch'), + ('lu', 'Luba-Katanga'), + ('lg', 'Ganda'), + ('mk', 'Macedonian'), + ('mh', 'Marshallese'), + ('ml', 'Malayalam'), + ('mi', 'Maori'), + ('mr', 'Marathi'), + ('ms', 'Malay'), + ('Mi', 'Micmac'), + ('mk', 'Macedonian'), + ('mg', 'Malagasy'), + ('mt', 'Maltese'), + ('mn', 'Mongolian'), + ('mi', 'Maori'), + ('ms', 'Malay'), + ('my', 'Burmese'), + ('na', 'Nauru'), + ('nv', 'Navajo; Navaho'), + ('nr', 'Ndebele, South; South Ndebele'), + ('nd', 'Ndebele, North; North Ndebele'), + ('ng', 'Ndonga'), + ('ne', 'Nepali'), + ('nl', 'Dutch; Flemish'), + ('nn', 'Norwegian Nynorsk; Nynorsk, Norwegian'), + ('nb', 'Bokmal, Norwegian; Norwegian Bokmal'), + ('no', 'Norwegian'), + ('oc', 'Occitan (post 1500)'), + ('oj', 'Ojibwa'), + ('or', 'Oriya'), + ('om', 'Oromo'), + ('os', 'Ossetian; Ossetic'), + ('pa', 'Panjabi; Punjabi'), + ('fa', 'Persian'), + ('pi', 'Pali'), + ('pl', 'Polish'), + ('pt', 'Portuguese'), + ('ps', 'Pushto; Pashto'), + ('qu', 'Quechua'), + ('rm', 'Romansh'), + ('ro', 'Romanian; Moldavian; Moldovan'), + ('rn', 'Rundi'), + ('ru', 'Russian'), + ('sg', 'Sango'), + ('sa', 'Sanskrit'), + ('si', 'Sinhala; Sinhalese'), + ('sk', 'Slovak'), + ('sk', 'Slovak'), + ('sl', 'Slovenian'), + ('se', 'Northern Sami'), + ('sm', 
'Samoan'), + ('sn', 'Shona'), + ('sd', 'Sindhi'), + ('so', 'Somali'), + ('st', 'Sotho, Southern'), + ('es', 'Spanish; Castilian'), + ('sq', 'Albanian'), + ('sc', 'Sardinian'), + ('sr', 'Serbian'), + ('ss', 'Swati'), + ('su', 'Sundanese'), + ('sw', 'Swahili'), + ('sv', 'Swedish'), + ('ty', 'Tahitian'), + ('ta', 'Tamil'), + ('tt', 'Tatar'), + ('te', 'Telugu'), + ('tg', 'Tajik'), + ('tl', 'Tagalog'), + ('th', 'Thai'), + ('bo', 'Tibetan'), + ('ti', 'Tigrinya'), + ('to', 'Tonga (Tonga Islands)'), + ('tn', 'Tswana'), + ('ts', 'Tsonga'), + ('tk', 'Turkmen'), + ('tr', 'Turkish'), + ('tw', 'Twi'), + ('ug', 'Uighur; Uyghur'), + ('uk', 'Ukrainian'), + ('ur', 'Urdu'), + ('uz', 'Uzbek'), + ('ve', 'Venda'), + ('vi', 'Vietnamese'), + ('vo', 'Volapuk'), + ('cy', 'Welsh'), + ('wa', 'Walloon'), + ('wo', 'Wolof'), + ('xh', 'Xhosa'), + ('yi', 'Yiddish'), + ('yo', 'Yoruba'), + ('za', 'Zhuang; Chuang'), + ('zh', 'Chinese'), + ('zu', 'Zulu')] + +def fillingLanguages(languages): + allLanguages = getAllLanguages() + + languagesToAppend = [] + + for currentLanguage in languages: + matchingTuples = [item for item in allLanguages if item[0].upper() == currentLanguage.upper()] + if matchingTuples and any(matchingTuples): + languagesToAppend.append(matchingTuples[0][1].upper()) + + if currentLanguage == 'FR' or currentLanguage == 'FRENCH': + languagesToAppend.append('TRUEFRENCH') + + return languages + languagesToAppend + +def splitString(str, split_on = ',', clean = True): + l = [x.strip() for x in str.split(split_on)] if str else [] + return removeEmpty(l) if clean else l + + +def removeEmpty(l): + return list(filter(None, l)) + + +def removeDuplicate(l): + seen = set() + return [x for x in l if x not in seen and not seen.add(x)] + + +def dictIsSubset(a, b): + return all([k in b and b[k] == v for k, v in a.items()]) + + +# Returns True if sub_folder is the same as or inside base_folder +def isSubFolder(sub_folder, base_folder): + if base_folder and sub_folder: + base = 
sp(os.path.realpath(base_folder)) + os.path.sep + subfolder = sp(os.path.realpath(sub_folder)) + os.path.sep + return os.path.commonprefix([subfolder, base]) == base + + return False + + +# From SABNZBD +re_password = [re.compile(r'(.+){{([^{}]+)}}$'), re.compile(r'(.+)\s+password\s*=\s*(.+)$', re.I)] + + +def scanForPassword(name): + m = None + for reg in re_password: + m = reg.search(name) + if m: break + + if m: + return m.group(1).strip('. '), m.group(2).strip() + + +under_pat = re.compile(r'_([a-z])') + +def underscoreToCamel(name): + return under_pat.sub(lambda x: x.group(1).upper(), name) + + +def removePyc(folder, only_excess = True, show_logs = True): + + folder = sp(folder) + + for root, dirs, files in os.walk(folder): + + pyc_files = filter(lambda filename: filename.endswith('.pyc'), files) + py_files = set(filter(lambda filename: filename.endswith('.py'), files)) + excess_pyc_files = filter(lambda pyc_filename: pyc_filename[:-1] not in py_files, pyc_files) if only_excess else pyc_files + + for excess_pyc_file in excess_pyc_files: + full_path = os.path.join(root, excess_pyc_file) + if show_logs: log.debug('Removing old PYC file: %s', full_path) + try: + os.remove(full_path) + except: + log.error('Couldn\'t remove %s: %s', (full_path, traceback.format_exc())) + + for dir_name in dirs: + full_path = os.path.join(root, dir_name) + if len(os.listdir(full_path)) == 0: + try: + os.rmdir(full_path) + except: + log.error('Couldn\'t remove empty directory %s: %s', (full_path, traceback.format_exc())) + + +def getFreeSpace(directories): + + single = not isinstance(directories, (tuple, list)) + if single: + directories = [directories] + + free_space = {} + for folder in directories: + + size = None + if os.path.isdir(folder): + if os.name == 'nt': + _, total, free = ctypes.c_ulonglong(), ctypes.c_ulonglong(), \ + ctypes.c_ulonglong() + if sys.version_info >= (3,) or isinstance(folder, unicode): + fun = ctypes.windll.kernel32.GetDiskFreeSpaceExW #@UndefinedVariable 
+ else: + fun = ctypes.windll.kernel32.GetDiskFreeSpaceExA #@UndefinedVariable + ret = fun(folder, ctypes.byref(_), ctypes.byref(total), ctypes.byref(free)) + if ret == 0: + raise ctypes.WinError() + return [total.value, free.value] + else: + s = os.statvfs(folder) + size = [s.f_blocks * s.f_frsize / (1024 * 1024), (s.f_bavail * s.f_frsize) / (1024 * 1024)] + + if single: return size + + free_space[folder] = size + + return free_space + + +def getSize(paths): + + single = not isinstance(paths, (tuple, list)) + if single: + paths = [paths] + + total_size = 0 + for path in paths: + path = sp(path) + + if os.path.isdir(path): + total_size = 0 + for dirpath, _, filenames in os.walk(path): + for f in filenames: + total_size += os.path.getsize(sp(os.path.join(dirpath, f))) + + elif os.path.isfile(path): + total_size += os.path.getsize(path) + + return total_size / 1048576 # MB + + +def find(func, iterable): + for item in iterable: + if func(item): + return item + + return None + + +def compareVersions(version1, version2): + def normalize(v): + return [int(x) for x in re.sub(r'(\.0+)*$','', v).split(".")] + return cmp(normalize(version1), normalize(version2)) diff --git a/couchpotato/core/loader.py b/couchpotato/core/loader.py index a97437a211..5b0f6fca3d 100644 --- a/couchpotato/core/loader.py +++ b/couchpotato/core/loader.py @@ -1,59 +1,71 @@ -from couchpotato.core.event import fireEvent -from couchpotato.core.logger import CPLog -import glob import os +import sys import traceback +from couchpotato.core.event import fireEvent +from couchpotato.core.logger import CPLog +from importhelper import import_module +import six + + log = CPLog(__name__) -class Loader(object): - plugins = {} - providers = {} +class Loader(object): - modules = {} + def __init__(self): + self.plugins = {} + self.providers = {} + self.modules = {} + self.paths = {} def preload(self, root = ''): - core = os.path.join(root, 'couchpotato', 'core') - self.paths = { + self.paths.update({ 'core': (0, 
'couchpotato.core._base', os.path.join(core, '_base')), 'plugin': (1, 'couchpotato.core.plugins', os.path.join(core, 'plugins')), 'notifications': (20, 'couchpotato.core.notifications', os.path.join(core, 'notifications')), 'downloaders': (20, 'couchpotato.core.downloaders', os.path.join(core, 'downloaders')), - } + }) - # Add providers to loader - provider_dir = os.path.join(root, 'couchpotato', 'core', 'providers') - for provider in os.listdir(provider_dir): - path = os.path.join(provider_dir, provider) - if os.path.isdir(path): - self.paths[provider + '_provider'] = (25, 'couchpotato.core.providers.' + provider, path) + # Add media to loader + self.addPath(root, ['couchpotato', 'core', 'media'], 25, recursive = True) + # Add custom plugin folder + from couchpotato.environment import Env + custom_plugin_dir = os.path.join(Env.get('data_dir'), 'custom_plugins') + if os.path.isdir(custom_plugin_dir): + sys.path.insert(0, custom_plugin_dir) + self.paths['custom_plugins'] = (30, '', custom_plugin_dir) - for plugin_type, plugin_tuple in self.paths.iteritems(): + # Loop over all paths and add to module list + for plugin_type, plugin_tuple in self.paths.items(): priority, module, dir_name = plugin_tuple self.addFromDir(plugin_type, priority, module, dir_name) def run(self): did_save = 0 - for priority in self.modules: - for module_name, plugin in sorted(self.modules[priority].iteritems()): + for priority in sorted(self.modules): + for module_name, plugin in sorted(self.modules[priority].items()): + # Load module try: - m = getattr(self.loadModule(module_name), plugin.get('name')) + if plugin.get('name')[:2] == '__': + continue - log.info('Loading %s: %s', (plugin['type'], plugin['name'])) + m = self.loadModule(module_name) + if m is None: + continue # Save default settings for plugin/provider did_save += self.loadSettings(m, module_name, save = False) - self.loadPlugins(m, plugin.get('name')) + self.loadPlugins(m, plugin.get('type'), plugin.get('name')) except 
ImportError as e: # todo:: subclass ImportError for missing requirements. - if (e.message.lower().startswith("missing")): + if e.message.lower().startswith("missing"): log.error(e.message) pass # todo:: this needs to be more descriptive. @@ -65,27 +77,45 @@ def run(self): if did_save: fireEvent('settings.save') + def addPath(self, root, base_path, priority, recursive = False): + root_path = os.path.join(root, *base_path) + for filename in os.listdir(root_path): + path = os.path.join(root_path, filename) + if os.path.isdir(path) and filename[:2] != '__': + if six.u('__init__.py') in os.listdir(path): + new_base_path = ''.join(s + '.' for s in base_path) + filename + self.paths[new_base_path.replace('.', '_')] = (priority, new_base_path, path) + + if recursive: + self.addPath(root, base_path + [filename], priority, recursive = True) + def addFromDir(self, plugin_type, priority, module, dir_name): # Load dir module - try: - m = __import__(module) - splitted = module.split('.') - for sub in splitted[1:]: - m = getattr(m, sub) + if module and len(module) > 0: + self.addModule(priority, plugin_type, module, os.path.basename(dir_name)) - if hasattr(m, 'config'): - fireEvent('settings.options', splitted[-1] + '_config', getattr(m, 'config')) - except: - raise + for name in os.listdir(dir_name): + path = os.path.join(dir_name, name) + ext = os.path.splitext(path)[1] + ext_length = len(ext) + + # SKIP test files: + if path.endswith('_test.py'): + continue - for cur_file in glob.glob(os.path.join(dir_name, '*')): - name = os.path.basename(cur_file) - if os.path.isdir(os.path.join(dir_name, name)): + if name != 'static' and ((os.path.isdir(path) and os.path.isfile(os.path.join(path, '__init__.py'))) + or (os.path.isfile(path) and ext == '.py')): + name = name[:-ext_length] if ext_length > 0 else name module_name = '%s.%s' % (module, name) self.addModule(priority, plugin_type, module_name, name) def loadSettings(self, module, name, save = True): + + if not hasattr(module, 
'config'): + #log.debug('Skip loading settings for plugin %s as it has no config section' % module.__file__) + return False + try: for section in module.config: fireEvent('settings.options', section['name'], section) @@ -99,16 +129,22 @@ def loadSettings(self, module, name, save = True): log.debug('Failed loading settings for "%s": %s', (name, traceback.format_exc())) return False - def loadPlugins(self, module, name): - try: - klass = module.start() - klass.registerPlugin() - - if klass and getattr(klass, 'auto_register_static'): - klass.registerStatic(module.__file__) + def loadPlugins(self, module, type, name): + if not hasattr(module, 'autoload'): + #log.debug('Skip startup for plugin %s as it has no start section' % module.__file__) + return False + try: + # Load single file plugin + if isinstance(module.autoload, (str, unicode)): + getattr(module, module.autoload)() + # Load folder plugin + else: + module.autoload() + + log.info('Loaded %s: %s', (type, name)) return True - except Exception, e: + except: log.error('Failed loading plugin "%s": %s', (module.__file__, traceback.format_exc())) return False @@ -117,6 +153,10 @@ def addModule(self, priority, plugin_type, module, name): if not self.modules.get(priority): self.modules[priority] = {} + module = module.lstrip('.') + if plugin_type.startswith('couchpotato_core'): + plugin_type = plugin_type[17:] + self.modules[priority][module] = { 'priority': priority, 'module': module, @@ -126,10 +166,9 @@ def addModule(self, priority, plugin_type, module, name): def loadModule(self, name): try: - m = __import__(name) - splitted = name.split('.') - for sub in splitted[1:-1]: - m = getattr(m, sub) - return m + return import_module(name) + except ImportError: + log.debug('Skip loading module plugin %s: %s', (name, traceback.format_exc())) + return None except: raise diff --git a/couchpotato/core/logger.py b/couchpotato/core/logger.py index 3ad33aa036..ce99d682dc 100644 --- a/couchpotato/core/logger.py +++ 
b/couchpotato/core/logger.py @@ -1,11 +1,14 @@ import logging import re -import traceback + class CPLog(object): context = '' - replace_private = ['api', 'apikey', 'api_key', 'password', 'username', 'h', 'uid', 'key'] + replace_private = ['api', 'apikey', 'api_key', 'password', 'username', 'h', 'uid', 'key', 'passkey'] + + Env = None + is_develop = False def __init__(self, context = ''): if context.endswith('.main'): @@ -14,6 +17,20 @@ def __init__(self, context = ''): self.context = context self.logger = logging.getLogger() + def setup(self): + + if not self.Env: + from couchpotato.environment import Env + + self.Env = Env + self.is_develop = Env.get('dev') + + from couchpotato.core.event import addEvent + addEvent('app.after_shutdown', self.close) + + def close(self, *args, **kwargs): + logging.shutdown() + def info(self, msg, replace_tuple = ()): self.logger.info(self.addContext(msg, replace_tuple)) @@ -37,23 +54,22 @@ def addContext(self, msg, replace_tuple = ()): def safeMessage(self, msg, replace_tuple = ()): - from couchpotato.environment import Env - from couchpotato.core.helpers.encoding import ss + from couchpotato.core.helpers.encoding import ss, toUnicode msg = ss(msg) try: - msg = msg % replace_tuple - except: - try: - if isinstance(replace_tuple, tuple): - msg = msg % tuple([ss(x) for x in list(replace_tuple)]) - else: - msg = msg % ss(replace_tuple) - except: - self.logger.error(u'Failed encoding stuff to log: %s' % traceback.format_exc()) - - if not Env.get('dev'): + if isinstance(replace_tuple, tuple): + msg = msg % tuple([ss(x) if not isinstance(x, (int, float)) else x for x in list(replace_tuple)]) + elif isinstance(replace_tuple, dict): + msg = msg % dict((k, ss(v) if not isinstance(v, (int, float)) else v) for k, v in replace_tuple.iteritems()) + else: + msg = msg % ss(replace_tuple) + except Exception as e: + self.logger.error('Failed encoding stuff to log "%s": %s' % (msg, e)) + + self.setup() + if not self.is_develop: for replace in 
self.replace_private: msg = re.sub('(\?%s=)[^\&]+' % replace, '?%s=xxx' % replace, msg) @@ -61,10 +77,10 @@ def safeMessage(self, msg, replace_tuple = ()): # Replace api key try: - api_key = Env.setting('api_key') + api_key = self.Env.setting('api_key') if api_key: msg = msg.replace(api_key, 'API_KEY') except: pass - return msg + return toUnicode(msg) diff --git a/couchpotato/core/media/__init__.py b/couchpotato/core/media/__init__.py new file mode 100755 index 0000000000..17494ef160 --- /dev/null +++ b/couchpotato/core/media/__init__.py @@ -0,0 +1,115 @@ +import os +import traceback + +from couchpotato import CPLog, md5 +from couchpotato.core.event import addEvent, fireEvent, fireEventAsync +from couchpotato.core.helpers.encoding import toUnicode +from couchpotato.core.helpers.variable import getExt +from couchpotato.core.plugins.base import Plugin +import six + + +log = CPLog(__name__) + + +class MediaBase(Plugin): + + _type = None + + def initType(self): + addEvent('media.types', self.getType) + + def getType(self): + return self._type + + def createOnComplete(self, media_id): + + def onComplete(): + try: + media = fireEvent('media.get', media_id, single = True) + if media: + event_name = '%s.searcher.single' % media.get('type') + fireEventAsync(event_name, media, on_complete = self.createNotifyFront(media_id), manual = True) + except: + log.error('Failed creating onComplete: %s', traceback.format_exc()) + + return onComplete + + def createNotifyFront(self, media_id): + + def notifyFront(): + try: + media = fireEvent('media.get', media_id, single = True) + if media: + event_name = '%s.update' % media.get('type') + fireEvent('notify.frontend', type = event_name, data = media) + except: + log.error('Failed creating onComplete: %s', traceback.format_exc()) + + return notifyFront + + def getDefaultTitle(self, info, default_title = None): + + # Set default title + default_title = default_title if default_title else toUnicode(info.get('title')) + titles = 
info.get('titles', []) + counter = 0 + def_title = None + for title in titles: + if (len(default_title) == 0 and counter == 0) or len(titles) == 1 or title.lower() == toUnicode(default_title.lower()) or (toUnicode(default_title) == six.u('') and toUnicode(titles[0]) == title): + def_title = toUnicode(title) + break + counter += 1 + + if not def_title and titles and len(titles) > 0: + def_title = toUnicode(titles[0]) + + return def_title or 'UNKNOWN' + + def getPoster(self, media, image_urls): + if 'files' not in media: + media['files'] = {} + + existing_files = media['files'] + + image_type = 'poster' + file_type = 'image_%s' % image_type + + # Make existing unique + unique_files = list(set(existing_files.get(file_type, []))) + + # Remove files that can't be found + for ef in unique_files: + if not os.path.isfile(ef): + unique_files.remove(ef) + + # Replace new files list + existing_files[file_type] = unique_files + if len(existing_files) == 0: + del existing_files[file_type] + + images = image_urls.get(image_type, []) + for y in ['SX300', 'tmdb']: + initially_try = [x for x in images if y in x] + images[:-1] = initially_try + + # Loop over type + for image in images: + if not isinstance(image, (str, unicode)): + continue + + # Check if it has top image + filename = '%s.%s' % (md5(image), getExt(image)) + existing = existing_files.get(file_type, []) + has_latest = False + for x in existing: + if filename in x: + has_latest = True + + if not has_latest or file_type not in existing_files or len(existing_files.get(file_type, [])) == 0: + file_path = fireEvent('file.download', url = image, single = True) + if file_path: + existing_files[file_type] = [toUnicode(file_path)] + break + else: + break diff --git a/couchpotato/core/migration/__init__.py b/couchpotato/core/media/_base/__init__.py old mode 100755 new mode 100644 similarity index 100% rename from couchpotato/core/migration/__init__.py rename to couchpotato/core/media/_base/__init__.py diff --git 
a/couchpotato/core/media/_base/library/__init__.py b/couchpotato/core/media/_base/library/__init__.py new file mode 100644 index 0000000000..a404f81c0f --- /dev/null +++ b/couchpotato/core/media/_base/library/__init__.py @@ -0,0 +1,7 @@ +from .main import Library + + +def autoload(): + return Library() + +config = [] diff --git a/couchpotato/core/media/_base/library/base.py b/couchpotato/core/media/_base/library/base.py new file mode 100644 index 0000000000..553eff5a50 --- /dev/null +++ b/couchpotato/core/media/_base/library/base.py @@ -0,0 +1,13 @@ +from couchpotato.core.event import addEvent +from couchpotato.core.plugins.base import Plugin + + +class LibraryBase(Plugin): + + _type = None + + def initType(self): + addEvent('library.types', self.getType) + + def getType(self): + return self._type diff --git a/couchpotato/core/media/_base/library/main.py b/couchpotato/core/media/_base/library/main.py new file mode 100755 index 0000000000..9e614fb4b2 --- /dev/null +++ b/couchpotato/core/media/_base/library/main.py @@ -0,0 +1,128 @@ +from couchpotato import get_db +from couchpotato.api import addApiView +from couchpotato.core.event import addEvent, fireEvent +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.library.base import LibraryBase + +log = CPLog(__name__) + + +class Library(LibraryBase): + def __init__(self): + addEvent('library.title', self.title) + addEvent('library.related', self.related) + addEvent('library.tree', self.tree) + + addEvent('library.root', self.root) + + addApiView('library.query', self.queryView) + addApiView('library.related', self.relatedView) + addApiView('library.tree', self.treeView) + + def queryView(self, media_id, **kwargs): + db = get_db() + media = db.get('id', media_id) + + return { + 'result': fireEvent('library.query', media, single = True) + } + + def relatedView(self, media_id, **kwargs): + db = get_db() + media = db.get('id', media_id) + + return { + 'result': fireEvent('library.related', media, 
single = True) + } + + def treeView(self, media_id, **kwargs): + db = get_db() + media = db.get('id', media_id) + + return { + 'result': fireEvent('library.tree', media, single = True) + } + + def title(self, library): + return fireEvent( + 'library.query', + library, + + condense = False, + include_year = False, + include_identifier = False, + single = True + ) + + def related(self, media): + result = {self.key(media['type']): media} + + db = get_db() + cur = media + + while cur and cur.get('parent_id'): + cur = db.get('id', cur['parent_id']) + + result[self.key(cur['type'])] = cur + + children = db.get_many('media_children', media['_id'], with_doc = True) + + for item in children: + key = self.key(item['doc']['type']) + 's' + + if key not in result: + result[key] = [] + + result[key].append(item['doc']) + + return result + + def root(self, media): + db = get_db() + cur = media + + while cur and cur.get('parent_id'): + cur = db.get('id', cur['parent_id']) + + return cur + + def tree(self, media = None, media_id = None): + db = get_db() + + if media: + result = media + elif media_id: + result = db.get('id', media_id, with_doc = True) + else: + return None + + # Find children + items = db.get_many('media_children', result['_id'], with_doc = True) + keys = [] + + # Build children arrays + for item in items: + key = self.key(item['doc']['type']) + 's' + + if key not in result: + result[key] = {} + elif type(result[key]) is not dict: + result[key] = {} + + if key not in keys: + keys.append(key) + + result[key][item['_id']] = fireEvent('library.tree', item['doc'], single = True) + + # Unique children + for key in keys: + result[key] = result[key].values() + + # Include releases + result['releases'] = fireEvent('release.for_media', result['_id'], single = True) + + return result + + def key(self, media_type): + parts = media_type.split('.') + return parts[-1] diff --git a/couchpotato/core/media/_base/matcher/__init__.py b/couchpotato/core/media/_base/matcher/__init__.py 
class MatcherBase(Plugin):
    """Base for media-type specific release-name matchers.

    Subclasses set `type` (e.g. 'movie') and implement correct() to validate
    a parsed release-name chain against a media document and wanted quality.
    """

    type = None  # media type handled by the subclass

    def __init__(self):
        # Register the subclass' correct() under '<type>.matcher.correct'
        if self.type:
            addEvent('%s.matcher.correct' % self.type, self.correct)

    def correct(self, chain, release, media, quality):
        """Validate a parsed chain; must be implemented per media type."""
        raise NotImplementedError()

    def flattenInfo(self, info):
        """Flatten Caper chain info.

        Dicts recurse; a list of dict matches collapses into one dict mapping
        each key to the list of its values; a list of plain matches stays a list.
        """
        if isinstance(info, dict):
            return dict([(key, self.flattenInfo(value)) for key, value in info.items()])

        result = None

        for match in info:
            if isinstance(match, dict):
                if result is None:
                    result = {}
                for key, value in match.items():
                    result.setdefault(key, []).append(value)
            else:
                if result is None:
                    result = []
                result.append(match)

        return result

    def constructFromRaw(self, match):
        """Re-join raw fragment tuples into a single cleaned string.

        Each fragment contributes everything after its first element; the
        trailing separator character is dropped before stripping.
        """
        if not match:
            return None

        parts = [
            ''.join([y for y in x[1:] if y])
            for x in match
        ]

        return ''.join(parts)[:-1].strip()

    def simplifyValue(self, value):
        """Normalise a parsed value (string, or list of strings) for comparison.

        Falsy values pass through unchanged; anything else raises ValueError.
        """
        if not value:
            return value

        if isinstance(value, basestring):
            return simplifyString(value)

        if isinstance(value, list):
            return [self.simplifyValue(x) for x in value]

        raise ValueError("Unsupported value type")

    def chainMatch(self, chain, group, tags):
        """True when every required tag has an accepted value in the chain group.

        :param tags: dict mapping tag name -> collection of accepted values.
        """
        info = self.flattenInfo(chain.info[group])

        found_tags = []
        for tag, accepted in tags.items():
            values = [self.simplifyValue(x) for x in info.get(tag, [None])]

            if any([value in accepted for value in values]):
                found_tags.append(tag)

        # Lazy tuple args, matching CPLog usage elsewhere (was eager %-format)
        log.debug('tags found: %s, required: %s', (found_tags, tags.keys()))

        # found_tags can only contain keys of tags, so requiring every key to
        # be found is equivalent to the original's redundant set-equality check.
        return all([key in found_tags for key in tags])


class Matcher(MatcherBase):
    """Generic matcher: parses release names with Caper and dispatches
    media-type specific validation through the event bus."""

    def __init__(self):
        super(Matcher, self).__init__()

        self.caper = Caper()

        addEvent('matcher.parse', self.parse)
        addEvent('matcher.match', self.match)

        addEvent('matcher.flatten_info', self.flattenInfo)
        addEvent('matcher.construct_from_raw', self.constructFromRaw)

        addEvent('matcher.correct_title', self.correctTitle)
        addEvent('matcher.correct_quality', self.correctQuality)

    def parse(self, name, parser = 'scene'):
        """Parse a release name with the given Caper parser."""
        return self.caper.parse(name, parser)

    def match(self, release, media, quality):
        """Return the first parsed chain that validates for this media/quality.

        Returns False when the name cannot be parsed or no chain validates.
        """
        match = fireEvent('matcher.parse', release['name'], single = True)

        if len(match.chains) < 1:
            log.info2('Wrong: %s, unable to parse release name (no chains)', release['name'])
            return False

        for chain in match.chains:
            if fireEvent('%s.matcher.correct' % media['type'], chain, release, media, quality, single = True):
                return chain

        return False

    def correctTitle(self, chain, media):
        """Check that the parsed show name matches a known title of the media root."""
        root = fireEvent('library.root', media, single = True)

        if 'show_name' not in chain.info or not len(chain.info['show_name']):
            log.info('Wrong: missing show name in parsed result')
            return False

        # Lower-cased words of the parsed show name
        chain_words = [x.lower() for x in chain.info['show_name']]

        # Acceptable titles, each with and without the year suffix
        titles = root['info']['titles']
        suffixes = [None, root['info']['year']]

        titles = [
            title + ((' %s' % suffix) if suffix else '')
            for title in titles
            for suffix in suffixes
        ]

        # Compare word lists of every possible title variant
        # TODO check xem names
        for title in titles:
            for valid_words in [x.split(' ') for x in possibleTitles(title)]:

                if valid_words == chain_words:
                    return True

        return False

    def correctQuality(self, chain, quality, quality_map):
        """Check that the chain's video tags satisfy the wanted quality."""
        if quality['identifier'] not in quality_map:
            log.info2('Wrong: unknown preferred quality %s', quality['identifier'])
            return False

        if 'video' not in chain.info:
            log.info2('Wrong: no video tags found')
            return False

        video_tags = quality_map[quality['identifier']]

        if not self.chainMatch(chain, 'video', video_tags):
            log.info2('Wrong: %s tags not in chain', video_tags)
            return False

        return True
class MediaIndex(MultiTreeBasedIndex):
    """Look up media documents by any external identifier (md5 of 'source-id')."""

    _version = 3

    custom_header = """from CodernityDB.tree_index import MultiTreeBasedIndex"""

    def __init__(self, *args, **kwargs):
        kwargs['key_format'] = '32s'
        super(MediaIndex, self).__init__(*args, **kwargs)

    def make_key(self, key):
        return md5(key).hexdigest()

    def make_key_value(self, data):
        if data.get('_t') == 'media' and (data.get('identifier') or data.get('identifiers')):

            # Work on a copy: the previous implementation injected the legacy
            # 'identifier' straight into the document's own identifiers dict,
            # mutating the record while merely building index keys.
            identifiers = dict(data.get('identifiers') or {})
            if data.get('identifier') and 'imdb' not in identifiers:
                identifiers['imdb'] = data.get('identifier')

            return [md5('%s-%s' % (x, identifiers[x])).hexdigest() for x in identifiers], None


class MediaStatusIndex(TreeBasedIndex):
    """Index media documents by status ('active', 'done', ...)."""

    _version = 1

    def __init__(self, *args, **kwargs):
        kwargs['key_format'] = '32s'
        super(MediaStatusIndex, self).__init__(*args, **kwargs)

    def make_key(self, key):
        return md5(key).hexdigest()

    def make_key_value(self, data):
        if data.get('_t') == 'media' and data.get('status'):
            return md5(data.get('status')).hexdigest(), None


class MediaTypeIndex(TreeBasedIndex):
    """Index media documents by type ('movie', 'show', ...)."""

    _version = 1

    def __init__(self, *args, **kwargs):
        kwargs['key_format'] = '32s'
        super(MediaTypeIndex, self).__init__(*args, **kwargs)

    def make_key(self, key):
        return md5(key).hexdigest()

    def make_key_value(self, data):
        if data.get('_t') == 'media' and data.get('type'):
            return md5(data.get('type')).hexdigest(), None


class TitleSearchIndex(MultiTreeBasedIndex):
    """Substring-search index over titles.

    Emits right-justified 32-char key fragments for every word-suffix of the
    simplified title, plus progressively shortened prefixes of each fragment.
    """

    _version = 1

    # NOTE(review): izip in this generated header appears unused - confirm
    # against CodernityDB code generation before removing.
    custom_header = """from CodernityDB.tree_index import MultiTreeBasedIndex
from itertools import izip
from couchpotato.core.helpers.encoding import simplifyString"""

    def __init__(self, *args, **kwargs):
        kwargs['key_format'] = '32s'
        super(TitleSearchIndex, self).__init__(*args, **kwargs)
        self.__l = kwargs.get('w_len', 2)  # minimum fragment length to index

    def make_key_value(self, data):

        if data.get('_t') == 'media' and len(data.get('title', '')) > 0:

            out = set()
            title = str(simplifyString(data.get('title').lower()))
            l = self.__l
            title_split = title.split()

            # Every word-suffix of the title, capped at the 32-char key width
            for x in range(len(title_split)):
                combo = ' '.join(title_split[x:])[:32].strip()
                out.add(combo.rjust(32, '_'))
                combo_range = max(l, min(len(combo), 32))

                # All shorter prefixes of the fragment, down to the minimum length
                for cx in range(1, combo_range):
                    ccombo = combo[:-cx].strip()
                    if len(ccombo) > l:
                        out.add(ccombo.rjust(32, '_'))

            return out, None

    def make_key(self, key):
        return key.rjust(32, '_').lower()


class TitleIndex(TreeBasedIndex):
    """Order media by simplified title: leading articles stripped, titles not
    starting with a letter grouped under '#'."""

    _version = 4

    custom_header = """from CodernityDB.tree_index import TreeBasedIndex
from string import ascii_letters
from couchpotato.core.helpers.encoding import toUnicode, simplifyString"""

    def __init__(self, *args, **kwargs):
        kwargs['key_format'] = '32s'
        super(TitleIndex, self).__init__(*args, **kwargs)

    def make_key(self, key):
        return self.simplify(key)

    def make_key_value(self, data):
        if data.get('_t') == 'media' and data.get('title') is not None and len(data.get('title')) > 0:
            return self.simplify(data['title']), None

    def simplify(self, title):
        """Normalise a title into a fixed-width 32-char sort key."""

        title = toUnicode(title)

        # Non-letter first characters sort under '#'
        nr_prefix = '' if title and len(title) > 0 and title[0] in ascii_letters else '#'
        title = simplifyString(title)

        # Drop a single leading article
        for prefix in ['the ', 'an ', 'a ']:
            if prefix == title[:len(prefix)]:
                title = title[len(prefix):]
                break

        return str(nr_prefix + title).ljust(32, ' ')[:32]


class StartsWithIndex(TreeBasedIndex):
    """Index media by the first letter of the (article-stripped) title."""

    _version = 3

    custom_header = """from CodernityDB.tree_index import TreeBasedIndex
from string import ascii_letters
from couchpotato.core.helpers.encoding import toUnicode, simplifyString"""

    def __init__(self, *args, **kwargs):
        kwargs['key_format'] = '1s'
        super(StartsWithIndex, self).__init__(*args, **kwargs)

    def make_key(self, key):
        return self.first(key)

    def make_key_value(self, data):
        if data.get('_t') == 'media' and data.get('title') is not None:
            return self.first(data['title']), None

    def first(self, title):
        """Lower-case first letter of the simplified title, or '#' for non-letters."""
        title = toUnicode(title)
        title = simplifyString(title)

        for prefix in ['the ', 'an ', 'a ']:
            if prefix == title[:len(prefix)]:
                title = title[len(prefix):]
                break

        return str(title[0] if title and len(title) > 0 and title[0] in ascii_letters else '#').lower()


class MediaChildrenIndex(TreeBasedIndex):
    """Find child media documents via their parent_id."""

    _version = 1

    def __init__(self, *args, **kwargs):
        kwargs['key_format'] = '32s'
        super(MediaChildrenIndex, self).__init__(*args, **kwargs)

    def make_key(self, key):
        return key

    def make_key_value(self, data):
        if data.get('_t') == 'media' and data.get('parent_id'):
            return data.get('parent_id'), None


class MediaTagIndex(MultiTreeBasedIndex):
    """Index media documents by each of their (hashed) tags."""

    _version = 2

    custom_header = """from CodernityDB.tree_index import MultiTreeBasedIndex"""

    def __init__(self, *args, **kwargs):
        kwargs['key_format'] = '32s'
        super(MediaTagIndex, self).__init__(*args, **kwargs)

    def make_key_value(self, data):
        # A truthy 'tags' value already implies a non-empty list
        if data.get('_t') == 'media' and data.get('tags'):

            tags = set()
            for tag in data.get('tags', []):
                tags.add(self.make_key(tag))

            return list(tags), None

    def make_key(self, key):
        return md5(key).hexdigest()
import MediaIndex, MediaStatusIndex, MediaTypeIndex, TitleSearchIndex, TitleIndex, StartsWithIndex, MediaChildrenIndex, MediaTagIndex + + +log = CPLog(__name__) + + +class MediaPlugin(MediaBase): + + _database = { + 'media': MediaIndex, + 'media_search_title': TitleSearchIndex, + 'media_status': MediaStatusIndex, + 'media_tag': MediaTagIndex, + 'media_by_type': MediaTypeIndex, + 'media_title': TitleIndex, + 'media_startswith': StartsWithIndex, + 'media_children': MediaChildrenIndex, + } + + def __init__(self): + + addApiView('media.refresh', self.refresh, docs = { + 'desc': 'Refresh a any media type by ID', + 'params': { + 'id': {'desc': 'Movie, Show, Season or Episode ID(s) you want to refresh.', 'type': 'int (comma separated)'}, + } + }) + + addApiView('media.list', self.listView, docs = { + 'desc': 'List media', + 'params': { + 'type': {'type': 'string', 'desc': 'Media type to filter on.'}, + 'status': {'type': 'array or csv', 'desc': 'Filter media by status. Example:"active,done"'}, + 'release_status': {'type': 'array or csv', 'desc': 'Filter media by status of its releases. Example:"snatched,available"'}, + 'limit_offset': {'desc': 'Limit and offset the media list. Examples: "50" or "50,30"'}, + 'starts_with': {'desc': 'Starts with these characters. 
Example: "a" returns all media starting with the letter "a"'}, + 'search': {'desc': 'Search media title'}, + }, + 'return': {'type': 'object', 'example': """{ + 'success': True, + 'empty': bool, any media returned or not, + 'media': array, media found, +}"""} + }) + + addApiView('media.get', self.getView, docs = { + 'desc': 'Get media by id', + 'params': { + 'id': {'desc': 'The id of the media'}, + } + }) + + addApiView('media.delete', self.deleteView, docs = { + 'desc': 'Delete a media from the wanted list', + 'params': { + 'id': {'desc': 'Media ID(s) you want to delete.', 'type': 'int (comma separated)'}, + 'delete_from': {'desc': 'Delete media from this page', 'type': 'string: all (default), wanted, manage'}, + } + }) + + addApiView('media.available_chars', self.charView) + + addEvent('app.load', self.addSingleRefreshView, priority = 100) + addEvent('app.load', self.addSingleListView, priority = 100) + addEvent('app.load', self.addSingleCharView, priority = 100) + addEvent('app.load', self.addSingleDeleteView, priority = 100) + addEvent('app.load', self.cleanupFaults) + + addEvent('media.get', self.get) + addEvent('media.with_status', self.withStatus) + addEvent('media.with_identifiers', self.withIdentifiers) + addEvent('media.list', self.list) + addEvent('media.delete', self.delete) + addEvent('media.restatus', self.restatus) + addEvent('media.tag', self.tag) + addEvent('media.untag', self.unTag) + + # Wrongly tagged media files + def cleanupFaults(self): + medias = fireEvent('media.with_status', 'ignored', single = True) or [] + + db = get_db() + for media in medias: + try: + media['status'] = 'done' + db.update(media) + except: + pass + + def refresh(self, id = '', **kwargs): + handlers = [] + ids = splitString(id) + + for x in ids: + + refresh_handler = self.createRefreshHandler(x) + if refresh_handler: + handlers.append(refresh_handler) + + fireEvent('notify.frontend', type = 'media.busy', data = {'_id': ids}) + fireEventAsync('schedule.queue', handlers = 
handlers) + + return { + 'success': True, + } + + def createRefreshHandler(self, media_id): + + try: + media = get_db().get('id', media_id) + event = '%s.update' % media.get('type') + + def handler(): + fireEvent(event, media_id = media_id, on_complete = self.createOnComplete(media_id)) + + return handler + + except: + log.error('Refresh handler for non existing media: %s', traceback.format_exc()) + + def addSingleRefreshView(self): + + for media_type in fireEvent('media.types', merge = True): + addApiView('%s.refresh' % media_type, self.refresh) + + def get(self, media_id): + + try: + db = get_db() + + imdb_id = getImdb(str(media_id)) + + if imdb_id: + media = db.get('media', 'imdb-%s' % imdb_id, with_doc = True)['doc'] + else: + media = db.get('id', media_id) + + if media: + + # Attach category + try: media['category'] = db.get('id', media.get('category_id')) + except: pass + + media['releases'] = fireEvent('release.for_media', media['_id'], single = True) + + return media + + except (RecordNotFound, RecordDeleted): + log.error('Media with id "%s" not found', media_id) + except: + raise + + def getView(self, id = None, **kwargs): + + media = self.get(id) if id else None + + return { + 'success': media is not None, + 'media': media, + } + + def withStatus(self, status, types = None, with_doc = True): + + db = get_db() + + if types and not isinstance(types, (list, tuple)): + types = [types] + + status = list(status if isinstance(status, (list, tuple)) else [status]) + + for s in status: + for ms in db.get_many('media_status', s): + if with_doc: + try: + doc = db.get('id', ms['_id']) + + if types and doc.get('type') not in types: + continue + + yield doc + except (RecordDeleted, RecordNotFound): + log.debug('Record not found, skipping: %s', ms['_id']) + except (ValueError, EOFError): + fireEvent('database.delete_corrupted', ms.get('_id'), traceback_error = traceback.format_exc(0)) + else: + yield ms + + def withIdentifiers(self, identifiers, with_doc = False): + db 
= get_db() + + for x in identifiers: + try: + return db.get('media', '%s-%s' % (x, identifiers[x]), with_doc = with_doc) + except: + pass + + log.debug('No media found with identifiers: %s', identifiers) + return False + + def list(self, types = None, status = None, release_status = None, status_or = False, limit_offset = None, with_tags = None, starts_with = None, search = None): + + db = get_db() + + # Make a list from string + if status and not isinstance(status, (list, tuple)): + status = [status] + if release_status and not isinstance(release_status, (list, tuple)): + release_status = [release_status] + if types and not isinstance(types, (list, tuple)): + types = [types] + if with_tags and not isinstance(with_tags, (list, tuple)): + with_tags = [with_tags] + + # query media ids + if types: + all_media_ids = set() + for media_type in types: + all_media_ids = all_media_ids.union(set([x['_id'] for x in db.get_many('media_by_type', media_type)])) + else: + all_media_ids = set([x['_id'] for x in db.all('media')]) + + media_ids = list(all_media_ids) + filter_by = {} + + # Filter on movie status + if status and len(status) > 0: + filter_by['media_status'] = set() + for media_status in fireEvent('media.with_status', status, with_doc = False, single = True): + filter_by['media_status'].add(media_status.get('_id')) + + # Filter on release status + if release_status and len(release_status) > 0: + filter_by['release_status'] = set() + for release_status in fireEvent('release.with_status', release_status, with_doc = False, single = True): + filter_by['release_status'].add(release_status.get('media_id')) + + # Add search filters + if starts_with: + starts_with = toUnicode(starts_with.lower())[0] + starts_with = starts_with if starts_with in ascii_lowercase else '#' + filter_by['starts_with'] = [x['_id'] for x in db.get_many('media_startswith', starts_with)] + + # Add tag filter + if with_tags: + filter_by['with_tags'] = set() + for tag in with_tags: + for x in 
db.get_many('media_tag', tag): + filter_by['with_tags'].add(x['_id']) + + # Filter with search query + if search: + filter_by['search'] = [x['_id'] for x in db.get_many('media_search_title', search)] + + if status_or and 'media_status' in filter_by and 'release_status' in filter_by: + filter_by['status'] = list(filter_by['media_status']) + list(filter_by['release_status']) + del filter_by['media_status'] + del filter_by['release_status'] + + # Filter by combining ids + for x in filter_by: + media_ids = [n for n in media_ids if n in filter_by[x]] + + total_count = len(media_ids) + if total_count == 0: + return 0, [] + + offset = 0 + limit = -1 + if limit_offset: + splt = splitString(limit_offset) if isinstance(limit_offset, (str, unicode)) else limit_offset + limit = tryInt(splt[0]) + offset = tryInt(0 if len(splt) is 1 else splt[1]) + + # List movies based on title order + medias = [] + for m in db.all('media_title'): + media_id = m['_id'] + if media_id not in media_ids: continue + if offset > 0: + offset -= 1 + continue + + media = fireEvent('media.get', media_id, single = True) + + # Skip if no media has been found + if not media: + continue + + # Merge releases with movie dict + medias.append(media) + + # remove from media ids + media_ids.remove(media_id) + if len(media_ids) == 0 or len(medias) == limit: break + + return total_count, medias + + def listView(self, **kwargs): + + total_movies, movies = self.list( + types = splitString(kwargs.get('type')), + status = splitString(kwargs.get('status')), + release_status = splitString(kwargs.get('release_status')), + status_or = kwargs.get('status_or') is not None, + limit_offset = kwargs.get('limit_offset'), + with_tags = splitString(kwargs.get('with_tags')), + starts_with = kwargs.get('starts_with'), + search = kwargs.get('search') + ) + + return { + 'success': True, + 'empty': len(movies) == 0, + 'total': total_movies, + 'movies': movies, + } + + def addSingleListView(self): + + for media_type in 
fireEvent('media.types', merge = True): + tempList = lambda *args, **kwargs : self.listView(type = media_type, **kwargs) + addApiView('%s.list' % media_type, tempList, docs = { + 'desc': 'List media', + 'params': { + 'status': {'type': 'array or csv', 'desc': 'Filter ' + media_type + ' by status. Example:"active,done"'}, + 'release_status': {'type': 'array or csv', 'desc': 'Filter ' + media_type + ' by status of its releases. Example:"snatched,available"'}, + 'limit_offset': {'desc': 'Limit and offset the ' + media_type + ' list. Examples: "50" or "50,30"'}, + 'starts_with': {'desc': 'Starts with these characters. Example: "a" returns all ' + media_type + 's starting with the letter "a"'}, + 'search': {'desc': 'Search ' + media_type + ' title'}, + }, + 'return': {'type': 'object', 'example': """{ + 'success': True, + 'empty': bool, any """ + media_type + """s returned or not, + 'media': array, media found, + }"""} + }) + + def availableChars(self, types = None, status = None, release_status = None): + + db = get_db() + + # Make a list from string + if status and not isinstance(status, (list, tuple)): + status = [status] + if release_status and not isinstance(release_status, (list, tuple)): + release_status = [release_status] + if types and not isinstance(types, (list, tuple)): + types = [types] + + # query media ids + if types: + all_media_ids = set() + for media_type in types: + all_media_ids = all_media_ids.union(set([x['_id'] for x in db.get_many('media_by_type', media_type)])) + else: + all_media_ids = set([x['_id'] for x in db.all('media')]) + + media_ids = all_media_ids + filter_by = {} + + # Filter on movie status + if status and len(status) > 0: + filter_by['media_status'] = set() + for media_status in fireEvent('media.with_status', status, with_doc = False, single = True): + filter_by['media_status'].add(media_status.get('_id')) + + # Filter on release status + if release_status and len(release_status) > 0: + filter_by['release_status'] = set() + for 
release_status in fireEvent('release.with_status', release_status, with_doc = False, single = True): + filter_by['release_status'].add(release_status.get('media_id')) + + # Filter by combining ids + for x in filter_by: + media_ids = [n for n in media_ids if n in filter_by[x]] + + chars = set() + for x in db.all('media_startswith'): + if x['_id'] in media_ids: + chars.add(x['key']) + + if len(chars) == 27: + break + + return list(chars) + + def charView(self, **kwargs): + + type = splitString(kwargs.get('type', 'movie')) + status = splitString(kwargs.get('status', None)) + release_status = splitString(kwargs.get('release_status', None)) + chars = self.availableChars(type, status, release_status) + + return { + 'success': True, + 'empty': len(chars) == 0, + 'chars': chars, + } + + def addSingleCharView(self): + + for media_type in fireEvent('media.types', merge = True): + tempChar = lambda *args, **kwargs : self.charView(type = media_type, **kwargs) + addApiView('%s.available_chars' % media_type, tempChar) + + def delete(self, media_id, delete_from = None): + + try: + db = get_db() + + media = db.get('id', media_id) + if media: + deleted = False + + media_releases = fireEvent('release.for_media', media['_id'], single = True) + + if delete_from == 'all': + # Delete connected releases + for release in media_releases: + db.delete(release) + + db.delete(media) + deleted = True + else: + + total_releases = len(media_releases) + total_deleted = 0 + new_media_status = None + + for release in media_releases: + if delete_from in ['wanted', 'snatched', 'late']: + if release.get('status') != 'done': + db.delete(release) + total_deleted += 1 + new_media_status = 'done' + elif delete_from == 'manage': + if release.get('status') == 'done' or media.get('status') == 'done': + db.delete(release) + total_deleted += 1 + + if (total_releases == total_deleted) or (total_releases == 0 and not new_media_status) or (not new_media_status and delete_from == 'late'): + db.delete(media) + 
deleted = True + elif new_media_status: + media['status'] = new_media_status + + # Remove profile (no use for in manage) + if new_media_status == 'done': + media['profile_id'] = None + + db.update(media) + + fireEvent('media.untag', media['_id'], 'recent', single = True) + else: + fireEvent('media.restatus', media.get('_id'), single = True) + + if deleted: + fireEvent('notify.frontend', type = 'media.deleted', data = media) + except: + log.error('Failed deleting media: %s', traceback.format_exc()) + + return True + + def deleteView(self, id = '', **kwargs): + + ids = splitString(id) + for media_id in ids: + self.delete(media_id, delete_from = kwargs.get('delete_from', 'all')) + + return { + 'success': True, + } + + def addSingleDeleteView(self): + + for media_type in fireEvent('media.types', merge = True): + tempDelete = lambda *args, **kwargs : self.deleteView(type = media_type, **kwargs) + addApiView('%s.delete' % media_type, tempDelete, docs = { + 'desc': 'Delete a ' + media_type + ' from the wanted list', + 'params': { + 'id': {'desc': 'Media ID(s) you want to delete.', 'type': 'int (comma separated)'}, + 'delete_from': {'desc': 'Delete ' + media_type + ' from this page', 'type': 'string: all (default), wanted, manage'}, + } + }) + + def restatus(self, media_id, tag_recent = True, allowed_restatus = None): + + try: + db = get_db() + + m = db.get('id', media_id) + previous_status = m['status'] + + log.debug('Changing status for %s', getTitle(m)) + if not m['profile_id']: + m['status'] = 'done' + else: + m['status'] = 'active' + + try: + profile = db.get('id', m['profile_id']) + media_releases = fireEvent('release.for_media', m['_id'], single = True) + done_releases = [release for release in media_releases if release.get('status') == 'done'] + + if done_releases: + + # Check if we are finished with the media + for release in done_releases: + if fireEvent('quality.isfinish', {'identifier': release['quality'], 'is_3d': release.get('is_3d', False)}, profile, 
timedelta(seconds = time.time() - release['last_edit']).days, single = True): + m['status'] = 'done' + break + + elif previous_status == 'done': + m['status'] = 'done' + + except RecordNotFound: + log.debug('Failed restatus, keeping previous: %s', traceback.format_exc()) + m['status'] = previous_status + + # Only update when status has changed + if previous_status != m['status'] and (not allowed_restatus or m['status'] in allowed_restatus): + db.update(m) + + # Tag media as recent + if tag_recent: + self.tag(media_id, 'recent', update_edited = True) + + return m['status'] + except: + log.error('Failed restatus: %s', traceback.format_exc()) + + def tag(self, media_id, tag, update_edited = False): + + try: + db = get_db() + m = db.get('id', media_id) + + if update_edited: + m['last_edit'] = int(time.time()) + + tags = m.get('tags') or [] + if tag not in tags: + tags.append(tag) + m['tags'] = tags + db.update(m) + + return True + except: + log.error('Failed tagging: %s', traceback.format_exc()) + + return False + + def unTag(self, media_id, tag): + + try: + db = get_db() + m = db.get('id', media_id) + + tags = m.get('tags') or [] + if tag in tags: + new_tags = list(set(tags)) + new_tags.remove(tag) + + m['tags'] = new_tags + db.update(m) + + return True + except: + log.error('Failed untagging: %s', traceback.format_exc()) + + return False diff --git a/couchpotato/core/providers/__init__.py b/couchpotato/core/media/_base/providers/__init__.py similarity index 100% rename from couchpotato/core/providers/__init__.py rename to couchpotato/core/media/_base/providers/__init__.py diff --git a/couchpotato/core/providers/metadata/__init__.py b/couchpotato/core/media/_base/providers/automation/__init__.py similarity index 100% rename from couchpotato/core/providers/metadata/__init__.py rename to couchpotato/core/media/_base/providers/automation/__init__.py diff --git a/couchpotato/core/media/_base/providers/automation/base.py 
class MultiProvider(Plugin):
    """Wraps several concrete provider classes behind a single plugin."""

    def __init__(self):
        self._classes = []

        for Type in self.getTypes():
            klass = Type()

            # Prefix child names so log lines identify the concrete provider
            klass.setName('%s:%s' % (self.getName(), klass.getName()))

            self._classes.append(klass)

    def getTypes(self):
        """Provider classes to instantiate; overridden by subclasses."""
        return []

    def getClasses(self):
        """Instantiated child providers."""
        return self._classes


class Provider(Plugin):
    """Base class for search/metadata providers."""

    type = None  # movie, show, subtitle, trailer, ...
    http_time_between_calls = 10  # Default timeout for url requests

    # NOTE(review): class-level dicts are shared by ALL provider instances,
    # acting as a process-wide per-host availability cache - presumably
    # intentional, confirm before changing.
    last_available_check = {}
    is_available = {}

    def isAvailable(self, test_url):
        """Probe a provider host, re-checking at most every 15 minutes per host.

        Always True in dev mode. On Python 2, None < number is True, so an
        unknown host triggers an immediate first check.
        """
        if Env.get('dev'): return True

        now = time.time()
        host = urlparse(test_url).hostname

        if self.last_available_check.get(host) < now - 900:
            self.last_available_check[host] = now

            try:
                self.urlopen(test_url, 30)
                self.is_available[host] = True
            except:
                # was: 'trying again in an 15 minutes.'
                log.error('"%s" unavailable, trying again in 15 minutes.', host)
                self.is_available[host] = False

        return self.is_available.get(host, False)

    def getJsonData(self, url, decode_from = None, **kwargs):
        """Fetch (cached) url contents and parse them as JSON; [] on failure."""

        cache_key = md5(url)
        data = self.getCache(cache_key, url, **kwargs)

        if data:
            try:
                data = data.strip()
                if decode_from:
                    data = data.decode(decode_from)

                return json.loads(data)
            except:
                # was: 'Failed to parsing %s: %s'
                log.error('Failed parsing %s: %s', (self.getName(), traceback.format_exc()))

        return []

    def getRSSData(self, url, item_path = 'channel/item', **kwargs):
        """Fetch url and return the XML elements at item_path; [] on failure."""

        cache_key = md5(url)
        data = self.getCache(cache_key, url, **kwargs)

        if data and len(data) > 0:
            try:
                data = XMLTree.fromstring(data)
                return self.getElements(data, item_path)
            except:
                # Retry once with a safe-encoded copy before giving up
                try:
                    data = XMLTree.fromstring(ss(data))
                    return self.getElements(data, item_path)
                except XmlParseError:
                    log.error('Invalid XML returned, check "%s" manually for issues', url)
                except:
                    # was: 'Failed to parsing %s: %s'
                    log.error('Failed parsing %s: %s', (self.getName(), traceback.format_exc()))

        return []

    def getHTMLData(self, url, **kwargs):
        """Fetch (cached) raw HTML for the given url."""

        cache_key = md5(url)
        return self.getCache(cache_key, url, **kwargs)


class AutomationBase(Provider):
    """Marker base class for automation (watchlist) providers."""
    pass
+ addEvent('provider.belongs_to', self.belongsTo) + addEvent('provider.search.%s.%s' % (self.protocol, self.type), self.search) + + def getEnabledProtocol(self): + if self.isEnabled(): + return self.protocol + else: + return [] + + def buildUrl(self, *args, **kwargs): + pass + + def login(self): + + # Check if we are still logged in every hour + now = time.time() + if self.last_login_check and self.last_login_check < (now - 3600): + try: + output = self.urlopen(self.urls['login_check']) + if self.loginCheckSuccess(output): + self.last_login_check = now + return True + except: pass + self.last_login_check = None + + if self.last_login_check: + return True + + log.info('Session expired, attempting a new login.') + + try: + output = self.urlopen(self.urls['login'], data = self.getLoginParams()) + + if self.loginSuccess(output): + self.last_login_check = now + self.login_failures = 0 + return True + + error = 'unknown' + except Exception as e: + if isinstance(e, HTTPError): + if e.response.status_code >= 400 and e.response.status_code < 500: + self.login_failures += 1 + if self.login_failures >= 3: + self.disableAccount() + error = traceback.format_exc() + + self.last_login_check = None + + if self.login_fail_msg and self.login_fail_msg in output: + error = "Login credentials rejected." 
+ self.disableAccount() + + log.error('Failed to login %s: %s', (self.getName(), error)) + return False + + def loginSuccess(self, output): + return True + + def loginCheckSuccess(self, output): + return True + + def loginDownload(self, url = '', nzb_id = ''): + try: + if not self.login(): + log.error('Failed downloading from %s', self.getName()) + return self.urlopen(url) + except: + log.error('Failed downloading from %s: %s', (self.getName(), traceback.format_exc())) + + def getLoginParams(self): + return {} + + def download(self, url = '', nzb_id = ''): + try: + return self.urlopen(url, headers = {'User-Agent': Env.getIdentifier()}, show_error = False) + except: + log.error('Failed getting release from %s: %s', (self.getName(), traceback.format_exc())) + + return 'try_next' + + def search(self, media, quality): + + if self.isDisabled(): + return [] + + # Login if needed + if self.urls.get('login') and not self.login(): + log.error('Failed to login to: %s', self.getName()) + return [] + + # Create result container + imdb_results = hasattr(self, '_search') + results = ResultList(self, media, quality, imdb_results = imdb_results) + + # Do search based on imdb id + if imdb_results: + self._search(media, quality, results) + # Search possible titles + else: + media_title = fireEvent('library.query', media, include_year = False, single = True) + + for title in possibleTitles(media_title): + self._searchOnTitle(title, media, quality, results) + + return results + + def belongsTo(self, url, provider = None, host = None): + try: + if provider and provider == self.getName(): + return self + + hostname = urlparse(url).hostname + if host and hostname in host: + return self + else: + for url_type in self.urls: + download_url = self.urls[url_type] + if hostname in download_url: + return self + except: + log.debug('Url %s doesn\'t belong to %s', (url, self.getName())) + + return + + def parseSize(self, size): + + size_raw = size.lower() + size = tryFloat(re.sub(r'[^0-9.]', '', 
size).strip()) + + for s in self.size_gb: + if s in size_raw: + return size * 1024 + + for s in self.size_mb: + if s in size_raw: + return size + + for s in self.size_kb: + if s in size_raw: + return size / 1024 + + return 0 + + def getCatId(self, quality = None): + if not quality: quality = {} + identifier = quality.get('identifier') + + want_3d = False + if quality.get('custom'): + want_3d = quality['custom'].get('3d') + + for ids, qualities in self.cat_ids: + if identifier in qualities or (want_3d and '3d' in qualities): + return ids + + if self.cat_backup_id: + return [self.cat_backup_id] + + return [] + + def disableAccount(self): + log.error("Failed %s login, disabling provider. " + "Please check the configuration. Re-enabling the " + "provider without fixing the problem may result " + "in an IP ban, depending on the site.", self.getName()) + self.conf(self.enabled_option, False) + self.login_failures = 0 + + +class ResultList(list): + + result_ids = None + provider = None + media = None + quality = None + + def __init__(self, provider, media, quality, **kwargs): + + self.result_ids = [] + self.provider = provider + self.media = media + self.quality = quality + self.kwargs = kwargs + + super(ResultList, self).__init__() + + def extend(self, results): + for r in results: + self.append(r) + + def append(self, result): + + new_result = self.fillResult(result) + + is_correct = fireEvent('searcher.correct_release', new_result, self.media, self.quality, + imdb_results = self.kwargs.get('imdb_results', False), single = True) + + if is_correct and new_result['id'] not in self.result_ids: + is_correct_weight = float(is_correct) + + new_result['score'] += fireEvent('score.calculate', new_result, self.media, single = True) + + old_score = new_result['score'] + new_result['score'] = int(old_score * is_correct_weight) + + log.info2('Found correct release with weight %.02f, old_score(%d) now scaled to score(%d)', ( + is_correct_weight, + old_score, + new_result['score'] + 
)) + + self.found(new_result) + self.result_ids.append(result['id']) + + super(ResultList, self).append(new_result) + + def fillResult(self, result): + + defaults = { + 'id': 0, + 'protocol': self.provider.protocol, + 'type': self.provider.type, + 'provider': self.provider.getName(), + 'download': self.provider.loginDownload if self.provider.urls.get('login') else self.provider.download, + 'seed_ratio': Env.setting('seed_ratio', section = self.provider.getName().lower(), default = ''), + 'seed_time': Env.setting('seed_time', section = self.provider.getName().lower(), default = ''), + 'url': '', + 'name': '', + 'age': 0, + 'size': 0, + 'description': '', + 'score': 0 + } + + return mergeDicts(defaults, result) + + def found(self, new_result): + if not new_result.get('provider_extra'): + new_result['provider_extra'] = '' + else: + new_result['provider_extra'] = ', %s' % new_result['provider_extra'] + + log.info('Found: score(%(score)s) on %(provider)s%(provider_extra)s: %(name)s', new_result) diff --git a/couchpotato/core/providers/movie/__init__.py b/couchpotato/core/media/_base/providers/info/__init__.py similarity index 100% rename from couchpotato/core/providers/movie/__init__.py rename to couchpotato/core/media/_base/providers/info/__init__.py diff --git a/couchpotato/core/media/_base/providers/info/base.py b/couchpotato/core/media/_base/providers/info/base.py new file mode 100644 index 0000000000..90a9153ca7 --- /dev/null +++ b/couchpotato/core/media/_base/providers/info/base.py @@ -0,0 +1,5 @@ +from couchpotato.core.media._base.providers.base import Provider + + +class BaseInfoProvider(Provider): + type = 'unknown' diff --git a/couchpotato/core/providers/trailer/__init__.py b/couchpotato/core/media/_base/providers/metadata/__init__.py similarity index 100% rename from couchpotato/core/providers/trailer/__init__.py rename to couchpotato/core/media/_base/providers/metadata/__init__.py diff --git a/couchpotato/core/media/_base/providers/metadata/base.py 
# --- couchpotato/core/media/_base/providers/metadata/base.py (new file) ----

from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin

log = CPLog(__name__)


class MetaDataBase(Plugin):
    """Base class for metadata writers."""
    pass


# --- couchpotato/core/media/_base/providers/nzb/__init__.py (new file) -----

config = [{
    'name': 'nzb_providers',
    'groups': [
        {
            'label': 'Usenet Providers',
            'description': 'Providers searching usenet for new releases',
            'wizard': True,
            'type': 'list',
            'name': 'nzb_providers',
            'tab': 'searcher',
            'options': [],
        },
    ],
}]


# --- couchpotato/core/media/_base/providers/nzb/base.py (new file) ---------

import time

from couchpotato.core.media._base.providers.base import YarrProvider


class NZBProvider(YarrProvider):
    """Common base for usenet (nzb) providers."""

    protocol = 'nzb'

    def calculateAge(self, unix):
        """Age in whole days of a release posted at unix timestamp ``unix``."""
        return int(time.time() - unix) / 24 / 60 / 60


# --- couchpotato/core/media/_base/providers/nzb/binnewz/binsearch.py -------

import re
import urllib

from bs4 import BeautifulSoup

from couchpotato.core.helpers.variable import tryInt, tryFloat
from couchpotato.core.logger import CPLog
from nzbdownloader import NZBDownloader
from nzbdownloader import NZBPostURLSearchResult

log = CPLog(__name__)


class BinSearch(NZBDownloader):

    def search(self, filename, minSize, newsgroup = None):
        """Search binsearch.info (both server farms) for ``filename``.

        Skips incomplete or passworded posts and returns the first result of
        at least ``minSize`` MB as an NZBPostURLSearchResult, or None.
        """
        filename = filename.encode('utf8')

        # FIX: idiomatic None comparison (was "!= None").
        if newsgroup is not None:
            binSearchURLs = [urllib.urlencode({'server': 1, 'max': '250', 'adv_g': newsgroup, 'q': filename}),
                            urllib.urlencode({'server': 2, 'max': '250', 'adv_g': newsgroup, 'q': filename})]
        else:
            binSearchURLs = [urllib.urlencode({'server': 1, 'max': '250', 'q': filename}),
                            urllib.urlencode({'server': 2, 'max': '250', 'q': filename})]

        for suffixURL in binSearchURLs:
            binSearchURL = "https://binsearch.info/?adv_age=&" + suffixURL

            binSearchSoup = BeautifulSoup(self.open(binSearchURL))

            main_table = binSearchSoup.find('table', attrs = {'id': 'r2'})
            if not main_table:
                return

            for row in main_table.find_all('tr'):
                title = row.find('span', attrs = {'class': 's'})

                if not title: continue

                nzb_id = row.find('input', attrs = {'type': 'checkbox'})['name']
                info = row.find('span', attrs = {'class': 'd'})

                # FIX: named group restored; the regex was mangled to "(?P[...])".
                try:
                    size_match = re.search(r'size:.(?P<size>[0-9\.]+.[GMB]+)', info.text)
                except:
                    continue

                age = 0
                try: age = re.search(r'(?P<size>\d+d)', row.find_all('td')[-1:][0].text).group('size')[:-1]
                except: pass

                parts = re.search(r'available:.(?P<parts>\d+)./.(?P<total>\d+)', info.text)
                total = float(tryInt(parts.group('total')))
                parts = float(tryInt(parts.group('parts')))

                # Reject posts missing too many parts (unless repairable via par2).
                if (total / parts) < 1 and ((total / parts) < 0.95 or ((total / parts) >= 0.95 and not ('par2' in info.text.lower() or 'pa3' in info.text.lower()))):
                    log.info2('Wrong: \'%s\', not complete: %s out of %s', (title, parts, total))
                    continue

                if 'requires password' in info.text.lower():
                    log.info2('Wrong: \'%s\', passworded', (title))
                    continue
                # (search() continues on the next original chunk)
sizeInMegs=self.parseSize(size_match.group('size')) + if sizeInMegs < minSize: + continue + postData = title + nzbURL = 'https://www.binsearch.info/fcgi/nzb.fcgi?q=' + nzb_id + nzbid=nzb_id + age=tryInt(age) + return NZBPostURLSearchResult( self, nzbURL, postData, sizeInMegs, binSearchURL, age, nzbid ) + + def parseSize(self, size): + size_gb = ['gb', 'gib','go'] + size_mb = ['mb', 'mib','mo'] + size_kb = ['kb', 'kib','ko'] + size_raw = size.lower() + size = tryFloat(re.sub(r'[^0-9.]', '', size).strip()) + + for s in size_gb: + if s in size_raw: + return size * 1024 + + for s in size_mb: + if s in size_raw: + return size + + for s in size_kb: + if s in size_raw: + return size / 1024 + + return 0 + + diff --git a/couchpotato/core/media/_base/providers/nzb/binnewz/main.py b/couchpotato/core/media/_base/providers/nzb/binnewz/main.py new file mode 100644 index 0000000000..55e2802e54 --- /dev/null +++ b/couchpotato/core/media/_base/providers/nzb/binnewz/main.py @@ -0,0 +1,392 @@ +from binsearch import BinSearch +from nzbindex import NZBIndex + +from bs4 import BeautifulSoup +from couchpotato.core.helpers.variable import getTitle, splitString, tryInt +from couchpotato.core.helpers.encoding import simplifyString +from couchpotato.environment import Env +from couchpotato.core.logger import CPLog +from couchpotato.core.helpers import namer_check +from couchpotato.core.media._base.providers.nzb.base import NZBProvider + +log = CPLog(__name__) +import re +import urllib +import urllib2 +import traceback +class Base(NZBProvider): + + urls = { + 'download': 'http://www.binnews.in/', + 'detail': 'http://www.binnews.in/', + 'search': 'http://www.binnews.in/_bin/search2.php', + } + + http_time_between_calls = 4 # Seconds + cat_backup_id = None + + def _search(self, movie, quality, results): + nzbDownloaders = [BinSearch(), NZBIndex()] + MovieTitles = movie['info']['titles'] + moviequality = simplifyString(quality['identifier']) + movieyear = movie['info']['year'] + if 
quality['custom']['3d']==1: + threeD= True + else: + threeD=False + if moviequality in ("720p","1080p","bd50","2160p"): + cat1='39' + cat2='49' + minSize = 2000 + elif moviequality in ("dvdr"): + cat1='23' + cat2='48' + minSize = 3000 + else: + cat1='6' + cat2='27' + minSize = 500 + + for MovieTitle in MovieTitles: + try: + TitleStringReal = str(MovieTitle.encode("latin-1").replace('-',' ')) + except: + continue + if threeD: + TitleStringReal = TitleStringReal + ' 3d' + data = 'chkInit=1&edTitre='+simplifyString(unicode(TitleStringReal,"latin-1"))+'&chkTitre=on&chkFichier=on&chkCat=on&cats%5B%5D='+cat1+'&cats%5B%5D='+cat2+'&edAge=&edYear=' + try: + soup = BeautifulSoup( urllib2.urlopen(self.urls['search'], data) ) + except Exception, e: + log.error(u"Error trying to load BinNewz response: "+e) + return [] + + tables = soup.findAll("table", id="tabliste") + for table in tables: + + rows = table.findAll("tr") + for row in rows: + + cells = row.select("> td") + if (len(cells) < 11): + continue + + name = cells[2].text.strip() + testname=namer_check.correctName(name,movie) + if testname==0: + continue + language = cells[3].find("img").get("src") + + if not "_fr" in language and not "_frq" in language: + continue + + detectedlang='' + + if "_fr" in language: + detectedlang=' truefrench ' + else: + detectedlang=' french ' + + + # blacklist_groups = [ "alt.binaries.multimedia" ] + blacklist_groups = [] + + newgroupLink = cells[4].find("a") + newsgroup = None + if newgroupLink.contents: + newsgroup = newgroupLink.contents[0] + if newsgroup == "abmulti": + newsgroup = "alt.binaries.multimedia" + elif newsgroup == "ab.moovee": + newsgroup = "alt.binaries.moovee" + elif newsgroup == "abtvseries": + newsgroup = "alt.binaries.tvseries" + elif newsgroup == "abtv": + newsgroup = "alt.binaries.tv" + elif newsgroup == "a.b.teevee": + newsgroup = "alt.binaries.teevee" + elif newsgroup == "abstvdivxf": + newsgroup = "alt.binaries.series.tv.divx.french" + elif newsgroup == 
"abhdtvx264fr": + newsgroup = "alt.binaries.hdtv.x264.french" + elif newsgroup == "abmom": + newsgroup = "alt.binaries.mom" + elif newsgroup == "abhdtv": + newsgroup = "alt.binaries.hdtv" + elif newsgroup == "abboneless": + newsgroup = "alt.binaries.boneless" + elif newsgroup == "abhdtvf": + newsgroup = "alt.binaries.hdtv.french" + elif newsgroup == "abhdtvx264": + newsgroup = "alt.binaries.hdtv.x264" + elif newsgroup == "absuperman": + newsgroup = "alt.binaries.superman" + elif newsgroup == "abechangeweb": + newsgroup = "alt.binaries.echange-web" + elif newsgroup == "abmdfvost": + newsgroup = "alt.binaries.movies.divx.french.vost" + elif newsgroup == "abdvdr": + newsgroup = "alt.binaries.dvdr" + elif newsgroup == "abmzeromov": + newsgroup = "alt.binaries.movies.zeromovies" + elif newsgroup == "abcfaf": + newsgroup = "alt.binaries.cartoons.french.animes-fansub" + elif newsgroup == "abcfrench": + newsgroup = "alt.binaries.cartoons.french" + elif newsgroup == "abgougouland": + newsgroup = "alt.binaries.gougouland" + elif newsgroup == "abroger": + newsgroup = "alt.binaries.roger" + elif newsgroup == "abtatu": + newsgroup = "alt.binaries.tatu" + elif newsgroup =="abstvf": + newsgroup = "alt.binaries.series.tv.french" + elif newsgroup =="abmdfreposts": + newsgroup="alt.binaries.movies.divx.french.reposts" + elif newsgroup =="abmdf": + newsgroup="alt.binaries.movies.french" + elif newsgroup =="abhdtvfrepost": + newsgroup="alt.binaries.hdtv.french.repost" + elif newsgroup == "abmmkv": + newsgroup = "alt.binaries.movies.mkv" + elif newsgroup == "abf-tv": + newsgroup = "alt.binaries.french-tv" + elif newsgroup == "abmdfo": + newsgroup = "alt.binaries.movies.divx.french.old" + elif newsgroup == "abmf": + newsgroup = "alt.binaries.movies.french" + elif newsgroup == "ab.movies": + newsgroup = "alt.binaries.movies" + elif newsgroup == "a.b.french": + newsgroup = "alt.binaries.french" + elif newsgroup == "a.b.3d": + newsgroup = "alt.binaries.3d" + elif newsgroup == "ab.dvdrip": 
+ newsgroup = "alt.binaries.dvdrip" + elif newsgroup == "ab.welovelori": + newsgroup = "alt.binaries.welovelori" + elif newsgroup == "abblu-ray": + newsgroup = "alt.binaries.blu-ray" + elif newsgroup == "ab.bloaf": + newsgroup = "alt.binaries.bloaf" + elif newsgroup == "ab.hdtv.german": + newsgroup = "alt.binaries.hdtv.german" + elif newsgroup == "abmd": + newsgroup = "alt.binaries.movies.divx" + elif newsgroup == "ab.ath": + newsgroup = "alt.binaries.ath" + elif newsgroup == "a.b.town": + newsgroup = "alt.binaries.town" + elif newsgroup == "a.b.u-4all": + newsgroup = "alt.binaries.u-4all" + elif newsgroup == "ab.amazing": + newsgroup = "alt.binaries.amazing" + elif newsgroup == "ab.astronomy": + newsgroup = "alt.binaries.astronomy" + elif newsgroup == "ab.nospam.cheer": + newsgroup = "alt.binaries.nospam.cheerleaders" + elif newsgroup == "ab.worms": + newsgroup = "alt.binaries.worms" + elif newsgroup == "abcores": + newsgroup = "alt.binaries.cores" + elif newsgroup == "abdvdclassics": + newsgroup = "alt.binaries.dvd.classics" + elif newsgroup == "abdvdf": + newsgroup = "alt.binaries.dvd.french" + elif newsgroup == "abdvds": + newsgroup = "alt.binaries.dvds" + elif newsgroup == "abmdfrance": + newsgroup = "alt.binaries.movies.divx.france" + elif newsgroup == "abmisc": + newsgroup = "alt.binaries.misc" + elif newsgroup == "abnl": + newsgroup = "alt.binaries.nl" + elif newsgroup == "abx": + newsgroup = "alt.binaries.x" + elif newsgroup == "ab.new-movies": + newsgroup = "alt.binaries.new-movies" + elif newsgroup == "ab.triballs": + newsgroup = "alt.binaries.triballs" + elif newsgroup == "abdivxf": + newsgroup = "alt.binaries.divx.french" + elif newsgroup == "ab.solar-xl": + newsgroup = "alt.binaries.solar-xl" + elif newsgroup == "abbig": + newsgroup = "alt.binaries.big" + elif newsgroup == "ab.insiderz": + newsgroup = "alt.binaries.insiderz" + elif newsgroup == "abwarez": + newsgroup = "alt.binaries.warez" + elif newsgroup == "abdvd": + newsgroup = "alt.binaries.dvd" 
+ elif newsgroup == "abdvd9": + newsgroup = "alt.binaries.dvd9" + elif newsgroup == "absvcdf": + newsgroup = "alt.binaries.svcd.french" + elif newsgroup == "ab.ftd": + newsgroup = "alt.binaries.ftd" + elif newsgroup == "ab.u-4all": + newsgroup = "alt.binaries.u-4all" + elif newsgroup == "a.b.u4all": + newsgroup = "alt.binaries.u-4all" + else: + log.error(u"Unknown binnewz newsgroup: " + newsgroup) + continue + + if newsgroup in blacklist_groups: + log.error(u"Ignoring result, newsgroup is blacklisted: " + newsgroup) + continue + + filename = cells[5].contents[0] + + m = re.search("^(.+)\s+{(.*)}$", name) + qualityStr = "" + if m: + name = m.group(1) + qualityStr = m.group(2) + + m = re.search("^(.+)\s+\[(.*)\]$", name) + source = None + if m: + name = m.group(1) + source = m.group(2) + + m = re.search("(.+)\(([0-9]{4})\)", name) + year = "" + if m: + name = m.group(1) + year = m.group(2) + if int(year) > movieyear + 1 or int(year) < movieyear - 1: + continue + + m = re.search("(.+)\((\d{2}/\d{2}/\d{4})\)", name) + dateStr = "" + if m: + name = m.group(1) + dateStr = m.group(2) + year = dateStr[-5:].strip(")").strip("/") + + m = re.search("(.+)\s+S(\d{2})\s+E(\d{2})(.*)", name) + if m: + name = m.group(1) + " S" + m.group(2) + "E" + m.group(3) + m.group(4) + + m = re.search("(.+)\s+S(\d{2})\s+Ep(\d{2})(.*)", name) + if m: + name = m.group(1) + " S" + m.group(2) + "E" + m.group(3) + m.group(4) + + filenameLower = filename.lower() + searchItems = [] + if qualityStr=="": + if source in ("Blu Ray-Rip", "HD DVD-Rip"): + qualityStr="brrip" + elif source =="Blu Ray-Rip 4K": + qualityStr="2160p" + elif source =="DVDRip": + qualityStr="dvdrip" + elif source == "TS": + qualityStr ="ts" + elif source == "DVDSCR": + qualityStr ="scr" + elif source == "CAM": + qualityStr ="cam" + elif moviequality == "dvdr": + qualityStr ="dvdr" + if year =='': + year = '1900' + if len(searchItems) == 0 and qualityStr == str(moviequality): + searchItems.append( filename ) + for searchItem in 
searchItems: + resultno=1 + for downloader in nzbDownloaders: + + log.info("Searching for download : " + name + ", search string = "+ searchItem + " on " + downloader.__class__.__name__) + try: + binsearch_result = downloader.search(searchItem, minSize, newsgroup ) + if binsearch_result: + new={} + + def extra_check(item): + return True + qualitytag='' + if qualityStr.lower() in ['720p','1080p','2160p']: + qualitytag=' hd x264 h264 ' + elif qualityStr.lower() in ['dvdrip']: + qualitytag=' dvd xvid ' + elif qualityStr.lower() in ['brrip']: + qualitytag=' hdrip ' + elif qualityStr.lower() in ['ts']: + qualitytag=' webrip ' + elif qualityStr.lower() in ['scr']: + qualitytag='' + elif qualityStr.lower() in ['dvdr']: + qualitytag=' pal video_ts ' + new['id'] = binsearch_result.nzbid + new['name'] = name + detectedlang + qualityStr + qualitytag + downloader.__class__.__name__ + new['url'] = binsearch_result.nzburl + new['detail_url'] = binsearch_result.refererURL + new['size'] = binsearch_result.sizeInMegs + new['age'] = binsearch_result.age + new['extra_check'] = extra_check + + results.append(new) + + resultno=resultno+1 + log.info("Found : " + searchItem + " on " + downloader.__class__.__name__) + if resultno==3: + break + except Exception, e: + log.error("Searching from " + downloader.__class__.__name__ + " failed : " + str(e) + traceback.format_exc()) + + def download(self, url = '', nzb_id = ''): + if 'binsearch' in url: + data = { + 'action': 'nzb', + nzb_id: 'on' + } + try: + return self.urlopen(url, data = data, show_error = False) + except: + log.error('Failed getting nzb from %s: %s', (self.getName(), traceback.format_exc())) + return 'try_next' + else: + values = { + 'url' : '/' + } + data_tmp = urllib.urlencode(values) + req = urllib2.Request(url, data_tmp ) + + try: + #log.error('Failed downloading from %s', self.getName()) + return urllib2.urlopen(req).read() + except: + log.error('Failed downloading from %s: %s', (self.getName(), traceback.format_exc())) 
+ + return 'try_next' +config = [{ + 'name': 'binnewz', + 'groups': [ + { + 'tab': 'searcher', + 'list': 'nzb_providers', + 'name': 'binnewz', + 'description': 'Free provider, lots of french nzbs. See binnewz', + 'wizard': True, + 'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAABHNCSVQICAgIfAhkiAAAAgRJREFUOI1t009rVFcYx/HPuffOTGYmMcZoEmNUkiJRSZRAC1ropuimuy6KuHHhShe+EF+CL8AX4LpQCgoiohhMMKKMqHRTtaJJ5k8nudfFnBkjzoEf5zk8PN/zO3+egFGMYX+MS9hFG604d/A/ulG7yFFkqOGgcuUuSJK32q0NPMMaNrE9RC10UxzCedX6767cqDu2MGV8YlFz62ed9iWVkYvy/IyimEUSFaKD3QwV7ENwapmlHymVU5126tNHVh9MW3s8bfXhOW8b16TpliR5otW8jm6GHiSEYOYoF076Zjx6x29/8OHfssZzNp6Ou3XzF8zicxYtZWBislfUKL4CFgIvd5mcYuowed7PjKOSGTYWwiAsij6srChmJI058Q6qyIYD9jgIIQzWxXygPtZPpUj6gGJv/V4HGoViPsLWt77bK9P7FDtg8zPr21RrX48wT3g11OcA0MG2oii8aXB4jiInK5FmSAcOGBUawwFvtFuJO7dpbLBynuM/UK0Jn0YolXtqNfn4vl/bRZ7pfcsXdrqX3f/rhgd/L+m0J8zMdZ1eKTn7U7C4zNg+yhX+ed2/syZ2AkZQ12umSRyI8wpOqdaXdTszRmocOR5Mz2bu/ZnL81/xIsTnyFCOsKpeg9ViPBo1jxMq1UVpEjS3r+K/Pe81aJQ0qhShlQiuxPxOtL+J1heOZZ0e63LUQAAAAABJRU5ErkJggg==', + 'options': [ + { + 'name': 'enabled', + 'type': 'enabler', + 'default': False, + }, + { + 'name': 'extra_score', + 'advanced': True, + 'label': 'Extra Score', + 'type': 'int', + 'default': 0, + 'description': 'Starting score for each release found via this provider.', + } + ], + }, + ], +}] diff --git a/couchpotato/core/media/_base/providers/nzb/binnewz/nzbclub.py b/couchpotato/core/media/_base/providers/nzb/binnewz/nzbclub.py new file mode 100644 index 0000000000..dcad4e77e9 --- /dev/null +++ b/couchpotato/core/media/_base/providers/nzb/binnewz/nzbclub.py @@ -0,0 +1,64 @@ +from bs4 import BeautifulSoup +from nzbdownloader import NZBDownloader +from nzbdownloader import NZBGetURLSearchResult +from couchpotato.core.helpers.encoding import toUnicode,tryUrlencode +from couchpotato.core.helpers.variable import tryInt +from couchpotato.core.logger import CPLog +from couchpotato.core.helpers.rss import RSS +from 
couchpotato.core.media._base.providers.nzb.base import NZBProvider +from dateutil.parser import parse +import time +log = CPLog(__name__) +class NZBClub(NZBDownloader, NZBProvider, RSS): + + urls = { + 'search': 'https://www.nzbclub.com/nzbrss.aspx?%s', + } + + http_time_between_calls = 4 #seconds + + def search(self, filename, minSize, newsgroup=None): + + q = filename + + params = tryUrlencode({ + 'q': q, + 'qq': newsgroup, + 'ig': 1, + 'rpp': 200, + 'st': 5, + 'sp': 1, + 'ns': 1, + }) + + nzbs = self.getRSSData(self.urls['search'] % params) + + for nzb in nzbs: + + nzbclub_id = tryInt(self.getTextElement(nzb, "link").split('/nzb_view/')[1].split('/')[0]) + enclosure = self.getElement(nzb, "enclosure").attrib + size = enclosure['length'] + date = self.getTextElement(nzb, "pubDate") + + def extra_check(item): + full_description = self.getCache('nzbclub.%s' % nzbclub_id, item['detail_url'], cache_timeout = 25920000) + + for ignored in ['ARCHIVE inside ARCHIVE', 'Incomplete', 'repair impossible']: + if ignored in full_description: + log.info('Wrong: Seems to be passworded or corrupted files: %s', item['name']) + # return False + + #return True + nzbid = nzbclub_id + #'name': toUnicode(self.getTextElement(nzb, "title")), + age = self.calculateAge(int(time.mktime(parse(date).timetuple()))) + sizeInMegs = (tryInt(size)/1024/1024) + downloadUrl = enclosure['url'].replace(' ', '_') + nzbClubURL = self.getTextElement(nzb, "link") + #'get_more_info': self.getMoreInfo, + #'extra_check': extra_check + + + return NZBGetURLSearchResult( self, downloadUrl, sizeInMegs, nzbClubURL, age, nzbid) + + \ No newline at end of file diff --git a/couchpotato/core/media/_base/providers/nzb/binnewz/nzbdownloader.py b/couchpotato/core/media/_base/providers/nzb/binnewz/nzbdownloader.py new file mode 100644 index 0000000000..687f8e058e --- /dev/null +++ b/couchpotato/core/media/_base/providers/nzb/binnewz/nzbdownloader.py @@ -0,0 +1,83 @@ +import urllib2 +from StringIO import StringIO +import 
# --- couchpotato/core/media/_base/providers/nzb/binnewz/nzbdownloader.py ---

import cookielib
import gzip
import time
import urllib2
from StringIO import StringIO


class NZBDownloader(object):
    """Minimal rate-limited urllib2 client shared by the binnewz searchers."""

    def __init__(self):
        self.cj = cookielib.CookieJar()
        self.opener = urllib2.build_opener(
            urllib2.HTTPCookieProcessor(self.cj), urllib2.HTTPSHandler(), urllib2.HTTPHandler())
        self.opener.addheaders = [
            ('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.57 Safari/537.17')
        ]
        self.lastRequestTime = None

    def waitBeforeNextRequest(self):
        # Throttle: at most one request every 10 seconds.
        if self.lastRequestTime and self.lastRequestTime > (time.mktime(time.localtime()) - 10):
            time.sleep(10)
        # FIX: was time.gmtime() (a struct_time) compared against an epoch
        # float above, which is never a meaningful comparison; store epoch
        # seconds so the 10s throttle actually works.
        self.lastRequestTime = time.mktime(time.localtime())

    def open(self, request):
        self.waitBeforeNextRequest()
        return self.opener.open(request)


class NZBSearchResult(object):
    """A single search hit; subclasses know how to fetch the actual NZB."""

    def __init__(self, downloader, sizeInMegs, refererURL, age, nzbid):
        self.downloader = downloader
        self.refererURL = refererURL
        self.sizeInMegs = sizeInMegs
        self.age = age
        self.nzbid = nzbid

    def readRequest(self, request):
        """Perform the request with browser-like headers; gunzip when needed."""
        request.add_header('Accept-encoding', 'gzip')
        request.add_header('Referer', self.refererURL)
        request.add_header('Accept-Encoding', 'gzip')
        request.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.57 Safari/537.17')

        response = self.downloader.open(request)
        if response.info().get('Content-Encoding') == 'gzip':
            return gzip.GzipFile(fileobj = StringIO(response.read())).read()
        return response.read()

    def getNZB(self):
        pass


class NZBGetURLSearchResult(NZBSearchResult):
    """Result fetched with a plain GET of the nzb url."""

    def __init__(self, downloader, nzburl, sizeInMegs, refererURL, age, nzbid):
        NZBSearchResult.__init__(self, downloader, sizeInMegs, refererURL, age, nzbid)
        self.nzburl = nzburl

    def getNZB(self):
        self.nzbdata = NZBSearchResult.readRequest(self, urllib2.Request(self.nzburl))
        return self.nzbdata


class NZBPostURLSearchResult(NZBSearchResult):
    """Result fetched with a POST of ``postData`` to the nzb url."""

    def __init__(self, downloader, nzburl, postData, sizeInMegs, refererURL, age, nzbid):
        NZBSearchResult.__init__(self, downloader, sizeInMegs, refererURL, age, nzbid)
        self.nzburl = nzburl
        self.postData = postData

    def getNZB(self):
        self.nzbdata = NZBSearchResult.readRequest(self, urllib2.Request(self.nzburl, self.postData))
        return self.nzbdata


class NZBDataSearchResult(NZBSearchResult):
    """Result whose NZB payload is already in memory."""

    def __init__(self, nzbdata, sizeInMegs, refererURL, age, nzbid):
        # FIX: sizeInMegs was dropped from the parent call, shifting every
        # argument one position (refererURL became sizeInMegs, etc.) and
        # raising TypeError for the missing last argument.
        NZBSearchResult.__init__(self, None, sizeInMegs, refererURL, age, nzbid)
        self.nzbdata = nzbdata

    def getNZB(self):
        return self.nzbdata


# --- couchpotato/core/media/_base/providers/nzb/binnewz/nzbindex.py --------

import time

from dateutil.parser import parse

from nzbdownloader import NZBDownloader, NZBGetURLSearchResult
from couchpotato.core.helpers.rss import RSS
from couchpotato.core.helpers.encoding import tryUrlencode
from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.media._base.providers.nzb.base import NZBProvider
from couchpotato.environment import Env

log = CPLog(__name__)


class NZBIndex(NZBDownloader, NZBProvider, RSS):

    urls = {
        'download': 'https://www.nzbindex.nl/download/',
        'search': 'http://www.nzbindex.com/rss/?%s',
    }

    http_time_between_calls = 5  # Seconds

    def search(self, filename, minSize, newsgroup = None):
        """Query the nzbindex RSS search for ``filename`` (>= minSize MB)."""
        arguments = tryUrlencode({
            'q': filename,
            'age': Env.setting('retention', 'nzb'),
            'sort': 'agedesc',
            'minsize': minSize,
            'rating': 1,
            'max': 250,
            'more': 1,
            'complete': 1,
        })
        nzbs = self.getRSSData(self.urls['search'] % arguments)
        nzbid = None
        # (search() continues on the next original chunk)
+ for nzb in nzbs: + + enclosure = self.getElement(nzb, 'enclosure').attrib + nzbindex_id = int(self.getTextElement(nzb, "link").split('/')[4]) + + + nzbid = nzbindex_id + age = self.calculateAge(int(time.mktime(parse(self.getTextElement(nzb, "pubDate")).timetuple()))) + sizeInMegs = tryInt(enclosure['length']) / 1024 / 1024 + downloadUrl = enclosure['url'] + detailURL = enclosure['url'].replace('/download/', '/release/') + + if nzbid: + return NZBGetURLSearchResult(self, downloadUrl, sizeInMegs, detailURL, age, nzbid) diff --git a/couchpotato/core/media/_base/providers/nzb/binsearch.py b/couchpotato/core/media/_base/providers/nzb/binsearch.py new file mode 100644 index 0000000000..6b798840c1 --- /dev/null +++ b/couchpotato/core/media/_base/providers/nzb/binsearch.py @@ -0,0 +1,120 @@ +import re +import traceback + +from bs4 import BeautifulSoup +from couchpotato.core.helpers.variable import tryInt, simplifyString +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.nzb.base import NZBProvider + + +log = CPLog(__name__) + + +class Base(NZBProvider): + + urls = { + 'download': 'https://www.binsearch.info/fcgi/nzb.fcgi?q=%s', + 'detail': 'https://www.binsearch.info%s', + 'search': 'https://www.binsearch.info/index.php?%s', + } + + http_time_between_calls = 4 # Seconds + + def _search(self, media, quality, results): + + data = self.getHTMLData(self.urls['search'] % self.buildUrl(media, quality)) + + if data: + try: + + html = BeautifulSoup(data) + main_table = html.find('table', attrs = {'id': 'r2'}) + + if not main_table: + return + + items = main_table.find_all('tr') + + for row in items: + title = row.find('span', attrs = {'class': 's'}) + + if not title: continue + + nzb_id = row.find('input', attrs = {'type': 'checkbox'})['name'] + info = row.find('span', attrs = {'class':'d'}) + size_match = re.search('size:.(?P[0-9\.]+.[GMB]+)', info.text) + + age = 0 + try: age = re.search('(?P\d+d)', 
row.find_all('td')[-1:][0].text).group('size')[:-1] + except: pass + + def extra_check(item): + parts = re.search('available:.(?P\d+)./.(?P\d+)', info.text) + total = float(tryInt(parts.group('total'))) + parts = float(tryInt(parts.group('parts'))) + + if (total / parts) < 1 and ((total / parts) < 0.95 or ((total / parts) >= 0.95 and not ('par2' in info.text.lower() or 'pa3' in info.text.lower()))): + log.info2('Wrong: \'%s\', not complete: %s out of %s', (item['name'], parts, total)) + return False + + if 'requires password' in info.text.lower(): + log.info2('Wrong: \'%s\', passworded', (item['name'])) + return False + + return True + + results.append({ + 'id': nzb_id, + 'name': simplifyString(title.text), + 'age': tryInt(age), + 'size': self.parseSize(size_match.group('size')), + 'url': self.urls['download'] % nzb_id, + 'detail_url': self.urls['detail'] % info.find('a')['href'], + 'extra_check': extra_check + }) + + except: + log.error('Failed to parse HTML response from BinSearch: %s', traceback.format_exc()) + + def download(self, url = '', nzb_id = ''): + + data = { + 'action': 'nzb', + nzb_id: 'on' + } + + try: + return self.urlopen(url, data = data, show_error = False) + except: + log.error('Failed getting nzb from %s: %s', (self.getName(), traceback.format_exc())) + + return 'try_next' + + +config = [{ + 'name': 'binsearch', + 'groups': [ + { + 'tab': 'searcher', + 'list': 'nzb_providers', + 'name': 'binsearch', + 'description': 'Free provider, less accurate. 
See BinSearch', + 'wizard': True, + 'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAAAAAA6mKC9AAAATklEQVQY02NwQAMMWAXOnz+PKvD//3/CAvM//z+fgiwAAs+RBab4PP//vwbFjPlAffgEChzOo2r5fBuIfRAC5w8D+QUofkkp8MHjOWQAAM3Sbogztg2wAAAAAElFTkSuQmCC', + 'options': [ + { + 'name': 'enabled', + 'type': 'enabler', + }, + { + 'name': 'extra_score', + 'advanced': True, + 'label': 'Extra Score', + 'type': 'int', + 'default': 0, + 'description': 'Starting score for each release found via this provider.', + } + ], + }, + ], +}] diff --git a/couchpotato/core/media/_base/providers/nzb/newznab.py b/couchpotato/core/media/_base/providers/nzb/newznab.py new file mode 100644 index 0000000000..0f28db8b65 --- /dev/null +++ b/couchpotato/core/media/_base/providers/nzb/newznab.py @@ -0,0 +1,291 @@ +from urlparse import urlparse +import time +import traceback +import re + +from couchpotato.core.helpers.encoding import tryUrlencode, toUnicode +from couchpotato.core.helpers.rss import RSS +from couchpotato.core.helpers.variable import cleanHost, splitString, tryInt +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.base import ResultList +from couchpotato.core.media._base.providers.nzb.base import NZBProvider +from couchpotato.environment import Env +from dateutil.parser import parse +from requests import HTTPError + + +log = CPLog(__name__) + + +class Base(NZBProvider, RSS): + + urls = { + 'detail': 'details/%s', + 'download': 't=get&id=%s' + } + + passwords_regex = 'password|wachtwoord' + limits_reached = {} + + http_time_between_calls = 2 # Seconds + + def search(self, media, quality): + hosts = self.getHosts() + + results = ResultList(self, media, quality, imdb_results = True) + + for host in hosts: + if self.isDisabled(host): + continue + + self._searchOnHost(host, media, quality, results) + + return results + + def _searchOnHost(self, host, media, quality, results): + + query = self.buildUrl(media, host) + url = '%s%s' % (self.getUrl(host['host']), query) + nzbs = 
self.getRSSData(url, cache_timeout = 1800, headers = {'User-Agent': Env.getIdentifier()}) + + for nzb in nzbs: + + date = None + spotter = None + for item in nzb: + if date and spotter: + break + if item.attrib.get('name') == 'usenetdate': + date = item.attrib.get('value') + break + + # Get the name of the person who posts the spot + if item.attrib.get('name') == 'poster': + if "@spot.net" in item.attrib.get('value'): + spotter = item.attrib.get('value').split("@")[0] + continue + + if not date: + date = self.getTextElement(nzb, 'pubDate') + + name = self.getTextElement(nzb, 'title') + detail_url = self.getTextElement(nzb, 'guid') + nzb_id = detail_url.split('/')[-1:].pop() + + try: + link = self.getElement(nzb, 'enclosure').attrib['url'] + except: + link = self.getTextElement(nzb, 'link') + + if '://' not in detail_url: + detail_url = (cleanHost(host['host']) + self.urls['detail']) % tryUrlencode(nzb_id) + + if not link: + link = ((self.getUrl(host['host']) + self.urls['download']) % tryUrlencode(nzb_id)) + self.getApiExt(host) + + if not name: + continue + + name_extra = '' + if spotter: + name_extra = spotter + + description = '' + if "@spot.net" in nzb_id: + try: + # Get details for extended description to retrieve passwords + query = self.buildDetailsUrl(nzb_id, host['api_key']) + url = '%s%s' % (self.getUrl(host['host']), query) + nzb_details = self.getRSSData(url, cache_timeout = 1800, headers = {'User-Agent': Env.getIdentifier()})[0] + + description = self.getTextElement(nzb_details, 'description') + + # Extract a password from the description + password = re.search('(?:' + self.passwords_regex + ')(?: *)(?:\:|\=)(?: *)(.*?)\|\n|$', description, flags = re.I).group(1) + if password: + name += ' {{%s}}' % password.strip() + except: + log.debug('Error getting details of "%s": %s', (name, traceback.format_exc())) + + results.append({ + 'id': nzb_id, + 'provider_extra': urlparse(host['host']).hostname or host['host'], + 'name': toUnicode(name), + 'name_extra': 
name_extra, + 'age': self.calculateAge(int(time.mktime(parse(date).timetuple()))), + 'size': int(self.getElement(nzb, 'enclosure').attrib['length']) / 1024 / 1024, + 'url': link, + 'detail_url': detail_url, + 'content': self.getTextElement(nzb, 'description'), + 'description': description, + 'score': host['extra_score'], + }) + + def getHosts(self): + + uses = splitString(str(self.conf('use')), clean = False) + hosts = splitString(self.conf('host'), clean = False) + api_keys = splitString(self.conf('api_key'), clean = False) + extra_score = splitString(self.conf('extra_score'), clean = False) + custom_tags = splitString(self.conf('custom_tag'), clean = False) + custom_categories = splitString(self.conf('custom_categories'), clean = False) + + list = [] + for nr in range(len(hosts)): + + try: key = api_keys[nr] + except: key = '' + + try: host = hosts[nr] + except: host = '' + + try: score = tryInt(extra_score[nr]) + except: score = 0 + + try: custom_tag = custom_tags[nr] + except: custom_tag = '' + + try: custom_category = custom_categories[nr].replace(" ", ",") + except: custom_category = '' + + list.append({ + 'use': uses[nr], + 'host': host, + 'api_key': key, + 'extra_score': score, + 'custom_tag': custom_tag, + 'custom_category' : custom_category + }) + + return list + + def belongsTo(self, url, provider = None, host = None): + + hosts = self.getHosts() + + for host in hosts: + result = super(Base, self).belongsTo(url, host = host['host'], provider = provider) + if result: + return result + + def getUrl(self, host): + if '?page=newznabapi' in host: + return cleanHost(host)[:-1] + '&' + + return cleanHost(host) + 'api?' 
+ + def isDisabled(self, host = None): + return not self.isEnabled(host) + + def isEnabled(self, host = None): + + # Return true if at least one is enabled and no host is given + if host is None: + for host in self.getHosts(): + if self.isEnabled(host): + return True + return False + + return NZBProvider.isEnabled(self) and host['host'] and host['api_key'] and int(host['use']) + + def getApiExt(self, host): + return '&apikey=%s' % host['api_key'] + + def download(self, url = '', nzb_id = ''): + host = urlparse(url).hostname + + if self.limits_reached.get(host): + # Try again in 3 hours + if self.limits_reached[host] > time.time() - 10800: + return 'try_next' + + try: + data = self.urlopen(url, show_error = False, headers = {'User-Agent': Env.getIdentifier()}) + self.limits_reached[host] = False + return data + except HTTPError as e: + sc = e.response.status_code + if sc in [503, 429]: + response = e.read().lower() + if sc == 429 or 'maximum api' in response or 'download limit' in response: + if not self.limits_reached.get(host): + log.error('Limit reached / to many requests for newznab provider: %s', host) + self.limits_reached[host] = time.time() + return 'try_next' + + log.error('Failed download from %s: %s', (host, traceback.format_exc())) + + return 'try_next' + + def buildDetailsUrl(self, nzb_id, api_key): + query = tryUrlencode({ + 't': 'details', + 'id': nzb_id, + 'apikey': api_key, + }) + return query + + + +config = [{ + 'name': 'newznab', + 'groups': [ + { + 'tab': 'searcher', + 'list': 'nzb_providers', + 'name': 'newznab', + 'order': 10, + 'description': 'Enable NewzNab such as NZB.su, \ + NZBs.org, DOGnzb.cr, \ + Spotweb, NZBGeek, \ + NZBFinder, Usenet-Crawler', + 'wizard': True, + 'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQAgMAAABinRfyAAAACVBMVEVjhwD///86aRovd/sBAAAAMklEQVQI12NgAIPQUCCRmQkjssDEShiRuRIqwZqZGcDAGBrqANUhGgIkWAOABKMDxCAA24UK50b26SAAAAAASUVORK5CYII=', + 'options': [ + { + 'name': 'enabled', + 'type': 'enabler', + 'default': True, + }, + { + 
'name': 'use', + 'default': '0,0,0,0,0,0' + }, + { + 'name': 'host', + 'default': 'api.nzb.su,api.dognzb.cr,nzbs.org,https://api.nzbgeek.info,https://www.nzbfinder.ws,https://www.usenet-crawler.com', + 'description': 'The hostname of your newznab provider', + }, + { + 'name': 'extra_score', + 'advanced': True, + 'label': 'Extra Score', + 'default': '0,0,0,0,0,0', + 'description': 'Starting score for each release found via this provider.', + }, + { + 'name': 'custom_tag', + 'advanced': True, + 'label': 'Custom tag', + 'default': ',,,,,', + 'description': 'Add custom tags, for example add rls=1 to get only scene releases from nzbs.org', + }, + { + 'name': 'custom_categories', + 'advanced': True, + 'label': 'Custom Categories', + 'default': '2000,2000,2000,2000,2000,2000', + 'description': 'Specify categories to search in seperated by a single space, defaults to all movies. EG: "2030 2040 2060" would only search in HD, SD, and 3D movie categories', + }, + { + 'name': 'api_key', + 'default': ',,,,,', + 'label': 'Api Key', + 'description': 'Can be found on your profile page', + 'type': 'combined', + 'combine': ['use', 'host', 'api_key', 'extra_score', 'custom_tag'], + }, + ], + }, + ], +}] diff --git a/couchpotato/core/media/_base/providers/nzb/nzbclub.py b/couchpotato/core/media/_base/providers/nzb/nzbclub.py new file mode 100644 index 0000000000..4e1c843544 --- /dev/null +++ b/couchpotato/core/media/_base/providers/nzb/nzbclub.py @@ -0,0 +1,100 @@ +import time + +from bs4 import BeautifulSoup +from couchpotato.core.helpers.encoding import toUnicode +from couchpotato.core.helpers.rss import RSS +from couchpotato.core.helpers.variable import tryInt +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.nzb.base import NZBProvider +from dateutil.parser import parse + + +log = CPLog(__name__) + + +class Base(NZBProvider, RSS): + + urls = { + 'search': 'https://www.nzbclub.com/nzbrss.aspx?%s', + } + + http_time_between_calls = 4 # seconds + 
+ def _search(self, media, quality, results): + + nzbs = self.getRSSData(self.urls['search'] % self.buildUrl(media)) + + for nzb in nzbs: + + nzbclub_id = tryInt(self.getTextElement(nzb, "link").split('/nzb_view/')[1].split('/')[0]) + enclosure = self.getElement(nzb, "enclosure").attrib + size = enclosure['length'] + date = self.getTextElement(nzb, "pubDate") + + def extra_check(item): + full_description = self.getCache('nzbclub.%s' % nzbclub_id, item['detail_url'], cache_timeout = 25920000) + + for ignored in ['ARCHIVE inside ARCHIVE', 'Incomplete', 'repair impossible']: + if ignored in full_description: + log.info('Wrong: Seems to be passworded or corrupted files: %s', item['name']) + return False + + return True + + results.append({ + 'id': nzbclub_id, + 'name': toUnicode(self.getTextElement(nzb, "title")), + 'age': self.calculateAge(int(time.mktime(parse(date).timetuple()))), + 'size': tryInt(size) / 1024 / 1024, + 'url': enclosure['url'].replace(' ', '_'), + 'detail_url': self.getTextElement(nzb, "link"), + 'get_more_info': self.getMoreInfo, + 'extra_check': extra_check + }) + + def getMoreInfo(self, item): + full_description = self.getCache('nzbclub.%s' % item['id'], item['detail_url'], cache_timeout = 25920000) + html = BeautifulSoup(full_description) + nfo_pre = html.find('pre', attrs = {'class': 'nfo'}) + description = toUnicode(nfo_pre.text) if nfo_pre else '' + + item['description'] = description + return item + + def extraCheck(self, item): + full_description = self.getCache('nzbclub.%s' % item['id'], item['detail_url'], cache_timeout = 25920000) + + if 'ARCHIVE inside ARCHIVE' in full_description: + log.info('Wrong: Seems to be passworded files: %s', item['name']) + return False + + return True + + +config = [{ + 'name': 'nzbclub', + 'groups': [ + { + 'tab': 'searcher', + 'list': 'nzb_providers', + 'name': 'NZBClub', + 'description': 'Free provider, less accurate. 
See NZBClub', + 'wizard': True, + 'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAACEUlEQVQ4y3VSMWgUQRR9/8/s7OzeJSdnTsVGghLEYBNQjBpQiRBFhIB2EcHG1kbs0murhZAmVocExEZQ0c7CxkLINYcJJpoYj9wZcnu72fF21uJSXMzuhyne58/j/fcf4b+KokgBIOSU53lxP5b9oNVqDT36dH+5UjoiKvIwPFEEgWBshGZ3E7/NOupL9fMjx0e+ZhKsrq+c/FPZKJi0w4FsQXMBDEJsd7BNW9h2tuyP9vfTALIJkMIu1hYRtINM+dpzcWc0sbkreK4fUEogyraAmKGF3+7vcT/wtR9QwkCabSAzQQuvk0uglAo5YaQ5DASGYjfMXcHVOqKu6NmR7iehlKAdHWUqWPv1c3i+9uwVdRlEBGaGEAJCCrDo9ShhvF6qPq8tL57bp+DbRn2sHtUuCY9YphLMu5921VhrwYJ5tbt0tt6sjQP4vEfB2Ikz7/ytwbeR6ljHkXCUA6UcOLtPOg4MYhtH8ZcLw5er+xQMDAwEURRNl96X596Y6oxFwsw9fmtTOAr2Ik19nL365FZpsLSdnQPPM8aYewc+lDcX4rkHqbQMAGTJXulOLzycmr1bKBTi3DOGYagajcahiaOT89fbM0/dxEsUu3aidfPljWO3HzebzYNBELi5Z5RSJlrrHd/3w8lT114MrVTWOn875fHRiYVisRhorWMpZXdvNnLKGCOstb0AMlulVJI19w/+nceU4D0aCwAAAABJRU5ErkJggg==', + 'options': [ + { + 'name': 'enabled', + 'type': 'enabler', + }, + { + 'name': 'extra_score', + 'advanced': True, + 'label': 'Extra Score', + 'type': 'int', + 'default': 0, + 'description': 'Starting score for each release found via this provider.', + } + ], + }, + ], +}] diff --git a/couchpotato/core/media/_base/providers/nzb/omgwtfnzbs.py b/couchpotato/core/media/_base/providers/nzb/omgwtfnzbs.py new file mode 100644 index 0000000000..6d4d0a28fc --- /dev/null +++ b/couchpotato/core/media/_base/providers/nzb/omgwtfnzbs.py @@ -0,0 +1,99 @@ +from couchpotato.core.event import fireEvent +from couchpotato.core.helpers.encoding import toUnicode, tryUrlencode +from couchpotato.core.helpers.rss import RSS +from couchpotato.core.helpers.variable import tryInt +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.nzb.base import NZBProvider + + +log = CPLog(__name__) + + +class Base(NZBProvider, RSS): + + urls = { + 'search': 'https://api.omgwtfnzbs.me/json/?%s', + } + + http_time_between_calls = 1 # Seconds + + cat_ids = [ + ([15], ['dvdrip', 'scr', 'r5', 'tc', 'ts', 'cam']), + ([15, 16], ['brrip']), + ([16], ['720p', 
'1080p', 'bd50']), + ([17], ['dvdr']), + ] + cat_backup_id = 'movie' + + def _searchOnTitle(self, title, movie, quality, results): + + q = '%s %s' % (title, movie['info']['year']) + params = tryUrlencode({ + 'search': q, + 'catid': ','.join([str(x) for x in self.getCatId(quality)]), + 'user': self.conf('username', default = ''), + 'api': self.conf('api_key', default = ''), + }) + + if len(self.conf('custom_tag')) > 0: + params = '%s&%s' % (params, self.conf('custom_tag')) + + nzbs = self.getJsonData(self.urls['search'] % params) + + if isinstance(nzbs, list): + for nzb in nzbs: + + results.append({ + 'id': nzb.get('nzbid'), + 'name': toUnicode(nzb.get('release')), + 'age': self.calculateAge(tryInt(nzb.get('usenetage'))), + 'size': tryInt(nzb.get('sizebytes')) / 1024 / 1024, + 'url': nzb.get('getnzb'), + 'detail_url': nzb.get('details'), + 'description': nzb.get('weblink') + }) + + +config = [{ + 'name': 'omgwtfnzbs', + 'groups': [ + { + 'tab': 'searcher', + 'list': 'nzb_providers', + 'name': 'OMGWTFNZBs', + 'description': 'See OMGWTFNZBs', + 'wizard': True, + 'icon': 
'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQEAIAAADAAbR1AAADbElEQVR4AZ2UW0ybZRiAy/OvdHaLYvB0YTRIFi7GkM44zRLmIfNixkWdiRMyYoxRE8/TC7MYvXCGEBmr3mxLwVMwY0wYA7e6Wso4lB6h/U9taSlMGIfBXLYlJMyo0S///2dJI5lxN8/F2/f9nu9737e/jYmXr6KTbN9BGG9HE/NotQ76UWziNzrXFiETk/5ARUNH+7+0kW7fSgTl0VKGOLZzidOkmuuIo7q2oTArNLPIzhdIkqXkerFOm2CaD/5bcKrjIL2c3fkhPxOq93Kcb91v46fV9TQKF4TgV/TbUsQtzfCaK6jMOd5DJrguSIIhexmqqVxN0FXbRR8/ND/LYTTj6J7nl2gnL47OkDW4KJhnQHCa6JpKVNJGA3OC58nwBJoZ//ebbIyKpBxjrr0o1q1FMRkrKXZnHWF85VvxMrJxibwhGyd0f5bLnKzqJs1k0Sfo+EU8hdAUvkbcwKEgs2D0OiV4jmmD1zb+Tp6er0JMMvDxPo5xev9zTBF683NS+N56n1YiB95B5crr93KRuKhKI0tb0Kw2mgLLqTjLEWO8424i9IvURaYeOckwf3+/yCC9e3bQQ/MuD+Monk0k+XFXMUfx7z5EEP+XlXi5tLlMxH8zLppw7idJrugcus30kC86gc7UrQqjLIukM8zWHOACeU+TiMxXN6ExVOkgz4lvPEzice1GIVhxhG4CrZvpl6TH55giKWqXGLy9hZh5aUtgDSew/msSyCKpl+DDNfxJc8NBIsxUxUnz14O/oONu+IIIvso9TLBQ1SY5rUhuSzUhAqJ2mRXBLDOCeUtgUZXsaObT8BffhUJPqWgiV+3zKKzYH0ClvTRLhD77HIqVkyh5jThnivehoG+qJctIRSPn6bxvO4FCgTl9c1DmbpjLajbQFE8aW5SU3rg+zOPGUjTUF9NFpLEbH2c/KmGYlY69/GQJVtGMSUcEp9eCbB1nctbxHTLRdTUkGDf+B02uGWRG3OvpJ/zSMwzif+oxVBID3cQKBavLCiPmB2PM2UuSCUPgrX4VDb97AwEG67bh4+KTOlncvu3M31BwA5rLHbCfEjwkNDky9e/SSbSxnD46Pg0RJtpXRvhmBSZHpRjWtKwFybjuQeXaKxto4WjLZZZvVmC17pZLJFkwxm5++PS2Mrwc7nyIMYZe/IzoP5d6QgEybqTXAAAAAElFTkSuQmCC', + 'options': [ + { + 'name': 'enabled', + 'type': 'enabler', + }, + { + 'name': 'username', + 'default': '', + }, + { + 'name': 'api_key', + 'label': 'Api Key', + 'default': '', + }, + { + 'name': 'custom_tag', + 'advanced': True, + 'label': 'Custom tag', + 'default': '', + 'description': 'Add custom parameters, for example add catid=18 to get foreign (non-english) movies', + }, + { + 'name': 'extra_score', + 'advanced': True, + 'label': 'Extra Score', + 'default': 20, + 'type': 'int', + 'description': 'Starting score for each release found via this provider.', + } + ], + }, + ], +}] diff --git a/couchpotato/core/media/_base/providers/torrent/__init__.py b/couchpotato/core/media/_base/providers/torrent/__init__.py new file mode 100644 index 
0000000000..12dda708d0 --- /dev/null +++ b/couchpotato/core/media/_base/providers/torrent/__init__.py @@ -0,0 +1,14 @@ +config = [{ + 'name': 'torrent_providers', + 'groups': [ + { + 'label': 'Torrent Providers', + 'description': 'Providers searching torrent sites for new releases', + 'wizard': True, + 'type': 'list', + 'name': 'torrent_providers', + 'tab': 'searcher', + 'options': [], + }, + ], +}] diff --git a/couchpotato/core/media/_base/providers/torrent/abnormal.py b/couchpotato/core/media/_base/providers/torrent/abnormal.py new file mode 100644 index 0000000000..a9c089dff0 --- /dev/null +++ b/couchpotato/core/media/_base/providers/torrent/abnormal.py @@ -0,0 +1,151 @@ +О╩©import traceback +import urlparse +import urllib +import re +import unicodedata + +from datetime import datetime +from bs4 import BeautifulSoup +from couchpotato.core.helpers.encoding import toUnicode, simplifyString, tryUrlencode +from couchpotato.core.helpers.variable import getTitle, tryInt +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.torrent.base import TorrentProvider +import re + +log = CPLog(__name__) + + +class Base(TorrentProvider): + urls = { + 'login' : 'https://abnormal.ws/login.php', + 'detail' : 'https://abnormal.ws/torrents.php?id=%s', + 'search' : 'https://abnormal.ws/torrents.php?', + 'home' : 'https://abnormal.ws/%s', + } + + http_time_between_calls = 1 #seconds + + def _search(self, media, quality, results): + + #urllib.urlencode( {'name': getTitle(media['info']) }) + for title in media['info']['titles']: + try: + TitleStringReal = str(title.encode("latin-1").replace('-',' ')) + + url = self.urls['search'] + 'cat[]=MOVIE|DVDR&cat[]=MOVIE|DVDRIP&cat[]=MOVIE|BDRIP&cat[]=MOVIE|VOSTFR&cat[]=MOVIE|HD|720p&cat[]=MOVIE|HD|1080p&cat[]=MOVIE|REMUXBR&cat[]=MOVIE|FULLBR&cat[]=ANIME&' + urllib.urlencode( {'search': unicodedata.normalize('NFKD', title).encode('ASCII', 'ignore').replace('\'', ' ') }) + '&order=Time&way=desc' + + data = 
self.getHTMLData(url) + + if data: + html = BeautifulSoup(data) + try: + #Get first entry in table + torrentTable = html.find('table', class_ = 'torrent_table cats') + + if torrentTable: + torrents = torrentTable.find_all('tr', class_=None) + torrents += torrentTable.find_all('tr', class_='tablerow-lightgrey') + + for torrentRow in torrents: + + nameCell = torrentRow.find_all('td')[1] + downloadCell = torrentRow.find_all('td')[3] + sizeCell = torrentRow.find_all('td')[4] + seedersCell = torrentRow.find_all('td')[5] + leechersCell = torrentRow.find_all('td')[6] + + name = nameCell.find_all('a')[0].get_text().upper() + + splittedReleaseName = re.split('(\.[0-9]{4}\.)', name, flags=re.IGNORECASE) + + if len(splittedReleaseName) > 1: + cleanedReleaseName = ''.join(splittedReleaseName[0:-2]) + + match = re.compile(ur"[\w]+", re.UNICODE) + nameSplit = ''.join(match.findall(unicodedata.normalize('NFKD', cleanedReleaseName).encode('ASCII','ignore'))) + titleSplit = ''.join(match.findall(unicodedata.normalize('NFKD', title.upper()).encode('ASCII','ignore'))) + + if titleSplit == nameSplit: + downloadUrl = downloadCell.find('a')['href'] + parsed = urlparse.urlparse(downloadUrl) + torrent_id = urlparse.parse_qs(parsed.query)['id'] + + new = {} + new['id'] = torrent_id + new['name'] = name + new['url'] = self.urls['home'] % (downloadUrl) + new['detail_url'] = self.urls['home'] % (nameCell.find('a')['href']) + new['size'] = self.parseSize(sizeCell.get_text()) + new['seeders'] = tryInt(seedersCell.get_text()) + new['leechers'] = tryInt(leechersCell.get_text()) + + results.append(new) + except: + log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc())) + except: + continue + + def getLoginParams(self): + return { + 'username': self.conf('username'), + 'password': self.conf('password'), + 'Login': '', + } + + def loginSuccess(self, output): + return 'logout.php' in output.lower() + + loginCheckSuccess = loginSuccess + + +config = [{ + 'name': 
'abnormal', + 'groups': [ + { + 'tab': 'searcher', + 'list': 'torrent_providers', + 'name': 'abnormal', + 'description': 'See Abnormal', + 'wizard': True, + 'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAIAAACQkWg2AAABzElEQVR4nJWQW08TURSF97nMzJlbO6XtFNuCKQzRcjFFSkCixkSCCSa++Qv9E0QTgw8+mGhqFFQQCS2lthRb6AxzKzPjgyYKosJ623vl2ytrA1xS5M8VQqh066GeM1p7WxcC8oWJq2MlgcnH5qFtdc+4+OwBQq/duI0xAYTyI1MA6D9A4XpZUjSEUBQGABAfGPwXQDmhWLoLACd9/9jsBH2vfOcRJvSvHabK99V46qBVc2xLFJXh0SLGmBDablbPSWCSakzMrVdWP1RW4wmdSbLn2kk9v7D4mDH5nITJ8uKJ76+9fuY6lqQm0pkhbSDDM1FgMiJkd3vtVIIox1J6buP9yzAMAeDrzkeeyYBQFEVRFE7PPZAU7RRwc2GZEH6/sf1j/NauN+tbURiGQeB7ruOYkzP3fgExTadUfLHy5PcHrL95bpldq9fxPZsSWppfUuPpnx0SqeFety2pWjprZHKGnh3VsyO7X94NFYpj47NKLEk5ARDieGFn8y0WmMpRHnOCHEsxUUkkB43izPT8EgBUXj3FmCKMEAACMMZnteQVwnMsCMO+7/qOyQQBYeTZ5sF+ba/6ybGOXMfqdVqN+majutGsfT46bMNl9R01bKCKBrRO8wAAAABJRU5ErkJggg==', + 'options': [ + { + 'name': 'enabled', + 'type': 'enabler', + 'default': False, + }, + { + 'name': 'username', + 'default': '', + }, + { + 'name': 'password', + 'default': '', + 'type': 'password', + }, + { + 'name': 'seed_ratio', + 'label': 'Seed ratio', + 'type': 'float', + 'default': 1, + 'description': 'Will not be (re)moved until this seed ratio is met.', + }, + { + 'name': 'seed_time', + 'label': 'Seed time', + 'type': 'int', + 'default': 40, + 'description': 'Will not be (re)moved until this seed time (in hours) is met.', + }, + { + 'name': 'extra_score', + 'advanced': True, + 'label': 'Extra Score', + 'type': 'int', + 'default': 20, + 'description': 'Starting score for each release found via this provider.', + } + ], + }, + ], +}] \ No newline at end of file diff --git a/couchpotato/core/media/_base/providers/torrent/addict.py b/couchpotato/core/media/_base/providers/torrent/addict.py new file mode 100644 index 0000000000..7f373d1f4d --- /dev/null +++ b/couchpotato/core/media/_base/providers/torrent/addict.py @@ -0,0 +1,256 @@ +from bs4 import BeautifulSoup +from couchpotato.core.helpers.variable import tryInt 
+from couchpotato.core.logger import CPLog +from couchpotato.core.helpers.encoding import simplifyString +from couchpotato.core.media._base.providers.torrent.base import TorrentProvider +from couchpotato.core.helpers import namer_check +import cookielib +import re +import urllib2 +import urllib +from StringIO import StringIO +import gzip +import time +import datetime +log = CPLog(__name__) + + +class Base(TorrentProvider): + + urls = { + 'test': 'https://addict-to.net/', + 'detail': 'https://addict-to.net/index.php?page=torrent-details&id=%s', + 'search': 'https://addict-to.net/index.php?page=torrents&', + } + + http_time_between_calls = 1 #seconds + cat_backup_id = None + cj = cookielib.CookieJar() + opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj)) + + class NotLoggedInHTTPError(urllib2.HTTPError): + def __init__(self, url, code, msg, headers, fp): + urllib2.HTTPError.__init__(self, url, code, msg, headers, fp) + + class PTPHTTPRedirectHandler(urllib2.HTTPRedirectHandler): + def http_error_302(self, req, fp, code, msg, headers): + log.debug("302 detected; redirected to %s" % headers['Location']) + if (headers['Location'] != 'login.php'): + return urllib2.HTTPRedirectHandler.http_error_302(self, req, fp, code, msg, headers) + else: + raise Base.NotLoggedInHTTPError(req.get_full_url(), code, msg, headers, fp) + + def getSearchParams(self, movie, quality): + results = [] + MovieTitles = movie['info']['titles'] + moviequality = simplifyString(quality['identifier']) + moviegenre = movie['info']['genres'] + if quality['custom']['3d']==1: + category=13 + elif 'Animation' in moviegenre: + category=25 + elif 'Documentaire' in moviegenre or 'Documentary' in moviegenre: + category=48 + else: + + if moviequality in ['720p']: + category=15 + elif moviequality in ['1080p']: + category=14 + elif moviequality in ['dvd-r']: + category=11 + elif moviequality in ['br-disk']: + category=49 + elif moviequality in ['bdrip']: + category=17 + elif moviequality in ['brrip']: 
+ category=18 + else: + category=16 + + + for MovieTitle in MovieTitles: + try: + TitleStringReal = str(MovieTitle.encode("latin-1").replace('-',' ')) + except: + continue + try: + results.append(urllib.urlencode( {'search': TitleStringReal, 'category' : category, 'page' : 'torrents', 'options' : 0, 'active' : 0})) + except: + continue + + return results + + def _search(self, movie, quality, results): + + # Cookie login + if not self.last_login_check and not self.login(): + return + searchStrings= self.getSearchParams(movie,quality) + lastsearch=0 + for searchString in searchStrings: + actualtime=int(time.time()) + if actualtime-lastsearch<10: + timetosleep= 10-(actualtime-lastsearch) + time.sleep(timetosleep) + URL = self.urls['search']+searchString + r = self.opener.open(URL) + soupfull = BeautifulSoup(r) + #hack to avoid dummy parsing css and else + delbegin=str(soupfull.prettify).split('')[1] + restable=delbegin[delbegin.find('')+6] + soup=BeautifulSoup(restable) + resultsTable = soup.find("table") + if resultsTable: + + rows = resultsTable.findAll("tr") + x=0 + for row in rows: + x=x+1 + if (x > 1): + #bypass first row because title only + #bypass date lines + if 'Liste des torrents' in str(row) : + continue + link = row.findAll('td')[1].find("a", href=re.compile("torrent-details")) + if link: + new={} + title = link.text + testname=namer_check.correctName(title,movie) + if testname==0: + continue + downloadURL = self.urls['test'] + "/" + row.find("a",href=re.compile("\.torrent"))['href'] + size= row.findAll('td')[9].text + leecher=row.findAll('td')[7].text + seeder=row.findAll('td')[6].text + date=row.findAll('td')[5].text + detail=self.urls['test'] + "/" + row.find("a",href=re.compile("torrent-details"))['href'] + + def extra_check(item): + return True + + new['id'] = detail[detail.rfind('=')+1:] + new['name'] = title + new['url'] = downloadURL + new['detail_url'] = detail + new['size'] = self.parseSize(size) + new['age'] = self.ageToDays(date) + 
new['seeders'] = tryInt(seeder) + new['leechers'] = tryInt(leecher) + new['extra_check'] = extra_check + new['download'] = self.download + + results.append(new) + + def ageToDays(self, age_str): + try: + from_dt = datetime.datetime.strptime(age_str[9:11]+'-'+age_str[12:14]+'-'+age_str[15:], "%d-%m-%Y") + except: + from_dt = datetime.datetime.strptime(age_str[9:11]+'-'+age_str[12:14]+'-'+age_str[15:], "%m-%d-%Y") + try: + to_dt = datetime.datetime.strptime(time.strftime("%x"), "%d/%m/%Y") + except: + to_dt = datetime.datetime.strptime(time.strftime("%x"), "%m/%d/%Y") + timedelta = to_dt - from_dt + diff_day = timedelta.days + return tryInt(diff_day) + + def login(self): + + self. opener.addheaders = [ + ('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko)'), + ('Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'), + ('Accept-Language', 'fr-fr,fr;q=0.5'), + ('Accept-Charset', 'ISO-8859-1,utf-8;q=0.7,*;q=0.7'), + ('Keep-Alive', '115'), + ('Connection', 'keep-alive'), + ('Cache-Control', 'max-age=0'), + ] + + data = urllib.urlencode({'uid': self.conf('username'), 'pwd' : self.conf('password'), 'submit' : 'Envoyer'}) + + + r = self.opener.open('https://addict-to.net/index.php?page=login',data) + + for index, cookie in enumerate(self.cj): + if (cookie.name == "xbtitFM"): login_done = True + + if not login_done: + log.error('Login to Addict failed') + return False + + if login_done: + log.debug('Login HTTP Addict status 200; seems successful') + self.last_login_check = self.opener + return True + + def download(self, url = '', nzb_id = ''): + if not self.last_login_check and not self.login(): + return + try: + request = urllib2.Request(url) + + response = self.last_login_check.open(request) + # unzip if needed + if response.info().get('Content-Encoding') == 'gzip': + buf = StringIO(response.read()) + f = gzip.GzipFile(fileobj = buf) + data = f.read() + f.close() + else: + data = response.read() + 
response.close() + return data + except: + return 'try_next' +config = [{ + 'name': 'addict', + 'groups': [ + { + 'tab': 'searcher', + 'list': 'torrent_providers', + 'name': 'addict', + 'description': 'See Addict', + 'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAABHNCSVQICAgIfAhkiAAAArZJREFUOI2NkktPE2EUht/5vmlH2oyIBAeKICUIMfUCUtuSSDTGaDckbkzcmLgx0Y0iCxe60sSVxhh/gDsNmhBjwMBCkwaiAblIQqhWqIptp1OmU3qZdjq003FHUEF9lue859mcF9gaxuVyXXW73Re32W9Atxr237pzOxkN+/Rypb5eENoSicTkfwvGfpjXNKbmPtHk1mJGiSlraWtLS0tnPB6f+Kfg6YJ5Y3HqyyOWqwW111rUyHSdWcGatJqscjpb2iVJer+tIPDNvDodmH1c0dehpRUsLwSwz9NnI3p6j7omfs5k822CINQqijLzh6D/2u2BH3HmMWNQ5FMSPs0Oo91zFk0dPbDV7a3SUyttSjz6zjDRy3GcXVXVeQAVAKBer/dSIhE+QXRp/7pO2ZXlKbR7/di1uxm5pAS+xgG9lOfKhURXQoyMgqEejuN2apr2EYBJ7Xb7saJe4kvrhVVD+y7s6ThZ5WjqRDYpgiUWBCdHoJcL8J27QuWvi95ENBwg1NJqtVobXC7XPFUUZV4QhC5FSZUJIWlqZOsYUm3bwe5E6OMYtHIGnjOXwVpqUO88gtxquEuOLi0aJtktiiIoAFOW5YnGxkZfLCYSTU0ulwtiay6b2wEOcJ+6BC2TgqEXQVkO+eIaIcTskKXYXLFYHNn4gizLAYfD0anmtaZMShpnWbX74PELlClRlAt5qGkFHwKDONzbB1tt3dD021d3AYR/6UEqlRrneb7BBOlZjUdH02LIx1c3A2UGc5MvcdDjR+zr5+fPHvYPAIhs2US/3z8TCoWqWQvXLUuRN2p6pTubSZMDR0+b4rfgi6Ent24CiG5b5WAwaGqaNme1WgXKWpxKMjLPstjHENvr4cF7A5uPAYD5XbAJwvP8dcOodJRKRaZUMh4AWPpLfksYSul5AIe2C/wE9XA/rBqvYMsAAAAASUVORK5CYII=', + 'wizard': True, + 'options': [ + { + 'name': 'enabled', + 'type': 'enabler', + 'default': False, + }, + { + 'name': 'username', + 'default': '', + }, + { + 'name': 'password', + 'default': '', + 'type': 'password', + }, + { + 'name': 'seed_ratio', + 'label': 'Seed ratio', + 'type': 'float', + 'default': 1, + 'description': 'Will not be (re)moved until this seed ratio is met.', + }, + { + 'name': 'seed_time', + 'label': 'Seed time', + 'type': 'int', + 'default': 40, + 'description': 'Will not be (re)moved until this seed time (in hours) is met.', + }, + { + 'name': 'extra_score', + 'advanced': True, + 'label': 'Extra Score', + 'type': 'int', + 'default': 0, + 'description': 'Starting score for each release found via this provider.', + 
} + ], + }, + ], +}] diff --git a/couchpotato/core/media/_base/providers/torrent/alpharatio.py b/couchpotato/core/media/_base/providers/torrent/alpharatio.py new file mode 100644 index 0000000000..96d91dedf6 --- /dev/null +++ b/couchpotato/core/media/_base/providers/torrent/alpharatio.py @@ -0,0 +1,134 @@ +import traceback + +from bs4 import BeautifulSoup +from couchpotato.core.helpers.variable import tryInt +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.torrent.base import TorrentProvider +import six + + +log = CPLog(__name__) + + +class Base(TorrentProvider): + + urls = { + 'test': 'https://alpharatio.cc/', + 'login': 'https://alpharatio.cc/login.php', + 'login_check': 'https://alpharatio.cc/inbox.php', + 'detail': 'https://alpharatio.cc/torrents.php?torrentid=%s', + 'search': 'https://alpharatio.cc/torrents.php?action=advanced&searchstr=%s&scene=%s&filter_cat[%d]=1', + 'download': 'https://alpharatio.cc/%s', + } + + http_time_between_calls = 1 # Seconds + login_fail_msg = ' attempts remaining.' 
+ + def _search(self, media, quality, results): + + url = self.urls['search'] % self.buildUrl(media, quality) + cleaned_url = url.replace('%3A', '') + data = self.getHTMLData(cleaned_url) + + if data: + html = BeautifulSoup(data) + + try: + result_table = html.find('table', attrs = {'id': 'torrent_table'}) + if not result_table: + return + + entries = result_table.find_all('tr', attrs = {'class': 'torrent'}) + for result in entries: + + link = result.find('a', attrs = {'dir': 'ltr'}) + url = result.find('a', attrs = {'title': 'Download'}) + tds = result.find_all('td') + + results.append({ + 'id': link['href'].replace('torrents.php?id=', '').split('&')[0], + 'name': link.contents[0], + 'url': self.urls['download'] % url['href'], + 'detail_url': self.urls['download'] % link['href'], + 'size': self.parseSize(tds[len(tds)-4].string), + 'seeders': tryInt(tds[len(tds)-2].string), + 'leechers': tryInt(tds[len(tds)-1].string), + }) + except: + log.error('Failed to parsing %s: %s', (self.getName(), traceback.format_exc())) + + + def getLoginParams(self): + return { + 'username': self.conf('username'), + 'password': self.conf('password'), + 'keeplogged': '1', + 'login': 'Login', + } + + def loginSuccess(self, output): + return 'logout.php' in output.lower() + + loginCheckSuccess = loginSuccess + + def getSceneOnly(self): + return '1' if self.conf('scene_only') else '' + + +config = [{ + 'name': 'alpharatio', + 'groups': [ + { + 'tab': 'searcher', + 'list': 'torrent_providers', + 'name': 'AlphaRatio', + 'description': 'AlphaRatio', + 'wizard': True, + 'icon': 
'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAACX0lEQVQ4jbWTX0hTURzHv+fu3umdV9GtOZ3pcllGBomJ9RCmkiWIEJUQET2EMqF86aFeegqLHgoio1ICScoieugPiBlFFmpROUjNIub+NKeba2rqvdvuPKeXDIcsgugHB378fj8+X37fcw5hjOFfgvtTc8o7mdveHWv0+YJ5iWb45SQWi2kc7olCnteoHCGUMqbpejBkO99rPDlW5rjV3FjZkmXU+3SiKK8EkOUVxj2+9bZOe8ebhZxSRTCIQmAES1oLQADKp4EIc8gRFr3t+/SNe0oLelatYM0zO56dqS3fmh4eXkoxIrWvAwXegLta8bymYyak9lyGR7d57eHHtOt7aNaQ0AORU8OEqlg0HURTnXi96cCaK0AYEW0l+MAoQoIp48PHke0JAYwyBkYhameUQ3vz7lTt3NRdKH0ajxgqQMJzAMdBkRVdYgAAEA71G2Z6MnOyvSmSJB/bFblN5DHEsosghf3zZduK+1fdQhyEcKitr+r0B2dMAyPOcmd02oxiC2jUjJaSwbPZpoLJhAA1Ci3hGURRlO0Of8nN9/MNUUXSkrQsFQ4meNORG6/G2O/jGXdZ044OKzg3z3r77TUre81tL1pxirLMWnsoMB00LtfjPLh67/OJH3xRMgiHb96JOCVbxbobRONBQNqScffJ6JE4E2VZFvv6BirbXpkboGcA4eGaDOV73G4LAFBKSWRhNsmqfnHCosG159Lxt++GdgC/XuLD3sH60/fdFxjJBNMDAAVZ8CNfVJxPLzbs/uqa2Lj/0stHkWSDFlwS4FIhRKei3a3VNeS//sa/iZ/B6hMIr7Fq4QAAAABJRU5ErkJggg==', + 'options': [ + { + 'name': 'enabled', + 'type': 'enabler', + 'default': False, + }, + { + 'name': 'username', + 'default': '', + }, + { + 'name': 'password', + 'default': '', + 'type': 'password', + }, + { + 'name': 'seed_ratio', + 'label': 'Seed ratio', + 'type': 'float', + 'default': 1, + 'description': 'Will not be (re)moved until this seed ratio is met.', + }, + { + 'name': 'seed_time', + 'label': 'Seed time', + 'type': 'int', + 'default': 40, + 'description': 'Will not be (re)moved until this seed time (in hours) is met.', + }, + { + 'name': 'scene_only', + 'type': 'bool', + 'default': False, + 'description': 'Only allow scene releases.' 
+ }, + { + 'name': 'extra_score', + 'advanced': True, + 'label': 'Extra Score', + 'type': 'int', + 'default': 0, + 'description': 'Starting score for each release found via this provider.', + } + ], + }, + ], +}] diff --git a/couchpotato/core/media/_base/providers/torrent/awesomehd.py b/couchpotato/core/media/_base/providers/torrent/awesomehd.py new file mode 100644 index 0000000000..5a24f517b5 --- /dev/null +++ b/couchpotato/core/media/_base/providers/torrent/awesomehd.py @@ -0,0 +1,154 @@ +import re +import traceback + +from bs4 import BeautifulSoup +from couchpotato.core.helpers.variable import tryInt, getIdentifier +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.torrent.base import TorrentProvider + + +log = CPLog(__name__) + + +class Base(TorrentProvider): + + urls = { + 'test': 'https://awesome-hd.me/', + 'detail': 'https://awesome-hd.me/torrents.php?torrentid=%s', + 'search': 'https://awesome-hd.me/searchapi.php?action=imdbsearch&passkey=%s&imdb=%s&internal=%s', + 'download': 'https://awesome-hd.me/torrents.php?action=download&id=%s&authkey=%s&torrent_pass=%s', + } + http_time_between_calls = 1 + login_fail_msg = 'Please check that you provided a valid API Key, username, and action.' 
+ + def _search(self, movie, quality, results): + + data = self.getHTMLData(self.urls['search'] % (self.conf('passkey'), getIdentifier(movie), self.conf('only_internal'))) + + if data: + if self.login_fail_msg in data: + self.disableAccount() + return + + try: + soup = BeautifulSoup(data) + + if soup.find('error'): + log.info(soup.find('error').get_text()) + return + + authkey = soup.find('authkey').get_text() + entries = soup.find_all('torrent') + + for entry in entries: + + torrentscore = 0 + torrent_id = entry.find('id').get_text() + name = entry.find('name').get_text() + year = entry.find('year').get_text() + releasegroup = entry.find('releasegroup').get_text() + resolution = entry.find('resolution').get_text() + encoding = entry.find('encoding').get_text() + freeleech = entry.find('freeleech').get_text() + media = entry.find('media').get_text() + audioformat = entry.find('audioformat').get_text() + + # skip audio channel only releases + if resolution == '': + continue + + torrent_desc = '%s.%s.%s.%s-%s' % (resolution, media, audioformat, encoding, releasegroup) + + if self.conf('prefer_internal') and freeleech in ['0.25', '0.50']: + torrentscore += 200 + + if encoding == 'x264' and self.conf('favor') in ['encode', 'both']: + torrentscore += 200 + elif re.search('Remux', encoding) and self.conf('favor') in ['remux', 'both']: + torrentscore += 200 + + name = re.sub(r'\W', '.', name) + name = re.sub(r'\.+', '.', name) + results.append({ + 'id': torrent_id, + 'name': '%s.%s.%s' % (name, year, torrent_desc), + 'url': self.urls['download'] % (torrent_id, authkey, self.conf('passkey')), + 'detail_url': self.urls['detail'] % torrent_id, + 'size': tryInt(entry.find('size').get_text()) / 1048576, + 'seeders': tryInt(entry.find('seeders').get_text()), + 'leechers': tryInt(entry.find('leechers').get_text()), + 'score': torrentscore + }) + + except: + log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc())) + + +config = [{ + 'name': 
'awesomehd', + 'groups': [ + { + 'tab': 'searcher', + 'list': 'torrent_providers', + 'name': 'Awesome-HD', + 'description': 'AHD', + 'wizard': True, + 'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAC+UlEQVR4AV1SO0y6dxQ9H4g8CoIoohZ5NA0aR2UgkYpNB5uocTSaLlrDblMH09Gt8d90r3YpJkanxjA4GGkbO7RNxSABq8jDGnkpD+UD5NV7Bxvbk9wvv+/3uPece66A/yEWi42FQqHVfD7/cbPZtIEglUpjOp3uZHR0dBvAn3gDIRqNgjE4OKj0+Xzf3NzcfD4wMCCjf5TLZbTbbajVatzf3+Pu7q5uNpt35ufnvwBQAScQRREEldfr9RWLxan+/n5YrVa+jFarhVfQQyQSCU4EhULhX15engEgSrjC0dHRVqlUmjQYDBgaGgKtuTqz4mTgIoVCASaTCX19fajVapOHh4dbFJBks9mxcDi8qtFoJEajkfVyJWi1WkxMTMDhcIAT8x6D7/Dd6+vr1fHx8TGp2+3+iqo5+YCzBwIBToK5ubl/mQwPDyMSibAs2Gw2UHNRrValz8/PDUk8Hv9EqVRCr9fj4uICTNflcqFer+Pg4AB7e3uoVCq8x9Rxfn6O7u5uqFQq8FspZXxHTekggByA3W4Hr9PpNDeRL3I1cMhkMrBrnZ2dyGQyvNYIs7OzVbJNPjIyAraLwYdcjR8wXl5eIJfLwRIFQQDLYkm3t7c1CdGPPT4+cpOImp4PODMeaK+n10As2jBbrHifHOjS6qAguVFimkqlwAMmIQnHV1dX4NDQhVwuhyZTV6pgIktzDzkkk0lEwhEEzs7ASQr5Ai4vL1nuccfCwsLO/v6+p9FoyJhF6ekJro/cPCzIZLNQa7rQoK77/SdgWWpKkCaJ5EB9aWnpe6nH40nRMBnJV4f5gw+FX3/5GX/8/htXRZdOzzqhJWn6nl6YbTZqqhrhULD16fT0d8FgcFtYW1vD5uamfGVl5cd4IjldKhZACdkJvKfWUANrxEaJV4hiGVaL1b+7653hXzwRZQr2X76xsfG1xWIRaZzbNPv/CdrjEL9cX/+WXFBSgEPgzxuwG3Yans9OT0+naBZMIJDNfzudzp8WFxd/APAX3uAf9WOTxOPLdosAAAAASUVORK5CYII=', + 'options': [ + { + 'name': 'enabled', + 'type': 'enabler', + 'default': False, + }, + { + 'name': 'passkey', + 'default': '', + }, + { + 'name': 'seed_ratio', + 'label': 'Seed ratio', + 'type': 'float', + 'default': 1, + 'description': 'Will not be (re)moved until this seed ratio is met.', + }, + { + 'name': 'seed_time', + 'label': 'Seed time', + 'type': 'int', + 'default': 40, + 'description': 'Will not be (re)moved until this seed time (in hours) is met.', + }, + { + 'name': 'only_internal', + 'advanced': True, + 'type': 'bool', + 'default': 1, + 'description': 'Only search for internal releases.' 
+ }, + { + 'name': 'prefer_internal', + 'advanced': True, + 'type': 'bool', + 'default': 1, + 'description': 'Favors internal releases over non-internal releases.' + }, + { + 'name': 'favor', + 'advanced': True, + 'default': 'both', + 'type': 'dropdown', + 'values': [('Encodes & Remuxes', 'both'), ('Encodes', 'encode'), ('Remuxes', 'remux'), ('None', 'none')], + 'description': 'Give extra scoring to encodes or remuxes.' + }, + { + 'name': 'extra_score', + 'advanced': True, + 'type': 'int', + 'default': 20, + 'description': 'Starting score for each release found via this provider.', + }, + ], + }, + ], +}] + diff --git a/couchpotato/core/media/_base/providers/torrent/base.py b/couchpotato/core/media/_base/providers/torrent/base.py new file mode 100644 index 0000000000..9f5f289067 --- /dev/null +++ b/couchpotato/core/media/_base/providers/torrent/base.py @@ -0,0 +1,78 @@ +import time +import traceback + +from couchpotato.core.helpers.variable import getImdb, md5, cleanHost +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.base import YarrProvider +from couchpotato.environment import Env + + +log = CPLog(__name__) + + +class TorrentProvider(YarrProvider): + + protocol = 'torrent' + + proxy_domain = None + proxy_list = [] + + def imdbMatch(self, url, imdbId): + if getImdb(url) == imdbId: + return True + + if url[:4] == 'http': + try: + cache_key = md5(url) + data = self.getCache(cache_key, url) + except IOError: + log.error('Failed to open %s.', url) + return False + + return getImdb(data) == imdbId + + return False + + def getDomain(self, url = ''): + + forced_domain = self.conf('domain') + if forced_domain: + return cleanHost(forced_domain).rstrip('/') + url + + if not self.proxy_domain: + for proxy in self.proxy_list: + + prop_name = 'proxy.%s' % proxy + last_check = float(Env.prop(prop_name, default = 0)) + + if last_check > time.time() - 86400: + continue + + data = '' + try: + data = self.urlopen(proxy, timeout = 3, 
show_error = False) + except: + log.debug('Failed %s proxy %s: %s', (self.getName(), proxy, traceback.format_exc())) + + if self.correctProxy(data): + log.debug('Using proxy for %s: %s', (self.getName(), proxy)) + self.proxy_domain = proxy + break + + Env.prop(prop_name, time.time()) + + if not self.proxy_domain: + log.error('No %s proxies left, please add one in settings, or let us know which one to add on the forum.', self.getName()) + return None + + return cleanHost(self.proxy_domain).rstrip('/') + url + + def correctProxy(self, data): + return True + + +class TorrentMagnetProvider(TorrentProvider): + + protocol = 'torrent_magnet' + + download = None diff --git a/couchpotato/core/media/_base/providers/torrent/bithdtv.py b/couchpotato/core/media/_base/providers/torrent/bithdtv.py new file mode 100644 index 0000000000..a3eb1d9f02 --- /dev/null +++ b/couchpotato/core/media/_base/providers/torrent/bithdtv.py @@ -0,0 +1,155 @@ +import traceback + +from bs4 import BeautifulSoup +from couchpotato.core.helpers.encoding import toUnicode +from couchpotato.core.helpers.variable import tryInt +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.torrent.base import TorrentProvider + + +log = CPLog(__name__) + + +class Base(TorrentProvider): + + urls = { + 'detail': 'https://www.bit-hdtv.com/details.php?id=%s', + 'search': 'https://www.bit-hdtv.com/torrents.php?', + 'download': 'https://www.bit-hdtv.com/download.php?id=%s', + } + + # Searches for movies only - BiT-HDTV's subcategory and resolution search filters appear to be broken + http_time_between_calls = 1 # Seconds + login_fail_msg = 'Username or password incorrect.' 
+ + def _search(self, media, quality, results): + + query = self.buildUrl(media, quality) + + url = "%s&%s" % (self.urls['search'], query) + + data = self.getHTMLData(url, headers = self.getRequestHeaders()) + + if data: + # Remove BiT-HDTV's output garbage so outdated BS4 versions successfully parse the HTML + split_data = data.partition('-->') + if '## SELECT COUNT(' in split_data[0]: + data = split_data[2] + + html = BeautifulSoup(data, 'html.parser') + + try: + result_tables = html.find_all('table', attrs = {'width': '800', 'class': ''}) + if result_tables is None: + return + + # Take first result + result_table = result_tables[0] + + if result_table is None: + return + + entries = result_table.find_all('tr') + for result in entries[1:]: + + cells = result.find_all('td') + link = cells[2].find('a') + torrent_id = link['href'].split('id=')[1] + + results.append({ + 'id': torrent_id, + 'name': link.contents[0].get_text(), + 'url': self.urls['download'] % torrent_id, + 'detail_url': self.urls['detail'] % torrent_id, + 'size': self.parseSize(cells[6].get_text()), + 'seeders': tryInt(cells[8].string), + 'leechers': tryInt(cells[9].string), + 'get_more_info': self.getMoreInfo, + }) + + except: + log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc())) + + def getRequestHeaders(self): + cookies = 'h_sl={};h_sp={};h_su={}'.format(self.conf('cookiesettingsl') or '', self.conf('cookiesettingsp') or '', self.conf('cookiesettingsu') or '') + return { + 'Cookie': cookies + } + + def getMoreInfo(self, item): + full_description = self.getCache('bithdtv.%s' % item['id'], item['detail_url'], cache_timeout = 25920000) + html = BeautifulSoup(full_description) + nfo_pre = html.find('table', attrs = {'class': 'detail'}) + description = toUnicode(nfo_pre.text) if nfo_pre else '' + + item['description'] = description + return item + + def download(self, url = '', nzb_id = ''): + try: + return self.urlopen(url, headers=self.getRequestHeaders()) + 
except: + log.error('Failed getting release from %s: %s', (self.getName(), traceback.format_exc())) + + return 'try_next' + +config = [{ + 'name': 'bithdtv', + 'groups': [ + { + 'tab': 'searcher', + 'list': 'torrent_providers', + 'name': 'BiT-HDTV', + 'description': 'BiT-HDTV', + 'wizard': True, + 'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAIAAACQkWg2AAAABnRSTlMAAAAAAABupgeRAAABMklEQVR4AZ3Qu0ojcQCF8W9MJcQbJNgEEQUbQVIqWgnaWfkIvoCgggixEAmIhRtY2GV3w7KwU61B0EYIxmiw0YCik84ipaCuc0nmP5dcjIUgOjqDvxf4OAdf9mnMLcUJyPyGSCP+YRdC+Kp8iagJKhuS+InYRhTGgDbeV2uEMand4ZRxizjXHQEimxhraAnUr73BNqQxMiNeV2SwcjTLEVtb4Zl10mXutvOWm2otw5Sxz6TGTbdd6ncuYvVLXAXrvM+ruyBpy1S3JLGDfUQ1O6jn5vTsrJXvqSt4UNfj6vxTRPxBHER5QeSirhLGk/5rWN+ffB1XZuxjnDy1q87m7TS+xOGA+Iv4gfkbaw+nOMXHDHnITGEk0VfRFnn4Po4vNYm6RGukmggR0L08+l+e4HMeASo/i6AJUjLgAAAAAElFTkSuQmCC', + 'options': [ + { + 'name': 'enabled', + 'type': 'enabler', + 'default': False, + }, + { + 'name': 'cookiesettingsl', + 'label': 'Cookies (h_sl)', + 'default': '', + 'description': 'Cookie h_sl from session', + }, + { + 'name': 'cookiesettingsp', + 'label': 'Cookies (h_sp)', + 'default': '', + 'description': 'Cookie h_sp from session', + }, + { + 'name': 'cookiesettingsu', + 'label': 'Cookies (h_su)', + 'default': '', + 'description': 'Cookie h_su from session', + }, + { + 'name': 'seed_ratio', + 'label': 'Seed ratio', + 'type': 'float', + 'default': 1, + 'description': 'Will not be (re)moved until this seed ratio is met.', + }, + { + 'name': 'seed_time', + 'label': 'Seed time', + 'type': 'int', + 'default': 40, + 'description': 'Will not be (re)moved until this seed time (in hours) is met.', + }, + { + 'name': 'extra_score', + 'advanced': True, + 'label': 'Extra Score', + 'type': 'int', + 'default': 20, + 'description': 'Starting score for each release found via this provider.', + } + ], + }, + ], +}] diff --git a/couchpotato/core/media/_base/providers/torrent/bitsoup.py b/couchpotato/core/media/_base/providers/torrent/bitsoup.py new file mode 100644 index 
0000000000..3736f107ed --- /dev/null +++ b/couchpotato/core/media/_base/providers/torrent/bitsoup.py @@ -0,0 +1,138 @@ +import traceback + +from bs4 import BeautifulSoup, SoupStrainer +from couchpotato.core.helpers.variable import tryInt +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.torrent.base import TorrentProvider + + +log = CPLog(__name__) + + +class Base(TorrentProvider): + + urls = { + 'test': 'https://www.bitsoup.me/', + 'login': 'https://www.bitsoup.me/takelogin.php', + 'login_check': 'https://www.bitsoup.me/my.php', + 'search': 'https://www.bitsoup.me/browse.php?%s', + 'baseurl': 'https://www.bitsoup.me/%s', + } + + http_time_between_calls = 1 # Seconds + login_fail_msg = 'Login failed!' + only_tables_tags = SoupStrainer('table') + + torrent_name_cell = 1 + torrent_download_cell = 2 + + def _searchOnTitle(self, title, movie, quality, results): + + url = self.urls['search'] % self.buildUrl(title, movie, quality) + data = self.getHTMLData(url) + + if data: + html = BeautifulSoup(data, 'html.parser', parse_only = self.only_tables_tags) + + try: + result_table = html.find('table', attrs = {'class': 'koptekst'}) + if not result_table or 'nothing found!' 
in data.lower(): + return + + entries = result_table.find_all('tr') + for result in entries[1:]: + + all_cells = result.find_all('td') + + torrent = all_cells[self.torrent_name_cell].find('a') + download = all_cells[self.torrent_download_cell].find('a') + + torrent_id = torrent['href'] + torrent_id = torrent_id.replace('details.php?id=', '') + torrent_id = torrent_id.replace('&hit=1', '') + + torrent_name = torrent.getText() + + torrent_size = self.parseSize(all_cells[8].getText()) + torrent_seeders = tryInt(all_cells[10].getText()) + torrent_leechers = tryInt(all_cells[11].getText()) + torrent_url = self.urls['baseurl'] % download['href'] + torrent_detail_url = self.urls['baseurl'] % torrent['href'] + + results.append({ + 'id': torrent_id, + 'name': torrent_name, + 'size': torrent_size, + 'seeders': torrent_seeders, + 'leechers': torrent_leechers, + 'url': torrent_url, + 'detail_url': torrent_detail_url, + }) + + except: + log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc())) + + def getLoginParams(self): + return { + 'username': self.conf('username'), + 'password': self.conf('password'), + 'ssl': 'yes', + } + + def loginSuccess(self, output): + return 'logout.php' in output.lower() + + loginCheckSuccess = loginSuccess + + +config = [{ + 'name': 'bitsoup', + 'groups': [ + { + 'tab': 'searcher', + 'list': 'torrent_providers', + 'name': 'Bitsoup', + 'description': 'Bitsoup', + 'wizard': True, + 'icon': 
'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAIAAACQkWg2AAAB8ElEQVR4AbWSS2sTURiGz3euk0mswaE37HhNhIrajQheFgF3rgR/lAt/gOBCXNZlo6AbqfUWRVCxi04wqUnTRibpJLaJzdzOOZ6WUumyC5/VHOb9eN/FA91uFx0FjI4IPfgiGLTWH73tn348GKmN7ijD0d2b41fO5qJEaX24AWNIUrVQCTTJ3Llx6vbV6Vtzk7Gi9+ebi996guFDDYAQAVj4FExP5qdOZB49W62t/zH3hECcwsPnbWeMXz6Xi2K1f0ApeK3hMCHHbP5gvvoriBgFAAQJEAxhjJ4u+YWTNsVI6b1JgtPWZkoIefKy4fcii2OTw2BABs7wj3bYDlLL4rvjGWOdTser1j5Xf7c3Q/MbHQYApxItvnm31mhQQ71eX2vUB76/vsWB2hg0QuogrMwLIG8P3InM2/eVGXeDViqVwWB79vRU2lgJYmdHcgXCTAXQFJTN5HguvDCR2Hxsxe8EvT54nlcul5vNpqDIEgwRQanAhAAABgRIyiQcjpIkkTOuWyqVoN/vSylX67XXH74uV1vHRUyxxFqbLBCSmBpiXSq6xcL5QrGYzWZ3XQIAwdlOJB+/aL764ucdmncYs0WsCI7kvTnn+qyDMEnTVCn1Tz5KsBFg6fvWcmsUAcnYNC/g2hnromvvqbHvxv+39S+MX+bWkFXwAgAAAABJRU5ErkJggg==', + 'options': [ + { + 'name': 'enabled', + 'type': 'enabler', + 'default': False, + }, + { + 'name': 'username', + 'default': '', + }, + { + 'name': 'password', + 'default': '', + 'type': 'password', + }, + { + 'name': 'seed_ratio', + 'label': 'Seed ratio', + 'type': 'float', + 'default': 1, + 'description': 'Will not be (re)moved until this seed ratio is met.', + }, + { + 'name': 'seed_time', + 'label': 'Seed time', + 'type': 'int', + 'default': 40, + 'description': 'Will not be (re)moved until this seed time (in hours) is met.', + }, + { + 'name': 'extra_score', + 'advanced': True, + 'label': 'Extra Score', + 'type': 'int', + 'default': 20, + 'description': 'Starting score for each release found via this provider.', + } + ], + }, + ], +}] diff --git a/couchpotato/core/media/_base/providers/torrent/cpasbien.py b/couchpotato/core/media/_base/providers/torrent/cpasbien.py new file mode 100644 index 0000000000..104ff708e6 --- /dev/null +++ b/couchpotato/core/media/_base/providers/torrent/cpasbien.py @@ -0,0 +1,261 @@ +from bs4 import BeautifulSoup +from couchpotato.core.helpers.variable import getTitle, tryInt +from couchpotato.core.logger import CPLog +from couchpotato.core.helpers.encoding import simplifyString, tryUrlencode +from 
couchpotato.core.media._base.providers.torrent.base import TorrentProvider +import cookielib +import re +import traceback +import urllib +import urllib2 +import unicodedata +from couchpotato.core.helpers import namer_check + +log = CPLog(__name__) + + +class Base(TorrentProvider): + + urls = { + 'test': 'http://www.cpasbien.ch/', + 'search': 'http://www.cpasbien.ch/recherche/', + } + + http_time_between_calls = 1 #seconds + cat_backup_id = None + + class NotLoggedInHTTPError(urllib2.HTTPError): + def __init__(self, url, code, msg, headers, fp): + urllib2.HTTPError.__init__(self, url, code, msg, headers, fp) + + class PTPHTTPRedirectHandler(urllib2.HTTPRedirectHandler): + def http_error_302(self, req, fp, code, msg, headers): + log.debug("302 detected; redirected to %s" % headers['Location']) + if (headers['Location'] != 'login.php'): + return urllib2.HTTPRedirectHandler.http_error_302(self, req, fp, code, msg, headers) + else: + raise Base.NotLoggedInHTTPError(req.get_full_url(), code, msg, headers, fp) + + def _search(self, movie, quality, results): + + # Cookie login + if not self.last_login_check and not self.login(): + return + + + TitleStringReal = (getTitle(movie['info']) + ' ' + simplifyString(quality['identifier'] )).replace('-',' ').replace(' ',' ').replace(' ',' ').replace(' ',' ').encode("utf8") + + URL = (self.urls['search']).encode('UTF8') + URL=unicodedata.normalize('NFD',unicode(URL,"utf8","replace")) + URL=URL.encode('ascii','ignore') + URL = urllib2.quote(URL.encode('utf8'), ":/?=") + + values = { + 'champ_recherche' : TitleStringReal + } + + data_tmp = urllib.urlencode(values) + req = urllib2.Request(URL, data_tmp, headers={'User-Agent' : "Mozilla/5.0"} ) + + data = urllib2.urlopen(req ) + + id = 1000 + + if data: + + try: + html = BeautifulSoup(data) + lin=0 + erlin=0 + resultdiv=[] + while erlin==0: + try: + classlin='ligne'+str(lin) + resultlin=html.findAll(attrs = {'class' : [classlin]}) + if resultlin: + for ele in resultlin: + 
resultdiv.append(ele) + lin+=1 + else: + erlin=1 + except: + erlin=1 + for result in resultdiv: + + try: + + new = {} + name = result.findAll(attrs = {'class' : ["titre"]})[0].text + testname=namer_check.correctName(name,movie) + if testname==0: + continue + detail_url = result.find("a")['href'] + tmp = detail_url.split('/')[-1].replace('.html','.torrent') + url_download = ('http://www.cpasbien.cm/telechargement/%s' % tmp) + size = result.findAll(attrs = {'class' : ["poid"]})[0].text + seeder = result.findAll(attrs = {'class' : ["seed_ok"]})[0].text + leecher = result.findAll(attrs = {'class' : ["down"]})[0].text + age = '1' + + verify = getTitle(movie['info']).split(' ') + + add = 1 + + for verify_unit in verify: + if (name.lower().find(verify_unit.lower()) == -1) : + add = 0 + + def extra_check(item): + return True + + if add == 1: + + new['id'] = id + new['name'] = name.strip() + new['url'] = url_download + new['detail_url'] = detail_url + + new['size'] = self.parseSize(size) + new['age'] = self.ageToDays(age) + new['seeders'] = tryInt(seeder) + new['leechers'] = tryInt(leecher) + new['extra_check'] = extra_check + new['download'] = self.loginDownload + + #new['score'] = fireEvent('score.calculate', new, movie, single = True) + + #log.error('score') + #log.error(new['score']) + + results.append(new) + + id = id+1 + + except: + log.error('Failed parsing cPASbien: %s', traceback.format_exc()) + + except AttributeError: + log.debug('No search results found.') + else: + log.debug('No search results found.') + + def ageToDays(self, age_str): + age = 0 + age_str = age_str.replace(' ', ' ') + + regex = '(\d*.?\d+).(sec|heure|jour|semaine|mois|ans)+' + matches = re.findall(regex, age_str) + for match in matches: + nr, size = match + mult = 1 + if size == 'semaine': + mult = 7 + elif size == 'mois': + mult = 30.5 + elif size == 'ans': + mult = 365 + + age += tryInt(nr) * mult + + return tryInt(age) + + def login(self): + + cookieprocessor = 
urllib2.HTTPCookieProcessor(cookielib.CookieJar()) + opener = urllib2.build_opener(cookieprocessor, Base.PTPHTTPRedirectHandler()) + opener.addheaders = [ + ('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko)'), + ('Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'), + ('Accept-Language', 'fr-fr,fr;q=0.5'), + ('Accept-Charset', 'ISO-8859-1,utf-8;q=0.7,*;q=0.7'), + ('Keep-Alive', '115'), + ('Connection', 'keep-alive'), + ('Cache-Control', 'max-age=0'), + ] + + try: + response = opener.open('http://www.cpasbien.cm', tryUrlencode({'url': '/'})) + except urllib2.URLError as e: + log.error('Login to cPASbien failed: %s' % e) + return False + + if response.getcode() == 200: + log.debug('Login HTTP cPASbien status 200; seems successful') + self.last_login_check = opener + return True + else: + log.error('Login to cPASbien failed: returned code %d' % response.getcode()) + return False + + + def loginDownload(self, url = '', nzb_id = ''): + values = { + 'url' : '/' + } + data_tmp = urllib.urlencode(values) + req = urllib2.Request(url, data_tmp, headers={'User-Agent' : "Mozilla/5.0"} ) + + try: + if not self.last_login_check and not self.login(): + log.error('Failed downloading from %s', self.getName()) + return urllib2.urlopen(req).read() + except: + log.error('Failed downloading from %s: %s', (self.getName(), traceback.format_exc())) + + def download(self, url = '', nzb_id = ''): + + if not self.last_login_check and not self.login(): + return + + values = { + 'url' : '/' + } + data_tmp = urllib.urlencode(values) + req = urllib2.Request(url, data_tmp, headers={'User-Agent' : "Mozilla/5.0"} ) + + try: + return urllib2.urlopen(req).read() + except: + log.error('Failed downloading from %s: %s', (self.getName(), traceback.format_exc())) +config = [{ + 'name': 'cpasbien', + 'groups': [ + { + 'tab': 'searcher', + 'list': 'torrent_providers', + 'name': 'cpasbien', + 'description': 'See cPASbien', + 'icon': 
'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAABHNCSVQICAgIfAhkiAAAAgZJREFUOI2lkj9oE2EYxn93l/Quf440gXg4lBoEMd2MDuLSkk0R6hCnuqjUoR0c7FDo4Ca0CDo7uRRBqEMDXSLUUqRDiZM1NMEI1VKTlDZpUppccvc5nJp/KooPfMPH+z3P+zzv+8F/Quq8XIVEEOY0kASIzpoLlBKUV+CuCblfCjyF/P3V1Qi6jrCs7k4eD/X1dS5NTy9tQaJD2MFDkA23W8UwQFGQRJcB0DS0cBg/DPY4a0OVZcHeHihKf1ifD6pVfGD/VmBAUeDwEGQZLAskCVQV6nVYW+M4lSLQo9stoKpQLoNtO2QhYHsbkkmOczm+AP5eBy/BfwRDn8GHJLkpFp3utRpkMpDLwckJvlCIM9Uqg6YZeAAj58E1CVlXCaaigcCjsWhU8Xq9UCo5lisVx4FhODFkGbdpMtlqXa4IsVUHYkLcVlbg3ddGo3AzErl2emLCGaCmwcAAuL4ntCxoNpFsG8O2odlkXojF17CgAK2PsJna2Xk/ViyOh0dHXWhaewaW1T6mSb5a5V6rtbAMU4D5c18FyCzu7i5fyWZvDMfjOh4PNBpd5A/5vLheq93ZhMc/eF0Lr0NhaX8/eS6djo/EYqfQdUekUuHNxsZR4uDg1id40f9J+qE/CwTeitlZIWZmxKtQqOSFi39D7IQy5/c/fxIMpoGhfyUDMAwXzsL4n958A9jfxsJ8X4WQAAAAAElFTkSuQmCC', + 'wizard': True, + 'options': [ + { + 'name': 'enabled', + 'type': 'enabler', + 'default': False, + }, + { + 'name': 'seed_ratio', + 'label': 'Seed ratio', + 'type': 'float', + 'default': 1, + 'description': 'Will not be (re)moved until this seed ratio is met.', + }, + { + 'name': 'seed_time', + 'label': 'Seed time', + 'type': 'int', + 'default': 40, + 'description': 'Will not be (re)moved until this seed time (in hours) is met.', + }, + { + 'name': 'extra_score', + 'advanced': True, + 'label': 'Extra Score', + 'type': 'int', + 'default': 0, + 'description': 'Starting score for each release found via this provider.', + } + ], + }, + ], +}] diff --git a/couchpotato/core/media/_base/providers/torrent/hd4free.py b/couchpotato/core/media/_base/providers/torrent/hd4free.py new file mode 100644 index 0000000000..dbffba58f5 --- /dev/null +++ b/couchpotato/core/media/_base/providers/torrent/hd4free.py @@ -0,0 +1,140 @@ + +import re +import json +import traceback + +from couchpotato.core.helpers.variable import tryInt, getIdentifier +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.torrent.base import TorrentProvider + + +log = CPLog(__name__) + + +class 
Base(TorrentProvider): + + urls = { + 'test': 'https://hd4free.xyz/', + 'detail': 'https://hd4free.xyz/details.php?id=%s', + 'search': 'https://hd4free.xyz/searchapi.php?apikey=%s&username=%s&imdbid=%s&internal=%s', + 'download': 'https://hd4free.xyz/download.php?torrent=%s&torrent_pass=%s', + } + + http_time_between_calls = 1 # Seconds + login_fail_msg = 'Your apikey is not valid! Go to HD4Free and reset your apikey.' + + def _search(self, movie, quality, results): + data = self.getJsonData(self.urls['search'] % (self.conf('apikey'), self.conf('username'), getIdentifier(movie), self.conf('internal_only'))) + + if data: + if 'error' in data: + if self.login_fail_msg in data['error']: # Check for login failure + self.disableAccount() + else: + log.error('%s returned an error (possible rate limit): %s', (self.getName(), data['error'])) + return + + try: + #for result in data[]: + for key, result in data.iteritems(): + if tryInt(result['total_results']) == 0: + return + torrentscore = self.conf('extra_score') + releasegroup = result['releasegroup'] + resolution = result['resolution'] + encoding = result['encoding'] + freeleech = tryInt(result['freeleech']) + seeders = tryInt(result['seeders']) + torrent_desc = '/ %s / %s / %s / %s seeders' % (releasegroup, resolution, encoding, seeders) + + if freeleech > 0 and self.conf('prefer_internal'): + torrent_desc += '/ Internal' + torrentscore += 200 + + if seeders == 0: + torrentscore = 0 + + name = result['release_name'] + year = tryInt(result['year']) + + results.append({ + 'id': tryInt(result['torrentid']), + 'name': re.sub('[^A-Za-z0-9\-_ \(\).]+', '', '%s (%s) %s' % (name, year, torrent_desc)), + 'url': self.urls['download'] % (result['torrentid'], result['torrentpass']), + 'detail_url': self.urls['detail'] % result['torrentid'], + 'size': tryInt(result['size']), + 'seeders': tryInt(result['seeders']), + 'leechers': tryInt(result['leechers']), + 'age': tryInt(result['age']), + 'score': torrentscore + }) + except: + 
log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc())) +config = [{ + 'name': 'hd4free', + 'groups': [ + { + 'tab': 'searcher', + 'list': 'torrent_providers', + 'name': 'HD4Free', + 'wizard': True, + 'description': 'HD4Free', + 'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAMAAAAoLQ9TAAABX1BMVEUF6nsH33cJ03EJ1XIJ1nMKzXIKz28Lym4MxGsMxWsMx2wNvmgNv2kNwGkNwWwOuGgOuWYOuWcOumcOu2cOvmgPtWQPtmUPt2UPt2YQr2IQsGIQsGMQsmMQs2QRqmARq2ARrmERrmISpV4SpmASp14SqF8ToFsToFwToVwTo10TpV0UnFoUn1sVllcVmFgWkFUWklYXjVQXjlMXkFUYh1EYilIYi1MZhlEafk0af04agE4agU4beEobeUsbe0wcdUkeaUQebUYfZEMfZ0QgX0AgYEAgYUEhWj4iVz0iWD0jTzkkSzcmQTMmQzQnPTInPjInPzIoNy8oOC8oODAoOTAoOjApMi0pNC4pNS4qLCoqLSsqLisqMCwrJygrKCgrKCkrKSkrKikrKiorKyosIyYsIycsJCcsJScsJigtHyUuGCIuGiMuGyMuHCMuHCQvEyAvFSEvFiEvFyE0ABU0ABY5lYz4AAAA3ElEQVR4AWNIQAMMiYmJCYkIkMCQnpKWkZ4KBGlARlpaLEOor194kI+Pj6+PT0CET0AYg46Alr22NDeHkBinnq6SkitDrolDgYtaapajdpGppoFfGkMhv2GxE0uuPwNfsk6mhHMOQ54isxmbUJKCtWx+tIZQcDpDtqSol7qIMqsRu3dIhJxxFkOBoF2JG5O7lSqjh5S/tkkWQ5SBTbqnfkymv2WGLa95YCSDhZiMvKIwj4GJCpesuDivK0N6VFRUYlRyfHJUchQQJDMkxsfHJcTHAxEIxMVj+BZDAACjwkqhYgsTAAAAAABJRU5ErkJggg==', + 'options': [ + { + 'name': 'enabled', + 'type': 'enabler', + 'default': False, + }, + { + 'name': 'username', + 'default': '', + 'description': 'Enter your site username.', + }, + { + 'name': 'apikey', + 'default': '', + 'label': 'API Key', + 'description': 'Enter your site api key. This can be found on Profile Security', + }, + { + 'name': 'seed_ratio', + 'label': 'Seed ratio', + 'type': 'float', + 'default': 0, + 'description': 'Will not be (re)moved until this seed ratio is met. HD4Free minimum is 1:1.', + }, + { + 'name': 'seed_time', + 'label': 'Seed time', + 'type': 'int', + 'default': 0, + 'description': 'Will not be (re)moved until this seed time (in hours) is met. 
HD4Free minimum is 72 hours.', + }, + { + 'name': 'prefer_internal', + 'advanced': True, + 'type': 'bool', + 'default': 1, + 'description': 'Favors internal releases over non-internal releases.', + }, + { + 'name': 'internal_only', + 'advanced': True, + 'label': 'Internal Only', + 'type': 'bool', + 'default': False, + 'description': 'Only download releases marked as HD4Free internal', + }, + { + 'name': 'extra_score', + 'advanced': True, + 'label': 'Extra Score', + 'type': 'int', + 'default': 0, + 'description': 'Starting score for each release found via this provider.', + } + ], + }, + ], +}] diff --git a/couchpotato/core/media/_base/providers/torrent/hdbits.py b/couchpotato/core/media/_base/providers/torrent/hdbits.py new file mode 100644 index 0000000000..ccb429329e --- /dev/null +++ b/couchpotato/core/media/_base/providers/torrent/hdbits.py @@ -0,0 +1,131 @@ +import re +import json +import traceback + +from couchpotato.core.helpers.variable import tryInt, getIdentifier +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.torrent.base import TorrentProvider + + +log = CPLog(__name__) + + +class Base(TorrentProvider): + + urls = { + 'test': 'https://hdbits.org/', + 'detail': 'https://hdbits.org/details.php?id=%s', + 'download': 'https://hdbits.org/download.php?id=%s&passkey=%s', + 'api': 'https://hdbits.org/api/torrents' + } + + http_time_between_calls = 1 # Seconds + login_fail_msg = 'Invalid authentication credentials' + + def _post_query(self, **params): + + post_data = { + 'username': self.conf('username'), + 'passkey': self.conf('passkey') + } + post_data.update(params) + + if self.conf('internal_only'): + post_data.update({'origin': [1]}) + + try: + result = self.getJsonData(self.urls['api'], data = json.dumps(post_data)) + + if result: + if result['status'] != 0: + if self.login_fail_msg in result['message']: # Check for login failure + self.disableAccount() + return + log.error('Error searching hdbits: %s' % 
result['message']) + else: + return result['data'] + except: + pass + + return None + + def _search(self, movie, quality, results): + + match = re.match(r'tt(\d{7})', getIdentifier(movie)) + + data = self._post_query(imdb = {'id': match.group(1)}) + + if data: + try: + for result in data: + results.append({ + 'id': result['id'], + 'name': result['name'], + 'url': self.urls['download'] % (result['id'], self.conf('passkey')), + 'detail_url': self.urls['detail'] % result['id'], + 'size': tryInt(result['size']) / 1024 / 1024, + 'seeders': tryInt(result['seeders']), + 'leechers': tryInt(result['leechers']) + }) + except: + log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc())) + + +config = [{ + 'name': 'hdbits', + 'groups': [ + { + 'tab': 'searcher', + 'list': 'torrent_providers', + 'name': 'HDBits', + 'wizard': True, + 'description': 'HDBits', + 'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAIAAACQkWg2AAABi0lEQVR4AZWSzUsbQRjGdyabTcvSNPTSHlpQQeMHJApC8CJRvHgQQU969+LJP8G7f4N3DwpeFRQvRr0EKaUl0ATSpkigUNFsMl/r9NmZLCEHA/nNO5PfvMPDm0DI6fV3ZxiolEICe1oZCBVCCmBPKwOh2ErKBHGE4KYEXBpSLkUlqO4LcM7f+6nVhRnOhSkOz/hexk+tL+YL0yPF2YmN4tynD++4gTLGkNNac9YFLoREBR1+cnF3dFY6v/m6PD+FaXiNJtgA4xYbABxiGrz6+6HWaI5/+Qh37YS0/3Znc8UxwNGBIIBX22z+/ZdJ+4wzyjpR4PEpODg8tgUXBv2iWUzSpa12B0IR6n6lvt8Aek2lZHb084+fdRNgrwY8z81PjhVy2d2ttUrtV/lbBa+JXGEpDMPnoF2tN1QYRqVUtf6nFbThb7wk7le395elcqhASLb39okDiHY00VCtCTEHwSiH4AI0lkOiT1dwMeSfT3SRxiQWNO7Zwj1egkoVIQFMKvSiC3bcjXq9Jf8DcDIRT3hh10kAAAAASUVORK5CYII=', + 'options': [ + { + 'name': 'enabled', + 'type': 'enabler', + 'default': False, + }, + { + 'name': 'username', + 'default': '', + }, + { + 'name': 'passkey', + 'default': '', + }, + { + 'name': 'seed_ratio', + 'label': 'Seed ratio', + 'type': 'float', + 'default': 1, + 'description': 'Will not be (re)moved until this seed ratio is met.', + }, + { + 'name': 'seed_time', + 'label': 'Seed time', + 'type': 'int', + 'default': 40, + 'description': 'Will not be (re)moved until this seed time 
(in hours) is met.', + }, + { + 'name': 'extra_score', + 'advanced': True, + 'label': 'Extra Score', + 'type': 'int', + 'default': 0, + 'description': 'Starting score for each release found via this provider.', + }, + { + 'name': 'internal_only', + 'advanced': True, + 'label': 'Internal Only', + 'type': 'bool', + 'default': False, + 'description': 'Only download releases marked as HDBits internal' + } + ], + }, + ], +}] diff --git a/couchpotato/core/media/_base/providers/torrent/hdonly.py b/couchpotato/core/media/_base/providers/torrent/hdonly.py new file mode 100644 index 0000000000..b069b3a705 --- /dev/null +++ b/couchpotato/core/media/_base/providers/torrent/hdonly.py @@ -0,0 +1,181 @@ +О╩©import htmlentitydefs +import json +import re +import unicodedata +import urllib +import time +import traceback + +from couchpotato.core.helpers.encoding import tryUrlencode +from couchpotato.core.helpers.variable import getTitle, tryInt, mergeDicts, getIdentifier +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.torrent.base import TorrentProvider +from dateutil.parser import parse +import six +from HTMLParser import HTMLParser + + +log = CPLog(__name__) + + +class Base(TorrentProvider): + + urls = { + 'domain': 'https://hd-only.org', + 'detail': 'https://hd-only.org/ajax.php?action=torrent&id=%s', + 'detailLink': 'https://hd-only.org/torrents.php?id=%s&torrentid=%s', + 'torrent': 'https://hd-only.org/torrents.php?action=download&id=%s&authkey=%s&torrent_pass=%s', + 'login': 'https://hd-only.org/login.php', + 'login_check': 'https://hd-only.org/login.php', + 'search': 'https://hd-only.org/ajax.php?action=browse&searchstr=%s', + 'index': 'https://hd-only.org/ajax.php?action=index' + } + + http_time_between_calls = 2 + + def _search(self, media, quality, results): + + h = HTMLParser() + + indexResponse = self.getJsonData(self.urls['index']) + + authkey = indexResponse['response']['authkey'] + passkey = indexResponse['response']['passkey'] + 
+ title = media['title'] + + TitleStringReal = str(title.encode("latin-1").replace('-',' ')) + + frTitle = self.getFrenchTitle(TitleStringReal) + if frTitle is None: + frTitle = TitleStringReal + + url = self.urls['search'] % tryUrlencode(frTitle) + data = self.getJsonData(url) + + if data['status'] == 'success' and len(data['response']['results']) > 0: + groupId = data[u'response'][u'results'][0][u'groupId'] + name = data['response']['results'][0]['groupName'].upper() + splittedReleaseName = re.split('(\.[0-9]{4}\.)', name, flags=re.IGNORECASE) + cleanedReleaseName = ''.join(splittedReleaseName) + + match = re.compile(ur"[\w]+", re.UNICODE) + nameSplit = ''.join(match.findall(cleanedReleaseName)) + titleSplit = ''.join(match.findall(frTitle.upper())) + + if titleSplit == nameSplit: # and self.matchLanguage(media['info']['languages'], re.split('[\. ]', splittedReleaseName[-1])): + for torrent in data['response']['results'][0]['torrents']: + + detail_url = self.urls['detail'] % torrent['torrentId'] + if not self.getJsonData(detail_url)['response']['torrent']['filePath']: + detail = self.getJsonData(detail_url)['response']['torrent']['fileList'].lower() + else: + detail = self.getJsonData(detail_url)['response']['torrent']['filePath'].lower() + + detailName = h.unescape(detail) + + results.append({ + 'id': torrent['torrentId'], + 'name': detailName, #name + '.' + torrent['encoding'] + '.' + torrent['media'] + '.' 
+ torrent['format'], + 'Source': torrent['media'], + 'Resolution': torrent['encoding'], + 'url': self.urls['torrent'] % (torrent['torrentId'], authkey, passkey), + 'detail_url': self.urls['detailLink'] % (groupId, torrent['torrentId']), + 'date': tryInt(time.mktime(parse(torrent['time']).timetuple())), + 'size': tryInt(torrent['size']) / 1024 / 1024, + 'seeders': tryInt(torrent['seeders']), + 'leechers': tryInt(torrent['leechers']), + }) + + def getLoginParams(self): + return { + 'username': self.conf('username'), + 'password': self.conf('password'), + 'keeplogged': '1', + 'login': tryUrlencode('M\'identifier') + } + + def getFrenchTitle(self, title): + """ + This function uses TMDB API to get the French movie title of the given title. + """ + + url = "https://api.themoviedb.org/3/search/movie?api_key=0f3094295d96461eb7a672626c54574d&language=fr&query=%s" % title + log.debug('#### Looking on TMDB for French title of : ' + title) + #data = self.getJsonData(url, decode_from = 'utf8') + data = self.getJsonData(url) + try: + if data['results'] != None: + for res in data['results']: + #frTitle = res['title'].lower().replace(':','').replace(' ',' ').replace('-','') + frTitle = res['title'].lower().replace(':','').replace(' ',' ') + if frTitle == title: + log.debug('#### TMDB report identical FR and original title') + return None + else: + log.debug(u'#### TMDB API found a french title : ' + frTitle) + return frTitle + else: + log.debug('#### TMDB could not find a movie corresponding to : ' + title) + return None + except: + log.error('#### Failed to parse TMDB API: %s' % (traceback.format_exc())) + + + def loginSuccess(self, output): + return 'logout' in output.lower() + + loginCheckSuccess = loginSuccess + + +config = [{ + 'name': 'hdonly', + 'groups': [ + { + 'tab': 'searcher', + 'list': 'torrent_providers', + 'name': 'hdonly', + 'description': 'HD-Only.org', + 'wizard': True, + 'icon': 
'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAIAAACQkWg2AAACf0lEQVR4nLXSz07TYAAA8O9bW9Ztbcfc2EZEHVu3GDc6wGgwgoGTXow3jUaDIgcv+AK+g/Hgn4MX7ibuICpiUBKBAeLI5sZIgEEGhnXZGPvabmv7dfUZPPh7hh8A/xuM9cVvTz69OTY0s7ByffjScjofDvRUTyQDQF8nk98/HImf/7S4fmt06P3XxcT0a3hvfDISCWd/Z4W4kMvmQnxILIkOxgEAkGXF7/ft7OzGYtF0OiMIfbncJnz55m2xuO/xeI6rx16fFyHJ5/MqsmICwDCMKJY4jhPFstvtrlQq/m4fea6nm6Ygx3V63S6Oc2KsuzpdRtsAAHZ0UG4XRxKEy8k67PZTTtbp5MjP899binLudPfW9q6NYWkrrek6be2gafrh/bv1Ono13y8eAQBIA3J3Yi9gIpFASG62WrWTWqg3QFiI2S9z5bL4eOKRjvHct2Sq/qyn8WSgPzqzPdXltZMLP5YMjNumCQEsiWWMcWFvLz4w+OHjrNFurteeAwIPXbm8urbGMvsHB2eJIB+pVKuB3kAqldIxVlXNztjVltpQW5retjbe1eCNenFaEC78LI6SUCHCPE+R1MHhH4qiQLttGgbWsa5puqrmN3NXh0eOtcEjdWyrfBFjcEabgg/GJ5qNBklRBjZomxVCC8sypgkAMCGEkiSZptlqtkwAgGmSFGlhHA6E6nabDaET2kpLCEFgkWVJlhUIIEKS1UrXEeJYpo4Qy7CEJDdCIT6ZXA6HI6urKx5PV35rU9V0SUK7hT2OY3+lNvhQcCm5Eg7zy8kkHL42upHOxIX+TCYdjcYKhR2v168oMgCAcThK5XIoGMzmcnFBSGfSA3Hhn7f+Ba/6N2aE1SAhAAAAAElFTkSuQmCC', + 'options': [ + { + 'name': 'enabled', + 'type': 'enabler', + 'default': False + }, + { + 'name': 'username', + 'default': '', + }, + { + 'name': 'password', + 'default': '', + 'type': 'password', + }, + { + 'name': 'seed_ratio', + 'label': 'Seed ratio', + 'type': 'float', + 'default': 1, + 'description': 'Will not be (re)moved until this seed ratio is met.', + }, + { + 'name': 'seed_time', + 'label': 'Seed time', + 'type': 'int', + 'default': 40, + 'description': 'Will not be (re)moved until this seed time (in hours) is met.', + }, + { + 'name': 'extra_score', + 'advanced': True, + 'label': 'Extra Score', + 'type': 'int', + 'default': 20, + 'description': 'Starting score for each release found via this provider.', + } + ], + } + ] +}] diff --git a/couchpotato/core/media/_base/providers/torrent/hdtorrents.py b/couchpotato/core/media/_base/providers/torrent/hdtorrents.py new file mode 100644 index 0000000000..c6e88270b1 --- /dev/null +++ b/couchpotato/core/media/_base/providers/torrent/hdtorrents.py @@ -0,0 +1,179 @@ +import traceback + +from datetime 
import datetime +from bs4 import BeautifulSoup +from couchpotato.core.helpers.encoding import toUnicode +from couchpotato.core.helpers.variable import tryInt +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.torrent.base import TorrentProvider +import re + +log = CPLog(__name__) + + +class Base(TorrentProvider): + + urls = { + 'login' : 'https://www.hdts.ru/login.php', + 'detail' : 'https://www.hdts.ru/details.php?id=%s', + 'search' : 'https://www.hdts.ru/torrents.php?search=%s&active=1', + 'home' : 'https://www.hdts.ru/%s', + } + + http_time_between_calls = 1 #seconds + + def _search(self, media, quality, results): + + url = self.urls['search'] % (media['identifiers']['imdb'])#, cats[0]) + data = self.getHTMLData(url) + + if data: + + # Remove HDTorrents NEW list + split_data = data.partition('\n\n\n\n') + data = split_data[2] + + html = BeautifulSoup(data) + try: + #Get first entry in table + entries = html.find_all('td', attrs={'align' : 'center'}) + + if len(entries) < 21: + return + + base = 21 + extend = 0 + + try: + torrent_id = entries[base].find('div')['id'] + except: + extend = 2 + torrent_id = entries[base + extend].find('div')['id'] + + torrent_age = datetime.now() - datetime.strptime(entries[15 + extend].get_text()[:8] + ' ' + entries[15 + extend].get_text()[-10::], '%H:%M:%S %d/%m/%Y') + + results.append({ + 'id': torrent_id, + 'name': entries[20 + extend].find('a')['title'].strip('History - ').replace('Blu-ray', 'bd50'), + 'url': self.urls['home'] % entries[13 + extend].find('a')['href'], + 'detail_url': self.urls['detail'] % torrent_id, + 'size': self.parseSize(entries[16 + extend].get_text()), + 'age': torrent_age.days, + 'seeders': tryInt(entries[18 + extend].get_text()), + 'leechers': tryInt(entries[19 + extend].get_text()), + 'get_more_info': self.getMoreInfo, + }) + + #Now attempt to get any others + result_table = html.find('table', attrs = {'class' : 'mainblockcontenttt'}) + + if not result_table: + 
return + + entries = result_table.find_all('td', attrs={'align' : 'center', 'class' : 'listas'}) + + if not entries: + return + + for result in entries: + block2 = result.find_parent('tr').find_next_sibling('tr') + if not block2: + continue + cells = block2.find_all('td') + try: + extend = 0 + detail = cells[1 + extend].find('a')['href'] + except: + extend = 1 + detail = cells[1 + extend].find('a')['href'] + torrent_id = detail.replace('details.php?id=', '') + torrent_age = datetime.now() - datetime.strptime(cells[5 + extend].get_text(), '%H:%M:%S %d/%m/%Y') + + results.append({ + 'id': torrent_id, + 'name': cells[1 + extend].find('b').get_text().strip('\t ').replace('Blu-ray', 'bd50'), + 'url': self.urls['home'] % cells[3 + extend].find('a')['href'], + 'detail_url': self.urls['home'] % cells[1 + extend].find('a')['href'], + 'size': self.parseSize(cells[6 + extend].get_text()), + 'age': torrent_age.days, + 'seeders': tryInt(cells[8 + extend].get_text()), + 'leechers': tryInt(cells[9 + extend].get_text()), + 'get_more_info': self.getMoreInfo, + }) + + except: + log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc())) + + def getMoreInfo(self, item): + full_description = self.getCache('hdtorrents.%s' % item['id'], item['detail_url'], cache_timeout = 25920000) + html = BeautifulSoup(full_description) + nfo_pre = html.find('div', attrs = {'id':'details_table'}) + description = toUnicode(nfo_pre.text) if nfo_pre else '' + + item['description'] = description + return item + + def getLoginParams(self): + return { + 'uid': self.conf('username'), + 'pwd': self.conf('password'), + 'Login': 'submit', + } + + def loginSuccess(self, output): + return "if your browser doesn\'t have javascript enabled" in output.lower() or 'logout.php' in output.lower() + + loginCheckSuccess = loginSuccess + + +config = [{ + 'name': 'hdtorrents', + 'groups': [ + { + 'tab': 'searcher', + 'list': 'torrent_providers', + 'name': 'HDTorrents', + 'description': 'See HDTorrents', 
'wizard': True, + 'icon' : 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAIAAACQkWg2AAABfElEQVR4nM2SO47CMBCGx47zUhJeAiHRIp4NRSo6kCi4Aj0NBZwDUXMJLoI4AAVFCiQeBYIghMBxMPYWYVlg65X27zyebzz6fwP8O6HXg2VZpmlKKQFAfgshRCkNguATKBaL5XL5dDopisI555wHQSCEUFXVtm3P81ar1c9sRVEajQZCCGMMAAghAEgmk9lsFgAwxs1mM7oiEaCqqu/7uq4PBoPRaNTpdOLxuOu6lNLNZjMcDu/3OyEkDEP82AwhwzAwxplMxrZty7ISicRsNuv3+6lUynXd8/kcdb4BjLFarTYej9vt9uFw4JwDwHQ6TafTl8slMgO/uqTruud5vV5vMplIKY/HIwDkcrntdht1vwGMMSHEer2mlO73e9/38/l8t9tljM3nc03TngwAACGk1WohhGKxWPSUYRiFQqFUKkUL1+v1h4FPplKpVKvV3W5HCLndblLKMAwBQNM0x3EWi8VyufxM2nEc0zSFEFHSzzql9Hq9/volf6QvVr6n2OEjGOYAAAAASUVORK5CYII=', + 'options': [ + { + 'name': 'enabled', + 'type': 'enabler', + 'default': False, + }, + { + 'name': 'username', + 'default': '', + }, + { + 'name': 'password', + 'default': '', + 'type': 'password', + }, + { + 'name': 'seed_ratio', + 'label': 'Seed ratio', + 'type': 'float', + 'default': 1, + 'description': 'Will not be (re)moved until this seed ratio is met.', + }, + { + 'name': 'seed_time', + 'label': 'Seed time', + 'type': 'int', + 'default': 40, + 'description': 'Will not be (re)moved until this seed time (in hours) is met.', + }, + { + 'name': 'extra_score', + 'advanced': True, + 'label': 'Extra Score', + 'type': 'int', + 'default': 20, + 'description': 'Starting score for each release found via this provider.', + } + ], + }, + ], +}] \ No newline at end of file diff --git a/couchpotato/core/media/_base/providers/torrent/ilovetorrents.py b/couchpotato/core/media/_base/providers/torrent/ilovetorrents.py new file mode 100644 index 0000000000..f9ec22dd8d --- /dev/null +++ b/couchpotato/core/media/_base/providers/torrent/ilovetorrents.py @@ -0,0 +1,198 @@ +import re +import traceback + +from bs4 import BeautifulSoup +from couchpotato.core.helpers.encoding import toUnicode, tryUrlencode +from couchpotato.core.helpers.variable import tryInt, splitString +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.torrent.base import 
TorrentProvider + + +log = CPLog(__name__) + + +class Base(TorrentProvider): + + urls = { + 'download': 'https://www.ilovetorrents.me/%s', + 'detail': 'https://www.ilovetorrents.me/%s', + 'search': 'https://www.ilovetorrents.me/browse.php?search=%s&page=%s&cat=%s', + 'test': 'https://www.ilovetorrents.me/', + 'login': 'https://www.ilovetorrents.me/takelogin.php', + 'login_check': 'https://www.ilovetorrents.me' + } + + login_fail_msg = 'Login failed!' + + cat_ids = [ + (['80'], ['720p', '1080p']), + (['41'], ['brrip']), + (['19'], ['cam', 'ts', 'dvdrip', 'tc', 'r5', 'scr']), + (['20'], ['dvdr']) + ] + + cat_backup_id = 200 + disable_provider = False + http_time_between_calls = 1 + + def _searchOnTitle(self, title, movie, quality, results): + + page = 0 + total_pages = 1 + cats = self.getCatId(quality) + + while page < total_pages: + + movieTitle = tryUrlencode('"%s" %s' % (title, movie['info']['year'])) + search_url = self.urls['search'] % (movieTitle, page, cats[0]) + page += 1 + + data = self.getHTMLData(search_url) + if data: + try: + + results_table = None + + data_split = splitString(data, '.+'')', i['href']).group('page_number')) for i in pagelinks] + total_pages = max(page_numbers) + except: + pass + + entries = results_table.find_all('tr') + + for result in entries[1:]: + prelink = result.find(href = re.compile('details.php')) + link = prelink['href'] + download = result.find('a', href = re.compile('download.php'))['href'] + + if link and download: + + def extra_score(item): + trusted = (0, 10)[result.find('img', alt = re.compile('Trusted')) is not None] + vip = (0, 20)[result.find('img', alt = re.compile('VIP')) is not None] + confirmed = (0, 30)[result.find('img', alt = re.compile('Helpers')) is not None] + moderated = (0, 50)[result.find('img', alt = re.compile('Moderator')) is not None] + + return confirmed + trusted + vip + moderated + + id = re.search('id=(?P\d+)&', link).group('id') + url = self.urls['download'] % download + + fileSize = 
self.parseSize(result.select('td.rowhead')[8].text) + results.append({ + 'id': id, + 'name': toUnicode(prelink.find('b').text), + 'url': url, + 'detail_url': self.urls['detail'] % link, + 'size': fileSize, + 'seeders': tryInt(result.find_all('td')[2].string), + 'leechers': tryInt(result.find_all('td')[3].string), + 'extra_score': extra_score, + 'get_more_info': self.getMoreInfo + }) + + except: + log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc())) + + def getLoginParams(self): + return { + 'username': self.conf('username'), + 'password': self.conf('password'), + 'submit': 'Welcome to ILT', + } + + def getMoreInfo(self, item): + cache_key = 'ilt.%s' % item['id'] + description = self.getCache(cache_key) + + if not description: + + try: + full_description = self.getHTMLData(item['detail_url']) + html = BeautifulSoup(full_description) + nfo_pre = html.find('td', attrs = {'class': 'main'}).findAll('table')[1] + description = toUnicode(nfo_pre.text) if nfo_pre else '' + except: + log.error('Failed getting more info for %s', item['name']) + description = '' + + self.setCache(cache_key, description, timeout = 25920000) + + item['description'] = description + return item + + def loginSuccess(self, output): + return 'logout.php' in output.lower() + + loginCheckSuccess = loginSuccess + + +config = [{ + 'name': 'ilovetorrents', + 'groups': [ + { + 'tab': 'searcher', + 'list': 'torrent_providers', + 'name': 'ILoveTorrents', + 'description': 'Where the Love of Torrents is Born. 
ILoveTorrents', + 'wizard': True, + 'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAIAAACQkWg2AAACPUlEQVR4AYWM0U9SbxjH3+v266I/oNvWZTfd2J1d0ZqbZEFwWrUImOKs4YwtumFKZvvlJJADR2TCQQlMPKg5NmpREgaekAPnBATKgmK1LqQlx6awHnZWF1Tr2Xfvvs+7z+dB0mlO7StpAh+M4S/2jbo3w8+xvJvlnSneEt+10zwer5ujNUOoChjALWFw5XOwdCAk/P57cGvPl+Oht0W7VJHN5NC1uW1BON4hGjXbwpVWMZhsy9v7sEIXAsDNYBXgdkEoIKyWD2CF8ut/aOXTZc/fBSgLWw1BgA4BDHOV0GkT90cBQpXahU5TFomsb38XhJC5/Tbh1P8c6rJlBeGfAeyMhUFwNVcs9lxV9Ot0dwmyd+mrNvRtbJ2fSPC6Z3Vsvub2z3sDFACAAYzk0+kUyxEkyfN7PopqNBro55A+P6yPKIrL5zF1HwjdeBJJCObIsZO79bo3sHhWhglo5WMV3mazuVPb4fLvSL8/FAkB1hK6rXQPwYhMyROK8VK5LAiH/jsMt0HQjxiN4/ePdoilllcqDyt3Mkg8mRBNbIhMb8RERkowQA/p76g0/UDDdCoNmDminM0qSK5vlpE5kugCHhNPxntwWmJPYTMZtYcFR6ABHQsVRlYLukVORaaULvqKI46keFSCv77kSPS6kxrPptLNDHgz16fWBtyxe6v5h08LUy+KI8ushqTPWWIX8Sg6b45IrGtyW6zXFb/hpQf9m3oqfWuB0fpSw0uZ4WB69En69uOk2rmO2V52PXj+A/mI4ESKpb2HAAAAAElFTkSuQmCC', + 'options': [ + { + 'name': 'enabled', + 'type': 'enabler', + 'default': False + }, + { + 'name': 'username', + 'label': 'Username', + 'type': 'string', + 'default': '', + 'description': 'The user name for your ILT account', + }, + { + 'name': 'password', + 'label': 'Password', + 'type': 'password', + 'default': '', + 'description': 'The password for your ILT account.', + }, + { + 'name': 'seed_ratio', + 'label': 'Seed ratio', + 'type': 'float', + 'default': 1, + 'description': 'Will not be (re)moved until this seed ratio is met.', + }, + { + 'name': 'seed_time', + 'label': 'Seed time', + 'type': 'int', + 'default': 40, + 'description': 'Will not be (re)moved until this seed time (in hours) is met.', + }, + { + 'name': 'extra_score', + 'advanced': True, + 'label': 'Extra Score', + 'type': 'int', + 'default': 0, + 'description': 'Starting score for each release found via this provider.', + } + ], + } + ] +}] diff --git a/couchpotato/core/media/_base/providers/torrent/iptorrents.py b/couchpotato/core/media/_base/providers/torrent/iptorrents.py new file mode 100644 index 0000000000..e3331efccc 
--- /dev/null +++ b/couchpotato/core/media/_base/providers/torrent/iptorrents.py @@ -0,0 +1,175 @@ +import traceback + +from bs4 import BeautifulSoup +from couchpotato.core.helpers.encoding import tryUrlencode +from couchpotato.core.helpers.variable import tryInt +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.torrent.base import TorrentProvider +import six + + +log = CPLog(__name__) + + +class Base(TorrentProvider): + + urls = { + 'test': 'https://iptorrents.com/', + 'base_url': 'https://iptorrents.com', + 'login': 'https://iptorrents.com/take_login.php', + 'login_check': 'https://iptorrents.com/oldinbox.php', + 'search': 'https://iptorrents.com/t?%s%%s&q=%s&qf=ti#torrents&p=%%d', + } + + http_time_between_calls = 1 # Seconds + login_fail_msg = 'Invalid username and password combination' + cat_backup_id = None + + def buildUrl(self, title, media, quality): + return self._buildUrl(title.replace(':', ''), quality) + + def _buildUrl(self, query, quality): + + cat_ids = self.getCatId(quality) + + if not cat_ids: + log.warning('Unable to find category ids for identifier "%s"', quality.get('identifier')) + return None + + query = query.replace('"', '') + + return self.urls['search'] % ("&".join(("%d=" % x) for x in cat_ids), tryUrlencode(query).replace('%', '%%')) + + def _searchOnTitle(self, title, media, quality, results): + + freeleech = '' if not self.conf('freeleech') else '&free=on' + + base_url = self.buildUrl(title, media, quality) + if not base_url: return + + pages = 1 + current_page = 1 + while current_page <= pages and not self.shuttingDown(): + data = self.getHTMLData(base_url % (freeleech, current_page)) + + if data: + html = BeautifulSoup(data) + + try: + page_nav = html.find('span', attrs = {'class': 'page_nav'}) + if page_nav: + next_link = page_nav.find("a", text = "Next") + if next_link: + final_page_link = next_link.previous_sibling.previous_sibling + pages = int(final_page_link.string) + + result_table = 
html.find('table', attrs={'id': 'torrents'}) + + if not result_table or 'nothing found!' in data.lower(): + return + + entries = result_table.find_all('tr') + + for result in entries[1:]: + + torrent = result.find_all('td') + if len(torrent) <= 1: + break + + torrent = torrent[1].find('a') + + torrent_id = torrent['href'].replace('/details.php?id=', '') + torrent_name = six.text_type(torrent.string) + torrent_download_url = self.urls['base_url'] + (result.find_all('td')[3].find('a'))['href'].replace(' ', '.') + torrent_details_url = self.urls['base_url'] + torrent['href'] + torrent_size = self.parseSize(result.find_all('td')[5].string) + torrent_seeders = tryInt(result.find('td', attrs = {'class': 'ac t_seeders'}).string) + torrent_leechers = tryInt(result.find('td', attrs = {'class': 'ac t_leechers'}).string) + + results.append({ + 'id': torrent_id, + 'name': torrent_name, + 'url': torrent_download_url, + 'detail_url': torrent_details_url, + 'size': torrent_size, + 'seeders': torrent_seeders, + 'leechers': torrent_leechers, + }) + + except: + log.error('Failed to parsing %s: %s', (self.getName(), traceback.format_exc())) + break + + current_page += 1 + + def getLoginParams(self): + return { + 'username': self.conf('username'), + 'password': self.conf('password'), + 'login': 'submit', + } + + def loginSuccess(self, output): + return 'don\'t have an account' not in output.lower() + + def loginCheckSuccess(self, output): + return '/logout.php' in output.lower() + + +config = [{ + 'name': 'iptorrents', + 'groups': [ + { + 'tab': 'searcher', + 'list': 'torrent_providers', + 'name': 'IPTorrents', + 'description': 'IPTorrents', + 'wizard': True, + 'icon': 
'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAABRklEQVR42qWQO0vDUBiG8zeKY3EqQUtNO7g0J6ZJ1+ifKIIFQXAqDYKCyaaYxM3udrZLHdRFhXrZ6liCW6mubfk874EESgqaeOCF7/Y8hEh41aq6yZi2nyZgBGya9XKtZs4No05pAkZV2YbEmyMMsoSxLQeC46wCTdPPY4HruPQyGIhF97qLWsS78Miydn4XdK46NJ9OsQAYBzMIMf8MQ9wtCnTdWCaIDx/u7uljOIQEe0hiIWPamSTLay3+RxOCSPI9+RJAo7Er9r2bnqjBFAqyK+VyK4f5/Cr5ni8OFKVCz49PFI5GdNvvU7ttE1M1zMU+8AMqFksEhrMnQsBDzqmDAwzx2ehRLwT7yyCI+vSC99c3mozH1NxrJgWWtR1BOECfEJSVCm6WCzJGCA7+IWhBsM4zywDPwEp4vCjx2DzBH2ODAfsDb33Ps6dQwJgAAAAASUVORK5CYII=', + 'options': [ + { + 'name': 'enabled', + 'type': 'enabler', + 'default': False, + }, + { + 'name': 'username', + 'default': '', + }, + { + 'name': 'password', + 'default': '', + 'type': 'password', + }, + { + 'name': 'freeleech', + 'default': 0, + 'type': 'bool', + 'description': 'Only search for [FreeLeech] torrents.', + }, + { + 'name': 'seed_ratio', + 'label': 'Seed ratio', + 'type': 'float', + 'default': 1, + 'description': 'Will not be (re)moved until this seed ratio is met.', + }, + { + 'name': 'seed_time', + 'label': 'Seed time', + 'type': 'int', + 'default': 40, + 'description': 'Will not be (re)moved until this seed time (in hours) is met.', + }, + { + 'name': 'extra_score', + 'advanced': True, + 'label': 'Extra Score', + 'type': 'int', + 'default': 0, + 'description': 'Starting score for each release found via this provider.', + } + ], + }, + ], +}] diff --git a/couchpotato/core/media/_base/providers/torrent/kickasstorrents.py b/couchpotato/core/media/_base/providers/torrent/kickasstorrents.py new file mode 100644 index 0000000000..791286a4ec --- /dev/null +++ b/couchpotato/core/media/_base/providers/torrent/kickasstorrents.py @@ -0,0 +1,196 @@ +import re +import traceback + +from bs4 import BeautifulSoup +from couchpotato.core.helpers.variable import tryInt, getIdentifier +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.torrent.base import TorrentMagnetProvider + + +log = CPLog(__name__) + + +class 
Base(TorrentMagnetProvider): + + urls = { + 'detail': '%s/%s', + 'search': '%s/%s-i%s/', + } + + cat_ids = [ + (['cam'], ['cam']), + (['telesync'], ['ts', 'tc']), + (['screener', 'tvrip'], ['screener']), + (['x264', '720p', '1080p', 'blu-ray', 'hdrip'], ['bd50', '1080p', '720p', 'brrip']), + (['dvdrip'], ['dvdrip']), + (['dvd'], ['dvdr']), + ] + + http_time_between_calls = 1 # Seconds + cat_backup_id = None + + proxy_list = [ + 'http://flowtorrent.com', + 'http://katcr.to/span', + 'http://dx-torrente.com', + 'https://kickass.unblocked.vip', + 'https://katcr.co', + 'https://kat.how', + 'https://kickass.cd', + 'https://kickass.unlockproject.online', + 'https://kickasstorrents.video', + 'https://kat.al', + 'https://katproxy.al', + 'https://kattor.xyz', + 'https://kickass.unblocked.video', + 'https://kickass.unblocked.rocks', + 'https://kickass.immunicity.live', + 'https://kickass.immunicity.red', + 'https://kickass.immunicity.video', + 'https://kickass.bypassed.live', + 'https://kickass.bypassed.video', + 'https://kickass.bypassed.red', + 'https://kickass.unblocked.pw', + 'https://katproxy.com' + ] + + def _search(self, media, quality, results): + + data = self.getHTMLData(self.urls['search'] % (self.getDomain(), 'm', getIdentifier(media).replace('tt', ''))) + + if data: + + cat_ids = self.getCatId(quality) + table_order = ['name', 'size', None, 'age', 'seeds', 'leechers'] + + try: + html = BeautifulSoup(data) + resultdiv = html.find('div', attrs = {'class': 'tabs'}) + for result in resultdiv.find_all('div', recursive = False): + if result.get('id').lower().strip('tab-') not in cat_ids: + continue + + try: + for temp in result.find_all('tr'): + if 'firstr' in temp.get('class', []) or not temp.get('id'): + continue + + new = {} + + nr = 0 + for td in temp.find_all('td'): + column_name = table_order[nr] + if column_name: + + if column_name == 'name': + link = td.find('div', {'class': 'torrentname'}).find_all('a')[2] + new['id'] = temp.get('id')[-7:] + new['name'] = link.text + 
new['url'] = td.find('a', {'href': re.compile('magnet:*')})['href'] + new['detail_url'] = self.urls['detail'] % (self.getDomain(), link['href'][1:]) + new['verified'] = True if td.find('i', {'class': re.compile('verify')}) else False + new['score'] = 100 if new['verified'] else 0 + elif column_name == 'size': + new['size'] = self.parseSize(td.text) + elif column_name == 'age': + new['age'] = self.ageToDays(td.text) + elif column_name == 'seeds': + new['seeders'] = tryInt(td.text) + elif column_name == 'leechers': + new['leechers'] = tryInt(td.text) + + nr += 1 + + # Only store verified torrents + if self.conf('only_verified') and not new['verified']: + continue + + results.append(new) + except: + log.error('Failed parsing KickAssTorrents: %s', traceback.format_exc()) + + except AttributeError: + log.debug('No search results found.') + + def ageToDays(self, age_str): + age = 0 + age_str = age_str.replace(' ', ' ') + + regex = '(\d*.?\d+).(sec|hour|day|week|month|year)+' + matches = re.findall(regex, age_str) + for match in matches: + nr, size = match + mult = 1 + if size == 'week': + mult = 7 + elif size == 'month': + mult = 30.5 + elif size == 'year': + mult = 365 + + age += tryInt(nr) * mult + + return tryInt(age) + + def isEnabled(self): + return super(Base, self).isEnabled() and self.getDomain() + + def correctProxy(self, data): + return 'search query' in data.lower() + + +config = [{ + 'name': 'kickasstorrents', + 'groups': [ + { + 'tab': 'searcher', + 'list': 'torrent_providers', + 'name': 'KickAssTorrents', + 'description': 'KickAssTorrents', + 'wizard': True, + 'icon': 
'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAACD0lEQVR42pXK20uTcRjA8d/fsJsuap0orBuFlm3hir3JJvQOVmuwllN20Lb2isI2nVHKjBqrCWYaNnNuBrkSWxglhDVJOkBdSWUOq5FgoiOrMdRJ2xPPxW+8OUf1ge/FcyCUSVe2qedK5U/OxNTTXRNXEQ52Glb4O6dNEfK1auJkvRY7+/zxnQbA/D596laXcY3OWOiaIX2393SGznUmxkUo/YkDgqHemuzobQ7+NV+reo5Q1mqp68GABdY3+/EloO+JeN4tEqiFU8f3CwhyWo9E7wfMgI0ELTDx0AvjIxcgvZoC9P7NMN7yMmrFeoKa68rfDfmrARsNN0Ihr55cx59ctZWSiwS5bLKpwW4dYJH+M/B6/CYszE0BFZ+egG+Ln+HRoBN/cpl1pV6COIMkOnBVA/w+fXgGKJVM4LxhumMleoL06hJ3wKcCfl+/TAKKx17gnFePRwkqxR4BQSpFkbCrrQJueI7mWpyfATQ9OQY43+uv/+PutBycJ3y2qn2x7jY50GJvnwLKZjOwspyE5I8F4N+1yr1uwqcs3ym63Hwo29EiAyzUWQVr6WVAS4lZCPutQG/2GtES2YiW3d3XflYKtL72kzAcdEDHeSa3czeIMyyz/TApRKvcFfE0isHbJMnrHCf6xTLb1ORvWNlWo91cvHrJUQo0o6ZoRi7dIiT/g2WEDi27Iyov21xMCvgNfXvtwIACfHwAAAAASUVORK5CYII=', + 'options': [ + { + 'name': 'enabled', + 'type': 'enabler', + 'default': True, + }, + { + 'name': 'domain', + 'advanced': True, + 'label': 'Proxy server', + 'description': 'Domain for requests, keep empty to let CouchPotato pick.', + }, + { + 'name': 'seed_ratio', + 'label': 'Seed ratio', + 'type': 'float', + 'default': 1, + 'description': 'Will not be (re)moved until this seed ratio is met.', + }, + { + 'name': 'seed_time', + 'label': 'Seed time', + 'type': 'int', + 'default': 40, + 'description': 'Will not be (re)moved until this seed time (in hours) is met.', + }, + { + 'name': 'only_verified', + 'advanced': True, + 'type': 'bool', + 'default': False, + 'description': 'Only search for verified releases.' 
+ }, + { + 'name': 'extra_score', + 'advanced': True, + 'label': 'Extra Score', + 'type': 'int', + 'default': 0, + 'description': 'Starting score for each release found via this provider.', + } + ], + }, + ], +}] diff --git a/couchpotato/core/media/_base/providers/torrent/magnetdl.py b/couchpotato/core/media/_base/providers/torrent/magnetdl.py new file mode 100755 index 0000000000..f2209dbd96 --- /dev/null +++ b/couchpotato/core/media/_base/providers/torrent/magnetdl.py @@ -0,0 +1,143 @@ +import re +import traceback + +from bs4 import BeautifulSoup +from couchpotato.core.helpers.encoding import toUnicode, tryUrlencode +from couchpotato.core.helpers.variable import tryInt +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.torrent.base import TorrentMagnetProvider +import six + + +log = CPLog(__name__) + +class Base(TorrentMagnetProvider): + + urls = { + 'search': 'http://www.magnetdl.com/%s/%s/se/desc/%s/', + 'detail': 'http://www.magnetdl.com/%s' + } + + http_time_between_calls = 1 # Seconds + + def _searchOnTitle(self, title, movie, quality, results): + + movieTitle = tryUrlencode('%s-%s' % (title.replace(':', '').replace(' ', '-'), movie['info']['year'])) + + next_page = True + current_page = 1 + max_page = self.conf('max_pages') + while next_page and current_page <= max_page and not self.shuttingDown(): + + next_page = False + url = self.urls['search'] % (movieTitle[:1], movieTitle, current_page) + data = self.getHTMLData(url) + + if data: + html = BeautifulSoup(data) + + try: + result_table = html.find('table', attrs = {'class': 'download'}) + if not result_table: + return + + entries = result_table.find_all('tr') + for result in entries: + + if result.find('td', attrs = {'class': 'n'}): + link = result.find('td', attrs = {'class': 'n'}).find('a') + url = result.find('td', attrs = {'class': 'm'}).find('a') + tds = result.find_all('td') + size = tds[5].contents[0].strip('\n ') + age = tds[2].contents[0].strip('\n ') + + 
results.append({ + 'id': link['href'].split('/')[2], + 'name': link['title'], + 'url': url['href'], + 'detail_url': self.urls['detail'] % link['href'], + 'size': self.parseSize(size), + 'age' : self.ageToDays(age), + 'seeders': tryInt(tds[len(tds)-2].string), + 'leechers': tryInt(tds[len(tds)-1].string), + }) + elif result.find('td', attrs = {'id': 'pages'}): + page_td = result.find('td', attrs = {'id': 'pages'}) + next_title = 'Downloads | Page %s' % (current_page + 1) + if page_td.find('a', attrs = {'title': next_title}): + next_page = True + + except: + log.error('Failed to parsing %s: %s', (self.getName(), traceback.format_exc())) + + current_page += 1 + + def ageToDays(self, age_str): + age = 0 + age_str = age_str.replace(' ', ' ') + + regex = '(\d*.?\d+).(sec|hour|day|week|month|year)+' + matches = re.findall(regex, age_str) + for match in matches: + nr, size = match + mult = 1 + if size == 'week': + mult = 7 + elif size == 'month': + mult = 30.5 + elif size == 'year': + mult = 365 + + age += tryInt(nr) * mult + + return tryInt(age) + +config = [{ + 'name': 'magnetdl', + 'groups': [ + { + 'tab': 'searcher', + 'list': 'torrent_providers', + 'name': 'MagnetDL', + 'description': 'MagnetDL', + 'wizard': True, + 'icon': 
'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAAXNSR0IArs4c6QAAAhBJREFUOBGFU89rE0EYfbObpk2qbpqY0ItV1NarFy1CqIeeehF68p6bP1Dx4Mn/QEQ8aDWHnEW8CLZo0ZMopQQtHiyWoqAgCdGNaxP3x8zOOjPJDBUW+2D4vtnvfW/mfcwSDNFoNO6L9MJwu1Sr1S7qmo7/5dTr9aTX66klc920O6ZxMprAGEO73VZbmachjWMEKKXwPE/1yTwNaRwjkFt/i1dRpPqcjWZaP3LNtUhwsrLofHinyEagtLqChfy2alxf3UoVKL14hoXxL+AxR/P5pi9JRiAGAQsH3mWehjghWRaE4NyG5hgBJubOooGAzNOgOEEETkagOUZAKtK9bjDkcELMDSx9UgzE1KdgAQW3LDwGbF2TUeyziW2rOouoEBjACNAErcBnysZY5SB2SoVzQ44KXtFZzE1WVD3oi4MEXxaMAE+s5e6OmIOwcfzsLMQ0rj4oOucfTkxMyZjY1qNjc6dU3fViMQeyLAXMuO8VCidz+0ffz0wC+UNHYJ04ja2Xr9H/6WK8VMT0fBV8cw29b1/x6TsHjaPpS53f28bnShC05jMjB/6EOJMPu7B9D4fnqjhanUV5qgJ/4w36ovlzJ4Efxjcv//Ce/nMDuZG4WyzcHs1Y18v7Ejhj4qEIk4wDv8Sz6fQJQpbcuuZ2bwzYuyzoDzLeEXZAiPy1F8UqC58tofEkQ8jSFdf9KDkafwGzPw7miJh+wQAAAABJRU5ErkJggg==', + 'options': [ + { + 'name': 'enabled', + 'type': 'enabler', + 'default': False, + }, + { + 'name': 'seed_ratio', + 'label': 'Seed ratio', + 'type': 'float', + 'default': 1, + 'description': 'Will not be (re)moved until this seed ratio is met.', + }, + { + 'name': 'seed_time', + 'label': 'Seed time', + 'type': 'int', + 'default': 40, + 'description': 'Will not be (re)moved until this seed time (in hours) is met.', + }, + { + 'name': 'max_pages', + 'label': 'Max Pages', + 'type': 'int', + 'default': 3, + 'description': 'Maximum number of pages to scan.', + }, + { + 'name': 'extra_score', + 'advanced': True, + 'label': 'Extra Score', + 'type': 'int', + 'default': 0, + 'description': 'Starting score for each release found via this provider.', + } + ], + }, + ], +}] diff --git a/couchpotato/core/media/_base/providers/torrent/morethantv.py b/couchpotato/core/media/_base/providers/torrent/morethantv.py new file mode 100755 index 0000000000..2b8179c1e6 --- /dev/null +++ b/couchpotato/core/media/_base/providers/torrent/morethantv.py @@ -0,0 +1,135 @@ +import traceback + +from bs4 import BeautifulSoup +from couchpotato.core.helpers.encoding import toUnicode, tryUrlencode +from 
couchpotato.core.helpers.variable import tryInt +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.torrent.base import TorrentProvider +import six + + +log = CPLog(__name__) + + +class Base(TorrentProvider): + + urls = { + 'test': 'https://www.morethan.tv/', + 'login': 'https://www.morethan.tv/login.php', + 'login_check': 'https://www.morethan.tv/inbox.php', + 'detail': 'https://www.morethan.tv/torrents.php?torrentid=%s', + 'search': 'https://www.morethan.tv/torrents.php?%s&filter_cat%%5B1%%5D=1&action=advanced&searchstr=%s', + 'download': 'https://www.morethan.tv/%s', + } + + http_time_between_calls = 1 # Seconds + login_fail_msg = 'You entered an invalid password.' + + def _searchOnTitle(self, title, movie, quality, results): + + movieTitle = tryUrlencode('%s %s' % (title.replace(':', ''), movie['info']['year'])) + url = self.urls['search'] % (self.getSceneOnly(), movieTitle) + data = self.getHTMLData(url) + + if data: + html = BeautifulSoup(data) + + try: + result_table = html.find('table', attrs = {'id': 'torrent_table'}) + if not result_table: + return + + entries = result_table.find_all('tr', attrs = {'class': 'torrent'}) + for result in entries: + + link = result.find('a', attrs = {'dir': 'ltr'}) + url = result.find('span', attrs = {'title': 'Download'}).parent + tds = result.find_all('td') + size = tds[5].contents[0].strip('\n ') + + results.append({ + 'id': link['href'].replace('torrents.php?id=', '').split('&')[0], + 'name': link.contents[0], + 'url': self.urls['download'] % url['href'], + 'detail_url': self.urls['download'] % link['href'], + 'size': self.parseSize(size), + 'seeders': tryInt(tds[len(tds)-2].string), + 'leechers': tryInt(tds[len(tds)-1].string), + }) + except: + log.error('Failed to parsing %s: %s', (self.getName(), traceback.format_exc())) + + + def getLoginParams(self): + return { + 'username': self.conf('username'), + 'password': self.conf('password'), + 'login': 'Log in', + } + + def 
loginSuccess(self, output): + return 'logout.php' in output.lower() + + loginCheckSuccess = loginSuccess + + def getSceneOnly(self): + return 'releasetype=24' if self.conf('scene_only') else '' + + +config = [{ + 'name': 'morethantv', + 'groups': [ + { + 'tab': 'searcher', + 'list': 'torrent_providers', + 'name': 'MoreThanTV', + 'description': 'MoreThanTV', + 'wizard': True, + 'icon': 'AAABAAEAEBAAAAEAIABoBAAAFgAAACgAAAAQAAAAIAAAAAEAIAAAAAAAQAQAABMLAAATCwAAAAAAAAAAAAAiHaEEIh2hYCIdoaEiHaGaIh2hmCIdoZgiHaGYIh2hmCIdoZgiHaGYIh2hlyIdoZUiHaHAIh2htiIdoUEAAAAAIh2hJyIdoW0iHaFsIh2hbCIdoWsiHaFrIh2hayIdoWsiHaFrIh2hayIdoWoiHaFbIh2hsyIdof8iHaH7Ih2hQSIdoQciHaEDAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAiHaG8Ih2h/yIdoZgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAIh2hoSIdof8iHaGeAAAAAAAAAAAAAAAAIh2hIiIdoZkiHaGZIh2hIiIdoSIiHaGZIh2hiAAAAAAAAAAAAAAAACIdoaEiHaH/Ih2hngAAAAAAAAAAAAAAACIdoaoiHaH/Ih2h/yIdoUQiHaF3Ih2h/yIdof8iHaFEAAAAAAAAAAAiHaGiIh2h/yIdoZ4AAAAAAAAAAAAAAAAiHaG7Ih2h/yIdoREAAAAAIh2h7iIdof8iHaH/Ih2hqgAAAAAAAAAAIh2hoiIdof8iHaGeAAAAAAAAAAAAAAAAIh2huyIdof8AAAAAIh2hVSIdof8iHaGZIh2hzCIdof8iHaERAAAAACIdoaEiHaH/Ih2hngAAAAAAAAAAIh2hZiIdod0iHaH/Ih2hmSIdobsiHaH/Ih2hVSIdoXciHaH/Ih2hdwAAAAAiHaGhIh2h/yIdoZ4AAAAAAAAAACIdoZkiHaH/Ih2h/yIdof8iHaH/Ih2h7gAAAAAiHaEzIh2h/yIdobsAAAAAIh2hoSIdof8iHaGeAAAAAAAAAAAAAAAAIh2huyIdof8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACIdoaEiHaH/Ih2hngAAAAAAAAAAAAAAACIdobsiHaH/AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAiHaGhIh2h/yIdoZ4AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAIh2hoSIdof8iHaGeIh2hCyIdoQYAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACIdocUiHaH/Ih2hlSIdoSMiHaFwIh2hfSIdoXEiHaF3Ih2heiIdoXkiHaF5Ih2heSIdoXoiHaFzIh2hYiIdocIiHaH/Ih2h5yIdoS4AAAAAIh2hLyIdoXoiHaGMIh2hcyIdoXMiHaFzIh2hcyIdoXMiHaFyIh2heSIdoY0iHaFsIh2hSSIdoQoAAAAAAAEgNgAAb2Q/+CA1//hTdOA4cGngGCA54hhHZeQIaW7ACG50wIgAUOf4Q0Xn+E9S//hFVj/4PTYAAFJPgAFTUw==', + 'options': [ + { + 'name': 'enabled', + 'type': 'enabler', + 
# (morethantv config continued)
                    'default': False,
                },
                {
                    'name': 'username',
                    'default': '',
                },
                {
                    'name': 'password',
                    'default': '',
                    'type': 'password',
                },
                {
                    'name': 'seed_ratio',
                    'label': 'Seed ratio',
                    'type': 'float',
                    'default': 1,
                    'description': 'Will not be (re)moved until this seed ratio is met.',
                },
                {
                    'name': 'seed_time',
                    'label': 'Seed time',
                    'type': 'int',
                    'default': 40,
                    'description': 'Will not be (re)moved until this seed time (in hours) is met.',
                },
                {
                    'name': 'scene_only',
                    'type': 'bool',
                    'default': False,
                    'description': 'Only allow scene releases.'
                },
                {
                    'name': 'extra_score',
                    'advanced': True,
                    'label': 'Extra Score',
                    'type': 'int',
                    'default': 0,
                    'description': 'Starting score for each release found via this provider.',
                }
            ],
        },
    ],
}]

# --- new file: couchpotato/core/media/_base/providers/torrent/nextorrent.py ---
# NOTE(review): this module is Python 2 only (urllib2, cookielib, StringIO).
from bs4 import BeautifulSoup
from couchpotato.core.helpers.variable import getTitle, tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.helpers.encoding import simplifyString, tryUrlencode
from couchpotato.core.media._base.providers.torrent.base import TorrentProvider
import cookielib
import re
import traceback
import urllib
import urllib2
import ssl
import unicodedata
from couchpotato.core.helpers import namer_check
from StringIO import StringIO
import gzip
log = CPLog(__name__)


class Base(TorrentProvider):
    # Provider for the French tracker 'nextorrent': scrapes the search result
    # listing, then fetches each result's detail page to locate the download
    # link. Result names are checked against the wanted movie via namer_check.

    urls = {
        'test': 'https://www.nextorrent.net',
        'search': 'https://www.nextorrent.net/torrents/recherche/',
    }

    http_time_between_calls = 1 #seconds
    cat_backup_id = None
    # Shared cookie jar/opener so loginDownload() reuses session cookies.
    cj = cookielib.CookieJar()
    opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))

    class NotLoggedInHTTPError(urllib2.HTTPError):
        # Raised when a 302 redirects to the login page (session expired).
        def __init__(self, url, code, msg, headers, fp):
            urllib2.HTTPError.__init__(self, url, code, msg, headers, fp)

    class PTPHTTPRedirectHandler(urllib2.HTTPRedirectHandler):
        # Follow any 302 except a redirect to login.php, which signals that
        # the session is not (or no longer) authenticated.
        def http_error_302(self, req, fp, code, msg, headers):
            log.debug("302 detected; redirected to %s" % headers['Location'])
            if (headers['Location'] != 'login.php'):
                return urllib2.HTTPRedirectHandler.http_error_302(self, req, fp, code, msg, headers)
            else:
                raise Base.NotLoggedInHTTPError(req.get_full_url(), code, msg, headers, fp)

    def _search(self, movie, quality, results):
        # Build the search URL from the movie title + quality identifier and
        # scrape the 'listing-torrent' table. At most 5 ambiguous names are
        # double-checked against their detail page.

        # Cookie login
        if not self.last_login_check and not self.login():
            return

        # NOTE(review): the original chained several replace(...) calls here,
        # apparently collapsing runs of spaces; exact source characters were
        # lost in transit — confirm against upstream.
        TitleStringReal = (getTitle(movie['info']) + ' ' + simplifyString(quality['identifier'])).replace('-',' ').replace(' ',' ').replace(' ',' ').replace(' ',' ').encode("utf8")

        URL = (self.urls['search']).encode('UTF8')
        # Strip accents so the URL is plain ASCII.
        URL=unicodedata.normalize('NFD',unicode(URL,"utf8","replace"))
        URL=URL.encode('ascii','ignore')

        URL = urllib2.quote(URL.encode('utf8'), ":/?=")
        URL = URL + TitleStringReal
        values = { }
        URLTST = (self.urls['test']).encode('UTF8')

        data_tmp = urllib.urlencode(values)

        req = urllib2.Request(URL, data_tmp, headers={'User-Agent' : "Mozilla/5.0"} )

        data = urllib2.urlopen(req)

        # Synthetic release id counter (the site exposes no stable id).
        id = 1000

        if data:

            try:
                html = BeautifulSoup(data)
                erlin=0
                resultdiv=[]
                # One-shot "loop" used as a poor man's try/except scope to
                # collect all result rows (erlin flags completion).
                while erlin==0:
                    try:
                        resultContent = html.findAll(attrs={'class': ["listing-torrent"]})[0]
                        if resultContent:
                            resultlin = resultContent.findAll(attrs={'class': ['table-hover']})[0].find('tbody')
                            if resultlin:
                                trList= resultlin.findAll("tr");
                                for tr in trList:
                                    resultdiv.append(tr)
                        erlin=1
                    except:
                        erlin=1
                nbrResult = 0
                for result in resultdiv:

                    try:
                        new = {}
                        firstTd = result.findAll("td")[0]
                        # Rows with a <center> in the first cell are spacers.
                        nothing = firstTd.findAll("center")
                        if nothing:
                            continue
                        name = firstTd.findAll("a")[1]['title'];
                        testname = namer_check.correctName(name,movie)
                        if testname == 0 and nbrResult < 5:
                            # Name didn't match: re-check using the (fuller)
                            # name from the detail page.
                            values_sec = {}
                            url_sec = result.findAll("a")[1]['href'];
                            req_sec = urllib2.Request(URLTST+url_sec, values_sec, headers={'User-Agent': "Mozilla/5.0"})
                            data_sec = urllib2.urlopen(req_sec)
                            if data_sec:
                                html_sec = BeautifulSoup(data_sec)
                                classlin_sec = 'torrentsdesc'
                                resultlin_sec = html_sec.findAll(attrs={'id': [classlin_sec]})[0]
                                name = resultlin_sec.find("div").text
                                name = name.replace(".", " ")
                                testname = namer_check.correctName(name, movie)
                        if testname == 0:
                            continue
                        nbrResult += 1
                        # Fetch the detail page again to find the download
                        # link (class 'download').
                        values_sec = {}
                        detail_url = result.findAll("a")[1]['href'];
                        req_sec = urllib2.Request(URLTST+detail_url, values_sec, headers={'User-Agent': "Mozilla/5.0"})
                        data_sec = urllib2.urlopen(req_sec)
                        html_sec = BeautifulSoup(data_sec)
                        classlin_sec = 'download'
                        resultlin_sec = html_sec.findAll(attrs={'class': [classlin_sec]})[0]
                        url_download = resultlin_sec.findAll("a")[0]['href']
                        size = result.findAll("td")[1].text
                        seeder = result.findAll("td")[2].text
                        leecher = result.findAll("td")[3].text
                        age = '1'

                        # Require every word of the movie title to appear in
                        # the release name.
                        verify = getTitle(movie['info']).split(' ')

                        add = 1

                        for verify_unit in verify:
                            if (name.lower().find(verify_unit.lower()) == -1) :
                                add = 0

                        def extra_check(item):
                            return True

                        if add == 1:

                            new['id'] = id
                            new['name'] = name.strip() + ' french'
                            new['url'] = url_download
                            new['detail_url'] = detail_url
                            new['size'] = self.parseSize(size)
                            new['age'] = 10
                            new['seeders'] = tryInt(seeder)
                            new['leechers'] = tryInt(leecher)
                            new['extra_check'] = extra_check
                            # Downloads must go through loginDownload (cookies).
                            new['download'] = self.loginDownload

                            #new['score'] = fireEvent('score.calculate', new, movie, single = True)

                            #log.error('score')
                            #log.error(new['score'])

                            results.append(new)

                            # NOTE(review): indentation of this increment was
                            # ambiguous in the source; placed with the append.
                            id = id+1

                    except:
                        # NOTE(review): message says 'zetorrents' — apparent
                        # copy/paste from another provider; left untouched.
                        log.error('Failed parsing zetorrents: %s', traceback.format_exc())

            except AttributeError:
                log.debug('No search results found.')
        else:
            log.debug('No search results found.')

    def ageToDays(self, age_str):
        # Convert a French human-readable age string to days.
        age = 0
        age_str = age_str.replace(' ', ' ')

        regex = '(\d*.?\d+).(sec|heure|jour|semaine|mois|ans)+'
        matches = re.findall(regex, age_str)
        for match in matches:
            nr, size = match
            mult = 1
            if size == 'semaine':
                mult = 7
            elif size == 'mois':
                mult = 30.5
            elif size == 'ans':
                mult = 365

            age += tryInt(nr) * mult

        return tryInt(age)

    def login(self):
        # Site requires no authentication for searching; always "logged in".
        return True

    def loginDownload(self, url = '', nzb_id = ''):
        # Download a .torrent through the shared cookie-aware opener,
        # transparently gunzipping the response when needed.
        try:
            URLTST = (self.urls['test']).encode('UTF8')
            request_headers = {
                'User-Agent': 'Mozilla/5.0',
                'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
                'Accept-Language': 'fr,fr-FR;q=0.8,en-US;q=0.5,en;q=0.3',
                'Accept-Encoding': 'gzip, deflate, br',
                'Referer': 'https://www.nextorrent.net/torrent/3183/beaut-cache',
                'Connection': 'keep-alive',
                'Upgrade-Insecure-Requests': '1'
            }
            request = urllib2.Request(URLTST+url, headers=request_headers)
            response = self.opener.open(request)
            if response.info().get('Content-Encoding') == 'gzip':
                buf = StringIO(response.read())
                f = gzip.GzipFile(fileobj=buf)
                data = f.read()
                f.close()
            else:
                data = response.read()
            response.close()
            return data
        except:
            log.error('Failed downloading from %s: %s', (self.getName(), traceback.format_exc()))

    def download(self, url = '', nzb_id = ''):
        # Plain (cookie-less) download fallback used by the base provider.

        if not self.last_login_check and not self.login():
            return

        values = {
            'url' : '/'
        }
        data_tmp = urllib.urlencode(values)
        req = urllib2.Request(url, data_tmp, headers={'User-Agent' : "Mozilla/5.0"} )

        try:
            return urllib2.urlopen(req).read()
        except:
            log.error('Failed downloading from %s: %s', (self.getName(), traceback.format_exc()))
config = [{
    'name': 'nextorrent',
    'groups': [
        {
            'tab': 'searcher',
            'list': 'torrent_providers',
            'name': 'nextorrent',
            'description': 'See nextorrent',
            'icon':
'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAACXBIWXMAAAsTAAALEwEAmpwYAAAAIGNIUk0AAHolAACAgwAA+f8AAIDpAAB1MAAA6mAAADqYAAAXb5JfxUYAAAI5SURBVHjabJM/T+NAEMV/u57YsQ05pBS00EQiJFKIoOGTUFFDQY0QfAFo4FNQI0FDg+iogPTuafJHCiaOUbzra7DPubuVRlqtZt68eW9W+b7/sbGxsaK1BsBaS5ZlKKXKyPO8vBd5P7lforX+1ev1gna7XQIMBgPe398REUQEpRRpmrK1tcXu7i6e55FlGa+vr444jmP29vY4ODjAGEOtViOKIm5ubnh5eSEIAkSE7+9vWq0Wh4eHrK6ukiQJs9nM6CrtxWLBfD6n1WpxcnJCv99nNpthjEEpVeYVYa3lz0A/J89zkiSh0+lwenpKv98njmOMMfzv6DzPl4q11ogIcRzT6XQ4Ozuj2+0ynU5LkGqNLlQuipMkIY5jgiBgMpnQ7XY5Pz+n3W7z+fmJMWbJCV21yPM8hsMht7e3RFFEs9lkNBrR6/W4uLhgZ2cHYwzW2hJAqpQcx8FxHJ6enhgMBlxdXbG+vs54PGZ/f5/t7W2UUkt6aAClVDmbiNBoNHh+fuby8pLhcMja2hrz+Rzf96nVav9q8LcLIkIYhjw+PnJ9fc1oNCIMQ7IsK/UqGkv1ocrG8zwcx+H+/p56vc7x8TGNRoM0TZcZK6UQETzPK0NrjbWWMAwBuLu7Q2vN0dERzWaTxWJR6iXWWt7e3siyDBFhMpkwHo9xXZc8z6nX66RpysPDQ7mlhRNRFKF8359tbm4Ghbd5ni8tTEG36Oq6bvU3Jsp13Q+l1EpVmOqiFCCFVksOaP31ewAjgDxHOfDVqAAAAABJRU5ErkJggg==', + 'wizard': True, + 'options': [ + { + 'name': 'enabled', + 'type': 'enabler', + 'default': False, + }, + { + 'name': 'seed_ratio', + 'label': 'Seed ratio', + 'type': 'float', + 'default': 1, + 'description': 'Will not be (re)moved until this seed ratio is met.', + }, + { + 'name': 'seed_time', + 'label': 'Seed time', + 'type': 'int', + 'default': 40, + 'description': 'Will not be (re)moved until this seed time (in hours) is met.', + }, + { + 'name': 'extra_score', + 'advanced': True, + 'label': 'Extra Score', + 'type': 'int', + 'default': 0, + 'description': 'Starting score for each release found via this provider.', + } + ], + }, + ], +}] diff --git a/couchpotato/core/media/_base/providers/torrent/passthepopcorn.py b/couchpotato/core/media/_base/providers/torrent/passthepopcorn.py new file mode 100644 index 0000000000..69aa07125a --- /dev/null +++ b/couchpotato/core/media/_base/providers/torrent/passthepopcorn.py @@ -0,0 +1,304 @@ +import htmlentitydefs +import json +import re +import time +import traceback + +from couchpotato.core.helpers.encoding 
import tryUrlencode
from couchpotato.core.helpers.variable import getTitle, tryInt, mergeDicts, getIdentifier
from couchpotato.core.logger import CPLog
from couchpotato.core.media._base.providers.torrent.base import TorrentProvider
from dateutil.parser import parse
import six


log = CPLog(__name__)


class Base(TorrentProvider):
    # PassThePopcorn (private tracker) provider, queried through the site's
    # JSON API (torrents.php?json=noredirect). NOTE(review): Python 2 only
    # (unichr, htmlentitydefs).

    urls = {
        'domain': 'https://passthepopcorn.me',
        'detail': 'https://passthepopcorn.me/torrents.php?torrentid=%s',
        'torrent': 'https://passthepopcorn.me/torrents.php',
        'login': 'https://passthepopcorn.me/ajax.php?action=login',
        'login_check': 'https://passthepopcorn.me/ajax.php?action=login',
        'search': 'https://passthepopcorn.me/search/%s/0/7/%d'
    }

    # Consecutive failed logins; the provider disables itself at 3
    # (see loginSuccess) to avoid an IP ban.
    login_errors = 0
    http_time_between_calls = 2

    def _search(self, media, quality, results):
        # Query the JSON API and convert each torrent of each returned movie
        # into a CouchPotato result dict, applying user scoring preferences.

        movie_title = getTitle(media)
        quality_id = quality['identifier']

        params = mergeDicts(self.quality_search_params[quality_id].copy(), {
            'order_by': 'relevance',
            'order_way': 'descending',
            'searchstr': getIdentifier(media)
        })

        url = '%s?json=noredirect&%s' % (self.urls['torrent'], tryUrlencode(params))
        res = self.getJsonData(url)

        try:
            if not 'Movies' in res:
                return

            # Auth tokens needed to build authenticated download URLs.
            authkey = res['AuthKey']
            passkey = res['PassKey']

            for ptpmovie in res['Movies']:
                if not 'Torrents' in ptpmovie:
                    log.debug('Movie %s (%s) has NO torrents', (ptpmovie['Title'], ptpmovie['Year']))
                    continue

                log.debug('Movie %s (%s) has %d torrents', (ptpmovie['Title'], ptpmovie['Year'], len(ptpmovie['Torrents'])))
                for torrent in ptpmovie['Torrents']:
                    torrent_id = tryInt(torrent['Id'])
                    torrentdesc = ''
                    torrentscore = 0

                    # Score bonuses/penalties from user preferences.
                    if 'GoldenPopcorn' in torrent and torrent['GoldenPopcorn']:
                        torrentdesc += ' HQ'
                        if self.conf('prefer_golden'):
                            torrentscore += 5000
                    if 'FreeleechType' in torrent:
                        torrentdesc += ' Freeleech'
                        if self.conf('prefer_freeleech'):
                            torrentscore += 7000
                    if 'Scene' in torrent and torrent['Scene']:
                        torrentdesc += ' Scene'
                        if self.conf('prefer_scene'):
                            torrentscore += 2000
                        if self.conf('no_scene'):
                            torrentscore -= 2000
                    if 'RemasterTitle' in torrent and torrent['RemasterTitle']:
                        torrentdesc += self.htmlToASCII(' %s' % torrent['RemasterTitle'])

                    torrent_name = torrent['ReleaseName'] + ' - %s' % torrentdesc

                    def extra_check(item):
                        return self.torrentMeetsQualitySpec(item, quality_id)

                    results.append({
                        'id': torrent_id,
                        'name': torrent_name,
                        # Extra fields consumed by torrentMeetsQualitySpec.
                        'Source': torrent['Source'],
                        'Checked': 'true' if torrent['Checked'] else 'false',
                        'Resolution': torrent['Resolution'],
                        'url': '%s?action=download&id=%d&authkey=%s&torrent_pass=%s' % (self.urls['torrent'], torrent_id, authkey, passkey),
                        'detail_url': self.urls['detail'] % torrent_id,
                        'date': tryInt(time.mktime(parse(torrent['UploadTime']).timetuple())),
                        'size': tryInt(torrent['Size']) / (1024 * 1024),
                        'seeders': tryInt(torrent['Seeders']),
                        'leechers': tryInt(torrent['Leechers']),
                        'score': torrentscore,
                        'extra_check': extra_check,
                    })

        except:
            log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))

    def torrentMeetsQualitySpec(self, torrent, quality):
        # Apply the per-quality post-search filters (self.post_search_filters).
        # Specs prefixed with '!' are negative rules (immediate reject on
        # match); otherwise at least one positive value must match the field.

        if not quality in self.post_search_filters:
            return True

        reqs = self.post_search_filters[quality].copy()

        if self.conf('require_approval'):
            log.debug('Config: Require staff-approval activated')
            reqs['Checked'] = ['true']

        for field, specs in reqs.items():
            matches_one = False
            seen_one = False

            if not field in torrent:
                log.debug('Torrent with ID %s has no field "%s"; cannot apply post-search-filter for quality "%s"', (torrent['id'], field, quality))
                continue

            for spec in specs:
                if len(spec) > 0 and spec[0] == '!':
                    # a negative rule; if the field matches, return False
                    if torrent[field] == spec[1:]:
                        return False
                else:
                    # a positive rule; if any of the possible positive values match the field, return True
                    log.debug('Checking if torrents field %s equals %s' % (field, spec))
                    seen_one = True
                    if torrent[field] == spec:
                        log.debug('Torrent satisfied %s == %s' % (field, spec))
                        matches_one = True

            if seen_one and not matches_one:
                log.debug('Torrent did not satisfy requirements, ignoring')
                return False

        return True

    def htmlToUnicode(self, text):
        # Replace HTML character references/named entities with unicode chars.
        def fixup(m):
            txt = m.group(0)
            if txt[:2] == "&#":
                # character reference
                try:
                    if txt[:3] == "&#x":
                        return unichr(int(txt[3:-1], 16))
                    else:
                        return unichr(int(txt[2:-1]))
                except ValueError:
                    pass
            else:
                # named entity
                try:
                    txt = unichr(htmlentitydefs.name2codepoint[txt[1:-1]])
                except KeyError:
                    pass
            return txt  # leave as is
        return re.sub("&#?\w+;", fixup, six.u('%s') % text)

    def unicodeToASCII(self, text):
        import unicodedata
        # Drop combining marks after NFKD decomposition (strip accents).
        return ''.join(c for c in unicodedata.normalize('NFKD', text) if unicodedata.category(c) != 'Mn')

    def htmlToASCII(self, text):
        # HTML entities -> unicode -> plain ASCII.
        return self.unicodeToASCII(self.htmlToUnicode(text))

    def getLoginParams(self):
        # Fields POSTed to ajax.php?action=login.
        return {
            'username': self.conf('username'),
            'password': self.conf('password'),
            'passkey': self.conf('passkey'),
            'keeplogged': '1',
            'login': 'Login'
        }

    def loginSuccess(self, output):
        # NOTE(review): logs the raw login response at INFO level, which may
        # leak session data into the logs — confirm this is intended.
        log.info('PTP Login response : %s', output)
        try:
            if json.loads(output).get('Result', '').lower() == 'ok':
                self.login_errors = 0
                return True
        except:
            pass

        # Failure path: count it, and self-disable after three in a row.
        self.login_errors += 1
        if self.login_errors >= 3:
            log.error('Disabling PTP provider after repeated failed logins. '
                      'Please check your configuration. Re-enabling without '
                      'solving the problem may cause an IP ban. response=%s',
                      output)
            self.conf('enabled', value=False)
            self.login_errors = 0

        return False

    loginCheckSuccess = loginSuccess


config = [{
    'name': 'passthepopcorn',
    'groups': [
        {
            'tab': 'searcher',
            'list': 'torrent_providers',
            'name': 'PassThePopcorn',
            'description': 'PassThePopcorn.me',
            'wizard': True,
            'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAIAAACQkWg2AAAARklEQVQoz2NgIAP8BwMiGWRpIN1JNWn/t6T9f5'
                    '32+W8GkNt7vzz9UkfarZVpb68BuWlbnqW1nU7L2DMx7eCoBlpqGOppCQB83zIgIg+wWQAAAABJRU5ErkJggg==',
            'options': [
                {
                    'name': 'enabled',
                    'type': 'enabler',
                    'default': False
                },
                {
                    'name': 'domain',
                    'advanced': True,
                    'label': 'Proxy server',
                    'description': 'Domain for requests (HTTPS only!), keep empty to use default (passthepopcorn.me).',
                },
                {
                    'name': 'username',
                    'default': '',
                },
                {
                    'name': 'password',
                    'default': '',
                    'type': 'password',
                },
                {
                    'name': 'passkey',
                    'default': '',
                },
                {
                    'name': 'prefer_golden',
                    'advanced': True,
                    'type': 'bool',
                    'label': 'Prefer golden',
                    'default': 1,
                    'description': 'Favors Golden Popcorn-releases over all other releases.'
                },
                {
                    'name': 'prefer_freeleech',
                    'advanced': True,
                    'type': 'bool',
                    'label': 'Prefer Freeleech',
                    'default': 1,
                    'description': 'Favors torrents marked as freeleech over all other releases.'
                },
                {
                    'name': 'prefer_scene',
                    'advanced': True,
                    'type': 'bool',
                    'label': 'Prefer scene',
                    'default': 0,
                    'description': 'Favors scene-releases over non-scene releases.'
                },
                {
                    'name': 'no_scene',
                    'advanced': True,
                    'type': 'bool',
                    'label': 'Reject scene',
                    'default': 0,
                    'description': 'Reject scene-releases over non-scene releases.'
                },
                {
                    'name': 'require_approval',
                    'advanced': True,
                    'type': 'bool',
                    'label': 'Require approval',
                    'default': 0,
                    'description': 'Require staff-approval for releases to be accepted.'
+ }, + { + 'name': 'seed_ratio', + 'label': 'Seed ratio', + 'type': 'float', + 'default': 2, + 'description': 'Will not be (re)moved until this seed ratio is met.', + }, + { + 'name': 'seed_time', + 'label': 'Seed time', + 'type': 'int', + 'default': 96, + 'description': 'Will not be (re)moved until this seed time (in hours) is met.', + }, + { + 'name': 'extra_score', + 'advanced': True, + 'label': 'Extra Score', + 'type': 'int', + 'default': 20, + 'description': 'Starting score for each release found via this provider.', + } + ], + } + ] +}] \ No newline at end of file diff --git a/couchpotato/core/media/_base/providers/torrent/rarbg.py b/couchpotato/core/media/_base/providers/torrent/rarbg.py new file mode 100644 index 0000000000..ace33dec3e --- /dev/null +++ b/couchpotato/core/media/_base/providers/torrent/rarbg.py @@ -0,0 +1,230 @@ +import re +import traceback +import random +from datetime import datetime + +from couchpotato import fireEvent +from couchpotato.core.helpers.variable import tryInt, getIdentifier +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.torrent.base import TorrentMagnetProvider + +log = CPLog(__name__) + +class Base(TorrentMagnetProvider): + + urls = { + 'test': 'https://torrentapi.org/pubapi_v2.php?app_id=couchpotato', + 'token': 'https://torrentapi.org/pubapi_v2.php?get_token=get_token&app_id=couchpotato', + 'search': 'https://torrentapi.org/pubapi_v2.php?token=%s&mode=search&search_imdb=%s&min_seeders=%s&min_leechers' + '=%s&ranked=%s&category=movies&format=json_extended&app_id=couchpotato', + } + + http_time_between_calls = 2 # Seconds + _token = 0 + + def _search(self, movie, quality, results): + hasresults = 0 + curryear = datetime.now().year + movieid = getIdentifier(movie) + + try: + movieyear = movie['info']['year'] + except: + log.error('RARBG: Couldn\'t get movie year') + movieyear = 0 + + self.getToken() + + if (self._token != 0) and (movieyear == 0 or movieyear <= curryear): + data = 
self.getJsonData(self.urls['search'] % (self._token, movieid, self.conf('min_seeders'), + self.conf('min_leechers'), self.conf('ranked_only')), headers = self.getRequestHeaders()) + + if data: + if 'error_code' in data: + if data['error'] == 'No results found': + log.debug('RARBG: No results returned from Rarbg') + else: + if data['error_code'] == 10: + log.error(data['error'], movieid) + else: + log.error('RARBG: There is an error in the returned JSON: %s', data['error']) + else: + hasresults = 1 + + try: + if hasresults: + for result in data['torrent_results']: + name = result['title'] + titlesplit = re.split('-', name) + releasegroup = titlesplit[len(titlesplit)-1] + + xtrainfo = self.find_info(name) + encoding = xtrainfo[0] + resolution = xtrainfo[1] + # source = xtrainfo[2] + pubdate = result['pubdate'] # .strip(' +0000') + try: + pubdate = datetime.strptime(pubdate, '%Y-%m-%d %H:%M:%S +0000') + now = datetime.utcnow() + age = (now - pubdate).days + except ValueError: + log.debug('RARBG: Bad pubdate') + age = 0 + + torrentscore = self.conf('extra_score') + seeders = tryInt(result['seeders']) + torrent_desc = '/ %s / %s / %s / %s seeders' % (releasegroup, resolution, encoding, seeders) + + if seeders == 0: + torrentscore = 0 + + sliceyear = result['pubdate'][0:4] + year = tryInt(sliceyear) + + results.append({ + 'id': random.randint(100, 9999), + 'name': re.sub('[^A-Za-z0-9\-_ \(\).]+', '', '%s (%s) %s' % (name, year, torrent_desc)), + 'url': result['download'], + 'detail_url': result['info_page'], + 'size': tryInt(result['size']/1048576), # rarbg sends in bytes + 'seeders': tryInt(result['seeders']), + 'leechers': tryInt(result['leechers']), + 'age': tryInt(age), + 'score': torrentscore + }) + + except RuntimeError: + log.error('RARBG: Failed getting results from %s: %s', (self.getName(), traceback.format_exc())) + + def getToken(self): + tokendata = self.getJsonData(self.urls['token'], cache_timeout = 900, headers = self.getRequestHeaders()) + if tokendata: + 
try: + token = tokendata['token'] + if self._token != token: + log.debug('RARBG: GOT TOKEN: %s', token) + self._token = token + except: + log.error('RARBG: Failed getting token from Rarbg: %s', traceback.format_exc()) + self._token = 0 + + def getRequestHeaders(self): + return { + 'User-Agent': fireEvent('app.version', single = True) + } + + @staticmethod + def find_info(filename): + # CODEC # + codec = 'x264' + v = re.search('(?i)(x265|h265|h\.265)', filename) + if v: + codec = 'x265' + + v = re.search('(?i)(xvid)', filename) + if v: + codec = 'xvid' + + # RESOLUTION # + resolution = 'SD' + a = re.search('(?i)(720p)', filename) + if a: + resolution = '720p' + + a = re.search('(?i)(1080p)', filename) + if a: + resolution = '1080p' + + a = re.search('(?i)(2160p)', filename) + if a: + resolution = '2160p' + + # SOURCE # + source = 'HD-Rip' + s = re.search('(?i)(WEB-DL|WEB_DL|WEB\.DL)', filename) + if s: + source = 'WEB-DL' + + s = re.search('(?i)(WEBRIP)', filename) + if s: + source = 'WEBRIP' + + s = re.search('(?i)(DVDR|DVDRip|DVD-Rip)', filename) + if s: + source = 'DVD-R' + + s = re.search('(?i)(BRRIP|BDRIP|BluRay)', filename) + if s: + source = 'BR-Rip' + + s = re.search('(?i)BluRay(.*)REMUX', filename) + if s: + source = 'BluRay-Remux' + + s = re.search('(?i)BluRay(.*)\.(AVC|VC-1)\.', filename) + if s: + source = 'BluRay-Full' + + return_info = [codec, resolution, source] + return return_info + +config = [{ + 'name': 'rarbg', + 'groups': [ + { + 'tab': 'searcher', + 'list': 'torrent_providers', + 'name': 'RARBG', + 'wizard': True, + 'description': 'RARBG', + 'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAB+UlEQVQ4jYXTP2hcRxDH8c8JJZjbYNy8V7gIr0qhg5AiFnETX' + '+PmVAtSmKDaUhUiFyGxjXFlp0hhHy5cqFd9lSGcU55cBU6EEMIj5dsmMewSjNGmOJ3852wysMyww37n94OdXimlh49xDR/hxGr' + '8hZ/xx0qnlHK5lPKk/H/8U0r5oZTyQSmltzzr+AKfT+ed8UFLeHNAH1UVbA2r88NBfQcX8O2yv74sUqKNWT+T01sy2+zpUbS/w' + 
'/awvo7H+O0NQEA/LPKlQWXrSgUmR9HxcZQwmbZGw/pc4MsVAIT+IjcNw80aTjaaem1vPCNlGakj1C6uWFiqeDtyTvoyqAKhBn+' + '+E7CkxC6Zzjop57XpUSenpIuMhpXAc/zyHkAicRSjw6fHZ1ewPdqwszWAB2hXACln8+NWSlld9zX9YN7GhajQXz5+joPXR66de' + 'U1J27Zi7FzaqE0OdmwNGzF2Ymzt3j+E8/gJH64AFlozKS4+Be7tjwyaIKVsOpnavX0II9x8ByDLKco5SwvjL0MI/z64tyOcwsf' + 'jQw8PJvAdvsb6GSBlxI7UyTnD37i7OWhe3NrflvOit3djbDKdwR181SulXMXdrkubbdvKaOpK09S/4jP8iG9m8zmJjCoEg0HzO' + '77vna7zp7ju1TqfYIyZxT7dwCd4eWr7BR7h2X8S6gShJlbKYQAAAABJRU5ErkJggg==', + 'options': [ + { + 'name': 'enabled', + 'type': 'enabler', + 'default': False, + }, + { + 'name': 'ranked_only', + 'advanced': True, + 'label': 'Ranked Only', + 'type': 'int', + 'default': 1, + 'description': 'Only ranked torrents (internal), scene releases, rarbg releases. ' + 'Enter 1 (true) or 0 (false)', + }, + { + 'name': 'min_seeders', + 'advanced': True, + 'label': 'Minimum Seeders', + 'type': 'int', + 'default': 10, + 'description': 'Minium amount of seeders the release must have.', + }, + { + 'name': 'min_leechers', + 'advanced': True, + 'label': 'Minimum leechers', + 'type': 'int', + 'default': 0, + 'description': 'Minium amount of leechers the release must have.', + }, + { + 'name': 'extra_score', + 'advanced': True, + 'label': 'Extra Score', + 'type': 'int', + 'default': 0, + 'description': 'Starting score for each release found via this provider.', + } + ], + }, + ], +}] diff --git a/couchpotato/core/media/_base/providers/torrent/sceneaccess.py b/couchpotato/core/media/_base/providers/torrent/sceneaccess.py new file mode 100644 index 0000000000..9db63f7c02 --- /dev/null +++ b/couchpotato/core/media/_base/providers/torrent/sceneaccess.py @@ -0,0 +1,137 @@ +import traceback + +from bs4 import BeautifulSoup +from couchpotato.core.helpers.encoding import toUnicode +from couchpotato.core.helpers.variable import tryInt +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.torrent.base import TorrentProvider + + +log = CPLog(__name__) + + +class 
Base(TorrentProvider): + + urls = { + 'test': 'https://www.sceneaccess.eu/', + 'login': 'https://www.sceneaccess.eu/login', + 'login_check': 'https://www.sceneaccess.eu/inbox', + 'detail': 'https://www.sceneaccess.eu/details?id=%s', + 'search': 'https://www.sceneaccess.eu/browse?c%d=%d', + 'archive': 'https://www.sceneaccess.eu/archive?&c%d=%d', + 'download': 'https://www.sceneaccess.eu/%s', + } + + http_time_between_calls = 1 # Seconds + login_fail_msg = 'Username or password incorrect' + + def _searchOnTitle(self, title, media, quality, results): + + url = self.buildUrl(title, media, quality) + data = self.getHTMLData(url) + + if data: + html = BeautifulSoup(data) + + try: + resultsTable = html.find('table', attrs = {'id': 'torrents-table'}) + if resultsTable is None: + return + + entries = resultsTable.find_all('tr', attrs = {'class': 'tt_row'}) + for result in entries: + + link = result.find('td', attrs = {'class': 'ttr_name'}).find('a') + url = result.find('td', attrs = {'class': 'td_dl'}).find('a') + seeders = result.find('td', attrs = {'class': 'ttr_seeders'}).find('a') + leechers = result.find('td', attrs = {'class': 'ttr_leechers'}).find('a') + torrent_id = link['href'].replace('details?id=', '') + + results.append({ + 'id': torrent_id, + 'name': link['title'], + 'url': self.urls['download'] % url['href'], + 'detail_url': self.urls['detail'] % torrent_id, + 'size': self.parseSize(result.find('td', attrs = {'class': 'ttr_size'}).contents[0]), + 'seeders': tryInt(seeders.string) if seeders else 0, + 'leechers': tryInt(leechers.string) if leechers else 0, + 'get_more_info': self.getMoreInfo, + }) + + except: + log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc())) + + def getMoreInfo(self, item): + full_description = self.getCache('sceneaccess.%s' % item['id'], item['detail_url'], cache_timeout = 25920000) + html = BeautifulSoup(full_description) + nfo_pre = html.find('div', attrs = {'id': 'details_table'}) + description = 
toUnicode(nfo_pre.text) if nfo_pre else '' + + item['description'] = description + return item + + # Login + def getLoginParams(self): + return { + 'username': self.conf('username'), + 'password': self.conf('password'), + 'submit': 'come on in', + } + + def loginSuccess(self, output): + return '/inbox' in output.lower() + + loginCheckSuccess = loginSuccess + + +config = [{ + 'name': 'sceneaccess', + 'groups': [ + { + 'tab': 'searcher', + 'list': 'torrent_providers', + 'name': 'SceneAccess', + 'description': 'SceneAccess', + 'wizard': True, + 'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAIAAACQkWg2AAAABnRSTlMAAAAAAABupgeRAAACT0lEQVR4AYVQS0sbURidO3OTmajJ5FElTTOkPmZ01GhHrIq0aoWAj1Vc+A/cuRMXbl24V9SlCGqrLhVFCrooEhCp2BAx0mobTY2kaR7qmOm87EXL1EWxh29xL+c7nPMdgGHYO5bF/gdbefnr6WlbWRnxluMwAB4Z0uEgXa7nwaDL7+/RNPzxbYvb/XJ0FBYVfd/ayh0fQ4qCGEHcm0KLRZUk7Pb2YRJPRwcsKMidnKD3t9VVT3s7BDh+z5FOZ3Vfn3h+Hltfx00mRRSRWFcUmmVNhYVqPn8dj3va2oh+txvcQRVF9ebm1fi4k+dRFbosY5rm4Hk7xxULQnJnx93S4g0EIEEQRoDLo6PrWEw8Pc0eHLwYGopMTDirqlJ7eyhYYGHhfgfHCcKYksZGVB/NcXI2mw6HhZERqrjYTNPHi4tFPh8aJIYIhgPlcCRDoZLW1s75+Z/7+59nZ/OJhLWigqAoKZX6Mjf3dXkZ3pydGYLc4aEoCCkInzQ1fRobS2xuvllaonkedfArnY5OTdGVldBkOADgqq2Nr6z8CIWaJietDHOhKB+HhwFKC6Gnq4ukKJvP9zcSbjYDXbeVlkKzuZBhnnV3e3t6UOmaJO0ODibW1hB1GYkg8R/gup7Z3TVZLJ5AILW9LcZiVpYtYBhw16O3t7cauckyeF9Tgz0ATpL2+nopmWycmbnY2LiKRjFk6/d7+/vRJfl4HGzV1T0UIM43MGBvaIBWK/YvwM5w+IMgGH8tkyEgvIpE7M3Nt6qqZrNyOq1kMmouh455Ggz+BhKY4GEc2CfwAAAAAElFTkSuQmCC', + 'options': [ + { + 'name': 'enabled', + 'type': 'enabler', + 'default': False, + }, + { + 'name': 'username', + 'default': '', + }, + { + 'name': 'password', + 'default': '', + 'type': 'password', + }, + { + 'name': 'seed_ratio', + 'label': 'Seed ratio', + 'type': 'float', + 'default': 1, + 'description': 'Will not be (re)moved until this seed ratio is met.', + }, + { + 'name': 'seed_time', + 'label': 'Seed time', + 'type': 'int', + 'default': 40, + 'description': 'Will not be (re)moved until this seed time (in hours) is met.', + }, + { + 
'name': 'extra_score', + 'advanced': True, + 'label': 'Extra Score', + 'type': 'int', + 'default': 20, + 'description': 'Starting score for each release found via this provider.', + } + ], + }, + ], +}] diff --git a/couchpotato/core/media/_base/providers/torrent/scenetime.py b/couchpotato/core/media/_base/providers/torrent/scenetime.py new file mode 100644 index 0000000000..6c10cc27a7 --- /dev/null +++ b/couchpotato/core/media/_base/providers/torrent/scenetime.py @@ -0,0 +1,139 @@ +import traceback + +from bs4 import BeautifulSoup +from couchpotato.core.helpers.encoding import tryUrlencode, toUnicode +from couchpotato.core.helpers.variable import tryInt +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.torrent.base import TorrentProvider + + +log = CPLog(__name__) + + +class Base(TorrentProvider): + + urls = { + 'test': 'https://www.scenetime.com/', + 'login': 'https://www.scenetime.com/takelogin.php', + 'login_check': 'https://www.scenetime.com/inbox.php', + 'detail': 'https://www.scenetime.com/details.php?id=%s', + 'search': 'https://www.scenetime.com/browse.php?search=%s&cat=%d', + 'download': 'https://www.scenetime.com/download.php/%s/%s', + } + + cat_ids = [ + ([59], ['720p', '1080p']), + ([81], ['brrip']), + ([102], ['bd50']), + ([3], ['dvdrip']), + ] + + http_time_between_calls = 1 # Seconds + login_fail_msg = 'Username or password incorrect' + cat_backup_id = None + + def _searchOnTitle(self, title, movie, quality, results): + + url = self.urls['search'] % (tryUrlencode('%s %s' % (title.replace(':', ''), movie['info']['year'])), self.getCatId(quality)[0]) + data = self.getHTMLData(url) + + if data: + html = BeautifulSoup(data) + + try: + result_table = html.find(attrs = {'id': 'torrenttable'}) + + if not result_table: + log.error('failed to generate result_table') + return + + entries = result_table.find_all('tr') + + for result in entries[1:]: + cells = result.find_all('td') + link = result.find('a', attrs = {'class': 
'index'}) + torrent_id = link['href'].replace('download.php/','').split('/')[0] + torrent_file = link['href'].replace('download.php/','').split('/')[1] + size = self.parseSize(cells[5].contents[0] + cells[5].contents[2]) + name_row = cells[1].contents[0] + name = name_row.getText() + seeders_row = cells[6].contents[0] + seeders = seeders_row.getText() + + + results.append({ + 'id': torrent_id, + 'name': name, + 'url': self.urls['download'] % (torrent_id,torrent_file), + 'detail_url': self.urls['detail'] % torrent_id, + 'size': size, + 'seeders': seeders, + }) + + except: + log.error('Failed to parsing %s: %s', (self.getName(), traceback.format_exc())) + + def getLoginParams(self): + return { + 'login': 'submit', + 'username': self.conf('username'), + 'password': self.conf('password'), + } + + def loginSuccess(self, output): + return 'logout.php' in output.lower() or 'Welcome' in output.lower() + + loginCheckSuccess = loginSuccess + + +config = [{ + 'name': 'scenetime', + 'groups': [ + { + 'tab': 'searcher', + 'list': 'torrent_providers', + 'name': 'SceneTime', + 'description': 'SceneTime', + 'wizard': True, + 'icon': 
'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAAJcEhZcwAADsMAAA7DAcdvqGQAAAAYdEVYdFNvZnR3YXJlAHBhaW50Lm5ldCA0LjAuNWWFMmUAAAIwSURBVDhPZZFbSBRRGMePs7Mzjma7+9AWWxpeYrXLkrcIfUwIpIeK3tO1hWhfltKwhyJMFIqgCz2EpdHWRun2oGG02O2hlYyypY21CygrlbhRIYHizO6/mdk5szPtB785hzm//zeXj7Q89q4I4QaQBx6ZHQY84Efq4Rrbg4rxVmx61AJ2pFY/twzvhP1hU4ZwIQ8K7mw1wdzdhrrxQ7g8E0Q09R6flubw+mcM7tHWPJcwt91ghuTQUDWYW8rejbrRA3i1OA0xLYGWJO8bxw6q50YIc70CRoQbNbj2MQgpkwsrpTYI7ze5CoS5UgYjpTd3YWphWg1l1CuwLC4jufQNtaG9JleBWM67YKR6oBlzf+bVoPIOUiaNwVgIzcF9sF3aknMvZFfCnnNCp9eJqqsNSKQ+qw2USssNzrzoh9Dnynmaq6yEPe2AkfX9lXjy5akWz9ZkcgqVFz0mj0KsJ0tgROh2oCfSJ3/3ihaHPA0Rh+/7UNhtN7kKhAsI+J+a3u2If49r8WxFZiawtsuR5xLumBUU3s/B2bkOm0+V4V3yrTwFOgcg8SMBe8CmuxTC+SygFB3l8TzxDLOpWYiSqEWzFf0ahc2/RncphPcSUIqPWPFhPqZFcrUqraLzXkA+Z3WXQvh2eaNR3MHmNVB+YPjNMMqPb9Q9I6YGRR0WTMQj6hOV+f/++wuDLwfg7iqH4GVMQQrh28w3Nvgd2H22Hk09jag6UYoSH4/C9gKTo9NG8A8MPUM4DJp74gAAAABJRU5ErkJggg==', + 'options': [ + { + 'name': 'enabled', + 'type': 'enabler', + 'default': False, + }, + { + 'name': 'username', + 'default': '', + }, + { + 'name': 'password', + 'default': '', + 'type': 'password', + }, + { + 'name': 'seed_ratio', + 'label': 'Seed ratio', + 'type': 'float', + 'default': 1, + 'description': 'Will not be (re)moved until this seed ratio is met.', + }, + { + 'name': 'seed_time', + 'label': 'Seed time', + 'type': 'int', + 'default': 40, + 'description': 'Will not be (re)moved until this seed time (in hours) is met.', + }, + { + 'name': 'extra_score', + 'advanced': True, + 'label': 'Extra Score', + 'type': 'int', + 'default': 20, + 'description': 'Starting score for each release found via this provider.', + } + ], + }, + ], +}] diff --git a/couchpotato/core/media/_base/providers/torrent/t411.py b/couchpotato/core/media/_base/providers/torrent/t411.py new file mode 100644 index 0000000000..c6531bb9ca --- /dev/null +++ b/couchpotato/core/media/_base/providers/torrent/t411.py @@ -0,0 +1,316 @@ +from bs4 import BeautifulSoup +from 
couchpotato.core.helpers.variable import tryInt +from couchpotato.core.logger import CPLog +from couchpotato.core.helpers.encoding import simplifyString, tryUrlencode +from couchpotato.core.media._base.providers.torrent.base import TorrentProvider +from couchpotato.core.helpers import namer_check +import json +import re +import unicodedata +import traceback +import urllib2 +import sys +import urllib + +log = CPLog(__name__) + +import ast +import operator + +_binOps = { + ast.Add: operator.add, + ast.Sub: operator.sub, + ast.Mult: operator.mul, + ast.Div: operator.div, + ast.Mod: operator.mod +} + +def _arithmeticEval(s): + """ + A safe eval supporting basic arithmetic operations. + + :param s: expression to evaluate + :return: value + """ + node = ast.parse(s, mode='eval') + + def _eval(node): + if isinstance(node, ast.Expression): + return _eval(node.body) + elif isinstance(node, ast.Str): + return node.s + elif isinstance(node, ast.Num): + return node.n + elif isinstance(node, ast.BinOp): + return _binOps[type(node.op)](_eval(node.left), _eval(node.right)) + else: + raise Exception('Unsupported type {}'.format(node)) + + return _eval(node.body) + +class Base(TorrentProvider): + + + urls = { + 'test' : 'https://www.t411.al', + 'login' : 'https://www.t411.al/users/login/', + 'login_check': 'https://www.t411.al', + 'detail': 'https://www.t411.al/torrents/?id=%s', + 'search': 'https://www.t411.al/torrents/search/?search=%s %s', + 'download' : 'http://www.t411.al/torrents/download/?id=%s', + } + + http_time_between_calls = 1 #seconds + cat_backup_id = None + + def _searchOnTitle(self, title, movie, quality, results): + + # test the new title and search for it if valid + newTitle = self.getFrenchTitle(title, str(movie['info']['year'])) + request = '' + if isinstance(title, str): + title = title.decode('utf8') + if newTitle is not None: + request = (u'(' + title + u')|(' + newTitle + u')').replace(':', '') + else: + request = title.replace(':', '') + request = 
urllib2.quote(request.encode('iso-8859-1')) + + log.debug('Looking on T411 for movie named %s or %s' % (title, newTitle)) + url = self.urls['search'] % (request, acceptableQualityTerms(quality)) + data = self.getHTMLData(url) + + log.debug('Received data from T411') + if data: + log.debug('Data is valid from T411') + html = BeautifulSoup(data) + + try: + result_table = html.find('table', attrs = {'class':'results'}) + if not result_table: + log.debug('No table results from T411') + return + + torrents = result_table.find('tbody').findAll('tr') + for result in torrents: + idt = result.findAll('td')[2].findAll('a')[0]['href'][1:].replace('torrents/nfo/?id=','') + release_name = result.findAll('td')[1].findAll('a')[0]['title'] + words = title.lower().replace(':',' ').split() + if self.conf('ignore_year'): + index = release_name.lower().find(words[-1] if words[-1] != 'the' else words[-2]) + len(words[-1] if words[-1] != 'the' else words[-2]) +1 + index2 = index + 7 + if not str(movie['info']['year']) in release_name[index:index2]: + release_name = release_name[0:index] + '(' + str(movie['info']['year']) + ').' + release_name[index:] + #if 'the' not in release_name.lower() and (words[-1] == 'the' or words[0] == 'the'): + # release_name = 'the.' 
+ release_name + if 'multi' in release_name.lower(): + release_name = release_name.lower().replace('truefrench','').replace('french','') + age = result.findAll('td')[4].text + log.debug('result : name=%s, detail_url=%s' % (replaceTitle(release_name, title, newTitle), (self.urls['detail'] % idt))) + results.append({ + 'id': idt, + 'name': replaceTitle(release_name, title, newTitle), + 'url': self.urls['download'] % idt, + 'detail_url': self.urls['detail'] % idt, + 'age' : age, + 'size': self.parseSize(str(result.findAll('td')[5].text)), + 'seeders': result.findAll('td')[7].text, + 'leechers': result.findAll('td')[8].text, + }) + + except: + log.error('Failed to parse T411: %s' % (traceback.format_exc())) + + def getLoginParams(self): + log.debug('Getting login params for T411') + return { + 'login': self.conf('username'), + 'password': self.conf('password'), + 'remember': '1', + 'url': '/' + } + + def loginSuccess(self, output): + log.debug('Checking login success for T411: %s' % ('True' if ('logout' in output.lower()) else 'False')) + + if 'confirmer le captcha' in output.lower(): + log.debug('Too many login attempts. A captcha is displayed.') + output = self._solveCaptcha(output) + + return 'logout' in output.lower() + + def _solveCaptcha(self, output): + """ + When trying to connect too many times with wrong password, a captcha can be requested. + This captcha is really simple and can be solved by the provider. + + + + +
+ :param output: initial login output + :return: output after captcha resolution + """ + html = BeautifulSoup(output) + + query = html.find('input', {'name': 'captchaQuery'}) + token = html.find('input', {'name': 'captchaToken'}) + if not query or not token: + log.error('Unable to solve login captcha.') + return output + + query_expr = query.attrs['value'].strip('= ') + log.debug(u'Captcha query: ' + query_expr) + answer = _arithmeticEval(query_expr) + + log.debug(u'Captcha answer: %s' % answer) + + login_params = self.getLoginParams() + + login_params['captchaAnswer'] = answer + login_params['captchaQuery'] = query.attrs['value'] + login_params['captchaToken'] = token.attrs['value'] + + return self.urlopen(self.urls['login'], data = login_params) + + loginCheckSuccess = loginSuccess + + def getFrenchTitle(self, title, year): + """ + This function uses TMDB API to get the French movie title of the given title. + """ + + url = "https://api.themoviedb.org/3/search/movie?api_key=0f3094295d96461eb7a672626c54574d&language=fr&query=%s" % title + log.debug('Looking on TMDB for French title of : ' + title) + #data = self.getJsonData(url, decode_from = 'utf8') + data = self.getJsonData(url) + try: + if data['results'] != None: + for res in data['results']: + yearI = res['release_date'] + if year in yearI: + break + frTitle = res['title'].lower() + if frTitle == title: + log.debug('TMDB report identical FR and original title') + return None + else: + log.debug(u'L\'API TMDB a trouve un titre francais => ' + frTitle) + return frTitle + else: + log.debug('TMDB could not find a movie corresponding to : ' + title) + return None + except: + log.error('Failed to parse TMDB API: %s' % (traceback.format_exc())) + +def acceptableQualityTerms(quality): + """ + This function retrieve all the acceptable terms for a quality (eg hdrip and bdrip for brrip) + Then it creates regex accepted by t411 to search for one of this term + t411 have to handle alternatives as OR and then the regex is 
firstAlternative|secondAlternative + + In alternatives, there can be "doubled terms" as "br rip" or "bd rip" for brrip + These doubled terms have to be handled as AND and are then (firstBit&secondBit) + """ + alternatives = quality.get('alternative', []) + # first acceptable term is the identifier itself + acceptableTerms = [quality['identifier']] + log.debug('Requesting alternative quality terms for : ' + str(acceptableTerms) ) + # handle single terms + acceptableTerms.extend([ term for term in alternatives if type(term) == type('') ]) + # handle doubled terms (such as 'dvd rip') + doubledTerms = [ term for term in alternatives if type(term) == type(('', '')) ] + acceptableTerms.extend([ '('+first+'%26'+second+')' for (first,second) in doubledTerms ]) + # join everything and return + log.debug('Found alternative quality terms : ' + str(acceptableTerms).replace('%26', ' ')) + return '|'.join(acceptableTerms) + +def replaceTitle(releaseNameI, titleI, newTitleI): + """ + This function is replacing the title in the release name by the old one, + so that couchpotato recognise it as a valid release. + """ + + if newTitleI is None: # if the newTitle is empty, do nothing + return releaseNameI + else: + # input as lower case + releaseName = releaseNameI.lower() + title = titleI.lower() + newTitle = newTitleI.lower() + log.debug('Replacing -- ' + newTitle + ' -- in the release -- ' + releaseName + ' -- by the original title -- ' + title) + separatedWords = [] + for s in releaseName.split(' '): + separatedWords.extend(s.split('.')) + # test how far the release name corresponds to the original title + index = 0 + while separatedWords[index] in title.split(' '): + index += 1 + # test how far the release name corresponds to the new title + newIndex = 0 + while separatedWords[newIndex] in newTitle.split(' '): + newIndex += 1 + # then determine if it correspoinds to the new title or old title + if index >= newIndex: + # the release name corresponds to the original title. 
SO no change needed + log.debug('The release name is already corresponding. Changed nothing.') + return releaseNameI + else: + # otherwise, we replace the french title by the original title + finalName = [title] + finalName.extend(separatedWords[newIndex:]) + newReleaseName = ' '.join(finalName) + log.debug('The new release name is : ' + newReleaseName) + return newReleaseName + +config = [{ + 'name': 't411', + 'groups': [ + { + 'tab': 'searcher', + 'list': 'torrent_providers', + 'name': 't411', + 'description': 'See T411', + 'icon' : 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAIAAACQkWg2AAAAA3NCSVQICAjb4U/gAAACdklEQVQokW2RX0hTcRTHz+/+cbvz3m1srbv8M6Ws6SbK1hRTkUoKIui5jIJ8sz9vQQTRQxDRexCkIGgmSC+B1YNWNCIrRQ3Z2PyTf5pb2/S2ud2/2723hyIt/b4cDud7+H4OB2CXrpOW+wYLYPju0R66DTABEAWYB7i6lwHtbEYAKi5crPE36Wa6QGKQyYylk1cePPwX4FqPquSSiZVHAN+Gh/JihpezUpGXinmxkBN5Lvjm5U4/1hzwS5JsJIkzkWnmZDtSZF2WQZZ0SSoIgiSJXq+37VjLNhLL7h/ofUzg0Dceutl1ejHOoa0fScUQW1rouXQWw3ANULXbt8cNJ7pudPrcd/pmLp8PBNpa344HDYTqYc2Ls58G+59sI/0uTgBTKj78OQIdTb6W5gKg+PpKaPprUoLB/mBHY/v/CacARru7ucaG6NCrj5vp2rpDWvmBDa83PzDwdJVOl5Zo8S+JQhoD7E/CGMBEKLyYTNWjLKNl6KkP5OsXbE1leGqdNFoBd3K034jbcJzYfqfPTpUZjOHkmkmS+SpzinXYlxdGM+4I5ezkoyHSUcIjHXHY3wWPqM9SOg2ataFMlvQ6YWs5FIvaKxxgmzEfrWYOazanXuAxAGBwGALoNcWePxtx8cKR4wGuBFZo05TI2gXViE3SaiyVn3bQRgU0DABuVdHn7na6iuSMAOk2X6WnrqLcMVlqTVQ5lHw2VaQURtNN+7YoD7L4cQCQKGo9GJsUEGC6bNPfzc1xpZAjWuH7+3u+xHy+BuFLLkYsx7la0yrCAeqdZg0h1kDQFkpVlSyvrG1krM5mNbtK/9wM0wddjF6UNywElpWVX6HUDxDMdBkmAAAAAElFTkSuQmCC', + 'wizard': True, + 'options': [ + { + 'name': 'enabled', + 'type': 'enabler', + 'default': False, + }, + { + 'name': 'username', + 'default': '', + }, + { + 'name': 'password', + 'default': '', + 'type': 'password', + }, + { + 'name': 'seed_ratio', + 'label': 'Seed ratio', + 'type': 'float', + 'default': 1, + 'description': 'Will not be (re)moved until this seed ratio is met.', + }, + { + 'name': 'seed_time', + 'label': 'Seed time', + 'type': 'int', + 'default': 40, + 'description': 'Will not be (re)moved until 
this seed time (in hours) is met.', + }, + { + 'name': 'extra_score', + 'advanced': True, + 'label': 'Extra Score', + 'type': 'int', + 'default': 0, + 'description': 'Starting score for each release found via this provider.', + } + ], + }, + ], +}] diff --git a/couchpotato/core/media/_base/providers/torrent/t411api.py b/couchpotato/core/media/_base/providers/torrent/t411api.py new file mode 100644 index 0000000000..41ec5f47fb --- /dev/null +++ b/couchpotato/core/media/_base/providers/torrent/t411api.py @@ -0,0 +1,149 @@ +from couchpotato.core.helpers.variable import tryInt +from couchpotato.core.logger import CPLog +from couchpotato.core.helpers.encoding import simplifyString, tryUrlencode +from couchpotato.core.media._base.providers.torrent.base import TorrentProvider +from couchpotato.core.helpers import namer_check +import json +import re +import unicodedata + +log = CPLog(__name__) + + +class Base(TorrentProvider): + + urls = { + 'test': 'https://www.t411.al/', + 'torrent': 'https://www.t411.al/torrents/%s', + 'login': 'https://api.t411.al/auth', + 'detail': 'https://www.t411.al/torrents/?id=%s', + 'search': 'https://api.t411.al/torrents/search/%s', + 'download': 'https://api.t411.al/torrents/download/%s', + } + + http_time_between_calls = 1 #seconds + auth_token = '' + + def _search(self, movie, quality, results): + headers = {} + headers['Authorization'] = self.auth_token + + for title in movie['info']['titles']: + try: + TitleStringReal = str(title.encode("latin-1").replace('-',' ')) + + url = self.urls['search'] % TitleStringReal + url = url + '?cat=631&limit=100' + data = self.getJsonData(url, None, headers = headers) + + for currentresult in data['torrents']: + if currentresult['categoryname'] in ['Film', 'Animation']: + name = currentresult['name'] + splittedReleaseName = re.split('(?:\(|\.|\s)([0-9]{4})(?:\)|\.|\s)', name, flags=re.IGNORECASE) + + if len(splittedReleaseName) > 1: + cleanedReleaseName = ''.join(splittedReleaseName[0:-2]) + + match = 
re.compile(ur"[\w]+", re.UNICODE) + nameSplit = ''.join(match.findall(unicodedata.normalize('NFKD', cleanedReleaseName.upper()).encode('ASCII','ignore'))) + titleSplit = ''.join(match.findall(unicodedata.normalize('NFKD', title.upper()).encode('ASCII','ignore'))) + + if titleSplit == nameSplit: + new = {} + new['id'] = currentresult['id'] + new['name'] = name + new['url'] = self.urls['download'] % (currentresult['id']) + new['detail_url'] = self.urls['torrent'] % (currentresult['rewritename']) + new['size'] = tryInt(currentresult['size']) / 1024 / 1024 + new['seeders'] = tryInt(currentresult['seeders']) + new['leechers'] = tryInt(currentresult['leechers']) + new['authtoken'] = self.auth_token + new['download'] = self.loginDownload + + results.append(new) + except: + continue + + return + + def getLoginParams(self): + return { + 'username': self.conf('username'), + 'password': self.conf('password') + } + + def loginSuccess(self, output): + try: + jsonData = json.loads(output) + if jsonData.get('uid', '') != '': + self.auth_token = jsonData.get('token', '') + return True + except: + pass + + return False + + loginCheckSuccess = loginSuccess + + def loginDownload(self, url = '', nzb_id = ''): + try: + if not self.login(): + log.error('Failed downloading from %s', self.getName()) + + headers = {} + headers['Authorization'] = self.auth_token + return self.urlopen(url, None, headers = headers) + except: + log.error('Failed downloading from %s: %s', (self.getName(), traceback.format_exc())) + +config = [{ + 'name': 't411api', + 'groups': [ + { + 'tab': 'searcher', + 'list': 'torrent_providers', + 'name': 't411 api version', + 'description': 'See T411', + 'icon' : 
'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAIAAACQkWg2AAAAA3NCSVQICAjb4U/gAAACdklEQVQokW2RX0hTcRTHz+/+cbvz3m1srbv8M6Ws6SbK1hRTkUoKIui5jIJ8sz9vQQTRQxDRexCkIGgmSC+B1YNWNCIrRQ3Z2PyTf5pb2/S2ud2/2723hyIt/b4cDud7+H4OB2CXrpOW+wYLYPju0R66DTABEAWYB7i6lwHtbEYAKi5crPE36Wa6QGKQyYylk1cePPwX4FqPquSSiZVHAN+Gh/JihpezUpGXinmxkBN5Lvjm5U4/1hzwS5JsJIkzkWnmZDtSZF2WQZZ0SSoIgiSJXq+37VjLNhLL7h/ofUzg0Dceutl1ejHOoa0fScUQW1rouXQWw3ANULXbt8cNJ7pudPrcd/pmLp8PBNpa344HDYTqYc2Ls58G+59sI/0uTgBTKj78OQIdTb6W5gKg+PpKaPprUoLB/mBHY/v/CacARru7ucaG6NCrj5vp2rpDWvmBDa83PzDwdJVOl5Zo8S+JQhoD7E/CGMBEKLyYTNWjLKNl6KkP5OsXbE1leGqdNFoBd3K034jbcJzYfqfPTpUZjOHkmkmS+SpzinXYlxdGM+4I5ezkoyHSUcIjHXHY3wWPqM9SOg2ataFMlvQ6YWs5FIvaKxxgmzEfrWYOazanXuAxAGBwGALoNcWePxtx8cKR4wGuBFZo05TI2gXViE3SaiyVn3bQRgU0DABuVdHn7na6iuSMAOk2X6WnrqLcMVlqTVQ5lHw2VaQURtNN+7YoD7L4cQCQKGo9GJsUEGC6bNPfzc1xpZAjWuH7+3u+xHy+BuFLLkYsx7la0yrCAeqdZg0h1kDQFkpVlSyvrG1krM5mNbtK/9wM0wddjF6UNywElpWVX6HUDxDMdBkmAAAAAElFTkSuQmCC', + 'wizard': True, + 'options': [ + { + 'name': 'enabled', + 'type': 'enabler', + 'default': False, + }, + { + 'name': 'username', + 'default': '', + }, + { + 'name': 'password', + 'default': '', + 'type': 'password', + }, + { + 'name': 'seed_ratio', + 'label': 'Seed ratio', + 'type': 'float', + 'default': 1, + 'description': 'Will not be (re)moved until this seed ratio is met.', + }, + { + 'name': 'seed_time', + 'label': 'Seed time', + 'type': 'int', + 'default': 40, + 'description': 'Will not be (re)moved until this seed time (in hours) is met.', + }, + { + 'name': 'extra_score', + 'advanced': True, + 'label': 'Extra Score', + 'type': 'int', + 'default': 0, + 'description': 'Starting score for each release found via this provider.', + } + ], + }, + ], +}] diff --git a/couchpotato/core/media/_base/providers/torrent/thepiratebay.py b/couchpotato/core/media/_base/providers/torrent/thepiratebay.py new file mode 100644 index 0000000000..4e84ceb013 --- /dev/null +++ b/couchpotato/core/media/_base/providers/torrent/thepiratebay.py @@ -0,0 +1,205 @@ 
+import re +import traceback + +from bs4 import BeautifulSoup +from couchpotato.core.event import addEvent +from couchpotato.core.helpers.encoding import toUnicode +from couchpotato.core.helpers.variable import tryInt +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.torrent.base import TorrentMagnetProvider +import six + + +log = CPLog(__name__) + + +class Base(TorrentMagnetProvider): + + urls = { + 'detail': '%s/torrent/%s', + 'search': '%s/search/%%s/%%s/7/%%s' + } + + cat_backup_id = 200 + disable_provider = False + http_time_between_calls = 0 + + proxy_list = [ + 'https://pirateproxy.cat', + 'https://pirateproxy.wf', + 'https://pirateproxy.tf', + 'https://urbanproxy.eu', + 'https://piratebays.co', + 'https://pirateproxy.yt', + 'https://thepiratebay.uk.net', + 'https://thebay.tv', + 'https://thepirateproxy.co', + 'https://theproxypirate.pw', + 'https://arrr.xyz', + 'https://tpb.dashitz.com' + ] + + def __init__(self): + super(Base, self).__init__() + + addEvent('app.test', self.doTest) + + def _search(self, media, quality, results): + + page = 0 + total_pages = 1 + cats = self.getCatId(quality) + + base_search_url = self.urls['search'] % self.getDomain() + + while page < total_pages: + + search_url = base_search_url % self.buildUrl(media, page, cats) + + page += 1 + + data = self.getHTMLData(search_url) + + if data: + try: + soup = BeautifulSoup(data) + results_table = soup.find('table', attrs = {'id': 'searchResult'}) + + if not results_table: + return + + try: + total_pages = len(soup.find('div', attrs = {'align': 'center'}).find_all('a')) + except: + pass + + entries = results_table.find_all('tr') + for result in entries[1:]: + link = result.find(href = re.compile('torrent\/\d+\/')) + download = result.find(href = re.compile('magnet:')) + + try: + size = re.search('Size (?P.+),', six.text_type(result.select('font.detDesc')[0])).group('size') + except: + continue + + if link and download: + if self.conf('trusted_only'): + 
if result.find('img', alt = re.compile('Trusted')) is None and \ + result.find('img', alt = re.compile('VIP')) is None and \ + result.find('img', alt = re.compile('Helpers')) is None and \ + result.find('img', alt = re.compile('Moderator')) is None: + log.info('Skipped torrent %s, untrusted.' % link.string) + continue + + def extra_score(item): + trusted = (0, 10)[result.find('img', alt = re.compile('Trusted')) is not None] + vip = (0, 20)[result.find('img', alt = re.compile('VIP')) is not None] + confirmed = (0, 30)[result.find('img', alt = re.compile('Helpers')) is not None] + moderated = (0, 50)[result.find('img', alt = re.compile('Moderator')) is not None] + + return confirmed + trusted + vip + moderated + + results.append({ + 'id': re.search('/(?P\d+)/', link['href']).group('id'), + 'name': six.text_type(link.string), + 'url': download['href'], + 'detail_url': self.getDomain(link['href']), + 'size': self.parseSize(size), + 'seeders': tryInt(result.find_all('td')[2].string), + 'leechers': tryInt(result.find_all('td')[3].string), + 'extra_score': extra_score, + 'get_more_info': self.getMoreInfo + }) + + except: + log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc())) + + def isEnabled(self): + return super(Base, self).isEnabled() and self.getDomain() + + def correctProxy(self, data): + return 'title="Pirate Search"' in data + + def getMoreInfo(self, item): + full_description = self.getCache('tpb.%s' % item['id'], item['detail_url'], cache_timeout = 25920000) + html = BeautifulSoup(full_description) + nfo_pre = html.find('div', attrs = {'class': 'nfo'}) + description = '' + try: + description = toUnicode(nfo_pre.text) + except: + pass + + item['description'] = description + return item + + def doTest(self): + + for url in self.proxy_list: + try: + data = self.urlopen(url + '/search/test+search') + + if 'value="test+search"' in data: + log.info('Success %s', url) + continue + except: + log.error('%s', traceback.format_exc(0)) + + 
+config = [{ + 'name': 'thepiratebay', + 'groups': [ + { + 'tab': 'searcher', + 'list': 'torrent_providers', + 'name': 'ThePirateBay', + 'description': 'The world\'s largest bittorrent tracker. ThePirateBay', + 'wizard': True, + 'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAAAAAA6mKC9AAAA3UlEQVQY02P4DwT/YADIZvj//7qnozMYODmtAAusZoCDELDAegYGViZhAWZmRoYoqIDupfhNN1M3dTBEggXWMZg9jZRXV77YxhAOFpjDwMAPMoCXmcHsF1SAQZ6bQY2VgUEbKHClcAYzg3mINEO8jSCD478/DPsZmvqWblu1bOmStes3Pp0ezVDF4Gif0Hfx9///74/ObRZ2YNiZ47C8XIRBxFJR0jbSSUud4f9zAQWn8NTuziAt2zy5xIMM/z8LFX0E+fD/x0MRDCeA1v7Z++Y/FDzyvAtyBxIA+h8A8ZKLeT+lJroAAAAASUVORK5CYII=', + 'options': [ + { + 'name': 'enabled', + 'type': 'enabler', + 'default': False + }, + { + 'name': 'domain', + 'advanced': True, + 'label': 'Proxy server', + 'description': 'Domain for requests, keep empty to let CouchPotato pick.', + }, + { + 'name': 'seed_ratio', + 'label': 'Seed ratio', + 'type': 'float', + 'default': 1, + 'description': 'Will not be (re)moved until this seed ratio is met.', + }, + { + 'name': 'seed_time', + 'label': 'Seed time', + 'type': 'int', + 'default': 40, + 'description': 'Will not be (re)moved until this seed time (in hours) is met.', + }, + { + 'name': 'extra_score', + 'advanced': True, + 'label': 'Extra Score', + 'type': 'int', + 'default': 0, + 'description': 'Starting score for each release found via this provider.', + }, + { + 'name': 'trusted_only', + 'advanced': True, + 'label': 'Trusted/VIP Only', + 'type': 'bool', + 'default': False, + 'description': 'Only download releases marked as Trusted or VIP' + } + ], + } + ] +}] diff --git a/couchpotato/core/media/_base/providers/torrent/torrent9.py b/couchpotato/core/media/_base/providers/torrent/torrent9.py new file mode 100644 index 0000000000..6420412099 --- /dev/null +++ b/couchpotato/core/media/_base/providers/torrent/torrent9.py @@ -0,0 +1,152 @@ +from bs4 import BeautifulSoup +from couchpotato.core.helpers.variable import getTitle, tryInt +from couchpotato.core.logger import 
CPLog +from couchpotato.core.helpers.encoding import simplifyString, tryUrlencode +from couchpotato.core.media._base.providers.torrent.base import TorrentProvider +import cookielib +import re +import traceback +import urllib +import urllib2 +import unicodedata +from couchpotato.core.helpers import namer_check +import sys + +reload(sys) +sys.setdefaultencoding('utf-8') + +log = CPLog(__name__) + + +class Base(TorrentProvider): + urls = { + 'site': 'http://www.torrent9.pe/', + 'search': 'http://www.torrent9.pe/search_torrent/', + } + + def _search(self, movie, quality, results): + TitleStringReal = (getTitle(movie['info']) + ' ' + simplifyString(quality['identifier'])).replace('-',' ').replace(' ',' ').replace(' ',' ').replace(' ',' ').encode("utf-8") + log.info('Title %s', TitleStringReal) + URL = ((self.urls['search']) + TitleStringReal.replace('.', '-').replace(' ', '-') + '.html,trie-seeds-d').encode('utf-8') + + req = urllib2.Request(URL, headers={'User-Agent' : "Mozilla/5.0"}) + log.info('opening url %s', URL) + data = urllib2.urlopen(req,timeout=500) + log.info('data retrieved') + id = 1000 + + if data: + try: + html = BeautifulSoup(data) + torrent_rows = html.findAll('tr') + + for result in torrent_rows: + try: + if not result.find('a'): + continue + + title = result.find('a').get_text(strip=False) + log.info('found title %s',title) + + testname = namer_check.correctName(title.lower(),movie) + if testname == 0: + log.info('%s not match %s',(title.lower(),movie['info']['titles'])) + continue + log.info('title %s match',title) + + tmp = result.find("a")['href'].split('/')[-1].replace('.html', '.torrent').strip() + download_url = (self.urls['site'] + 'get_torrent/{0}'.format(tmp) + ".torrent") + detail_url = (self.urls['site'] + 'torrent/{0}'.format(tmp)) + log.debug('download_url %s',download_url) + + if not all([title, download_url]): + continue + + seeders = int(result.find(class_="seed_ok").get_text(strip=True)) + leechers = 
int(result.find_all('td')[3].get_text(strip=True)) + size = result.find_all('td')[1].get_text(strip=True) + + def extra_check(item): + return True + + size = size.lower() + size = size.replace("go", "gb") + size = size.replace("mo", "mb") + size = size.replace("ko", "kb") + size = size.replace(' ','') + size = self.parseSize(str(size)) + + new = {} + new['id'] = id + new['name'] = title.strip() + new['url'] = download_url + new['detail_url'] = detail_url + new['size'] = size + new['seeders'] = seeders + new['leechers'] = leechers + new['extra_check'] = extra_check + new['download'] = self.loginDownload + results.append(new) + log.info(results) + id = id + 1 + except StandardError, e: + log.info('boum %s',e) + continue + + except AttributeError: + log.debug('No search results found.') + else: + log.debug('No search results found.') + + def login(self): + log.info('Try to login on torrent9') + return True + + def download(self, url='', nzb_id=''): + log.debug('download %s',url) + req = urllib2.Request(url, headers={'User-Agent' : "Mozilla/5.0"}) + try: + return urllib2.urlopen(req).read() + except: + log.error('Failed downloading from %s: %s', (self.getName(), traceback.format_exc())) + + loginDownload = download + +config = [{ + 'name': 'torrent9', + 'groups': [{ + 'tab': 'searcher', + 'list': 'torrent_providers', + 'name': 'torrent9', + 'description': 'See Torrent9', + 'icon': 
'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAABHNCSVQICAgIfAhkiAAAAgZJREFUOI2lkj9oE2EYxn93l/Quf440gXg4lBoEMd2MDuLSkk0R6hCnuqjUoR0c7FDo4Ca0CDo7uRRBqEMDXSLUUqRDiZM1NMEI1VKTlDZpUppccvc5nJp/KooPfMPH+z3P+zzv+8F/Quq8XIVEEOY0kASIzpoLlBKUV+CuCblfCjyF/P3V1Qi6jrCs7k4eD/X1dS5NTy9tQaJD2MFDkA23W8UwQFGQRJcB0DS0cBg/DPY4a0OVZcHeHihKf1ifD6pVfGD/VmBAUeDwEGQZLAskCVQV6nVYW+M4lSLQo9stoKpQLoNtO2QhYHsbkkmOczm+AP5eBy/BfwRDn8GHJLkpFp3utRpkMpDLwckJvlCIM9Uqg6YZeAAj58E1CVlXCaaigcCjsWhU8Xq9UCo5lisVx4FhODFkGbdpMtlqXa4IsVUHYkLcVlbg3ddGo3AzErl2emLCGaCmwcAAuL4ntCxoNpFsG8O2odlkXojF17CgAK2PsJna2Xk/ViyOh0dHXWhaewaW1T6mSb5a5V6rtbAMU4D5c18FyCzu7i5fyWZvDMfjOh4PNBpd5A/5vLheq93ZhMc/eF0Lr0NhaX8/eS6djo/EYqfQdUekUuHNxsZR4uDg1id40f9J+qE/CwTeitlZIWZmxKtQqOSFi39D7IQy5/c/fxIMpoGhfyUDMAwXzsL4n958A9jfxsJ8X4WQAAAAAElFTkSuQmCC', + 'wizard': True, + 'options': [{ + 'name': 'enabled', + 'type': 'enabler', + 'default': False, + }, + { + 'name': 'seed_ratio', + 'label': 'Seed ratio', + 'type': 'float', + 'default': 1, + 'description': 'Will not be (re)moved until this seed ratio is met.', + }, + { + 'name': 'seed_time', + 'label': 'Seed time', + 'type': 'int', + 'default': 40, + 'description': 'Will not be (re)moved until this seed time (in hours) is met.', + }, + { + 'name': 'extra_score', + 'advanced': True, + 'label': 'Extra Score', + 'type': 'int', + 'default': 10, + 'description': 'Starting score for each release found via this provider.', + }], + },], +}] diff --git a/couchpotato/core/media/_base/providers/torrent/torrentbytes.py b/couchpotato/core/media/_base/providers/torrent/torrentbytes.py new file mode 100644 index 0000000000..bf225633be --- /dev/null +++ b/couchpotato/core/media/_base/providers/torrent/torrentbytes.py @@ -0,0 +1,138 @@ +import traceback + +from bs4 import BeautifulSoup +from couchpotato.core.helpers.encoding import tryUrlencode, toUnicode +from couchpotato.core.helpers.variable import tryInt +from couchpotato.core.logger import CPLog +from 
couchpotato.core.media._base.providers.torrent.base import TorrentProvider + + +log = CPLog(__name__) + + +class Base(TorrentProvider): + + urls = { + 'test': 'https://www.torrentbytes.net/', + 'login': 'https://www.torrentbytes.net/takelogin.php', + 'login_check': 'https://www.torrentbytes.net/inbox.php', + 'detail': 'https://www.torrentbytes.net/details.php?id=%s', + 'search': 'https://www.torrentbytes.net/browse.php?search=%s&cat=%d', + 'download': 'https://www.torrentbytes.net/download.php?id=%s&name=%s', + } + + cat_ids = [ + ([5], ['720p', '1080p', 'bd50']), + ([19], ['cam']), + ([19], ['ts', 'tc']), + ([19], ['r5', 'scr']), + ([19], ['dvdrip']), + ([19], ['brrip']), + ([20], ['dvdr']), + ] + + http_time_between_calls = 1 # Seconds + login_fail_msg = 'Username or password incorrect' + cat_backup_id = None + + def _searchOnTitle(self, title, movie, quality, results): + + url = self.urls['search'] % (tryUrlencode('%s %s' % (title.replace(':', ''), movie['info']['year'])), self.getCatId(quality)[0]) + data = self.getHTMLData(url) + + if data: + html = BeautifulSoup(data) + + try: + result_table = html.find('table', attrs = {'border': '1'}) + if not result_table: + return + + entries = result_table.find_all('tr') + + for result in entries[1:]: + cells = result.find_all('td') + + link = cells[1].find('a', attrs = {'class': 'index'}) + + full_id = link['href'].replace('details.php?id=', '') + torrent_id = full_id[:7] + name = toUnicode(link.get('title', link.contents[0]).encode('ISO-8859-1')).strip() + + results.append({ + 'id': torrent_id, + 'name': name, + 'url': self.urls['download'] % (torrent_id, name), + 'detail_url': self.urls['detail'] % torrent_id, + 'size': self.parseSize(cells[6].contents[0] + cells[6].contents[2]), + 'seeders': tryInt(cells[8].find('span').contents[0]), + 'leechers': tryInt(cells[9].find('span').contents[0]), + }) + + except: + log.error('Failed to parsing %s: %s', (self.getName(), traceback.format_exc())) + + def getLoginParams(self): 
+ return { + 'username': self.conf('username'), + 'password': self.conf('password'), + 'login': 'submit', + } + + def loginSuccess(self, output): + return 'logout.php' in output.lower() or 'Welcome' in output.lower() + + loginCheckSuccess = loginSuccess + + +config = [{ + 'name': 'torrentbytes', + 'groups': [ + { + 'tab': 'searcher', + 'list': 'torrent_providers', + 'name': 'TorrentBytes', + 'description': 'TorrentBytes', + 'wizard': True, + 'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAMAAAAoLQ9TAAAAeFBMVEUAAAAAAEQAA1QAEmEAKnQALHYAMoEAOokAQpIASYsASZgAS5UATZwATosATpgAVJ0AWZwAYZ4AZKAAaZ8Ab7IAcbMAfccAgcQAgcsAhM4AiscAjMkAmt0AoOIApecAp/EAqvQAs+kAt+wA3P8A4f8A//8VAAAfDbiaAl08AAAAjUlEQVQYGQXBO04DQRAFwHqz7Z8sECIl5f73ISRD5GBs7UxTlWfg9vYXnvJRQJqOL88D6BAwJtMMumHUVCl60aa6H93IrIv0b+157f1lpk+fm87lMWrZH0vncKbXdRUQrRmrh9C6Iwkq6rg4PXZcyXmbizzeV/g+rDra0rGve8jPKLSOJNi2AQAwAGjwD7ApPkEHdtPQAAAAAElFTkSuQmCC', + 'options': [ + { + 'name': 'enabled', + 'type': 'enabler', + 'default': False, + }, + { + 'name': 'username', + 'default': '', + }, + { + 'name': 'password', + 'default': '', + 'type': 'password', + }, + { + 'name': 'seed_ratio', + 'label': 'Seed ratio', + 'type': 'float', + 'default': 1, + 'description': 'Will not be (re)moved until this seed ratio is met.', + }, + { + 'name': 'seed_time', + 'label': 'Seed time', + 'type': 'int', + 'default': 40, + 'description': 'Will not be (re)moved until this seed time (in hours) is met.', + }, + { + 'name': 'extra_score', + 'advanced': True, + 'label': 'Extra Score', + 'type': 'int', + 'default': 20, + 'description': 'Starting score for each release found via this provider.', + } + ], + }, + ], +}] diff --git a/couchpotato/core/media/_base/providers/torrent/torrentday.py b/couchpotato/core/media/_base/providers/torrent/torrentday.py new file mode 100644 index 0000000000..ca50a72b4a --- /dev/null +++ b/couchpotato/core/media/_base/providers/torrent/torrentday.py @@ -0,0 +1,132 @@ +import re +from couchpotato.core.helpers.variable import tryInt 
+from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.torrent.base import TorrentProvider + +log = CPLog(__name__) + + +class Base(TorrentProvider): + + urls = { + 'test': 'https://www.torrentday.com/', + 'login': 'https://www.torrentday.com/t', + 'login_check': 'https://www.torrentday.com/userdetails.php', + 'detail': 'https://www.torrentday.com/details.php?id=%s', + 'search': 'https://www.torrentday.com/t.json?q=%s', + 'download': 'https://www.torrentday.com/download.php/%s/%s.torrent', + } + + http_time_between_calls = 1 # Seconds + + def loginDownload(self, url = '', nzb_id = ''): + try: + if not self.login(): + log.error('Failed downloading from %s', self.getName()) + return self.urlopen(url, headers=self.getRequestHeaders()) + except: + log.error('Failed downloading from %s: %s', (self.getName(), traceback.format_exc())) + + def _searchOnTitle(self, title, media, quality, results): + + query = '"%s" %s' % (title, media['info']['year']) + + data = { + 'q': query, + } + + data = self.getJsonData(self.urls['search'] % query, headers = self.getRequestHeaders()) + + for torrent in data: + results.append({ + 'id': torrent['t'], + 'name': torrent['name'], + 'url': self.urls['download'] % (torrent['t'], torrent['t']), + 'detail_url': self.urls['detail'] % torrent['t'], + 'size': tryInt(torrent['size']) / (1024 * 1024), + 'seeders': torrent['seeders'], + 'leechers': torrent['leechers'], + }) + + def getRequestHeaders(self): + return { + 'Cookie': self.conf('cookiesetting') or '' + } + + def getLoginParams(self): + return { + 'username': self.conf('username'), + 'password': self.conf('password'), + 'submit.x': 18, + 'submit.y': 11, + 'submit': 'submit', + } + + def loginSuccess(self, output): + often = re.search('You tried too often, please wait .*', output) + if often: + raise Exception(often.group(0)[:-6].strip()) + + return 'Password not correct' not in output + + def loginCheckSuccess(self, output): + return 'logout.php' in 
output.lower() + + +config = [{ + 'name': 'torrentday', + 'groups': [ + { + 'tab': 'searcher', + 'list': 'torrent_providers', + 'name': 'TorrentDay', + 'description': 'TorrentDay', + 'wizard': True, + 'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAC5ElEQVQ4y12TXUgUURTH//fO7Di7foeQJH6gEEEIZZllVohfSG/6UA+RSFAQQj74VA8+Bj30lmAlRVSEvZRfhNhaka5ZUG1paKaW39tq5O6Ou+PM3M4o6m6X+XPPzD3zm/+dcy574r515WfIW8CZBM4YAA5Gc/aQC3yd7oXYEONcsISE5dTDh91HS0t7FEWhBUAeN9ynV/d9qJAgE4AECURAcVsGlCCnly26LMA0IQwTa52dje3d3e3hcPi8qqrrMjcVYI3EHCQZlkFOHBwR2QHh2ASAAIJxWGAQEDxjePhs3527XjJwnb37OHBq0T+Tyyjh+9KnEzNJ7nouc1Q/3A3HGsOvnJy+PSUlj81w2Lny9WuJ6+3AmTjD4HOcrdR2dWXLRQePvyaSLfQOPMPC8mC9iHCsOxSyzJCelzdSXlNzD5ujpb25Wbfc/XXJemTXF4+nnCNq+AMLe50uFfEJTiw4GXSFtiHL0SnIq66+p0kSArqO+eH3RdsAv9+f5vW7L7GICq6rmM8XBCAXlBw90rOyxibn5yzfkg/L09M52/jxqdESaIrBXHYZZbB1GX8cEpySxKIB8S5XcOnvqpli1zuwmrTtoLjw5LOK/eeuWsE4JH5IRPaPZKiKigmPp+5pa+u1aEjIMhEgrRkmi9mgxGUhM7LNJSzOzsE3+cOeExovXOjdytE0LV4zqNZUtV0uZzAGoGkhDH/2YHZiErmv4uyWQnZZWc+hoqL3WzlTExN5hhA8IEwkZWZOxwB++30YG/9GkYCPvqAaHAW5uWPROW86OmqCprUR7z1yZDAGQNuCvkoB/baIKUBWMTYymv+gra3eJNvjXu+B562tFyXqTJ6YuHK8rKwvBmC3vR7cOCPQLWFz8LnfXWUrJo9U19BwMyUlJRjTSMJ2ENxUiGxq9KXQfwqYlnWstvbR5aamG9g0uzM8Q4OFt++3NNixQ2NgYmeN03FOTUv7XVpV9aKisvLl1vN/WVhNc/Fi1NEAAAAASUVORK5CYII=', + 'options': [ + { + 'name': 'enabled', + 'type': 'enabler', + 'default': False, + }, + { + 'name': 'username', + 'default': '', + }, + { + 'name': 'password', + 'default': '', + 'type': 'password', + }, + { + 'name': 'seed_ratio', + 'label': 'Seed ratio', + 'type': 'float', + 'default': 1, + 'description': 'Will not be (re)moved until this seed ratio is met.', + }, + { + 'name': 'seed_time', + 'label': 'Seed time', + 'type': 'int', + 'default': 40, + 'description': 'Will not be (re)moved until this seed time (in hours) is met.', + }, + { + 'name': 'cookiesetting', + 'label': 'Cookies', + 'default': '', + 'description': 'Cookies', + }, + { + 'name': 'extra_score', + 'advanced': True, + 'label': 'Extra Score', + 
'type': 'int', + 'default': 0, + 'description': 'Starting score for each release found via this provider.', + } + ], + }, + ], +}] diff --git a/couchpotato/core/media/_base/providers/torrent/torrentleech.py b/couchpotato/core/media/_base/providers/torrent/torrentleech.py new file mode 100644 index 0000000000..10886bc7d8 --- /dev/null +++ b/couchpotato/core/media/_base/providers/torrent/torrentleech.py @@ -0,0 +1,119 @@ +import traceback +import json +from bs4 import BeautifulSoup +from couchpotato.core.helpers.variable import tryInt +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.torrent.base import TorrentProvider +import six + + +log = CPLog(__name__) + + +class Base(TorrentProvider): + + urls = { + 'test': 'https://www.torrentleech.org/', + 'login': 'https://www.torrentleech.org/user/account/login/', + 'login_check': 'https://torrentleech.org/user/messages', + 'detail': 'https://www.torrentleech.org/torrent/%s', + 'search': 'https://www.torrentleech.org/torrents/browse/list/categories/%s/query/%s', + 'download': 'https://www.torrentleech.org/download/%s/%s', + } + + http_time_between_calls = 1 # Seconds + login_fail_msg = 'Invalid Username/password combination!' 
+ cat_backup_id = None + + def _searchOnTitle(self, title, media, quality, results): + urlParms = self.buildUrl(title, media, quality) + url = self.urls['search'] % (urlParms[1], urlParms[0]) + + data = self.getHTMLData(url) + jsonResults = json.loads(data) + + if jsonResults: + + try: + + for torrent in jsonResults['torrentList']: + link = self.urls['detail'] % torrent['fid'] + url = self.urls['download'] % (torrent['fid'], torrent['filename']) + currentResult = { + 'id': torrent['fid'], + 'name': six.text_type(torrent['name']), + 'url': url, + 'detail_url': link, + 'size': torrent['size']/1024/1024, + 'seeders': torrent['seeders'], + 'leechers': torrent['leechers'], + } + results.append(currentResult) + except: + log.error('Failed to parsing %s: %s', (self.getName(), traceback.format_exc())) + + def getLoginParams(self): + return { + 'username': self.conf('username'), + 'password': self.conf('password'), + 'remember_me': 'on', + 'login': 'submit', + } + + def loginSuccess(self, output): + return '/user/account/logout' in output.lower() or 'welcome back' in output.lower() + + loginCheckSuccess = loginSuccess + + +config = [{ + 'name': 'torrentleech', + 'groups': [ + { + 'tab': 'searcher', + 'list': 'torrent_providers', + 'name': 'TorrentLeech', + 'description': 'TorrentLeech', + 'wizard': True, + 'icon': 
'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAIAAACQkWg2AAACHUlEQVR4AZVSO48SYRSdGTCBEMKzILLAWiybkKAGMZRUUJEoDZX7B9zsbuQPYEEjNLTQkYgJDwsoSaxspEBsCITXjjNAIKi8AkzceXgmbHQ1NJ5iMufmO9/9zrmXlCSJ+B8o75J8Pp/NZj0eTzweBy0Wi4PBYD6f12o1r9ebTCZx+22HcrnMsuxms7m6urTZ7LPZDMVYLBZ8ZV3yo8aq9Pq0wzCMTqe77dDv9y8uLyAWBH6xWOyL0K/56fcb+rrPgPZ6PZfLRe1fsl6vCUmGKIqoqNXqdDr9Dbjps9znUV0uTqdTjuPkDoVCIfcuJ4gizjMMm8u9vW+1nr04czqdK56c37CbKY9j2+1WEARZ0Gq1RFHAz2q1qlQqXxoN69HRcDjUarW8ZD6QUigUOnY8uKYH8N1sNkul9yiGw+F6vS4Rxn8EsodEIqHRaOSnq9T7ajQazWQycEIR1AEBYDabSZJyHDucJyegwWBQr9ebTCaKvHd4cCQANUU9evwQ1Ofz4YvUKUI43GE8HouSiFiNRhOowWBIpVLyHITJkuW3PwgAEf3pgIwxF5r+OplMEsk3CPT5szCMnY7EwUdhwUh/CXiej0Qi3idPz89fdrpdbsfBzH7S3Q9K5pP4c0sAKpVKoVAQGO1ut+t0OoFAQHkH2Da/3/+but3uarWK0ZMQoNdyucRutdttmqZxMTzY7XaYxsrgtUjEZrNhkSwWyy/0NCatZumrNQAAAABJRU5ErkJggg==', + 'options': [ + { + 'name': 'enabled', + 'type': 'enabler', + 'default': False, + }, + { + 'name': 'username', + 'default': '', + }, + { + 'name': 'password', + 'default': '', + 'type': 'password', + }, + { + 'name': 'seed_ratio', + 'label': 'Seed ratio', + 'type': 'float', + 'default': 1, + 'description': 'Will not be (re)moved until this seed ratio is met.', + }, + { + 'name': 'seed_time', + 'label': 'Seed time', + 'type': 'int', + 'default': 40, + 'description': 'Will not be (re)moved until this seed time (in hours) is met.', + }, + { + 'name': 'extra_score', + 'advanced': True, + 'label': 'Extra Score', + 'type': 'int', + 'default': 20, + 'description': 'Starting score for each release found via this provider.', + } + ], + }, + ], +}] diff --git a/couchpotato/core/media/_base/providers/torrent/torrentpotato.py b/couchpotato/core/media/_base/providers/torrent/torrentpotato.py new file mode 100644 index 0000000000..5437f41301 --- /dev/null +++ b/couchpotato/core/media/_base/providers/torrent/torrentpotato.py @@ -0,0 +1,188 @@ +from urlparse import urlparse +import re +import traceback + +from couchpotato.core.helpers.encoding import toUnicode +from 
couchpotato.core.helpers.variable import splitString, tryInt, tryFloat +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.base import ResultList +from couchpotato.core.media._base.providers.torrent.base import TorrentProvider + + +log = CPLog(__name__) + + +class Base(TorrentProvider): + + urls = {} + limits_reached = {} + + http_time_between_calls = 1 # Seconds + + def search(self, media, quality): + hosts = self.getHosts() + + results = ResultList(self, media, quality, imdb_results = True) + + for host in hosts: + if self.isDisabled(host): + continue + + self._searchOnHost(host, media, quality, results) + + return results + + def _searchOnHost(self, host, media, quality, results): + + torrents = self.getJsonData(self.buildUrl(media, host), cache_timeout = 1800) + + if torrents: + try: + if torrents.get('error'): + log.error('%s: %s', (torrents.get('error'), host['host'])) + elif torrents.get('results'): + for torrent in torrents.get('results', []): + results.append({ + 'id': torrent.get('torrent_id'), + 'protocol': 'torrent' if re.match('^(http|https|ftp)://.*$', torrent.get('download_url')) else 'torrent_magnet', + 'provider_extra': urlparse(host['host']).hostname or host['host'], + 'name': toUnicode(torrent.get('release_name')), + 'url': torrent.get('download_url'), + 'detail_url': torrent.get('details_url'), + 'size': torrent.get('size'), + 'score': host['extra_score'], + 'seeders': torrent.get('seeders'), + 'leechers': torrent.get('leechers'), + 'seed_ratio': host['seed_ratio'], + 'seed_time': host['seed_time'], + }) + + except: + log.error('Failed getting results from %s: %s', (host['host'], traceback.format_exc())) + + def getHosts(self): + + uses = splitString(str(self.conf('use')), clean = False) + hosts = splitString(self.conf('host'), clean = False) + names = splitString(self.conf('name'), clean = False) + seed_times = splitString(self.conf('seed_time'), clean = False) + seed_ratios = 
splitString(self.conf('seed_ratio'), clean = False) + pass_keys = splitString(self.conf('pass_key'), clean = False) + extra_score = splitString(self.conf('extra_score'), clean = False) + + host_list = [] + for nr in range(len(hosts)): + + try: key = pass_keys[nr] + except: key = '' + + try: host = hosts[nr] + except: host = '' + + try: name = names[nr] + except: name = '' + + try: ratio = seed_ratios[nr] + except: ratio = '' + + try: seed_time = seed_times[nr] + except: seed_time = '' + + host_list.append({ + 'use': uses[nr], + 'host': host, + 'name': name, + 'seed_ratio': tryFloat(ratio), + 'seed_time': tryInt(seed_time), + 'pass_key': key, + 'extra_score': tryInt(extra_score[nr]) if len(extra_score) > nr else 0 + }) + + return host_list + + def belongsTo(self, url, provider = None, host = None): + + hosts = self.getHosts() + + for host in hosts: + result = super(Base, self).belongsTo(url, host = host['host'], provider = provider) + if result: + return result + + def isDisabled(self, host = None): + return not self.isEnabled(host) + + def isEnabled(self, host = None): + + # Return true if at least one is enabled and no host is given + if host is None: + for host in self.getHosts(): + if self.isEnabled(host): + return True + return False + + return TorrentProvider.isEnabled(self) and host['host'] and host['pass_key'] and int(host['use']) + + +config = [{ + 'name': 'torrentpotato', + 'groups': [ + { + 'tab': 'searcher', + 'list': 'torrent_providers', + 'name': 'TorrentPotato', + 'order': 10, + 'description': 'CouchPotato torrent provider. 
Checkout the wiki page about this provider for more info.', + 'wizard': True, + 'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAIAAACQkWg2AAAABnRSTlMAAAAAAABupgeRAAABSElEQVR4AZ2Nz0oCURTGv8t1YMpqUxt9ARFxoQ/gQtppgvUKcu/sxB5iBJkogspaBC6iVUplEC6kv+oiiKDNhAtt16roP0HQgdsMLgaxfvy4nHP4Pi48qE2g4v91JOqT1CH/UnA7w7icUlLawyEdj+ZI/7h6YluWbRiddHonHh9M70aj7VTKzuXuikUMci/EO/ACnAI15599oAk8AR/AgxBQNCzreD7bmpl+FOIVuAHqQDUcJo+AK+CZFKLt95/MpSmMt0TiW9POxse6UvYZ6zB2wFgjFiNpOGesR0rZ0PVPXf8KhUCl22CwClz4eN8weoZBb9c0bdPsOWvHx/cYu9Y0CoNoZTJrwAbn5DrnZc6XOV+igVbnsgo0IxEomlJuA1vUIYGyq3PZBChwmExCUSmVZgMBDIUCK4UCFIv5vHIhm/XUDeAf/ADbcpd5+aXSWQAAAABJRU5ErkJggg==', + 'options': [ + { + 'name': 'enabled', + 'type': 'enabler', + 'default': False, + }, + { + 'name': 'use', + 'default': '' + }, + { + 'name': 'host', + 'default': '', + 'description': 'The url path of your TorrentPotato provider.', + }, + { + 'name': 'extra_score', + 'advanced': True, + 'label': 'Extra Score', + 'default': '0', + 'description': 'Starting score for each release found via this provider.', + }, + { + 'name': 'name', + 'label': 'Username', + 'default': '', + }, + { + 'name': 'seed_ratio', + 'label': 'Seed ratio', + 'default': '1', + 'description': 'Will not be (re)moved until this seed ratio is met.', + }, + { + 'name': 'seed_time', + 'label': 'Seed time', + 'default': '40', + 'description': 'Will not be (re)moved until this seed time (in hours) is met.', + }, + { + 'name': 'pass_key', + 'default': ',', + 'label': 'Pass Key', + 'description': 'Can be found on your profile page', + 'type': 'combined', + 'combine': ['use', 'host', 'pass_key', 'name', 'seed_ratio', 'seed_time', 'extra_score'], + }, + ], + }, + ], +}] diff --git a/couchpotato/core/media/_base/providers/torrent/torrentshack.py b/couchpotato/core/media/_base/providers/torrent/torrentshack.py new file mode 100644 index 0000000000..683f559a4e --- /dev/null +++ b/couchpotato/core/media/_base/providers/torrent/torrentshack.py @@ -0,0 +1,135 @@ +import traceback + +from bs4 
import BeautifulSoup +from couchpotato.core.helpers.variable import tryInt +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.torrent.base import TorrentProvider +import six + + +log = CPLog(__name__) + + +class Base(TorrentProvider): + + urls = { + 'test': 'https://torrentshack.me/', + 'login': 'https://torrentshack.me/login.php', + 'login_check': 'https://torrentshack.me/inbox.php', + 'detail': 'https://torrentshack.me/torrent/%s', + 'search': 'https://torrentshack.me/torrents.php?action=advanced&searchstr=%s&scene=%s&filter_cat[%d]=1', + 'download': 'https://torrentshack.me/%s', + } + + http_time_between_calls = 1 # Seconds + login_fail_msg = 'You entered an invalid' + + def _search(self, media, quality, results): + + url = self.urls['search'] % self.buildUrl(media, quality) + data = self.getHTMLData(url) + + if data: + html = BeautifulSoup(data) + + try: + result_table = html.find('table', attrs = {'id': 'torrent_table'}) + if not result_table: + return + + entries = result_table.find_all('tr', attrs = {'class': 'torrent'}) + + for result in entries: + + link = result.find('span', attrs = {'class': 'torrent_name_link'}).parent + url = result.find('td', attrs = {'class': 'torrent_td'}).find('a') + size = result.find('td', attrs = {'class': 'size'}).contents[0].strip('\n ') + tds = result.find_all('td') + + results.append({ + 'id': link['href'].replace('torrents.php?torrentid=', ''), + 'name': six.text_type(link.span.string).translate({ord(six.u('\xad')): None}), + 'url': self.urls['download'] % url['href'], + 'detail_url': self.urls['download'] % link['href'], + 'size': self.parseSize(size), + 'seeders': tryInt(tds[len(tds)-2].string), + 'leechers': tryInt(tds[len(tds)-1].string), + }) + + except: + log.error('Failed to parsing %s: %s', (self.getName(), traceback.format_exc())) + + def getLoginParams(self): + return { + 'username': self.conf('username'), + 'password': self.conf('password'), + 'keeplogged': '1', + 'login': 
'Login', + } + + def loginSuccess(self, output): + return 'logout.php' in output.lower() + + loginCheckSuccess = loginSuccess + + def getSceneOnly(self): + return '1' if self.conf('scene_only') else '' + + +config = [{ + 'name': 'torrentshack', + 'groups': [ + { + 'tab': 'searcher', + 'list': 'torrent_providers', + 'name': 'TorrentShack', + 'description': 'TorrentShack', + 'wizard': True, + 'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAIAAACQkWg2AAABmElEQVQoFQXBzY2cVRiE0afqvd84CQiAnxWWtyxsS6ThINBYg2Dc7mZBMEjE4mzs6e9WcY5+ePNuVFJJodQAoLo+SaWCy9rcV8cmjah3CI6iYu7oRU30kE5xxELRfamklY3k1NL19sSm7vPzP/ZdNZzKVDaY2sPZJBh9fv5ITrmG2+Vp4e1sPchVqTCQZJnVXi+/L4uuAJGly1+Pw8CprLbi8Om7tbT19/XRqJUk11JP9uHj9ulxhXbvJbI9qJvr5YkGXFG2IBT8tXczt+sfzDZCp3765f3t9tHEHGEDACma77+8o4oATKk+/PfW9YmHruRFjWoVSFsVsGu1YSKq6Oc37+n98unPZSRlY7vsKDqN+92X3yR9+PdXee3iJNKMStqdcZqoTJbUSi5JOkpfRlhSI0mSpEmCFKoU7FqSNOLAk54uGwCStMUCgLrVic62g7oDoFmmdI+P3S0pDe1xvDqb6XrZqbtzShWNoh9fv/XQHaDdM9OqrZi2M7M3UrB2vlkPS1IbdEBk7UiSoD6VlZ6aKWer4aH4f/AvKoHUTjuyAAAAAElFTkSuQmCC', + 'options': [ + { + 'name': 'enabled', + 'type': 'enabler', + 'default': False, + }, + { + 'name': 'username', + 'default': '', + }, + { + 'name': 'password', + 'default': '', + 'type': 'password', + }, + { + 'name': 'seed_ratio', + 'label': 'Seed ratio', + 'type': 'float', + 'default': 1, + 'description': 'Will not be (re)moved until this seed ratio is met.', + }, + { + 'name': 'seed_time', + 'label': 'Seed time', + 'type': 'int', + 'default': 40, + 'description': 'Will not be (re)moved until this seed time (in hours) is met.', + }, + { + 'name': 'scene_only', + 'type': 'bool', + 'default': False, + 'description': 'Only allow scene releases.' 
+ }, + { + 'name': 'extra_score', + 'advanced': True, + 'label': 'Extra Score', + 'type': 'int', + 'default': 0, + 'description': 'Starting score for each release found via this provider.', + } + ], + }, + ], +}] diff --git a/couchpotato/core/media/_base/providers/torrent/torrentz.py b/couchpotato/core/media/_base/providers/torrent/torrentz.py new file mode 100644 index 0000000000..96e8025579 --- /dev/null +++ b/couchpotato/core/media/_base/providers/torrent/torrentz.py @@ -0,0 +1,123 @@ +import re +import traceback + +from couchpotato.core.helpers.encoding import tryUrlencode +from couchpotato.core.helpers.rss import RSS +from couchpotato.core.helpers.variable import tryInt, splitString +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.torrent.base import TorrentMagnetProvider +import six + + +log = CPLog(__name__) + + +class Base(TorrentMagnetProvider, RSS): + + urls = { + 'detail': 'https://torrentz2.eu/%s', + 'search': 'https://torrentz2.eu/feed?f=%s' + } + + http_time_between_calls = 0 + + def _searchOnTitle(self, title, media, quality, results): + + search_url = self.urls['search'] + + # Create search parameters + search_params = self.buildUrl(title, media, quality) + + min_seeds = tryInt(self.conf('minimal_seeds')) + if min_seeds: + search_params += ' seed > %s' % (min_seeds - 1) + + rss_data = self.getRSSData(search_url % search_params) + + if rss_data: + try: + + for result in rss_data: + + name = self.getTextElement(result, 'title') + detail_url = self.getTextElement(result, 'link') + description = self.getTextElement(result, 'description') + + magnet = splitString(detail_url, '/')[-1] + magnet_url = 'magnet:?xt=urn:btih:%s&dn=%s&tr=%s' % (magnet.upper(), tryUrlencode(name), tryUrlencode('udp://tracker.openbittorrent.com/announce')) + + reg = re.search('Size: (?P<size>\d+) (?P<unit>[KMG]B) Seeds: (?P<seeds>[\d,]+) Peers: (?P<peers>[\d,]+)', six.text_type(description)) + size = reg.group('size') + unit = reg.group('unit') + seeds = 
reg.group('seeds').replace(',', '') + peers = reg.group('peers').replace(',', '') + + multiplier = 1 + if unit == 'GB': + multiplier = 1000 + elif unit == 'KB': + multiplier = 0 + + results.append({ + 'id': magnet, + 'name': six.text_type(name), + 'url': magnet_url, + 'detail_url': detail_url, + 'size': tryInt(size)*multiplier, + 'seeders': tryInt(seeds), + 'leechers': tryInt(peers), + }) + + except: + log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc())) + + +config = [{ + 'name': 'torrentz', + 'groups': [ + { + 'tab': 'searcher', + 'list': 'torrent_providers', + 'name': 'Torrentz', + 'description': 'Torrentz.eu was a free, fast and powerful meta-search engine combining results from dozens of search engines, Torrentz2.eu is trying to replace it. Torrentz2', + 'wizard': True, + 'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAQklEQVQ4y2NgAALjtJn/ycEMlGiGG0IVAxiwAKzOxaKGARcgxgC8YNSAwWoAzuRMjgsIugqfAUR5CZcBRIcHsWEAADSA96Ig020yAAAAAElFTkSuQmCC', + 'options': [ + { + 'name': 'enabled', + 'type': 'enabler', + 'default': True + }, + { + 'name': 'minimal_seeds', + 'type': 'int', + 'default': 1, + 'advanced': True, + 'description': 'Only return releases with minimal X seeds', + }, + { + 'name': 'seed_ratio', + 'label': 'Seed ratio', + 'type': 'float', + 'default': 1, + 'description': 'Will not be (re)moved until this seed ratio is met.', + }, + { + 'name': 'seed_time', + 'label': 'Seed time', + 'type': 'int', + 'default': 40, + 'description': 'Will not be (re)moved until this seed time (in hours) is met.', + }, + { + 'name': 'extra_score', + 'advanced': True, + 'label': 'Extra Score', + 'type': 'int', + 'default': 0, + 'description': 'Starting score for each release found via this provider.', + } + ], + } + ] +}] diff --git a/couchpotato/core/media/_base/providers/torrent/xthor.py b/couchpotato/core/media/_base/providers/torrent/xthor.py new file mode 100644 index 0000000000..f974ffc7a2 --- /dev/null +++ 
b/couchpotato/core/media/_base/providers/torrent/xthor.py @@ -0,0 +1,81 @@ +from couchpotato.core.helpers.variable import tryInt +from couchpotato.core.logger import CPLog +from couchpotato.core.helpers.encoding import simplifyString, tryUrlencode +from couchpotato.core.media._base.providers.torrent.base import TorrentProvider +from couchpotato.core.helpers import namer_check +import json +import re +import unicodedata + +log = CPLog(__name__) + + +class Base(TorrentProvider): + + urls = { + 'search': 'https://api.xthor.tk/?passkey=%(passkey)s&search=&category=&freeleech=&tmdbid=%(tmdbid)s&size=', + 'detail': 'https://xthor.tk/details.php?id=%s' + } + + def _search(self, movie, quality, results): + url = self.urls['search'] % {'passkey': self.conf('passkey'), 'tmdbid': movie['info']['tmdb_id'] } + data = self.getJsonData(url) + + if data[u'error'][u'code'] == 0 and 'torrents' in data: + for currentresult in data['torrents']: + new = {} + + new['id'] = currentresult['id'] + new['name'] = currentresult['name'] + new['url'] = currentresult['download_link'] + new['detail_url'] = self.urls['detail'] % currentresult['id'] + new['size'] = tryInt(currentresult['size']) / 1024 / 1024 + new['seeders'] = tryInt(currentresult['seeders']) + new['leechers'] = tryInt(currentresult['leechers']) + + results.append(new) + return + + +config = [{ + 'name': 'xthor', + 'groups': [{ + 'tab': 'searcher', + 'list': 'torrent_providers', + 'name': 'xthor', + 'description': 'See xthor', + 'icon' : 
'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAIAAACQkWg2AAAACXBIWXMAAAsTAAALEwEAmpwYAAACRUlEQVR4nEWSS2/TQBSF752nHT+TJmmTtKVICIkFP4H/L7FBLBArpD5U+nIetWPH45m5wyIFzuboSN/ZfQh/wxgbJXk2mWfjWVpMIwEY3Prx9uH+tq7rEMIRw2NJKWfzVVIsWJyhTlk0Et5gGBKFksOvn9/v766PHw4AWuvlchlnSyw+AlNhfEXJGSBjQg6EZvc0mc6dte2+BgDOGFutzrWOgRcQFbD8jO++iLjEqKD2mZAHJoau0aPk0NR2MLwcl8X4EgBB51Cc8lGm2xvZPYj2jgVHfe0GQ0OHiDI9ada/2XS2xGQJagL5CoNVZlMuztI8jrDLLz8oKUHGgQKZLkqmaZYznZQkBWRTSCZMJ1GWyrQYXXzSk5XKptFswRiDeA5uYH0vVMq4kMA15mdifCmoD2ZnPPYWQnlhQHngqFIYtoAY3ADAGTJkSqBKpHnW6QQoeFU6YOHkyucr1+2DiECMACQAC+7AXLcbaSldTfU9E4pHZbj5SsTtvnM331zbBO9BJMBEoM57wzHQyeki1sp5G0wt8gXrqtBUrroeHn7YwZInQA3tsx36qrrnxpgyicbTuVAjaiu/uwUiiKeBSdtunWnB9PB6E1xfVXeHw4ETUd/tZ+OiHE9QJdS+2G7ruq3vm9BVfmihfQLf1fV6s1m/qTEMw+u2KrOoPHvPi/PgjTetbZ7soQ6HV3L9ZlNtNmsiejsAQN/3z48Pbl9FodMCOBKQPexf9/Wuql6apjnS/219G4hKKSEEIiPy1lrn3D+xj/kDN/1GOELQrVcAAAAASUVORK5CYII=', + 'wizard': True, + 'options': [{ + 'name': 'enabled', + 'type': 'enabler', + 'default': False, + }, + { + 'name': 'passkey', + 'default': '', + }, + { + 'name': 'seed_ratio', + 'label': 'Seed ratio', + 'type': 'float', + 'default': 1, + 'description': 'Will not be (re)moved until this seed ratio is met.', + }, + { + 'name': 'seed_time', + 'label': 'Seed time', + 'type': 'int', + 'default': 40, + 'description': 'Will not be (re)moved until this seed time (in hours) is met.', + }, + { + 'name': 'extra_score', + 'advanced': True, + 'label': 'Extra Score', + 'type': 'int', + 'default': 0, + 'description': 'Starting score for each release found via this provider.', + }], + },], +}] diff --git a/couchpotato/core/media/_base/providers/torrent/yts.py b/couchpotato/core/media/_base/providers/torrent/yts.py new file mode 100644 index 0000000000..674adc3cdf --- /dev/null +++ b/couchpotato/core/media/_base/providers/torrent/yts.py @@ -0,0 +1,130 @@ +from datetime import datetime +from couchpotato.core.helpers.variable import tryInt, getIdentifier +from couchpotato.core.logger import CPLog +from 
class Base(TorrentMagnetProvider):
    """Torrent provider for YTS (yts.am JSON API).

    Only 720p/1080p/3D qualities exist on YTS - other qualities will not
    match.  All YTS torrents are verified by the site.
    """

    urls = {
        'detail': 'https://yts.am/api#list_movies',
        'search': 'https://yts.am/api/v2/list_movies.json?query_term=%s&limit=%s&page=%s'
    }

    def _search(self, movie, quality, results):
        """Search YTS by IMDB identifier and append matching releases to `results`."""
        limit = 10
        page = 1
        data = self.getJsonData(self.urls['search'] % (getIdentifier(movie), limit, page))

        if not data:
            return

        movie_count = tryInt(data['data']['movie_count'])
        if movie_count == 0:
            log.debug('%s - found no results', (self.getName()))
            return

        for result in data['data']['movies']:
            name = result['title']
            year = result['year']
            detail_url = result['url']

            for torrent in result['torrents']:
                t_quality = torrent['quality']

                # quality['label'] is e.g. '720p'/'1080p'; substring match on purpose.
                if t_quality not in quality['label']:
                    continue

                size = tryInt(torrent['size_bytes'] / 1048576)  # bytes -> MB
                # date_uploaded format: 2017-02-17 18:40:03
                pubdate = datetime.strptime(torrent['date_uploaded'], '%Y-%m-%d %H:%M:%S')

                results.append({
                    'id': random.randint(100, 9999),  # YTS exposes no usable release id
                    'name': '%s (%s) %s %s %s' % (name, year, 'YTS', t_quality, 'BR-Rip'),
                    'url': self.make_magnet(torrent['hash'], name),
                    'size': size,
                    'seeders': tryInt(torrent['seeds']),
                    'leechers': tryInt(torrent['peers']),
                    'age': (datetime.now() - pubdate).days,
                    'detail_url': detail_url,
                    'score': 1
                })

        return

    def make_magnet(self, hash, name):
        """Build a magnet URI for info-hash `hash` with a fixed tracker list.

        The tracker list is pre-URL-encoded ('&tr=' separators included).
        """
        # Bug fix: a stray ']' used to terminate the last tracker URL,
        # corrupting the leechers-paradise announce address.
        url_encoded_trackers = 'udp%3A%2F%2Fopen.demonii.com%3A1337%2Fannounce&tr=%0Audp%3A%2F%2Ftracker.openbittorr' \
                               'ent.com%3A80&tr=%0Audp%3A%2F%2Ftracker.coppersurfer.tk%3A6969&tr=%0Audp%3A%2F%2Fglot' \
                               'orrents.pw%3A6969%2Fannounce&tr=%0Audp%3A%2F%2Ftracker.opentrackr.org%3A1337%2Fannou' \
                               'nce&tr=%0Audp%3A%2F%2Ftorrent.gresille.org%3A80%2Fannounce&tr=%0Audp%3A%2F%2Fp4p.are' \
                               'nabg.com%3A1337&tr=%0Audp%3A%2F%2Ftracker.leechers-paradise.org%3A6969'

        return 'magnet:?xt=urn:btih:%s&dn=%s&tr=%s' % (hash, name.replace(' ', '+'), url_encoded_trackers)
'type': 'int', + 'default': 0, + 'description': 'Starting score for each release found via this provider.', + } + ], + } + ] +}] diff --git a/libs/jinja2/testsuite/res/__init__.py b/couchpotato/core/media/_base/providers/userscript/__init__.py similarity index 100% rename from libs/jinja2/testsuite/res/__init__.py rename to couchpotato/core/media/_base/providers/userscript/__init__.py diff --git a/couchpotato/core/media/_base/providers/userscript/base.py b/couchpotato/core/media/_base/providers/userscript/base.py new file mode 100644 index 0000000000..3692294650 --- /dev/null +++ b/couchpotato/core/media/_base/providers/userscript/base.py @@ -0,0 +1,71 @@ +from urlparse import urlparse + +from couchpotato.core.event import addEvent, fireEvent +from couchpotato.core.helpers.encoding import simplifyString +from couchpotato.core.helpers.variable import getImdb, md5 +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.base import Provider + + +log = CPLog(__name__) + + +class UserscriptBase(Provider): + + type = 'userscript' + + version = 1 + http_time_between_calls = 0 + + includes = [] + excludes = [] + + def __init__(self): + addEvent('userscript.get_includes', self.getInclude) + addEvent('userscript.get_excludes', self.getExclude) + addEvent('userscript.get_provider_version', self.getVersion) + addEvent('userscript.get_movie_via_url', self.belongsTo) + + def search(self, name, year = None): + result = fireEvent('movie.search', q = '%s %s' % (name, year), limit = 1, merge = True) + + if len(result) > 0: + movie = fireEvent('movie.info', identifier = result[0].get('imdb'), extended = False, merge = True) + return movie + else: + return None + + def belongsTo(self, url): + + host = urlparse(url).hostname + host_split = host.split('.') + if len(host_split) > 2: + host = host[len(host_split[0]):] + + for include in self.includes: + if host in include: + return self.getMovie(url) + + return + + def getUrl(self, url): + return 
class Search(Plugin):
    """Registers the global 'search' API endpoint plus one per media type."""

    def __init__(self):

        addApiView('search', self.search, docs = {
            'desc': 'Search the info in providers for a movie',
            'params': {
                'q': {'desc': 'The (partial) movie name you want to search for'},
                'type': {'desc': 'Search for a specific media type. Leave empty to search all.'},
            },
            'return': {'type': 'object', 'example': """{
    'success': True,
    'movies': array,
    'show': array,
    etc
}"""}
        })

        addEvent('app.load', self.addSingleSearches)

    def search(self, q = '', types = None, **kwargs):
        """Search all (or only the given) media types for `q`.

        `types` may be a single type name or a list of them.  The HTTP API
        documents this parameter as 'type', so that spelling is accepted as
        an alias.  An IMDB id in `q` short-circuits to a direct info lookup.
        """

        # Bug fix: the documented 'type' parameter (and the per-type search
        # endpoints) used to pass 'type', which was silently swallowed by
        # **kwargs, so every search hit all media types.
        if not types:
            types = kwargs.get('type')

        # Make sure types is the correct instance
        if isinstance(types, (str, unicode)):
            types = [types]
        elif isinstance(types, (list, tuple, set)):
            types = list(types)

        imdb_identifier = getImdb(q)

        if not types:
            if imdb_identifier:
                result = fireEvent('movie.info', identifier = imdb_identifier, merge = True)
                result = {result['type']: [result]}
            else:
                result = fireEvent('info.search', q = q, merge = True)
        else:
            result = {}
            for media_type in types:
                if imdb_identifier:
                    result[media_type] = fireEvent('%s.info' % media_type, identifier = imdb_identifier)
                else:
                    result[media_type] = fireEvent('%s.search' % media_type, q = q)

        return mergeDicts({
            'success': True,
        }, result)

    def createSingleSearch(self, media_type):
        """Return an API handler that searches only `media_type`."""

        def singleSearch(q, **kwargs):
            # Pass the type under its real keyword so it is actually used.
            return self.search(q, types = media_type, **kwargs)

        return singleSearch

    def addSingleSearches(self):
        """Register a '<type>.search' API endpoint for every known media type."""

        for media_type in fireEvent('media.types', merge = True):
            addApiView('%s.search' % media_type, self.createSingleSearch(media_type))
'events': { + 'mousewheel': function(e){ + (e).stopPropagation(); + } + } + }).grab( + self.results = new Element('div.results') + ), + new Element('div.input').grab( + self.input = new Element('input', { + 'placeholder': 'Search & add a new media', + 'events': { + 'input': self.keyup.bind(self), + 'paste': self.keyup.bind(self), + 'change': self.keyup.bind(self), + 'keyup': self.keyup.bind(self), + 'focus': function(){ + if(focus_timer) clearRequestTimeout(focus_timer); + if(this.get('value')) + self.hideResults(false); + }, + 'blur': function(){ + focus_timer = requestTimeout(function(){ + self.el.removeClass('focused'); + self.last_q = null; + }, 100); + } + } + }) + ) + ) + ); + + self.mask = new Element('div.mask').inject(self.result_container); + + }, + + clear: function(e){ + var self = this; + (e).preventDefault(); + + if(self.last_q === ''){ + self.input.blur(); + self.last_q = null; + } + else { + + self.last_q = ''; + self.input.set('value', ''); + self.el.addClass('focused'); + self.input.focus(); + + self.media = {}; + self.results.empty(); + self.el.removeClass('filled'); + + // Animate in + if(self.options.animate){ + + dynamics.css(self.wrapper, { + opacity: 0, + scale: 0.1 + }); + + dynamics.animate(self.wrapper, { + opacity: 1, + scale: 1 + }, { + type: dynamics.spring, + frequency: 200, + friction: 270, + duration: 800 + }); + + } + + } + }, + + hideResults: function(bool){ + var self = this; + + if(self.hidden == bool) return; + + self.el[bool ? 'removeClass' : 'addClass']('shown'); + + if(bool){ + History.removeEvent('change', self.hideResults.bind(self, !bool)); + self.el.removeEvent('outerClick', self.hideResults.bind(self, !bool)); + } + else { + History.addEvent('change', self.hideResults.bind(self, !bool)); + self.el.addEvent('outerClick', self.hideResults.bind(self, !bool)); + } + + self.hidden = bool; + }, + + keyup: function(){ + var self = this; + + self.el[self.q() ? 
'addClass' : 'removeClass']('filled'); + + if(self.q() != self.last_q){ + if(self.api_request && self.api_request.isRunning()) + self.api_request.cancel(); + + if(self.autocomplete_timer) clearRequestTimeout(self.autocomplete_timer); + self.autocomplete_timer = requestTimeout(self.autocomplete.bind(self), 300); + } + + }, + + autocomplete: function(){ + var self = this; + + if(!self.q()){ + self.hideResults(true); + return; + } + + self.list(); + }, + + list: function(){ + var self = this, + q = self.q(), + cache = self.cache[q]; + + self.hideResults(false); + + if(!cache){ + requestTimeout(function(){ + self.mask.addClass('show'); + }, 10); + + if(!self.spinner) + self.spinner = createSpinner(self.mask); + + self.api_request = Api.request('search', { + 'data': { + 'q': q + }, + 'onComplete': self.fill.bind(self, q) + }); + } + else + self.fill(q, cache); + + self.last_q = q; + + }, + + fill: function(q, json){ + var self = this; + + self.cache[q] = json; + + self.media = {}; + self.results.empty(); + + Object.each(json, function(media){ + if(typeOf(media) == 'array'){ + Object.each(media, function(me){ + + var m = new window['BlockSearch' + me.type.capitalize() + 'Item'](me); + $(m).inject(self.results); + self.media[m.imdb || 'r-'+Math.floor(Math.random()*10000)] = m; + + if(q == m.imdb) + m.showOptions(); + + }); + } + }); + + self.mask.removeClass('show'); + + }, + + loading: function(bool){ + this.el[bool ? 
'addClass' : 'removeClass']('loading'); + }, + + q: function(){ + return this.input.get('value').trim(); + } + +}); diff --git a/couchpotato/core/media/_base/search/static/search.scss b/couchpotato/core/media/_base/search/static/search.scss new file mode 100644 index 0000000000..b9876b16c9 --- /dev/null +++ b/couchpotato/core/media/_base/search/static/search.scss @@ -0,0 +1,535 @@ +@import "_mixins"; + +.search_form { + display: inline-block; + z-index: 11; + width: 44px; + position: relative; + + * { + transform: rotateZ(360deg); // Mobile IE redraw fix + } + + .icon-search { + position: absolute; + z-index: 2; + top: 50%; + left: 0; + height: 100%; + text-align: center; + color: #FFF; + font-size: 20px; + transform: translateY(-50%); + + &:hover { + @include theme(background, menu_off); + } + } + + .wrapper { + position: absolute; + left: 44px; + bottom: 0; + @include theme(background, primary); + border-radius: $border_radius 0 0 $border_radius; + display: none; + box-shadow: 0 0 15px 2px rgba(0,0,0,.15); + + @include theme-dark { + box-shadow: 0 5px 15px 2px rgba(0,0,0,.4); + } + + &:before { + transform: rotate(45deg); + content: ''; + display: block; + position: absolute; + height: 10px; + width: 10px; + @include theme(background, primary); + left: -6px; + bottom: 16px; + z-index: 1; + } + } + + .input { + @include theme(background, background); + border-radius: $border_radius 0 0 $border_radius; + position: relative; + left: 4px; + height: 44px; + overflow: hidden; + width: 100%; + + input { + position: absolute; + top: 0; + left: 0; + height: 100%; + width: 100%; + z-index: 1; + + &::-ms-clear { + width : 0; + height: 0; + } + + &:focus { + background: rgba(255,255,255, .2); + + @include theme-dark { + background: rgba(0,0,0, .2); + } + + &::-webkit-input-placeholder { + opacity: .7; + } + &::-moz-placeholder { + opacity: .7; + } + &:-ms-input-placeholder { + opacity: .7; + } + } + } + } + + &.filled { + &.focused .icon-search:before, + .page.home & 
.icon-search:before { + content: '\e80e'; + } + + .input input { + background: rgba(255,255,255,.3); + + @include theme-dark { + background: rgba(0,0,0,.3); + } + } + } + + &.focused, + &.shown, + .page.home & { + border-color: #04bce6; + + .wrapper { + display: block; + width: 380px; + transform-origin: 0 90%; + + @include media-phablet { + width: 260px; + } + } + + .input { + + input { + opacity: 1; + } + } + } + + .results_container { + min-height: 50px; + text-align: left; + position: relative; + left: 4px; + display: none; + @include theme(background, background); + border-radius: $border_radius 0 0 0; + overflow: hidden; + + .results { + max-height: 280px; + overflow-x: hidden; + + .media_result { + overflow: hidden; + height: 50px; + position: relative; + + @include media-phablet { + font-size: 12px; + } + + .options { + position: absolute; + height: 100%; + top: 0; + left: 30px; + right: 0; + display: flex; + align-items: center; + background: get-theme(off); + + @include theme-dark { + background: get-theme-dark(off); + } + + @include media-phablet { + left: 0; + } + + > .in_library_wanted { + margin-top: -7px; + } + + > div { + border: 0; + display: flex; + padding: 10px; + align-items: stretch; + justify-content: space-between; + + @include media-phablet { + padding: 3px; + } + } + + select { + display: block; + height: 100%; + width: 100%; + + @include media-phablet { + min-width: 0; + margin-right: 2px; + } + } + + .title { + margin-right: 5px; + width: 210px; + + @include media-phablet { + width: 140px; + margin-right: 2px; + } + } + + .profile, .category { + margin: 0 5px 0 0; + + @include media-phablet { + margin-right: 2px; + } + } + + .add { + width: 42px; + flex: 1 auto; + + a { + color: #FFF; + } + } + + .button { + display: block; + @include theme(background, primary); + text-align: center; + margin: 0; + } + + .message { + font-size: 20px; + color: #fff; + } + + } + + .thumbnail { + width: 30px; + min-height: 100%; + display: block; + margin: 
0; + vertical-align: top; + + @include media-phablet { + display: none; + } + } + + .data { + position: absolute; + height: 100%; + top: 0; + left: 30px; + right: 0; + cursor: pointer; + border-top: 1px solid rgba(255,255,255, 0.08); + transition: all .4s cubic-bezier(0.9,0,0.1,1); + will-change: transform; + transform: translateX(0) rotateZ(360deg); + @include theme(background, background); + + @include theme-dark { + border-color: rgba(255,255,255, 0.08); + } + + @include media-phablet { + left: 0; + } + + &:hover { + transform: translateX(2%) rotateZ(360deg); + } + + &.open { + transform: translateX(100%) rotateZ(360deg); + } + + .info { + position: absolute; + top: 20%; + left: 15px; + right: 7px; + vertical-align: middle; + + h2 { + margin: 0; + font-weight: 300; + font-size: 1.25em; + padding: 0; + position: absolute; + width: 100%; + display: flex; + + .title { + display: inline-block; + margin: 0; + text-overflow: ellipsis; + overflow: hidden; + white-space: nowrap; + flex: 1 auto; + } + + .year { + opacity: .4; + padding: 0 5px; + width: auto; + } + + .in_wanted, + .in_library { + position: absolute; + top: 15px; + left: 0; + font-size: 11px; + @include theme(color, primary); + } + + &.in_library_wanted { + .title { + margin-top: -7px; + } + } + } + } + } + + &:hover .info h2 .year { + display: inline-block; + } + + &:last-child .data { + border-bottom: 0; + } + } + + } + } + + &.focused.filled, + &.shown.filled { + .results_container { + display: block; + } + + .input { + border-radius: 0 0 0 $border_radius; + } + } + + .page.home & { + $input_height: 66px; + $input_height_mobile: 44px; + + display: block; + padding: $padding; + width: 100%; + max-width: 500px; + margin: 0 auto; + height: $input_height + 2*$padding; + position: relative; + margin-top: $padding; + + @include media-phablet { + margin-top: $padding/2; + height: $input_height_mobile + $padding; + } + + .icon-search { + display: block; + @include theme(color, text); + right: $padding; + top: 
$padding; + width: $input_height; + height: $input_height; + line-height: $input_height; + left: auto; + transform: none; + font-size: 2em; + opacity: .5; + + &:hover { + background: none; + } + + @include media-phablet { + right: $padding/2; + width: $input_height_mobile; + height: $input_height_mobile; + line-height: $input_height_mobile; + right: $padding/2; + top: $padding/2; + font-size: 1.5em; + } + } + + .wrapper { + border-radius: 0; + box-shadow: none; + bottom: auto; + top: $padding; + left: $padding; + right: $padding; + position: absolute; + width: auto; + + @include media-phablet { + right: $padding/2; + top: $padding/2; + left: $padding/2; + } + + &:before { + display: none; + } + + .input { + border-radius: 0; + left: 0; + position: absolute; + top: 0; + height: $input_height; + + @include media-phablet { + height: $input_height_mobile; + } + + input { + box-shadow: 0; + font-size: 2em; + font-weight: 400; + padding-right: $input_height; + @include theme(background, background); + + @include media-phablet { + padding-right: $input_height_mobile; + font-size: 1em; + } + } + } + + .results_container { + min-height: $input_height; + position: absolute; + top: $input_height; + left: 0; + right: 0; + border: 1px solid get-theme(off); + border-top: 0; + + @include theme-dark { + border-color: get-theme-dark(off); + } + + @include media-phablet { + top: $input_height_mobile; + min-height: $input_height_mobile; + } + + + @include media-phablet-and-up { + .results { + max-height: 400px; + + .media_result { + height: $input_height; + + + @include media-phablet { + height: $input_height_mobile; + } + + .thumbnail { + width: 40px; + } + + .options { + left: 40px; + + .title { + margin-right: 5px; + width: 320px; + + @include media-phablet { + width: 140px; + margin-right: 2px; + } + } + } + + .data { + left: 40px; + } + } + } + } + + + @include media-phablet { + .results { + .media_result { + height: $input_height_mobile; + + .options { + + .title { + + width: 
140px; + margin-right: 2px; + } + + } + + } + } + } + } + + } + + + } + +} + +.big_search { + @include theme(background, off); +} diff --git a/couchpotato/core/media/_base/searcher/__init__.py b/couchpotato/core/media/_base/searcher/__init__.py new file mode 100644 index 0000000000..2bf06ebc18 --- /dev/null +++ b/couchpotato/core/media/_base/searcher/__init__.py @@ -0,0 +1,103 @@ +from .main import Searcher + + +def autoload(): + return Searcher() + +config = [{ + 'name': 'searcher', + 'order': 20, + 'groups': [ + { + 'tab': 'searcher', + 'name': 'searcher', + 'label': 'Basics', + 'description': 'General search options', + 'options': [ + { + 'name': 'preferred_method', + 'label': 'First search', + 'description': 'Which of the methods do you prefer', + 'default': 'both', + 'type': 'dropdown', + 'values': [('usenet & torrents', 'both'), ('usenet', 'nzb'), ('torrents', 'torrent')], + }, + ], + }, { + 'tab': 'searcher', + 'subtab': 'category', + 'subtab_label': 'Categories', + 'name': 'filter', + 'label': 'Global filters', + 'description': 'Prefer, ignore & required words in release names', + 'options': [ + { + 'name': 'preferred_words', + 'label': 'Preferred', + 'default': '', + 'placeholder': 'Example: CtrlHD, Amiable, Wiki', + 'description': 'Words that give the releases a higher score.' + }, + { + 'name': 'required_words', + 'label': 'Required', + 'default': '', + 'placeholder': 'Example: DTS, AC3 & English', + 'description': 'Release should contain at least one set of words. Sets are separated by "," and each word within a set must be separated with "&"' + }, + { + 'name': 'ignored_words', + 'label': 'Ignored', + 'default': 'german, dutch, french, truefrench, danish, swedish, spanish, italian, korean, dubbed, swesub, korsub, dksubs, vain, HC', + 'description': 'Ignores releases that match any of these sets. 
class SearcherBase(Plugin):
    """Common base for per-media-type searchers.

    Wires up progress reporting events and the periodic full-search cron.
    Subclasses are expected to provide getType() and searchAll() (not
    defined here - confirm against the concrete searcher classes).
    """

    # True while a full search for this type is running (see getProgress).
    in_progress = False

    def __init__(self):
        super(SearcherBase, self).__init__()

        # Answer both the global and the type-specific progress queries.
        addEvent('searcher.progress', self.getProgress)
        addEvent('%s.searcher.progress' % self.getType(), self.getProgress)

        self.initCron()

    def initCron(self):
        """ Set the searcher cronjob
        Make sure to reset cronjob after setting has changed
        """

        _type = self.getType()

        def setCrons():
            # (Re)register the cron using the current cron_* settings.
            fireEvent('schedule.cron', '%s.searcher.all' % _type, self.searchAll,
                day = self.conf('cron_day'), hour = self.conf('cron_hour'), minute = self.conf('cron_minute'))

        # Schedule once on startup, then reschedule whenever a cron setting changes.
        addEvent('app.load', setCrons)
        addEvent('setting.save.%s_searcher.cron_day.after' % _type, setCrons)
        addEvent('setting.save.%s_searcher.cron_hour.after' % _type, setCrons)
        addEvent('setting.save.%s_searcher.cron_minute.after' % _type, setCrons)

    def getProgress(self, **kwargs):
        """ Return progress of current searcher"""

        progress = {
            self.getType(): self.in_progress
        }

        return progress
+ results[_type] = fireEvent('%s.searcher.all_view' % _type) + + return results + + def getProgressForAll(self): + progress = fireEvent('searcher.progress', merge = True) + return progress + + def search(self, protocols, media, quality): + results = [] + + for search_protocol in protocols: + protocol_results = fireEvent('provider.search.%s.%s' % (search_protocol, media.get('type')), media, quality, merge = True) + if protocol_results: + results += protocol_results + + sorted_results = sorted(results, key = lambda k: k['score'], reverse = True) + + download_preference = self.conf('preferred_method', section = 'searcher') + if download_preference != 'both': + sorted_results = sorted(sorted_results, key = lambda k: k['protocol'][:3], reverse = (download_preference == 'torrent')) + + return sorted_results + + def getSearchProtocols(self): + + download_protocols = fireEvent('download.enabled_protocols', merge = True) + provider_protocols = fireEvent('provider.enabled_protocols', merge = True) + + if download_protocols and len(list(set(provider_protocols) & set(download_protocols))) == 0: + log.error('There aren\'t any providers enabled for your downloader (%s). Check your settings.', ','.join(download_protocols)) + return [] + + for useless_provider in list(set(provider_protocols) - set(download_protocols)): + log.debug('Provider for "%s" enabled, but no downloader.', useless_provider) + + search_protocols = download_protocols + + if len(search_protocols) == 0: + log.error('There aren\'t any downloaders enabled. 
Please pick one in settings.') + return [] + + return search_protocols + + def containsOtherQuality(self, nzb, movie_year = None, preferred_quality = None): + if not preferred_quality: preferred_quality = {} + + found = {} + + # Try guessing via quality tags + guess = fireEvent('quality.guess', files = [nzb.get('name')], size = nzb.get('size', None), single = True) + if guess: + found[guess['identifier']] = True + + # Hack for older movies that don't contain quality tag + name = nzb['name'] + size = nzb.get('size', 0) + + year_name = fireEvent('scanner.name_year', name, single = True) + if len(found) == 0 and movie_year < datetime.datetime.now().year - 3 and not year_name.get('year', None): + if size > 20000: # Assume bd50 + log.info('Quality was missing in name, assuming it\'s a BR-Disk based on the size: %s', size) + found['bd50'] = True + elif size > 3000: # Assume dvdr + log.info('Quality was missing in name, assuming it\'s a DVD-R based on the size: %s', size) + found['dvdr'] = True + else: # Assume dvdrip + log.info('Quality was missing in name, assuming it\'s a DVD-Rip based on the size: %s', size) + found['dvdrip'] = True + + # Allow other qualities + for allowed in preferred_quality.get('allow'): + if found.get(allowed): + del found[allowed] + + if found.get(preferred_quality['identifier']) and len(found) == 1: + return False + + return found + + def correct3D(self, nzb, preferred_quality = None): + if not preferred_quality: preferred_quality = {} + if not preferred_quality.get('custom'): return + + threed = preferred_quality['custom'].get('3d') + + # Try guessing via quality tags + guess = fireEvent('quality.guess', [nzb.get('name')], single = True) + + if guess: + return threed == guess.get('is_3d') + # If no quality guess, assume not 3d + else: + return threed == False + + def correctYear(self, haystack, year, year_range): + + if not isinstance(haystack, (list, tuple, set)): + haystack = [haystack] + + year_name = {} + for string in haystack: + + 
year_name = fireEvent('scanner.name_year', string, single = True) + + if year_name and ((year - year_range) <= year_name.get('year') <= (year + year_range)): + log.debug('Movie year matches range: %s looking for %s', (year_name.get('year'), year)) + return True + + log.debug('Movie year doesn\'t matche range: %s looking for %s', (year_name.get('year'), year)) + return False + + def correctName(self, check_name, movie_name): + + check_names = [check_name] + + # Match names between " + try: check_names.append(re.search(r'([\'"])[^\1]*\1', check_name).group(0)) + except: pass + + # Match longest name between [] + try: check_names.append(max(re.findall(r'[^[]*\[([^]]*)\]', check_name), key = len).strip()) + except: pass + + for check_name in removeDuplicate(check_names): + check_movie = fireEvent('scanner.name_year', check_name, single = True) + + try: + check_words = removeEmpty(re.split('\W+', check_movie.get('name', ''))) + movie_words = removeEmpty(re.split('\W+', simplifyString(movie_name))) + + if len(check_words) > 0 and len(movie_words) > 0 and len(list(set(check_words) - set(movie_words))) == 0: + return True + except: + pass + + return False + + def containsWords(self, rel_name, rel_words, conf, media): + + # Make sure it has required words + words = splitString(self.conf('%s_words' % conf, section = 'searcher').lower()) + try: words = removeDuplicate(words + splitString(media['category'][conf].lower())) + except: pass + + req_match = 0 + for req_set in words: + if len(req_set) >= 2 and (req_set[:1] + req_set[-1:]) == '//': + if re.search(req_set[1:-1], rel_name): + log.debug('Regex match: %s', req_set[1:-1]) + req_match += 1 + else: + req = splitString(req_set, '&') + req_match += len(list(set(rel_words) & set(req))) == len(req) + + return words, req_match > 0 + + def correctWords(self, rel_name, media): + media_title = fireEvent('searcher.get_search_title', media, single = True) + media_words = re.split('\W+', simplifyString(media_title)) + + rel_name = 
simplifyString(rel_name) + rel_words = re.split('\W+', rel_name) + + required_words, contains_required = self.containsWords(rel_name, rel_words, 'required', media) + if len(required_words) > 0 and not contains_required: + log.info2('Wrong: Required word missing: %s', rel_name) + return False + + ignored_words, contains_ignored = self.containsWords(rel_name, rel_words, 'ignored', media) + if len(ignored_words) > 0 and contains_ignored: + log.info2("Wrong: '%s' contains 'ignored words'", rel_name) + return False + + # Ignore porn stuff + pron_tags = ['xxx', 'sex', 'anal', 'tits', 'fuck', 'porn', 'orgy', 'milf', 'boobs', 'erotica', 'erotic', 'cock', 'dick'] + pron_words = list(set(rel_words) & set(pron_tags) - set(media_words)) + if pron_words: + log.info('Wrong: %s, probably pr0n', rel_name) + return False + + return True + + def correctLanguage(self, rel_name, media): + # retrieving the base configuration + dubbedVersion = self.conf('dubbed_version', section = 'searcher') + + # retrieving the category configuration + try: dubbedVersion = media['category']['dubbed_version'] + except: pass + + if 'languages' in media['info']: + releaseMetaDatas = media['info']['languages'] + + rel_name = simplifyString(rel_name) + rel_words = re.split('\W+', rel_name) + upper_rel_words = [x.upper() for x in rel_words] + + languageWordFound = False; + for word in upper_rel_words: + matchingTuples = [item for item in getAllLanguages() if item[1].upper() == word] + if matchingTuples and any(matchingTuples): + languageWordFound = True; + + if dubbedVersion: + if 'FRENCH' in upper_rel_words or 'TRUEFRENCH' in upper_rel_words or 'MULTI' in upper_rel_words: + return True; + + if languageWordFound == False and 'FRENCH' in releaseMetaDatas: + return True; + else: + if any(l for l in upper_rel_words if l.upper() in releaseMetaDatas) or 'MULTI' in upper_rel_words: + return True; + + if languageWordFound == False: + return True; + else: + return True; + + return False + +class 
SearchSetupError(Exception): + pass diff --git a/couchpotato/core/media/movie/__init__.py b/couchpotato/core/media/movie/__init__.py new file mode 100644 index 0000000000..898529c17d --- /dev/null +++ b/couchpotato/core/media/movie/__init__.py @@ -0,0 +1,6 @@ +from couchpotato.core.media import MediaBase + + +class MovieTypeBase(MediaBase): + + _type = 'movie' diff --git a/couchpotato/core/media/movie/_base/__init__.py b/couchpotato/core/media/movie/_base/__init__.py new file mode 100644 index 0000000000..14720463ef --- /dev/null +++ b/couchpotato/core/media/movie/_base/__init__.py @@ -0,0 +1,5 @@ +from .main import MovieBase + + +def autoload(): + return MovieBase() diff --git a/couchpotato/core/media/movie/_base/main.py b/couchpotato/core/media/movie/_base/main.py new file mode 100755 index 0000000000..6c4298c423 --- /dev/null +++ b/couchpotato/core/media/movie/_base/main.py @@ -0,0 +1,347 @@ +import traceback +import time + +from CodernityDB.database import RecordNotFound +from couchpotato import get_db +from couchpotato.api import addApiView +from couchpotato.core.event import fireEvent, fireEventAsync, addEvent +from couchpotato.core.helpers.encoding import toUnicode +from couchpotato.core.helpers.variable import splitString, getTitle, getImdb, getIdentifier +from couchpotato.core.logger import CPLog +from couchpotato.core.media.movie import MovieTypeBase +import six + + +log = CPLog(__name__) + + +class MovieBase(MovieTypeBase): + + _type = 'movie' + + def __init__(self): + + # Initialize this type + super(MovieBase, self).__init__() + self.initType() + + addApiView('movie.add', self.addView, docs = { + 'desc': 'Add new movie to the wanted list', + 'return': {'type': 'object', 'example': """{ + 'success': True, + 'movie': object +}"""}, + 'params': { + 'identifier': {'desc': 'IMDB id of the movie your want to add.'}, + 'profile_id': {'desc': 'ID of quality profile you want the add the movie in. 
If empty will use the default profile.'}, + 'force_readd': {'desc': 'Force re-add even if movie already in wanted or manage. Default: True'}, + 'category_id': {'desc': 'ID of category you want the add the movie in. If empty will use no category.'}, + 'title': {'desc': 'Movie title to use for searches. Has to be one of the titles returned by movie.search.'}, + } + }) + addApiView('movie.edit', self.edit, docs = { + 'desc': 'Add new movie to the wanted list', + 'params': { + 'id': {'desc': 'Movie ID(s) you want to edit.', 'type': 'int (comma separated)'}, + 'profile_id': {'desc': 'ID of quality profile you want the edit the movie to.'}, + 'category_id': {'desc': 'ID of category you want the add the movie in. If empty will use no category.'}, + 'default_title': {'desc': 'Movie title to use for searches. Has to be one of the titles returned by movie.search.'}, + } + }) + + addEvent('movie.add', self.add) + addEvent('movie.update', self.update) + addEvent('movie.update_release_dates', self.updateReleaseDate) + + def add(self, params = None, force_readd = True, search_after = True, update_after = True, notify_after = True, status = None): + if not params: params = {} + + # Make sure it's a correct zero filled imdb id + params['identifier'] = getImdb(params.get('identifier', '')) + + if not params.get('identifier'): + msg = 'Can\'t add movie without imdb identifier.' + log.error(msg) + fireEvent('notify.frontend', type = 'movie.is_tvshow', message = msg) + return False + elif not params.get('info'): + try: + is_movie = fireEvent('movie.is_movie', identifier = params.get('identifier'), adding = True, single = True) + if not is_movie: + msg = 'Can\'t add movie, seems to be a TV show.' 
+ log.error(msg) + fireEvent('notify.frontend', type = 'movie.is_tvshow', message = msg) + return False + except: + pass + + info = params.get('info') + if not info or (info and len(info.get('titles', [])) == 0): + info = fireEvent('movie.info', merge = True, extended = False, identifier = params.get('identifier')) + + # Allow force re-add overwrite from param + if 'force_readd' in params: + fra = params.get('force_readd') + force_readd = fra.lower() not in ['0', '-1'] if not isinstance(fra, bool) else fra + + # Set default title + def_title = self.getDefaultTitle(info) + + # Default profile and category + default_profile = {} + if (not params.get('profile_id') and status != 'done') or params.get('ignore_previous', False): + default_profile = fireEvent('profile.default', single = True) + cat_id = params.get('category_id') + + try: + db = get_db() + + media = { + '_t': 'media', + 'type': 'movie', + 'title': def_title, + 'identifiers': { + 'imdb': params.get('identifier') + }, + 'status': status if status else 'active', + 'profile_id': params.get('profile_id') or default_profile.get('_id'), + 'category_id': cat_id if cat_id is not None and len(cat_id) > 0 and cat_id != '-1' else None, + } + + # Update movie info + try: del info['in_wanted'] + except: pass + try: del info['in_library'] + except: pass + media['info'] = info + + new = False + previous_profile = None + try: + m = db.get('media', 'imdb-%s' % params.get('identifier'), with_doc = True)['doc'] + + try: + db.get('id', m.get('profile_id')) + previous_profile = m.get('profile_id') + except RecordNotFound: + pass + except: + log.error('Failed getting previous profile: %s', traceback.format_exc()) + except: + new = True + m = db.insert(media) + + # Update dict to be usable + m.update(media) + + added = True + do_search = False + search_after = search_after and self.conf('search_on_add', section = 'moviesearcher') + onComplete = None + + if new: + if search_after: + onComplete = self.createOnComplete(m['_id']) + 
search_after = False + elif force_readd: + + # Clean snatched history + for release in fireEvent('release.for_media', m['_id'], single = True): + if release.get('status') in ['downloaded', 'snatched', 'seeding', 'done']: + if params.get('ignore_previous', False): + fireEvent('release.update_status', release['_id'], status = 'ignored') + else: + fireEvent('release.delete', release['_id'], single = True) + + m['profile_id'] = (params.get('profile_id') or default_profile.get('_id')) if not previous_profile else previous_profile + m['category_id'] = cat_id if cat_id is not None and len(cat_id) > 0 else (m.get('category_id') or None) + m['last_edit'] = int(time.time()) + m['tags'] = [] + + do_search = True + db.update(m) + else: + try: del params['info'] + except: pass + log.debug('Movie already exists, not updating: %s', params) + added = False + + # Trigger update info + if added and update_after: + # Do full update to get images etc + fireEventAsync('movie.update', m['_id'], default_title = params.get('title'), on_complete = onComplete) + + # Remove releases + for rel in fireEvent('release.for_media', m['_id'], single = True): + if rel['status'] is 'available': + db.delete(rel) + + movie_dict = fireEvent('media.get', m['_id'], single = True) + if not movie_dict: + log.debug('Failed adding media, can\'t find it anymore') + return False + + if do_search and search_after: + onComplete = self.createOnComplete(m['_id']) + onComplete() + + if added and notify_after: + + if params.get('title'): + message = 'Successfully added "%s" to your wanted list.' % params.get('title', '') + else: + title = getTitle(m) + if title: + message = 'Successfully added "%s" to your wanted list.' % title + else: + message = 'Successfully added to your wanted list.' 
+ fireEvent('notify.frontend', type = 'movie.added', data = movie_dict, message = message) + + return movie_dict + except: + log.error('Failed adding media: %s', traceback.format_exc()) + + def addView(self, **kwargs): + add_dict = self.add(params = kwargs) + + return { + 'success': True if add_dict else False, + 'movie': add_dict, + } + + def edit(self, id = '', **kwargs): + + try: + db = get_db() + + ids = splitString(id) + for media_id in ids: + + try: + m = db.get('id', media_id) + m['profile_id'] = kwargs.get('profile_id') or m['profile_id'] + + cat_id = kwargs.get('category_id') + if cat_id is not None: + m['category_id'] = cat_id if len(cat_id) > 0 else m['category_id'] + + # Remove releases + for rel in fireEvent('release.for_media', m['_id'], single = True): + if rel['status'] is 'available': + db.delete(rel) + + # Default title + if kwargs.get('default_title'): + m['title'] = kwargs.get('default_title') + + db.update(m) + + fireEvent('media.restatus', m['_id'], single = True) + + m = db.get('id', media_id) + + movie_dict = fireEvent('media.get', m['_id'], single = True) + fireEventAsync('movie.searcher.single', movie_dict, on_complete = self.createNotifyFront(media_id)) + + except: + print traceback.format_exc() + log.error('Can\'t edit non-existing media') + + return { + 'success': True, + } + except: + log.error('Failed editing media: %s', traceback.format_exc()) + + return { + 'success': False, + } + + def update(self, media_id = None, identifier = None, default_title = None, extended = False): + """ + Update movie information inside media['doc']['info'] + + @param media_id: document id + @param default_title: default title, if empty, use first one or existing one + @param extended: update with extended info (parses more info, actors, images from some info providers) + @return: dict, with media + """ + + if self.shuttingDown(): + return + + lock_key = 'media.get.%s' % media_id if media_id else identifier + self.acquireLock(lock_key) + + media = {} + 
try: + db = get_db() + + if media_id: + media = db.get('id', media_id) + else: + media = db.get('media', 'imdb-%s' % identifier, with_doc = True)['doc'] + + info = fireEvent('movie.info', merge = True, extended = extended, identifier = getIdentifier(media)) + + # Don't need those here + try: del info['in_wanted'] + except: pass + try: del info['in_library'] + except: pass + + if not info or len(info) == 0: + log.error('Could not update, no movie info to work with: %s', identifier) + return False + + # Update basic info + media['info'] = info + + titles = info.get('titles', []) + log.debug('Adding titles: %s', titles) + + # Define default title + if default_title or media.get('title') == 'UNKNOWN' or len(media.get('title', '')) == 0: + media['title'] = self.getDefaultTitle(info, default_title) + + # Files + image_urls = info.get('images', []) + + self.getPoster(media, image_urls) + + db.update(media) + except: + log.error('Failed update media: %s', traceback.format_exc()) + + self.releaseLock(lock_key) + return media + + def updateReleaseDate(self, media_id): + """ + Update release_date (eta) info only + + @param media_id: document id + @return: dict, with dates dvd, theater, bluray, expires + """ + + try: + db = get_db() + + media = db.get('id', media_id) + + if not media.get('info'): + media = self.update(media_id) + dates = media.get('info', {}).get('release_date') + else: + dates = media.get('info').get('release_date') + + if dates and (dates.get('expires', 0) < time.time() or dates.get('expires', 0) > time.time() + (604800 * 4)) or not dates: + dates = fireEvent('movie.info.release_date', identifier = getIdentifier(media), merge = True) + media['info'].update({'release_date': dates}) + db.update(media) + + return dates + except: + log.error('Failed updating release dates: %s', traceback.format_exc()) + + return {} diff --git a/couchpotato/core/media/movie/_base/static/details.js b/couchpotato/core/media/movie/_base/static/details.js new file mode 100644 index 
0000000000..dd156baee8 --- /dev/null +++ b/couchpotato/core/media/movie/_base/static/details.js @@ -0,0 +1,174 @@ +var MovieDetails = new Class({ + + Extends: BlockBase, + + sections: null, + buttons: null, + + initialize: function(parent, options){ + var self = this; + + self.sections = {}; + + var category = parent.get('category'); + + self.el = new Element('div',{ + 'class': 'page active movie_details level_' + (options.level || 0) + }).adopt( + self.overlay = new Element('div.overlay', { + 'events': { + 'click': self.close.bind(self) + } + }).grab( + new Element('a.close.icon-left-arrow') + ), + self.content = new Element('div.scroll_content').grab( + new Element('div.head').adopt( + new Element('h1').grab( + self.title_dropdown = new BlockMenu(self, { + 'class': 'title', + 'button_text': parent.getTitle() + (parent.get('year') ? ' (' + parent.get('year') + ')' : ''), + 'button_class': 'icon-dropdown' + }) + ), + self.buttons = new Element('div.buttons') + ) + ) + ); + + var eta_date = parent.getETA('%b %Y') ; + self.addSection('description', new Element('div').adopt( + new Element('div', { + 'text': parent.get('plot') + }), + new Element('div.meta', { + 'html': + (eta_date ? ('ETA:' + eta_date + '') : '') + + '' + (parent.get('genres') || []).join(', ') + '' + }) + )); + + + // Title dropdown + var titles = parent.get('info').titles; + $(self.title_dropdown).addEvents({ + 'click:relay(li a)': function(e, el){ + (e).stopPropagation(); + + // Update category + Api.request('movie.edit', { + 'data': { + 'id': parent.get('_id'), + 'default_title': el.get('text') + } + }); + + $(self.title_dropdown).getElements('.icon-ok').removeClass('icon-ok'); + el.addClass('icon-ok'); + + self.title_dropdown.button.set('text', el.get('text') + (parent.get('year') ? ' (' + parent.get('year') + ')' : '')); + + } + }); + + titles.each(function(t){ + self.title_dropdown.addLink(new Element('a', { + 'text': t, + 'class': parent.get('title') == t ? 
'icon-ok' : '' + })); + }); + }, + + addSection: function(name, section_el){ + var self = this; + name = name.toLowerCase(); + + self.content.grab( + self.sections[name] = new Element('div', { + 'class': 'section section_' + name + }).grab(section_el) + ); + }, + + addButton: function(button){ + var self = this; + + self.buttons.grab(button); + }, + + open: function(){ + var self = this; + + self.el.addClass('show'); + document.onkeyup = self.keyup.bind(self); + //if(!App.mobile_screen){ + // $(self.content).getElements('> .head, > .section').each(function(section, nr){ + // dynamics.css(section, { + // opacity: 0, + // translateY: 100 + // }); + // + // dynamics.animate(section, { + // opacity: 1, + // translateY: 0 + // }, { + // type: dynamics.spring, + // frequency: 200, + // friction: 300, + // duration: 1200, + // delay: 500 + (nr * 100) + // }); + // }); + //} + + self.outer_click = function(){ + self.close(); + }; + + App.addEvent('history.push', self.outer_click); + + }, + + keyup: function(e) { + if (e.keyCode == 27 /* Esc */) { + this.close(); + } + }, + + close: function(){ + var self = this; + + var ended = function() { + self.el.dispose(); + self.overlay.removeEventListener('transitionend', ended); + document.onkeyup = null; + }; + self.overlay.addEventListener('transitionend', ended, false); + + // animate out + //if(!App.mobile_screen){ + // $(self.content).getElements('> .head, > .section').reverse().each(function(section, nr){ + // dynamics.animate(section, { + // opacity: 0 + // }, { + // type: dynamics.spring, + // frequency: 200, + // friction: 300, + // duration: 1200, + // delay: (nr * 50) + // }); + // }); + // + // dynamics.setTimeout(function(){ + // self.el.removeClass('show'); + // }, 200); + //} + //else { + // self.el.removeClass('show'); + //} + + self.el.removeClass('show'); + + App.removeEvent('history.push', self.outer_click); + } +}); diff --git a/couchpotato/core/media/movie/_base/static/list.js 
b/couchpotato/core/media/movie/_base/static/list.js new file mode 100644 index 0000000000..c5777a24f0 --- /dev/null +++ b/couchpotato/core/media/movie/_base/static/list.js @@ -0,0 +1,681 @@ +var MovieList = new Class({ + + Implements: [Events, Options], + + options: { + api_call: 'media.list', + navigation: true, + limit: 50, + load_more: true, + loader: true, + menu: [], + add_new: false, + force_view: false + }, + + available_views: ['thumb', 'list'], + movies: [], + movies_added: {}, + total_movies: 0, + letters: {}, + filter: null, + + initialize: function(options){ + var self = this; + self.setOptions(options); + + self.offset = 0; + self.filter = self.options.filter || { + 'starts_with': null, + 'search': null + }; + + self.el = new Element('div.movies').adopt( + self.title = self.options.title ? new Element('h2', { + 'text': self.options.title, + 'styles': {'display': 'none'} + }) : null, + self.description = self.options.description ? new Element('div.description', { + 'html': self.options.description, + 'styles': {'display': 'none'} + }) : null, + self.movie_list = new Element('div', { + 'events': { + 'click:relay(.movie)': function(e, el){ + el.retrieve('klass').onClick(e); + }, + 'mouseenter:relay(.movie)': function(e, el){ + (e).stopPropagation(); + el.retrieve('klass').onMouseenter(e); + }, + 'change:relay(.movie input)': function(e, el){ + (e).stopPropagation(); + el = el.getParent('.movie'); + var klass = el.retrieve('klass'); + klass.fireEvent('select'); + klass.select(klass.select_checkbox.get('checked')); + } + } + }), + self.load_more = self.options.load_more ? 
new Element('a.load_more', { + 'events': { + 'click': self.loadMore.bind(self) + } + }) : null + ); + + self.changeView(self.getSavedView() || self.options.view || 'thumb'); + + // Create the alphabet nav + if(self.options.navigation) + self.createNavigation(); + + if(self.options.api_call) + self.getMovies(); + + App.on('movie.added', self.movieAdded.bind(self)); + App.on('movie.deleted', self.movieDeleted.bind(self)); + }, + + movieDeleted: function(notification){ + var self = this; + + if(self.movies_added[notification.data._id]){ + self.movies.each(function(movie){ + if(movie.get('_id') == notification.data._id){ + movie.destroy(); + delete self.movies_added[notification.data._id]; + self.setCounter(self.counter_count-1); + self.total_movies--; + } + }); + } + + self.checkIfEmpty(); + }, + + movieAdded: function(notification){ + var self = this; + + self.fireEvent('movieAdded', notification); + if(self.options.add_new && !self.movies_added[notification.data._id] && notification.data.status == self.options.status){ + window.scroll(0,0); + self.createMovie(notification.data, 'top'); + self.setCounter(self.counter_count+1); + + self.checkIfEmpty(); + } + }, + + create: function(){ + var self = this; + + if(self.options.load_more){ + self.scrollspy = new ScrollSpy({ + container: self.el.getParent(), + min: function(){ + return self.load_more.getCoordinates().top; + }, + onEnter: self.loadMore.bind(self) + }); + } + + self.created = true; + }, + + addMovies: function(movies, total){ + var self = this; + + + if(!self.created) self.create(); + + // do scrollspy + if(movies.length < self.options.limit && self.scrollspy){ + self.load_more.hide(); + self.scrollspy.stop(); + } + + self.createMovie(movies, 'bottom'); + + self.total_movies += total; + self.setCounter(total); + + self.calculateSelected(); + }, + + setCounter: function(count){ + var self = this; + + if(!self.navigation_counter) return; + + self.counter_count = count; + self.navigation_counter.set('text', 
count === 1 ? '1 movie' : (count || 0) + ' movies'); + + if (self.empty_message) { + self.empty_message.destroy(); + self.empty_message = null; + } + + if(self.total_movies && count === 0 && !self.empty_message){ + var message = (self.filter.search ? 'for "'+self.filter.search+'"' : '') + + (self.filter.starts_with ? ' in '+self.filter.starts_with+'' : ''); + + self.empty_message = new Element('.message', { + 'html': 'No movies found ' + message + '.
' + }).grab( + new Element('a', { + 'text': 'Reset filter', + 'events': { + 'click': function(){ + self.filter = { + 'starts_with': null, + 'search': null + }; + self.navigation_search_input.set('value', ''); + self.reset(); + self.activateLetter(); + self.getMovies(true); + self.last_search_value = ''; + } + } + }) + ).inject(self.movie_list); + + } + + }, + + createMovie: function(movie, inject_at, nr){ + var self = this, + movies = Array.isArray(movie) ? movie : [movie], + movie_els = []; + inject_at = inject_at || 'bottom'; + + movies.each(function(movie, nr){ + + var m = new Movie(self, { + 'actions': self.options.actions, + 'view': self.current_view, + 'onSelect': self.calculateSelected.bind(self) + }, movie); + + var el = $(m); + + if(inject_at === 'bottom'){ + movie_els.push(el); + } + else { + el.inject(self.movie_list, inject_at); + } + + self.movies.include(m); + self.movies_added[movie._id] = true; + }); + + if(movie_els.length > 0){ + $(self.movie_list).adopt(movie_els); + } + + }, + + createNavigation: function(){ + var self = this; + var chars = '#ABCDEFGHIJKLMNOPQRSTUVWXYZ'; + + self.el.addClass('with_navigation'); + + self.navigation = new Element('div.alph_nav').adopt( + self.mass_edit_form = new Element('div.mass_edit_form').adopt( + new Element('span.select').adopt( + self.mass_edit_select = new Element('input[type=checkbox]', { + 'events': { + 'change': self.massEditToggleAll.bind(self) + } + }), + self.mass_edit_selected = new Element('span.count', {'text': 0}), + self.mass_edit_selected_label = new Element('span', {'text': 'selected'}) + ), + new Element('div.quality').adopt( + self.mass_edit_quality = new Element('select'), + new Element('a.button.orange', { + 'text': 'Change quality', + 'events': { + 'click': self.changeQualitySelected.bind(self) + } + }) + ), + new Element('div.delete').adopt( + new Element('span[text=or]'), + new Element('a.button.red', { + 'text': 'Delete', + 'events': { + 'click': self.deleteSelected.bind(self) + } + }) 
+ ), + new Element('div.refresh').adopt( + new Element('span[text=or]'), + new Element('a.button.green', { + 'text': 'Refresh', + 'events': { + 'click': self.refreshSelected.bind(self) + } + }) + ) + ), + new Element('div.menus').adopt( + self.navigation_counter = new Element('span.counter[title=Total]'), + self.filter_menu = new BlockMenu(self, { + 'class': 'filter', + 'button_class': 'icon-filter' + }), + self.navigation_actions = new Element('div.actions', { + 'events': { + 'click': function(e, el){ + (e).preventDefault(); + + var new_view = self.current_view == 'list' ? 'thumb' : 'list'; + + var a = 'active'; + self.navigation_actions.getElements('.'+a).removeClass(a); + self.changeView(new_view); + + self.navigation_actions.getElement('[data-view='+new_view+']') + .addClass(a); + + } + } + }), + self.navigation_menu = new BlockMenu(self, { + 'class': 'extra', + 'button_class': 'icon-dots' + }) + ) + ); + + // Mass edit + Quality.getActiveProfiles().each(function(profile){ + new Element('option', { + 'value': profile.get('_id'), + 'text': profile.get('label') + }).inject(self.mass_edit_quality); + }); + + self.filter_menu.addLink( + self.navigation_search_input = new Element('input', { + 'title': 'Search through ' + self.options.identifier, + 'placeholder': 'Search through ' + self.options.identifier, + 'events': { + 'keyup': self.search.bind(self), + 'change': self.search.bind(self) + } + }) + ).addClass('search icon-search'); + + var available_chars; + self.filter_menu.addEvent('open', function(){ + self.navigation_search_input.focus(); + + // Get available chars and highlight + if(!available_chars && (self.navigation.isDisplayed() || self.navigation.isVisible())) + Api.request('media.available_chars', { + 'data': Object.merge({ + 'status': self.options.status + }, self.filter), + 'onSuccess': function(json){ + available_chars = json.chars; + + available_chars.each(function(c){ + self.letters[c.capitalize()].addClass('available'); + }); + + } + }); + }); + + 
self.filter_menu.addLink( + self.navigation_alpha = new Element('ul.numbers', { + 'events': { + 'click:relay(li.available)': function(e, el){ + self.activateLetter(el.get('data-letter')); + self.getMovies(true); + } + } + }) + ); + + // Actions + ['thumb', 'list'].each(function(view){ + var current = self.current_view == view; + new Element('a', { + 'class': 'icon-' + view + (current ? ' active ' : ''), + 'data-view': view + }).inject(self.navigation_actions, current ? 'top' : 'bottom'); + }); + + // All + self.letters.all = new Element('li.letter_all.available.active', { + 'text': 'ALL' + }).inject(self.navigation_alpha); + + // Chars + chars.split('').each(function(c){ + self.letters[c] = new Element('li', { + 'text': c, + 'class': 'letter_'+c, + 'data-letter': c + }).inject(self.navigation_alpha); + }); + + // Add menu or hide + if (self.options.menu.length > 0) + self.options.menu.each(function(menu_item){ + self.navigation_menu.addLink(menu_item); + }); + else + self.navigation_menu.hide(); + + }, + + calculateSelected: function(){ + var self = this; + + var selected = 0, + movies = self.movies.length; + self.movies.each(function(movie){ + selected += movie.isSelected() ? 1 : 0; + }); + + var indeterminate = selected > 0 && selected < movies, + checked = selected == movies && selected > 0; + + document.body[selected > 0 ? 'addClass' : 'removeClass']('mass_editing'); + + if(self.mass_edit_select){ + self.mass_edit_select.set('checked', checked); + self.mass_edit_select.indeterminate = indeterminate; + + self.mass_edit_selected.set('text', selected); + } + }, + + deleteSelected: function(){ + var self = this, + ids = self.getSelectedMovies(), + help_msg = self.identifier == 'wanted' ? 'If you do, you won\'t be able to watch them, as they won\'t get downloaded!' : 'Your files will be safe, this will only delete the references in CouchPotato'; + + var qObj = new Question('Are you sure you want to delete '+ids.length+' movie'+ (ids.length != 1 ? 
's' : '') +'?', help_msg, [{ + 'text': 'Yes, delete '+(ids.length != 1 ? 'them' : 'it'), + 'class': 'delete', + 'events': { + 'click': function(e){ + (e).preventDefault(); + this.set('text', 'Deleting..'); + Api.request('media.delete', { + 'method': 'post', + 'data': { + 'id': ids.join(','), + 'delete_from': self.options.identifier + }, + 'onSuccess': function(){ + qObj.close(); + + var erase_movies = []; + self.movies.each(function(movie){ + if (movie.isSelected()){ + $(movie).destroy(); + erase_movies.include(movie); + } + }); + + erase_movies.each(function(movie){ + self.movies.erase(movie); + movie.destroy(); + self.setCounter(self.counter_count-1); + self.total_movies--; + }); + + self.calculateSelected(); + } + }); + + } + } + }, { + 'text': 'Cancel', + 'cancel': true + }]); + + }, + + changeQualitySelected: function(){ + var self = this; + var ids = self.getSelectedMovies(); + + Api.request('movie.edit', { + 'method': 'post', + 'data': { + 'id': ids.join(','), + 'profile_id': self.mass_edit_quality.get('value') + }, + 'onSuccess': self.search.bind(self) + }); + }, + + refreshSelected: function(){ + var self = this; + var ids = self.getSelectedMovies(); + + Api.request('media.refresh', { + 'method': 'post', + 'data': { + 'id': ids.join(',') + } + }); + }, + + getSelectedMovies: function(){ + var self = this; + + var ids = []; + self.movies.each(function(movie){ + if (movie.isSelected()) + ids.include(movie.get('_id')); + }); + + return ids; + }, + + massEditToggleAll: function(){ + var self = this; + + var select = self.mass_edit_select.get('checked'); + + self.movies.each(function(movie){ + movie.select(select); + }); + + self.calculateSelected(); + }, + + reset: function(){ + var self = this; + + self.movies = []; + if(self.mass_edit_select) + self.calculateSelected(); + if(self.navigation_alpha) + self.navigation_alpha.getElements('.active').removeClass('active'); + + self.offset = 0; + if(self.scrollspy){ + //self.load_more.show(); + 
self.scrollspy.start(); + } + }, + + activateLetter: function(letter){ + var self = this; + + self.reset(); + + self.letters[letter || 'all'].addClass('active'); + self.filter.starts_with = letter; + + }, + + changeView: function(new_view){ + var self = this; + + if(self.available_views.indexOf(new_view) == -1) + new_view = 'thumb'; + + self.el + .removeClass(self.current_view+'_list') + .addClass(new_view+'_list'); + + self.current_view = new_view; + Cookie.write(self.options.identifier+'_view', new_view, {duration: 1000}); + }, + + getSavedView: function(){ + var self = this; + return self.options.force_view ? self.options.view : Cookie.read(self.options.identifier+'_view'); + }, + + search: function(){ + var self = this; + + if(self.search_timer) clearRequestTimeout(self.search_timer); + self.search_timer = requestTimeout(function(){ + var search_value = self.navigation_search_input.get('value'); + if (search_value == self.last_search_value) return; + + self.reset(); + + self.activateLetter(); + self.filter.search = search_value; + + self.getMovies(true); + + self.last_search_value = search_value; + + }, 250); + + }, + + update: function(){ + var self = this; + + self.reset(); + self.getMovies(true); + }, + + getMovies: function(reset){ + var self = this; + + if(self.scrollspy){ + self.scrollspy.stop(); + self.load_more.set('text', 'loading...'); + } + + var loader_timeout; + if(self.movies.length === 0 && self.options.loader){ + + self.loader_first = new Element('div.mask.loading.with_message').grab( + new Element('div.message', {'text': self.options.title ? 
'Loading \'' + self.options.title + '\'' : 'Loading...'}) + ).inject(self.el, 'top'); + createSpinner(self.loader_first); + + var lfc = self.loader_first; + loader_timeout = requestTimeout(function(){ + lfc.addClass('show'); + }, 10); + + self.el.setStyle('min-height', 220); + + } + + Api.request(self.options.api_call, { + 'data': Object.merge({ + 'type': self.options.type || 'movie', + 'status': self.options.status, + 'limit_offset': self.options.limit ? self.options.limit + ',' + self.offset : null + }, self.filter), + 'onSuccess': function(json){ + + if(reset) + self.movie_list.empty(); + + if(loader_timeout) clearRequestTimeout(loader_timeout); + if(self.loader_first){ + var lf = self.loader_first; + self.loader_first = null; + lf.removeClass('show'); + + requestTimeout(function(){ + lf.destroy(); + }, 1000); + self.el.setStyle('min-height', null); + } + + self.store(json.movies); + self.addMovies(json.movies, json.total || json.movies.length); + if(self.scrollspy) { + self.load_more.set('text', 'load more movies'); + self.scrollspy.start(); + } + + self.checkIfEmpty(); + self.fireEvent('loaded'); + } + }); + }, + + loadMore: function(){ + var self = this; + if(self.offset >= self.options.limit) + self.getMovies(); + }, + + store: function(movies){ + var self = this; + + self.offset += movies.length; + + }, + + checkIfEmpty: function(){ + var self = this; + + var is_empty = self.movies.length === 0 && (self.total_movies === 0 || self.total_movies === undefined); + + if(self.title) + self.title[is_empty ? 'hide' : 'show'](); + + if(self.description) + self.description.setStyle('display', [is_empty ? 'none' : '']); + + if(is_empty && self.options.on_empty_element){ + var ee = typeOf(self.options.on_empty_element) == 'function' ? 
self.options.on_empty_element() : self.options.on_empty_element; + ee.inject(self.loader_first || self.title || self.movie_list, 'after'); + + if(self.navigation) + self.navigation.hide(); + + self.empty_element = ee; + } + else if(self.empty_element){ + self.empty_element.destroy(); + + if(self.navigation) + self.navigation.show(); + } + + }, + + toElement: function(){ + return this.el; + } + +}); diff --git a/couchpotato/core/media/movie/_base/static/manage.js b/couchpotato/core/media/movie/_base/static/manage.js new file mode 100644 index 0000000000..9d379ad74b --- /dev/null +++ b/couchpotato/core/media/movie/_base/static/manage.js @@ -0,0 +1,148 @@ +var MoviesManage = new Class({ + + Extends: PageBase, + + order: 20, + name: 'manage', + title: 'Do stuff to your existing movies!', + + indexAction: function(){ + var self = this; + + if(!self.list){ + self.refresh_button = new Element('a', { + 'title': 'Rescan your library for new movies', + 'text': 'Full library refresh', + 'events':{ + 'click': self.refresh.bind(self, true) + } + }); + + self.refresh_quick = new Element('a', { + 'title': 'Just scan for recently changed', + 'text': 'Quick library scan', + 'events':{ + 'click': self.refresh.bind(self, false) + } + }); + + self.list = new MovieList({ + 'identifier': 'manage', + 'filter': { + 'status': 'done', + 'release_status': 'done', + 'status_or': 1 + }, + 'actions': [MA.IMDB, MA.Files, MA.Trailer, MA.Readd, MA.Delete], + 'menu': [self.refresh_button, self.refresh_quick], + 'on_empty_element': new Element('div.empty_manage').adopt( + new Element('div', { + 'text': 'Seems like you don\'t have anything in your library yet. 
Add your existing movie folders in ' + }).grab( + new Element('a', { + 'text': 'Settings > Manage', + 'href': App.createUrl('settings/manage') + }) + ), + new Element('div.after_manage', { + 'text': 'When you\'ve done that, hit this button Б├▓ ' + }).grab( + new Element('a.button.green', { + 'text': 'Hit me, but not too hard', + 'events':{ + 'click': self.refresh.bind(self, true) + } + }) + ) + ) + }); + $(self.list).inject(self.content); + + // Check if search is in progress + self.startProgressInterval(); + } + + }, + + refresh: function(full){ + var self = this; + + if(!self.update_in_progress){ + + Api.request('manage.update', { + 'data': { + 'full': +full + } + }); + + self.startProgressInterval(); + + } + + }, + + startProgressInterval: function(){ + var self = this; + + self.progress_interval = requestInterval(function(){ + + if(self.progress_request && self.progress_request.running) + return; + + self.update_in_progress = true; + self.progress_request = Api.request('manage.progress', { + 'onComplete': function(json){ + + if(!json || !json.progress){ + clearRequestInterval(self.progress_interval); + self.update_in_progress = false; + if(self.progress_container){ + self.progress_container.destroy(); + self.list.update(); + } + } + else { + // Capture progress so we can use it in our *each* closure + var progress = json.progress; + + // Don't add loader when page is loading still + if(!self.list.navigation) + return; + + if(!self.progress_container) + self.progress_container = new Element('div.progress') + .inject(self.list, 'top'); + + self.progress_container.empty(); + + var sorted_table = self.parseProgress(json.progress); + + sorted_table.each(function(folder){ + var folder_progress = progress[folder]; + new Element('div').adopt( + new Element('span.folder', {'text': folder + + (folder_progress.eta > 0 ? 
', ' + new Date ().increment('second', folder_progress.eta).timeDiffInWords().replace('from now', 'to go') : '') + }), + new Element('span.percentage', {'text': folder_progress.total ? Math.round(((folder_progress.total-folder_progress.to_go)/folder_progress.total)*100) + '%' : '0%'}) + ).inject(self.progress_container); + }); + + } + } + }); + + }, 1000); + }, + + parseProgress: function (progress_object) { + var folder, temp_array = []; + + for (folder in progress_object) { + if (progress_object.hasOwnProperty(folder)) { + temp_array.push(folder); + } + } + return temp_array.stableSort(); + } + +}); diff --git a/couchpotato/core/media/movie/_base/static/manage.js~HEAD b/couchpotato/core/media/movie/_base/static/manage.js~HEAD new file mode 100644 index 0000000000..9d379ad74b --- /dev/null +++ b/couchpotato/core/media/movie/_base/static/manage.js~HEAD @@ -0,0 +1,148 @@ +var MoviesManage = new Class({ + + Extends: PageBase, + + order: 20, + name: 'manage', + title: 'Do stuff to your existing movies!', + + indexAction: function(){ + var self = this; + + if(!self.list){ + self.refresh_button = new Element('a', { + 'title': 'Rescan your library for new movies', + 'text': 'Full library refresh', + 'events':{ + 'click': self.refresh.bind(self, true) + } + }); + + self.refresh_quick = new Element('a', { + 'title': 'Just scan for recently changed', + 'text': 'Quick library scan', + 'events':{ + 'click': self.refresh.bind(self, false) + } + }); + + self.list = new MovieList({ + 'identifier': 'manage', + 'filter': { + 'status': 'done', + 'release_status': 'done', + 'status_or': 1 + }, + 'actions': [MA.IMDB, MA.Files, MA.Trailer, MA.Readd, MA.Delete], + 'menu': [self.refresh_button, self.refresh_quick], + 'on_empty_element': new Element('div.empty_manage').adopt( + new Element('div', { + 'text': 'Seems like you don\'t have anything in your library yet. 
Add your existing movie folders in ' + }).grab( + new Element('a', { + 'text': 'Settings > Manage', + 'href': App.createUrl('settings/manage') + }) + ), + new Element('div.after_manage', { + 'text': 'When you\'ve done that, hit this button Б├▓ ' + }).grab( + new Element('a.button.green', { + 'text': 'Hit me, but not too hard', + 'events':{ + 'click': self.refresh.bind(self, true) + } + }) + ) + ) + }); + $(self.list).inject(self.content); + + // Check if search is in progress + self.startProgressInterval(); + } + + }, + + refresh: function(full){ + var self = this; + + if(!self.update_in_progress){ + + Api.request('manage.update', { + 'data': { + 'full': +full + } + }); + + self.startProgressInterval(); + + } + + }, + + startProgressInterval: function(){ + var self = this; + + self.progress_interval = requestInterval(function(){ + + if(self.progress_request && self.progress_request.running) + return; + + self.update_in_progress = true; + self.progress_request = Api.request('manage.progress', { + 'onComplete': function(json){ + + if(!json || !json.progress){ + clearRequestInterval(self.progress_interval); + self.update_in_progress = false; + if(self.progress_container){ + self.progress_container.destroy(); + self.list.update(); + } + } + else { + // Capture progress so we can use it in our *each* closure + var progress = json.progress; + + // Don't add loader when page is loading still + if(!self.list.navigation) + return; + + if(!self.progress_container) + self.progress_container = new Element('div.progress') + .inject(self.list, 'top'); + + self.progress_container.empty(); + + var sorted_table = self.parseProgress(json.progress); + + sorted_table.each(function(folder){ + var folder_progress = progress[folder]; + new Element('div').adopt( + new Element('span.folder', {'text': folder + + (folder_progress.eta > 0 ? 
', ' + new Date ().increment('second', folder_progress.eta).timeDiffInWords().replace('from now', 'to go') : '') + }), + new Element('span.percentage', {'text': folder_progress.total ? Math.round(((folder_progress.total-folder_progress.to_go)/folder_progress.total)*100) + '%' : '0%'}) + ).inject(self.progress_container); + }); + + } + } + }); + + }, 1000); + }, + + parseProgress: function (progress_object) { + var folder, temp_array = []; + + for (folder in progress_object) { + if (progress_object.hasOwnProperty(folder)) { + temp_array.push(folder); + } + } + return temp_array.stableSort(); + } + +}); diff --git a/couchpotato/core/media/movie/_base/static/manage.js~b839b971765cf032c05b2f3d2627afc41fed332c b/couchpotato/core/media/movie/_base/static/manage.js~b839b971765cf032c05b2f3d2627afc41fed332c new file mode 100644 index 0000000000..e8618999b0 --- /dev/null +++ b/couchpotato/core/media/movie/_base/static/manage.js~b839b971765cf032c05b2f3d2627afc41fed332c @@ -0,0 +1,150 @@ +Page.Manage = new Class({ + + Extends: PageBase, + + order: 20, + name: 'manage', + title: 'Do stuff to your existing movies!', + + indexAction: function(){ + var self = this; + + if(!self.list){ + self.refresh_button = new Element('a', { + 'title': 'Rescan your library for new movies', + 'text': 'Full library refresh', + 'events':{ + 'click': self.refresh.bind(self, true) + } + }); + + self.refresh_quick = new Element('a', { + 'title': 'Just scan for recently changed', + 'text': 'Quick library scan', + 'events':{ + 'click': self.refresh.bind(self, false) + } + }); + + self.list = new MovieList({ + 'identifier': 'manage', + 'filter': { + 'status': 'done', + 'release_status': 'done', + 'status_or': 1 + }, + 'actions': [MA.IMDB, MA.Trailer, MA.Files, MA.Readd, MA.Edit, MA.Delete], + 'menu': [self.refresh_button, self.refresh_quick], + 'on_empty_element': new Element('div.empty_manage').adopt( + new Element('div', { + 'text': 'Seems like you don\'t have anything in your library yet.' 
+ }), + new Element('div', { + 'text': 'Add your existing movie folders in ' + }).adopt( + new Element('a', { + 'text': 'Settings > Manage', + 'href': App.createUrl('settings/manage') + }) + ), + new Element('div.after_manage', { + 'text': 'When you\'ve done that, hit this button Б├▓ ' + }).adopt( + new Element('a.button.green', { + 'text': 'Hit me, but not too hard', + 'events':{ + 'click': self.refresh.bind(self, true) + } + }) + ) + ) + }); + $(self.list).inject(self.el); + + // Check if search is in progress + self.startProgressInterval(); + } + + }, + + refresh: function(full){ + var self = this; + + if(!self.update_in_progress){ + + Api.request('manage.update', { + 'data': { + 'full': +full + } + }); + + self.startProgressInterval(); + + } + + }, + + startProgressInterval: function(){ + var self = this; + + self.progress_interval = setInterval(function(){ + + if(self.progress_request && self.progress_request.running) + return; + + self.update_in_progress = true; + self.progress_request = Api.request('manage.progress', { + 'onComplete': function(json){ + + if(!json || !json.progress){ + clearInterval(self.progress_interval); + self.update_in_progress = false; + if(self.progress_container){ + self.progress_container.destroy(); + self.list.update(); + } + } + else { + // Capture progress so we can use it in our *each* closure + var progress = json.progress; + + // Don't add loader when page is loading still + if(!self.list.navigation) + return; + + if(!self.progress_container) + self.progress_container = new Element('div.progress').inject(self.list.navigation, 'after'); + + self.progress_container.empty(); + + var sorted_table = self.parseProgress(json.progress); + + sorted_table.each(function(folder){ + var folder_progress = progress[folder]; + new Element('div').adopt( + new Element('span.folder', {'text': folder + + (folder_progress.eta > 0 ? 
', ' + new Date ().increment('second', folder_progress.eta).timeDiffInWords().replace('from now', 'to go') : '') + }), + new Element('span.percentage', {'text': folder_progress.total ? Math.round(((folder_progress.total-folder_progress.to_go)/folder_progress.total)*100) + '%' : '0%'}) + ).inject(self.progress_container) + }); + + } + } + }) + + }, 1000); + }, + + parseProgress: function (progress_object) { + var folder, temp_array = []; + + for (folder in progress_object) { + if (progress_object.hasOwnProperty(folder)) { + temp_array.push(folder) + } + } + return temp_array.stableSort() + } + +}); diff --git a/couchpotato/core/media/movie/_base/static/movie.actions.js b/couchpotato/core/media/movie/_base/static/movie.actions.js new file mode 100644 index 0000000000..e5d3eaef86 --- /dev/null +++ b/couchpotato/core/media/movie/_base/static/movie.actions.js @@ -0,0 +1,987 @@ +var MovieAction = new Class({ + + Implements: [Options], + + class_name: 'action', + label: 'UNKNOWN', + icon: null, + button: null, + details: null, + detail_button: null, + + initialize: function(movie, options){ + var self = this; + self.setOptions(options); + + self.movie = movie; + + self.create(); + + if(self.button){ + var wrapper = new Element('div', { + 'class': self.class_name + }); + self.button.inject(wrapper); + + self.button = wrapper; + } + }, + + create: function(){}, + + getButton: function(){ + return this.button || null; + }, + + getDetails: function(){ + return this.details || null; + }, + + getDetailButton: function(){ + return this.detail_button || null; + }, + + getLabel: function(){ + return this.label; + }, + + disable: function(){ + if(this.el) + this.el.addClass('disable'); + }, + + enable: function(){ + if(this.el) + this.el.removeClass('disable'); + }, + + getTitle: function(){ + var self = this; + + try { + return self.movie.getTitle(true); + } + catch(e){ + try { + return self.movie.original_title ? 
self.movie.original_title : self.movie.titles[0]; + } + catch(e2){ + return 'Unknown'; + } + } + }, + + get: function(key){ + var self = this; + try { + return self.movie.get(key); + } + catch(e){ + return self.movie[key]; + } + }, + + createMask: function(){ + var self = this; + self.mask = new Element('div.mask', { + 'styles': { + 'z-index': '1' + } + }).inject(self.movie, 'top').fade('hide'); + }, + + toElement: function(){ + return this.el || null; + } + +}); + +var MA = {}; + +MA.IMDB = new Class({ + + Extends: MovieAction, + id: null, + + create: function(){ + var self = this; + + self.id = self.movie.getIdentifier ? self.movie.getIdentifier() : self.get('imdb'); + + self.button = self.createButton(); + self.detail_button = self.createButton(); + + if(!self.id) self.disable(); + }, + + createButton: function(){ + var self = this; + + return new Element('a.imdb', { + 'text': 'IMDB', + 'title': 'Go to the IMDB page of ' + self.getTitle(), + 'href': 'http://www.imdb.com/title/'+self.id+'/', + 'target': '_blank' + }); + }, + +}); + +MA.Release = new Class({ + + Extends: MovieAction, + label: 'Releases', + + create: function(){ + var self = this; + + App.on('movie.searcher.ended', function(notification){ + if(self.movie.data._id != notification.data._id) return; + + self.releases = null; + if(self.options_container){ + // Releases are currently displayed + if(self.options_container.isDisplayed()){ + self.options_container.destroy(); + self.getDetails(); + } + else { + self.options_container.destroy(); + self.options_container = null; + } + } + }); + + }, + + getDetails: function(refresh){ + var self = this; + if(!self.movie.data.releases || self.movie.data.releases.length === 0) return; + + if(!self.options_container || refresh){ + self.options_container = new Element('div.options').grab( + self.release_container = new Element('div.releases.table') + ); + + // Header + new Element('div.item.head').adopt( + new Element('span.name', {'text': 'Release name'}), + new 
Element('span.status', {'text': 'Status'}), + new Element('span.quality', {'text': 'Quality'}), + new Element('span.size', {'text': 'Size'}), + new Element('span.age', {'text': 'Age'}), + new Element('span.score', {'text': 'Score'}), + new Element('span.provider', {'text': 'Provider'}), + new Element('span.actions') + ).inject(self.release_container); + + if(self.movie.data.releases) + self.movie.data.releases.each(function(release){ + + var quality = Quality.getQuality(release.quality) || {}, + info = release.info || {}, + provider = self.get(release, 'provider') + (info.provider_extra ? self.get(release, 'provider_extra') : ''); + + var release_name = self.get(release, 'name'); + if(release.files && release.files.length > 0){ + try { + var movie_file = release.files.filter(function(file){ + var type = File.Type.get(file.type_id); + return type && type.identifier == 'movie'; + }).pick(); + release_name = movie_file.path.split(Api.getOption('path_sep')).getLast(); + } + catch(e){} + } + + var size = info.size ? Math.floor(self.get(release, 'size')) : 0; + size = size ? ((size < 1000) ? size + 'MB' : Math.round(size*10/1024)/10 + 'GB') : 'n/a'; + + // Create release + release.el = new Element('div', { + 'class': 'item '+release.status, + 'id': 'release_'+release._id + }).adopt( + new Element('span.name', {'text': release_name, 'title': release_name}), + new Element('span.status', {'text': release.status, 'class': 'status '+release.status}), + new Element('span.quality', {'text': quality.label + (release.is_3d ? ' 3D' : '') || 'n/a'}), + new Element('span.size', {'text': size}), + new Element('span.age', {'text': self.get(release, 'age')}), + new Element('span.score', {'text': self.get(release, 'score')}), + new Element('span.provider', { 'text': provider, 'title': provider }), + new Element('span.actions').adopt( + info.detail_url ? 
new Element('a.icon-info', { + 'href': info.detail_url, + 'target': '_blank' + }) : new Element('a'), + new Element('a.icon-download', { + 'events': { + 'click': function(e){ + (e).stopPropagation(); + if(!this.hasClass('completed')) + self.download(release); + } + } + }), + new Element('a', { + 'class': release.status == 'ignored' ? 'icon-redo' : 'icon-cancel', + 'events': { + 'click': function(e){ + (e).stopPropagation(); + self.ignore(release); + + this.toggleClass('icon-redo'); + this.toggleClass('icon-cancel'); + } + } + }) + ) + ).inject(self.release_container); + + if(release.status == 'ignored' || release.status == 'failed' || release.status == 'snatched'){ + if(!self.last_release || (self.last_release && self.last_release.status != 'snatched' && release.status == 'snatched')) + self.last_release = release; + } + else if(!self.next_release && release.status == 'available'){ + self.next_release = release; + } + + var update_handle = function(notification) { + if(notification.data._id != release._id) return; + + var q = self.movie.quality.getElement('.q_' + release.quality), + new_status = notification.data.status; + + release.el.set('class', 'item ' + new_status); + + var status_el = release.el.getElement('.status'); + status_el.set('class', 'status ' + new_status); + status_el.set('text', new_status); + + if(!q && (new_status == 'snatched' || new_status == 'seeding' || new_status == 'done')) + q = self.addQuality(release.quality_id); + + if(q && !q.hasClass(new_status)) { + q.removeClass(release.status).addClass(new_status); + q.set('title', q.get('title').replace(release.status, new_status)); + } + }; + + App.on('release.update_status', update_handle); + + }); + + if(self.last_release) + self.release_container.getElements('#release_'+self.last_release._id).addClass('last_release'); + + if(self.next_release) + self.release_container.getElements('#release_'+self.next_release._id).addClass('next_release'); + + if(self.next_release || (self.last_release && 
['ignored', 'failed'].indexOf(self.last_release.status) === false)){ + + self.trynext_container = new Element('div.buttons.try_container').inject(self.release_container, 'top'); + + var nr = self.next_release, + lr = self.last_release; + + self.trynext_container.adopt( + new Element('span.or', { + 'text': 'If anything went wrong, download ' + }), + lr ? new Element('a.orange', { + 'text': 'the same release again', + 'events': { + 'click': function(){ + self.download(lr); + } + } + }) : null, + nr && lr ? new Element('span.or', { + 'text': ', ' + }) : null, + nr ? [new Element('a.green', { + 'text': lr ? 'another release' : 'the best release', + 'events': { + 'click': function(){ + self.download(nr); + } + } + }), + new Element('span.or', { + 'text': ' or pick one below' + })] : null + ); + } + + self.last_release = null; + self.next_release = null; + + } + + return self.options_container; + + }, + + get: function(release, type){ + return (release.info && release.info[type] !== undefined) ? 
release.info[type] : 'n/a'; + }, + + download: function(release){ + var self = this; + + var release_el = self.release_container.getElement('#release_'+release._id), + icon = release_el.getElement('.icon-download'); + + if(icon) + icon.addClass('icon spinner').removeClass('download'); + + Api.request('release.manual_download', { + 'data': { + 'id': release._id + }, + 'onComplete': function(json){ + if(icon) + icon.removeClass('icon spinner'); + + if(json.success){ + if(icon) + icon.addClass('completed'); + release_el.getElement('.status').set('text', 'snatched'); + } + else + if(icon) + icon.addClass('attention').set('title', 'Something went wrong when downloading, please check logs.'); + } + }); + }, + + ignore: function(release){ + + Api.request('release.ignore', { + 'data': { + 'id': release._id + } + }); + + } + +}); + +MA.Trailer = new Class({ + + Extends: MovieAction, + id: null, + label: 'Trailer', + + getDetails: function(){ + var self = this, + data_url = 'https://www.googleapis.com/youtube/v3/search?q="{title}" {year} trailer&maxResults=1&type=video&videoDefinition=high&videoEmbeddable=true&part=snippet&key=AIzaSyAT3li1KjfLidaL6Vt8T92MRU7n4VOrjYk'; + + if(!self.player_container){ + self.id = 'trailer-'+randomString(); + + self.container = new Element('div.trailer_container').adopt( + self.player_container = new Element('div.icon-play[id='+self.id+']', { + 'events': { + 'click': self.watch.bind(self) + } + }).adopt( + new Element('span[text="watch"]'), + new Element('span[text="trailer"]') + ), + self.background = new Element('div.background') + ); + + requestTimeout(function(){ + + var url = data_url.substitute({ + 'title': encodeURI(self.getTitle()), + 'year': self.get('year') + }); + + new Request.JSONP({ + 'url': url, + 'onComplete': function(json){ + if(json.items.length > 0){ + self.video_id = json.items[0].id.videoId; + self.background.setStyle('background-image', 'url('+json.items[0].snippet.thumbnails.high.url+')'); + 
self.background.addClass('visible'); + } + else { + self.container.getParent('.section').addClass('no_trailer'); + } + } + }).send(); + + }, 1000); + } + + return self.container; + + }, + + watch: function(){ + var self = this; + + new Element('iframe', { + 'src': 'https://www.youtube-nocookie.com/embed/'+self.video_id+'?rel=0&showinfo=0&autoplay=1&showsearch=0&iv_load_policy=3&vq=hd720', + 'allowfullscreen': 'true' + }).inject(self.container); + } + + +}); + + +MA.Category = new Class({ + + Extends: MovieAction, + + create: function(){ + var self = this; + + var category = self.movie.get('category'); + + self.detail_button = new BlockMenu(self, { + 'class': 'category', + 'button_text': category ? category.label : 'No category', + 'button_class': 'icon-dropdown' + }); + + var categories = CategoryList.getAll(); + if(categories.length > 0){ + + $(self.detail_button).addEvents({ + 'click:relay(li a)': function(e, el){ + (e).stopPropagation(); + + // Update category + Api.request('movie.edit', { + 'data': { + 'id': self.movie.get('_id'), + 'category_id': el.get('data-id') + } + }); + + $(self.detail_button).getElements('.icon-ok').removeClass('icon-ok'); + el.addClass('icon-ok'); + + self.detail_button.button.set('text', el.get('text')); + + } + }); + + self.detail_button.addLink(new Element('a[text=No category]', { + 'class': !category ? 'icon-ok' : '', + 'data-id': '' + })); + categories.each(function(c){ + self.detail_button.addLink(new Element('a', { + 'text': c.get('label'), + 'class': category && category._id == c.get('_id') ? 'icon-ok' : '', + 'data-id': c.get('_id') + })); + }); + } + else { + $(self.detail_button).hide(); + } + + } + +}); + + +MA.Profile = new Class({ + + Extends: MovieAction, + + create: function(){ + var self = this; + + var profile = self.movie.profile; + + self.detail_button = new BlockMenu(self, { + 'class': 'profile', + 'button_text': profile ? 
profile.get('label') : 'No profile', + 'button_class': 'icon-dropdown' + }); + + var profiles = Quality.getActiveProfiles(); + if(profiles.length > 0){ + + $(self.detail_button).addEvents({ + 'click:relay(li a)': function(e, el){ + (e).stopPropagation(); + + // Update category + Api.request('movie.edit', { + 'data': { + 'id': self.movie.get('_id'), + 'profile_id': el.get('data-id') + } + }); + + $(self.detail_button).getElements('.icon-ok').removeClass('icon-ok'); + el.addClass('icon-ok'); + + self.detail_button.button.set('text', el.get('text')); + + } + }); + + profiles.each(function(pr){ + self.detail_button.addLink(new Element('a', { + 'text': pr.get('label'), + 'class': profile && profile.get('_id') == pr.get('_id') ? 'icon-ok' : '', + 'data-id': pr.get('_id') + })); + }); + } + else { + $(self.detail_button).hide(); + } + + } + +}); + +MA.Refresh = new Class({ + + Extends: MovieAction, + icon: 'refresh', + + create: function(){ + var self = this; + + self.button = self.createButton(); + self.detail_button = self.createButton(); + + }, + + createButton: function(){ + var self = this; + return new Element('a.refresh', { + 'text': 'Refresh', + 'title': 'Refresh the movie info and do a forced search', + 'events': { + 'click': self.doRefresh.bind(self) + } + }); + }, + + doRefresh: function(e){ + var self = this; + (e).stop(); + + Api.request('media.refresh', { + 'data': { + 'id': self.movie.get('_id') + } + }); + } + +}); + +var SuggestBase = new Class({ + + Extends: MovieAction, + + getIMDB: function(){ + return this.movie.data.info.imdb; + }, + + refresh: function(json){ + var self = this; + + if(json && json.movie){ + self.movie.list.addMovies([json.movie], 1); + + var last_added = self.movie.list.movies[self.movie.list.movies.length-1]; + $(last_added).inject(self.movie, 'before'); + } + + self.movie.destroy(); + } + +}); + +MA.Add = new Class({ + + Extends: SuggestBase, + label: 'Add', + icon: 'plus', + + create: function() { + var self = this; + + 
self.button = new Element('a.add', { + 'text': 'Add', + 'title': 'Re-add the movie and mark all previous snatched/downloaded as ignored', + 'events': { + 'click': function(){ + self.movie.openDetails(); + } + } + }); + + }, + + getDetails: function(){ + var self = this; + + var m = new BlockSearchMovieItem(self.movie.data.info, { + 'onAdded': self.movie.data.status == 'suggested' ? function(){ + + Api.request('suggestion.ignore', { + 'data': { + 'imdb': self.movie.data.info.imdb, + 'remove_only': true + }, + 'onComplete': self.refresh.bind(self) + }); + + } : function(){ + self.movie.destroy(); + } + }); + m.showOptions(); + + return m; + } + +}); + +MA.SuggestSeen = new Class({ + + Extends: SuggestBase, + icon: 'eye', + + create: function() { + var self = this; + + self.button = self.createButton(); + self.detail_button = self.createButton(); + }, + + createButton: function(){ + var self = this; + + return new Element('a.seen', { + 'text': 'Already seen', + 'title': 'Already seen it!', + 'events': { + 'click': self.markAsSeen.bind(self) + } + }); + + }, + + markAsSeen: function(e){ + var self = this; + (e).stopPropagation(); + + Api.request('suggestion.ignore', { + 'data': { + 'imdb': self.getIMDB(), + 'mark_seen': 1 + }, + 'onComplete': function(json){ + self.refresh(json); + if(self.movie.details){ + self.movie.details.close(); + } + } + }); + } + +}); + +MA.SuggestIgnore = new Class({ + + Extends: SuggestBase, + icon: 'error', + + create: function() { + var self = this; + + self.button = self.createButton(); + self.detail_button = self.createButton(); + }, + + createButton: function(){ + var self = this; + + return new Element('a.ignore', { + 'text': 'Ignore', + 'title': 'Don\'t suggest this movie anymore', + 'events': { + 'click': self.markAsIgnored.bind(self) + } + }); + + }, + + markAsIgnored: function(e){ + var self = this; + (e).stopPropagation(); + + Api.request('suggestion.ignore', { + 'data': { + 'imdb': self.getIMDB() + }, + 'onComplete': 
function(json){ + self.refresh(json); + if(self.movie.details){ + self.movie.details.close(); + } + } + }); + } + +}); + + +MA.ChartIgnore = new Class({ + + Extends: SuggestBase, + icon: 'error', + + create: function() { + var self = this; + + self.button = self.createButton(); + self.detail_button = self.createButton(); + }, + + createButton: function(){ + var self = this; + + return new Element('a.ignore', { + 'text': 'Hide', + 'title': 'Don\'t show this movie in charts', + 'events': { + 'click': self.markAsHidden.bind(self) + } + }); + + }, + + markAsHidden: function(e){ + var self = this; + (e).stopPropagation(); + + Api.request('charts.ignore', { + 'data': { + 'imdb': self.getIMDB() + }, + 'onComplete': function(json){ + if(self.movie.details){ + self.movie.details.close(); + } + self.movie.destroy(); + } + }); + } + +}); + +MA.Readd = new Class({ + + Extends: MovieAction, + + create: function(){ + var self = this, + movie_done = self.movie.data.status == 'done', + snatched; + + if(self.movie.data.releases && !movie_done) + snatched = self.movie.data.releases.filter(function(release){ + return release.status && (release.status == 'snatched' || release.status == 'seeding' || release.status == 'downloaded' || release.status == 'done'); + }).length; + + if(movie_done || snatched && snatched > 0) + self.el = new Element('a.readd', { + 'title': 'Re-add the movie and mark all previous snatched/downloaded as ignored', + 'events': { + 'click': self.doReadd.bind(self) + } + }); + + }, + + doReadd: function(e){ + var self = this; + (e).stopPropagation(); + + Api.request('movie.add', { + 'data': { + 'identifier': self.movie.getIdentifier(), + 'ignore_previous': 1 + } + }); + } + +}); + +MA.Delete = new Class({ + + Extends: MovieAction, + + Implements: [Chain], + + create: function(){ + var self = this; + + self.button = self.createButton(); + self.detail_button = self.createButton(); + + }, + + createButton: function(){ + var self = this; + return new Element('a.delete', 
{ + 'text': 'Delete', + 'title': 'Remove the movie from this CP list', + 'events': { + 'click': self.showConfirm.bind(self) + } + }); + }, + + showConfirm: function(e){ + var self = this; + (e).stopPropagation(); + + self.question = new Question('Are you sure you want to delete ' + self.getTitle() + '?', '', [{ + 'text': 'Yes, delete '+self.getTitle(), + 'class': 'delete', + 'events': { + 'click': function(e){ + e.target.set('text', 'Deleting...'); + + self.del(); + } + } + }, { + 'text': 'Cancel', + 'cancel': true + }]); + + }, + + del: function(){ + var self = this; + + var movie = $(self.movie); + + Api.request('media.delete', { + 'data': { + 'id': self.movie.get('_id'), + 'delete_from': self.movie.list.options.identifier + }, + 'onComplete': function(){ + if(self.question) + self.question.close(); + + dynamics.animate(movie, { + opacity: 0, + scale: 0 + }, { + type: dynamics.bezier, + points: [{'x':0,'y':0,'cp':[{'x':0.876,'y':0}]},{'x':1,'y':1,'cp':[{'x':0.145,'y':1}]}], + duration: 400, + complete: function(){ + self.movie.destroy(); + } + }); + } + }); + + } + +}); + +MA.Files = new Class({ + + Extends: MovieAction, + label: 'Files', + + getDetails: function(){ + var self = this; + + if(!self.movie.data.releases || self.movie.data.releases.length === 0) + return; + + if(!self.files_container){ + self.files_container = new Element('div.files.table'); + + // Header + new Element('div.item.head').adopt( + new Element('span.name', {'text': 'File'}), + new Element('span.type', {'text': 'Type'}) + ).inject(self.files_container); + + if(self.movie.data.releases) + Array.each(self.movie.data.releases, function(release){ + var rel = new Element('div.release').inject(self.files_container); + + Object.each(release.files, function(files, type){ + Array.each(files, function(file){ + new Element('div.file.item').adopt( + new Element('span.name', {'text': file}), + new Element('span.type', {'text': type}) + ).inject(rel); + }); + }); + }); + + } + + return 
self.files_container; + } + +}); + + +MA.MarkAsDone = new Class({ + + Extends: MovieAction, + + create: function(){ + var self = this; + + self.button = self.createButton(); + self.detail_button = self.createButton(); + + }, + + createButton: function(){ + var self = this; + if(!self.movie.data.releases || self.movie.data.releases.length === 0) return; + + return new Element('a.mark_as_done', { + 'text': 'Mark as done', + 'title': 'Remove from available list and move to managed movies', + 'events': { + 'click': self.markMovieDone.bind(self) + } + }); + }, + + markMovieDone: function(){ + var self = this; + + Api.request('media.delete', { + 'data': { + 'id': self.movie.get('_id'), + 'delete_from': 'wanted' + }, + 'onComplete': function(){ + self.movie.destroy(); + } + }); + + } + +}); diff --git a/couchpotato/core/media/movie/_base/static/movie.js b/couchpotato/core/media/movie/_base/static/movie.js new file mode 100644 index 0000000000..4801184d7f --- /dev/null +++ b/couchpotato/core/media/movie/_base/static/movie.js @@ -0,0 +1,458 @@ +var Movie = new Class({ + + Extends: BlockBase, + Implements: [Options, Events], + + actions: null, + details: null, + + initialize: function(list, options, data){ + var self = this; + + self.actions = []; + self.data = data; + self.list = list; + + self.buttons = []; + + self.el = new Element('a.movie').grab( + self.inner = new Element('div.inner') + ); + self.el.store('klass', self); + + self.profile = Quality.getProfile(data.profile_id) || {}; + self.category = CategoryList.getCategory(data.category_id) || {}; + self.parent(self, options); + + self.addEvents(); + + //if(data.identifiers.imdb == 'tt3181822'){ + // self.el.fireEvent('mouseenter'); + // self.openDetails(); + //} + }, + + openDetails: function(){ + var self = this; + + if(!self.details){ + self.details = new MovieDetails(self, { + 'level': 3 + }); + + // Add action items + self.actions.each(function(action, nr){ + var details = action.getDetails(); + if(details){ + 
self.details.addSection(action.getLabel(), details); + } + else { + var button = action.getDetailButton(); + if(button){ + self.details.addButton(button); + } + } + }); + } + + App.getPageContainer().grab(self.details); + + requestTimeout(self.details.open.bind(self.details), 20); + }, + + addEvents: function(){ + var self = this; + + self.global_events = {}; + + // Do refresh with new data + self.global_events['movie.update'] = function(notification){ + if(self.data._id != notification.data._id) return; + + self.busy(false); + requestTimeout(function(){ + self.update(notification); + }, 2000); + }; + App.on('movie.update', self.global_events['movie.update']); + + // Add spinner on load / search + ['media.busy', 'movie.searcher.started'].each(function(listener){ + self.global_events[listener] = function(notification){ + if(notification.data && (self.data._id == notification.data._id || (typeOf(notification.data._id) == 'array' && notification.data._id.indexOf(self.data._id) > -1))) + self.busy(true); + }; + App.on(listener, self.global_events[listener]); + }); + + // Remove spinner + self.global_events['movie.searcher.ended'] = function(notification){ + if(notification.data && self.data._id == notification.data._id) + self.busy(false); + }; + App.on('movie.searcher.ended', self.global_events['movie.searcher.ended']); + + // Reload when releases have updated + self.global_events['release.update_status'] = function(notification){ + var data = notification.data; + if(data && self.data._id == data.media_id){ + + if(!self.data.releases) + self.data.releases = []; + + var updated = false; + self.data.releases.each(function(release){ + if(release._id == data._id){ + release.status = data.status; + updated = true; + } + }); + + if(updated) + self.updateReleases(); + } + }; + + App.on('release.update_status', self.global_events['release.update_status']); + + }, + + destroy: function(){ + var self = this; + + self.el.destroy(); + delete 
self.list.movies_added[self.get('id')]; + self.list.movies.erase(self); + + self.list.checkIfEmpty(); + + if(self.details) + self.details.close(); + + // Remove events + Object.each(self.global_events, function(handle, listener){ + App.off(listener, handle); + }); + }, + + busy: function(set_busy, timeout){ + var self = this; + + if(!set_busy){ + requestTimeout(function(){ + if(self.spinner){ + self.mask.fade('out'); + requestTimeout(function(){ + if(self.mask) + self.mask.destroy(); + if(self.spinner) + self.spinner.destroy(); + self.spinner = null; + self.mask = null; + }, timeout || 400); + } + }, timeout || 1000); + } + else if(!self.spinner) { + self.createMask(); + self.spinner = createSpinner(self.mask); + self.mask.fade('in'); + } + }, + + createMask: function(){ + var self = this; + self.mask = new Element('div.mask', { + 'styles': { + 'z-index': 4 + } + }).inject(self.el, 'top').fade('hide'); + }, + + update: function(notification){ + var self = this; + + self.actions = []; + self.data = notification.data; + self.inner.empty(); + + self.profile = Quality.getProfile(self.data.profile_id) || {}; + self.category = CategoryList.getCategory(self.data.category_id) || {}; + self.create(); + + self.select(self.select_checkbox.get('checked')); + + self.busy(false); + }, + + create: function(){ + var self = this; + + self.el.addClass('status_'+self.get('status')); + + var eta_date = self.getETA(); + + var rating, stars; + if(['suggested','chart'].indexOf(self.data.status) > -1 && self.data.info && self.data.info.rating && self.data.info.rating.imdb){ + rating = Array.prototype.slice.call(self.data.info.rating.imdb); + + stars = []; + + var half_rating = rating[0]/2; + for(var i = 1; i <= 5; i++){ + if(half_rating >= 1) + stars.push(new Element('span.icon-star')); + else if(half_rating > 0) + stars.push(new Element('span.icon-star-half')); + else + stars.push(new Element('span.icon-star-empty')); + + half_rating -= 1; + } + } + + var thumbnail = new 
Element('div.poster'); + + if(self.data.files && self.data.files.image_poster && self.data.files.image_poster.length > 0){ + thumbnail = new Element('div', { + 'class': 'type_image poster', + 'styles': { + 'background-image': 'url(' + Api.createUrl('file.cache') + self.data.files.image_poster[0].split(Api.getOption('path_sep')).pop() +')' + } + }); + } + else if(self.data.info && self.data.info.images && self.data.info.images.poster && self.data.info.images.poster.length > 0){ + thumbnail = new Element('div', { + 'class': 'type_image poster', + 'styles': { + 'background-image': 'url(' + self.data.info.images.poster[0] +')' + } + }); + } + + self.inner.adopt( + self.select_checkbox = new Element('input[type=checkbox]'), + new Element('div.poster_container').adopt( + thumbnail, + self.actions_el = new Element('div.actions') + ), + new Element('div.info').adopt( + new Element('div.title').adopt( + new Element('span', { + 'text': self.getTitle() || 'n/a' + }), + new Element('div.year', { + 'text': self.data.info.year || 'n/a' + }) + ), + eta_date ? new Element('div.eta', { + 'text': eta_date, + 'title': 'ETA' + }) : null, + self.quality = new Element('div.quality'), + rating ? 
new Element('div.rating[title='+rating[0]+']').adopt( + stars, + new Element('span.votes[text=('+rating.join(' / ')+')][title=Votes]') + ) : null + ) + ); + + if(!thumbnail) + self.el.addClass('no_thumbnail'); + + // Add profile + if(self.profile.data) + self.profile.getTypes().each(function(type){ + + var q = self.addQuality(type.get('quality'), type.get('3d')); + if((type.finish === true || type.get('finish')) && !q.hasClass('finish')){ + q.addClass('finish'); + q.set('title', q.get('title') + ' Will finish searching for this movie if this quality is found.'); + } + + }); + + // Add releases + self.updateReleases(); + + }, + + + onClick: function(e){ + var self = this; + + if(e.target.getParents('.actions').length === 0 && e.target != self.select_checkbox){ + (e).stopPropagation(); + self.addActions(); + self.openDetails(); + } + }, + + addActions: function(){ + var self = this; + + if(self.actions.length <= 0){ + self.options.actions.each(function(a){ + var action = new a(self), + button = action.getButton(); + if(button){ + self.actions_el.grab(button); + self.buttons.push(button); + } + + self.actions.push(action); + }); + } + }, + + onMouseenter: function(){ + var self = this; + + if(App.mobile_screen) return; + self.addActions(); + + if(self.list.current_view == 'thumb'){ + self.el.addClass('hover_start'); + requestTimeout(function(){ + self.el.removeClass('hover_start'); + }, 300); + + dynamics.css(self.inner, { + scale: 1 + }); + + dynamics.animate(self.inner, { + scale: 0.9 + }, { type: dynamics.bounce }); + + self.buttons.each(function(el, nr){ + + dynamics.css(el, { + opacity: 0, + translateY: 50 + }); + + dynamics.animate(el, { + opacity: 1, + translateY: 0 + }, { + type: dynamics.spring, + frequency: 200, + friction: 300, + duration: 800, + delay: 100 + (nr * 40) + }); + + }); + } + }, + + updateReleases: function(){ + var self = this; + if(!self.data.releases || self.data.releases.length === 0) return; + + self.data.releases.each(function(release){ + 
+ var q = self.quality.getElement('.q_'+ release.quality+(release.is_3d ? '.is_3d' : ':not(.is_3d)')), + status = release.status; + + if(!q && (status == 'snatched' || status == 'seeding' || status == 'done')) + q = self.addQuality(release.quality, release.is_3d || false); + + if (q && !q.hasClass(status)){ + q.addClass(status); + q.set('title', (q.get('title') ? q.get('title') : '') + ' status: '+ status); + } + + }); + }, + + addQuality: function(quality, is_3d){ + var self = this; + + var q = Quality.getQuality(quality); + return new Element('span', { + 'text': q.label + (is_3d ? ' 3D' : ''), + 'class': 'q_'+q.identifier + (is_3d ? ' is_3d' : ''), + 'title': '' + }).inject(self.quality); + + }, + + getTitle: function(prefixed){ + var self = this; + + if(self.data.title) + return prefixed ? self.data.title : self.getUnprefixedTitle(self.data.title); + else if(self.data.info && self.data.info.titles && self.data.info.titles.length > 0) + return prefixed ? self.data.info.titles[0] : self.getUnprefixedTitle(self.data.info.titles[0]); + + return 'Unknown movie'; + }, + + getUnprefixedTitle: function(t){ + if(t.substr(0, 4).toLowerCase() == 'the ') + t = t.substr(4) + ', The'; + else if(t.substr(0, 3).toLowerCase() == 'an ') + t = t.substr(3) + ', An'; + else if(t.substr(0, 2).toLowerCase() == 'a ') + t = t.substr(2) + ', A'; + return t; + }, + + getIdentifier: function(){ + var self = this; + + try { + return self.get('identifiers').imdb; + } + catch (e){ } + + return self.get('imdb'); + }, + + getETA: function(format){ + var self = this, + d = new Date(), + now = Math.round(+d/1000), + eta = null, + eta_date = ''; + + if(self.data.info.release_date) + [self.data.info.release_date.dvd, self.data.info.release_date.theater].each(function(timestamp){ + if (timestamp > 0 && (eta === null || Math.abs(timestamp - now) < Math.abs(eta - now))) + eta = timestamp; + }); + + if(eta){ + eta_date = new Date(eta * 1000); + if(+eta_date/1000 < now){ + eta_date = null; + } + else { 
+ eta_date = format ? eta_date.format(format) : (eta_date.format('%b') + (d.getFullYear() != eta_date.getFullYear() ? ' ' + eta_date.getFullYear() : '')); + } + } + + return (now+8035200 > eta) ? eta_date : ''; + }, + + get: function(attr){ + return this.data[attr] || this.data.info[attr]; + }, + + select: function(select){ + var self = this; + self.select_checkbox.set('checked', select); + self.el[self.select_checkbox.get('checked') ? 'addClass' : 'removeClass']('checked'); + }, + + isSelected: function(){ + return this.select_checkbox.get('checked'); + }, + + toElement: function(){ + return this.el; + } + +}); diff --git a/couchpotato/core/media/movie/_base/static/movie.scss b/couchpotato/core/media/movie/_base/static/movie.scss new file mode 100644 index 0000000000..108ca2bfdd --- /dev/null +++ b/couchpotato/core/media/movie/_base/static/movie.scss @@ -0,0 +1,1412 @@ +@import "_mixins"; + +$mass_edit_height: 44px; + +.page.movies { + bottom: auto; + z-index: 21; + height: $header_height; + + .scroll_content { + display: none; + } + + @include media-phablet { + height: $header_width_mobile; + } +} + +.page.movies_wanted, +.page.movies_manage { + top: $header_height; + padding: 0; + will-change: top; + transition: top 300ms $cubic; + + @include media-phablet { + top: $header_width_mobile; + } + + .mass_editing & { + top: $header_height + $mass_edit_height; + } + + .load_more { + text-align: center; + padding: $padding; + font-size: 2em; + display: block; + } + + .empty_manage { + padding: $padding; + + .after_manage { + margin-top: $padding; + } + } +} + +.movie { + + .ripple { + display: none; + } + + input[type=checkbox] { + display: none; + } + + .with_navigation & { + input[type=checkbox] { + display: inline-block; + position: absolute; + will-change: opacity; + transition: opacity 200ms; + opacity: 0; + z-index: 2; + cursor: pointer; + + @include media-phablet { + display: none; + } + + &:hover { + opacity: 1 !important; + } + } + + &:hover 
input[type=checkbox] { + opacity: .5; + } + + &.checked input[type=checkbox] { + opacity: 1; + } + } + + .quality { + font-weight: 400; + + span { + display: inline-block; + background: get-theme(off); + border: 1px solid transparent; + color: rgba(get-theme(text), .5); + border-radius: 1px; + padding: 1px 3px; + + @include theme-dark { + color: rgba(get-theme-dark(text), .5); + background: get-theme-dark(off); + } + + &.failed { background: #993619; color: #FFF; } + &.available { color: #009902; border-color: #009902; background: get-theme(background);} + &.snatched { background: #548399; color: #FFF } + &.downloaded, &.done { background: #009902; color: #FFF } + + @include theme-dark { + background: none; + &.available { border-color: transparent; background: none;} + &.snatched { background: #548399; } + &.downloaded, &.done { background: #009902; color: #FFF; } + } + } + } + + .rating { + .votes { + opacity: .7; + margin-left: 4px; + } + } + + &.status_suggested { + .quality { + display: none; + } + } + +} + +.movies { + position: relative; + + .no_movies { + display: block; + padding: $padding; + + @include media-tablet { + padding: $padding/2; + } + + a { + @include theme(color, primary); + } + } + + .message { + padding: $padding 0; + text-align: center; + + a { + @include theme(color, primary); + } + } + + &.movies > h2 { + padding: 0 $padding; + line-height: $header_height; + + @include media-phablet { + line-height: $header_width_mobile; + padding: 0 $padding/2; + } + } + + > .description { + position: absolute; + top: 0; + right: $padding; + width: auto; + line-height: $header_height; + opacity: .7; + + @include media-tablet { + display: none; + } + + a { + @include theme(color, primary); + display: inline; + + &:hover { + text-decoration: underline; + } + } + } + + > .loading { + @include theme(background, background); + + .message { + @include theme(color, text); + } + + .spinner { + @include theme(background-color, background); + } + } + + .movie 
.actions { + will-change: transform, opacity; + transform: rotateZ(360deg); + + @include media-phablet { + pointer-events: none; + } + } + + .progress { + + div { + width: 50%; + padding: $padding/4 $padding/2; + display: flex; + + @include media-tablet { + width: 100%; + } + + .folder { + flex: 1 auto; + overflow: hidden; + text-overflow: ellipsis; + white-space: nowrap; + margin-right: $padding/2; + } + + .percentage { + font-weight: bold; + } + } + } +} + +.list_list { + font-weight: 300; + + .movie { + display: block; + border-bottom: 1px solid transparent; + @include theme(border-color, off); + + position: relative; + cursor: pointer; + + &:last-child { + border-bottom: none; + } + + &:hover { + background: get-theme(off); + + @include theme-dark { + background: get-theme-dark(off); + } + } + + input[type=checkbox] { + left: $padding; + top: 50%; + transform: translateY(-50%); + } + + .poster { + display: none; + } + + .info { + padding: $padding/2 $padding; + + display: flex; + flex-flow: row nowrap; + align-items: center; + + @include media-tablet { + display: block; + padding: $padding/2; + } + + .title { + flex: 1 auto; + + @include media-tablet { + display: flex; + flex-flow: row nowrap; + } + + span { + transition: margin 200ms $cubic; + overflow: hidden; + text-overflow: ellipsis; + white-space: nowrap; + + @include media-tablet { + width: 100%; + } + + } + + .year { + display: inline-block; + margin: 0 10px; + opacity: .5; + } + } + + .eta { + font-size: .8em; + opacity: .5; + margin-right: 4px; + + @include media-phablet { + display: none; + } + } + + .quality { + clear: both; + overflow: hidden; + + span { + float: left; + font-size: .7em; + margin: 2px 0 0 2px; + + @include media-tablet { + margin: 2px 2px 0 0; + } + } + } + + .rating .vote { + display: inline-block; + min-width: 60px; + text-align: right; + } + } + + .actions { + position: absolute; + right: $padding/2; + top: 0; + bottom: 0; + display: none; + z-index: 10; + + .action { + display: 
inline-block; + } + + a { + height: 100%; + display: block; + @include theme(background, background); + @include theme(color, primary); + padding: $padding / 2; + width: auto; + float: right; + @include theme(text, text); + + &:before { + display: none; + } + + &:hover { + @include theme(background, off); + @include theme(color, text); + } + + .icon { + display: none; + } + } + } + + &:hover .actions { + display: block; + + @include media-tablet { + display: none; + } + } + } + + &.with_navigation .movie { + &:hover, &.checked { + .info .title span { + margin-left: $padding; + + @include media-tablet { + margin-left: 0; + } + } + } + } +} + +.thumb_list { + + $max-split: 20; + $split-jump: 225px; + + padding: 0 $padding/4; + + > div:last-child { + padding: 0 ($padding/2)+2px; + @include media-phablet { + padding: 0 $padding/6; + } + } + + .movie { + overflow: visible; + display: inline-block; + vertical-align: top; + margin-bottom: $padding; + position: relative; + cursor: pointer; + width: 150px; + border: 0 solid transparent; + border-width: 0 $padding/3; + + .inner { + will-change: transform; + transform: rotateZ(360deg); + } + + @while $max-split > 0 { + @media (min-width : $split-jump * ($max-split - 1)) and (max-width : $split-jump * $max-split) { + width: 100% / $max-split; + } + $max-split: $max-split - 1; + } + + @include media-tablet { + width: 33.333%; + border-width: 0 $padding/4; + } + + @include media-phablet { + width: 50%; + border-width: 0 $padding/5; + } + + input[type=checkbox] { + top: $padding/2; + left: $padding/2; + } + + .poster_container { + border-radius: $border_radius; + position: relative; + width: 100%; + padding-bottom: 150%; + overflow: hidden; + } + + .poster { + position: absolute; + background: center no-repeat; + @include theme(background-color, off); + background-size: cover; + overflow: hidden; + height: 100%; + width: 100%; + } + + .info { + clear: both; + font-size: .9em; + + .title { + display: flex; + padding: 3px 0; + 
font-weight: 400; + + span { + flex: 1 auto; + white-space: nowrap; + overflow: hidden; + text-overflow: ellipsis; + } + + .year { + display: inline-block; + margin-left: 5px; + opacity: .5; + } + } + + .eta { + opacity: .5; + float: right; + margin-left: 4px; + } + + .quality { + white-space: nowrap; + overflow: hidden; + font-size: .9em; + + span { + font-size: .8em; + margin-right: 2px; + } + } + } + + .actions { + background-image: linear-gradient(25deg, rgba(get-theme(primary),.3) 0%, rgba(get-theme(primary),1) 80%); + @include theme-dark { + background-image: linear-gradient(25deg, rgba(get-theme-dark(primary),.3) 0%, rgba(get-theme-dark(primary),1) 80%); + } + + will-change: opacity, visibility; + transition: all 400ms; + transition-property: opacity, visibility; + opacity: 0; + visibility: hidden; + position: absolute; + top: 0; + right: 0; + bottom: 0; + left: 0; + text-align: right; + + .action { + position: relative; + margin-right: $padding/2; + float: right; + clear: both; + + &:first-child { + margin-top: $padding/2; + } + + a { + transition: all 150ms $cubic; + will-change: color, background; + transition-property: color, background; + display: block; + width: auto; + padding: $padding / 3; + color: #FFF; + border-radius: $border_radius - 1px; + font-weight: 400; + + &:hover { + @include theme(background, background); + @include theme(color, primary); + + @include theme-dark { + color: #FFF; + } + } + } + } + } + + &:hover .actions { + opacity: 1; + visibility: visible; + + @include media-phablet { + display: none; + } + } + + &.hover_start .actions { + pointer-events: none; + } + + .mask { + bottom: 44px; + border-radius: $border_radius; + will-change: opacity; + transition: opacity 30ms; + } + } + +} + +.page.movie_details { + pointer-events: none; + $gab-width: $header_width/3; + + @include media-phablet { + left: 0; + } + + .overlay { + position: fixed; + top: 0; + bottom: 0; + right: 0; + left: $header_width; + background: rgba(0,0,0,.6); + 
border-radius: $border_radius 0 0 $border_radius; + opacity: 0; + will-change: opacity; + transform: rotateZ(360deg); + transition: opacity 300ms ease 400ms; + z-index: 1; + + .ripple { + background: #FFF; + } + + @include media-phablet { + left: 0; + border-radius: 0; + transition: none; + } + + .close { + display: inline-block; + text-align: center; + font-size: 60px; + line-height: $header_height; + color: #FFF; + width: 100%; + height: 100%; + opacity: 0; + will-change: opacity; + transition: opacity 300ms ease 200ms; + + &:before { + display: block; + width: $gab-width; + } + + @include media-phablet { + width: $header_width_mobile; + } + } + } + + .scroll_content { + position: fixed; + z-index: 2; + top: 0; + bottom: 0; + right: 0; + left: $header_width + $gab-width; + @include theme(background, background); + border-radius: $border_radius 0 0 $border_radius; + overflow-y: auto; + will-change: transform; + transform: translateX(100%) rotateZ(360deg); + transition: transform 450ms $cubic; + + @include media-phablet { + left: $header_width_mobile; + } + + > .head { + display: flex; + flex-flow: row wrap; + padding: 0 $padding 0 $padding/2; + position: relative; + z-index: 2; + will-change: transform, opacity; + transform: rotateZ(360deg); + + @include media-phablet { + padding: 0; + line-height: $header_width_mobile; + } + + h1 { + flex: 1 auto; + margin: 0; + font-size: 24px; + font-weight: 300; + max-width: 100%; + + @include media-phablet { + min-width: 100%; + line-height: $header_width_mobile; + + .more_menu { + width: 100%; + } + } + + .more_menu { + a { + @include theme(color, text); + } + + .icon-dropdown { + padding-right: $padding*1.5; + + @include media-phablet { + &:before { + right: $padding/2; + } + } + } + } + } + + .more_menu { + display: inline-block; + vertical-align: top; + max-width: 100%; + margin-bottom: 0; + + &.title .wrapper { + transform-origin: 0 0; + } + + > a { + float: left; + line-height: $header_height; + @include theme(color, 
primary); + + &:hover { + @include theme(color, text); + } + + @include media-phablet { + line-height: $header_width_mobile; + } + } + + .icon-dropdown { + position: relative; + padding: 0 $padding*1.25 0 $padding/2; + + &:before { + position: absolute; + right: $padding/2; + top: -2px; + opacity: .2; + } + + &:hover:before { + opacity: 1; + } + } + + .wrapper { + top: $header_height - 10px; + padding-top: 4px; + border-radius: $border_radius $border_radius 0 0; + font-size: 14px; + + @include media-phablet { + top: 25px; + } + + &:before { + top: 0; + left: auto; + right: 22px; + } + + ul { + border-radius: $border_radius $border_radius 0 0; + max-height: 215px; + overflow-y: auto; + } + + a { + padding-right: $padding * 1.5; + + &:before { + position: absolute; + right: $padding/2; + } + + &:hover, &.icon-ok { + @include theme(color, primary); + } + } + } + + &.title { + > a { + display: inline-block; + text-overflow: ellipsis; + overflow: hidden; + white-space: nowrap; + width: 100%; + } + + .wrapper { + left: 0; + right: auto; + + @include media-phablet { + top: 30px; + max-width: 240px; + } + + &:before { + left: 22px; + right: auto; + } + } + } + + } + + .buttons { + display: flex; + flex-wrap: wrap; + + @include media-phablet { + margin: 0; + } + + > a { + display: inline-block; + padding: 0 10px; + @include theme(color, primary); + line-height: $header_height; + + @include media-phablet { + line-height: $header_width_mobile; + } + + &:hover { + @include theme(background, off); + @include theme(color, text); + } + } + + } + } + + .section { + padding: $padding; + border-top: 1px solid rgba(0,0,0,.1); + will-change: transform, opacity; + transform: rotateZ(360deg); + + @include theme-dark { + border-color: rgba(255,255,255,.1); + } + + @include media-phablet { + padding: $padding/2; + } + } + } + + &.show { + pointer-events: auto; + + .overlay { + opacity: 1; + transition-delay: 0s; + + .close { + opacity: 1; + transition-delay: 300ms; + } + } + + 
.scroll_content { + transition-delay: 50ms; + transform: translateX(0) rotateZ(360deg); + } + } + + .section_description { + .meta { + text-align: right; + font-style: italic; + font-size: .90em; + + span { + display: inline-block; + margin: $padding/2 $padding/2 0; + + &:last-child { + margin-right: 0; + } + } + } + } + + .section_add { + @include theme(background, off); + + .options > div { + display: flex; + align-items: center; + + select { + display: block; + width: 100%; + } + + .title { + min-width: 75px; + width: 2000px; + margin: 0 10px 0 0; + } + + .profile, .category { + width: 200px; + min-width: 50px; + margin: 0 10px 0 0; + } + + .add { + width: 200px; + + .button { + @include theme(background, background); + flex: 1 auto; + display: block; + text-align: center; + width: 100%; + margin: 0; + + &:hover { + @include theme(background, primary); + } + } + + } + + } + + .thumbnail, + .data { + display: none; + } + } + + .files { + span { + overflow: hidden; + white-space: nowrap; + text-overflow: ellipsis; + text-align: center; + padding: $padding/3 0; + } + + .name { + text-align: left; + flex: 1 1 auto; + } + + .type { + min-width: 80px; + } + } + + .releases { + + .buttons { + margin-bottom: $padding/2; + + a { + display: inline; + @include theme(color, primary); + + &:hover { + text-decoration: underline; + } + } + } + + .item { + @include media-phablet { + display: block; + } + + &:not(.head):hover { + @include theme(background, off); + @include theme(text, text); + } + + span { + overflow: hidden; + white-space: nowrap; + text-overflow: ellipsis; + text-align: center; + padding: $padding/3 0; + + &:before { + display: none; + font-weight: bold; + opacity: .8; + margin-right: 3px; + width: 100%; + font-size: .9em; + + @include media-phablet { + display: inline-block; + } + } + + @include media-phablet { + vertical-align: top; + white-space: normal; + display: inline-block; + width: 50%; + padding: 0; + min-width: 0; + max-width: none; + text-align: 
left; + margin-top: 3px; + } + } + + .name { + flex: 1 auto; + text-align: left; + + @include media-phablet { + width: 100%; + font-weight: bold; + } + } + + &.head { + + @include media-phablet { + display: none; + } + } + + &.ignored { + span:not(.actions) { + opacity: .3; + } + + .name { + text-decoration: line-through; + } + } + + .actions { + padding: 0; + + @include media-phablet { + width: 100%; + text-align: center; + } + + a { + display: inline-block; + vertical-align: top; + padding: $padding/3; + min-width: 26px; + @include theme(color, text); + + &:hover { + @include theme(color, primary); + } + + @include media-phablet { + text-align: center; + } + + &:after { + margin-left: 3px; + font-size: .9em; + } + + @include media-phablet { + &.icon-info:after { content: "more info"; } + &.icon-download:after { content: "download"; } + &.icon-cancel:after { content: "ignore"; } + } + } + } + } + + .status { min-width: 70px; max-width: 70px; &:before { content: "Status:"; } } + .quality { min-width: 60px; max-width: 60px; &:before { content: "Quality:"; } } + .size { min-width: 50px; max-width: 50px; &:before { content: "Size:"; } } + .age { min-width: 40px; max-width: 40px; &:before { content: "Age:"; } } + .score { min-width: 45px; max-width: 45px; &:before { content: "Score:"; } } + .provider { min-width: 110px; max-width: 110px; &:before { content: "Provider:"; } } + .actions { min-width: 80px; max-width: 80px; } + + } + + .section_trailer.section_trailer { + $max_height: 450px; + $max_width: $max_height * (16/9); + + padding: 0; + @include theme(background, menu); + max-height: $max_height; + overflow: hidden; + + @include media-phablet { + max-height: $max_height; + } + + &.no_trailer { + display: none; + } + + .trailer_container { + $play-size: 110px; + + max-height: $max_height; + position: relative; + overflow: hidden; + max-width: $max_width; + margin: 0 auto; + cursor: pointer; + + @include media-phablet { + margin-bottom: $padding/2; + } + + 
.background { + opacity: 0; + background: no-repeat center; + background-size: cover; + position: relative; + z-index: 1; + max-height: $max_height; + padding-bottom: 56.25%; + will-change: opacity; + transition: opacity 1000ms; + + &.visible { + opacity: .4; + } + } + + .icon-play { + opacity: 0.9; + position: absolute; + z-index: 2; + text-align: center; + width: 100%; + top: 50%; + transform: translateY(-50%); + will-change: opacity; + transition: all 300ms; + color: #FFF; + font-size: $play-size; + + @include media-desktop { + font-size: $play-size/2; + } + + @include media-phablet { + font-size: $play-size/3.5; + } + + span { + transition: all 300ms; + opacity: 0.9; + position: absolute; + font-size: 1em; + top: 50%; + left: 50%; + margin-left: $play-size/2; + transform: translateY(-54%); + will-change: opacity; + + @include media-desktop { + margin-left: $play-size/4; + } + + @include media-phablet { + margin-left: $play-size/7; + } + + &:first-child { + margin-left: -($play-size/2); + transform: translate(-100%, -54%); + + @include media-desktop { + margin-left: -($play-size/4); + } + + @include media-phablet { + margin-left: -($play-size/7); + } + } + } + } + + &:hover { + @include theme(color, primary); + + .icon-play { + opacity: 1; + + span { + opacity: 1; + } + } + } + + iframe { + position: absolute; + width: 100%; + height: 100%; + border: 0; + top: 0; + left: 0; + max-height: $max_height; + z-index: 10; + } + } + } + +} + + +.alph_nav { + position: relative; + + .mass_edit_form { + display: flex; + @include theme(background, background); + position: fixed; + top: $header_height; + right: 0; + left: $header_width; + flex-flow: row nowrap; + align-items: center; + will-change: max-height; + transition: max-height 300ms $cubic; + max-height: 0; + overflow: hidden; + + .mass_editing & { + max-height: $mass_edit_height; + } + + > * { + display: flex; + align-items: center; + } + + .select { + margin: 0 $padding/2 0 $padding; + + @include media-phablet { + 
margin: 0 $padding/4 0 $padding/2; + } + + input, .count { + margin-right: $padding/4; + } + } + + } + + .menus { + + .button { + padding: 0 $padding/2; + line-height: $header_height; + } + + .counter, .more_menu, .actions { + float: left; + + .wrapper { + transform-origin: 92% 0; + right: -7px; + } + + > a { + display: inline-block; + width: 30px; + line-height: $header_height; + text-align: center; + float: left; + + &:hover { + @include theme(background, off); + } + + @include media-tablet { + line-height: $header_width_mobile; + } + } + } + + .counter { + line-height: $header_height; + padding: 0 $padding/2; + + @include media-tablet { + display: none; + } + } + + .actions { + a { + display: inline-block; + } + + .active { + display: none; + } + + } + + .filter { + .wrapper { + width: 320px; + + @include media-phablet { + right: -70px; + transform-origin: 75% 0; + + &:before { + right: 83px !important; + } + } + } + + .button { + margin-top: -2px; + } + + .search { + position: relative; + + &:before { + position: absolute; + height: 100%; + line-height: 38px; + padding-left: $padding/2; + font-size: 16px; + opacity: .5; + } + + input { + width: 100%; + padding: $padding/2 $padding/2 $padding/2 $padding*1.5; + @include theme(background, background); + border: none; + border-bottom: 1px solid transparent; + @include theme(border-color, off); + + @include media-phablet { + font-size: 1.2em; + } + } + } + + .numbers { + padding: $padding/2; + + li { + float: left; + width: 10%; + height: 30px; + line-height: 30px; + text-align: center; + opacity: .2; + cursor: default; + border: 0; + + &.active { + @include theme(background, off); + } + + &.available { + opacity: 1; + cursor: pointer; + + &:hover { + @include theme(background, off); + } + } + } + } + } + + .more_menu { + + //&.show .button { + // color: rgba(0, 0, 0, 1); + //} + + .wrapper { + top: $header_height - 10px; + padding-top: 4px; + border-radius: $border_radius $border_radius 0 0; + min-width: 140px; + + 
@include media-phablet { + top: $header_width_mobile; + } + + &:before { + top: 0; + left: auto; + right: 22px; + } + + ul { + border-radius: $border_radius $border_radius 0 0; + } + } + } + } + +} diff --git a/couchpotato/core/media/movie/_base/static/page.js b/couchpotato/core/media/movie/_base/static/page.js new file mode 100644 index 0000000000..98b5083199 --- /dev/null +++ b/couchpotato/core/media/movie/_base/static/page.js @@ -0,0 +1,50 @@ +Page.Movies = new Class({ + + Extends: PageBase, + + name: 'movies', + icon: 'movie', + sub_pages: ['Wanted', 'Manage'], + default_page: 'Wanted', + current_page: null, + + initialize: function(parent, options){ + var self = this; + self.parent(parent, options); + + self.navigation = new BlockNavigation(); + $(self.navigation).inject(self.el); + + }, + + defaultAction: function(action, params){ + var self = this; + + if(self.current_page){ + self.current_page.hide(); + + if(self.current_page.list && self.current_page.list.navigation) + self.current_page.list.navigation.dispose(); + } + + var route = new Route(); + route.parse(action); + + var page_name = route.getPage() != 'index' ? 
route.getPage().capitalize() : self.default_page; + + var page = self.sub_pages.filter(function(page){ + return page.name == page_name; + }).pick()['class']; + + page.open(route.getAction() || 'index', params); + page.show(); + + if(page.list && page.list.navigation) + page.list.navigation.inject(self.navigation); + + self.current_page = page; + self.navigation.activate(page_name.toLowerCase()); + + } + +}); diff --git a/couchpotato/core/media/movie/_base/static/search.js b/couchpotato/core/media/movie/_base/static/search.js new file mode 100644 index 0000000000..734f4647a9 --- /dev/null +++ b/couchpotato/core/media/movie/_base/static/search.js @@ -0,0 +1,240 @@ +var BlockSearchMovieItem = new Class({ + + Implements: [Options, Events], + + initialize: function(info, options){ + var self = this; + self.setOptions(options); + + self.info = info; + self.alternative_titles = []; + + self.create(); + }, + + create: function(){ + var self = this, + info = self.info; + + var in_library; + if(info.in_library){ + in_library = []; + (info.in_library.releases || []).each(function(release){ + in_library.include(release.quality); + }); + } + + self.el = new Element('div.media_result', { + 'id': info.imdb, + 'events': { + 'click': self.showOptions.bind(self)//, + //'mouseenter': self.showOptions.bind(self), + //'mouseleave': self.closeOptions.bind(self) + } + }).adopt( + self.thumbnail = info.images && info.images.poster.length > 0 ? new Element('img.thumbnail', { + 'src': info.images.poster[0], + 'height': null, + 'width': null + }) : null, + self.options_el = new Element('div.options'), + self.data_container = new Element('div.data').grab( + self.info_container = new Element('div.info').grab( + new Element('h2', { + 'class': info.in_wanted && info.in_wanted.profile_id || in_library ? 'in_library_wanted' : '', + 'title': self.getTitle() + }).adopt( + self.title = new Element('span.title', { + 'text': self.getTitle() + }), + self.year = info.year ? 
new Element('span.year', { + 'text': info.year + }) : null, + info.in_wanted && info.in_wanted.profile_id ? new Element('span.in_wanted', { + 'text': 'Already in wanted list: ' + Quality.getProfile(info.in_wanted.profile_id).get('label') + }) : (in_library ? new Element('span.in_library', { + 'text': 'Already in library: ' + in_library.join(', ') + }) : null) + ) + ) + ) + ); + + if(info.titles) + info.titles.each(function(title){ + self.alternativeTitle({ + 'title': title + }); + }); + }, + + alternativeTitle: function(alternative){ + var self = this; + + self.alternative_titles.include(alternative); + }, + + getTitle: function(){ + var self = this; + try { + return self.info.original_title ? self.info.original_title : self.info.titles[0]; + } + catch(e){ + return 'Unknown'; + } + }, + + get: function(key){ + return this.info[key]; + }, + + showOptions: function(){ + var self = this; + + self.createOptions(); + + self.data_container.addClass('open'); + self.el.addEvent('outerClick', self.closeOptions.bind(self)); + + }, + + closeOptions: function(){ + var self = this; + + self.data_container.removeClass('open'); + self.el.removeEvents('outerClick'); + }, + + add: function(e){ + var self = this; + + if(e) + (e).preventDefault(); + + self.loadingMask(); + + Api.request('movie.add', { + 'data': { + 'identifier': self.info.imdb, + 'title': self.title_select.get('value'), + 'profile_id': self.profile_select.get('value'), + 'category_id': self.category_select.get('value') + }, + 'onComplete': function(json){ + self.options_el.empty(); + self.options_el.grab( + new Element('div.message', { + 'text': json.success ? 'Movie successfully added.' : 'Movie didn\'t add properly. Check logs' + }) + ); + self.mask.fade('out'); + + self.fireEvent('added'); + }, + 'onFailure': function(){ + self.options_el.empty(); + self.options_el.grab( + new Element('div.message', { + 'text': 'Something went wrong, check the logs for more info.' 
+ }) + ); + self.mask.fade('out'); + } + }); + }, + + createOptions: function(){ + var self = this, + info = self.info; + + if(!self.options_el.hasClass('set')){ + + self.options_el.grab( + new Element('div').adopt( + new Element('div.title').grab( + self.title_select = new Element('select', { + 'name': 'title' + }) + ), + new Element('div.profile').grab( + self.profile_select = new Element('select', { + 'name': 'profile' + }) + ), + self.category_select_container = new Element('div.category').grab( + self.category_select = new Element('select', { + 'name': 'category' + }).grab( + new Element('option', {'value': -1, 'text': 'None'}) + ) + ), + new Element('div.add').grab( + self.add_button = new Element('a.button', { + 'text': 'Add', + 'events': { + 'click': self.add.bind(self) + } + }) + ) + ) + ); + + Array.each(self.alternative_titles, function(alt){ + new Element('option', { + 'text': alt.title + }).inject(self.title_select); + }); + + + // Fill categories + var categories = CategoryList.getAll(); + + if(categories.length === 0) + self.category_select_container.hide(); + else { + self.category_select_container.show(); + categories.each(function(category){ + new Element('option', { + 'value': category.data._id, + 'text': category.data.label + }).inject(self.category_select); + }); + } + + // Fill profiles + var profiles = Quality.getActiveProfiles(); + if(profiles.length == 1) + self.profile_select.hide(); + + profiles.each(function(profile){ + new Element('option', { + 'value': profile.get('_id'), + 'text': profile.get('label') + }).inject(self.profile_select); + }); + + self.options_el.addClass('set'); + + if(categories.length === 0 && self.title_select.getElements('option').length == 1 && profiles.length == 1 && + !(self.info.in_wanted && self.info.in_wanted.profile_id || in_library)) + self.add(); + + } + + }, + + loadingMask: function(){ + var self = this; + + self.mask = new Element('div.mask').inject(self.el).fade('hide'); + + createSpinner(self.mask); + 
self.mask.fade('in'); + + }, + + toElement: function(){ + return this.el; + } + +}); diff --git a/couchpotato/core/media/movie/_base/static/wanted.js b/couchpotato/core/media/movie/_base/static/wanted.js new file mode 100644 index 0000000000..094a70b1ce --- /dev/null +++ b/couchpotato/core/media/movie/_base/static/wanted.js @@ -0,0 +1,142 @@ +var MoviesWanted = new Class({ + + Extends: PageBase, + + order: 10, + name: 'wanted', + title: 'Gimme gimme gimme!', + folder_browser: null, + + indexAction: function(){ + var self = this; + + if(!self.list){ + + self.manual_search = new Element('a', { + 'title': 'Force a search for the full wanted list', + 'text': 'Search all wanted', + 'events':{ + 'click': self.doFullSearch.bind(self, true) + } + }); + + self.scan_folder = new Element('a', { + 'title': 'Scan a folder and rename all movies in it', + 'text': 'Manual folder scan', + 'events':{ + 'click': self.scanFolder.bind(self) + } + }); + + // Wanted movies + self.list = new MovieList({ + 'identifier': 'wanted', + 'status': 'active', + 'actions': [MA.MarkAsDone, MA.IMDB, MA.Release, MA.Trailer, MA.Refresh, MA.Readd, MA.Delete, MA.Category, MA.Profile], + 'add_new': true, + 'menu': [self.manual_search, self.scan_folder], + 'on_empty_element': function(){ + return new Element('div.empty_wanted').adopt( + new Element('div.no_movies', { + 'text': 'Seems like you don\'t have any movies yet.. Maybe add some via search or the extension.' 
+ }), + App.createUserscriptButtons() + ); + } + }); + $(self.list).inject(self.content); + + // Check if search is in progress + requestTimeout(self.startProgressInterval.bind(self), 4000); + } + + }, + + doFullSearch: function(){ + var self = this; + + if(!self.search_in_progress){ + + Api.request('movie.searcher.full_search'); + self.startProgressInterval(); + + } + + }, + + startProgressInterval: function(){ + var self = this; + + var start_text = self.manual_search.get('text'); + self.progress_interval = requestInterval(function(){ + if(self.search_progress && self.search_progress.running) return; + self.search_progress = Api.request('movie.searcher.progress', { + 'onComplete': function(json){ + self.search_in_progress = true; + if(!json.movie){ + clearRequestInterval(self.progress_interval); + self.search_in_progress = false; + self.manual_search.set('text', start_text); + } + else { + var progress = json.movie; + self.manual_search.set('text', 'Searching.. (' + Math.round(((progress.total-progress.to_go)/progress.total)*100) + '%)'); + } + } + }); + }, 1000); + + }, + + scanFolder: function(e) { + (e).stop(); + + var self = this; + var options = { + 'name': 'Scan_folder' + }; + + if(!self.folder_browser){ + self.folder_browser = new Option.Directory("Scan", "folder", "", options); + + self.folder_browser.save = function() { + var folder = self.folder_browser.getValue(); + Api.request('renamer.scan', { + 'data': { + 'base_folder': folder + } + }); + }; + + self.folder_browser.inject(self.content, 'top'); + self.folder_browser.fireEvent('injected'); + + // Hide the settings box + self.folder_browser.directory_inlay.hide(); + self.folder_browser.el.removeChild(self.folder_browser.el.firstChild); + + self.folder_browser.showBrowser(); + + // Make adjustments to the browser + self.folder_browser.browser.getElements('.clear.button').hide(); + self.folder_browser.save_button.text = "Select"; + self.folder_browser.browser.setStyles({ + 'z-index': 1000, + 'right': 
20, + 'top': 0, + 'margin': 0 + }); + + self.folder_browser.pointer.setStyles({ + 'right': 20 + }); + + } + else{ + self.folder_browser.showBrowser(); + } + + self.list.navigation_menu.hide(); + } + +}); diff --git a/couchpotato/core/media/movie/charts/__init__.py b/couchpotato/core/media/movie/charts/__init__.py new file mode 100644 index 0000000000..0b89eaf448 --- /dev/null +++ b/couchpotato/core/media/movie/charts/__init__.py @@ -0,0 +1,20 @@ +from .main import Charts + + +def autoload(): + return Charts() + + +config = [{ + 'name': 'charts', + 'groups': [ + { + 'label': 'Charts', + 'description': 'Displays selected charts on the home page', + 'type': 'list', + 'name': 'charts_providers', + 'tab': 'display', + 'options': [], + }, + ], +}] diff --git a/couchpotato/core/media/movie/charts/main.py b/couchpotato/core/media/movie/charts/main.py new file mode 100644 index 0000000000..d42c29a033 --- /dev/null +++ b/couchpotato/core/media/movie/charts/main.py @@ -0,0 +1,84 @@ +from CodernityDB.database import RecordNotFound +from couchpotato import Env, get_db +from couchpotato.core.helpers.variable import getTitle, splitString + +from couchpotato.core.logger import CPLog +from couchpotato.api import addApiView +from couchpotato.core.event import fireEvent +from couchpotato.core.plugins.base import Plugin + + +log = CPLog(__name__) + + +class Charts(Plugin): + + def __init__(self): + addApiView('charts.view', self.automationView) + addApiView('charts.ignore', self.ignoreView) + + def automationView(self, force_update = False, **kwargs): + + db = get_db() + + charts = fireEvent('automation.get_chart_list', merge = True) + ignored = splitString(Env.prop('charts_ignore', default = '')) + + # Create a list the movie/list.js can use + for chart in charts: + medias = [] + for media in chart.get('list', []): + + identifier = media.get('imdb') + if identifier in ignored: + continue + + try: + try: + in_library = db.get('media', 'imdb-%s' % identifier) + if in_library: + 
continue + except RecordNotFound: + pass + except: + pass + + # Cache poster + posters = media.get('images', {}).get('poster', []) + poster = [x for x in posters if 'tmdb' in x] + posters = poster if len(poster) > 0 else posters + + cached_poster = fireEvent('file.download', url = posters[0], single = True) if len(posters) > 0 else False + files = {'image_poster': [cached_poster] } if cached_poster else {} + + medias.append({ + 'status': 'chart', + 'title': getTitle(media), + 'type': 'movie', + 'info': media, + 'files': files, + 'identifiers': { + 'imdb': identifier + } + }) + + chart['list'] = medias + + return { + 'success': True, + 'count': len(charts), + 'charts': charts, + 'ignored': ignored, + } + + def ignoreView(self, imdb = None, **kwargs): + + ignored = splitString(Env.prop('charts_ignore', default = '')) + + if imdb: + ignored.append(imdb) + Env.prop('charts_ignore', ','.join(set(ignored))) + + return { + 'result': True + } diff --git a/couchpotato/core/media/movie/charts/static/charts.js b/couchpotato/core/media/movie/charts/static/charts.js new file mode 100644 index 0000000000..80b2314d98 --- /dev/null +++ b/couchpotato/core/media/movie/charts/static/charts.js @@ -0,0 +1,93 @@ +var Charts = new Class({ + + Implements: [Options, Events], + + shown_once: false, + + initialize: function(options){ + var self = this; + self.setOptions(options); + + self.create(); + }, + + create: function(){ + var self = this; + + self.el = new Element('div.charts').grab( + self.el_refresh_container = new Element('div.refresh').grab( + self.el_refreshing_text = new Element('span.refreshing', { + 'text': 'Refreshing charts...' 
+ }) + ) + ); + + self.show(); + + requestTimeout(function(){ + self.fireEvent('created'); + }, 0); + }, + + fill: function(json){ + + var self = this; + + self.el_refreshing_text.hide(); + + if(json && json.count > 0){ + json.charts.sort(function(a, b) { + return a.order - b.order; + }); + + Object.each(json.charts, function(chart){ + + var chart_list = new MovieList({ + 'navigation': false, + 'identifier': chart.name.toLowerCase().replace(/[^a-z0-9]+/g, '_'), + 'title': chart.name, + 'description': 'See source', + 'actions': [MA.Add, MA.ChartIgnore, MA.IMDB, MA.Trailer], + 'load_more': false, + 'view': 'thumb', + 'force_view': true, + 'api_call': null + }); + + // Load movies in manually + chart_list.store(chart.list); + chart_list.addMovies(chart.list, chart.list.length); + chart_list.checkIfEmpty(); + chart_list.fireEvent('loaded'); + + $(chart_list).inject(self.el); + + }); + + } + + self.fireEvent('loaded'); + + }, + + show: function(){ + var self = this; + + self.el.show(); + + if(!self.shown_once){ + requestTimeout(function(){ + self.api_request = Api.request('charts.view', { + 'onComplete': self.fill.bind(self) + }); + }, 100); + + self.shown_once = true; + } + }, + + toElement: function(){ + return this.el; + } + +}); diff --git a/couchpotato/core/media/movie/library.py b/couchpotato/core/media/movie/library.py new file mode 100644 index 0000000000..28cb1b46ed --- /dev/null +++ b/couchpotato/core/media/movie/library.py @@ -0,0 +1,32 @@ +from couchpotato.core.event import addEvent +from couchpotato.core.helpers.variable import getTitle +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.library.base import LibraryBase + + +log = CPLog(__name__) + +autoload = 'MovieLibraryPlugin' + + +class MovieLibraryPlugin(LibraryBase): + + def __init__(self): + addEvent('library.query', self.query) + + def query(self, media, first = True, include_year = True, **kwargs): + if media.get('type') != 'movie': + return + + default_title = 
getTitle(media) + titles = media['info'].get('titles', []) + titles.insert(0, default_title) + + # Add year identifier to titles + if include_year: + titles = [title + (' %s' % str(media['info']['year'])) for title in titles] + + if first: + return titles[0] if titles else None + + return titles diff --git a/libs/migrate/versioning/templates/__init__.py b/couchpotato/core/media/movie/providers/__init__.py similarity index 100% rename from libs/migrate/versioning/templates/__init__.py rename to couchpotato/core/media/movie/providers/__init__.py diff --git a/couchpotato/core/media/movie/providers/automation/__init__.py b/couchpotato/core/media/movie/providers/automation/__init__.py new file mode 100644 index 0000000000..93f6c10a40 --- /dev/null +++ b/couchpotato/core/media/movie/providers/automation/__init__.py @@ -0,0 +1,21 @@ +config = [{ + 'name': 'automation_providers', + 'groups': [ + { + 'label': 'Watchlists', + 'description': 'Check watchlists for new movies', + 'type': 'list', + 'name': 'watchlist_providers', + 'tab': 'automation', + 'options': [], + }, + { + 'label': 'Automated', + 'description': 'Uses minimal requirements', + 'type': 'list', + 'name': 'automation_providers', + 'tab': 'automation', + 'options': [], + }, + ], +}] diff --git a/couchpotato/core/media/movie/providers/automation/base.py b/couchpotato/core/media/movie/providers/automation/base.py new file mode 100644 index 0000000000..ee19649a35 --- /dev/null +++ b/couchpotato/core/media/movie/providers/automation/base.py @@ -0,0 +1,117 @@ +import time +import unicodedata + +from couchpotato.core.event import addEvent, fireEvent +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.automation.base import AutomationBase +from couchpotato.environment import Env +from couchpotato.core.helpers.variable import splitString + + +log = CPLog(__name__) + + +class Automation(AutomationBase): + + enabled_option = 'automation_enabled' + chart_enabled_option = 
'chart_display_enabled' + http_time_between_calls = 2 + + interval = 1800 + last_checked = 0 + + def __init__(self): + addEvent('automation.get_movies', self._getMovies) + addEvent('automation.get_chart_list', self._getChartList) + + def _getMovies(self): + + if self.isDisabled(): + return + + if not self.canCheck(): + log.debug('Just checked, skipping %s', self.getName()) + return [] + + self.last_checked = time.time() + + return self.getIMDBids() + + def _getChartList(self): + + if not (self.conf(self.chart_enabled_option) or self.conf(self.chart_enabled_option) is None): + return + + return self.getChartList() + + def search(self, name, year = None, imdb_only = False): + + try: + cache_name = name.decode('utf-8').encode('ascii', 'ignore') + except UnicodeEncodeError: + cache_name = unicodedata.normalize('NFKD', name).encode('ascii','ignore') + + prop_name = 'automation.cached.%s.%s' % (cache_name, year) + cached_imdb = Env.prop(prop_name, default = False) + if cached_imdb and imdb_only: + return cached_imdb + + result = fireEvent('movie.search', q = '%s %s' % (name, year if year else ''), limit = 1, merge = True) + + if len(result) > 0: + if imdb_only and result[0].get('imdb'): + Env.prop(prop_name, result[0].get('imdb')) + + return result[0].get('imdb') if imdb_only else result[0] + else: + return None + + def isMinimalMovie(self, movie): + if not movie.get('rating'): + log.info('ignoring %s as no rating is available for.', (movie['original_title'])) + return False + + if movie['rating'] and movie['rating'].get('imdb'): + movie['votes'] = movie['rating']['imdb'][1] + movie['rating'] = movie['rating']['imdb'][0] + + for minimal_type in ['year', 'rating', 'votes']: + type_value = movie.get(minimal_type, 0) + type_min = self.getMinimal(minimal_type) + if type_value < type_min: + log.info('%s too low for %s, need %s has %s', (minimal_type, movie['original_title'], type_min, type_value)) + return False + + movie_genres = [genre.lower() for genre in movie['genres']] 
+ required_genres = splitString(self.getMinimal('required_genres').lower()) + ignored_genres = splitString(self.getMinimal('ignored_genres').lower()) + + req_match = 0 + for req_set in required_genres: + req = splitString(req_set, '&') + req_match += len(list(set(movie_genres) & set(req))) == len(req) + + if self.getMinimal('required_genres') and req_match == 0: + log.info2('Required genre(s) missing for %s', movie['original_title']) + return False + + for ign_set in ignored_genres: + ign = splitString(ign_set, '&') + if len(list(set(movie_genres) & set(ign))) == len(ign): + log.info2('%s has blacklisted genre(s): %s', (movie['original_title'], ign)) + return False + + return True + + def getMinimal(self, min_type): + return Env.setting(min_type, 'automation') + + def getIMDBids(self): + return [] + + def getChartList(self): + # Example return: [ {'name': 'Display name of list', 'url': 'http://example.com/', 'order': 1, 'list': []} ] + return + + def canCheck(self): + return time.time() > self.last_checked + self.interval diff --git a/couchpotato/core/media/movie/providers/automation/bluray.py b/couchpotato/core/media/movie/providers/automation/bluray.py new file mode 100644 index 0000000000..3cd6fd63bd --- /dev/null +++ b/couchpotato/core/media/movie/providers/automation/bluray.py @@ -0,0 +1,192 @@ +import traceback + +from bs4 import BeautifulSoup +from couchpotato import fireEvent +from couchpotato.core.helpers.rss import RSS +from couchpotato.core.helpers.variable import tryInt +from couchpotato.core.logger import CPLog +from couchpotato.core.media.movie.providers.automation.base import Automation + + +log = CPLog(__name__) + +autoload = 'Bluray' + + +class Bluray(Automation, RSS): + + interval = 1800 + rss_url = 'http://www.blu-ray.com/rss/newreleasesfeed.xml' + backlog_url = 'http://www.blu-ray.com/movies/movies.php?show=newreleases&page=%s' + display_url = 'http://www.blu-ray.com/movies/movies.php?show=newreleases' + chart_order = 1 + + def getIMDBids(self): 
+ + movies = [] + + if self.conf('backlog'): + + cookie = {'Cookie': 'listlayout_7=full'} + page = 0 + while True: + page += 1 + + url = self.backlog_url % page + data = self.getHTMLData(url, headers = cookie) + soup = BeautifulSoup(data) + + try: + # Stop if the release year is before the minimal year + brk = False + h3s = soup.body.find_all('h3') + for h3 in h3s: + if h3.parent.name != 'a': + + try: + page_year = tryInt(h3.get_text()[-4:]) + if page_year > 0 and page_year < self.getMinimal('year'): + brk = True + except: + log.error('Failed determining page year: %s', traceback.format_exc()) + brk = True + break + + if brk: + break + + for h3 in h3s: + try: + if h3.parent.name == 'a': + name = h3.get_text().lower().split('blu-ray')[0].strip() + + if not name.find('/') == -1: # make sure it is not a double movie release + continue + + if not h3.parent.parent.small: # ignore non-movie tables + continue + + year = h3.parent.parent.small.get_text().split('|')[1].strip() + + if tryInt(year) < self.getMinimal('year'): + continue + + imdb = self.search(name, year) + + if imdb: + if self.isMinimalMovie(imdb): + movies.append(imdb['imdb']) + except: + log.debug('Error parsing movie html: %s', traceback.format_exc()) + break + except: + log.debug('Error loading page %s: %s', (page, traceback.format_exc())) + break + + self.conf('backlog', value = False) + + rss_movies = self.getRSSData(self.rss_url) + + for movie in rss_movies: + name = self.getTextElement(movie, 'title').lower().split('blu-ray')[0].strip('(').rstrip() + year = self.getTextElement(movie, 'description').split('|')[1].strip('(').strip() + + if not name.find('/') == -1: # make sure it is not a double movie release + continue + + if tryInt(year) < self.getMinimal('year'): + continue + + imdb = self.search(name, year) + + if imdb: + if self.isMinimalMovie(imdb): + movies.append(imdb['imdb']) + + return movies + + def getChartList(self): + cache_key = 'bluray.charts' + movie_list = { + 'name': 'Blu-ray.com - New 
Releases', + 'url': self.display_url, + 'order': self.chart_order, + 'list': self.getCache(cache_key) or [] + } + + if not movie_list['list']: + movie_ids = [] + max_items = 10 + rss_movies = self.getRSSData(self.rss_url) + + for movie in rss_movies: + name = self.getTextElement(movie, 'title').lower().split('blu-ray')[0].strip('(').rstrip() + year = self.getTextElement(movie, 'description').split('|')[1].strip('(').strip() + + if not name.find('/') == -1: # make sure it is not a double movie release + continue + + movie = self.search(name, year) + + if movie: + + if movie.get('imdb') in movie_ids: + continue + + is_movie = fireEvent('movie.is_movie', identifier = movie.get('imdb'), single = True) + if not is_movie: + continue + + movie_ids.append(movie.get('imdb')) + movie_list['list'].append( movie ) + if len(movie_list['list']) >= max_items: + break + + if not movie_list['list']: + return + + self.setCache(cache_key, movie_list['list'], timeout = 259200) + + return [movie_list] + + +config = [{ + 'name': 'bluray', + 'groups': [ + { + 'tab': 'automation', + 'list': 'automation_providers', + 'name': 'bluray_automation', + 'label': 'Blu-ray.com', + 'description': 'Imports movies from blu-ray.com.', + 'options': [ + { + 'name': 'automation_enabled', + 'default': False, + 'type': 'enabler', + }, + { + 'name': 'backlog', + 'advanced': True, + 'description': ('Parses the history until the minimum movie year is reached. 
(Takes a while)', 'Will be disabled once it has completed'), + 'default': False, + 'type': 'bool', + }, + ], + }, + { + 'tab': 'display', + 'list': 'charts_providers', + 'name': 'bluray_charts_display', + 'label': 'Blu-ray.com', + 'description': 'Display new releases from Blu-ray.com', + 'options': [ + { + 'name': 'chart_display_enabled', + 'default': True, + 'type': 'enabler', + }, + ], + }, + ], +}] diff --git a/couchpotato/core/media/movie/providers/automation/crowdai.py b/couchpotato/core/media/movie/providers/automation/crowdai.py new file mode 100644 index 0000000000..574310792f --- /dev/null +++ b/couchpotato/core/media/movie/providers/automation/crowdai.py @@ -0,0 +1,90 @@ +import re + +from couchpotato.core.helpers.rss import RSS +from couchpotato.core.helpers.variable import tryInt, splitString +from couchpotato.core.logger import CPLog +from couchpotato.core.media.movie.providers.automation.base import Automation + + +log = CPLog(__name__) + +autoload = 'CrowdAI' + + +class CrowdAI(Automation, RSS): + + interval = 1800 + + def getIMDBids(self): + + movies = [] + + urls = dict(zip(splitString(self.conf('automation_urls')), [tryInt(x) for x in splitString(self.conf('automation_urls_use'))])) + + for url in urls: + + if not urls[url]: + continue + + rss_movies = self.getRSSData(url) + + for movie in rss_movies: + + description = self.getTextElement(movie, 'description') + grabs = 0 + + for item in movie: + if item.attrib.get('name') == 'grabs': + grabs = item.attrib.get('value') + break + + if int(grabs) > tryInt(self.conf('number_grabs')): + title = re.match(r'.*Title: .a href.*/">(.*) \(\d{4}\).*', description).group(1) + log.info2('%s grabs for movie: %s, enqueue...', (grabs, title)) + year = re.match(r'.*Year: (\d{4}).*', description).group(1) + imdb = self.search(title, year) + + if imdb and self.isMinimalMovie(imdb): + movies.append(imdb['imdb']) + + return movies + + +config = [{ + 'name': 'crowdai', + 'groups': [ + { + 'tab': 'automation', + 'list': 
'automation_providers', + 'name': 'crowdai_automation', + 'label': 'CrowdAI', + 'description': ('Imports from any newznab powered NZB providers RSS feed depending on the number of grabs per movie.', + 'Go to your newznab site and find the RSS section. Then copy the copy paste the link under "Movies > x264 feed" here.'), + 'options': [ + { + 'name': 'automation_enabled', + 'default': False, + 'type': 'enabler', + }, + { + 'name': 'automation_urls_use', + 'label': 'Use', + 'default': '1', + }, + { + 'name': 'automation_urls', + 'label': 'url', + 'type': 'combined', + 'combine': ['automation_urls_use', 'automation_urls'], + 'default': 'http://YOUR_PROVIDER/rss?t=THE_MOVIE_CATEGORY&i=YOUR_USER_ID&r=YOUR_API_KEY&res=2&rls=2&num=100', + }, + { + 'name': 'number_grabs', + 'default': '500', + 'label': 'Grab threshold', + 'description': 'Number of grabs required', + }, + ], + }, + ], +}] diff --git a/couchpotato/core/media/movie/providers/automation/flixster.py b/couchpotato/core/media/movie/providers/automation/flixster.py new file mode 100644 index 0000000000..ab03c93186 --- /dev/null +++ b/couchpotato/core/media/movie/providers/automation/flixster.py @@ -0,0 +1,83 @@ +from couchpotato.core.helpers.variable import tryInt, splitString +from couchpotato.core.logger import CPLog +from couchpotato.core.media.movie.providers.automation.base import Automation + +log = CPLog(__name__) + +autoload = 'Flixster' + + +class Flixster(Automation): + + url = 'http://www.flixster.com/api/users/%s/movies/ratings?scoreTypes=wts' + + interval = 60 + + def getIMDBids(self): + + ids = splitString(self.conf('automation_ids')) + + if len(ids) == 0: + return [] + + movies = [] + + for movie in self.getWatchlist(): + imdb_id = self.search(movie.get('title'), movie.get('year'), imdb_only = True) + movies.append(imdb_id) + + return movies + + def getWatchlist(self): + + enablers = [tryInt(x) for x in splitString(self.conf('automation_ids_use'))] + ids = splitString(self.conf('automation_ids')) + + 
index = -1 + movies = [] + for user_id in ids: + + index += 1 + if not enablers[index]: + continue + + data = self.getJsonData(self.url % user_id, decode_from = 'iso-8859-1') + + for movie in data: + movies.append({ + 'title': movie['movie']['title'], + 'year': movie['movie']['year'] + }) + + return movies + + +config = [{ + 'name': 'flixster', + 'groups': [ + { + 'tab': 'automation', + 'list': 'watchlist_providers', + 'name': 'flixster_automation', + 'label': 'Flixster', + 'description': 'Import movies from any public Flixster watchlist', + 'options': [ + { + 'name': 'automation_enabled', + 'default': False, + 'type': 'enabler', + }, + { + 'name': 'automation_ids_use', + 'label': 'Use', + }, + { + 'name': 'automation_ids', + 'label': 'User ID', + 'type': 'combined', + 'combine': ['automation_ids_use', 'automation_ids'], + }, + ], + }, + ], +}] diff --git a/couchpotato/core/media/movie/providers/automation/goodfilms.py b/couchpotato/core/media/movie/providers/automation/goodfilms.py new file mode 100644 index 0000000000..37a5a75a77 --- /dev/null +++ b/couchpotato/core/media/movie/providers/automation/goodfilms.py @@ -0,0 +1,84 @@ +from bs4 import BeautifulSoup +from couchpotato.core.logger import CPLog +from couchpotato.core.media.movie.providers.automation.base import Automation + +log = CPLog(__name__) + +autoload = 'Goodfilms' + + +class Goodfilms(Automation): + + url = 'https://goodfil.ms/%s/queue?page=%d&without_layout=1' + + interval = 1800 + + def getIMDBids(self): + + if not self.conf('automation_username'): + log.error('Please fill in your username') + return [] + + movies = [] + + for movie in self.getWatchlist(): + imdb_id = self.search(movie.get('title'), movie.get('year'), imdb_only = True) + movies.append(imdb_id) + + return movies + + def getWatchlist(self): + + movies = [] + page = 1 + + while True: + url = self.url % (self.conf('automation_username'), page) + data = self.getHTMLData(url) + soup = BeautifulSoup(data) + + this_watch_list = 
soup.find_all('div', attrs = { + 'class': 'movie', + 'data-film-title': True + }) + + if not this_watch_list: # No Movies + break + + for movie in this_watch_list: + movies.append({ + 'title': movie['data-film-title'], + 'year': movie['data-film-year'] + }) + + if not 'next page' in data.lower(): + break + + page += 1 + + return movies + + +config = [{ + 'name': 'goodfilms', + 'groups': [ + { + 'tab': 'automation', + 'list': 'watchlist_providers', + 'name': 'goodfilms_automation', + 'label': 'Goodfilms', + 'description': 'import movies from your Goodfilms queue', + 'options': [ + { + 'name': 'automation_enabled', + 'default': False, + 'type': 'enabler', + }, + { + 'name': 'automation_username', + 'label': 'Username', + }, + ], + }, + ], +}] diff --git a/couchpotato/core/media/movie/providers/automation/hummingbird.py b/couchpotato/core/media/movie/providers/automation/hummingbird.py new file mode 100644 index 0000000000..188185877f --- /dev/null +++ b/couchpotato/core/media/movie/providers/automation/hummingbird.py @@ -0,0 +1,104 @@ +from couchpotato.core.logger import CPLog +from couchpotato.core.media.movie.providers.automation.base import Automation + + +log = CPLog(__name__) + +autoload = 'Hummingbird' + + +class Hummingbird(Automation): + + def getIMDBids(self): + movies = [] + for movie in self.getWatchlist(): + imdb = self.search(movie[0], movie[1]) + if imdb: + movies.append(imdb['imdb']) + return movies + + def getWatchlist(self): + if not self.conf('automation_username'): + log.error('You need to fill in a username') + return [] + + url = "http://hummingbird.me/api/v1/users/%s/library" % self.conf('automation_username') + data = self.getJsonData(url) + + chosen_filter = { + 'automation_list_current': 'currently-watching', + 'automation_list_plan': 'plan-to-watch', + 'automation_list_completed': 'completed', + 'automation_list_hold': 'on-hold', + 'automation_list_dropped': 'dropped', + } + + chosen_lists = [] + for x in chosen_filter: + if self.conf(x): + 
chosen_lists.append(chosen_filter[x]) + + entries = [] + for item in data: + if item['anime']['show_type'] != 'Movie' or item['status'] not in chosen_lists: + continue + title = item['anime']['title'] + year = item['anime']['started_airing'] + if year: + year = year[:4] + entries.append([title, year]) + return entries + +config = [{ + 'name': 'hummingbird', + 'groups': [ + { + 'tab': 'automation', + 'list': 'watchlist_providers', + 'name': 'hummingbird_automation', + 'label': 'Hummingbird', + 'description': 'Import movies from your Hummingbird.me lists', + 'options': [ + { + 'name': 'automation_enabled', + 'default': False, + 'type': 'enabler', + }, + { + 'name': 'automation_username', + 'label': 'Username', + }, + { + 'name': 'automation_list_current', + 'type': 'bool', + 'label': 'Currently Watching', + 'default': False, + }, + { + 'name': 'automation_list_plan', + 'type': 'bool', + 'label': 'Plan to Watch', + 'default': True, + }, + { + 'name': 'automation_list_completed', + 'type': 'bool', + 'label': 'Completed', + 'default': False, + }, + { + 'name': 'automation_list_hold', + 'type': 'bool', + 'label': 'On Hold', + 'default': False, + }, + { + 'name': 'automation_list_dropped', + 'type': 'bool', + 'label': 'Dropped', + 'default': False, + }, + ], + }, + ], +}] diff --git a/couchpotato/core/media/movie/providers/automation/imdb.py b/couchpotato/core/media/movie/providers/automation/imdb.py new file mode 100644 index 0000000000..41974c44a1 --- /dev/null +++ b/couchpotato/core/media/movie/providers/automation/imdb.py @@ -0,0 +1,318 @@ +import traceback +import re + +from bs4 import BeautifulSoup +from couchpotato import fireEvent +from couchpotato.core.helpers.encoding import ss +from couchpotato.core.helpers.rss import RSS +from couchpotato.core.helpers.variable import getImdb, splitString, tryInt +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.base import MultiProvider +from 
couchpotato.core.media.movie.providers.automation.base import Automation + + +log = CPLog(__name__) + +autoload = 'IMDB' + + +class IMDB(MultiProvider): + + def getTypes(self): + return [IMDBWatchlist, IMDBAutomation, IMDBCharts] + + +class IMDBBase(Automation, RSS): + + interval = 1800 + + charts = { + 'theater': { + 'order': 1, + 'name': 'IMDB - Movies in Theaters', + 'url': 'http://www.imdb.com/movies-in-theaters/', + }, + 'boxoffice': { + 'order': 2, + 'name': 'IMDB - Box Office', + 'url': 'http://www.imdb.com/boxoffice/', + }, + 'top250': { + 'order': 3, + 'name': 'IMDB - Top 250 Movies', + 'url': 'http://www.imdb.com/chart/top', + }, + } + + def getInfo(self, imdb_id): + return fireEvent('movie.info', identifier = imdb_id, extended = False, adding = False, merge = True) + + def getFromURL(self, url): + log.debug('Getting IMDBs from: %s', url) + html = self.getHTMLData(url) + + try: + split = splitString(html, split_on = "
")[1] + html = splitString(split, split_on = "
")[0] + except: + try: + split = splitString(html, split_on = "
") + + if len(split) < 2: + log.error('Failed parsing IMDB page "%s", unexpected html.', url) + return [] + + html = BeautifulSoup(split[1]) + for x in ['list compact', 'lister', 'list detail sub-list']: + html2 = html.find('div', attrs = { + 'class': x + }) + + if html2: + html = html2.contents + html = ''.join([str(x) for x in html]) + break + except: + log.error('Failed parsing IMDB page "%s": %s', (url, traceback.format_exc())) + + html = ss(html) + imdbs = getImdb(html, multiple = True) if html else [] + + return imdbs + + +class IMDBWatchlist(IMDBBase): + + enabled_option = 'automation_enabled' + + def getIMDBids(self): + + movies = [] + + watchlist_enablers = [tryInt(x) for x in splitString(self.conf('automation_urls_use'))] + watchlist_urls = splitString(self.conf('automation_urls')) + + index = -1 + for watchlist_url in watchlist_urls: + + try: + # Get list ID + ids = re.findall('(?:list/|list_id=)([a-zA-Z0-9\-_]{11})', watchlist_url) + if len(ids) == 1: + watchlist_url = 'http://www.imdb.com/list/%s/?view=compact&sort=created:asc' % ids[0] + # Try find user id with watchlist + else: + userids = re.findall('(ur\d{7,9})', watchlist_url) + if len(userids) == 1: + watchlist_url = 'http://www.imdb.com/user/%s/watchlist?view=compact&sort=created:asc' % userids[0] + except: + log.error('Failed getting id from watchlist: %s', traceback.format_exc()) + + index += 1 + if not watchlist_enablers[index]: + continue + + start = 0 + while True: + try: + + w_url = '%s&start=%s' % (watchlist_url, start) + imdbs = self.getFromURL(w_url) + + for imdb in imdbs: + if imdb not in movies: + movies.append(imdb) + + if self.shuttingDown(): + break + + log.debug('Found %s movies on %s', (len(imdbs), w_url)) + + if len(imdbs) < 225: + break + + start = len(movies) + + except: + log.error('Failed loading IMDB watchlist: %s %s', (watchlist_url, traceback.format_exc())) + break + + return movies + + +class IMDBAutomation(IMDBBase): + + enabled_option = 'automation_providers_enabled' + 
+ def getIMDBids(self): + + movies = [] + + for name in self.charts: + chart = self.charts[name] + url = chart.get('url') + + if self.conf('automation_charts_%s' % name): + imdb_ids = self.getFromURL(url) + + try: + for imdb_id in imdb_ids: + info = self.getInfo(imdb_id) + if info and self.isMinimalMovie(info): + movies.append(imdb_id) + + if self.shuttingDown(): + break + + except: + log.error('Failed loading IMDB chart results from %s: %s', (url, traceback.format_exc())) + + return movies + + +class IMDBCharts(IMDBBase): + + def getChartList(self): + # Nearly identical to 'getIMDBids', but we don't care about minimalMovie and return all movie data (not just id) + movie_lists = [] + max_items = 10 + + for name in self.charts: + chart = self.charts[name].copy() + cache_key = 'imdb.chart_display_%s' % name + + if self.conf('chart_display_%s' % name): + + cached = self.getCache(cache_key) + if cached: + chart['list'] = cached + movie_lists.append(chart) + continue + + url = chart.get('url') + + chart['list'] = [] + imdb_ids = self.getFromURL(url) + + try: + for imdb_id in imdb_ids[0:max_items]: + + is_movie = fireEvent('movie.is_movie', identifier = imdb_id, adding = False, single = True) + if not is_movie: + continue + + info = self.getInfo(imdb_id) + chart['list'].append(info) + + if self.shuttingDown(): + break + except: + log.error('Failed loading IMDB chart results from %s: %s', (url, traceback.format_exc())) + + self.setCache(cache_key, chart['list'], timeout = 259200) + + if chart['list']: + movie_lists.append(chart) + + return movie_lists + + +config = [{ + 'name': 'imdb', + 'groups': [ + { + 'tab': 'automation', + 'list': 'watchlist_providers', + 'name': 'imdb_automation_watchlist', + 'label': 'IMDB', + 'description': 'From any public IMDB watchlists.', + 'options': [ + { + 'name': 'automation_enabled', + 'default': False, + 'type': 'enabler', + }, + { + 'name': 'automation_urls_use', + 'label': 'Use', + }, + { + 'name': 'automation_urls', + 'label': 'url', 
+ 'type': 'combined', + 'combine': ['automation_urls_use', 'automation_urls'], + }, + ], + }, + { + 'tab': 'automation', + 'list': 'automation_providers', + 'name': 'imdb_automation_charts', + 'label': 'IMDB', + 'description': 'Import movies from IMDB Charts', + 'options': [ + { + 'name': 'automation_providers_enabled', + 'default': False, + 'type': 'enabler', + }, + { + 'name': 'automation_charts_theater', + 'type': 'bool', + 'label': 'In Theaters', + 'description': 'New Movies In-Theaters chart', + 'default': True, + }, + { + 'name': 'automation_charts_top250', + 'type': 'bool', + 'label': 'TOP 250', + 'description': 'IMDB TOP 250 chart', + 'default': False, + }, + { + 'name': 'automation_charts_boxoffice', + 'type': 'bool', + 'label': 'Box office TOP 10', + 'description': 'IMDB Box office TOP 10 chart', + 'default': True, + }, + ], + }, + { + 'tab': 'display', + 'list': 'charts_providers', + 'name': 'imdb_charts_display', + 'label': 'IMDB', + 'description': 'Display movies from IMDB Charts', + 'options': [ + { + 'name': 'chart_display_enabled', + 'default': True, + 'type': 'enabler', + }, + { + 'name': 'chart_display_theater', + 'type': 'bool', + 'label': 'In Theaters', + 'description': 'New Movies In-Theaters chart', + 'default': False, + }, + { + 'name': 'chart_display_top250', + 'type': 'bool', + 'label': 'TOP 250', + 'description': 'IMDB TOP 250 chart', + 'default': False, + }, + { + 'name': 'chart_display_boxoffice', + 'type': 'bool', + 'label': 'Box office TOP 10', + 'description': 'IMDB Box office TOP 10 chart', + 'default': True, + }, + ], + }, + ], +}] diff --git a/couchpotato/core/media/movie/providers/automation/itunes.py b/couchpotato/core/media/movie/providers/automation/itunes.py new file mode 100644 index 0000000000..63655f93fb --- /dev/null +++ b/couchpotato/core/media/movie/providers/automation/itunes.py @@ -0,0 +1,97 @@ +from xml.etree.ElementTree import QName +import datetime +import traceback +import xml.etree.ElementTree as XMLTree + +from 
couchpotato.core.helpers.rss import RSS +from couchpotato.core.helpers.variable import md5, splitString, tryInt +from couchpotato.core.logger import CPLog +from couchpotato.core.media.movie.providers.automation.base import Automation + + +log = CPLog(__name__) + +autoload = 'ITunes' + + +class ITunes(Automation, RSS): + + interval = 1800 + + def getIMDBids(self): + + movies = [] + + enablers = [tryInt(x) for x in splitString(self.conf('automation_urls_use'))] + urls = splitString(self.conf('automation_urls')) + + namespace = 'http://www.w3.org/2005/Atom' + namespace_im = 'http://itunes.apple.com/rss' + + index = -1 + for url in urls: + + index += 1 + if len(enablers) == 0 or len(enablers) < index or not enablers[index]: + continue + + try: + cache_key = 'itunes.rss.%s' % md5(url) + rss_data = self.getCache(cache_key, url) + + data = XMLTree.fromstring(rss_data) + + if data is not None: + entry_tag = str(QName(namespace, 'entry')) + rss_movies = self.getElements(data, entry_tag) + + for movie in rss_movies: + name_tag = str(QName(namespace_im, 'name')) + name = self.getTextElement(movie, name_tag) + + releaseDate_tag = str(QName(namespace_im, 'releaseDate')) + releaseDateText = self.getTextElement(movie, releaseDate_tag) + year = datetime.datetime.strptime(releaseDateText, '%Y-%m-%dT00:00:00-07:00').strftime("%Y") + + imdb = self.search(name, year) + + if imdb and self.isMinimalMovie(imdb): + movies.append(imdb['imdb']) + + except: + log.error('Failed loading iTunes rss feed: %s %s', (url, traceback.format_exc())) + + return movies + + +config = [{ + 'name': 'itunes', + 'groups': [ + { + 'tab': 'automation', + 'list': 'automation_providers', + 'name': 'itunes_automation', + 'label': 'iTunes', + 'description': 'From any iTunes Store feed. 
Url should be the RSS link.', + 'options': [ + { + 'name': 'automation_enabled', + 'default': False, + 'type': 'enabler', + }, + { + 'name': 'automation_urls_use', + 'label': 'Use', + 'default': ',', + }, + { + 'name': 'automation_urls', + 'label': 'url', + 'type': 'combined', + 'combine': ['automation_urls_use', 'automation_urls'], + 'default': 'https://itunes.apple.com/rss/topmovies/limit=25/xml,', + }, + ], + }, + ], +}] diff --git a/couchpotato/core/media/movie/providers/automation/letterboxd.py b/couchpotato/core/media/movie/providers/automation/letterboxd.py new file mode 100644 index 0000000000..072c416b4a --- /dev/null +++ b/couchpotato/core/media/movie/providers/automation/letterboxd.py @@ -0,0 +1,102 @@ +import re + +from bs4 import BeautifulSoup +from couchpotato.core.helpers.variable import tryInt, splitString, removeEmpty +from couchpotato.core.logger import CPLog +from couchpotato.core.media.movie.providers.automation.base import Automation + + +log = CPLog(__name__) + +autoload = 'Letterboxd' + + +class Letterboxd(Automation): + + url = 'http://letterboxd.com/%s/watchlist/page/%d/' + pattern = re.compile(r'(.*)\((\d*)\)') + + interval = 1800 + + def getIMDBids(self): + + urls = splitString(self.conf('automation_urls')) + + if len(urls) == 0: + return [] + + movies = [] + + for movie in self.getWatchlist(): + imdb_id = self.search(movie.get('title'), movie.get('year'), imdb_only = True) + movies.append(imdb_id) + + return movies + + def getWatchlist(self): + + enablers = [tryInt(x) for x in splitString(self.conf('automation_urls_use'))] + urls = splitString(self.conf('automation_urls')) + + index = -1 + movies = [] + for username in urls: + + index += 1 + if not enablers[index]: + continue + + soup = BeautifulSoup(self.getHTMLData(self.url % (username, 1))) + + pagination = soup.find_all('li', attrs={'class': 'paginate-page'}) + number_of_pages = tryInt(pagination[-1].find('a').get_text()) if pagination else 1 + pages = range(1, number_of_pages) + + 
for page in pages: + soup = BeautifulSoup(self.getHTMLData(self.url % (username, page))) + movies += self.getMoviesFromHTML(soup) + + return movies + + def getMoviesFromHTML(self, html): + movies = [] + + for movie in html.find_all('li', attrs={'class': 'poster-container'}): + img = movie.find('img') + title = img.get('alt') + + movies.append({ + 'title': title + }) + + return movies + +config = [{ + 'name': 'letterboxd', + 'groups': [ + { + 'tab': 'automation', + 'list': 'watchlist_providers', + 'name': 'letterboxd_automation', + 'label': 'Letterboxd', + 'description': 'Import movies from any public Letterboxd watchlist', + 'options': [ + { + 'name': 'automation_enabled', + 'default': False, + 'type': 'enabler', + }, + { + 'name': 'automation_urls_use', + 'label': 'Use', + }, + { + 'name': 'automation_urls', + 'label': 'Username', + 'type': 'combined', + 'combine': ['automation_urls_use', 'automation_urls'], + }, + ], + }, + ], +}] diff --git a/couchpotato/core/media/movie/providers/automation/popularmovies.py b/couchpotato/core/media/movie/providers/automation/popularmovies.py new file mode 100644 index 0000000000..79e22f138f --- /dev/null +++ b/couchpotato/core/media/movie/providers/automation/popularmovies.py @@ -0,0 +1,48 @@ +from couchpotato import fireEvent +from couchpotato.core.logger import CPLog +from couchpotato.core.media.movie.providers.automation.base import Automation + +log = CPLog(__name__) + +autoload = 'PopularMovies' + + +class PopularMovies(Automation): + + interval = 1800 + url = 'https://s3.amazonaws.com/popular-movies/movies.json' + + def getIMDBids(self): + + movies = [] + retrieved_movies = self.getJsonData(self.url) + + if retrieved_movies: + for movie in retrieved_movies: + imdb_id = movie.get('imdb_id') + info = fireEvent('movie.info', identifier = imdb_id, extended = False, merge = True) + if self.isMinimalMovie(info): + movies.append(imdb_id) + + return movies + + +config = [{ + 'name': 'popularmovies', + 'groups': [ + { + 'tab': 
'automation', + 'list': 'automation_providers', + 'name': 'popularmovies_automation', + 'label': 'Popular Movies', + 'description': 'Imports the top titles of movies that have been in theaters. Script provided by Steven Lu', + 'options': [ + { + 'name': 'automation_enabled', + 'default': False, + 'type': 'enabler', + }, + ], + }, + ], +}] diff --git a/couchpotato/core/media/movie/providers/automation/trakt/__init__.py b/couchpotato/core/media/movie/providers/automation/trakt/__init__.py new file mode 100644 index 0000000000..b0e5cba8bd --- /dev/null +++ b/couchpotato/core/media/movie/providers/automation/trakt/__init__.py @@ -0,0 +1,38 @@ +from .main import Trakt + + +def autoload(): + return Trakt() + + +config = [{ + 'name': 'trakt', + 'groups': [ + { + 'tab': 'automation', + 'list': 'watchlist_providers', + 'name': 'trakt_automation', + 'label': 'Trakt', + 'description': 'Import movies from your own watchlist', + 'options': [ + { + 'name': 'automation_enabled', + 'default': False, + 'type': 'enabler', + }, + { + 'name': 'automation_oauth_token', + 'label': 'Auth Token', + 'advanced': 1 + }, + { + 'name': 'automation_oauth_refresh', + 'label': 'Refresh Token', + 'description': ('Used to automatically refresh your oauth token every 3 months', + 'To get a refresh token, reconnect with trakt'), + 'advanced': 1 + }, + ], + }, + ], +}] diff --git a/couchpotato/core/media/movie/providers/automation/trakt/main.py b/couchpotato/core/media/movie/providers/automation/trakt/main.py new file mode 100644 index 0000000000..fcec75e938 --- /dev/null +++ b/couchpotato/core/media/movie/providers/automation/trakt/main.py @@ -0,0 +1,114 @@ +import json +import traceback +import time + +from couchpotato import Env, fireEvent +from couchpotato.api import addApiView +from couchpotato.core.event import addEvent +from couchpotato.core.helpers.variable import cleanHost +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.base import Provider +from 
couchpotato.core.media.movie.providers.automation.base import Automation + + +log = CPLog(__name__) + + +class TraktBase(Provider): + + client_id = '8a54ed7b5e1b56d874642770ad2e8b73e2d09d6e993c3a92b1e89690bb1c9014' + api_url = 'https://api-v2launch.trakt.tv/' + + def call(self, method_url, post_data = None): + headers = { + 'Content-Type': 'application/json', + 'Authorization': 'Bearer %s' % self.conf('automation_oauth_token'), + 'trakt-api-version': 2, + 'trakt-api-key': self.client_id, + } + + if post_data: + post_data = json.dumps(post_data) + + data = self.getJsonData(self.api_url + method_url, data = post_data or {}, headers = headers) + return data if data else [] + + +class Trakt(Automation, TraktBase): + + urls = { + 'watchlist': 'sync/watchlist/movies/', + 'oauth': 'https://api.couchpota.to/authorize/trakt/', + 'refresh_token': 'https://api.couchpota.to/authorize/trakt_refresh/', + } + + def __init__(self): + super(Trakt, self).__init__() + + addApiView('automation.trakt.auth_url', self.getAuthorizationUrl) + addApiView('automation.trakt.credentials', self.getCredentials) + + fireEvent('schedule.interval', 'updater.check', self.refreshToken, hours = 24) + addEvent('app.load', self.refreshToken) + + def refreshToken(self): + + token = self.conf('automation_oauth_token') + refresh_token = self.conf('automation_oauth_refresh') + if token and refresh_token: + + prop_name = 'last_trakt_refresh' + last_refresh = int(Env.prop(prop_name, default = 0)) + + if last_refresh < time.time()-4838400: # refresh every 8 weeks + log.debug('Refreshing trakt token') + + url = self.urls['refresh_token'] + '?token=' + self.conf('automation_oauth_refresh') + data = fireEvent('cp.api_call', url, cache_timeout = 0, single = True) + if data and 'oauth' in data and 'refresh' in data: + log.debug('Oauth refresh: %s', data) + self.conf('automation_oauth_token', value = data.get('oauth')) + self.conf('automation_oauth_refresh', value = data.get('refresh')) + Env.prop(prop_name, value = 
int(time.time())) + else: + log.error('Failed refreshing Trakt token, please re-register in settings') + + elif token and not refresh_token: + log.error('Refresh token is missing, please re-register Trakt for autorefresh of the token in the future') + + def getIMDBids(self): + movies = [] + for movie in self.getWatchlist(): + movies.append(movie.get('movie').get('ids').get('imdb')) + + return movies + + def getWatchlist(self): + return self.call(self.urls['watchlist']) + + def getAuthorizationUrl(self, host = None, **kwargs): + callback_url = cleanHost(host) + '%sautomation.trakt.credentials/' % (Env.get('api_base').lstrip('/')) + log.debug('callback_url is %s', callback_url) + + target_url = self.urls['oauth'] + "?target=" + callback_url + log.debug('target_url is %s', target_url) + + return { + 'success': True, + 'url': target_url, + } + + def getCredentials(self, **kwargs): + try: + oauth_token = kwargs.get('oauth') + refresh_token = kwargs.get('refresh') + + log.debug('oauth_token is: %s', oauth_token) + self.conf('automation_oauth_token', value = oauth_token) + self.conf('automation_oauth_refresh', value = refresh_token) + + Env.prop('last_trakt_refresh', value = int(time.time())) + except: + log.error('Failed setting trakt token: %s', traceback.format_exc()) + + return 'redirect', Env.get('web_base') + 'settings/automation/' diff --git a/couchpotato/core/media/movie/providers/automation/trakt/static/trakt.js b/couchpotato/core/media/movie/providers/automation/trakt/static/trakt.js new file mode 100644 index 0000000000..4d757b7dc9 --- /dev/null +++ b/couchpotato/core/media/movie/providers/automation/trakt/static/trakt.js @@ -0,0 +1,67 @@ +var TraktAutomation = new Class({ + + initialize: function(){ + var self = this; + + App.addEvent('loadSettings', self.addRegisterButton.bind(self)); + }, + + addRegisterButton: function(){ + var self = this, + setting_page = App.getPage('Settings'); + + setting_page.addEvent('create', function(){ + + var fieldset = 
setting_page.tabs.automation.groups.trakt_automation, + l = window.location; + + var trakt_set = 0; + fieldset.getElements('input[type=text]').each(function(el){ + trakt_set += +(el.get('value') !== ''); + }); + + new Element('.ctrlHolder').adopt( + + // Unregister button + (trakt_set > 0) ? + [ + self.unregister = new Element('a.button.red', { + 'text': 'Unregister', + 'events': { + 'click': function(){ + fieldset.getElements('input[name*=oauth_token]').set('value', '').fireEvent('change'); + + self.unregister.destroy(); + self.unregister_or.destroy(); + } + } + }), + self.unregister_or = new Element('span[text=or]') + ] + : null, + + // Register button + new Element('a.button', { + 'text': trakt_set > 0 ? 'Register a different account' : 'Register your trakt.tv account', + 'events': { + 'click': function(){ + Api.request('automation.trakt.auth_url', { + 'data': { + 'host': l.protocol + '//' + l.hostname + (l.port ? ':' + l.port : '') + }, + 'onComplete': function(json){ + window.location = json.url; + } + }); + } + } + }) + + ).inject(fieldset); + }); + + } + +}); + +new TraktAutomation(); diff --git a/couchpotato/core/media/movie/providers/base.py b/couchpotato/core/media/movie/providers/base.py new file mode 100644 index 0000000000..4e80d5d318 --- /dev/null +++ b/couchpotato/core/media/movie/providers/base.py @@ -0,0 +1,5 @@ +from couchpotato.core.media._base.providers.info.base import BaseInfoProvider + + +class MovieProvider(BaseInfoProvider): + type = 'movie' diff --git a/libs/migrate/versioning/templates/repository/__init__.py b/couchpotato/core/media/movie/providers/info/__init__.py similarity index 100% rename from libs/migrate/versioning/templates/repository/__init__.py rename to couchpotato/core/media/movie/providers/info/__init__.py diff --git a/couchpotato/core/media/movie/providers/info/_modifier.py b/couchpotato/core/media/movie/providers/info/_modifier.py new file mode 100644 index 0000000000..832182af7b --- /dev/null +++ 
b/couchpotato/core/media/movie/providers/info/_modifier.py @@ -0,0 +1,136 @@ +import copy +import traceback + +from CodernityDB.database import RecordNotFound +from couchpotato import get_db +from couchpotato.core.event import addEvent, fireEvent +from couchpotato.core.helpers.variable import mergeDicts, randomString +from couchpotato.core.logger import CPLog +from couchpotato.core.plugins.base import Plugin + + +log = CPLog(__name__) + +autoload = 'MovieResultModifier' + + +class MovieResultModifier(Plugin): + + default_info = { + 'tmdb_id': 0, + 'titles': [], + 'original_title': '', + 'year': 0, + 'images': { + 'poster': [], + 'backdrop': [], + 'poster_original': [], + 'backdrop_original': [], + 'actors': {}, + 'landscape': [], + 'logo': [], + 'clear_art': [], + 'disc_art': [], + 'banner': [], + 'extra_thumbs': [], + 'extra_fanart': [] + }, + 'runtime': 0, + 'plot': '', + 'tagline': '', + 'imdb': '', + 'genres': [], + 'mpaa': None, + 'actors': [], + 'actor_roles': {}, + 'languages': [] + } + + def __init__(self): + addEvent('result.modify.info.search', self.returnByType) + addEvent('result.modify.movie.search', self.combineOnIMDB) + addEvent('result.modify.movie.info', self.checkLibrary) + + def returnByType(self, results): + + new_results = {} + for r in results: + type_name = r.get('type', 'movie') + 's' + if type_name not in new_results: + new_results[type_name] = [] + + new_results[type_name].append(r) + + # Combine movies, needs a cleaner way.. 
+ if 'movies' in new_results: + new_results['movies'] = self.combineOnIMDB(new_results['movies']) + + return new_results + + def combineOnIMDB(self, results): + + temp = {} + order = [] + + # Combine on imdb id + for item in results: + random_string = randomString() + imdb = item.get('imdb', random_string) + imdb = imdb if imdb else random_string + + if not temp.get(imdb): + temp[imdb] = self.getLibraryTags(imdb) + order.append(imdb) + + # Merge dicts + temp[imdb] = mergeDicts(temp[imdb], item) + + # Make it a list again + temp_list = [temp[x] for x in order] + + return temp_list + + def getLibraryTags(self, imdb): + + temp = { + 'in_wanted': False, + 'in_library': False, + } + + # Add release info from current library + db = get_db() + try: + + media = None + try: + media = db.get('media', 'imdb-%s' % imdb, with_doc = True)['doc'] + except RecordNotFound: + pass + + if media: + + if media.get('status') == 'active': + temp['in_wanted'] = media + + try: temp['in_wanted']['profile'] = db.get('id', media['profile_id']) + except: temp['in_wanted']['profile'] = {'label': ''} + + for release in fireEvent('release.for_media', media['_id'], single = True): + if release.get('status') == 'done': + if not temp['in_library']: + temp['in_library'] = media + temp['in_library']['releases'] = [] + + temp['in_library']['releases'].append(release) + except: + log.error('Tried getting more info on searched movies: %s', traceback.format_exc()) + + return temp + + def checkLibrary(self, result): + + result = mergeDicts(copy.deepcopy(self.default_info), copy.deepcopy(result)) + + if result and result.get('imdb'): + return mergeDicts(result, self.getLibraryTags(result['imdb'])) + return result diff --git a/couchpotato/core/media/movie/providers/info/couchpotatoapi.py b/couchpotato/core/media/movie/providers/info/couchpotatoapi.py new file mode 100644 index 0000000000..5256ec963f --- /dev/null +++ b/couchpotato/core/media/movie/providers/info/couchpotatoapi.py @@ -0,0 +1,131 @@ +import 
base64 +import time + +from couchpotato.core.event import addEvent, fireEvent +from couchpotato.core.helpers.encoding import tryUrlencode, ss +from couchpotato.core.logger import CPLog +from couchpotato.core.media.movie.providers.base import MovieProvider +from couchpotato.environment import Env + + +log = CPLog(__name__) + +autoload = 'CouchPotatoApi' + + +class CouchPotatoApi(MovieProvider): + + urls = { + 'validate': 'https://api.couchpota.to/validate/%s/', + 'search': 'https://api.couchpota.to/search/%s/', + 'info': 'https://api.couchpota.to/info/%s/', + 'is_movie': 'https://api.couchpota.to/ismovie/%s/', + 'eta': 'https://api.couchpota.to/eta/%s/', + 'suggest': 'https://api.couchpota.to/suggest/', + 'updater': 'https://api.couchpota.to/updater/?%s', + 'messages': 'https://api.couchpota.to/messages/?%s', + } + http_time_between_calls = 0 + api_version = 1 + + def __init__(self): + addEvent('movie.info', self.getInfo, priority = 2) + addEvent('movie.info.release_date', self.getReleaseDate) + + addEvent('info.search', self.search, priority = 2) + addEvent('movie.search', self.search, priority = 2) + + addEvent('movie.suggest', self.getSuggestions) + addEvent('movie.is_movie', self.isMovie) + + addEvent('release.validate', self.validate) + + addEvent('cp.api_call', self.call) + + addEvent('cp.source_url', self.getSourceUrl) + addEvent('cp.messages', self.getMessages) + + def call(self, url, **kwargs): + return self.getJsonData(url, headers = self.getRequestHeaders(), **kwargs) + + def getMessages(self, last_check = 0): + + data = self.getJsonData(self.urls['messages'] % tryUrlencode({ + 'last_check': last_check, + }), headers = self.getRequestHeaders(), cache_timeout = 10) + + return data + + def getSourceUrl(self, repo = None, repo_name = None, branch = None): + return self.getJsonData(self.urls['updater'] % tryUrlencode({ + 'repo': repo, + 'name': repo_name, + 'branch': branch, + }), headers = self.getRequestHeaders()) + + def search(self, q, limit = 5): + 
return self.getJsonData(self.urls['search'] % tryUrlencode(q) + ('?limit=%s' % limit), headers = self.getRequestHeaders()) + + def validate(self, name = None): + + if not name: + return + + name_enc = base64.b64encode(ss(name)) + return self.getJsonData(self.urls['validate'] % name_enc, headers = self.getRequestHeaders()) + + def isMovie(self, identifier = None, adding = False, **kwargs): + + if not identifier: + return + + url = self.urls['is_movie'] % identifier + url += '' if adding else '?ignore=1' + + data = self.getJsonData(url, headers = self.getRequestHeaders()) + if data: + return data.get('is_movie', True) + + return True + + def getInfo(self, identifier = None, adding = False, **kwargs): + + if not identifier: + return + + url = self.urls['info'] % identifier + url += '' if adding else '?ignore=1' + + result = self.getJsonData(url, headers = self.getRequestHeaders()) + if result: + return dict((k, v) for k, v in result.items() if v) + + return {} + + def getReleaseDate(self, identifier = None): + if identifier is None: return {} + + dates = self.getJsonData(self.urls['eta'] % identifier, headers = self.getRequestHeaders()) + log.debug('Found ETA for %s: %s', (identifier, dates)) + + return dates + + def getSuggestions(self, movies = None, ignore = None): + if not ignore: ignore = [] + if not movies: movies = [] + + suggestions = self.getJsonData(self.urls['suggest'], data = { + 'movies': ','.join(movies), + 'ignore': ','.join(ignore), + }, headers = self.getRequestHeaders()) + log.info('Found suggestions for %s movies, %s ignored', (len(movies), len(ignore))) + + return suggestions + + def getRequestHeaders(self): + return { + 'X-CP-Version': fireEvent('app.version', single = True), + 'X-CP-API': self.api_version, + 'X-CP-Time': time.time(), + 'X-CP-Identifier': '+%s' % Env.setting('api_key', 'core')[:10], # Use first 10 as identifier, so we don't need to use IP address in api stats + } diff --git a/couchpotato/core/media/movie/providers/info/fanarttv.py 
b/couchpotato/core/media/movie/providers/info/fanarttv.py new file mode 100644 index 0000000000..74cd942cd3 --- /dev/null +++ b/couchpotato/core/media/movie/providers/info/fanarttv.py @@ -0,0 +1,133 @@ +import traceback + +from couchpotato import tryInt +from couchpotato.core.event import addEvent +from couchpotato.core.logger import CPLog +from couchpotato.core.media.movie.providers.base import MovieProvider +from requests import HTTPError + + +log = CPLog(__name__) + +autoload = 'FanartTV' + + +class FanartTV(MovieProvider): + + urls = { + 'api': 'http://webservice.fanart.tv/v3/movies/%s?api_key=b28b14e9be662e027cfbc7c3dd600405' + } + + MAX_EXTRAFANART = 20 + http_time_between_calls = 0 + + def __init__(self): + addEvent('movie.info', self.getArt, priority = 1) + + def getArt(self, identifier = None, extended = True, **kwargs): + + if not identifier or not extended: + return {} + + images = {} + + try: + url = self.urls['api'] % identifier + fanart_data = self.getJsonData(url, show_error = False) + + if fanart_data: + log.debug('Found images for %s', fanart_data.get('name')) + images = self._parseMovie(fanart_data) + except HTTPError as e: + log.debug('Failed getting extra art for %s: %s', + (identifier, e)) + except: + log.error('Failed getting extra art for %s: %s', + (identifier, traceback.format_exc())) + return {} + + return { + 'images': images + } + + def _parseMovie(self, movie): + images = { + 'landscape': self._getMultImages(movie.get('moviethumb', []), 1), + 'logo': [], + 'disc_art': self._getMultImages(self._trimDiscs(movie.get('moviedisc', [])), 1), + 'clear_art': self._getMultImages(movie.get('hdmovieart', []), 1), + 'banner': self._getMultImages(movie.get('moviebanner', []), 1), + 'extra_fanart': [], + } + + if len(images['clear_art']) == 0: + images['clear_art'] = self._getMultImages(movie.get('movieart', []), 1) + + images['logo'] = self._getMultImages(movie.get('hdmovielogo', []), 1) + if len(images['logo']) == 0: + images['logo'] = 
self._getMultImages(movie.get('movielogo', []), 1) + + fanarts = self._getMultImages(movie.get('moviebackground', []), self.MAX_EXTRAFANART + 1) + + if fanarts: + images['backdrop_original'] = [fanarts[0]] + images['extra_fanart'] = fanarts[1:] + + return images + + def _trimDiscs(self, disc_images): + """ + Return a subset of discImages. Only bluray disc images will be returned. + """ + + trimmed = [] + for disc in disc_images: + if disc.get('disc_type') == 'bluray': + trimmed.append(disc) + + if len(trimmed) == 0: + return disc_images + + return trimmed + + def _getImage(self, images): + image_url = None + highscore = -1 + for image in images: + if tryInt(image.get('likes')) > highscore: + highscore = tryInt(image.get('likes')) + image_url = image.get('url') or image.get('href') + + return image_url + + def _getMultImages(self, images, n): + """ + Chooses the best n images and returns them as a list. + If n<0, all images will be returned. + """ + image_urls = [] + pool = [] + for image in images: + if image.get('lang') == 'en': + pool.append(image) + orig_pool_size = len(pool) + + while len(pool) > 0 and (n < 0 or orig_pool_size - len(pool) < n): + best = None + highscore = -1 + for image in pool: + if tryInt(image.get('likes')) > highscore: + highscore = tryInt(image.get('likes')) + best = image + url = best.get('url') or best.get('href') + if url: + image_urls.append(url) + pool.remove(best) + + return image_urls + + def isDisabled(self): + if self.conf('api_key') == '': + log.error('No API key provided.') + return True + return False diff --git a/couchpotato/core/media/movie/providers/info/omdbapi.py b/couchpotato/core/media/movie/providers/info/omdbapi.py new file mode 100644 index 0000000000..6fce1b6151 --- /dev/null +++ b/couchpotato/core/media/movie/providers/info/omdbapi.py @@ -0,0 +1,169 @@ +О╩©import json +import re +import traceback + +from couchpotato import Env +from couchpotato.core.event import addEvent, fireEvent +from 
couchpotato.core.helpers.encoding import tryUrlencode +from couchpotato.core.helpers.variable import tryInt, tryFloat, splitString, fillingLanguages +from couchpotato.core.logger import CPLog +from couchpotato.core.media.movie.providers.base import MovieProvider + + +log = CPLog(__name__) + +autoload = 'OMDBAPI' + + +class OMDBAPI(MovieProvider): + + urls = { + 'search': 'https://www.omdbapi.com/?apikey=%s&type=movie&%s', + 'info': 'https://www.omdbapi.com/?apikey=%s&type=movie&i=%s', + } + + http_time_between_calls = 0 + + def __init__(self): + addEvent('info.search', self.search) + addEvent('movie.search', self.search) + addEvent('movie.info', self.getInfo) + + def search(self, q, limit = 12): + if self.isDisabled(): + return [] + + name_year = fireEvent('scanner.name_year', q, single = True) + + if not name_year or (name_year and not name_year.get('name')): + name_year = { + 'name': q + } + + cache_key = 'omdbapi.cache.%s' % q + url = self.urls['search'] % (self.getApiKey(), tryUrlencode({'t': name_year.get('name'), 'y': name_year.get('year', '')})) + cached = self.getCache(cache_key, url, timeout = 3, headers = {'User-Agent': Env.getIdentifier()}) + + if cached: + result = self.parseMovie(cached) + if result.get('titles') and len(result.get('titles')) > 0: + log.info('Found: %s', result['titles'][0] + ' (' + str(result.get('year')) + ')') + return [result] + + return [] + + return [] + + def getInfo(self, identifier = None, **kwargs): + if self.isDisabled() or not identifier: + return {} + + cache_key = 'omdbapi.cache.%s' % identifier + url = self.urls['info'] % (self.getApiKey(), identifier) + cached = self.getCache(cache_key, url, timeout = 3, headers = {'User-Agent': Env.getIdentifier()}) + + if cached: + result = self.parseMovie(cached) + if result.get('titles') and len(result.get('titles')) > 0: + log.info('Found: %s', result['titles'][0] + ' (' + str(result['year']) + ')') + return result + + return {} + + def parseMovie(self, movie): + + movie_data = {} 
+ try: + + try: + if isinstance(movie, (str, unicode)): + movie = json.loads(movie) + except ValueError: + log.info('No proper json to decode') + return movie_data + + if movie.get('Response') == 'Parse Error' or movie.get('Response') == 'False': + return movie_data + + if movie.get('Type').lower() != 'movie': + return movie_data + + tmp_movie = movie.copy() + for key in tmp_movie: + tmp_movie_elem = tmp_movie.get(key) + if not isinstance(tmp_movie_elem, (str, unicode)) or tmp_movie_elem.lower() == 'n/a': + del movie[key] + + year = tryInt(movie.get('Year', '')) + + movie_data = { + 'type': 'movie', + 'via_imdb': True, + 'titles': [movie.get('Title')] if movie.get('Title') else [], + 'original_title': movie.get('Title'), + 'images': { + 'poster': [movie.get('Poster', '')] if movie.get('Poster') and len(movie.get('Poster', '')) > 4 else [], + }, + 'rating': { + 'imdb': (tryFloat(movie.get('imdbRating', 0)), tryInt(movie.get('imdbVotes', '').replace(',', ''))), + #'rotten': (tryFloat(movie.get('tomatoRating', 0)), tryInt(movie.get('tomatoReviews', '').replace(',', ''))), + }, + 'imdb': str(movie.get('imdbID', '')), + 'mpaa': str(movie.get('Rated', '')), + 'runtime': self.runtimeToMinutes(movie.get('Runtime', '')), + 'released': movie.get('Released'), + 'year': year if isinstance(year, int) else None, + 'plot': movie.get('Plot'), + 'genres': splitString(movie.get('Genre', '')), + 'directors': splitString(movie.get('Director', '')), + 'writers': splitString(movie.get('Writer', '')), + 'actors': splitString(movie.get('Actors', '')), + 'languages' : fillingLanguages(splitString(movie.get('Language', ''))) + } + movie_data = dict((k, v) for k, v in movie_data.items() if v) + except: + log.error('Failed parsing IMDB API json: %s', traceback.format_exc()) + + return movie_data + + def isDisabled(self): + if self.getApiKey() == '': + log.error('No API key provided.') + return True + return False + + def getApiKey(self): + apikey = self.conf('api_key') + return apikey + + def 
runtimeToMinutes(self, runtime_str): + runtime = 0 + + regex = '(\d*.?\d+).(h|hr|hrs|mins|min)+' + matches = re.findall(regex, runtime_str) + for match in matches: + nr, size = match + runtime += tryInt(nr) * (60 if 'h' is str(size)[0] else 1) + + return runtime + + +config = [{ + 'name': 'omdbapi', + 'groups': [ + { + 'tab': 'providers', + 'name': 'tmdb', + 'label': 'OMDB API', + 'hidden': True, + 'description': 'Used for all calls to TheMovieDB.', + 'options': [ + { + 'name': 'api_key', + 'default': 'bbc0e412', # Don't be a dick and use this somewhere else + 'label': 'Api Key', + }, + ], + }, + ], +}] diff --git a/couchpotato/core/media/movie/providers/info/themoviedb.py b/couchpotato/core/media/movie/providers/info/themoviedb.py new file mode 100644 index 0000000000..0188d9632b --- /dev/null +++ b/couchpotato/core/media/movie/providers/info/themoviedb.py @@ -0,0 +1,325 @@ +О╩©import random +import traceback +import itertools +from base64 import b64decode as bd + +from couchpotato.core.event import addEvent, fireEvent +from couchpotato.core.helpers.encoding import toUnicode, ss, tryUrlencode +from couchpotato.core.helpers.variable import tryInt, splitString, fillingLanguages +from couchpotato.core.logger import CPLog +from couchpotato.core.media.movie.providers.base import MovieProvider +from couchpotato.environment import Env + +log = CPLog(__name__) + +autoload = 'TheMovieDb' + + +class TheMovieDb(MovieProvider): + + http_time_between_calls = .35 + + configuration = { + 'images': { + 'secure_base_url': 'https://image.tmdb.org/t/p/', + }, + } + + ak = ['ZTIyNGZlNGYzZmVjNWY3YjU1NzA2NDFmN2NkM2RmM2E=', 'ZjZiZDY4N2ZmYTYzY2QyODJiNmZmMmM2ODc3ZjI2Njk='] + + languages = [ 'en' ] + default_language = 'en' + + def __init__(self): + addEvent('info.search', self.search, priority = 1) + addEvent('movie.search', self.search, priority = 1) + addEvent('movie.info', self.getInfo, priority = 1) + addEvent('movie.getfrenchtitle', self.getFrenchTitle) + 
addEvent('movie.info_by_tmdb', self.getInfo) + addEvent('app.load', self.config) + + def config(self): + + # Reset invalid key + if self.conf('api_key') == '9b939aee0aaafc12a65bf448e4af9543': + self.conf('api_key', '') + + languages = self.getLanguages() + + # languages should never be empty, the first language is the default language used for all the description details + self.default_language = languages[0] + + # en is always downloaded and it is the fallback + if 'en' in languages: + languages.remove('en') + + # default language has a special management + if self.default_language in languages: + languages.remove(self.default_language) + + self.languages = languages + + configuration = self.request('configuration') + if configuration: + self.configuration = configuration + + def search(self, q, limit = 3): + """ Find movie by name """ + + if self.isDisabled(): + return False + + log.debug('Searching for movie: %s', q) + + raw = None + try: + name_year = fireEvent('scanner.name_year', q, single = True) + raw = self.request('search/movie', { + 'query': name_year.get('name', q), + 'year': name_year.get('year'), + 'search_type': 'ngram' if limit > 1 else 'phrase' + }, return_key = 'results') + except: + log.error('Failed searching TMDB for "%s": %s', (q, traceback.format_exc())) + + results = [] + if raw: + try: + nr = 0 + + for movie in raw: + parsed_movie = self.parseMovie(movie, extended = False) + if parsed_movie: + results.append(parsed_movie) + + nr += 1 + if nr == limit: + break + + log.info('Found: %s', [result['titles'][0] + ' (' + str(result.get('year', 0)) + ')' for result in results]) + + return results + except SyntaxError as e: + log.error('Failed to parse XML response: %s', e) + return False + + return results + def getFrenchTitle(self, movie): + movie = self.request('movie/%s' % movie.get('info').get('tmdb_id'), { + 'append_to_response': '', + 'language': 'fr' + }) + + if not movie: + return + + return movie.get('title') + + def getInfo(self, 
identifier = None, extended = True, **kwargs): + + if not identifier: + return {} + + result = self.parseMovie({ + 'id': identifier + }, extended = extended) + + return result or {} + + def parseMovie(self, movie, extended = True): + + # Do request, append other items + movie = self.request('movie/%s' % movie.get('id'), { + 'append_to_response': 'alternative_titles' + (',images,casts' if extended else ''), + 'language': 'en' + }) + if not movie: + return + + movie_default = movie if self.default_language == 'en' else self.request('movie/%s' % movie.get('id'), { + 'append_to_response': 'alternative_titles' + (',images,casts' if extended else ''), + 'language': self.default_language + }) + + movie_default = movie_default or movie + + movie_others = [ self.request('movie/%s' % movie.get('id'), { + 'append_to_response': 'alternative_titles' + (',images,casts' if extended else ''), + 'language': language + }) for language in self.languages] if self.languages else [] + + # Images + poster = self.getImage(movie, type = 'poster', size = 'w154') + poster_original = self.getImage(movie, type = 'poster', size = 'original') + backdrop_original = self.getImage(movie, type = 'backdrop', size = 'original') + extra_thumbs = self.getMultImages(movie, type = 'backdrops', size = 'original') if extended else [] + + images = { + 'poster': [poster] if poster else [], + #'backdrop': [backdrop] if backdrop else [], + 'poster_original': [poster_original] if poster_original else [], + 'backdrop_original': [backdrop_original] if backdrop_original else [], + 'actors': {}, + 'extra_thumbs': extra_thumbs + } + + # Genres + try: + genres = [genre.get('name') for genre in movie.get('genres', [])] + except: + genres = [] + + # 1900 is the same as None + year = str(movie.get('release_date') or '')[:4] + if not movie.get('release_date') or year == '1900' or year.lower() == 'none': + year = None + + # Gather actors data + actors = {} + if extended: + + # Full data + cast = movie.get('casts', 
{}).get('cast', []) + + for cast_item in cast: + try: + actors[toUnicode(cast_item.get('name'))] = toUnicode(cast_item.get('character')) + images['actors'][toUnicode(cast_item.get('name'))] = self.getImage(cast_item, type = 'profile', size = 'original') + except: + log.debug('Error getting cast info for %s: %s', (cast_item, traceback.format_exc())) + + movie_data = { + 'type': 'movie', + 'via_tmdb': True, + 'tmdb_id': movie.get('id'), + 'titles': [toUnicode(movie_default.get('title') or movie.get('title'))], + 'original_title': movie.get('original_title'), + 'images': images, + 'imdb': movie.get('imdb_id'), + 'runtime': movie.get('runtime'), + 'released': str(movie.get('release_date')), + 'year': tryInt(year, None), + 'plot': movie_default.get('overview') or movie.get('overview'), + 'genres': genres, + 'collection': getattr(movie.get('belongs_to_collection'), 'name', None), + 'actor_roles': actors, + 'languages' : fillingLanguages(splitString(movie.get('original_language'))) + } + + movie_data = dict((k, v) for k, v in movie_data.items() if v) + + # Add alternative names + movies = [ movie ] + movie_others if movie == movie_default else [ movie, movie_default ] + movie_others + movie_titles = [ self.getTitles(movie) for movie in movies ] + + all_titles = sorted(list(itertools.chain.from_iterable(movie_titles))) + + alternate_titles = movie_data['titles'] + + for title in all_titles: + if title and title not in alternate_titles and title.lower() != 'none' and title is not None: + alternate_titles.append(title) + + movie_data['titles'] = alternate_titles + + return movie_data + + def getImage(self, movie, type = 'poster', size = 'poster'): + + image_url = '' + try: + path = movie.get('%s_path' % type) + if path: + image_url = '%s%s%s' % (self.configuration['images']['secure_base_url'], size, path) + except: + log.debug('Failed getting %s.%s for "%s"', (type, size, ss(str(movie)))) + + return image_url + + def getMultImages(self, movie, type = 'backdrops', size = 
'original'): + + image_urls = [] + try: + for image in movie.get('images', {}).get(type, [])[1:5]: + image_urls.append(self.getImage(image, 'file', size)) + except: + log.debug('Failed getting %s.%s for "%s"', (type, size, ss(str(movie)))) + + return image_urls + + def request(self, call = '', params = {}, return_key = None): + + params = dict((k, v) for k, v in params.items() if v) + params = tryUrlencode(params) + + try: + url = 'https://api.themoviedb.org/3/%s?api_key=%s%s' % (call, self.getApiKey(), '&%s' % params if params else '') + data = self.getJsonData(url, show_error = False) + except: + log.debug('Movie not found: %s, %s', (call, params)) + data = None + + if data and return_key and return_key in data: + data = data.get(return_key) + + return data + + def isDisabled(self): + if self.getApiKey() == '': + log.error('No API key provided.') + return True + return False + + def getApiKey(self): + key = self.conf('api_key') + return bd(random.choice(self.ak)) if key == '' else key + + def getLanguages(self): + languages = splitString(Env.setting('languages', section = 'core')) + if len(languages): + return languages + + return [ 'en' ] + + def getTitles(self, movie): + # add the title to the list + title = toUnicode(movie.get('title')) + + titles = [title] if title else [] + + # add the original_title to the list + alternate_title = toUnicode(movie.get('original_title')) + + if alternate_title and alternate_title not in titles: + titles.append(alternate_title) + + # Add alternative titles + alternate_titles = movie.get('alternative_titles', {}).get('titles', []) + + for alt in alternate_titles: + alt_name = toUnicode(alt.get('title')) + if alt_name and alt_name not in titles and alt_name.lower() != 'none' and alt_name is not None: + titles.append(alt_name) + + return titles; + + +config = [{ + 'name': 'themoviedb', + 'groups': [ + { + 'tab': 'providers', + 'name': 'tmdb', + 'label': 'TheMovieDB', + 'hidden': False, + 'description': 'Used for all calls to 
TheMovieDB.', + 'options': [ + { + 'name': 'api_key', + 'default': '', + 'label': 'Api Key', + }, + ], + }, + ], +}] diff --git a/libs/migrate/versioning/templates/repository/default/__init__.py b/couchpotato/core/media/movie/providers/metadata/__init__.py similarity index 100% rename from libs/migrate/versioning/templates/repository/default/__init__.py rename to couchpotato/core/media/movie/providers/metadata/__init__.py diff --git a/couchpotato/core/media/movie/providers/metadata/base.py b/couchpotato/core/media/movie/providers/metadata/base.py new file mode 100755 index 0000000000..53e78000da --- /dev/null +++ b/couchpotato/core/media/movie/providers/metadata/base.py @@ -0,0 +1,187 @@ +import os +import shutil +import traceback + +from couchpotato.core.event import addEvent, fireEvent +from couchpotato.core.helpers.encoding import sp, toUnicode +from couchpotato.core.helpers.variable import getIdentifier, underscoreToCamel +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.metadata.base import MetaDataBase +from couchpotato.environment import Env + + +log = CPLog(__name__) + + +class MovieMetaData(MetaDataBase): + + enabled_option = 'meta_enabled' + + def __init__(self): + addEvent('renamer.after', self.create) + + def create(self, message = None, group = None): + if self.isDisabled(): return + if not group: group = {} + + log.info('Creating %s metadata.', self.getName()) + + # Update library to get latest info + try: + group['media'] = fireEvent('movie.update', group['media'].get('_id'), identifier = getIdentifier(group['media']), extended = True, single = True) + except: + log.error('Failed to update movie, before creating metadata: %s', traceback.format_exc()) + + root_name = toUnicode(self.getRootName(group)) + meta_name = toUnicode(os.path.basename(root_name)) + root = toUnicode(os.path.dirname(root_name)) + + movie_info = group['media'].get('info') + + for file_type in ['nfo']: + try: + self._createType(meta_name, root, 
movie_info, group, file_type, 0) + except: + log.error('Unable to create %s file: %s', ('nfo', traceback.format_exc())) + + for file_type in ['thumbnail', 'fanart', 'banner', 'disc_art', 'logo', 'clear_art', 'landscape', 'extra_thumbs', 'extra_fanart']: + try: + if file_type == 'thumbnail': + num_images = len(movie_info['images']['poster_original']) + elif file_type == 'fanart': + num_images = len(movie_info['images']['backdrop_original']) + else: + num_images = len(movie_info['images'][file_type]) + + for i in range(num_images): + self._createType(meta_name, root, movie_info, group, file_type, i) + except: + log.error('Unable to create %s file: %s', (file_type, traceback.format_exc())) + + def _createType(self, meta_name, root, movie_info, group, file_type, i): # Get file path + camelcase_method = underscoreToCamel(file_type.capitalize()) + name = getattr(self, 'get' + camelcase_method + 'Name')(meta_name, root, i) + + if name and (self.conf('meta_' + file_type) or self.conf('meta_' + file_type) is None): + + # Get file content + content = getattr(self, 'get' + camelcase_method)(movie_info = movie_info, data = group, i = i) + if content: + log.debug('Creating %s file: %s', (file_type, name)) + if os.path.isfile(content): + content = sp(content) + name = sp(name) + + if not os.path.exists(os.path.dirname(name)): + os.makedirs(os.path.dirname(name)) + + shutil.copy2(content, name) + shutil.copyfile(content, name) + + # Try and copy stats seperately + try: shutil.copystat(content, name) + except: pass + else: + self.createFile(name, content) + group['renamed_files'].append(name) + + try: + os.chmod(sp(name), Env.getPermission('file')) + except: + log.debug('Failed setting permissions for %s: %s', (name, traceback.format_exc())) + + def getRootName(self, data = None): + if not data: data = {} + return os.path.join(data['destination_dir'], data['filename']) + + def getFanartName(self, name, root, i): + return + + def getThumbnailName(self, name, root, i): + return + + 
def getBannerName(self, name, root, i): + return + + def getClearArtName(self, name, root, i): + return + + def getLogoName(self, name, root, i): + return + + def getDiscArtName(self, name, root, i): + return + + def getLandscapeName(self, name, root, i): + return + + def getExtraThumbsName(self, name, root, i): + return + + def getExtraFanartName(self, name, root, i): + return + + def getNfoName(self, name, root, i): + return + + def getNfo(self, movie_info = None, data = None, i = 0): + if not data: data = {} + if not movie_info: movie_info = {} + + def getThumbnail(self, movie_info = None, data = None, wanted_file_type = 'poster_original', i = 0): + if not data: data = {} + if not movie_info: movie_info = {} + + # See if it is in current files + files = data['media'].get('files') + if files.get('image_' + wanted_file_type): + if os.path.isfile(files['image_' + wanted_file_type][i]): + return files['image_' + wanted_file_type][i] + + # Download using existing info + try: + images = movie_info['images'][wanted_file_type] + file_path = fireEvent('file.download', url = images[i], single = True) + return file_path + except: + pass + + def getFanart(self, movie_info = None, data = None, i = 0): + if not data: data = {} + if not movie_info: movie_info = {} + return self.getThumbnail(movie_info = movie_info, data = data, wanted_file_type = 'backdrop_original', i = i) + + def getBanner(self, movie_info = None, data = None, i = 0): + if not data: data = {} + if not movie_info: movie_info = {} + return self.getThumbnail(movie_info = movie_info, data = data, wanted_file_type = 'banner', i = i) + + def getClearArt(self, movie_info = None, data = None, i = 0): + if not data: data = {} + if not movie_info: movie_info = {} + return self.getThumbnail(movie_info = movie_info, data = data, wanted_file_type = 'clear_art', i = i) + + def getLogo(self, movie_info = None, data = None, i = 0): + if not data: data = {} + if not movie_info: movie_info = {} + return 
self.getThumbnail(movie_info = movie_info, data = data, wanted_file_type = 'logo', i = i) + + def getDiscArt(self, movie_info = None, data = None, i = 0): + if not data: data = {} + if not movie_info: movie_info = {} + return self.getThumbnail(movie_info = movie_info, data = data, wanted_file_type = 'disc_art', i = i) + + def getLandscape(self, movie_info = None, data = None, i = 0): + if not data: data = {} + if not movie_info: movie_info = {} + return self.getThumbnail(movie_info = movie_info, data= data, wanted_file_type = 'landscape', i = i) + + def getExtraThumbs(self, movie_info = None, data = None, i = 0): + if not data: data = {} + if not movie_info: movie_info = {} + return self.getThumbnail(movie_info = movie_info, data = data, wanted_file_type = 'extra_thumbs', i = i) + + def getExtraFanart(self, movie_info = None, data = None, i = 0): + if not data: data = {} + if not movie_info: movie_info = {} + return self.getThumbnail(movie_info = movie_info, data = data, wanted_file_type = 'extra_fanart', i = i) diff --git a/couchpotato/core/media/movie/providers/metadata/mediabrowser.py b/couchpotato/core/media/movie/providers/metadata/mediabrowser.py new file mode 100644 index 0000000000..6e40e4c1f6 --- /dev/null +++ b/couchpotato/core/media/movie/providers/metadata/mediabrowser.py @@ -0,0 +1,36 @@ +import os + +from couchpotato.core.media.movie.providers.metadata.base import MovieMetaData + + +autoload = 'MediaBrowser' + + +class MediaBrowser(MovieMetaData): + + def getThumbnailName(self, name, root, i): + return os.path.join(root, 'folder.jpg') + + def getFanartName(self, name, root, i): + return os.path.join(root, 'backdrop.jpg') + + +config = [{ + 'name': 'mediabrowser', + 'groups': [ + { + 'tab': 'renamer', + 'subtab': 'metadata', + 'name': 'mediabrowser_metadata', + 'label': 'MediaBrowser', + 'description': 'Generate folder.jpg and backdrop.jpg', + 'options': [ + { + 'name': 'meta_enabled', + 'default': False, + 'type': 'enabler', + }, + ], + }, + ], +}] 
diff --git a/couchpotato/core/media/movie/providers/metadata/ps3.py b/couchpotato/core/media/movie/providers/metadata/ps3.py new file mode 100644 index 0000000000..05df0a5393 --- /dev/null +++ b/couchpotato/core/media/movie/providers/metadata/ps3.py @@ -0,0 +1,33 @@ +import os + +from couchpotato.core.media.movie.providers.metadata.base import MovieMetaData + + +autoload = 'SonyPS3' + + +class SonyPS3(MovieMetaData): + + def getThumbnailName(self, name, root, i): + return os.path.join(root, 'cover.jpg') + + +config = [{ + 'name': 'sonyps3', + 'groups': [ + { + 'tab': 'renamer', + 'subtab': 'metadata', + 'name': 'sonyps3_metadata', + 'label': 'Sony PS3', + 'description': 'Generate cover.jpg', + 'options': [ + { + 'name': 'meta_enabled', + 'default': False, + 'type': 'enabler', + }, + ], + }, + ], +}] diff --git a/couchpotato/core/media/movie/providers/metadata/wdtv.py b/couchpotato/core/media/movie/providers/metadata/wdtv.py new file mode 100644 index 0000000000..a2062c7f1f --- /dev/null +++ b/couchpotato/core/media/movie/providers/metadata/wdtv.py @@ -0,0 +1,221 @@ +from xml.etree.ElementTree import Element, SubElement, tostring +import os +import re +import traceback +import xml.dom.minidom + +from couchpotato.core.media.movie.providers.metadata.base import MovieMetaData +from couchpotato.core.helpers.encoding import toUnicode +from couchpotato.core.helpers.variable import getTitle +from couchpotato.core.logger import CPLog + +autoload = 'WdtvLive' + +log = CPLog(__name__) + + +class WdtvLive(MovieMetaData): + + def getThumbnailName(self, name, root, i): + return self.createMetaName('%s.jpg', name, root) + + def createMetaName(self, basename, name, root): + return os.path.join(root, basename.replace('%s', name)) + + def getNfoName(self, name, root, i): + return self.createMetaName('%s.xml', name, root) + + def getNfo(self, movie_info=None, data=None, i=0): + if not data: data = {} + if not movie_info: movie_info = {} + + nfoxml = Element('details') + + # Title + 
try: + el = SubElement(nfoxml, 'title') + el.text = toUnicode(getTitle(data)) + except: + pass + + # IMDB id + try: + el = SubElement(nfoxml, 'id') + el.text = toUnicode(data['identifier']) + except: + pass + + # Runtime + try: + runtime = SubElement(nfoxml, 'runtime') + runtime.text = '%s min' % movie_info.get('runtime') + except: + pass + + # Other values + types = ['year', 'mpaa', 'originaltitle:original_title', 'outline', 'plot', 'tagline', 'premiered:released'] + for type in types: + + if ':' in type: + name, type = type.split(':') + else: + name = type + + try: + if movie_info.get(type): + el = SubElement(nfoxml, name) + el.text = toUnicode(movie_info.get(type, '')) + except: + pass + + # Rating + for rating_type in ['imdb', 'rotten', 'tmdb']: + try: + r, v = movie_info['rating'][rating_type] + rating = SubElement(nfoxml, 'rating') + rating.text = str(r) + votes = SubElement(nfoxml, 'votes') + votes.text = str(v) + break + except: + log.debug('Failed adding rating info from %s: %s', (rating_type, traceback.format_exc())) + + # Genre + for genre in movie_info.get('genres', []): + genres = SubElement(nfoxml, 'genre') + genres.text = toUnicode(genre) + + # Actors + for actor_name in movie_info.get('actor_roles', {}): + role_name = movie_info['actor_roles'][actor_name] + + actor = SubElement(nfoxml, 'actor') + name = SubElement(actor, 'name') + name.text = toUnicode(actor_name) + if role_name: + role = SubElement(actor, 'role') + role.text = toUnicode(role_name) + if movie_info['images']['actors'].get(actor_name): + thumb = SubElement(actor, 'thumb') + thumb.text = toUnicode(movie_info['images']['actors'].get(actor_name)) + + # Directors + for director_name in movie_info.get('directors', []): + director = SubElement(nfoxml, 'director') + director.text = toUnicode(director_name) + + # Writers + for writer in movie_info.get('writers', []): + writers = SubElement(nfoxml, 'credits') + writers.text = toUnicode(writer) + + # Sets or collections + collection_name = 
movie_info.get('collection') + if collection_name: + collection = SubElement(nfoxml, 'set') + collection.text = toUnicode(collection_name) + sorttitle = SubElement(nfoxml, 'sorttitle') + sorttitle.text = '%s %s' % (toUnicode(collection_name), movie_info.get('year')) + + # Images + for image_url in movie_info['images']['poster_original']: + image = SubElement(nfoxml, 'thumb') + image.text = toUnicode(image_url) + + image_types = [ + ('fanart', 'backdrop_original'), + ('banner', 'banner'), + ('discart', 'disc_art'), + ('logo', 'logo'), + ('clearart', 'clear_art'), + ('landscape', 'landscape'), + ('extrathumb', 'extra_thumbs'), + ('extrafanart', 'extra_fanart'), + ] + + for image_type in image_types: + sub, type = image_type + + sub_element = SubElement(nfoxml, sub) + for image_url in movie_info['images'][type]: + image = SubElement(sub_element, 'thumb') + image.text = toUnicode(image_url) + + # Add trailer if found + trailer_found = False + if data.get('renamed_files'): + for filename in data.get('renamed_files'): + if 'trailer' in filename: + trailer = SubElement(nfoxml, 'trailer') + trailer.text = toUnicode(filename) + trailer_found = True + if not trailer_found and data['files'].get('trailer'): + trailer = SubElement(nfoxml, 'trailer') + trailer.text = toUnicode(data['files']['trailer'][0]) + + # Add file metadata + fileinfo = SubElement(nfoxml, 'fileinfo') + streamdetails = SubElement(fileinfo, 'streamdetails') + + # Video data + if data['meta_data'].get('video'): + video = SubElement(streamdetails, 'video') + codec = SubElement(video, 'codec') + codec.text = toUnicode(data['meta_data']['video']) + aspect = SubElement(video, 'aspect') + aspect.text = str(data['meta_data']['aspect']) + width = SubElement(video, 'width') + width.text = str(data['meta_data']['resolution_width']) + height = SubElement(video, 'height') + height.text = str(data['meta_data']['resolution_height']) + + # Audio data + if data['meta_data'].get('audio'): + audio = SubElement(streamdetails, 
'audio') + codec = SubElement(audio, 'codec') + codec.text = toUnicode(data['meta_data'].get('audio')) + channels = SubElement(audio, 'channels') + channels.text = toUnicode(data['meta_data'].get('audio_channels')) + + # Clean up the xml and return it + nfoxml = xml.dom.minidom.parseString(tostring(nfoxml)) + xml_string = nfoxml.toprettyxml(indent = ' ') + text_re = re.compile('>\n\s+([^<>\s].*?)\n\s+\g<1>', str(i + 1))) + + def getNfo(self, movie_info=None, data=None, i=0): + if not data: data = {} + if not movie_info: movie_info = {} + + # return imdb url only + if self.conf('meta_url_only'): + return 'http://www.imdb.com/title/%s/' % toUnicode(data['identifier']) + + nfoxml = Element('movie') + + # Title + try: + el = SubElement(nfoxml, 'title') + el.text = toUnicode(getTitle(data)) + except: + pass + + # IMDB id + try: + el = SubElement(nfoxml, 'id') + el.text = toUnicode(data['identifier']) + except: + pass + + # Runtime + try: + runtime = SubElement(nfoxml, 'runtime') + runtime.text = '%s min' % movie_info.get('runtime') + except: + pass + + # mpaa + try: + mpaa = SubElement(nfoxml, 'mpaa') + mpaa.text = toUnicode('Rated %s' % movie_info.get('mpaa')) + except: + pass + + # Other values + types = ['year', 'originaltitle:original_title', 'outline', 'plot', 'tagline'] + for type in types: + + if ':' in type: + name, type = type.split(':') + else: + name = type + + try: + if movie_info.get(type): + el = SubElement(nfoxml, name) + el.text = toUnicode(movie_info.get(type, '')) + except: + pass + + # Release date + try: + if movie_info.get('released'): + el = SubElement(nfoxml, 'premiered') + el.text = time.strftime('%Y-%m-%d', time.strptime(movie_info.get('released'), '%d %b %Y')) + except: + log.debug('Failed to parse release date %s: %s', (movie_info.get('released'), traceback.format_exc())) + + # Rating + for rating_type in ['imdb', 'rotten', 'tmdb']: + try: + r, v = movie_info['rating'][rating_type] + rating = SubElement(nfoxml, 'rating') + rating.text = str(r) 
+ votes = SubElement(nfoxml, 'votes') + votes.text = str(v) + break + except: + log.debug('Failed adding rating info from %s: %s', (rating_type, traceback.format_exc())) + + # Genre + for genre in movie_info.get('genres', []): + genres = SubElement(nfoxml, 'genre') + genres.text = toUnicode(genre) + + # Actors + for actor_name in movie_info.get('actor_roles', {}): + role_name = movie_info['actor_roles'][actor_name] + + actor = SubElement(nfoxml, 'actor') + name = SubElement(actor, 'name') + name.text = toUnicode(actor_name) + if role_name: + role = SubElement(actor, 'role') + role.text = toUnicode(role_name) + if movie_info['images']['actors'].get(actor_name): + thumb = SubElement(actor, 'thumb') + thumb.text = toUnicode(movie_info['images']['actors'].get(actor_name)) + + # Directors + for director_name in movie_info.get('directors', []): + director = SubElement(nfoxml, 'director') + director.text = toUnicode(director_name) + + # Writers + for writer in movie_info.get('writers', []): + writers = SubElement(nfoxml, 'credits') + writers.text = toUnicode(writer) + + # Sets or collections + collection_name = movie_info.get('collection') + if collection_name: + collection = SubElement(nfoxml, 'set') + collection.text = toUnicode(collection_name) + sorttitle = SubElement(nfoxml, 'sorttitle') + sorttitle.text = '%s %s' % (toUnicode(collection_name), movie_info.get('year')) + + # Images + for image_url in movie_info['images']['poster_original']: + image = SubElement(nfoxml, 'thumb') + image.text = toUnicode(image_url) + + image_types = [ + ('fanart', 'backdrop_original'), + ('banner', 'banner'), + ('discart', 'disc_art'), + ('logo', 'logo'), + ('clearart', 'clear_art'), + ('landscape', 'landscape'), + ('extrathumb', 'extra_thumbs'), + ('extrafanart', 'extra_fanart'), + ] + + for image_type in image_types: + sub, type = image_type + + sub_element = SubElement(nfoxml, sub) + for image_url in movie_info['images'][type]: + image = SubElement(sub_element, 'thumb') + image.text 
= toUnicode(image_url) + + # Add trailer if found + trailer_found = False + if data.get('renamed_files'): + for filename in data.get('renamed_files'): + if 'trailer' in filename: + trailer = SubElement(nfoxml, 'trailer') + trailer.text = toUnicode(filename) + trailer_found = True + if not trailer_found and data['files'].get('trailer'): + trailer = SubElement(nfoxml, 'trailer') + trailer.text = toUnicode(data['files']['trailer'][0]) + + # Add file metadata + fileinfo = SubElement(nfoxml, 'fileinfo') + streamdetails = SubElement(fileinfo, 'streamdetails') + + # Video data + if data['meta_data'].get('video'): + video = SubElement(streamdetails, 'video') + codec = SubElement(video, 'codec') + codec.text = toUnicode(data['meta_data']['video']) + aspect = SubElement(video, 'aspect') + aspect.text = str(data['meta_data']['aspect']) + width = SubElement(video, 'width') + width.text = str(data['meta_data']['resolution_width']) + height = SubElement(video, 'height') + height.text = str(data['meta_data']['resolution_height']) + + # Audio data + if data['meta_data'].get('audio'): + audio = SubElement(streamdetails, 'audio') + codec = SubElement(audio, 'codec') + codec.text = toUnicode(data['meta_data'].get('audio')) + channels = SubElement(audio, 'channels') + channels.text = toUnicode(data['meta_data'].get('audio_channels')) + + # Clean up the xml and return it + nfoxml = xml.dom.minidom.parseString(tostring(nfoxml)) + xml_string = nfoxml.toprettyxml(indent = ' ') + text_re = re.compile('>\n\s+([^<>\s].*?)\n\s+\g<1>%s is the rootname of the movie. 
For example "/path/to/movie cd1.mkv" will be "/path/to/movie"' + }, + { + 'name': 'meta_url_only', + 'label': 'Only IMDB URL', + 'default': False, + 'advanced': True, + 'description': 'Create a nfo with only the IMDB url inside', + 'type': 'bool', + }, + { + 'name': 'meta_fanart', + 'label': 'Fanart', + 'default': True, + 'type': 'bool', + }, + { + 'name': 'meta_fanart_name', + 'label': 'Fanart filename', + 'default': '%s-fanart.jpg', + 'advanced': True, + }, + { + 'name': 'meta_thumbnail', + 'label': 'Thumbnail', + 'default': True, + 'type': 'bool', + }, + { + 'name': 'meta_thumbnail_name', + 'label': 'Thumbnail filename', + 'default': '%s.tbn', + 'advanced': True, + }, + { + 'name': 'meta_banner', + 'label': 'Banner', + 'default': False, + 'type': 'bool' + }, + { + 'name': 'meta_banner_name', + 'label': 'Banner filename', + 'default': 'banner.jpg', + 'advanced': True, + }, + { + 'name': 'meta_clear_art', + 'label': 'ClearArt', + 'default': False, + 'type': 'bool' + }, + { + 'name': 'meta_clear_art_name', + 'label': 'ClearArt filename', + 'default': 'clearart.png', + 'advanced': True, + }, + { + 'name': 'meta_disc_art', + 'label': 'DiscArt', + 'default': False, + 'type': 'bool' + }, + { + 'name': 'meta_disc_art_name', + 'label': 'DiscArt filename', + 'default': 'disc.png', + 'advanced': True, + }, + { + 'name': 'meta_landscape', + 'label': 'Landscape', + 'default': False, + 'type': 'bool' + }, + { + 'name': 'meta_landscape_name', + 'label': 'Landscape filename', + 'default': 'landscape.jpg', + 'advanced': True, + }, + { + 'name': 'meta_logo', + 'label': 'ClearLogo', + 'default': False, + 'type': 'bool' + }, + { + 'name': 'meta_logo_name', + 'label': 'ClearLogo filename', + 'default': 'logo.png', + 'advanced': True, + }, + { + 'name': 'meta_extra_thumbs', + 'label': 'Extrathumbs', + 'default': False, + 'type': 'bool' + }, + { + 'name': 'meta_extra_thumbs_name', + 'label': 'Extrathumbs filename', + 'description': '<i> is the image number, and must be included to 
have multiple images', + 'default': 'extrathumbs/thumb.jpg', + 'advanced': True + }, + { + 'name': 'meta_extra_fanart', + 'label': 'Extrafanart', + 'default': False, + 'type': 'bool' + }, + { + 'name': 'meta_extra_fanart_name', + 'label': 'Extrafanart filename', + 'default': 'extrafanart/extrafanart.jpg', + 'description': '<i> is the image number, and must be included to have multiple images', + 'advanced': True + } + ], + }, + ], +}] diff --git a/libs/migrate/versioning/templates/repository/default/versions/__init__.py b/couchpotato/core/media/movie/providers/nzb/__init__.py similarity index 100% rename from libs/migrate/versioning/templates/repository/default/versions/__init__.py rename to couchpotato/core/media/movie/providers/nzb/__init__.py diff --git a/couchpotato/templates/_mobile.html b/couchpotato/core/media/movie/providers/nzb/base.py similarity index 100% rename from couchpotato/templates/_mobile.html rename to couchpotato/core/media/movie/providers/nzb/base.py diff --git a/couchpotato/core/media/movie/providers/nzb/binnews.py b/couchpotato/core/media/movie/providers/nzb/binnews.py new file mode 100644 index 0000000000..e9c20d5525 --- /dev/null +++ b/couchpotato/core/media/movie/providers/nzb/binnews.py @@ -0,0 +1,21 @@ +from couchpotato.core.helpers.encoding import tryUrlencode +from couchpotato.core.helpers.variable import getIdentifier +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.nzb.binnewz.main import Base +from couchpotato.core.media.movie.providers.base import MovieProvider + +log = CPLog(__name__) + +autoload = 'BinNewz' + + +class BinNewz(MovieProvider, Base): + + def buildUrl(self, media, api_key): + query = tryUrlencode({ + 't': 'movie', + 'imdbid': getIdentifier(media).replace('tt', ''), + 'apikey': api_key, + 'extended': 1 + }) + return query diff --git a/couchpotato/core/media/movie/providers/nzb/binsearch.py b/couchpotato/core/media/movie/providers/nzb/binsearch.py new file mode 100644 index 
0000000000..b3e59c0e8d --- /dev/null +++ b/couchpotato/core/media/movie/providers/nzb/binsearch.py @@ -0,0 +1,27 @@ +from couchpotato.core.helpers.encoding import tryUrlencode +from couchpotato.core.helpers.variable import getIdentifier +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.nzb.binsearch import Base +from couchpotato.core.media.movie.providers.base import MovieProvider +from couchpotato.environment import Env + +log = CPLog(__name__) + +autoload = 'BinSearch' + + +class BinSearch(MovieProvider, Base): + + def buildUrl(self, media, quality): + query = tryUrlencode({ + 'q': getIdentifier(media), + 'm': 'n', + 'max': 400, + 'adv_age': Env.setting('retention', 'nzb'), + 'adv_sort': 'date', + 'adv_col': 'on', + 'adv_nfo': 'on', + 'xminsize': quality.get('size_min'), + 'xmaxsize': quality.get('size_max'), + }) + return query diff --git a/couchpotato/core/media/movie/providers/nzb/newznab.py b/couchpotato/core/media/movie/providers/nzb/newznab.py new file mode 100644 index 0000000000..3392b91286 --- /dev/null +++ b/couchpotato/core/media/movie/providers/nzb/newznab.py @@ -0,0 +1,29 @@ +from couchpotato.core.helpers.encoding import tryUrlencode +from couchpotato.core.helpers.variable import getIdentifier +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.nzb.newznab import Base +from couchpotato.core.media.movie.providers.base import MovieProvider + +log = CPLog(__name__) + +autoload = 'Newznab' + + +class Newznab(MovieProvider, Base): + + def buildUrl(self, media, host): + + query = tryUrlencode({ + 't': 'movie', + 'imdbid': getIdentifier(media).replace('tt', ''), + 'apikey': host['api_key'], + 'extended': 1 + }) + + if len(host.get('custom_tag', '')) > 0: + query = '%s&%s' % (query, host.get('custom_tag')) + + if len(host['custom_category']) > 0: + query = '%s&cat=%s' % (query, host['custom_category']) + + return query diff --git a/couchpotato/core/media/movie/providers/nzb/nzbclub.py 
b/couchpotato/core/media/movie/providers/nzb/nzbclub.py new file mode 100644 index 0000000000..87131069e5 --- /dev/null +++ b/couchpotato/core/media/movie/providers/nzb/nzbclub.py @@ -0,0 +1,27 @@ +from couchpotato.core.helpers.encoding import tryUrlencode +from couchpotato.core.logger import CPLog +from couchpotato.core.event import fireEvent +from couchpotato.core.media._base.providers.nzb.nzbclub import Base +from couchpotato.core.media.movie.providers.base import MovieProvider + +log = CPLog(__name__) + +autoload = 'NZBClub' + + +class NZBClub(MovieProvider, Base): + + def buildUrl(self, media): + + q = tryUrlencode({ + 'q': '%s' % fireEvent('library.query', media, single = True), + }) + + query = tryUrlencode({ + 'ig': 1, + 'rpp': 200, + 'st': 5, + 'sp': 1, + 'ns': 1, + }) + return '%s&%s' % (q, query) diff --git a/couchpotato/core/media/movie/providers/nzb/omgwtfnzbs.py b/couchpotato/core/media/movie/providers/nzb/omgwtfnzbs.py new file mode 100644 index 0000000000..f4527f6d49 --- /dev/null +++ b/couchpotato/core/media/movie/providers/nzb/omgwtfnzbs.py @@ -0,0 +1,11 @@ +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.nzb.omgwtfnzbs import Base +from couchpotato.core.media.movie.providers.base import MovieProvider + +log = CPLog(__name__) + +autoload = 'OMGWTFNZBs' + + +class OMGWTFNZBs(MovieProvider, Base): + pass diff --git a/libs/migrate/versioning/templates/repository/pylons/__init__.py b/couchpotato/core/media/movie/providers/torrent/__init__.py similarity index 100% rename from libs/migrate/versioning/templates/repository/pylons/__init__.py rename to couchpotato/core/media/movie/providers/torrent/__init__.py diff --git a/couchpotato/core/media/movie/providers/torrent/abnormal.py b/couchpotato/core/media/movie/providers/torrent/abnormal.py new file mode 100644 index 0000000000..dfc30b3fea --- /dev/null +++ b/couchpotato/core/media/movie/providers/torrent/abnormal.py @@ -0,0 +1,11 @@ +О╩©from couchpotato.core.logger 
import CPLog +from couchpotato.core.media._base.providers.torrent.abnormal import Base +from couchpotato.core.media.movie.providers.base import MovieProvider + +log = CPLog(__name__) + +autoload = 'abnormal' + + +class abnormal(MovieProvider, Base): + pass \ No newline at end of file diff --git a/couchpotato/core/media/movie/providers/torrent/addict.py b/couchpotato/core/media/movie/providers/torrent/addict.py new file mode 100644 index 0000000000..67ff72f7bd --- /dev/null +++ b/couchpotato/core/media/movie/providers/torrent/addict.py @@ -0,0 +1,11 @@ +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.torrent.addict import Base +from couchpotato.core.media.movie.providers.base import MovieProvider + +log = CPLog(__name__) + +autoload = 'addict' + + +class addict(MovieProvider, Base): + pass \ No newline at end of file diff --git a/couchpotato/core/media/movie/providers/torrent/alpharatio.py b/couchpotato/core/media/movie/providers/torrent/alpharatio.py new file mode 100644 index 0000000000..45aec7676d --- /dev/null +++ b/couchpotato/core/media/movie/providers/torrent/alpharatio.py @@ -0,0 +1,35 @@ +from couchpotato.core.event import fireEvent +from couchpotato.core.helpers.encoding import tryUrlencode +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.torrent.alpharatio import Base +from couchpotato.core.media.movie.providers.base import MovieProvider + +log = CPLog(__name__) + +autoload = 'AlphaRatio' + + +class AlphaRatio(MovieProvider, Base): + + # AlphaRatio movie search categories + # 10: MovieUHD + # 13: MoviePackUHD + # 9: MovieHD + # 12: MoviePackHD + # 8: MovieSD + # 11: MoviePackSD + + cat_ids = [ + ([10, 13], ['2160p']), + ([9, 12], ['bd50']), + ([9, 12], ['720p', '1080p']), + ([8, 11], ['dvdr']), + ([8, 11], ['brrip', 'dvdrip']), + ] + cat_backup_id = 8 + + def buildUrl(self, media, quality): + query = (tryUrlencode(fireEvent('library.query', media, single = True)), + 
self.getSceneOnly(), + self.getCatId(quality)[0]) + return query diff --git a/couchpotato/core/media/movie/providers/torrent/awesomehd.py b/couchpotato/core/media/movie/providers/torrent/awesomehd.py new file mode 100644 index 0000000000..b1c81f187e --- /dev/null +++ b/couchpotato/core/media/movie/providers/torrent/awesomehd.py @@ -0,0 +1,11 @@ +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.torrent.awesomehd import Base +from couchpotato.core.media.movie.providers.base import MovieProvider + +log = CPLog(__name__) + +autoload = 'AwesomeHD' + + +class AwesomeHD(MovieProvider, Base): + pass diff --git a/couchpotato/core/media/movie/providers/torrent/bithdtv.py b/couchpotato/core/media/movie/providers/torrent/bithdtv.py new file mode 100644 index 0000000000..da6954c8db --- /dev/null +++ b/couchpotato/core/media/movie/providers/torrent/bithdtv.py @@ -0,0 +1,23 @@ +from couchpotato.core.helpers.encoding import tryUrlencode +from couchpotato.core.logger import CPLog +from couchpotato.core.event import fireEvent +from couchpotato.core.media._base.providers.torrent.bithdtv import Base +from couchpotato.core.media.movie.providers.base import MovieProvider + +log = CPLog(__name__) + +autoload = 'BiTHDTV' + + +class BiTHDTV(MovieProvider, Base): + cat_ids = [ + ([2], ['bd50']), + ] + cat_backup_id = 7 # Movies + + def buildUrl(self, media, quality): + query = tryUrlencode({ + 'search': fireEvent('library.query', media, single = True), + 'cat': self.getCatId(quality)[0] + }) + return query diff --git a/couchpotato/core/media/movie/providers/torrent/bitsoup.py b/couchpotato/core/media/movie/providers/torrent/bitsoup.py new file mode 100644 index 0000000000..b0c8eded88 --- /dev/null +++ b/couchpotato/core/media/movie/providers/torrent/bitsoup.py @@ -0,0 +1,25 @@ +from couchpotato.core.helpers.encoding import tryUrlencode +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.torrent.bitsoup import Base 
+from couchpotato.core.media.movie.providers.base import MovieProvider + +log = CPLog(__name__) + +autoload = 'Bitsoup' + + +class Bitsoup(MovieProvider, Base): + cat_ids = [ + ([17], ['3d']), + ([80], ['720p', '1080p']), + ([20], ['dvdr']), + ([19], ['brrip', 'dvdrip']), + ] + cat_backup_id = 0 + + def buildUrl(self, title, media, quality): + query = tryUrlencode({ + 'search': '"%s" %s' % (title, media['info']['year']), + 'cat': self.getCatId(quality)[0], + }) + return query diff --git a/couchpotato/core/media/movie/providers/torrent/cpasbien.py b/couchpotato/core/media/movie/providers/torrent/cpasbien.py new file mode 100644 index 0000000000..4999404579 --- /dev/null +++ b/couchpotato/core/media/movie/providers/torrent/cpasbien.py @@ -0,0 +1,11 @@ +О╩©from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.torrent.cpasbien import Base +from couchpotato.core.media.movie.providers.base import MovieProvider + +log = CPLog(__name__) + +autoload = 'cpasbien' + + +class cpasbien(MovieProvider, Base): + pass \ No newline at end of file diff --git a/couchpotato/core/media/movie/providers/torrent/hd4free.py b/couchpotato/core/media/movie/providers/torrent/hd4free.py new file mode 100644 index 0000000000..37a28a5854 --- /dev/null +++ b/couchpotato/core/media/movie/providers/torrent/hd4free.py @@ -0,0 +1,11 @@ +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.torrent.hd4free import Base +from couchpotato.core.media.movie.providers.base import MovieProvider + +log = CPLog(__name__) + +autoload = 'HD4Free' + + +class HD4Free(MovieProvider, Base): + pass diff --git a/couchpotato/core/media/movie/providers/torrent/hdbits.py b/couchpotato/core/media/movie/providers/torrent/hdbits.py new file mode 100644 index 0000000000..016f1a126b --- /dev/null +++ b/couchpotato/core/media/movie/providers/torrent/hdbits.py @@ -0,0 +1,11 @@ +from couchpotato.core.logger import CPLog +from 
couchpotato.core.media._base.providers.torrent.hdbits import Base +from couchpotato.core.media.movie.providers.base import MovieProvider + +log = CPLog(__name__) + +autoload = 'HDBits' + + +class HDBits(MovieProvider, Base): + pass diff --git a/couchpotato/core/media/movie/providers/torrent/hdonly.py b/couchpotato/core/media/movie/providers/torrent/hdonly.py new file mode 100644 index 0000000000..9c04bf9b84 --- /dev/null +++ b/couchpotato/core/media/movie/providers/torrent/hdonly.py @@ -0,0 +1,11 @@ +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.torrent.hdonly import Base +from couchpotato.core.media.movie.providers.base import MovieProvider + +log = CPLog(__name__) + +autoload = 'hdonly' + + +class hdonly(MovieProvider, Base): + pass \ No newline at end of file diff --git a/couchpotato/core/media/movie/providers/torrent/hdtorrents.py b/couchpotato/core/media/movie/providers/torrent/hdtorrents.py new file mode 100644 index 0000000000..138cec026a --- /dev/null +++ b/couchpotato/core/media/movie/providers/torrent/hdtorrents.py @@ -0,0 +1,32 @@ +from couchpotato.core.helpers.encoding import tryUrlencode +from couchpotato.core.event import fireEvent +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.torrent.hdtorrents import Base +from couchpotato.core.media.movie.providers.base import MovieProvider + +log = CPLog(__name__) + +autoload = 'HDTorrents' + + +class HDTorrents(MovieProvider, Base): + + cat_ids = [ + ([22], ['720p', '1080p']), + ([7], ['cam', 'ts', 'dvdrip', 'tc', 'r5', 'scr', 'brrip']), + ([8], ['dvdr']), + ] + + def buildUrl(self, media, quality): + url = self.urls['search'] % ( + self.getCatId(quality['identifier'])[0], + self.getCatId(quality['identifier'])[0] + ) + + arguments = tryUrlencode({ + 'search', fireEvent('library.query', media, single = True), + 'method', 3, + }) + query = '%s&%s' % (url, arguments) + + return query \ No newline at end of file diff --git 
a/couchpotato/core/media/movie/providers/torrent/ilovetorrents.py b/couchpotato/core/media/movie/providers/torrent/ilovetorrents.py new file mode 100644 index 0000000000..cfd773ad20 --- /dev/null +++ b/couchpotato/core/media/movie/providers/torrent/ilovetorrents.py @@ -0,0 +1,11 @@ +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.torrent.ilovetorrents import Base +from couchpotato.core.media.movie.providers.base import MovieProvider + +log = CPLog(__name__) + +autoload = 'ILoveTorrents' + + +class ILoveTorrents(MovieProvider, Base): + pass diff --git a/couchpotato/core/media/movie/providers/torrent/iptorrents.py b/couchpotato/core/media/movie/providers/torrent/iptorrents.py new file mode 100644 index 0000000000..699d5b9d56 --- /dev/null +++ b/couchpotato/core/media/movie/providers/torrent/iptorrents.py @@ -0,0 +1,26 @@ +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.torrent.iptorrents import Base +from couchpotato.core.media.movie.providers.base import MovieProvider + +log = CPLog(__name__) + +autoload = 'IPTorrents' + + +class IPTorrents(MovieProvider, Base): + + cat_ids = [ + ([87], ['3d']), + ([89], ['bd50']), + ([48], ['720p', '1080p']), + ([101], ['2160p']), + ([48, 20], ['brrip']), + ([7, 77], ['dvdrip']), + ([6], ['dvdr']), + ([96], ['cam', 'ts', 'tc', 'r5', 'scr']), + ] + + def buildUrl(self, title, media, quality): + query = '"%s" %s' % (title.replace(':', ''), media['info']['year']) + + return self._buildUrl(query, quality) diff --git a/couchpotato/core/media/movie/providers/torrent/kickasstorrents.py b/couchpotato/core/media/movie/providers/torrent/kickasstorrents.py new file mode 100644 index 0000000000..2b9b196990 --- /dev/null +++ b/couchpotato/core/media/movie/providers/torrent/kickasstorrents.py @@ -0,0 +1,11 @@ +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.torrent.kickasstorrents import Base +from 
couchpotato.core.media.movie.providers.base import MovieProvider + +log = CPLog(__name__) + +autoload = 'KickAssTorrents' + + +class KickAssTorrents(MovieProvider, Base): + pass diff --git a/couchpotato/core/media/movie/providers/torrent/magnetdl.py b/couchpotato/core/media/movie/providers/torrent/magnetdl.py new file mode 100755 index 0000000000..4b6bc3fedd --- /dev/null +++ b/couchpotato/core/media/movie/providers/torrent/magnetdl.py @@ -0,0 +1,13 @@ +from couchpotato.core.event import fireEvent +from couchpotato.core.helpers.encoding import tryUrlencode +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.torrent.magnetdl import Base +from couchpotato.core.media.movie.providers.base import MovieProvider + +log = CPLog(__name__) + +autoload = 'MagnetDL' + + +class MagnetDL(MovieProvider, Base): + pass diff --git a/couchpotato/core/media/movie/providers/torrent/morethantv.py b/couchpotato/core/media/movie/providers/torrent/morethantv.py new file mode 100755 index 0000000000..7d4cb8a314 --- /dev/null +++ b/couchpotato/core/media/movie/providers/torrent/morethantv.py @@ -0,0 +1,13 @@ +from couchpotato.core.event import fireEvent +from couchpotato.core.helpers.encoding import tryUrlencode +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.torrent.morethantv import Base +from couchpotato.core.media.movie.providers.base import MovieProvider + +log = CPLog(__name__) + +autoload = 'MoreThanTV' + + +class MoreThanTV(MovieProvider, Base): + pass diff --git a/couchpotato/core/media/movie/providers/torrent/nextorrent.py b/couchpotato/core/media/movie/providers/torrent/nextorrent.py new file mode 100644 index 0000000000..95d639d46b --- /dev/null +++ b/couchpotato/core/media/movie/providers/torrent/nextorrent.py @@ -0,0 +1,11 @@ +О╩©from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.torrent.nextorrent import Base +from couchpotato.core.media.movie.providers.base import 
MovieProvider + +log = CPLog(__name__) + +autoload = 'nextorrent' + + +class nextorrent(MovieProvider, Base): + pass diff --git a/couchpotato/core/media/movie/providers/torrent/passthepopcorn.py b/couchpotato/core/media/movie/providers/torrent/passthepopcorn.py new file mode 100644 index 0000000000..b2edf4d674 --- /dev/null +++ b/couchpotato/core/media/movie/providers/torrent/passthepopcorn.py @@ -0,0 +1,40 @@ +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.torrent.passthepopcorn import Base +from couchpotato.core.media.movie.providers.base import MovieProvider + +log = CPLog(__name__) + +autoload = 'PassThePopcorn' + + +class PassThePopcorn(MovieProvider, Base): + + quality_search_params = { + '2160p': {'resolution': '2160p'}, + 'bd50': {'media': 'Blu-ray', 'format': 'BD50'}, + '1080p': {'resolution': '1080p'}, + '720p': {'resolution': '720p'}, + 'brrip': {'resolution': 'anyhd'}, + 'dvdr': {'resolution': 'anysd'}, + 'dvdrip': {'media': 'DVD'}, + 'scr': {'media': 'DVD-Screener'}, + 'r5': {'media': 'R5'}, + 'tc': {'media': 'TC'}, + 'ts': {'media': 'TS'}, + 'cam': {'media': 'CAM'} + } + + post_search_filters = { + '2160p': {'Resolution': ['2160p']}, + 'bd50': {'Codec': ['BD50']}, + '1080p': {'Resolution': ['1080p']}, + '720p': {'Resolution': ['720p']}, + 'brrip': {'Quality': ['High Definition'], 'Container': ['!ISO']}, + 'dvdr': {'Codec': ['DVD5', 'DVD9']}, + 'dvdrip': {'Source': ['DVD'], 'Codec': ['!DVD5', '!DVD9']}, + 'scr': {'Source': ['DVD-Screener']}, + 'r5': {'Source': ['R5']}, + 'tc': {'Source': ['TC']}, + 'ts': {'Source': ['TS']}, + 'cam': {'Source': ['CAM']} + } diff --git a/couchpotato/core/media/movie/providers/torrent/rarbg.py b/couchpotato/core/media/movie/providers/torrent/rarbg.py new file mode 100644 index 0000000000..849388e7ca --- /dev/null +++ b/couchpotato/core/media/movie/providers/torrent/rarbg.py @@ -0,0 +1,11 @@ +from couchpotato.core.logger import CPLog +from 
couchpotato.core.media._base.providers.torrent.rarbg import Base +from couchpotato.core.media.movie.providers.base import MovieProvider + +log = CPLog(__name__) + +autoload = 'Rarbg' + + +class Rarbg(MovieProvider, Base): + pass diff --git a/couchpotato/core/media/movie/providers/torrent/sceneaccess.py b/couchpotato/core/media/movie/providers/torrent/sceneaccess.py new file mode 100644 index 0000000000..579103af26 --- /dev/null +++ b/couchpotato/core/media/movie/providers/torrent/sceneaccess.py @@ -0,0 +1,29 @@ +from couchpotato.core.helpers.encoding import tryUrlencode +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.torrent.sceneaccess import Base +from couchpotato.core.media.movie.providers.base import MovieProvider + +log = CPLog(__name__) + +autoload = 'SceneAccess' + + +class SceneAccess(MovieProvider, Base): + + cat_ids = [ + ([22], ['720p', '1080p']), + ([7], ['cam', 'ts', 'dvdrip', 'tc', 'r5', 'scr', 'brrip']), + ([8], ['dvdr']), + ] + + def buildUrl(self, title, media, quality): + cat_id = self.getCatId(quality)[0] + url = self.urls['search'] % (cat_id, cat_id) + + arguments = tryUrlencode({ + 'search': '%s %s' % (title, media['info']['year']), + 'method': 2, + }) + query = "%s&%s" % (url, arguments) + + return query diff --git a/couchpotato/core/media/movie/providers/torrent/scenetime.py b/couchpotato/core/media/movie/providers/torrent/scenetime.py new file mode 100644 index 0000000000..58041ef4f9 --- /dev/null +++ b/couchpotato/core/media/movie/providers/torrent/scenetime.py @@ -0,0 +1,11 @@ +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.torrent.scenetime import Base +from couchpotato.core.media.movie.providers.base import MovieProvider + +log = CPLog(__name__) + +autoload = 'SceneTime' + + +class SceneTime(MovieProvider, Base): + pass diff --git a/couchpotato/core/media/movie/providers/torrent/t411.py b/couchpotato/core/media/movie/providers/torrent/t411.py new file mode 
100644 index 0000000000..753fc6ef70 --- /dev/null +++ b/couchpotato/core/media/movie/providers/torrent/t411.py @@ -0,0 +1,11 @@ +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.torrent.t411 import Base +from couchpotato.core.media.movie.providers.base import MovieProvider + +log = CPLog(__name__) + +autoload = 't411' + + +class t411(MovieProvider, Base): + pass diff --git a/couchpotato/core/media/movie/providers/torrent/t411api.py b/couchpotato/core/media/movie/providers/torrent/t411api.py new file mode 100644 index 0000000000..03642abf0f --- /dev/null +++ b/couchpotato/core/media/movie/providers/torrent/t411api.py @@ -0,0 +1,11 @@ +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.torrent.t411api import Base +from couchpotato.core.media.movie.providers.base import MovieProvider + +log = CPLog(__name__) + +autoload = 't411api' + + +class t411api(MovieProvider, Base): + pass diff --git a/couchpotato/core/media/movie/providers/torrent/thepiratebay.py b/couchpotato/core/media/movie/providers/torrent/thepiratebay.py new file mode 100644 index 0000000000..0dc8313d1c --- /dev/null +++ b/couchpotato/core/media/movie/providers/torrent/thepiratebay.py @@ -0,0 +1,27 @@ +from couchpotato.core.helpers.encoding import tryUrlencode +from couchpotato.core.logger import CPLog +from couchpotato.core.event import fireEvent +from couchpotato.core.media._base.providers.torrent.thepiratebay import Base +from couchpotato.core.media.movie.providers.base import MovieProvider + +log = CPLog(__name__) + +autoload = 'ThePirateBay' + + +class ThePirateBay(MovieProvider, Base): + + cat_ids = [ + ([209], ['3d']), + ([207], ['720p', '1080p', 'bd50']), + ([201], ['cam', 'ts', 'dvdrip', 'tc', 'r5', 'scr']), + ([201, 207], ['brrip']), + ([202], ['dvdr']) + ] + + def buildUrl(self, media, page, cats): + return ( + tryUrlencode('"%s"' % fireEvent('library.query', media, single = True)), + page, + ','.join(str(x) for x in 
cats) + ) diff --git a/couchpotato/core/media/movie/providers/torrent/torrent9.py b/couchpotato/core/media/movie/providers/torrent/torrent9.py new file mode 100644 index 0000000000..7dfae5ec07 --- /dev/null +++ b/couchpotato/core/media/movie/providers/torrent/torrent9.py @@ -0,0 +1,10 @@ +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.torrent.torrent9 import Base +from couchpotato.core.media.movie.providers.base import MovieProvider + +log = CPLog(__name__) + +autoload = 'torrent9' + +class torrent9(MovieProvider, Base): + pass diff --git a/couchpotato/core/media/movie/providers/torrent/torrentbytes.py b/couchpotato/core/media/movie/providers/torrent/torrentbytes.py new file mode 100644 index 0000000000..48fc68a413 --- /dev/null +++ b/couchpotato/core/media/movie/providers/torrent/torrentbytes.py @@ -0,0 +1,11 @@ +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.torrent.torrentbytes import Base +from couchpotato.core.media.movie.providers.base import MovieProvider + +log = CPLog(__name__) + +autoload = 'TorrentBytes' + + +class TorrentBytes(MovieProvider, Base): + pass diff --git a/couchpotato/core/media/movie/providers/torrent/torrentday.py b/couchpotato/core/media/movie/providers/torrent/torrentday.py new file mode 100644 index 0000000000..768d3043c5 --- /dev/null +++ b/couchpotato/core/media/movie/providers/torrent/torrentday.py @@ -0,0 +1,17 @@ +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.torrent.torrentday import Base +from couchpotato.core.media.movie.providers.base import MovieProvider + +log = CPLog(__name__) + +autoload = 'TorrentDay' + + +class TorrentDay(MovieProvider, Base): + + cat_ids = [ + ([11], ['720p', '1080p']), + ([1, 21, 25], ['cam', 'ts', 'dvdrip', 'tc', 'r5', 'scr', 'brrip']), + ([3], ['dvdr']), + ([5], ['bd50']), + ] diff --git a/couchpotato/core/media/movie/providers/torrent/torrentleech.py 
b/couchpotato/core/media/movie/providers/torrent/torrentleech.py new file mode 100644 index 0000000000..bfa5cd1750 --- /dev/null +++ b/couchpotato/core/media/movie/providers/torrent/torrentleech.py @@ -0,0 +1,29 @@ +from couchpotato.core.helpers.encoding import tryUrlencode +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.torrent.torrentleech import Base +from couchpotato.core.media.movie.providers.base import MovieProvider + +log = CPLog(__name__) + +autoload = 'TorrentLeech' + + +class TorrentLeech(MovieProvider, Base): + + cat_ids = [ + ([41, 47], ['2160p']), + ([13, 14, 37, 43], ['720p', '1080p']), + ([13], ['bd50']), + ([8], ['cam']), + ([9], ['ts', 'tc']), + ([10, 11, 37], ['r5', 'scr']), + ([11], ['dvdrip']), + ([13, 14, 37, 43], ['brrip']), + ([12], ['dvdr']), + ] + + def buildUrl(self, title, media, quality): + return ( + tryUrlencode(title.replace(':', '')), + ','.join([str(x) for x in self.getCatId(quality)]) + ) diff --git a/couchpotato/core/media/movie/providers/torrent/torrentpotato.py b/couchpotato/core/media/movie/providers/torrent/torrentpotato.py new file mode 100644 index 0000000000..9bc6b85a15 --- /dev/null +++ b/couchpotato/core/media/movie/providers/torrent/torrentpotato.py @@ -0,0 +1,22 @@ +from couchpotato.core.helpers.encoding import tryUrlencode +from couchpotato.core.helpers.variable import getIdentifier +from couchpotato.core.helpers.variable import getTitle +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.torrent.torrentpotato import Base +from couchpotato.core.media.movie.providers.base import MovieProvider + +log = CPLog(__name__) + +autoload = 'TorrentPotato' + + +class TorrentPotato(MovieProvider, Base): + + def buildUrl(self, media, host): + arguments = tryUrlencode({ + 'user': host['name'], + 'passkey': host['pass_key'], + 'imdbid': getIdentifier(media), + 'search' : getTitle(media) + ' ' + str(media['info']['year']), + }) + return '%s?%s' % 
(host['host'], arguments) diff --git a/couchpotato/core/media/movie/providers/torrent/torrentshack.py b/couchpotato/core/media/movie/providers/torrent/torrentshack.py new file mode 100644 index 0000000000..f9127315cf --- /dev/null +++ b/couchpotato/core/media/movie/providers/torrent/torrentshack.py @@ -0,0 +1,36 @@ +from couchpotato.core.event import fireEvent +from couchpotato.core.helpers.encoding import tryUrlencode +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.torrent.torrentshack import Base +from couchpotato.core.media.movie.providers.base import MovieProvider + +log = CPLog(__name__) + +autoload = 'TorrentShack' + + +class TorrentShack(MovieProvider, Base): + + # TorrentShack movie search categories + # Movies/x264 - 300 + # Movies/DVD-R - 350 + # Movies/XviD - 400 + # Full Blu-ray - 970 + # + # REMUX - 320 (not included) + # Movies-HD Pack - 982 (not included) + # Movies-SD Pack - 983 (not included) + + cat_ids = [ + ([970, 320], ['bd50']), + ([300, 320], ['720p', '1080p']), + ([350], ['dvdr']), + ([400], ['brrip', 'dvdrip']), + ] + cat_backup_id = 400 + + def buildUrl(self, media, quality): + query = (tryUrlencode(fireEvent('library.query', media, single = True)), + self.getSceneOnly(), + self.getCatId(quality)[0]) + return query diff --git a/couchpotato/core/media/movie/providers/torrent/torrentz.py b/couchpotato/core/media/movie/providers/torrent/torrentz.py new file mode 100644 index 0000000000..d1294e68f3 --- /dev/null +++ b/couchpotato/core/media/movie/providers/torrent/torrentz.py @@ -0,0 +1,14 @@ +from couchpotato.core.helpers.encoding import tryUrlencode +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.torrent.torrentz import Base +from couchpotato.core.media.movie.providers.base import MovieProvider + +log = CPLog(__name__) + +autoload = 'Torrentz' + + +class Torrentz(MovieProvider, Base): + + def buildUrl(self, title, media, quality): + return tryUrlencode('%s %s' % 
(title, media['info']['year'])) diff --git a/couchpotato/core/media/movie/providers/torrent/xthor.py b/couchpotato/core/media/movie/providers/torrent/xthor.py new file mode 100644 index 0000000000..249285813f --- /dev/null +++ b/couchpotato/core/media/movie/providers/torrent/xthor.py @@ -0,0 +1,11 @@ +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.torrent.xthor import Base +from couchpotato.core.media.movie.providers.base import MovieProvider + +log = CPLog(__name__) + +autoload = 'xthor' + + +class xthor(MovieProvider, Base): + pass diff --git a/couchpotato/core/media/movie/providers/torrent/yts.py b/couchpotato/core/media/movie/providers/torrent/yts.py new file mode 100644 index 0000000000..c20117a649 --- /dev/null +++ b/couchpotato/core/media/movie/providers/torrent/yts.py @@ -0,0 +1,10 @@ +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.torrent.yts import Base +from couchpotato.core.media.movie.providers.base import MovieProvider + +log = CPLog(__name__) + +autoload = 'Yts' + +class Yts(MovieProvider, Base): + pass diff --git a/libs/migrate/versioning/templates/repository/pylons/versions/__init__.py b/couchpotato/core/media/movie/providers/trailer/__init__.py similarity index 100% rename from libs/migrate/versioning/templates/repository/pylons/versions/__init__.py rename to couchpotato/core/media/movie/providers/trailer/__init__.py diff --git a/couchpotato/core/media/movie/providers/trailer/allocine.py b/couchpotato/core/media/movie/providers/trailer/allocine.py new file mode 100644 index 0000000000..ec330870c6 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/allocine.py @@ -0,0 +1,208 @@ +#!/usr/bin/env python +#-*- coding:utf-8 -*- +""" +A module to use Allocine API V3 in Python +Repository: https://github.com/xbgmsharp/allocine +Base on work from: https://github.com/gromez/allocine-api +License: LGPLv2 http://www.gnu.org/licenses/lgpl.html + +Sample code: + + 
class allocine(object):
    """A thin client for the Allocine REST API, version 3.

    Mimics the requests made by the official iPhone application, which is
    why a fixed partner id and a spoofed User-Agent are always sent.
    """

    def __init__(self, partner_key=None, secret_key=None):
        """Initialise endpoint, keys and User-Agent.

        NOTE(review): `partner_key` is accepted but deliberately ignored —
        the hardcoded iPhone partner id is always used. This matches the
        original behaviour (the iPhone key works without URL signing);
        the parameter is kept only for interface compatibility.
        """
        self._api_url = 'http://api.allocine.fr/rest/v3'
        self._partner_key = 'aXBob25lLXYy'
        self._secret_key = secret_key
        self._user_agent = 'AlloCine/2.9.5 CFNetwork/548.1.4 Darwin/11.0.0'

    def configure(self, partner_key=None, secret_key=None):
        """Set the keys (`partner_key` is ignored, see __init__)."""
        self._partner_key = 'aXBob25lLXYy'
        self._secret_key = secret_key

    def _base_params(self, **extra):
        """Return the parameters sent with every API call, merged with `extra`."""
        params = {'format': 'json', 'partner': self._partner_key}
        params.update(extra)
        return params

    def _do_request(self, method=None, params=None):
        """Send a GET request for `method` and return the decoded JSON.

        BUGFIX: the old signed-URL code path (sed/sig query parameters)
        was dead code — the signature was computed but never appended to
        the URL, and the computation raised TypeError whenever no secret
        key had been configured (the default). It has been removed; the
        plain unsigned query is sent exactly as before.
        """
        query_url = '%s/%s?%s' % (self._api_url, method, urllib.urlencode(params, True))

        req = urllib2.Request(query_url)
        req.add_header('User-agent', self._user_agent)

        return simplejson.load(urllib2.urlopen(req, timeout = 10))

    def search(self, query, filter="movie"):
        """Search for a term.

        Param:
            query -- Term to search for
            filter -- Filter by result type (movie, theater, person, news, tvseries)
        """
        params = self._base_params(q = query, filter = filter, profile = 'large')
        return self._do_request('search', params)

    def movie(self, id, profile="large", mediafmt="mp4-lc:m"):
        """Get the movie details by ID.

        Param:
            id -- Unique ID of the movie you search for
            profile -- Level of details to return (small, medium, large)
            mediafmt -- The media format (flv, mp4-lc, mp4-hip, mp4-archive, mpeg2-theater, mpeg2)
        """
        params = self._base_params(mediafmt = mediafmt, profile = profile,
                                   code = id, striptags = 'synopsis,synopsisshort')
        return self._do_request('movie', params)

    def tvseries(self, id, profile="large", mediafmt="mp4-lc:m"):
        """Get the TV show details by ID (same parameters as `movie`)."""
        params = self._base_params(mediafmt = mediafmt, profile = profile,
                                   code = id, striptags = 'synopsis,synopsisshort')
        return self._do_request('tvseries', params)

    def season(self, id, profile="large"):
        """Get the season details by ID.

        Param:
            id -- Unique ID of the season you search for
            profile -- Level of details to return (small, medium, large)
        """
        params = self._base_params(profile = profile, code = id,
                                   striptags = 'synopsis,synopsisshort')
        return self._do_request('season', params)

    def episode(self, id, profile="large"):
        """Get the episode details by ID (same parameters as `season`)."""
        params = self._base_params(profile = profile, code = id,
                                   striptags = 'synopsis,synopsisshort')
        return self._do_request('episode', params)

    def trailer(self, id, profile="large", mediafmt="mp4-lc:m"):
        """Get the trailer media for a movie ID.

        `mediafmt` is accepted for API symmetry but — as in the original —
        it is not sent with this request.
        """
        params = self._base_params(profile = profile, code = id)
        return self._do_request('media', params)

    def movielist(self, typemovie, profile="large", mediafmt="mp4-lc:m"):
        """List the top-ranked movies matching the given filter.

        Param:
            typemovie -- Movie-list filter value passed straight to the API
            profile -- Level of details to return (small, medium, large)
        """
        params = self._base_params(profile = profile, filter = typemovie,
                                   order = 'toprank', count = 30)
        return self._do_request('movielist', params)
class TrailerProvider(Provider):
    """Base class for providers that look up trailers for a movie.

    Subclasses implement `search`; instances register themselves on the
    global 'trailer.search' event at construction time.
    """

    type = 'trailer'

    def __init__(self):
        # Hook this provider's search into the application event bus.
        addEvent('trailer.search', self.search)

    def search(self, *args, **kwargs):
        # Default implementation finds nothing; subclasses override.
        pass


class VFTrailerProvider(Provider):
    """Base class for 'vftrailer' providers.

    # NOTE(review): 'VF' presumably means French-language (version
    # française) trailers — confirm against the providers registering
    # on this event.
    """

    type = 'vftrailer'

    def __init__(self):
        # Same registration pattern as TrailerProvider, separate event.
        addEvent('vftrailer.search', self.search)

    def search(self, *args, **kwargs):
        # Default implementation finds nothing; subclasses override.
        pass
provider) + + # Find alternative + if results.get('404') and not did_alternative: + results = self.findViaAlternative(group) + did_alternative = True + + result_data = mergeDicts(result_data, results) + + return result_data + + def findViaAlternative(self, group): + results = {'480p': [], '720p': [], '1080p': []} + + movie_name = getTitle(group) + + url = "%s?%s" % (self.urls['backup'], tryUrlencode({'s':movie_name})) + try: + data = self.getCache('hdtrailers.alt.%s' % getIdentifier(group), url, show_error = False) + except HTTPError: + log.debug('No alternative page found for: %s', movie_name) + data = None + + if not data: + return results + + try: + html = BeautifulSoup(data, parse_only = self.only_tables_tags) + result_table = html.find_all('h2', text = re.compile(movie_name)) + + for h2 in result_table: + if 'trailer' in h2.lower(): + parent = h2.parent.parent.parent + trailerLinks = parent.find_all('a', text = re.compile('480p|720p|1080p')) + try: + for trailer in trailerLinks: + results[trailer].insert(0, trailer.parent['href']) + except: + pass + + except AttributeError: + log.debug('No trailers found in via alternative.') + + return results + + def findByProvider(self, data, provider): + + results = {'480p':[], '720p':[], '1080p':[]} + try: + html = BeautifulSoup(data, parse_only = self.only_tables_tags) + result_table = html.find('table', attrs = {'class':'bottomTable'}) + + for tr in result_table.find_all('tr'): + trtext = str(tr).lower() + if 'clips' in trtext: + break + + if 'trailer' in trtext and not 'clip' in trtext and provider in trtext and not '3d' in trtext: + if 'trailer' not in tr.find('span', 'standardTrailerName').text.lower(): + continue + resolutions = tr.find_all('td', attrs = {'class':'bottomTableResolution'}) + for res in resolutions: + if res.a and str(res.a.contents[0]) in results: + results[str(res.a.contents[0])].insert(0, res.a['href']) + + except AttributeError: + log.debug('No trailers found in provider %s.', provider) + 
results['404'] = True + + return results + + def movieUrlName(self, string): + safe_chars = ascii_letters + digits + ' ' + r = ''.join([char if char in safe_chars else ' ' for char in string]) + name = re.sub('\s+' , '-', r).lower() + + try: + int(name) + return '-' + name + except: + return name diff --git a/couchpotato/core/media/movie/providers/trailer/mechanize/__init__.py b/couchpotato/core/media/movie/providers/trailer/mechanize/__init__.py new file mode 100644 index 0000000000..c4429be394 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/mechanize/__init__.py @@ -0,0 +1,211 @@ +__all__ = [ + 'AbstractBasicAuthHandler', + 'AbstractDigestAuthHandler', + 'BaseHandler', + 'Browser', + 'BrowserStateError', + 'CacheFTPHandler', + 'ContentTooShortError', + 'Cookie', + 'CookieJar', + 'CookiePolicy', + 'DefaultCookiePolicy', + 'DefaultFactory', + 'FTPHandler', + 'Factory', + 'FileCookieJar', + 'FileHandler', + 'FormNotFoundError', + 'FormsFactory', + 'HTTPBasicAuthHandler', + 'HTTPCookieProcessor', + 'HTTPDefaultErrorHandler', + 'HTTPDigestAuthHandler', + 'HTTPEquivProcessor', + 'HTTPError', + 'HTTPErrorProcessor', + 'HTTPHandler', + 'HTTPPasswordMgr', + 'HTTPPasswordMgrWithDefaultRealm', + 'HTTPProxyPasswordMgr', + 'HTTPRedirectDebugProcessor', + 'HTTPRedirectHandler', + 'HTTPRefererProcessor', + 'HTTPRefreshProcessor', + 'HTTPResponseDebugProcessor', + 'HTTPRobotRulesProcessor', + 'HTTPSClientCertMgr', + 'HeadParser', + 'History', + 'LWPCookieJar', + 'Link', + 'LinkNotFoundError', + 'LinksFactory', + 'LoadError', + 'MSIECookieJar', + 'MozillaCookieJar', + 'OpenerDirector', + 'OpenerFactory', + 'ParseError', + 'ProxyBasicAuthHandler', + 'ProxyDigestAuthHandler', + 'ProxyHandler', + 'Request', + 'RobotExclusionError', + 'RobustFactory', + 'RobustFormsFactory', + 'RobustLinksFactory', + 'RobustTitleFactory', + 'SeekableResponseOpener', + 'TitleFactory', + 'URLError', + 'USE_BARE_EXCEPT', + 'UnknownHandler', + 'UserAgent', + 'UserAgentBase', + 
'XHTMLCompatibleHeadParser', + '__version__', + 'build_opener', + 'install_opener', + 'lwp_cookie_str', + 'make_response', + 'request_host', + 'response_seek_wrapper', # XXX deprecate in public interface? + 'seek_wrapped_response', # XXX should probably use this internally in place of response_seek_wrapper() + 'str2time', + 'urlopen', + 'urlretrieve', + 'urljoin', + + # ClientForm API + 'AmbiguityError', + 'ControlNotFoundError', + 'FormParser', + 'ItemCountError', + 'ItemNotFoundError', + 'LocateError', + 'Missing', + 'ParseFile', + 'ParseFileEx', + 'ParseResponse', + 'ParseResponseEx', + 'ParseString', + 'XHTMLCompatibleFormParser', + # deprecated + 'CheckboxControl', + 'Control', + 'FileControl', + 'HTMLForm', + 'HiddenControl', + 'IgnoreControl', + 'ImageControl', + 'IsindexControl', + 'Item', + 'Label', + 'ListControl', + 'PasswordControl', + 'RadioControl', + 'ScalarControl', + 'SelectControl', + 'SubmitButtonControl', + 'SubmitControl', + 'TextControl', + 'TextareaControl', + ] + +import logging +import sys + +from _version import __version__ + +# high-level stateful browser-style interface +from _mechanize import \ + Browser, History, \ + BrowserStateError, LinkNotFoundError, FormNotFoundError + +# configurable URL-opener interface +from _useragent import UserAgentBase, UserAgent +from _html import \ + Link, \ + Factory, DefaultFactory, RobustFactory, \ + FormsFactory, LinksFactory, TitleFactory, \ + RobustFormsFactory, RobustLinksFactory, RobustTitleFactory + +# urllib2 work-alike interface. This is a superset of the urllib2 interface. 
+from _urllib2 import * +import _urllib2 +if hasattr(_urllib2, "HTTPSHandler"): + __all__.append("HTTPSHandler") +del _urllib2 + +# misc +from _http import HeadParser +from _http import XHTMLCompatibleHeadParser +from _opener import ContentTooShortError, OpenerFactory, urlretrieve +from _response import \ + response_seek_wrapper, seek_wrapped_response, make_response +from _rfc3986 import urljoin +from _util import http2time as str2time + +# cookies +from _clientcookie import Cookie, CookiePolicy, DefaultCookiePolicy, \ + CookieJar, FileCookieJar, LoadError, request_host_lc as request_host, \ + effective_request_host +from _lwpcookiejar import LWPCookieJar, lwp_cookie_str +# 2.4 raises SyntaxError due to generator / try/finally use +if sys.version_info[:2] > (2,4): + try: + import sqlite3 + except ImportError: + pass + else: + from _firefox3cookiejar import Firefox3CookieJar +from _mozillacookiejar import MozillaCookieJar +from _msiecookiejar import MSIECookieJar + +# forms +from _form import ( + AmbiguityError, + ControlNotFoundError, + FormParser, + ItemCountError, + ItemNotFoundError, + LocateError, + Missing, + ParseError, + ParseFile, + ParseFileEx, + ParseResponse, + ParseResponseEx, + ParseString, + XHTMLCompatibleFormParser, + # deprecated + CheckboxControl, + Control, + FileControl, + HTMLForm, + HiddenControl, + IgnoreControl, + ImageControl, + IsindexControl, + Item, + Label, + ListControl, + PasswordControl, + RadioControl, + ScalarControl, + SelectControl, + SubmitButtonControl, + SubmitControl, + TextControl, + TextareaControl, + ) + +# If you hate the idea of turning bugs into warnings, do: +# import mechanize; mechanize.USE_BARE_EXCEPT = False +USE_BARE_EXCEPT = True + +logger = logging.getLogger("mechanize") +if logger.level is logging.NOTSET: + logger.setLevel(logging.CRITICAL) +del logger diff --git a/couchpotato/core/media/movie/providers/trailer/mechanize/_auth.py b/couchpotato/core/media/movie/providers/trailer/mechanize/_auth.py new file mode 
# TODO: stop deriving from HTTPPasswordMgr
class HTTPProxyPasswordMgr(HTTPPasswordMgr):
    # Password manager for proxy authentication: unlike the base class it
    # supports a default realm (None) and a default host/port (uri=None).

    def add_password(self, realm, uri, user, passwd):
        # uri could be a single URI or a sequence
        if uri is None or isinstance(uri, basestring):
            uris = [uri]
        else:
            uris = uri
        passwd_by_domain = self.passwd.setdefault(realm, {})
        for uri in uris:
            # Store each credential under both the default-port and
            # explicit-port reductions of the URI so later lookups match
            # regardless of how the authority was spelled.
            for default_port in True, False:
                reduced_uri = self.reduce_uri(uri, default_port)
                passwd_by_domain[reduced_uri] = (user, passwd)

    def find_user_password(self, realm, authuri):
        # Try the exact realm first, then fall back to the default realm.
        attempts = [(realm, authuri), (None, authuri)]
        # bleh, want default realm to take precedence over default
        # URI/authority, hence this outer loop
        for default_uri in False, True:
            for realm, authuri in attempts:
                authinfo_by_domain = self.passwd.get(realm, {})
                for default_port in True, False:
                    reduced_authuri = self.reduce_uri(authuri, default_port)
                    for uri, authinfo in authinfo_by_domain.iteritems():
                        if uri is None and not default_uri:
                            # Default-URI entries only match on the second
                            # pass of the outer loop.
                            continue
                        if self.is_suburi(uri, reduced_authuri):
                            return authinfo
                user, password = None, None

            # NOTE(review): `user` was just set to None above, so this
            # break looks unreachable; kept verbatim from upstream
            # mechanize — confirm before touching.
            if user is not None:
                break
        return user, password

    def reduce_uri(self, uri, default_port=True):
        # None (the default host/port) reduces to itself.
        if uri is None:
            return None
        return HTTPPasswordMgr.reduce_uri(self, uri, default_port)

    def is_suburi(self, base, test):
        if base is None:
            # default to the proxy's host/port
            hostport, path = test
            base = (hostport, "/")
        return HTTPPasswordMgr.is_suburi(self, base, test)
HTTPSClientCertMgr(HTTPPasswordMgr): + # implementation inheritance: this is not a proper subclass + def add_key_cert(self, uri, key_file, cert_file): + self.add_password(None, uri, key_file, cert_file) + def find_key_cert(self, authuri): + return HTTPPasswordMgr.find_user_password(self, None, authuri) diff --git a/couchpotato/core/media/movie/providers/trailer/mechanize/_beautifulsoup.py b/couchpotato/core/media/movie/providers/trailer/mechanize/_beautifulsoup.py new file mode 100644 index 0000000000..0040140d04 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/mechanize/_beautifulsoup.py @@ -0,0 +1,1077 @@ +"""Beautiful Soup +Elixir and Tonic +"The Screen-Scraper's Friend" +v2.1.1 +http://www.crummy.com/software/BeautifulSoup/ + +Beautiful Soup parses arbitrarily invalid XML- or HTML-like substance +into a tree representation. It provides methods and Pythonic idioms +that make it easy to search and modify the tree. + +A well-formed XML/HTML document will yield a well-formed data +structure. An ill-formed XML/HTML document will yield a +correspondingly ill-formed data structure. If your document is only +locally well-formed, you can use this library to find and process the +well-formed part of it. The BeautifulSoup class has heuristics for +obtaining a sensible parse tree in the face of common HTML errors. + +Beautiful Soup has no external dependencies. It works with Python 2.2 +and up. + +Beautiful Soup defines classes for four different parsing strategies: + + * BeautifulStoneSoup, for parsing XML, SGML, or your domain-specific + language that kind of looks like XML. + + * BeautifulSoup, for parsing run-of-the-mill HTML code, be it valid + or invalid. + + * ICantBelieveItsBeautifulSoup, for parsing valid but bizarre HTML + that trips up BeautifulSoup. 
+ + * BeautifulSOAP, for making it easier to parse XML documents that use + lots of subelements containing a single string, where you'd prefer + they put that string into an attribute (such as SOAP messages). + +You can subclass BeautifulStoneSoup or BeautifulSoup to create a +parsing strategy specific to an XML schema or a particular bizarre +HTML document. Typically your subclass would just override +SELF_CLOSING_TAGS and/or NESTABLE_TAGS. +""" #" +from __future__ import generators + +__author__ = "Leonard Richardson (leonardr@segfault.org)" +__version__ = "2.1.1" +__date__ = "$Date: 2004/10/18 00:14:20 $" +__copyright__ = "Copyright (c) 2004-2005 Leonard Richardson" +__license__ = "PSF" + +from _sgmllib_copy import SGMLParser, SGMLParseError +import types +import re +import _sgmllib_copy as sgmllib + +class NullType(object): + + """Similar to NoneType with a corresponding singleton instance + 'Null' that, unlike None, accepts any message and returns itself. + + Examples: + >>> Null("send", "a", "message")("and one more", + ... "and what you get still") is Null + True + """ + + def __new__(cls): return Null + def __call__(self, *args, **kwargs): return Null +## def __getstate__(self, *args): return Null + def __getattr__(self, attr): return Null + def __getitem__(self, item): return Null + def __setattr__(self, attr, value): pass + def __setitem__(self, item, value): pass + def __len__(self): return 0 + # FIXME: is this a python bug? otherwise ``for x in Null: pass`` + # never terminates... 
+ def __iter__(self): return iter([]) + def __contains__(self, item): return False + def __repr__(self): return "Null" +Null = object.__new__(NullType) + +class PageElement: + """Contains the navigational information for some part of the page + (either a tag or a piece of text)""" + + def setup(self, parent=Null, previous=Null): + """Sets up the initial relations between this element and + other elements.""" + self.parent = parent + self.previous = previous + self.next = Null + self.previousSibling = Null + self.nextSibling = Null + if self.parent and self.parent.contents: + self.previousSibling = self.parent.contents[-1] + self.previousSibling.nextSibling = self + + def findNext(self, name=None, attrs={}, text=None): + """Returns the first item that matches the given criteria and + appears after this Tag in the document.""" + return self._first(self.fetchNext, name, attrs, text) + firstNext = findNext + + def fetchNext(self, name=None, attrs={}, text=None, limit=None): + """Returns all items that match the given criteria and appear + before after Tag in the document.""" + return self._fetch(name, attrs, text, limit, self.nextGenerator) + + def findNextSibling(self, name=None, attrs={}, text=None): + """Returns the closest sibling to this Tag that matches the + given criteria and appears after this Tag in the document.""" + return self._first(self.fetchNextSiblings, name, attrs, text) + firstNextSibling = findNextSibling + + def fetchNextSiblings(self, name=None, attrs={}, text=None, limit=None): + """Returns the siblings of this Tag that match the given + criteria and appear after this Tag in the document.""" + return self._fetch(name, attrs, text, limit, self.nextSiblingGenerator) + + def findPrevious(self, name=None, attrs={}, text=None): + """Returns the first item that matches the given criteria and + appears before this Tag in the document.""" + return self._first(self.fetchPrevious, name, attrs, text) + + def fetchPrevious(self, name=None, attrs={}, 
text=None, limit=None): + """Returns all items that match the given criteria and appear + before this Tag in the document.""" + return self._fetch(name, attrs, text, limit, self.previousGenerator) + firstPrevious = findPrevious + + def findPreviousSibling(self, name=None, attrs={}, text=None): + """Returns the closest sibling to this Tag that matches the + given criteria and appears before this Tag in the document.""" + return self._first(self.fetchPreviousSiblings, name, attrs, text) + firstPreviousSibling = findPreviousSibling + + def fetchPreviousSiblings(self, name=None, attrs={}, text=None, + limit=None): + """Returns the siblings of this Tag that match the given + criteria and appear before this Tag in the document.""" + return self._fetch(name, attrs, text, limit, + self.previousSiblingGenerator) + + def findParent(self, name=None, attrs={}): + """Returns the closest parent of this Tag that matches the given + criteria.""" + r = Null + l = self.fetchParents(name, attrs, 1) + if l: + r = l[0] + return r + firstParent = findParent + + def fetchParents(self, name=None, attrs={}, limit=None): + """Returns the parents of this Tag that match the given + criteria.""" + return self._fetch(name, attrs, None, limit, self.parentGenerator) + + #These methods do the real heavy lifting. + + def _first(self, method, name, attrs, text): + r = Null + l = method(name, attrs, text, 1) + if l: + r = l[0] + return r + + def _fetch(self, name, attrs, text, limit, generator): + "Iterates over a generator looking for things that match." 
+ if not hasattr(attrs, 'items'): + attrs = {'class' : attrs} + + results = [] + g = generator() + while True: + try: + i = g.next() + except StopIteration: + break + found = None + if isinstance(i, Tag): + if not text: + if not name or self._matches(i, name): + match = True + for attr, matchAgainst in attrs.items(): + check = i.get(attr) + if not self._matches(check, matchAgainst): + match = False + break + if match: + found = i + elif text: + if self._matches(i, text): + found = i + if found: + results.append(found) + if limit and len(results) >= limit: + break + return results + + #Generators that can be used to navigate starting from both + #NavigableTexts and Tags. + def nextGenerator(self): + i = self + while i: + i = i.next + yield i + + def nextSiblingGenerator(self): + i = self + while i: + i = i.nextSibling + yield i + + def previousGenerator(self): + i = self + while i: + i = i.previous + yield i + + def previousSiblingGenerator(self): + i = self + while i: + i = i.previousSibling + yield i + + def parentGenerator(self): + i = self + while i: + i = i.parent + yield i + + def _matches(self, chunk, howToMatch): + #print 'looking for %s in %s' % (howToMatch, chunk) + # + # If given a list of items, return true if the list contains a + # text element that matches. + if isList(chunk) and not isinstance(chunk, Tag): + for tag in chunk: + if isinstance(tag, NavigableText) and self._matches(tag, howToMatch): + return True + return False + if callable(howToMatch): + return howToMatch(chunk) + if isinstance(chunk, Tag): + #Custom match methods take the tag as an argument, but all other + #ways of matching match the tag name as a string + chunk = chunk.name + #Now we know that chunk is a string + if not isinstance(chunk, basestring): + chunk = str(chunk) + if hasattr(howToMatch, 'match'): + # It's a regexp object. 
+ return howToMatch.search(chunk) + if isList(howToMatch): + return chunk in howToMatch + if hasattr(howToMatch, 'items'): + return howToMatch.has_key(chunk) + #It's just a string + return str(howToMatch) == chunk + +class NavigableText(PageElement): + + def __getattr__(self, attr): + "For backwards compatibility, text.string gives you text" + if attr == 'string': + return self + else: + raise AttributeError, "'%s' object has no attribute '%s'" % (self.__class__.__name__, attr) + +class NavigableString(str, NavigableText): + pass + +class NavigableUnicodeString(unicode, NavigableText): + pass + +class Tag(PageElement): + + """Represents a found HTML tag with its attributes and contents.""" + + def __init__(self, name, attrs=None, parent=Null, previous=Null): + "Basic constructor." + self.name = name + if attrs == None: + attrs = [] + self.attrs = attrs + self.contents = [] + self.setup(parent, previous) + self.hidden = False + + def get(self, key, default=None): + """Returns the value of the 'key' attribute for the tag, or + the value given for 'default' if it doesn't have that + attribute.""" + return self._getAttrMap().get(key, default) + + def __getitem__(self, key): + """tag[key] returns the value of the 'key' attribute for the tag, + and throws an exception if it's not there.""" + return self._getAttrMap()[key] + + def __iter__(self): + "Iterating over a tag iterates over its contents." + return iter(self.contents) + + def __len__(self): + "The length of a tag is the length of its list of contents." + return len(self.contents) + + def __contains__(self, x): + return x in self.contents + + def __nonzero__(self): + "A tag is non-None even if it has no contents." 
+ return True + + def __setitem__(self, key, value): + """Setting tag[key] sets the value of the 'key' attribute for the + tag.""" + self._getAttrMap() + self.attrMap[key] = value + found = False + for i in range(0, len(self.attrs)): + if self.attrs[i][0] == key: + self.attrs[i] = (key, value) + found = True + if not found: + self.attrs.append((key, value)) + self._getAttrMap()[key] = value + + def __delitem__(self, key): + "Deleting tag[key] deletes all 'key' attributes for the tag." + for item in self.attrs: + if item[0] == key: + self.attrs.remove(item) + #We don't break because bad HTML can define the same + #attribute multiple times. + self._getAttrMap() + if self.attrMap.has_key(key): + del self.attrMap[key] + + def __call__(self, *args, **kwargs): + """Calling a tag like a function is the same as calling its + fetch() method. Eg. tag('a') returns a list of all the A tags + found within this tag.""" + return apply(self.fetch, args, kwargs) + + def __getattr__(self, tag): + if len(tag) > 3 and tag.rfind('Tag') == len(tag)-3: + return self.first(tag[:-3]) + elif tag.find('__') != 0: + return self.first(tag) + + def __eq__(self, other): + """Returns true iff this tag has the same name, the same attributes, + and the same contents (recursively) as the given tag. + + NOTE: right now this will return false if two tags have the + same attributes in a different order. 
Should this be fixed?""" + if not hasattr(other, 'name') or not hasattr(other, 'attrs') or not hasattr(other, 'contents') or self.name != other.name or self.attrs != other.attrs or len(self) != len(other): + return False + for i in range(0, len(self.contents)): + if self.contents[i] != other.contents[i]: + return False + return True + + def __ne__(self, other): + """Returns true iff this tag is not identical to the other tag, + as defined in __eq__.""" + return not self == other + + def __repr__(self): + """Renders this tag as a string.""" + return str(self) + + def __unicode__(self): + return self.__str__(1) + + def __str__(self, needUnicode=None, showStructureIndent=None): + """Returns a string or Unicode representation of this tag and + its contents. + + NOTE: since Python's HTML parser consumes whitespace, this + method is not certain to reproduce the whitespace present in + the original string.""" + + attrs = [] + if self.attrs: + for key, val in self.attrs: + attrs.append('%s="%s"' % (key, val)) + close = '' + closeTag = '' + if self.isSelfClosing(): + close = ' /' + else: + closeTag = '' % self.name + indentIncrement = None + if showStructureIndent != None: + indentIncrement = showStructureIndent + if not self.hidden: + indentIncrement += 1 + contents = self.renderContents(indentIncrement, needUnicode=needUnicode) + if showStructureIndent: + space = '\n%s' % (' ' * showStructureIndent) + if self.hidden: + s = contents + else: + s = [] + attributeString = '' + if attrs: + attributeString = ' ' + ' '.join(attrs) + if showStructureIndent: + s.append(space) + s.append('<%s%s%s>' % (self.name, attributeString, close)) + s.append(contents) + if closeTag and showStructureIndent != None: + s.append(space) + s.append(closeTag) + s = ''.join(s) + isUnicode = type(s) == types.UnicodeType + if needUnicode and not isUnicode: + s = unicode(s) + elif isUnicode and needUnicode==False: + s = str(s) + return s + + def prettify(self, needUnicode=None): + return 
self.__str__(needUnicode, showStructureIndent=True) + + def renderContents(self, showStructureIndent=None, needUnicode=None): + """Renders the contents of this tag as a (possibly Unicode) + string.""" + s=[] + for c in self: + text = None + if isinstance(c, NavigableUnicodeString) or type(c) == types.UnicodeType: + text = unicode(c) + elif isinstance(c, Tag): + s.append(c.__str__(needUnicode, showStructureIndent)) + elif needUnicode: + text = unicode(c) + else: + text = str(c) + if text: + if showStructureIndent != None: + if text[-1] == '\n': + text = text[:-1] + s.append(text) + return ''.join(s) + + #Soup methods + + def firstText(self, text, recursive=True): + """Convenience method to retrieve the first piece of text matching the + given criteria. 'text' can be a string, a regular expression object, + a callable that takes a string and returns whether or not the + string 'matches', etc.""" + return self.first(recursive=recursive, text=text) + + def fetchText(self, text, recursive=True, limit=None): + """Convenience method to retrieve all pieces of text matching the + given criteria. 'text' can be a string, a regular expression object, + a callable that takes a string and returns whether or not the + string 'matches', etc.""" + return self.fetch(recursive=recursive, text=text, limit=limit) + + def first(self, name=None, attrs={}, recursive=True, text=None): + """Return only the first child of this + Tag matching the given criteria.""" + r = Null + l = self.fetch(name, attrs, recursive, text, 1) + if l: + r = l[0] + return r + findChild = first + + def fetch(self, name=None, attrs={}, recursive=True, text=None, + limit=None): + """Extracts a list of Tag objects that match the given + criteria. You can specify the name of the Tag and any + attributes you want the Tag to have. 
+ + The value of a key-value pair in the 'attrs' map can be a + string, a list of strings, a regular expression object, or a + callable that takes a string and returns whether or not the + string matches for some custom definition of 'matches'. The + same is true of the tag name.""" + generator = self.recursiveChildGenerator + if not recursive: + generator = self.childGenerator + return self._fetch(name, attrs, text, limit, generator) + fetchChildren = fetch + + #Utility methods + + def isSelfClosing(self): + """Returns true iff this is a self-closing tag as defined in the HTML + standard. + + TODO: This is specific to BeautifulSoup and its subclasses, but it's + used by __str__""" + return self.name in BeautifulSoup.SELF_CLOSING_TAGS + + def append(self, tag): + """Appends the given tag to the contents of this tag.""" + self.contents.append(tag) + + #Private methods + + def _getAttrMap(self): + """Initializes a map representation of this tag's attributes, + if not already initialized.""" + if not getattr(self, 'attrMap'): + self.attrMap = {} + for (key, value) in self.attrs: + self.attrMap[key] = value + return self.attrMap + + #Generator methods + def childGenerator(self): + for i in range(0, len(self.contents)): + yield self.contents[i] + raise StopIteration + + def recursiveChildGenerator(self): + stack = [(self, 0)] + while stack: + tag, start = stack.pop() + if isinstance(tag, Tag): + for i in range(start, len(tag.contents)): + a = tag.contents[i] + yield a + if isinstance(a, Tag) and tag.contents: + if i < len(tag.contents) - 1: + stack.append((tag, i+1)) + stack.append((a, 0)) + break + raise StopIteration + + +def isList(l): + """Convenience method that works with all 2.x versions of Python + to determine whether or not something is listlike.""" + return hasattr(l, '__iter__') \ + or (type(l) in (types.ListType, types.TupleType)) + +def buildTagMap(default, *args): + """Turns a list of maps, lists, or scalars into a single map. 
+ Used to build the SELF_CLOSING_TAGS and NESTABLE_TAGS maps out + of lists and partial maps.""" + built = {} + for portion in args: + if hasattr(portion, 'items'): + #It's a map. Merge it. + for k,v in portion.items(): + built[k] = v + elif isList(portion): + #It's a list. Map each item to the default. + for k in portion: + built[k] = default + else: + #It's a scalar. Map it to the default. + built[portion] = default + return built + +class BeautifulStoneSoup(Tag, SGMLParser): + + """This class contains the basic parser and fetch code. It defines + a parser that knows nothing about tag behavior except for the + following: + + You can't close a tag without closing all the tags it encloses. + That is, "" actually means + "". + + [Another possible explanation is "", but since + this class defines no SELF_CLOSING_TAGS, it will never use that + explanation.] + + This class is useful for parsing XML or made-up markup languages, + or when BeautifulSoup makes an assumption counter to what you were + expecting.""" + + SELF_CLOSING_TAGS = {} + NESTABLE_TAGS = {} + RESET_NESTING_TAGS = {} + QUOTE_TAGS = {} + + #As a public service we will by default silently replace MS smart quotes + #and similar characters with their HTML or ASCII equivalents. 
+ MS_CHARS = { '\x80' : '€', + '\x81' : ' ', + '\x82' : '‚', + '\x83' : 'ƒ', + '\x84' : '„', + '\x85' : '…', + '\x86' : '†', + '\x87' : '‡', + '\x88' : '⁁', + '\x89' : '%', + '\x8A' : 'Š', + '\x8B' : '<', + '\x8C' : 'Œ', + '\x8D' : '?', + '\x8E' : 'Z', + '\x8F' : '?', + '\x90' : '?', + '\x91' : '‘', + '\x92' : '’', + '\x93' : '“', + '\x94' : '”', + '\x95' : '•', + '\x96' : '–', + '\x97' : '—', + '\x98' : '˜', + '\x99' : '™', + '\x9a' : 'š', + '\x9b' : '>', + '\x9c' : 'œ', + '\x9d' : '?', + '\x9e' : 'z', + '\x9f' : 'Ÿ',} + + PARSER_MASSAGE = [(re.compile('(<[^<>]*)/>'), + lambda(x):x.group(1) + ' />'), + (re.compile(']*)>'), + lambda(x):''), + (re.compile("([\x80-\x9f])"), + lambda(x): BeautifulStoneSoup.MS_CHARS.get(x.group(1))) + ] + + ROOT_TAG_NAME = '[document]' + + def __init__(self, text=None, avoidParserProblems=True, + initialTextIsEverything=True): + """Initialize this as the 'root tag' and feed in any text to + the parser. + + NOTE about avoidParserProblems: sgmllib will process most bad + HTML, and BeautifulSoup has tricks for dealing with some HTML + that kills sgmllib, but Beautiful Soup can nonetheless choke + or lose data if your data uses self-closing tags or + declarations incorrectly. By default, Beautiful Soup sanitizes + its input to avoid the vast majority of these problems. The + problems are relatively rare, even in bad HTML, so feel free + to pass in False to avoidParserProblems if they don't apply to + you, and you'll get better performance. The only reason I have + this turned on by default is so I don't get so many tech + support questions. + + The two most common instances of invalid HTML that will choke + sgmllib are fixed by the default parser massage techniques: + +
(No space between name of closing tag and tag close) + (Extraneous whitespace in declaration) + + You can pass in a custom list of (RE object, replace method) + tuples to get Beautiful Soup to scrub your input the way you + want.""" + Tag.__init__(self, self.ROOT_TAG_NAME) + if avoidParserProblems \ + and not isList(avoidParserProblems): + avoidParserProblems = self.PARSER_MASSAGE + self.avoidParserProblems = avoidParserProblems + SGMLParser.__init__(self) + self.quoteStack = [] + self.hidden = 1 + self.reset() + if hasattr(text, 'read'): + #It's a file-type object. + text = text.read() + if text: + self.feed(text) + if initialTextIsEverything: + self.done() + + def __getattr__(self, methodName): + """This method routes method call requests to either the SGMLParser + superclass or the Tag superclass, depending on the method name.""" + if methodName.find('start_') == 0 or methodName.find('end_') == 0 \ + or methodName.find('do_') == 0: + return SGMLParser.__getattr__(self, methodName) + elif methodName.find('__') != 0: + return Tag.__getattr__(self, methodName) + else: + raise AttributeError + + def feed(self, text): + if self.avoidParserProblems: + for fix, m in self.avoidParserProblems: + text = fix.sub(m, text) + SGMLParser.feed(self, text) + + def done(self): + """Called when you're done parsing, so that the unclosed tags can be + correctly processed.""" + self.endData() #NEW + while self.currentTag.name != self.ROOT_TAG_NAME: + self.popTag() + + def reset(self): + SGMLParser.reset(self) + self.currentData = [] + self.currentTag = None + self.tagStack = [] + self.pushTag(self) + + def popTag(self): + tag = self.tagStack.pop() + # Tags with just one string-owning child get the child as a + # 'string' property, so that soup.tag.string is shorthand for + # soup.tag.contents[0] + if len(self.currentTag.contents) == 1 and \ + isinstance(self.currentTag.contents[0], NavigableText): + self.currentTag.string = self.currentTag.contents[0] + + #print "Pop", tag.name + if 
self.tagStack: + self.currentTag = self.tagStack[-1] + return self.currentTag + + def pushTag(self, tag): + #print "Push", tag.name + if self.currentTag: + self.currentTag.append(tag) + self.tagStack.append(tag) + self.currentTag = self.tagStack[-1] + + def endData(self): + currentData = ''.join(self.currentData) + if currentData: + if not currentData.strip(): + if '\n' in currentData: + currentData = '\n' + else: + currentData = ' ' + c = NavigableString + if type(currentData) == types.UnicodeType: + c = NavigableUnicodeString + o = c(currentData) + o.setup(self.currentTag, self.previous) + if self.previous: + self.previous.next = o + self.previous = o + self.currentTag.contents.append(o) + self.currentData = [] + + def _popToTag(self, name, inclusivePop=True): + """Pops the tag stack up to and including the most recent + instance of the given tag. If inclusivePop is false, pops the tag + stack up to but *not* including the most recent instqance of + the given tag.""" + if name == self.ROOT_TAG_NAME: + return + + numPops = 0 + mostRecentTag = None + for i in range(len(self.tagStack)-1, 0, -1): + if name == self.tagStack[i].name: + numPops = len(self.tagStack)-i + break + if not inclusivePop: + numPops = numPops - 1 + + for i in range(0, numPops): + mostRecentTag = self.popTag() + return mostRecentTag + + def _smartPop(self, name): + + """We need to pop up to the previous tag of this type, unless + one of this tag's nesting reset triggers comes between this + tag and the previous tag of this type, OR unless this tag is a + generic nesting trigger and another generic nesting trigger + comes between this tag and the previous tag of this type. + + Examples: +

FooBar

should pop to 'p', not 'b'. +

Foo

Bar

should pop to 'table', not 'p'. +

Foo

Bar

should pop to 'tr', not 'p'. +

FooBar

should pop to 'p', not 'b'. + +

    • *
    • * should pop to 'ul', not the first 'li'. +
  • ** should pop to 'table', not the first 'tr' + tag should + implicitly close the previous tag within the same
    ** should pop to 'tr', not the first 'td' + """ + + nestingResetTriggers = self.NESTABLE_TAGS.get(name) + isNestable = nestingResetTriggers != None + isResetNesting = self.RESET_NESTING_TAGS.has_key(name) + popTo = None + inclusive = True + for i in range(len(self.tagStack)-1, 0, -1): + p = self.tagStack[i] + if (not p or p.name == name) and not isNestable: + #Non-nestable tags get popped to the top or to their + #last occurance. + popTo = name + break + if (nestingResetTriggers != None + and p.name in nestingResetTriggers) \ + or (nestingResetTriggers == None and isResetNesting + and self.RESET_NESTING_TAGS.has_key(p.name)): + + #If we encounter one of the nesting reset triggers + #peculiar to this tag, or we encounter another tag + #that causes nesting to reset, pop up to but not + #including that tag. + + popTo = p.name + inclusive = False + break + p = p.parent + if popTo: + self._popToTag(popTo, inclusive) + + def unknown_starttag(self, name, attrs, selfClosing=0): + #print "Start tag %s" % name + if self.quoteStack: + #This is not a real tag. + #print "<%s> is not real!" % name + attrs = ''.join(map(lambda(x, y): ' %s="%s"' % (x, y), attrs)) + self.handle_data('<%s%s>' % (name, attrs)) + return + self.endData() + if not name in self.SELF_CLOSING_TAGS and not selfClosing: + self._smartPop(name) + tag = Tag(name, attrs, self.currentTag, self.previous) + if self.previous: + self.previous.next = tag + self.previous = tag + self.pushTag(tag) + if selfClosing or name in self.SELF_CLOSING_TAGS: + self.popTag() + if name in self.QUOTE_TAGS: + #print "Beginning quote (%s)" % name + self.quoteStack.append(name) + self.literal = 1 + + def unknown_endtag(self, name): + if self.quoteStack and self.quoteStack[-1] != name: + #This is not a real end tag. + #print " is not real!" 
% name + self.handle_data('' % name) + return + self.endData() + self._popToTag(name) + if self.quoteStack and self.quoteStack[-1] == name: + self.quoteStack.pop() + self.literal = (len(self.quoteStack) > 0) + + def handle_data(self, data): + self.currentData.append(data) + + def handle_pi(self, text): + "Propagate processing instructions right through." + self.handle_data("" % text) + + def handle_comment(self, text): + "Propagate comments right through." + self.handle_data("" % text) + + def handle_charref(self, ref): + "Propagate char refs right through." + self.handle_data('&#%s;' % ref) + + def handle_entityref(self, ref): + "Propagate entity refs right through." + self.handle_data('&%s;' % ref) + + def handle_decl(self, data): + "Propagate DOCTYPEs and the like right through." + self.handle_data('' % data) + + def parse_declaration(self, i): + """Treat a bogus SGML declaration as raw data. Treat a CDATA + declaration as regular data.""" + j = None + if self.rawdata[i:i+9] == '', i) + if k == -1: + k = len(self.rawdata) + self.handle_data(self.rawdata[i+9:k]) + j = k+3 + else: + try: + j = SGMLParser.parse_declaration(self, i) + except SGMLParseError: + toHandle = self.rawdata[i:] + self.handle_data(toHandle) + j = i + len(toHandle) + return j + +class BeautifulSoup(BeautifulStoneSoup): + + """This parser knows the following facts about HTML: + + * Some tags have no closing tag and should be interpreted as being + closed as soon as they are encountered. + + * The text inside some tags (ie. 'script') may contain tags which + are not really part of the document and which should be parsed + as text, not tags. If you want to parse the text as tags, you can + always fetch it and parse it explicitly. + + * Tag nesting rules: + + Most tags can't be nested at all. For instance, the occurance of + a

    tag should implicitly close the previous

    tag. + +

    Para1

    Para2 + should be transformed into: +

    Para1

    Para2 + + Some tags can be nested arbitrarily. For instance, the occurance + of a

    tag should _not_ implicitly close the previous +
    tag. + + Alice said:
    Bob said:
    Blah + should NOT be transformed into: + Alice said:
    Bob said:
    Blah + + Some tags can be nested, but the nesting is reset by the + interposition of other tags. For instance, a
    , + but not close a tag in another table. + +
    BlahBlah + should be transformed into: +
    BlahBlah + but, + Blah
    Blah + should NOT be transformed into + Blah
    Blah + + Differing assumptions about tag nesting rules are a major source + of problems with the BeautifulSoup class. If BeautifulSoup is not + treating as nestable a tag your page author treats as nestable, + try ICantBelieveItsBeautifulSoup before writing your own + subclass.""" + + SELF_CLOSING_TAGS = buildTagMap(None, ['br' , 'hr', 'input', 'img', 'meta', + 'spacer', 'link', 'frame', 'base']) + + QUOTE_TAGS = {'script': None} + + #According to the HTML standard, each of these inline tags can + #contain another tag of the same type. Furthermore, it's common + #to actually use these tags this way. + NESTABLE_INLINE_TAGS = ['span', 'font', 'q', 'object', 'bdo', 'sub', 'sup', + 'center'] + + #According to the HTML standard, these block tags can contain + #another tag of the same type. Furthermore, it's common + #to actually use these tags this way. + NESTABLE_BLOCK_TAGS = ['blockquote', 'div', 'fieldset', 'ins', 'del'] + + #Lists can contain other lists, but there are restrictions. + NESTABLE_LIST_TAGS = { 'ol' : [], + 'ul' : [], + 'li' : ['ul', 'ol'], + 'dl' : [], + 'dd' : ['dl'], + 'dt' : ['dl'] } + + #Tables can contain other tables, but there are restrictions. + NESTABLE_TABLE_TAGS = {'table' : [], + 'tr' : ['table', 'tbody', 'tfoot', 'thead'], + 'td' : ['tr'], + 'th' : ['tr'], + } + + NON_NESTABLE_BLOCK_TAGS = ['address', 'form', 'p', 'pre'] + + #If one of these tags is encountered, all tags up to the next tag of + #this type are popped. + RESET_NESTING_TAGS = buildTagMap(None, NESTABLE_BLOCK_TAGS, 'noscript', + NON_NESTABLE_BLOCK_TAGS, + NESTABLE_LIST_TAGS, + NESTABLE_TABLE_TAGS) + + NESTABLE_TAGS = buildTagMap([], NESTABLE_INLINE_TAGS, NESTABLE_BLOCK_TAGS, + NESTABLE_LIST_TAGS, NESTABLE_TABLE_TAGS) + +class ICantBelieveItsBeautifulSoup(BeautifulSoup): + + """The BeautifulSoup class is oriented towards skipping over + common HTML errors like unclosed tags. However, sometimes it makes + errors of its own. 
For instance, consider this fragment: + + FooBar + + This is perfectly valid (if bizarre) HTML. However, the + BeautifulSoup class will implicitly close the first b tag when it + encounters the second 'b'. It will think the author wrote + "FooBar", and didn't close the first 'b' tag, because + there's no real-world reason to bold something that's already + bold. When it encounters '' it will close two more 'b' + tags, for a grand total of three tags closed instead of two. This + can throw off the rest of your document structure. The same is + true of a number of other tags, listed below. + + It's much more common for someone to forget to close (eg.) a 'b' + tag than to actually use nested 'b' tags, and the BeautifulSoup + class handles the common case. This class handles the + not-co-common case: where you can't believe someone wrote what + they did, but it's valid HTML and BeautifulSoup screwed up by + assuming it wouldn't be. + + If this doesn't do what you need, try subclassing this class or + BeautifulSoup, and providing your own list of NESTABLE_TAGS.""" + + I_CANT_BELIEVE_THEYRE_NESTABLE_INLINE_TAGS = \ + ['em', 'big', 'i', 'small', 'tt', 'abbr', 'acronym', 'strong', + 'cite', 'code', 'dfn', 'kbd', 'samp', 'strong', 'var', 'b', + 'big'] + + I_CANT_BELIEVE_THEYRE_NESTABLE_BLOCK_TAGS = ['noscript'] + + NESTABLE_TAGS = buildTagMap([], BeautifulSoup.NESTABLE_TAGS, + I_CANT_BELIEVE_THEYRE_NESTABLE_BLOCK_TAGS, + I_CANT_BELIEVE_THEYRE_NESTABLE_INLINE_TAGS) + +class BeautifulSOAP(BeautifulStoneSoup): + """This class will push a tag with only a single string child into + the tag's parent as an attribute. The attribute's name is the tag + name, and the value is the string child. An example should give + the flavor of the change: + + baz + => + baz + + You can then access fooTag['bar'] instead of fooTag.barTag.string. + + This is, of course, useful for scraping structures that tend to + use subelements instead of attributes, such as SOAP messages. 
Note + that it modifies its input, so don't print the modified version + out. + + I'm not sure how many people really want to use this class; let me + know if you do. Mainly I like the name.""" + + def popTag(self): + if len(self.tagStack) > 1: + tag = self.tagStack[-1] + parent = self.tagStack[-2] + parent._getAttrMap() + if (isinstance(tag, Tag) and len(tag.contents) == 1 and + isinstance(tag.contents[0], NavigableText) and + not parent.attrMap.has_key(tag.name)): + parent[tag.name] = tag.contents[0] + BeautifulStoneSoup.popTag(self) + +#Enterprise class names! It has come to our attention that some people +#think the names of the Beautiful Soup parser classes are too silly +#and "unprofessional" for use in enterprise screen-scraping. We feel +#your pain! For such-minded folk, the Beautiful Soup Consortium And +#All-Night Kosher Bakery recommends renaming this file to +#"RobustParser.py" (or, in cases of extreme enterprisitude, +#"RobustParserBeanInterface.class") and using the following +#enterprise-friendly class aliases: +class RobustXMLParser(BeautifulStoneSoup): + pass +class RobustHTMLParser(BeautifulSoup): + pass +class RobustWackAssHTMLParser(ICantBelieveItsBeautifulSoup): + pass +class SimplifyingSOAPParser(BeautifulSOAP): + pass + +### + + +#By default, act as an HTML pretty-printer. +if __name__ == '__main__': + import sys + soup = BeautifulStoneSoup(sys.stdin.read()) + print soup.prettify() diff --git a/couchpotato/core/media/movie/providers/trailer/mechanize/_clientcookie.py b/couchpotato/core/media/movie/providers/trailer/mechanize/_clientcookie.py new file mode 100644 index 0000000000..2ed4c87827 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/mechanize/_clientcookie.py @@ -0,0 +1,1725 @@ +"""HTTP cookie handling for web clients. + +This module originally developed from my port of Gisle Aas' Perl module +HTTP::Cookies, from the libwww-perl library. 
+ +Docstrings, comments and debug strings in this code refer to the +attributes of the HTTP cookie system as cookie-attributes, to distinguish +them clearly from Python attributes. + + CookieJar____ + / \ \ + FileCookieJar \ \ + / | \ \ \ + MozillaCookieJar | LWPCookieJar \ \ + | | \ + | ---MSIEBase | \ + | / | | \ + | / MSIEDBCookieJar BSDDBCookieJar + |/ + MSIECookieJar + +Comments to John J Lee . + + +Copyright 2002-2006 John J Lee +Copyright 1997-1999 Gisle Aas (original libwww-perl code) +Copyright 2002-2003 Johnny Lee (original MSIE Perl code) + +This code is free software; you can redistribute it and/or modify it +under the terms of the BSD or ZPL 2.1 licenses (see the file +COPYING.txt included with the distribution). + +""" + +import sys, re, copy, time, urllib, types, logging +try: + import threading + _threading = threading; del threading +except ImportError: + import dummy_threading + _threading = dummy_threading; del dummy_threading + +MISSING_FILENAME_TEXT = ("a filename was not supplied (nor was the CookieJar " + "instance initialised with one)") +DEFAULT_HTTP_PORT = "80" + +from _headersutil import split_header_words, parse_ns_headers +from _util import isstringlike +import _rfc3986 + +debug = logging.getLogger("mechanize.cookies").debug + + +def reraise_unmasked_exceptions(unmasked=()): + # There are a few catch-all except: statements in this module, for + # catching input that's bad in unexpected ways. + # This function re-raises some exceptions we don't want to trap. 
+ import mechanize, warnings + if not mechanize.USE_BARE_EXCEPT: + raise + unmasked = unmasked + (KeyboardInterrupt, SystemExit, MemoryError) + etype = sys.exc_info()[0] + if issubclass(etype, unmasked): + raise + # swallowed an exception + import traceback, StringIO + f = StringIO.StringIO() + traceback.print_exc(None, f) + msg = f.getvalue() + warnings.warn("mechanize bug!\n%s" % msg, stacklevel=2) + + +IPV4_RE = re.compile(r"\.\d+$") +def is_HDN(text): + """Return True if text is a host domain name.""" + # XXX + # This may well be wrong. Which RFC is HDN defined in, if any (for + # the purposes of RFC 2965)? + # For the current implementation, what about IPv6? Remember to look + # at other uses of IPV4_RE also, if change this. + return not (IPV4_RE.search(text) or + text == "" or + text[0] == "." or text[-1] == ".") + +def domain_match(A, B): + """Return True if domain A domain-matches domain B, according to RFC 2965. + + A and B may be host domain names or IP addresses. + + RFC 2965, section 1: + + Host names can be specified either as an IP address or a HDN string. + Sometimes we compare one host name with another. (Such comparisons SHALL + be case-insensitive.) Host A's name domain-matches host B's if + + * their host name strings string-compare equal; or + + * A is a HDN string and has the form NB, where N is a non-empty + name string, B has the form .B', and B' is a HDN string. (So, + x.y.com domain-matches .Y.com but not Y.com.) + + Note that domain-match is not a commutative operation: a.b.c.com + domain-matches .c.com, but not the reverse. + + """ + # Note that, if A or B are IP addresses, the only relevant part of the + # definition of the domain-match algorithm is the direct string-compare. 
+ A = A.lower() + B = B.lower() + if A == B: + return True + if not is_HDN(A): + return False + i = A.rfind(B) + has_form_nb = not (i == -1 or i == 0) + return ( + has_form_nb and + B.startswith(".") and + is_HDN(B[1:]) + ) + +def liberal_is_HDN(text): + """Return True if text is a sort-of-like a host domain name. + + For accepting/blocking domains. + + """ + return not IPV4_RE.search(text) + +def user_domain_match(A, B): + """For blocking/accepting domains. + + A and B may be host domain names or IP addresses. + + """ + A = A.lower() + B = B.lower() + if not (liberal_is_HDN(A) and liberal_is_HDN(B)): + if A == B: + # equal IP addresses + return True + return False + initial_dot = B.startswith(".") + if initial_dot and A.endswith(B): + return True + if not initial_dot and A == B: + return True + return False + +cut_port_re = re.compile(r":\d+$") +def request_host(request): + """Return request-host, as defined by RFC 2965. + + Variation from RFC: returned value is lowercased, for convenient + comparison. 
+ + """ + url = request.get_full_url() + host = _rfc3986.urlsplit(url)[1] + if host is None: + host = request.get_header("Host", "") + # remove port, if present + return cut_port_re.sub("", host, 1) + +def request_host_lc(request): + return request_host(request).lower() + +def eff_request_host(request): + """Return a tuple (request-host, effective request-host name).""" + erhn = req_host = request_host(request) + if req_host.find(".") == -1 and not IPV4_RE.search(req_host): + erhn = req_host + ".local" + return req_host, erhn + +def eff_request_host_lc(request): + req_host, erhn = eff_request_host(request) + return req_host.lower(), erhn.lower() + +def effective_request_host(request): + """Return the effective request-host, as defined by RFC 2965.""" + return eff_request_host(request)[1] + +def request_path(request): + """Return path component of request-URI, as defined by RFC 2965.""" + url = request.get_full_url() + path = escape_path(_rfc3986.urlsplit(url)[2]) + if not path.startswith("/"): + path = "/" + path + return path + +def request_port(request): + host = request.get_host() + i = host.find(':') + if i >= 0: + port = host[i+1:] + try: + int(port) + except ValueError: + debug("nonnumeric port: '%s'", port) + return None + else: + port = DEFAULT_HTTP_PORT + return port + +def request_is_unverifiable(request): + try: + return request.is_unverifiable() + except AttributeError: + if hasattr(request, "unverifiable"): + return request.unverifiable + else: + raise + +# Characters in addition to A-Z, a-z, 0-9, '_', '.', and '-' that don't +# need to be escaped to form a valid HTTP URL (RFCs 2396 and 1738). 
+HTTP_PATH_SAFE = "%/;:@&=+$,!~*'()" +ESCAPED_CHAR_RE = re.compile(r"%([0-9a-fA-F][0-9a-fA-F])") +def uppercase_escaped_char(match): + return "%%%s" % match.group(1).upper() +def escape_path(path): + """Escape any invalid characters in HTTP URL, and uppercase all escapes.""" + # There's no knowing what character encoding was used to create URLs + # containing %-escapes, but since we have to pick one to escape invalid + # path characters, we pick UTF-8, as recommended in the HTML 4.0 + # specification: + # http://www.w3.org/TR/REC-html40/appendix/notes.html#h-B.2.1 + # And here, kind of: draft-fielding-uri-rfc2396bis-03 + # (And in draft IRI specification: draft-duerst-iri-05) + # (And here, for new URI schemes: RFC 2718) + if isinstance(path, types.UnicodeType): + path = path.encode("utf-8") + path = urllib.quote(path, HTTP_PATH_SAFE) + path = ESCAPED_CHAR_RE.sub(uppercase_escaped_char, path) + return path + +def reach(h): + """Return reach of host h, as defined by RFC 2965, section 1. + + The reach R of a host name H is defined as follows: + + * If + + - H is the host domain name of a host; and, + + - H has the form A.B; and + + - A has no embedded (that is, interior) dots; and + + - B has at least one embedded dot, or B is the string "local". + then the reach of H is .B. + + * Otherwise, the reach of H is H. + + >>> reach("www.acme.com") + '.acme.com' + >>> reach("acme.com") + 'acme.com' + >>> reach("acme.local") + '.local' + + """ + i = h.find(".") + if i >= 0: + #a = h[:i] # this line is only here to show what a is + b = h[i+1:] + i = b.find(".") + if is_HDN(h) and (i >= 0 or b == "local"): + return "."+b + return h + +def is_third_party(request): + """ + + RFC 2965, section 3.3.6: + + An unverifiable transaction is to a third-party host if its request- + host U does not domain-match the reach R of the request-host O in the + origin transaction. 
+ + """ + req_host = request_host_lc(request) + # the origin request's request-host was stuffed into request by + # _urllib2_support.AbstractHTTPHandler + return not domain_match(req_host, reach(request.origin_req_host)) + + +try: + all +except NameError: + # python 2.4 + def all(iterable): + for x in iterable: + if not x: + return False + return True + + +class Cookie: + """HTTP Cookie. + + This class represents both Netscape and RFC 2965 cookies. + + This is deliberately a very simple class. It just holds attributes. It's + possible to construct Cookie instances that don't comply with the cookie + standards. CookieJar.make_cookies is the factory function for Cookie + objects -- it deals with cookie parsing, supplying defaults, and + normalising to the representation used in this class. CookiePolicy is + responsible for checking them to see whether they should be accepted from + and returned to the server. + + version: integer; + name: string; + value: string (may be None); + port: string; None indicates no attribute was supplied (e.g. "Port", rather + than eg. "Port=80"); otherwise, a port string (eg. "80") or a port list + string (e.g. "80,8080") + port_specified: boolean; true if a value was supplied with the Port + cookie-attribute + domain: string; + domain_specified: boolean; true if Domain was explicitly set + domain_initial_dot: boolean; true if Domain as set in HTTP header by server + started with a dot (yes, this really is necessary!) 
+ path: string; + path_specified: boolean; true if Path was explicitly set + secure: boolean; true if should only be returned over secure connection + expires: integer; seconds since epoch (RFC 2965 cookies should calculate + this value from the Max-Age attribute) + discard: boolean, true if this is a session cookie; (if no expires value, + this should be true) + comment: string; + comment_url: string; + rfc2109: boolean; true if cookie arrived in a Set-Cookie: (not + Set-Cookie2:) header, but had a version cookie-attribute of 1 + rest: mapping of other cookie-attributes + + Note that the port may be present in the headers, but unspecified ("Port" + rather than"Port=80", for example); if this is the case, port is None. + + """ + + + _attrs = ("version", "name", "value", + "port", "port_specified", + "domain", "domain_specified", "domain_initial_dot", + "path", "path_specified", + "secure", "expires", "discard", "comment", "comment_url", + "rfc2109", "_rest") + + def __init__(self, version, name, value, + port, port_specified, + domain, domain_specified, domain_initial_dot, + path, path_specified, + secure, + expires, + discard, + comment, + comment_url, + rest, + rfc2109=False, + ): + + if version is not None: version = int(version) + if expires is not None: expires = int(expires) + if port is None and port_specified is True: + raise ValueError("if port is None, port_specified must be false") + + self.version = version + self.name = name + self.value = value + self.port = port + self.port_specified = port_specified + # normalise case, as per RFC 2965 section 3.3.3 + self.domain = domain.lower() + self.domain_specified = domain_specified + # Sigh. We need to know whether the domain given in the + # cookie-attribute had an initial dot, in order to follow RFC 2965 + # (as clarified in draft errata). Needed for the returned $Domain + # value. 
+ self.domain_initial_dot = domain_initial_dot + self.path = path + self.path_specified = path_specified + self.secure = secure + self.expires = expires + self.discard = discard + self.comment = comment + self.comment_url = comment_url + self.rfc2109 = rfc2109 + + self._rest = copy.copy(rest) + + def has_nonstandard_attr(self, name): + return self._rest.has_key(name) + def get_nonstandard_attr(self, name, default=None): + return self._rest.get(name, default) + def set_nonstandard_attr(self, name, value): + self._rest[name] = value + def nonstandard_attr_keys(self): + return self._rest.keys() + + def is_expired(self, now=None): + if now is None: now = time.time() + return (self.expires is not None) and (self.expires <= now) + + def __eq__(self, other): + return all(getattr(self, a) == getattr(other, a) for a in self._attrs) + + def __ne__(self, other): + return not (self == other) + + def __str__(self): + if self.port is None: p = "" + else: p = ":"+self.port + limit = self.domain + p + self.path + if self.value is not None: + namevalue = "%s=%s" % (self.name, self.value) + else: + namevalue = self.name + return "" % (namevalue, limit) + + def __repr__(self): + args = [] + for name in ["version", "name", "value", + "port", "port_specified", + "domain", "domain_specified", "domain_initial_dot", + "path", "path_specified", + "secure", "expires", "discard", "comment", "comment_url", + ]: + attr = getattr(self, name) + args.append("%s=%s" % (name, repr(attr))) + args.append("rest=%s" % repr(self._rest)) + args.append("rfc2109=%s" % repr(self.rfc2109)) + return "Cookie(%s)" % ", ".join(args) + + +class CookiePolicy: + """Defines which cookies get accepted from and returned to server. + + May also modify cookies. + + The subclass DefaultCookiePolicy defines the standard rules for Netscape + and RFC 2965 cookies -- override that if you want a customised policy. 
+ + As well as implementing set_ok and return_ok, implementations of this + interface must also supply the following attributes, indicating which + protocols should be used, and how. These can be read and set at any time, + though whether that makes complete sense from the protocol point of view is + doubtful. + + Public attributes: + + netscape: implement netscape protocol + rfc2965: implement RFC 2965 protocol + rfc2109_as_netscape: + WARNING: This argument will change or go away if is not accepted into + the Python standard library in this form! + If true, treat RFC 2109 cookies as though they were Netscape cookies. The + default is for this attribute to be None, which means treat 2109 cookies + as RFC 2965 cookies unless RFC 2965 handling is switched off (which it is, + by default), and as Netscape cookies otherwise. + hide_cookie2: don't add Cookie2 header to requests (the presence of + this header indicates to the server that we understand RFC 2965 + cookies) + + """ + def set_ok(self, cookie, request): + """Return true if (and only if) cookie should be accepted from server. + + Currently, pre-expired cookies never get this far -- the CookieJar + class deletes such cookies itself. + + cookie: mechanize.Cookie object + request: object implementing the interface defined by + CookieJar.extract_cookies.__doc__ + + """ + raise NotImplementedError() + + def return_ok(self, cookie, request): + """Return true if (and only if) cookie should be returned to server. + + cookie: mechanize.Cookie object + request: object implementing the interface defined by + CookieJar.add_cookie_header.__doc__ + + """ + raise NotImplementedError() + + def domain_return_ok(self, domain, request): + """Return false if cookies should not be returned, given cookie domain. + + This is here as an optimization, to remove the need for checking every + cookie with a particular domain (which may involve reading many files). 
+ The default implementations of domain_return_ok and path_return_ok + (return True) leave all the work to return_ok. + + If domain_return_ok returns true for the cookie domain, path_return_ok + is called for the cookie path. Otherwise, path_return_ok and return_ok + are never called for that cookie domain. If path_return_ok returns + true, return_ok is called with the Cookie object itself for a full + check. Otherwise, return_ok is never called for that cookie path. + + Note that domain_return_ok is called for every *cookie* domain, not + just for the *request* domain. For example, the function might be + called with both ".acme.com" and "www.acme.com" if the request domain + is "www.acme.com". The same goes for path_return_ok. + + For argument documentation, see the docstring for return_ok. + + """ + return True + + def path_return_ok(self, path, request): + """Return false if cookies should not be returned, given cookie path. + + See the docstring for domain_return_ok. + + """ + return True + + +class DefaultCookiePolicy(CookiePolicy): + """Implements the standard rules for accepting and returning cookies. + + Both RFC 2965 and Netscape cookies are covered. RFC 2965 handling is + switched off by default. + + The easiest way to provide your own policy is to override this class and + call its methods in your overriden implementations before adding your own + additional checks. + + import mechanize + class MyCookiePolicy(mechanize.DefaultCookiePolicy): + def set_ok(self, cookie, request): + if not mechanize.DefaultCookiePolicy.set_ok( + self, cookie, request): + return False + if i_dont_want_to_store_this_cookie(): + return False + return True + + In addition to the features required to implement the CookiePolicy + interface, this class allows you to block and allow domains from setting + and receiving cookies. 
There are also some strictness switches that allow + you to tighten up the rather loose Netscape protocol rules a little bit (at + the cost of blocking some benign cookies). + + A domain blacklist and whitelist is provided (both off by default). Only + domains not in the blacklist and present in the whitelist (if the whitelist + is active) participate in cookie setting and returning. Use the + blocked_domains constructor argument, and blocked_domains and + set_blocked_domains methods (and the corresponding argument and methods for + allowed_domains). If you set a whitelist, you can turn it off again by + setting it to None. + + Domains in block or allow lists that do not start with a dot must + string-compare equal. For example, "acme.com" matches a blacklist entry of + "acme.com", but "www.acme.com" does not. Domains that do start with a dot + are matched by more specific domains too. For example, both "www.acme.com" + and "www.munitions.acme.com" match ".acme.com" (but "acme.com" itself does + not). IP addresses are an exception, and must match exactly. For example, + if blocked_domains contains "192.168.1.2" and ".168.1.2" 192.168.1.2 is + blocked, but 193.168.1.2 is not. + + Additional Public Attributes: + + General strictness switches + + strict_domain: don't allow sites to set two-component domains with + country-code top-level domains like .co.uk, .gov.uk, .co.nz. etc. + This is far from perfect and isn't guaranteed to work! 
+ + RFC 2965 protocol strictness switches + + strict_rfc2965_unverifiable: follow RFC 2965 rules on unverifiable + transactions (usually, an unverifiable transaction is one resulting from + a redirect or an image hosted on another site); if this is false, cookies + are NEVER blocked on the basis of verifiability + + Netscape protocol strictness switches + + strict_ns_unverifiable: apply RFC 2965 rules on unverifiable transactions + even to Netscape cookies + strict_ns_domain: flags indicating how strict to be with domain-matching + rules for Netscape cookies: + DomainStrictNoDots: when setting cookies, host prefix must not contain a + dot (e.g. www.foo.bar.com can't set a cookie for .bar.com, because + www.foo contains a dot) + DomainStrictNonDomain: cookies that did not explicitly specify a Domain + cookie-attribute can only be returned to a domain that string-compares + equal to the domain that set the cookie (e.g. rockets.acme.com won't + be returned cookies from acme.com that had no Domain cookie-attribute) + DomainRFC2965Match: when setting cookies, require a full RFC 2965 + domain-match + DomainLiberal and DomainStrict are the most useful combinations of the + above flags, for convenience + strict_ns_set_initial_dollar: ignore cookies in Set-Cookie: headers that + have names starting with '$' + strict_ns_set_path: don't allow setting cookies whose path doesn't + path-match request URI + + """ + + DomainStrictNoDots = 1 + DomainStrictNonDomain = 2 + DomainRFC2965Match = 4 + + DomainLiberal = 0 + DomainStrict = DomainStrictNoDots|DomainStrictNonDomain + + def __init__(self, + blocked_domains=None, allowed_domains=None, + netscape=True, rfc2965=False, + # WARNING: this argument will change or go away if is not + # accepted into the Python standard library in this form! + # default, ie. 
treat 2109 as netscape iff not rfc2965 + rfc2109_as_netscape=None, + hide_cookie2=False, + strict_domain=False, + strict_rfc2965_unverifiable=True, + strict_ns_unverifiable=False, + strict_ns_domain=DomainLiberal, + strict_ns_set_initial_dollar=False, + strict_ns_set_path=False, + ): + """ + Constructor arguments should be used as keyword arguments only. + + blocked_domains: sequence of domain names that we never accept cookies + from, nor return cookies to + allowed_domains: if not None, this is a sequence of the only domains + for which we accept and return cookies + + For other arguments, see CookiePolicy.__doc__ and + DefaultCookiePolicy.__doc__.. + + """ + self.netscape = netscape + self.rfc2965 = rfc2965 + self.rfc2109_as_netscape = rfc2109_as_netscape + self.hide_cookie2 = hide_cookie2 + self.strict_domain = strict_domain + self.strict_rfc2965_unverifiable = strict_rfc2965_unverifiable + self.strict_ns_unverifiable = strict_ns_unverifiable + self.strict_ns_domain = strict_ns_domain + self.strict_ns_set_initial_dollar = strict_ns_set_initial_dollar + self.strict_ns_set_path = strict_ns_set_path + + if blocked_domains is not None: + self._blocked_domains = tuple(blocked_domains) + else: + self._blocked_domains = () + + if allowed_domains is not None: + allowed_domains = tuple(allowed_domains) + self._allowed_domains = allowed_domains + + def blocked_domains(self): + """Return the sequence of blocked domains (as a tuple).""" + return self._blocked_domains + def set_blocked_domains(self, blocked_domains): + """Set the sequence of blocked domains.""" + self._blocked_domains = tuple(blocked_domains) + + def is_blocked(self, domain): + for blocked_domain in self._blocked_domains: + if user_domain_match(domain, blocked_domain): + return True + return False + + def allowed_domains(self): + """Return None, or the sequence of allowed domains (as a tuple).""" + return self._allowed_domains + def set_allowed_domains(self, allowed_domains): + """Set the sequence of 
allowed domains, or None.""" + if allowed_domains is not None: + allowed_domains = tuple(allowed_domains) + self._allowed_domains = allowed_domains + + def is_not_allowed(self, domain): + if self._allowed_domains is None: + return False + for allowed_domain in self._allowed_domains: + if user_domain_match(domain, allowed_domain): + return False + return True + + def set_ok(self, cookie, request): + """ + If you override set_ok, be sure to call this method. If it returns + false, so should your subclass (assuming your subclass wants to be more + strict about which cookies to accept). + + """ + debug(" - checking cookie %s", cookie) + + assert cookie.name is not None + + for n in "version", "verifiability", "name", "path", "domain", "port": + fn_name = "set_ok_"+n + fn = getattr(self, fn_name) + if not fn(cookie, request): + return False + + return True + + def set_ok_version(self, cookie, request): + if cookie.version is None: + # Version is always set to 0 by parse_ns_headers if it's a Netscape + # cookie, so this must be an invalid RFC 2965 cookie. + debug(" Set-Cookie2 without version attribute (%s)", cookie) + return False + if cookie.version > 0 and not self.rfc2965: + debug(" RFC 2965 cookies are switched off") + return False + elif cookie.version == 0 and not self.netscape: + debug(" Netscape cookies are switched off") + return False + return True + + def set_ok_verifiability(self, cookie, request): + if request_is_unverifiable(request) and is_third_party(request): + if cookie.version > 0 and self.strict_rfc2965_unverifiable: + debug(" third-party RFC 2965 cookie during " + "unverifiable transaction") + return False + elif cookie.version == 0 and self.strict_ns_unverifiable: + debug(" third-party Netscape cookie during " + "unverifiable transaction") + return False + return True + + def set_ok_name(self, cookie, request): + # Try and stop servers setting V0 cookies designed to hack other + # servers that know both V0 and V1 protocols. 
+ if (cookie.version == 0 and self.strict_ns_set_initial_dollar and + cookie.name.startswith("$")): + debug(" illegal name (starts with '$'): '%s'", cookie.name) + return False + return True + + def set_ok_path(self, cookie, request): + if cookie.path_specified: + req_path = request_path(request) + if ((cookie.version > 0 or + (cookie.version == 0 and self.strict_ns_set_path)) and + not req_path.startswith(cookie.path)): + debug(" path attribute %s is not a prefix of request " + "path %s", cookie.path, req_path) + return False + return True + + def set_ok_countrycode_domain(self, cookie, request): + """Return False if explicit cookie domain is not acceptable. + + Called by set_ok_domain, for convenience of overriding by + subclasses. + + """ + if cookie.domain_specified and self.strict_domain: + domain = cookie.domain + # since domain was specified, we know that: + assert domain.startswith(".") + if domain.count(".") == 2: + # domain like .foo.bar + i = domain.rfind(".") + tld = domain[i+1:] + sld = domain[1:i] + if (sld.lower() in [ + "co", "ac", + "com", "edu", "org", "net", "gov", "mil", "int", + "aero", "biz", "cat", "coop", "info", "jobs", "mobi", + "museum", "name", "pro", "travel", + ] and + len(tld) == 2): + # domain like .co.uk + return False + return True + + def set_ok_domain(self, cookie, request): + if self.is_blocked(cookie.domain): + debug(" domain %s is in user block-list", cookie.domain) + return False + if self.is_not_allowed(cookie.domain): + debug(" domain %s is not in user allow-list", cookie.domain) + return False + if not self.set_ok_countrycode_domain(cookie, request): + debug(" country-code second level domain %s", cookie.domain) + return False + if cookie.domain_specified: + req_host, erhn = eff_request_host_lc(request) + domain = cookie.domain + if domain.startswith("."): + undotted_domain = domain[1:] + else: + undotted_domain = domain + embedded_dots = (undotted_domain.find(".") >= 0) + if not embedded_dots and domain != ".local": + 
debug(" non-local domain %s contains no embedded dot", + domain) + return False + if cookie.version == 0: + if (not erhn.endswith(domain) and + (not erhn.startswith(".") and + not ("."+erhn).endswith(domain))): + debug(" effective request-host %s (even with added " + "initial dot) does not end end with %s", + erhn, domain) + return False + if (cookie.version > 0 or + (self.strict_ns_domain & self.DomainRFC2965Match)): + if not domain_match(erhn, domain): + debug(" effective request-host %s does not domain-match " + "%s", erhn, domain) + return False + if (cookie.version > 0 or + (self.strict_ns_domain & self.DomainStrictNoDots)): + host_prefix = req_host[:-len(domain)] + if (host_prefix.find(".") >= 0 and + not IPV4_RE.search(req_host)): + debug(" host prefix %s for domain %s contains a dot", + host_prefix, domain) + return False + return True + + def set_ok_port(self, cookie, request): + if cookie.port_specified: + req_port = request_port(request) + if req_port is None: + req_port = "80" + else: + req_port = str(req_port) + for p in cookie.port.split(","): + try: + int(p) + except ValueError: + debug(" bad port %s (not numeric)", p) + return False + if p == req_port: + break + else: + debug(" request port (%s) not found in %s", + req_port, cookie.port) + return False + return True + + def return_ok(self, cookie, request): + """ + If you override return_ok, be sure to call this method. If it returns + false, so should your subclass (assuming your subclass wants to be more + strict about which cookies to return). + + """ + # Path has already been checked by path_return_ok, and domain blocking + # done by domain_return_ok. 
+ debug(" - checking cookie %s", cookie) + + for n in ("version", "verifiability", "secure", "expires", "port", + "domain"): + fn_name = "return_ok_"+n + fn = getattr(self, fn_name) + if not fn(cookie, request): + return False + return True + + def return_ok_version(self, cookie, request): + if cookie.version > 0 and not self.rfc2965: + debug(" RFC 2965 cookies are switched off") + return False + elif cookie.version == 0 and not self.netscape: + debug(" Netscape cookies are switched off") + return False + return True + + def return_ok_verifiability(self, cookie, request): + if request_is_unverifiable(request) and is_third_party(request): + if cookie.version > 0 and self.strict_rfc2965_unverifiable: + debug(" third-party RFC 2965 cookie during unverifiable " + "transaction") + return False + elif cookie.version == 0 and self.strict_ns_unverifiable: + debug(" third-party Netscape cookie during unverifiable " + "transaction") + return False + return True + + def return_ok_secure(self, cookie, request): + if cookie.secure and request.get_type() != "https": + debug(" secure cookie with non-secure request") + return False + return True + + def return_ok_expires(self, cookie, request): + if cookie.is_expired(self._now): + debug(" cookie expired") + return False + return True + + def return_ok_port(self, cookie, request): + if cookie.port: + req_port = request_port(request) + if req_port is None: + req_port = "80" + for p in cookie.port.split(","): + if p == req_port: + break + else: + debug(" request port %s does not match cookie port %s", + req_port, cookie.port) + return False + return True + + def return_ok_domain(self, cookie, request): + req_host, erhn = eff_request_host_lc(request) + domain = cookie.domain + + # strict check of non-domain cookies: Mozilla does this, MSIE5 doesn't + if (cookie.version == 0 and + (self.strict_ns_domain & self.DomainStrictNonDomain) and + not cookie.domain_specified and domain != erhn): + debug(" cookie with unspecified domain does not 
string-compare " + "equal to request domain") + return False + + if cookie.version > 0 and not domain_match(erhn, domain): + debug(" effective request-host name %s does not domain-match " + "RFC 2965 cookie domain %s", erhn, domain) + return False + if cookie.version == 0 and not ("."+erhn).endswith(domain): + debug(" request-host %s does not match Netscape cookie domain " + "%s", req_host, domain) + return False + return True + + def domain_return_ok(self, domain, request): + # Liberal check of domain. This is here as an optimization to avoid + # having to load lots of MSIE cookie files unless necessary. + + # Munge req_host and erhn to always start with a dot, so as to err on + # the side of letting cookies through. + dotted_req_host, dotted_erhn = eff_request_host_lc(request) + if not dotted_req_host.startswith("."): + dotted_req_host = "."+dotted_req_host + if not dotted_erhn.startswith("."): + dotted_erhn = "."+dotted_erhn + if not (dotted_req_host.endswith(domain) or + dotted_erhn.endswith(domain)): + #debug(" request domain %s does not match cookie domain %s", + # req_host, domain) + return False + + if self.is_blocked(domain): + debug(" domain %s is in user block-list", domain) + return False + if self.is_not_allowed(domain): + debug(" domain %s is not in user allow-list", domain) + return False + + return True + + def path_return_ok(self, path, request): + debug("- checking cookie path=%s", path) + req_path = request_path(request) + if not req_path.startswith(path): + debug(" %s does not path-match %s", req_path, path) + return False + return True + + +def vals_sorted_by_key(adict): + keys = adict.keys() + keys.sort() + return map(adict.get, keys) + +class MappingIterator: + """Iterates over nested mapping, depth-first, in sorted order by key.""" + def __init__(self, mapping): + self._s = [(vals_sorted_by_key(mapping), 0, None)] # LIFO stack + + def __iter__(self): return self + + def next(self): + # this is hairy because of lack of generators + while 1: + 
try: + vals, i, prev_item = self._s.pop() + except IndexError: + raise StopIteration() + if i < len(vals): + item = vals[i] + i = i + 1 + self._s.append((vals, i, prev_item)) + try: + item.items + except AttributeError: + # non-mapping + break + else: + # mapping + self._s.append((vals_sorted_by_key(item), 0, item)) + continue + return item + + +# Used as second parameter to dict.get method, to distinguish absent +# dict key from one with a None value. +class Absent: pass + +class CookieJar: + """Collection of HTTP cookies. + + You may not need to know about this class: try mechanize.urlopen(). + + The major methods are extract_cookies and add_cookie_header; these are all + you are likely to need. + + CookieJar supports the iterator protocol: + + for cookie in cookiejar: + # do something with cookie + + Methods: + + add_cookie_header(request) + extract_cookies(response, request) + get_policy() + set_policy(policy) + cookies_for_request(request) + make_cookies(response, request) + set_cookie_if_ok(cookie, request) + set_cookie(cookie) + clear_session_cookies() + clear_expired_cookies() + clear(domain=None, path=None, name=None) + + Public attributes + + policy: CookiePolicy object + + """ + + non_word_re = re.compile(r"\W") + quote_re = re.compile(r"([\"\\])") + strict_domain_re = re.compile(r"\.?[^.]*") + domain_re = re.compile(r"[^.]*") + dots_re = re.compile(r"^\.+") + + def __init__(self, policy=None): + """ + See CookieJar.__doc__ for argument documentation. 
+ + """ + if policy is None: + policy = DefaultCookiePolicy() + self._policy = policy + + self._cookies = {} + + # for __getitem__ iteration in pre-2.2 Pythons + self._prev_getitem_index = 0 + + def get_policy(self): + return self._policy + + def set_policy(self, policy): + self._policy = policy + + def _cookies_for_domain(self, domain, request): + cookies = [] + if not self._policy.domain_return_ok(domain, request): + return [] + debug("Checking %s for cookies to return", domain) + cookies_by_path = self._cookies[domain] + for path in cookies_by_path.keys(): + if not self._policy.path_return_ok(path, request): + continue + cookies_by_name = cookies_by_path[path] + for cookie in cookies_by_name.values(): + if not self._policy.return_ok(cookie, request): + debug(" not returning cookie") + continue + debug(" it's a match") + cookies.append(cookie) + return cookies + + def cookies_for_request(self, request): + """Return a list of cookies to be returned to server. + + The returned list of cookie instances is sorted in the order they + should appear in the Cookie: header for return to the server. + + See add_cookie_header.__doc__ for the interface required of the + request argument. + + New in version 0.1.10 + + """ + self._policy._now = self._now = int(time.time()) + cookies = self._cookies_for_request(request) + # add cookies in order of most specific (i.e. 
longest) path first + def decreasing_size(a, b): return cmp(len(b.path), len(a.path)) + cookies.sort(decreasing_size) + return cookies + + def _cookies_for_request(self, request): + """Return a list of cookies to be returned to server.""" + # this method still exists (alongside cookies_for_request) because it + # is part of an implied protected interface for subclasses of cookiejar + # XXX document that implied interface, or provide another way of + # implementing cookiejars than subclassing + cookies = [] + for domain in self._cookies.keys(): + cookies.extend(self._cookies_for_domain(domain, request)) + return cookies + + def _cookie_attrs(self, cookies): + """Return a list of cookie-attributes to be returned to server. + + The $Version attribute is also added when appropriate (currently only + once per request). + + >>> jar = CookieJar() + >>> ns_cookie = Cookie(0, "foo", '"bar"', None, False, + ... "example.com", False, False, + ... "/", False, False, None, True, + ... None, None, {}) + >>> jar._cookie_attrs([ns_cookie]) + ['foo="bar"'] + >>> rfc2965_cookie = Cookie(1, "foo", "bar", None, False, + ... ".example.com", True, False, + ... "/", False, False, None, True, + ... None, None, {}) + >>> jar._cookie_attrs([rfc2965_cookie]) + ['$Version=1', 'foo=bar', '$Domain="example.com"'] + + """ + version_set = False + + attrs = [] + for cookie in cookies: + # set version of Cookie header + # XXX + # What should it be if multiple matching Set-Cookie headers have + # different versions themselves? + # Answer: there is no answer; was supposed to be settled by + # RFC 2965 errata, but that may never appear... 
+ version = cookie.version + if not version_set: + version_set = True + if version > 0: + attrs.append("$Version=%s" % version) + + # quote cookie value if necessary + # (not for Netscape protocol, which already has any quotes + # intact, due to the poorly-specified Netscape Cookie: syntax) + if ((cookie.value is not None) and + self.non_word_re.search(cookie.value) and version > 0): + value = self.quote_re.sub(r"\\\1", cookie.value) + else: + value = cookie.value + + # add cookie-attributes to be returned in Cookie header + if cookie.value is None: + attrs.append(cookie.name) + else: + attrs.append("%s=%s" % (cookie.name, value)) + if version > 0: + if cookie.path_specified: + attrs.append('$Path="%s"' % cookie.path) + if cookie.domain.startswith("."): + domain = cookie.domain + if (not cookie.domain_initial_dot and + domain.startswith(".")): + domain = domain[1:] + attrs.append('$Domain="%s"' % domain) + if cookie.port is not None: + p = "$Port" + if cookie.port_specified: + p = p + ('="%s"' % cookie.port) + attrs.append(p) + + return attrs + + def add_cookie_header(self, request): + """Add correct Cookie: header to request (mechanize.Request object). + + The Cookie2 header is also added unless policy.hide_cookie2 is true. + + The request object (usually a mechanize.Request instance) must support + the methods get_full_url, get_host, is_unverifiable, get_type, + has_header, get_header, header_items and add_unredirected_header, as + documented by urllib2. 
+ """ + debug("add_cookie_header") + cookies = self.cookies_for_request(request) + + attrs = self._cookie_attrs(cookies) + if attrs: + if not request.has_header("Cookie"): + request.add_unredirected_header("Cookie", "; ".join(attrs)) + + # if necessary, advertise that we know RFC 2965 + if self._policy.rfc2965 and not self._policy.hide_cookie2: + for cookie in cookies: + if cookie.version != 1 and not request.has_header("Cookie2"): + request.add_unredirected_header("Cookie2", '$Version="1"') + break + + self.clear_expired_cookies() + + def _normalized_cookie_tuples(self, attrs_set): + """Return list of tuples containing normalised cookie information. + + attrs_set is the list of lists of key,value pairs extracted from + the Set-Cookie or Set-Cookie2 headers. + + Tuples are name, value, standard, rest, where name and value are the + cookie name and value, standard is a dictionary containing the standard + cookie-attributes (discard, secure, version, expires or max-age, + domain, path and port) and rest is a dictionary containing the rest of + the cookie-attributes. + + """ + cookie_tuples = [] + + boolean_attrs = "discard", "secure" + value_attrs = ("version", + "expires", "max-age", + "domain", "path", "port", + "comment", "commenturl") + + for cookie_attrs in attrs_set: + name, value = cookie_attrs[0] + + # Build dictionary of standard cookie-attributes (standard) and + # dictionary of other cookie-attributes (rest). + + # Note: expiry time is normalised to seconds since epoch. V0 + # cookies should have the Expires cookie-attribute, and V1 cookies + # should have Max-Age, but since V1 includes RFC 2109 cookies (and + # since V0 cookies may be a mish-mash of Netscape and RFC 2109), we + # accept either (but prefer Max-Age). 
+ max_age_set = False + + bad_cookie = False + + standard = {} + rest = {} + for k, v in cookie_attrs[1:]: + lc = k.lower() + # don't lose case distinction for unknown fields + if lc in value_attrs or lc in boolean_attrs: + k = lc + if k in boolean_attrs and v is None: + # boolean cookie-attribute is present, but has no value + # (like "discard", rather than "port=80") + v = True + if standard.has_key(k): + # only first value is significant + continue + if k == "domain": + if v is None: + debug(" missing value for domain attribute") + bad_cookie = True + break + # RFC 2965 section 3.3.3 + v = v.lower() + if k == "expires": + if max_age_set: + # Prefer max-age to expires (like Mozilla) + continue + if v is None: + debug(" missing or invalid value for expires " + "attribute: treating as session cookie") + continue + if k == "max-age": + max_age_set = True + if v is None: + debug(" missing value for max-age attribute") + bad_cookie = True + break + try: + v = int(v) + except ValueError: + debug(" missing or invalid (non-numeric) value for " + "max-age attribute") + bad_cookie = True + break + # convert RFC 2965 Max-Age to seconds since epoch + # XXX Strictly you're supposed to follow RFC 2616 + # age-calculation rules. Remember that zero Max-Age is a + # is a request to discard (old and new) cookie, though. 
+ k = "expires" + v = self._now + v + if (k in value_attrs) or (k in boolean_attrs): + if (v is None and + k not in ["port", "comment", "commenturl"]): + debug(" missing value for %s attribute" % k) + bad_cookie = True + break + standard[k] = v + else: + rest[k] = v + + if bad_cookie: + continue + + cookie_tuples.append((name, value, standard, rest)) + + return cookie_tuples + + def _cookie_from_cookie_tuple(self, tup, request): + # standard is dict of standard cookie-attributes, rest is dict of the + # rest of them + name, value, standard, rest = tup + + domain = standard.get("domain", Absent) + path = standard.get("path", Absent) + port = standard.get("port", Absent) + expires = standard.get("expires", Absent) + + # set the easy defaults + version = standard.get("version", None) + if version is not None: + try: + version = int(version) + except ValueError: + return None # invalid version, ignore cookie + secure = standard.get("secure", False) + # (discard is also set if expires is Absent) + discard = standard.get("discard", False) + comment = standard.get("comment", None) + comment_url = standard.get("commenturl", None) + + # set default path + if path is not Absent and path != "": + path_specified = True + path = escape_path(path) + else: + path_specified = False + path = request_path(request) + i = path.rfind("/") + if i != -1: + if version == 0: + # Netscape spec parts company from reality here + path = path[:i] + else: + path = path[:i+1] + if len(path) == 0: path = "/" + + # set default domain + domain_specified = domain is not Absent + # but first we have to remember whether it starts with a dot + domain_initial_dot = False + if domain_specified: + domain_initial_dot = bool(domain.startswith(".")) + if domain is Absent: + req_host, erhn = eff_request_host_lc(request) + domain = erhn + elif not domain.startswith("."): + domain = "."+domain + + # set default port + port_specified = False + if port is not Absent: + if port is None: + # Port attr present, but 
has no value: default to request port. + # Cookie should then only be sent back on that port. + port = request_port(request) + else: + port_specified = True + port = re.sub(r"\s+", "", port) + else: + # No port attr present. Cookie can be sent back on any port. + port = None + + # set default expires and discard + if expires is Absent: + expires = None + discard = True + + return Cookie(version, + name, value, + port, port_specified, + domain, domain_specified, domain_initial_dot, + path, path_specified, + secure, + expires, + discard, + comment, + comment_url, + rest) + + def _cookies_from_attrs_set(self, attrs_set, request): + cookie_tuples = self._normalized_cookie_tuples(attrs_set) + + cookies = [] + for tup in cookie_tuples: + cookie = self._cookie_from_cookie_tuple(tup, request) + if cookie: cookies.append(cookie) + return cookies + + def _process_rfc2109_cookies(self, cookies): + if self._policy.rfc2109_as_netscape is None: + rfc2109_as_netscape = not self._policy.rfc2965 + else: + rfc2109_as_netscape = self._policy.rfc2109_as_netscape + for cookie in cookies: + if cookie.version == 1: + cookie.rfc2109 = True + if rfc2109_as_netscape: + # treat 2109 cookies as Netscape cookies rather than + # as RFC2965 cookies + cookie.version = 0 + + def _make_cookies(self, response, request): + # get cookie-attributes for RFC 2965 and Netscape protocols + headers = response.info() + rfc2965_hdrs = headers.getheaders("Set-Cookie2") + ns_hdrs = headers.getheaders("Set-Cookie") + + rfc2965 = self._policy.rfc2965 + netscape = self._policy.netscape + + if ((not rfc2965_hdrs and not ns_hdrs) or + (not ns_hdrs and not rfc2965) or + (not rfc2965_hdrs and not netscape) or + (not netscape and not rfc2965)): + return [] # no relevant cookie headers: quick exit + + try: + cookies = self._cookies_from_attrs_set( + split_header_words(rfc2965_hdrs), request) + except: + reraise_unmasked_exceptions() + cookies = [] + + if ns_hdrs and netscape: + try: + # RFC 2109 and Netscape cookies + 
ns_cookies = self._cookies_from_attrs_set( + parse_ns_headers(ns_hdrs), request) + except: + reraise_unmasked_exceptions() + ns_cookies = [] + self._process_rfc2109_cookies(ns_cookies) + + # Look for Netscape cookies (from Set-Cookie headers) that match + # corresponding RFC 2965 cookies (from Set-Cookie2 headers). + # For each match, keep the RFC 2965 cookie and ignore the Netscape + # cookie (RFC 2965 section 9.1). Actually, RFC 2109 cookies are + # bundled in with the Netscape cookies for this purpose, which is + # reasonable behaviour. + if rfc2965: + lookup = {} + for cookie in cookies: + lookup[(cookie.domain, cookie.path, cookie.name)] = None + + def no_matching_rfc2965(ns_cookie, lookup=lookup): + key = ns_cookie.domain, ns_cookie.path, ns_cookie.name + return not lookup.has_key(key) + ns_cookies = filter(no_matching_rfc2965, ns_cookies) + + if ns_cookies: + cookies.extend(ns_cookies) + + return cookies + + def make_cookies(self, response, request): + """Return sequence of Cookie objects extracted from response object. + + See extract_cookies.__doc__ for the interface required of the + response and request arguments. + + """ + self._policy._now = self._now = int(time.time()) + return [cookie for cookie in self._make_cookies(response, request) + if cookie.expires is None or not cookie.expires <= self._now] + + def set_cookie_if_ok(self, cookie, request): + """Set a cookie if policy says it's OK to do so. + + cookie: mechanize.Cookie instance + request: see extract_cookies.__doc__ for the required interface + + """ + self._policy._now = self._now = int(time.time()) + + if self._policy.set_ok(cookie, request): + self.set_cookie(cookie) + + def set_cookie(self, cookie): + """Set a cookie, without checking whether or not it should be set. 
+ + cookie: mechanize.Cookie instance + """ + c = self._cookies + if not c.has_key(cookie.domain): c[cookie.domain] = {} + c2 = c[cookie.domain] + if not c2.has_key(cookie.path): c2[cookie.path] = {} + c3 = c2[cookie.path] + c3[cookie.name] = cookie + + def extract_cookies(self, response, request): + """Extract cookies from response, where allowable given the request. + + Look for allowable Set-Cookie: and Set-Cookie2: headers in the response + object passed as argument. Any of these headers that are found are + used to update the state of the object (subject to the policy.set_ok + method's approval). + + The response object (usually be the result of a call to + mechanize.urlopen, or similar) should support an info method, which + returns a mimetools.Message object (in fact, the 'mimetools.Message + object' may be any object that provides a getheaders method). + + The request object (usually a mechanize.Request instance) must support + the methods get_full_url, get_type, get_host, and is_unverifiable, as + documented by mechanize, and the port attribute (the port number). The + request is used to set default values for cookie-attributes as well as + for checking that the cookie is OK to be set. + + """ + debug("extract_cookies: %s", response.info()) + self._policy._now = self._now = int(time.time()) + + for cookie in self._make_cookies(response, request): + if cookie.expires is not None and cookie.expires <= self._now: + # Expiry date in past is request to delete cookie. This can't be + # in DefaultCookiePolicy, because can't delete cookies there. + try: + self.clear(cookie.domain, cookie.path, cookie.name) + except KeyError: + pass + debug("Expiring cookie, domain='%s', path='%s', name='%s'", + cookie.domain, cookie.path, cookie.name) + elif self._policy.set_ok(cookie, request): + debug(" setting cookie: %s", cookie) + self.set_cookie(cookie) + + def clear(self, domain=None, path=None, name=None): + """Clear some cookies. 
+ + Invoking this method without arguments will clear all cookies. If + given a single argument, only cookies belonging to that domain will be + removed. If given two arguments, cookies belonging to the specified + path within that domain are removed. If given three arguments, then + the cookie with the specified name, path and domain is removed. + + Raises KeyError if no matching cookie exists. + + """ + if name is not None: + if (domain is None) or (path is None): + raise ValueError( + "domain and path must be given to remove a cookie by name") + del self._cookies[domain][path][name] + elif path is not None: + if domain is None: + raise ValueError( + "domain must be given to remove cookies by path") + del self._cookies[domain][path] + elif domain is not None: + del self._cookies[domain] + else: + self._cookies = {} + + def clear_session_cookies(self): + """Discard all session cookies. + + Discards all cookies held by object which had either no Max-Age or + Expires cookie-attribute or an explicit Discard cookie-attribute, or + which otherwise have ended up with a true discard attribute. For + interactive browsers, the end of a session usually corresponds to + closing the browser window. + + Note that the save method won't save session cookies anyway, unless you + ask otherwise by passing a true ignore_discard argument. + + """ + for cookie in self: + if cookie.discard: + self.clear(cookie.domain, cookie.path, cookie.name) + + def clear_expired_cookies(self): + """Discard all expired cookies. + + You probably don't need to call this method: expired cookies are never + sent back to the server (provided you're using DefaultCookiePolicy), + this method is called by CookieJar itself every so often, and the save + method won't save expired cookies anyway (unless you ask otherwise by + passing a true ignore_expires argument). 
+ + """ + now = time.time() + for cookie in self: + if cookie.is_expired(now): + self.clear(cookie.domain, cookie.path, cookie.name) + + def __getitem__(self, i): + if i == 0: + self._getitem_iterator = self.__iter__() + elif self._prev_getitem_index != i-1: raise IndexError( + "CookieJar.__getitem__ only supports sequential iteration") + self._prev_getitem_index = i + try: + return self._getitem_iterator.next() + except StopIteration: + raise IndexError() + + def __iter__(self): + return MappingIterator(self._cookies) + + def __len__(self): + """Return number of contained cookies.""" + i = 0 + for cookie in self: i = i + 1 + return i + + def __repr__(self): + r = [] + for cookie in self: r.append(repr(cookie)) + return "<%s[%s]>" % (self.__class__, ", ".join(r)) + + def __str__(self): + r = [] + for cookie in self: r.append(str(cookie)) + return "<%s[%s]>" % (self.__class__, ", ".join(r)) + + +class LoadError(Exception): pass + +class FileCookieJar(CookieJar): + """CookieJar that can be loaded from and saved to a file. + + Additional methods + + save(filename=None, ignore_discard=False, ignore_expires=False) + load(filename=None, ignore_discard=False, ignore_expires=False) + revert(filename=None, ignore_discard=False, ignore_expires=False) + + Additional public attributes + + filename: filename for loading and saving cookies + + Additional public readable attributes + + delayload: request that cookies are lazily loaded from disk; this is only + a hint since this only affects performance, not behaviour (unless the + cookies on disk are changing); a CookieJar object may ignore it (in fact, + only MSIECookieJar lazily loads cookies at the moment) + + """ + + def __init__(self, filename=None, delayload=False, policy=None): + """ + See FileCookieJar.__doc__ for argument documentation. + + Cookies are NOT loaded from the named file until either the load or + revert method is called. 
+ + """ + CookieJar.__init__(self, policy) + if filename is not None and not isstringlike(filename): + raise ValueError("filename must be string-like") + self.filename = filename + self.delayload = bool(delayload) + + def save(self, filename=None, ignore_discard=False, ignore_expires=False): + """Save cookies to a file. + + filename: name of file in which to save cookies + ignore_discard: save even cookies set to be discarded + ignore_expires: save even cookies that have expired + + The file is overwritten if it already exists, thus wiping all its + cookies. Saved cookies can be restored later using the load or revert + methods. If filename is not specified, self.filename is used; if + self.filename is None, ValueError is raised. + + """ + raise NotImplementedError() + + def load(self, filename=None, ignore_discard=False, ignore_expires=False): + """Load cookies from a file. + + Old cookies are kept unless overwritten by newly loaded ones. + + Arguments are as for .save(). + + If filename is not specified, self.filename is used; if self.filename + is None, ValueError is raised. The named file must be in the format + understood by the class, or LoadError will be raised. This format will + be identical to that written by the save method, unless the load format + is not sufficiently well understood (as is the case for MSIECookieJar). + + """ + if filename is None: + if self.filename is not None: filename = self.filename + else: raise ValueError(MISSING_FILENAME_TEXT) + + f = open(filename) + try: + self._really_load(f, filename, ignore_discard, ignore_expires) + finally: + f.close() + + def revert(self, filename=None, + ignore_discard=False, ignore_expires=False): + """Clear all cookies and reload cookies from a saved file. + + Raises LoadError (or IOError) if reversion is not successful; the + object's state will not be altered if this happens. 
+ + """ + if filename is None: + if self.filename is not None: filename = self.filename + else: raise ValueError(MISSING_FILENAME_TEXT) + + old_state = copy.deepcopy(self._cookies) + self._cookies = {} + try: + self.load(filename, ignore_discard, ignore_expires) + except (LoadError, IOError): + self._cookies = old_state + raise diff --git a/couchpotato/core/media/movie/providers/trailer/mechanize/_debug.py b/couchpotato/core/media/movie/providers/trailer/mechanize/_debug.py new file mode 100644 index 0000000000..8243969990 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/mechanize/_debug.py @@ -0,0 +1,28 @@ +import logging + +from _response import response_seek_wrapper +from _urllib2_fork import BaseHandler + + +class HTTPResponseDebugProcessor(BaseHandler): + handler_order = 900 # before redirections, after everything else + + def http_response(self, request, response): + if not hasattr(response, "seek"): + response = response_seek_wrapper(response) + info = logging.getLogger("mechanize.http_responses").info + try: + info(response.read()) + finally: + response.seek(0) + info("*****************************************************") + return response + + https_response = http_response + +class HTTPRedirectDebugProcessor(BaseHandler): + def http_request(self, request): + if hasattr(request, "redirect_dict"): + info = logging.getLogger("mechanize.http_redirects").info + info("redirecting to %s", request.get_full_url()) + return request diff --git a/couchpotato/core/media/movie/providers/trailer/mechanize/_firefox3cookiejar.py b/couchpotato/core/media/movie/providers/trailer/mechanize/_firefox3cookiejar.py new file mode 100644 index 0000000000..a64d70f35d --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/mechanize/_firefox3cookiejar.py @@ -0,0 +1,248 @@ +"""Firefox 3 "cookies.sqlite" cookie persistence. 
+ +Copyright 2008 John J Lee + +This code is free software; you can redistribute it and/or modify it +under the terms of the BSD or ZPL 2.1 licenses (see the file +COPYING.txt included with the distribution). + +""" + +import logging +import time + +from _clientcookie import CookieJar, Cookie, MappingIterator +from _util import isstringlike, experimental +debug = logging.getLogger("mechanize.cookies").debug + + +class Firefox3CookieJar(CookieJar): + + """Firefox 3 cookie jar. + + The cookies are stored in Firefox 3's "cookies.sqlite" format. + + Constructor arguments: + + filename: filename of cookies.sqlite (typically found at the top level + of a firefox profile directory) + autoconnect: as a convenience, connect to the SQLite cookies database at + Firefox3CookieJar construction time (default True) + policy: an object satisfying the mechanize.CookiePolicy interface + + Note that this is NOT a FileCookieJar, and there are no .load(), + .save() or .restore() methods. The database is in sync with the + cookiejar object's state after each public method call. + + Following Firefox's own behaviour, session cookies are never saved to + the database. + + The file is created, and an sqlite database written to it, if it does + not already exist. The moz_cookies database table is created if it does + not already exist. 
+ """ + + # XXX + # handle DatabaseError exceptions + # add a FileCookieJar (explicit .save() / .revert() / .load() methods) + + def __init__(self, filename, autoconnect=True, policy=None): + experimental("Firefox3CookieJar is experimental code") + CookieJar.__init__(self, policy) + if filename is not None and not isstringlike(filename): + raise ValueError("filename must be string-like") + self.filename = filename + self._conn = None + if autoconnect: + self.connect() + + def connect(self): + import sqlite3 # not available in Python 2.4 stdlib + self._conn = sqlite3.connect(self.filename) + self._conn.isolation_level = "DEFERRED" + self._create_table_if_necessary() + + def close(self): + self._conn.close() + + def _transaction(self, func): + try: + cur = self._conn.cursor() + try: + result = func(cur) + finally: + cur.close() + except: + self._conn.rollback() + raise + else: + self._conn.commit() + return result + + def _execute(self, query, params=()): + return self._transaction(lambda cur: cur.execute(query, params)) + + def _query(self, query, params=()): + # XXX should we bother with a transaction? 
+ cur = self._conn.cursor() + try: + cur.execute(query, params) + return cur.fetchall() + finally: + cur.close() + + def _create_table_if_necessary(self): + self._execute("""\ +CREATE TABLE IF NOT EXISTS moz_cookies (id INTEGER PRIMARY KEY, name TEXT, + value TEXT, host TEXT, path TEXT,expiry INTEGER, + lastAccessed INTEGER, isSecure INTEGER, isHttpOnly INTEGER)""") + + def _cookie_from_row(self, row): + (pk, name, value, domain, path, expires, + last_accessed, secure, http_only) = row + + version = 0 + domain = domain.encode("ascii", "ignore") + path = path.encode("ascii", "ignore") + name = name.encode("ascii", "ignore") + value = value.encode("ascii", "ignore") + secure = bool(secure) + + # last_accessed isn't a cookie attribute, so isn't added to rest + rest = {} + if http_only: + rest["HttpOnly"] = None + + if name == "": + name = value + value = None + + initial_dot = domain.startswith(".") + domain_specified = initial_dot + + discard = False + if expires == "": + expires = None + discard = True + + return Cookie(version, name, value, + None, False, + domain, domain_specified, initial_dot, + path, False, + secure, + expires, + discard, + None, + None, + rest) + + def clear(self, domain=None, path=None, name=None): + CookieJar.clear(self, domain, path, name) + where_parts = [] + sql_params = [] + if domain is not None: + where_parts.append("host = ?") + sql_params.append(domain) + if path is not None: + where_parts.append("path = ?") + sql_params.append(path) + if name is not None: + where_parts.append("name = ?") + sql_params.append(name) + where = " AND ".join(where_parts) + if where: + where = " WHERE " + where + def clear(cur): + cur.execute("DELETE FROM moz_cookies%s" % where, + tuple(sql_params)) + self._transaction(clear) + + def _row_from_cookie(self, cookie, cur): + expires = cookie.expires + if cookie.discard: + expires = "" + + domain = unicode(cookie.domain) + path = unicode(cookie.path) + name = unicode(cookie.name) + value = unicode(cookie.value) 
+ secure = bool(int(cookie.secure)) + + if value is None: + value = name + name = "" + + last_accessed = int(time.time()) + http_only = cookie.has_nonstandard_attr("HttpOnly") + + query = cur.execute("""SELECT MAX(id) + 1 from moz_cookies""") + pk = query.fetchone()[0] + if pk is None: + pk = 1 + + return (pk, name, value, domain, path, expires, + last_accessed, secure, http_only) + + def set_cookie(self, cookie): + if cookie.discard: + CookieJar.set_cookie(self, cookie) + return + + def set_cookie(cur): + # XXX + # is this RFC 2965-correct? + # could this do an UPDATE instead? + row = self._row_from_cookie(cookie, cur) + name, unused, domain, path = row[1:5] + cur.execute("""\ +DELETE FROM moz_cookies WHERE host = ? AND path = ? AND name = ?""", + (domain, path, name)) + cur.execute("""\ +INSERT INTO moz_cookies VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?) +""", row) + self._transaction(set_cookie) + + def __iter__(self): + # session (non-persistent) cookies + for cookie in MappingIterator(self._cookies): + yield cookie + # persistent cookies + for row in self._query("""\ +SELECT * FROM moz_cookies ORDER BY name, path, host"""): + yield self._cookie_from_row(row) + + def _cookies_for_request(self, request): + session_cookies = CookieJar._cookies_for_request(self, request) + def get_cookies(cur): + query = cur.execute("SELECT host from moz_cookies") + domains = [row[0] for row in query.fetchall()] + cookies = [] + for domain in domains: + cookies += self._persistent_cookies_for_domain(domain, + request, cur) + return cookies + persistent_coookies = self._transaction(get_cookies) + return session_cookies + persistent_coookies + + def _persistent_cookies_for_domain(self, domain, request, cur): + cookies = [] + if not self._policy.domain_return_ok(domain, request): + return [] + debug("Checking %s for cookies to return", domain) + query = cur.execute("""\ +SELECT * from moz_cookies WHERE host = ? 
ORDER BY path""", + (domain,)) + cookies = [self._cookie_from_row(row) for row in query.fetchall()] + last_path = None + r = [] + for cookie in cookies: + if (cookie.path != last_path and + not self._policy.path_return_ok(cookie.path, request)): + last_path = cookie.path + continue + if not self._policy.return_ok(cookie, request): + debug(" not returning cookie") + continue + debug(" it's a match") + r.append(cookie) + return r diff --git a/couchpotato/core/media/movie/providers/trailer/mechanize/_form.py b/couchpotato/core/media/movie/providers/trailer/mechanize/_form.py new file mode 100644 index 0000000000..d45bdfc395 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/mechanize/_form.py @@ -0,0 +1,3280 @@ +"""HTML form handling for web clients. + +HTML form handling for web clients: useful for parsing HTML forms, filling them +in and returning the completed forms to the server. This code developed from a +port of Gisle Aas' Perl module HTML::Form, from the libwww-perl library, but +the interface is not the same. + +The most useful docstring is the one for HTMLForm. + +RFC 1866: HTML 2.0 +RFC 1867: Form-based File Upload in HTML +RFC 2388: Returning Values from Forms: multipart/form-data +HTML 3.2 Specification, W3C Recommendation 14 January 1997 (for ISINDEX) +HTML 4.01 Specification, W3C Recommendation 24 December 1999 + + +Copyright 2002-2007 John J. Lee +Copyright 2005 Gary Poster +Copyright 2005 Zope Corporation +Copyright 1998-2000 Gisle Aas. + +This code is free software; you can redistribute it and/or modify it +under the terms of the BSD or ZPL 2.1 licenses (see the file +COPYING.txt included with the distribution). 
+ +""" + +# TODO: +# Clean up post the merge into mechanize +# * Remove code that was duplicated in ClientForm and mechanize +# * Remove weird import stuff +# * Remove pre-Python 2.4 compatibility cruft +# * Clean up tests +# * Later release: Remove the ClientForm 0.1 backwards-compatibility switch +# Remove parser testing hack +# Clean action URI +# Switch to unicode throughout +# See Wichert Akkerman's 2004-01-22 message to c.l.py. +# Apply recommendations from google code project CURLIES +# Apply recommendations from HTML 5 spec +# Add charset parameter to Content-type headers? How to find value?? +# Functional tests to add: +# Single and multiple file upload +# File upload with missing name (check standards) +# mailto: submission & enctype text/plain?? + +# Replace by_label etc. with moniker / selector concept. Allows, e.g., a +# choice between selection by value / id / label / element contents. Or +# choice between matching labels exactly or by substring. etc. + + +__all__ = ['AmbiguityError', 'CheckboxControl', 'Control', + 'ControlNotFoundError', 'FileControl', 'FormParser', 'HTMLForm', + 'HiddenControl', 'IgnoreControl', 'ImageControl', 'IsindexControl', + 'Item', 'ItemCountError', 'ItemNotFoundError', 'Label', + 'ListControl', 'LocateError', 'Missing', 'ParseError', 'ParseFile', + 'ParseFileEx', 'ParseResponse', 'ParseResponseEx','PasswordControl', + 'RadioControl', 'ScalarControl', 'SelectControl', + 'SubmitButtonControl', 'SubmitControl', 'TextControl', + 'TextareaControl', 'XHTMLCompatibleFormParser'] + +import HTMLParser +from cStringIO import StringIO +import inspect +import logging +import random +import re +import sys +import urllib +import urlparse +import warnings + +import _beautifulsoup +import _request + +# from Python itself, for backwards compatibility of raised exceptions +import sgmllib +# bundled copy of sgmllib +import _sgmllib_copy + + +VERSION = "0.2.11" + +CHUNK = 1024 # size of chunks fed to parser, in bytes + +DEFAULT_ENCODING = 
DEFAULT_ENCODING = "latin-1"  # NOTE(review): assignment was split across chunks in this copy

_logger = logging.getLogger("mechanize.forms")
OPTIMIZATION_HACK = True


def debug(msg, *args, **kwds):
    """Log a debug message prefixed with the calling function's name.

    No-op unless _show_debug_messages() has been called --
    inspect.stack() is expensive, hence the OPTIMIZATION_HACK
    short-circuit.
    """
    if OPTIMIZATION_HACK:
        return

    caller_name = inspect.stack()[1][3]
    extended_msg = '%%s %s' % msg
    extended_args = (caller_name,) + args
    _logger.debug(extended_msg, *extended_args, **kwds)


def _show_debug_messages():
    """Switch on form-parsing debug logging to stdout."""
    global OPTIMIZATION_HACK
    OPTIMIZATION_HACK = False
    _logger.setLevel(logging.DEBUG)
    handler = logging.StreamHandler(sys.stdout)
    handler.setLevel(logging.DEBUG)
    _logger.addHandler(handler)


def deprecation(message, stack_offset=0):
    """Emit a DeprecationWarning attributed to the caller's caller."""
    warnings.warn(message, DeprecationWarning, stacklevel=3 + stack_offset)


class Missing: pass


_compress_re = re.compile(r"\s+")
def compress_text(text):
    """Strip leading/trailing whitespace and collapse internal runs of
    whitespace to single spaces."""
    return _compress_re.sub(" ", text.strip())


def normalize_line_endings(text):
    """Convert solitary CR or LF characters to CRLF pairs.

    The regular expression was truncated in this vendored copy; restored
    from upstream mechanize: a LF not preceded by CR, or a CR not followed
    by LF, is rewritten as CRLF.
    """
    return re.sub(r"(?:(?<!\r)\n)|(?:\r(?!\n))", "\r\n", text)
for a subpart of type message/rfc822 + that's (mis)used to store some header-like information. + + - Passing a keyword argument 'prefix=' to addheader(), + start*body() affects where the header is inserted; 0 means + append at the end, 1 means insert at the start; default is + append for addheader(), but insert for start*body(), which use + it to determine where the Content-type header goes. + + """ + + def __init__(self, fp, http_hdrs=None): + self._http_hdrs = http_hdrs + self._fp = fp + self._headers = [] + self._boundary = [] + self._first_part = True + + def addheader(self, key, value, prefix=0, + add_to_http_hdrs=0): + """ + prefix is ignored if add_to_http_hdrs is true. + """ + lines = value.split("\r\n") + while lines and not lines[-1]: del lines[-1] + while lines and not lines[0]: del lines[0] + if add_to_http_hdrs: + value = "".join(lines) + # 2.2 urllib2 doesn't normalize header case + self._http_hdrs.append((key.capitalize(), value)) + else: + for i in range(1, len(lines)): + lines[i] = " " + lines[i].strip() + value = "\r\n".join(lines) + "\r\n" + line = key.title() + ": " + value + if prefix: + self._headers.insert(0, line) + else: + self._headers.append(line) + + def flushheaders(self): + self._fp.writelines(self._headers) + self._headers = [] + + def startbody(self, ctype=None, plist=[], prefix=1, + add_to_http_hdrs=0, content_type=1): + """ + prefix is ignored if add_to_http_hdrs is true. 
+ """ + if content_type and ctype: + for name, value in plist: + ctype = ctype + ';\r\n %s=%s' % (name, value) + self.addheader("Content-Type", ctype, prefix=prefix, + add_to_http_hdrs=add_to_http_hdrs) + self.flushheaders() + if not add_to_http_hdrs: self._fp.write("\r\n") + self._first_part = True + return self._fp + + def startmultipartbody(self, subtype, boundary=None, plist=[], prefix=1, + add_to_http_hdrs=0, content_type=1): + boundary = boundary or choose_boundary() + self._boundary.append(boundary) + return self.startbody("multipart/" + subtype, + [("boundary", boundary)] + plist, + prefix=prefix, + add_to_http_hdrs=add_to_http_hdrs, + content_type=content_type) + + def nextpart(self): + boundary = self._boundary[-1] + if self._first_part: + self._first_part = False + else: + self._fp.write("\r\n") + self._fp.write("--" + boundary + "\r\n") + return self.__class__(self._fp) + + def lastpart(self): + if self._first_part: + self.nextpart() + boundary = self._boundary.pop() + self._fp.write("\r\n--" + boundary + "--\r\n") + + +class LocateError(ValueError): pass +class AmbiguityError(LocateError): pass +class ControlNotFoundError(LocateError): pass +class ItemNotFoundError(LocateError): pass + +class ItemCountError(ValueError): pass + +# for backwards compatibility, ParseError derives from exceptions that were +# raised by versions of ClientForm <= 0.2.5 +# TODO: move to _html +class ParseError(sgmllib.SGMLParseError, + HTMLParser.HTMLParseError): + + def __init__(self, *args, **kwds): + Exception.__init__(self, *args, **kwds) + + def __str__(self): + return Exception.__str__(self) + + +class _AbstractFormParser: + """forms attribute contains HTMLForm instances on completion.""" + # thanks to Moshe Zadka for an example of sgmllib/htmllib usage + def __init__(self, entitydefs=None, encoding=DEFAULT_ENCODING): + if entitydefs is None: + entitydefs = get_entitydefs() + self._entitydefs = entitydefs + self._encoding = encoding + + self.base = None + self.forms = 
[] + self.labels = [] + self._current_label = None + self._current_form = None + self._select = None + self._optgroup = None + self._option = None + self._textarea = None + + # forms[0] will contain all controls that are outside of any form + # self._global_form is an alias for self.forms[0] + self._global_form = None + self.start_form([]) + self.end_form() + self._current_form = self._global_form = self.forms[0] + + def do_base(self, attrs): + debug("%s", attrs) + for key, value in attrs: + if key == "href": + self.base = self.unescape_attr_if_required(value) + + def end_body(self): + debug("") + if self._current_label is not None: + self.end_label() + if self._current_form is not self._global_form: + self.end_form() + + def start_form(self, attrs): + debug("%s", attrs) + if self._current_form is not self._global_form: + raise ParseError("nested FORMs") + name = None + action = None + enctype = "application/x-www-form-urlencoded" + method = "GET" + d = {} + for key, value in attrs: + if key == "name": + name = self.unescape_attr_if_required(value) + elif key == "action": + action = self.unescape_attr_if_required(value) + elif key == "method": + method = self.unescape_attr_if_required(value.upper()) + elif key == "enctype": + enctype = self.unescape_attr_if_required(value.lower()) + d[key] = self.unescape_attr_if_required(value) + controls = [] + self._current_form = (name, action, method, enctype), d, controls + + def end_form(self): + debug("") + if self._current_label is not None: + self.end_label() + if self._current_form is self._global_form: + raise ParseError("end of FORM before start") + self.forms.append(self._current_form) + self._current_form = self._global_form + + def start_select(self, attrs): + debug("%s", attrs) + if self._select is not None: + raise ParseError("nested SELECTs") + if self._textarea is not None: + raise ParseError("SELECT inside TEXTAREA") + d = {} + for key, val in attrs: + d[key] = self.unescape_attr_if_required(val) + + 
self._select = d + self._add_label(d) + + self._append_select_control({"__select": d}) + + def end_select(self): + debug("") + if self._select is None: + raise ParseError("end of SELECT before start") + + if self._option is not None: + self._end_option() + + self._select = None + + def start_optgroup(self, attrs): + debug("%s", attrs) + if self._select is None: + raise ParseError("OPTGROUP outside of SELECT") + d = {} + for key, val in attrs: + d[key] = self.unescape_attr_if_required(val) + + self._optgroup = d + + def end_optgroup(self): + debug("") + if self._optgroup is None: + raise ParseError("end of OPTGROUP before start") + self._optgroup = None + + def _start_option(self, attrs): + debug("%s", attrs) + if self._select is None: + raise ParseError("OPTION outside of SELECT") + if self._option is not None: + self._end_option() + + d = {} + for key, val in attrs: + d[key] = self.unescape_attr_if_required(val) + + self._option = {} + self._option.update(d) + if (self._optgroup and self._optgroup.has_key("disabled") and + not self._option.has_key("disabled")): + self._option["disabled"] = None + + def _end_option(self): + debug("") + if self._option is None: + raise ParseError("end of OPTION before start") + + contents = self._option.get("contents", "").strip() + self._option["contents"] = contents + if not self._option.has_key("value"): + self._option["value"] = contents + if not self._option.has_key("label"): + self._option["label"] = contents + # stuff dict of SELECT HTML attrs into a special private key + # (gets deleted again later) + self._option["__select"] = self._select + self._append_select_control(self._option) + self._option = None + + def _append_select_control(self, attrs): + debug("%s", attrs) + controls = self._current_form[2] + name = self._select.get("name") + controls.append(("select", name, attrs)) + + def start_textarea(self, attrs): + debug("%s", attrs) + if self._textarea is not None: + raise ParseError("nested TEXTAREAs") + if self._select 
is not None: + raise ParseError("TEXTAREA inside SELECT") + d = {} + for key, val in attrs: + d[key] = self.unescape_attr_if_required(val) + self._add_label(d) + + self._textarea = d + + def end_textarea(self): + debug("") + if self._textarea is None: + raise ParseError("end of TEXTAREA before start") + controls = self._current_form[2] + name = self._textarea.get("name") + controls.append(("textarea", name, self._textarea)) + self._textarea = None + + def start_label(self, attrs): + debug("%s", attrs) + if self._current_label: + self.end_label() + d = {} + for key, val in attrs: + d[key] = self.unescape_attr_if_required(val) + taken = bool(d.get("for")) # empty id is invalid + d["__text"] = "" + d["__taken"] = taken + if taken: + self.labels.append(d) + self._current_label = d + + def end_label(self): + debug("") + label = self._current_label + if label is None: + # something is ugly in the HTML, but we're ignoring it + return + self._current_label = None + # if it is staying around, it is True in all cases + del label["__taken"] + + def _add_label(self, d): + #debug("%s", d) + if self._current_label is not None: + if not self._current_label["__taken"]: + self._current_label["__taken"] = True + d["__label"] = self._current_label + + def handle_data(self, data): + debug("%s", data) + + if self._option is not None: + # self._option is a dictionary of the OPTION element's HTML + # attributes, but it has two special keys, one of which is the + # special "contents" key contains text between OPTION tags (the + # other is the "__select" key: see the end_option method) + map = self._option + key = "contents" + elif self._textarea is not None: + map = self._textarea + key = "value" + data = normalize_line_endings(data) + # not if within option or textarea + elif self._current_label is not None: + map = self._current_label + key = "__text" + else: + return + + if data and not map.has_key(key): + # according to + # http://www.w3.org/TR/html4/appendix/notes.html#h-B.3.1 line 
break + # immediately after start tags or immediately before end tags must + # be ignored, but real browsers only ignore a line break after a + # start tag, so we'll do that. + if data[0:2] == "\r\n": + data = data[2:] + elif data[0:1] in ["\n", "\r"]: + data = data[1:] + map[key] = data + else: + map[key] = map[key] + data + + def do_button(self, attrs): + debug("%s", attrs) + d = {} + d["type"] = "submit" # default + for key, val in attrs: + d[key] = self.unescape_attr_if_required(val) + controls = self._current_form[2] + + type = d["type"] + name = d.get("name") + # we don't want to lose information, so use a type string that + # doesn't clash with INPUT TYPE={SUBMIT,RESET,BUTTON} + # e.g. type for BUTTON/RESET is "resetbutton" + # (type for INPUT/RESET is "reset") + type = type+"button" + self._add_label(d) + controls.append((type, name, d)) + + def do_input(self, attrs): + debug("%s", attrs) + d = {} + d["type"] = "text" # default + for key, val in attrs: + d[key] = self.unescape_attr_if_required(val) + controls = self._current_form[2] + + type = d["type"] + name = d.get("name") + self._add_label(d) + controls.append((type, name, d)) + + def do_isindex(self, attrs): + debug("%s", attrs) + d = {} + for key, val in attrs: + d[key] = self.unescape_attr_if_required(val) + controls = self._current_form[2] + + self._add_label(d) + # isindex doesn't have type or name HTML attributes + controls.append(("isindex", None, d)) + + def handle_entityref(self, name): + #debug("%s", name) + self.handle_data(unescape( + '&%s;' % name, self._entitydefs, self._encoding)) + + def handle_charref(self, name): + #debug("%s", name) + self.handle_data(unescape_charref(name, self._encoding)) + + def unescape_attr(self, name): + #debug("%s", name) + return unescape(name, self._entitydefs, self._encoding) + + def unescape_attrs(self, attrs): + #debug("%s", attrs) + escaped_attrs = {} + for key, val in attrs.items(): + try: + val.items + except AttributeError: + escaped_attrs[key] = 
self.unescape_attr(val) + else: + # e.g. "__select" -- yuck! + escaped_attrs[key] = self.unescape_attrs(val) + return escaped_attrs + + def unknown_entityref(self, ref): self.handle_data("&%s;" % ref) + def unknown_charref(self, ref): self.handle_data("&#%s;" % ref) + + +class XHTMLCompatibleFormParser(_AbstractFormParser, HTMLParser.HTMLParser): + """Good for XHTML, bad for tolerance of incorrect HTML.""" + # thanks to Michael Howitz for this! + def __init__(self, entitydefs=None, encoding=DEFAULT_ENCODING): + HTMLParser.HTMLParser.__init__(self) + _AbstractFormParser.__init__(self, entitydefs, encoding) + + def feed(self, data): + try: + HTMLParser.HTMLParser.feed(self, data) + except HTMLParser.HTMLParseError, exc: + raise ParseError(exc) + + def start_option(self, attrs): + _AbstractFormParser._start_option(self, attrs) + + def end_option(self): + _AbstractFormParser._end_option(self) + + def handle_starttag(self, tag, attrs): + try: + method = getattr(self, "start_" + tag) + except AttributeError: + try: + method = getattr(self, "do_" + tag) + except AttributeError: + pass # unknown tag + else: + method(attrs) + else: + method(attrs) + + def handle_endtag(self, tag): + try: + method = getattr(self, "end_" + tag) + except AttributeError: + pass # unknown tag + else: + method() + + def unescape(self, name): + # Use the entitydefs passed into constructor, not + # HTMLParser.HTMLParser's entitydefs. 
+ return self.unescape_attr(name) + + def unescape_attr_if_required(self, name): + return name # HTMLParser.HTMLParser already did it + def unescape_attrs_if_required(self, attrs): + return attrs # ditto + + def close(self): + HTMLParser.HTMLParser.close(self) + self.end_body() + + +class _AbstractSgmllibParser(_AbstractFormParser): + + def do_option(self, attrs): + _AbstractFormParser._start_option(self, attrs) + + # we override this attr to decode hex charrefs + entity_or_charref = re.compile( + '&(?:([a-zA-Z][-.a-zA-Z0-9]*)|#(x?[0-9a-fA-F]+))(;?)') + def convert_entityref(self, name): + return unescape("&%s;" % name, self._entitydefs, self._encoding) + def convert_charref(self, name): + return unescape_charref("%s" % name, self._encoding) + def unescape_attr_if_required(self, name): + return name # sgmllib already did it + def unescape_attrs_if_required(self, attrs): + return attrs # ditto + + +class FormParser(_AbstractSgmllibParser, _sgmllib_copy.SGMLParser): + """Good for tolerance of incorrect HTML, bad for XHTML.""" + def __init__(self, entitydefs=None, encoding=DEFAULT_ENCODING): + _sgmllib_copy.SGMLParser.__init__(self) + _AbstractFormParser.__init__(self, entitydefs, encoding) + + def feed(self, data): + try: + _sgmllib_copy.SGMLParser.feed(self, data) + except _sgmllib_copy.SGMLParseError, exc: + raise ParseError(exc) + + def close(self): + _sgmllib_copy.SGMLParser.close(self) + self.end_body() + + +class _AbstractBSFormParser(_AbstractSgmllibParser): + + bs_base_class = None + + def __init__(self, entitydefs=None, encoding=DEFAULT_ENCODING): + _AbstractFormParser.__init__(self, entitydefs, encoding) + self.bs_base_class.__init__(self) + + def handle_data(self, data): + _AbstractFormParser.handle_data(self, data) + self.bs_base_class.handle_data(self, data) + + def feed(self, data): + try: + self.bs_base_class.feed(self, data) + except _sgmllib_copy.SGMLParseError, exc: + raise ParseError(exc) + + def close(self): + self.bs_base_class.close(self) + 
self.end_body() + + +class RobustFormParser(_AbstractBSFormParser, _beautifulsoup.BeautifulSoup): + + """Tries to be highly tolerant of incorrect HTML.""" + + bs_base_class = _beautifulsoup.BeautifulSoup + + +class NestingRobustFormParser(_AbstractBSFormParser, + _beautifulsoup.ICantBelieveItsBeautifulSoup): + + """Tries to be highly tolerant of incorrect HTML. + + Different from RobustFormParser in that it more often guesses nesting + above missing end tags (see BeautifulSoup docs). + """ + + bs_base_class = _beautifulsoup.ICantBelieveItsBeautifulSoup + + +#FormParser = XHTMLCompatibleFormParser # testing hack +#FormParser = RobustFormParser # testing hack + + +def ParseResponseEx(response, + select_default=False, + form_parser_class=FormParser, + request_class=_request.Request, + entitydefs=None, + encoding=DEFAULT_ENCODING, + + # private + _urljoin=urlparse.urljoin, + _urlparse=urlparse.urlparse, + _urlunparse=urlparse.urlunparse, + ): + """Identical to ParseResponse, except that: + + 1. The returned list contains an extra item. The first form in the list + contains all controls not contained in any FORM element. + + 2. The arguments ignore_errors and backwards_compat have been removed. + + 3. Backwards-compatibility mode (backwards_compat=True) is not available. + """ + return _ParseFileEx(response, response.geturl(), + select_default, + False, + form_parser_class, + request_class, + entitydefs, + False, + encoding, + _urljoin=_urljoin, + _urlparse=_urlparse, + _urlunparse=_urlunparse, + ) + +def ParseFileEx(file, base_uri, + select_default=False, + form_parser_class=FormParser, + request_class=_request.Request, + entitydefs=None, + encoding=DEFAULT_ENCODING, + + # private + _urljoin=urlparse.urljoin, + _urlparse=urlparse.urlparse, + _urlunparse=urlparse.urlunparse, + ): + """Identical to ParseFile, except that: + + 1. The returned list contains an extra item. The first form in the list + contains all controls not contained in any FORM element. + + 2. 
The arguments ignore_errors and backwards_compat have been removed. + + 3. Backwards-compatibility mode (backwards_compat=True) is not available. + """ + return _ParseFileEx(file, base_uri, + select_default, + False, + form_parser_class, + request_class, + entitydefs, + False, + encoding, + _urljoin=_urljoin, + _urlparse=_urlparse, + _urlunparse=_urlunparse, + ) + +def ParseString(text, base_uri, *args, **kwds): + fh = StringIO(text) + return ParseFileEx(fh, base_uri, *args, **kwds) + +def ParseResponse(response, *args, **kwds): + """Parse HTTP response and return a list of HTMLForm instances. + + The return value of mechanize.urlopen can be conveniently passed to this + function as the response parameter. + + mechanize.ParseError is raised on parse errors. + + response: file-like object (supporting read() method) with a method + geturl(), returning the URI of the HTTP response + select_default: for multiple-selection SELECT controls and RADIO controls, + pick the first item as the default if none are selected in the HTML + form_parser_class: class to instantiate and use to pass + request_class: class to return from .click() method (default is + mechanize.Request) + entitydefs: mapping like {"&": "&", ...} containing HTML entity + definitions (a sensible default is used) + encoding: character encoding used for encoding numeric character references + when matching link text. mechanize does not attempt to find the encoding + in a META HTTP-EQUIV attribute in the document itself (mechanize, for + example, does do that and will pass the correct value to mechanize using + this parameter). + + backwards_compat: boolean that determines whether the returned HTMLForm + objects are backwards-compatible with old code. If backwards_compat is + true: + + - ClientForm 0.1 code will continue to work as before. + + - Label searches that do not specify a nr (number or count) will always + get the first match, even if other controls match. 
If + backwards_compat is False, label searches that have ambiguous results + will raise an AmbiguityError. + + - Item label matching is done by strict string comparison rather than + substring matching. + + - De-selecting individual list items is allowed even if the Item is + disabled. + + The backwards_compat argument will be removed in a future release. + + Pass a true value for select_default if you want the behaviour specified by + RFC 1866 (the HTML 2.0 standard), which is to select the first item in a + RADIO or multiple-selection SELECT control if none were selected in the + HTML. Most browsers (including Microsoft Internet Explorer (IE) and + Netscape Navigator) instead leave all items unselected in these cases. The + W3C HTML 4.0 standard leaves this behaviour undefined in the case of + multiple-selection SELECT controls, but insists that at least one RADIO + button should be checked at all times, in contradiction to browser + behaviour. + + There is a choice of parsers. mechanize.XHTMLCompatibleFormParser (uses + HTMLParser.HTMLParser) works best for XHTML, mechanize.FormParser (uses + bundled copy of sgmllib.SGMLParser) (the default) works better for ordinary + grubby HTML. Note that HTMLParser is only available in Python 2.2 and + later. You can pass your own class in here as a hack to work around bad + HTML, but at your own risk: there is no well-defined interface. + + """ + return _ParseFileEx(response, response.geturl(), *args, **kwds)[1:] + +def ParseFile(file, base_uri, *args, **kwds): + """Parse HTML and return a list of HTMLForm instances. + + mechanize.ParseError is raised on parse errors. + + file: file-like object (supporting read() method) containing HTML with zero + or more forms to be parsed + base_uri: the URI of the document (note that the base URI used to submit + the form will be that given in the BASE element if present, not that of + the document) + + For the other arguments and further details, see ParseResponse.__doc__. 
+ + """ + return _ParseFileEx(file, base_uri, *args, **kwds)[1:] + +def _ParseFileEx(file, base_uri, + select_default=False, + ignore_errors=False, + form_parser_class=FormParser, + request_class=_request.Request, + entitydefs=None, + backwards_compat=True, + encoding=DEFAULT_ENCODING, + _urljoin=urlparse.urljoin, + _urlparse=urlparse.urlparse, + _urlunparse=urlparse.urlunparse, + ): + if backwards_compat: + deprecation("operating in backwards-compatibility mode", 1) + fp = form_parser_class(entitydefs, encoding) + while 1: + data = file.read(CHUNK) + try: + fp.feed(data) + except ParseError, e: + e.base_uri = base_uri + raise + if len(data) != CHUNK: break + fp.close() + if fp.base is not None: + # HTML BASE element takes precedence over document URI + base_uri = fp.base + labels = [] # Label(label) for label in fp.labels] + id_to_labels = {} + for l in fp.labels: + label = Label(l) + labels.append(label) + for_id = l["for"] + coll = id_to_labels.get(for_id) + if coll is None: + id_to_labels[for_id] = [label] + else: + coll.append(label) + forms = [] + for (name, action, method, enctype), attrs, controls in fp.forms: + if action is None: + action = base_uri + else: + action = _urljoin(base_uri, action) + # would be nice to make HTMLForm class (form builder) pluggable + form = HTMLForm( + action, method, enctype, name, attrs, request_class, + forms, labels, id_to_labels, backwards_compat) + form._urlparse = _urlparse + form._urlunparse = _urlunparse + for ii in range(len(controls)): + type, name, attrs = controls[ii] + # index=ii*10 allows ImageControl to return multiple ordered pairs + form.new_control( + type, name, attrs, select_default=select_default, index=ii*10) + forms.append(form) + for form in forms: + form.fixup() + return forms + + +class Label: + def __init__(self, attrs): + self.id = attrs.get("for") + self._text = attrs.get("__text").strip() + self._ctext = compress_text(self._text) + self.attrs = attrs + self._backwards_compat = False # maintained by 
HTMLForm + + def __getattr__(self, name): + if name == "text": + if self._backwards_compat: + return self._text + else: + return self._ctext + return getattr(Label, name) + + def __setattr__(self, name, value): + if name == "text": + # don't see any need for this, so make it read-only + raise AttributeError("text attribute is read-only") + self.__dict__[name] = value + + def __str__(self): + return "" % (self.id, self.text) + + +def _get_label(attrs): + text = attrs.get("__label") + if text is not None: + return Label(text) + else: + return None + +class Control: + """An HTML form control. + + An HTMLForm contains a sequence of Controls. The Controls in an HTMLForm + are accessed using the HTMLForm.find_control method or the + HTMLForm.controls attribute. + + Control instances are usually constructed using the ParseFile / + ParseResponse functions. If you use those functions, you can ignore the + rest of this paragraph. A Control is only properly initialised after the + fixup method has been called. In fact, this is only strictly necessary for + ListControl instances. This is necessary because ListControls are built up + from ListControls each containing only a single item, and their initial + value(s) can only be known after the sequence is complete. + + The types and values that are acceptable for assignment to the value + attribute are defined by subclasses. + + If the disabled attribute is true, this represents the state typically + represented by browsers by 'greying out' a control. If the disabled + attribute is true, the Control will raise AttributeError if an attempt is + made to change its value. In addition, the control will not be considered + 'successful' as defined by the W3C HTML 4 standard -- ie. it will + contribute no data to the return value of the HTMLForm.click* methods. To + enable a control, set the disabled attribute to a false value. 
+ + If the readonly attribute is true, the Control will raise AttributeError if + an attempt is made to change its value. To make a control writable, set + the readonly attribute to a false value. + + All controls have the disabled and readonly attributes, not only those that + may have the HTML attributes of the same names. + + On assignment to the value attribute, the following exceptions are raised: + TypeError, AttributeError (if the value attribute should not be assigned + to, because the control is disabled, for example) and ValueError. + + If the name or value attributes are None, or the value is an empty list, or + if the control is disabled, the control is not successful. + + Public attributes: + + type: string describing type of control (see the keys of the + HTMLForm.type2class dictionary for the allowable values) (readonly) + name: name of control (readonly) + value: current value of control (subclasses may allow a single value, a + sequence of values, or either) + disabled: disabled state + readonly: readonly state + id: value of id HTML attribute + + """ + def __init__(self, type, name, attrs, index=None): + """ + type: string describing type of control (see the keys of the + HTMLForm.type2class dictionary for the allowable values) + name: control name + attrs: HTML attributes of control's HTML element + + """ + raise NotImplementedError() + + def add_to_form(self, form): + self._form = form + form.controls.append(self) + + def fixup(self): + pass + + def is_of_kind(self, kind): + raise NotImplementedError() + + def clear(self): + raise NotImplementedError() + + def __getattr__(self, name): raise NotImplementedError() + def __setattr__(self, name, value): raise NotImplementedError() + + def pairs(self): + """Return list of (key, value) pairs suitable for passing to urlencode. + """ + return [(k, v) for (i, k, v) in self._totally_ordered_pairs()] + + def _totally_ordered_pairs(self): + """Return list of (key, value, index) tuples. 
+ + Like pairs, but allows preserving correct ordering even where several + controls are involved. + + """ + raise NotImplementedError() + + def _write_mime_data(self, mw, name, value): + """Write data for a subitem of this control to a MimeWriter.""" + # called by HTMLForm + mw2 = mw.nextpart() + mw2.addheader("Content-Disposition", + 'form-data; name="%s"' % name, 1) + f = mw2.startbody(prefix=0) + f.write(value) + + def __str__(self): + raise NotImplementedError() + + def get_labels(self): + """Return all labels (Label instances) for this control. + + If the control was surrounded by a ','').decode("utf-8") + urldic.update({source_title:source_url}) + + except: + x+=1 + continue + x+=1 + self.logg(str(len(urldic))+ ' resultats trouves sur google') + return urldic + + def allocinesearch(self,moviename): + series=['2','3','4','5','6','7','8'] + listallovostfr=[] + listallovo=[] + listallovf=[] + self.logg('Tentative de recherche sur Allocine de ' +moviename[:-5]) + try: + search = api.search(moviename[:-5], "movie") + for result in search['feed']['movie']: + countseries=0 + ficheresult=api.movie(result['code']) + ficheresulttitle=self.cleantitle(ficheresult['movie']['title']) + ficheresulttitleori=self.cleantitle(ficheresult['movie']['originalTitle']) + yearresult=ficheresult['movie']['productionYear'] + if not yearresult: + yearresult=0 + for x in series: + if (x in ficheresulttitle or x in ficheresulttitleori) and (not '3d' in ficheresulttitle and not '3d' in ficheresulttitleori): + if x not in moviename[:-5]: + countseries+=1 + if self.cleantitle(moviename[:-5].decode('unicode-escape')) in ficheresulttitle and countseries==0 and int(moviename[len(moviename)-4:])+2>yearresult and int(moviename[len(moviename)-4:])-2=481: + self.logg('Bande annonce vf et HD trouve sur Allocine jarrete de chercher') + break + else: + self.logg('Bande annonce vf non HD trouve sur Allocine je continue de chercher') + elif 'annonce' in str(lien).lower() and 'vost' in 
str(lien).lower(): + lienid=lien['href'][:lien['href'].find('&')].replace('/video/player_gen_cmedia=','') + self.logg("Potentiel code de bande annonce [{0}] en VOSTFR".format(lienid)) + trailerallo = api.trailer(lienid) + long=len(trailerallo['media']['rendition']) + bestba=trailerallo['media']['rendition'][long-1] + linkallo=trailerallo['media']['rendition'][long-1]['href'] + heightbaallo=bestba['height'] + longadr=len(linkallo) + extallo=linkallo[longadr-3:] + + listallovostfr.append({'link':linkallo,'ext':extallo,'height':heightbaallo}) + self.logg('Bande annonce vostfr trouve sur Allocine je continue de chercher') + elif 'annonce' in str(lien).lower() and ' VO' in str(lien): + lienid=lien['href'][:lien['href'].find('&')].replace('/video/player_gen_cmedia=','') + trailerallo = api.trailer(lienid) + long=len(trailerallo['media']['rendition']) + bestba=trailerallo['media']['rendition'][long-1] + linkallo=trailerallo['media']['rendition'][long-1]['href'] + heightbaallo=bestba['height'] + longadr=len(linkallo) + extallo=linkallo[longadr-3:] + if hasattr(trailerallo['media'],'subtitles') and trailerallo['media']['subtitles']['$'].lower().replace('Г','c') ==u'francais': + self.logg("Potentiel code de bande annonce [{0}] en VOSTFR".format(lienid)) + listallovostfr.append({'link':linkallo,'ext':extallo,'height':heightbaallo}) + self.logg('Bande annonce vostfr trouve sur Allocine je continue de chercher') + else: + self.logg("Potentiel code de bande annonce [{0}] en VO".format(lienid)) + listallovo.append({'link':linkallo,'ext':extallo,'height':heightbaallo}) + self.logg('Bande annonce vo trouve sur Allocine je continue de chercher') + + else: + continue + except Exception,e: + print e + continue + self.logg(str(len(listallovf)) +" bandes annonces en VF trouvees sur allocine") + self.logg(str(len(listallovostfr)) +" bandes annonces en VOSTFR trouvees sur allocine") + self.logg(str(len(listallovo)) +" bandes annonces en VO trouvees sur allocine") + return 
listallovf,listallovostfr,listallovo + except : + self.logg(str(len(listallovf)) +" bandes annonces en VF trouvees sur allocine") + self.logg(str(len(listallovostfr)) +" bandes annonces en VOSTFR trouvees sur allocine") + self.logg(str(len(listallovo)) +" bandes annonces en VO trouvees sur allocine") + return listallovf,listallovostfr,listallovo + + def quacontrol(self,url): + quallist=[] + p=subprocess.Popen([sys.executable, 'youtube_dl/__main__.py', '-F',url],cwd=rootDir, shell=True, stdout=subprocess.PIPE,stderr=subprocess.PIPE) + while p.poll() is None: + l = p.stdout.readline() + quallist.append(l) + (out, err) = p.communicate() + for qual in quallist: + if 'best' in qual and ('720' in qual or '1080' in qual): + return True + else: + continue + return False + + def quacontrolallo(self,listallo,type): + bestqualallo=0 + for linkvf in listallo: + if bestqualallo=481: + self.videodl(listvfallo,filename,moviename,destination,True,maxqual) + return True + else: + self.logg('Bande annonce en VF non HD trouvee sur Allocine tentative de recherche dune meilleure qualite sur google') + else: + self.logg('Rien trouve sur Allocine en VF tentative de recherche sur google') + urldic=self.googlesearch(moviename) + listgooglevf, listgooglevostfr,listgooglevo=self.cleandic(urldic,moviename) + if listvfallo: + maxqual=self.quacontrolallo(listvfallo,'vf') + if listgooglevf: + self.logg('Jai trouve des bandes annonces VF sur google, controlons leur qualite') + cleanlistvf,listlowqvf=self.totqualcontrol(listgooglevf,'vf') + if cleanlistvf: + self.logg('Si jen crois google jai trouve mieux que la bande annonce allocine . 
Lets go') + self.videodl(cleanlistvf,filename,moviename,destination) + return True + else: + self.logg('Rien trouve de mieux sur google pour : '+moviename+' je telecharge donc la bande annonce non HD vf Allocine') + maxqual=self.quacontrolallo(listvfallo,'vf') + self.videodl(listvfallo,filename,moviename,destination,True,maxqual) + return True + else: + self.logg('Rien trouve de mieux sur google pour : '+moviename+' je telecharge donc la bande annonce non HD vf Allocine') + maxqual=self.quacontrolallo(listvfallo,'vf') + self.videodl(listvfallo,filename,moviename,destination,True,maxqual) + return True + + elif listgooglevf: + cleanlistvf,listlowqvf=self.totqualcontrol(listgooglevf,'vf') + if cleanlistvf: + self.logg('Si jen crois google jai trouve mieux que la bande annonce allocine . Lets go') + self.videodl(cleanlistvf,filename,moviename,destination) + return True + elif listlowqvf: + self.logg('Rien trouve sur Allocine pour : ' +moviename+' je recupere donc une bande annonce non HD vf trouve sur google') + self.videodl(listlowqvf,filename,moviename,destination) + return True + elif listvostfrallo: + maxqual=self.quacontrolallo(listvostfrallo,'vostfr') + if maxqual>=481: + self.videodl(cleanlistvf,filename,moviename,destination,True,maxqual) + return True + else: + if listgooglevostfr: + cleanlistvostfr,listlowqvostfr=self.totqualcontrol(listgooglevostfr,'vostfr') + if cleanlistvostfr: + self.logg('Si jen crois google jai trouve mieux que la bande annonce allocine . 
Lets go') + self.videodl(cleanlistvostfr,filename,moviename,destination) + return True + else: + self.logg('Rien trouve de mieux sur google pour : '+moviename+' je telecharge donc la bande annonce non HD vostfr Allocine') + self.videodl(listvostfrallo,filename,moviename,destination,True,maxqual) + return True + else: + self.logg('Rien trouve de mieux sur google pour : '+moviename+' je telecharge donc la bande annonce non HD vostfr Allocine') + self.videodl(listvostfrallo,filename,moviename,destination,True,maxqual) + return True + + elif listgooglevostfr: + cleanlistvostfr,listlowqvostfr=self.totqualcontrol(listgooglevostfr,'vostfr') + if cleanlistvostfr: + self.logg('Si jen crois google jai trouve mieux que la bande annonce allocine . Lets go') + self.videodl(cleanlistvostfr,filename,moviename,destination) + return True + elif listlowqvostfr: + self.logg('Rien trouve sur Allocine pour : ' +moviename+' je recupere donc une bande annonce non HD vostfr trouve sur google') + self.videodl(listlowqvostfr,filename,moviename,destination) + return True + elif listvoallo: + maxqual=self.quacontrolallo(listvoallo,'vo') + if maxqual>=481: + self.videodl(listvoallo,filename,moviename,destination,True,maxqual) + return True + else: + if listgooglevo: + cleanlistvo,listlowqvo=self.totqualcontrol(listgooglevo,'vo') + if cleanlistvo: + self.logg('Si jen crois google jai trouve mieux que la bande annonce allocine . 
Lets go') + self.videodl(cleanlistvo,filename,moviename,destination) + return True + else: + self.logg('Rien trouve de mieux sur google pour : '+moviename+' je telecharge donc la bande annonce non HD vo Allocine') + self.videodl(listvoallo,filename,moviename,destination,True,maxqual) + return True + else: + self.logg('Rien trouve de mieux sur google pour : '+moviename+' je telecharge donc la bande annonce non HD vo Allocine') + self.videodl(listvoallo,filename,moviename,destination,True,maxqual) + return True + + elif listgooglevo: + cleanlistvo,listlowqvo=self.totqualcontrol(listgooglevo,'vos') + if cleanlistvo: + self.logg('Si jen crois google jai trouve mieux que la bande annonce allocine . Lets go') + self.videodl(cleanlistvo,filename,moviename,destination) + return True + elif listlowqvo: + self.logg('Rien trouve sur Allocine pour : ' +moviename+' je recupere donc une bande annonce non HD vo trouve sur google') + self.videodl(listlowqvo,filename,moviename,destination) + return True + else: + self.logg('Snifff encore un film pourri pas de bande annonce trouve pour ' + moviename) + return False diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/FileDownloader.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/FileDownloader.py new file mode 100644 index 0000000000..5c8e676a20 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/FileDownloader.py @@ -0,0 +1,12 @@ +# Legacy file for backwards compatibility, use youtube_dl.downloader instead! 
+from .downloader import FileDownloader as RealFileDownloader +from .downloader import get_suitable_downloader + + +# This class reproduces the old behaviour of FileDownloader +class FileDownloader(RealFileDownloader): + def _do_download(self, filename, info_dict): + real_fd = get_suitable_downloader(info_dict)(self.ydl, self.params) + for ph in self._progress_hooks: + real_fd.add_progress_hook(ph) + return real_fd.download(filename, info_dict) diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/YoutubeDL.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/YoutubeDL.py new file mode 100644 index 0000000000..e2b823f667 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/YoutubeDL.py @@ -0,0 +1,1480 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +from __future__ import absolute_import, unicode_literals + +import collections +import datetime +import errno +import io +import itertools +import json +import locale +import os +import platform +import re +import shutil +import subprocess +import socket +import sys +import time +import traceback + +if os.name == 'nt': + import ctypes + +from .compat import ( + compat_cookiejar, + compat_expanduser, + compat_http_client, + compat_kwargs, + compat_str, + compat_urllib_error, + compat_urllib_request, +) +from .utils import ( + escape_url, + ContentTooShortError, + date_from_str, + DateRange, + DEFAULT_OUTTMPL, + determine_ext, + DownloadError, + encodeFilename, + ExtractorError, + format_bytes, + formatSeconds, + get_term_width, + locked_file, + make_HTTPS_handler, + MaxDownloadsReached, + PagedList, + PostProcessingError, + platform_name, + preferredencoding, + SameFileError, + sanitize_filename, + subtitles_filename, + takewhile_inclusive, + UnavailableVideoError, + url_basename, + write_json_file, + write_string, + YoutubeDLHandler, + prepend_extension, + args_to_str, +) +from .cache import Cache +from .extractor import get_info_extractor, gen_extractors +from 
.downloader import get_suitable_downloader +from .downloader.rtmp import rtmpdump_version +from .postprocessor import ( + FFmpegMergerPP, + FFmpegPostProcessor, + get_postprocessor, +) +from .version import __version__ + + +class YoutubeDL(object): + """YoutubeDL class. + + YoutubeDL objects are the ones responsible of downloading the + actual video file and writing it to disk if the user has requested + it, among some other tasks. In most cases there should be one per + program. As, given a video URL, the downloader doesn't know how to + extract all the needed information, task that InfoExtractors do, it + has to pass the URL to one of them. + + For this, YoutubeDL objects have a method that allows + InfoExtractors to be registered in a given order. When it is passed + a URL, the YoutubeDL object handles it to the first InfoExtractor it + finds that reports being able to handle it. The InfoExtractor extracts + all the information about the video or videos the URL refers to, and + YoutubeDL process the extracted information, possibly using a File + Downloader to download the video. + + YoutubeDL objects accept a lot of parameters. In order not to saturate + the object constructor with arguments, it receives a dictionary of + options instead. These options are available through the params + attribute for the InfoExtractors to use. The YoutubeDL also + registers itself as the downloader in charge for the InfoExtractors + that are added to it, so this is a "mutual registration". + + Available options: + + username: Username for authentication purposes. + password: Password for authentication purposes. + videopassword: Password for acces a video. + usenetrc: Use netrc for authentication instead. + verbose: Print additional info to stdout. + quiet: Do not print messages to stdout. + no_warnings: Do not print out anything for warnings. + forceurl: Force printing final URL. + forcetitle: Force printing title. + forceid: Force printing ID. 
+ forcethumbnail: Force printing thumbnail URL. + forcedescription: Force printing description. + forcefilename: Force printing final filename. + forceduration: Force printing duration. + forcejson: Force printing info_dict as JSON. + dump_single_json: Force printing the info_dict of the whole playlist + (or video) as a single JSON line. + simulate: Do not download the video files. + format: Video format code. See options.py for more information. + format_limit: Highest quality format to try. + outtmpl: Template for output names. + restrictfilenames: Do not allow "&" and spaces in file names + ignoreerrors: Do not stop on download errors. + nooverwrites: Prevent overwriting files. + playliststart: Playlist item to start at. + playlistend: Playlist item to end at. + playlistreverse: Download playlist items in reverse order. + matchtitle: Download only matching titles. + rejecttitle: Reject downloads for matching titles. + logger: Log messages to a logging.Logger instance. + logtostderr: Log messages to stderr instead of stdout. + writedescription: Write the video description to a .description file + writeinfojson: Write the video description to a .info.json file + writeannotations: Write the video annotations to a .annotations.xml file + writethumbnail: Write the thumbnail image to a file + writesubtitles: Write the video subtitles to a file + writeautomaticsub: Write the automatic subtitles to a file + allsubtitles: Downloads all the subtitles of the video + (requires writesubtitles or writeautomaticsub) + listsubtitles: Lists all available subtitles for the video + subtitlesformat: Subtitle format [srt/sbv/vtt] (default=srt) + subtitleslangs: List of languages of the subtitles to download + keepvideo: Keep the video file after post-processing + daterange: A DateRange object, download only if the upload_date is in the range. + skip_download: Skip the actual download of the video file + cachedir: Location of the cache files in the filesystem. 
+ False to disable filesystem cache. + noplaylist: Download single video instead of a playlist if in doubt. + age_limit: An integer representing the user's age in years. + Unsuitable videos for the given age are skipped. + min_views: An integer representing the minimum view count the video + must have in order to not be skipped. + Videos without view count information are always + downloaded. None for no limit. + max_views: An integer representing the maximum view count. + Videos that are more popular than that are not + downloaded. + Videos without view count information are always + downloaded. None for no limit. + download_archive: File name of a file where all downloads are recorded. + Videos already present in the file are not downloaded + again. + cookiefile: File name where cookies should be read from and dumped to. + nocheckcertificate:Do not verify SSL certificates + prefer_insecure: Use HTTP instead of HTTPS to retrieve information. + At the moment, this is only supported by YouTube. + proxy: URL of the proxy server to use + socket_timeout: Time to wait for unresponsive hosts, in seconds + bidi_workaround: Work around buggy terminals without bidirectional text + support, using fridibi + debug_printtraffic:Print out sent and received HTTP traffic + include_ads: Download ads as well + default_search: Prepend this string if an input url is not valid. + 'auto' for elaborate guessing + encoding: Use this encoding instead of the system-specified. + extract_flat: Do not resolve URLs, return the immediate result. + Pass in 'in_playlist' to only show this behavior for + playlist items. + postprocessors: A list of dictionaries, each with an entry + * key: The name of the postprocessor. See + youtube_dl/postprocessor/__init__.py for a list. + as well as any further keyword arguments for the + postprocessor. 
+ progress_hooks: A list of functions that get called on download + progress, with a dictionary with the entries + * filename: The final filename + * status: One of "downloading" and "finished" + + The dict may also have some of the following entries: + + * downloaded_bytes: Bytes on disk + * total_bytes: Size of the whole file, None if unknown + * tmpfilename: The filename we're currently writing to + * eta: The estimated time in seconds, None if unknown + * speed: The download speed in bytes/second, None if + unknown + + Progress hooks are guaranteed to be called at least once + (with status "finished") if the download is successful. + + + The following parameters are not used by YoutubeDL itself, they are used by + the FileDownloader: + nopart, updatetime, buffersize, ratelimit, min_filesize, max_filesize, test, + noresizebuffer, retries, continuedl, noprogress, consoletitle + + The following options are used by the post processors: + prefer_ffmpeg: If True, use ffmpeg instead of avconv if both are available, + otherwise prefer avconv. 
+ exec_cmd: Arbitrary command to run after downloading + """ + + params = None + _ies = [] + _pps = [] + _download_retcode = None + _num_downloads = None + _screen_file = None + + def __init__(self, params=None, auto_init=True): + """Create a FileDownloader object with the given options.""" + if params is None: + params = {} + self._ies = [] + self._ies_instances = {} + self._pps = [] + self._progress_hooks = [] + self._download_retcode = 0 + self._num_downloads = 0 + self._screen_file = [sys.stdout, sys.stderr][params.get('logtostderr', False)] + self._err_file = sys.stderr + self.params = params + self.cache = Cache(self) + + if params.get('bidi_workaround', False): + try: + import pty + master, slave = pty.openpty() + width = get_term_width() + if width is None: + width_args = [] + else: + width_args = ['-w', str(width)] + sp_kwargs = dict( + stdin=subprocess.PIPE, + stdout=slave, + stderr=self._err_file) + try: + self._output_process = subprocess.Popen( + ['bidiv'] + width_args, **sp_kwargs + ) + except OSError: + self._output_process = subprocess.Popen( + ['fribidi', '-c', 'UTF-8'] + width_args, **sp_kwargs) + self._output_channel = os.fdopen(master, 'rb') + except OSError as ose: + if ose.errno == 2: + self.report_warning('Could not find fribidi executable, ignoring --bidi-workaround . Make sure that fribidi is an executable file in one of the directories in your $PATH.') + else: + raise + + if (sys.version_info >= (3,) and sys.platform != 'win32' and + sys.getfilesystemencoding() in ['ascii', 'ANSI_X3.4-1968'] + and not params.get('restrictfilenames', False)): + # On Python 3, the Unicode filesystem API will throw errors (#1474) + self.report_warning( + 'Assuming --restrict-filenames since file system encoding ' + 'cannot encode all characters. ' + 'Set the LC_ALL environment variable to fix this.') + self.params['restrictfilenames'] = True + + if '%(stitle)s' in self.params.get('outtmpl', ''): + self.report_warning('%(stitle)s is deprecated. 
Use the %(title)s and the --restrict-filenames flag(which also secures %(uploader)s et al) instead.') + + self._setup_opener() + + if auto_init: + self.print_debug_header() + self.add_default_info_extractors() + + for pp_def_raw in self.params.get('postprocessors', []): + pp_class = get_postprocessor(pp_def_raw['key']) + pp_def = dict(pp_def_raw) + del pp_def['key'] + pp = pp_class(self, **compat_kwargs(pp_def)) + self.add_post_processor(pp) + + for ph in self.params.get('progress_hooks', []): + self.add_progress_hook(ph) + + def warn_if_short_id(self, argv): + # short YouTube ID starting with dash? + idxs = [ + i for i, a in enumerate(argv) + if re.match(r'^-[0-9A-Za-z_-]{10}$', a)] + if idxs: + correct_argv = ( + ['youtube-dl'] + + [a for i, a in enumerate(argv) if i not in idxs] + + ['--'] + [argv[i] for i in idxs] + ) + self.report_warning( + 'Long argument string detected. ' + 'Use -- to separate parameters and URLs, like this:\n%s\n' % + args_to_str(correct_argv)) + + def add_info_extractor(self, ie): + """Add an InfoExtractor object to the end of the list.""" + self._ies.append(ie) + self._ies_instances[ie.ie_key()] = ie + ie.set_downloader(self) + + def get_info_extractor(self, ie_key): + """ + Get an instance of an IE with name ie_key, it will try to get one from + the _ies list, if there's no instance it will create a new one and add + it to the extractor list. 
+ """ + ie = self._ies_instances.get(ie_key) + if ie is None: + ie = get_info_extractor(ie_key)() + self.add_info_extractor(ie) + return ie + + def add_default_info_extractors(self): + """ + Add the InfoExtractors returned by gen_extractors to the end of the list + """ + for ie in gen_extractors(): + self.add_info_extractor(ie) + + def add_post_processor(self, pp): + """Add a PostProcessor object to the end of the chain.""" + self._pps.append(pp) + pp.set_downloader(self) + + def add_progress_hook(self, ph): + """Add the progress hook (currently only for the file downloader)""" + self._progress_hooks.append(ph) + + def _bidi_workaround(self, message): + if not hasattr(self, '_output_channel'): + return message + + assert hasattr(self, '_output_process') + assert isinstance(message, compat_str) + line_count = message.count('\n') + 1 + self._output_process.stdin.write((message + '\n').encode('utf-8')) + self._output_process.stdin.flush() + res = ''.join(self._output_channel.readline().decode('utf-8') + for _ in range(line_count)) + return res[:-len('\n')] + + def to_screen(self, message, skip_eol=False): + """Print message to stdout if not in quiet mode.""" + return self.to_stdout(message, skip_eol, check_quiet=True) + + def _write_string(self, s, out=None): + write_string(s, out=out, encoding=self.params.get('encoding')) + + def to_stdout(self, message, skip_eol=False, check_quiet=False): + """Print message to stdout if not in quiet mode.""" + if self.params.get('logger'): + self.params['logger'].debug(message) + elif not check_quiet or not self.params.get('quiet', False): + message = self._bidi_workaround(message) + terminator = ['\n', ''][skip_eol] + output = message + terminator + + self._write_string(output, self._screen_file) + + def to_stderr(self, message): + """Print message to stderr.""" + assert isinstance(message, compat_str) + if self.params.get('logger'): + self.params['logger'].error(message) + else: + message = self._bidi_workaround(message) + output 
= message + '\n' + self._write_string(output, self._err_file) + + def to_console_title(self, message): + if not self.params.get('consoletitle', False): + return + if os.name == 'nt' and ctypes.windll.kernel32.GetConsoleWindow(): + # c_wchar_p() might not be necessary if `message` is + # already of type unicode() + ctypes.windll.kernel32.SetConsoleTitleW(ctypes.c_wchar_p(message)) + elif 'TERM' in os.environ: + self._write_string('\033]0;%s\007' % message, self._screen_file) + + def save_console_title(self): + if not self.params.get('consoletitle', False): + return + if 'TERM' in os.environ: + # Save the title on stack + self._write_string('\033[22;0t', self._screen_file) + + def restore_console_title(self): + if not self.params.get('consoletitle', False): + return + if 'TERM' in os.environ: + # Restore the title from stack + self._write_string('\033[23;0t', self._screen_file) + + def __enter__(self): + self.save_console_title() + return self + + def __exit__(self, *args): + self.restore_console_title() + + if self.params.get('cookiefile') is not None: + self.cookiejar.save() + + def trouble(self, message=None, tb=None): + """Determine action to take when a download problem appears. + + Depending on if the downloader has been configured to ignore + download errors or not, this method may throw an exception or + not when errors are found, after printing the message. + + tb, if given, is additional traceback information. 
+ """ + if message is not None: + self.to_stderr(message) + if self.params.get('verbose'): + if tb is None: + if sys.exc_info()[0]: # if .trouble has been called from an except block + tb = '' + if hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]: + tb += ''.join(traceback.format_exception(*sys.exc_info()[1].exc_info)) + tb += compat_str(traceback.format_exc()) + else: + tb_data = traceback.format_list(traceback.extract_stack()) + tb = ''.join(tb_data) + self.to_stderr(tb) + if not self.params.get('ignoreerrors', False): + if sys.exc_info()[0] and hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]: + exc_info = sys.exc_info()[1].exc_info + else: + exc_info = sys.exc_info() + raise DownloadError(message, exc_info) + self._download_retcode = 1 + + def report_warning(self, message): + ''' + Print the message to stderr, it will be prefixed with 'WARNING:' + If stderr is a tty file the 'WARNING:' will be colored + ''' + if self.params.get('logger') is not None: + self.params['logger'].warning(message) + else: + if self.params.get('no_warnings'): + return + if self._err_file.isatty() and os.name != 'nt': + _msg_header = '\033[0;33mWARNING:\033[0m' + else: + _msg_header = 'WARNING:' + warning_message = '%s %s' % (_msg_header, message) + self.to_stderr(warning_message) + + def report_error(self, message, tb=None): + ''' + Do the same as trouble, but prefixes the message with 'ERROR:', colored + in red if stderr is a tty file. 
+ ''' + if self._err_file.isatty() and os.name != 'nt': + _msg_header = '\033[0;31mERROR:\033[0m' + else: + _msg_header = 'ERROR:' + error_message = '%s %s' % (_msg_header, message) + self.trouble(error_message, tb) + + def report_file_already_downloaded(self, file_name): + """Report file has already been fully downloaded.""" + try: + self.to_screen('[download] %s has already been downloaded' % file_name) + except UnicodeEncodeError: + self.to_screen('[download] The file has already been downloaded') + + def prepare_filename(self, info_dict): + """Generate the output filename.""" + try: + template_dict = dict(info_dict) + + template_dict['epoch'] = int(time.time()) + autonumber_size = self.params.get('autonumber_size') + if autonumber_size is None: + autonumber_size = 5 + autonumber_templ = '%0' + str(autonumber_size) + 'd' + template_dict['autonumber'] = autonumber_templ % self._num_downloads + if template_dict.get('playlist_index') is not None: + template_dict['playlist_index'] = '%0*d' % (len(str(template_dict['n_entries'])), template_dict['playlist_index']) + if template_dict.get('resolution') is None: + if template_dict.get('width') and template_dict.get('height'): + template_dict['resolution'] = '%dx%d' % (template_dict['width'], template_dict['height']) + elif template_dict.get('height'): + template_dict['resolution'] = '%sp' % template_dict['height'] + elif template_dict.get('width'): + template_dict['resolution'] = '?x%d' % template_dict['width'] + + sanitize = lambda k, v: sanitize_filename( + compat_str(v), + restricted=self.params.get('restrictfilenames'), + is_id=(k == 'id')) + template_dict = dict((k, sanitize(k, v)) + for k, v in template_dict.items() + if v is not None) + template_dict = collections.defaultdict(lambda: 'NA', template_dict) + + outtmpl = self.params.get('outtmpl', DEFAULT_OUTTMPL) + tmpl = compat_expanduser(outtmpl) + filename = tmpl % template_dict + return filename + except ValueError as err: + self.report_error('Error in output 
template: ' + str(err) + ' (encoding: ' + repr(preferredencoding()) + ')') + return None + + def _match_entry(self, info_dict): + """ Returns None iff the file should be downloaded """ + + video_title = info_dict.get('title', info_dict.get('id', 'video')) + if 'title' in info_dict: + # This can happen when we're just evaluating the playlist + title = info_dict['title'] + matchtitle = self.params.get('matchtitle', False) + if matchtitle: + if not re.search(matchtitle, title, re.IGNORECASE): + return '"' + title + '" title did not match pattern "' + matchtitle + '"' + rejecttitle = self.params.get('rejecttitle', False) + if rejecttitle: + if re.search(rejecttitle, title, re.IGNORECASE): + return '"' + title + '" title matched reject pattern "' + rejecttitle + '"' + date = info_dict.get('upload_date', None) + if date is not None: + dateRange = self.params.get('daterange', DateRange()) + if date not in dateRange: + return '%s upload date is not in range %s' % (date_from_str(date).isoformat(), dateRange) + view_count = info_dict.get('view_count', None) + if view_count is not None: + min_views = self.params.get('min_views') + if min_views is not None and view_count < min_views: + return 'Skipping %s, because it has not reached minimum view count (%d/%d)' % (video_title, view_count, min_views) + max_views = self.params.get('max_views') + if max_views is not None and view_count > max_views: + return 'Skipping %s, because it has exceeded the maximum view count (%d/%d)' % (video_title, view_count, max_views) + age_limit = self.params.get('age_limit') + if age_limit is not None: + actual_age_limit = info_dict.get('age_limit') + if actual_age_limit is None: + actual_age_limit = 0 + if age_limit < actual_age_limit: + return 'Skipping "' + title + '" because it is age restricted' + if self.in_download_archive(info_dict): + return '%s has already been recorded in archive' % video_title + return None + + @staticmethod + def add_extra_info(info_dict, extra_info): + '''Set the keys 
from extra_info in info dict if they are missing''' + for key, value in extra_info.items(): + info_dict.setdefault(key, value) + + def extract_info(self, url, download=True, ie_key=None, extra_info={}, + process=True): + ''' + Returns a list with a dictionary for each video we find. + If 'download', also downloads the videos. + extra_info is a dict containing the extra values to add to each result + ''' + + if ie_key: + ies = [self.get_info_extractor(ie_key)] + else: + ies = self._ies + + for ie in ies: + if not ie.suitable(url): + continue + + if not ie.working(): + self.report_warning('The program functionality for this site has been marked as broken, ' + 'and will probably not work.') + + try: + ie_result = ie.extract(url) + if ie_result is None: # Finished already (backwards compatibility; listformats and friends should be moved here) + break + if isinstance(ie_result, list): + # Backwards compatibility: old IE result format + ie_result = { + '_type': 'compat_list', + 'entries': ie_result, + } + self.add_default_extra_info(ie_result, ie, url) + if process: + return self.process_ie_result(ie_result, download, extra_info) + else: + return ie_result + except ExtractorError as de: # An error we somewhat expected + self.report_error(compat_str(de), de.format_traceback()) + break + except MaxDownloadsReached: + raise + except Exception as e: + if self.params.get('ignoreerrors', False): + self.report_error(compat_str(e), tb=compat_str(traceback.format_exc())) + break + else: + raise + else: + self.report_error('no suitable InfoExtractor for URL %s' % url) + + def add_default_extra_info(self, ie_result, ie, url): + self.add_extra_info(ie_result, { + 'extractor': ie.IE_NAME, + 'webpage_url': url, + 'webpage_url_basename': url_basename(url), + 'extractor_key': ie.ie_key(), + }) + + def process_ie_result(self, ie_result, download=True, extra_info={}): + """ + Take the result of the ie(may be modified) and resolve all unresolved + references (URLs, playlist items). 
+ + It will also download the videos if 'download'. + Returns the resolved ie_result. + """ + + result_type = ie_result.get('_type', 'video') + + if result_type in ('url', 'url_transparent'): + extract_flat = self.params.get('extract_flat', False) + if ((extract_flat == 'in_playlist' and 'playlist' in extra_info) or + extract_flat is True): + if self.params.get('forcejson', False): + self.to_stdout(json.dumps(ie_result)) + return ie_result + + if result_type == 'video': + self.add_extra_info(ie_result, extra_info) + return self.process_video_result(ie_result, download=download) + elif result_type == 'url': + # We have to add extra_info to the results because it may be + # contained in a playlist + return self.extract_info(ie_result['url'], + download, + ie_key=ie_result.get('ie_key'), + extra_info=extra_info) + elif result_type == 'url_transparent': + # Use the information from the embedding page + info = self.extract_info( + ie_result['url'], ie_key=ie_result.get('ie_key'), + extra_info=extra_info, download=False, process=False) + + force_properties = dict( + (k, v) for k, v in ie_result.items() if v is not None) + for f in ('_type', 'url'): + if f in force_properties: + del force_properties[f] + new_result = info.copy() + new_result.update(force_properties) + + assert new_result.get('_type') != 'url_transparent' + + return self.process_ie_result( + new_result, download=download, extra_info=extra_info) + elif result_type == 'playlist' or result_type == 'multi_video': + # We process each entry in the playlist + playlist = ie_result.get('title', None) or ie_result.get('id', None) + self.to_screen('[download] Downloading playlist: %s' % playlist) + + playlist_results = [] + + playliststart = self.params.get('playliststart', 1) - 1 + playlistend = self.params.get('playlistend', None) + # For backwards compatibility, interpret -1 as whole list + if playlistend == -1: + playlistend = None + + ie_entries = ie_result['entries'] + if isinstance(ie_entries, list): + 
n_all_entries = len(ie_entries) + entries = ie_entries[playliststart:playlistend] + n_entries = len(entries) + self.to_screen( + "[%s] playlist %s: Collected %d video ids (downloading %d of them)" % + (ie_result['extractor'], playlist, n_all_entries, n_entries)) + elif isinstance(ie_entries, PagedList): + entries = ie_entries.getslice( + playliststart, playlistend) + n_entries = len(entries) + self.to_screen( + "[%s] playlist %s: Downloading %d videos" % + (ie_result['extractor'], playlist, n_entries)) + else: # iterable + entries = list(itertools.islice( + ie_entries, playliststart, playlistend)) + n_entries = len(entries) + self.to_screen( + "[%s] playlist %s: Downloading %d videos" % + (ie_result['extractor'], playlist, n_entries)) + + if self.params.get('playlistreverse', False): + entries = entries[::-1] + + for i, entry in enumerate(entries, 1): + self.to_screen('[download] Downloading video %s of %s' % (i, n_entries)) + extra = { + 'n_entries': n_entries, + 'playlist': playlist, + 'playlist_id': ie_result.get('id'), + 'playlist_title': ie_result.get('title'), + 'playlist_index': i + playliststart, + 'extractor': ie_result['extractor'], + 'webpage_url': ie_result['webpage_url'], + 'webpage_url_basename': url_basename(ie_result['webpage_url']), + 'extractor_key': ie_result['extractor_key'], + } + + reason = self._match_entry(entry) + if reason is not None: + self.to_screen('[download] ' + reason) + continue + + entry_result = self.process_ie_result(entry, + download=download, + extra_info=extra) + playlist_results.append(entry_result) + ie_result['entries'] = playlist_results + return ie_result + elif result_type == 'compat_list': + self.report_warning( + 'Extractor %s returned a compat_list result. ' + 'It needs to be updated.' 
% ie_result.get('extractor')) + + def _fixup(r): + self.add_extra_info( + r, + { + 'extractor': ie_result['extractor'], + 'webpage_url': ie_result['webpage_url'], + 'webpage_url_basename': url_basename(ie_result['webpage_url']), + 'extractor_key': ie_result['extractor_key'], + } + ) + return r + ie_result['entries'] = [ + self.process_ie_result(_fixup(r), download, extra_info) + for r in ie_result['entries'] + ] + return ie_result + else: + raise Exception('Invalid result type: %s' % result_type) + + def select_format(self, format_spec, available_formats): + if format_spec == 'best' or format_spec is None: + return available_formats[-1] + elif format_spec == 'worst': + return available_formats[0] + elif format_spec == 'bestaudio': + audio_formats = [ + f for f in available_formats + if f.get('vcodec') == 'none'] + if audio_formats: + return audio_formats[-1] + elif format_spec == 'worstaudio': + audio_formats = [ + f for f in available_formats + if f.get('vcodec') == 'none'] + if audio_formats: + return audio_formats[0] + elif format_spec == 'bestvideo': + video_formats = [ + f for f in available_formats + if f.get('acodec') == 'none'] + if video_formats: + return video_formats[-1] + elif format_spec == 'worstvideo': + video_formats = [ + f for f in available_formats + if f.get('acodec') == 'none'] + if video_formats: + return video_formats[0] + else: + extensions = ['mp4', 'flv', 'webm', '3gp', 'm4a'] + if format_spec in extensions: + filter_f = lambda f: f['ext'] == format_spec + else: + filter_f = lambda f: f['format_id'] == format_spec + matches = list(filter(filter_f, available_formats)) + if matches: + return matches[-1] + return None + + def process_video_result(self, info_dict, download=True): + assert info_dict.get('_type', 'video') == 'video' + + if 'id' not in info_dict: + raise ExtractorError('Missing "id" field in extractor result') + if 'title' not in info_dict: + raise ExtractorError('Missing "title" field in extractor result') + + if 'playlist' not 
in info_dict: + # It isn't part of a playlist + info_dict['playlist'] = None + info_dict['playlist_index'] = None + + thumbnails = info_dict.get('thumbnails') + if thumbnails: + thumbnails.sort(key=lambda t: ( + t.get('width'), t.get('height'), t.get('url'))) + for t in thumbnails: + if 'width' in t and 'height' in t: + t['resolution'] = '%dx%d' % (t['width'], t['height']) + + if thumbnails and 'thumbnail' not in info_dict: + info_dict['thumbnail'] = thumbnails[-1]['url'] + + if 'display_id' not in info_dict and 'id' in info_dict: + info_dict['display_id'] = info_dict['id'] + + if info_dict.get('upload_date') is None and info_dict.get('timestamp') is not None: + # Working around negative timestamps in Windows + # (see http://bugs.python.org/issue1646728) + if info_dict['timestamp'] < 0 and os.name == 'nt': + info_dict['timestamp'] = 0 + upload_date = datetime.datetime.utcfromtimestamp( + info_dict['timestamp']) + info_dict['upload_date'] = upload_date.strftime('%Y%m%d') + + # This extractors handle format selection themselves + if info_dict['extractor'] in ['Youku']: + if download: + self.process_info(info_dict) + return info_dict + + # We now pick which formats have to be downloaded + if info_dict.get('formats') is None: + # There's only one format available + formats = [info_dict] + else: + formats = info_dict['formats'] + + if not formats: + raise ExtractorError('No video formats found!') + + # We check that all the formats have the format and format_id fields + for i, format in enumerate(formats): + if 'url' not in format: + raise ExtractorError('Missing "url" key in result (index %d)' % i) + + if format.get('format_id') is None: + format['format_id'] = compat_str(i) + if format.get('format') is None: + format['format'] = '{id} - {res}{note}'.format( + id=format['format_id'], + res=self.format_resolution(format), + note=' ({0})'.format(format['format_note']) if format.get('format_note') is not None else '', + ) + # Automatically determine file extension if 
missing + if 'ext' not in format: + format['ext'] = determine_ext(format['url']).lower() + + format_limit = self.params.get('format_limit', None) + if format_limit: + formats = list(takewhile_inclusive( + lambda f: f['format_id'] != format_limit, formats + )) + + # TODO Central sorting goes here + + if formats[0] is not info_dict: + # only set the 'formats' fields if the original info_dict list them + # otherwise we end up with a circular reference, the first (and unique) + # element in the 'formats' field in info_dict is info_dict itself, + # wich can't be exported to json + info_dict['formats'] = formats + if self.params.get('listformats', None): + self.list_formats(info_dict) + return + + req_format = self.params.get('format') + if req_format is None: + req_format = 'best' + formats_to_download = [] + # The -1 is for supporting YoutubeIE + if req_format in ('-1', 'all'): + formats_to_download = formats + else: + for rfstr in req_format.split(','): + # We can accept formats requested in the format: 34/5/best, we pick + # the first that is available, starting from left + req_formats = rfstr.split('/') + for rf in req_formats: + if re.match(r'.+?\+.+?', rf) is not None: + # Two formats have been requested like '137+139' + format_1, format_2 = rf.split('+') + formats_info = (self.select_format(format_1, formats), + self.select_format(format_2, formats)) + if all(formats_info): + # The first format must contain the video and the + # second the audio + if formats_info[0].get('vcodec') == 'none': + self.report_error('The first format must ' + 'contain the video, try using ' + '"-f %s+%s"' % (format_2, format_1)) + return + selected_format = { + 'requested_formats': formats_info, + 'format': rf, + 'ext': formats_info[0]['ext'], + } + else: + selected_format = None + else: + selected_format = self.select_format(rf, formats) + if selected_format is not None: + formats_to_download.append(selected_format) + break + if not formats_to_download: + raise 
ExtractorError('requested format not available', + expected=True) + + if download: + if len(formats_to_download) > 1: + self.to_screen('[info] %s: downloading video in %s formats' % (info_dict['id'], len(formats_to_download))) + for format in formats_to_download: + new_info = dict(info_dict) + new_info.update(format) + self.process_info(new_info) + # We update the info dict with the best quality format (backwards compatibility) + info_dict.update(formats_to_download[-1]) + return info_dict + + def process_info(self, info_dict): + """Process a single resolved IE result.""" + + assert info_dict.get('_type', 'video') == 'video' + + max_downloads = self.params.get('max_downloads') + if max_downloads is not None: + if self._num_downloads >= int(max_downloads): + raise MaxDownloadsReached() + + info_dict['fulltitle'] = info_dict['title'] + if len(info_dict['title']) > 200: + info_dict['title'] = info_dict['title'][:197] + '...' + + # Keep for backwards compatibility + info_dict['stitle'] = info_dict['title'] + + if 'format' not in info_dict: + info_dict['format'] = info_dict['ext'] + + reason = self._match_entry(info_dict) + if reason is not None: + self.to_screen('[download] ' + reason) + return + + self._num_downloads += 1 + + filename = self.prepare_filename(info_dict) + + # Forced printings + if self.params.get('forcetitle', False): + self.to_stdout(info_dict['fulltitle']) + if self.params.get('forceid', False): + self.to_stdout(info_dict['id']) + if self.params.get('forceurl', False): + if info_dict.get('requested_formats') is not None: + for f in info_dict['requested_formats']: + self.to_stdout(f['url'] + f.get('play_path', '')) + else: + # For RTMP URLs, also include the playpath + self.to_stdout(info_dict['url'] + info_dict.get('play_path', '')) + if self.params.get('forcethumbnail', False) and info_dict.get('thumbnail') is not None: + self.to_stdout(info_dict['thumbnail']) + if self.params.get('forcedescription', False) and info_dict.get('description') is not 
None: + self.to_stdout(info_dict['description']) + if self.params.get('forcefilename', False) and filename is not None: + self.to_stdout(filename) + if self.params.get('forceduration', False) and info_dict.get('duration') is not None: + self.to_stdout(formatSeconds(info_dict['duration'])) + if self.params.get('forceformat', False): + self.to_stdout(info_dict['format']) + if self.params.get('forcejson', False): + info_dict['_filename'] = filename + self.to_stdout(json.dumps(info_dict)) + if self.params.get('dump_single_json', False): + info_dict['_filename'] = filename + + # Do nothing else if in simulate mode + if self.params.get('simulate', False): + return + + if filename is None: + return + + try: + dn = os.path.dirname(encodeFilename(filename)) + if dn and not os.path.exists(dn): + os.makedirs(dn) + except (OSError, IOError) as err: + self.report_error('unable to create directory ' + compat_str(err)) + return + + if self.params.get('writedescription', False): + descfn = filename + '.description' + if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(descfn)): + self.to_screen('[info] Video description is already present') + elif info_dict.get('description') is None: + self.report_warning('There\'s no description to write.') + else: + try: + self.to_screen('[info] Writing video description to: ' + descfn) + with io.open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile: + descfile.write(info_dict['description']) + except (OSError, IOError): + self.report_error('Cannot write description file ' + descfn) + return + + if self.params.get('writeannotations', False): + annofn = filename + '.annotations.xml' + if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(annofn)): + self.to_screen('[info] Video annotations are already present') + else: + try: + self.to_screen('[info] Writing video annotations to: ' + annofn) + with io.open(encodeFilename(annofn), 'w', encoding='utf-8') as annofile: + 
annofile.write(info_dict['annotations']) + except (KeyError, TypeError): + self.report_warning('There are no annotations to write.') + except (OSError, IOError): + self.report_error('Cannot write annotations file: ' + annofn) + return + + subtitles_are_requested = any([self.params.get('writesubtitles', False), + self.params.get('writeautomaticsub')]) + + if subtitles_are_requested and 'subtitles' in info_dict and info_dict['subtitles']: + # subtitles download errors are already managed as troubles in relevant IE + # that way it will silently go on when used with unsupporting IE + subtitles = info_dict['subtitles'] + sub_format = self.params.get('subtitlesformat', 'srt') + for sub_lang in subtitles.keys(): + sub = subtitles[sub_lang] + if sub is None: + continue + try: + sub_filename = subtitles_filename(filename, sub_lang, sub_format) + if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(sub_filename)): + self.to_screen('[info] Video subtitle %s.%s is already_present' % (sub_lang, sub_format)) + else: + self.to_screen('[info] Writing video subtitles to: ' + sub_filename) + with io.open(encodeFilename(sub_filename), 'w', encoding='utf-8') as subfile: + subfile.write(sub) + except (OSError, IOError): + self.report_error('Cannot write subtitles file ' + sub_filename) + return + + if self.params.get('writeinfojson', False): + infofn = os.path.splitext(filename)[0] + '.info.json' + if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(infofn)): + self.to_screen('[info] Video description metadata is already present') + else: + self.to_screen('[info] Writing video description metadata as JSON to: ' + infofn) + try: + write_json_file(info_dict, infofn) + except (OSError, IOError): + self.report_error('Cannot write metadata to JSON file ' + infofn) + return + + if self.params.get('writethumbnail', False): + if info_dict.get('thumbnail') is not None: + thumb_format = determine_ext(info_dict['thumbnail'], 'jpg') + thumb_filename = 
os.path.splitext(filename)[0] + '.' + thumb_format + if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(thumb_filename)): + self.to_screen('[%s] %s: Thumbnail is already present' % + (info_dict['extractor'], info_dict['id'])) + else: + self.to_screen('[%s] %s: Downloading thumbnail ...' % + (info_dict['extractor'], info_dict['id'])) + try: + uf = self.urlopen(info_dict['thumbnail']) + with open(thumb_filename, 'wb') as thumbf: + shutil.copyfileobj(uf, thumbf) + self.to_screen('[%s] %s: Writing thumbnail to: %s' % + (info_dict['extractor'], info_dict['id'], thumb_filename)) + except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: + self.report_warning('Unable to download thumbnail "%s": %s' % + (info_dict['thumbnail'], compat_str(err))) + + if not self.params.get('skip_download', False): + if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(filename)): + success = True + else: + try: + def dl(name, info): + fd = get_suitable_downloader(info)(self, self.params) + for ph in self._progress_hooks: + fd.add_progress_hook(ph) + if self.params.get('verbose'): + self.to_stdout('[debug] Invoking downloader on %r' % info.get('url')) + return fd.download(name, info) + if info_dict.get('requested_formats') is not None: + downloaded = [] + success = True + merger = FFmpegMergerPP(self, not self.params.get('keepvideo')) + if not merger._executable: + postprocessors = [] + self.report_warning('You have requested multiple ' + 'formats but ffmpeg or avconv are not installed.' 
+ ' The formats won\'t be merged') + else: + postprocessors = [merger] + for f in info_dict['requested_formats']: + new_info = dict(info_dict) + new_info.update(f) + fname = self.prepare_filename(new_info) + fname = prepend_extension(fname, 'f%s' % f['format_id']) + downloaded.append(fname) + partial_success = dl(fname, new_info) + success = success and partial_success + info_dict['__postprocessors'] = postprocessors + info_dict['__files_to_merge'] = downloaded + else: + # Just a single file + success = dl(filename, info_dict) + except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: + self.report_error('unable to download video data: %s' % str(err)) + return + except (OSError, IOError) as err: + raise UnavailableVideoError(err) + except (ContentTooShortError, ) as err: + self.report_error('content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded)) + return + + if success: + try: + self.post_process(filename, info_dict) + except (PostProcessingError) as err: + self.report_error('postprocessing: %s' % str(err)) + return + self.record_download_archive(info_dict) + + def download(self, url_list): + """Download a given list of URLs.""" + outtmpl = self.params.get('outtmpl', DEFAULT_OUTTMPL) + if (len(url_list) > 1 and + '%' not in outtmpl + and self.params.get('max_downloads') != 1): + raise SameFileError(outtmpl) + + for url in url_list: + try: + # It also downloads the videos + res = self.extract_info(url) + except UnavailableVideoError: + self.report_error('unable to download video') + except MaxDownloadsReached: + self.to_screen('[info] Maximum number of downloaded files reached.') + raise + else: + if self.params.get('dump_single_json', False): + self.to_stdout(json.dumps(res)) + + return self._download_retcode + + def download_with_info_file(self, info_filename): + with io.open(info_filename, 'r', encoding='utf-8') as f: + info = json.load(f) + try: + self.process_ie_result(info, download=True) + 
except DownloadError: + webpage_url = info.get('webpage_url') + if webpage_url is not None: + self.report_warning('The info failed to download, trying with "%s"' % webpage_url) + return self.download([webpage_url]) + else: + raise + return self._download_retcode + + def post_process(self, filename, ie_info): + """Run all the postprocessors on the given file.""" + info = dict(ie_info) + info['filepath'] = filename + keep_video = None + pps_chain = [] + if ie_info.get('__postprocessors') is not None: + pps_chain.extend(ie_info['__postprocessors']) + pps_chain.extend(self._pps) + for pp in pps_chain: + try: + keep_video_wish, new_info = pp.run(info) + if keep_video_wish is not None: + if keep_video_wish: + keep_video = keep_video_wish + elif keep_video is None: + # No clear decision yet, let IE decide + keep_video = keep_video_wish + except PostProcessingError as e: + self.report_error(e.msg) + if keep_video is False and not self.params.get('keepvideo', False): + try: + self.to_screen('Deleting original file %s (pass -k to keep)' % filename) + os.remove(encodeFilename(filename)) + except (IOError, OSError): + self.report_warning('Unable to remove downloaded video file') + + def _make_archive_id(self, info_dict): + # Future-proof against any change in case + # and backwards compatibility with prior versions + extractor = info_dict.get('extractor_key') + if extractor is None: + if 'id' in info_dict: + extractor = info_dict.get('ie_key') # key in a playlist + if extractor is None: + return None # Incomplete video information + return extractor.lower() + ' ' + info_dict['id'] + + def in_download_archive(self, info_dict): + fn = self.params.get('download_archive') + if fn is None: + return False + + vid_id = self._make_archive_id(info_dict) + if vid_id is None: + return False # Incomplete video information + + try: + with locked_file(fn, 'r', encoding='utf-8') as archive_file: + for line in archive_file: + if line.strip() == vid_id: + return True + except IOError as ioe: + 
if ioe.errno != errno.ENOENT: + raise + return False + + def record_download_archive(self, info_dict): + fn = self.params.get('download_archive') + if fn is None: + return + vid_id = self._make_archive_id(info_dict) + assert vid_id + with locked_file(fn, 'a', encoding='utf-8') as archive_file: + archive_file.write(vid_id + '\n') + + @staticmethod + def format_resolution(format, default='unknown'): + if format.get('vcodec') == 'none': + return 'audio only' + if format.get('resolution') is not None: + return format['resolution'] + if format.get('height') is not None: + if format.get('width') is not None: + res = '%sx%s' % (format['width'], format['height']) + else: + res = '%sp' % format['height'] + elif format.get('width') is not None: + res = '?x%d' % format['width'] + else: + res = default + return res + + def _format_note(self, fdict): + res = '' + if fdict.get('ext') in ['f4f', 'f4m']: + res += '(unsupported) ' + if fdict.get('format_note') is not None: + res += fdict['format_note'] + ' ' + if fdict.get('tbr') is not None: + res += '%4dk ' % fdict['tbr'] + if fdict.get('container') is not None: + if res: + res += ', ' + res += '%s container' % fdict['container'] + if (fdict.get('vcodec') is not None and + fdict.get('vcodec') != 'none'): + if res: + res += ', ' + res += fdict['vcodec'] + if fdict.get('vbr') is not None: + res += '@' + elif fdict.get('vbr') is not None and fdict.get('abr') is not None: + res += 'video@' + if fdict.get('vbr') is not None: + res += '%4dk' % fdict['vbr'] + if fdict.get('fps') is not None: + res += ', %sfps' % fdict['fps'] + if fdict.get('acodec') is not None: + if res: + res += ', ' + if fdict['acodec'] == 'none': + res += 'video only' + else: + res += '%-5s' % fdict['acodec'] + elif fdict.get('abr') is not None: + if res: + res += ', ' + res += 'audio' + if fdict.get('abr') is not None: + res += '@%3dk' % fdict['abr'] + if fdict.get('asr') is not None: + res += ' (%5dHz)' % fdict['asr'] + if fdict.get('filesize') is not None: + if 
def list_formats(self, info_dict):
    """Print a table of all available formats for a video."""
    def render_row(fmt, idlen=20):
        # Column layout: format id, extension, resolution, free-form note.
        template = '%-' + compat_str(idlen + 1) + 's%-10s%-12s%s'
        return template % (
            fmt['format_id'],
            fmt['ext'],
            self.format_resolution(fmt),
            self._format_note(fmt),
        )

    formats = info_dict.get('formats', [info_dict])
    id_width = max(len(f['format_id']) for f in formats)
    id_width = max(id_width, len('format code'))
    rows = [render_row(f, id_width) for f in formats]
    if len(formats) > 1:
        # Label the first entry as worst and the last as best.
        rows[0] += (' ' if self._format_note(formats[0]) else '') + '(worst)'
        rows[-1] += (' ' if self._format_note(formats[-1]) else '') + '(best)'

    header = render_row({
        'format_id': 'format code', 'ext': 'extension',
        'resolution': 'resolution', 'format_note': 'note'}, idlen=id_width)
    self.to_screen('[info] Available formats for %s:\n%s\n%s' %
                   (info_dict['id'], header, '\n'.join(rows)))
def print_debug_header(self):
    """Write verbose debugging information (interpreter, encodings, git
    revision, helper-binary versions, proxy map) to the debug output.

    No-op unless the 'verbose' option is set.
    """
    if not self.params.get('verbose'):
        return

    if type('') is not compat_str:
        # Python 2.6 on SLES11 SP1 (https://github.com/rg3/youtube-dl/issues/3326)
        self.report_warning(
            'Your Python is broken! Update to a newer and supported version')

    stdout_encoding = getattr(
        sys.stdout, 'encoding', 'missing (%s)' % type(sys.stdout).__name__)
    encoding_str = (
        '[debug] Encodings: locale %s, fs %s, out %s, pref %s\n' % (
            locale.getpreferredencoding(),
            sys.getfilesystemencoding(),
            stdout_encoding,
            self.get_encoding()))
    write_string(encoding_str, encoding=None)

    self._write_string('[debug] youtube-dl version ' + __version__ + '\n')
    try:
        # Best effort: report the git revision when running from a checkout.
        sp = subprocess.Popen(
            ['git', 'rev-parse', '--short', 'HEAD'],
            stdout=subprocess.PIPE, stderr=subprocess.PIPE,
            cwd=os.path.dirname(os.path.abspath(__file__)))
        out, err = sp.communicate()
        out = out.decode().strip()
        if re.match('[0-9a-f]+', out):
            self._write_string('[debug] Git HEAD: ' + out + '\n')
    except Exception:
        # Was a bare 'except:'; narrowed so KeyboardInterrupt/SystemExit
        # are no longer swallowed while keeping the best-effort behaviour.
        try:
            sys.exc_clear()  # Python 2 only; raises AttributeError on Python 3
        except Exception:
            pass
    self._write_string('[debug] Python version %s - %s\n' % (
        platform.python_version(), platform_name()))

    exe_versions = FFmpegPostProcessor.get_versions()
    exe_versions['rtmpdump'] = rtmpdump_version()
    exe_str = ', '.join(
        '%s %s' % (exe, v)
        for exe, v in sorted(exe_versions.items())
        if v
    )
    if not exe_str:
        exe_str = 'none'
    self._write_string('[debug] exe versions: %s\n' % exe_str)

    proxy_map = {}
    for handler in self._opener.handlers:
        if hasattr(handler, 'proxies'):
            proxy_map.update(handler.proxies)
    self._write_string('[debug] Proxy map: ' + compat_str(proxy_map) + '\n')
def encode(self, s):
    """Encode the text *s* with the active output encoding.

    bytes input is returned unchanged; on failure the UnicodeEncodeError
    is re-raised with a configuration hint appended to its reason.
    """
    if isinstance(s, bytes):
        # Nothing to do - hand it back verbatim.
        return s

    target = self.get_encoding()
    try:
        return s.encode(target)
    except UnicodeEncodeError as err:
        err.reason = err.reason + '. Check your system encoding configuration or use the --encoding option.'
        raise
def get_encoding(self):
    """Return the output encoding: the 'encoding' option when set,
    otherwise the locale's preferred encoding."""
    configured = self.params.get('encoding')
    return preferredencoding() if configured is None else configured
line option %s:%s\n' % (key, value)) + std_headers[key] = value + + # Dump user agent + if opts.dump_user_agent: + compat_print(std_headers['User-Agent']) + sys.exit(0) + + # Batch file verification + batch_urls = [] + if opts.batchfile is not None: + try: + if opts.batchfile == '-': + batchfd = sys.stdin + else: + batchfd = io.open(opts.batchfile, 'r', encoding='utf-8', errors='ignore') + batch_urls = read_batch_urls(batchfd) + if opts.verbose: + write_string('[debug] Batch file urls: ' + repr(batch_urls) + '\n') + except IOError: + sys.exit('ERROR: batch file could not be read') + all_urls = batch_urls + args + all_urls = [url.strip() for url in all_urls] + _enc = preferredencoding() + all_urls = [url.decode(_enc, 'ignore') if isinstance(url, bytes) else url for url in all_urls] + + extractors = gen_extractors() + + if opts.list_extractors: + for ie in sorted(extractors, key=lambda ie: ie.IE_NAME.lower()): + compat_print(ie.IE_NAME + (' (CURRENTLY BROKEN)' if not ie._WORKING else '')) + matchedUrls = [url for url in all_urls if ie.suitable(url)] + for mu in matchedUrls: + compat_print(' ' + mu) + sys.exit(0) + if opts.list_extractor_descriptions: + for ie in sorted(extractors, key=lambda ie: ie.IE_NAME.lower()): + if not ie._WORKING: + continue + desc = getattr(ie, 'IE_DESC', ie.IE_NAME) + if desc is False: + continue + if hasattr(ie, 'SEARCH_KEY'): + _SEARCHES = ('cute kittens', 'slithering pythons', 'falling cat', 'angry poodle', 'purple fish', 'running tortoise', 'sleeping bunny') + _COUNTS = ('', '5', '10', 'all') + desc += ' (Example: "%s%s:%s" )' % (ie.SEARCH_KEY, random.choice(_COUNTS), random.choice(_SEARCHES)) + compat_print(desc) + sys.exit(0) + + # Conflicting, missing and erroneous options + if opts.usenetrc and (opts.username is not None or opts.password is not None): + parser.error('using .netrc conflicts with giving username/password') + if opts.password is not None and opts.username is None: + parser.error('account username missing\n') + if 
opts.outtmpl is not None and (opts.usetitle or opts.autonumber or opts.useid): + parser.error('using output template conflicts with using title, video ID or auto number') + if opts.usetitle and opts.useid: + parser.error('using title conflicts with using video ID') + if opts.username is not None and opts.password is None: + opts.password = compat_getpass('Type account password and press [Return]: ') + if opts.ratelimit is not None: + numeric_limit = FileDownloader.parse_bytes(opts.ratelimit) + if numeric_limit is None: + parser.error('invalid rate limit specified') + opts.ratelimit = numeric_limit + if opts.min_filesize is not None: + numeric_limit = FileDownloader.parse_bytes(opts.min_filesize) + if numeric_limit is None: + parser.error('invalid min_filesize specified') + opts.min_filesize = numeric_limit + if opts.max_filesize is not None: + numeric_limit = FileDownloader.parse_bytes(opts.max_filesize) + if numeric_limit is None: + parser.error('invalid max_filesize specified') + opts.max_filesize = numeric_limit + if opts.retries is not None: + try: + opts.retries = int(opts.retries) + except (TypeError, ValueError): + parser.error('invalid retry count specified') + if opts.buffersize is not None: + numeric_buffersize = FileDownloader.parse_bytes(opts.buffersize) + if numeric_buffersize is None: + parser.error('invalid buffer size specified') + opts.buffersize = numeric_buffersize + if opts.playliststart <= 0: + raise ValueError('Playlist start must be positive') + if opts.playlistend not in (-1, None) and opts.playlistend < opts.playliststart: + raise ValueError('Playlist end must be greater than playlist start') + if opts.extractaudio: + if opts.audioformat not in ['best', 'aac', 'mp3', 'm4a', 'opus', 'vorbis', 'wav']: + parser.error('invalid audio format specified') + if opts.audioquality: + opts.audioquality = opts.audioquality.strip('k').strip('K') + if not opts.audioquality.isdigit(): + parser.error('invalid audio quality specified') + if opts.recodevideo 
is not None: + if opts.recodevideo not in ['mp4', 'flv', 'webm', 'ogg', 'mkv']: + parser.error('invalid video recode format specified') + if opts.date is not None: + date = DateRange.day(opts.date) + else: + date = DateRange(opts.dateafter, opts.datebefore) + + # Do not download videos when there are audio-only formats + if opts.extractaudio and not opts.keepvideo and opts.format is None: + opts.format = 'bestaudio/best' + + # --all-sub automatically sets --write-sub if --write-auto-sub is not given + # this was the old behaviour if only --all-sub was given. + if opts.allsubtitles and not opts.writeautomaticsub: + opts.writesubtitles = True + + if sys.version_info < (3,): + # In Python 2, sys.argv is a bytestring (also note http://bugs.python.org/issue2128 for Windows systems) + if opts.outtmpl is not None: + opts.outtmpl = opts.outtmpl.decode(preferredencoding()) + outtmpl = ((opts.outtmpl is not None and opts.outtmpl) + or (opts.format == '-1' and opts.usetitle and '%(title)s-%(id)s-%(format)s.%(ext)s') + or (opts.format == '-1' and '%(id)s-%(format)s.%(ext)s') + or (opts.usetitle and opts.autonumber and '%(autonumber)s-%(title)s-%(id)s.%(ext)s') + or (opts.usetitle and '%(title)s-%(id)s.%(ext)s') + or (opts.useid and '%(id)s.%(ext)s') + or (opts.autonumber and '%(autonumber)s-%(id)s.%(ext)s') + or DEFAULT_OUTTMPL) + if not os.path.splitext(outtmpl)[1] and opts.extractaudio: + parser.error('Cannot download a video and extract audio into the same' + ' file! 
Use "{0}.%(ext)s" instead of "{0}" as the output' + ' template'.format(outtmpl)) + + any_printing = opts.geturl or opts.gettitle or opts.getid or opts.getthumbnail or opts.getdescription or opts.getfilename or opts.getformat or opts.getduration or opts.dumpjson or opts.dump_single_json + download_archive_fn = compat_expanduser(opts.download_archive) if opts.download_archive is not None else opts.download_archive + + # PostProcessors + postprocessors = [] + # Add the metadata pp first, the other pps will copy it + if opts.addmetadata: + postprocessors.append({'key': 'FFmpegMetadata'}) + if opts.extractaudio: + postprocessors.append({ + 'key': 'FFmpegExtractAudio', + 'preferredcodec': opts.audioformat, + 'preferredquality': opts.audioquality, + 'nopostoverwrites': opts.nopostoverwrites, + }) + if opts.recodevideo: + postprocessors.append({ + 'key': 'FFmpegVideoConvertor', + 'preferedformat': opts.recodevideo, + }) + if opts.embedsubtitles: + postprocessors.append({ + 'key': 'FFmpegEmbedSubtitle', + 'subtitlesformat': opts.subtitlesformat, + }) + if opts.xattrs: + postprocessors.append({'key': 'XAttrMetadata'}) + if opts.embedthumbnail: + if not opts.addmetadata: + postprocessors.append({'key': 'FFmpegAudioFix'}) + postprocessors.append({'key': 'AtomicParsley'}) + # Please keep ExecAfterDownload towards the bottom as it allows the user to modify the final file in any way. + # So if the user is able to remove the file before your postprocessor runs it might cause a few problems. 
+ if opts.exec_cmd: + postprocessors.append({ + 'key': 'ExecAfterDownload', + 'verboseOutput': opts.verbose, + 'exec_cmd': opts.exec_cmd, + }) + + ydl_opts = { + 'usenetrc': opts.usenetrc, + 'username': opts.username, + 'password': opts.password, + 'twofactor': opts.twofactor, + 'videopassword': opts.videopassword, + 'quiet': (opts.quiet or any_printing), + 'no_warnings': opts.no_warnings, + 'forceurl': opts.geturl, + 'forcetitle': opts.gettitle, + 'forceid': opts.getid, + 'forcethumbnail': opts.getthumbnail, + 'forcedescription': opts.getdescription, + 'forceduration': opts.getduration, + 'forcefilename': opts.getfilename, + 'forceformat': opts.getformat, + 'forcejson': opts.dumpjson, + 'dump_single_json': opts.dump_single_json, + 'simulate': opts.simulate or any_printing, + 'skip_download': opts.skip_download, + 'format': opts.format, + 'format_limit': opts.format_limit, + 'listformats': opts.listformats, + 'outtmpl': outtmpl, + 'autonumber_size': opts.autonumber_size, + 'restrictfilenames': opts.restrictfilenames, + 'ignoreerrors': opts.ignoreerrors, + 'ratelimit': opts.ratelimit, + 'nooverwrites': opts.nooverwrites, + 'retries': opts.retries, + 'buffersize': opts.buffersize, + 'noresizebuffer': opts.noresizebuffer, + 'continuedl': opts.continue_dl, + 'noprogress': opts.noprogress, + 'progress_with_newline': opts.progress_with_newline, + 'playliststart': opts.playliststart, + 'playlistend': opts.playlistend, + 'playlistreverse': opts.playlist_reverse, + 'noplaylist': opts.noplaylist, + 'logtostderr': opts.outtmpl == '-', + 'consoletitle': opts.consoletitle, + 'nopart': opts.nopart, + 'updatetime': opts.updatetime, + 'writedescription': opts.writedescription, + 'writeannotations': opts.writeannotations, + 'writeinfojson': opts.writeinfojson, + 'writethumbnail': opts.writethumbnail, + 'writesubtitles': opts.writesubtitles, + 'writeautomaticsub': opts.writeautomaticsub, + 'allsubtitles': opts.allsubtitles, + 'listsubtitles': opts.listsubtitles, + 'subtitlesformat': 
opts.subtitlesformat, + 'subtitleslangs': opts.subtitleslangs, + 'matchtitle': decodeOption(opts.matchtitle), + 'rejecttitle': decodeOption(opts.rejecttitle), + 'max_downloads': opts.max_downloads, + 'prefer_free_formats': opts.prefer_free_formats, + 'verbose': opts.verbose, + 'dump_intermediate_pages': opts.dump_intermediate_pages, + 'write_pages': opts.write_pages, + 'test': opts.test, + 'keepvideo': opts.keepvideo, + 'min_filesize': opts.min_filesize, + 'max_filesize': opts.max_filesize, + 'min_views': opts.min_views, + 'max_views': opts.max_views, + 'daterange': date, + 'cachedir': opts.cachedir, + 'youtube_print_sig_code': opts.youtube_print_sig_code, + 'age_limit': opts.age_limit, + 'download_archive': download_archive_fn, + 'cookiefile': opts.cookiefile, + 'nocheckcertificate': opts.no_check_certificate, + 'prefer_insecure': opts.prefer_insecure, + 'proxy': opts.proxy, + 'socket_timeout': opts.socket_timeout, + 'bidi_workaround': opts.bidi_workaround, + 'debug_printtraffic': opts.debug_printtraffic, + 'prefer_ffmpeg': opts.prefer_ffmpeg, + 'include_ads': opts.include_ads, + 'default_search': opts.default_search, + 'youtube_include_dash_manifest': opts.youtube_include_dash_manifest, + 'encoding': opts.encoding, + 'exec_cmd': opts.exec_cmd, + 'extract_flat': opts.extract_flat, + 'postprocessors': postprocessors, + } + + with YoutubeDL(ydl_opts) as ydl: + # Update version + if opts.update_self: + update_self(ydl.to_screen, opts.verbose) + + # Remove cache dir + if opts.rm_cachedir: + ydl.cache.remove() + + # Maybe do nothing + if (len(all_urls) < 1) and (opts.load_info_filename is None): + if opts.update_self or opts.rm_cachedir: + sys.exit() + + ydl.warn_if_short_id(sys.argv[1:] if argv is None else argv) + parser.error('you must provide at least one URL') + + try: + if opts.load_info_filename is not None: + retcode = ydl.download_with_info_file(opts.load_info_filename) + else: + retcode = ydl.download(all_urls) + except MaxDownloadsReached: + 
def main(argv=None):
    """Program entry point: run _real_main and map known failures to
    process exit statuses."""
    try:
        _real_main(argv)
    except DownloadError:
        # The error was already reported during the download; just fail.
        sys.exit(1)
    except SameFileError:
        sys.exit('ERROR: fixed output name but more than one file to download')
    except KeyboardInterrupt:
        sys.exit('\nERROR: Interrupted by user')
def aes_cbc_decrypt(data, key, iv):
    """
    Decrypt with aes in CBC mode

    @param {int[]} data cipher
    @param {int[]} key 16/24/32-Byte cipher key
    @param {int[]} iv 16-Byte IV
    @returns {int[]} decrypted data
    """
    schedule = key_expansion(key)
    total = len(data)
    n_blocks = int(ceil(float(total) / BLOCK_SIZE_BYTES))

    plain = []
    chained = iv  # previous ciphertext block, seeded with the IV
    for idx in range(n_blocks):
        start = idx * BLOCK_SIZE_BYTES
        cipher_block = data[start: start + BLOCK_SIZE_BYTES]
        # Zero-pad a short trailing block up to the full block size.
        cipher_block = cipher_block + [0] * (BLOCK_SIZE_BYTES - len(cipher_block))

        plain += xor(aes_decrypt(cipher_block, schedule), chained)
        chained = cipher_block

    # Trim the padding so the output length matches the input length.
    return plain[:total]
def aes_encrypt(data, expanded_key):
    """
    Encrypt one block with aes

    @param {int[]} data 16-Byte state
    @param {int[]} expanded_key 176/208/240-Byte expanded key
    @returns {int[]} 16-Byte cipher
    """
    n_rounds = len(expanded_key) // BLOCK_SIZE_BYTES - 1

    # Initial whitening with the first round key.
    state = xor(data, expanded_key[:BLOCK_SIZE_BYTES])
    for round_no in range(1, n_rounds + 1):
        state = shift_rows(sub_bytes(state))
        if round_no != n_rounds:
            # MixColumns is skipped in the final AES round.
            state = mix_columns(state)
        key_off = round_no * BLOCK_SIZE_BYTES
        state = xor(state, expanded_key[key_off: key_off + BLOCK_SIZE_BYTES])

    return state
NONCE_LENGTH_BYTES = 8 + + data = bytes_to_intlist(base64.b64decode(data)) + password = bytes_to_intlist(password.encode('utf-8')) + + key = password[:key_size_bytes] + [0] * (key_size_bytes - len(password)) + key = aes_encrypt(key[:BLOCK_SIZE_BYTES], key_expansion(key)) * (key_size_bytes // BLOCK_SIZE_BYTES) + + nonce = data[:NONCE_LENGTH_BYTES] + cipher = data[NONCE_LENGTH_BYTES:] + + class Counter: + __value = nonce + [0] * (BLOCK_SIZE_BYTES - NONCE_LENGTH_BYTES) + + def next_value(self): + temp = self.__value + self.__value = inc(self.__value) + return temp + + decrypted_data = aes_ctr_decrypt(cipher, key, Counter()) + plaintext = intlist_to_bytes(decrypted_data) + + return plaintext + +RCON = (0x8d, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36) +SBOX = (0x63, 0x7C, 0x77, 0x7B, 0xF2, 0x6B, 0x6F, 0xC5, 0x30, 0x01, 0x67, 0x2B, 0xFE, 0xD7, 0xAB, 0x76, + 0xCA, 0x82, 0xC9, 0x7D, 0xFA, 0x59, 0x47, 0xF0, 0xAD, 0xD4, 0xA2, 0xAF, 0x9C, 0xA4, 0x72, 0xC0, + 0xB7, 0xFD, 0x93, 0x26, 0x36, 0x3F, 0xF7, 0xCC, 0x34, 0xA5, 0xE5, 0xF1, 0x71, 0xD8, 0x31, 0x15, + 0x04, 0xC7, 0x23, 0xC3, 0x18, 0x96, 0x05, 0x9A, 0x07, 0x12, 0x80, 0xE2, 0xEB, 0x27, 0xB2, 0x75, + 0x09, 0x83, 0x2C, 0x1A, 0x1B, 0x6E, 0x5A, 0xA0, 0x52, 0x3B, 0xD6, 0xB3, 0x29, 0xE3, 0x2F, 0x84, + 0x53, 0xD1, 0x00, 0xED, 0x20, 0xFC, 0xB1, 0x5B, 0x6A, 0xCB, 0xBE, 0x39, 0x4A, 0x4C, 0x58, 0xCF, + 0xD0, 0xEF, 0xAA, 0xFB, 0x43, 0x4D, 0x33, 0x85, 0x45, 0xF9, 0x02, 0x7F, 0x50, 0x3C, 0x9F, 0xA8, + 0x51, 0xA3, 0x40, 0x8F, 0x92, 0x9D, 0x38, 0xF5, 0xBC, 0xB6, 0xDA, 0x21, 0x10, 0xFF, 0xF3, 0xD2, + 0xCD, 0x0C, 0x13, 0xEC, 0x5F, 0x97, 0x44, 0x17, 0xC4, 0xA7, 0x7E, 0x3D, 0x64, 0x5D, 0x19, 0x73, + 0x60, 0x81, 0x4F, 0xDC, 0x22, 0x2A, 0x90, 0x88, 0x46, 0xEE, 0xB8, 0x14, 0xDE, 0x5E, 0x0B, 0xDB, + 0xE0, 0x32, 0x3A, 0x0A, 0x49, 0x06, 0x24, 0x5C, 0xC2, 0xD3, 0xAC, 0x62, 0x91, 0x95, 0xE4, 0x79, + 0xE7, 0xC8, 0x37, 0x6D, 0x8D, 0xD5, 0x4E, 0xA9, 0x6C, 0x56, 0xF4, 0xEA, 0x65, 0x7A, 0xAE, 0x08, + 0xBA, 0x78, 0x25, 0x2E, 0x1C, 0xA6, 
0xB4, 0xC6, 0xE8, 0xDD, 0x74, 0x1F, 0x4B, 0xBD, 0x8B, 0x8A, + 0x70, 0x3E, 0xB5, 0x66, 0x48, 0x03, 0xF6, 0x0E, 0x61, 0x35, 0x57, 0xB9, 0x86, 0xC1, 0x1D, 0x9E, + 0xE1, 0xF8, 0x98, 0x11, 0x69, 0xD9, 0x8E, 0x94, 0x9B, 0x1E, 0x87, 0xE9, 0xCE, 0x55, 0x28, 0xDF, + 0x8C, 0xA1, 0x89, 0x0D, 0xBF, 0xE6, 0x42, 0x68, 0x41, 0x99, 0x2D, 0x0F, 0xB0, 0x54, 0xBB, 0x16) +SBOX_INV = (0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38, 0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb, + 0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87, 0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb, + 0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d, 0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e, + 0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2, 0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25, + 0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16, 0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92, + 0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda, 0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84, + 0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a, 0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06, + 0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02, 0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b, + 0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea, 0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73, + 0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85, 0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e, + 0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89, 0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b, + 0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20, 0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4, + 0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31, 0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f, + 0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d, 0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef, + 0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0, 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61, + 0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26, 0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d) +MIX_COLUMN_MATRIX = ((0x2, 0x3, 0x1, 0x1), + (0x1, 0x2, 0x3, 
0x1), + (0x1, 0x1, 0x2, 0x3), + (0x3, 0x1, 0x1, 0x2)) +MIX_COLUMN_MATRIX_INV = ((0xE, 0xB, 0xD, 0x9), + (0x9, 0xE, 0xB, 0xD), + (0xD, 0x9, 0xE, 0xB), + (0xB, 0xD, 0x9, 0xE)) +RIJNDAEL_EXP_TABLE = (0x01, 0x03, 0x05, 0x0F, 0x11, 0x33, 0x55, 0xFF, 0x1A, 0x2E, 0x72, 0x96, 0xA1, 0xF8, 0x13, 0x35, + 0x5F, 0xE1, 0x38, 0x48, 0xD8, 0x73, 0x95, 0xA4, 0xF7, 0x02, 0x06, 0x0A, 0x1E, 0x22, 0x66, 0xAA, + 0xE5, 0x34, 0x5C, 0xE4, 0x37, 0x59, 0xEB, 0x26, 0x6A, 0xBE, 0xD9, 0x70, 0x90, 0xAB, 0xE6, 0x31, + 0x53, 0xF5, 0x04, 0x0C, 0x14, 0x3C, 0x44, 0xCC, 0x4F, 0xD1, 0x68, 0xB8, 0xD3, 0x6E, 0xB2, 0xCD, + 0x4C, 0xD4, 0x67, 0xA9, 0xE0, 0x3B, 0x4D, 0xD7, 0x62, 0xA6, 0xF1, 0x08, 0x18, 0x28, 0x78, 0x88, + 0x83, 0x9E, 0xB9, 0xD0, 0x6B, 0xBD, 0xDC, 0x7F, 0x81, 0x98, 0xB3, 0xCE, 0x49, 0xDB, 0x76, 0x9A, + 0xB5, 0xC4, 0x57, 0xF9, 0x10, 0x30, 0x50, 0xF0, 0x0B, 0x1D, 0x27, 0x69, 0xBB, 0xD6, 0x61, 0xA3, + 0xFE, 0x19, 0x2B, 0x7D, 0x87, 0x92, 0xAD, 0xEC, 0x2F, 0x71, 0x93, 0xAE, 0xE9, 0x20, 0x60, 0xA0, + 0xFB, 0x16, 0x3A, 0x4E, 0xD2, 0x6D, 0xB7, 0xC2, 0x5D, 0xE7, 0x32, 0x56, 0xFA, 0x15, 0x3F, 0x41, + 0xC3, 0x5E, 0xE2, 0x3D, 0x47, 0xC9, 0x40, 0xC0, 0x5B, 0xED, 0x2C, 0x74, 0x9C, 0xBF, 0xDA, 0x75, + 0x9F, 0xBA, 0xD5, 0x64, 0xAC, 0xEF, 0x2A, 0x7E, 0x82, 0x9D, 0xBC, 0xDF, 0x7A, 0x8E, 0x89, 0x80, + 0x9B, 0xB6, 0xC1, 0x58, 0xE8, 0x23, 0x65, 0xAF, 0xEA, 0x25, 0x6F, 0xB1, 0xC8, 0x43, 0xC5, 0x54, + 0xFC, 0x1F, 0x21, 0x63, 0xA5, 0xF4, 0x07, 0x09, 0x1B, 0x2D, 0x77, 0x99, 0xB0, 0xCB, 0x46, 0xCA, + 0x45, 0xCF, 0x4A, 0xDE, 0x79, 0x8B, 0x86, 0x91, 0xA8, 0xE3, 0x3E, 0x42, 0xC6, 0x51, 0xF3, 0x0E, + 0x12, 0x36, 0x5A, 0xEE, 0x29, 0x7B, 0x8D, 0x8C, 0x8F, 0x8A, 0x85, 0x94, 0xA7, 0xF2, 0x0D, 0x17, + 0x39, 0x4B, 0xDD, 0x7C, 0x84, 0x97, 0xA2, 0xFD, 0x1C, 0x24, 0x6C, 0xB4, 0xC7, 0x52, 0xF6, 0x01) +RIJNDAEL_LOG_TABLE = (0x00, 0x00, 0x19, 0x01, 0x32, 0x02, 0x1a, 0xc6, 0x4b, 0xc7, 0x1b, 0x68, 0x33, 0xee, 0xdf, 0x03, + 0x64, 0x04, 0xe0, 0x0e, 0x34, 0x8d, 0x81, 0xef, 0x4c, 0x71, 0x08, 0xc8, 0xf8, 0x69, 0x1c, 0xc1, + 0x7d, 0xc2, 0x1d, 
0xb5, 0xf9, 0xb9, 0x27, 0x6a, 0x4d, 0xe4, 0xa6, 0x72, 0x9a, 0xc9, 0x09, 0x78, + 0x65, 0x2f, 0x8a, 0x05, 0x21, 0x0f, 0xe1, 0x24, 0x12, 0xf0, 0x82, 0x45, 0x35, 0x93, 0xda, 0x8e, + 0x96, 0x8f, 0xdb, 0xbd, 0x36, 0xd0, 0xce, 0x94, 0x13, 0x5c, 0xd2, 0xf1, 0x40, 0x46, 0x83, 0x38, + 0x66, 0xdd, 0xfd, 0x30, 0xbf, 0x06, 0x8b, 0x62, 0xb3, 0x25, 0xe2, 0x98, 0x22, 0x88, 0x91, 0x10, + 0x7e, 0x6e, 0x48, 0xc3, 0xa3, 0xb6, 0x1e, 0x42, 0x3a, 0x6b, 0x28, 0x54, 0xfa, 0x85, 0x3d, 0xba, + 0x2b, 0x79, 0x0a, 0x15, 0x9b, 0x9f, 0x5e, 0xca, 0x4e, 0xd4, 0xac, 0xe5, 0xf3, 0x73, 0xa7, 0x57, + 0xaf, 0x58, 0xa8, 0x50, 0xf4, 0xea, 0xd6, 0x74, 0x4f, 0xae, 0xe9, 0xd5, 0xe7, 0xe6, 0xad, 0xe8, + 0x2c, 0xd7, 0x75, 0x7a, 0xeb, 0x16, 0x0b, 0xf5, 0x59, 0xcb, 0x5f, 0xb0, 0x9c, 0xa9, 0x51, 0xa0, + 0x7f, 0x0c, 0xf6, 0x6f, 0x17, 0xc4, 0x49, 0xec, 0xd8, 0x43, 0x1f, 0x2d, 0xa4, 0x76, 0x7b, 0xb7, + 0xcc, 0xbb, 0x3e, 0x5a, 0xfb, 0x60, 0xb1, 0x86, 0x3b, 0x52, 0xa1, 0x6c, 0xaa, 0x55, 0x29, 0x9d, + 0x97, 0xb2, 0x87, 0x90, 0x61, 0xbe, 0xdc, 0xfc, 0xbc, 0x95, 0xcf, 0xcd, 0x37, 0x3f, 0x5b, 0xd1, + 0x53, 0x39, 0x84, 0x3c, 0x41, 0xa2, 0x6d, 0x47, 0x14, 0x2a, 0x9e, 0x5d, 0x56, 0xf2, 0xd3, 0xab, + 0x44, 0x11, 0x92, 0xd9, 0x23, 0x20, 0x2e, 0x89, 0xb4, 0x7c, 0xb8, 0x26, 0x77, 0x99, 0xe3, 0xa5, + 0x67, 0x4a, 0xed, 0xde, 0xc5, 0x31, 0xfe, 0x18, 0x0d, 0x63, 0x8c, 0x80, 0xc0, 0xf7, 0x70, 0x07) + + +def sub_bytes(data): + return [SBOX[x] for x in data] + + +def sub_bytes_inv(data): + return [SBOX_INV[x] for x in data] + + +def rotate(data): + return data[1:] + [data[0]] + + +def key_schedule_core(data, rcon_iteration): + data = rotate(data) + data = sub_bytes(data) + data[0] = data[0] ^ RCON[rcon_iteration] + + return data + + +def xor(data1, data2): + return [x ^ y for x, y in zip(data1, data2)] + + +def rijndael_mul(a, b): + if(a == 0 or b == 0): + return 0 + return RIJNDAEL_EXP_TABLE[(RIJNDAEL_LOG_TABLE[a] + RIJNDAEL_LOG_TABLE[b]) % 0xFF] + + +def mix_column(data, matrix): + data_mixed = [] + for row in range(4): + mixed = 0 + 
for column in range(4): + # xor is (+) and (-) + mixed ^= rijndael_mul(data[column], matrix[row][column]) + data_mixed.append(mixed) + return data_mixed + + +def mix_columns(data, matrix=MIX_COLUMN_MATRIX): + data_mixed = [] + for i in range(4): + column = data[i * 4: (i + 1) * 4] + data_mixed += mix_column(column, matrix) + return data_mixed + + +def mix_columns_inv(data): + return mix_columns(data, MIX_COLUMN_MATRIX_INV) + + +def shift_rows(data): + data_shifted = [] + for column in range(4): + for row in range(4): + data_shifted.append(data[((column + row) & 0b11) * 4 + row]) + return data_shifted + + +def shift_rows_inv(data): + data_shifted = [] + for column in range(4): + for row in range(4): + data_shifted.append(data[((column - row) & 0b11) * 4 + row]) + return data_shifted + + +def inc(data): + data = data[:] # copy + for i in range(len(data) - 1, -1, -1): + if data[i] == 255: + data[i] = 0 + else: + data[i] = data[i] + 1 + break + return data diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/cache.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/cache.py new file mode 100644 index 0000000000..5fe839eb12 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/cache.py @@ -0,0 +1,93 @@ +from __future__ import unicode_literals + +import errno +import io +import json +import os +import re +import shutil +import traceback + +from .compat import compat_expanduser, compat_getenv +from .utils import write_json_file + + +class Cache(object): + def __init__(self, ydl): + self._ydl = ydl + + def _get_root_dir(self): + res = self._ydl.params.get('cachedir') + if res is None: + cache_root = compat_getenv('XDG_CACHE_HOME', '~/.cache') + res = os.path.join(cache_root, 'youtube-dl') + return compat_expanduser(res) + + def _get_cache_fn(self, section, key, dtype): + assert re.match(r'^[a-zA-Z0-9_.-]+$', section), \ + 'invalid section %r' % section + assert re.match(r'^[a-zA-Z0-9_.-]+$', key), 'invalid key %r' % key 
+ return os.path.join( + self._get_root_dir(), section, '%s.%s' % (key, dtype)) + + @property + def enabled(self): + return self._ydl.params.get('cachedir') is not False + + def store(self, section, key, data, dtype='json'): + assert dtype in ('json',) + + if not self.enabled: + return + + fn = self._get_cache_fn(section, key, dtype) + try: + try: + os.makedirs(os.path.dirname(fn)) + except OSError as ose: + if ose.errno != errno.EEXIST: + raise + write_json_file(data, fn) + except Exception: + tb = traceback.format_exc() + self._ydl.report_warning( + 'Writing cache to %r failed: %s' % (fn, tb)) + + def load(self, section, key, dtype='json', default=None): + assert dtype in ('json',) + + if not self.enabled: + return default + + cache_fn = self._get_cache_fn(section, key, dtype) + try: + try: + with io.open(cache_fn, 'r', encoding='utf-8') as cachef: + return json.load(cachef) + except ValueError: + try: + file_size = os.path.getsize(cache_fn) + except (OSError, IOError) as oe: + file_size = str(oe) + self._ydl.report_warning( + 'Cache retrieval from %s failed (%s)' % (cache_fn, file_size)) + except IOError: + pass # No cache available + + return default + + def remove(self): + if not self.enabled: + self._ydl.to_screen('Cache is disabled (Did you combine --no-cache-dir and --rm-cache-dir?)') + return + + cachedir = self._get_root_dir() + if not any((term in cachedir) for term in ('cache', 'tmp')): + raise Exception('Not removing directory %s - this does not look like a cache dir' % cachedir) + + self._ydl.to_screen( + 'Removing cache dir %s .' 
% cachedir, skip_eol=True) + if os.path.exists(cachedir): + self._ydl.to_screen('.', skip_eol=True) + shutil.rmtree(cachedir) + self._ydl.to_screen('.') diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/compat.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/compat.py new file mode 100644 index 0000000000..46d4388467 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/compat.py @@ -0,0 +1,358 @@ +from __future__ import unicode_literals + +import getpass +import optparse +import os +import re +import subprocess +import sys + + +try: + import urllib.request as compat_urllib_request +except ImportError: # Python 2 + import urllib2 as compat_urllib_request + +try: + import urllib.error as compat_urllib_error +except ImportError: # Python 2 + import urllib2 as compat_urllib_error + +try: + import urllib.parse as compat_urllib_parse +except ImportError: # Python 2 + import urllib as compat_urllib_parse + +try: + from urllib.parse import urlparse as compat_urllib_parse_urlparse +except ImportError: # Python 2 + from urlparse import urlparse as compat_urllib_parse_urlparse + +try: + import urllib.parse as compat_urlparse +except ImportError: # Python 2 + import urlparse as compat_urlparse + +try: + import http.cookiejar as compat_cookiejar +except ImportError: # Python 2 + import cookielib as compat_cookiejar + +try: + import html.entities as compat_html_entities +except ImportError: # Python 2 + import htmlentitydefs as compat_html_entities + +try: + import html.parser as compat_html_parser +except ImportError: # Python 2 + import HTMLParser as compat_html_parser + +try: + import http.client as compat_http_client +except ImportError: # Python 2 + import httplib as compat_http_client + +try: + from urllib.error import HTTPError as compat_HTTPError +except ImportError: # Python 2 + from urllib2 import HTTPError as compat_HTTPError + +try: + from urllib.request import urlretrieve as compat_urlretrieve +except 
ImportError: # Python 2 + from urllib import urlretrieve as compat_urlretrieve + + +try: + from subprocess import DEVNULL + compat_subprocess_get_DEVNULL = lambda: DEVNULL +except ImportError: + compat_subprocess_get_DEVNULL = lambda: open(os.path.devnull, 'w') + +try: + from urllib.parse import unquote as compat_urllib_parse_unquote +except ImportError: + def compat_urllib_parse_unquote(string, encoding='utf-8', errors='replace'): + if string == '': + return string + res = string.split('%') + if len(res) == 1: + return string + if encoding is None: + encoding = 'utf-8' + if errors is None: + errors = 'replace' + # pct_sequence: contiguous sequence of percent-encoded bytes, decoded + pct_sequence = b'' + string = res[0] + for item in res[1:]: + try: + if not item: + raise ValueError + pct_sequence += item[:2].decode('hex') + rest = item[2:] + if not rest: + # This segment was just a single percent-encoded character. + # May be part of a sequence of code units, so delay decoding. + # (Stored in pct_sequence). + continue + except ValueError: + rest = '%' + item + # Encountered non-percent-encoded characters. Flush the current + # pct_sequence. + string += pct_sequence.decode(encoding, errors) + rest + pct_sequence = b'' + if pct_sequence: + # Flush the final pct_sequence + string += pct_sequence.decode(encoding, errors) + return string + + +try: + from urllib.parse import parse_qs as compat_parse_qs +except ImportError: # Python 2 + # HACK: The following is the correct parse_qs implementation from cpython 3's stdlib. 
+ # Python 2's version is apparently totally broken + + def _parse_qsl(qs, keep_blank_values=False, strict_parsing=False, + encoding='utf-8', errors='replace'): + qs, _coerce_result = qs, unicode + pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')] + r = [] + for name_value in pairs: + if not name_value and not strict_parsing: + continue + nv = name_value.split('=', 1) + if len(nv) != 2: + if strict_parsing: + raise ValueError("bad query field: %r" % (name_value,)) + # Handle case of a control-name with no equal sign + if keep_blank_values: + nv.append('') + else: + continue + if len(nv[1]) or keep_blank_values: + name = nv[0].replace('+', ' ') + name = compat_urllib_parse_unquote( + name, encoding=encoding, errors=errors) + name = _coerce_result(name) + value = nv[1].replace('+', ' ') + value = compat_urllib_parse_unquote( + value, encoding=encoding, errors=errors) + value = _coerce_result(value) + r.append((name, value)) + return r + + def compat_parse_qs(qs, keep_blank_values=False, strict_parsing=False, + encoding='utf-8', errors='replace'): + parsed_result = {} + pairs = _parse_qsl(qs, keep_blank_values, strict_parsing, + encoding=encoding, errors=errors) + for name, value in pairs: + if name in parsed_result: + parsed_result[name].append(value) + else: + parsed_result[name] = [value] + return parsed_result + +try: + compat_str = unicode # Python 2 +except NameError: + compat_str = str + +try: + compat_chr = unichr # Python 2 +except NameError: + compat_chr = chr + +try: + from xml.etree.ElementTree import ParseError as compat_xml_parse_error +except ImportError: # Python 2.6 + from xml.parsers.expat import ExpatError as compat_xml_parse_error + +try: + from shlex import quote as shlex_quote +except ImportError: # Python < 3.3 + def shlex_quote(s): + if re.match(r'^[-_\w./]+$', s): + return s + else: + return "'" + s.replace("'", "'\"'\"'") + "'" + + +def compat_ord(c): + if type(c) is int: + return c + else: + return ord(c) + + +if sys.version_info 
>= (3, 0): + compat_getenv = os.getenv + compat_expanduser = os.path.expanduser +else: + # Environment variables should be decoded with filesystem encoding. + # Otherwise it will fail if any non-ASCII characters present (see #3854 #3217 #2918) + + def compat_getenv(key, default=None): + from .utils import get_filesystem_encoding + env = os.getenv(key, default) + if env: + env = env.decode(get_filesystem_encoding()) + return env + + # HACK: The default implementations of os.path.expanduser from cpython do not decode + # environment variables with filesystem encoding. We will work around this by + # providing adjusted implementations. + # The following are os.path.expanduser implementations from cpython 2.7.8 stdlib + # for different platforms with correct environment variables decoding. + + if os.name == 'posix': + def compat_expanduser(path): + """Expand ~ and ~user constructions. If user or $HOME is unknown, + do nothing.""" + if not path.startswith('~'): + return path + i = path.find('/', 1) + if i < 0: + i = len(path) + if i == 1: + if 'HOME' not in os.environ: + import pwd + userhome = pwd.getpwuid(os.getuid()).pw_dir + else: + userhome = compat_getenv('HOME') + else: + import pwd + try: + pwent = pwd.getpwnam(path[1:i]) + except KeyError: + return path + userhome = pwent.pw_dir + userhome = userhome.rstrip('/') + return (userhome + path[i:]) or '/' + elif os.name == 'nt' or os.name == 'ce': + def compat_expanduser(path): + """Expand ~ and ~user constructs. 
+ + If user or $HOME is unknown, do nothing.""" + if path[:1] != '~': + return path + i, n = 1, len(path) + while i < n and path[i] not in '/\\': + i = i + 1 + + if 'HOME' in os.environ: + userhome = compat_getenv('HOME') + elif 'USERPROFILE' in os.environ: + userhome = compat_getenv('USERPROFILE') + elif 'HOMEPATH' not in os.environ: + return path + else: + try: + drive = compat_getenv('HOMEDRIVE') + except KeyError: + drive = '' + userhome = os.path.join(drive, compat_getenv('HOMEPATH')) + + if i != 1: # ~user + userhome = os.path.join(os.path.dirname(userhome), path[1:i]) + + return userhome + path[i:] + else: + compat_expanduser = os.path.expanduser + + +if sys.version_info < (3, 0): + def compat_print(s): + from .utils import preferredencoding + print(s.encode(preferredencoding(), 'xmlcharrefreplace')) +else: + def compat_print(s): + assert isinstance(s, compat_str) + print(s) + + +try: + subprocess_check_output = subprocess.check_output +except AttributeError: + def subprocess_check_output(*args, **kwargs): + assert 'input' not in kwargs + p = subprocess.Popen(*args, stdout=subprocess.PIPE, **kwargs) + output, _ = p.communicate() + ret = p.poll() + if ret: + raise subprocess.CalledProcessError(ret, p.args, output=output) + return output + +if sys.version_info < (3, 0) and sys.platform == 'win32': + def compat_getpass(prompt, *args, **kwargs): + if isinstance(prompt, compat_str): + from .utils import preferredencoding + prompt = prompt.encode(preferredencoding()) + return getpass.getpass(prompt, *args, **kwargs) +else: + compat_getpass = getpass.getpass + +# Old 2.6 and 2.7 releases require kwargs to be bytes +try: + def _testfunc(x): + pass + _testfunc(**{'x': 0}) +except TypeError: + def compat_kwargs(kwargs): + return dict((bytes(k), v) for k, v in kwargs.items()) +else: + compat_kwargs = lambda kwargs: kwargs + + +# Fix https://github.com/rg3/youtube-dl/issues/4223 +# See http://bugs.python.org/issue9161 for what is broken +def 
workaround_optparse_bug9161(): + op = optparse.OptionParser() + og = optparse.OptionGroup(op, 'foo') + try: + og.add_option('-t') + except TypeError: + real_add_option = optparse.OptionGroup.add_option + + def _compat_add_option(self, *args, **kwargs): + enc = lambda v: ( + v.encode('ascii', 'replace') if isinstance(v, compat_str) + else v) + bargs = [enc(a) for a in args] + bkwargs = dict( + (k, enc(v)) for k, v in kwargs.items()) + return real_add_option(self, *bargs, **bkwargs) + optparse.OptionGroup.add_option = _compat_add_option + + +__all__ = [ + 'compat_HTTPError', + 'compat_chr', + 'compat_cookiejar', + 'compat_expanduser', + 'compat_getenv', + 'compat_getpass', + 'compat_html_entities', + 'compat_html_parser', + 'compat_http_client', + 'compat_kwargs', + 'compat_ord', + 'compat_parse_qs', + 'compat_print', + 'compat_str', + 'compat_subprocess_get_DEVNULL', + 'compat_urllib_error', + 'compat_urllib_parse', + 'compat_urllib_parse_unquote', + 'compat_urllib_parse_urlparse', + 'compat_urllib_request', + 'compat_urlparse', + 'compat_urlretrieve', + 'compat_xml_parse_error', + 'shlex_quote', + 'subprocess_check_output', + 'workaround_optparse_bug9161', +] diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/downloader/__init__.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/downloader/__init__.py new file mode 100644 index 0000000000..31e28df58e --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/downloader/__init__.py @@ -0,0 +1,37 @@ +from __future__ import unicode_literals + +from .common import FileDownloader +from .hls import HlsFD +from .hls import NativeHlsFD +from .http import HttpFD +from .mplayer import MplayerFD +from .rtmp import RtmpFD +from .f4m import F4mFD + +from ..utils import ( + determine_ext, +) + + +def get_suitable_downloader(info_dict): + """Get the downloader class that can handle the info dict.""" + url = info_dict['url'] + protocol = info_dict.get('protocol') + + if 
url.startswith('rtmp'): + return RtmpFD + if protocol == 'm3u8_native': + return NativeHlsFD + if (protocol == 'm3u8') or (protocol is None and determine_ext(url) == 'm3u8'): + return HlsFD + if url.startswith('mms') or url.startswith('rtsp'): + return MplayerFD + if determine_ext(url) == 'f4m': + return F4mFD + else: + return HttpFD + +__all__ = [ + 'get_suitable_downloader', + 'FileDownloader', +] diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/downloader/common.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/downloader/common.py new file mode 100644 index 0000000000..584bde732f --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/downloader/common.py @@ -0,0 +1,310 @@ +from __future__ import unicode_literals + +import os +import re +import sys +import time + +from ..compat import compat_str +from ..utils import ( + encodeFilename, + format_bytes, + timeconvert, +) + + +class FileDownloader(object): + """File Downloader class. + + File downloader objects are the ones responsible of downloading the + actual video file and writing it to disk. + + File downloaders accept a lot of parameters. In order not to saturate + the object constructor with arguments, it receives a dictionary of + options instead. + + Available options: + + verbose: Print additional info to stdout. + quiet: Do not print messages to stdout. + ratelimit: Download speed limit, in bytes/sec. + retries: Number of times to retry for HTTP error 5xx + buffersize: Size of download buffer in bytes. + noresizebuffer: Do not automatically resize the download buffer. + continuedl: Try to continue downloads if possible. + noprogress: Do not print the progress bar. + logtostderr: Log messages to stderr instead of stdout. + consoletitle: Display progress in console window's titlebar. + nopart: Do not use temporary .part files. + updatetime: Use the Last-modified header to set output file timestamps. 
+ test: Download only first bytes to test the downloader. + min_filesize: Skip files smaller than this size + max_filesize: Skip files larger than this size + + Subclasses of this one must re-define the real_download method. + """ + + _TEST_FILE_SIZE = 10241 + params = None + + def __init__(self, ydl, params): + """Create a FileDownloader object with the given options.""" + self.ydl = ydl + self._progress_hooks = [] + self.params = params + + @staticmethod + def format_seconds(seconds): + (mins, secs) = divmod(seconds, 60) + (hours, mins) = divmod(mins, 60) + if hours > 99: + return '--:--:--' + if hours == 0: + return '%02d:%02d' % (mins, secs) + else: + return '%02d:%02d:%02d' % (hours, mins, secs) + + @staticmethod + def calc_percent(byte_counter, data_len): + if data_len is None: + return None + return float(byte_counter) / float(data_len) * 100.0 + + @staticmethod + def format_percent(percent): + if percent is None: + return '---.-%' + return '%6s' % ('%3.1f%%' % percent) + + @staticmethod + def calc_eta(start, now, total, current): + if total is None: + return None + if now is None: + now = time.time() + dif = now - start + if current == 0 or dif < 0.001: # One millisecond + return None + rate = float(current) / dif + return int((float(total) - float(current)) / rate) + + @staticmethod + def format_eta(eta): + if eta is None: + return '--:--' + return FileDownloader.format_seconds(eta) + + @staticmethod + def calc_speed(start, now, bytes): + dif = now - start + if bytes == 0 or dif < 0.001: # One millisecond + return None + return float(bytes) / dif + + @staticmethod + def format_speed(speed): + if speed is None: + return '%10s' % '---b/s' + return '%10s' % ('%s/s' % format_bytes(speed)) + + @staticmethod + def best_block_size(elapsed_time, bytes): + new_min = max(bytes / 2.0, 1.0) + new_max = min(max(bytes * 2.0, 1.0), 4194304) # Do not surpass 4 MB + if elapsed_time < 0.001: + return int(new_max) + rate = bytes / elapsed_time + if rate > new_max: + return 
int(new_max) + if rate < new_min: + return int(new_min) + return int(rate) + + @staticmethod + def parse_bytes(bytestr): + """Parse a string indicating a byte quantity into an integer.""" + matchobj = re.match(r'(?i)^(\d+(?:\.\d+)?)([kMGTPEZY]?)$', bytestr) + if matchobj is None: + return None + number = float(matchobj.group(1)) + multiplier = 1024.0 ** 'bkmgtpezy'.index(matchobj.group(2).lower()) + return int(round(number * multiplier)) + + def to_screen(self, *args, **kargs): + self.ydl.to_screen(*args, **kargs) + + def to_stderr(self, message): + self.ydl.to_screen(message) + + def to_console_title(self, message): + self.ydl.to_console_title(message) + + def trouble(self, *args, **kargs): + self.ydl.trouble(*args, **kargs) + + def report_warning(self, *args, **kargs): + self.ydl.report_warning(*args, **kargs) + + def report_error(self, *args, **kargs): + self.ydl.report_error(*args, **kargs) + + def slow_down(self, start_time, now, byte_counter): + """Sleep if the download speed is over the rate limit.""" + rate_limit = self.params.get('ratelimit', None) + if rate_limit is None or byte_counter == 0: + return + if now is None: + now = time.time() + elapsed = now - start_time + if elapsed <= 0.0: + return + speed = float(byte_counter) / elapsed + if speed > rate_limit: + time.sleep(max((byte_counter // rate_limit) - elapsed, 0)) + + def temp_name(self, filename): + """Returns a temporary filename for the given filename.""" + if self.params.get('nopart', False) or filename == '-' or \ + (os.path.exists(encodeFilename(filename)) and not os.path.isfile(encodeFilename(filename))): + return filename + return filename + '.part' + + def undo_temp_name(self, filename): + if filename.endswith('.part'): + return filename[:-len('.part')] + return filename + + def try_rename(self, old_filename, new_filename): + try: + if old_filename == new_filename: + return + os.rename(encodeFilename(old_filename), encodeFilename(new_filename)) + except (IOError, OSError) as err: + 
self.report_error('unable to rename file: %s' % compat_str(err)) + + def try_utime(self, filename, last_modified_hdr): + """Try to set the last-modified time of the given file.""" + if last_modified_hdr is None: + return + if not os.path.isfile(encodeFilename(filename)): + return + timestr = last_modified_hdr + if timestr is None: + return + filetime = timeconvert(timestr) + if filetime is None: + return filetime + # Ignore obviously invalid dates + if filetime == 0: + return + try: + os.utime(filename, (time.time(), filetime)) + except: + pass + return filetime + + def report_destination(self, filename): + """Report destination filename.""" + self.to_screen('[download] Destination: ' + filename) + + def _report_progress_status(self, msg, is_last_line=False): + fullmsg = '[download] ' + msg + if self.params.get('progress_with_newline', False): + self.to_screen(fullmsg) + else: + if os.name == 'nt': + prev_len = getattr(self, '_report_progress_prev_line_length', + 0) + if prev_len > len(fullmsg): + fullmsg += ' ' * (prev_len - len(fullmsg)) + self._report_progress_prev_line_length = len(fullmsg) + clear_line = '\r' + else: + clear_line = ('\r\x1b[K' if sys.stderr.isatty() else '\r') + self.to_screen(clear_line + fullmsg, skip_eol=not is_last_line) + self.to_console_title('youtube-dl ' + msg) + + def report_progress(self, percent, data_len_str, speed, eta): + """Report download progress.""" + if self.params.get('noprogress', False): + return + if eta is not None: + eta_str = self.format_eta(eta) + else: + eta_str = 'Unknown ETA' + if percent is not None: + percent_str = self.format_percent(percent) + else: + percent_str = 'Unknown %' + speed_str = self.format_speed(speed) + + msg = ('%s of %s at %s ETA %s' % + (percent_str, data_len_str, speed_str, eta_str)) + self._report_progress_status(msg) + + def report_progress_live_stream(self, downloaded_data_len, speed, elapsed): + if self.params.get('noprogress', False): + return + downloaded_str = 
format_bytes(downloaded_data_len) + speed_str = self.format_speed(speed) + elapsed_str = FileDownloader.format_seconds(elapsed) + msg = '%s at %s (%s)' % (downloaded_str, speed_str, elapsed_str) + self._report_progress_status(msg) + + def report_finish(self, data_len_str, tot_time): + """Report download finished.""" + if self.params.get('noprogress', False): + self.to_screen('[download] Download completed') + else: + self._report_progress_status( + ('100%% of %s in %s' % + (data_len_str, self.format_seconds(tot_time))), + is_last_line=True) + + def report_resuming_byte(self, resume_len): + """Report attempt to resume at given byte.""" + self.to_screen('[download] Resuming download at byte %s' % resume_len) + + def report_retry(self, count, retries): + """Report retry in case of HTTP error 5xx""" + self.to_screen('[download] Got server HTTP error. Retrying (attempt %d of %d)...' % (count, retries)) + + def report_file_already_downloaded(self, file_name): + """Report file has already been fully downloaded.""" + try: + self.to_screen('[download] %s has already been downloaded' % file_name) + except UnicodeEncodeError: + self.to_screen('[download] The file has already been downloaded') + + def report_unable_to_resume(self): + """Report it was impossible to resume download.""" + self.to_screen('[download] Unable to resume') + + def download(self, filename, info_dict): + """Download to a filename using the info from info_dict + Return True on success and False otherwise + """ + # Check file already present + if filename != '-' and self.params.get('continuedl', False) and os.path.isfile(encodeFilename(filename)) and not self.params.get('nopart', False): + self.report_file_already_downloaded(filename) + self._hook_progress({ + 'filename': filename, + 'status': 'finished', + 'total_bytes': os.path.getsize(encodeFilename(filename)), + }) + return True + + return self.real_download(filename, info_dict) + + def real_download(self, filename, info_dict): + """Real download 
process. Redefine in subclasses.""" + raise NotImplementedError('This method must be implemented by subclasses') + + def _hook_progress(self, status): + for ph in self._progress_hooks: + ph(status) + + def add_progress_hook(self, ph): + # See YoutubeDl.py (search for progress_hooks) for a description of + # this interface + self._progress_hooks.append(ph) diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/downloader/f4m.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/downloader/f4m.py new file mode 100644 index 0000000000..f9f6f3e734 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/downloader/f4m.py @@ -0,0 +1,339 @@ +from __future__ import unicode_literals + +import base64 +import io +import itertools +import os +import time +import xml.etree.ElementTree as etree + +from .common import FileDownloader +from .http import HttpFD +from ..compat import ( + compat_urlparse, +) +from ..utils import ( + struct_pack, + struct_unpack, + format_bytes, + encodeFilename, + sanitize_open, + xpath_text, +) + + +class FlvReader(io.BytesIO): + """ + Reader for Flv files + The file format is documented in https://www.adobe.com/devnet/f4v.html + """ + + # Utility functions for reading numbers and strings + def read_unsigned_long_long(self): + return struct_unpack('!Q', self.read(8))[0] + + def read_unsigned_int(self): + return struct_unpack('!I', self.read(4))[0] + + def read_unsigned_char(self): + return struct_unpack('!B', self.read(1))[0] + + def read_string(self): + res = b'' + while True: + char = self.read(1) + if char == b'\x00': + break + res += char + return res + + def read_box_info(self): + """ + Read a box and return the info as a tuple: (box_size, box_type, box_data) + """ + real_size = size = self.read_unsigned_int() + box_type = self.read(4) + header_end = 8 + if size == 1: + real_size = self.read_unsigned_long_long() + header_end = 16 + return real_size, box_type, self.read(real_size - header_end) + + 
def read_asrt(self): + # version + self.read_unsigned_char() + # flags + self.read(3) + quality_entry_count = self.read_unsigned_char() + # QualityEntryCount + for i in range(quality_entry_count): + self.read_string() + + segment_run_count = self.read_unsigned_int() + segments = [] + for i in range(segment_run_count): + first_segment = self.read_unsigned_int() + fragments_per_segment = self.read_unsigned_int() + segments.append((first_segment, fragments_per_segment)) + + return { + 'segment_run': segments, + } + + def read_afrt(self): + # version + self.read_unsigned_char() + # flags + self.read(3) + # time scale + self.read_unsigned_int() + + quality_entry_count = self.read_unsigned_char() + # QualitySegmentUrlModifiers + for i in range(quality_entry_count): + self.read_string() + + fragments_count = self.read_unsigned_int() + fragments = [] + for i in range(fragments_count): + first = self.read_unsigned_int() + first_ts = self.read_unsigned_long_long() + duration = self.read_unsigned_int() + if duration == 0: + discontinuity_indicator = self.read_unsigned_char() + else: + discontinuity_indicator = None + fragments.append({ + 'first': first, + 'ts': first_ts, + 'duration': duration, + 'discontinuity_indicator': discontinuity_indicator, + }) + + return { + 'fragments': fragments, + } + + def read_abst(self): + # version + self.read_unsigned_char() + # flags + self.read(3) + + self.read_unsigned_int() # BootstrapinfoVersion + # Profile,Live,Update,Reserved + self.read(1) + # time scale + self.read_unsigned_int() + # CurrentMediaTime + self.read_unsigned_long_long() + # SmpteTimeCodeOffset + self.read_unsigned_long_long() + + self.read_string() # MovieIdentifier + server_count = self.read_unsigned_char() + # ServerEntryTable + for i in range(server_count): + self.read_string() + quality_count = self.read_unsigned_char() + # QualityEntryTable + for i in range(quality_count): + self.read_string() + # DrmData + self.read_string() + # MetaData + self.read_string() + + 
segments_count = self.read_unsigned_char() + segments = [] + for i in range(segments_count): + box_size, box_type, box_data = self.read_box_info() + assert box_type == b'asrt' + segment = FlvReader(box_data).read_asrt() + segments.append(segment) + fragments_run_count = self.read_unsigned_char() + fragments = [] + for i in range(fragments_run_count): + box_size, box_type, box_data = self.read_box_info() + assert box_type == b'afrt' + fragments.append(FlvReader(box_data).read_afrt()) + + return { + 'segments': segments, + 'fragments': fragments, + } + + def read_bootstrap_info(self): + total_size, box_type, box_data = self.read_box_info() + assert box_type == b'abst' + return FlvReader(box_data).read_abst() + + +def read_bootstrap_info(bootstrap_bytes): + return FlvReader(bootstrap_bytes).read_bootstrap_info() + + +def build_fragments_list(boot_info): + """ Return a list of (segment, fragment) for each fragment in the video """ + res = [] + segment_run_table = boot_info['segments'][0] + # I've only found videos with one segment + segment_run_entry = segment_run_table['segment_run'][0] + n_frags = segment_run_entry[1] + fragment_run_entry_table = boot_info['fragments'][0]['fragments'] + first_frag_number = fragment_run_entry_table[0]['first'] + for (i, frag_number) in zip(range(1, n_frags + 1), itertools.count(first_frag_number)): + res.append((1, frag_number)) + return res + + +def write_flv_header(stream, metadata): + """Writes the FLV header and the metadata to stream""" + # FLV header + stream.write(b'FLV\x01') + stream.write(b'\x05') + stream.write(b'\x00\x00\x00\x09') + # FLV File body + stream.write(b'\x00\x00\x00\x00') + # FLVTAG + # Script data + stream.write(b'\x12') + # Size of the metadata with 3 bytes + stream.write(struct_pack('!L', len(metadata))[1:]) + stream.write(b'\x00\x00\x00\x00\x00\x00\x00') + stream.write(metadata) + # Magic numbers extracted from the output files produced by AdobeHDS.php + # (https://github.com/K-S-V/Scripts) + 
stream.write(b'\x00\x00\x01\x73') + + +def _add_ns(prop): + return '{http://ns.adobe.com/f4m/1.0}%s' % prop + + +class HttpQuietDownloader(HttpFD): + def to_screen(self, *args, **kargs): + pass + + +class F4mFD(FileDownloader): + """ + A downloader for f4m manifests or AdobeHDS. + """ + + def real_download(self, filename, info_dict): + man_url = info_dict['url'] + requested_bitrate = info_dict.get('tbr') + self.to_screen('[download] Downloading f4m manifest') + manifest = self.ydl.urlopen(man_url).read() + self.report_destination(filename) + http_dl = HttpQuietDownloader( + self.ydl, + { + 'continuedl': True, + 'quiet': True, + 'noprogress': True, + 'ratelimit': self.params.get('ratelimit', None), + 'test': self.params.get('test', False), + } + ) + + doc = etree.fromstring(manifest) + formats = [(int(f.attrib.get('bitrate', -1)), f) for f in doc.findall(_add_ns('media'))] + if requested_bitrate is None: + # get the best format + formats = sorted(formats, key=lambda f: f[0]) + rate, media = formats[-1] + else: + rate, media = list(filter( + lambda f: int(f[0]) == requested_bitrate, formats))[0] + + base_url = compat_urlparse.urljoin(man_url, media.attrib['url']) + bootstrap_node = doc.find(_add_ns('bootstrapInfo')) + if bootstrap_node.text is None: + bootstrap_url = compat_urlparse.urljoin( + base_url, bootstrap_node.attrib['url']) + bootstrap = self.ydl.urlopen(bootstrap_url).read() + else: + bootstrap = base64.b64decode(bootstrap_node.text) + metadata = base64.b64decode(media.find(_add_ns('metadata')).text) + boot_info = read_bootstrap_info(bootstrap) + + fragments_list = build_fragments_list(boot_info) + if self.params.get('test', False): + # We only download the first fragment + fragments_list = fragments_list[:1] + total_frags = len(fragments_list) + # For some akamai manifests we'll need to add a query to the fragment url + akamai_pv = xpath_text(doc, _add_ns('pv-2.0')) + + tmpfilename = self.temp_name(filename) + (dest_stream, tmpfilename) = 
sanitize_open(tmpfilename, 'wb') + write_flv_header(dest_stream, metadata) + + # This dict stores the download progress, it's updated by the progress + # hook + state = { + 'downloaded_bytes': 0, + 'frag_counter': 0, + } + start = time.time() + + def frag_progress_hook(status): + frag_total_bytes = status.get('total_bytes', 0) + estimated_size = (state['downloaded_bytes'] + + (total_frags - state['frag_counter']) * frag_total_bytes) + if status['status'] == 'finished': + state['downloaded_bytes'] += frag_total_bytes + state['frag_counter'] += 1 + progress = self.calc_percent(state['frag_counter'], total_frags) + byte_counter = state['downloaded_bytes'] + else: + frag_downloaded_bytes = status['downloaded_bytes'] + byte_counter = state['downloaded_bytes'] + frag_downloaded_bytes + frag_progress = self.calc_percent(frag_downloaded_bytes, + frag_total_bytes) + progress = self.calc_percent(state['frag_counter'], total_frags) + progress += frag_progress / float(total_frags) + + eta = self.calc_eta(start, time.time(), estimated_size, byte_counter) + self.report_progress(progress, format_bytes(estimated_size), + status.get('speed'), eta) + http_dl.add_progress_hook(frag_progress_hook) + + frags_filenames = [] + for (seg_i, frag_i) in fragments_list: + name = 'Seg%d-Frag%d' % (seg_i, frag_i) + url = base_url + name + if akamai_pv: + url += '?' 
+ akamai_pv.strip(';') + frag_filename = '%s-%s' % (tmpfilename, name) + success = http_dl.download(frag_filename, {'url': url}) + if not success: + return False + with open(frag_filename, 'rb') as down: + down_data = down.read() + reader = FlvReader(down_data) + while True: + _, box_type, box_data = reader.read_box_info() + if box_type == b'mdat': + dest_stream.write(box_data) + break + frags_filenames.append(frag_filename) + + dest_stream.close() + self.report_finish(format_bytes(state['downloaded_bytes']), time.time() - start) + + self.try_rename(tmpfilename, filename) + for frag_file in frags_filenames: + os.remove(frag_file) + + fsize = os.path.getsize(encodeFilename(filename)) + self._hook_progress({ + 'downloaded_bytes': fsize, + 'total_bytes': fsize, + 'filename': filename, + 'status': 'finished', + }) + + return True diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/downloader/hls.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/downloader/hls.py new file mode 100644 index 0000000000..5bb0f3cfd1 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/downloader/hls.py @@ -0,0 +1,109 @@ +from __future__ import unicode_literals + +import os +import re +import subprocess + +from ..postprocessor.ffmpeg import FFmpegPostProcessor +from .common import FileDownloader +from ..compat import ( + compat_urlparse, + compat_urllib_request, +) +from ..utils import ( + check_executable, + encodeFilename, +) + + +class HlsFD(FileDownloader): + def real_download(self, filename, info_dict): + url = info_dict['url'] + self.report_destination(filename) + tmpfilename = self.temp_name(filename) + + args = [ + '-y', '-i', url, '-f', 'mp4', '-c', 'copy', + '-bsf:a', 'aac_adtstoasc', + encodeFilename(tmpfilename, for_subprocess=True)] + + for program in ['avconv', 'ffmpeg']: + if check_executable(program, ['-version']): + break + else: + self.report_error('m3u8 download detected but ffmpeg or avconv could not be found. 
Please install one.') + return False + cmd = [program] + args + + ffpp = FFmpegPostProcessor(downloader=self) + ffpp.check_version() + + retval = subprocess.call(cmd) + if retval == 0: + fsize = os.path.getsize(encodeFilename(tmpfilename)) + self.to_screen('\r[%s] %s bytes' % (cmd[0], fsize)) + self.try_rename(tmpfilename, filename) + self._hook_progress({ + 'downloaded_bytes': fsize, + 'total_bytes': fsize, + 'filename': filename, + 'status': 'finished', + }) + return True + else: + self.to_stderr('\n') + self.report_error('%s exited with code %d' % (program, retval)) + return False + + +class NativeHlsFD(FileDownloader): + """ A more limited implementation that does not require ffmpeg """ + + def real_download(self, filename, info_dict): + url = info_dict['url'] + self.report_destination(filename) + tmpfilename = self.temp_name(filename) + + self.to_screen( + '[hlsnative] %s: Downloading m3u8 manifest' % info_dict['id']) + data = self.ydl.urlopen(url).read() + s = data.decode('utf-8', 'ignore') + segment_urls = [] + for line in s.splitlines(): + line = line.strip() + if line and not line.startswith('#'): + segment_url = ( + line + if re.match(r'^https?://', line) + else compat_urlparse.urljoin(url, line)) + segment_urls.append(segment_url) + + is_test = self.params.get('test', False) + remaining_bytes = self._TEST_FILE_SIZE if is_test else None + byte_counter = 0 + with open(tmpfilename, 'wb') as outf: + for i, segurl in enumerate(segment_urls): + self.to_screen( + '[hlsnative] %s: Downloading segment %d / %d' % + (info_dict['id'], i + 1, len(segment_urls))) + seg_req = compat_urllib_request.Request(segurl) + if remaining_bytes is not None: + seg_req.add_header('Range', 'bytes=0-%d' % (remaining_bytes - 1)) + + segment = self.ydl.urlopen(seg_req).read() + if remaining_bytes is not None: + segment = segment[:remaining_bytes] + remaining_bytes -= len(segment) + outf.write(segment) + byte_counter += len(segment) + if remaining_bytes is not None and remaining_bytes 
<= 0: + break + + self._hook_progress({ + 'downloaded_bytes': byte_counter, + 'total_bytes': byte_counter, + 'filename': filename, + 'status': 'finished', + }) + self.try_rename(tmpfilename, filename) + return True diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/downloader/http.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/downloader/http.py new file mode 100644 index 0000000000..e68f20c9f4 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/downloader/http.py @@ -0,0 +1,228 @@ +from __future__ import unicode_literals + +import os +import time + +from .common import FileDownloader +from ..compat import ( + compat_urllib_request, + compat_urllib_error, +) +from ..utils import ( + ContentTooShortError, + encodeFilename, + sanitize_open, + format_bytes, +) + + +class HttpFD(FileDownloader): + def real_download(self, filename, info_dict): + url = info_dict['url'] + tmpfilename = self.temp_name(filename) + stream = None + + # Do not include the Accept-Encoding header + headers = {'Youtubedl-no-compression': 'True'} + if 'user_agent' in info_dict: + headers['Youtubedl-user-agent'] = info_dict['user_agent'] + if 'http_referer' in info_dict: + headers['Referer'] = info_dict['http_referer'] + add_headers = info_dict.get('http_headers') + if add_headers: + headers.update(add_headers) + data = info_dict.get('http_post_data') + http_method = info_dict.get('http_method') + basic_request = compat_urllib_request.Request(url, data, headers) + request = compat_urllib_request.Request(url, data, headers) + if http_method is not None: + basic_request.get_method = lambda: http_method + request.get_method = lambda: http_method + + is_test = self.params.get('test', False) + + if is_test: + request.add_header('Range', 'bytes=0-%s' % str(self._TEST_FILE_SIZE - 1)) + + # Establish possible resume length + if os.path.isfile(encodeFilename(tmpfilename)): + resume_len = os.path.getsize(encodeFilename(tmpfilename)) + else: + 
resume_len = 0 + + open_mode = 'wb' + if resume_len != 0: + if self.params.get('continuedl', False): + self.report_resuming_byte(resume_len) + request.add_header('Range', 'bytes=%d-' % resume_len) + open_mode = 'ab' + else: + resume_len = 0 + + count = 0 + retries = self.params.get('retries', 0) + while count <= retries: + # Establish connection + try: + data = self.ydl.urlopen(request) + break + except (compat_urllib_error.HTTPError, ) as err: + if (err.code < 500 or err.code >= 600) and err.code != 416: + # Unexpected HTTP error + raise + elif err.code == 416: + # Unable to resume (requested range not satisfiable) + try: + # Open the connection again without the range header + data = self.ydl.urlopen(basic_request) + content_length = data.info()['Content-Length'] + except (compat_urllib_error.HTTPError, ) as err: + if err.code < 500 or err.code >= 600: + raise + else: + # Examine the reported length + if (content_length is not None and + (resume_len - 100 < int(content_length) < resume_len + 100)): + # The file had already been fully downloaded. + # Explanation to the above condition: in issue #175 it was revealed that + # YouTube sometimes adds or removes a few bytes from the end of the file, + # changing the file size slightly and causing problems for some users. So + # I decided to implement a suggested change and consider the file + # completely downloaded if the file size differs less than 100 bytes from + # the one in the hard drive. 
+ self.report_file_already_downloaded(filename) + self.try_rename(tmpfilename, filename) + self._hook_progress({ + 'filename': filename, + 'status': 'finished', + }) + return True + else: + # The length does not match, we start the download over + self.report_unable_to_resume() + resume_len = 0 + open_mode = 'wb' + break + # Retry + count += 1 + if count <= retries: + self.report_retry(count, retries) + + if count > retries: + self.report_error('giving up after %s retries' % retries) + return False + + data_len = data.info().get('Content-length', None) + + # Range HTTP header may be ignored/unsupported by a webserver + # (e.g. extractor/scivee.py, extractor/bambuser.py). + # However, for a test we still would like to download just a piece of a file. + # To achieve this we limit data_len to _TEST_FILE_SIZE and manually control + # block size when downloading a file. + if is_test and (data_len is None or int(data_len) > self._TEST_FILE_SIZE): + data_len = self._TEST_FILE_SIZE + + if data_len is not None: + data_len = int(data_len) + resume_len + min_data_len = self.params.get("min_filesize", None) + max_data_len = self.params.get("max_filesize", None) + if min_data_len is not None and data_len < min_data_len: + self.to_screen('\r[download] File is smaller than min-filesize (%s bytes < %s bytes). Aborting.' % (data_len, min_data_len)) + return False + if max_data_len is not None and data_len > max_data_len: + self.to_screen('\r[download] File is larger than max-filesize (%s bytes > %s bytes). Aborting.' 
% (data_len, max_data_len)) + return False + + data_len_str = format_bytes(data_len) + byte_counter = 0 + resume_len + block_size = self.params.get('buffersize', 1024) + start = time.time() + + # measure time over whole while-loop, so slow_down() and best_block_size() work together properly + now = None # needed for slow_down() in the first loop run + before = start # start measuring + while True: + + # Download and write + data_block = data.read(block_size if not is_test else min(block_size, data_len - byte_counter)) + byte_counter += len(data_block) + + # exit loop when download is finished + if len(data_block) == 0: + break + + # Open destination file just in time + if stream is None: + try: + (stream, tmpfilename) = sanitize_open(tmpfilename, open_mode) + assert stream is not None + filename = self.undo_temp_name(tmpfilename) + self.report_destination(filename) + except (OSError, IOError) as err: + self.report_error('unable to open for writing: %s' % str(err)) + return False + try: + stream.write(data_block) + except (IOError, OSError) as err: + self.to_stderr('\n') + self.report_error('unable to write data: %s' % str(err)) + return False + + # Apply rate limit + self.slow_down(start, now, byte_counter - resume_len) + + # end measuring of one loop run + now = time.time() + after = now + + # Adjust block size + if not self.params.get('noresizebuffer', False): + block_size = self.best_block_size(after - before, len(data_block)) + + before = after + + # Progress message + speed = self.calc_speed(start, now, byte_counter - resume_len) + if data_len is None: + eta = percent = None + else: + percent = self.calc_percent(byte_counter, data_len) + eta = self.calc_eta(start, time.time(), data_len - resume_len, byte_counter - resume_len) + self.report_progress(percent, data_len_str, speed, eta) + + self._hook_progress({ + 'downloaded_bytes': byte_counter, + 'total_bytes': data_len, + 'tmpfilename': tmpfilename, + 'filename': filename, + 'status': 'downloading', + 'eta': 
eta, + 'speed': speed, + }) + + if is_test and byte_counter == data_len: + break + + if stream is None: + self.to_stderr('\n') + self.report_error('Did not get any data blocks') + return False + if tmpfilename != '-': + stream.close() + self.report_finish(data_len_str, (time.time() - start)) + if data_len is not None and byte_counter != data_len: + raise ContentTooShortError(byte_counter, int(data_len)) + self.try_rename(tmpfilename, filename) + + # Update file modification time + if self.params.get('updatetime', True): + info_dict['filetime'] = self.try_utime(filename, data.info().get('last-modified', None)) + + self._hook_progress({ + 'downloaded_bytes': byte_counter, + 'total_bytes': byte_counter, + 'filename': filename, + 'status': 'finished', + }) + + return True diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/downloader/mplayer.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/downloader/mplayer.py new file mode 100644 index 0000000000..c53195da0c --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/downloader/mplayer.py @@ -0,0 +1,47 @@ +from __future__ import unicode_literals + +import os +import subprocess + +from .common import FileDownloader +from ..compat import compat_subprocess_get_DEVNULL +from ..utils import ( + encodeFilename, +) + + +class MplayerFD(FileDownloader): + def real_download(self, filename, info_dict): + url = info_dict['url'] + self.report_destination(filename) + tmpfilename = self.temp_name(filename) + + args = [ + 'mplayer', '-really-quiet', '-vo', 'null', '-vc', 'dummy', + '-dumpstream', '-dumpfile', tmpfilename, url] + # Check for mplayer first + try: + subprocess.call( + ['mplayer', '-h'], + stdout=compat_subprocess_get_DEVNULL(), stderr=subprocess.STDOUT) + except (OSError, IOError): + self.report_error('MMS or RTSP download detected but "%s" could not be run' % args[0]) + return False + + # Download using mplayer. 
+ retval = subprocess.call(args) + if retval == 0: + fsize = os.path.getsize(encodeFilename(tmpfilename)) + self.to_screen('\r[%s] %s bytes' % (args[0], fsize)) + self.try_rename(tmpfilename, filename) + self._hook_progress({ + 'downloaded_bytes': fsize, + 'total_bytes': fsize, + 'filename': filename, + 'status': 'finished', + }) + return True + else: + self.to_stderr('\n') + self.report_error('mplayer exited with code %d' % retval) + return False diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/downloader/rtmp.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/downloader/rtmp.py new file mode 100644 index 0000000000..5346cb9a0a --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/downloader/rtmp.py @@ -0,0 +1,207 @@ +from __future__ import unicode_literals + +import os +import re +import subprocess +import sys +import time + +from .common import FileDownloader +from ..compat import compat_str +from ..utils import ( + check_executable, + encodeFilename, + format_bytes, + get_exe_version, +) + + +def rtmpdump_version(): + return get_exe_version( + 'rtmpdump', ['--help'], r'(?i)RTMPDump\s*v?([0-9a-zA-Z._-]+)') + + +class RtmpFD(FileDownloader): + def real_download(self, filename, info_dict): + def run_rtmpdump(args): + start = time.time() + resume_percent = None + resume_downloaded_data_len = None + proc = subprocess.Popen(args, stderr=subprocess.PIPE) + cursor_in_new_line = True + proc_stderr_closed = False + while not proc_stderr_closed: + # read line from stderr + line = '' + while True: + char = proc.stderr.read(1) + if not char: + proc_stderr_closed = True + break + if char in [b'\r', b'\n']: + break + line += char.decode('ascii', 'replace') + if not line: + # proc_stderr_closed is True + continue + mobj = re.search(r'([0-9]+\.[0-9]{3}) kB / [0-9]+\.[0-9]{2} sec \(([0-9]{1,2}\.[0-9])%\)', line) + if mobj: + downloaded_data_len = int(float(mobj.group(1)) * 1024) + percent = float(mobj.group(2)) + if not 
resume_percent: + resume_percent = percent + resume_downloaded_data_len = downloaded_data_len + eta = self.calc_eta(start, time.time(), 100 - resume_percent, percent - resume_percent) + speed = self.calc_speed(start, time.time(), downloaded_data_len - resume_downloaded_data_len) + data_len = None + if percent > 0: + data_len = int(downloaded_data_len * 100 / percent) + data_len_str = '~' + format_bytes(data_len) + self.report_progress(percent, data_len_str, speed, eta) + cursor_in_new_line = False + self._hook_progress({ + 'downloaded_bytes': downloaded_data_len, + 'total_bytes': data_len, + 'tmpfilename': tmpfilename, + 'filename': filename, + 'status': 'downloading', + 'eta': eta, + 'speed': speed, + }) + else: + # no percent for live streams + mobj = re.search(r'([0-9]+\.[0-9]{3}) kB / [0-9]+\.[0-9]{2} sec', line) + if mobj: + downloaded_data_len = int(float(mobj.group(1)) * 1024) + time_now = time.time() + speed = self.calc_speed(start, time_now, downloaded_data_len) + self.report_progress_live_stream(downloaded_data_len, speed, time_now - start) + cursor_in_new_line = False + self._hook_progress({ + 'downloaded_bytes': downloaded_data_len, + 'tmpfilename': tmpfilename, + 'filename': filename, + 'status': 'downloading', + 'speed': speed, + }) + elif self.params.get('verbose', False): + if not cursor_in_new_line: + self.to_screen('') + cursor_in_new_line = True + self.to_screen('[rtmpdump] ' + line) + proc.wait() + if not cursor_in_new_line: + self.to_screen('') + return proc.returncode + + url = info_dict['url'] + player_url = info_dict.get('player_url', None) + page_url = info_dict.get('page_url', None) + app = info_dict.get('app', None) + play_path = info_dict.get('play_path', None) + tc_url = info_dict.get('tc_url', None) + flash_version = info_dict.get('flash_version', None) + live = info_dict.get('rtmp_live', False) + conn = info_dict.get('rtmp_conn', None) + protocol = info_dict.get('rtmp_protocol', None) + + self.report_destination(filename) + 
tmpfilename = self.temp_name(filename) + test = self.params.get('test', False) + + # Check for rtmpdump first + if not check_executable('rtmpdump', ['-h']): + self.report_error('RTMP download detected but "rtmpdump" could not be run. Please install it.') + return False + + # Download using rtmpdump. rtmpdump returns exit code 2 when + # the connection was interrumpted and resuming appears to be + # possible. This is part of rtmpdump's normal usage, AFAIK. + basic_args = ['rtmpdump', '--verbose', '-r', url, '-o', tmpfilename] + if player_url is not None: + basic_args += ['--swfVfy', player_url] + if page_url is not None: + basic_args += ['--pageUrl', page_url] + if app is not None: + basic_args += ['--app', app] + if play_path is not None: + basic_args += ['--playpath', play_path] + if tc_url is not None: + basic_args += ['--tcUrl', url] + if test: + basic_args += ['--stop', '1'] + if flash_version is not None: + basic_args += ['--flashVer', flash_version] + if live: + basic_args += ['--live'] + if isinstance(conn, list): + for entry in conn: + basic_args += ['--conn', entry] + elif isinstance(conn, compat_str): + basic_args += ['--conn', conn] + if protocol is not None: + basic_args += ['--protocol', protocol] + args = basic_args + [[], ['--resume', '--skip', '1']][not live and self.params.get('continuedl', False)] + + if sys.platform == 'win32' and sys.version_info < (3, 0): + # Windows subprocess module does not actually support Unicode + # on Python 2.x + # See http://stackoverflow.com/a/9951851/35070 + subprocess_encoding = sys.getfilesystemencoding() + args = [a.encode(subprocess_encoding, 'ignore') for a in args] + else: + subprocess_encoding = None + + if self.params.get('verbose', False): + if subprocess_encoding: + str_args = [ + a.decode(subprocess_encoding) if isinstance(a, bytes) else a + for a in args] + else: + str_args = args + try: + import pipes + shell_quote = lambda args: ' '.join(map(pipes.quote, str_args)) + except ImportError: + shell_quote = 
repr + self.to_screen('[debug] rtmpdump command line: ' + shell_quote(str_args)) + + RD_SUCCESS = 0 + RD_FAILED = 1 + RD_INCOMPLETE = 2 + RD_NO_CONNECT = 3 + + retval = run_rtmpdump(args) + + if retval == RD_NO_CONNECT: + self.report_error('[rtmpdump] Could not connect to RTMP server.') + return False + + while (retval == RD_INCOMPLETE or retval == RD_FAILED) and not test and not live: + prevsize = os.path.getsize(encodeFilename(tmpfilename)) + self.to_screen('[rtmpdump] %s bytes' % prevsize) + time.sleep(5.0) # This seems to be needed + retval = run_rtmpdump(basic_args + ['-e'] + [[], ['-k', '1']][retval == RD_FAILED]) + cursize = os.path.getsize(encodeFilename(tmpfilename)) + if prevsize == cursize and retval == RD_FAILED: + break + # Some rtmp streams seem abort after ~ 99.8%. Don't complain for those + if prevsize == cursize and retval == RD_INCOMPLETE and cursize > 1024: + self.to_screen('[rtmpdump] Could not download the whole video. This can happen for some advertisements.') + retval = RD_SUCCESS + break + if retval == RD_SUCCESS or (test and retval == RD_INCOMPLETE): + fsize = os.path.getsize(encodeFilename(tmpfilename)) + self.to_screen('[rtmpdump] %s bytes' % fsize) + self.try_rename(tmpfilename, filename) + self._hook_progress({ + 'downloaded_bytes': fsize, + 'total_bytes': fsize, + 'filename': filename, + 'status': 'finished', + }) + return True + else: + self.to_stderr('\n') + self.report_error('rtmpdump exited with code %d' % retval) + return False diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/__init__.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/__init__.py new file mode 100644 index 0000000000..92aca503c1 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/__init__.py @@ -0,0 +1,564 @@ +from __future__ import unicode_literals + +from .abc import ABCIE +from .academicearth import AcademicEarthCourseIE +from .addanime import AddAnimeIE +from .adobetv 
import AdobeTVIE +from .adultswim import AdultSwimIE +from .aftonbladet import AftonbladetIE +from .aljazeera import AlJazeeraIE +from .anitube import AnitubeIE +from .anysex import AnySexIE +from .aol import AolIE +from .allocine import AllocineIE +from .aparat import AparatIE +from .appletrailers import AppleTrailersIE +from .archiveorg import ArchiveOrgIE +from .ard import ARDIE, ARDMediathekIE +from .arte import ( + ArteTvIE, + ArteTVPlus7IE, + ArteTVCreativeIE, + ArteTVConcertIE, + ArteTVFutureIE, + ArteTVDDCIE, + ArteTVEmbedIE, +) +from .audiomack import AudiomackIE +from .auengine import AUEngineIE +from .azubu import AzubuIE +from .bambuser import BambuserIE, BambuserChannelIE +from .bandcamp import BandcampIE, BandcampAlbumIE +from .bbccouk import BBCCoUkIE +from .beeg import BeegIE +from .behindkink import BehindKinkIE +from .bet import BetIE +from .bild import BildIE +from .bilibili import BiliBiliIE +from .blinkx import BlinkxIE +from .bliptv import BlipTVIE, BlipTVUserIE +from .bloomberg import BloombergIE +from .bpb import BpbIE +from .br import BRIE +from .breakcom import BreakIE +from .brightcove import BrightcoveIE +from .buzzfeed import BuzzFeedIE +from .byutv import BYUtvIE +from .c56 import C56IE +from .canal13cl import Canal13clIE +from .canalplus import CanalplusIE +from .canalc2 import Canalc2IE +from .cbs import CBSIE +from .cbsnews import CBSNewsIE +from .ceskatelevize import CeskaTelevizeIE +from .channel9 import Channel9IE +from .chilloutzone import ChilloutzoneIE +from .cinchcast import CinchcastIE +from .clipfish import ClipfishIE +from .cliphunter import CliphunterIE +from .clipsyndicate import ClipsyndicateIE +from .cloudy import CloudyIE +from .clubic import ClubicIE +from .cmt import CMTIE +from .cnet import CNETIE +from .cnn import ( + CNNIE, + CNNBlogsIE, +) +from .collegehumor import CollegeHumorIE +from .comedycentral import ComedyCentralIE, ComedyCentralShowsIE +from .comcarcoff import ComCarCoffIE +from .condenast import 
CondeNastIE +from .cracked import CrackedIE +from .criterion import CriterionIE +from .crunchyroll import ( + CrunchyrollIE, + CrunchyrollShowPlaylistIE +) +from .cspan import CSpanIE +from .dailymotion import ( + DailymotionIE, + DailymotionPlaylistIE, + DailymotionUserIE, +) +from .daum import DaumIE +from .dbtv import DBTVIE +from .deezer import DeezerPlaylistIE +from .dfb import DFBIE +from .dotsub import DotsubIE +from .dreisat import DreiSatIE +from .drtuber import DrTuberIE +from .drtv import DRTVIE +from .dvtv import DVTVIE +from .dump import DumpIE +from .defense import DefenseGouvFrIE +from .discovery import DiscoveryIE +from .divxstage import DivxStageIE +from .dropbox import DropboxIE +from .ebaumsworld import EbaumsWorldIE +from .ehow import EHowIE +from .eighttracks import EightTracksIE +from .einthusan import EinthusanIE +from .eitb import EitbIE +from .ellentv import ( + EllenTVIE, + EllenTVClipsIE, +) +from .elpais import ElPaisIE +from .empflix import EMPFlixIE +from .engadget import EngadgetIE +from .eporner import EpornerIE +from .escapist import EscapistIE +from .everyonesmixtape import EveryonesMixtapeIE +from .exfm import ExfmIE +from .expotv import ExpoTVIE +from .extremetube import ExtremeTubeIE +from .facebook import FacebookIE +from .faz import FazIE +from .fc2 import FC2IE +from .firedrive import FiredriveIE +from .firstpost import FirstpostIE +from .firsttv import FirstTVIE +from .fivemin import FiveMinIE +from .fktv import ( + FKTVIE, + FKTVPosteckeIE, +) +from .flickr import FlickrIE +from .folketinget import FolketingetIE +from .fourtube import FourTubeIE +from .foxgay import FoxgayIE +from .foxnews import FoxNewsIE +from .franceculture import FranceCultureIE +from .franceinter import FranceInterIE +from .francetv import ( + PluzzIE, + FranceTvInfoIE, + FranceTVIE, + GenerationQuoiIE, + CultureboxIE, +) +from .freesound import FreesoundIE +from .freespeech import FreespeechIE +from .freevideo import FreeVideoIE +from .funnyordie 
import FunnyOrDieIE +from .gamekings import GamekingsIE +from .gameone import ( + GameOneIE, + GameOnePlaylistIE, +) +from .gamespot import GameSpotIE +from .gamestar import GameStarIE +from .gametrailers import GametrailersIE +from .gdcvault import GDCVaultIE +from .generic import GenericIE +from .giantbomb import GiantBombIE +from .glide import GlideIE +from .globo import GloboIE +from .godtube import GodTubeIE +from .goldenmoustache import GoldenMoustacheIE +from .golem import GolemIE +from .googleplus import GooglePlusIE +from .googlesearch import GoogleSearchIE +from .gorillavid import GorillaVidIE +from .goshgay import GoshgayIE +from .grooveshark import GroovesharkIE +from .groupon import GrouponIE +from .hark import HarkIE +from .heise import HeiseIE +from .helsinki import HelsinkiIE +from .hentaistigma import HentaiStigmaIE +from .hornbunny import HornBunnyIE +from .hostingbulk import HostingBulkIE +from .hotnewhiphop import HotNewHipHopIE +from .howcast import HowcastIE +from .howstuffworks import HowStuffWorksIE +from .huffpost import HuffPostIE +from .hypem import HypemIE +from .iconosquare import IconosquareIE +from .ign import IGNIE, OneUPIE +from .imdb import ( + ImdbIE, + ImdbListIE +) +from .ina import InaIE +from .infoq import InfoQIE +from .instagram import InstagramIE, InstagramUserIE +from .internetvideoarchive import InternetVideoArchiveIE +from .iprima import IPrimaIE +from .ivi import ( + IviIE, + IviCompilationIE +) +from .izlesene import IzleseneIE +from .jadorecettepub import JadoreCettePubIE +from .jeuxvideo import JeuxVideoIE +from .jove import JoveIE +from .jukebox import JukeboxIE +from .jpopsukitv import JpopsukiIE +from .kankan import KankanIE +from .keezmovies import KeezMoviesIE +from .khanacademy import KhanAcademyIE +from .kickstarter import KickStarterIE +from .keek import KeekIE +from .kontrtube import KontrTubeIE +from .krasview import KrasViewIE +from .ku6 import Ku6IE +from .la7 import LA7IE +from .laola1tv import 
Laola1TvIE +from .lifenews import LifeNewsIE +from .liveleak import LiveLeakIE +from .livestream import ( + LivestreamIE, + LivestreamOriginalIE, + LivestreamShortenerIE, +) +from .lrt import LRTIE +from .lynda import ( + LyndaIE, + LyndaCourseIE +) +from .m6 import M6IE +from .macgamestore import MacGameStoreIE +from .mailru import MailRuIE +from .malemotion import MalemotionIE +from .mdr import MDRIE +from .metacafe import MetacafeIE +from .metacritic import MetacriticIE +from .mgoon import MgoonIE +from .minhateca import MinhatecaIE +from .ministrygrid import MinistryGridIE +from .mit import TechTVMITIE, MITIE, OCWMITIE +from .mitele import MiTeleIE +from .mixcloud import MixcloudIE +from .mlb import MLBIE +from .mpora import MporaIE +from .moevideo import MoeVideoIE +from .mofosex import MofosexIE +from .mojvideo import MojvideoIE +from .moniker import MonikerIE +from .mooshare import MooshareIE +from .morningstar import MorningstarIE +from .motherless import MotherlessIE +from .motorsport import MotorsportIE +from .movieclips import MovieClipsIE +from .moviezine import MoviezineIE +from .movshare import MovShareIE +from .mtv import ( + MTVIE, + MTVServicesEmbeddedIE, + MTVIggyIE, +) +from .muenchentv import MuenchenTVIE +from .musicplayon import MusicPlayOnIE +from .musicvault import MusicVaultIE +from .muzu import MuzuTVIE +from .myspace import MySpaceIE, MySpaceAlbumIE +from .myspass import MySpassIE +from .myvideo import MyVideoIE +from .myvidster import MyVidsterIE +from .naver import NaverIE +from .nba import NBAIE +from .nbc import ( + NBCIE, + NBCNewsIE, +) +from .ndr import NDRIE +from .ndtv import NDTVIE +from .nerdcubed import NerdCubedFeedIE +from .newgrounds import NewgroundsIE +from .newstube import NewstubeIE +from .nfb import NFBIE +from .nfl import NFLIE +from .nhl import NHLIE, NHLVideocenterIE +from .niconico import NiconicoIE, NiconicoPlaylistIE +from .ninegag import NineGagIE +from .noco import NocoIE +from .normalboots import NormalbootsIE 
+from .nosvideo import NosVideoIE +from .novamov import NovaMovIE +from .nowness import NownessIE +from .nowvideo import NowVideoIE +from .npo import ( + NPOIE, + TegenlichtVproIE, +) +from .nrk import ( + NRKIE, + NRKTVIE, +) +from .ntv import NTVIE +from .nytimes import NYTimesIE +from .nuvid import NuvidIE +from .oktoberfesttv import OktoberfestTVIE +from .ooyala import OoyalaIE +from .orf import ( + ORFTVthekIE, + ORFOE1IE, + ORFFM4IE, +) +from .parliamentliveuk import ParliamentLiveUKIE +from .patreon import PatreonIE +from .pbs import PBSIE +from .phoenix import PhoenixIE +from .photobucket import PhotobucketIE +from .planetaplay import PlanetaPlayIE +from .played import PlayedIE +from .playfm import PlayFMIE +from .playvid import PlayvidIE +from .podomatic import PodomaticIE +from .pornhd import PornHdIE +from .pornhub import PornHubIE +from .pornotube import PornotubeIE +from .pornoxo import PornoXOIE +from .promptfile import PromptFileIE +from .prosiebensat1 import ProSiebenSat1IE +from .pyvideo import PyvideoIE +from .quickvid import QuickVidIE +from .radiode import RadioDeIE +from .radiofrance import RadioFranceIE +from .rai import RaiIE +from .rbmaradio import RBMARadioIE +from .redtube import RedTubeIE +from .restudy import RestudyIE +from .reverbnation import ReverbNationIE +from .ringtv import RingTVIE +from .ro220 import Ro220IE +from .rottentomatoes import RottenTomatoesIE +from .roxwel import RoxwelIE +from .rtbf import RTBFIE +from .rtlnl import RtlXlIE +from .rtlnow import RTLnowIE +from .rtp import RTPIE +from .rts import RTSIE +from .rtve import RTVEALaCartaIE, RTVELiveIE +from .ruhd import RUHDIE +from .rutube import ( + RutubeIE, + RutubeChannelIE, + RutubeMovieIE, + RutubePersonIE, +) +from .rutv import RUTVIE +from .sapo import SapoIE +from .savefrom import SaveFromIE +from .sbs import SBSIE +from .scivee import SciVeeIE +from .screencast import ScreencastIE +from .screencastomatic import ScreencastOMaticIE +from .screenwavemedia import 
CinemassacreIE, ScreenwaveMediaIE, TeamFourIE +from .servingsys import ServingSysIE +from .sexu import SexuIE +from .sexykarma import SexyKarmaIE +from .shared import SharedIE +from .sharesix import ShareSixIE +from .sina import SinaIE +from .slideshare import SlideshareIE +from .slutload import SlutloadIE +from .smotri import ( + SmotriIE, + SmotriCommunityIE, + SmotriUserIE, + SmotriBroadcastIE, +) +from .snotr import SnotrIE +from .sockshare import SockshareIE +from .sohu import SohuIE +from .soundcloud import ( + SoundcloudIE, + SoundcloudSetIE, + SoundcloudUserIE, + SoundcloudPlaylistIE +) +from .soundgasm import SoundgasmIE +from .southpark import ( + SouthParkIE, + SouthparkDeIE, +) +from .space import SpaceIE +from .spankwire import SpankwireIE +from .spiegel import SpiegelIE, SpiegelArticleIE +from .spiegeltv import SpiegeltvIE +from .spike import SpikeIE +from .sport5 import Sport5IE +from .sportbox import SportBoxIE +from .sportdeutschland import SportDeutschlandIE +from .srmediathek import SRMediathekIE +from .stanfordoc import StanfordOpenClassroomIE +from .steam import SteamIE +from .streamcloud import StreamcloudIE +from .streamcz import StreamCZIE +from .sunporno import SunPornoIE +from .swrmediathek import SWRMediathekIE +from .syfy import SyfyIE +from .sztvhu import SztvHuIE +from .tagesschau import TagesschauIE +from .tapely import TapelyIE +from .tass import TassIE +from .teachertube import ( + TeacherTubeIE, + TeacherTubeUserIE, +) +from .teachingchannel import TeachingChannelIE +from .teamcoco import TeamcocoIE +from .techtalks import TechTalksIE +from .ted import TEDIE +from .telebruxelles import TeleBruxellesIE +from .telecinco import TelecincoIE +from .telemb import TeleMBIE +from .tenplay import TenPlayIE +from .testurl import TestURLIE +from .tf1 import TF1IE +from .theonion import TheOnionIE +from .theplatform import ThePlatformIE +from .thesixtyone import TheSixtyOneIE +from .thisav import ThisAVIE +from .tinypic import TinyPicIE +from 
.tlc import TlcIE, TlcDeIE +from .tmz import TMZIE +from .tnaflix import TNAFlixIE +from .thvideo import ( + THVideoIE, + THVideoPlaylistIE +) +from .toutv import TouTvIE +from .toypics import ToypicsUserIE, ToypicsIE +from .traileraddict import TrailerAddictIE +from .trilulilu import TriluliluIE +from .trutube import TruTubeIE +from .tube8 import Tube8IE +from .tudou import TudouIE +from .tumblr import TumblrIE +from .tunein import TuneInIE +from .turbo import TurboIE +from .tutv import TutvIE +from .tvigle import TvigleIE +from .tvp import TvpIE +from .tvplay import TVPlayIE +from .twentyfourvideo import TwentyFourVideoIE +from .twitch import TwitchIE +from .ubu import UbuIE +from .udemy import ( + UdemyIE, + UdemyCourseIE +) +from .unistra import UnistraIE +from .urort import UrortIE +from .ustream import UstreamIE, UstreamChannelIE +from .vbox7 import Vbox7IE +from .veehd import VeeHDIE +from .veoh import VeohIE +from .vesti import VestiIE +from .vevo import VevoIE +from .vgtv import VGTVIE +from .vh1 import VH1IE +from .vice import ViceIE +from .viddler import ViddlerIE +from .videobam import VideoBamIE +from .videodetective import VideoDetectiveIE +from .videolecturesnet import VideoLecturesNetIE +from .videofyme import VideofyMeIE +from .videomega import VideoMegaIE +from .videopremium import VideoPremiumIE +from .videott import VideoTtIE +from .videoweed import VideoWeedIE +from .vidme import VidmeIE +from .vidzi import VidziIE +from .vimeo import ( + VimeoIE, + VimeoAlbumIE, + VimeoChannelIE, + VimeoGroupsIE, + VimeoLikesIE, + VimeoReviewIE, + VimeoUserIE, + VimeoWatchLaterIE, +) +from .vimple import VimpleIE +from .vine import ( + VineIE, + VineUserIE, +) +from .viki import VikiIE +from .vk import ( + VKIE, + VKUserVideosIE, +) +from .vodlocker import VodlockerIE +from .vporn import VpornIE +from .vrt import VRTIE +from .vube import VubeIE +from .vuclip import VuClipIE +from .vulture import VultureIE +from .walla import WallaIE +from .washingtonpost 
# Every name imported above that ends in "IE" is an extractor class.
# GenericIE is kept out of the comprehension and appended last: the generic
# extractor must only run after every site-specific extractor has had a
# chance to match the URL.
_ALL_CLASSES = [
    value
    for key, value in globals().items()
    if key.endswith('IE') and key != 'GenericIE'
]
_ALL_CLASSES.append(GenericIE)


def gen_extractors():
    """Return a list with an instance of every supported extractor.

    The order matters: the first extractor that matches a URL is the one
    that handles it.
    """
    return [extractor() for extractor in _ALL_CLASSES]


def get_info_extractor(ie_name):
    """Return the info extractor class registered under *ie_name*."""
    return globals()['%sIE' % ie_name]
+ """ + return [klass() for klass in _ALL_CLASSES] + + +def get_info_extractor(ie_name): + """Returns the info extractor class with the given ie_name""" + return globals()[ie_name + 'IE'] diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/abc.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/abc.py new file mode 100644 index 0000000000..dc0fb85d60 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/abc.py @@ -0,0 +1,47 @@ +from __future__ import unicode_literals + +import re +import json + +from .common import InfoExtractor + + +class ABCIE(InfoExtractor): + IE_NAME = 'abc.net.au' + _VALID_URL = r'http://www\.abc\.net\.au/news/[^/]+/[^/]+/(?P\d+)' + + _TEST = { + 'url': 'http://www.abc.net.au/news/2014-11-05/australia-to-staff-ebola-treatment-centre-in-sierra-leone/5868334', + 'md5': 'cb3dd03b18455a661071ee1e28344d9f', + 'info_dict': { + 'id': '5868334', + 'ext': 'mp4', + 'title': 'Australia to help staff Ebola treatment centre in Sierra Leone', + 'description': 'md5:809ad29c67a05f54eb41f2a105693a67', + }, + } + + def _real_extract(self, url): + video_id = self._match_id(url) + webpage = self._download_webpage(url, video_id) + + urls_info_json = self._search_regex( + r'inlineVideoData\.push\((.*?)\);', webpage, 'video urls', + flags=re.DOTALL) + urls_info = json.loads(urls_info_json.replace('\'', '"')) + formats = [{ + 'url': url_info['url'], + 'width': int(url_info['width']), + 'height': int(url_info['height']), + 'tbr': int(url_info['bitrate']), + 'filesize': int(url_info['filesize']), + } for url_info in urls_info] + self._sort_formats(formats) + + return { + 'id': video_id, + 'title': self._og_search_title(webpage), + 'formats': formats, + 'description': self._og_search_description(webpage), + 'thumbnail': self._og_search_thumbnail(webpage), + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/academicearth.py 
b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/academicearth.py new file mode 100644 index 0000000000..47313fba87 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/academicearth.py @@ -0,0 +1,41 @@ +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor + + +class AcademicEarthCourseIE(InfoExtractor): + _VALID_URL = r'^https?://(?:www\.)?academicearth\.org/playlists/(?P[^?#/]+)' + IE_NAME = 'AcademicEarth:Course' + _TEST = { + 'url': 'http://academicearth.org/playlists/laws-of-nature/', + 'info_dict': { + 'id': 'laws-of-nature', + 'title': 'Laws of Nature', + 'description': 'Introduce yourself to the laws of nature with these free online college lectures from Yale, Harvard, and MIT.', + }, + 'playlist_count': 4, + } + + def _real_extract(self, url): + playlist_id = self._match_id(url) + + webpage = self._download_webpage(url, playlist_id) + title = self._html_search_regex( + r'

    ]*?>(.*?)

    ', webpage, 'title') + description = self._html_search_regex( + r'

    ]*?>(.*?)

    ', + webpage, 'description', fatal=False) + urls = re.findall( + r'
  • \s*?', + webpage) + entries = [self.url_result(u) for u in urls] + + return { + '_type': 'playlist', + 'id': playlist_id, + 'title': title, + 'description': description, + 'entries': entries, + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/addanime.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/addanime.py new file mode 100644 index 0000000000..203936e54a --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/addanime.py @@ -0,0 +1,87 @@ +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor +from ..compat import ( + compat_HTTPError, + compat_str, + compat_urllib_parse, + compat_urllib_parse_urlparse, +) +from ..utils import ( + ExtractorError, +) + + +class AddAnimeIE(InfoExtractor): + _VALID_URL = r'^http://(?:\w+\.)?add-anime\.net/watch_video\.php\?(?:.*?)v=(?P[\w_]+)(?:.*)' + _TEST = { + 'url': 'http://www.add-anime.net/watch_video.php?v=24MR3YO5SAS9', + 'md5': '72954ea10bc979ab5e2eb288b21425a0', + 'info_dict': { + 'id': '24MR3YO5SAS9', + 'ext': 'mp4', + 'description': 'One Piece 606', + 'title': 'One Piece 606', + } + } + + def _real_extract(self, url): + video_id = self._match_id(url) + + try: + webpage = self._download_webpage(url, video_id) + except ExtractorError as ee: + if not isinstance(ee.cause, compat_HTTPError) or \ + ee.cause.code != 503: + raise + + redir_webpage = ee.cause.read().decode('utf-8') + action = self._search_regex( + r'
    ', + redir_webpage, 'redirect vc value') + av = re.search( + r'a\.value = ([0-9]+)[+]([0-9]+)[*]([0-9]+);', + redir_webpage) + if av is None: + raise ExtractorError('Cannot find redirect math task') + av_res = int(av.group(1)) + int(av.group(2)) * int(av.group(3)) + + parsed_url = compat_urllib_parse_urlparse(url) + av_val = av_res + len(parsed_url.netloc) + confirm_url = ( + parsed_url.scheme + '://' + parsed_url.netloc + + action + '?' + + compat_urllib_parse.urlencode({ + 'jschl_vc': vc, 'jschl_answer': compat_str(av_val)})) + self._download_webpage( + confirm_url, video_id, + note='Confirming after redirect') + webpage = self._download_webpage(url, video_id) + + formats = [] + for format_id in ('normal', 'hq'): + rex = r"var %s_video_file = '(.*?)';" % re.escape(format_id) + video_url = self._search_regex(rex, webpage, 'video file URLx', + fatal=False) + if not video_url: + continue + formats.append({ + 'format_id': format_id, + 'url': video_url, + }) + self._sort_formats(formats) + video_title = self._og_search_title(webpage) + video_description = self._og_search_description(webpage) + + return { + '_type': 'video', + 'id': video_id, + 'formats': formats, + 'title': video_title, + 'description': video_description + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/adobetv.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/adobetv.py new file mode 100644 index 0000000000..28e07f8b04 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/adobetv.py @@ -0,0 +1,70 @@ +from __future__ import unicode_literals + +from .common import InfoExtractor +from ..utils import ( + parse_duration, + unified_strdate, + str_to_int, +) + + +class AdobeTVIE(InfoExtractor): + _VALID_URL = r'https?://tv\.adobe\.com/watch/[^/]+/(?P[^/]+)' + + _TEST = { + 'url': 'http://tv.adobe.com/watch/the-complete-picture-with-julieanne-kost/quick-tip-how-to-draw-a-circle-around-an-object-in-photoshop/', + 
'md5': '9bc5727bcdd55251f35ad311ca74fa1e', + 'info_dict': { + 'id': 'quick-tip-how-to-draw-a-circle-around-an-object-in-photoshop', + 'ext': 'mp4', + 'title': 'Quick Tip - How to Draw a Circle Around an Object in Photoshop', + 'description': 'md5:99ec318dc909d7ba2a1f2b038f7d2311', + 'thumbnail': 're:https?://.*\.jpg$', + 'upload_date': '20110914', + 'duration': 60, + 'view_count': int, + }, + } + + def _real_extract(self, url): + video_id = self._match_id(url) + + webpage = self._download_webpage(url, video_id) + + player = self._parse_json( + self._search_regex(r'html5player:\s*({.+?})\s*\n', webpage, 'player'), + video_id) + + title = player.get('title') or self._search_regex( + r'data-title="([^"]+)"', webpage, 'title') + description = self._og_search_description(webpage) + thumbnail = self._og_search_thumbnail(webpage) + + upload_date = unified_strdate( + self._html_search_meta('datepublished', webpage, 'upload date')) + + duration = parse_duration( + self._html_search_meta('duration', webpage, 'duration') + or self._search_regex(r'Runtime:\s*(\d{2}:\d{2}:\d{2})', webpage, 'duration')) + + view_count = str_to_int(self._search_regex( + r'
    \s*Views?:\s*([\d,.]+)\s*
    ', + webpage, 'view count')) + + formats = [{ + 'url': source['src'], + 'format_id': source.get('quality') or source['src'].split('-')[-1].split('.')[0] or None, + 'tbr': source.get('bitrate'), + } for source in player['sources']] + self._sort_formats(formats) + + return { + 'id': video_id, + 'title': title, + 'description': description, + 'thumbnail': thumbnail, + 'upload_date': upload_date, + 'duration': duration, + 'view_count': view_count, + 'formats': formats, + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/adultswim.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/adultswim.py new file mode 100644 index 0000000000..502a9c25ad --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/adultswim.py @@ -0,0 +1,169 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import re +import json + +from .common import InfoExtractor +from ..utils import ( + ExtractorError, + xpath_text, + float_or_none, +) + + +class AdultSwimIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?adultswim\.com/videos/(?Pplaylists/)?(?P[^/]+)/(?P[^/?#]+)/?' + + _TESTS = [{ + 'url': 'http://adultswim.com/videos/rick-and-morty/pilot', + 'playlist': [ + { + 'md5': '247572debc75c7652f253c8daa51a14d', + 'info_dict': { + 'id': 'rQxZvXQ4ROaSOqq-or2Mow-0', + 'ext': 'flv', + 'title': 'Rick and Morty - Pilot Part 1', + 'description': "Rick moves in with his daughter's family and establishes himself as a bad influence on his grandson, Morty. " + }, + }, + { + 'md5': '77b0e037a4b20ec6b98671c4c379f48d', + 'info_dict': { + 'id': 'rQxZvXQ4ROaSOqq-or2Mow-3', + 'ext': 'flv', + 'title': 'Rick and Morty - Pilot Part 4', + 'description': "Rick moves in with his daughter's family and establishes himself as a bad influence on his grandson, Morty. 
" + }, + }, + ], + 'info_dict': { + 'title': 'Rick and Morty - Pilot', + 'description': "Rick moves in with his daughter's family and establishes himself as a bad influence on his grandson, Morty. " + } + }, { + 'url': 'http://www.adultswim.com/videos/playlists/american-parenting/putting-francine-out-of-business/', + 'playlist': [ + { + 'md5': '2eb5c06d0f9a1539da3718d897f13ec5', + 'info_dict': { + 'id': '-t8CamQlQ2aYZ49ItZCFog-0', + 'ext': 'flv', + 'title': 'American Dad - Putting Francine Out of Business', + 'description': 'Stan hatches a plan to get Francine out of the real estate business.Watch more American Dad on [adult swim].' + }, + } + ], + 'info_dict': { + 'title': 'American Dad - Putting Francine Out of Business', + 'description': 'Stan hatches a plan to get Francine out of the real estate business.Watch more American Dad on [adult swim].' + }, + }] + + @staticmethod + def find_video_info(collection, slug): + for video in collection.get('videos'): + if video.get('slug') == slug: + return video + + @staticmethod + def find_collection_by_linkURL(collections, linkURL): + for collection in collections: + if collection.get('linkURL') == linkURL: + return collection + + @staticmethod + def find_collection_containing_video(collections, slug): + for collection in collections: + for video in collection.get('videos'): + if video.get('slug') == slug: + return collection, video + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + show_path = mobj.group('show_path') + episode_path = mobj.group('episode_path') + is_playlist = True if mobj.group('is_playlist') else False + + webpage = self._download_webpage(url, episode_path) + + # Extract the value of `bootstrappedData` from the Javascript in the page. 
+ bootstrappedDataJS = self._search_regex(r'var bootstrappedData = ({.*});', webpage, episode_path) + + try: + bootstrappedData = json.loads(bootstrappedDataJS) + except ValueError as ve: + errmsg = '%s: Failed to parse JSON ' % episode_path + raise ExtractorError(errmsg, cause=ve) + + # Downloading videos from a /videos/playlist/ URL needs to be handled differently. + # NOTE: We are only downloading one video (the current one) not the playlist + if is_playlist: + collections = bootstrappedData['playlists']['collections'] + collection = self.find_collection_by_linkURL(collections, show_path) + video_info = self.find_video_info(collection, episode_path) + + show_title = video_info['showTitle'] + segment_ids = [video_info['videoPlaybackID']] + else: + collections = bootstrappedData['show']['collections'] + collection, video_info = self.find_collection_containing_video(collections, episode_path) + + show = bootstrappedData['show'] + show_title = show['title'] + segment_ids = [clip['videoPlaybackID'] for clip in video_info['clips']] + + episode_id = video_info['id'] + episode_title = video_info['title'] + episode_description = video_info['description'] + episode_duration = video_info.get('duration') + + entries = [] + for part_num, segment_id in enumerate(segment_ids): + segment_url = 'http://www.adultswim.com/videos/api/v0/assets?id=%s&platform=mobile' % segment_id + + segment_title = '%s - %s' % (show_title, episode_title) + if len(segment_ids) > 1: + segment_title += ' Part %d' % (part_num + 1) + + idoc = self._download_xml( + segment_url, segment_title, + 'Downloading segment information', 'Unable to download segment information') + + segment_duration = float_or_none( + xpath_text(idoc, './/trt', 'segment duration').strip()) + + formats = [] + file_els = idoc.findall('.//files/file') + + for file_el in file_els: + bitrate = file_el.attrib.get('bitrate') + ftype = file_el.attrib.get('type') + + formats.append({ + 'format_id': '%s_%s' % (bitrate, ftype), + 'url': 
class AftonbladetIE(InfoExtractor):
    """Extractor for web-TV videos on tv.aftonbladet.se."""
    # Group name restored from the mobj.group('video_id') call below; the
    # source pattern had been mangled to the invalid `(?Particle[0-9]+)`.
    _VALID_URL = r'^http://tv\.aftonbladet\.se/webbtv.+?(?P<video_id>article[0-9]+)\.ab(?:$|[?#])'
    _TEST = {
        'url': 'http://tv.aftonbladet.se/webbtv/nyheter/vetenskap/rymden/article36015.ab',
        'info_dict': {
            'id': 'article36015',
            'ext': 'mp4',
            # FIX: the Swedish strings had been mojibake-corrupted
            # ('slц╓pper'/'mц╔ne'); restored to proper UTF-8.
            'title': 'Vulkanutbrott i rymden - nu släpper NASA bilderna',
            'description': 'Jupiters måne mest aktiv av alla himlakroppar',
            'timestamp': 1394142732,
            'upload_date': '20140306',
        },
    }

    def _real_extract(self, url):
        mobj = re.search(self._VALID_URL, url)

        video_id = mobj.group('video_id')
        webpage = self._download_webpage(url, video_id)

        # find internal video meta data
        meta_url = 'http://aftonbladet-play.drlib.aptoma.no/video/%s.json'
        internal_meta_id = self._html_search_regex(
            r'data-aptomaId="([\w\d]+)"', webpage, 'internal_meta_id')
        internal_meta_url = meta_url % internal_meta_id
        internal_meta_json = self._download_json(
            internal_meta_url, video_id, 'Downloading video meta data')

        # find internal video formats
        format_url = 'http://aftonbladet-play.videodata.drvideo.aptoma.no/actions/video/?id=%s'
        internal_video_id = internal_meta_json['videoId']
        internal_formats_url = format_url % internal_video_id
        internal_formats_json = self._download_json(
            internal_formats_url, video_id, 'Downloading video formats')

        formats = []
        for fmt in internal_formats_json['formats']['http']['pseudostreaming']['mp4']:
            p = fmt['paths'][0]
            formats.append({
                'url': 'http://%s:%d/%s/%s' % (p['address'], p['port'], p['path'], p['filename']),
                'ext': 'mp4',
                'width': fmt['width'],
                'height': fmt['height'],
                'tbr': fmt['bitrate'],
                'protocol': 'http',
            })
        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': internal_meta_json['title'],
            'formats': formats,
            'thumbnail': internal_meta_json['imageUrl'],
            'description': internal_meta_json['shortPreamble'],
            'timestamp': internal_meta_json['timePublished'],
            'duration': internal_meta_json['duration'],
            'view_count': internal_meta_json['views'],
        }
class AlJazeeraIE(InfoExtractor):
    """Extractor for aljazeera.com programme pages (delegates to Brightcove)."""
    # Named group restored; the source had been mangled to the invalid
    # pattern `(?P[^/]+)`. `_match_id` requires a group named `id`.
    _VALID_URL = r'http://www\.aljazeera\.com/programmes/.*?/(?P<id>[^/]+)\.html'

    _TEST = {
        'url': 'http://www.aljazeera.com/programmes/the-slum/2014/08/deliverance-201482883754237240.html',
        'info_dict': {
            'id': '3792260579001',
            'ext': 'mp4',
            'title': 'The Slum - Episode 1: Deliverance',
            'description': 'As a birth attendant advocating for family planning, Remy is on the frontline of Tondo\'s battle with overcrowding.',
            'uploader': 'Al Jazeera English',
        },
        'add_ie': ['Brightcove'],
    }

    def _real_extract(self, url):
        program_name = self._match_id(url)
        webpage = self._download_webpage(url, program_name)
        brightcove_id = self._search_regex(
            r'RenderPagesVideo\(\'(.+?)\'', webpage, 'brightcove id')

        # Hand the actual download off to the Brightcove extractor.
        return {
            '_type': 'url',
            'url': (
                'brightcove:'
                'playerKey=AQ~~%2CAAAAmtVJIFk~%2CTVGOQ5ZTwJbeMWnq5d_H4MOM57xfzApc'
                '&%40videoPlayer={0}'.format(brightcove_id)
            ),
            'ie_key': 'Brightcove',
        }
class AllocineIE(InfoExtractor):
    """Extractor for article, video and film pages on allocine.fr."""
    # Group names restored from the mobj.group('typ')/mobj.group('id') calls
    # below; the source pattern had been mangled and was invalid.
    _VALID_URL = r'https?://(?:www\.)?allocine\.fr/(?P<typ>article|video|film)/(fichearticle_gen_carticle=|player_gen_cmedia=|fichefilm_gen_cfilm=|video-)(?P<id>[0-9]+)(?:\.html)?'

    _TESTS = [{
        'url': 'http://www.allocine.fr/article/fichearticle_gen_carticle=18635087.html',
        'md5': '0c9fcf59a841f65635fa300ac43d8269',
        'info_dict': {
            'id': '19546517',
            'ext': 'mp4',
            # FIX: title had been mojibake-corrupted ('Astц╘rix'); restored.
            'title': 'Astérix - Le Domaine des Dieux Teaser VF',
            'description': 'md5:abcd09ce503c6560512c14ebfdb720d2',
            'thumbnail': 're:http://.*\.jpg',
        },
    }, {
        'url': 'http://www.allocine.fr/video/player_gen_cmedia=19540403&cfilm=222257.html',
        'md5': 'd0cdce5d2b9522ce279fdfec07ff16e0',
        'info_dict': {
            'id': '19540403',
            'ext': 'mp4',
            'title': 'Planes 2 Bande-annonce VF',
            'description': 'md5:eeaffe7c2d634525e21159b93acf3b1e',
            'thumbnail': 're:http://.*\.jpg',
        },
    }, {
        'url': 'http://www.allocine.fr/film/fichefilm_gen_cfilm=181290.html',
        'md5': '101250fb127ef9ca3d73186ff22a47ce',
        'info_dict': {
            'id': '19544709',
            'ext': 'mp4',
            'title': 'Dragons 2 - Bande annonce finale VF',
            'description': 'md5:71742e3a74b0d692c7fce0dd2017a4ac',
            'thumbnail': 're:http://.*\.jpg',
        },
    }, {
        'url': 'http://www.allocine.fr/video/video-19550147/',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        typ = mobj.group('typ')
        display_id = mobj.group('id')

        webpage = self._download_webpage(url, display_id)

        # Film pages only link to the player; resolve the real media id first.
        if typ == 'film':
            video_id = self._search_regex(r'href="/video/player_gen_cmedia=([0-9]+).+"', webpage, 'video id')
        else:
            player = self._search_regex(r'data-player=\'([^\']+)\'>', webpage, 'data player')

            player_data = json.loads(player)
            video_id = compat_str(player_data['refMedia'])

        xml = self._download_xml('http://www.allocine.fr/ws/AcVisiondataV4.ashx?media=%s' % video_id, display_id)

        video = xml.find('.//AcVisionVideo').attrib
        quality = qualities(['ld', 'md', 'hd'])

        # Every `<quality>_path` attribute is a downloadable rendition.
        formats = []
        for k, v in video.items():
            if re.match(r'.+_path', k):
                format_id = k.split('_')[0]
                formats.append({
                    'format_id': format_id,
                    'quality': quality(format_id),
                    'url': v,
                })
        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': video['videoTitle'],
            'thumbnail': self._og_search_thumbnail(webpage),
            'formats': formats,
            'description': self._og_search_description(webpage),
        }
self._sort_formats(formats) + + return { + 'id': video_id, + 'title': video['videoTitle'], + 'thumbnail': self._og_search_thumbnail(webpage), + 'formats': formats, + 'description': self._og_search_description(webpage), + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/anitube.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/anitube.py new file mode 100644 index 0000000000..31f0d417ce --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/anitube.py @@ -0,0 +1,59 @@ +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor + + +class AnitubeIE(InfoExtractor): + IE_NAME = 'anitube.se' + _VALID_URL = r'https?://(?:www\.)?anitube\.se/video/(?P\d+)' + + _TEST = { + 'url': 'http://www.anitube.se/video/36621', + 'md5': '59d0eeae28ea0bc8c05e7af429998d43', + 'info_dict': { + 'id': '36621', + 'ext': 'mp4', + 'title': 'Recorder to Randoseru 01', + 'duration': 180.19, + }, + 'skip': 'Blocked in the US', + } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group('id') + + webpage = self._download_webpage(url, video_id) + key = self._html_search_regex( + r'http://www\.anitube\.se/embed/([A-Za-z0-9_-]*)', webpage, 'key') + + config_xml = self._download_xml( + 'http://www.anitube.se/nuevo/econfig.php?key=%s' % key, key) + + video_title = config_xml.find('title').text + thumbnail = config_xml.find('image').text + duration = float(config_xml.find('duration').text) + + formats = [] + video_url = config_xml.find('file') + if video_url is not None: + formats.append({ + 'format_id': 'sd', + 'url': video_url.text, + }) + video_url = config_xml.find('filehd') + if video_url is not None: + formats.append({ + 'format_id': 'hd', + 'url': video_url.text, + }) + + return { + 'id': video_id, + 'title': video_title, + 'thumbnail': thumbnail, + 'duration': duration, + 'formats': formats + } diff --git 
a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/anysex.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/anysex.py new file mode 100644 index 0000000000..ad86d6e58a --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/anysex.py @@ -0,0 +1,61 @@ +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor +from ..utils import ( + parse_duration, + int_or_none, +) + + +class AnySexIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?anysex\.com/(?P\d+)' + _TEST = { + 'url': 'http://anysex.com/156592/', + 'md5': '023e9fbb7f7987f5529a394c34ad3d3d', + 'info_dict': { + 'id': '156592', + 'ext': 'mp4', + 'title': 'Busty and sexy blondie in her bikini strips for you', + 'description': 'md5:de9e418178e2931c10b62966474e1383', + 'categories': ['Erotic'], + 'duration': 270, + 'age_limit': 18, + } + } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group('id') + + webpage = self._download_webpage(url, video_id) + + video_url = self._html_search_regex(r"video_url\s*:\s*'([^']+)'", webpage, 'video URL') + + title = self._html_search_regex(r'(.*?)', webpage, 'title') + description = self._html_search_regex( + r'
    ]*>([^<]+)
    ', webpage, 'description', fatal=False) + thumbnail = self._html_search_regex( + r'preview_url\s*:\s*\'(.*?)\'', webpage, 'thumbnail', fatal=False) + + categories = re.findall( + r'
    ([^<]+)', webpage) + + duration = parse_duration(self._search_regex( + r'Duration: (?:)?(\d+:\d+)', webpage, 'duration', fatal=False)) + view_count = int_or_none(self._html_search_regex( + r'Views: (\d+)', webpage, 'view count', fatal=False)) + + return { + 'id': video_id, + 'url': video_url, + 'ext': 'mp4', + 'title': title, + 'description': description, + 'thumbnail': thumbnail, + 'categories': categories, + 'duration': duration, + 'view_count': view_count, + 'age_limit': 18, + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/aol.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/aol.py new file mode 100644 index 0000000000..b51eafc459 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/aol.py @@ -0,0 +1,70 @@ +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor + + +class AolIE(InfoExtractor): + IE_NAME = 'on.aol.com' + _VALID_URL = r'''(?x) + (?: + aol-video:| + http://on\.aol\.com/ + (?: + video/.*-| + playlist/(?P[^/?#]+?)-(?P[0-9]+)[?#].*_videoid= + ) + ) + (?P[0-9]+) + (?:$|\?) + ''' + + _TESTS = [{ + 'url': 'http://on.aol.com/video/u-s--official-warns-of-largest-ever-irs-phone-scam-518167793?icid=OnHomepageC2Wide_MustSee_Img', + 'md5': '18ef68f48740e86ae94b98da815eec42', + 'info_dict': { + 'id': '518167793', + 'ext': 'mp4', + 'title': 'U.S. 
Official Warns Of \'Largest Ever\' IRS Phone Scam', + }, + 'add_ie': ['FiveMin'], + }, { + 'url': 'http://on.aol.com/playlist/brace-yourself---todays-weirdest-news-152147?icid=OnHomepageC4_Omg_Img#_videoid=518184316', + 'info_dict': { + 'id': '152147', + 'title': 'Brace Yourself - Today\'s Weirdest News', + }, + 'playlist_mincount': 10, + }] + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group('id') + playlist_id = mobj.group('playlist_id') + if not playlist_id or self._downloader.params.get('noplaylist'): + return self.url_result('5min:%s' % video_id) + + self.to_screen('Downloading playlist %s - add --no-playlist to just download video %s' % (playlist_id, video_id)) + + webpage = self._download_webpage(url, playlist_id) + title = self._html_search_regex( + r'

    (.+?)

    ', webpage, 'title') + playlist_html = self._search_regex( + r"(?s)(.*?)", webpage, + 'playlist HTML') + entries = [{ + '_type': 'url', + 'url': 'aol-video:%s' % m.group('id'), + 'ie_key': 'Aol', + } for m in re.finditer( + r"[0-9]+)'\s+class='video-thumb'>", + playlist_html)] + + return { + '_type': 'playlist', + 'id': playlist_id, + 'display_id': mobj.group('playlist_display_id'), + 'title': title, + 'entries': entries, + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/aparat.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/aparat.py new file mode 100644 index 0000000000..15006336fa --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/aparat.py @@ -0,0 +1,57 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor +from ..utils import ( + ExtractorError, + HEADRequest, +) + + +class AparatIE(InfoExtractor): + _VALID_URL = r'^https?://(?:www\.)?aparat\.com/(?:v/|video/video/embed/videohash/)(?P[a-zA-Z0-9]+)' + + _TEST = { + 'url': 'http://www.aparat.com/v/wP8On', + 'md5': '6714e0af7e0d875c5a39c4dc4ab46ad1', + 'info_dict': { + 'id': 'wP8On', + 'ext': 'mp4', + 'title': 'ь╙ш▄ы┘ з╞ы└з╘ьЁш▄ 11 - ь╡ы┬ы┘ш▄ь╙', + }, + # 'skip': 'Extremely unreliable', + } + + def _real_extract(self, url): + video_id = self._match_id(url) + + # Note: There is an easier-to-parse configuration at + # http://www.aparat.com/video/video/config/videohash/%video_id + # but the URL in there does not work + embed_url = ('http://www.aparat.com/video/video/embed/videohash/' + + video_id + '/vt/frame') + webpage = self._download_webpage(embed_url, video_id) + + video_urls = re.findall(r'fileList\[[0-9]+\]\s*=\s*"([^"]+)"', webpage) + for i, video_url in enumerate(video_urls): + req = HEADRequest(video_url) + res = self._request_webpage( + req, video_id, note='Testing video URL %d' % i, errnote=False) + if res: + break + else: + raise ExtractorError('No 
working video URLs found') + + title = self._search_regex(r'\s+title:\s*"([^"]+)"', webpage, 'title') + thumbnail = self._search_regex( + r'\s+image:\s*"([^"]+)"', webpage, 'thumbnail', fatal=False) + + return { + 'id': video_id, + 'title': title, + 'url': video_url, + 'ext': 'mp4', + 'thumbnail': thumbnail, + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/appletrailers.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/appletrailers.py new file mode 100644 index 0000000000..7cd0482c75 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/appletrailers.py @@ -0,0 +1,139 @@ +from __future__ import unicode_literals + +import re +import json + +from .common import InfoExtractor +from ..compat import compat_urlparse +from ..utils import ( + int_or_none, +) + + +class AppleTrailersIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?trailers\.apple\.com/trailers/(?P[^/]+)/(?P[^/]+)' + _TEST = { + "url": "http://trailers.apple.com/trailers/wb/manofsteel/", + "playlist": [ + { + "md5": "d97a8e575432dbcb81b7c3acb741f8a8", + "info_dict": { + "id": "manofsteel-trailer4", + "ext": "mov", + "duration": 111, + "title": "Trailer 4", + "upload_date": "20130523", + "uploader_id": "wb", + }, + }, + { + "md5": "b8017b7131b721fb4e8d6f49e1df908c", + "info_dict": { + "id": "manofsteel-trailer3", + "ext": "mov", + "duration": 182, + "title": "Trailer 3", + "upload_date": "20130417", + "uploader_id": "wb", + }, + }, + { + "md5": "d0f1e1150989b9924679b441f3404d48", + "info_dict": { + "id": "manofsteel-trailer", + "ext": "mov", + "duration": 148, + "title": "Trailer", + "upload_date": "20121212", + "uploader_id": "wb", + }, + }, + { + "md5": "5fe08795b943eb2e757fa95cb6def1cb", + "info_dict": { + "id": "manofsteel-teaser", + "ext": "mov", + "duration": 93, + "title": "Teaser", + "upload_date": "20120721", + "uploader_id": "wb", + }, + }, + ] + } + + _JSON_RE = r'iTunes.playURL\((.*?)\);' + + def 
_real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + movie = mobj.group('movie') + uploader_id = mobj.group('company') + + playlist_url = compat_urlparse.urljoin(url, 'includes/playlists/itunes.inc') + + def fix_html(s): + s = re.sub(r'(?s).*?', '', s) + s = re.sub(r'', r'', s) + # The ' in the onClick attributes are not escaped, it couldn't be parsed + # like: http://trailers.apple.com/trailers/wb/gravity/ + + def _clean_json(m): + return 'iTunes.playURL(%s);' % m.group(1).replace('\'', ''') + s = re.sub(self._JSON_RE, _clean_json, s) + s = '%s' % s + return s + doc = self._download_xml(playlist_url, movie, transform_source=fix_html) + + playlist = [] + for li in doc.findall('./div/ul/li'): + on_click = li.find('.//a').attrib['onClick'] + trailer_info_json = self._search_regex(self._JSON_RE, + on_click, 'trailer info') + trailer_info = json.loads(trailer_info_json) + title = trailer_info['title'] + video_id = movie + '-' + re.sub(r'[^a-zA-Z0-9]', '', title).lower() + thumbnail = li.find('.//img').attrib['src'] + upload_date = trailer_info['posted'].replace('-', '') + + runtime = trailer_info['runtime'] + m = re.search(r'(?P[0-9]+):(?P[0-9]{1,2})', runtime) + duration = None + if m: + duration = 60 * int(m.group('minutes')) + int(m.group('seconds')) + + first_url = trailer_info['url'] + trailer_id = first_url.split('/')[-1].rpartition('_')[0].lower() + settings_json_url = compat_urlparse.urljoin(url, 'includes/settings/%s.json' % trailer_id) + settings = self._download_json(settings_json_url, trailer_id, 'Downloading settings json') + + formats = [] + for format in settings['metadata']['sizes']: + # The src is a file pointing to the real video file + format_url = re.sub(r'_(\d*p.mov)', r'_h\1', format['src']) + formats.append({ + 'url': format_url, + 'format': format['type'], + 'width': int_or_none(format['width']), + 'height': int_or_none(format['height']), + }) + + self._sort_formats(formats) + + playlist.append({ + '_type': 'video', + 'id': 
video_id, + 'title': title, + 'formats': formats, + 'title': title, + 'duration': duration, + 'thumbnail': thumbnail, + 'upload_date': upload_date, + 'uploader_id': uploader_id, + 'user_agent': 'QuickTime compatible (youtube-dl)', + }) + + return { + '_type': 'playlist', + 'id': movie, + 'entries': playlist, + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/archiveorg.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/archiveorg.py new file mode 100644 index 0000000000..34ce8429b1 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/archiveorg.py @@ -0,0 +1,61 @@ +from __future__ import unicode_literals + +import json +import re + +from .common import InfoExtractor +from ..utils import ( + unified_strdate, +) + + +class ArchiveOrgIE(InfoExtractor): + IE_NAME = 'archive.org' + IE_DESC = 'archive.org videos' + _VALID_URL = r'(?:https?://)?(?:www\.)?archive\.org/details/(?P[^?/]+)(?:[?].*)?$' + _TEST = { + "url": "http://archive.org/details/XD300-23_68HighlightsAResearchCntAugHumanIntellect", + 'file': 'XD300-23_68HighlightsAResearchCntAugHumanIntellect.ogv', + 'md5': '8af1d4cf447933ed3c7f4871162602db', + 'info_dict': { + "title": "1968 Demo - FJCC Conference Presentation Reel #1", + "description": "Reel 1 of 3: Also known as the \"Mother of All Demos\", Doug Engelbart's presentation at the Fall Joint Computer Conference in San Francisco, December 9, 1968 titled \"A Research Center for Augmenting Human Intellect.\" For this presentation, Doug and his team astonished the audience by not only relating their research, but demonstrating it live. This was the debut of the mouse, interactive computing, hypermedia, computer supported software engineering, video teleconferencing, etc. See also Doug's 1968 Demo page for more background, highlights, links, and the detailed paper published in this conference proceedings. 
Filmed on 3 reels: Reel 1 | Reel 2 | Reel 3", + "upload_date": "19681210", + "uploader": "SRI International" + } + } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group('id') + + json_url = url + ('?' if '?' in url else '&') + 'output=json' + json_data = self._download_webpage(json_url, video_id) + data = json.loads(json_data) + + title = data['metadata']['title'][0] + description = data['metadata']['description'][0] + uploader = data['metadata']['creator'][0] + upload_date = unified_strdate(data['metadata']['date'][0]) + + formats = [ + { + 'format': fdata['format'], + 'url': 'http://' + data['server'] + data['dir'] + fn, + 'file_size': int(fdata['size']), + } + for fn, fdata in data['files'].items() + if 'Video' in fdata['format']] + + self._sort_formats(formats) + + return { + '_type': 'video', + 'id': video_id, + 'title': title, + 'formats': formats, + 'description': description, + 'uploader': uploader, + 'upload_date': upload_date, + 'thumbnail': data.get('misc', {}).get('image'), + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/ard.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/ard.py new file mode 100644 index 0000000000..967bd865c5 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/ard.py @@ -0,0 +1,194 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor +from .generic import GenericIE +from ..utils import ( + determine_ext, + ExtractorError, + qualities, + int_or_none, + parse_duration, + unified_strdate, + xpath_text, + parse_xml, +) + + +class ARDMediathekIE(InfoExtractor): + IE_NAME = 'ARD:mediathek' + _VALID_URL = r'^https?://(?:(?:www\.)?ardmediathek\.de|mediathek\.daserste\.de)/(?:.*/)(?P[0-9]+|[^0-9][^/\?]+)[^/\?]*(?:\?.*)?' 
+ + _TESTS = [{ + 'url': 'http://mediathek.daserste.de/sendungen_a-z/328454_anne-will/22429276_vertrauen-ist-gut-spionieren-ist-besser-geht', + 'file': '22429276.mp4', + 'md5': '469751912f1de0816a9fc9df8336476c', + 'info_dict': { + 'title': 'Vertrauen ist gut, Spionieren ist besser - Geht so deutsch-amerikanische Freundschaft?', + 'description': 'Das Erste Mediathek [ARD]: Vertrauen ist gut, Spionieren ist besser - Geht so deutsch-amerikanische Freundschaft?, Anne Will, ц°ber die Spionage-Affц╓re diskutieren Clemens Binninger, Katrin Gц╤ring-Eckardt, Georg Mascolo, Andrew B. Denison und Constanze Kurz.. Das Video zur Sendung Anne Will am Mittwoch, 16.07.2014', + }, + 'skip': 'Blocked outside of Germany', + }, { + 'url': 'http://www.ardmediathek.de/tv/Tatort/Das-Wunder-von-Wolbeck-Video-tgl-ab-20/Das-Erste/Video?documentId=22490580&bcastId=602916', + 'info_dict': { + 'id': '22490580', + 'ext': 'mp4', + 'title': 'Das Wunder von Wolbeck (Video tgl. ab 20 Uhr)', + 'description': 'Auf einem restaurierten Hof bei Wolbeck wird der Heilpraktiker Raffael Lembeck eines morgens von seiner Frau Stella tot aufgefunden. Das Opfer war offensichtlich in seiner Praxis zu Fall gekommen und ist dann verblutet, erklц╓rt Prof. Boerne am Tatort.', + }, + 'skip': 'Blocked outside of Germany', + }] + + def _real_extract(self, url): + # determine video id from url + m = re.match(self._VALID_URL, url) + + numid = re.search(r'documentId=([0-9]+)', url) + if numid: + video_id = numid.group(1) + else: + video_id = m.group('video_id') + + webpage = self._download_webpage(url, video_id) + + if '>Der gewц╪nschte Beitrag ist nicht mehr verfц╪gbar.<' in webpage: + raise ExtractorError('Video %s is no longer available' % video_id, expected=True) + + if re.search(r'[\?&]rss($|[=&])', url): + doc = parse_xml(webpage) + if doc.tag == 'rss': + return GenericIE()._extract_rss(url, video_id, doc) + + title = self._html_search_regex( + [r'(.*?)', + r'', + r'

    (.*?)

    '], + webpage, 'title') + description = self._html_search_meta( + 'dcterms.abstract', webpage, 'description', default=None) + if description is None: + description = self._html_search_meta( + 'description', webpage, 'meta description') + + # Thumbnail is sometimes not present. + # It is in the mobile version, but that seems to use a different URL + # structure altogether. + thumbnail = self._og_search_thumbnail(webpage, default=None) + + media_streams = re.findall(r'''(?x) + mediaCollection\.addMediaStream\([0-9]+,\s*[0-9]+,\s*"[^"]*",\s* + "([^"]+)"''', webpage) + + if media_streams: + QUALITIES = qualities(['lo', 'hi', 'hq']) + formats = [] + for furl in set(media_streams): + if furl.endswith('.f4m'): + fid = 'f4m' + else: + fid_m = re.match(r'.*\.([^.]+)\.[^.]+$', furl) + fid = fid_m.group(1) if fid_m else None + formats.append({ + 'quality': QUALITIES(fid), + 'format_id': fid, + 'url': furl, + }) + else: # request JSON file + media_info = self._download_json( + 'http://www.ardmediathek.de/play/media/%s' % video_id, video_id) + # The second element of the _mediaArray contains the standard http urls + streams = media_info['_mediaArray'][1]['_mediaStreamArray'] + if not streams: + if '"fsk"' in webpage: + raise ExtractorError('This video is only available after 20:00') + + formats = [] + for s in streams: + if type(s['_stream']) == list: + for index, url in enumerate(s['_stream'][::-1]): + quality = s['_quality'] + index + formats.append({ + 'quality': quality, + 'url': url, + 'format_id': '%s-%s' % (determine_ext(url), quality) + }) + continue + + format = { + 'quality': s['_quality'], + 'url': s['_stream'], + } + + format['format_id'] = '%s-%s' % ( + determine_ext(format['url']), format['quality']) + + formats.append(format) + + self._sort_formats(formats) + + return { + 'id': video_id, + 'title': title, + 'description': description, + 'formats': formats, + 'thumbnail': thumbnail, + } + + +class ARDIE(InfoExtractor): + _VALID_URL = 
'(?Phttps?://(www\.)?daserste\.de/[^?#]+/videos/(?P[^/?#]+)-(?P[0-9]+))\.html' + _TEST = { + 'url': 'http://www.daserste.de/information/reportage-dokumentation/dokus/videos/die-story-im-ersten-mission-unter-falscher-flagge-100.html', + 'md5': 'd216c3a86493f9322545e045ddc3eb35', + 'info_dict': { + 'display_id': 'die-story-im-ersten-mission-unter-falscher-flagge', + 'id': '100', + 'ext': 'mp4', + 'duration': 2600, + 'title': 'Die Story im Ersten: Mission unter falscher Flagge', + 'upload_date': '20140804', + 'thumbnail': 're:^https?://.*\.jpg$', + } + } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + display_id = mobj.group('display_id') + + player_url = mobj.group('mainurl') + '~playerXml.xml' + doc = self._download_xml(player_url, display_id) + video_node = doc.find('./video') + upload_date = unified_strdate(xpath_text( + video_node, './broadcastDate')) + thumbnail = xpath_text(video_node, './/teaserImage//variant/url') + + formats = [] + for a in video_node.findall('.//asset'): + f = { + 'format_id': a.attrib['type'], + 'width': int_or_none(a.find('./frameWidth').text), + 'height': int_or_none(a.find('./frameHeight').text), + 'vbr': int_or_none(a.find('./bitrateVideo').text), + 'abr': int_or_none(a.find('./bitrateAudio').text), + 'vcodec': a.find('./codecVideo').text, + 'tbr': int_or_none(a.find('./totalBitrate').text), + } + if a.find('./serverPrefix').text: + f['url'] = a.find('./serverPrefix').text + f['playpath'] = a.find('./fileName').text + else: + f['url'] = a.find('./fileName').text + formats.append(f) + self._sort_formats(formats) + + return { + 'id': mobj.group('id'), + 'formats': formats, + 'display_id': display_id, + 'title': video_node.find('./title').text, + 'duration': parse_duration(video_node.find('./duration').text), + 'upload_date': upload_date, + 'thumbnail': thumbnail, + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/arte.py 
b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/arte.py new file mode 100644 index 0000000000..219631b9b0 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/arte.py @@ -0,0 +1,252 @@ +# encoding: utf-8 +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor +from ..utils import ( + find_xpath_attr, + unified_strdate, + get_element_by_id, + get_element_by_attribute, + int_or_none, + qualities, +) + +# There are different sources of video in arte.tv, the extraction process +# is different for each one. The videos usually expire in 7 days, so we can't +# add tests. + + +class ArteTvIE(InfoExtractor): + _VALID_URL = r'http://videos\.arte\.tv/(?Pfr|de)/.*-(?P.*?)\.html' + IE_NAME = 'arte.tv' + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + lang = mobj.group('lang') + video_id = mobj.group('id') + + ref_xml_url = url.replace('/videos/', '/do_delegate/videos/') + ref_xml_url = ref_xml_url.replace('.html', ',view,asPlayerXml.xml') + ref_xml_doc = self._download_xml( + ref_xml_url, video_id, note='Downloading metadata') + config_node = find_xpath_attr(ref_xml_doc, './/video', 'lang', lang) + config_xml_url = config_node.attrib['ref'] + config = self._download_xml( + config_xml_url, video_id, note='Downloading configuration') + + formats = [{ + 'forma_id': q.attrib['quality'], + # The playpath starts at 'mp4:', if we don't manually + # split the url, rtmpdump will incorrectly parse them + 'url': q.text.split('mp4:', 1)[0], + 'play_path': 'mp4:' + q.text.split('mp4:', 1)[1], + 'ext': 'flv', + 'quality': 2 if q.attrib['quality'] == 'hd' else 1, + } for q in config.findall('./urls/url')] + self._sort_formats(formats) + + title = config.find('.//name').text + thumbnail = config.find('.//firstThumbnailUrl').text + return { + 'id': video_id, + 'title': title, + 'thumbnail': thumbnail, + 'formats': formats, + } + + +class ArteTVPlus7IE(InfoExtractor): + IE_NAME = 
'arte.tv:+7' + _VALID_URL = r'https?://(?:www\.)?arte\.tv/guide/(?Pfr|de)/(?:(?:sendungen|emissions)/)?(?P.*?)/(?P.*?)(\?.*)?' + + @classmethod + def _extract_url_info(cls, url): + mobj = re.match(cls._VALID_URL, url) + lang = mobj.group('lang') + # This is not a real id, it can be for example AJT for the news + # http://www.arte.tv/guide/fr/emissions/AJT/arte-journal + video_id = mobj.group('id') + return video_id, lang + + def _real_extract(self, url): + video_id, lang = self._extract_url_info(url) + webpage = self._download_webpage(url, video_id) + return self._extract_from_webpage(webpage, video_id, lang) + + def _extract_from_webpage(self, webpage, video_id, lang): + json_url = self._html_search_regex( + [r'arte_vp_url=["\'](.*?)["\']', r'data-url=["\']([^"]+)["\']'], + webpage, 'json vp url') + return self._extract_from_json_url(json_url, video_id, lang) + + def _extract_from_json_url(self, json_url, video_id, lang): + info = self._download_json(json_url, video_id) + player_info = info['videoJsonPlayer'] + + upload_date_str = player_info.get('shootingDate') + if not upload_date_str: + upload_date_str = player_info.get('VDA', '').split(' ')[0] + + title = player_info['VTI'].strip() + subtitle = player_info.get('VSU', '').strip() + if subtitle: + title += ' - %s' % subtitle + + info_dict = { + 'id': player_info['VID'], + 'title': title, + 'description': player_info.get('VDE'), + 'upload_date': unified_strdate(upload_date_str), + 'thumbnail': player_info.get('programImage') or player_info.get('VTU', {}).get('IUR'), + } + qfunc = qualities(['HQ', 'MQ', 'EQ', 'SQ']) + + formats = [] + for format_id, format_dict in player_info['VSR'].items(): + f = dict(format_dict) + versionCode = f.get('versionCode') + + langcode = { + 'fr': 'F', + 'de': 'A', + }.get(lang, lang) + lang_rexs = [r'VO?%s' % langcode, r'VO?.-ST%s' % langcode] + lang_pref = ( + None if versionCode is None else ( + 10 if any(re.match(r, versionCode) for r in lang_rexs) + else -10)) + source_pref = 0 + 
if versionCode is not None: + # The original version with subtitles has lower relevance + if re.match(r'VO-ST(F|A)', versionCode): + source_pref -= 10 + # The version with sourds/mal subtitles has also lower relevance + elif re.match(r'VO?(F|A)-STM\1', versionCode): + source_pref -= 9 + format = { + 'format_id': format_id, + 'preference': -10 if f.get('videoFormat') == 'M3U8' else None, + 'language_preference': lang_pref, + 'format_note': '%s, %s' % (f.get('versionCode'), f.get('versionLibelle')), + 'width': int_or_none(f.get('width')), + 'height': int_or_none(f.get('height')), + 'tbr': int_or_none(f.get('bitrate')), + 'quality': qfunc(f['quality']), + 'source_preference': source_pref, + } + + if f.get('mediaType') == 'rtmp': + format['url'] = f['streamer'] + format['play_path'] = 'mp4:' + f['url'] + format['ext'] = 'flv' + else: + format['url'] = f['url'] + + formats.append(format) + + self._sort_formats(formats) + + info_dict['formats'] = formats + return info_dict + + +# It also uses the arte_vp_url url from the webpage to extract the information +class ArteTVCreativeIE(ArteTVPlus7IE): + IE_NAME = 'arte.tv:creative' + _VALID_URL = r'https?://creative\.arte\.tv/(?Pfr|de)/(?:magazine?/)?(?P[^?#]+)' + + _TESTS = [{ + 'url': 'http://creative.arte.tv/de/magazin/agentur-amateur-corporate-design', + 'info_dict': { + 'id': '72176', + 'ext': 'mp4', + 'title': 'Folge 2 - Corporate Design', + 'upload_date': '20131004', + }, + }, { + 'url': 'http://creative.arte.tv/fr/Monty-Python-Reunion', + 'info_dict': { + 'id': '160676', + 'ext': 'mp4', + 'title': 'Monty Python live (mostly)', + 'description': 'ц┴vц╘nement ! 
Quarante-cinqб═ans aprц╗s leurs premiers succц╗s, les lц╘gendaires Monty Python remontent sur scц╗ne.\n', + 'upload_date': '20140805', + } + }] + + +class ArteTVFutureIE(ArteTVPlus7IE): + IE_NAME = 'arte.tv:future' + _VALID_URL = r'https?://future\.arte\.tv/(?Pfr|de)/(thema|sujet)/.*?#article-anchor-(?P\d+)' + + _TEST = { + 'url': 'http://future.arte.tv/fr/sujet/info-sciences#article-anchor-7081', + 'info_dict': { + 'id': '5201', + 'ext': 'mp4', + 'title': 'Les champignons au secours de la planц╗te', + 'upload_date': '20131101', + }, + } + + def _real_extract(self, url): + anchor_id, lang = self._extract_url_info(url) + webpage = self._download_webpage(url, anchor_id) + row = get_element_by_id(anchor_id, webpage) + return self._extract_from_webpage(row, anchor_id, lang) + + +class ArteTVDDCIE(ArteTVPlus7IE): + IE_NAME = 'arte.tv:ddc' + _VALID_URL = r'https?://ddc\.arte\.tv/(?Pemission|folge)/(?P.+)' + + def _real_extract(self, url): + video_id, lang = self._extract_url_info(url) + if lang == 'folge': + lang = 'de' + elif lang == 'emission': + lang = 'fr' + webpage = self._download_webpage(url, video_id) + scriptElement = get_element_by_attribute('class', 'visu_video_block', webpage) + script_url = self._html_search_regex(r'src="(.*?)"', scriptElement, 'script url') + javascriptPlayerGenerator = self._download_webpage(script_url, video_id, 'Download javascript player generator') + json_url = self._search_regex(r"json_url=(.*)&rendering_place.*", javascriptPlayerGenerator, 'json url') + return self._extract_from_json_url(json_url, video_id, lang) + + +class ArteTVConcertIE(ArteTVPlus7IE): + IE_NAME = 'arte.tv:concert' + _VALID_URL = r'https?://concert\.arte\.tv/(?Pde|fr)/(?P.+)' + + _TEST = { + 'url': 'http://concert.arte.tv/de/notwist-im-pariser-konzertclub-divan-du-monde', + 'md5': '9ea035b7bd69696b67aa2ccaaa218161', + 'info_dict': { + 'id': '186', + 'ext': 'mp4', + 'title': 'The Notwist im Pariser Konzertclub "Divan du Monde"', + 'upload_date': '20140128', + 
'description': 'md5:486eb08f991552ade77439fe6d82c305', + }, + } + + +class ArteTVEmbedIE(ArteTVPlus7IE): + IE_NAME = 'arte.tv:embed' + _VALID_URL = r'''(?x) + http://www\.arte\.tv + /playerv2/embed\.php\?json_url= + (?P + http://arte\.tv/papi/tvguide/videos/stream/player/ + (?P[^/]+)/(?P[^/]+)[^&]* + ) + ''' + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group('id') + lang = mobj.group('lang') + json_url = mobj.group('json_url') + return self._extract_from_json_url(json_url, video_id, lang) diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/audiomack.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/audiomack.py new file mode 100644 index 0000000000..622b209899 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/audiomack.py @@ -0,0 +1,69 @@ +# coding: utf-8 +from __future__ import unicode_literals + +from .common import InfoExtractor +from .soundcloud import SoundcloudIE +from ..utils import ExtractorError + +import time + + +class AudiomackIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?audiomack\.com/song/(?P[\w/-]+)' + IE_NAME = 'audiomack' + _TESTS = [ + # hosted on audiomack + { + 'url': 'http://www.audiomack.com/song/roosh-williams/extraordinary', + 'info_dict': + { + 'id': 'roosh-williams/extraordinary', + 'ext': 'mp3', + 'title': 'Roosh Williams - Extraordinary' + } + }, + # hosted on soundcloud via audiomack + { + 'add_ie': ['Soundcloud'], + 'url': 'http://www.audiomack.com/song/xclusiveszone/take-kare', + 'info_dict': { + 'id': '172419696', + 'ext': 'mp3', + 'description': 'md5:1fc3272ed7a635cce5be1568c2822997', + 'title': 'Young Thug ft Lil Wayne - Take Kare', + 'uploader': 'Young Thug World', + 'upload_date': '20141016', + } + }, + ] + + def _real_extract(self, url): + video_id = self._match_id(url) + + api_response = self._download_json( + "http://www.audiomack.com/api/music/url/song/%s?_=%d" % ( + video_id, 
time.time()), + video_id) + + if "url" not in api_response: + raise ExtractorError("Unable to deduce api url of song") + realurl = api_response["url"] + + # Audiomack wraps a lot of soundcloud tracks in their branded wrapper + # - if so, pass the work off to the soundcloud extractor + if SoundcloudIE.suitable(realurl): + return {'_type': 'url', 'url': realurl, 'ie_key': 'Soundcloud'} + + webpage = self._download_webpage(url, video_id) + artist = self._html_search_regex( + r'(.*?)', webpage, "artist") + songtitle = self._html_search_regex( + r'

    .*?(.*?)

    ', + webpage, "title") + title = artist + " - " + songtitle + + return { + 'id': video_id, + 'title': title, + 'url': realurl, + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/auengine.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/auengine.py new file mode 100644 index 0000000000..014a219522 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/auengine.py @@ -0,0 +1,54 @@ +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor +from ..compat import compat_urllib_parse +from ..utils import ( + determine_ext, + ExtractorError, +) + + +class AUEngineIE(InfoExtractor): + _VALID_URL = r'http://(?:www\.)?auengine\.com/embed\.php\?.*?file=(?P[^&]+).*?' + + _TEST = { + 'url': 'http://auengine.com/embed.php?file=lfvlytY6&w=650&h=370', + 'md5': '48972bdbcf1a3a2f5533e62425b41d4f', + 'info_dict': { + 'id': 'lfvlytY6', + 'ext': 'mp4', + 'title': '[Commie]The Legend of the Legendary Heroes - 03 - Replication Eye (Alpha Stigma)[F9410F5A]' + } + } + + def _real_extract(self, url): + video_id = self._match_id(url) + + webpage = self._download_webpage(url, video_id) + title = self._html_search_regex(r'(?P<title>.+?)', webpage, 'title') + title = title.strip() + links = re.findall(r'\s(?:file|url):\s*["\']([^\'"]+)["\']', webpage) + links = map(compat_urllib_parse.unquote, links) + + thumbnail = None + video_url = None + for link in links: + if link.endswith('.png'): + thumbnail = link + elif '/videos/' in link: + video_url = link + if not video_url: + raise ExtractorError('Could not find video URL') + ext = '.' 
+ determine_ext(video_url) + if ext == title[-len(ext):]: + title = title[:-len(ext)] + + return { + 'id': video_id, + 'url': video_url, + 'title': title, + 'thumbnail': thumbnail, + 'http_referer': 'http://www.auengine.com/flowplayer/flowplayer.commercial-3.2.14.swf', + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/azubu.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/azubu.py new file mode 100644 index 0000000000..0961d339fd --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/azubu.py @@ -0,0 +1,93 @@ +from __future__ import unicode_literals + +import json + +from .common import InfoExtractor +from ..utils import float_or_none + + +class AzubuIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?azubu\.tv/[^/]+#!/play/(?P\d+)' + _TESTS = [ + { + 'url': 'http://www.azubu.tv/GSL#!/play/15575/2014-hot6-cup-last-big-match-ro8-day-1', + 'md5': 'a88b42fcf844f29ad6035054bd9ecaf4', + 'info_dict': { + 'id': '15575', + 'ext': 'mp4', + 'title': '2014 HOT6 CUP LAST BIG MATCH Ro8 Day 1', + 'description': 'md5:d06bdea27b8cc4388a90ad35b5c66c01', + 'thumbnail': 're:^https?://.*\.jpe?g', + 'timestamp': 1417523507.334, + 'upload_date': '20141202', + 'duration': 9988.7, + 'uploader': 'GSL', + 'uploader_id': 414310, + 'view_count': int, + }, + }, + { + 'url': 'http://www.azubu.tv/FnaticTV#!/play/9344/-fnatic-at-worlds-2014:-toyz---%22i-love-rekkles,-he-has-amazing-mechanics%22-', + 'md5': 'b72a871fe1d9f70bd7673769cdb3b925', + 'info_dict': { + 'id': '9344', + 'ext': 'mp4', + 'title': 'Fnatic at Worlds 2014: Toyz - "I love Rekkles, he has amazing mechanics"', + 'description': 'md5:4a649737b5f6c8b5c5be543e88dc62af', + 'thumbnail': 're:^https?://.*\.jpe?g', + 'timestamp': 1410530893.320, + 'upload_date': '20140912', + 'duration': 172.385, + 'uploader': 'FnaticTV', + 'uploader_id': 272749, + 'view_count': int, + }, + }, + ] + + def _real_extract(self, url): + video_id = self._match_id(url) 
class AzubuIE(InfoExtractor):
    """Extractor for azubu.tv VODs, driven entirely by the site's JSON API."""
    # NOTE(review): restored the lost (?P<id>...) named group -- _match_id()
    # below requires a group called 'id'.
    _VALID_URL = r'https?://(?:www\.)?azubu\.tv/[^/]+#!/play/(?P<id>\d+)'
    _TESTS = [
        {
            'url': 'http://www.azubu.tv/GSL#!/play/15575/2014-hot6-cup-last-big-match-ro8-day-1',
            'md5': 'a88b42fcf844f29ad6035054bd9ecaf4',
            'info_dict': {
                'id': '15575',
                'ext': 'mp4',
                'title': '2014 HOT6 CUP LAST BIG MATCH Ro8 Day 1',
                'description': 'md5:d06bdea27b8cc4388a90ad35b5c66c01',
                'thumbnail': 're:^https?://.*\.jpe?g',
                'timestamp': 1417523507.334,
                'upload_date': '20141202',
                'duration': 9988.7,
                'uploader': 'GSL',
                'uploader_id': 414310,
                'view_count': int,
            },
        },
        {
            'url': 'http://www.azubu.tv/FnaticTV#!/play/9344/-fnatic-at-worlds-2014:-toyz---%22i-love-rekkles,-he-has-amazing-mechanics%22-',
            'md5': 'b72a871fe1d9f70bd7673769cdb3b925',
            'info_dict': {
                'id': '9344',
                'ext': 'mp4',
                'title': 'Fnatic at Worlds 2014: Toyz - "I love Rekkles, he has amazing mechanics"',
                'description': 'md5:4a649737b5f6c8b5c5be543e88dc62af',
                'thumbnail': 're:^https?://.*\.jpe?g',
                'timestamp': 1410530893.320,
                'upload_date': '20140912',
                'duration': 172.385,
                'uploader': 'FnaticTV',
                'uploader_id': 272749,
                'view_count': int,
            },
        },
    ]

    def _real_extract(self, url):
        video_id = self._match_id(url)

        data = self._download_json(
            'http://www.azubu.tv/api/video/%s' % video_id, video_id)['data']

        title = data['title'].strip()
        description = data['description']
        thumbnail = data['thumbnail']
        view_count = data['view_count']
        uploader = data['user']['username']
        uploader_id = data['user']['id']

        # 'stream_params' is a JSON string embedded inside the JSON payload.
        stream_params = json.loads(data['stream_params'])

        # API reports milliseconds; convert to seconds.
        timestamp = float_or_none(stream_params['creationDate'], 1000)
        duration = float_or_none(stream_params['length'], 1000)

        renditions = stream_params.get('renditions') or []
        video = stream_params.get('FLVFullLength') or stream_params.get('videoFullLength')
        if video:
            renditions.append(video)

        formats = [{
            'url': fmt['url'],
            'width': fmt['frameWidth'],
            'height': fmt['frameHeight'],
            'vbr': float_or_none(fmt['encodingRate'], 1000),
            'filesize': fmt['size'],
            'vcodec': fmt['videoCodec'],
            'container': fmt['videoContainer'],
        } for fmt in renditions if fmt['url']]
        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'timestamp': timestamp,
            'duration': duration,
            'uploader': uploader,
            'uploader_id': uploader_id,
            'view_count': view_count,
            'formats': formats,
        }
class BambuserIE(InfoExtractor):
    """Extractor for single bambuser.com broadcasts via the player JSON API."""
    IE_NAME = 'bambuser'
    # NOTE(review): restored the lost (?P<id>...) named group required below.
    _VALID_URL = r'https?://bambuser\.com/v/(?P<id>\d+)'
    _API_KEY = '005f64509e19a868399060af746a00aa'

    _TEST = {
        'url': 'http://bambuser.com/v/4050584',
        # MD5 seems to be flaky, see https://travis-ci.org/rg3/youtube-dl/jobs/14051016#L388
        # 'md5': 'fba8f7693e48fd4e8641b3fd5539a641',
        'info_dict': {
            'id': '4050584',
            'ext': 'flv',
            'title': 'Education engineering days - lightning talks',
            'duration': 3741,
            'uploader': 'pixelversity',
            'uploader_id': '344706',
        },
        'params': {
            # It doesn't respect the 'Range' header, it would download the whole video
            # caused the travis builds to fail: https://travis-ci.org/rg3/youtube-dl/jobs/14493845#L59
            'skip_download': True,
        },
    }

    def _real_extract(self, url):
        # Consistent with the sibling extractors in this patch: use the
        # _match_id() helper instead of a hand-rolled re.match().
        video_id = self._match_id(url)
        info_url = ('http://player-c.api.bambuser.com/getVideo.json?'
                    '&api_key=%s&vid=%s' % (self._API_KEY, video_id))
        info_json = self._download_webpage(info_url, video_id)
        info = json.loads(info_json)['result']

        return {
            'id': video_id,
            'title': info['title'],
            'url': info['url'],
            'thumbnail': info.get('preview'),
            'duration': int(info['length']),
            'view_count': int(info['views_total']),
            'uploader': info['username'],
            'uploader_id': info['uid'],
        }
class BambuserChannelIE(InfoExtractor):
    """Playlist extractor for all broadcasts of a bambuser.com channel.

    Pages through the XHR API in chunks of ``_STEP`` results, using the oldest
    video id seen so far as the continuation cursor.
    """
    IE_NAME = 'bambuser:channel'
    # NOTE(review): restored the lost (?P<user>...) named group --
    # mobj.group('user') below requires it.
    _VALID_URL = r'https?://bambuser\.com/channel/(?P<user>.*?)(?:/|#|\?|$)'
    # The maximum number we can get with each request
    _STEP = 50
    _TEST = {
        'url': 'http://bambuser.com/channel/pixelversity',
        'info_dict': {
            'title': 'pixelversity',
        },
        'playlist_mincount': 60,
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        user = mobj.group('user')
        urls = []
        last_id = ''
        for i in itertools.count(1):
            req_url = (
                'http://bambuser.com/xhr-api/index.php?username={user}'
                '&sort=created&access_mode=0%2C1%2C2&limit={count}'
                '&method=broadcast&format=json&vid_older_than={last}'
            ).format(user=user, count=self._STEP, last=last_id)
            req = compat_urllib_request.Request(req_url)
            # Without setting this header, we wouldn't get any result
            req.add_header('Referer', 'http://bambuser.com/channel/%s' % user)
            data = self._download_json(
                req, user, 'Downloading page %d' % i)
            results = data['result']
            if not results:
                break
            # Continue paging from the oldest vid returned in this batch.
            last_id = results[-1]['vid']
            urls.extend(self.url_result(v['page'], 'Bambuser') for v in results)

        return {
            '_type': 'playlist',
            'title': user,
            'entries': urls,
        }
class BandcampIE(InfoExtractor):
    """Extractor for individual Bandcamp tracks.

    Two paths exist: tracks with a free-download page (followed to resolve the
    real download URL) and plain streamable tracks whose formats come from the
    embedded ``trackinfo`` JSON.
    """
    # NOTE(review): restored the lost (?P<title>...) named group --
    # mobj.group('title') below requires it.
    _VALID_URL = r'https?://.*?\.bandcamp\.com/track/(?P<title>.*)'
    _TESTS = [{
        'url': 'http://youtube-dl.bandcamp.com/track/youtube-dl-test-song',
        'md5': 'c557841d5e50261777a6585648adf439',
        'info_dict': {
            'id': '1812978515',
            'ext': 'mp3',
            'title': "youtube-dl \"'/\\\u00e4\u21ad - youtube-dl test song \"'/\\\u00e4\u21ad",
            'duration': 9.8485,
        },
        '_skip': 'There is a limit of 200 free downloads / month for the test song'
    }, {
        'url': 'http://benprunty.bandcamp.com/track/lanius-battle',
        'md5': '2b68e5851514c20efdff2afc5603b8b4',
        'info_dict': {
            'id': '2650410135',
            'ext': 'mp3',
            'title': 'Lanius (Battle)',
            'uploader': 'Ben Prunty Music',
        },
    }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        title = mobj.group('title')
        webpage = self._download_webpage(url, title)
        m_download = re.search(r'freeDownloadPage: "(.*?)"', webpage)
        if not m_download:
            # No free-download page: fall back to the embedded trackinfo JSON.
            m_trackinfo = re.search(r'trackinfo: (.+),\s*?\n', webpage)
            if not m_trackinfo:
                raise ExtractorError('No free songs found')

            data = json.loads(m_trackinfo.group(1))[0]

            formats = []
            for format_id, format_url in data['file'].items():
                # format_id looks like "mp3-128": codec, then bitrate.
                ext, abr_str = format_id.split('-', 1)
                formats.append({
                    'format_id': format_id,
                    'url': format_url,
                    'ext': ext,
                    'vcodec': 'none',
                    'acodec': ext,
                    'abr': int(abr_str),
                })

            self._sort_formats(formats)

            return {
                'id': compat_str(data['id']),
                'title': data['title'],
                'formats': formats,
                'duration': float(data['duration']),
            }

        download_link = m_download.group(1)
        video_id = self._search_regex(
            r'var TralbumData = {.*?id: (?P<id>\d+),?$',
            webpage, 'video id', flags=re.MULTILINE | re.DOTALL)

        download_webpage = self._download_webpage(download_link, video_id, 'Downloading free downloads page')
        # We get the dictionary of the track from some javascript code
        info = re.search(r'items: (.*?),$', download_webpage, re.MULTILINE).group(1)
        info = json.loads(info)[0]
        # We pick mp3-320 for now, until format selection can be easily implemented.
        mp3_info = info['downloads']['mp3-320']
        # If we try to use this url it says the link has expired
        initial_url = mp3_info['url']
        re_url = r'(?P<server>http://(.*?)\.bandcamp\.com)/download/track\?enc=mp3-320&fsig=(?P<fsig>.*?)&id=(?P<id>.*?)&ts=(?P<ts>.*)$'
        m_url = re.match(re_url, initial_url)
        # We build the url we will use to get the final track url
        # This url is build in Bandcamp in the script download_bunde_*.js
        request_url = '%s/statdownload/track?enc=mp3-320&fsig=%s&id=%s&ts=%s&.rand=665028774616&.vrs=1' % (m_url.group('server'), m_url.group('fsig'), video_id, m_url.group('ts'))
        final_url_webpage = self._download_webpage(request_url, video_id, 'Requesting download url')
        # If we could correctly generate the .rand field the url would be
        # in the "download_url" key
        final_url = re.search(r'"retry_url":"(.*?)"', final_url_webpage).group(1)

        return {
            'id': video_id,
            'title': info['title'],
            'ext': 'mp3',
            'vcodec': 'none',
            'url': final_url,
            'thumbnail': info.get('thumb_url'),
            'uploader': info.get('artist'),
        }
class BandcampAlbumIE(InfoExtractor):
    """Playlist extractor for Bandcamp albums (and bare artist front pages)."""
    IE_NAME = 'Bandcamp:album'
    _VALID_URL = r'https?://(?:(?P<subdomain>[^.]+)\.)?bandcamp\.com(?:/album/(?P<title>[^?#]+)|/?(?:$|[?#]))'

    _TESTS = [{
        'url': 'http://blazo.bandcamp.com/album/jazz-format-mixtape-vol-1',
        'playlist': [
            {
                'md5': '39bc1eded3476e927c724321ddf116cf',
                'info_dict': {
                    'id': '1353101989',
                    'ext': 'mp3',
                    'title': 'Intro',
                }
            },
            {
                'md5': '1a2c32e2691474643e912cc6cd4bffaa',
                'info_dict': {
                    'id': '38097443',
                    'ext': 'mp3',
                    'title': 'Kero One - Keep It Alive (Blazo remix)',
                }
            },
        ],
        'info_dict': {
            'title': 'Jazz Format Mixtape vol.1',
        },
        'params': {
            'playlistend': 2
        },
        'skip': 'Bandcamp imposes download limits. See test_playlists:test_bandcamp_album for the playlist test'
    }, {
        'url': 'http://nightbringer.bandcamp.com/album/hierophany-of-the-open-grave',
        'info_dict': {
            'title': 'Hierophany of the Open Grave',
        },
        'playlist_mincount': 9,
    }, {
        'url': 'http://dotscale.bandcamp.com',
        'info_dict': {
            'title': 'Loom',
        },
        'playlist_mincount': 7,
    }]

    def _real_extract(self, url):
        url_match = re.match(self._VALID_URL, url)
        playlist_id = url_match.group('subdomain')
        album_slug = url_match.group('title')
        display_id = album_slug or playlist_id

        webpage = self._download_webpage(url, display_id)

        # Every track on the page is an itemprop="url" anchor.
        track_hrefs = re.findall(r'<a href="(.*?)" itemprop="url">', webpage)
        if not track_hrefs:
            raise ExtractorError("The page doesn't contain any tracks")

        entries = []
        for href in track_hrefs:
            track_url = compat_urlparse.urljoin(url, href)
            entries.append(self.url_result(track_url, ie=BandcampIE.ie_key()))

        album_title = self._search_regex(
            r'album_title : "(.*?)"', webpage, 'title')

        return {
            '_type': 'playlist',
            'id': playlist_id,
            'display_id': display_id,
            'title': album_title,
            'entries': entries,
        }
class BBCCoUkIE(SubtitlesInfoExtractor):
    """BBC iPlayer extractor.

    Resolves a programme/episode page to a 'vpid' (media id), then queries the
    BBC media selector for HTTP/RTMP connections, audio/video formats and
    TTML captions.
    """
    IE_NAME = 'bbc.co.uk'
    IE_DESC = 'BBC iPlayer'
    # NOTE(review): restored the lost (?P<id>...) named group required by
    # _match_id() in _real_extract().
    _VALID_URL = r'https?://(?:www\.)?bbc\.co\.uk/(?:programmes|iplayer/episode)/(?P<id>[\da-z]{8})'

    _TESTS = [
        {
            'url': 'http://www.bbc.co.uk/programmes/b039g8p7',
            'info_dict': {
                'id': 'b039d07m',
                'ext': 'flv',
                'title': 'Kaleidoscope: Leonard Cohen',
                'description': 'md5:db4755d7a665ae72343779f7dacb402c',
                'duration': 1740,
            },
            'params': {
                # rtmp download
                'skip_download': True,
            }
        },
        {
            'url': 'http://www.bbc.co.uk/iplayer/episode/b00yng5w/The_Man_in_Black_Series_3_The_Printed_Name/',
            'info_dict': {
                'id': 'b00yng1d',
                'ext': 'flv',
                'title': 'The Man in Black: Series 3: The Printed Name',
                'description': "Mark Gatiss introduces Nicholas Pierpan's chilling tale of a writer's devilish pact with a mysterious man. Stars Ewan Bailey.",
                'duration': 1800,
            },
            'params': {
                # rtmp download
                'skip_download': True,
            },
            'skip': 'Episode is no longer available on BBC iPlayer Radio',
        },
        {
            'url': 'http://www.bbc.co.uk/iplayer/episode/b03vhd1f/The_Voice_UK_Series_3_Blind_Auditions_5/',
            'info_dict': {
                'id': 'b00yng1d',
                'ext': 'flv',
                'title': 'The Voice UK: Series 3: Blind Auditions 5',
                'description': "Emma Willis and Marvin Humes present the fifth set of blind auditions in the singing competition, as the coaches continue to build their teams based on voice alone.",
                'duration': 5100,
            },
            'params': {
                # rtmp download
                'skip_download': True,
            },
            'skip': 'Currently BBC iPlayer TV programmes are available to play in the UK only',
        },
        {
            'url': 'http://www.bbc.co.uk/iplayer/episode/p026c7jt/tomorrows-worlds-the-unearthly-history-of-science-fiction-2-invasion',
            'info_dict': {
                'id': 'b03k3pb7',
                'ext': 'flv',
                'title': "Tomorrow's Worlds: The Unearthly History of Science Fiction",
                'description': '2. Invasion',
                'duration': 3600,
            },
            'params': {
                # rtmp download
                'skip_download': True,
            },
            'skip': 'Currently BBC iPlayer TV programmes are available to play in the UK only',
        },
    ]

    def _extract_asx_playlist(self, connection, programme_id):
        # An ASX playlist is a small XML document whose Entry/ref elements
        # carry the actual media URLs.
        asx = self._download_xml(connection.get('href'), programme_id, 'Downloading ASX playlist')
        return [ref.get('href') for ref in asx.findall('./Entry/ref')]

    def _extract_connection(self, connection, programme_id):
        """Turn one media-selector <connection> element into format dicts."""
        formats = []
        protocol = connection.get('protocol')
        supplier = connection.get('supplier')
        if protocol == 'http':
            href = connection.get('href')
            # ASX playlist
            if supplier == 'asx':
                for i, ref in enumerate(self._extract_asx_playlist(connection, programme_id)):
                    formats.append({
                        'url': ref,
                        'format_id': 'ref%s_%s' % (i, supplier),
                    })
            # Direct link
            else:
                formats.append({
                    'url': href,
                    'format_id': supplier,
                })
        elif protocol == 'rtmp':
            application = connection.get('application', 'ondemand')
            auth_string = connection.get('authString')
            identifier = connection.get('identifier')
            server = connection.get('server')
            formats.append({
                'url': '%s://%s/%s?%s' % (protocol, server, application, auth_string),
                'play_path': identifier,
                'app': '%s?%s' % (application, auth_string),
                'page_url': 'http://www.bbc.co.uk',
                'player_url': 'http://www.bbc.co.uk/emp/releases/iplayer/revisions/617463_618125_4/617463_618125_4_emp.swf',
                'rtmp_live': False,
                'ext': 'flv',
                'format_id': supplier,
            })
        return formats

    def _extract_items(self, playlist):
        return playlist.findall('./{http://bbc.co.uk/2008/emp/playlist}item')

    def _extract_medias(self, media_selection):
        error = media_selection.find('./{http://bbc.co.uk/2008/mp/mediaselection}error')
        if error is not None:
            raise ExtractorError(
                '%s returned error: %s' % (self.IE_NAME, error.get('id')), expected=True)
        return media_selection.findall('./{http://bbc.co.uk/2008/mp/mediaselection}media')

    def _extract_connections(self, media):
        return media.findall('./{http://bbc.co.uk/2008/mp/mediaselection}connection')

    def _extract_video(self, media, programme_id):
        formats = []
        vbr = int(media.get('bitrate'))
        vcodec = media.get('encoding')
        service = media.get('service')
        width = int(media.get('width'))
        height = int(media.get('height'))
        file_size = int(media.get('media_file_size'))
        for connection in self._extract_connections(media):
            conn_formats = self._extract_connection(connection, programme_id)
            for format in conn_formats:
                format.update({
                    'format_id': '%s_%s' % (service, format['format_id']),
                    'width': width,
                    'height': height,
                    'vbr': vbr,
                    'vcodec': vcodec,
                    'filesize': file_size,
                })
            formats.extend(conn_formats)
        return formats

    def _extract_audio(self, media, programme_id):
        formats = []
        abr = int(media.get('bitrate'))
        acodec = media.get('encoding')
        service = media.get('service')
        for connection in self._extract_connections(media):
            conn_formats = self._extract_connection(connection, programme_id)
            for format in conn_formats:
                format.update({
                    'format_id': '%s_%s' % (service, format['format_id']),
                    'abr': abr,
                    'acodec': acodec,
                })
            formats.extend(conn_formats)
        return formats

    def _extract_captions(self, media, programme_id):
        """Convert the TTML caption document into SRT-style text per language."""
        subtitles = {}
        for connection in self._extract_connections(media):
            captions = self._download_xml(connection.get('href'), programme_id, 'Downloading captions')
            lang = captions.get('{http://www.w3.org/XML/1998/namespace}lang', 'en')
            ps = captions.findall('./{0}body/{0}div/{0}p'.format('{http://www.w3.org/2006/10/ttaf1}'))
            srt = ''
            for pos, p in enumerate(ps):
                srt += '%s\r\n%s --> %s\r\n%s\r\n\r\n' % (str(pos), p.get('begin'), p.get('end'),
                                                          p.text.strip() if p.text is not None else '')
            subtitles[lang] = srt
        return subtitles

    def _download_media_selector(self, programme_id):
        try:
            media_selection = self._download_xml(
                'http://open.live.bbc.co.uk/mediaselector/5/select/version/2.0/mediaset/pc/vpid/%s' % programme_id,
                programme_id, 'Downloading media selection XML')
        except ExtractorError as ee:
            # A 403 still carries a parseable mediaselection body (error info).
            if isinstance(ee.cause, compat_HTTPError) and ee.cause.code == 403:
                media_selection = xml.etree.ElementTree.fromstring(ee.cause.read().encode('utf-8'))
            else:
                raise

        formats = []
        subtitles = None

        for media in self._extract_medias(media_selection):
            kind = media.get('kind')
            if kind == 'audio':
                formats.extend(self._extract_audio(media, programme_id))
            elif kind == 'video':
                formats.extend(self._extract_video(media, programme_id))
            elif kind == 'captions':
                subtitles = self._extract_captions(media, programme_id)

        return formats, subtitles

    def _real_extract(self, url):
        group_id = self._match_id(url)

        webpage = self._download_webpage(url, group_id, 'Downloading video page')

        programme_id = self._search_regex(
            r'"vpid"\s*:\s*"([\da-z]{8})"', webpage, 'vpid', fatal=False, default=None)
        if programme_id:
            player = self._download_json(
                'http://www.bbc.co.uk/iplayer/episode/%s.json' % group_id,
                group_id)['jsConf']['player']
            title = player['title']
            description = player['subtitle']
            duration = player['duration']
            formats, subtitles = self._download_media_selector(programme_id)
        else:
            # No vpid on the page: fall back to the legacy playlist XML.
            playlist = self._download_xml(
                'http://www.bbc.co.uk/iplayer/playlist/%s' % group_id,
                group_id, 'Downloading playlist XML')

            no_items = playlist.find('./{http://bbc.co.uk/2008/emp/playlist}noItems')
            if no_items is not None:
                reason = no_items.get('reason')
                if reason == 'preAvailability':
                    msg = 'Episode %s is not yet available' % group_id
                elif reason == 'postAvailability':
                    msg = 'Episode %s is no longer available' % group_id
                elif reason == 'noMedia':
                    msg = 'Episode %s is not currently available' % group_id
                else:
                    msg = 'Episode %s is not available: %s' % (group_id, reason)
                raise ExtractorError(msg, expected=True)

            for item in self._extract_items(playlist):
                kind = item.get('kind')
                if kind != 'programme' and kind != 'radioProgramme':
                    continue
                title = playlist.find('./{http://bbc.co.uk/2008/emp/playlist}title').text
                description = playlist.find('./{http://bbc.co.uk/2008/emp/playlist}summary').text
                programme_id = item.get('identifier')
                duration = int(item.get('duration'))
                formats, subtitles = self._download_media_selector(programme_id)

        if self._downloader.params.get('listsubtitles', False):
            self._list_available_subtitles(programme_id, subtitles)
            return

        self._sort_formats(formats)

        return {
            'id': programme_id,
            'title': title,
            'description': description,
            'duration': duration,
            'formats': formats,
            'subtitles': subtitles,
        }
class BetIE(InfoExtractor):
    """Extractor for bet.com news/video pages.

    The page embeds a media RSS (MRSS) URL; the MRSS item supplies metadata
    and a SMIL URL from which the actual formats are derived.
    """
    # NOTE(review): restored the lost (?P<id>...) named group required by
    # _match_id() below.
    _VALID_URL = r'https?://(?:www\.)?bet\.com/(?:[^/]+/)+(?P<id>.+?)\.html'
    _TESTS = [
        {
            'url': 'http://www.bet.com/news/politics/2014/12/08/in-bet-exclusive-obama-talks-race-and-racism.html',
            'info_dict': {
                'id': '417cd61c-c793-4e8e-b006-e445ecc45add',
                'display_id': 'in-bet-exclusive-obama-talks-race-and-racism',
                'ext': 'flv',
                'title': 'BET News Presents: A Conversation With President Obama',
                'description': 'md5:5a88d8ae912c1b33e090290af7ec33c6',
                'duration': 1534,
                'timestamp': 1418075340,
                'upload_date': '20141208',
                'uploader': 'admin',
                'thumbnail': 're:(?i)^https?://.*\.jpg$',
            },
            'params': {
                # rtmp download
                'skip_download': True,
            },
        },
        {
            'url': 'http://www.bet.com/video/news/national/2014/justice-for-ferguson-a-community-reacts.html',
            'info_dict': {
                'id': '4160e53b-ad41-43b1-980f-8d85f63121f4',
                'display_id': 'justice-for-ferguson-a-community-reacts',
                'ext': 'flv',
                'title': 'Justice for Ferguson: A Community Reacts',
                'description': 'A BET News special.',
                'duration': 1696,
                'timestamp': 1416942360,
                'upload_date': '20141125',
                'uploader': 'admin',
                'thumbnail': 're:(?i)^https?://.*\.jpg$',
            },
            'params': {
                # rtmp download
                'skip_download': True,
            },
        }
    ]

    def _real_extract(self, url):
        display_id = self._match_id(url)

        webpage = self._download_webpage(url, display_id)

        media_url = compat_urllib_parse.unquote(self._search_regex(
            [r'mediaURL\s*:\s*"([^"]+)"', r"var\s+mrssMediaUrl\s*=\s*'([^']+)'"],
            webpage, 'media URL'))

        mrss = self._download_xml(media_url, display_id)

        item = mrss.find('./channel/item')

        NS_MAP = {
            'dc': 'http://purl.org/dc/elements/1.1/',
            'media': 'http://search.yahoo.com/mrss/',
            'ka': 'http://kickapps.com/karss',
        }

        title = xpath_text(item, './title', 'title')
        description = xpath_text(
            item, './description', 'description', fatal=False)

        video_id = xpath_text(item, './guid', 'video id', fatal=False)

        timestamp = parse_iso8601(xpath_text(
            item, xpath_with_ns('./dc:date', NS_MAP),
            'upload date', fatal=False))
        uploader = xpath_text(
            item, xpath_with_ns('./dc:creator', NS_MAP),
            'uploader', fatal=False)

        media_content = item.find(
            xpath_with_ns('./media:content', NS_MAP))
        duration = int_or_none(media_content.get('duration'))
        smil_url = media_content.get('url')

        thumbnail = media_content.find(
            xpath_with_ns('./media:thumbnail', NS_MAP)).get('url')

        formats = self._extract_smil_formats(smil_url, display_id)

        return {
            'id': video_id,
            'display_id': display_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'timestamp': timestamp,
            'uploader': uploader,
            'duration': duration,
            'formats': formats,
        }
class BildIE(InfoExtractor):
    """Extractor for Bild.de clips, using the article's XML companion feed."""
    # NOTE(review): restored the lost named groups. _match_id() requires
    # (?P<id>...); the slug group is unused by the code and is named
    # display_id per project convention -- confirm against upstream.
    _VALID_URL = r'https?://(?:www\.)?bild\.de/(?:[^/]+/)+(?P<display_id>[^/]+)-(?P<id>\d+)(?:,auto=true)?\.bild\.html'
    IE_DESC = 'Bild.de'
    _TEST = {
        'url': 'http://www.bild.de/video/clip/apple-ipad-air/das-koennen-die-neuen-ipads-38184146.bild.html',
        'md5': 'dd495cbd99f2413502a1713a1156ac8a',
        'info_dict': {
            'id': '38184146',
            'ext': 'mp4',
            'title': 'BILD hat sie getestet',
            'thumbnail': 'http://bilder.bild.de/fotos/stand-das-koennen-die-neuen-ipads-38184138/Bild/1.bild.jpg',
            'duration': 196,
            # NOTE(review): the German umlauts in this fixture arrived
            # mojibake-encoded; restored -- confirm against the live page.
            'description': 'Mit dem iPad Air 2 und dem iPad Mini 3 hat Apple zwei neue Tablet-Modelle präsentiert. BILD-Reporter Sven Stein durfte die Geräte bereits testen. ',
        }
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)

        # Swap the HTML suffix for the XML view of the same article.
        xml_url = url.split(".bild.html")[0] + ",view=xml.bild.xml"
        doc = self._download_xml(xml_url, video_id)

        # Attribute is in milliseconds.
        duration = int_or_none(doc.attrib.get('duration'), scale=1000)

        return {
            'id': video_id,
            'title': doc.attrib['ueberschrift'],
            'description': doc.attrib.get('text'),
            'url': doc.attrib['src'],
            'thumbnail': doc.attrib.get('img'),
            'duration': duration,
        }
'2c301e4dab317596e837c3e7633e7d86', + 'info_dict': { + 'id': '1074402', + 'ext': 'flv', + 'title': 'Ц─░И┤▒Е²╥Е·┐Ц─▒И┤▒ФЁ║Ф╡╚', + 'duration': 308, + 'upload_date': '20140420', + 'thumbnail': 're:^https?://.+\.jpg', + }, + } + + def _real_extract(self, url): + video_id = self._match_id(url) + webpage = self._download_webpage(url, video_id) + + video_code = self._search_regex( + r'(?s)
    (.*?)
    ', webpage, 'video code') + + title = self._html_search_meta( + 'media:title', video_code, 'title', fatal=True) + duration_str = self._html_search_meta( + 'duration', video_code, 'duration') + if duration_str is None: + duration = None + else: + duration_mobj = re.match( + r'^T(?:(?P[0-9]+)H)?(?P[0-9]+)M(?P[0-9]+)S$', + duration_str) + duration = ( + int_or_none(duration_mobj.group('hours'), default=0) * 3600 + + int(duration_mobj.group('minutes')) * 60 + + int(duration_mobj.group('seconds'))) + upload_date = unified_strdate(self._html_search_meta( + 'uploadDate', video_code, fatal=False)) + thumbnail = self._html_search_meta( + 'thumbnailUrl', video_code, 'thumbnail', fatal=False) + + player_params = compat_parse_qs(self._html_search_regex( + r'', + start_page, 'xml root', default=None) + if xml_root is None: + # Probably need to authenticate + login_res = self._login(webpage_url, video_id) + if login_res is None: + self.report_warning('Could not login.') + else: + start_page = login_res + # Grab the url from the authenticated page + xml_root = self._html_search_regex( + r'', + start_page, 'xml filename', default=None) + if xml_name is None: + # Fallback to the older format + xml_name = self._html_search_regex(r'', webpage) + if mobj: + embedded_url = mobj.group(1) + return self.url_result(embedded_url) + + video_title = self._html_search_regex(r'

    ]*>([^<]+)', webpage, 'title') + video_url = compat_urllib_parse.unquote(self._html_search_regex(r'video_url=(.+?)&', webpage, 'video_url')) + if 'encrypted=true' in webpage: + password = self._html_search_regex(r'video_title=(.+?)&', webpage, 'password') + video_url = aes_decrypt_text(video_url, password, 32).decode('utf-8') + path = compat_urllib_parse_urlparse(video_url).path + extension = os.path.splitext(path)[1][1:] + format = path.split('/')[4].split('_')[:2] + format = "-".join(format) + + age_limit = self._rta_search(webpage) + + return { + 'id': video_id, + 'title': video_title, + 'url': video_url, + 'ext': extension, + 'format': format, + 'format_id': format, + 'age_limit': age_limit, + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/khanacademy.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/khanacademy.py new file mode 100644 index 0000000000..408d00944c --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/khanacademy.py @@ -0,0 +1,80 @@ +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor +from ..utils import ( + unified_strdate, +) + + +class KhanAcademyIE(InfoExtractor): + _VALID_URL = r'^https?://(?:(?:www|api)\.)?khanacademy\.org/(?P[^/]+)/(?:[^/]+/){,2}(?P[^?#/]+)(?:$|[?#])' + IE_NAME = 'KhanAcademy' + + _TESTS = [{ + 'url': 'http://www.khanacademy.org/video/one-time-pad', + 'md5': '7021db7f2d47d4fff89b13177cb1e8f4', + 'info_dict': { + 'id': 'one-time-pad', + 'ext': 'mp4', + 'title': 'The one-time pad', + 'description': 'The perfect cipher', + 'duration': 176, + 'uploader': 'Brit Cruise', + 'upload_date': '20120411', + } + }, { + 'url': 'https://www.khanacademy.org/math/applied-math/cryptography', + 'info_dict': { + 'id': 'cryptography', + 'title': 'Journey into cryptography', + 'description': 'How have humans protected their secret messages through history? 
What has changed today?', + }, + 'playlist_mincount': 3, + }] + + def _real_extract(self, url): + m = re.match(self._VALID_URL, url) + video_id = m.group('id') + + if m.group('key') == 'video': + data = self._download_json( + 'http://api.khanacademy.org/api/v1/videos/' + video_id, + video_id, 'Downloading video info') + + upload_date = unified_strdate(data['date_added']) + uploader = ', '.join(data['author_names']) + return { + '_type': 'url_transparent', + 'url': data['url'], + 'id': video_id, + 'title': data['title'], + 'thumbnail': data['image_url'], + 'duration': data['duration'], + 'description': data['description'], + 'uploader': uploader, + 'upload_date': upload_date, + } + else: + # topic + data = self._download_json( + 'http://api.khanacademy.org/api/v1/topic/' + video_id, + video_id, 'Downloading topic info') + + entries = [ + { + '_type': 'url', + 'url': c['url'], + 'id': c['id'], + 'title': c['title'], + } + for c in data['children'] if c['kind'] in ('Video', 'Topic')] + + return { + '_type': 'playlist', + 'id': video_id, + 'title': data['title'], + 'description': data['description'], + 'entries': entries, + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/kickstarter.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/kickstarter.py new file mode 100644 index 0000000000..7d4b570565 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/kickstarter.py @@ -0,0 +1,57 @@ +# encoding: utf-8 +from __future__ import unicode_literals + +from .common import InfoExtractor + + +class KickStarterIE(InfoExtractor): + _VALID_URL = r'https?://www\.kickstarter\.com/projects/(?P[^/]*)/.*' + _TESTS = [{ + 'url': 'https://www.kickstarter.com/projects/1404461844/intersection-the-story-of-josh-grant?ref=home_location', + 'md5': 'c81addca81327ffa66c642b5d8b08cab', + 'info_dict': { + 'id': '1404461844', + 'ext': 'mp4', + 'title': 'Intersection: The Story of Josh Grant by Kyle Cowling', + 
'description': ( + 'A unique motocross documentary that examines the ' + 'life and mind of one of sports most elite athletes: Josh Grant.' + ), + }, + }, { + 'note': 'Embedded video (not using the native kickstarter video service)', + 'url': 'https://www.kickstarter.com/projects/597507018/pebble-e-paper-watch-for-iphone-and-android/posts/659178', + 'info_dict': { + 'id': '78704821', + 'ext': 'mp4', + 'uploader_id': 'pebble', + 'uploader': 'Pebble Technology', + 'title': 'Pebble iOS Notifications', + } + }] + + def _real_extract(self, url): + video_id = self._match_id(url) + webpage = self._download_webpage(url, video_id) + + title = self._html_search_regex( + r'\s*(.*?)(?:\s*— Kickstarter)?\s*', + webpage, 'title') + video_url = self._search_regex( + r'data-video-url="(.*?)"', + webpage, 'video URL', default=None) + if video_url is None: # No native kickstarter, look for embedded videos + return { + '_type': 'url_transparent', + 'ie_key': 'Generic', + 'url': url, + 'title': title, + } + + return { + 'id': video_id, + 'url': video_url, + 'title': title, + 'description': self._og_search_description(webpage), + 'thumbnail': self._og_search_thumbnail(webpage), + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/kontrtube.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/kontrtube.py new file mode 100644 index 0000000000..41fd62009a --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/kontrtube.py @@ -0,0 +1,66 @@ +# encoding: utf-8 +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor +from ..utils import int_or_none + + +class KontrTubeIE(InfoExtractor): + IE_NAME = 'kontrtube' + IE_DESC = 'KontrTube.ru - п╒я─я┐п╠п╟ п╥п╬п╡я▒я┌' + _VALID_URL = r'http://(?:www\.)?kontrtube\.ru/videos/(?P\d+)/.+' + + _TEST = { + 'url': 'http://www.kontrtube.ru/videos/2678/nad-olimpiyskoy-derevney-v-sochi-podnyat-rossiyskiy-flag/', + 'md5': 
'975a991a4926c9a85f383a736a2e6b80', + 'info_dict': { + 'id': '2678', + 'ext': 'mp4', + 'title': 'п²п╟п╢ п╬п╩п╦п╪п©п╦п╧я│п╨п╬п╧ п╢п╣я─п╣п╡п╫п╣п╧ п╡ п║п╬я┤п╦ п©п╬п╢п╫я▐я┌ я─п╬я│я│п╦п╧я│п╨п╦п╧ я└п╩п╟пЁ', + 'description': 'md5:80edc4c613d5887ae8ccf1d59432be41', + 'thumbnail': 'http://www.kontrtube.ru/contents/videos_screenshots/2000/2678/preview.mp4.jpg', + 'duration': 270, + } + } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group('id') + + webpage = self._download_webpage(url, video_id, 'Downloading page') + + video_url = self._html_search_regex(r"video_url: '(.+?)/?',", webpage, 'video URL') + thumbnail = self._html_search_regex(r"preview_url: '(.+?)/?',", webpage, 'video thumbnail', fatal=False) + title = self._html_search_regex( + r'(.+?)', webpage, 'video title') + description = self._html_search_meta('description', webpage, 'video description') + + mobj = re.search( + r'
    п■п╩п╦я┌п╣п╩я▄п╫п╬я│я┌я▄: (?P\d+)п╪:(?P\d+)я│
    ', webpage) + duration = int(mobj.group('minutes')) * 60 + int(mobj.group('seconds')) if mobj else None + + view_count = self._html_search_regex( + r'
    п÷я─п╬я│п╪п╬я┌я─п╬п╡: (\d+)
    ', webpage, 'view count', fatal=False) + + comment_count = None + comment_str = self._html_search_regex( + r'п п╬п╪п╪п╣п╫я┌п╟я─п╦п╦: ([^<]+)', webpage, 'comment count', fatal=False) + if comment_str.startswith('п╨п╬п╪п╪п╣п╫я┌п╟я─п╦п╣п╡ п╫п╣я┌'): + comment_count = 0 + else: + mobj = re.search(r'\d+ п╦п╥ (?P\d+) п╨п╬п╪п╪п╣п╫я┌п╟я─п╦п╣п╡', comment_str) + if mobj: + comment_count = mobj.group('total') + + return { + 'id': video_id, + 'url': video_url, + 'thumbnail': thumbnail, + 'title': title, + 'description': description, + 'duration': duration, + 'view_count': int_or_none(view_count), + 'comment_count': int_or_none(comment_count), + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/krasview.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/krasview.py new file mode 100644 index 0000000000..6f3d2345b6 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/krasview.py @@ -0,0 +1,59 @@ +# encoding: utf-8 +from __future__ import unicode_literals + +import json +import re + +from .common import InfoExtractor +from ..utils import ( + int_or_none, + unescapeHTML, +) + + +class KrasViewIE(InfoExtractor): + IE_DESC = 'п я─п╟я│п╡я▄я▌' + _VALID_URL = r'https?://krasview\.ru/video/(?P\d+)' + + _TEST = { + 'url': 'http://krasview.ru/video/512228', + 'md5': '3b91003cf85fc5db277870c8ebd98eae', + 'info_dict': { + 'id': '512228', + 'ext': 'mp4', + 'title': 'п║п╫п╣пЁ, п╩я▒п╢, п╥п╟п╫п╬я│я▀', + 'description': 'п║п╫я▐я┌п╬ п╡ пЁп╬я─п╬п╢п╣ п²я▐пЁп╟п╫я▄, п╡ п╔п╟п╫я┌я▀-п°п╟п╫я│п╦п╧я│п╨п╬п╪ п╟п╡я┌п╬п╫п╬п╪п╫п╬п╪ п╬п╨я─я┐пЁп╣.', + 'duration': 27, + 'thumbnail': 're:^https?://.*\.jpg', + }, + } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group('id') + + webpage = self._download_webpage(url, video_id) + + flashvars = json.loads(self._search_regex( + r'flashvars\s*:\s*({.+?})\s*}\);', webpage, 'flashvars')) + + video_url = flashvars['url'] + title = 
unescapeHTML(flashvars['title']) + description = unescapeHTML(flashvars.get('subtitle') or self._og_search_description(webpage, default=None)) + thumbnail = flashvars['image'] + duration = int(flashvars['duration']) + filesize = int(flashvars['size']) + width = int_or_none(self._og_search_property('video:width', webpage, 'video width')) + height = int_or_none(self._og_search_property('video:height', webpage, 'video height')) + + return { + 'id': video_id, + 'url': video_url, + 'title': title, + 'description': description, + 'thumbnail': thumbnail, + 'duration': duration, + 'filesize': filesize, + 'width': width, + 'height': height, + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/ku6.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/ku6.py new file mode 100644 index 0000000000..a602980a14 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/ku6.py @@ -0,0 +1,32 @@ +from __future__ import unicode_literals + +from .common import InfoExtractor + + +class Ku6IE(InfoExtractor): + _VALID_URL = r'http://v\.ku6\.com/show/(?P[a-zA-Z0-9\-\_]+)(?:\.)*html' + _TEST = { + 'url': 'http://v.ku6.com/show/JG-8yS14xzBr4bCn1pu0xw...html', + 'md5': '01203549b9efbb45f4b87d55bdea1ed1', + 'info_dict': { + 'id': 'JG-8yS14xzBr4bCn1pu0xw', + 'ext': 'f4v', + 'title': 'techniques test', + } + } + + def _real_extract(self, url): + video_id = self._match_id(url) + webpage = self._download_webpage(url, video_id) + + title = self._html_search_regex( + r'

    (.*?)

    ', webpage, 'title') + dataUrl = 'http://v.ku6.com/fetchVideo4Player/%s.html' % video_id + jsonData = self._download_json(dataUrl, video_id) + downloadUrl = jsonData['data']['f'] + + return { + 'id': video_id, + 'title': title, + 'url': downloadUrl + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/la7.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/la7.py new file mode 100644 index 0000000000..db2028e9f5 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/la7.py @@ -0,0 +1,63 @@ +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor +from ..utils import ( + parse_duration, +) + + +class LA7IE(InfoExtractor): + IE_NAME = 'la7.tv' + _VALID_URL = r'''(?x) + https?://(?:www\.)?la7\.tv/ + (?: + richplayer/\?assetid=| + \?contentId= + ) + (?P[0-9]+)''' + + _TEST = { + 'url': 'http://www.la7.tv/richplayer/?assetid=50355319', + 'file': '50355319.mp4', + 'md5': 'ec7d1f0224d20ba293ab56cf2259651f', + 'info_dict': { + 'title': 'IL DIVO', + 'description': 'Un film di Paolo Sorrentino con Toni Servillo, Anna Bonaiuto, Giulio Bosetti e Flavio Bucci', + 'duration': 6254, + }, + 'skip': 'Blocked in the US', + } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group('id') + + xml_url = 'http://www.la7.tv/repliche/content/index.php?contentId=%s' % video_id + doc = self._download_xml(xml_url, video_id) + + video_title = doc.find('title').text + description = doc.find('description').text + duration = parse_duration(doc.find('duration').text) + thumbnail = doc.find('img').text + view_count = int(doc.find('views').text) + + prefix = doc.find('.//fqdn').text.strip().replace('auto:', 'http:') + + formats = [{ + 'format': vnode.find('quality').text, + 'tbr': int(vnode.find('quality').text), + 'url': vnode.find('fms').text.strip().replace('mp4:', prefix), + } for vnode in doc.findall('.//videos/video')] + 
self._sort_formats(formats) + + return { + 'id': video_id, + 'title': video_title, + 'description': description, + 'thumbnail': thumbnail, + 'duration': duration, + 'formats': formats, + 'view_count': view_count, + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/laola1tv.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/laola1tv.py new file mode 100644 index 0000000000..2fd3b4699d --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/laola1tv.py @@ -0,0 +1,77 @@ +from __future__ import unicode_literals + +import random +import re + +from .common import InfoExtractor +from ..utils import ExtractorError + + +class Laola1TvIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?laola1\.tv/(?P[a-z]+)-(?P[a-z]+)/.*?/(?P[0-9]+)\.html' + _TEST = { + 'url': 'http://www.laola1.tv/de-de/live/bwf-bitburger-open-grand-prix-gold-court-1/250019.html', + 'info_dict': { + 'id': '250019', + 'ext': 'mp4', + 'title': 'Bitburger Open Grand Prix Gold - Court 1', + 'categories': ['Badminton'], + 'uploader': 'BWF - Badminton World Federation', + 'is_live': True, + }, + 'params': { + 'skip_download': True, + } + } + + _BROKEN = True # Not really - extractor works fine, but f4m downloader does not support live streams yet. + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group('id') + lang = mobj.group('lang') + portal = mobj.group('portal') + + webpage = self._download_webpage(url, video_id) + iframe_url = self._search_regex( + r']*?class="main_tv_player"[^>]*?src="([^"]+)"', + webpage, 'iframe URL') + + iframe = self._download_webpage( + iframe_url, video_id, note='Downloading iframe') + flashvars_m = re.findall( + r'flashvars\.([_a-zA-Z0-9]+)\s*=\s*"([^"]*)";', iframe) + flashvars = dict((m[0], m[1]) for m in flashvars_m) + + xml_url = ('http://www.laola1.tv/server/hd_video.php?' 
+ + 'play=%s&partner=1&portal=%s&v5ident=&lang=%s' % ( + video_id, portal, lang)) + hd_doc = self._download_xml(xml_url, video_id) + + title = hd_doc.find('.//video/title').text + flash_url = hd_doc.find('.//video/url').text + categories = hd_doc.find('.//video/meta_sports').text.split(',') + uploader = hd_doc.find('.//video/meta_organistation').text + + ident = random.randint(10000000, 99999999) + token_url = '%s&ident=%s&klub=0&unikey=0×tamp=%s&auth=%s' % ( + flash_url, ident, flashvars['timestamp'], flashvars['auth']) + + token_doc = self._download_xml( + token_url, video_id, note='Downloading token') + token_attrib = token_doc.find('.//token').attrib + if token_attrib.get('auth') == 'blocked': + raise ExtractorError('Token error: ' % token_attrib.get('comment')) + + video_url = '%s?hdnea=%s&hdcore=3.2.0' % ( + token_attrib['url'], token_attrib['auth']) + + return { + 'id': video_id, + 'is_live': True, + 'title': title, + 'url': video_url, + 'uploader': uploader, + 'categories': categories, + 'ext': 'mp4', + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/lifenews.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/lifenews.py new file mode 100644 index 0000000000..1dfe7f77f4 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/lifenews.py @@ -0,0 +1,74 @@ +# encoding: utf-8 +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor +from ..utils import ( + int_or_none, + unified_strdate, + ExtractorError, +) + + +class LifeNewsIE(InfoExtractor): + IE_NAME = 'lifenews' + IE_DESC = 'LIFE | NEWS' + _VALID_URL = r'http://lifenews\.ru/(?:mobile/)?news/(?P\d+)' + + _TEST = { + 'url': 'http://lifenews.ru/news/126342', + 'md5': 'e1b50a5c5fb98a6a544250f2e0db570a', + 'info_dict': { + 'id': '126342', + 'ext': 'mp4', + 'title': 'п°п▓п■ я─п╟п╥я▀я│п╨п╦п╡п╟п╣я┌ п╪я┐п╤я┤п╦п╫, п╬я│я┌п╟п╡п╦п╡я┬п╦я┘ п╡ IKEA я│я┐п╪п╨я┐ я│ п╟п╡я┌п╬п╪п╟я┌п╬п╪', + 'description': 
'п п╟п╪п╣я─я▀ п╫п╟п╠п╩я▌п╢п╣п╫п╦я▐ пЁп╦п©п╣я─п╪п╟я─п╨п╣я┌п╟ п╥п╟я└п╦п╨я│п╦я─п╬п╡п╟п╩п╦ я┌я─п╬п╦я┘ п╪я┐п╤я┤п╦п╫, я│п©я─я▐я┌п╟п╡я┬п╦я┘ п╬я─я┐п╤п╣п╧п╫я▀п╧ п╟я─я│п╣п╫п╟п╩ п╡ п╨п╟п╪п╣я─п╣ я┘я─п╟п╫п╣п╫п╦я▐.', + 'thumbnail': 're:http://.*\.jpg', + 'upload_date': '20140130', + } + } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group('id') + + webpage = self._download_webpage('http://lifenews.ru/news/%s' % video_id, video_id, 'Downloading page') + + videos = re.findall(r'[^"]+)".*?src="(?P', webpage) + if not videos: + raise ExtractorError('No media links available for %s' % video_id) + + title = self._og_search_title(webpage) + TITLE_SUFFIX = ' - п÷п╣я─п╡я▀п╧ п©п╬ я│я─п╬я┤п╫я▀п╪ п╫п╬п╡п╬я│я┌я▐п╪ Б─■ LIFE | NEWS' + if title.endswith(TITLE_SUFFIX): + title = title[:-len(TITLE_SUFFIX)] + + description = self._og_search_description(webpage) + + view_count = self._html_search_regex( + r'
    (\d+)
    ', webpage, 'view count', fatal=False) + comment_count = self._html_search_regex( + r'
    \s*(\d+)', webpage, 'comment count', fatal=False) + + upload_date = self._html_search_regex( + r'

    ', webpage, 'title') + title, _, ext = title_str.rpartition('.') + filesize_approx = parse_filesize(self._html_search_regex( + r'

    (.*?)

    ', + webpage, 'file size approximation', fatal=False)) + duration = parse_duration(self._html_search_regex( + r'(?s)

    .*?class="bold">(.*?)<', + webpage, 'duration', fatal=False)) + view_count = int_or_none(self._html_search_regex( + r'

    ([0-9]+)

    ', + webpage, 'view count', fatal=False)) + + return { + 'id': video_id, + 'url': video_url, + 'title': title, + 'ext': ext, + 'filesize_approx': filesize_approx, + 'duration': duration, + 'view_count': view_count, + 'thumbnail': self._og_search_thumbnail(webpage), + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/ministrygrid.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/ministrygrid.py new file mode 100644 index 0000000000..949ad11db2 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/ministrygrid.py @@ -0,0 +1,57 @@ +from __future__ import unicode_literals + +import json +import re + +from .common import InfoExtractor +from ..utils import ( + ExtractorError, + smuggle_url, +) + + +class MinistryGridIE(InfoExtractor): + _VALID_URL = r'https?://www\.ministrygrid.com/([^/?#]*/)*(?P[^/#?]+)/?(?:$|[?#])' + + _TEST = { + 'url': 'http://www.ministrygrid.com/training-viewer/-/training/t4g-2014-conference/the-gospel-by-numbers-4/the-gospel-by-numbers', + 'md5': '844be0d2a1340422759c2a9101bab017', + 'info_dict': { + 'id': '3453494717001', + 'ext': 'mp4', + 'title': 'The Gospel by Numbers', + 'description': 'Coming soon from T4G 2014!', + 'uploader': 'LifeWay Christian Resources (MG)', + }, + } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group('id') + + webpage = self._download_webpage(url, video_id) + portlets_json = self._search_regex( + r'Liferay\.Portlet\.list=(\[.+?\])', webpage, 'portlet list') + portlets = json.loads(portlets_json) + pl_id = self._search_regex( + r'', re.S).sub('', raw_page) + + base_url = self._search_regex( + r'ipadUrl: \'(.+?cloudfront.net/)', raw_page, 'base url') + formats_json = self._search_regex( + r'bitrates: (\[.+?\])', raw_page, 'video formats') + formats_mit = json.loads(formats_json) + formats = [ + { + 'format_id': f['label'], + 'url': base_url + f['url'].partition(':')[2], + 'ext': 
f['url'].partition(':')[0], + 'format': f['label'], + 'width': f['width'], + 'vbr': f['bitrate'], + } + for f in formats_mit + ] + + title = get_element_by_id('edit-title', clean_page) + description = clean_html(get_element_by_id('edit-description', clean_page)) + thumbnail = self._search_regex( + r'playlist:.*?url: \'(.+?)\'', + raw_page, 'thumbnail', flags=re.DOTALL) + + return { + 'id': video_id, + 'title': title, + 'formats': formats, + 'description': description, + 'thumbnail': thumbnail, + } + + +class MITIE(TechTVMITIE): + IE_NAME = 'video.mit.edu' + _VALID_URL = r'https?://video\.mit\.edu/watch/(?P[^/]+)' + + _TEST = { + 'url': 'http://video.mit.edu/watch/the-government-is-profiling-you-13222/', + 'md5': '7db01d5ccc1895fc5010e9c9e13648da', + 'info_dict': { + 'id': '21783', + 'ext': 'mp4', + 'title': 'The Government is Profiling You', + 'description': 'md5:ad5795fe1e1623b73620dbfd47df9afd', + }, + } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + page_title = mobj.group('title') + webpage = self._download_webpage(url, page_title) + embed_url = self._search_regex( + r'<iframe .*?src="(.+?)"', webpage, 'embed url') + return self.url_result(embed_url, ie='TechTVMIT') + + +class OCWMITIE(InfoExtractor): + IE_NAME = 'ocw.mit.edu' + _VALID_URL = r'^http://ocw\.mit\.edu/courses/(?P<topic>[a-z0-9\-]+)' + _BASE_URL = 'http://ocw.mit.edu/' + + _TESTS = [ + { + 'url': 'http://ocw.mit.edu/courses/electrical-engineering-and-computer-science/6-041-probabilistic-systems-analysis-and-applied-probability-fall-2010/video-lectures/lecture-7-multiple-variables-expectations-independence/', + 'info_dict': { + 'id': 'EObHWIEKGjA', + 'ext': 'mp4', + 'title': 'Lecture 7: Multiple Discrete Random Variables: Expectations, Conditioning, Independence', + 'description': 'In this lecture, the professor discussed multiple random variables, expectations, and binomial distribution.', + # 'subtitles': 
'http://ocw.mit.edu/courses/electrical-engineering-and-computer-science/6-041-probabilistic-systems-analysis-and-applied-probability-fall-2010/video-lectures/lecture-7-multiple-variables-expectations-independence/MIT6_041F11_lec07_300k.mp4.srt' + } + }, + { + 'url': 'http://ocw.mit.edu/courses/mathematics/18-01sc-single-variable-calculus-fall-2010/1.-differentiation/part-a-definition-and-basic-rules/session-1-introduction-to-derivatives/', + 'info_dict': { + 'id': '7K1sB05pE0A', + 'ext': 'mp4', + 'title': 'Session 1: Introduction to Derivatives', + 'description': 'This section contains lecture video excerpts, lecture notes, an interactive mathlet with supporting documents, and problem solving videos.', + # 'subtitles': 'http://ocw.mit.edu//courses/mathematics/18-01sc-single-variable-calculus-fall-2010/ocw-18.01-f07-lec01_300k.SRT' + } + } + ] + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + topic = mobj.group('topic') + + webpage = self._download_webpage(url, topic) + title = self._html_search_meta('WT.cg_s', webpage) + description = self._html_search_meta('Description', webpage) + + # search for call to ocw_embed_chapter_media(container_id, media_url, provider, page_url, image_url, start, stop, captions_file) + embed_chapter_media = re.search(r'ocw_embed_chapter_media\((.+?)\)', webpage) + if embed_chapter_media: + metadata = re.sub(r'[\'"]', '', embed_chapter_media.group(1)) + metadata = re.split(r', ?', metadata) + yt = metadata[1] + subs = compat_urlparse.urljoin(self._BASE_URL, metadata[7]) + else: + # search for call to ocw_embed_chapter_media(container_id, media_url, provider, page_url, image_url, captions_file) + embed_media = re.search(r'ocw_embed_media\((.+?)\)', webpage) + if embed_media: + metadata = re.sub(r'[\'"]', '', embed_media.group(1)) + metadata = re.split(r', ?', metadata) + yt = metadata[1] + subs = compat_urlparse.urljoin(self._BASE_URL, metadata[5]) + else: + raise ExtractorError('Unable to find embedded YouTube 
video.') + video_id = YoutubeIE.extract_id(yt) + + return { + '_type': 'url_transparent', + 'id': video_id, + 'title': title, + 'description': description, + 'url': yt, + 'url_transparent' + 'subtitles': subs, + 'ie_key': 'Youtube', + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/mitele.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/mitele.py new file mode 100644 index 0000000000..2567583235 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/mitele.py @@ -0,0 +1,68 @@ +from __future__ import unicode_literals + +import json + +from .common import InfoExtractor +from ..compat import ( + compat_urllib_parse, + compat_urlparse, +) +from ..utils import ( + get_element_by_attribute, + parse_duration, + strip_jsonp, +) + + +class MiTeleIE(InfoExtractor): + IE_NAME = 'mitele.es' + _VALID_URL = r'http://www\.mitele\.es/[^/]+/[^/]+/[^/]+/(?P<id>[^/]+)/' + + _TEST = { + 'url': 'http://www.mitele.es/programas-tv/diario-de/la-redaccion/programa-144/', + 'md5': '6a75fe9d0d3275bead0cb683c616fddb', + 'info_dict': { + 'id': '0fce117d', + 'ext': 'mp4', + 'title': 'Programa 144 - Tor, la web invisible', + 'description': 'md5:3b6fce7eaa41b2d97358726378d9369f', + 'display_id': 'programa-144', + 'duration': 2913, + }, + } + + def _real_extract(self, url): + episode = self._match_id(url) + webpage = self._download_webpage(url, episode) + embed_data_json = self._search_regex( + r'(?s)MSV\.embedData\[.*?\]\s*=\s*({.*?});', webpage, 'embed data', + ).replace('\'', '"') + embed_data = json.loads(embed_data_json) + + domain = embed_data['mediaUrl'] + if not domain.startswith('http'): + # only happens in telecinco.es videos + domain = 'http://' + domain + info_url = compat_urlparse.urljoin( + domain, + compat_urllib_parse.unquote(embed_data['flashvars']['host']) + ) + info_el = self._download_xml(info_url, episode).find('./video/info') + + video_link = info_el.find('videoUrl/link').text + 
token_query = compat_urllib_parse.urlencode({'id': video_link}) + token_info = self._download_json( + embed_data['flashvars']['ov_tk'] + '?' + token_query, + episode, + transform_source=strip_jsonp + ) + + return { + 'id': embed_data['videoId'], + 'display_id': episode, + 'title': info_el.find('title').text, + 'url': token_info['tokenizedUrl'], + 'description': get_element_by_attribute('class', 'text', webpage), + 'thumbnail': info_el.find('thumb').text, + 'duration': parse_duration(info_el.find('duration').text), + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/mixcloud.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/mixcloud.py new file mode 100644 index 0000000000..07d194562e --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/mixcloud.py @@ -0,0 +1,109 @@ +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor +from ..compat import ( + compat_urllib_parse, +) +from ..utils import ( + ExtractorError, + HEADRequest, + int_or_none, + parse_iso8601, +) + + +class MixcloudIE(InfoExtractor): + _VALID_URL = r'^(?:https?://)?(?:www\.)?mixcloud\.com/([^/]+)/([^/]+)' + IE_NAME = 'mixcloud' + + _TEST = { + 'url': 'http://www.mixcloud.com/dholbach/cryptkeeper/', + 'info_dict': { + 'id': 'dholbach-cryptkeeper', + 'ext': 'mp3', + 'title': 'Cryptkeeper', + 'description': 'After quite a long silence from myself, finally another Drum\'n\'Bass mix with my favourite current dance floor bangers.', + 'uploader': 'Daniel Holbach', + 'uploader_id': 'dholbach', + 'upload_date': '20111115', + 'timestamp': 1321359578, + 'thumbnail': 're:https?://.*\.jpg', + 'view_count': int, + 'like_count': int, + }, + } + + def _get_url(self, track_id, template_url): + server_count = 30 + for i in range(server_count): + url = template_url % i + try: + # We only want to know if the request succeed + # don't download the whole file + self._request_webpage( + 
HEADRequest(url), track_id, + 'Checking URL %d/%d ...' % (i + 1, server_count + 1)) + return url + except ExtractorError: + pass + + return None + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + uploader = mobj.group(1) + cloudcast_name = mobj.group(2) + track_id = compat_urllib_parse.unquote('-'.join((uploader, cloudcast_name))) + + webpage = self._download_webpage(url, track_id) + + preview_url = self._search_regex( + r'\s(?:data-preview-url|m-preview)="(.+?)"', webpage, 'preview url') + song_url = preview_url.replace('/previews/', '/c/originals/') + template_url = re.sub(r'(stream\d*)', 'stream%d', song_url) + final_song_url = self._get_url(track_id, template_url) + if final_song_url is None: + self.to_screen('Trying with m4a extension') + template_url = template_url.replace('.mp3', '.m4a').replace('originals/', 'm4a/64/') + final_song_url = self._get_url(track_id, template_url) + if final_song_url is None: + raise ExtractorError('Unable to extract track url') + + PREFIX = ( + r'<span class="play-button[^"]*?"' + r'(?:\s+[a-zA-Z0-9-]+(?:="[^"]+")?)*?\s+') + title = self._html_search_regex( + PREFIX + r'm-title="([^"]+)"', webpage, 'title') + thumbnail = self._proto_relative_url(self._html_search_regex( + PREFIX + r'm-thumbnail-url="([^"]+)"', webpage, 'thumbnail', + fatal=False)) + uploader = self._html_search_regex( + PREFIX + r'm-owner-name="([^"]+)"', + webpage, 'uploader', fatal=False) + uploader_id = self._search_regex( + r'\s+"profile": "([^"]+)",', webpage, 'uploader id', fatal=False) + description = self._og_search_description(webpage) + like_count = int_or_none(self._search_regex( + r'<meta itemprop="interactionCount" content="UserLikes:([0-9]+)"', + webpage, 'like count', fatal=False)) + view_count = int_or_none(self._search_regex( + r'<meta itemprop="interactionCount" content="UserPlays:([0-9]+)"', + webpage, 'play count', fatal=False)) + timestamp = parse_iso8601(self._search_regex( + r'<time itemprop="dateCreated" 
datetime="([^"]+)">', + webpage, 'upload date')) + + return { + 'id': track_id, + 'title': title, + 'url': final_song_url, + 'description': description, + 'thumbnail': thumbnail, + 'uploader': uploader, + 'uploader_id': uploader_id, + 'timestamp': timestamp, + 'view_count': view_count, + 'like_count': like_count, + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/mlb.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/mlb.py new file mode 100644 index 0000000000..1a241aca77 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/mlb.py @@ -0,0 +1,128 @@ +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor +from ..utils import ( + parse_duration, + parse_iso8601, +) + + +class MLBIE(InfoExtractor): + _VALID_URL = r'https?://m(?:lb)?\.mlb\.com/(?:(?:.*?/)?video/(?:topic/[\da-z_-]+/)?v|(?:shared/video/embed/embed\.html|[^/]+/video/play\.jsp)\?.*?\bcontent_id=)(?P<id>n?\d+)' + _TESTS = [ + { + 'url': 'http://m.mlb.com/sea/video/topic/51231442/v34698933/nymsea-ackley-robs-a-home-run-with-an-amazing-catch/?c_id=sea', + 'md5': 'ff56a598c2cf411a9a38a69709e97079', + 'info_dict': { + 'id': '34698933', + 'ext': 'mp4', + 'title': "Ackley's spectacular catch", + 'description': 'md5:7f5a981eb4f3cbc8daf2aeffa2215bf0', + 'duration': 66, + 'timestamp': 1405980600, + 'upload_date': '20140721', + 'thumbnail': 're:^https?://.*\.jpg$', + }, + }, + { + 'url': 'http://m.mlb.com/video/topic/81536970/v34496663/mianym-stanton-practices-for-the-home-run-derby', + 'md5': 'd9c022c10d21f849f49c05ae12a8a7e9', + 'info_dict': { + 'id': '34496663', + 'ext': 'mp4', + 'title': 'Stanton prepares for Derby', + 'description': 'md5:d00ce1e5fd9c9069e9c13ab4faedfa57', + 'duration': 46, + 'timestamp': 1405105800, + 'upload_date': '20140711', + 'thumbnail': 're:^https?://.*\.jpg$', + }, + }, + { + 'url': 
'http://m.mlb.com/video/topic/vtp_hrd_sponsor/v34578115/hrd-cespedes-wins-2014-gillette-home-run-derby', + 'md5': '0e6e73d509321e142409b695eadd541f', + 'info_dict': { + 'id': '34578115', + 'ext': 'mp4', + 'title': 'Cespedes repeats as Derby champ', + 'description': 'md5:08df253ce265d4cf6fb09f581fafad07', + 'duration': 488, + 'timestamp': 1405399936, + 'upload_date': '20140715', + 'thumbnail': 're:^https?://.*\.jpg$', + }, + }, + { + 'url': 'http://m.mlb.com/video/v34577915/bautista-on-derby-captaining-duties-his-performance', + 'md5': 'b8fd237347b844365d74ea61d4245967', + 'info_dict': { + 'id': '34577915', + 'ext': 'mp4', + 'title': 'Bautista on Home Run Derby', + 'description': 'md5:b80b34031143d0986dddc64a8839f0fb', + 'duration': 52, + 'timestamp': 1405390722, + 'upload_date': '20140715', + 'thumbnail': 're:^https?://.*\.jpg$', + }, + }, + { + 'url': 'http://m.mlb.com/shared/video/embed/embed.html?content_id=35692085&topic_id=6479266&width=400&height=224&property=mlb', + 'only_matching': True, + }, + { + 'url': 'http://mlb.mlb.com/shared/video/embed/embed.html?content_id=36599553', + 'only_matching': True, + }, + { + 'url': 'http://mlb.mlb.com/es/video/play.jsp?content_id=36599553', + 'only_matching': True, + }, + ] + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group('id') + + detail = self._download_xml( + 'http://m.mlb.com/gen/multimedia/detail/%s/%s/%s/%s.xml' + % (video_id[-3], video_id[-2], video_id[-1], video_id), video_id) + + title = detail.find('./headline').text + description = detail.find('./big-blurb').text + duration = parse_duration(detail.find('./duration').text) + timestamp = parse_iso8601(detail.attrib['date'][:-5]) + + thumbnails = [{ + 'url': thumbnail.text, + } for thumbnail in detail.findall('./thumbnailScenarios/thumbnailScenario')] + + formats = [] + for media_url in detail.findall('./url'): + playback_scenario = media_url.attrib['playback_scenario'] + fmt = { + 'url': media_url.text, + 
'format_id': playback_scenario, + } + m = re.search(r'(?P<vbr>\d+)K_(?P<width>\d+)X(?P<height>\d+)', playback_scenario) + if m: + fmt.update({ + 'vbr': int(m.group('vbr')) * 1000, + 'width': int(m.group('width')), + 'height': int(m.group('height')), + }) + formats.append(fmt) + + self._sort_formats(formats) + + return { + 'id': video_id, + 'title': title, + 'description': description, + 'duration': duration, + 'timestamp': timestamp, + 'formats': formats, + 'thumbnails': thumbnails, + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/moevideo.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/moevideo.py new file mode 100644 index 0000000000..184f9c2c9e --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/moevideo.py @@ -0,0 +1,114 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import json +import re + +from .common import InfoExtractor +from ..compat import ( + compat_urllib_parse, + compat_urllib_request, +) +from ..utils import ( + ExtractorError, + int_or_none, +) + + +class MoeVideoIE(InfoExtractor): + IE_DESC = 'LetitBit video services: moevideo.net, playreplay.net and videochart.net' + _VALID_URL = r'''(?x) + https?://(?P<host>(?:www\.)? 
+ (?:(?:moevideo|playreplay|videochart)\.net))/ + (?:video|framevideo)/(?P<id>[0-9]+\.[0-9A-Za-z]+)''' + _API_URL = 'http://api.letitbit.net/' + _API_KEY = 'tVL0gjqo5' + _TESTS = [ + { + 'url': 'http://moevideo.net/video/00297.0036103fe3d513ef27915216fd29', + 'md5': '129f5ae1f6585d0e9bb4f38e774ffb3a', + 'info_dict': { + 'id': '00297.0036103fe3d513ef27915216fd29', + 'ext': 'flv', + 'title': 'Sink cut out machine', + 'description': 'md5:f29ff97b663aefa760bf7ca63c8ca8a8', + 'thumbnail': 're:^https?://.*\.jpg$', + 'width': 540, + 'height': 360, + 'duration': 179, + 'filesize': 17822500, + } + }, + { + 'url': 'http://playreplay.net/video/77107.7f325710a627383d40540d8e991a', + 'md5': '74f0a014d5b661f0f0e2361300d1620e', + 'info_dict': { + 'id': '77107.7f325710a627383d40540d8e991a', + 'ext': 'flv', + 'title': 'Operacion Condor.', + 'description': 'md5:7e68cb2fcda66833d5081c542491a9a3', + 'thumbnail': 're:^https?://.*\.jpg$', + 'width': 480, + 'height': 296, + 'duration': 6027, + 'filesize': 588257923, + } + }, + ] + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group('id') + + webpage = self._download_webpage( + 'http://%s/video/%s' % (mobj.group('host'), video_id), + video_id, 'Downloading webpage') + + title = self._og_search_title(webpage) + thumbnail = self._og_search_thumbnail(webpage) + description = self._og_search_description(webpage) + + r = [ + self._API_KEY, + [ + 'preview/flv_link', + { + 'uid': video_id, + }, + ], + ] + r_json = json.dumps(r) + post = compat_urllib_parse.urlencode({'r': r_json}) + req = compat_urllib_request.Request(self._API_URL, post) + req.add_header('Content-type', 'application/x-www-form-urlencoded') + + response = self._download_json(req, video_id) + if response['status'] != 'OK': + raise ExtractorError( + '%s returned error: %s' % (self.IE_NAME, response['data']), + expected=True + ) + item = response['data'][0] + video_url = item['link'] + duration = int_or_none(item['length']) + width = 
int_or_none(item['width']) + height = int_or_none(item['height']) + filesize = int_or_none(item['convert_size']) + + formats = [{ + 'format_id': 'sd', + 'http_headers': {'Range': 'bytes=0-'}, # Required to download + 'url': video_url, + 'width': width, + 'height': height, + 'filesize': filesize, + }] + + return { + 'id': video_id, + 'title': title, + 'thumbnail': thumbnail, + 'description': description, + 'duration': duration, + 'formats': formats, + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/mofosex.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/mofosex.py new file mode 100644 index 0000000000..2cec12d35e --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/mofosex.py @@ -0,0 +1,53 @@ +from __future__ import unicode_literals + +import os +import re + +from .common import InfoExtractor +from ..compat import ( + compat_urllib_parse_urlparse, + compat_urllib_request, + compat_urllib_parse, +) + + +class MofosexIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?(?P<url>mofosex\.com/videos/(?P<id>[0-9]+)/.*?\.html)' + _TEST = { + 'url': 'http://www.mofosex.com/videos/5018/japanese-teen-music-video.html', + 'md5': '1b2eb47ac33cc75d4a80e3026b613c5a', + 'info_dict': { + 'id': '5018', + 'ext': 'mp4', + 'title': 'Japanese Teen Music Video', + 'age_limit': 18, + } + } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group('id') + url = 'http://www.' 
+ mobj.group('url') + + req = compat_urllib_request.Request(url) + req.add_header('Cookie', 'age_verified=1') + webpage = self._download_webpage(req, video_id) + + video_title = self._html_search_regex(r'<h1>(.+?)<', webpage, 'title') + video_url = compat_urllib_parse.unquote(self._html_search_regex(r'flashvars.video_url = \'([^\']+)', webpage, 'video_url')) + path = compat_urllib_parse_urlparse(video_url).path + extension = os.path.splitext(path)[1][1:] + format = path.split('/')[5].split('_')[:2] + format = "-".join(format) + + age_limit = self._rta_search(webpage) + + return { + 'id': video_id, + 'title': video_title, + 'url': video_url, + 'ext': extension, + 'format': format, + 'format_id': format, + 'age_limit': age_limit, + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/mojvideo.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/mojvideo.py new file mode 100644 index 0000000000..0ba435dc55 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/mojvideo.py @@ -0,0 +1,58 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor +from ..utils import ( + ExtractorError, + parse_duration, +) + + +class MojvideoIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?mojvideo\.com/video-(?P<display_id>[^/]+)/(?P<id>[a-f0-9]+)' + _TEST = { + 'url': 'http://www.mojvideo.com/video-v-avtu-pred-mano-rdecelaska-alfi-nipic/3d1ed4497707730b2906', + 'md5': 'f7fd662cc8ce2be107b0d4f2c0483ae7', + 'info_dict': { + 'id': '3d1ed4497707730b2906', + 'display_id': 'v-avtu-pred-mano-rdecelaska-alfi-nipic', + 'ext': 'mp4', + 'title': 'V avtu pred mano rdeд█elaska - Alfi Nipiд█', + 'thumbnail': 're:^http://.*\.jpg$', + 'duration': 242, + } + } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group('id') + display_id = mobj.group('display_id') + + # XML is malformed + playerapi = self._download_webpage( + 
'http://www.mojvideo.com/playerapi.php?v=%s&t=1' % video_id, display_id) + + if '<error>true</error>' in playerapi: + error_desc = self._html_search_regex( + r'<errordesc>([^<]*)</errordesc>', playerapi, 'error description', fatal=False) + raise ExtractorError('%s said: %s' % (self.IE_NAME, error_desc), expected=True) + + title = self._html_search_regex( + r'<title>([^<]+)', playerapi, 'title') + video_url = self._html_search_regex( + r'([^<]+)', playerapi, 'video URL') + thumbnail = self._html_search_regex( + r'([^<]+)', playerapi, 'thumbnail', fatal=False) + duration = parse_duration(self._html_search_regex( + r'([^<]+)', playerapi, 'duration', fatal=False)) + + return { + 'id': video_id, + 'display_id': display_id, + 'url': video_url, + 'title': title, + 'thumbnail': thumbnail, + 'duration': duration, + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/moniker.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/moniker.py new file mode 100644 index 0000000000..5de719bdc4 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/moniker.py @@ -0,0 +1,69 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import os.path +import re + +from .common import InfoExtractor +from ..compat import ( + compat_urllib_parse, + compat_urllib_request, +) + + +class MonikerIE(InfoExtractor): + IE_DESC = 'allmyvideos.net and vidspot.net' + _VALID_URL = r'https?://(?:www\.)?(?:allmyvideos|vidspot)\.net/(?P[a-zA-Z0-9_-]+)' + + _TESTS = [{ + 'url': 'http://allmyvideos.net/jih3nce3x6wn', + 'md5': '710883dee1bfc370ecf9fa6a89307c88', + 'info_dict': { + 'id': 'jih3nce3x6wn', + 'ext': 'mp4', + 'title': 'youtube-dl test video', + }, + }, { + 'url': 'http://vidspot.net/l2ngsmhs8ci5', + 'md5': '710883dee1bfc370ecf9fa6a89307c88', + 'info_dict': { + 'id': 'l2ngsmhs8ci5', + 'ext': 'mp4', + 'title': 'youtube-dl test video', + }, + }, { + 'url': 'https://www.vidspot.net/l2ngsmhs8ci5', + 
'only_matching': True, + }] + + def _real_extract(self, url): + video_id = self._match_id(url) + orig_webpage = self._download_webpage(url, video_id) + + fields = re.findall(r'type="hidden" name="(.+?)"\s* value="?(.+?)">', orig_webpage) + data = dict(fields) + + post = compat_urllib_parse.urlencode(data) + headers = { + b'Content-Type': b'application/x-www-form-urlencoded', + } + req = compat_urllib_request.Request(url, post, headers) + webpage = self._download_webpage( + req, video_id, note='Downloading video page ...') + + title = os.path.splitext(data['fname'])[0] + + # Could be several links with different quality + links = re.findall(r'"file" : "?(.+?)",', webpage) + # Assume the links are ordered in quality + formats = [{ + 'url': l, + 'quality': i, + } for i, l in enumerate(links)] + self._sort_formats(formats) + + return { + 'id': video_id, + 'title': title, + 'formats': formats, + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/mooshare.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/mooshare.py new file mode 100644 index 0000000000..7603af5e2f --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/mooshare.py @@ -0,0 +1,112 @@ +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor +from ..compat import ( + compat_urllib_request, + compat_urllib_parse, +) +from ..utils import ( + ExtractorError, +) + + +class MooshareIE(InfoExtractor): + IE_NAME = 'mooshare' + IE_DESC = 'Mooshare.biz' + _VALID_URL = r'http://(?:www\.)?mooshare\.biz/(?P[\da-z]{12})' + + _TESTS = [ + { + 'url': 'http://mooshare.biz/8dqtk4bjbp8g', + 'md5': '4e14f9562928aecd2e42c6f341c8feba', + 'info_dict': { + 'id': '8dqtk4bjbp8g', + 'ext': 'mp4', + 'title': 'Comedy Football 2011 - (part 1-2)', + 'duration': 893, + }, + }, + { + 'url': 'http://mooshare.biz/aipjtoc4g95j', + 'info_dict': { + 'id': 'aipjtoc4g95j', + 'ext': 'mp4', + 'title': 'Orange Caramel Dashing 
Through the Snow', + 'duration': 212, + }, + 'params': { + # rtmp download + 'skip_download': True, + } + } + ] + + def _real_extract(self, url): + video_id = self._match_id(url) + page = self._download_webpage(url, video_id, 'Downloading page') + + if re.search(r'>Video Not Found or Deleted<', page) is not None: + raise ExtractorError('Video %s does not exist' % video_id, expected=True) + + hash_key = self._html_search_regex(r'', page, 'hash') + title = self._html_search_regex(r'(?m)
    \s*

    Watch ([^<]+)

    ', page, 'title') + + download_form = { + 'op': 'download1', + 'id': video_id, + 'hash': hash_key, + } + + request = compat_urllib_request.Request( + 'http://mooshare.biz/%s' % video_id, compat_urllib_parse.urlencode(download_form)) + request.add_header('Content-Type', 'application/x-www-form-urlencoded') + + self._sleep(5, video_id) + + video_page = self._download_webpage(request, video_id, 'Downloading video page') + + thumbnail = self._html_search_regex(r'image:\s*"([^"]+)",', video_page, 'thumbnail', fatal=False) + duration_str = self._html_search_regex(r'duration:\s*"(\d+)",', video_page, 'duration', fatal=False) + duration = int(duration_str) if duration_str is not None else None + + formats = [] + + # SD video + mobj = re.search(r'(?m)file:\s*"(?P[^"]+)",\s*provider:', video_page) + if mobj is not None: + formats.append({ + 'url': mobj.group('url'), + 'format_id': 'sd', + 'format': 'SD', + }) + + # HD video + mobj = re.search(r'\'hd-2\': { file: \'(?P[^\']+)\' },', video_page) + if mobj is not None: + formats.append({ + 'url': mobj.group('url'), + 'format_id': 'hd', + 'format': 'HD', + }) + + # rtmp video + mobj = re.search(r'(?m)file: "(?P[^"]+)",\s*streamer: "(?Prtmp://[^"]+)",', video_page) + if mobj is not None: + formats.append({ + 'url': mobj.group('rtmpurl'), + 'play_path': mobj.group('playpath'), + 'rtmp_live': False, + 'ext': 'mp4', + 'format_id': 'rtmp', + 'format': 'HD', + }) + + return { + 'id': video_id, + 'title': title, + 'thumbnail': thumbnail, + 'duration': duration, + 'formats': formats, + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/morningstar.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/morningstar.py new file mode 100644 index 0000000000..320d27bdd4 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/morningstar.py @@ -0,0 +1,47 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import re + +from .common import 
InfoExtractor + + +class MorningstarIE(InfoExtractor): + IE_DESC = 'morningstar.com' + _VALID_URL = r'https?://(?:www\.)?morningstar\.com/[cC]over/video[cC]enter\.aspx\?id=(?P[0-9]+)' + _TEST = { + 'url': 'http://www.morningstar.com/cover/videocenter.aspx?id=615869', + 'md5': '6c0acface7a787aadc8391e4bbf7b0f5', + 'info_dict': { + 'id': '615869', + 'ext': 'mp4', + 'title': 'Get Ahead of the Curve on 2013 Taxes', + 'description': "Vanguard's Joel Dickson on managing higher tax rates for high-income earners and fund capital-gain distributions in 2013.", + 'thumbnail': r're:^https?://.*m(?:orning)?star\.com/.+thumb\.jpg$' + } + } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group('id') + + webpage = self._download_webpage(url, video_id) + title = self._html_search_regex( + r'

    (.*?)

    ', webpage, 'title') + video_url = self._html_search_regex( + r'(.*?)
    ', + webpage, 'description', fatal=False) + + return { + 'id': video_id, + 'title': title, + 'url': video_url, + 'thumbnail': thumbnail, + 'description': description, + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/motherless.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/motherless.py new file mode 100644 index 0000000000..97d5da626a --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/motherless.py @@ -0,0 +1,105 @@ +from __future__ import unicode_literals + +import datetime +import re + +from .common import InfoExtractor +from ..utils import ( + str_to_int, + unified_strdate, +) + + +class MotherlessIE(InfoExtractor): + _VALID_URL = r'http://(?:www\.)?motherless\.com/(?:g/[a-z0-9_]+/)?(?P[A-Z0-9]+)' + _TESTS = [ + { + 'url': 'http://motherless.com/AC3FFE1', + 'md5': '310f62e325a9fafe64f68c0bccb6e75f', + 'info_dict': { + 'id': 'AC3FFE1', + 'ext': 'mp4', + 'title': 'Fucked in the ass while playing PS3', + 'categories': ['Gaming', 'anal', 'reluctant', 'rough', 'Wife'], + 'upload_date': '20100913', + 'uploader_id': 'famouslyfuckedup', + 'thumbnail': 're:http://.*\.jpg', + 'age_limit': 18, + } + }, + { + 'url': 'http://motherless.com/532291B', + 'md5': 'bc59a6b47d1f958e61fbd38a4d31b131', + 'info_dict': { + 'id': '532291B', + 'ext': 'mp4', + 'title': 'Amazing girl playing the omegle game, PERFECT!', + 'categories': ['Amateur', 'webcam', 'omegle', 'pink', 'young', 'masturbate', 'teen', 'game', 'hairy'], + 'upload_date': '20140622', + 'uploader_id': 'Sulivana7x', + 'thumbnail': 're:http://.*\.jpg', + 'age_limit': 18, + } + }, + { + 'url': 'http://motherless.com/g/cosplay/633979F', + 'md5': '0b2a43f447a49c3e649c93ad1fafa4a0', + 'info_dict': { + 'id': '633979F', + 'ext': 'mp4', + 'title': 'Turtlette', + 'categories': ['superheroine heroine superher'], + 'upload_date': '20140827', + 'uploader_id': 'shade0230', + 'thumbnail': 're:http://.*\.jpg', + 'age_limit': 18, + } + } 
+ ] + + def _real_extract(self, url): + video_id = self._match_id(url) + webpage = self._download_webpage(url, video_id) + + title = self._html_search_regex( + r'id="view-upload-title">\s+([^<]+)<', webpage, 'title') + video_url = self._html_search_regex( + r'setup\(\{\s+"file".+: "([^"]+)",', webpage, 'video URL') + age_limit = self._rta_search(webpage) + view_count = str_to_int(self._html_search_regex( + r'Views\s+([^<]+)<', + webpage, 'view count', fatal=False)) + like_count = str_to_int(self._html_search_regex( + r'Favorited\s+([^<]+)<', + webpage, 'like count', fatal=False)) + + upload_date = self._html_search_regex( + r'Uploaded\s+([^<]+)<', webpage, 'upload date') + if 'Ago' in upload_date: + days = int(re.search(r'([0-9]+)', upload_date).group(1)) + upload_date = (datetime.datetime.now() - datetime.timedelta(days=days)).strftime('%Y%m%d') + else: + upload_date = unified_strdate(upload_date) + + comment_count = webpage.count('class="media-comment-contents"') + uploader_id = self._html_search_regex( + r'"thumb-member-username">\s+[^/]+)/(?:$|[?#])' + _TEST = { + 'url': 'http://www.motorsport.com/f1/video/main-gallery/red-bull-racing-2014-rules-explained/', + 'md5': '5592cb7c5005d9b2c163df5ac3dc04e4', + 'info_dict': { + 'id': '7063', + 'ext': 'mp4', + 'title': 'Red Bull Racing: 2014 Rules Explained', + 'duration': 207, + 'description': 'A new clip from Red Bull sees Daniel Ricciardo and Sebastian Vettel explain the 2014 Formula One regulations Б─⌠ which are arguably the most complex the sport has ever seen.', + 'uploader': 'rainiere', + 'thumbnail': r're:^http://.*motorsport\.com/.+\.jpg$' + } + } + + def _real_extract(self, url): + display_id = self._match_id(url) + webpage = self._download_webpage(url, display_id) + + flashvars_code = self._html_search_regex( + r'Video by: (.*?)', webpage, + 'uploader', fatal=False) + + return { + 'id': params['video_id'], + 'display_id': display_id, + 'title': params['title'], + 'url': video_url, + 'description': 
params.get('description'), + 'thumbnail': params.get('main_thumb'), + 'duration': int_or_none(params.get('duration')), + 'uploader': uploader, + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/movieclips.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/movieclips.py new file mode 100644 index 0000000000..04e17d0551 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/movieclips.py @@ -0,0 +1,80 @@ +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor +from ..compat import ( + compat_str, +) +from ..utils import ( + ExtractorError, + clean_html, +) + + +class MovieClipsIE(InfoExtractor): + _VALID_URL = r'https?://movieclips\.com/(?P[\da-zA-Z]+)(?:-(?P[\da-z-]+))?' + _TEST = { + 'url': 'http://movieclips.com/Wy7ZU-my-week-with-marilyn-movie-do-you-love-me/', + 'info_dict': { + 'id': 'Wy7ZU', + 'display_id': 'my-week-with-marilyn-movie-do-you-love-me', + 'ext': 'mp4', + 'title': 'My Week with Marilyn - Do You Love Me?', + 'description': 'md5:e86795bd332fe3cff461e7c8dc542acb', + 'thumbnail': 're:^https?://.*\.jpg$', + }, + 'params': { + # rtmp download + 'skip_download': True, + } + } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group('id') + display_id = mobj.group('display_id') + show_id = display_id or video_id + + config = self._download_xml( + 'http://config.movieclips.com/player/config/%s' % video_id, + show_id, 'Downloading player config') + + if config.find('./country-region').text == 'false': + raise ExtractorError( + '%s said: %s' % (self.IE_NAME, config.find('./region_alert').text), expected=True) + + properties = config.find('./video/properties') + smil_file = properties.attrib['smil_file'] + + smil = self._download_xml(smil_file, show_id, 'Downloading SMIL') + base_url = smil.find('./head/meta').attrib['base'] + + formats = [] + for video in smil.findall('./body/switch/video'): + 
vbr = int(video.attrib['system-bitrate']) / 1000 + src = video.attrib['src'] + formats.append({ + 'url': base_url, + 'play_path': src, + 'ext': src.split(':')[0], + 'vbr': vbr, + 'format_id': '%dk' % vbr, + }) + + self._sort_formats(formats) + + title = '%s - %s' % (properties.attrib['clip_movie_title'], properties.attrib['clip_title']) + description = clean_html(compat_str(properties.attrib['clip_description'])) + thumbnail = properties.attrib['image'] + categories = properties.attrib['clip_categories'].split(',') + + return { + 'id': video_id, + 'display_id': display_id, + 'title': title, + 'description': description, + 'thumbnail': thumbnail, + 'categories': categories, + 'formats': formats, + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/moviezine.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/moviezine.py new file mode 100644 index 0000000000..f130b75c41 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/moviezine.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor + + +class MoviezineIE(InfoExtractor): + _VALID_URL = r'https?://www\.moviezine\.se/video/(?P[^?#]+)' + + _TEST = { + 'url': 'http://www.moviezine.se/video/205866', + 'info_dict': { + 'id': '205866', + 'ext': 'mp4', + 'title': 'Oculus - Trailer 1', + 'description': 'md5:40cc6790fc81d931850ca9249b40e8a4', + 'thumbnail': 're:http://.*\.jpg', + }, + } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group('id') + + webpage = self._download_webpage(url, video_id) + jsplayer = self._download_webpage('http://www.moviezine.se/api/player.js?video=%s' % video_id, video_id, 'Downloading js api player') + + formats = [{ + 'format_id': 'sd', + 'url': self._html_search_regex(r'file: "(.+?)",', jsplayer, 'file'), + 'quality': 0, + 'ext': 'mp4', + }] + + self._sort_formats(formats) + + 
return { + 'id': video_id, + 'title': self._search_regex(r'title: "(.+?)",', jsplayer, 'title'), + 'thumbnail': self._search_regex(r'image: "(.+?)",', jsplayer, 'image'), + 'formats': formats, + 'description': self._og_search_description(webpage), + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/movshare.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/movshare.py new file mode 100644 index 0000000000..6101063f2e --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/movshare.py @@ -0,0 +1,27 @@ +from __future__ import unicode_literals + +from .novamov import NovaMovIE + + +class MovShareIE(NovaMovIE): + IE_NAME = 'movshare' + IE_DESC = 'MovShare' + + _VALID_URL = NovaMovIE._VALID_URL_TEMPLATE % {'host': 'movshare\.(?:net|sx|ag)'} + + _HOST = 'www.movshare.net' + + _FILE_DELETED_REGEX = r'>This file no longer exists on our servers.<' + _TITLE_REGEX = r'Title: ([^<]+)

    ' + _DESCRIPTION_REGEX = r'Description: ([^<]+)

    ' + + _TEST = { + 'url': 'http://www.movshare.net/video/559e28be54d96', + 'md5': 'abd31a2132947262c50429e1d16c1bfd', + 'info_dict': { + 'id': '559e28be54d96', + 'ext': 'flv', + 'title': 'dissapeared image', + 'description': 'optical illusion dissapeared image magic illusion', + } + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/mpora.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/mpora.py new file mode 100644 index 0000000000..88c9501cd4 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/mpora.py @@ -0,0 +1,64 @@ +from __future__ import unicode_literals + +import json +import re + +from .common import InfoExtractor +from ..utils import int_or_none + + +class MporaIE(InfoExtractor): + _VALID_URL = r'^https?://(www\.)?mpora\.(?:com|de)/videos/(?P[^?#/]+)' + IE_NAME = 'MPORA' + + _TEST = { + 'url': 'http://mpora.de/videos/AAdo8okx4wiz/embed?locale=de', + 'file': 'AAdo8okx4wiz.mp4', + 'md5': 'a7a228473eedd3be741397cf452932eb', + 'info_dict': { + 'title': 'Katy Curd - Winter in the Forest', + 'duration': 416, + 'uploader': 'Peter Newman Media', + }, + } + + def _real_extract(self, url): + m = re.match(self._VALID_URL, url) + video_id = m.group('id') + + webpage = self._download_webpage(url, video_id) + data_json = self._search_regex( + r"new FM\.Player\('[^']+',\s*(\{.*?)\).player;", webpage, 'json') + + data = json.loads(data_json) + + uploader = data['info_overlay'].get('username') + duration = data['video']['duration'] // 1000 + thumbnail = data['video']['encodings']['sd']['poster'] + title = data['info_overlay']['title'] + + formats = [] + for encoding_id, edata in data['video']['encodings'].items(): + for src in edata['sources']: + width_str = self._search_regex( + r'_([0-9]+)\.[a-zA-Z0-9]+$', src['src'], + False, default=None) + vcodec = src['type'].partition('/')[2] + + formats.append({ + 'format_id': encoding_id + '-' + vcodec, + 'url': src['src'], + 'vcodec': 
vcodec, + 'width': int_or_none(width_str), + }) + + self._sort_formats(formats) + + return { + 'id': video_id, + 'title': title, + 'formats': formats, + 'uploader': uploader, + 'duration': duration, + 'thumbnail': thumbnail, + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/mtv.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/mtv.py new file mode 100644 index 0000000000..5ebc78033a --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/mtv.py @@ -0,0 +1,269 @@ +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor +from ..compat import ( + compat_urllib_parse, + compat_urllib_request, +) +from ..utils import ( + ExtractorError, + find_xpath_attr, + fix_xml_ampersands, + HEADRequest, + unescapeHTML, + url_basename, + RegexNotFoundError, +) + + +def _media_xml_tag(tag): + return '{http://search.yahoo.com/mrss/}%s' % tag + + +class MTVServicesInfoExtractor(InfoExtractor): + _MOBILE_TEMPLATE = None + + @staticmethod + def _id_from_uri(uri): + return uri.split(':')[-1] + + # This was originally implemented for ComedyCentral, but it also works here + @staticmethod + def _transform_rtmp_url(rtmp_video_url): + m = re.match(r'^rtmpe?://.*?/(?Pgsp\..+?/.*)$', rtmp_video_url) + if not m: + return rtmp_video_url + base = 'http://viacommtvstrmfs.fplive.net/' + return base + m.group('finalid') + + def _get_feed_url(self, uri): + return self._FEED_URL + + def _get_thumbnail_url(self, uri, itemdoc): + search_path = '%s/%s' % (_media_xml_tag('group'), _media_xml_tag('thumbnail')) + thumb_node = itemdoc.find(search_path) + if thumb_node is None: + return None + else: + return thumb_node.attrib['url'] + + def _extract_mobile_video_formats(self, mtvn_id): + webpage_url = self._MOBILE_TEMPLATE % mtvn_id + req = compat_urllib_request.Request(webpage_url) + # Otherwise we get a webpage that would execute some javascript + req.add_header('Youtubedl-user-agent', 
'curl/7') + webpage = self._download_webpage(req, mtvn_id, + 'Downloading mobile page') + metrics_url = unescapeHTML(self._search_regex(r'.+?)(\?|/|$)' + + _TEST = { + # From http://www.thewrap.com/peter-dinklage-sums-up-game-of-thrones-in-45-seconds-video/ + 'url': 'http://media.mtvnservices.com/embed/mgid:uma:video:mtv.com:1043906/cp~vid%3D1043906%26uri%3Dmgid%3Auma%3Avideo%3Amtv.com%3A1043906', + 'md5': 'cb349b21a7897164cede95bd7bf3fbb9', + 'info_dict': { + 'id': '1043906', + 'ext': 'mp4', + 'title': 'Peter Dinklage Sums Up \'Game Of Thrones\' In 45 Seconds', + 'description': '"Sexy sexy sexy, stabby stabby stabby, beautiful language," says Peter Dinklage as he tries summarizing "Game of Thrones" in under a minute.', + }, + } + + def _get_feed_url(self, uri): + video_id = self._id_from_uri(uri) + site_id = uri.replace(video_id, '') + config_url = ('http://media.mtvnservices.com/pmt/e1/players/{0}/' + 'context4/context5/config.xml'.format(site_id)) + config_doc = self._download_xml(config_url, video_id) + feed_node = config_doc.find('.//feed') + feed_url = feed_node.text.strip().split('?')[0] + return feed_url + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + mgid = mobj.group('mgid') + return self._get_videos_info(mgid) + + +class MTVIE(MTVServicesInfoExtractor): + _VALID_URL = r'''(?x)^https?:// + (?:(?:www\.)?mtv\.com/videos/.+?/(?P[0-9]+)/[^/]+$| + m\.mtv\.com/videos/video\.rbml\?.*?id=(?P[^&]+))''' + + _FEED_URL = 'http://www.mtv.com/player/embed/AS3/rss/' + + _TESTS = [ + { + 'url': 'http://www.mtv.com/videos/misc/853555/ours-vh1-storytellers.jhtml', + 'file': '853555.mp4', + 'md5': '850f3f143316b1e71fa56a4edfd6e0f8', + 'info_dict': { + 'title': 'Taylor Swift - "Ours (VH1 Storytellers)"', + 'description': 'Album: Taylor Swift performs "Ours" for VH1 Storytellers at Harvey Mudd College.', + }, + }, + { + 'add_ie': ['Vevo'], + 'url': 'http://www.mtv.com/videos/taylor-swift/916187/everything-has-changed-ft-ed-sheeran.jhtml', + 'file': 
'USCJY1331283.mp4', + 'md5': '73b4e7fcadd88929292fe52c3ced8caf', + 'info_dict': { + 'title': 'Everything Has Changed', + 'upload_date': '20130606', + 'uploader': 'Taylor Swift', + }, + 'skip': 'VEVO is only available in some countries', + }, + ] + + def _get_thumbnail_url(self, uri, itemdoc): + return 'http://mtv.mtvnimages.com/uri/' + uri + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group('videoid') + uri = mobj.groupdict().get('mgid') + if uri is None: + webpage = self._download_webpage(url, video_id) + + # Some videos come from Vevo.com + m_vevo = re.search(r'isVevoVideo = true;.*?vevoVideoId = "(.*?)";', + webpage, re.DOTALL) + if m_vevo: + vevo_id = m_vevo.group(1) + self.to_screen('Vevo video detected: %s' % vevo_id) + return self.url_result('vevo:%s' % vevo_id, ie='Vevo') + + uri = self._html_search_regex(r'/uri/(.*?)\?', webpage, 'uri') + return self._get_videos_info(uri) + + +class MTVIggyIE(MTVServicesInfoExtractor): + IE_NAME = 'mtviggy.com' + _VALID_URL = r'https?://www\.mtviggy\.com/videos/.+' + _TEST = { + 'url': 'http://www.mtviggy.com/videos/arcade-fire-behind-the-scenes-at-the-biggest-music-experiment-yet/', + 'info_dict': { + 'id': '984696', + 'ext': 'mp4', + 'title': 'Arcade Fire: Behind the Scenes at the Biggest Music Experiment Yet', + } + } + _FEED_URL = 'http://all.mtvworldverticals.com/feed-xml/' diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/muenchentv.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/muenchentv.py new file mode 100644 index 0000000000..b4e8ad17e9 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/muenchentv.py @@ -0,0 +1,75 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import json + +from .common import InfoExtractor +from ..utils import ( + determine_ext, + int_or_none, + js_to_json, +) + + +class MuenchenTVIE(InfoExtractor): + _VALID_URL = 
r'https?://(?:www\.)?muenchen\.tv/livestream' + IE_DESC = 'mц╪nchen.tv' + _TEST = { + 'url': 'http://www.muenchen.tv/livestream/', + 'info_dict': { + 'id': '5334', + 'display_id': 'live', + 'ext': 'mp4', + 'title': 're:^mц╪nchen.tv-Livestream [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$', + 'is_live': True, + 'thumbnail': 're:^https?://.*\.jpg$' + }, + 'params': { + 'skip_download': True, + } + } + + def _real_extract(self, url): + display_id = 'live' + webpage = self._download_webpage(url, display_id) + + title = self._live_title(self._og_search_title(webpage)) + + data_js = self._search_regex( + r'(?s)\nplaylist:\s*(\[.*?}\]),related:', + webpage, 'playlist configuration') + data_json = js_to_json(data_js) + data = json.loads(data_json)[0] + + video_id = data['mediaid'] + thumbnail = data.get('image') + + formats = [] + for format_num, s in enumerate(data['sources']): + ext = determine_ext(s['file'], None) + label_str = s.get('label') + if label_str is None: + label_str = '_%d' % format_num + + if ext is None: + format_id = label_str + else: + format_id = '%s-%s' % (ext, label_str) + + formats.append({ + 'url': s['file'], + 'tbr': int_or_none(s.get('label')), + 'ext': 'mp4', + 'format_id': format_id, + 'preference': -100 if '.smil' in s['file'] else 0, + }) + self._sort_formats(formats) + + return { + 'id': video_id, + 'display_id': display_id, + 'title': title, + 'formats': formats, + 'is_live': True, + 'thumbnail': thumbnail, + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/musicplayon.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/musicplayon.py new file mode 100644 index 0000000000..50d92b50ae --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/musicplayon.py @@ -0,0 +1,75 @@ +# encoding: utf-8 +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor +from ..utils import int_or_none + + +class MusicPlayOnIE(InfoExtractor): + 
_VALID_URL = r'https?://(?:.+?\.)?musicplayon\.com/play(?:-touch)?\?(?:v|pl=100&play)=(?P\d+)' + + _TEST = { + 'url': 'http://en.musicplayon.com/play?v=433377', + 'info_dict': { + 'id': '433377', + 'ext': 'mp4', + 'title': 'Rick Ross - Interview On Chelsea Lately (2014)', + 'description': 'Rick Ross Interview On Chelsea Lately', + 'duration': 342, + 'uploader': 'ultrafish', + }, + 'params': { + # m3u8 download + 'skip_download': True, + }, + } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group('id') + + page = self._download_webpage(url, video_id) + + title = self._og_search_title(page) + description = self._og_search_description(page) + thumbnail = self._og_search_thumbnail(page) + duration = self._html_search_meta('video:duration', page, 'duration', fatal=False) + view_count = self._og_search_property('count', page, fatal=False) + uploader = self._html_search_regex( + r'', page, 'uploader', fatal=False) + + formats = [ + { + 'url': 'http://media0-eu-nl.musicplayon.com/stream-mobile?id=%s&type=.mp4' % video_id, + 'ext': 'mp4', + } + ] + + manifest = self._download_webpage( + 'http://en.musicplayon.com/manifest.m3u8?v=%s' % video_id, video_id, 'Downloading manifest') + + for entry in manifest.split('#')[1:]: + if entry.startswith('EXT-X-STREAM-INF:'): + meta, url, _ = entry.split('\n') + params = dict(param.split('=') for param in meta.split(',')[1:]) + formats.append({ + 'url': url, + 'ext': 'mp4', + 'tbr': int(params['BANDWIDTH']), + 'width': int(params['RESOLUTION'].split('x')[1]), + 'height': int(params['RESOLUTION'].split('x')[-1]), + 'format_note': params['NAME'].replace('"', '').strip(), + }) + + return { + 'id': video_id, + 'title': title, + 'description': description, + 'thumbnail': thumbnail, + 'uploader': uploader, + 'duration': int_or_none(duration), + 'view_count': int_or_none(view_count), + 'formats': formats, + } diff --git 
a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/musicvault.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/musicvault.py new file mode 100644 index 0000000000..ebb1eb8e95 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/musicvault.py @@ -0,0 +1,76 @@ +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor +from ..utils import ( + parse_duration, + unified_strdate, +) + + +class MusicVaultIE(InfoExtractor): + _VALID_URL = r'https?://www\.musicvault\.com/(?P[^/?#]*)/video/(?P[^/?#]*)_(?P[0-9]+)\.html' + _TEST = { + 'url': 'http://www.musicvault.com/the-allman-brothers-band/video/straight-from-the-heart_1010863.html', + 'md5': '2cdbb3ae75f7fb3519821507d2fb3c15', + 'info_dict': { + 'id': '1010863', + 'ext': 'mp4', + 'uploader_id': 'the-allman-brothers-band', + 'title': 'Straight from the Heart', + 'duration': 244, + 'uploader': 'The Allman Brothers Band', + 'thumbnail': 're:^https?://.*/thumbnail/.*', + 'upload_date': '19811216', + 'location': 'Capitol Theatre (Passaic, NJ)', + 'description': 'Listen to The Allman Brothers Band perform Straight from the Heart at Capitol Theatre (Passaic, NJ) on Dec 16, 1981', + } + } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + display_id = mobj.group('display_id') + webpage = self._download_webpage(url, display_id) + + thumbnail = self._search_regex( + r'(.*?)', webpage, 'data fields') + uploader = self._html_search_regex( + r'(.*?)', data_div, 'uploader', fatal=False) + title = self._html_search_regex( + r'(.*?)', data_div, 'title') + upload_date = unified_strdate(self._html_search_regex( + r'(.*?)', data_div, 'uploader', fatal=False)) + location = self._html_search_regex( + r'(.*?)', data_div, 'location', fatal=False) + + duration = parse_duration(self._html_search_meta('duration', webpage)) + + VIDEO_URL_TEMPLATE = 
'http://cdnapi.kaltura.com/p/%(uid)s/sp/%(wid)s/playManifest/entryId/%(entry_id)s/format/url/protocol/http' + kaltura_id = self._search_regex( + r'
    \d+)' + IE_NAME = 'muzu.tv' + + _TEST = { + 'url': 'http://www.muzu.tv/defected/marcashken-featuring-sos-cat-walk-original-mix-music-video/1981454/', + 'md5': '98f8b2c7bc50578d6a0364fff2bfb000', + 'info_dict': { + 'id': '1981454', + 'ext': 'mp4', + 'title': 'Cat Walk (Original Mix)', + 'description': 'md5:90e868994de201b2570e4e5854e19420', + 'uploader': 'MarcAshken featuring SOS', + }, + } + + def _real_extract(self, url): + video_id = self._match_id(url) + + info_data = compat_urllib_parse.urlencode({ + 'format': 'json', + 'url': url, + }) + info = self._download_json( + 'http://www.muzu.tv/api/oembed/?%s' % info_data, + video_id, 'Downloading video info') + + player_info = self._download_json( + 'http://player.muzu.tv/player/playerInit?ai=%s' % video_id, + video_id, 'Downloading player info') + video_info = player_info['videos'][0] + for quality in ['1080', '720', '480', '360']: + if video_info.get('v%s' % quality): + break + + data = compat_urllib_parse.urlencode({ + 'ai': video_id, + # Even if each time you watch a video the hash changes, + # it seems to work for different videos, and it will work + # even if you use any non empty string as a hash + 'viewhash': 'VBNff6djeV4HV5TRPW5kOHub2k', + 'device': 'web', + 'qv': quality, + }) + video_url_info = self._download_json( + 'http://player.muzu.tv/player/requestVideo?%s' % data, + video_id, 'Downloading video url') + video_url = video_url_info['url'] + + return { + 'id': video_id, + 'title': info['title'], + 'url': video_url, + 'thumbnail': info['thumbnail_url'], + 'description': info['description'], + 'uploader': info['author_name'], + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/myspace.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/myspace.py new file mode 100644 index 0000000000..83414a2325 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/myspace.py @@ -0,0 +1,180 @@ +# encoding: utf-8 +from __future__ 
class MySpaceIE(InfoExtractor):
    """Extractor for myspace.com videos and songs.

    Videos carry their stream info in an inline JS ``context`` object;
    songs expose it as ``data-*`` attributes on the play button, and may
    instead redirect to Vevo or YouTube.
    """
    # FIX: the named groups 'mediatype' and 'id' are required by
    # mobj.group(...) calls in _real_extract; they had been lost from
    # this pattern.
    _VALID_URL = r'https?://myspace\.com/([^/]+)/(?P<mediatype>video/[^/]+/|music/song/.*?)(?P<id>\d+)'

    _TESTS = [
        {
            'url': 'https://myspace.com/fiveminutestothestage/video/little-big-town/109594919',
            'info_dict': {
                'id': '109594919',
                'ext': 'flv',
                'title': 'Little Big Town',
                'description': 'This country quartet was all smilesб═whileб═playing a sold out show at the Pacificб═Amphitheatreб═in Orange County, California.',
                'uploader': 'Five Minutes to the Stage',
                'uploader_id': 'fiveminutestothestage',
            },
            'params': {
                # rtmp download
                'skip_download': True,
            },
        },
        # songs
        {
            'url': 'https://myspace.com/killsorrow/music/song/of-weakened-soul...-93388656-103880681',
            'info_dict': {
                'id': '93388656',
                'ext': 'flv',
                'title': 'Of weakened soul...',
                'uploader': 'Killsorrow',
                'uploader_id': 'killsorrow',
            },
            'params': {
                # rtmp download
                'skip_download': True,
            },
        }, {
            'add_ie': ['Vevo'],
            'url': 'https://myspace.com/threedaysgrace/music/song/animal-i-have-become-28400208-28218041',
            'info_dict': {
                'id': 'USZM20600099',
                'ext': 'mp4',
                'title': 'Animal I Have Become',
                'uploader': 'Three Days Grace',
                'timestamp': int,
                'upload_date': '20060502',
            },
            'skip': 'VEVO is only available in some countries',
        }, {
            'add_ie': ['Youtube'],
            'url': 'https://myspace.com/starset2/music/song/first-light-95799905-106964426',
            'info_dict': {
                'id': 'ypWvQgnJrSU',
                'ext': 'mp4',
                'title': 'Starset - First Light',
                'description': 'md5:2d5db6c9d11d527683bcda818d332414',
                'uploader': 'Jacob Soren',
                'uploader_id': 'SorenPromotions',
                'upload_date': '20140725',
            }
        },
    ]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        webpage = self._download_webpage(url, video_id)
        player_url = self._search_regex(
            r'playerSwf":"([^"?]*)', webpage, 'player URL')

        if mobj.group('mediatype').startswith('music/song'):
            # songs don't store any useful info in the 'context' variable
            # NOTE(review): this regex and the search_data helper arrived
            # garbled (their HTML tags were stripped); reconstructed so the
            # 'data' group and the %s placeholder referenced below exist.
            # TODO: confirm the button markup against a live song page.
            song_data = self._search_regex(
                r'''<button.*data-song-id=(["\'])%s\1.*?</button>''' % video_id,
                webpage, 'song_data')

            def search_data(name):
                # data-* attributes on the song button; missing ones yield ''
                return self._search_regex(
                    r'''data-%s=([\'"])(?P<data>.*?)\1''' % name,
                    song_data, name, default='', group='data')

            streamUrl = search_data('stream-url')
            if not streamUrl:
                # No direct stream: the song may be hosted on Vevo/YouTube.
                vevo_id = search_data('vevo-id')
                youtube_id = search_data('youtube-id')
                if vevo_id:
                    self.to_screen('Vevo video detected: %s' % vevo_id)
                    return self.url_result('vevo:%s' % vevo_id, ie='Vevo')
                elif youtube_id:
                    self.to_screen('Youtube video detected: %s' % youtube_id)
                    return self.url_result(youtube_id, ie='Youtube')
                else:
                    raise ExtractorError(
                        'Found song but don\'t know how to download it')
            info = {
                'id': video_id,
                'title': self._og_search_title(webpage),
                'uploader': search_data('artist-name'),
                'uploader_id': search_data('artist-username'),
                'thumbnail': self._og_search_thumbnail(webpage),
            }
        else:
            # Videos: everything lives in the inline JS 'context' object.
            context = json.loads(self._search_regex(
                r'context = ({.*?});', webpage, 'context'))
            video = context['video']
            streamUrl = video['streamUrl']
            info = {
                'id': compat_str(video['mediaId']),
                'title': video['title'],
                'description': video['description'],
                'thumbnail': video['imageUrl'],
                'uploader': video['artistName'],
                'uploader_id': video['artistUsername'],
            }

        # streamUrl is '<rtmp base>;<play path>'
        rtmp_url, play_path = streamUrl.split(';', 1)
        info.update({
            'url': rtmp_url,
            'play_path': play_path,
            'player_url': player_url,
            'ext': 'flv',
        })
        return info
class MySpassIE(InfoExtractor):
    """Extractor for myspass.de; all media info comes from an XML endpoint."""
    _VALID_URL = r'http://www\.myspass\.de/.*'
    _TEST = {
        'url': 'http://www.myspass.de/myspass/shows/tvshows/absolute-mehrheit/Absolute-Mehrheit-vom-17022013-Die-Highlights-Teil-2--/11741/',
        'md5': '0b49f4844a068f8b33f4b7c88405862b',
        'info_dict': {
            'id': '11741',
            'ext': 'mp4',
            "description": "Wer kann in die Fu\u00dfstapfen von Wolfgang Kubicki treten und die Mehrheit der Zuschauer hinter sich versammeln? Wird vielleicht sogar die Absolute Mehrheit geknackt und der Jackpot von 200.000 Euro mit nach Hause genommen?",
            "title": "Absolute Mehrheit vom 17.02.2013 - Die Highlights, Teil 2",
        },
    }

    def _real_extract(self, url):
        # The video id is the last path component; URLs usually end with a
        # trailing slash, so fall back to the second-to-last component.
        path = compat_urllib_parse_urlparse(url).path
        parent_path, video_id = os.path.split(path)
        if not video_id:
            _, video_id = os.path.split(parent_path)

        metadata = self._download_xml(
            'http://www.myspass.de/myspass/includes/apps/video/getvideometadataxml.php?id=%s' % video_id,
            video_id)

        def required_text(tag, error_msg):
            # Mandatory metadata field: a missing element is a hard error.
            el = metadata.find(tag)
            if el is None:
                raise ExtractorError(error_msg)
            return el.text

        def optional_text(tag):
            # Optional metadata field: None when the element is absent.
            el = metadata.find(tag)
            return None if el is None else el.text

        video_url = required_text('url_flv', 'Unable to extract download url')
        title = required_text('title', 'Unable to extract title')

        # Keep the element's text verbatim when present (even if empty);
        # only a missing element falls back to 'mp4'.
        format_id_el = metadata.find('format_id')
        video_format = 'mp4' if format_id_el is None else format_id_el.text

        return {
            'id': video_id,
            'url': video_url,
            'title': title,
            'format': video_format,
            'thumbnail': optional_text('imagePreview'),
            'description': optional_text('description'),
        }
class MyVideoIE(InfoExtractor):
    """Extractor for myvideo.de.

    Tries three mechanisms in order: a direct <source> tag in the page,
    the JSON config service, and finally the legacy RC4-encrypted
    flashvars XML.
    """
    _VALID_URL = r'http://(?:www\.)?myvideo\.de/(?:[^/]+/)?watch/(?P<id>[0-9]+)/[^?/]+.*'
    IE_NAME = 'myvideo'
    _TEST = {
        'url': 'http://www.myvideo.de/watch/8229274/bowling_fail_or_win',
        'md5': '2d2753e8130479ba2cb7e0a37002053e',
        'info_dict': {
            'id': '8229274',
            'ext': 'flv',
            'title': 'bowling-fail-or-win',
        }
    }

    # Original Code from: https://github.com/dersphere/plugin.video.myvideo_de.git
    # Released into the Public Domain by Tristan Fischer on 2013-05-19
    # https://github.com/rg3/youtube-dl/pull/842
    def __rc4crypt(self, data, key):
        # Plain RC4: key-scheduling over `key`, then the PRGA keystream
        # XORed against `data`. Returns a text string of the raw bytes.
        x = 0
        box = list(range(256))
        for i in list(range(256)):
            x = (x + box[i] + compat_ord(key[i % len(key)])) % 256
            box[i], box[x] = box[x], box[i]
        x = 0
        y = 0
        out = ''
        for char in data:
            x = (x + 1) % 256
            y = (y + box[x]) % 256
            box[x], box[y] = box[y], box[x]
            out += chr(compat_ord(char) ^ box[(box[x] + box[y]) % 256])
        return out

    def __md5(self, s):
        # Hex digest as bytes, as expected by __rc4crypt's key indexing.
        return hashlib.md5(s).hexdigest().encode()

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')

        # Site-wide static key, double-base64-encoded.
        GK = (
            b'WXpnME1EZGhNRGhpTTJNM01XVmhOREU0WldNNVpHTTJOakpt'
            b'TW1FMU5tVTBNR05pWkRaa05XRXhNVFJoWVRVd1ptSXhaVEV3'
            b'TnpsbA0KTVRkbU1tSTRNdz09'
        )

        # Get video webpage
        webpage_url = 'http://www.myvideo.de/watch/%s' % video_id
        webpage = self._download_webpage(webpage_url, video_id)

        # 1) Direct <source> tag
        mobj = re.search('source src=\'(.+?)[.]([^.]+)\'', webpage)
        if mobj is not None:
            self.report_extraction(video_id)
            video_url = mobj.group(1) + '.flv'

            # FIX: restore the <title> anchors; without them the pattern
            # matched arbitrary page text.
            video_title = self._html_search_regex(
                '<title>([^<]+)</title>', webpage, 'title')

            return {
                'id': video_id,
                'url': video_url,
                'title': video_title,
            }

        # 2) JSON config service (response is base64-encoded JSON)
        mobj = re.search(r'data-video-service="/service/data/video/%s/config' % video_id, webpage)
        if mobj is not None:
            request = compat_urllib_request.Request('http://www.myvideo.de/service/data/video/%s/config' % video_id, '')
            response = self._download_webpage(request, video_id,
                                              'Downloading video info')
            info = json.loads(base64.b64decode(response).decode('utf-8'))
            return {
                'id': video_id,
                'title': info['title'],
                # rtmpe streams also work over rtmpt
                'url': info['streaming_url'].replace('rtmpe', 'rtmpt'),
                'play_path': info['filename'],
                'ext': 'flv',
                'thumbnail': info['thumbnail'][0]['url'],
            }

        # 3) Legacy path: RC4-encrypted flashvars XML
        mobj = re.search('var flashvars={(.+?)}', webpage)
        if mobj is None:
            raise ExtractorError('Unable to extract video')

        params = {}
        encxml = ''
        sec = mobj.group(1)
        for (a, b) in re.findall('(.+?):\'(.+?)\',?', sec):
            if not a == '_encxml':
                params[a] = b
            else:
                encxml = compat_urllib_parse.unquote(b)
        if not params.get('domain'):
            params['domain'] = 'www.myvideo.de'
        xmldata_url = '%s?%s' % (encxml, compat_urllib_parse.urlencode(params))
        if 'flash_playertype=MTV' in xmldata_url:
            self._downloader.report_warning('avoiding MTV player')
            xmldata_url = (
                'http://www.myvideo.de/dynamic/get_player_video_xml.php'
                '?flash_playertype=D&ID=%s&_countlimit=4&autorun=yes'
            ) % video_id

        # get enc data
        enc_data = self._download_webpage(xmldata_url, video_id).split('=')[1]
        enc_data_b = binascii.unhexlify(enc_data)
        # RC4 key = md5(base64^-2(GK) + md5(video_id))
        sk = self.__md5(
            base64.b64decode(base64.b64decode(GK)) +
            self.__md5(
                str(video_id).encode('utf-8')
            )
        )
        dec_data = self.__rc4crypt(enc_data_b, sk)

        # extracting infos
        self.report_extraction(video_id)

        video_url = None
        mobj = re.search('connectionurl=\'(.*?)\'', dec_data)
        if mobj:
            video_url = compat_urllib_parse.unquote(mobj.group(1))
            if 'myvideo2flash' in video_url:
                self.report_warning(
                    'Rewriting URL to use unencrypted rtmp:// ...',
                    video_id)
                video_url = video_url.replace('rtmpe://', 'rtmp://')

        if not video_url:
            # extract non rtmp videos
            mobj = re.search('path=\'(http.*?)\' source=\'(.*?)\'', dec_data)
            if mobj is None:
                raise ExtractorError('unable to extract url')
            video_url = compat_urllib_parse.unquote(mobj.group(1)) + compat_urllib_parse.unquote(mobj.group(2))

        video_file = self._search_regex('source=\'(.*?)\'', dec_data, 'video file')
        video_file = compat_urllib_parse.unquote(video_file)

        if not video_file.endswith('f4m'):
            # rtmp play path is '<ext>:<path-without-ext>'
            ppath, prefix = video_file.split('.')
            video_playpath = '%s:%s' % (prefix, ppath)
        else:
            video_playpath = ''

        video_swfobj = self._search_regex('swfobject.embedSWF\(\'(.+?)\'', webpage, 'swfobj')
        video_swfobj = compat_urllib_parse.unquote(video_swfobj)

        # FIX: restore the <title> anchors; "(.*?)" alone always matched
        # the empty string at position 0.
        video_title = self._html_search_regex(
            '<title>(.*?)</title>', webpage, 'title')

        return {
            'id': video_id,
            'url': video_url,
            'tc_url': video_url,
            'title': video_title,
            'ext': 'flv',
            'play_path': video_playpath,
            'player_url': video_swfobj,
        }
class NaverIE(InfoExtractor):
    """Extractor for Naver TVCast (tvcast.naver.com)."""
    _VALID_URL = r'https?://(?:m\.)?tvcast\.naver\.com/v/(?P<id>\d+)'

    _TEST = {
        'url': 'http://tvcast.naver.com/v/81652',
        'info_dict': {
            'id': '81652',
            'ext': 'mp4',
            'title': '[9Л⌡■ К╙╗Л²≤ЙЁ═Л┌╛ М∙╢Л└╓Й╟∙Л²≤][Л┬≤М∙≥_Й╧─Л┐│М²╛] Л┬≤М∙≥ AМ≤∙ 16~20К╡┬',
            'description': 'М∙╘Й╡╘К╤┬КЁ─Л²≤ К╡∙Л╧≥ К╘■Й╟─Л┼╓М└╟К■■ | К╘■Й╟─Л┼╓М└╟К■■ Л┬≤М∙≥ Й╧─Л┐│М²╛ Л└═Л┐²К▀┤Л²╢ 9Л⌡■ К╙╗Л²≤ЙЁ═Л┌╛ Л┬≤М∙≥AМ≤∙ 16К╡┬Л≈░Л└° 20К╡┬Й╧▄Л╖─ М∙╢Л└╓Й╟∙Л²≤К╔╪ ЙЁ╣Й╟°М∙╘К▀┬К▀╓.',
            'upload_date': '20130903',
        },
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        # The player is instantiated with (vid, inKey) as its first two args.
        m_id = re.search(r'var rmcPlayer = new nhn.rmcnmv.RMCVideoPlayer\("(.+?)", "(.+?)"',
                         webpage)
        if m_id is None:
            # FIX(review): this error-box pattern arrived garbled (its HTML
            # tags were stripped); reconstructed so the named group 'msg'
            # used below exists. TODO: confirm the exact div classes
            # against a live region-blocked page.
            m_error = re.search(
                r'(?s)<div class="(?:nation_error|nation_box)">\s*(?:<!--.*?-->)?\s*<p class="[^"]+">(?P<msg>.+?)</p>\s*</div>',
                webpage)
            if m_error:
                raise ExtractorError(clean_html(m_error.group('msg')), expected=True)
            raise ExtractorError('couldn\'t extract vid and key')
        vid = m_id.group(1)
        key = m_id.group(2)
        query = compat_urllib_parse.urlencode({'vid': vid, 'inKey': key, })
        query_urls = compat_urllib_parse.urlencode({
            'masterVid': vid,
            'protocol': 'p2p',
            'inKey': key,
        })
        info = self._download_xml(
            'http://serviceapi.rmcnmv.naver.com/flash/videoInfo.nhn?' + query,
            video_id, 'Downloading video info')
        urls = self._download_xml(
            'http://serviceapi.rmcnmv.naver.com/flash/playableEncodingOption.nhn?' + query_urls,
            video_id, 'Downloading video formats info')

        formats = []
        for format_el in urls.findall('EncodingOptions/EncodingOption'):
            domain = format_el.find('Domain').text
            f = {
                'url': domain + format_el.find('uri').text,
                'ext': 'mp4',
                'width': int(format_el.find('width').text),
                'height': int(format_el.find('height').text),
            }
            if domain.startswith('rtmp'):
                f.update({
                    'ext': 'flv',
                    'rtmp_protocol': '1',  # rtmpt
                })
            formats.append(f)
        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': info.find('Subject').text,
            'formats': formats,
            'description': self._og_search_description(webpage),
            'thumbnail': self._og_search_thumbnail(webpage),
            'upload_date': info.find('WriteDate').text.replace('.', ''),
            'view_count': int(info.find('PlayCount').text),
        }
r'https?://(?:watch\.|www\.)?nba\.com/(?:nba/)?video(?P/[^?]*?)/?(?:/index\.html)?(?:\?.*)?$' + _TESTS = [{ + 'url': 'http://www.nba.com/video/games/nets/2012/12/04/0021200253-okc-bkn-recap.nba/index.html', + 'md5': 'c0edcfc37607344e2ff8f13c378c88a4', + 'info_dict': { + 'id': '0021200253-okc-bkn-recap.nba', + 'ext': 'mp4', + 'title': 'Thunder vs. Nets', + 'description': 'Kevin Durant scores 32 points and dishes out six assists as the Thunder beat the Nets in Brooklyn.', + 'duration': 181, + }, + }, { + 'url': 'http://www.nba.com/video/games/hornets/2014/12/05/0021400276-nyk-cha-play5.nba/', + 'only_matching': True, + }] + + def _real_extract(self, url): + video_id = self._match_id(url) + webpage = self._download_webpage(url, video_id) + + video_url = 'http://ht-mobile.cdn.turner.com/nba/big' + video_id + '_nba_1280x720.mp4' + + shortened_video_id = video_id.rpartition('/')[2] + title = remove_end( + self._og_search_title(webpage, default=shortened_video_id), ' : NBA.com') + + description = self._og_search_description(webpage) + duration = parse_duration( + self._html_search_meta('duration', webpage, 'duration')) + + return { + 'id': shortened_video_id, + 'url': video_url, + 'title': title, + 'description': description, + 'duration': duration, + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/nbc.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/nbc.py new file mode 100644 index 0000000000..690c46b6a5 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/nbc.py @@ -0,0 +1,134 @@ +from __future__ import unicode_literals + +import re +import json + +from .common import InfoExtractor +from ..compat import ( + compat_str, +) +from ..utils import ( + ExtractorError, + find_xpath_attr, +) + + +class NBCIE(InfoExtractor): + _VALID_URL = r'http://www\.nbc\.com/(?:[^/]+/)+(?Pn?\d+)' + + _TESTS = [ + { + 'url': 'http://www.nbc.com/chicago-fire/video/i-am-a-firefighter/2734188', + # md5 
checksum is not stable + 'info_dict': { + 'id': 'bTmnLCvIbaaH', + 'ext': 'flv', + 'title': 'I Am a Firefighter', + 'description': 'An emergency puts Dawson\'sf irefighter skills to the ultimate test in this four-part digital series.', + }, + }, + { + 'url': 'http://www.nbc.com/the-tonight-show/episodes/176', + 'info_dict': { + 'id': 'XwU9KZkp98TH', + 'ext': 'flv', + 'title': 'Ricky Gervais, Steven Van Zandt, ILoveMakonnen', + 'description': 'A brand new episode of The Tonight Show welcomes Ricky Gervais, Steven Van Zandt and ILoveMakonnen.', + }, + 'skip': 'Only works from US', + }, + ] + + def _real_extract(self, url): + video_id = self._match_id(url) + webpage = self._download_webpage(url, video_id) + theplatform_url = self._search_regex( + '(?:class="video-player video-player-full" data-mpx-url|class="player" src)="(.*?)"', + webpage, 'theplatform url').replace('_no_endcard', '') + if theplatform_url.startswith('//'): + theplatform_url = 'http:' + theplatform_url + return self.url_result(theplatform_url) + + +class NBCNewsIE(InfoExtractor): + _VALID_URL = r'''(?x)https?://www\.nbcnews\.com/ + ((video/.+?/(?P\d+))| + (feature/[^/]+/(?P.+))) + ''' + + _TESTS = [ + { + 'url': 'http://www.nbcnews.com/video/nbc-news/52753292', + 'md5': '47abaac93c6eaf9ad37ee6c4463a5179', + 'info_dict': { + 'id': '52753292', + 'ext': 'flv', + 'title': 'Crew emerges after four-month Mars food study', + 'description': 'md5:24e632ffac72b35f8b67a12d1b6ddfc1', + }, + }, + { + 'url': 'http://www.nbcnews.com/feature/edward-snowden-interview/how-twitter-reacted-snowden-interview-n117236', + 'md5': 'b2421750c9f260783721d898f4c42063', + 'info_dict': { + 'id': 'I1wpAI_zmhsQ', + 'ext': 'mp4', + 'title': 'How Twitter Reacted To The Snowden Interview', + 'description': 'md5:65a0bd5d76fe114f3c2727aa3a81fe64', + }, + 'add_ie': ['ThePlatform'], + }, + ] + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group('id') + if video_id is not None: + all_info = 
self._download_xml('http://www.nbcnews.com/id/%s/displaymode/1219' % video_id, video_id) + info = all_info.find('video') + + return { + 'id': video_id, + 'title': info.find('headline').text, + 'ext': 'flv', + 'url': find_xpath_attr(info, 'media', 'type', 'flashVideo').text, + 'description': compat_str(info.find('caption').text), + 'thumbnail': find_xpath_attr(info, 'media', 'type', 'thumbnail').text, + } + else: + # "feature" pages use theplatform.com + title = mobj.group('title') + webpage = self._download_webpage(url, title) + bootstrap_json = self._search_regex( + r'var bootstrapJson = ({.+})\s*$', webpage, 'bootstrap json', + flags=re.MULTILINE) + bootstrap = json.loads(bootstrap_json) + info = bootstrap['results'][0]['video'] + mpxid = info['mpxId'] + + base_urls = [ + info['fallbackPlaylistUrl'], + info['associatedPlaylistUrl'], + ] + + for base_url in base_urls: + if not base_url: + continue + playlist_url = base_url + '?form=MPXNBCNewsAPI' + all_videos = self._download_json(playlist_url, title)['videos'] + + try: + info = next(v for v in all_videos if v['mpxId'] == mpxid) + break + except StopIteration: + continue + + if info is None: + raise ExtractorError('Could not find video in playlists') + + return { + '_type': 'url', + # We get the best quality video + 'url': info['videoAssets'][-1]['publicUrl'], + 'ie_key': 'ThePlatform', + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/ndr.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/ndr.py new file mode 100644 index 0000000000..f49c666909 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/ndr.py @@ -0,0 +1,94 @@ +# encoding: utf-8 +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor +from ..utils import ( + ExtractorError, + int_or_none, + qualities, +) + + +class NDRIE(InfoExtractor): + IE_NAME = 'ndr' + IE_DESC = 'NDR.de - Mediathek' + _VALID_URL = 
    def _real_extract(self, url):
        """Extract an NDR Mediathek page (video or audio).

        Formats are scraped from inline player JS: an mp3 entry and/or
        three fixed mp4 variants (lo/hi/hq) derived from one base URL.
        """
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')

        page = self._download_webpage(url, video_id, 'Downloading page')

        title = self._og_search_title(page).strip()
        description = self._og_search_description(page)
        if description:
            description = description.strip()

        # Duration appears as a plain integer in the player config.
        duration = int_or_none(self._html_search_regex(r'duration: (\d+),\n', page, 'duration', fatal=False))

        formats = []

        # Audio pages expose a single mp3 source object.
        mp3_url = re.search(r'''\{src:'(?P<audio>[^']+)', type:"audio/mp3"},''', page)
        if mp3_url:
            formats.append({
                'url': mp3_url.group('audio'),
                'format_id': 'mp3',
            })

        thumbnail = None

        # Video pages list one base URL; the quality suffix is substituted
        # below to build the three variants.
        video_url = re.search(r'''3: \{src:'(?P<video>.+?)\.(lo|hi|hq)\.mp4', type:"video/mp4"},''', page)
        if video_url:
            # Thumbnails come with a size label; pick the largest via the
            # xs..xl ordering.
            thumbnails = re.findall(r'''\d+: \{src: "([^"]+)"(?: \|\| '[^']+')?, quality: '([^']+)'}''', page)
            if thumbnails:
                quality_key = qualities(['xs', 's', 'm', 'l', 'xl'])
                largest = max(thumbnails, key=lambda thumb: quality_key(thumb[1]))
                thumbnail = 'http://www.ndr.de' + largest[0]

            for format_id in 'lo', 'hi', 'hq':
                formats.append({
                    'url': '%s.%s.mp4' % (video_url.group('video'), format_id),
                    'format_id': format_id,
                })

        # Neither audio nor video matched: nothing playable on this page.
        if not formats:
            raise ExtractorError('No media links available for %s' % video_id)

        return {
            'id': video_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'duration': duration,
            'formats': formats,
        }
class NerdCubedFeedIE(InfoExtractor):
    """Playlist extractor for the nerdcubed.co.uk JSON feed of YouTube uploads."""
    _VALID_URL = r'https?://(?:www\.)?nerdcubed\.co\.uk/feed\.json'
    _TEST = {
        'url': 'http://www.nerdcubed.co.uk/feed.json',
        'info_dict': {
            'title': 'nerdcubed.co.uk feed',
        },
        'playlist_mincount': 1300,
    }

    def _real_extract(self, url):
        feed = self._download_json(url, url, "Downloading NerdCubed JSON feed")

        def to_entry(item):
            # Each feed item references a YouTube upload by id; dates are
            # ISO 'YYYY-MM-DD' and get compacted to 'YYYYMMDD'.
            source = item['source']
            return {
                '_type': 'url',
                'title': item['title'],
                'uploader': source['name'] if source else None,
                'upload_date': datetime.datetime.strptime(
                    item['date'], '%Y-%m-%d').strftime('%Y%m%d'),
                'url': "http://www.youtube.com/watch?v=" + item['youtube_id'],
            }

        return {
            '_type': 'playlist',
            'title': 'nerdcubed.co.uk feed',
            'id': 'nerdcubed-feed',
            'entries': [to_entry(item) for item in feed],
        }
class NewgroundsIE(InfoExtractor):
    """Extractor for Newgrounds audio listen pages."""
    _VALID_URL = r'https?://(?:www\.)?newgrounds\.com/audio/listen/(?P<id>[0-9]+)'
    _TEST = {
        'url': 'http://www.newgrounds.com/audio/listen/549479',
        'md5': 'fe6033d297591288fa1c1f780386f07a',
        'info_dict': {
            'id': '549479',
            'ext': 'mp3',
            'title': 'B7 - BusMode',
            'uploader': 'Burn7',
        }
    }

    def _real_extract(self, url):
        music_id = self._match_id(url)
        webpage = self._download_webpage(url, music_id)

        # Title and artist are embedded in an inline JS settings object.
        title = self._html_search_regex(
            r',"name":"([^"]+)",', webpage, 'music title')
        uploader = self._html_search_regex(
            r',"artist":"([^"]+)",', webpage, 'music uploader')

        # The page only yields a partial JSON fragment ('{"url":"..."');
        # append the closing brace so it parses.
        url_json = json.loads(self._html_search_regex(
            r'({"url":"[^"]+"),', webpage, 'music url') + '}')

        return {
            'id': music_id,
            'title': title,
            'url': url_json['url'],
            'uploader': uploader,
        }
    def _real_extract(self, url):
        """Extract a newstube.ru media page via its SOAP-ish player API.

        The page URL maps to a GUID, which keys both the player XML and the
        rtmp play path; each stream advertises its own rtmp server/app.
        """
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')

        page = self._download_webpage(url, video_id, 'Downloading page')

        # The og:video meta tag carries the canonical media GUID.
        video_guid = self._html_search_regex(
            r'<meta property="og:video" content="https?://(?:www\.)?newstube\.ru/freshplayer\.swf\?guid=(?P<guid>[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12})',
            page, 'video GUID')

        player = self._download_xml(
            'http://p.newstube.ru/v2/player.asmx/GetAutoPlayInfo6?state=&url=%s&sessionId=&id=%s&placement=profile&location=n2' % (url, video_guid),
            video_guid, 'Downloading player XML')

        def ns(s):
            # Qualify every path segment of an XPath with the player's XML
            # namespace (ElementTree has no default-namespace support here).
            return s.replace('/', '/%(ns)s') % {'ns': '{http://app1.newstube.ru/N2SiteWS/player.asmx}'}

        # The API reports failures inside the payload, not via HTTP status.
        error_message = player.find(ns('./ErrorMessage'))
        if error_message is not None:
            raise ExtractorError('%s returned error: %s' % (self.IE_NAME, error_message.text), expected=True)

        session_id = player.find(ns('./SessionId')).text
        media_info = player.find(ns('./Medias/MediaInfo'))
        title = media_info.find(ns('./Name')).text
        description = self._og_search_description(page)
        thumbnail = media_info.find(ns('./KeyFrame')).text
        # Duration is reported in milliseconds.
        duration = int(media_info.find(ns('./Duration')).text) / 1000.0

        formats = []

        for stream_info in media_info.findall(ns('./Streams/StreamInfo')):
            media_location = stream_info.find(ns('./MediaLocation'))
            if media_location is None:
                # Stream entry without a location is unplayable; skip it.
                continue

            server = media_location.find(ns('./Server')).text
            app = media_location.find(ns('./App')).text
            media_id = stream_info.find(ns('./Id')).text
            quality_id = stream_info.find(ns('./QualityId')).text
            name = stream_info.find(ns('./Name')).text
            width = int(stream_info.find(ns('./Width')).text)
            height = int(stream_info.find(ns('./Height')).text)

            formats.append({
                'url': 'rtmp://%s/%s' % (server, app),
                'app': app,
                'play_path': '01/%s' % video_guid.upper(),
                # Extra rtmp connect args: session, media id, and location
                # marker, in this exact order.
                'rtmp_conn': ['S:%s' % session_id, 'S:%s' % media_id, 'S:n2'],
                'page_url': url,
                'ext': 'flv',
                'format_id': quality_id,
                'format_note': name,
                'width': width,
                'height': height,
            })

        self._sort_formats(formats)

        return {
            'id': video_guid,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'duration': duration,
            'formats': formats,
        }
Why White People Are Funny ', + 'description': 'md5:836d8aff55e087d04d9f6df554d4e038', + 'duration': 3128, + 'uploader': 'Mark Sandiford', + 'uploader_id': 'mark-sandiford', + }, + 'params': { + # rtmp download + 'skip_download': True, + } + } + + def _real_extract(self, url): + video_id = self._match_id(url) + page = self._download_webpage( + 'https://www.nfb.ca/film/%s' % video_id, video_id, + 'Downloading film page') + + uploader_id = self._html_search_regex(r'<a class="director-link" href="/explore-all-directors/([^/]+)/"', + page, 'director id', fatal=False) + uploader = self._html_search_regex(r'<em class="director-name" itemprop="name">([^<]+)</em>', + page, 'director name', fatal=False) + + request = compat_urllib_request.Request('https://www.nfb.ca/film/%s/player_config' % video_id, + compat_urllib_parse.urlencode({'getConfig': 'true'}).encode('ascii')) + request.add_header('Content-Type', 'application/x-www-form-urlencoded') + request.add_header('X-NFB-Referer', 'http://www.nfb.ca/medias/flash/NFBVideoPlayer.swf') + + config = self._download_xml(request, video_id, 'Downloading player config XML') + + title = None + description = None + thumbnail = None + duration = None + formats = [] + + def extract_thumbnail(media): + thumbnails = {} + for asset in media.findall('assets/asset'): + thumbnails[asset.get('quality')] = asset.find('default/url').text + if not thumbnails: + return None + if 'high' in thumbnails: + return thumbnails['high'] + return list(thumbnails.values())[0] + + for media in config.findall('./player/stream/media'): + if media.get('type') == 'posterImage': + thumbnail = extract_thumbnail(media) + elif media.get('type') == 'video': + duration = int(media.get('duration')) + title = media.find('title').text + description = media.find('description').text + # It seems assets always go from lower to better quality, so no need to sort + for asset in media.findall('assets/asset'): + for x in asset: + formats.append({ + 'url': 
x.find('streamerURI').text, + 'app': x.find('streamerURI').text.split('/', 3)[3], + 'play_path': x.find('url').text, + 'rtmp_live': False, + 'ext': 'mp4', + 'format_id': '%s-%s' % (x.tag, asset.get('quality')), + }) + + return { + 'id': video_id, + 'title': title, + 'description': description, + 'thumbnail': thumbnail, + 'duration': duration, + 'uploader': uploader, + 'uploader_id': uploader_id, + 'formats': formats, + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/nfl.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/nfl.py new file mode 100644 index 0000000000..606e2294ef --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/nfl.py @@ -0,0 +1,146 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor +from ..compat import ( + compat_urllib_parse_urlparse, +) +from ..utils import ( + ExtractorError, + int_or_none, + remove_end, +) + + +class NFLIE(InfoExtractor): + IE_NAME = 'nfl.com' + _VALID_URL = r'''(?x)https?:// + (?P<host>(?:www\.)?(?:nfl\.com|.*?\.clubs\.nfl\.com))/ + (?:.+?/)* + (?P<id>(?:\d[a-z]{2}\d{13}|\w{8}\-(?:\w{4}\-){3}\w{12}))''' + _TESTS = [ + { + 'url': 'http://www.nfl.com/videos/nfl-game-highlights/0ap3000000398478/Week-3-Redskins-vs-Eagles-highlights', + 'md5': '394ef771ddcd1354f665b471d78ec4c6', + 'info_dict': { + 'id': '0ap3000000398478', + 'ext': 'mp4', + 'title': 'Week 3: Redskins vs. Eagles highlights', + 'description': 'md5:56323bfb0ac4ee5ab24bd05fdf3bf478', + 'upload_date': '20140921', + 'timestamp': 1411337580, + 'thumbnail': 're:^https?://.*\.jpg$', + } + }, + { + 'url': 'http://prod.www.steelers.clubs.nfl.com/video-and-audio/videos/LIVE_Post_Game_vs_Browns/9d72f26a-9e2b-4718-84d3-09fb4046c266', + 'md5': 'cf85bdb4bc49f6e9d3816d130c78279c', + 'info_dict': { + 'id': '9d72f26a-9e2b-4718-84d3-09fb4046c266', + 'ext': 'mp4', + 'title': 'LIVE: Post Game vs. 
Browns', + 'description': 'md5:6a97f7e5ebeb4c0e69a418a89e0636e8', + 'upload_date': '20131229', + 'timestamp': 1388354455, + 'thumbnail': 're:^https?://.*\.jpg$', + } + } + ] + + @staticmethod + def prepend_host(host, url): + if not url.startswith('http'): + if not url.startswith('/'): + url = '/%s' % url + url = 'http://{0:}{1:}'.format(host, url) + return url + + @staticmethod + def format_from_stream(stream, protocol, host, path_prefix='', + preference=0, note=None): + url = '{protocol:}://{host:}/{prefix:}{path:}'.format( + protocol=protocol, + host=host, + prefix=path_prefix, + path=stream.get('path'), + ) + return { + 'url': url, + 'vbr': int_or_none(stream.get('rate', 0), 1000), + 'preference': preference, + 'format_note': note, + } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + video_id, host = mobj.group('id'), mobj.group('host') + + webpage = self._download_webpage(url, video_id) + + config_url = NFLIE.prepend_host(host, self._search_regex( + r'(?:config|configURL)\s*:\s*"([^"]+)"', webpage, 'config URL')) + config = self._download_json(config_url, video_id, + note='Downloading player config') + url_template = NFLIE.prepend_host( + host, '{contentURLTemplate:}'.format(**config)) + video_data = self._download_json( + url_template.format(id=video_id), video_id) + + formats = [] + cdn_data = video_data.get('cdnData', {}) + streams = cdn_data.get('bitrateInfo', []) + if cdn_data.get('format') == 'EXTERNAL_HTTP_STREAM': + parts = compat_urllib_parse_urlparse(cdn_data.get('uri')) + protocol, host = parts.scheme, parts.netloc + for stream in streams: + formats.append( + NFLIE.format_from_stream(stream, protocol, host)) + else: + cdns = config.get('cdns') + if not cdns: + raise ExtractorError('Failed to get CDN data', expected=True) + + for name, cdn in cdns.items(): + # LimeLight streams don't seem to work + if cdn.get('name') == 'LIMELIGHT': + continue + + protocol = cdn.get('protocol') + host = remove_end(cdn.get('host', ''), '/') + 
if not (protocol and host): + continue + + prefix = cdn.get('pathprefix', '') + if prefix and not prefix.endswith('/'): + prefix = '%s/' % prefix + + preference = 0 + if protocol == 'rtmp': + preference = -2 + elif 'prog' in name.lower(): + preference = 1 + + for stream in streams: + formats.append( + NFLIE.format_from_stream(stream, protocol, host, + prefix, preference, name)) + + self._sort_formats(formats) + + thumbnail = None + for q in ('xl', 'l', 'm', 's', 'xs'): + thumbnail = video_data.get('imagePaths', {}).get(q) + if thumbnail: + break + + return { + 'id': video_id, + 'title': video_data.get('headline'), + 'formats': formats, + 'description': video_data.get('caption'), + 'duration': video_data.get('duration'), + 'thumbnail': thumbnail, + 'timestamp': int_or_none(video_data.get('posted'), 1000), + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/nhl.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/nhl.py new file mode 100644 index 0000000000..d3a4fc5138 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/nhl.py @@ -0,0 +1,155 @@ +from __future__ import unicode_literals + +import re +import json +import os + +from .common import InfoExtractor +from ..compat import ( + compat_urlparse, + compat_urllib_parse, + compat_urllib_parse_urlparse +) +from ..utils import ( + unified_strdate, +) + + +class NHLBaseInfoExtractor(InfoExtractor): + @staticmethod + def _fix_json(json_string): + return json_string.replace('\\\'', '\'') + + def _extract_video(self, info): + video_id = info['id'] + self.report_extraction(video_id) + + initial_video_url = info['publishPoint'] + if info['formats'] == '1': + parsed_url = compat_urllib_parse_urlparse(initial_video_url) + filename, ext = os.path.splitext(parsed_url.path) + path = '%s_sd%s' % (filename, ext) + data = compat_urllib_parse.urlencode({ + 'type': 'fvod', + 'path': compat_urlparse.urlunparse(parsed_url[:2] + (path,) + 
parsed_url[3:]) + }) + path_url = 'http://video.nhl.com/videocenter/servlets/encryptvideopath?' + data + path_doc = self._download_xml( + path_url, video_id, 'Downloading final video url') + video_url = path_doc.find('path').text + else: + video_url = initial_video_url + + join = compat_urlparse.urljoin + return { + 'id': video_id, + 'title': info['name'], + 'url': video_url, + 'description': info['description'], + 'duration': int(info['duration']), + 'thumbnail': join(join(video_url, '/u/'), info['bigImage']), + 'upload_date': unified_strdate(info['releaseDate'].split('.')[0]), + } + + +class NHLIE(NHLBaseInfoExtractor): + IE_NAME = 'nhl.com' + _VALID_URL = r'https?://video(?P<team>\.[^.]*)?\.nhl\.com/videocenter/console(?:\?(?:.*?[?&])?)id=(?P<id>[-0-9a-zA-Z]+)' + + _TESTS = [{ + 'url': 'http://video.canucks.nhl.com/videocenter/console?catid=6?id=453614', + 'md5': 'db704a4ea09e8d3988c85e36cc892d09', + 'info_dict': { + 'id': '453614', + 'ext': 'mp4', + 'title': 'Quick clip: Weise 4-3 goal vs Flames', + 'description': 'Dale Weise scores his first of the season to put the Canucks up 4-3.', + 'duration': 18, + 'upload_date': '20131006', + }, + }, { + 'url': 'http://video.nhl.com/videocenter/console?id=2014020024-628-h', + 'md5': 'd22e82bc592f52d37d24b03531ee9696', + 'info_dict': { + 'id': '2014020024-628-h', + 'ext': 'mp4', + 'title': 'Alex Galchenyuk Goal on Ray Emery (14:40/3rd)', + 'description': 'Home broadcast - Montreal Canadiens at Philadelphia Flyers - October 11, 2014', + 'duration': 0, + 'upload_date': '20141011', + }, + }, { + 'url': 'http://video.mapleleafs.nhl.com/videocenter/console?id=58665&catid=802', + 'md5': 'c78fc64ea01777e426cfc202b746c825', + 'info_dict': { + 'id': '58665', + 'ext': 'flv', + 'title': 'Classic Game In Six - April 22, 1979', + 'description': 'It was the last playoff game for the Leafs in the decade, and the last time the Leafs and Habs played in the playoffs. 
Great game, not a great ending.', + 'duration': 400, + 'upload_date': '20100129' + }, + }, { + 'url': 'http://video.flames.nhl.com/videocenter/console?id=630616', + 'only_matching': True, + }] + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group('id') + json_url = 'http://video.nhl.com/videocenter/servlets/playlist?ids=%s&format=json' % video_id + data = self._download_json( + json_url, video_id, transform_source=self._fix_json) + return self._extract_video(data[0]) + + +class NHLVideocenterIE(NHLBaseInfoExtractor): + IE_NAME = 'nhl.com:videocenter' + IE_DESC = 'NHL videocenter category' + _VALID_URL = r'https?://video\.(?P<team>[^.]*)\.nhl\.com/videocenter/(console\?[^(id=)]*catid=(?P<catid>[0-9]+)(?![&?]id=).*?)?$' + _TEST = { + 'url': 'http://video.canucks.nhl.com/videocenter/console?catid=999', + 'info_dict': { + 'id': '999', + 'title': 'Highlights', + }, + 'playlist_count': 12, + } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + team = mobj.group('team') + webpage = self._download_webpage(url, team) + cat_id = self._search_regex( + [r'var defaultCatId = "(.+?)";', + r'{statusIndex:0,index:0,.*?id:(.*?),'], + webpage, 'category id') + playlist_title = self._html_search_regex( + r'tab0"[^>]*?>(.*?)</td>', + webpage, 'playlist title', flags=re.DOTALL).lower().capitalize() + + data = compat_urllib_parse.urlencode({ + 'cid': cat_id, + # This is the default value + 'count': 12, + 'ptrs': 3, + 'format': 'json', + }) + path = '/videocenter/servlets/browse?' 
+ data + request_url = compat_urlparse.urljoin(url, path) + response = self._download_webpage(request_url, playlist_title) + response = self._fix_json(response) + if not response.strip(): + self._downloader.report_warning('Got an empty reponse, trying ' + 'adding the "newvideos" parameter') + response = self._download_webpage(request_url + '&newvideos=true', + playlist_title) + response = self._fix_json(response) + videos = json.loads(response) + + return { + '_type': 'playlist', + 'title': playlist_title, + 'id': cat_id, + 'entries': [self._extract_video(v) for v in videos], + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/niconico.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/niconico.py new file mode 100644 index 0000000000..4c18904169 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/niconico.py @@ -0,0 +1,189 @@ +# encoding: utf-8 +from __future__ import unicode_literals + +import re +import json + +from .common import InfoExtractor +from ..compat import ( + compat_urllib_parse, + compat_urllib_request, + compat_urlparse, +) +from ..utils import ( + ExtractorError, + int_or_none, + parse_duration, + unified_strdate, +) + + +class NiconicoIE(InfoExtractor): + IE_NAME = 'niconico' + IE_DESC = 'Ц┐▀Ц┌ЁЦ┐▀Ц┌ЁЕ▀∙Г■╩' + + _TEST = { + 'url': 'http://www.nicovideo.jp/watch/sm22312215', + 'md5': 'd1a75c0823e2f629128c43e1212760f9', + 'info_dict': { + 'id': 'sm22312215', + 'ext': 'mp4', + 'title': 'Big Buck Bunny', + 'uploader': 'takuya0301', + 'uploader_id': '2698420', + 'upload_date': '20131123', + 'description': '(c) copyright 2008, Blender Foundation / www.bigbuckbunny.org', + 'duration': 33, + }, + 'params': { + 'username': 'ydl.niconico@gmail.com', + 'password': 'youtube-dl', + }, + } + + _VALID_URL = r'https?://(?:www\.|secure\.)?nicovideo\.jp/watch/((?:[a-z]{2})?[0-9]+)' + _NETRC_MACHINE = 'niconico' + # Determine whether the downloader used authentication to 
download video + _AUTHENTICATED = False + + def _real_initialize(self): + self._login() + + def _login(self): + (username, password) = self._get_login_info() + # No authentication to be performed + if not username: + return True + + # Log in + login_form_strs = { + 'mail': username, + 'password': password, + } + # Convert to UTF-8 *before* urlencode because Python 2.x's urlencode + # chokes on unicode + login_form = dict((k.encode('utf-8'), v.encode('utf-8')) for k, v in login_form_strs.items()) + login_data = compat_urllib_parse.urlencode(login_form).encode('utf-8') + request = compat_urllib_request.Request( + 'https://secure.nicovideo.jp/secure/login', login_data) + login_results = self._download_webpage( + request, None, note='Logging in', errnote='Unable to log in') + if re.search(r'(?i)<h1 class="mb8p4">Log in error</h1>', login_results) is not None: + self._downloader.report_warning('unable to log in: bad username or password') + return False + # Successful login + self._AUTHENTICATED = True + return True + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group(1) + + # Get video webpage. 
We are not actually interested in it, but need + # the cookies in order to be able to download the info webpage + self._download_webpage('http://www.nicovideo.jp/watch/' + video_id, video_id) + + video_info = self._download_xml( + 'http://ext.nicovideo.jp/api/getthumbinfo/' + video_id, video_id, + note='Downloading video info page') + + if self._AUTHENTICATED: + # Get flv info + flv_info_webpage = self._download_webpage( + 'http://flapi.nicovideo.jp/api/getflv?v=' + video_id, + video_id, 'Downloading flv info') + else: + # Get external player info + ext_player_info = self._download_webpage( + 'http://ext.nicovideo.jp/thumb_watch/' + video_id, video_id) + thumb_play_key = self._search_regex( + r'\'thumbPlayKey\'\s*:\s*\'(.*?)\'', ext_player_info, 'thumbPlayKey') + + # Get flv info + flv_info_data = compat_urllib_parse.urlencode({ + 'k': thumb_play_key, + 'v': video_id + }) + flv_info_request = compat_urllib_request.Request( + 'http://ext.nicovideo.jp/thumb_watch', flv_info_data, + {'Content-Type': 'application/x-www-form-urlencoded'}) + flv_info_webpage = self._download_webpage( + flv_info_request, video_id, + note='Downloading flv info', errnote='Unable to download flv info') + + if 'deleted=' in flv_info_webpage: + raise ExtractorError('The video has been deleted.', + expected=True) + video_real_url = compat_urlparse.parse_qs(flv_info_webpage)['url'][0] + + # Start extracting information + title = video_info.find('.//title').text + extension = video_info.find('.//movie_type').text + video_format = extension.upper() + thumbnail = video_info.find('.//thumbnail_url').text + description = video_info.find('.//description').text + upload_date = unified_strdate(video_info.find('.//first_retrieve').text.split('+')[0]) + view_count = int_or_none(video_info.find('.//view_counter').text) + comment_count = int_or_none(video_info.find('.//comment_num').text) + duration = parse_duration(video_info.find('.//length').text) + webpage_url = video_info.find('.//watch_url').text + + 
if video_info.find('.//ch_id') is not None: + uploader_id = video_info.find('.//ch_id').text + uploader = video_info.find('.//ch_name').text + elif video_info.find('.//user_id') is not None: + uploader_id = video_info.find('.//user_id').text + uploader = video_info.find('.//user_nickname').text + else: + uploader_id = uploader = None + + return { + 'id': video_id, + 'url': video_real_url, + 'title': title, + 'ext': extension, + 'format': video_format, + 'thumbnail': thumbnail, + 'description': description, + 'uploader': uploader, + 'upload_date': upload_date, + 'uploader_id': uploader_id, + 'view_count': view_count, + 'comment_count': comment_count, + 'duration': duration, + 'webpage_url': webpage_url, + } + + +class NiconicoPlaylistIE(InfoExtractor): + _VALID_URL = r'https?://www\.nicovideo\.jp/mylist/(?P<id>\d+)' + + _TEST = { + 'url': 'http://www.nicovideo.jp/mylist/27411728', + 'info_dict': { + 'id': '27411728', + 'title': 'AKB48Ц│╝Ц┌╙Ц┐╪Ц┐╚Ц┐┼Ц┌╓Ц┐┬Ц┐▀Ц┐┐Ц┐²Ц┐Ё', + }, + 'playlist_mincount': 225, + } + + def _real_extract(self, url): + list_id = self._match_id(url) + webpage = self._download_webpage(url, list_id) + + entries_json = self._search_regex(r'Mylist\.preload\(\d+, (\[.*\])\);', + webpage, 'entries') + entries = json.loads(entries_json) + entries = [{ + '_type': 'url', + 'ie_key': NiconicoIE.ie_key(), + 'url': ('http://www.nicovideo.jp/watch/%s' % + entry['item_data']['video_id']), + } for entry in entries] + + return { + '_type': 'playlist', + 'title': self._search_regex(r'\s+name: "(.*?)"', webpage, 'title'), + 'id': list_id, + 'entries': entries, + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/ninegag.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/ninegag.py new file mode 100644 index 0000000000..7f842b5c25 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/ninegag.py @@ -0,0 +1,73 @@ +from __future__ import unicode_literals + +import re +import json 
+ +from .common import InfoExtractor +from ..utils import str_to_int + + +class NineGagIE(InfoExtractor): + IE_NAME = '9gag' + _VALID_URL = r'''(?x)^https?://(?:www\.)?9gag\.tv/ + (?: + v/(?P<numid>[0-9]+)| + p/(?P<id>[a-zA-Z0-9]+)/(?P<display_id>[^?#/]+) + ) + ''' + + _TESTS = [{ + "url": "http://9gag.tv/v/1912", + "info_dict": { + "id": "1912", + "ext": "mp4", + "description": "This 3-minute video will make you smile and then make you feel untalented and insignificant. Anyway, you should share this awesomeness. (Thanks, Dino!)", + "title": "\"People Are Awesome 2013\" Is Absolutely Awesome", + 'uploader_id': 'UCdEH6EjDKwtTe-sO2f0_1XA', + 'uploader': 'CompilationChannel', + 'upload_date': '20131110', + "view_count": int, + "thumbnail": "re:^https?://", + }, + 'add_ie': ['Youtube'] + }, { + 'url': 'http://9gag.tv/p/KklwM/alternate-banned-opening-scene-of-gravity?ref=fsidebar', + 'info_dict': { + 'id': 'KklwM', + 'ext': 'mp4', + 'display_id': 'alternate-banned-opening-scene-of-gravity', + "description": "While Gravity was a pretty awesome movie already, YouTuber Krishna Shenoi came up with a way to improve upon it, introducing a much better solution to Sandra Bullock's seemingly endless tumble in space. 
The ending is priceless.", + 'title': "Banned Opening Scene Of \"Gravity\" That Changes The Whole Movie", + 'uploader': 'Krishna Shenoi', + 'upload_date': '20140401', + 'uploader_id': 'krishnashenoi93', + }, + }] + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group('numid') or mobj.group('id') + display_id = mobj.group('display_id') or video_id + + webpage = self._download_webpage(url, display_id) + + post_view = json.loads(self._html_search_regex( + r'var postView = new app\.PostView\({\s*post:\s*({.+?}),\s*posts:\s*prefetchedCurrentPost', webpage, 'post view')) + + youtube_id = post_view['videoExternalId'] + title = post_view['title'] + description = post_view['description'] + view_count = str_to_int(post_view['externalView']) + thumbnail = post_view.get('thumbnail_700w') or post_view.get('ogImageUrl') or post_view.get('thumbnail_300w') + + return { + '_type': 'url_transparent', + 'url': youtube_id, + 'ie_key': 'Youtube', + 'id': video_id, + 'display_id': display_id, + 'title': title, + 'description': description, + 'view_count': view_count, + 'thumbnail': thumbnail, + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/noco.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/noco.py new file mode 100644 index 0000000000..251e6da074 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/noco.py @@ -0,0 +1,174 @@ +# encoding: utf-8 +from __future__ import unicode_literals + +import re +import time +import hashlib + +from .common import InfoExtractor +from ..compat import ( + compat_str, + compat_urllib_parse, + compat_urllib_request, +) +from ..utils import ( + clean_html, + ExtractorError, + unified_strdate, +) + + +class NocoIE(InfoExtractor): + _VALID_URL = r'http://(?:(?:www\.)?noco\.tv/emission/|player\.noco\.tv/\?idvideo=)(?P<id>\d+)' + _LOGIN_URL = 'http://noco.tv/do.php' + _API_URL_TEMPLATE = 
'https://api.noco.tv/1.1/%s?ts=%s&tk=%s' + _SUB_LANG_TEMPLATE = '&sub_lang=%s' + _NETRC_MACHINE = 'noco' + + _TEST = { + 'url': 'http://noco.tv/emission/11538/nolife/ami-ami-idol-hello-france/', + 'md5': '0a993f0058ddbcd902630b2047ef710e', + 'info_dict': { + 'id': '11538', + 'ext': 'mp4', + 'title': 'Ami Ami Idol - Hello! France', + 'description': 'md5:4eaab46ab68fa4197a317a88a53d3b86', + 'upload_date': '20140412', + 'uploader': 'Nolife', + 'uploader_id': 'NOL', + 'duration': 2851.2, + }, + 'skip': 'Requires noco account', + } + + def _real_initialize(self): + self._login() + + def _login(self): + (username, password) = self._get_login_info() + if username is None: + return + + login_form = { + 'a': 'login', + 'cookie': '1', + 'username': username, + 'password': password, + } + request = compat_urllib_request.Request(self._LOGIN_URL, compat_urllib_parse.urlencode(login_form)) + request.add_header('Content-Type', 'application/x-www-form-urlencoded; charset=UTF-8') + + login = self._download_json(request, None, 'Logging in as %s' % username) + + if 'erreur' in login: + raise ExtractorError('Unable to login: %s' % clean_html(login['erreur']), expected=True) + + def _call_api(self, path, video_id, note, sub_lang=None): + ts = compat_str(int(time.time() * 1000)) + tk = hashlib.md5((hashlib.md5(ts.encode('ascii')).hexdigest() + '#8S?uCraTedap6a').encode('ascii')).hexdigest() + url = self._API_URL_TEMPLATE % (path, ts, tk) + if sub_lang: + url += self._SUB_LANG_TEMPLATE % sub_lang + + resp = self._download_json(url, video_id, note) + + if isinstance(resp, dict) and resp.get('error'): + self._raise_error(resp['error'], resp['description']) + + return resp + + def _raise_error(self, error, description): + raise ExtractorError( + '%s returned error: %s - %s' % (self.IE_NAME, error, description), + expected=True) + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group('id') + + medias = self._call_api( + 'shows/%s/medias' % video_id, 
+ video_id, 'Downloading video JSON') + + qualities = self._call_api( + 'qualities', + video_id, 'Downloading qualities JSON') + + formats = [] + + for lang, lang_dict in medias['fr']['video_list'].items(): + for format_id, fmt in lang_dict['quality_list'].items(): + format_id_extended = '%s-%s' % (lang, format_id) if lang != 'none' else format_id + + video = self._call_api( + 'shows/%s/video/%s/fr' % (video_id, format_id.lower()), + video_id, 'Downloading %s video JSON' % format_id_extended, + lang if lang != 'none' else None) + + file_url = video['file'] + if not file_url: + continue + + if file_url in ['forbidden', 'not found']: + popmessage = video['popmessage'] + self._raise_error(popmessage['title'], popmessage['message']) + + formats.append({ + 'url': file_url, + 'format_id': format_id_extended, + 'width': fmt['res_width'], + 'height': fmt['res_lines'], + 'abr': fmt['audiobitrate'], + 'vbr': fmt['videobitrate'], + 'filesize': fmt['filesize'], + 'format_note': qualities[format_id]['quality_name'], + 'preference': qualities[format_id]['priority'], + }) + + self._sort_formats(formats) + + show = self._call_api( + 'shows/by_id/%s' % video_id, + video_id, 'Downloading show JSON')[0] + + upload_date = unified_strdate(show['online_date_start_utc']) + uploader = show['partner_name'] + uploader_id = show['partner_key'] + duration = show['duration_ms'] / 1000.0 + + thumbnails = [] + for thumbnail_key, thumbnail_url in show.items(): + m = re.search(r'^screenshot_(?P<width>\d+)x(?P<height>\d+)$', thumbnail_key) + if not m: + continue + thumbnails.append({ + 'url': thumbnail_url, + 'width': int(m.group('width')), + 'height': int(m.group('height')), + }) + + episode = show.get('show_TT') or show.get('show_OT') + family = show.get('family_TT') or show.get('family_OT') + episode_number = show.get('episode_number') + + title = '' + if family: + title += family + if episode_number: + title += ' #' + compat_str(episode_number) + if episode: + title += ' - ' + episode + + 
description = show.get('show_resume') or show.get('family_resume') + + return { + 'id': video_id, + 'title': title, + 'description': description, + 'thumbnails': thumbnails, + 'upload_date': upload_date, + 'uploader': uploader, + 'uploader_id': uploader_id, + 'duration': duration, + 'formats': formats, + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/normalboots.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/normalboots.py new file mode 100644 index 0000000000..3d35b11ac8 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/normalboots.py @@ -0,0 +1,51 @@ +# encoding: utf-8 +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor + +from ..utils import ( + unified_strdate, +) + + +class NormalbootsIE(InfoExtractor): + _VALID_URL = r'http://(?:www\.)?normalboots\.com/video/(?P<videoid>[0-9a-z-]*)/?$' + _TEST = { + 'url': 'http://normalboots.com/video/home-alone-games-jontron/', + 'md5': '8bf6de238915dd501105b44ef5f1e0f6', + 'info_dict': { + 'id': 'home-alone-games-jontron', + 'ext': 'mp4', + 'title': 'Home Alone Games - JonTron - NormalBoots', + 'description': 'Jon is late for Christmas. Typical. 
Thanks to: Paul Ritchey for Co-Writing/Filming: http://www.youtube.com/user/ContinueShow Michael Azzi for Christmas Intro Animation: http://michafrar.tumblr.com/ Jerrod Waters for Christmas Intro Music: http://www.youtube.com/user/xXJerryTerryXx Casey Ormond for Б─≤Tense Battle ThemeБ─≥:\xa0http://www.youtube.com/Kiamet/', + 'uploader': 'JonTron', + 'upload_date': '20140125', + } + } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group('videoid') + + webpage = self._download_webpage(url, video_id) + video_uploader = self._html_search_regex(r'Posted\sby\s<a\shref="[A-Za-z0-9/]*">(?P<uploader>[A-Za-z]*)\s</a>', + webpage, 'uploader') + raw_upload_date = self._html_search_regex('<span style="text-transform:uppercase; font-size:inherit;">[A-Za-z]+, (?P<date>.*)</span>', + webpage, 'date') + video_upload_date = unified_strdate(raw_upload_date) + + player_url = self._html_search_regex(r'<iframe\swidth="[0-9]+"\sheight="[0-9]+"\ssrc="(?P<url>[\S]+)"', webpage, 'url') + player_page = self._download_webpage(player_url, video_id) + video_url = self._html_search_regex(r"file:\s'(?P<file>[^']+\.mp4)'", player_page, 'file') + + return { + 'id': video_id, + 'url': video_url, + 'title': self._og_search_title(webpage), + 'description': self._og_search_description(webpage), + 'thumbnail': self._og_search_thumbnail(webpage), + 'uploader': video_uploader, + 'upload_date': video_upload_date, + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/nosvideo.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/nosvideo.py new file mode 100644 index 0000000000..f5ef856db0 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/nosvideo.py @@ -0,0 +1,77 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor +from ..compat import ( + compat_urllib_request, +) +from ..utils import ( + ExtractorError, + 
urlencode_postdata, + xpath_text, + xpath_with_ns, +) + +_x = lambda p: xpath_with_ns(p, {'xspf': 'http://xspf.org/ns/0/'}) + + +class NosVideoIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?nosvideo\.com/' + \ + '(?:embed/|\?v=)(?P<id>[A-Za-z0-9]{12})/?' + _PLAYLIST_URL = 'http://nosvideo.com/xml/{xml_id:s}.xml' + _FILE_DELETED_REGEX = r'<b>File Not Found</b>' + _TEST = { + 'url': 'http://nosvideo.com/?v=mu8fle7g7rpq', + 'md5': '6124ed47130d8be3eacae635b071e6b6', + 'info_dict': { + 'id': 'mu8fle7g7rpq', + 'ext': 'mp4', + 'title': 'big_buck_bunny_480p_surround-fix.avi.mp4', + 'thumbnail': 're:^https?://.*\.jpg$', + } + } + + def _real_extract(self, url): + video_id = self._match_id(url) + + fields = { + 'id': video_id, + 'op': 'download1', + 'method_free': 'Continue to Video', + } + req = compat_urllib_request.Request(url, urlencode_postdata(fields)) + req.add_header('Content-type', 'application/x-www-form-urlencoded') + webpage = self._download_webpage(req, video_id, + 'Downloading download page') + if re.search(self._FILE_DELETED_REGEX, webpage) is not None: + raise ExtractorError('Video %s does not exist' % video_id, + expected=True) + + xml_id = self._search_regex(r'php\|([^\|]+)\|', webpage, 'XML ID') + playlist_url = self._PLAYLIST_URL.format(xml_id=xml_id) + playlist = self._download_xml(playlist_url, video_id) + + track = playlist.find(_x('.//xspf:track')) + if track is None: + raise ExtractorError( + 'XML playlist is missing the \'track\' element', + expected=True) + title = xpath_text(track, _x('./xspf:title'), 'title') + url = xpath_text(track, _x('./xspf:file'), 'URL', fatal=True) + thumbnail = xpath_text(track, _x('./xspf:image'), 'thumbnail') + if title is not None: + title = title.strip() + + formats = [{ + 'format_id': 'sd', + 'url': url, + }] + + return { + 'id': video_id, + 'title': title, + 'thumbnail': thumbnail, + 'formats': formats, + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/novamov.py 
b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/novamov.py new file mode 100644 index 0000000000..04d779890a --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/novamov.py @@ -0,0 +1,71 @@ +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor +from ..compat import ( + compat_urlparse, +) +from ..utils import ( + ExtractorError, +) + + +class NovaMovIE(InfoExtractor): + IE_NAME = 'novamov' + IE_DESC = 'NovaMov' + + _VALID_URL_TEMPLATE = r'http://(?:(?:www\.)?%(host)s/(?:file|video)/|(?:(?:embed|www)\.)%(host)s/embed\.php\?(?:.*?&)?v=)(?P<id>[a-z\d]{13})' + _VALID_URL = _VALID_URL_TEMPLATE % {'host': 'novamov\.com'} + + _HOST = 'www.novamov.com' + + _FILE_DELETED_REGEX = r'This file no longer exists on our servers!</h2>' + _FILEKEY_REGEX = r'flashvars\.filekey="(?P<filekey>[^"]+)";' + _TITLE_REGEX = r'(?s)<div class="v_tab blockborder rounded5" id="v_tab1">\s*<h3>([^<]+)</h3>' + _DESCRIPTION_REGEX = r'(?s)<div class="v_tab blockborder rounded5" id="v_tab1">\s*<h3>[^<]+</h3><p>([^<]+)</p>' + + _TEST = { + 'url': 'http://www.novamov.com/video/4rurhn9x446jj', + 'md5': '7205f346a52bbeba427603ba10d4b935', + 'info_dict': { + 'id': '4rurhn9x446jj', + 'ext': 'flv', + 'title': 'search engine optimization', + 'description': 'search engine optimization is used to rank the web page in the google search engine' + }, + 'skip': '"Invalid token" errors abound (in web interface as well as youtube-dl, there is nothing we can do about it.)' + } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group('id') + + page = self._download_webpage( + 'http://%s/video/%s' % (self._HOST, video_id), video_id, 'Downloading video page') + + if re.search(self._FILE_DELETED_REGEX, page) is not None: + raise ExtractorError('Video %s does not exist' % video_id, expected=True) + + filekey = self._search_regex(self._FILEKEY_REGEX, page, 'filekey') + + title = 
self._html_search_regex(self._TITLE_REGEX, page, 'title', fatal=False) + description = self._html_search_regex(self._DESCRIPTION_REGEX, page, 'description', default='', fatal=False) + + api_response = self._download_webpage( + 'http://%s/api/player.api.php?key=%s&file=%s' % (self._HOST, filekey, video_id), video_id, + 'Downloading video api response') + + response = compat_urlparse.parse_qs(api_response) + + if 'error_msg' in response: + raise ExtractorError('%s returned error: %s' % (self.IE_NAME, response['error_msg'][0]), expected=True) + + video_url = response['url'][0] + + return { + 'id': video_id, + 'url': video_url, + 'title': title, + 'description': description + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/nowness.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/nowness.py new file mode 100644 index 0000000000..6b2f3f55a6 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/nowness.py @@ -0,0 +1,64 @@ +# encoding: utf-8 +from __future__ import unicode_literals + +import re + +from .brightcove import BrightcoveIE +from .common import InfoExtractor +from ..utils import ExtractorError + + +class NownessIE(InfoExtractor): + _VALID_URL = r'https?://(?:(?:www|cn)\.)?nowness\.com/[^?#]*?/(?P<id>[0-9]+)/(?P<slug>[^/]+?)(?:$|[?#])' + + _TESTS = [ + { + 'url': 'http://www.nowness.com/day/2013/6/27/3131/candor--the-art-of-gesticulation', + 'md5': '068bc0202558c2e391924cb8cc470676', + 'info_dict': { + 'id': '2520295746001', + 'ext': 'mp4', + 'title': 'Candor: The Art of Gesticulation', + 'description': 'Candor: The Art of Gesticulation', + 'thumbnail': 're:^https?://.*\.jpg', + 'uploader': 'Nowness', + } + }, + { + 'url': 'http://cn.nowness.com/day/2014/8/7/4069/kasper-bj-rke-ft-jaakko-eino-kalevi--tnr', + 'md5': 'e79cf125e387216f86b2e0a5b5c63aa3', + 'info_dict': { + 'id': '3716354522001', + 'ext': 'mp4', + 'title': 'Kasper Bjц╦rke ft. 
Jaakko Eino Kalevi: TNR', + 'description': 'Kasper Bjц╦rke ft. Jaakko Eino Kalevi: TNR', + 'thumbnail': 're:^https?://.*\.jpg', + 'uploader': 'Nowness', + } + }, + ] + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group('slug') + + webpage = self._download_webpage(url, video_id) + player_url = self._search_regex( + r'"([^"]+/content/issue-[0-9.]+.js)"', webpage, 'player URL') + real_id = self._search_regex( + r'\sdata-videoId="([0-9]+)"', webpage, 'internal video ID') + + player_code = self._download_webpage( + player_url, video_id, + note='Downloading player JavaScript', + errnote='Player download failed') + player_code = player_code.replace("'+d+'", real_id) + + bc_url = BrightcoveIE._extract_brightcove_url(player_code) + if bc_url is None: + raise ExtractorError('Could not find player definition') + return { + '_type': 'url', + 'url': bc_url, + 'ie_key': 'Brightcove', + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/nowvideo.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/nowvideo.py new file mode 100644 index 0000000000..dec09cdfef --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/nowvideo.py @@ -0,0 +1,28 @@ +from __future__ import unicode_literals + +from .novamov import NovaMovIE + + +class NowVideoIE(NovaMovIE): + IE_NAME = 'nowvideo' + IE_DESC = 'NowVideo' + + _VALID_URL = NovaMovIE._VALID_URL_TEMPLATE % {'host': 'nowvideo\.(?:ch|sx|eu|at|ag|co|li)'} + + _HOST = 'www.nowvideo.ch' + + _FILE_DELETED_REGEX = r'>This file no longer exists on our servers.<' + _FILEKEY_REGEX = r'var fkzd="([^"]+)";' + _TITLE_REGEX = r'<h4>([^<]+)</h4>' + _DESCRIPTION_REGEX = r'</h4>\s*<p>([^<]+)</p>' + + _TEST = { + 'url': 'http://www.nowvideo.ch/video/0mw0yow7b6dxa', + 'md5': 'f8fbbc8add72bd95b7850c6a02fc8817', + 'info_dict': { + 'id': '0mw0yow7b6dxa', + 'ext': 'flv', + 'title': 'youtubedl test video _BaW_jenozKc.mp4', + 'description': 
'Description', + } + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/npo.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/npo.py new file mode 100644 index 0000000000..8da76ae45a --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/npo.py @@ -0,0 +1,181 @@ +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor +from ..utils import ( + unified_strdate, + parse_duration, + qualities, + strip_jsonp, + url_basename, + fix_xml_ampersands, +) + + +class NPOIE(InfoExtractor): + IE_NAME = 'npo.nl' + _VALID_URL = r'https?://www\.npo\.nl/[^/]+/[^/]+/(?P<id>[^/?]+)' + + _TESTS = [ + { + 'url': 'http://www.npo.nl/nieuwsuur/22-06-2014/VPWON_1220719', + 'md5': '4b3f9c429157ec4775f2c9cb7b911016', + 'info_dict': { + 'id': 'VPWON_1220719', + 'ext': 'm4v', + 'title': 'Nieuwsuur', + 'description': 'Dagelijks tussen tien en elf: nieuws, sport en achtergronden.', + 'upload_date': '20140622', + }, + }, + { + 'url': 'http://www.npo.nl/de-mega-mike-mega-thomas-show/27-02-2009/VARA_101191800', + 'md5': 'da50a5787dbfc1603c4ad80f31c5120b', + 'info_dict': { + 'id': 'VARA_101191800', + 'ext': 'm4v', + 'title': 'De Mega Mike & Mega Thomas show', + 'description': 'md5:3b74c97fc9d6901d5a665aac0e5400f4', + 'upload_date': '20090227', + 'duration': 2400, + }, + }, + { + 'url': 'http://www.npo.nl/tegenlicht/25-02-2013/VPWON_1169289', + 'md5': 'f8065e4e5a7824068ed3c7e783178f2c', + 'info_dict': { + 'id': 'VPWON_1169289', + 'ext': 'm4v', + 'title': 'Tegenlicht', + 'description': 'md5:d6476bceb17a8c103c76c3b708f05dd1', + 'upload_date': '20130225', + 'duration': 3000, + }, + }, + { + 'url': 'http://www.npo.nl/de-nieuwe-mens-deel-1/21-07-2010/WO_VPRO_043706', + 'info_dict': { + 'id': 'WO_VPRO_043706', + 'ext': 'wmv', + 'title': 'De nieuwe mens - Deel 1', + 'description': 'md5:518ae51ba1293ffb80d8d8ce90b74e4b', + 'duration': 4680, + }, + 'params': { + # mplayer mms download + 
'skip_download': True, + } + }, + ] + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group('id') + return self._get_info(video_id) + + def _get_info(self, video_id): + metadata = self._download_json( + 'http://e.omroep.nl/metadata/aflevering/%s' % video_id, + video_id, + # We have to remove the javascript callback + transform_source=strip_jsonp, + ) + token_page = self._download_webpage( + 'http://ida.omroep.nl/npoplayer/i.js', + video_id, + note='Downloading token' + ) + token = self._search_regex(r'npoplayer\.token = "(.+?)"', token_page, 'token') + + formats = [] + + pubopties = metadata.get('pubopties') + if pubopties: + quality = qualities(['adaptive', 'wmv_sb', 'h264_sb', 'wmv_bb', 'h264_bb', 'wvc1_std', 'h264_std']) + for format_id in pubopties: + format_info = self._download_json( + 'http://ida.omroep.nl/odi/?prid=%s&puboptions=%s&adaptive=yes&token=%s' + % (video_id, format_id, token), + video_id, 'Downloading %s JSON' % format_id) + if format_info.get('error_code', 0) or format_info.get('errorcode', 0): + continue + streams = format_info.get('streams') + if streams: + video_info = self._download_json( + streams[0] + '&type=json', + video_id, 'Downloading %s stream JSON' % format_id) + else: + video_info = format_info + video_url = video_info.get('url') + if not video_url: + continue + if format_id == 'adaptive': + formats.extend(self._extract_m3u8_formats(video_url, video_id)) + else: + formats.append({ + 'url': video_url, + 'format_id': format_id, + 'quality': quality(format_id), + }) + + streams = metadata.get('streams') + if streams: + for i, stream in enumerate(streams): + stream_url = stream.get('url') + if not stream_url: + continue + asx = self._download_xml( + stream_url, video_id, + 'Downloading stream %d ASX playlist' % i, + transform_source=fix_xml_ampersands) + ref = asx.find('./ENTRY/Ref') + if ref is None: + continue + video_url = ref.get('href') + if not video_url: + continue + formats.append({ + 
'url': video_url, + 'ext': stream.get('formaat', 'asf'), + 'quality': stream.get('kwaliteit'), + }) + + self._sort_formats(formats) + + return { + 'id': video_id, + 'title': metadata['titel'], + 'description': metadata['info'], + 'thumbnail': metadata.get('images', [{'url': None}])[-1]['url'], + 'upload_date': unified_strdate(metadata.get('gidsdatum')), + 'duration': parse_duration(metadata.get('tijdsduur')), + 'formats': formats, + } + + +class TegenlichtVproIE(NPOIE): + IE_NAME = 'tegenlicht.vpro.nl' + _VALID_URL = r'https?://tegenlicht\.vpro\.nl/afleveringen/.*?' + + _TESTS = [ + { + 'url': 'http://tegenlicht.vpro.nl/afleveringen/2012-2013/de-toekomst-komt-uit-afrika.html', + 'md5': 'f8065e4e5a7824068ed3c7e783178f2c', + 'info_dict': { + 'id': 'VPWON_1169289', + 'ext': 'm4v', + 'title': 'Tegenlicht', + 'description': 'md5:d6476bceb17a8c103c76c3b708f05dd1', + 'upload_date': '20130225', + }, + }, + ] + + def _real_extract(self, url): + name = url_basename(url) + webpage = self._download_webpage(url, name) + urn = self._html_search_meta('mediaurn', webpage) + info_page = self._download_json( + 'http://rs.vpro.nl/v2/api/media/%s.json' % urn, name) + return self._get_info(info_page['mid']) diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/nrk.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/nrk.py new file mode 100644 index 0000000000..96f0ae1ebd --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/nrk.py @@ -0,0 +1,144 @@ +# encoding: utf-8 +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor +from ..utils import ( + ExtractorError, + float_or_none, + unified_strdate, +) + + +class NRKIE(InfoExtractor): + _VALID_URL = r'http://(?:www\.)?nrk\.no/(?:video|lyd)/[^/]+/(?P<id>[\dA-F]{16})' + + _TESTS = [ + { + 'url': 'http://www.nrk.no/video/dompap_og_andre_fugler_i_piip_show/D0FA54B5C8B6CE59/emne/piipshow/', + 'md5': 
'a6eac35052f3b242bb6bb7f43aed5886', + 'info_dict': { + 'id': '150533', + 'ext': 'flv', + 'title': 'Dompap og andre fugler i Piip-Show', + 'description': 'md5:d9261ba34c43b61c812cb6b0269a5c8f' + } + }, + { + 'url': 'http://www.nrk.no/lyd/lyd_av_oppleser_for_blinde/AEFDDD5473BA0198/', + 'md5': '3471f2a51718195164e88f46bf427668', + 'info_dict': { + 'id': '154915', + 'ext': 'flv', + 'title': 'Slik hц╦res internett ut nц╔r du er blind', + 'description': 'md5:a621f5cc1bd75c8d5104cb048c6b8568', + } + }, + ] + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group('id') + + page = self._download_webpage(url, video_id) + + video_id = self._html_search_regex(r'<div class="nrk-video" data-nrk-id="(\d+)">', page, 'video id') + + data = self._download_json( + 'http://v7.psapi.nrk.no/mediaelement/%s' % video_id, video_id, 'Downloading media JSON') + + if data['usageRights']['isGeoBlocked']: + raise ExtractorError('NRK har ikke rettig-heter til ц╔ vise dette programmet utenfor Norge', expected=True) + + video_url = data['mediaUrl'] + '?hdcore=3.1.1&plugin=aasp-3.1.1.69.124' + + images = data.get('images') + if images: + thumbnails = images['webImages'] + thumbnails.sort(key=lambda image: image['pixelWidth']) + thumbnail = thumbnails[-1]['imageUrl'] + else: + thumbnail = None + + return { + 'id': video_id, + 'url': video_url, + 'ext': 'flv', + 'title': data['title'], + 'description': data['description'], + 'thumbnail': thumbnail, + } + + +class NRKTVIE(InfoExtractor): + _VALID_URL = r'http://tv\.nrk(?:super)?\.no/(?:serie/[^/]+|program)/(?P<id>[a-zA-Z]{4}\d{8})' + + _TESTS = [ + { + 'url': 'http://tv.nrk.no/serie/20-spoersmaal-tv/MUHH48000314/23-05-2014', + 'md5': '7b96112fbae1faf09a6f9ae1aff6cb84', + 'info_dict': { + 'id': 'MUHH48000314', + 'ext': 'flv', + 'title': '20 spц╦rsmц╔l', + 'description': 'md5:bdea103bc35494c143c6a9acdd84887a', + 'upload_date': '20140523', + 'duration': 1741.52, + } + }, + { + 'url': 
'http://tv.nrk.no/program/mdfp15000514', + 'md5': 'af01795a31f1cf7265c8657534d8077b', + 'info_dict': { + 'id': 'mdfp15000514', + 'ext': 'flv', + 'title': 'Kunnskapskanalen: Grunnlovsjubilц╘et - Stor stц╔hei for ingenting', + 'description': 'md5:654c12511f035aed1e42bdf5db3b206a', + 'upload_date': '20140524', + 'duration': 4605.0, + } + }, + ] + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group('id') + + page = self._download_webpage(url, video_id) + + title = self._html_search_meta('title', page, 'title') + description = self._html_search_meta('description', page, 'description') + thumbnail = self._html_search_regex(r'data-posterimage="([^"]+)"', page, 'thumbnail', fatal=False) + upload_date = unified_strdate(self._html_search_meta('rightsfrom', page, 'upload date', fatal=False)) + duration = float_or_none( + self._html_search_regex(r'data-duration="([^"]+)"', page, 'duration', fatal=False)) + + formats = [] + + f4m_url = re.search(r'data-media="([^"]+)"', page) + if f4m_url: + formats.append({ + 'url': f4m_url.group(1) + '?hdcore=3.1.1&plugin=aasp-3.1.1.69.124', + 'format_id': 'f4m', + 'ext': 'flv', + }) + + m3u8_url = re.search(r'data-hls-media="([^"]+)"', page) + if m3u8_url: + formats.append({ + 'url': m3u8_url.group(1), + 'format_id': 'm3u8', + }) + + self._sort_formats(formats) + + return { + 'id': video_id, + 'title': title, + 'description': description, + 'thumbnail': thumbnail, + 'upload_date': upload_date, + 'duration': duration, + 'formats': formats, + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/ntv.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/ntv.py new file mode 100644 index 0000000000..ee740cd9c0 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/ntv.py @@ -0,0 +1,148 @@ +# encoding: utf-8 +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor +from ..utils import ( + 
unescapeHTML +) + + +class NTVIE(InfoExtractor): + _VALID_URL = r'http://(?:www\.)?ntv\.ru/(?P<id>.+)' + + _TESTS = [ + { + 'url': 'http://www.ntv.ru/novosti/863142/', + 'info_dict': { + 'id': '746000', + 'ext': 'flv', + 'title': 'п п╬п╪п╟п╫п╢я┐я▌я┴п╦п╧ п╖п╣я─п╫п╬п╪п╬я─я│п╨п╦п╪ я└п╩п╬я┌п╬п╪ п©я─п╬п╡п╣п╩ п©п╣я─п╣пЁп╬п╡п╬я─я▀ п╡ я┬я┌п╟п╠п╣ п▓п°п║ пёп╨я─п╟п╦п╫я▀', + 'description': 'п п╬п╪п╟п╫п╢я┐я▌я┴п╦п╧ п╖п╣я─п╫п╬п╪п╬я─я│п╨п╦п╪ я└п╩п╬я┌п╬п╪ п©я─п╬п╡п╣п╩ п©п╣я─п╣пЁп╬п╡п╬я─я▀ п╡ я┬я┌п╟п╠п╣ п▓п°п║ пёп╨я─п╟п╦п╫я▀', + 'duration': 136, + }, + 'params': { + # rtmp download + 'skip_download': True, + }, + }, + { + 'url': 'http://www.ntv.ru/video/novosti/750370/', + 'info_dict': { + 'id': '750370', + 'ext': 'flv', + 'title': 'п═п╬п╢п╫я▀п╣ п©п╟я│я│п╟п╤п╦я─п╬п╡ п©я─п╬п©п╟п╡я┬п╣пЁп╬ Boeing п╫п╣ п╡п╣я─я▐я┌ п╡ я┌я─п╟пЁп╦я┤п╣я│п╨п╦п╧ п╦я│я┘п╬п╢', + 'description': 'п═п╬п╢п╫я▀п╣ п©п╟я│я│п╟п╤п╦я─п╬п╡ п©я─п╬п©п╟п╡я┬п╣пЁп╬ Boeing п╫п╣ п╡п╣я─я▐я┌ п╡ я┌я─п╟пЁп╦я┤п╣я│п╨п╦п╧ п╦я│я┘п╬п╢', + 'duration': 172, + }, + 'params': { + # rtmp download + 'skip_download': True, + }, + }, + { + 'url': 'http://www.ntv.ru/peredacha/segodnya/m23700/o232416', + 'info_dict': { + 'id': '747480', + 'ext': 'flv', + 'title': 'б╚п║п╣пЁп╬п╢п╫я▐б╩. 21б═п╪п╟я─я┌п╟ 2014б═пЁп╬п╢п╟. 16:00 ', + 'description': 'б╚п║п╣пЁп╬п╢п╫я▐б╩. 21б═п╪п╟я─я┌п╟ 2014б═пЁп╬п╢п╟. 
16:00 ', + 'duration': 1496, + }, + 'params': { + # rtmp download + 'skip_download': True, + }, + }, + { + 'url': 'http://www.ntv.ru/kino/Koma_film', + 'info_dict': { + 'id': '758100', + 'ext': 'flv', + 'title': 'п·я│я┌я─п╬я│я▌п╤п╣я┌п╫я▀п╧ я└п╦п╩я▄п╪ б╚п п╬п╪п╟б╩', + 'description': 'п·я│я┌я─п╬я│я▌п╤п╣я┌п╫я▀п╧ я└п╦п╩я▄п╪ б╚п п╬п╪п╟б╩', + 'duration': 5592, + }, + 'params': { + # rtmp download + 'skip_download': True, + }, + }, + { + 'url': 'http://www.ntv.ru/serial/Delo_vrachey/m31760/o233916/', + 'info_dict': { + 'id': '751482', + 'ext': 'flv', + 'title': 'б╚п■п╣п╩п╬ п╡я─п╟я┤п╣п╧б╩: б╚п■п╣я─п╣п╡я├п╣ п╤п╦п╥п╫п╦б╩', + 'description': 'б╚п■п╣п╩п╬ п╡я─п╟я┤п╣п╧б╩: б╚п■п╣я─п╣п╡я├п╣ п╤п╦п╥п╫п╦б╩', + 'duration': 2590, + }, + 'params': { + # rtmp download + 'skip_download': True, + }, + }, + ] + + _VIDEO_ID_REGEXES = [ + r'<meta property="og:url" content="http://www\.ntv\.ru/video/(\d+)', + r'<video embed=[^>]+><id>(\d+)</id>', + r'<video restriction[^>]+><key>(\d+)</key>', + ] + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group('id') + + page = self._download_webpage(url, video_id) + + video_id = self._html_search_regex(self._VIDEO_ID_REGEXES, page, 'video id') + + player = self._download_xml('http://www.ntv.ru/vi%s/' % video_id, video_id, 'Downloading video XML') + title = unescapeHTML(player.find('./data/title').text) + description = unescapeHTML(player.find('./data/description').text) + + video = player.find('./data/video') + video_id = video.find('./id').text + thumbnail = video.find('./splash').text + duration = int(video.find('./totaltime').text) + view_count = int(video.find('./views').text) + puid22 = video.find('./puid22').text + + apps = { + '4': 'video1', + '7': 'video2', + } + + app = apps.get(puid22, apps['4']) + + formats = [] + for format_id in ['', 'hi', 'webm']: + file = video.find('./%sfile' % format_id) + if file is None: + continue + size = video.find('./%ssize' % format_id) + formats.append({ + 'url': 
'rtmp://media.ntv.ru/%s' % app, + 'app': app, + 'play_path': file.text, + 'rtmp_conn': 'B:1', + 'player_url': 'http://www.ntv.ru/swf/vps1.swf?update=20131128', + 'page_url': 'http://www.ntv.ru', + 'flash_version': 'LNX 11,2,202,341', + 'rtmp_live': True, + 'ext': 'flv', + 'filesize': int(size.text), + }) + self._sort_formats(formats) + + return { + 'id': video_id, + 'title': title, + 'description': description, + 'thumbnail': thumbnail, + 'duration': duration, + 'view_count': view_count, + 'formats': formats, + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/nuvid.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/nuvid.py new file mode 100644 index 0000000000..57928f2aed --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/nuvid.py @@ -0,0 +1,75 @@ +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor +from ..compat import ( + compat_urllib_request, +) +from ..utils import ( + parse_duration, + unified_strdate, +) + + +class NuvidIE(InfoExtractor): + _VALID_URL = r'https?://(?:www|m)\.nuvid\.com/video/(?P<id>[0-9]+)' + _TEST = { + 'url': 'http://m.nuvid.com/video/1310741/', + 'md5': 'eab207b7ac4fccfb4e23c86201f11277', + 'info_dict': { + 'id': '1310741', + 'ext': 'mp4', + 'title': 'Horny babes show their awesome bodeis and', + 'duration': 129, + 'upload_date': '20140508', + 'age_limit': 18, + } + } + + def _real_extract(self, url): + video_id = self._match_id(url) + + formats = [] + + for dwnld_speed, format_id in [(0, '3gp'), (5, 'mp4')]: + request = compat_urllib_request.Request( + 'http://m.nuvid.com/play/%s' % video_id) + request.add_header('Cookie', 'skip_download_page=1; dwnld_speed=%d; adv_show=1' % dwnld_speed) + webpage = self._download_webpage( + request, video_id, 'Downloading %s page' % format_id) + video_url = self._html_search_regex( + r'<a\s+href="([^"]+)"\s+class="b_link">', webpage, '%s video URL' % format_id, 
fatal=False) + if not video_url: + continue + formats.append({ + 'url': video_url, + 'format_id': format_id, + }) + + webpage = self._download_webpage( + 'http://m.nuvid.com/video/%s' % video_id, video_id, 'Downloading video page') + title = self._html_search_regex( + [r'<span title="([^"]+)">', + r'<div class="thumb-holder video">\s*<h5[^>]*>([^<]+)</h5>'], webpage, 'title').strip() + thumbnails = [ + { + 'url': thumb_url, + } for thumb_url in re.findall(r'<img src="([^"]+)" alt="" />', webpage) + ] + thumbnail = thumbnails[0]['url'] if thumbnails else None + duration = parse_duration(self._html_search_regex( + r'<i class="fa fa-clock-o"></i>\s*(\d{2}:\d{2})', webpage, 'duration', fatal=False)) + upload_date = unified_strdate(self._html_search_regex( + r'<i class="fa fa-user"></i>\s*(\d{4}-\d{2}-\d{2})', webpage, 'upload date', fatal=False)) + + return { + 'id': video_id, + 'title': title, + 'thumbnails': thumbnails, + 'thumbnail': thumbnail, + 'duration': duration, + 'upload_date': upload_date, + 'age_limit': 18, + 'formats': formats, + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/nytimes.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/nytimes.py new file mode 100644 index 0000000000..56e1cad3b0 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/nytimes.py @@ -0,0 +1,77 @@ +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor +from ..utils import parse_iso8601 + + +class NYTimesIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?nytimes\.com/video/(?:[^/]+/)+(?P<id>\d+)' + + _TEST = { + 'url': 'http://www.nytimes.com/video/opinion/100000002847155/verbatim-what-is-a-photocopier.html?playlistId=100000001150263', + 'md5': '18a525a510f942ada2720db5f31644c0', + 'info_dict': { + 'id': '100000002847155', + 'ext': 'mov', + 'title': 'Verbatim: What Is a Photocopier?', + 'description': 'md5:93603dada88ddbda9395632fdc5da260', + 
'timestamp': 1398631707, + 'upload_date': '20140427', + 'uploader': 'Brett Weiner', + 'duration': 419, + } + } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group('id') + + video_data = self._download_json( + 'http://www.nytimes.com/svc/video/api/v2/video/%s' % video_id, video_id, 'Downloading video JSON') + + title = video_data['headline'] + description = video_data['summary'] + duration = video_data['duration'] / 1000.0 + + uploader = video_data['byline'] + timestamp = parse_iso8601(video_data['publication_date'][:-8]) + + def get_file_size(file_size): + if isinstance(file_size, int): + return file_size + elif isinstance(file_size, dict): + return int(file_size.get('value', 0)) + else: + return 0 + + formats = [ + { + 'url': video['url'], + 'format_id': video['type'], + 'vcodec': video['video_codec'], + 'width': video['width'], + 'height': video['height'], + 'filesize': get_file_size(video['fileSize']), + } for video in video_data['renditions'] + ] + self._sort_formats(formats) + + thumbnails = [ + { + 'url': 'http://www.nytimes.com/%s' % image['url'], + 'resolution': '%dx%d' % (image['width'], image['height']), + } for image in video_data['images'] + ] + + return { + 'id': video_id, + 'title': title, + 'description': description, + 'timestamp': timestamp, + 'uploader': uploader, + 'duration': duration, + 'formats': formats, + 'thumbnails': thumbnails, + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/oe1.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/oe1.py new file mode 100644 index 0000000000..38971ab4de --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/oe1.py @@ -0,0 +1,40 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import calendar +import datetime +import re + +from .common import InfoExtractor + +# audios on oe1.orf.at are only available for 7 days, so we can't +# add tests. 
+ + +class OE1IE(InfoExtractor): + IE_DESC = 'oe1.orf.at' + _VALID_URL = r'http://oe1\.orf\.at/programm/(?P<id>[0-9]+)' + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + show_id = mobj.group('id') + + data = self._download_json( + 'http://oe1.orf.at/programm/%s/konsole' % show_id, + show_id + ) + + timestamp = datetime.datetime.strptime('%s %s' % ( + data['item']['day_label'], + data['item']['time'] + ), '%d.%m.%Y %H:%M') + unix_timestamp = calendar.timegm(timestamp.utctimetuple()) + + return { + 'id': show_id, + 'title': data['item']['title'], + 'url': data['item']['url_stream'], + 'ext': 'mp3', + 'description': data['item'].get('info'), + 'timestamp': unix_timestamp + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/oktoberfesttv.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/oktoberfesttv.py new file mode 100644 index 0000000000..4a41c05421 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/oktoberfesttv.py @@ -0,0 +1,47 @@ +# encoding: utf-8 +from __future__ import unicode_literals + +from .common import InfoExtractor + + +class OktoberfestTVIE(InfoExtractor): + _VALID_URL = r'https?://www\.oktoberfest-tv\.de/[^/]+/[^/]+/video/(?P<id>[^/?#]+)' + + _TEST = { + 'url': 'http://www.oktoberfest-tv.de/de/kameras/video/hb-zelt', + 'info_dict': { + 'id': 'hb-zelt', + 'ext': 'mp4', + 'title': 're:^Live-Kamera: Hofbrц╓uzelt [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$', + 'thumbnail': 're:^https?://.*\.jpg$', + 'is_live': True, + }, + 'params': { + 'skip_download': True, + } + } + + def _real_extract(self, url): + video_id = self._match_id(url) + webpage = self._download_webpage(url, video_id) + + title = self._live_title(self._html_search_regex( + r'<h1><strong>.*?</strong>(.*?)</h1>', webpage, 'title')) + + clip = self._search_regex( + r"clip:\s*\{\s*url:\s*'([^']+)'", webpage, 'clip') + ncurl = self._search_regex( + 
r"netConnectionUrl:\s*'([^']+)'", webpage, 'rtmp base') + video_url = ncurl + clip + thumbnail = self._search_regex( + r"canvas:\s*\{\s*backgroundImage:\s*'url\(([^)]+)\)'", webpage, + 'thumbnail', fatal=False) + + return { + 'id': video_id, + 'title': title, + 'url': video_url, + 'ext': 'mp4', + 'is_live': True, + 'thumbnail': thumbnail, + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/ooyala.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/ooyala.py new file mode 100644 index 0000000000..d5b05c18fe --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/ooyala.py @@ -0,0 +1,97 @@ +from __future__ import unicode_literals +import re +import json + +from .common import InfoExtractor +from ..utils import ( + unescapeHTML, + ExtractorError, +) + + +class OoyalaIE(InfoExtractor): + _VALID_URL = r'(?:ooyala:|https?://.+?\.ooyala\.com/.*?(?:embedCode|ec)=)(?P<id>.+?)(&|$)' + + _TESTS = [ + { + # From http://it.slashdot.org/story/13/04/25/178216/recovering-data-from-broken-hard-drives-and-ssds-video + 'url': 'http://player.ooyala.com/player.js?embedCode=pxczE2YjpfHfn1f3M-ykG_AmJRRn0PD8', + 'info_dict': { + 'id': 'pxczE2YjpfHfn1f3M-ykG_AmJRRn0PD8', + 'ext': 'mp4', + 'title': 'Explaining Data Recovery from Hard Drives and SSDs', + 'description': 'How badly damaged does a drive have to be to defeat Russell and his crew? 
Apparently, smashed to bits.', + }, + }, { + # Only available for ipad + 'url': 'http://player.ooyala.com/player.js?embedCode=x1b3lqZDq9y_7kMyC2Op5qo-p077tXD0', + 'info_dict': { + 'id': 'x1b3lqZDq9y_7kMyC2Op5qo-p077tXD0', + 'ext': 'mp4', + 'title': 'Simulation Overview - Levels of Simulation', + 'description': '', + }, + }, + ] + + @staticmethod + def _url_for_embed_code(embed_code): + return 'http://player.ooyala.com/player.js?embedCode=%s' % embed_code + + @classmethod + def _build_url_result(cls, embed_code): + return cls.url_result(cls._url_for_embed_code(embed_code), + ie=cls.ie_key()) + + def _extract_result(self, info, more_info): + return { + 'id': info['embedCode'], + 'ext': 'mp4', + 'title': unescapeHTML(info['title']), + 'url': info.get('ipad_url') or info['url'], + 'description': unescapeHTML(more_info['description']), + 'thumbnail': more_info['promo'], + } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + embedCode = mobj.group('id') + player_url = 'http://player.ooyala.com/player.js?embedCode=%s' % embedCode + player = self._download_webpage(player_url, embedCode) + mobile_url = self._search_regex(r'mobile_player_url="(.+?)&device="', + player, 'mobile player url') + # Looks like some videos are only available for particular devices + # (e.g. http://player.ooyala.com/player.js?embedCode=x1b3lqZDq9y_7kMyC2Op5qo-p077tXD0 + # is only available for ipad) + # Working around with fetching URLs for all the devices found starting with 'unknown' + # until we succeed or eventually fail for each device. 
+ devices = re.findall(r'device\s*=\s*"([^"]+)";', player) + devices.remove('unknown') + devices.insert(0, 'unknown') + for device in devices: + mobile_player = self._download_webpage( + '%s&device=%s' % (mobile_url, device), embedCode, + 'Downloading mobile player JS for %s device' % device) + videos_info = self._search_regex( + r'var streams=window.oo_testEnv\?\[\]:eval\("\((\[{.*?}\])\)"\);', + mobile_player, 'info', fatal=False, default=None) + if videos_info: + break + if not videos_info: + raise ExtractorError('Unable to extract info') + videos_info = videos_info.replace('\\"', '"') + videos_more_info = self._search_regex( + r'eval\("\(({.*?\\"promo\\".*?})\)"', mobile_player, 'more info').replace('\\"', '"') + videos_info = json.loads(videos_info) + videos_more_info = json.loads(videos_more_info) + + if videos_more_info.get('lineup'): + videos = [self._extract_result(info, more_info) for (info, more_info) in zip(videos_info, videos_more_info['lineup'])] + return { + '_type': 'playlist', + 'id': embedCode, + 'title': unescapeHTML(videos_more_info['title']), + 'entries': videos, + } + else: + return self._extract_result(videos_info[0], videos_more_info) diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/orf.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/orf.py new file mode 100644 index 0000000000..4fed83bd61 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/orf.py @@ -0,0 +1,196 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import json +import re +import calendar +import datetime + +from .common import InfoExtractor +from ..utils import ( + HEADRequest, + unified_strdate, + ExtractorError, +) + + +class ORFTVthekIE(InfoExtractor): + IE_NAME = 'orf:tvthek' + IE_DESC = 'ORF TVthek' + _VALID_URL = r'https?://tvthek\.orf\.at/(?:programs/.+?/episodes|topics?/.+?|program/[^/]+)/(?P<id>\d+)' + + _TESTS = [{ + 'url': 
'http://tvthek.orf.at/program/Aufgetischt/2745173/Aufgetischt-Mit-der-Steirischen-Tafelrunde/8891389', + 'playlist': [{ + 'md5': '2942210346ed779588f428a92db88712', + 'info_dict': { + 'id': '8896777', + 'ext': 'mp4', + 'title': 'Aufgetischt: Mit der Steirischen Tafelrunde', + 'description': 'md5:c1272f0245537812d4e36419c207b67d', + 'duration': 2668, + 'upload_date': '20141208', + }, + }], + 'skip': 'Blocked outside of Austria / Germany', + }, { + 'url': 'http://tvthek.orf.at/topic/Im-Wandel-der-Zeit/8002126/Best-of-Ingrid-Thurnher/7982256', + 'playlist': [{ + 'md5': '68f543909aea49d621dfc7703a11cfaf', + 'info_dict': { + 'id': '7982259', + 'ext': 'mp4', + 'title': 'Best of Ingrid Thurnher', + 'upload_date': '20140527', + 'description': 'Viele Jahre war Ingrid Thurnher das "Gesicht" der ZIB 2. Vor ihrem Wechsel zur ZIB 2 im jahr 1995 moderierte sie unter anderem "Land und Leute", "Österreich-Bild" und "Niederösterreich heute".', + } + }], + '_skip': 'Blocked outside of Austria / Germany', + }] + + def _real_extract(self, url): + playlist_id = self._match_id(url) + webpage = self._download_webpage(url, playlist_id) + + data_json = self._search_regex( + r'initializeAdworx\((.+?)\);\n', webpage, 'video info') + all_data = json.loads(data_json) + + def get_segments(all_data): + for data in all_data: + if data['name'] in ( + 'Tracker::EPISODE_DETAIL_PAGE_OVER_PROGRAM', + 'Tracker::EPISODE_DETAIL_PAGE_OVER_TOPIC'): + return data['values']['segments'] + + sdata = get_segments(all_data) + if not sdata: + raise ExtractorError('Unable to extract segments') + + def quality_to_int(s): + m = re.search('([0-9]+)', s) + if m is None: + return -1 + return int(m.group(1)) + + entries = [] + for sd in sdata: + video_id = sd['id'] + formats = [{ + 'preference': -10 if fd['delivery'] == 'hls' else None, + 'format_id': '%s-%s-%s' % ( + fd['delivery'], fd['quality'], fd['quality_string']), + 'url': fd['src'], + 'protocol': fd['protocol'], + 'quality': quality_to_int(fd['quality']), + }
for fd in sd['playlist_item_array']['sources']] + + # Check for geoblocking. + # There is a property is_geoprotection, but that's always false + geo_str = sd.get('geoprotection_string') + if geo_str: + try: + http_url = next( + f['url'] + for f in formats + if re.match(r'^https?://.*\.mp4$', f['url'])) + except StopIteration: + pass + else: + req = HEADRequest(http_url) + self._request_webpage( + req, video_id, + note='Testing for geoblocking', + errnote=(( + 'This video seems to be blocked outside of %s. ' + 'You may want to try the streaming-* formats.') + % geo_str), + fatal=False) + + self._sort_formats(formats) + + upload_date = unified_strdate(sd['created_date']) + entries.append({ + '_type': 'video', + 'id': video_id, + 'title': sd['header'], + 'formats': formats, + 'description': sd.get('description'), + 'duration': int(sd['duration_in_seconds']), + 'upload_date': upload_date, + 'thumbnail': sd.get('image_full_url'), + }) + + return { + '_type': 'playlist', + 'entries': entries, + 'id': playlist_id, + } + + +# Audios on ORF radio are only available for 7 days, so we can't add tests. 
+ + +class ORFOE1IE(InfoExtractor): + IE_NAME = 'orf:oe1' + IE_DESC = 'Radio Österreich 1' + _VALID_URL = r'http://oe1\.orf\.at/programm/(?P<id>[0-9]+)' + + def _real_extract(self, url): + show_id = self._match_id(url) + data = self._download_json( + 'http://oe1.orf.at/programm/%s/konsole' % show_id, + show_id + ) + + timestamp = datetime.datetime.strptime('%s %s' % ( + data['item']['day_label'], + data['item']['time'] + ), '%d.%m.%Y %H:%M') + unix_timestamp = calendar.timegm(timestamp.utctimetuple()) + + return { + 'id': show_id, + 'title': data['item']['title'], + 'url': data['item']['url_stream'], + 'ext': 'mp3', + 'description': data['item'].get('info'), + 'timestamp': unix_timestamp + } + + +class ORFFM4IE(InfoExtractor): + IE_NAME = 'orf:fm4' + IE_DESC = 'radio FM4' + _VALID_URL = r'http://fm4\.orf\.at/7tage/?#(?P<date>[0-9]+)/(?P<show>\w+)' + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + show_date = mobj.group('date') + show_id = mobj.group('show') + + data = self._download_json( + 'http://audioapi.orf.at/fm4/json/2.0/broadcasts/%s/4%s' % (show_date, show_id), + show_id + ) + + def extract_entry_dict(info, title, subtitle): + return { + 'id': info['loopStreamId'].replace('.mp3', ''), + 'url': 'http://loopstream01.apa.at/?channel=fm4&id=%s' % info['loopStreamId'], + 'title': title, + 'description': subtitle, + 'duration': (info['end'] - info['start']) / 1000, + 'timestamp': info['start'] / 1000, + 'ext': 'mp3' + } + + entries = [extract_entry_dict(t, data['title'], data['subtitle']) for t in data['streams']] + + return { + '_type': 'playlist', + 'id': show_id, + 'title': data['title'], + 'description': data['subtitle'], + 'entries': entries + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/parliamentliveuk.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/parliamentliveuk.py new file mode 100644 index 0000000000..0a423a08f0 --- /dev/null +++
b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/parliamentliveuk.py @@ -0,0 +1,53 @@ +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor + + +class ParliamentLiveUKIE(InfoExtractor): + IE_NAME = 'parliamentlive.tv' + IE_DESC = 'UK parliament videos' + _VALID_URL = r'https?://www\.parliamentlive\.tv/Main/Player\.aspx\?(?:[^&]+&)*?meetingId=(?P<id>[0-9]+)' + + _TEST = { + 'url': 'http://www.parliamentlive.tv/Main/Player.aspx?meetingId=15121&player=windowsmedia', + 'info_dict': { + 'id': '15121', + 'ext': 'asf', + 'title': 'hoc home affairs committee, 18 mar 2014.pm', + 'description': 'md5:033b3acdf83304cd43946b2d5e5798d1', + }, + 'params': { + 'skip_download': True, # Requires mplayer (mms) + } + } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group('id') + webpage = self._download_webpage(url, video_id) + + asx_url = self._html_search_regex( + r'embed.*?src="([^"]+)" name="MediaPlayer"', webpage, + 'metadata URL') + asx = self._download_xml(asx_url, video_id, 'Downloading ASX metadata') + video_url = asx.find('.//REF').attrib['HREF'] + + title = self._search_regex( + r'''(?x)player\.setClipDetails\( + (?:(?:[0-9]+|"[^"]+"),\s*){2} + "([^"]+",\s*"[^"]+)" + ''', + webpage, 'title').replace('", "', ', ') + description = self._html_search_regex( + r'(?s)<span id="MainContentPlaceHolder_CaptionsBlock_WitnessInfo">(.*?)</span>', + webpage, 'description') + + return { + 'id': video_id, + 'ext': 'asf', + 'url': video_url, + 'title': title, + 'description': description, + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/patreon.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/patreon.py new file mode 100644 index 0000000000..5429592a75 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/patreon.py @@ -0,0 +1,100 @@ +# encoding: utf-8 +from __future__ import unicode_literals + 
+import json +import re + +from .common import InfoExtractor +from ..utils import ( + js_to_json, +) + + +class PatreonIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?patreon\.com/creation\?hid=(.+)' + _TESTS = [ + { + 'url': 'http://www.patreon.com/creation?hid=743933', + 'md5': 'e25505eec1053a6e6813b8ed369875cc', + 'info_dict': { + 'id': '743933', + 'ext': 'mp3', + 'title': 'Episode 166: David Smalley of Dogma Debate', + 'uploader': 'Cognitive Dissonance Podcast', + 'thumbnail': 're:^https?://.*$', + }, + }, + { + 'url': 'http://www.patreon.com/creation?hid=754133', + 'md5': '3eb09345bf44bf60451b8b0b81759d0a', + 'info_dict': { + 'id': '754133', + 'ext': 'mp3', + 'title': 'CD 167 Extra', + 'uploader': 'Cognitive Dissonance Podcast', + 'thumbnail': 're:^https?://.*$', + }, + }, + ] + + # Currently Patreon exposes download URL via hidden CSS, so login is not + # needed. Keeping this commented for when this inevitably changes. + ''' + def _login(self): + (username, password) = self._get_login_info() + if username is None: + return + + login_form = { + 'redirectUrl': 'http://www.patreon.com/', + 'email': username, + 'password': password, + } + + request = compat_urllib_request.Request( + 'https://www.patreon.com/processLogin', + compat_urllib_parse.urlencode(login_form).encode('utf-8') + ) + login_page = self._download_webpage(request, None, note='Logging in as %s' % username) + + if re.search(r'onLoginFailed', login_page): + raise ExtractorError('Unable to login, incorrect username and/or password', expected=True) + + def _real_initialize(self): + self._login() + ''' + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group(1) + + webpage = self._download_webpage(url, video_id) + title = self._og_search_title(webpage).strip() + + attach_fn = self._html_search_regex( + r'<div class="attach"><a target="_blank" href="([^"]+)">', + webpage, 'attachment URL', default=None) + if attach_fn is not None: + video_url = 
'http://www.patreon.com' + attach_fn + thumbnail = self._og_search_thumbnail(webpage) + uploader = self._html_search_regex( + r'<strong>(.*?)</strong> is creating', webpage, 'uploader') + else: + playlist_js = self._search_regex( + r'(?s)new\s+jPlayerPlaylist\(\s*\{\s*[^}]*},\s*(\[.*?,?\s*\])', + webpage, 'playlist JSON') + playlist_json = js_to_json(playlist_js) + playlist = json.loads(playlist_json) + data = playlist[0] + video_url = self._proto_relative_url(data['mp3']) + thumbnail = self._proto_relative_url(data.get('cover')) + uploader = data.get('artist') + + return { + 'id': video_id, + 'url': video_url, + 'ext': 'mp3', + 'title': title, + 'uploader': uploader, + 'thumbnail': thumbnail, + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/pbs.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/pbs.py new file mode 100644 index 0000000000..6118ed5c20 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/pbs.py @@ -0,0 +1,170 @@ +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor +from ..utils import ( + unified_strdate, + US_RATINGS, +) + + +class PBSIE(InfoExtractor): + _VALID_URL = r'''(?x)https?:// + (?: + # Direct video URL + video\.pbs\.org/(?:viralplayer|video)/(?P<id>[0-9]+)/? 
| + # Article with embedded player (or direct video) + (?:www\.)?pbs\.org/(?:[^/]+/){2,5}(?P<presumptive_id>[^/]+?)(?:\.html)?/?(?:$|[?\#]) | + # Player + video\.pbs\.org/(?:widget/)?partnerplayer/(?P<player_id>[^/]+)/ + ) + ''' + + _TESTS = [ + { + 'url': 'http://www.pbs.org/tpt/constitution-usa-peter-sagal/watch/a-more-perfect-union/', + 'md5': 'ce1888486f0908d555a8093cac9a7362', + 'info_dict': { + 'id': '2365006249', + 'ext': 'mp4', + 'title': 'A More Perfect Union', + 'description': 'md5:ba0c207295339c8d6eced00b7c363c6a', + 'duration': 3190, + }, + }, + { + 'url': 'http://www.pbs.org/wgbh/pages/frontline/losing-iraq/', + 'md5': '143c98aa54a346738a3d78f54c925321', + 'info_dict': { + 'id': '2365297690', + 'ext': 'mp4', + 'title': 'Losing Iraq', + 'description': 'md5:f5bfbefadf421e8bb8647602011caf8e', + 'duration': 5050, + }, + }, + { + 'url': 'http://www.pbs.org/newshour/bb/education-jan-june12-cyberschools_02-23/', + 'md5': 'b19856d7f5351b17a5ab1dc6a64be633', + 'info_dict': { + 'id': '2201174722', + 'ext': 'mp4', + 'title': 'Cyber Schools Gain Popularity, but Quality Questions Persist', + 'description': 'md5:5871c15cba347c1b3d28ac47a73c7c28', + 'duration': 801, + }, + }, + { + 'url': 'http://www.pbs.org/wnet/gperf/dudamel-conducts-verdi-requiem-hollywood-bowl-full-episode/3374/', + 'md5': 'c62859342be2a0358d6c9eb306595978', + 'info_dict': { + 'id': '2365297708', + 'ext': 'mp4', + 'description': 'md5:68d87ef760660eb564455eb30ca464fe', + 'title': 'Dudamel Conducts Verdi Requiem at the Hollywood Bowl - Full', + 'duration': 6559, + 'thumbnail': 're:^https?://.*\.jpg$', + } + }, + { + 'url': 'http://www.pbs.org/wgbh/nova/earth/killer-typhoon.html', + 'md5': '908f3e5473a693b266b84e25e1cf9703', + 'info_dict': { + 'id': '2365160389', + 'display_id': 'killer-typhoon', + 'ext': 'mp4', + 'description': 'md5:c741d14e979fc53228c575894094f157', + 'title': 'Killer Typhoon', + 'duration': 3172, + 'thumbnail': 're:^https?://.*\.jpg$', + 'upload_date': '20140122', + } + }, + { + 
'url': 'http://www.pbs.org/wgbh/pages/frontline/united-states-of-secrets/', + 'info_dict': { + 'id': 'united-states-of-secrets', + }, + 'playlist_count': 2, + } + ] + + def _extract_webpage(self, url): + mobj = re.match(self._VALID_URL, url) + + presumptive_id = mobj.group('presumptive_id') + display_id = presumptive_id + if presumptive_id: + webpage = self._download_webpage(url, display_id) + + upload_date = unified_strdate(self._search_regex( + r'<input type="hidden" id="air_date_[0-9]+" value="([^"]+)"', + webpage, 'upload date', default=None)) + + # tabbed frontline videos + tabbed_videos = re.findall( + r'<div[^>]+class="videotab[^"]*"[^>]+vid="(\d+)"', webpage) + if tabbed_videos: + return tabbed_videos, presumptive_id, upload_date + + MEDIA_ID_REGEXES = [ + r"div\s*:\s*'videoembed'\s*,\s*mediaid\s*:\s*'(\d+)'", # frontline video embed + r'class="coveplayerid">([^<]+)<', # coveplayer + r'<input type="hidden" id="pbs_video_id_[0-9]+" value="([0-9]+)"/>', # jwplayer + ] + + media_id = self._search_regex( + MEDIA_ID_REGEXES, webpage, 'media ID', fatal=False, default=None) + if media_id: + return media_id, presumptive_id, upload_date + + url = self._search_regex( + r'<iframe\s+(?:class|id)=["\']partnerPlayer["\'].*?\s+src=["\'](.*?)["\']>', + webpage, 'player URL') + mobj = re.match(self._VALID_URL, url) + + player_id = mobj.group('player_id') + if not display_id: + display_id = player_id + if player_id: + player_page = self._download_webpage( + url, display_id, note='Downloading player page', + errnote='Could not download player page') + video_id = self._search_regex( + r'<div\s+id="video_([0-9]+)"', player_page, 'video ID') + else: + video_id = mobj.group('id') + display_id = video_id + + return video_id, display_id, None + + def _real_extract(self, url): + video_id, display_id, upload_date = self._extract_webpage(url) + + if isinstance(video_id, list): + entries = [self.url_result( + 'http://video.pbs.org/video/%s' % vid_id, 'PBS', vid_id) + for vid_id in 
video_id] + return self.playlist_result(entries, display_id) + + info_url = 'http://video.pbs.org/videoInfo/%s?format=json' % video_id + info = self._download_json(info_url, display_id) + + rating_str = info.get('rating') + if rating_str is not None: + rating_str = rating_str.rpartition('-')[2] + age_limit = US_RATINGS.get(rating_str) + + return { + 'id': video_id, + 'display_id': display_id, + 'title': info['title'], + 'url': info['alternate_encoding']['url'], + 'ext': 'mp4', + 'description': info['program'].get('description'), + 'thumbnail': info.get('image_url'), + 'duration': info.get('duration'), + 'age_limit': age_limit, + 'upload_date': upload_date, + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/phoenix.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/phoenix.py new file mode 100644 index 0000000000..a20672c0cc --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/phoenix.py @@ -0,0 +1,31 @@ +from __future__ import unicode_literals + +from .common import InfoExtractor +from .zdf import extract_from_xml_url + + +class PhoenixIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?phoenix\.de/content/(?P<id>[0-9]+)' + _TEST = { + 'url': 'http://www.phoenix.de/content/884301', + 'md5': 'ed249f045256150c92e72dbb70eadec6', + 'info_dict': { + 'id': '884301', + 'ext': 'mp4', + 'title': 'Michael Krons mit Hans-Werner Sinn', + 'description': 'Im Dialog - Sa. 
25.10.14, 00.00 - 00.35 Uhr', + 'upload_date': '20141025', + 'uploader': 'Im Dialog', + } + } + + def _real_extract(self, url): + video_id = self._match_id(url) + webpage = self._download_webpage(url, video_id) + + internal_id = self._search_regex( + r'<div class="phx_vod" id="phx_vod_([0-9]+)"', + webpage, 'internal video ID') + + api_url = 'http://www.phoenix.de/php/zdfplayer-v1.3/data/beitragsDetails.php?ak=web&id=%s' % internal_id + return extract_from_xml_url(self, video_id, api_url) diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/photobucket.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/photobucket.py new file mode 100644 index 0000000000..c66db3cdc8 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/photobucket.py @@ -0,0 +1,46 @@ +from __future__ import unicode_literals + +import json +import re + +from .common import InfoExtractor +from ..compat import compat_urllib_parse + + +class PhotobucketIE(InfoExtractor): + _VALID_URL = r'http://(?:[a-z0-9]+\.)?photobucket\.com/.*(([\?\&]current=)|_)(?P<id>.*)\.(?P<ext>(flv)|(mp4))' + _TEST = { + 'url': 'http://media.photobucket.com/user/rachaneronas/media/TiredofLinkBuildingTryBacklinkMyDomaincom_zpsc0c3b9fa.mp4.html?filters[term]=search&filters[primary]=videos&filters[secondary]=images&sort=1&o=0', + 'md5': '7dabfb92b0a31f6c16cebc0f8e60ff99', + 'info_dict': { + 'id': 'zpsc0c3b9fa', + 'ext': 'mp4', + 'timestamp': 1367669341, + 'upload_date': '20130504', + 'uploader': 'rachaneronas', + 'title': 'Tired of Link Building? 
Try BacklinkMyDomain.com!', + } + } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group('id') + video_extension = mobj.group('ext') + + webpage = self._download_webpage(url, video_id) + + # Extract URL, uploader, and title from webpage + self.report_extraction(video_id) + info_json = self._search_regex(r'Pb\.Data\.Shared\.put\(Pb\.Data\.Shared\.MEDIA, (.*?)\);', + webpage, 'info json') + info = json.loads(info_json) + url = compat_urllib_parse.unquote(self._html_search_regex(r'file=(.+\.mp4)', info['linkcodes']['html'], 'url')) + return { + 'id': video_id, + 'url': url, + 'uploader': info['username'], + 'timestamp': info['creationDate'], + 'title': info['title'], + 'ext': video_extension, + 'thumbnail': info['thumbUrl'], + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/planetaplay.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/planetaplay.py new file mode 100644 index 0000000000..596c621d75 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/planetaplay.py @@ -0,0 +1,60 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor +from ..utils import ExtractorError + + +class PlanetaPlayIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?planetaplay\.com/\?sng=(?P<id>[0-9]+)' + _API_URL = 'http://planetaplay.com/action/playlist/?sng={0:}' + _THUMBNAIL_URL = 'http://planetaplay.com/img/thumb/{thumb:}' + _TEST = { + 'url': 'http://planetaplay.com/?sng=3586', + 'md5': '9d569dceb7251a4e01355d5aea60f9db', + 'info_dict': { + 'id': '3586', + 'ext': 'flv', + 'title': 'md5:e829428ee28b1deed00de90de49d1da1', + } + } + + _SONG_FORMATS = { + 'lq': (0, 'http://www.planetaplay.com/videoplayback/{med_hash:}'), + 'hq': (1, 'http://www.planetaplay.com/videoplayback/hi/{med_hash:}'), + } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group('id') 
+ + response = self._download_json( + self._API_URL.format(video_id), video_id)['response'] + try: + data = response.get('data')[0] + except IndexError: + raise ExtractorError( + '%s: failed to get the playlist' % self.IE_NAME, expected=True) + + title = '{song_artists:} - {sng_name:}'.format(**data) + thumbnail = self._THUMBNAIL_URL.format(**data) + + formats = [] + for format_id, (quality, url_template) in self._SONG_FORMATS.items(): + formats.append({ + 'format_id': format_id, + 'url': url_template.format(**data), + 'quality': quality, + 'ext': 'flv', + }) + + self._sort_formats(formats) + + return { + 'id': video_id, + 'title': title, + 'formats': formats, + 'thumbnail': thumbnail, + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/played.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/played.py new file mode 100644 index 0000000000..449d4836c3 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/played.py @@ -0,0 +1,63 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import re +import os.path + +from .common import InfoExtractor +from ..compat import ( + compat_urllib_parse, + compat_urllib_request, +) +from ..utils import ( + ExtractorError, +) + + +class PlayedIE(InfoExtractor): + IE_NAME = 'played.to' + _VALID_URL = r'https?://(?:www\.)?played\.to/(?P<id>[a-zA-Z0-9_-]+)' + + _TEST = { + 'url': 'http://played.to/j2f2sfiiukgt', + 'md5': 'c2bd75a368e82980e7257bf500c00637', + 'info_dict': { + 'id': 'j2f2sfiiukgt', + 'ext': 'flv', + 'title': 'youtube-dl_test_video.mp4', + }, + } + + def _real_extract(self, url): + video_id = self._match_id(url) + orig_webpage = self._download_webpage(url, video_id) + + m_error = re.search( + r'(?s)Reason for deletion:.*?<b class="err"[^>]*>(?P<msg>[^<]+)</b>', orig_webpage) + if m_error: + raise ExtractorError(m_error.group('msg'), expected=True) + + fields = re.findall( + r'type="hidden" 
name="([^"]+)"\s+value="([^"]+)">', orig_webpage) + data = dict(fields) + + self._sleep(2, video_id) + + post = compat_urllib_parse.urlencode(data) + headers = { + b'Content-Type': b'application/x-www-form-urlencoded', + } + req = compat_urllib_request.Request(url, post, headers) + webpage = self._download_webpage( + req, video_id, note='Downloading video page ...') + + title = os.path.splitext(data['fname'])[0] + + video_url = self._search_regex( + r'file: "?(.+?)",', webpage, 'video URL') + + return { + 'id': video_id, + 'title': title, + 'url': video_url, + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/playfm.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/playfm.py new file mode 100644 index 0000000000..9576aed0e6 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/playfm.py @@ -0,0 +1,88 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor +from ..compat import ( + compat_urllib_parse, + compat_urllib_request, +) +from ..utils import ( + ExtractorError, + float_or_none, + int_or_none, + str_to_int, +) + + +class PlayFMIE(InfoExtractor): + IE_NAME = 'play.fm' + _VALID_URL = r'https?://(?:www\.)?play\.fm/[^?#]*(?P<upload_date>[0-9]{8})(?P<id>[0-9]{6})(?:$|[?#])' + + _TEST = { + 'url': 'http://www.play.fm/recording/leipzigelectronicmusicbatofarparis_fr20140712137220', + 'md5': 'c505f8307825a245d0c7ad1850001f22', + 'info_dict': { + 'id': '137220', + 'ext': 'mp3', + 'title': 'LEIPZIG ELECTRONIC MUSIC @ Batofar (Paris,FR) - 2014-07-12', + 'uploader': 'Sven Tasnadi', + 'uploader_id': 'sventasnadi', + 'duration': 5627.428, + 'upload_date': '20140712', + 'view_count': int, + 'comment_count': int, + 'thumbnail': 're:^https?://.*\.jpg$', + }, + } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group('id') + upload_date = mobj.group('upload_date') + + rec_data = 
compat_urllib_parse.urlencode({'rec_id': video_id}) + req = compat_urllib_request.Request( + 'http://www.play.fm/flexRead/recording', data=rec_data) + req.add_header('Content-Type', 'application/x-www-form-urlencoded') + rec_doc = self._download_xml(req, video_id) + + error_node = rec_doc.find('./error') + if error_node is not None: + raise ExtractorError('An error occured: %s (code %s)' % ( + error_node.text, rec_doc.find('./status').text)) + + recording = rec_doc.find('./recording') + title = recording.find('./title').text + view_count = str_to_int(recording.find('./stats/playcount').text) + comment_count = str_to_int(recording.find('./stats/comments').text) + duration = float_or_none(recording.find('./duration').text, scale=1000) + thumbnail = recording.find('./image').text + + artist = recording.find('./artists/artist') + uploader = artist.find('./name').text + uploader_id = artist.find('./slug').text + + video_url = '%s//%s/%s/%s/offset/0/sh/%s/rec/%s/jingle/%s/loc/%s' % ( + 'http:', recording.find('./url').text, + recording.find('./_class').text, recording.find('./file_id').text, + rec_doc.find('./uuid').text, video_id, + rec_doc.find('./jingle/file_id').text, + 'http%3A%2F%2Fwww.play.fm%2Fplayer', + ) + + return { + 'id': video_id, + 'url': video_url, + 'ext': 'mp3', + 'filesize': int_or_none(recording.find('./size').text), + 'title': title, + 'upload_date': upload_date, + 'view_count': view_count, + 'comment_count': comment_count, + 'duration': duration, + 'thumbnail': thumbnail, + 'uploader': uploader, + 'uploader_id': uploader_id, + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/playvid.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/playvid.py new file mode 100644 index 0000000000..c3e667e9e7 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/playvid.py @@ -0,0 +1,87 @@ +from __future__ import unicode_literals + +import re + +from .common import 
InfoExtractor +from ..compat import ( + compat_urllib_parse, +) +from ..utils import ( + clean_html, + ExtractorError, +) + + +class PlayvidIE(InfoExtractor): + _VALID_URL = r'https?://www\.playvid\.com/watch(\?v=|/)(?P<id>.+?)(?:#|$)' + _TEST = { + 'url': 'http://www.playvid.com/watch/RnmBNgtrrJu', + 'md5': 'ffa2f6b2119af359f544388d8c01eb6c', + 'info_dict': { + 'id': 'RnmBNgtrrJu', + 'ext': 'mp4', + 'title': 'md5:9256d01c6317e3f703848b5906880dc8', + 'duration': 82, + 'age_limit': 18, + } + } + + def _real_extract(self, url): + video_id = self._match_id(url) + webpage = self._download_webpage(url, video_id) + + m_error = re.search( + r'<div class="block-error">\s*<div class="heading">\s*<div>(?P<msg>.+?)</div>\s*</div>', webpage) + if m_error: + raise ExtractorError(clean_html(m_error.group('msg')), expected=True) + + video_title = None + duration = None + video_thumbnail = None + formats = [] + + # most of the information is stored in the flashvars + flashvars = self._html_search_regex( + r'flashvars="(.+?)"', webpage, 'flashvars') + + infos = compat_urllib_parse.unquote(flashvars).split(r'&') + for info in infos: + videovars_match = re.match(r'^video_vars\[(.+?)\]=(.+?)$', info) + if videovars_match: + key = videovars_match.group(1) + val = videovars_match.group(2) + + if key == 'title': + video_title = compat_urllib_parse.unquote_plus(val) + if key == 'duration': + try: + duration = int(val) + except ValueError: + pass + if key == 'big_thumb': + video_thumbnail = val + + videourl_match = re.match( + r'^video_urls\]\[(?P<resolution>[0-9]+)p', key) + if videourl_match: + height = int(videourl_match.group('resolution')) + formats.append({ + 'height': height, + 'url': val, + }) + self._sort_formats(formats) + + # Extract title - should be in the flashvars; if not, look elsewhere + if video_title is None: + video_title = self._html_search_regex( + r'<title>(.*?)</title', webpage, 'title') + + return { + 'id': video_id, + 'formats': formats, + 'title': video_title, + 
'thumbnail': video_thumbnail, + 'duration': duration, + 'description': None, + 'age_limit': 18 + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/podomatic.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/podomatic.py new file mode 100644 index 0000000000..f20946a2bd --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/podomatic.py @@ -0,0 +1,69 @@ +from __future__ import unicode_literals + +import json +import re + +from .common import InfoExtractor +from ..utils import int_or_none + + +class PodomaticIE(InfoExtractor): + IE_NAME = 'podomatic' + _VALID_URL = r'^(?P<proto>https?)://(?P<channel>[^.]+)\.podomatic\.com/entry/(?P<id>[^?]+)' + + _TESTS = [ + { + 'url': 'http://scienceteachingtips.podomatic.com/entry/2009-01-02T16_03_35-08_00', + 'md5': '84bb855fcf3429e6bf72460e1eed782d', + 'info_dict': { + 'id': '2009-01-02T16_03_35-08_00', + 'ext': 'mp3', + 'uploader': 'Science Teaching Tips', + 'uploader_id': 'scienceteachingtips', + 'title': '64. 
When the Moon Hits Your Eye', + 'duration': 446, + } + }, + { + 'url': 'http://ostbahnhof.podomatic.com/entry/2013-11-15T16_31_21-08_00', + 'md5': 'd2cf443931b6148e27638650e2638297', + 'info_dict': { + 'id': '2013-11-15T16_31_21-08_00', + 'ext': 'mp3', + 'uploader': 'Ostbahnhof / Techno Mix', + 'uploader_id': 'ostbahnhof', + 'title': 'Einunddreizig', + 'duration': 3799, + } + }, + ] + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group('id') + channel = mobj.group('channel') + + json_url = (('%s://%s.podomatic.com/entry/embed_params/%s' + + '?permalink=true&rtmp=0') % + (mobj.group('proto'), channel, video_id)) + data_json = self._download_webpage( + json_url, video_id, 'Downloading video info') + data = json.loads(data_json) + + video_url = data['downloadLink'] + if not video_url: + video_url = '%s/%s' % (data['streamer'].replace('rtmp', 'http'), data['mediaLocation']) + uploader = data['podcast'] + title = data['title'] + thumbnail = data['imageLocation'] + duration = int_or_none(data.get('length'), 1000) + + return { + 'id': video_id, + 'url': video_url, + 'title': title, + 'uploader': uploader, + 'uploader_id': channel, + 'thumbnail': thumbnail, + 'duration': duration, + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/pornhd.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/pornhd.py new file mode 100644 index 0000000000..954dfccb75 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/pornhd.py @@ -0,0 +1,70 @@ +from __future__ import unicode_literals + +import re +import json + +from .common import InfoExtractor +from ..utils import ( + int_or_none, + js_to_json, + qualities, +) + + +class PornHdIE(InfoExtractor): + _VALID_URL = r'http://(?:www\.)?pornhd\.com/(?:[a-z]{2,4}/)?videos/(?P<id>\d+)(?:/(?P<display_id>.+))?' 
+ _TEST = { + 'url': 'http://www.pornhd.com/videos/1962/sierra-day-gets-his-cum-all-over-herself-hd-porn-video', + 'md5': '956b8ca569f7f4d8ec563e2c41598441', + 'info_dict': { + 'id': '1962', + 'display_id': 'sierra-day-gets-his-cum-all-over-herself-hd-porn-video', + 'ext': 'mp4', + 'title': 'Sierra loves doing laundry', + 'description': 'md5:8ff0523848ac2b8f9b065ba781ccf294', + 'thumbnail': 're:^https?://.*\.jpg', + 'view_count': int, + 'age_limit': 18, + } + } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group('id') + display_id = mobj.group('display_id') + + webpage = self._download_webpage(url, display_id or video_id) + + title = self._html_search_regex( + r'<title>(.+) porn HD.+?', webpage, 'title') + description = self._html_search_regex( + r'
    ([^<]+)
    ', webpage, 'description', fatal=False) + view_count = int_or_none(self._html_search_regex( + r'(\d+) views\s*', webpage, 'view count', fatal=False)) + thumbnail = self._search_regex( + r"'poster'\s*:\s*'([^']+)'", webpage, 'thumbnail', fatal=False) + + quality = qualities(['sd', 'hd']) + sources = json.loads(js_to_json(self._search_regex( + r"(?s)'sources'\s*:\s*(\{.+?\})\s*\}\);", webpage, 'sources'))) + formats = [] + for container, s in sources.items(): + for qname, video_url in s.items(): + formats.append({ + 'url': video_url, + 'container': container, + 'format_id': '%s-%s' % (container, qname), + 'quality': quality(qname), + }) + self._sort_formats(formats) + + return { + 'id': video_id, + 'display_id': display_id, + 'title': title, + 'description': description, + 'thumbnail': thumbnail, + 'view_count': view_count, + 'formats': formats, + 'age_limit': 18, + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/pornhub.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/pornhub.py new file mode 100644 index 0000000000..634142d0d2 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/pornhub.py @@ -0,0 +1,102 @@ +from __future__ import unicode_literals + +import os +import re + +from .common import InfoExtractor +from ..compat import ( + compat_urllib_parse, + compat_urllib_parse_urlparse, + compat_urllib_request, +) +from ..utils import ( + str_to_int, +) +from ..aes import ( + aes_decrypt_text +) + + +class PornHubIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?pornhub\.com/view_video\.php\?viewkey=(?P[0-9a-f]+)' + _TEST = { + 'url': 'http://www.pornhub.com/view_video.php?viewkey=648719015', + 'md5': '882f488fa1f0026f023f33576004a2ed', + 'info_dict': { + 'id': '648719015', + 'ext': 'mp4', + "uploader": "Babes", + "title": "Seductive Indian beauty strips down and fingers her pink pussy", + "age_limit": 18 + } + } + + def _extract_count(self, pattern, webpage, name): 
+ count = self._html_search_regex(pattern, webpage, '%s count' % name, fatal=False) + if count: + count = str_to_int(count) + return count + + def _real_extract(self, url): + video_id = self._match_id(url) + + req = compat_urllib_request.Request(url) + req.add_header('Cookie', 'age_verified=1') + webpage = self._download_webpage(req, video_id) + + video_title = self._html_search_regex(r'

    ]+>([^<]+)', webpage, 'title') + video_uploader = self._html_search_regex( + r'(?s)From: .+?<(?:a href="/users/|a href="/channels/|([\d,\.]+) views', webpage, 'view') + like_count = self._extract_count(r'([\d,\.]+)', webpage, 'like') + dislike_count = self._extract_count(r'([\d,\.]+)', webpage, 'dislike') + comment_count = self._extract_count( + r'All comments \(([\d,\.]+)', webpage, 'comment') + + video_urls = list(map(compat_urllib_parse.unquote, re.findall(r'"quality_[0-9]{3}p":"([^"]+)', webpage))) + if webpage.find('"encrypted":true') != -1: + password = compat_urllib_parse.unquote_plus(self._html_search_regex(r'"video_title":"([^"]+)', webpage, 'password')) + video_urls = list(map(lambda s: aes_decrypt_text(s, password, 32).decode('utf-8'), video_urls)) + + formats = [] + for video_url in video_urls: + path = compat_urllib_parse_urlparse(video_url).path + extension = os.path.splitext(path)[1][1:] + format = path.split('/')[5].split('_')[:2] + format = "-".join(format) + + m = re.match(r'^(?P[0-9]+)P-(?P[0-9]+)K$', format) + if m is None: + height = None + tbr = None + else: + height = int(m.group('height')) + tbr = int(m.group('tbr')) + + formats.append({ + 'url': video_url, + 'ext': extension, + 'format': format, + 'format_id': format, + 'tbr': tbr, + 'height': height, + }) + self._sort_formats(formats) + + return { + 'id': video_id, + 'uploader': video_uploader, + 'title': video_title, + 'thumbnail': thumbnail, + 'view_count': view_count, + 'like_count': like_count, + 'dislike_count': dislike_count, + 'comment_count': comment_count, + 'formats': formats, + 'age_limit': 18, + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/pornotube.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/pornotube.py new file mode 100644 index 0000000000..34735c51e1 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/pornotube.py @@ -0,0 +1,94 @@ +from __future__ import 
class PornotubeIE(InfoExtractor):
    # Named group <id> restored: _match_id() looks up group 'id', and the
    # bare '(?P[0-9]+)' form is not even a valid regular expression.
    _VALID_URL = r'https?://(?:\w+\.)?pornotube\.com/(?:[^?#]*?)/video/(?P<id>[0-9]+)'
    _TEST = {
        'url': 'http://www.pornotube.com/orientation/straight/video/4964/title/weird-hot-and-wet-science',
        'md5': '60fc5a4f0d93a97968fc7999d98260c9',
        'info_dict': {
            'id': '4964',
            'ext': 'mp4',
            'upload_date': '20141203',
            'title': 'Weird Hot and Wet Science',
            'description': 'md5:a8304bef7ef06cb4ab476ca6029b01b0',
            'categories': ['Adult Humor', 'Blondes'],
            'uploader': 'Alpha Blue Archives',
            'thumbnail': 're:^https?://.*\\.jpg$',
            'timestamp': 1417582800,
            'age_limit': 18,
        }
    }

    def _real_extract(self, url):
        """Extract a clip via the api.aebn.net auth/delivery/content services.

        Flow: scrape an auth key from the site's JS config, trade it for a
        bearer token, then use the token for the delivery (stream URL) and
        content (metadata) endpoints.
        """
        video_id = self._match_id(url)

        # Fetch origin token
        js_config = self._download_webpage(
            'http://www.pornotube.com/assets/src/app/config.js', video_id,
            note='Download JS config')
        originAuthenticationSpaceKey = self._search_regex(
            r"constant\('originAuthenticationSpaceKey',\s*'([^']+)'",
            js_config, 'originAuthenticationSpaceKey')

        # Fetch actual token
        token_req_data = {
            'authenticationSpaceKey': originAuthenticationSpaceKey,
            'credentials': 'Clip Application',
        }
        token_req = compat_urllib_request.Request(
            'https://api.aebn.net/auth/v1/token/primal',
            data=json.dumps(token_req_data).encode('utf-8'))
        token_req.add_header('Content-Type', 'application/json')
        token_req.add_header('Origin', 'http://www.pornotube.com')
        token_answer = self._download_json(
            token_req, video_id, note='Requesting primal token')
        token = token_answer['tokenKey']

        # Get video URL
        delivery_req = compat_urllib_request.Request(
            'https://api.aebn.net/delivery/v1/clips/%s/MP4' % video_id)
        delivery_req.add_header('Authorization', token)
        delivery_info = self._download_json(
            delivery_req, video_id, note='Downloading delivery information')
        video_url = delivery_info['mediaUrl']

        # Get additional info (title etc.)
        info_req = compat_urllib_request.Request(
            'https://api.aebn.net/content/v1/clips/%s?expand='
            'title,description,primaryImageNumber,startSecond,endSecond,'
            'movie.title,movie.MovieId,movie.boxCoverFront,movie.stars,'
            'movie.studios,stars.name,studios.name,categories.name,'
            'clipActive,movieActive,publishDate,orientations' % video_id)
        info_req.add_header('Authorization', token)
        info = self._download_json(
            info_req, video_id, note='Downloading metadata')

        # publishDate is in milliseconds since the epoch.
        timestamp = int_or_none(info.get('publishDate'), scale=1000)
        # 'studios' may be missing OR an empty list; the old
        # info.get('studios', [{}])[0] raised IndexError on [].
        uploader = (info.get('studios') or [{}])[0].get('name')
        movie_id = info['movie']['movieId']
        thumbnail = 'http://pic.aebn.net/dis/t/%s/%s_%08d.jpg' % (
            movie_id, movie_id, info['primaryImageNumber'])
        # 'categories' may be absent entirely; avoid iterating None.
        categories = [c['name'] for c in info.get('categories') or []]

        return {
            'id': video_id,
            'url': video_url,
            'title': info['title'],
            'description': info.get('description'),
            'timestamp': timestamp,
            'uploader': uploader,
            'thumbnail': thumbnail,
            'categories': categories,
            'age_limit': 18,
        }
class PromptFileIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?promptfile\.com/l/(?P<id>[0-9A-Z\-]+)'
    _TEST = {
        'url': 'http://www.promptfile.com/l/D21B4746E9-F01462F0FF',
        'md5': 'd1451b6302da7215485837aaea882c4c',
        'info_dict': {
            'id': 'D21B4746E9-F01462F0FF',
            'ext': 'mp4',
            'title': 'Birds.mp4',
            'thumbnail': 're:^https?://.*\.jpg$',
        }
    }

    def _real_extract(self, url):
        """Resolve a promptfile.com link by replaying the page's hidden form,
        then scraping the actual stream URL from the resulting player page."""
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        # A dedicated "not found" box means the file has been removed.
        if re.search(r'<div.+id="not_found_msg".+>(?!We are).+</div>[^-]', webpage) is not None:
            raise ExtractorError('Video %s does not exist' % video_id,
                                 expected=True)

        # Collect every hidden input and POST it back to reach the player page.
        hidden_fields = dict(re.findall(r'''(?x)type="hidden"\s+
            name="(.+?)"\s+
            value="(.*?)"
            ''', webpage))
        form_body = compat_urllib_parse.urlencode(hidden_fields)
        form_req = compat_urllib_request.Request(url, form_body)
        form_req.add_header('Content-type', 'application/x-www-form-urlencoded')
        webpage = self._download_webpage(
            form_req, video_id, 'Downloading video page')

        video_url = self._html_search_regex(r'url:\s*\'([^\']+)\'', webpage, 'URL')
        title = self._html_search_regex(
            r'<span.+title="([^"]+)">', webpage, 'title')
        thumbnail = self._html_search_regex(
            r'<div id="player_overlay">.*button>.*?<img src="([^"]+)"',
            webpage, 'thumbnail', fatal=False, flags=re.DOTALL)

        # Only one quality is ever offered; extension comes from the file name.
        formats = [{
            'format_id': 'sd',
            'url': video_url,
            'ext': determine_ext(title),
        }]
        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': title,
            'thumbnail': thumbnail,
            'formats': formats,
        }
class ProSiebenSat1IE(InfoExtractor):
    """Extractor for the ProSiebenSat.1 family of sites (prosieben.de,
    prosiebenmaxx.de, sixx.de, sat1.de, kabeleins.de, ran.de,
    the-voice-of-germany.de, fem.com).

    A page is either a single clip or a playlist; ``_real_extract`` scrapes
    the page type and dispatches accordingly.
    """
    IE_NAME = 'prosiebensat1'
    IE_DESC = 'ProSiebenSat.1 Digital'
    _VALID_URL = r'https?://(?:www\.)?(?:(?:prosieben|prosiebenmaxx|sixx|sat1|kabeleins|ran|the-voice-of-germany)\.de|fem\.com)/(?P<id>.+)'

    _TESTS = [
        {
            'url': 'http://www.prosieben.de/tv/circus-halligalli/videos/218-staffel-2-episode-18-jahresrueckblick-ganze-folge',
            'info_dict': {
                'id': '2104602',
                'ext': 'mp4',
                'title': 'Staffel 2, Episode 18 - Jahresrц╪ckblick',
                'description': 'md5:8733c81b702ea472e069bc48bb658fc1',
                'upload_date': '20131231',
                'duration': 5845.04,
            },
            'params': {
                # rtmp download
                'skip_download': True,
            },
        },
        {
            'url': 'http://www.prosieben.de/videokatalog/Gesellschaft/Leben/Trends/video-Lady-Umstyling-f%C3%BCr-Audrina-Rebekka-Audrina-Fergen-billig-aussehen-Battal-Modica-700544.html',
            'info_dict': {
                'id': '2570327',
                'ext': 'mp4',
                'title': 'Lady-Umstyling fц╪r Audrina',
                'description': 'md5:4c16d0c17a3461a0d43ea4084e96319d',
                'upload_date': '20131014',
                'duration': 606.76,
            },
            'params': {
                # rtmp download
                'skip_download': True,
            },
            'skip': 'Seems to be broken',
        },
        {
            'url': 'http://www.prosiebenmaxx.de/tv/experience/video/144-countdown-fuer-die-autowerkstatt-ganze-folge',
            'info_dict': {
                'id': '2429369',
                'ext': 'mp4',
                'title': 'Countdown fц╪r die Autowerkstatt',
                'description': 'md5:809fc051a457b5d8666013bc40698817',
                'upload_date': '20140223',
                'duration': 2595.04,
            },
            'params': {
                # rtmp download
                'skip_download': True,
            },
        },
        {
            'url': 'http://www.sixx.de/stars-style/video/sexy-laufen-in-ugg-boots-clip',
            'info_dict': {
                'id': '2904997',
                'ext': 'mp4',
                'title': 'Sexy laufen in Ugg Boots',
                'description': 'md5:edf42b8bd5bc4e5da4db4222c5acb7d6',
                'upload_date': '20140122',
                'duration': 245.32,
            },
            'params': {
                # rtmp download
                'skip_download': True,
            },
        },
        {
            'url': 'http://www.sat1.de/film/der-ruecktritt/video/im-interview-kai-wiesinger-clip',
            'info_dict': {
                'id': '2906572',
                'ext': 'mp4',
                'title': 'Im Interview: Kai Wiesinger',
                'description': 'md5:e4e5370652ec63b95023e914190b4eb9',
                'upload_date': '20140203',
                'duration': 522.56,
            },
            'params': {
                # rtmp download
                'skip_download': True,
            },
        },
        {
            'url': 'http://www.kabeleins.de/tv/rosins-restaurants/videos/jagd-auf-fertigkost-im-elsthal-teil-2-ganze-folge',
            'info_dict': {
                'id': '2992323',
                'ext': 'mp4',
                'title': 'Jagd auf Fertigkost im Elsthal - Teil 2',
                'description': 'md5:2669cde3febe9bce13904f701e774eb6',
                'upload_date': '20141014',
                'duration': 2410.44,
            },
            'params': {
                # rtmp download
                'skip_download': True,
            },
        },
        {
            'url': 'http://www.ran.de/fussball/bundesliga/video/schalke-toennies-moechte-raul-zurueck-ganze-folge',
            'info_dict': {
                'id': '3004256',
                'ext': 'mp4',
                'title': 'Schalke: Tц╤nnies mц╤chte Raul zurц╪ck',
                'description': 'md5:4b5b271d9bcde223b54390754c8ece3f',
                'upload_date': '20140226',
                'duration': 228.96,
            },
            'params': {
                # rtmp download
                'skip_download': True,
            },
        },
        {
            'url': 'http://www.the-voice-of-germany.de/video/31-andreas-kuemmert-rocket-man-clip',
            'info_dict': {
                'id': '2572814',
                'ext': 'mp4',
                'title': 'Andreas Kц╪mmert: Rocket Man',
                'description': 'md5:6ddb02b0781c6adf778afea606652e38',
                'upload_date': '20131017',
                'duration': 469.88,
            },
            'params': {
                # rtmp download
                'skip_download': True,
            },
        },
        {
            'url': 'http://www.fem.com/wellness/videos/wellness-video-clip-kurztripps-zum-valentinstag.html',
            'info_dict': {
                'id': '2156342',
                'ext': 'mp4',
                'title': 'Kurztrips zum Valentinstag',
                'description': 'Romantischer Kurztrip zum Valentinstag? Wir verraten, was sich hier wirklich lohnt.',
                'duration': 307.24,
            },
            'params': {
                # rtmp download
                'skip_download': True,
            },
        },
        {
            'url': 'http://www.prosieben.de/tv/joko-gegen-klaas/videos/playlists/episode-8-ganze-folge-playlist',
            'info_dict': {
                'id': '439664',
                'title': 'Episode 8 - Ganze Folge - Playlist',
                'description': 'md5:63b8963e71f481782aeea877658dec84',
            },
            'playlist_count': 2,
        },
    ]

    # The member sites use several different page layouts, so every scrape
    # below tries a list of alternative regexes in order (first match wins).
    _CLIPID_REGEXES = [
        r'"clip_id"\s*:\s+"(\d+)"',
        r'clipid: "(\d+)"',
        r'clip[iI]d=(\d+)',
        r"'itemImageUrl'\s*:\s*'/dynamic/thumbnails/full/\d+/(\d+)",
    ]
    _TITLE_REGEXES = [
        r'<h2 class="subtitle" itemprop="name">\s*(.+?)</h2>',
        r'<header class="clearfix">\s*<h3>(.+?)</h3>',
        r'<!-- start video -->\s*<h1>(.+?)</h1>',
        r'<h1 class="att-name">\s*(.+?)</h1>',
    ]
    _DESCRIPTION_REGEXES = [
        r'<p itemprop="description">\s*(.+?)</p>',
        r'<div class="videoDecription">\s*<p><strong>Beschreibung</strong>: (.+?)</p>',
        r'<div class="g-plusone" data-size="medium"></div>\s*</div>\s*</header>\s*(.+?)\s*<footer>',
        r'<p class="att-description">\s*(.+?)\s*</p>',
    ]
    _UPLOAD_DATE_REGEXES = [
        r'<meta property="og:published_time" content="(.+?)">',
        r'<span>\s*(\d{2}\.\d{2}\.\d{4} \d{2}:\d{2}) \|\s*<span itemprop="duration"',
        r'<footer>\s*(\d{2}\.\d{2}\.\d{4}) \d{2}:\d{2} Uhr',
        r'<span style="padding-left: 4px;line-height:20px; color:#404040">(\d{2}\.\d{2}\.\d{4})</span>',
        r'(\d{2}\.\d{2}\.\d{4}) \| \d{2}:\d{2} Min<br/>',
    ]
    _PAGE_TYPE_REGEXES = [
        r'<meta name="page_type" content="([^"]+)">',
        r"'itemType'\s*:\s*'([^']*)'",
    ]
    _PLAYLIST_ID_REGEXES = [
        r'content[iI]d=(\d+)',
        r"'itemId'\s*:\s*'([^']*)'",
    ]
    _PLAYLIST_CLIP_REGEXES = [
        r'(?s)data-qvt=.+?<a href="([^"]+)"',
    ]

    def _extract_clip(self, url, webpage):
        """Resolve a single clip through the vas.sim-technik.de API.

        The API requires three sequential calls (videos -> sources -> url);
        the second and third are signed with a SHA1-based client_id derived
        from the shared secret ``g`` below.
        """
        clip_id = self._html_search_regex(self._CLIPID_REGEXES, webpage, 'clip id')

        access_token = 'testclient'
        client_name = 'kolibri-1.2.5'
        client_location = url

        videos_api_url = 'http://vas.sim-technik.de/vas/live/v2/videos?%s' % compat_urllib_parse.urlencode({
            'access_token': access_token,
            'client_location': client_location,
            'client_name': client_name,
            'ids': clip_id,
        })

        videos = self._download_json(videos_api_url, clip_id, 'Downloading videos JSON')

        duration = float(videos[0]['duration'])
        source_ids = [source['id'] for source in videos[0]['sources']]
        source_ids_str = ','.join(map(str, source_ids))

        # Shared secret used to sign the sources/url requests below.
        g = '01!8d8F_)r9]4s[qeuXfP%'

        client_id = g[:2] + sha1(''.join([clip_id, g, access_token, client_location, g, client_name])
                                 .encode('utf-8')).hexdigest()

        sources_api_url = 'http://vas.sim-technik.de/vas/live/v2/videos/%s/sources?%s' % (clip_id, compat_urllib_parse.urlencode({
            'access_token': access_token,
            'client_id': client_id,
            'client_location': client_location,
            'client_name': client_name,
        }))

        sources = self._download_json(sources_api_url, clip_id, 'Downloading sources JSON')
        server_id = sources['server_id']

        # The url endpoint is signed over a different field order/content.
        client_id = g[:2] + sha1(''.join([g, clip_id, access_token, server_id,
                                          client_location, source_ids_str, g, client_name])
                                 .encode('utf-8')).hexdigest()

        url_api_url = 'http://vas.sim-technik.de/vas/live/v2/videos/%s/sources/url?%s' % (clip_id, compat_urllib_parse.urlencode({
            'access_token': access_token,
            'client_id': client_id,
            'client_location': client_location,
            'client_name': client_name,
            'server_id': server_id,
            'source_ids': source_ids_str,
        }))

        urls = self._download_json(url_api_url, clip_id, 'Downloading urls JSON')

        title = self._html_search_regex(self._TITLE_REGEXES, webpage, 'title')
        description = self._html_search_regex(self._DESCRIPTION_REGEXES, webpage, 'description', fatal=False)
        thumbnail = self._og_search_thumbnail(webpage)

        upload_date = unified_strdate(self._html_search_regex(
            self._UPLOAD_DATE_REGEXES, webpage, 'upload date', default=None))

        formats = []

        urls_sources = urls['sources']
        # Some responses key the sources by id instead of listing them.
        if isinstance(urls_sources, dict):
            urls_sources = urls_sources.values()

        def fix_bitrate(bitrate):
            # API reports some bitrates in bit/s instead of kbit/s.
            return (bitrate // 1000) if bitrate % 1000 == 0 else bitrate

        for source in urls_sources:
            protocol = source['protocol']
            if protocol == 'rtmp' or protocol == 'rtmpe':
                # Split rtmp://host/app/playpath into the pieces rtmpdump needs.
                mobj = re.search(r'^(?P<url>rtmpe?://[^/]+/(?P<app>[^/]+))/(?P<playpath>.+)$', source['url'])
                if not mobj:
                    continue
                formats.append({
                    'url': mobj.group('url'),
                    'app': mobj.group('app'),
                    'play_path': mobj.group('playpath'),
                    'player_url': 'http://livepassdl.conviva.com/hf/ver/2.79.0.17083/LivePassModuleMain.swf',
                    'page_url': 'http://www.prosieben.de',
                    'vbr': fix_bitrate(source['bitrate']),
                    'ext': 'mp4',
                    'format_id': '%s_%s' % (source['cdn'], source['bitrate']),
                })
            else:
                formats.append({
                    'url': source['url'],
                    'vbr': fix_bitrate(source['bitrate']),
                })

        self._sort_formats(formats)

        return {
            'id': clip_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'upload_date': upload_date,
            'duration': duration,
            'formats': formats,
        }

    def _extract_playlist(self, url, webpage):
        """Build a playlist result from the clip links embedded in the page."""
        playlist_id = self._html_search_regex(
            self._PLAYLIST_ID_REGEXES, webpage, 'playlist id')
        for regex in self._PLAYLIST_CLIP_REGEXES:
            playlist_clips = re.findall(regex, webpage)
            if playlist_clips:
                title = self._html_search_regex(
                    self._TITLE_REGEXES, webpage, 'title')
                description = self._html_search_regex(
                    self._DESCRIPTION_REGEXES, webpage, 'description', fatal=False)
                # Clip paths are site-relative; prefix scheme://host from url.
                entries = [
                    self.url_result(
                        re.match('(.+?//.+?)/', url).group(1) + clip_path,
                        'ProSiebenSat1')
                    for clip_path in playlist_clips]
                return self.playlist_result(entries, playlist_id, title, description)

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        page_type = self._search_regex(
            self._PAGE_TYPE_REGEXES, webpage,
            'page type', default='clip').lower()
        if page_type == 'clip':
            return self._extract_clip(url, webpage)
        elif page_type == 'playlist':
            return self._extract_playlist(url, webpage)
        # NOTE(review): any other page type falls through and returns None.
class QuickVidIE(InfoExtractor):
    _VALID_URL = r'https?://(www\.)?quickvid\.org/watch\.php\?v=(?P<id>[a-zA-Z_0-9-]+)'
    _TEST = {
        'url': 'http://quickvid.org/watch.php?v=sUQT3RCG8dx',
        'md5': 'c0c72dd473f260c06c808a05d19acdc5',
        'info_dict': {
            'id': 'sUQT3RCG8dx',
            'ext': 'mp4',
            'title': 'Nick Offerman\'s Summer Reading Recap',
            'thumbnail': 're:^https?://.*\.(?:png|jpg|gif)$',
            'view_count': int,
        },
    }

    def _real_extract(self, url):
        """Scrape the page's <video> element and collect its <source> URLs."""
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        title = self._html_search_regex(r'<h2>(.*?)</h2>', webpage, 'title')
        view_count = int_or_none(self._html_search_regex(
            r'(?s)<div id="views">(.*?)</div>',
            webpage, 'view count', fatal=False))
        # Grab the raw inner HTML of the player element first …
        video_code = self._search_regex(
            r'(?s)<video id="video"[^>]*>(.*?)</video>', webpage, 'video code')

        # … then turn every <source> into a format; relative src values are
        # resolved against the page URL.
        formats = []
        for src in re.findall('<source\s+src="([^"]+)"', video_code):
            formats.append({
                'url': compat_urlparse.urljoin(url, src),
                'format_id': determine_ext(src, None),
            })
        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': title,
            'formats': formats,
            'thumbnail': self._og_search_thumbnail(webpage),
            'view_count': view_count,
        }
class RadioDeIE(InfoExtractor):
    IE_NAME = 'radio.de'
    _VALID_URL = r'https?://(?P<id>.+?)\.(?:radio\.(?:de|at|fr|pt|es|pl|it)|rad\.io)'
    _TEST = {
        'url': 'http://ndr2.radio.de/',
        'md5': '3b4cdd011bc59174596b6145cda474a4',
        'info_dict': {
            'id': 'ndr2',
            'ext': 'mp3',
            'title': 're:^NDR 2 [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
            'description': 'md5:591c49c702db1a33751625ebfb67f273',
            'thumbnail': 're:^https?://.*\.png',
        },
        'params': {
            'skip_download': True,
        }
    }

    def _real_extract(self, url):
        """Extract the live streams advertised in the station page's inline
        broadcast JSON (embedded as a JS object literal)."""
        radio_id = self._match_id(url)
        webpage = self._download_webpage(url, radio_id)

        broadcast = json.loads(self._search_regex(
            r'_getBroadcast\s*=\s*function\(\s*\)\s*{\s*return\s+({.+?})\s*;\s*}',
            webpage, 'broadcast'))

        # One format per advertised stream URL.
        formats = []
        for stream in broadcast['streamUrls']:
            formats.append({
                'url': stream['streamUrl'],
                'ext': stream['streamContentFormat'].lower(),
                'acodec': stream['streamContentFormat'],
                'abr': stream['bitRate'],
                'asr': stream['sampleRate'],
            })
        self._sort_formats(formats)

        return {
            'id': radio_id,
            # Live stream: title carries the current timestamp.
            'title': self._live_title(broadcast['name']),
            'description': broadcast.get('description') or broadcast.get('shortDescription'),
            'thumbnail': broadcast.get('picture4Url') or broadcast.get('picture4TransUrl'),
            'is_live': True,
            'formats': formats,
        }
class RadioFranceIE(InfoExtractor):
    _VALID_URL = r'^https?://maison\.radiofrance\.fr/radiovisions/(?P<id>[^?#]+)'
    IE_NAME = 'radiofrance'

    _TEST = {
        'url': 'http://maison.radiofrance.fr/radiovisions/one-one',
        'md5': 'bdbb28ace95ed0e04faab32ba3160daf',
        'info_dict': {
            'id': 'one-one',
            'ext': 'ogg',
            "title": "One to one",
            "description": "Plutц╢t que d'imaginer la radio de demain comme technologie ou comme crц╘ation de contenu, je veux montrer que quelles que soient ses ц╘volutions, j'ai l'intime conviction que la radio continuera d'ц╙tre un grand mц╘dia de proximitц╘ pour les auditeurs.",
            "uploader": "Thomas Hercouц╚t",
        },
    }

    def _real_extract(self, url):
        """Extract a RadioVisions episode and its per-format audio sources."""
        video_id = self._match_id(url)

        webpage = self._download_webpage(url, video_id)
        title = self._html_search_regex(r'<h1>(.*?)</h1>', webpage, 'title')
        description = self._html_search_regex(
            r'<div class="bloc_page_wrapper"><div class="text">(.*?)</div>',
            webpage, 'description', fatal=False)
        uploader = self._html_search_regex(
            r'<div class="credit">  © (.*?)</div>',
            webpage, 'uploader', fatal=False)

        # data-source holds a JS-style map of format-id -> URL, e.g.
        # ogg: 'http://…', mp3: 'http://…'
        formats_str = self._html_search_regex(
            r'class="jp-jplayer[^"]*" data-source="([^"]+)">',
            webpage, 'audio URLs')
        formats = []
        for index, (fmt_id, fmt_url) in enumerate(
                re.findall(r"([a-z0-9]+)\s*:\s*'([^']+)'", formats_str)):
            formats.append({
                'format_id': fmt_id,
                'url': fmt_url,
                'vcodec': 'none',
                # Preserve the page's own ordering of formats.
                'preference': index,
            })
        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': title,
            'formats': formats,
            'description': description,
            'uploader': uploader,
        }
class RaiIE(SubtitlesInfoExtractor):
    # <url> captures the whole page URL (reused below to hit the '?json' API);
    # <id> is the UUID embedded in the page name.
    _VALID_URL = r'(?P<url>http://(?:.+?\.)?(?:rai\.it|rai\.tv|rainews\.it)/dl/.+?-(?P<id>[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12})(?:-.+?)?\.html)'
    _TESTS = [
        {
            'url': 'http://www.rai.tv/dl/RaiTV/programmi/media/ContentItem-cb27157f-9dd0-4aee-b788-b1f67643a391.html',
            'md5': 'c064c0b2d09c278fb293116ef5d0a32d',
            'info_dict': {
                'id': 'cb27157f-9dd0-4aee-b788-b1f67643a391',
                'ext': 'mp4',
                'title': 'Report del 07/04/2014',
                'description': 'md5:f27c544694cacb46a078db84ec35d2d9',
                'upload_date': '20140407',
                'duration': 6160,
            }
        },
        {
            'url': 'http://www.raisport.rai.it/dl/raiSport/media/rassegna-stampa-04a9f4bd-b563-40cf-82a6-aad3529cb4a9.html',
            'md5': '8bb9c151924ce241b74dd52ef29ceafa',
            'info_dict': {
                'id': '04a9f4bd-b563-40cf-82a6-aad3529cb4a9',
                'ext': 'mp4',
                'title': 'TG PRIMO TEMPO',
                'description': '',
                'upload_date': '20140612',
                'duration': 1758,
            },
            'skip': 'Error 404',
        },
        {
            'url': 'http://www.rainews.it/dl/rainews/media/state-of-the-net-Antonella-La-Carpia-regole-virali-7aafdea9-0e5d-49d5-88a6-7e65da67ae13.html',
            'md5': '35cf7c229f22eeef43e48b5cf923bef0',
            'info_dict': {
                'id': '7aafdea9-0e5d-49d5-88a6-7e65da67ae13',
                'ext': 'mp4',
                'title': 'State of the Net, Antonella La Carpia: regole virali',
                'description': 'md5:b0ba04a324126903e3da7763272ae63c',
                'upload_date': '20140613',
            },
            'skip': 'Error 404',
        },
        {
            'url': 'http://www.rai.tv/dl/RaiTV/programmi/media/ContentItem-b4a49761-e0cc-4b14-8736-2729f6f73132-tg2.html',
            'md5': '35694f062977fe6619943f08ed935730',
            'info_dict': {
                'id': 'b4a49761-e0cc-4b14-8736-2729f6f73132',
                'ext': 'mp4',
                'title': 'Alluvione in Sardegna e dissesto idrogeologico',
                'description': 'Edizione delle ore 20:30 ',
            }
        },
    ]

    def _real_extract(self, url):
        """Fetch clip metadata from the page's '?json' API and build formats
        from the handful of known media-URL keys."""
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')

        # Appending '?json' to the page URL returns the media description.
        media = self._download_json('%s?json' % mobj.group('url'), video_id, 'Downloading video JSON')

        title = media.get('name')
        description = media.get('desc')
        thumbnail = media.get('image_300') or media.get('image_medium') or media.get('image')
        duration = parse_duration(media.get('length'))
        uploader = media.get('author')
        upload_date = unified_strdate(media.get('date'))

        formats = []

        # Each of these keys may hold a direct media URL for that delivery type.
        for format_id in ['wmv', 'm3u8', 'mediaUri', 'h264']:
            media_url = media.get(format_id)
            if not media_url:
                continue
            formats.append({
                'url': media_url,
                'format_id': format_id,
                'ext': 'mp4',
            })

        # --listsubtitles: print the available subtitles and stop.
        if self._downloader.params.get('listsubtitles', False):
            page = self._download_webpage(url, video_id)
            self._list_available_subtitles(video_id, page)
            return

        # Subtitles live in the HTML page, not the JSON; only fetch the page
        # when subtitles were actually requested.
        subtitles = {}
        if self._have_to_download_any_subtitles:
            page = self._download_webpage(url, video_id)
            subtitles = self.extract_subtitles(video_id, page)

        return {
            'id': video_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'uploader': uploader,
            'upload_date': upload_date,
            'duration': duration,
            'formats': formats,
            'subtitles': subtitles,
        }

    def _get_available_subtitles(self, video_id, webpage):
        """Return {'it': url} for the closed-caption file, swapping the .stl
        extension for the .srt variant the site also serves."""
        subtitles = {}
        m = re.search(r'<meta name="closedcaption" content="(?P<captions>[^"]+)"', webpage)
        if m:
            captions = m.group('captions')
            STL_EXT = '.stl'
            SRT_EXT = '.srt'
            if captions.endswith(STL_EXT):
                captions = captions[:-len(STL_EXT)] + SRT_EXT
            subtitles['it'] = 'http://www.rai.tv%s' % compat_urllib_parse.quote(captions)
        return subtitles
class RBMARadioIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?rbmaradio\.com/shows/(?P<videoID>[^/]+)$'
    _TEST = {
        'url': 'http://www.rbmaradio.com/shows/ford-lopatin-live-at-primavera-sound-2011',
        'md5': '6bc6f9bcb18994b4c983bc3bf4384d95',
        'info_dict': {
            'id': 'ford-lopatin-live-at-primavera-sound-2011',
            'ext': 'mp3',
            "uploader_id": "ford-lopatin",
            "location": "Spain",
            "description": "Joel Ford and Daniel Б─≥Oneohtrix Point NeverБ─≥ Lopatin fly their midified pop extravaganza to Spain. Live at Primavera Sound 2011.",
            "uploader": "Ford & Lopatin",
            "title": "Live at Primavera Sound 2011",
        },
    }

    def _real_extract(self, url):
        """Parse the show metadata embedded in the page's gon.show JS object."""
        video_id = re.match(self._VALID_URL, url).group('videoID')

        webpage = self._download_webpage(url, video_id)

        raw_json = self._search_regex(r'window\.gon.*?gon\.show=(.+?);$',
                                      webpage, 'json data', flags=re.MULTILINE)
        try:
            show = json.loads(raw_json)
        except ValueError as e:
            raise ExtractorError('Invalid JSON: ' + str(e))

        host = show.get('host', {})
        image = show.get('image', {})

        return {
            'id': video_id,
            # Force the 256 kbps variant of the Akamai stream.
            'url': show['akamai_url'] + '&cbr=256',
            'title': show['title'],
            'description': show.get('teaser_text'),
            'location': show.get('country_of_origin'),
            'uploader': host.get('name'),
            'uploader_id': host.get('slug'),
            'thumbnail': image.get('large_url_2x'),
            'duration': show.get('duration'),
        }
# --- redtube.py ---
from __future__ import unicode_literals

from .common import InfoExtractor


class RedTubeIE(InfoExtractor):
    """Extractor for redtube.com numeric video pages (always adult content)."""
    _VALID_URL = r'http://(?:www\.)?redtube\.com/(?P<id>[0-9]+)'
    _TEST = {
        'url': 'http://www.redtube.com/66418',
        'info_dict': {
            'id': '66418',
            'ext': 'mp4',
            "title": "Sucked on a toilet",
            "age_limit": 18,
        }
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        # Direct MP4 URL is exposed in a <source> tag on the page.
        video_url = self._html_search_regex(
            r'<source src="(.+?)" type="video/mp4">', webpage, 'video URL')
        video_title = self._html_search_regex(
            r'<h1 class="videoTitle[^"]*">(.+?)</h1>',
            webpage, 'title')
        video_thumbnail = self._og_search_thumbnail(webpage)

        # No self-labeling, but they describe themselves as
        # "Home of Videos Porno"
        age_limit = 18

        return {
            'id': video_id,
            'url': video_url,
            'ext': 'mp4',
            'title': video_title,
            'thumbnail': video_thumbnail,
            'age_limit': age_limit,
        }


# --- restudy.py ---
# coding: utf-8
from .common import InfoExtractor


class RestudyIE(InfoExtractor):
    """Extractor for restudy.dk educational videos (formats come from a SMIL manifest)."""
    _VALID_URL = r'https?://(?:www\.)?restudy\.dk/video/play/id/(?P<id>[0-9]+)'
    _TEST = {
        'url': 'https://www.restudy.dk/video/play/id/1637',
        'info_dict': {
            'id': '1637',
            'ext': 'flv',
            'title': 'Leiden-frosteffekt',
            'description': 'Denne video er et eksperiment med flydende kvц╕lstof.',
        },
        'params': {
            # rtmp download
            'skip_download': True,
        }
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)

        webpage = self._download_webpage(url, video_id)

        title = self._og_search_title(webpage).strip()
        description = self._og_search_description(webpage).strip()

        # All stream variants are listed in a per-video SMIL document.
        formats = self._extract_smil_formats(
            'https://www.restudy.dk/awsmedia/SmilDirectory/video_%s.xml' % video_id,
            video_id)

        return {
            'id': video_id,
            'title': title,
            'description': description,
            'formats': formats,
        }


# --- reverbnation.py ---
import re

from ..utils import str_or_none


class ReverbNationIE(InfoExtractor):
    """Extractor for reverbnation.com songs via the public JSON API (audio only)."""
    _VALID_URL = r'^https?://(?:www\.)?reverbnation\.com/.*?/song/(?P<id>\d+).*?$'
    _TESTS = [{
        'url': 'http://www.reverbnation.com/alkilados/song/16965047-mona-lisa',
        'md5': '3da12ebca28c67c111a7f8b262d3f7a7',
        'info_dict': {
            "id": "16965047",
            "ext": "mp3",
            "title": "MONA LISA",
            "uploader": "ALKILADOS",
            "uploader_id": "216429",
            "thumbnail": "re:^https://gp1\.wac\.edgecastcdn\.net/.*?\.jpg$"
        },
    }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        song_id = mobj.group('id')

        api_res = self._download_json(
            'https://api.reverbnation.com/song/%s' % song_id,
            song_id,
            note='Downloading information of song %s' % song_id
        )

        return {
            'id': song_id,
            'title': api_res.get('name'),
            'url': api_res.get('url'),
            'uploader': api_res.get('artist', {}).get('name'),
            'uploader_id': str_or_none(api_res.get('artist', {}).get('id')),
            # API may return protocol-relative image URLs; normalize them.
            'thumbnail': self._proto_relative_url(
                api_res.get('image', api_res.get('thumbnail'))),
            'ext': 'mp3',
            # Audio-only stream.
            'vcodec': 'none',
        }
# --- ringtv.py ---
from __future__ import unicode_literals

import re

from .common import InfoExtractor


class RingTVIE(InfoExtractor):
    """Extractor for ringtv.craveonline.com boxing videos (Springboard platform)."""
    _VALID_URL = r'(?:http://)?(?:www\.)?ringtv\.craveonline\.com/(?P<type>news|videos/video)/(?P<id>[^/?#]+)'
    _TEST = {
        "url": "http://ringtv.craveonline.com/news/310833-luis-collazo-says-victor-ortiz-better-not-quit-on-jan-30",
        "file": "857645.mp4",
        "md5": "d25945f5df41cdca2d2587165ac28720",
        "info_dict": {
            "title": 'Video: Luis Collazo says Victor Ortiz "better not quit on Jan. 30" - Ring TV',
            "description": 'Luis Collazo is excited about his Jan. 30 showdown with fellow former welterweight titleholder Victor Ortiz at Barclays Center in his hometown of Brooklyn. The SuperBowl week fight headlines a Golden Boy Live! card on Fox Sports 1.',
        }
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        # The leading numeric part of the slug is the provisional video id.
        video_id = mobj.group('id').split('-')[0]
        webpage = self._download_webpage(url, video_id)

        if mobj.group('type') == 'news':
            # News pages embed a different video id inside a Springboard iframe.
            video_id = self._search_regex(
                r'''(?x)<iframe[^>]+src="http://cms\.springboardplatform\.com/
                        embed_iframe/[0-9]+/video/([0-9]+)/''',
                webpage, 'real video ID')
        title = self._og_search_title(webpage)
        description = self._html_search_regex(
            r'addthis:description="([^"]+)"',
            webpage, 'description', fatal=False)
        # Media and thumbnail URLs follow a fixed Springboard storage layout.
        final_url = "http://ringtv.craveonline.springboardplatform.com/storage/ringtv.craveonline.com/conversion/%s.mp4" % video_id
        thumbnail_url = "http://ringtv.craveonline.springboardplatform.com/storage/ringtv.craveonline.com/snapshots/%s.jpg" % video_id

        return {
            'id': video_id,
            'url': final_url,
            'title': title,
            'thumbnail': thumbnail_url,
            'description': description,
        }


# --- ro220.py ---
from ..compat import compat_urllib_parse_unquote


class Ro220IE(InfoExtractor):
    """Extractor for 220.ro videos (single SD MP4 pulled from inline player config)."""
    IE_NAME = '220.ro'
    _VALID_URL = r'(?x)(?:https?://)?(?:www\.)?220\.ro/(?P<category>[^/]+)/(?P<shorttitle>[^/]+)/(?P<id>[^/]+)'
    _TEST = {
        'url': 'http://www.220.ro/sport/Luati-Le-Banii-Sez-4-Ep-1/LYV6doKo7f/',
        'md5': '03af18b73a07b4088753930db7a34add',
        'info_dict': {
            'id': 'LYV6doKo7f',
            'ext': 'mp4',
            'title': 'Luati-le Banii sez 4 ep 1',
            'description': 're:^Iata-ne reveniti dupa o binemeritata vacanta\. +Va astept si pe Facebook cu pareri si comentarii.$',
        }
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)

        webpage = self._download_webpage(url, video_id)
        # The clip URL is percent-encoded inside the JS player configuration.
        url = compat_urllib_parse_unquote(self._search_regex(
            r'(?s)clip\s*:\s*{.*?url\s*:\s*\'([^\']+)\'', webpage, 'url'))
        title = self._og_search_title(webpage)
        description = self._og_search_description(webpage)
        thumbnail = self._og_search_thumbnail(webpage)

        formats = [{
            'format_id': 'sd',
            'url': url,
            'ext': 'mp4',
        }]

        return {
            'id': video_id,
            'formats': formats,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
        }


# --- rottentomatoes.py ---
from .videodetective import VideoDetectiveIE


# It just uses the same method as videodetective.com,
# the internetvideoarchive.com is extracted from the og:video property
class RottenTomatoesIE(VideoDetectiveIE):
    """Trailer pages on rottentomatoes.com; inherits all extraction logic."""
    _VALID_URL = r'https?://www\.rottentomatoes\.com/m/[^/]+/trailers/(?P<id>\d+)'

    _TEST = {
        'url': 'http://www.rottentomatoes.com/m/toy_story_3/trailers/11028566/',
        'file': '613340.mp4',
        'info_dict': {
            'title': 'TOY STORY 3',
            'description': 'From the creators of the beloved TOY STORY films, comes a story that will reunite the gang in a whole new way.',
        },
    }


# --- roxwel.py ---
from ..utils import unified_strdate, determine_ext


class RoxwelIE(InfoExtractor):
    """Extractor for roxwel.com music videos (best-rate RTMP stream)."""
    _VALID_URL = r'https?://www\.roxwel\.com/player/(?P<filename>.+?)(\.|\?|$)'

    _TEST = {
        'url': 'http://www.roxwel.com/player/passionpittakeawalklive.html',
        'info_dict': {
            'id': 'passionpittakeawalklive',
            'ext': 'flv',
            'title': 'Take A Walk (live)',
            'uploader': 'Passion Pit',
            'uploader_id': 'passionpit',
            'upload_date': '20120928',
            'description': 'Passion Pit performs "Take A Walk\" live at The Backyard in Austin, Texas. ',
        },
        'params': {
            # rtmp download
            'skip_download': True,
        }
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        filename = mobj.group('filename')
        info_url = 'http://www.roxwel.com/api/videos/%s' % filename
        info = self._download_json(info_url, filename)

        # Pick the highest-bitrate FLV rate advertised by the API.
        rtmp_rates = sorted([int(r.replace('flv_', '')) for r in info['media_rates'] if r.startswith('flv_')])
        best_rate = rtmp_rates[-1]
        url_page_url = 'http://roxwel.com/pl_one_time.php?filename=%s&quality=%s' % (filename, best_rate)
        rtmp_url = self._download_webpage(url_page_url, filename, 'Downloading video url')
        ext = determine_ext(rtmp_url)
        if ext == 'f4v':
            # f4v streams need an explicit mp4: play path prefix.
            rtmp_url = rtmp_url.replace(filename, 'mp4:%s' % filename)

        return {
            'id': filename,
            'title': info['title'],
            'url': rtmp_url,
            'ext': 'flv',
            'description': info['description'],
            'thumbnail': info.get('player_image_url') or info.get('image_url_large'),
            'uploader': info['artist'],
            'uploader_id': info['artistname'],
            'upload_date': unified_strdate(info['dbdate']),
        }


# --- rtbf.py ---
# coding: utf-8
import json


class RTBFIE(InfoExtractor):
    """Extractor for rtbf.be videos; delegates to YouTube when that is the provider."""
    _VALID_URL = r'https?://www.rtbf.be/video/[^\?]+\?id=(?P<id>\d+)'
    _TEST = {
        'url': 'https://www.rtbf.be/video/detail_les-diables-au-coeur-episode-2?id=1921274',
        'md5': '799f334ddf2c0a582ba80c44655be570',
        'info_dict': {
            'id': '1921274',
            'ext': 'mp4',
            'title': 'Les Diables au coeur (ц╘pisode 2)',
            'description': 'Football - Diables Rouges',
            'duration': 3099,
            'timestamp': 1398456336,
            'upload_date': '20140425',
        }
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')

        # The embed page carries the full metadata as a JSON data attribute.
        page = self._download_webpage('https://www.rtbf.be/video/embed?id=%s' % video_id, video_id)

        data = json.loads(self._html_search_regex(
            r'<div class="js-player-embed(?: player-embed)?" data-video="([^"]+)"', page, 'data video'))['data']

        video_url = data.get('downloadUrl') or data.get('url')

        if data['provider'].lower() == 'youtube':
            # Hosted externally; hand off to the YouTube extractor.
            return self.url_result(video_url, 'Youtube')

        return {
            'id': video_id,
            'url': video_url,
            'title': data['title'],
            'description': data.get('description') or data.get('subtitle'),
            'thumbnail': data['thumbnail']['large'],
            'duration': data.get('duration') or data.get('realDuration'),
            'timestamp': data['created'],
            'view_count': data['viewCount'],
        }


# --- rtlnl.py ---
from ..utils import parse_duration


class RtlXlIE(InfoExtractor):
    """Extractor for rtlxl.nl (RTL Nederland) videos via the s4m JSON feed."""
    IE_NAME = 'rtlxl.nl'
    _VALID_URL = r'https?://www\.rtlxl\.nl/#!/[^/]+/(?P<uuid>[^/?]+)'

    _TEST = {
        'url': 'http://www.rtlxl.nl/#!/rtl-nieuws-132237/6e4203a6-0a5e-3596-8424-c599a59e0677',
        'md5': 'cc16baa36a6c169391f0764fa6b16654',
        'info_dict': {
            'id': '6e4203a6-0a5e-3596-8424-c599a59e0677',
            'ext': 'mp4',
            'title': 'RTL Nieuws - Laat',
            'description': 'md5:6b61f66510c8889923b11f2778c72dc5',
            'timestamp': 1408051800,
            'upload_date': '20140814',
            'duration': 576.880,
        },
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        uuid = mobj.group('uuid')

        info = self._download_json(
            'http://www.rtl.nl/system/s4m/vfd/version=2/uuid=%s/fmt=flash/' % uuid,
            uuid)

        material = info['material'][0]
        episode_info = info['episodes'][0]

        progname = info['abstracts'][0]['name']
        subtitle = material['title'] or info['episodes'][0]['name']

        # Use unencrypted m3u8 streams (See https://github.com/rg3/youtube-dl/issues/4118)
        videopath = material['videopath'].replace('.f4m', '.m3u8')
        m3u8_url = 'http://manifest.us.rtl.nl' + videopath

        formats = self._extract_m3u8_formats(m3u8_url, uuid, ext='mp4')

        # Also offer the progressive MP4 mirrors derived from the flash path.
        video_urlpart = videopath.split('/flash/')[1][:-5]
        PG_URL_TEMPLATE = 'http://pg.us.rtl.nl/rtlxl/network/%s/progressive/%s.mp4'

        formats.extend([
            {
                'url': PG_URL_TEMPLATE % ('a2m', video_urlpart),
                'format_id': 'pg-sd',
            },
            {
                'url': PG_URL_TEMPLATE % ('a3m', video_urlpart),
                'format_id': 'pg-hd',
                'quality': 0,
            }
        ])

        self._sort_formats(formats)

        return {
            'id': uuid,
            'title': '%s - %s' % (progname, subtitle),
            'formats': formats,
            'timestamp': material['original_date'],
            'description': episode_info['synopsis'],
            'duration': parse_duration(material.get('duration')),
        }
# --- rtlnow.py ---
# encoding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    clean_html,
    unified_strdate,
    int_or_none,
)


class RTLnowIE(InfoExtractor):
    """Information Extractor for RTL NOW, RTL2 NOW, RTL NITRO, SUPER RTL NOW, VOX NOW and n-tv NOW"""
    _VALID_URL = r'''(?x)
                        (?:https?://)?
                        (?P<url>
                            (?P<domain>
                                rtl-now\.rtl\.de|
                                rtl2now\.rtl2\.de|
                                (?:www\.)?voxnow\.de|
                                (?:www\.)?rtlnitronow\.de|
                                (?:www\.)?superrtlnow\.de|
                                (?:www\.)?n-tvnow\.de)
                            /+[a-zA-Z0-9-]+/[a-zA-Z0-9-]+\.php\?
                            (?:container_id|film_id)=(?P<video_id>[0-9]+)&
                            player=1(?:&season=[0-9]+)?(?:&.*)?
                        )'''

    _TESTS = [
        {
            'url': 'http://rtl-now.rtl.de/ahornallee/folge-1.php?film_id=90419&player=1&season=1',
            'info_dict': {
                'id': '90419',
                'ext': 'flv',
                'title': 'Ahornallee - Folge 1 - Der Einzug',
                'description': 'md5:ce843b6b5901d9a7f7d04d1bbcdb12de',
                'upload_date': '20070416',
                'duration': 1685,
            },
            'params': {
                'skip_download': True,
            },
            'skip': 'Only works from Germany',
        },
        {
            'url': 'http://rtl2now.rtl2.de/aerger-im-revier/episode-15-teil-1.php?film_id=69756&player=1&season=2&index=5',
            'info_dict': {
                'id': '69756',
                'ext': 'flv',
                'title': 'ц└rger im Revier - Ein junger Ladendieb, ein handfester Streit u.a.',
                'description': 'md5:3fb247005ed21a935ffc82b7dfa70cf0',
                'thumbnail': 'http://autoimg.static-fra.de/rtl2now/219850/1500x1500/image2.jpg',
                'upload_date': '20120519',
                'duration': 1245,
            },
            'params': {
                'skip_download': True,
            },
            'skip': 'Only works from Germany',
        },
        {
            'url': 'http://www.voxnow.de/voxtours/suedafrika-reporter-ii.php?film_id=13883&player=1&season=17',
            'info_dict': {
                'id': '13883',
                'ext': 'flv',
                'title': 'Voxtours - Sц╪dafrika-Reporter II',
                'description': 'md5:de7f8d56be6fd4fed10f10f57786db00',
                'upload_date': '20090627',
                'duration': 1800,
            },
            'params': {
                'skip_download': True,
            },
        },
        {
            'url': 'http://superrtlnow.de/medicopter-117/angst.php?film_id=99205&player=1',
            'info_dict': {
                'id': '99205',
                'ext': 'flv',
                'title': 'Medicopter 117 - Angst!',
                'description': 're:^Im Therapiezentrum \'Sonnalm\' kommen durch eine Unachtsamkeit die fц╪r die B.handlung mit Phobikern gehaltenen Voglespinnen frei\. Eine Ausreiц÷erin',
                'thumbnail': 'http://autoimg.static-fra.de/superrtlnow/287529/1500x1500/image2.jpg',
                'upload_date': '20080928',
                'duration': 2691,
            },
            'params': {
                'skip_download': True,
            },
        },
        {
            'url': 'http://www.n-tvnow.de/deluxe-alles-was-spass-macht/thema-ua-luxushotel-fuer-vierbeiner.php?container_id=153819&player=1&season=0',
            'only_matching': True,
        },
    ]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_page_url = 'http://%s/' % mobj.group('domain')
        video_id = mobj.group('video_id')

        webpage = self._download_webpage('http://' + mobj.group('url'), video_id)

        # A specific error panel on the page means the video is unavailable.
        mobj = re.search(r'(?s)<div style="margin-left: 20px; font-size: 13px;">(.*?)<div id="playerteaser">', webpage)
        if mobj:
            raise ExtractorError(clean_html(mobj.group(1)), expected=True)

        title = self._og_search_title(webpage)
        description = self._og_search_description(webpage)
        thumbnail = self._og_search_thumbnail(webpage, default=None)

        upload_date = unified_strdate(self._html_search_meta('uploadDate', webpage, 'upload date'))

        # Duration is published as an ISO-8601-style PT<seconds>S meta tag.
        mobj = re.search(r'<meta itemprop="duration" content="PT(?P<seconds>\d+)S" />', webpage)
        duration = int(mobj.group('seconds')) if mobj else None

        playerdata_url = self._html_search_regex(
            r"'playerdata': '(?P<playerdata_url>[^']+)'", webpage, 'playerdata_url')

        playerdata = self._download_xml(playerdata_url, video_id, 'Downloading player data XML')

        videoinfo = playerdata.find('./playlist/videoinfo')

        formats = []
        for filename in videoinfo.findall('filename'):
            # rtmpe URLs are split into server part and play path; plain URLs pass through.
            mobj = re.search(r'(?P<url>rtmpe://(?:[^/]+/){2})(?P<play_path>.+)', filename.text)
            if mobj:
                fmt = {
                    'url': mobj.group('url'),
                    'play_path': 'mp4:' + mobj.group('play_path'),
                    'page_url': video_page_url,
                    'player_url': video_page_url + 'includes/vodplayer.swf',
                }
            else:
                fmt = {
                    'url': filename.text,
                }
            fmt.update({
                'width': int_or_none(filename.get('width')),
                'height': int_or_none(filename.get('height')),
                'vbr': int_or_none(filename.get('bitrate')),
                'ext': 'flv',
            })
            formats.append(fmt)

        return {
            'id': video_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'upload_date': upload_date,
            'duration': duration,
            'formats': formats,
        }


# --- rtp.py ---
# coding: utf-8
import json

from ..utils import js_to_json


class RTPIE(InfoExtractor):
    """Extractor for rtp.pt play pages (RTMP streams described by an inline JS player config)."""
    _VALID_URL = r'https?://(?:www\.)?rtp\.pt/play/p(?P<program_id>[0-9]+)/(?P<id>[^/?#]+)/?'
    _TESTS = [{
        'url': 'http://www.rtp.pt/play/p405/e174042/paixoes-cruzadas',
        'info_dict': {
            'id': 'e174042',
            'ext': 'mp3',
            'title': 'Paixц╣es Cruzadas',
            'description': 'As paixц╣es musicais de AntцЁnio Cartaxo e AntцЁnio Macedo',
            'thumbnail': 're:^https?://.*\.jpg',
        },
        'params': {
            'skip_download': True,  # RTMP download
        },
    }, {
        'url': 'http://www.rtp.pt/play/p831/a-quimica-das-coisas',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)

        webpage = self._download_webpage(url, video_id)
        title = self._html_search_meta(
            'twitter:title', webpage, display_name='title', fatal=True)
        description = self._html_search_meta('description', webpage)
        thumbnail = self._og_search_thumbnail(webpage)

        # Player config is JS object literal syntax; convert to strict JSON first.
        player_config = self._search_regex(
            r'(?s)RTPPLAY\.player\.newPlayer\(\s*(\{.*?\})\s*\)', webpage, 'player config')
        config = json.loads(js_to_json(player_config))

        path, ext = config.get('file').rsplit('.', 1)
        formats = [{
            'app': config.get('application'),
            'play_path': '{ext:s}:{path:s}'.format(ext=ext, path=path),
            'page_url': url,
            'url': 'rtmp://{streamer:s}/{application:s}'.format(**config),
            'rtmp_live': config.get('live', False),
            'ext': ext,
            # Audio-only entries carry no video codec.
            'vcodec': config.get('type') == 'audio' and 'none' or None,
            'player_url': 'http://programas.rtp.pt/play/player.swf?v3',
        }]

        return {
            'id': video_id,
            'title': title,
            'formats': formats,
            'description': description,
            'thumbnail': thumbnail,
        }


# --- rts.py ---
from ..compat import (
    compat_str,
)
from ..utils import (
    int_or_none,
    parse_duration,
    parse_iso8601,
    unescapeHTML,
)


class RTSIE(InfoExtractor):
    """Extractor for RTS.ch (Swiss public broadcaster) archive and play URLs."""
    IE_DESC = 'RTS.ch'
    _VALID_URL = r'https?://(?:www\.)?rts\.ch/(?:(?:[^/]+/){2,}(?P<id>[0-9]+)-(?P<display_id>.+?)\.html|play/tv/[^/]+/video/(?P<display_id_new>.+?)\?id=(?P<id_new>[0-9]+))'

    _TESTS = [
        {
            'url': 'http://www.rts.ch/archives/tv/divers/3449373-les-enfants-terribles.html',
            'md5': '753b877968ad8afaeddccc374d4256a5',
            'info_dict': {
                'id': '3449373',
                'display_id': 'les-enfants-terribles',
                'ext': 'mp4',
                'duration': 1488,
                'title': 'Les Enfants Terribles',
                'description': 'France Pommier et sa soeur Luce Feral, les deux filles de ce groupe de 5.',
                'uploader': 'Divers',
                'upload_date': '19680921',
                'timestamp': -40280400,
                'thumbnail': 're:^https?://.*\.image',
                'view_count': int,
            },
        },
        {
            'url': 'http://www.rts.ch/emissions/passe-moi-les-jumelles/5624067-entre-ciel-et-mer.html',
            'md5': 'c148457a27bdc9e5b1ffe081a7a8337b',
            'info_dict': {
                'id': '5624067',
                'display_id': 'entre-ciel-et-mer',
                'ext': 'mp4',
                'duration': 3720,
                'title': 'Les yeux dans les cieux - Mon homard au Canada',
                'description': 'md5:d22ee46f5cc5bac0912e5a0c6d44a9f7',
                'uploader': 'Passe-moi les jumelles',
                'upload_date': '20140404',
                'timestamp': 1396635300,
                'thumbnail': 're:^https?://.*\.image',
                'view_count': int,
            },
        },
        {
            'url': 'http://www.rts.ch/video/sport/hockey/5745975-1-2-kloten-fribourg-5-2-second-but-pour-gotteron-par-kwiatowski.html',
            'md5': 'b4326fecd3eb64a458ba73c73e91299d',
            'info_dict': {
                'id': '5745975',
                'display_id': '1-2-kloten-fribourg-5-2-second-but-pour-gotteron-par-kwiatowski',
                'ext': 'mp4',
                'duration': 48,
                'title': '1/2, Kloten - Fribourg (5-2): second but pour Gottц╘ron par Kwiatowski',
                'description': 'Hockey - Playoff',
                'uploader': 'Hockey',
                'upload_date': '20140403',
                'timestamp': 1396556882,
                'thumbnail': 're:^https?://.*\.image',
                'view_count': int,
            },
            'skip': 'Blocked outside Switzerland',
        },
        {
            'url': 'http://www.rts.ch/video/info/journal-continu/5745356-londres-cachee-par-un-epais-smog.html',
            'md5': '9bb06503773c07ce83d3cbd793cebb91',
            'info_dict': {
                'id': '5745356',
                'display_id': 'londres-cachee-par-un-epais-smog',
                'ext': 'mp4',
                'duration': 33,
                'title': 'Londres cachц╘e par un ц╘pais smog',
                'description': 'Un important voile de smog recouvre Londres depuis mercredi, provoquц╘ par la pollution et du sable du Sahara.',
                'uploader': 'Le Journal en continu',
                'upload_date': '20140403',
                'timestamp': 1396537322,
                'thumbnail': 're:^https?://.*\.image',
                'view_count': int,
            },
        },
        {
            'url': 'http://www.rts.ch/audio/couleur3/programmes/la-belle-video-de-stephane-laurenceau/5706148-urban-hippie-de-damien-krisl-03-04-2014.html',
            'md5': 'dd8ef6a22dff163d063e2a52bc8adcae',
            'info_dict': {
                'id': '5706148',
                'display_id': 'urban-hippie-de-damien-krisl-03-04-2014',
                'ext': 'mp3',
                'duration': 123,
                'title': '"Urban Hippie", de Damien Krisl',
                'description': 'Des Hippies super glam.',
                'upload_date': '20140403',
                'timestamp': 1396551600,
            },
        },
        {
            'url': 'http://www.rts.ch/play/tv/-/video/le-19h30?id=6348260',
            'md5': '968777c8779e5aa2434be96c54e19743',
            'info_dict': {
                'id': '6348260',
                'display_id': 'le-19h30',
                'ext': 'mp4',
                'duration': 1796,
                'title': 'Le 19h30',
                'description': '',
                'uploader': 'Le 19h30',
                'upload_date': '20141201',
                'timestamp': 1417458600,
                'thumbnail': 're:^https?://.*\.image',
                'view_count': int,
            },
        },
        {
            'url': 'http://www.rts.ch/play/tv/le-19h30/video/le-chantier-du-nouveau-parlement-vaudois-a-permis-une-trouvaille-historique?id=6348280',
            'only_matching': True,
        }
    ]

    def _real_extract(self, url):
        m = re.match(self._VALID_URL, url)
        video_id = m.group('id') or m.group('id_new')
        display_id = m.group('display_id') or m.group('display_id_new')

        def download_json(internal_id):
            # One JSON article document describes both video and audio items.
            return self._download_json(
                'http://www.rts.ch/a/%s.html?f=json/article' % internal_id,
                display_id)

        all_info = download_json(video_id)

        # video_id extracted out of URL is not always a real id
        if 'video' not in all_info and 'audio' not in all_info:
            page = self._download_webpage(url, display_id)
            internal_id = self._html_search_regex(
                r'<(?:video|audio) data-id="([0-9]+)"', page,
                'internal video id')
            all_info = download_json(internal_id)

        info = all_info['video']['JSONinfo'] if 'video' in all_info else all_info['audio']

        upload_timestamp = parse_iso8601(info.get('broadcast_date'))
        # Duration may appear under several keys and as either number or string.
        duration = info.get('duration') or info.get('cutout') or info.get('cutduration')
        if isinstance(duration, compat_str):
            duration = parse_duration(duration)
        view_count = info.get('plays')
        thumbnail = unescapeHTML(info.get('preview_image_url'))

        def extract_bitrate(url):
            # Bitrate is encoded in the URL as '-<kbps>k.'; absent means unknown.
            return int_or_none(self._search_regex(
                r'-([0-9]+)k\.', url, 'bitrate', default=None))

        formats = [{
            'format_id': fid,
            'url': furl,
            'tbr': extract_bitrate(furl),
        } for fid, furl in info['streams'].items()]

        if 'media' in info:
            # Additional progressive download mirrors.
            formats.extend([{
                'format_id': '%s-%sk' % (media['ext'], media['rate']),
                'url': 'http://download-video.rts.ch/%s' % media['url'],
                'tbr': media['rate'] or extract_bitrate(media['url']),
            } for media in info['media'] if media.get('rate')])

        self._sort_formats(formats)

        return {
            'id': video_id,
            'display_id': display_id,
            'formats': formats,
            'title': info['title'],
            'description': info.get('intro'),
            'duration': duration,
            'view_count': view_count,
            'uploader': info.get('programName'),
            'timestamp': upload_timestamp,
            'thumbnail': thumbnail,
        }
@@ -0,0 +1,141 @@ +# encoding: utf-8 +from __future__ import unicode_literals + +import base64 +import re +import time + +from .common import InfoExtractor +from ..utils import ( + struct_unpack, + remove_end, +) + + +def _decrypt_url(png): + encrypted_data = base64.b64decode(png) + text_index = encrypted_data.find(b'tEXt') + text_chunk = encrypted_data[text_index - 4:] + length = struct_unpack('!I', text_chunk[:4])[0] + # Use bytearray to get integers when iterating in both python 2.x and 3.x + data = bytearray(text_chunk[8:8 + length]) + data = [chr(b) for b in data if b != 0] + hash_index = data.index('#') + alphabet_data = data[:hash_index] + url_data = data[hash_index + 1:] + + alphabet = [] + e = 0 + d = 0 + for l in alphabet_data: + if d == 0: + alphabet.append(l) + d = e = (e + 1) % 4 + else: + d -= 1 + url = '' + f = 0 + e = 3 + b = 1 + for letter in url_data: + if f == 0: + l = int(letter) * 10 + f = 1 + else: + if e == 0: + l += int(letter) + url += alphabet[l] + e = (b + 3) % 4 + f = 0 + b += 1 + else: + e -= 1 + + return url + + +class RTVEALaCartaIE(InfoExtractor): + IE_NAME = 'rtve.es:alacarta' + IE_DESC = 'RTVE a la carta' + _VALID_URL = r'http://www\.rtve\.es/alacarta/videos/[^/]+/[^/]+/(?P<id>\d+)' + + _TESTS = [{ + 'url': 'http://www.rtve.es/alacarta/videos/balonmano/o-swiss-cup-masculina-final-espana-suecia/2491869/', + 'md5': '1d49b7e1ca7a7502c56a4bf1b60f1b43', + 'info_dict': { + 'id': '2491869', + 'ext': 'mp4', + 'title': 'Balonmano - Swiss Cup masculina. 
Final: Espaц╠a-Suecia', + }, + }, { + 'note': 'Live stream', + 'url': 'http://www.rtve.es/alacarta/videos/television/24h-live/1694255/', + 'info_dict': { + 'id': '1694255', + 'ext': 'flv', + 'title': 'TODO', + } + }] + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group('id') + info = self._download_json( + 'http://www.rtve.es/api/videos/%s/config/alacarta_videos.json' % video_id, + video_id)['page']['items'][0] + png_url = 'http://www.rtve.es/ztnr/movil/thumbnail/default/videos/%s.png' % video_id + png = self._download_webpage(png_url, video_id, 'Downloading url information') + video_url = _decrypt_url(png) + + return { + 'id': video_id, + 'title': info['title'], + 'url': video_url, + 'thumbnail': info.get('image'), + 'page_url': url, + } + + +class RTVELiveIE(InfoExtractor): + IE_NAME = 'rtve.es:live' + IE_DESC = 'RTVE.es live streams' + _VALID_URL = r'http://www\.rtve\.es/(?:deportes/directo|noticias|television)/(?P<id>[a-zA-Z0-9-]+)' + + _TESTS = [{ + 'url': 'http://www.rtve.es/noticias/directo-la-1/', + 'info_dict': { + 'id': 'directo-la-1', + 'ext': 'flv', + 'title': 're:^La 1 de TVE [0-9]{4}-[0-9]{2}-[0-9]{2}Z[0-9]{6}$', + }, + 'params': { + 'skip_download': 'live stream', + } + }] + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + start_time = time.gmtime() + video_id = mobj.group('id') + + webpage = self._download_webpage(url, video_id) + player_url = self._search_regex( + r'<param name="movie" value="([^"]+)"/>', webpage, 'player URL') + title = remove_end(self._og_search_title(webpage), ' en directo') + title += ' ' + time.strftime('%Y-%m-%dZ%H%M%S', start_time) + + vidplayer_id = self._search_regex( + r' id="vidplayer([0-9]+)"', webpage, 'internal video ID') + png_url = 'http://www.rtve.es/ztnr/movil/thumbnail/default/videos/%s.png' % vidplayer_id + png = self._download_webpage(png_url, video_id, 'Downloading url information') + video_url = _decrypt_url(png) + + return { + 'id': 
video_id, + 'ext': 'flv', + 'title': title, + 'url': video_url, + 'app': 'rtve-live-live?ovpfv=2.1.2', + 'player_url': player_url, + 'rtmp_live': True, + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/ruhd.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/ruhd.py new file mode 100644 index 0000000000..0e470e73f5 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/ruhd.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + +from .common import InfoExtractor + + +class RUHDIE(InfoExtractor): + _VALID_URL = r'http://(?:www\.)?ruhd\.ru/play\.php\?vid=(?P<id>\d+)' + _TEST = { + 'url': 'http://www.ruhd.ru/play.php?vid=207', + 'md5': 'd1a9ec4edf8598e3fbd92bb16072ba83', + 'info_dict': { + 'id': '207', + 'ext': 'divx', + 'title': 'п п·п╒ п╠п╟п╟п╟п╟п╟п╪', + 'description': 'п╨п╩п╟я│я│п╫я▀п╧ п╨п╬я┌)', + 'thumbnail': 're:^http://.*\.jpg$', + } + } + + def _real_extract(self, url): + video_id = self._match_id(url) + webpage = self._download_webpage(url, video_id) + + video_url = self._html_search_regex( + r'<param name="src" value="([^"]+)"', webpage, 'video url') + title = self._html_search_regex( + r'<title>([^<]+)   RUHD.ru - п▓п╦п╢п╣п╬ п▓я▀я│п╬п╨п╬пЁп╬ п╨п╟я┤п╣я│я┌п╡п╟ Б└√1 п╡ п═п╬я│я│п╦п╦!', + webpage, 'title') + description = self._html_search_regex( + r'(?s)
    (.+?)', + webpage, 'description', fatal=False) + thumbnail = self._html_search_regex( + r'[\da-z]{32})' + + _TEST = { + 'url': 'http://rutube.ru/video/3eac3b4561676c17df9132a9a1e62e3e/', + 'info_dict': { + 'id': '3eac3b4561676c17df9132a9a1e62e3e', + 'ext': 'mp4', + 'title': 'п═п╟п╫п╣п╫п╫я▀п╧ п╨п╣п╫пЁя┐я─я┐ п╥п╟п╠п╣п╤п╟п╩ п╡ п╟п©я┌п╣п╨я┐', + 'description': 'http://www.ntdtv.ru ', + 'duration': 80, + 'uploader': 'NTDRussian', + 'uploader_id': '29790', + 'upload_date': '20131016', + }, + 'params': { + # It requires ffmpeg (m3u8 download) + 'skip_download': True, + }, + } + + def _real_extract(self, url): + video_id = self._match_id(url) + video = self._download_json( + 'http://rutube.ru/api/video/%s/?format=json' % video_id, + video_id, 'Downloading video JSON') + + # Some videos don't have the author field + author = video.get('author') or {} + + options = self._download_json( + 'http://rutube.ru/api/play/options/%s/?format=json' % video_id, + video_id, 'Downloading options JSON') + + m3u8_url = options['video_balancer'].get('m3u8') + if m3u8_url is None: + raise ExtractorError('Couldn\'t find m3u8 manifest url') + formats = self._extract_m3u8_formats(m3u8_url, video_id, ext='mp4') + + return { + 'id': video['id'], + 'title': video['title'], + 'description': video['description'], + 'duration': video['duration'], + 'view_count': video['hits'], + 'formats': formats, + 'thumbnail': video['thumbnail_url'], + 'uploader': author.get('name'), + 'uploader_id': compat_str(author['id']) if author else None, + 'upload_date': unified_strdate(video['created_ts']), + 'age_limit': 18 if video['is_adult'] else 0, + } + + +class RutubeChannelIE(InfoExtractor): + IE_NAME = 'rutube:channel' + IE_DESC = 'Rutube channels' + _VALID_URL = r'http://rutube\.ru/tags/video/(?P\d+)' + _TESTS = [{ + 'url': 'http://rutube.ru/tags/video/1800/', + 'info_dict': { + 'id': '1800', + }, + 'playlist_mincount': 68, + }] + + _PAGE_TEMPLATE = 'http://rutube.ru/api/tags/video/%s/?page=%s&format=json' + 
+ def _extract_videos(self, channel_id, channel_title=None): + entries = [] + for pagenum in itertools.count(1): + page = self._download_json( + self._PAGE_TEMPLATE % (channel_id, pagenum), + channel_id, 'Downloading page %s' % pagenum) + results = page['results'] + if not results: + break + entries.extend(self.url_result(result['video_url'], 'Rutube') for result in results) + if not page['has_next']: + break + return self.playlist_result(entries, channel_id, channel_title) + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + channel_id = mobj.group('id') + return self._extract_videos(channel_id) + + +class RutubeMovieIE(RutubeChannelIE): + IE_NAME = 'rutube:movie' + IE_DESC = 'Rutube movies' + _VALID_URL = r'http://rutube\.ru/metainfo/tv/(?P\d+)' + _TESTS = [] + + _MOVIE_TEMPLATE = 'http://rutube.ru/api/metainfo/tv/%s/?format=json' + _PAGE_TEMPLATE = 'http://rutube.ru/api/metainfo/tv/%s/video?page=%s&format=json' + + def _real_extract(self, url): + movie_id = self._match_id(url) + movie = self._download_json( + self._MOVIE_TEMPLATE % movie_id, movie_id, + 'Downloading movie JSON') + movie_name = movie['name'] + return self._extract_videos(movie_id, movie_name) + + +class RutubePersonIE(RutubeChannelIE): + IE_NAME = 'rutube:person' + IE_DESC = 'Rutube person videos' + _VALID_URL = r'http://rutube\.ru/video/person/(?P\d+)' + _TESTS = [{ + 'url': 'http://rutube.ru/video/person/313878/', + 'info_dict': { + 'id': '313878', + }, + 'playlist_mincount': 37, + }] + + _PAGE_TEMPLATE = 'http://rutube.ru/api/video/person/%s/?page=%s&format=json' diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/rutv.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/rutv.py new file mode 100644 index 0000000000..a73e6f331f --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/rutv.py @@ -0,0 +1,194 @@ +# encoding: utf-8 +from __future__ import unicode_literals + +import re + +from 
.common import InfoExtractor +from ..utils import ( + ExtractorError, + int_or_none +) + + +class RUTVIE(InfoExtractor): + IE_DESC = 'RUTV.RU' + _VALID_URL = r'''(?x) + https?://player\.(?:rutv\.ru|vgtrk\.com)/ + (?Pflash2v/container\.swf\?id= + |iframe/(?Pswf|video|live)/id/ + |index/iframe/cast_id/) + (?P\d+)''' + + _TESTS = [ + { + 'url': 'http://player.rutv.ru/flash2v/container.swf?id=774471&sid=kultura&fbv=true&isPlay=true&ssl=false&i=560&acc_video_id=episode_id/972347/video_id/978186/brand_id/31724', + 'info_dict': { + 'id': '774471', + 'ext': 'mp4', + 'title': 'п°п╬п╫п╬п╩п╬пЁп╦ п╫п╟ п╡я│п╣ п╡я─п╣п╪п╣п╫п╟', + 'description': 'md5:18d8b5e6a41fb1faa53819471852d5d5', + 'duration': 2906, + }, + 'params': { + # m3u8 download + 'skip_download': True, + }, + }, + { + 'url': 'https://player.vgtrk.com/flash2v/container.swf?id=774016&sid=russiatv&fbv=true&isPlay=true&ssl=false&i=560&acc_video_id=episode_id/972098/video_id/977760/brand_id/57638', + 'info_dict': { + 'id': '774016', + 'ext': 'mp4', + 'title': 'п╖я┐п╤п╬п╧ п╡ я│п╣п╪я▄п╣ п║я┌п╟п╩п╦п╫п╟', + 'description': '', + 'duration': 2539, + }, + 'params': { + # m3u8 download + 'skip_download': True, + }, + }, + { + 'url': 'http://player.rutv.ru/iframe/swf/id/766888/sid/hitech/?acc_video_id=4000', + 'info_dict': { + 'id': '766888', + 'ext': 'mp4', + 'title': 'п▓п╣я│я┌п╦.net: п╦п╫я┌п╣я─п╫п╣я┌-пЁп╦пЁп╟п╫я┌я▀ п╫п╟я┤п╟п╩п╦ п©п╣я─п╣я┌я▐пЁп╦п╡п╟п╫п╦п╣ п©я─п╬пЁя─п╟п╪п╪п╫я▀я┘ "п╬п╢п╣я▐п╩"', + 'description': 'md5:65ddd47f9830c4f42ed6475f8730c995', + 'duration': 279, + }, + 'params': { + # m3u8 download + 'skip_download': True, + }, + }, + { + 'url': 'http://player.rutv.ru/iframe/video/id/771852/start_zoom/true/showZoomBtn/false/sid/russiatv/?acc_video_id=episode_id/970443/video_id/975648/brand_id/5169', + 'info_dict': { + 'id': '771852', + 'ext': 'mp4', + 'title': 'п÷я─я▐п╪п╬п╧ я█я└п╦я─. 
п√п╣я─я┌п╡я▀ п╥п╟пЁп╟п╢п╬я┤п╫п╬п╧ п╠п╬п╩п╣п╥п╫п╦: я│п╪п╣я─я┌я▄ п╬я┌ я│я┌п╟я─п╬я│я┌п╦ п╡ 17 п╩п╣я┌', + 'description': 'md5:b81c8c55247a4bd996b43ce17395b2d8', + 'duration': 3096, + }, + 'params': { + # m3u8 download + 'skip_download': True, + }, + }, + { + 'url': 'http://player.rutv.ru/iframe/live/id/51499/showZoomBtn/false/isPlay/true/sid/sochi2014', + 'info_dict': { + 'id': '51499', + 'ext': 'flv', + 'title': 'п║п╬я┤п╦-2014. п▒п╦п╟я┌п╩п╬п╫. п≤п╫п╢п╦п╡п╦п╢я┐п╟п╩я▄п╫п╟я▐ пЁп╬п╫п╨п╟. п°я┐п╤я┤п╦п╫я▀ ', + 'description': 'md5:9e0ed5c9d2fa1efbfdfed90c9a6d179c', + }, + 'params': { + # rtmp download + 'skip_download': True, + }, + 'skip': 'Translation has finished', + }, + ] + + @classmethod + def _extract_url(cls, webpage): + mobj = re.search( + r']+?src=(["\'])(?Phttps?://player\.rutv\.ru/(?:iframe/(?:swf|video|live)/id|index/iframe/cast_id)/.+?)\1', webpage) + if mobj: + return mobj.group('url') + + mobj = re.search( + r']+?property=(["\'])og:video\1[^>]+?content=(["\'])(?Phttps?://player\.(?:rutv\.ru|vgtrk\.com)/flash2v/container\.swf\?id=.+?\2)', + webpage) + if mobj: + return mobj.group('url') + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group('id') + video_path = mobj.group('path') + + if video_path.startswith('flash2v'): + video_type = 'video' + elif video_path.startswith('iframe'): + video_type = mobj.group('type') + if video_type == 'swf': + video_type = 'video' + elif video_path.startswith('index/iframe/cast_id'): + video_type = 'live' + + json_data = self._download_json( + 'http://player.rutv.ru/iframe/%splay/id/%s' % ('live-' if video_type == 'live' else '', video_id), + video_id, 'Downloading JSON') + + if json_data['errors']: + raise ExtractorError('%s said: %s' % (self.IE_NAME, json_data['errors']), expected=True) + + playlist = json_data['data']['playlist'] + medialist = playlist['medialist'] + media = medialist[0] + + if media['errors']: + raise ExtractorError('%s said: %s' % (self.IE_NAME, media['errors']), 
expected=True) + + view_count = playlist.get('count_views') + priority_transport = playlist['priority_transport'] + + thumbnail = media['picture'] + width = int_or_none(media['width']) + height = int_or_none(media['height']) + description = media['anons'] + title = media['title'] + duration = int_or_none(media.get('duration')) + + formats = [] + + for transport, links in media['sources'].items(): + for quality, url in links.items(): + if transport == 'rtmp': + mobj = re.search(r'^(?Prtmp://[^/]+/(?P.+))/(?P.+)$', url) + if not mobj: + continue + fmt = { + 'url': mobj.group('url'), + 'play_path': mobj.group('playpath'), + 'app': mobj.group('app'), + 'page_url': 'http://player.rutv.ru', + 'player_url': 'http://player.rutv.ru/flash2v/osmf.swf?i=22', + 'rtmp_live': True, + 'ext': 'flv', + 'vbr': int(quality), + } + elif transport == 'm3u8': + fmt = { + 'url': url, + 'ext': 'mp4', + } + else: + fmt = { + 'url': url + } + fmt.update({ + 'width': width, + 'height': height, + 'format_id': '%s-%s' % (transport, quality), + 'preference': -1 if priority_transport == transport else -2, + }) + formats.append(fmt) + + if not formats: + raise ExtractorError('No media links available for %s' % video_id) + + self._sort_formats(formats) + + return { + 'id': video_id, + 'title': title, + 'description': description, + 'thumbnail': thumbnail, + 'view_count': view_count, + 'duration': duration, + 'formats': formats, + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/sapo.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/sapo.py new file mode 100644 index 0000000000..172cc12752 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/sapo.py @@ -0,0 +1,119 @@ +# encoding: utf-8 +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor +from ..utils import ( + parse_duration, + unified_strdate, +) + + +class SapoIE(InfoExtractor): + IE_DESC = 'SAPO Vц╜deos' + _VALID_URL 
= r'https?://(?:(?:v2|www)\.)?videos\.sapo\.(?:pt|cv|ao|mz|tl)/(?P[\da-zA-Z]{20})' + + _TESTS = [ + { + 'url': 'http://videos.sapo.pt/UBz95kOtiWYUMTA5Ghfi', + 'md5': '79ee523f6ecb9233ac25075dee0eda83', + 'note': 'SD video', + 'info_dict': { + 'id': 'UBz95kOtiWYUMTA5Ghfi', + 'ext': 'mp4', + 'title': 'Benfica - Marcas na HitцЁria', + 'description': 'md5:c9082000a128c3fd57bf0299e1367f22', + 'duration': 264, + 'uploader': 'tiago_1988', + 'upload_date': '20080229', + 'categories': ['benfica', 'cabral', 'desporto', 'futebol', 'geovanni', 'hooijdonk', 'joao', 'karel', 'lisboa', 'miccoli'], + }, + }, + { + 'url': 'http://videos.sapo.pt/IyusNAZ791ZdoCY5H5IF', + 'md5': '90a2f283cfb49193fe06e861613a72aa', + 'note': 'HD video', + 'info_dict': { + 'id': 'IyusNAZ791ZdoCY5H5IF', + 'ext': 'mp4', + 'title': 'Codebits VII - Report', + 'description': 'md5:6448d6fd81ce86feac05321f354dbdc8', + 'duration': 144, + 'uploader': 'codebits', + 'upload_date': '20140427', + 'categories': ['codebits', 'codebits2014'], + }, + }, + { + 'url': 'http://v2.videos.sapo.pt/yLqjzPtbTimsn2wWBKHz', + 'md5': 'e5aa7cc0bdc6db9b33df1a48e49a15ac', + 'note': 'v2 video', + 'info_dict': { + 'id': 'yLqjzPtbTimsn2wWBKHz', + 'ext': 'mp4', + 'title': 'Hipnose Condicionativa 4', + 'description': 'md5:ef0481abf8fb4ae6f525088a6dadbc40', + 'duration': 692, + 'uploader': 'sapozen', + 'upload_date': '20090609', + 'categories': ['condicionativa', 'heloisa', 'hipnose', 'miranda', 'sapo', 'zen'], + }, + }, + ] + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group('id') + + item = self._download_xml( + 'http://rd3.videos.sapo.pt/%s/rss2' % video_id, video_id).find('./channel/item') + + title = item.find('./title').text + description = item.find('./{http://videos.sapo.pt/mrss/}synopse').text + thumbnail = item.find('./{http://search.yahoo.com/mrss/}content').get('url') + duration = parse_duration(item.find('./{http://videos.sapo.pt/mrss/}time').text) + uploader = 
item.find('./{http://videos.sapo.pt/mrss/}author').text + upload_date = unified_strdate(item.find('./pubDate').text) + view_count = int(item.find('./{http://videos.sapo.pt/mrss/}views').text) + comment_count = int(item.find('./{http://videos.sapo.pt/mrss/}comment_count').text) + tags = item.find('./{http://videos.sapo.pt/mrss/}tags').text + categories = tags.split() if tags else [] + age_limit = 18 if item.find('./{http://videos.sapo.pt/mrss/}m18').text == 'true' else 0 + + video_url = item.find('./{http://videos.sapo.pt/mrss/}videoFile').text + video_size = item.find('./{http://videos.sapo.pt/mrss/}videoSize').text.split('x') + + formats = [{ + 'url': video_url, + 'ext': 'mp4', + 'format_id': 'sd', + 'width': int(video_size[0]), + 'height': int(video_size[1]), + }] + + if item.find('./{http://videos.sapo.pt/mrss/}HD').text == 'true': + formats.append({ + 'url': re.sub(r'/mov/1$', '/mov/39', video_url), + 'ext': 'mp4', + 'format_id': 'hd', + 'width': 1280, + 'height': 720, + }) + + self._sort_formats(formats) + + return { + 'id': video_id, + 'title': title, + 'description': description, + 'thumbnail': thumbnail, + 'duration': duration, + 'uploader': uploader, + 'upload_date': upload_date, + 'view_count': view_count, + 'comment_count': comment_count, + 'categories': categories, + 'age_limit': age_limit, + 'formats': formats, + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/savefrom.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/savefrom.py new file mode 100644 index 0000000000..5b7367b941 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/savefrom.py @@ -0,0 +1,37 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import os.path +import re + +from .common import InfoExtractor + + +class SaveFromIE(InfoExtractor): + IE_NAME = 'savefrom.net' + _VALID_URL = r'https?://[^.]+\.savefrom\.net/\#url=(?P.*)$' + + _TEST = { + 'url': 
'http://en.savefrom.net/#url=http://youtube.com/watch?v=UlVRAPW2WJY&utm_source=youtube.com&utm_medium=short_domains&utm_campaign=ssyoutube.com', + 'info_dict': { + 'id': 'UlVRAPW2WJY', + 'ext': 'mp4', + 'title': 'About Team Radical MMA | MMA Fighting', + 'upload_date': '20120816', + 'uploader': 'Howcast', + 'uploader_id': 'Howcast', + 'description': 're:(?s).* Hi, my name is Rene Dreifuss\. And I\'m here to show you some MMA.*', + }, + 'params': { + 'skip_download': True + } + } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + video_id = os.path.splitext(url.split('/')[-1])[0] + return { + '_type': 'url', + 'id': video_id, + 'url': mobj.group('url'), + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/sbs.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/sbs.py new file mode 100644 index 0000000000..b8775c2f99 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/sbs.py @@ -0,0 +1,59 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + +import json +import re +from .common import InfoExtractor +from ..utils import ( + js_to_json, + remove_end, +) + + +class SBSIE(InfoExtractor): + IE_DESC = 'sbs.com.au' + _VALID_URL = r'https?://(?:www\.)?sbs\.com\.au/ondemand/video/(?:single/)?(?P[0-9]+)' + + _TESTS = [{ + # Original URL is handled by the generic IE which finds the iframe: + # http://www.sbs.com.au/thefeed/blog/2014/08/21/dingo-conservation + 'url': 'http://www.sbs.com.au/ondemand/video/single/320403011771/?source=drupal&vertical=thefeed', + 'md5': '3150cf278965eeabb5b4cea1c963fe0a', + 'info_dict': { + 'id': '320403011771', + 'ext': 'mp4', + 'title': 'Dingo Conservation', + 'description': 'Dingoes are on the brink of extinction; most of the animals we think are dingoes are in fact crossbred with wild dogs. 
This family run a dingo conservation park to prevent their extinction', + 'thumbnail': 're:http://.*\.jpg', + }, + 'add_ies': ['generic'], + }, { + 'url': 'http://www.sbs.com.au/ondemand/video/320403011771/Dingo-Conservation-The-Feed', + 'only_matching': True, + }] + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group('id') + webpage = self._download_webpage(url, video_id) + + release_urls_json = js_to_json(self._search_regex( + r'(?s)playerParams\.releaseUrls\s*=\s*(\{.*?\n\});\n', + webpage, '')) + release_urls = json.loads(release_urls_json) + theplatform_url = ( + release_urls.get('progressive') or release_urls.get('standard')) + + title = remove_end(self._og_search_title(webpage), ' (The Feed)') + description = self._html_search_meta('description', webpage) + thumbnail = self._og_search_thumbnail(webpage) + + return { + '_type': 'url_transparent', + 'id': video_id, + 'url': theplatform_url, + + 'title': title, + 'description': description, + 'thumbnail': thumbnail, + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/scivee.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/scivee.py new file mode 100644 index 0000000000..3bf93c870b --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/scivee.py @@ -0,0 +1,56 @@ +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor +from ..utils import int_or_none + + +class SciVeeIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?scivee\.tv/node/(?P\d+)' + + _TEST = { + 'url': 'http://www.scivee.tv/node/62352', + 'md5': 'b16699b74c9e6a120f6772a44960304f', + 'info_dict': { + 'id': '62352', + 'ext': 'mp4', + 'title': 'Adam Arkin at the 2014 DOE JGI Genomics of Energy & Environment Meeting', + 'description': 'md5:81f1710638e11a481358fab1b11059d7', + }, + } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group('id') 
+ + # annotations XML is malformed + annotations = self._download_webpage( + 'http://www.scivee.tv/assets/annotations/%s' % video_id, video_id, 'Downloading annotations') + + title = self._html_search_regex(r'([^<]+)', annotations, 'title') + description = self._html_search_regex(r'([^<]+)', annotations, 'abstract', fatal=False) + filesize = int_or_none(self._html_search_regex( + r'([^<]+)', annotations, 'filesize', fatal=False)) + + formats = [ + { + 'url': 'http://www.scivee.tv/assets/audio/%s' % video_id, + 'ext': 'mp3', + 'format_id': 'audio', + }, + { + 'url': 'http://www.scivee.tv/assets/video/%s' % video_id, + 'ext': 'mp4', + 'format_id': 'video', + 'filesize': filesize, + }, + ] + + return { + 'id': video_id, + 'title': title, + 'description': description, + 'thumbnail': 'http://www.scivee.tv/assets/videothumb/%s' % video_id, + 'formats': formats, + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/screencast.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/screencast.py new file mode 100644 index 0000000000..dfd897ba3a --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/screencast.py @@ -0,0 +1,111 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + +from .common import InfoExtractor +from ..compat import ( + compat_parse_qs, + compat_urllib_request, +) +from ..utils import ( + ExtractorError, +) + + +class ScreencastIE(InfoExtractor): + _VALID_URL = r'https?://www\.screencast\.com/t/(?P[a-zA-Z0-9]+)' + _TESTS = [{ + 'url': 'http://www.screencast.com/t/3ZEjQXlT', + 'md5': '917df1c13798a3e96211dd1561fded83', + 'info_dict': { + 'id': '3ZEjQXlT', + 'ext': 'm4v', + 'title': 'Color Measurement with Ocean Optics Spectrometers', + 'description': 'md5:240369cde69d8bed61349a199c5fb153', + 'thumbnail': 're:^https?://.*\.(?:gif|jpg)$', + } + }, { + 'url': 'http://www.screencast.com/t/V2uXehPJa1ZI', + 'md5': 'e8e4b375a7660a9e7e35c33973410d34', + 'info_dict': 
{ + 'id': 'V2uXehPJa1ZI', + 'ext': 'mov', + 'title': 'The Amadeus Spectrometer', + 'description': 're:^In this video, our friends at.*To learn more about Amadeus, visit', + 'thumbnail': 're:^https?://.*\.(?:gif|jpg)$', + } + }, { + 'url': 'http://www.screencast.com/t/aAB3iowa', + 'md5': 'dedb2734ed00c9755761ccaee88527cd', + 'info_dict': { + 'id': 'aAB3iowa', + 'ext': 'mp4', + 'title': 'Google Earth Export', + 'description': 'Provides a demo of a CommunityViz export to Google Earth, one of the 3D viewing options.', + 'thumbnail': 're:^https?://.*\.(?:gif|jpg)$', + } + }, { + 'url': 'http://www.screencast.com/t/X3ddTrYh', + 'md5': '669ee55ff9c51988b4ebc0877cc8b159', + 'info_dict': { + 'id': 'X3ddTrYh', + 'ext': 'wmv', + 'title': 'Toolkit 6 User Group Webinar (2014-03-04) - Default Judgment and First Impression', + 'description': 'md5:7b9f393bc92af02326a5c5889639eab0', + 'thumbnail': 're:^https?://.*\.(?:gif|jpg)$', + } + }, + ] + + def _real_extract(self, url): + video_id = self._match_id(url) + webpage = self._download_webpage(url, video_id) + + video_url = self._html_search_regex( + r'Title: ([^<]*)
    ', + r'class="tabSeperator">>(.*?)<'], + webpage, 'title') + thumbnail = self._og_search_thumbnail(webpage) + description = self._og_search_description(webpage, default=None) + if description is None: + description = self._html_search_meta('description', webpage) + + return { + 'id': video_id, + 'url': video_url, + 'title': title, + 'description': description, + 'thumbnail': thumbnail, + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/screencastomatic.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/screencastomatic.py new file mode 100644 index 0000000000..05337421ca --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/screencastomatic.py @@ -0,0 +1,49 @@ +# coding: utf-8 +from __future__ import unicode_literals + +from .common import InfoExtractor +from ..compat import compat_urlparse +from ..utils import ( + ExtractorError, + js_to_json, +) + + +class ScreencastOMaticIE(InfoExtractor): + _VALID_URL = r'https?://screencast-o-matic\.com/watch/(?P[0-9a-zA-Z]+)' + _TEST = { + 'url': 'http://screencast-o-matic.com/watch/c2lD3BeOPl', + 'md5': '483583cb80d92588f15ccbedd90f0c18', + 'info_dict': { + 'id': 'c2lD3BeOPl', + 'ext': 'mp4', + 'title': 'Welcome to 3-4 Philosophy @ DECV!', + 'thumbnail': 're:^https?://.*\.jpg$', + 'description': 'as the title says! 
also: some general info re 1) VCE philosophy and 2) distance learning.', + } + } + + def _real_extract(self, url): + video_id = self._match_id(url) + webpage = self._download_webpage(url, video_id) + + setup_js = self._search_regex( + r"(?s)jwplayer\('mp4Player'\).setup\((\{.*?\})\);", + webpage, 'setup code') + data = self._parse_json(setup_js, video_id, transform_source=js_to_json) + try: + video_data = next( + m for m in data['modes'] if m.get('type') == 'html5') + except StopIteration: + raise ExtractorError('Could not find any video entries!') + video_url = compat_urlparse.urljoin(url, video_data['config']['file']) + thumbnail = data.get('image') + + return { + 'id': video_id, + 'title': self._og_search_title(webpage), + 'description': self._og_search_description(webpage), + 'url': video_url, + 'ext': 'mp4', + 'thumbnail': thumbnail, + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/screenwavemedia.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/screenwavemedia.py new file mode 100644 index 0000000000..6c9fdb7c1a --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/screenwavemedia.py @@ -0,0 +1,178 @@ +# encoding: utf-8 +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor +from ..utils import ( + int_or_none, + unified_strdate, +) + + +class ScreenwaveMediaIE(InfoExtractor): + _VALID_URL = r'http://player\.screenwavemedia\.com/play/[a-zA-Z]+\.php\?[^"]*\bid=(?P.+)' + + _TESTS = [{ + 'url': 'http://player.screenwavemedia.com/play/play.php?playerdiv=videoarea&companiondiv=squareAd&id=Cinemassacre-19911', + 'only_matching': True, + }] + + def _real_extract(self, url): + video_id = self._match_id(url) + playerdata = self._download_webpage(url, video_id, 'Downloading player webpage') + + vidtitle = self._search_regex( + r'\'vidtitle\'\s*:\s*"([^"]+)"', playerdata, 'vidtitle').replace('\\/', '/') + vidurl = self._search_regex( + 
r'\'vidurl\'\s*:\s*"([^"]+)"', playerdata, 'vidurl').replace('\\/', '/') + + videolist_url = None + + mobj = re.search(r"'videoserver'\s*:\s*'(?P[^']+)'", playerdata) + if mobj: + videoserver = mobj.group('videoserver') + mobj = re.search(r'\'vidid\'\s*:\s*"(?P[^\']+)"', playerdata) + vidid = mobj.group('vidid') if mobj else video_id + videolist_url = 'http://%s/vod/smil:%s.smil/jwplayer.smil' % (videoserver, vidid) + else: + mobj = re.search(r"file\s*:\s*'(?Phttp.+?/jwplayer\.smil)'", playerdata) + if mobj: + videolist_url = mobj.group('smil') + + if videolist_url: + videolist = self._download_xml(videolist_url, video_id, 'Downloading videolist XML') + formats = [] + baseurl = vidurl[:vidurl.rfind('/') + 1] + for video in videolist.findall('.//video'): + src = video.get('src') + if not src: + continue + file_ = src.partition(':')[-1] + width = int_or_none(video.get('width')) + height = int_or_none(video.get('height')) + bitrate = int_or_none(video.get('system-bitrate'), scale=1000) + format = { + 'url': baseurl + file_, + 'format_id': src.rpartition('.')[0].rpartition('_')[-1], + } + if width or height: + format.update({ + 'tbr': bitrate, + 'width': width, + 'height': height, + }) + else: + format.update({ + 'abr': bitrate, + 'vcodec': 'none', + }) + formats.append(format) + else: + formats = [{ + 'url': vidurl, + }] + self._sort_formats(formats) + + return { + 'id': video_id, + 'title': vidtitle, + 'formats': formats, + } + + +class CinemassacreIE(InfoExtractor): + _VALID_URL = 'https?://(?:www\.)?cinemassacre\.com/(?P[0-9]{4})/(?P[0-9]{2})/(?P[0-9]{2})/(?P[^?#/]+)' + _TESTS = [ + { + 'url': 'http://cinemassacre.com/2012/11/10/avgn-the-movie-trailer/', + 'md5': 'fde81fbafaee331785f58cd6c0d46190', + 'info_dict': { + 'id': 'Cinemassacre-19911', + 'ext': 'mp4', + 'upload_date': '20121110', + 'title': 'Б─°Angry Video Game Nerd: The MovieБ─² Б─⌠ Trailer', + 'description': 'md5:fb87405fcb42a331742a0dce2708560b', + }, + }, + { + 'url': 
'http://cinemassacre.com/2013/10/02/the-mummys-hand-1940', + 'md5': 'd72f10cd39eac4215048f62ab477a511', + 'info_dict': { + 'id': 'Cinemassacre-521be8ef82b16', + 'ext': 'mp4', + 'upload_date': '20131002', + 'title': 'The MummyБ─≥s Hand (1940)', + }, + } + ] + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + display_id = mobj.group('display_id') + video_date = mobj.group('date_y') + mobj.group('date_m') + mobj.group('date_d') + + webpage = self._download_webpage(url, display_id) + + playerdata_url = self._search_regex( + r'src="(http://player\.screenwavemedia\.com/play/[a-zA-Z]+\.php\?[^"]*\bid=.+?)"', + webpage, 'player data URL') + video_title = self._html_search_regex( + r'(?P<title>.+?)\|', webpage, 'title') + video_description = self._html_search_regex( + r'<div class="entry-content">(?P<description>.+?)</div>', + webpage, 'description', flags=re.DOTALL, fatal=False) + video_thumbnail = self._og_search_thumbnail(webpage) + + return { + '_type': 'url_transparent', + 'display_id': display_id, + 'title': video_title, + 'description': video_description, + 'upload_date': video_date, + 'thumbnail': video_thumbnail, + 'url': playerdata_url, + } + + +class TeamFourIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?teamfourstar\.com/video/(?P<id>[a-z0-9\-]+)/?' 
+ _TEST = { + 'url': 'http://teamfourstar.com/video/a-moment-with-tfs-episode-4/', + 'info_dict': { + 'id': 'TeamFourStar-5292a02f20bfa', + 'ext': 'mp4', + 'upload_date': '20130401', + 'description': 'Check out this and more on our website: http://teamfourstar.com\nTFS Store: http://sharkrobot.com/team-four-star\nFollow on Twitter: http://twitter.com/teamfourstar\nLike on FB: http://facebook.com/teamfourstar', + 'title': 'A Moment With TFS Episode 4', + } + } + + def _real_extract(self, url): + display_id = self._match_id(url) + webpage = self._download_webpage(url, display_id) + + playerdata_url = self._search_regex( + r'src="(http://player\.screenwavemedia\.com/play/[a-zA-Z]+\.php\?[^"]*\bid=.+?)"', + webpage, 'player data URL') + + video_title = self._html_search_regex( + r'<div class="heroheadingtitle">(?P<title>.+?)</div>', + webpage, 'title') + video_date = unified_strdate(self._html_search_regex( + r'<div class="heroheadingdate">(?P<date>.+?)</div>', + webpage, 'date', fatal=False)) + video_description = self._html_search_regex( + r'(?s)<div class="postcontent">(?P<description>.+?)</div>', + webpage, 'description', fatal=False) + video_thumbnail = self._og_search_thumbnail(webpage) + + return { + '_type': 'url_transparent', + 'display_id': display_id, + 'title': video_title, + 'description': video_description, + 'upload_date': video_date, + 'thumbnail': video_thumbnail, + 'url': playerdata_url, + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/servingsys.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/servingsys.py new file mode 100644 index 0000000000..16dc3736b4 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/servingsys.py @@ -0,0 +1,69 @@ +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor +from ..utils import ( + int_or_none, +) + + +class ServingSysIE(InfoExtractor): + _VALID_URL = 
r'https?://(?:[^.]+\.)?serving-sys\.com/BurstingPipe/adServer\.bs\?.*?&pli=(?P<id>[0-9]+)' + + _TEST = { + 'url': 'http://bs.serving-sys.com/BurstingPipe/adServer.bs?cn=is&c=23&pl=VAST&pli=5349193&PluID=0&pos=7135&ord=[timestamp]&cim=1?', + 'playlist': [{ + 'file': '29955898.flv', + 'md5': 'baed851342df6846eb8677a60a011a0f', + 'info_dict': { + 'title': 'AdAPPter_Hyundai_demo (1)', + 'duration': 74, + 'tbr': 1378, + 'width': 640, + 'height': 400, + }, + }, { + 'file': '29907998.flv', + 'md5': '979b4da2655c4bc2d81aeb915a8c5014', + 'info_dict': { + 'title': 'AdAPPter_Hyundai_demo (2)', + 'duration': 34, + 'width': 854, + 'height': 480, + 'tbr': 516, + }, + }], + 'params': { + 'playlistend': 2, + }, + 'skip': 'Blocked in the US [sic]', + } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + pl_id = mobj.group('id') + + vast_doc = self._download_xml(url, pl_id) + title = vast_doc.find('.//AdTitle').text + media = vast_doc.find('.//MediaFile').text + info_url = self._search_regex(r'&adData=([^&]+)&', media, 'info URL') + + doc = self._download_xml(info_url, pl_id, 'Downloading video info') + entries = [{ + '_type': 'video', + 'id': a.attrib['id'], + 'title': '%s (%s)' % (title, a.attrib['assetID']), + 'url': a.attrib['URL'], + 'duration': int_or_none(a.attrib.get('length')), + 'tbr': int_or_none(a.attrib.get('bitrate')), + 'height': int_or_none(a.attrib.get('height')), + 'width': int_or_none(a.attrib.get('width')), + } for a in doc.findall('.//AdditionalAssets/asset')] + + return { + '_type': 'playlist', + 'id': pl_id, + 'title': title, + 'entries': entries, + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/sexu.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/sexu.py new file mode 100644 index 0000000000..6365a8779d --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/sexu.py @@ -0,0 +1,61 @@ +from __future__ import unicode_literals + +import re + +from 
.common import InfoExtractor + + +class SexuIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?sexu\.com/(?P<id>\d+)' + _TEST = { + 'url': 'http://sexu.com/961791/', + 'md5': 'ff615aca9691053c94f8f10d96cd7884', + 'info_dict': { + 'id': '961791', + 'ext': 'mp4', + 'title': 'md5:4d05a19a5fc049a63dbbaf05fb71d91b', + 'description': 'md5:c5ed8625eb386855d5a7967bd7b77a54', + 'categories': list, # NSFW + 'thumbnail': 're:https?://.*\.jpg$', + 'age_limit': 18, + } + } + + def _real_extract(self, url): + video_id = self._match_id(url) + webpage = self._download_webpage(url, video_id) + + quality_arr = self._search_regex( + r'sources:\s*\[([^\]]+)\]', webpage, 'forrmat string') + formats = [{ + 'url': fmt[0].replace('\\', ''), + 'format_id': fmt[1], + 'height': int(fmt[1][:3]), + } for fmt in re.findall(r'"file":"([^"]+)","label":"([^"]+)"', quality_arr)] + self._sort_formats(formats) + + title = self._html_search_regex( + r'<title>([^<]+)\s*-\s*Sexu\.Com', webpage, 'title') + + description = self._html_search_meta( + 'description', webpage, 'description') + + thumbnail = self._html_search_regex( + r'image:\s*"([^"]+)"', + webpage, 'thumbnail', fatal=False) + + categories_str = self._html_search_meta( + 'keywords', webpage, 'categories') + categories = ( + None if categories_str is None + else categories_str.split(',')) + + return { + 'id': video_id, + 'title': title, + 'description': description, + 'thumbnail': thumbnail, + 'categories': categories, + 'formats': formats, + 'age_limit': 18, + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/sexykarma.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/sexykarma.py new file mode 100644 index 0000000000..c833fc8ee8 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/sexykarma.py @@ -0,0 +1,117 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor +from ..utils import ( + 
class SexyKarmaIE(InfoExtractor):
    """Extractor for sexykarma.com / watchindianporn.net video pages (NSFW).

    NOTE(review): several HTML-matching regex literals in this copy of the
    file were corrupted (their angle-bracket tags were stripped by whatever
    produced this text).  They are reconstructed below from the upstream
    youtube-dl extractor of the same era; each reconstructed literal is
    marked with a TODO -- confirm against upstream before relying on them.
    """
    IE_DESC = 'Sexy Karma and Watch Indian Porn'
    _VALID_URL = r'https?://(?:www\.)?(?:sexykarma\.com|watchindianporn\.net)/(?:[^/]+/)*video/(?P<display_id>[^/]+)-(?P<id>[a-zA-Z0-9]+)\.html'
    _TESTS = [{
        'url': 'http://www.sexykarma.com/gonewild/video/taking-a-quick-pee-yHI70cOyIHt.html',
        'md5': 'b9798e7d1ef1765116a8f516c8091dbd',
        'info_dict': {
            'id': 'yHI70cOyIHt',
            'display_id': 'taking-a-quick-pee',
            'ext': 'mp4',
            'title': 'Taking a quick pee.',
            'thumbnail': 're:^https?://.*\.jpg$',
            'uploader': 'wildginger7',
            'upload_date': '20141007',
            'duration': 22,
            'view_count': int,
            'comment_count': int,
            'categories': list,
        }
    }, {
        'url': 'http://www.sexykarma.com/gonewild/video/pot-pixie-tribute-8Id6EZPbuHf.html',
        'md5': 'dd216c68d29b49b12842b9babe762a5d',
        'info_dict': {
            'id': '8Id6EZPbuHf',
            'display_id': 'pot-pixie-tribute',
            'ext': 'mp4',
            'title': 'pot_pixie tribute',
            'thumbnail': 're:^https?://.*\.jpg$',
            'uploader': 'banffite',
            'upload_date': '20141013',
            'duration': 16,
            'view_count': int,
            'comment_count': int,
            'categories': list,
        }
    }, {
        'url': 'http://www.watchindianporn.net/video/desi-dancer-namrata-stripping-completely-nude-and-dancing-on-a-hot-number-dW2mtctxJfs.html',
        'md5': '9afb80675550406ed9a63ac2819ef69d',
        'info_dict': {
            'id': 'dW2mtctxJfs',
            'display_id': 'desi-dancer-namrata-stripping-completely-nude-and-dancing-on-a-hot-number',
            'ext': 'mp4',
            'title': 'Desi dancer namrata stripping completely nude and dancing on a hot number',
            'thumbnail': 're:^https?://.*\.jpg$',
            'uploader': 'Don',
            'upload_date': '20140213',
            'duration': 83,
            'view_count': int,
            'comment_count': int,
            'categories': list,
        }
    }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        display_id = mobj.group('display_id')

        webpage = self._download_webpage(url, display_id)

        # The media URL is passed through the JS escape() helper in the page.
        video_url = self._html_search_regex(
            r"url: escape\('([^']+)'\)", webpage, 'url')

        # TODO(review): reconstructed literal -- confirm against upstream.
        title = self._html_search_regex(
            r'<h2 class="he2"><span>(.*?)</span>',
            webpage, 'title')
        # TODO(review): reconstructed literal -- confirm against upstream.
        thumbnail = self._html_search_regex(
            r'<span id="container"><img\s+src="([^"]+)"',
            webpage, 'thumbnail', fatal=False)

        # TODO(review): reconstructed literal -- confirm against upstream.
        uploader = self._html_search_regex(
            r'class="aupa">\s*(.*?)</a>',
            webpage, 'uploader')
        # TODO(review): reconstructed literal -- confirm against upstream.
        upload_date = unified_strdate(self._html_search_regex(
            r'Added: <strong>(.+?)</strong>', webpage, 'upload date', fatal=False))

        # TODO(review): reconstructed literal -- confirm against upstream.
        duration = parse_duration(self._search_regex(
            r'<tr>\s*<td>Time:\s*</td>\s*<td align="right"><span>\s*(.+?)\s*</span>',
            webpage, 'duration', fatal=False))

        # TODO(review): reconstructed literal -- confirm against upstream.
        view_count = int_or_none(self._search_regex(
            r'<tr>\s*<td>Views:\s*</td>\s*<td align="right"><span>\s*(\d+)\s*</span>',
            webpage, 'view count', fatal=False))
        # TODO(review): reconstructed literal -- confirm against upstream.
        comment_count = int_or_none(self._search_regex(
            r'<tr>\s*<td>Comments:\s*</td>\s*<td align="right"><span>\s*(\d+)\s*</span>',
            webpage, 'comment count', fatal=False))

        # TODO(review): only the capture group ([^<]+) survived in this copy;
        # the surrounding tag context is reconstructed -- confirm upstream.
        categories = re.findall(
            r'<a href="[^"]+">([^<]+)</a>',
            webpage)

        return {
            'id': video_id,
            'display_id': display_id,
            'url': video_url,
            'title': title,
            'thumbnail': thumbnail,
            'uploader': uploader,
            'upload_date': upload_date,
            'duration': duration,
            'view_count': view_count,
            'comment_count': comment_count,
            'categories': categories,
        }
    Time:\s*\s*(.+?)\s*', + webpage, 'duration', fatal=False)) + + view_count = int_or_none(self._search_regex( + r'Views:\s*\s*(\d+)\s*', + webpage, 'view count', fatal=False)) + comment_count = int_or_none(self._search_regex( + r'Comments:\s*\s*(\d+)\s*', + webpage, 'comment count', fatal=False)) + + categories = re.findall( + r'([^<]+)', + webpage) + + return { + 'id': video_id, + 'display_id': display_id, + 'url': video_url, + 'title': title, + 'thumbnail': thumbnail, + 'uploader': uploader, + 'upload_date': upload_date, + 'duration': duration, + 'view_count': view_count, + 'comment_count': comment_count, + 'categories': categories, + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/shared.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/shared.py new file mode 100644 index 0000000000..26ced716e8 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/shared.py @@ -0,0 +1,63 @@ +from __future__ import unicode_literals + +import re +import base64 + +from .common import InfoExtractor +from ..compat import ( + compat_urllib_parse, + compat_urllib_request, +) +from ..utils import ( + ExtractorError, + int_or_none, +) + + +class SharedIE(InfoExtractor): + _VALID_URL = r'http://shared\.sx/(?P[\da-z]{10})' + + _TEST = { + 'url': 'http://shared.sx/0060718775', + 'md5': '106fefed92a8a2adb8c98e6a0652f49b', + 'info_dict': { + 'id': '0060718775', + 'ext': 'mp4', + 'title': 'Bmp4', + }, + } + + def _real_extract(self, url): + video_id = self._match_id(url) + webpage = self._download_webpage(url, video_id) + + if '>File does not exist<' in webpage: + raise ExtractorError( + 'Video %s does not exist' % video_id, expected=True) + + download_form = dict(re.findall( + r'[0-9a-zA-Z]+)' + _TESTS = [ + { + 'url': 'http://sharesix.com/f/OXjQ7Y6', + 'md5': '9e8e95d8823942815a7d7c773110cc93', + 'info_dict': { + 'id': 'OXjQ7Y6', + 'ext': 'mp4', + 'title': 'big_buck_bunny_480p_surround-fix.avi', + 
class ShareSixIE(InfoExtractor):
    """Extractor for sharesix.com file pages.

    NOTE(review): the class header, _VALID_URL and the <dt>/<dd> regex
    literals were corrupted in this copy (tag-stripped); reconstructed from
    the upstream youtube-dl extractor -- confirm against upstream.
    """
    _VALID_URL = r'http://sharesix\.com/(?:f/)?(?P<id>[0-9a-zA-Z]+)'
    _TESTS = [
        {
            'url': 'http://sharesix.com/f/OXjQ7Y6',
            'md5': '9e8e95d8823942815a7d7c773110cc93',
            'info_dict': {
                'id': 'OXjQ7Y6',
                'ext': 'mp4',
                'title': 'big_buck_bunny_480p_surround-fix.avi',
                'duration': 596,
                'width': 854,
                'height': 480,
            },
        },
        {
            'url': 'http://sharesix.com/lfrwoxp35zdd',
            'md5': 'dd19f1435b7cec2d7912c64beeee8185',
            'info_dict': {
                'id': 'lfrwoxp35zdd',
                'ext': 'flv',
                'title': 'WhiteBoard___a_Mac_vs_PC_Parody_Cartoon.mp4.flv',
                'duration': 65,
                'width': 1280,
                'height': 720,
            },
        }
    ]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')

        # POST the "free user" form to reach the page embedding the link.
        fields = {
            'method_free': 'Free'
        }
        post = compat_urllib_parse.urlencode(fields)
        req = compat_urllib_request.Request(url, post)
        req.add_header('Content-type', 'application/x-www-form-urlencoded')

        webpage = self._download_webpage(req, video_id,
                                         'Downloading video page')

        video_url = self._search_regex(
            r"var\slnk1\s=\s'([^']+)'", webpage, 'video URL')
        # TODO(review): reconstructed literal -- confirm against upstream.
        title = self._html_search_regex(
            r'(?s)<dt>Filename:</dt>.+?<dd>(.+?)</dd>', webpage, 'title')
        # TODO(review): reconstructed literal -- confirm against upstream.
        duration = parse_duration(
            self._search_regex(
                r'(?s)<dt>Length:</dt>.+?<dd>(.+?)</dd>',
                webpage,
                'duration',
                fatal=False
            )
        )

        # TODO(review): reconstructed literal -- confirm against upstream.
        m = re.search(
            r'''(?xs)<dt>Width\sx\sHeight</dt>.+?
                     <dd>(?P<width>\d+)\sx\s(?P<height>\d+)</dd>''',
            webpage
        )
        width = height = None
        if m:
            width, height = int(m.group('width')), int(m.group('height'))

        formats = [{
            'format_id': 'sd',
            'url': video_url,
            'width': width,
            'height': height,
        }]

        return {
            'id': video_id,
            'title': title,
            'duration': duration,
            'formats': formats,
        }
    ''', + webpage + ) + width = height = None + if m: + width, height = int(m.group('width')), int(m.group('height')) + + formats = [{ + 'format_id': 'sd', + 'url': video_url, + 'width': width, + 'height': height, + }] + + return { + 'id': video_id, + 'title': title, + 'duration': duration, + 'formats': formats, + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/sina.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/sina.py new file mode 100644 index 0000000000..a63d126d45 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/sina.py @@ -0,0 +1,76 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor +from ..compat import ( + compat_urllib_request, + compat_urllib_parse, +) + + +class SinaIE(InfoExtractor): + _VALID_URL = r'''https?://(.*?\.)?video\.sina\.com\.cn/ + ( + (.+?/(((?P\d+).html)|(.*?(\#|(vid=)|b/)(?P\d+?)($|&|\-)))) + | + # This is used by external sites like Weibo + (api/sinawebApi/outplay.php/(?P.+?)\.swf) + ) + ''' + + _TESTS = [ + { + 'url': 'http://video.sina.com.cn/news/vlist/zt/chczlj2013/?opsubject_id=top12#110028898', + 'file': '110028898.flv', + 'md5': 'd65dd22ddcf44e38ce2bf58a10c3e71f', + 'info_dict': { + 'title': 'Ц─┼Д╦╜Е⌡╫Ф√╟И≈╩Ц─▀ Ф°²И╡°Х╕│Ф╠┌Е╥╢Ф▀©И╘╛Г╚▀Е█ЁИ┤┼Ф■╬Х╒╚Ф┴ёХ┬╧Е▒≤', + } + }, + { + 'url': 'http://video.sina.com.cn/v/b/101314253-1290078633.html', + 'info_dict': { + 'id': '101314253', + 'ext': 'flv', + 'title': 'Е├⌡Ф√╧Ф▐░И╚≤Е╞╧Ф°²Ф┐┘Ф┼╔Г⌡▒Х╖├Г╨╖Е┬╚', + }, + }, + ] + + @classmethod + def suitable(cls, url): + return re.match(cls._VALID_URL, url, flags=re.VERBOSE) is not None + + def _extract_video(self, video_id): + data = compat_urllib_parse.urlencode({'vid': video_id}) + url_doc = self._download_xml('http://v.iask.com/v_play.php?%s' % data, + video_id, 'Downloading video url') + image_page = self._download_webpage( + 
'http://interface.video.sina.com.cn/interface/common/getVideoImage.php?%s' % data, + video_id, 'Downloading thumbnail info') + + return {'id': video_id, + 'url': url_doc.find('./durl/url').text, + 'ext': 'flv', + 'title': url_doc.find('./vname').text, + 'thumbnail': image_page.split('=')[1], + } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url, flags=re.VERBOSE) + video_id = mobj.group('id') + if mobj.group('token') is not None: + # The video id is in the redirected url + self.to_screen('Getting video id') + request = compat_urllib_request.Request(url) + request.get_method = lambda: 'HEAD' + (_, urlh) = self._download_webpage_handle(request, 'NA', False) + return self._real_extract(urlh.geturl()) + elif video_id is None: + pseudo_id = mobj.group('pseudo_id') + webpage = self._download_webpage(url, pseudo_id) + video_id = self._search_regex(r'vid:\'(\d+?)\'', webpage, 'video id') + + return self._extract_video(video_id) diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/slideshare.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/slideshare.py new file mode 100644 index 0000000000..e7d776e7bd --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/slideshare.py @@ -0,0 +1,55 @@ +from __future__ import unicode_literals + +import re +import json + +from .common import InfoExtractor +from ..compat import ( + compat_urlparse, +) +from ..utils import ( + ExtractorError, +) + + +class SlideshareIE(InfoExtractor): + _VALID_URL = r'https?://www\.slideshare\.net/[^/]+?/(?P.+?)($|\?)' + + _TEST = { + 'url': 'http://www.slideshare.net/Dataversity/keynote-presentation-managing-scale-and-complexity', + 'info_dict': { + 'id': '25665706', + 'ext': 'mp4', + 'title': 'Managing Scale and Complexity', + 'description': 'This was a keynote presentation at the NoSQL Now! 2013 Conference & Expo (http://www.nosqlnow.com). 
This presentation was given by Adrian Cockcroft from Netflix.', + }, + } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + page_title = mobj.group('title') + webpage = self._download_webpage(url, page_title) + slideshare_obj = self._search_regex( + r'var slideshare_object = ({.*?}); var user_info =', + webpage, 'slideshare object') + info = json.loads(slideshare_obj) + if info['slideshow']['type'] != 'video': + raise ExtractorError('Webpage type is "%s": only video extraction is supported for Slideshare' % info['slideshow']['type'], expected=True) + + doc = info['doc'] + bucket = info['jsplayer']['video_bucket'] + ext = info['jsplayer']['video_extension'] + video_url = compat_urlparse.urljoin(bucket, doc + '-SD.' + ext) + description = self._html_search_regex( + r'<p\s+(?:style="[^"]*"\s+)?class=".*?description.*?"[^>]*>(.*?)</p>', webpage, + 'description', fatal=False) + + return { + '_type': 'video', + 'id': info['slideshow']['id'], + 'title': info['slideshow']['title'], + 'ext': ext, + 'url': video_url, + 'thumbnail': info['slideshow']['pin_image_url'], + 'description': description, + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/slutload.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/slutload.py new file mode 100644 index 0000000000..3df71304da --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/slutload.py @@ -0,0 +1,44 @@ +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor + + +class SlutloadIE(InfoExtractor): + _VALID_URL = r'^https?://(?:\w+\.)?slutload\.com/video/[^/]+/(?P<id>[^/]+)/?$' + _TEST = { + 'url': 'http://www.slutload.com/video/virginie-baisee-en-cam/TD73btpBqSxc/', + 'md5': '0cf531ae8006b530bd9df947a6a0df77', + 'info_dict': { + 'id': 'TD73btpBqSxc', + 'ext': 'mp4', + "title": "virginie baisee en cam", + "age_limit": 18, + 'thumbnail': 're:https?://.*?\.jpg' + } + } + + def 
_real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group('id') + + webpage = self._download_webpage(url, video_id) + + video_title = self._html_search_regex(r'<h1><strong>([^<]+)</strong>', + webpage, 'title').strip() + + video_url = self._html_search_regex( + r'(?s)<div id="vidPlayer"\s+data-url="([^"]+)"', + webpage, 'video URL') + thumbnail = self._html_search_regex( + r'(?s)<div id="vidPlayer"\s+.*?previewer-file="([^"]+)"', + webpage, 'thumbnail', fatal=False) + + return { + 'id': video_id, + 'url': video_url, + 'title': video_title, + 'thumbnail': thumbnail, + 'age_limit': 18 + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/smotri.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/smotri.py new file mode 100644 index 0000000000..baef3daa04 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/smotri.py @@ -0,0 +1,374 @@ +# encoding: utf-8 +from __future__ import unicode_literals + +import re +import json +import hashlib +import uuid + +from .common import InfoExtractor +from ..compat import ( + compat_urllib_parse, + compat_urllib_request, +) +from ..utils import ( + ExtractorError, + int_or_none, + unified_strdate, +) + + +class SmotriIE(InfoExtractor): + IE_DESC = 'Smotri.com' + IE_NAME = 'smotri' + _VALID_URL = r'^https?://(?:www\.)?(?:smotri\.com/video/view/\?id=|pics\.smotri\.com/(?:player|scrubber_custom8)\.swf\?file=)(?P<id>v(?P<realvideoid>[0-9]+)[a-z0-9]{4})' + _NETRC_MACHINE = 'smotri' + + _TESTS = [ + # real video id 2610366 + { + 'url': 'http://smotri.com/video/view/?id=v261036632ab', + 'md5': '2a7b08249e6f5636557579c368040eb9', + 'info_dict': { + 'id': 'v261036632ab', + 'ext': 'mp4', + 'title': 'п╨п╟я┌п╟я│я┌я─п╬я└п╟ я│ п╨п╟п╪п╣я─ п╡п╦п╢п╣п╬п╫п╟п╠п╩я▌п╢п╣п╫п╦я▐', + 'uploader': 'rbc2008', + 'uploader_id': 'rbc08', + 'upload_date': '20131118', + 'thumbnail': 'http://frame6.loadup.ru/8b/a9/2610366.3.3.jpg', + }, + }, + # real 
class SmotriIE(InfoExtractor):
    """Extractor for Smotri.com single videos.

    Metadata comes from a bot JSON endpoint; some extra fields (warning
    banner, adult-content gate, view count) are scraped from the HTML page.
    The mojibake Cyrillic strings below are present in this copy of the file
    and are preserved byte-for-byte (they are compared literally by tests
    and matched literally against pages).
    """
    IE_DESC = 'Smotri.com'
    IE_NAME = 'smotri'
    _VALID_URL = r'^https?://(?:www\.)?(?:smotri\.com/video/view/\?id=|pics\.smotri\.com/(?:player|scrubber_custom8)\.swf\?file=)(?P<id>v(?P<realvideoid>[0-9]+)[a-z0-9]{4})'
    _NETRC_MACHINE = 'smotri'

    _TESTS = [
        # real video id 2610366
        {
            'url': 'http://smotri.com/video/view/?id=v261036632ab',
            'md5': '2a7b08249e6f5636557579c368040eb9',
            'info_dict': {
                'id': 'v261036632ab',
                'ext': 'mp4',
                'title': 'п╨п╟я┌п╟я│я┌я─п╬я└п╟ я│ п╨п╟п╪п╣я─ п╡п╦п╢п╣п╬п╫п╟п╠п╩я▌п╢п╣п╫п╦я▐',
                'uploader': 'rbc2008',
                'uploader_id': 'rbc08',
                'upload_date': '20131118',
                'thumbnail': 'http://frame6.loadup.ru/8b/a9/2610366.3.3.jpg',
            },
        },
        # real video id 57591
        {
            'url': 'http://smotri.com/video/view/?id=v57591cb20',
            'md5': '830266dfc21f077eac5afd1883091bcd',
            'info_dict': {
                'id': 'v57591cb20',
                'ext': 'flv',
                'title': 'test',
                'uploader': 'Support Photofile@photofile',
                'uploader_id': 'support-photofile',
                'upload_date': '20070704',
                'thumbnail': 'http://frame4.loadup.ru/03/ed/57591.2.3.jpg',
            },
        },
        # video-password
        {
            'url': 'http://smotri.com/video/view/?id=v1390466a13c',
            'md5': 'f6331cef33cad65a0815ee482a54440b',
            'info_dict': {
                'id': 'v1390466a13c',
                'ext': 'mp4',
                'title': 'TOCCA_A_NOI_-_LE_COSE_NON_VANNO_CAMBIAMOLE_ORA-1',
                'uploader': 'timoxa40',
                'uploader_id': 'timoxa40',
                'upload_date': '20100404',
                'thumbnail': 'http://frame7.loadup.ru/af/3f/1390466.3.3.jpg',
            },
            'params': {
                'videopassword': 'qwerty',
            },
            'skip': 'Video is not approved by moderator',
        },
        # age limit + video-password
        {
            'url': 'http://smotri.com/video/view/?id=v15408898bcf',
            'md5': '91e909c9f0521adf5ee86fbe073aad70',
            'info_dict': {
                'id': 'v15408898bcf',
                'ext': 'flv',
                'title': 'я█я┌п╬я┌ я─п╬п╩п╦п╨ п╫п╣ п©п╬п╨п╟п╤я┐я┌ п©п╬ п╒п▓',
                'uploader': 'zzxxx',
                'uploader_id': 'ueggb',
                'upload_date': '20101001',
                'thumbnail': 'http://frame3.loadup.ru/75/75/1540889.1.3.jpg',
                'age_limit': 18,
            },
            'params': {
                'videopassword': '333'
            },
            'skip': 'Video is not approved by moderator',
        },
        # swf player
        {
            'url': 'http://pics.smotri.com/scrubber_custom8.swf?file=v9188090500',
            'md5': '4d47034979d9390d14acdf59c4935bc2',
            'info_dict': {
                'id': 'v9188090500',
                'ext': 'mp4',
                'title': 'Shakira - Don\'t Bother',
                'uploader': 'HannahL',
                'uploader_id': 'lisaha95',
                'upload_date': '20090331',
                'thumbnail': 'http://frame8.loadup.ru/44/0b/918809.7.3.jpg',
            },
        },
    ]

    @classmethod
    def _extract_url(cls, webpage):
        """Return a Smotri URL embedded in another site's page, or None."""
        mobj = re.search(
            r'<embed[^>]src=(["\'])(?P<url>http://pics\.smotri\.com/(?:player|scrubber_custom8)\.swf\?file=v.+?\1)',
            webpage)
        if mobj is not None:
            return mobj.group('url')

        mobj = re.search(
            r'''(?x)<div\s+class="video_file">http://smotri\.com/video/download/file/[^<]+</div>\s*
                    <div\s+class="video_image">[^<]+</div>\s*
                    <div\s+class="video_id">(?P<id>[^<]+)</div>''', webpage)
        if mobj is not None:
            return 'http://smotri.com/video/view/?id=%s' % mobj.group('id')

    def _search_meta(self, name, html, display_name=None):
        """Non-fatal lookup of a <meta itemprop=...> value in *html*."""
        if display_name is None:
            display_name = name
        # FIX: a second, unreachable 'return self._html_search_meta(...)'
        # statement that followed this return (dead code) was removed.
        return self._html_search_regex(
            r'<meta itemprop="%s" content="([^"]+)" />' % re.escape(name),
            html, display_name, fatal=False)

    def _real_extract(self, url):
        video_id = self._match_id(url)

        video_form = {
            'ticket': video_id,
            'video_url': '1',
            'frame_url': '1',
            'devid': 'LoadupFlashPlayer',
            'getvideoinfo': '1',
        }

        request = compat_urllib_request.Request(
            'http://smotri.com/video/view/url/bot/', compat_urllib_parse.urlencode(video_form))
        request.add_header('Content-Type', 'application/x-www-form-urlencoded')

        video = self._download_json(request, video_id, 'Downloading video JSON')

        if video.get('_moderate_no') or not video.get('moderated'):
            raise ExtractorError('Video %s has not been approved by moderator' % video_id, expected=True)

        if video.get('error'):
            raise ExtractorError('Video %s does not exist' % video_id, expected=True)

        video_url = video.get('_vidURL') or video.get('_vidURL_mp4')
        title = video['title']
        thumbnail = video['_imgURL']
        upload_date = unified_strdate(video['added'])
        uploader = video['userNick']
        uploader_id = video['userLogin']
        duration = int_or_none(video['duration'])

        # Video JSON does not provide enough meta data
        # We will extract some from the video web page instead
        webpage_url = 'http://smotri.com/video/view/?id=%s' % video_id
        webpage = self._download_webpage(webpage_url, video_id, 'Downloading video page')

        # Warning if video is unavailable
        warning = self._html_search_regex(
            r'<div class="videoUnModer">(.*?)</div>', webpage,
            'warning message', default=None)
        if warning is not None:
            self._downloader.report_warning(
                'Video %s may not be available; smotri said: %s ' %
                (video_id, warning))

        # Adult content: confirm age, then re-download the page.
        if re.search('EroConfirmText">', webpage) is not None:
            self.report_age_confirmation()
            confirm_string = self._html_search_regex(
                r'<a href="/video/view/\?id=%s&confirm=([^"]+)" title="[^"]+">' % video_id,
                webpage, 'confirm string')
            confirm_url = webpage_url + '&confirm=%s' % confirm_string
            webpage = self._download_webpage(confirm_url, video_id, 'Downloading video page (age confirmed)')
            adult_content = True
        else:
            adult_content = False

        view_count = self._html_search_regex(
            'п·п╠я┴п╣п╣ п╨п╬п╩п╦я┤п╣я│я┌п╡п╬ п©я─п╬я│п╪п╬я┌я─п╬п╡.*?<span class="Number">(\\d+)</span>',
            webpage, 'view count', fatal=False, flags=re.MULTILINE | re.DOTALL)

        return {
            'id': video_id,
            'url': video_url,
            'title': title,
            'thumbnail': thumbnail,
            'uploader': uploader,
            'upload_date': upload_date,
            'uploader_id': uploader_id,
            'duration': duration,
            'view_count': int_or_none(view_count),
            'age_limit': 18 if adult_content else 0,
        }
rss.find('./channel/description').text + community_title = self._html_search_regex( + '^п▓п╦п╢п╣п╬ я│п╬п╬п╠я┴п╣я│я┌п╡п╟ "([^"]+)"$', description_text, 'community title') + + return self.playlist_result(entries, community_id, community_title) + + +class SmotriUserIE(InfoExtractor): + IE_DESC = 'Smotri.com user videos' + IE_NAME = 'smotri:user' + _VALID_URL = r'^https?://(?:www\.)?smotri\.com/user/(?P<userid>[0-9A-Za-z_\'-]+)' + _TESTS = [{ + 'url': 'http://smotri.com/user/inspector', + 'info_dict': { + 'id': 'inspector', + 'title': 'Inspector', + }, + 'playlist_mincount': 9, + }] + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + user_id = mobj.group('userid') + + url = 'http://smotri.com/export/rss/user/video/-/%s/video.xml' % user_id + rss = self._download_xml(url, user_id, 'Downloading user RSS') + + entries = [self.url_result(video_url.text, 'Smotri') + for video_url in rss.findall('./channel/item/link')] + + description_text = rss.find('./channel/description').text + user_nickname = self._html_search_regex( + '^п▓п╦п╢п╣п╬ я─п╣п╤п╦я│я│п╣я─п╟ (.*)$', description_text, + 'user nickname') + + return self.playlist_result(entries, user_id, user_nickname) + + +class SmotriBroadcastIE(InfoExtractor): + IE_DESC = 'Smotri.com broadcasts' + IE_NAME = 'smotri:broadcast' + _VALID_URL = r'^https?://(?:www\.)?(?P<url>smotri\.com/live/(?P<broadcastid>[^/]+))/?.*' + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + broadcast_id = mobj.group('broadcastid') + + broadcast_url = 'http://' + mobj.group('url') + broadcast_page = self._download_webpage(broadcast_url, broadcast_id, 'Downloading broadcast page') + + if re.search('>п═п╣п╤п╦я│я│п╣я─ я│ п╩п╬пЁп╦п╫п╬п╪ <br/>"%s"<br/> <span>п╫п╣ я│я┐я┴п╣я│я┌п╡я┐п╣я┌<' % broadcast_id, broadcast_page) is not None: + raise ExtractorError( + 'Broadcast %s does not exist' % broadcast_id, expected=True) + + # Adult content + if re.search('EroConfirmText">', broadcast_page) is not None: + + 
class SmotriBroadcastIE(InfoExtractor):
    """Extractor for live Smotri.com broadcasts (RTMP streams).

    Mojibake Cyrillic match strings present in this copy are preserved
    byte-for-byte; they are matched literally against the page.
    """
    IE_DESC = 'Smotri.com broadcasts'
    IE_NAME = 'smotri:broadcast'
    _VALID_URL = r'^https?://(?:www\.)?(?P<url>smotri\.com/live/(?P<broadcastid>[^/]+))/?.*'

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        broadcast_id = mobj.group('broadcastid')

        broadcast_url = 'http://' + mobj.group('url')
        broadcast_page = self._download_webpage(broadcast_url, broadcast_id, 'Downloading broadcast page')

        if re.search('>п═п╣п╤п╦я│я│п╣я─ я│ п╩п╬пЁп╦п╫п╬п╪ <br/>"%s"<br/> <span>п╫п╣ я│я┐я┴п╣я│я┌п╡я┐п╣я┌<' % broadcast_id, broadcast_page) is not None:
            raise ExtractorError(
                'Broadcast %s does not exist' % broadcast_id, expected=True)

        # Adult content: the gate requires an authenticated account.
        if re.search('EroConfirmText">', broadcast_page) is not None:

            (username, password) = self._get_login_info()
            if username is None:
                raise ExtractorError(
                    'Erotic broadcasts allowed only for registered users, '
                    'use --username and --password options to provide account credentials.',
                    expected=True)

            login_form = {
                'login-hint53': '1',
                'confirm_erotic': '1',
                'login': username,
                'password': password,
            }

            request = compat_urllib_request.Request(
                broadcast_url + '/?no_redirect=1', compat_urllib_parse.urlencode(login_form))
            request.add_header('Content-Type', 'application/x-www-form-urlencoded')
            broadcast_page = self._download_webpage(
                request, broadcast_id, 'Logging in and confirming age')

            if re.search('>п²п╣п╡п╣я─п╫я▀п╧ п╩п╬пЁп╦п╫ п╦п╩п╦ п©п╟я─п╬п╩я▄<', broadcast_page) is not None:
                raise ExtractorError('Unable to log in: bad username or password', expected=True)

            adult_content = True
        else:
            adult_content = False

        ticket = self._html_search_regex(
            r"window\.broadcast_control\.addFlashVar\('file'\s*,\s*'([^']+)'\)",
            broadcast_page, 'broadcast ticket')

        url = 'http://smotri.com/broadcast/view/url/?ticket=%s' % ticket

        broadcast_password = self._downloader.params.get('videopassword', None)
        if broadcast_password:
            url += '&pass=%s' % hashlib.md5(broadcast_password.encode('utf-8')).hexdigest()

        broadcast_json_page = self._download_webpage(
            url, broadcast_id, 'Downloading broadcast JSON')

        # FIX: initialize before the try block.  Previously, if the JSON
        # lacked '_pass_protected', the KeyError handler below read the
        # still-unbound name and crashed with a NameError instead of the
        # intended 'Unexpected broadcast JSON' error.
        protected_broadcast = False
        try:
            broadcast_json = json.loads(broadcast_json_page)

            protected_broadcast = broadcast_json['_pass_protected'] == 1
            if protected_broadcast and not broadcast_password:
                raise ExtractorError(
                    'This broadcast is protected by a password, use the --video-password option',
                    expected=True)

            broadcast_offline = broadcast_json['is_play'] == 0
            if broadcast_offline:
                raise ExtractorError('Broadcast %s is offline' % broadcast_id, expected=True)

            rtmp_url = broadcast_json['_server']
            mobj = re.search(r'^rtmp://[^/]+/(?P<app>.+)/?$', rtmp_url)
            if not mobj:
                raise ExtractorError('Unexpected broadcast rtmp URL')

            broadcast_playpath = broadcast_json['_streamName']
            broadcast_app = '%s/%s' % (mobj.group('app'), broadcast_json['_vidURL'])
            broadcast_thumbnail = broadcast_json['_imgURL']
            broadcast_title = self._live_title(broadcast_json['title'])
            broadcast_description = broadcast_json['description']
            broadcaster_nick = broadcast_json['nick']
            broadcaster_login = broadcast_json['login']
            rtmp_conn = 'S:%s' % uuid.uuid4().hex
        except KeyError:
            if protected_broadcast:
                raise ExtractorError('Bad broadcast password', expected=True)
            raise ExtractorError('Unexpected broadcast JSON')

        return {
            'id': broadcast_id,
            'url': rtmp_url,
            'title': broadcast_title,
            'thumbnail': broadcast_thumbnail,
            'description': broadcast_description,
            'uploader': broadcaster_nick,
            'uploader_id': broadcaster_login,
            'age_limit': 18 if adult_content else 0,
            'ext': 'flv',
            'play_path': broadcast_playpath,
            'player_url': 'http://pics.smotri.com/broadcast_play.swf',
            'app': broadcast_app,
            'rtmp_live': True,
            'rtmp_conn': rtmp_conn,
            'is_live': True,
        }
flying through Fourth of July Fireworks', + } + }, { + 'url': 'http://www.snotr.com/video/530/David_Letteman_-_George_W_Bush_Top_10', + 'info_dict': { + 'id': '530', + 'ext': 'flv', + 'title': 'David Letteman - George W. Bush Top 10', + 'duration': 126, + 'filesize_approx': 8912896, + 'description': 'The top 10 George W. Bush moments, brought to you by David Letterman!', + } + }] + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group('id') + + webpage = self._download_webpage(url, video_id) + title = self._og_search_title(webpage) + + description = self._og_search_description(webpage) + video_url = "http://cdn.videos.snotr.com/%s.flv" % video_id + + view_count = str_to_int(self._html_search_regex( + r'<p>\n<strong>Views:</strong>\n([\d,\.]+)</p>', + webpage, 'view count', fatal=False)) + + duration = parse_duration(self._html_search_regex( + r'<p>\n<strong>Length:</strong>\n\s*([0-9:]+).*?</p>', + webpage, 'duration', fatal=False)) + + filesize_approx = float_or_none(self._html_search_regex( + r'<p>\n<strong>Filesize:</strong>\n\s*([0-9.]+)\s*megabyte</p>', + webpage, 'filesize', fatal=False), invscale=1024 * 1024) + + return { + 'id': video_id, + 'description': description, + 'title': title, + 'url': video_url, + 'view_count': view_count, + 'duration': duration, + 'filesize_approx': filesize_approx, + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/sockshare.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/sockshare.py new file mode 100644 index 0000000000..7d3c0e9378 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/sockshare.py @@ -0,0 +1,84 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import re + +from ..compat import ( + compat_urllib_parse, + compat_urllib_request, +) +from ..utils import ( + determine_ext, + ExtractorError, +) + +from .common import InfoExtractor + + +class SockshareIE(InfoExtractor): 
class SockshareIE(InfoExtractor):
    """Extractor for sockshare.com file-hosting pages."""

    _VALID_URL = r'https?://(?:www\.)?sockshare\.com/file/(?P<id>[0-9A-Za-z]+)'
    # Marker text shown on the page when a file has been taken down.
    _FILE_DELETED_REGEX = r'This file doesn\'t exist, or has been removed\.</div>'
    _TEST = {
        'url': 'http://www.sockshare.com/file/437BE28B89D799D7',
        'md5': '9d0bf1cfb6dbeaa8d562f6c97506c5bd',
        'info_dict': {
            'id': '437BE28B89D799D7',
            'title': 'big_buck_bunny_720p_surround.avi',
            'ext': 'avi',
            'thumbnail': 're:^http://.*\.jpg$',
        }
    }

    def _real_extract(self, url):
        """Fetch the landing page, submit the "Continue as Free User"
        confirmation form, then scrape the direct file URL, title and
        thumbnail from the resulting page.
        """
        video_id = self._match_id(url)
        # Canonicalize to the non-www form; the confirmation form below
        # is POSTed back to this same URL.
        url = 'http://sockshare.com/file/%s' % video_id
        webpage = self._download_webpage(url, video_id)

        if re.search(self._FILE_DELETED_REGEX, webpage) is not None:
            raise ExtractorError('Video %s does not exist' % video_id,
                                 expected=True)

        # Hidden anti-bot token that must be echoed back in the POST.
        confirm_hash = self._html_search_regex(r'''(?x)<input\s+
            type="hidden"\s+
            value="([^"]*)"\s+
            name="hash"
            ''', webpage, 'hash')

        fields = {
            "hash": confirm_hash,
            "confirm": "Continue as Free User"
        }

        post = compat_urllib_parse.urlencode(fields)
        req = compat_urllib_request.Request(url, post)
        # Apparently, this header is required for confirmation to work.
        req.add_header('Host', 'www.sockshare.com')
        req.add_header('Content-type', 'application/x-www-form-urlencoded')

        webpage = self._download_webpage(
            req, video_id, 'Downloading video page')

        video_url = self._html_search_regex(
            r'<a href="([^"]*)".+class="download_file_link"',
            webpage, 'file url')
        # The download link is site-relative.
        video_url = "http://www.sockshare.com" + video_url
        title = self._html_search_regex((
            r'<h1>(.+)<strong>',
            r'var name = "([^"]+)";'),
            webpage, 'title', default=None)
        thumbnail = self._html_search_regex(
            r'<img\s+src="([^"]*)".+?name="bg"',
            webpage, 'thumbnail')

        # NOTE(review): if both title regexes fail, title is None and
        # determine_ext(None) raises AttributeError -- confirm whether a
        # fallback (e.g. the URL's extension) is intended here.
        formats = [{
            'format_id': 'sd',
            'url': video_url,
            'ext': determine_ext(title),
        }]

        return {
            'id': video_id,
            'title': title,
            'thumbnail': thumbnail,
            'formats': formats,
        }
class SohuIE(InfoExtractor):
    """Extractor for tv.sohu.com and my.tv.sohu.com video pages."""

    # The conditional group (?(mytv)|n) requires the 'n' prefix on the
    # numeric id only for the main tv.sohu.com site, not for my.tv.
    _VALID_URL = r'https?://(?P<mytv>my\.)?tv\.sohu\.com/.+?/(?(mytv)|n)(?P<id>\d+)\.shtml.*?'

    _TEST = {
        'url': 'http://tv.sohu.com/20130724/n382479172.shtml#super',
        'md5': 'bde8d9a6ffd82c63a1eefaef4eeefec7',
        'info_dict': {
            'id': '382479172',
            'ext': 'mp4',
            'title': 'MVО╪ Far East MovementЦ─┼The IllestЦ─▀',
        },
        'skip': 'Only available from China',
    }

    def _real_extract(self, url):

        def _fetch_data(vid_id, mytv=False):
            # Download the format JSON for one vid; my.tv uses a
            # different API host than the main site.  Closes over
            # video_id (bound below, before the first call).
            if mytv:
                base_data_url = 'http://my.tv.sohu.com/play/videonew.do?vid='
            else:
                base_data_url = 'http://hot.vrs.sohu.com/vrs_flash.action?vid='
            data_url = base_data_url + str(vid_id)
            data_json = self._download_webpage(
                data_url, video_id,
                note='Downloading JSON data for ' + str(vid_id))
            return json.loads(data_json)

        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        mytv = mobj.group('mytv') is not None

        webpage = self._download_webpage(url, video_id)
        raw_title = self._html_search_regex(r'(?s)<title>(.+?)</title>',
                                            webpage, 'video title')
        # Page titles look like "<video name>-<site suffix>".
        title = raw_title.partition('-')[0].strip()

        vid = self._html_search_regex(r'var vid ?= ?["\'](\d+)["\']', webpage,
                                      'video path')
        data = _fetch_data(vid, mytv)

        # Quality-specific vids, ordered worst to best; a value of 0
        # means that quality is not available.
        QUALITIES = ('ori', 'super', 'high', 'nor')
        vid_ids = [data['data'][q + 'Vid']
                   for q in QUALITIES
                   if data['data'][q + 'Vid'] != 0]
        if not vid_ids:
            raise ExtractorError('No formats available for this video')

        # For now, we just pick the highest available quality
        vid_id = vid_ids[-1]

        # Avoid a second API round-trip when the page vid already is the
        # best-quality vid.
        format_data = data if vid == vid_id else _fetch_data(vid_id, mytv)
        part_count = format_data['data']['totalBlocks']
        allot = format_data['allot']
        prot = format_data['prot']
        clipsURL = format_data['data']['clipsURL']
        su = format_data['data']['su']

        # Long videos are split into parts; each part needs its own key
        # request and becomes one playlist entry.
        playlist = []
        for i in range(part_count):
            part_url = ('http://%s/?prot=%s&file=%s&new=%s' %
                        (allot, prot, clipsURL[i], su[i]))
            part_str = self._download_webpage(
                part_url, video_id,
                note='Downloading part %d of %d' % (i + 1, part_count))

            # Response is a '|'-separated record; field 0 is the CDN
            # base URL and field 3 the access key.
            part_info = part_str.split('|')
            video_url = '%s%s?key=%s' % (part_info[0], su[i], part_info[3])

            video_info = {
                'id': '%s_part%02d' % (video_id, i + 1),
                'title': title,
                'url': video_url,
                'ext': 'mp4',
            }
            playlist.append(video_info)

        # Single-part videos are returned directly instead of as a
        # one-entry playlist.
        if len(playlist) == 1:
            info = playlist[0]
            info['id'] = video_id
        else:
            info = {
                '_type': 'playlist',
                'entries': playlist,
                'id': video_id,
            }

        return info
class SoundcloudIE(InfoExtractor):
    """Information extractor for soundcloud.com

    To access the media, the uid of the song and a stream token must be
    extracted from the page source and the script must make a request to
    media.soundcloud.com/crossdomain.xml. Then the media can be grabbed
    by requesting from an url composed of the stream token and uid.
    """

    # Matches plain track pages (with optional private-link token),
    # direct api.soundcloud.com track URLs (with optional secret_token),
    # and embedded-player URLs.
    _VALID_URL = r'''(?x)^(?:https?://)?
                    (?:(?:(?:www\.|m\.)?soundcloud\.com/
                            (?P<uploader>[\w\d-]+)/
                            (?!sets/|likes/?(?:$|[?#]))
                            (?P<title>[\w\d-]+)/?
                            (?P<token>[^?]+?)?(?:[?].*)?$)
                       |(?:api\.soundcloud\.com/tracks/(?P<track_id>\d+)
                          (?:/?\?secret_token=(?P<secret_token>[^&]+))?)
                       |(?P<player>(?:w|player|p.)\.soundcloud\.com/player/?.*?url=.*)
                    )
                    '''
    IE_NAME = 'soundcloud'
    _TESTS = [
        {
            'url': 'http://soundcloud.com/ethmusic/lostin-powers-she-so-heavy',
            'md5': 'ebef0a451b909710ed1d7787dddbf0d7',
            'info_dict': {
                'id': '62986583',
                'ext': 'mp3',
                'upload_date': '20121011',
                'description': 'No Downloads untill we record the finished version this weekend, i was too pumped n i had to post it , earl is prolly gonna b hella p.o\'d',
                'uploader': 'E.T. ExTerrestrial Music',
                'title': 'Lostin Powers - She so Heavy (SneakPreview) Adrian Ackers Blueprint 1',
                'duration': 143,
            }
        },
        # not streamable song
        {
            'url': 'https://soundcloud.com/the-concept-band/goldrushed-mastered?in=the-concept-band/sets/the-royal-concept-ep',
            'info_dict': {
                'id': '47127627',
                'ext': 'mp3',
                'title': 'Goldrushed',
                'description': 'From Stockholm Sweden\r\nPovel / Magnus / Filip / David\r\nwww.theroyalconcept.com',
                'uploader': 'The Royal Concept',
                'upload_date': '20120521',
                'duration': 227,
            },
            'params': {
                # rtmp
                'skip_download': True,
            },
        },
        # private link
        {
            'url': 'https://soundcloud.com/jaimemf/youtube-dl-test-video-a-y-baw/s-8Pjrp',
            'md5': 'aa0dd32bfea9b0c5ef4f02aacd080604',
            'info_dict': {
                'id': '123998367',
                'ext': 'mp3',
                'title': 'Youtube - Dl Test Video \'\' Aл┬Б├╜',
                'uploader': 'jaimeMF',
                'description': 'test chars: \"\'/\\ц╓Б├╜',
                'upload_date': '20131209',
                'duration': 9,
            },
        },
        # private link (alt format)
        {
            'url': 'https://api.soundcloud.com/tracks/123998367?secret_token=s-8Pjrp',
            'md5': 'aa0dd32bfea9b0c5ef4f02aacd080604',
            'info_dict': {
                'id': '123998367',
                'ext': 'mp3',
                'title': 'Youtube - Dl Test Video \'\' Aл┬Б├╜',
                'uploader': 'jaimeMF',
                'description': 'test chars: \"\'/\\ц╓Б├╜',
                'upload_date': '20131209',
                'duration': 9,
            },
        },
        # downloadable song
        {
            'url': 'https://soundcloud.com/oddsamples/bus-brakes',
            'md5': '7624f2351f8a3b2e7cd51522496e7631',
            'info_dict': {
                'id': '128590877',
                'ext': 'mp3',
                'title': 'Bus Brakes',
                'description': 'md5:0053ca6396e8d2fd7b7e1595ef12ab66',
                'uploader': 'oddsamples',
                'upload_date': '20140109',
                'duration': 17,
            },
        },
    ]

    # API client ids embedded in the site's player; the iPhone id is
    # needed for the i1 streams endpoint used below.
    _CLIENT_ID = 'b45b1aa10f1ac2941910a7f0d10f8e28'
    _IPHONE_CLIENT_ID = '376f225bf427445fc4bfb6b99b72e0bf'

    def report_resolve(self, video_id):
        """Report information extraction."""
        self.to_screen('%s: Resolving id' % video_id)

    @classmethod
    def _resolv_url(cls, url):
        # The resolve endpoint turns a public page URL into track JSON.
        return 'http://api.soundcloud.com/resolve.json?url=' + url + '&client_id=' + cls._CLIENT_ID

    def _extract_info_dict(self, info, full_title=None, quiet=False, secret_token=None):
        """Build the youtube-dl info dict from a track's API JSON.

        secret_token, when given, is forwarded to the streams endpoint
        so private tracks can be fetched.
        """
        track_id = compat_str(info['id'])
        name = full_title or track_id
        # NOTE(review): reporting extraction only when quiet is True
        # looks inverted -- confirm the intended meaning of this flag.
        if quiet:
            self.report_extraction(name)

        thumbnail = info['artwork_url']
        if thumbnail is not None:
            # Upgrade the artwork to the 500x500 rendition.
            thumbnail = thumbnail.replace('-large', '-t500x500')
        ext = 'mp3'
        result = {
            'id': track_id,
            'uploader': info['user']['username'],
            'upload_date': unified_strdate(info['created_at']),
            'title': info['title'],
            'description': info['description'],
            'thumbnail': thumbnail,
            # API reports milliseconds; scale to seconds.
            'duration': int_or_none(info.get('duration'), 1000),
            'webpage_url': info.get('permalink_url'),
        }
        formats = []
        if info.get('downloadable', False):
            # We can build a direct link to the song
            format_url = (
                'https://api.soundcloud.com/tracks/{0}/download?client_id={1}'.format(
                    track_id, self._CLIENT_ID))
            formats.append({
                'format_id': 'download',
                'ext': info.get('original_format', 'mp3'),
                'url': format_url,
                'vcodec': 'none',
                'preference': 10,
            })

        # We have to retrieve the url
        streams_url = ('http://api.soundcloud.com/i1/tracks/{0}/streams?'
                       'client_id={1}&secret_token={2}'.format(track_id, self._IPHONE_CLIENT_ID, secret_token))
        format_dict = self._download_json(
            streams_url,
            track_id, 'Downloading track url')

        for key, stream_url in format_dict.items():
            if key.startswith('http'):
                formats.append({
                    'format_id': key,
                    'ext': ext,
                    'url': stream_url,
                    'vcodec': 'none',
                })
            elif key.startswith('rtmp'):
                # The url doesn't have an rtmp app, we have to extract the playpath
                url, path = stream_url.split('mp3:', 1)
                formats.append({
                    'format_id': key,
                    'url': url,
                    'play_path': 'mp3:' + path,
                    'ext': ext,
                    'vcodec': 'none',
                })

        if not formats:
            # We fallback to the stream_url in the original info, this
            # cannot be always used, sometimes it can give an HTTP 404 error
            formats.append({
                'format_id': 'fallback',
                'url': info['stream_url'] + '?client_id=' + self._CLIENT_ID,
                'ext': ext,
                'vcodec': 'none',
            })

        for f in formats:
            if f['format_id'].startswith('http'):
                f['protocol'] = 'http'
            if f['format_id'].startswith('rtmp'):
                f['protocol'] = 'rtmp'

        self._sort_formats(formats)
        result['formats'] = formats

        return result

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url, flags=re.VERBOSE)
        if mobj is None:
            raise ExtractorError('Invalid URL: %s' % url)

        track_id = mobj.group('track_id')
        token = None
        if track_id is not None:
            # Direct api.soundcloud.com URL: the track JSON can be
            # fetched without going through the resolver.
            info_json_url = 'http://api.soundcloud.com/tracks/' + track_id + '.json?client_id=' + self._CLIENT_ID
            full_title = track_id
            token = mobj.group('secret_token')
            if token:
                info_json_url += "&secret_token=" + token
        elif mobj.group('player'):
            # Embedded player: the real track URL is in the 'url' query
            # parameter; hand it back to the extraction machinery.
            query = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
            return self.url_result(query['url'][0])
        else:
            # extract uploader (which is in the url)
            uploader = mobj.group('uploader')
            # extract simple title (uploader + slug of song title)
            slug_title = mobj.group('title')
            token = mobj.group('token')
            full_title = resolve_title = '%s/%s' % (uploader, slug_title)
            if token:
                # Private-link token is an extra path component.
                resolve_title += '/%s' % token

            self.report_resolve(full_title)

            url = 'http://soundcloud.com/%s' % resolve_title
            info_json_url = self._resolv_url(url)
        info = self._download_json(info_json_url, full_title, 'Downloading info JSON')

        return self._extract_info_dict(info, full_title, secret_token=token)
class SoundcloudSetIE(SoundcloudIE):
    _VALID_URL = r'https?://(?:www\.)?soundcloud\.com/(?P<uploader>[\w\d-]+)/sets/(?P<slug_title>[\w\d-]+)(?:/(?P<token>[^?/]+))?'
    IE_NAME = 'soundcloud:set'
    _TESTS = [{
        'url': 'https://soundcloud.com/the-concept-band/sets/the-royal-concept-ep',
        'info_dict': {
            'title': 'The Royal Concept EP',
        },
        'playlist_mincount': 6,
    }]

    def _real_extract(self, url):
        """Resolve a soundcloud set URL and return its tracks as a playlist."""
        match = re.match(self._VALID_URL, url)

        # Both the uploader and the set slug are part of the URL itself.
        uploader = match.group('uploader')
        slug_title = match.group('slug_title')
        token = match.group('token')

        full_title = '%s/sets/%s' % (uploader, slug_title)
        url = 'http://soundcloud.com/%s/sets/%s' % (uploader, slug_title)
        if token:
            # Private sets carry an extra access-token path component.
            full_title += '/' + token
            url += '/' + token

        self.report_resolve(full_title)

        info = self._download_json(self._resolv_url(url), full_title)

        if 'errors' in info:
            for err in info['errors']:
                self._downloader.report_error('unable to download video webpage: %s' % compat_str(err['error_message']))
            return

        entries = [
            self._extract_info_dict(track, secret_token=token)
            for track in info['tracks']]

        return {
            '_type': 'playlist',
            'entries': entries,
            'id': info['id'],
            'title': info['title'],
        }
class SoundcloudUserIE(SoundcloudIE):
    _VALID_URL = r'https?://(www\.)?soundcloud\.com/(?P<user>[^/]+)/?((?P<rsrc>tracks|likes)/?)?(\?.*)?$'
    IE_NAME = 'soundcloud:user'
    _TESTS = [{
        'url': 'https://soundcloud.com/the-concept-band',
        'info_dict': {
            'id': '9615865',
            'title': 'The Royal Concept',
        },
        'playlist_mincount': 12
    }, {
        'url': 'https://soundcloud.com/the-concept-band/likes',
        'info_dict': {
            'id': '9615865',
            'title': 'The Royal Concept',
        },
        'playlist_mincount': 1,
    }]

    def _real_extract(self, url):
        """Return every track (or liked track) of a user as a playlist."""
        match = re.match(self._VALID_URL, url)
        uploader = match.group('user')

        # Map the URL resource to the API resource name; a bare profile
        # URL means the user's own tracks, 'likes' maps to 'favorites'.
        rsrc = match.group('rsrc')
        if rsrc is None:
            resource = 'tracks'
        elif rsrc == 'likes':
            resource = 'favorites'
        else:
            resource = rsrc

        user = self._download_json(
            self._resolv_url('http://soundcloud.com/%s/' % uploader),
            uploader, 'Downloading user info')
        base_url = 'http://api.soundcloud.com/users/%s/%s.json?' % (uploader, resource)

        # Page through the API 50 tracks at a time until an empty page
        # signals the end of the listing.
        entries = []
        for page_num in itertools.count():
            query = compat_urllib_parse.urlencode({
                'offset': page_num * 50,
                'limit': 50,
                'client_id': self._CLIENT_ID,
            })
            new_entries = self._download_json(
                base_url + query, uploader, 'Downloading track page %s' % (page_num + 1))
            if not new_entries:
                self.to_screen('%s: End page received' % uploader)
                break
            entries.extend(
                self._extract_info_dict(entry, quiet=True)
                for entry in new_entries)

        return {
            '_type': 'playlist',
            'id': compat_str(user['id']),
            'title': user['username'],
            'entries': entries,
        }
class SoundgasmIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?soundgasm\.net/u/(?P<user>[0-9a-zA-Z_\-]+)/(?P<title>[0-9a-zA-Z_\-]+)'
    _TEST = {
        'url': 'http://soundgasm.net/u/ytdl/Piano-sample',
        'md5': '010082a2c802c5275bb00030743e75ad',
        'info_dict': {
            'id': '88abd86ea000cafe98f96321b23cc1206cbcbcc9',
            'ext': 'm4a',
            'title': 'ytdl_Piano-sample',
            'description': 'Royalty Free Sample Music'
        }
    }

    def _real_extract(self, url):
        """Scrape the m4a URL and description from a soundgasm page."""
        match = re.match(self._VALID_URL, url)
        display_id = match.group('title')
        # The reported title is "<user>_<slug>".
        audio_title = '%s_%s' % (match.group('user'), match.group('title'))

        webpage = self._download_webpage(url, display_id)

        audio_url = self._html_search_regex(
            r'(?s)m4a\:\s"([^"]+)"', webpage, 'audio URL')
        # The media file name (the next-to-last path/extension component
        # of the URL) serves as the stable audio id.
        audio_id = re.split('\/|\.', audio_url)[-2]

        description = self._html_search_regex(
            r'(?s)<li>Description:\s(.*?)<\/li>', webpage, 'description',
            fatal=False)

        return {
            'id': audio_id,
            'display_id': display_id,
            'url': audio_url,
            'title': audio_title,
            'description': description
        }
class SouthParkIE(MTVServicesInfoExtractor):
    # southpark.cc.com clips and full episodes; all heavy lifting
    # (feed download, format extraction) lives in MTVServicesInfoExtractor.
    IE_NAME = 'southpark.cc.com'
    _VALID_URL = r'https?://(www\.)?(?P<url>southpark\.cc\.com/(clips|full-episodes)/(?P<id>.+?)(\?|#|$))'

    # MRSS feed that maps a page URL to its actual media items.
    _FEED_URL = 'http://www.southparkstudios.com/feeds/video-player/mrss'

    _TESTS = [{
        'url': 'http://southpark.cc.com/clips/104437/bat-daded#tab=featured',
        'info_dict': {
            'id': 'a7bff6c2-ed00-11e0-aca6-0026b9414f30',
            'ext': 'mp4',
            'title': 'South Park|Bat Daded',
            'description': 'Randy disqualifies South Park by getting into a fight with Bat Dad.',
        },
    }]


class SouthparkDeIE(SouthParkIE):
    # German mirror: same page layout, different host and feed endpoint.
    IE_NAME = 'southpark.de'
    _VALID_URL = r'https?://(www\.)?(?P<url>southpark\.de/(clips|alle-episoden)/(?P<id>.+?)(\?|#|$))'
    _FEED_URL = 'http://www.southpark.de/feeds/video-player/mrss/'

    _TESTS = [{
        'url': 'http://www.southpark.de/clips/uygssh/the-government-wont-respect-my-privacy#tab=featured',
        'info_dict': {
            'id': '85487c96-b3b9-4e39-9127-ad88583d9bf2',
            'ext': 'mp4',
            'title': 'The Government Won\'t Respect My Privacy',
            'description': 'Cartman explains the benefits of "Shitter" to Stan, Kyle and Craig.',
        },
    }]
class SouthParkStudiosIE(MTVServicesInfoExtractor):
    # Legacy southparkstudios.com URLs; extraction is entirely handled
    # by MTVServicesInfoExtractor via the MRSS feed below.
    IE_NAME = 'southparkstudios.com'
    _VALID_URL = r'https?://(www\.)?(?P<url>southparkstudios\.com/(clips|full-episodes)/(?P<id>.+?)(\?|#|$))'

    _FEED_URL = 'http://www.southparkstudios.com/feeds/video-player/mrss'

    _TESTS = [{
        'url': 'http://www.southparkstudios.com/clips/104437/bat-daded#tab=featured',
        'info_dict': {
            'id': 'a7bff6c2-ed00-11e0-aca6-0026b9414f30',
            'ext': 'mp4',
            'title': 'Bat Daded',
            'description': 'Randy disqualifies South Park by getting into a fight with Bat Dad.',
        },
    }]


class SouthparkDeIE(SouthParkStudiosIE):
    # German mirror of the above.
    # NOTE(review): a class with this exact name also exists in the
    # sibling southpark.py module -- confirm which one the extractor
    # registry is meant to pick up.
    IE_NAME = 'southpark.de'
    _VALID_URL = r'https?://(www\.)?(?P<url>southpark\.de/(clips|alle-episoden)/(?P<id>.+?)(\?|#|$))'
    _FEED_URL = 'http://www.southpark.de/feeds/video-player/mrss/'

    _TESTS = [{
        'url': 'http://www.southpark.de/clips/uygssh/the-government-wont-respect-my-privacy#tab=featured',
        'info_dict': {
            'id': '85487c96-b3b9-4e39-9127-ad88583d9bf2',
            'ext': 'mp4',
            'title': 'The Government Won\'t Respect My Privacy',
            'description': 'Cartman explains the benefits of "Shitter" to Stan, Kyle and Craig.',
        },
    }]
class SpaceIE(InfoExtractor):
    _VALID_URL = r'https?://(?:(?:www|m)\.)?space\.com/\d+-(?P<title>[^/\.\?]*?)-video\.html'
    _TEST = {
        'add_ie': ['Brightcove'],
        'url': 'http://www.space.com/23373-huge-martian-landforms-detail-revealed-by-european-probe-video.html',
        'info_dict': {
            'id': '2780937028001',
            'ext': 'mp4',
            'title': 'Huge Martian Landforms\' Detail Revealed By European Probe | Video',
            'description': 'md5:db81cf7f3122f95ed234b631a6ea1e61',
            'uploader': 'TechMedia Networks',
        },
    }

    def _real_extract(self, url):
        """Locate the embedded Brightcove player URL on a space.com page
        and delegate extraction to the Brightcove extractor."""
        title = re.match(self._VALID_URL, url).group('title')
        webpage = self._download_webpage(url, title)

        try:
            # Some videos require the playerKey field, which isn't defined
            # in the BrightcoveExperience object, so try the og: video URL
            # first.
            brightcove_url = self._og_search_video_url(webpage)
        except RegexNotFoundError:
            # Other videos work fine with the info from the object.
            brightcove_url = BrightcoveIE._extract_brightcove_url(webpage)

        if brightcove_url is None:
            raise ExtractorError(
                'The webpage does not contain a video', expected=True)

        return self.url_result(brightcove_url, BrightcoveIE.ie_key())
class SpankwireIE(InfoExtractor):
    """Extractor for spankwire.com video pages."""

    _VALID_URL = r'https?://(?:www\.)?(?P<url>spankwire\.com/[^/]*/video(?P<videoid>[0-9]+)/?)'
    _TEST = {
        'url': 'http://www.spankwire.com/Buckcherry-s-X-Rated-Music-Video-Crazy-Bitch/video103545/',
        'md5': '8bbfde12b101204b39e4b9fe7eb67095',
        'info_dict': {
            'id': '103545',
            'ext': 'mp4',
            'title': 'Buckcherry`s X Rated Music Video Crazy Bitch',
            'description': 'Crazy Bitch X rated music video.',
            'uploader': 'oreusz',
            'uploader_id': '124697',
            'upload_date': '20070508',
            'age_limit': 18,
        }
    }

    def _real_extract(self, url):
        """Scrape metadata and CDN URLs, decrypting the URLs when the
        page marks them as AES-encrypted."""
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('videoid')
        url = 'http://www.' + mobj.group('url')

        # Pre-set the age cookie so the real page (not the age gate) is
        # returned.
        req = compat_urllib_request.Request(url)
        req.add_header('Cookie', 'age_verified=1')
        webpage = self._download_webpage(req, video_id)

        title = self._html_search_regex(
            r'<h1>([^<]+)', webpage, 'title')
        description = self._html_search_regex(
            r'<div\s+id="descriptionContent">([^<]+)<',
            webpage, 'description', fatal=False)
        thumbnail = self._html_search_regex(
            r'playerData\.screenShot\s*=\s*["\']([^"\']+)["\']',
            webpage, 'thumbnail', fatal=False)

        uploader = self._html_search_regex(
            r'by:\s*<a [^>]*>(.+?)</a>',
            webpage, 'uploader', fatal=False)
        uploader_id = self._html_search_regex(
            r'by:\s*<a href="/Profile\.aspx\?.*?UserId=(\d+).*?"',
            webpage, 'uploader id', fatal=False)
        upload_date = unified_strdate(self._html_search_regex(
            r'</a> on (.+?) at \d+:\d+',
            webpage, 'upload date', fatal=False))

        view_count = str_to_int(self._html_search_regex(
            r'<div id="viewsCounter"><span>([\d,\.]+)</span> views</div>',
            webpage, 'view count', fatal=False))
        comment_count = str_to_int(self._html_search_regex(
            r'Comments<span[^>]+>\s*\(([\d,\.]+)\)</span>',
            webpage, 'comment count', fatal=False))

        video_urls = list(map(
            compat_urllib_parse.unquote,
            re.findall(r'playerData\.cdnPath[0-9]{3,}\s*=\s*["\']([^"\']+)["\']', webpage)))
        # The page sets flashvars.encrypted = "true" when the CDN paths
        # are AES-encrypted with the (obfuscated) title as key.
        # BUGFIX: the previous check was
        #     webpage.find('flashvars\.encrypted = "true"') != -1
        # i.e. str.find() with a regex-escaped literal containing a real
        # backslash, which can never occur in the HTML -- so encrypted
        # videos were never decrypted. Use a proper regex search instead.
        if re.search(r'flashvars\.encrypted\s*=\s*"true"', webpage):
            password = self._html_search_regex(
                r'flashvars\.video_title = "([^"]+)',
                webpage, 'password').replace('+', ' ')
            video_urls = list(map(
                lambda s: aes_decrypt_text(s, password, 32).decode('utf-8'),
                video_urls))

        formats = []
        for video_url in video_urls:
            # URL path component 4 encodes "<resolution>_<bitrate>",
            # e.g. ".../103545/720P_2000K_....mp4".
            path = compat_urllib_parse_urlparse(video_url).path
            fmt = path.split('/')[4].split('_')[:2]
            resolution, bitrate_str = fmt
            fmt = "-".join(fmt)
            height = int(resolution.rstrip('Pp'))
            tbr = int(bitrate_str.rstrip('Kk'))
            formats.append({
                'url': video_url,
                'resolution': resolution,
                'format': fmt,
                'tbr': tbr,
                'height': height,
                'format_id': fmt,
            })
        self._sort_formats(formats)

        age_limit = self._rta_search(webpage)

        return {
            'id': video_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'uploader': uploader,
            'uploader_id': uploader_id,
            'upload_date': upload_date,
            'view_count': view_count,
            'comment_count': comment_count,
            'formats': formats,
            'age_limit': age_limit,
        }
class SpiegelIE(InfoExtractor):
    """Extractor for spiegel.de video pages."""

    _VALID_URL = r'https?://(?:www\.)?spiegel\.de/video/[^/]*-(?P<id>[0-9]+)(?:-embed)?(?:\.html)?(?:#.*)?$'
    _TESTS = [{
        'url': 'http://www.spiegel.de/video/vulkan-tungurahua-in-ecuador-ist-wieder-aktiv-video-1259285.html',
        'md5': '2c2754212136f35fb4b19767d242f66e',
        'info_dict': {
            'id': '1259285',
            'ext': 'mp4',
            'title': 'Vulkanausbruch in Ecuador: Der "Feuerschlund" ist wieder aktiv',
            'description': 'md5:8029d8310232196eb235d27575a8b9f4',
            'duration': 49,
        },
    }, {
        'url': 'http://www.spiegel.de/video/schach-wm-videoanalyse-des-fuenften-spiels-video-1309159.html',
        'md5': 'f2cdf638d7aa47654e251e1aee360af1',
        'info_dict': {
            'id': '1309159',
            'ext': 'mp4',
            'title': 'Schach-WM in der Videoanalyse: Carlsen nutzt die Fehlgriffe des Titelverteidigers',
            'description': 'md5:c2322b65e58f385a820c10fa03b2d088',
            'duration': 983,
        },
    }, {
        'url': 'http://www.spiegel.de/video/astronaut-alexander-gerst-von-der-iss-station-beantwortet-fragen-video-1519126-embed.html',
        'md5': 'd8eeca6bfc8f1cd6f490eb1f44695d51',
        'info_dict': {
            'id': '1519126',
            'ext': 'mp4',
            'description': 'SPIEGEL ONLINE-Nutzer durften den deutschen Astronauten Alexander Gerst ц╪ber sein Leben auf der ISS-Station befragen. Hier kommen seine Antworten auf die besten sechs Fragen.',
            'title': 'Fragen an Astronaut Alexander Gerst: "Bekommen Sie die Tageszeiten mit?"',
        }
    }]

    def _real_extract(self, url):
        """Download the page, follow a possible spiegel.tv redirect, then
        read the per-video XML manifest to build the format list."""
        video_id = self._match_id(url)
        webpage, handle = self._download_webpage_handle(url, video_id)

        # 302 to spiegel.tv, like http://www.spiegel.de/video/der-film-zum-wochenende-die-wahrheit-ueber-maenner-video-99003272.html
        if SpiegeltvIE.suitable(handle.geturl()):
            return self.url_result(handle.geturl(), 'Spiegeltv')

        # Collapse whitespace runs inside the (possibly multi-line) title.
        title = re.sub(r'\s+', ' ', self._html_search_regex(
            r'(?s)<(?:h1|div) class="module-title"[^>]*>(.*?)</(?:h1|div)>',
            webpage, 'title'))
        description = self._html_search_meta('description', webpage, 'description')

        base_url = self._search_regex(
            r'var\s+server\s*=\s*"([^"]+)\"', webpage, 'server URL')

        # One XML manifest per video id on the media server; each child
        # element <typeN> describes one encoded rendition.
        xml_url = base_url + video_id + '.xml'
        idoc = self._download_xml(xml_url, video_id)

        formats = [
            {
                'format_id': n.tag.rpartition('type')[2],
                'url': base_url + n.find('./filename').text,
                'width': int(n.find('./width').text),
                'height': int(n.find('./height').text),
                'abr': int(n.find('./audiobitrate').text),
                'vbr': int(n.find('./videobitrate').text),
                'vcodec': n.find('./codec').text,
                'acodec': 'MP4A',
            }
            for n in list(idoc)
            # Blacklist type 6, it's extremely LQ and not available on the same server
            if n.tag.startswith('type') and n.tag != 'type6'
        ]
        duration = float(idoc[0].findall('./duration')[0].text)

        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': title,
            'description': description,
            'duration': duration,
            'formats': formats,
        }
class SpiegelArticleIE(InfoExtractor):
    _VALID_URL = 'https?://www\.spiegel\.de/(?!video/)[^?#]*?-(?P<id>[0-9]+)\.html'
    IE_NAME = 'Spiegel:Article'
    IE_DESC = 'Articles on spiegel.de'
    _TESTS = [{
        'url': 'http://www.spiegel.de/sport/sonst/badminton-wm-die-randsportart-soll-populaerer-werden-a-987092.html',
        'info_dict': {
            'id': '1516455',
            'ext': 'mp4',
            'title': 'Faszination Badminton: Nennt es bloц÷ nicht Federball',
            'description': 're:^Patrick Kц╓mnitz gehц╤rt.{100,}',
        },
    }, {
        'url': 'http://www.spiegel.de/wissenschaft/weltall/astronaut-alexander-gerst-antwortet-spiegel-online-lesern-a-989876.html',
        'info_dict': {

        },
        'playlist_count': 6,
    }]

    def _real_extract(self, url):
        """Extract the single top-of-page video of an article, or all
        embedded videos as a playlist."""
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        def to_absolute(path):
            # Video links in articles are site-relative.
            return compat_urlparse.urljoin(
                self.http_scheme() + '//spiegel.de/', path)

        # Single video on top of the page
        video_link = self._search_regex(
            r'<a href="([^"]+)" onclick="return spOpenVideo\(this,', webpage,
            'video page URL', default=None)
        if video_link:
            return self.url_result(to_absolute(video_link))

        # Multiple embedded videos
        embeds = re.findall(
            r'<div class="vid_holder[0-9]+.*?</div>\s*.*?url\s*=\s*"([^"]+)"',
            webpage)
        return self.playlist_result(
            [self.url_result(to_absolute(embed_path)) for embed_path in embeds])
}, { + 'url': 'http://www.spiegel.tv/#/filme/alleskino-die-wahrheit-ueber-maenner/', + 'only_matching': True, + }] + + def _real_extract(self, url): + if '/#/' in url: + url = url.replace('/#/', '/') + video_id = self._match_id(url) + webpage = self._download_webpage(url, video_id) + title = self._html_search_regex(r'<h1.*?>(.*?)</h1>', webpage, 'title') + + apihost = 'http://spiegeltv-ivms2-restapi.s3.amazonaws.com' + version_json = self._download_json( + '%s/version.json' % apihost, video_id, + note='Downloading version information') + version_name = version_json['version_name'] + + slug_json = self._download_json( + '%s/%s/restapi/slugs/%s.json' % (apihost, version_name, video_id), + video_id, + note='Downloading object information') + oid = slug_json['object_id'] + + media_json = self._download_json( + '%s/%s/restapi/media/%s.json' % (apihost, version_name, oid), + video_id, note='Downloading media information') + uuid = media_json['uuid'] + is_wide = media_json['is_wide'] + + server_json = self._download_json( + 'http://www.spiegel.tv/streaming_servers/', video_id, + note='Downloading server information') + server = server_json[0]['endpoint'] + + thumbnails = [] + for image in media_json['images']: + thumbnails.append({ + 'url': image['url'], + 'width': image['width'], + 'height': image['height'], + }) + + description = media_json['subtitle'] + duration = float_or_none(media_json.get('duration_in_ms'), scale=1000) + format = '16x9' if is_wide else '4x3' + + url = server + 'mp4:' + uuid + '_spiegeltv_0500_' + format + '.m4v' + + return { + 'id': video_id, + 'title': title, + 'url': url, + 'ext': 'm4v', + 'description': description, + 'duration': duration, + 'thumbnails': thumbnails + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/spike.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/spike.py new file mode 100644 index 0000000000..a3adf54e30 --- /dev/null +++ 
b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/spike.py @@ -0,0 +1,32 @@ +from __future__ import unicode_literals + +import re + +from .mtv import MTVServicesInfoExtractor + + +class SpikeIE(MTVServicesInfoExtractor): + _VALID_URL = r'''(?x)https?:// + (www\.spike\.com/(video-clips|episodes)/.+| + m\.spike\.com/videos/video.rbml\?id=(?P<mobile_id>[^&]+)) + ''' + _TEST = { + 'url': 'http://www.spike.com/video-clips/lhtu8m/auction-hunters-can-allen-ride-a-hundred-year-old-motorcycle', + 'md5': '1a9265f32b0c375793d6c4ce45255256', + 'info_dict': { + 'id': 'b9c8221a-4e50-479a-b86d-3333323e38ba', + 'ext': 'mp4', + 'title': 'Auction Hunters|Can Allen Ride A Hundred Year-Old Motorcycle?', + 'description': 'md5:fbed7e82ed5fad493615b3094a9499cb', + }, + } + + _FEED_URL = 'http://www.spike.com/feeds/mrss/' + _MOBILE_TEMPLATE = 'http://m.spike.com/videos/video.rbml?id=%s' + + def _real_extract(self, url): + mobj = re.search(self._VALID_URL, url) + mobile_id = mobj.group('mobile_id') + if mobile_id is not None: + url = 'http://www.spike.com/video-clips/%s' % mobile_id + return super(SpikeIE, self)._real_extract(url) diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/sport5.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/sport5.py new file mode 100644 index 0000000000..dfe50ed458 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/sport5.py @@ -0,0 +1,92 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor +from ..utils import ExtractorError + + +class Sport5IE(InfoExtractor): + _VALID_URL = r'http://(?:www|vod)?\.sport5\.co\.il/.*\b(?:Vi|docID)=(?P<id>\d+)' + _TESTS = [ + { + 'url': 'http://vod.sport5.co.il/?Vc=147&Vi=176331&Page=1', + 'info_dict': { + 'id': 's5-Y59xx1-GUh2', + 'ext': 'mp4', + 'title': 'в∙в°в═в║в≥в■-в╖в∙в╗в⌠в∙в▒в■ 0:3', + 'description': 'в░в°в╖в░в║в╗, в▓в░в≥в≥в■ в∙в╓в▓в∙в°в≥ в║в≥в⌠в╗в∙ 
в°в╖в▒в∙в╕в■ в╘в° в═в∙в═в∙ в═в≥в╕в≈в∙в÷ в╒в° в╖в∙в╗в⌠в∙в▒в■ в∙в░в╙ в■в·в╖в∙в² в■в╗в░в╘в∙в÷ в▒в°в≥в▓в■', + 'duration': 228, + 'categories': list, + }, + 'skip': 'Blocked outside of Israel', + }, { + 'url': 'http://www.sport5.co.il/articles.aspx?FolderID=3075&docID=176372&lang=HE', + 'info_dict': { + 'id': 's5-SiXxx1-hKh2', + 'ext': 'mp4', + 'title': 'GOALS_CELTIC_270914.mp4', + 'description': '', + 'duration': 87, + 'categories': list, + }, + 'skip': 'Blocked outside of Israel', + } + ] + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + media_id = mobj.group('id') + + webpage = self._download_webpage(url, media_id) + + video_id = self._html_search_regex('clipId=([\w-]+)', webpage, 'video id') + + metadata = self._download_xml( + 'http://sport5-metadata-rr-d.nsacdn.com/vod/vod/%s/HDS/metadata.xml' % video_id, + video_id) + + error = metadata.find('./Error') + if error is not None: + raise ExtractorError( + '%s returned error: %s - %s' % ( + self.IE_NAME, + error.find('./Name').text, + error.find('./Description').text), + expected=True) + + title = metadata.find('./Title').text + description = metadata.find('./Description').text + duration = int(metadata.find('./Duration').text) + + posters_el = metadata.find('./PosterLinks') + thumbnails = [{ + 'url': thumbnail.text, + 'width': int(thumbnail.get('width')), + 'height': int(thumbnail.get('height')), + } for thumbnail in posters_el.findall('./PosterIMG')] if posters_el is not None else [] + + categories_el = metadata.find('./Categories') + categories = [ + cat.get('name') for cat in categories_el.findall('./Category') + ] if categories_el is not None else [] + + formats = [{ + 'url': fmt.text, + 'ext': 'mp4', + 'vbr': int(fmt.get('bitrate')), + 'width': int(fmt.get('width')), + 'height': int(fmt.get('height')), + } for fmt in metadata.findall('./PlaybackLinks/FileURL')] + self._sort_formats(formats) + + return { + 'id': video_id, + 'title': title, + 'description': description, + 'thumbnails': 
thumbnails, + 'duration': duration, + 'categories': categories, + 'formats': formats, + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/sportbox.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/sportbox.py new file mode 100644 index 0000000000..becdf658f6 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/sportbox.py @@ -0,0 +1,76 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor +from ..utils import ( + parse_duration, + parse_iso8601, +) + + +class SportBoxIE(InfoExtractor): + _VALID_URL = r'https?://news\.sportbox\.ru/Vidy_sporta/(?:[^/]+/)+spbvideo_NI\d+_(?P<display_id>.+)' + _TESTS = [ + { + 'url': 'http://news.sportbox.ru/Vidy_sporta/Avtosport/Rossijskij/spbvideo_NI483529_Gonka-2-zaezd-Obyedinenniy-2000-klassi-Turing-i-S', + 'md5': 'ff56a598c2cf411a9a38a69709e97079', + 'info_dict': { + 'id': '80822', + 'ext': 'mp4', + 'title': 'п⌠п╬п╫п╨п╟ 2 п╥п╟п╣п╥п╢ б╚б╚п·п╠я┼п╣п╢п╦п╫п╣п╫п╫я▀п╧ 2000б╩: п╨п╩п╟я│я│я▀ п╒я┐я─п╦п╫пЁ п╦ п║я┐п©п╣я─-п©я─п╬п╢п╟п╨я┬п╫', + 'description': 'md5:81715fa9c4ea3d9e7915dc8180c778ed', + 'thumbnail': 're:^https?://.*\.jpg$', + 'timestamp': 1411896237, + 'upload_date': '20140928', + 'duration': 4846, + }, + 'params': { + # m3u8 download + 'skip_download': True, + }, + }, { + 'url': 'http://news.sportbox.ru/Vidy_sporta/billiard/spbvideo_NI486287_CHempionat-mira-po-dinamichnoy-piramide-4', + 'only_matching': True, + } + ] + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + display_id = mobj.group('display_id') + + webpage = self._download_webpage(url, display_id) + + video_id = self._search_regex( + r'src="/vdl/player/media/(\d+)"', webpage, 'video id') + + player = self._download_webpage( + 'http://news.sportbox.ru/vdl/player/media/%s' % video_id, + display_id, 'Downloading player webpage') + + hls = self._search_regex( + r"var\s+original_hls_file\s*=\s*'([^']+)'", 
player, 'hls file') + + formats = self._extract_m3u8_formats(hls, display_id, 'mp4') + + title = self._html_search_regex( + r'<h1 itemprop="name">([^<]+)</h1>', webpage, 'title') + description = self._html_search_regex( + r'(?s)<div itemprop="description">(.+?)</div>', webpage, 'description', fatal=False) + thumbnail = self._og_search_thumbnail(webpage) + timestamp = parse_iso8601(self._search_regex( + r'<span itemprop="uploadDate">([^<]+)</span>', webpage, 'timestamp', fatal=False)) + duration = parse_duration(self._html_search_regex( + r'<meta itemprop="duration" content="PT([^"]+)">', webpage, 'duration', fatal=False)) + + return { + 'id': video_id, + 'display_id': display_id, + 'title': title, + 'description': description, + 'thumbnail': thumbnail, + 'timestamp': timestamp, + 'duration': duration, + 'formats': formats, + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/sportdeutschland.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/sportdeutschland.py new file mode 100644 index 0000000000..1a57aebf16 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/sportdeutschland.py @@ -0,0 +1,98 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor +from ..compat import ( + compat_urllib_request, +) +from ..utils import ( + parse_iso8601, +) + + +class SportDeutschlandIE(InfoExtractor): + _VALID_URL = r'https?://sportdeutschland\.tv/(?P<sport>[^/?#]+)/(?P<id>[^?#/]+)(?:$|[?#])' + _TESTS = [{ + 'url': 'http://sportdeutschland.tv/badminton/live-li-ning-badminton-weltmeisterschaft-2014-kopenhagen', + 'info_dict': { + 'id': 'live-li-ning-badminton-weltmeisterschaft-2014-kopenhagen', + 'ext': 'mp4', + 'title': 're:Li-Ning Badminton Weltmeisterschaft 2014 Kopenhagen', + 'categories': ['Badminton'], + 'view_count': int, + 'thumbnail': 're:^https?://.*\.jpg$', + 'description': 're:Die Badminton-WM 2014 aus Kopenhagen bei 
Sportdeutschland\.TV', + 'timestamp': int, + 'upload_date': 're:^201408[23][0-9]$', + }, + 'params': { + 'skip_download': 'Live stream', + }, + }, { + 'url': 'http://sportdeutschland.tv/li-ning-badminton-wm-2014/lee-li-ning-badminton-weltmeisterschaft-2014-kopenhagen-herren-einzel-wei-vs', + 'info_dict': { + 'id': 'lee-li-ning-badminton-weltmeisterschaft-2014-kopenhagen-herren-einzel-wei-vs', + 'ext': 'mp4', + 'upload_date': '20140825', + 'description': 'md5:60a20536b57cee7d9a4ec005e8687504', + 'timestamp': 1408976060, + 'title': 'Li-Ning Badminton Weltmeisterschaft 2014 Kopenhagen: Herren Einzel, Wei Lee vs. Keun Lee', + 'thumbnail': 're:^https?://.*\.jpg$', + 'view_count': int, + 'categories': ['Li-Ning Badminton WM 2014'], + } + }] + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group('id') + sport_id = mobj.group('sport') + + api_url = 'http://splink.tv/api/permalinks/%s/%s' % ( + sport_id, video_id) + req = compat_urllib_request.Request(api_url, headers={ + 'Accept': 'application/vnd.vidibus.v2.html+json', + 'Referer': url, + }) + data = self._download_json(req, video_id) + + categories = list(data.get('section', {}).get('tags', {}).values()) + asset = data['asset'] + assets_info = self._download_json(asset['url'], video_id) + + formats = [] + smil_url = assets_info['video'] + if '.smil' in smil_url: + m3u8_url = smil_url.replace('.smil', '.m3u8') + formats.extend( + self._extract_m3u8_formats(m3u8_url, video_id, ext='mp4')) + + smil_doc = self._download_xml( + smil_url, video_id, note='Downloading SMIL metadata') + base_url = smil_doc.find('./head/meta').attrib['base'] + formats.extend([{ + 'format_id': 'rmtp', + 'url': base_url, + 'play_path': n.attrib['src'], + 'ext': 'flv', + 'preference': -100, + 'format_note': 'Seems to fail at example stream', + } for n in smil_doc.findall('./body/video')]) + else: + formats.append({'url': smil_url}) + + self._sort_formats(formats) + + return { + 'id': video_id, + 'formats': 
formats, + 'title': asset['title'], + 'thumbnail': asset.get('image'), + 'description': asset.get('teaser'), + 'categories': categories, + 'view_count': asset.get('views'), + 'rtmp_live': asset.get('live'), + 'timestamp': parse_iso8601(asset.get('date')), + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/srmediathek.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/srmediathek.py new file mode 100644 index 0000000000..666a7dcc8a --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/srmediathek.py @@ -0,0 +1,43 @@ +# encoding: utf-8 +from __future__ import unicode_literals + +import json + +from .common import InfoExtractor +from ..utils import js_to_json + + +class SRMediathekIE(InfoExtractor): + IE_DESC = 'Sц╪ddeutscher Rundfunk' + _VALID_URL = r'https?://sr-mediathek\.sr-online\.de/index\.php\?.*?&id=(?P<id>[0-9]+)' + + _TEST = { + 'url': 'http://sr-mediathek.sr-online.de/index.php?seite=7&id=28455', + 'info_dict': { + 'id': '28455', + 'ext': 'mp4', + 'title': 'sportarena (26.10.2014)', + 'description': 'Ringen: KSV Kц╤llerbach gegen Aachen-Walheim; Frauen-Fuц÷ball: 1. 
FC Saarbrц╪cken gegen Sindelfingen; Motorsport: Rallye in Losheim; dazu: Interview mit Timo Bernhard; Turnen: TG Saar; Reitsport: Deutscher Voltigier-Pokal; Badminton: Interview mit Michael Fuchs ', + 'thumbnail': 're:^https?://.*\.jpg$', + }, + } + + def _real_extract(self, url): + video_id = self._match_id(url) + webpage = self._download_webpage(url, video_id) + + murls = json.loads(js_to_json(self._search_regex( + r'var mediaURLs\s*=\s*(.*?);\n', webpage, 'video URLs'))) + formats = [{'url': murl} for murl in murls] + self._sort_formats(formats) + + title = json.loads(js_to_json(self._search_regex( + r'var mediaTitles\s*=\s*(.*?);\n', webpage, 'title')))[0] + + return { + 'id': video_id, + 'title': title, + 'formats': formats, + 'description': self._og_search_description(webpage), + 'thumbnail': self._og_search_thumbnail(webpage), + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/stanfordoc.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/stanfordoc.py new file mode 100644 index 0000000000..4a3d8bb8f2 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/stanfordoc.py @@ -0,0 +1,91 @@ +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor +from ..utils import ( + ExtractorError, + orderedSet, + unescapeHTML, +) + + +class StanfordOpenClassroomIE(InfoExtractor): + IE_NAME = 'stanfordoc' + IE_DESC = 'Stanford Open ClassRoom' + _VALID_URL = r'https?://openclassroom\.stanford\.edu(?P<path>/?|(/MainFolder/(?:HomePage|CoursePage|VideoPage)\.php([?]course=(?P<course>[^&]+)(&video=(?P<video>[^&]+))?(&.*)?)?))$' + _TEST = { + 'url': 'http://openclassroom.stanford.edu/MainFolder/VideoPage.php?course=PracticalUnix&video=intro-environment&speed=100', + 'md5': '544a9468546059d4e80d76265b0443b8', + 'info_dict': { + 'id': 'PracticalUnix_intro-environment', + 'ext': 'mp4', + 'title': 'Intro Environment', + } + } + + def _real_extract(self, url): + 
mobj = re.match(self._VALID_URL, url) + + if mobj.group('course') and mobj.group('video'): # A specific video + course = mobj.group('course') + video = mobj.group('video') + info = { + 'id': course + '_' + video, + 'uploader': None, + 'upload_date': None, + } + + baseUrl = 'http://openclassroom.stanford.edu/MainFolder/courses/' + course + '/videos/' + xmlUrl = baseUrl + video + '.xml' + mdoc = self._download_xml(xmlUrl, info['id']) + try: + info['title'] = mdoc.findall('./title')[0].text + info['url'] = baseUrl + mdoc.findall('./videoFile')[0].text + except IndexError: + raise ExtractorError('Invalid metadata XML file') + return info + elif mobj.group('course'): # A course page + course = mobj.group('course') + info = { + 'id': course, + '_type': 'playlist', + 'uploader': None, + 'upload_date': None, + } + + coursepage = self._download_webpage( + url, info['id'], + note='Downloading course info page', + errnote='Unable to download course info page') + + info['title'] = self._html_search_regex( + r'<h1>([^<]+)</h1>', coursepage, 'title', default=info['id']) + + info['description'] = self._html_search_regex( + r'(?s)<description>([^<]+)</description>', + coursepage, 'description', fatal=False) + + links = orderedSet(re.findall('<a href="(VideoPage.php\?[^"]+)">', coursepage)) + info['entries'] = [self.url_result( + 'http://openclassroom.stanford.edu/MainFolder/%s' % unescapeHTML(l) + ) for l in links] + return info + else: # Root page + info = { + 'id': 'Stanford OpenClassroom', + '_type': 'playlist', + 'uploader': None, + 'upload_date': None, + } + info['title'] = info['id'] + + rootURL = 'http://openclassroom.stanford.edu/MainFolder/HomePage.php' + rootpage = self._download_webpage(rootURL, info['id'], + errnote='Unable to download course info page') + + links = orderedSet(re.findall('<a href="(CoursePage.php\?[^"]+)">', rootpage)) + info['entries'] = [self.url_result( + 'http://openclassroom.stanford.edu/MainFolder/%s' % unescapeHTML(l) + ) for l in links] + 
return info diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/statigram.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/statigram.py new file mode 100644 index 0000000000..d602e817a0 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/statigram.py @@ -0,0 +1,38 @@ +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor + + +class StatigramIE(InfoExtractor): + _VALID_URL = r'https?://(www\.)?statigr\.am/p/(?P<id>[^/]+)' + _TEST = { + 'url': 'http://statigr.am/p/522207370455279102_24101272', + 'md5': '6eb93b882a3ded7c378ee1d6884b1814', + 'info_dict': { + 'id': '522207370455279102_24101272', + 'ext': 'mp4', + 'uploader_id': 'aguynamedpatrick', + 'title': 'Instagram photo by @aguynamedpatrick (Patrick Janelle)', + }, + } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group('id') + webpage = self._download_webpage(url, video_id) + html_title = self._html_search_regex( + r'<title>(.+?)', + webpage, 'title') + title = re.sub(r'(?: *\(Videos?\))? 
\| Statigram$', '', html_title) + uploader_id = self._html_search_regex( + r'@([^ ]+)', title, 'uploader name', fatal=False) + + return { + 'id': video_id, + 'url': self._og_search_video_url(webpage), + 'title': title, + 'thumbnail': self._og_search_thumbnail(webpage), + 'uploader_id': uploader_id + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/steam.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/steam.py new file mode 100644 index 0000000000..183dcb03cc --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/steam.py @@ -0,0 +1,123 @@ +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor +from ..utils import ( + ExtractorError, + unescapeHTML, +) + + +class SteamIE(InfoExtractor): + _VALID_URL = r"""(?x) + https?://store\.steampowered\.com/ + (agecheck/)? + (?Pvideo|app)/ #If the page is only for videos or for a game + (?P\d+)/? + (?P\d*)(?P\??) # For urltype == video we sometimes get the videoID + | + https?://(?:www\.)?steamcommunity\.com/sharedfiles/filedetails/\?id=(?P[0-9]+) + """ + _VIDEO_PAGE_TEMPLATE = 'http://store.steampowered.com/video/%s/' + _AGECHECK_TEMPLATE = 'http://store.steampowered.com/agecheck/video/%s/?snr=1_agecheck_agecheck__age-gate&ageDay=1&ageMonth=January&ageYear=1970' + _TESTS = [{ + "url": "http://store.steampowered.com/video/105600/", + "playlist": [ + { + "md5": "f870007cee7065d7c76b88f0a45ecc07", + "info_dict": { + 'id': '81300', + 'ext': 'flv', + "title": "Terraria 1.1 Trailer", + 'playlist_index': 1, + } + }, + { + "md5": "61aaf31a5c5c3041afb58fb83cbb5751", + "info_dict": { + 'id': '80859', + 'ext': 'flv', + "title": "Terraria Trailer", + 'playlist_index': 2, + } + } + ], + 'params': { + 'playlistend': 2, + } + }, { + 'url': 'http://steamcommunity.com/sharedfiles/filedetails/?id=242472205', + 'info_dict': { + 'id': 'WB5DvDOOvAY', + 'ext': 'mp4', + 'upload_date': '20140329', + 'title': 'FRONTIERS - 
Final Greenlight Trailer', + 'description': 'md5:dc96a773669d0ca1b36c13c1f30250d9', + 'uploader': 'AAD Productions', + 'uploader_id': 'AtomicAgeDogGames', + } + }] + + def _real_extract(self, url): + m = re.match(self._VALID_URL, url) + fileID = m.group('fileID') + if fileID: + videourl = url + playlist_id = fileID + else: + gameID = m.group('gameID') + playlist_id = gameID + videourl = self._VIDEO_PAGE_TEMPLATE % playlist_id + webpage = self._download_webpage(videourl, playlist_id) + + if re.search('

    Please enter your birth date to continue:

    ', webpage) is not None: + videourl = self._AGECHECK_TEMPLATE % playlist_id + self.report_age_confirmation() + webpage = self._download_webpage(videourl, playlist_id) + + if fileID: + playlist_title = self._html_search_regex( + r'
    (.+)
    ', webpage, 'title') + mweb = re.finditer(r'''(?x) + 'movie_(?P[0-9]+)':\s*\{\s* + YOUTUBE_VIDEO_ID:\s*"(?P[^"]+)", + ''', webpage) + videos = [{ + '_type': 'url', + 'url': vid.group('youtube_id'), + 'ie_key': 'Youtube', + } for vid in mweb] + else: + playlist_title = self._html_search_regex( + r'', webpage, 'game title') + + mweb = re.finditer(r'''(?x) + 'movie_(?P[0-9]+)':\s*\{\s* + FILENAME:\s*"(?P[\w:/\.\?=]+)" + (,\s*MOVIE_NAME:\s*\"(?P[\w:/\.\?=\+-]+)\")?\s*\}, + ''', webpage) + titles = re.finditer( + r'(?P.+?)', webpage) + thumbs = re.finditer( + r'', webpage) + videos = [] + + for vid, vtitle, thumb in zip(mweb, titles, thumbs): + video_id = vid.group('videoID') + title = vtitle.group('videoName') + video_url = vid.group('videoURL') + video_thumb = thumb.group('thumbnail') + if not video_url: + raise ExtractorError('Cannot find video url for %s' % video_id) + videos.append({ + 'id': video_id, + 'url': video_url, + 'ext': 'flv', + 'title': unescapeHTML(title), + 'thumbnail': video_thumb + }) + if not videos: + raise ExtractorError('Could not find any videos') + + return self.playlist_result(videos, playlist_id, playlist_title) diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/streamcloud.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/streamcloud.py new file mode 100644 index 0000000000..d4e1340158 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/streamcloud.py @@ -0,0 +1,62 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor +from ..compat import ( + compat_urllib_parse, + compat_urllib_request, +) + + +class StreamcloudIE(InfoExtractor): + IE_NAME = 'streamcloud.eu' + _VALID_URL = r'https?://streamcloud\.eu/(?P[a-zA-Z0-9_-]+)(?:/(?P[^#?]*)\.html)?' 
+ + _TEST = { + 'url': 'http://streamcloud.eu/skp9j99s4bpz/youtube-dl_test_video_____________-BaW_jenozKc.mp4.html', + 'md5': '6bea4c7fa5daaacc2a946b7146286686', + 'info_dict': { + 'id': 'skp9j99s4bpz', + 'ext': 'mp4', + 'title': 'youtube-dl test video \'/\\ ц╓ Б├╜', + }, + 'skip': 'Only available from the EU' + } + + def _real_extract(self, url): + video_id = self._match_id(url) + url = 'http://streamcloud.eu/%s' % video_id + + orig_webpage = self._download_webpage(url, video_id) + + fields = re.findall(r'''(?x)]*>([^<]+)<', webpage, 'title') + video_url = self._search_regex( + r'file:\s*"([^"]+)"', webpage, 'video URL') + thumbnail = self._search_regex( + r'image:\s*"([^"]+)"', webpage, 'thumbnail URL', fatal=False) + + return { + 'id': video_id, + 'title': title, + 'url': video_url, + 'thumbnail': thumbnail, + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/streamcz.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/streamcz.py new file mode 100644 index 0000000000..c3ceb5f76d --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/streamcz.py @@ -0,0 +1,79 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + +from .common import InfoExtractor +from ..utils import ( + int_or_none, +) + + +class StreamCZIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?stream\.cz/.+/(?P[0-9]+)' + + _TESTS = [{ + 'url': 'http://www.stream.cz/peklonataliri/765767-ecka-pro-deti', + 'md5': '6d3ca61a8d0633c9c542b92fcb936b0c', + 'info_dict': { + 'id': '765767', + 'ext': 'mp4', + 'title': 'Peklo na talц╜е≥i: ц┴д█ka pro dд⌡ti', + 'description': 'Taе║ka s grцЁnskou pomazц║nkou a dalе║ц╜ pekelnosti ZDE', + 'thumbnail': 're:^http://im.stream.cz/episode/52961d7e19d423f8f06f0100', + 'duration': 256, + }, + }, { + 'url': 'http://www.stream.cz/blanik/10002447-tri-roky-pro-mazanka', + 'md5': 'e54a254fb8b871968fd8403255f28589', + 'info_dict': { + 'id': '10002447', + 'ext': 'mp4', + 
'title': 'Kancelц║е≥ Blanц╜k: Tе≥i roky pro Mazц║nka', + 'description': 'md5:3862a00ba7bf0b3e44806b544032c859', + 'thumbnail': 're:^http://im.stream.cz/episode/537f838c50c11f8d21320000', + 'duration': 368, + }, + }] + + def _real_extract(self, url): + video_id = self._match_id(url) + data = self._download_json( + 'http://www.stream.cz/API/episode/%s' % video_id, video_id) + + formats = [] + for quality, video in enumerate(data['video_qualities']): + for f in video['formats']: + typ = f['type'].partition('/')[2] + qlabel = video.get('quality_label') + formats.append({ + 'format_note': '%s-%s' % (qlabel, typ) if qlabel else typ, + 'format_id': '%s-%s' % (typ, f['quality']), + 'url': f['source'], + 'height': int_or_none(f['quality'].rstrip('p')), + 'quality': quality, + }) + self._sort_formats(formats) + + image = data.get('image') + if image: + thumbnail = self._proto_relative_url( + image.replace('{width}', '1240').replace('{height}', '697'), + scheme='http:', + ) + else: + thumbnail = None + + stream = data.get('_embedded', {}).get('stream:show', {}).get('name') + if stream: + title = '%s: %s' % (stream, data['name']) + else: + title = data['name'] + + return { + 'id': video_id, + 'title': title, + 'thumbnail': thumbnail, + 'formats': formats, + 'description': data.get('web_site_text'), + 'duration': int_or_none(data.get('duration')), + 'view_count': int_or_none(data.get('views')), + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/subtitles.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/subtitles.py new file mode 100644 index 0000000000..59a51268d2 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/subtitles.py @@ -0,0 +1,99 @@ +from __future__ import unicode_literals +from .common import InfoExtractor + +from ..compat import compat_str +from ..utils import ( + ExtractorError, +) + + +class SubtitlesInfoExtractor(InfoExtractor): + @property + def 
_have_to_download_any_subtitles(self): + return any([self._downloader.params.get('writesubtitles', False), + self._downloader.params.get('writeautomaticsub')]) + + def _list_available_subtitles(self, video_id, webpage): + """ outputs the available subtitles for the video """ + sub_lang_list = self._get_available_subtitles(video_id, webpage) + auto_captions_list = self._get_available_automatic_caption(video_id, webpage) + sub_lang = ",".join(list(sub_lang_list.keys())) + self.to_screen('%s: Available subtitles for video: %s' % + (video_id, sub_lang)) + auto_lang = ",".join(auto_captions_list.keys()) + self.to_screen('%s: Available automatic captions for video: %s' % + (video_id, auto_lang)) + + def extract_subtitles(self, video_id, webpage): + """ + returns {sub_lang: sub} ,{} if subtitles not found or None if the + subtitles aren't requested. + """ + if not self._have_to_download_any_subtitles: + return None + available_subs_list = {} + if self._downloader.params.get('writeautomaticsub', False): + available_subs_list.update(self._get_available_automatic_caption(video_id, webpage)) + if self._downloader.params.get('writesubtitles', False): + available_subs_list.update(self._get_available_subtitles(video_id, webpage)) + + if not available_subs_list: # error, it didn't get the available subtitles + return {} + if self._downloader.params.get('allsubtitles', False): + sub_lang_list = available_subs_list + else: + if self._downloader.params.get('subtitleslangs', False): + requested_langs = self._downloader.params.get('subtitleslangs') + elif 'en' in available_subs_list: + requested_langs = ['en'] + else: + requested_langs = [list(available_subs_list.keys())[0]] + + sub_lang_list = {} + for sub_lang in requested_langs: + if sub_lang not in available_subs_list: + self._downloader.report_warning('no closed captions found in the specified language "%s"' % sub_lang) + continue + sub_lang_list[sub_lang] = available_subs_list[sub_lang] + + subtitles = {} + for sub_lang, url in 
sub_lang_list.items(): + subtitle = self._request_subtitle_url(sub_lang, url) + if subtitle: + subtitles[sub_lang] = subtitle + return subtitles + + def _download_subtitle_url(self, sub_lang, url): + return self._download_webpage(url, None, note=False) + + def _request_subtitle_url(self, sub_lang, url): + """ makes the http request for the subtitle """ + try: + sub = self._download_subtitle_url(sub_lang, url) + except ExtractorError as err: + self._downloader.report_warning('unable to download video subtitles for %s: %s' % (sub_lang, compat_str(err))) + return + if not sub: + self._downloader.report_warning('Did not fetch video subtitles') + return + return sub + + def _get_available_subtitles(self, video_id, webpage): + """ + returns {sub_lang: url} or {} if not available + Must be redefined by the subclasses + """ + + # By default, allow implementations to simply pass in the result + assert isinstance(webpage, dict), \ + '_get_available_subtitles not implemented' + return webpage + + def _get_available_automatic_caption(self, video_id, webpage): + """ + returns {sub_lang: url} or {} if not available + Must be redefined by the subclasses that support automatic captions, + otherwise it will return {} + """ + self._downloader.report_warning('Automatic Captions not supported by this server') + return {} diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/sunporno.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/sunporno.py new file mode 100644 index 0000000000..263f09b464 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/sunporno.py @@ -0,0 +1,70 @@ +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor +from ..utils import ( + parse_duration, + int_or_none, + qualities, + determine_ext, +) + + +class SunPornoIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?sunporno\.com/videos/(?P\d+)' + _TEST = { + 'url': 
'http://www.sunporno.com/videos/807778/', + 'md5': '6457d3c165fd6de062b99ef6c2ff4c86', + 'info_dict': { + 'id': '807778', + 'ext': 'flv', + 'title': 'md5:0a400058e8105d39e35c35e7c5184164', + 'description': 'md5:a31241990e1bd3a64e72ae99afb325fb', + 'thumbnail': 're:^https?://.*\.jpg$', + 'duration': 302, + 'age_limit': 18, + } + } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group('id') + + webpage = self._download_webpage(url, video_id) + + title = self._html_search_regex(r'([^<]+)', webpage, 'title') + description = self._html_search_meta('description', webpage, 'description') + thumbnail = self._html_search_regex( + r'poster="([^"]+)"', webpage, 'thumbnail', fatal=False) + + duration = parse_duration(self._search_regex( + r'Duration:\s*(\d+:\d+)\s*<', webpage, 'duration', fatal=False)) + + view_count = int_or_none(self._html_search_regex( + r'class="views">\s*(\d+)\s*<', webpage, 'view count', fatal=False)) + comment_count = int_or_none(self._html_search_regex( + r'(\d+) Comments?', webpage, 'comment count', fatal=False)) + + formats = [] + quality = qualities(['mp4', 'flv']) + for video_url in re.findall(r'[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12})' + + _TESTS = [{ + 'url': 'http://swrmediathek.de/player.htm?show=849790d0-dab8-11e3-a953-0026b975f2e6', + 'md5': '8c5f6f0172753368547ca8413a7768ac', + 'info_dict': { + 'id': '849790d0-dab8-11e3-a953-0026b975f2e6', + 'ext': 'mp4', + 'title': 'SWR odysso', + 'description': 'md5:2012e31baad36162e97ce9eb3f157b8a', + 'thumbnail': 're:^http:.*\.jpg$', + 'duration': 2602, + 'upload_date': '20140515', + 'uploader': 'SWR Fernsehen', + 'uploader_id': '990030', + }, + }, { + 'url': 'http://swrmediathek.de/player.htm?show=0e1a8510-ddf2-11e3-9be3-0026b975f2e6', + 'md5': 'b10ab854f912eecc5a6b55cd6fc1f545', + 'info_dict': { + 'id': '0e1a8510-ddf2-11e3-9be3-0026b975f2e6', + 'ext': 'mp4', + 'title': 'Nachtcafц╘ - Alltagsdroge Alkohol - zwischen Sektempfang und Komasaufen', 
+ 'description': 'md5:e0a3adc17e47db2c23aab9ebc36dbee2', + 'thumbnail': 're:http://.*\.jpg', + 'duration': 5305, + 'upload_date': '20140516', + 'uploader': 'SWR Fernsehen', + 'uploader_id': '990030', + }, + }, { + 'url': 'http://swrmediathek.de/player.htm?show=bba23e10-cb93-11e3-bf7f-0026b975f2e6', + 'md5': '4382e4ef2c9d7ce6852535fa867a0dd3', + 'info_dict': { + 'id': 'bba23e10-cb93-11e3-bf7f-0026b975f2e6', + 'ext': 'mp3', + 'title': 'Saе║a Staniе║ic: Vor dem Fest', + 'description': 'md5:5b792387dc3fbb171eb709060654e8c9', + 'thumbnail': 're:http://.*\.jpg', + 'duration': 3366, + 'upload_date': '20140520', + 'uploader': 'SWR 2', + 'uploader_id': '284670', + } + }] + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group('id') + + video = self._download_json( + 'http://swrmediathek.de/AjaxEntry?ekey=%s' % video_id, video_id, 'Downloading video JSON') + + attr = video['attr'] + media_type = attr['entry_etype'] + + formats = [] + for entry in video['sub']: + if entry['name'] != 'entry_media': + continue + + entry_attr = entry['attr'] + codec = entry_attr['val0'] + quality = int(entry_attr['val1']) + + fmt = { + 'url': entry_attr['val2'], + 'quality': quality, + } + + if media_type == 'Video': + fmt.update({ + 'format_note': ['144p', '288p', '544p', '720p'][quality - 1], + 'vcodec': codec, + }) + elif media_type == 'Audio': + fmt.update({ + 'acodec': codec, + }) + formats.append(fmt) + + self._sort_formats(formats) + + return { + 'id': video_id, + 'title': attr['entry_title'], + 'description': attr['entry_descl'], + 'thumbnail': attr['entry_image_16_9'], + 'duration': parse_duration(attr['entry_durat']), + 'upload_date': attr['entry_pdatet'][:-4], + 'uploader': attr['channel_title'], + 'uploader_id': attr['channel_idkey'], + 'formats': formats, + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/syfy.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/syfy.py new file mode 
100644 index 0000000000..5ca079f880 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/syfy.py @@ -0,0 +1,46 @@ +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor + + +class SyfyIE(InfoExtractor): + _VALID_URL = r'https?://www\.syfy\.com/(?:videos/.+?vid:(?P[0-9]+)|(?!videos)(?P[^/]+)(?:$|[?#]))' + + _TESTS = [{ + 'url': 'http://www.syfy.com/videos/Robot%20Combat%20League/Behind%20the%20Scenes/vid:2631458', + 'info_dict': { + 'id': 'NmqMrGnXvmO1', + 'ext': 'flv', + 'title': 'George Lucas has Advice for his Daughter', + 'description': 'Listen to what insights George Lucas give his daughter Amanda.', + }, + 'add_ie': ['ThePlatform'], + }, { + 'url': 'http://www.syfy.com/wilwheaton', + 'md5': '94dfa54ee3ccb63295b276da08c415f6', + 'info_dict': { + 'id': '4yoffOOXC767', + 'ext': 'flv', + 'title': 'The Wil Wheaton Project - Premiering May 27th at 10/9c.', + 'description': 'The Wil Wheaton Project premieres May 27th at 10/9c. 
Don\'t miss it.', + }, + 'add_ie': ['ThePlatform'], + 'skip': 'Blocked outside the US', + }] + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + video_name = mobj.group('video_name') + if video_name: + generic_webpage = self._download_webpage(url, video_name) + video_id = self._search_regex( + r'', + generic_webpage, 'video ID') + url = 'http://www.syfy.com/videos/%s/%s/vid:%s' % ( + video_name, video_name, video_id) + else: + video_id = mobj.group('id') + webpage = self._download_webpage(url, video_id) + return self.url_result(self._og_search_video_url(webpage)) diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/sztvhu.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/sztvhu.py new file mode 100644 index 0000000000..aa5964acb6 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/sztvhu.py @@ -0,0 +1,41 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + +from .common import InfoExtractor + + +class SztvHuIE(InfoExtractor): + _VALID_URL = r'http://(?:(?:www\.)?sztv\.hu|www\.tvszombathely\.hu)/(?:[^/]+)/.+-(?P[0-9]+)' + _TEST = { + 'url': 'http://sztv.hu/hirek/cserkeszek-nepszerusitettek-a-kornyezettudatos-eletmodot-a-savaria-teren-20130909', + 'md5': 'a6df607b11fb07d0e9f2ad94613375cb', + 'info_dict': { + 'id': '20130909', + 'ext': 'mp4', + 'title': 'Cserkц╘szek nц╘pszerе╠sц╜tettц╘k a kц╤rnyezettudatos ц╘letmцЁdot a Savaria tц╘ren', + 'description': 'A zц╤ld nap jц║tц╘kos ismeretterjesztе▒ programjait a Magyar Cserkц╘sz Szц╤vetsц╘g szervezte, akik az orszц║g nyolc vц║rosц║ban adjц║k ц║t tudц║sukat az ц╘rdeklе▒dе▒knek. 
A PET...', + }, + } + + def _real_extract(self, url): + video_id = self._match_id(url) + webpage = self._download_webpage(url, video_id) + video_file = self._search_regex( + r'file: "...:(.*?)",', webpage, 'video file') + title = self._html_search_regex( + r'', + webpage, 'video description', fatal=False) + thumbnail = self._og_search_thumbnail(webpage) + + video_url = 'http://media.sztv.hu/vod/' + video_file + + return { + 'id': video_id, + 'url': video_url, + 'title': title, + 'description': description, + 'thumbnail': thumbnail, + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/tagesschau.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/tagesschau.py new file mode 100644 index 0000000000..bfe07b0241 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/tagesschau.py @@ -0,0 +1,120 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor +from ..utils import parse_filesize + + +class TagesschauIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?tagesschau\.de/multimedia/(?:sendung/ts|video/video)(?P-?[0-9]+)\.html' + + _TESTS = [{ + 'url': 'http://www.tagesschau.de/multimedia/video/video1399128.html', + 'md5': 'bcdeac2194fb296d599ce7929dfa4009', + 'info_dict': { + 'id': '1399128', + 'ext': 'mp4', + 'title': 'Harald Range, Generalbundesanwalt, zu den Ermittlungen', + 'description': 'md5:69da3c61275b426426d711bde96463ab', + 'thumbnail': 're:^http:.*\.jpg$', + }, + }, { + 'url': 'http://www.tagesschau.de/multimedia/sendung/ts-5727.html', + 'md5': '3c54c1f6243d279b706bde660ceec633', + 'info_dict': { + 'id': '5727', + 'ext': 'mp4', + 'description': 'md5:695c01bfd98b7e313c501386327aea59', + 'title': 'Sendung: tagesschau \t04.12.2014 20:00 Uhr', + 'thumbnail': 're:^http:.*\.jpg$', + } + }] + + _FORMATS = { + 's': {'width': 256, 'height': 144, 'quality': 1}, + 'm': {'width': 512, 'height': 288, 'quality': 2}, + 'l': 
{'width': 960, 'height': 544, 'quality': 3}, + } + + def _real_extract(self, url): + video_id = self._match_id(url) + display_id = video_id.lstrip('-') + webpage = self._download_webpage(url, display_id) + + player_url = self._html_search_meta( + 'twitter:player', webpage, 'player URL', default=None) + if player_url: + playerpage = self._download_webpage( + player_url, display_id, 'Downloading player page') + + medias = re.findall( + r'"(http://media.+?)", type:"video/(.+?)", quality:"(.+?)"', + playerpage) + formats = [] + for url, ext, res in medias: + f = { + 'format_id': res + '_' + ext, + 'url': url, + 'ext': ext, + } + f.update(self._FORMATS.get(res, {})) + formats.append(f) + thumbnail_fn = re.findall(r'"(/multimedia/.+?\.jpg)"', playerpage)[-1] + title = self._og_search_title(webpage).strip() + description = self._og_search_description(webpage).strip() + else: + download_text = self._search_regex( + r'(?s)

    Wir bieten dieses Video in folgenden Formaten zum Download an:

    \s*
    (.*?)
    \s*

    ', + webpage, 'download links') + links = re.finditer( + r'

    ', + download_text) + formats = [] + for l in links: + format_id = self._search_regex( + r'.*/[^/.]+\.([^/]+)\.[^/.]+', l.group('url'), 'format ID') + format = { + 'format_id': format_id, + 'url': l.group('url'), + 'format_name': l.group('name'), + } + m = re.match( + r'''(?x) + Video:\s*(?P[a-zA-Z0-9/._-]+)\s*&\#10; + (?P[0-9]+)x(?P[0-9]+)px&\#10; + (?P[0-9]+)kbps&\#10; + Audio:\s*(?P[0-9]+)kbps,\s*(?P[A-Za-z\.0-9]+)&\#10; + Größe:\s*(?P[0-9.,]+\s+[a-zA-Z]*B)''', + l.group('title')) + if m: + format.update({ + 'format_note': m.group('audio_desc'), + 'vcodec': m.group('vcodec'), + 'width': int(m.group('width')), + 'height': int(m.group('height')), + 'abr': int(m.group('abr')), + 'vbr': int(m.group('vbr')), + 'filesize_approx': parse_filesize(m.group('filesize_approx')), + }) + formats.append(format) + thumbnail_fn = self._search_regex( + r'(?s)Sendungsbild(.*?)

    ', + webpage, 'description', fatal=False) + title = self._html_search_regex( + r'(.*?)', webpage, 'title') + + self._sort_formats(formats) + thumbnail = 'http://www.tagesschau.de' + thumbnail_fn + + return { + 'id': display_id, + 'title': title, + 'thumbnail': thumbnail, + 'formats': formats, + 'description': description, + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/tapely.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/tapely.py new file mode 100644 index 0000000000..f1f43d0a71 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/tapely.py @@ -0,0 +1,107 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor +from ..compat import ( + compat_urllib_request, +) +from ..utils import ( + clean_html, + ExtractorError, + float_or_none, + parse_iso8601, +) + + +class TapelyIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?tape\.ly/(?P[A-Za-z0-9\-_]+)(?:/(?P\d+))?' 
+ _API_URL = 'http://tape.ly/showtape?id={0:}' + _S3_SONG_URL = 'http://mytape.s3.amazonaws.com/{0:}' + _SOUNDCLOUD_SONG_URL = 'http://api.soundcloud.com{0:}' + _TESTS = [ + { + 'url': 'http://tape.ly/my-grief-as-told-by-water', + 'info_dict': { + 'id': 23952, + 'title': 'my grief as told by water', + 'thumbnail': 're:^https?://.*\.png$', + 'uploader_id': 16484, + 'timestamp': 1411848286, + 'description': 'For Robin and Ponkers, whom the tides of life have taken out to sea.', + }, + 'playlist_count': 13, + }, + { + 'url': 'http://tape.ly/my-grief-as-told-by-water/1', + 'md5': '79031f459fdec6530663b854cbc5715c', + 'info_dict': { + 'id': 258464, + 'title': 'Dreaming Awake (My Brightest Diamond)', + 'ext': 'm4a', + }, + }, + ] + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + display_id = mobj.group('id') + + playlist_url = self._API_URL.format(display_id) + request = compat_urllib_request.Request(playlist_url) + request.add_header('X-Requested-With', 'XMLHttpRequest') + request.add_header('Accept', 'application/json') + request.add_header('Referer', url) + + playlist = self._download_json(request, display_id) + + tape = playlist['tape'] + + entries = [] + for s in tape['songs']: + song = s['song'] + entry = { + 'id': song['id'], + 'duration': float_or_none(song.get('songduration'), 1000), + 'title': song['title'], + } + if song['source'] == 'S3': + entry.update({ + 'url': self._S3_SONG_URL.format(song['filename']), + }) + entries.append(entry) + elif song['source'] == 'YT': + self.to_screen('YouTube video detected') + yt_id = song['filename'].replace('/youtube/', '') + entry.update(self.url_result(yt_id, 'Youtube', video_id=yt_id)) + entries.append(entry) + elif song['source'] == 'SC': + self.to_screen('SoundCloud song detected') + sc_url = self._SOUNDCLOUD_SONG_URL.format(song['filename']) + entry.update(self.url_result(sc_url, 'Soundcloud')) + entries.append(entry) + else: + self.report_warning('Unknown song source: %s' % song['source']) + 
+ if mobj.group('songnr'): + songnr = int(mobj.group('songnr')) - 1 + try: + return entries[songnr] + except IndexError: + raise ExtractorError( + 'No song with index: %s' % mobj.group('songnr'), + expected=True) + + return { + '_type': 'playlist', + 'id': tape['id'], + 'display_id': display_id, + 'title': tape['name'], + 'entries': entries, + 'thumbnail': tape.get('image_url'), + 'description': clean_html(tape.get('subtext')), + 'like_count': tape.get('likescount'), + 'uploader_id': tape.get('user_id'), + 'timestamp': parse_iso8601(tape.get('published_at')), + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/tass.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/tass.py new file mode 100644 index 0000000000..c4ef70778b --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/tass.py @@ -0,0 +1,62 @@ +# encoding: utf-8 +from __future__ import unicode_literals + +import json + +from .common import InfoExtractor +from ..utils import ( + js_to_json, + qualities, +) + + +class TassIE(InfoExtractor): + _VALID_URL = r'https?://(?:tass\.ru|itar-tass\.com)/[^/]+/(?P\d+)' + _TESTS = [ + { + 'url': 'http://tass.ru/obschestvo/1586870', + 'md5': '3b4cdd011bc59174596b6145cda474a4', + 'info_dict': { + 'id': '1586870', + 'ext': 'mp4', + 'title': 'п÷п╬я│п╣я┌п╦я┌п╣п╩я▐п╪ п╪п╬я│п╨п╬п╡я│п╨п╬пЁп╬ п╥п╬п╬п©п╟я─п╨п╟ п©п╬п╨п╟п╥п╟п╩п╦ п╨я─п╟я│п╫я┐я▌ п©п╟п╫п╢я┐', + 'description': 'п÷я─п╦п╣я┘п╟п╡я┬я┐я▌ п╦п╥ п■я┐п╠п╩п╦п╫п╟ п≈п╣п╧п╫я┐ п╪п╬п╤п╫п╬ я┐п╡п╦п╢п╣я┌я▄ п╡ п©п╟п╡п╦п╩я▄п╬п╫п╣ "п п╬я┬п╨п╦ я┌я─п╬п©п╦п╨п╬п╡"', + 'thumbnail': 're:^https?://.*\.jpg$', + }, + }, + { + 'url': 'http://itar-tass.com/obschestvo/1600009', + 'only_matching': True, + }, + ] + + def _real_extract(self, url): + video_id = self._match_id(url) + + webpage = self._download_webpage(url, video_id) + + sources = json.loads(js_to_json(self._search_regex( + r'(?s)sources\s*:\s*(\[.+?\])', webpage, 'sources'))) + + quality = qualities(['sd', 
'hd']) + + formats = [] + for source in sources: + video_url = source.get('file') + if not video_url or not video_url.startswith('http') or not video_url.endswith('.mp4'): + continue + label = source.get('label') + formats.append({ + 'url': video_url, + 'format_id': label, + 'quality': quality(label), + }) + self._sort_formats(formats) + + return { + 'id': video_id, + 'title': self._og_search_title(webpage), + 'description': self._og_search_description(webpage), + 'thumbnail': self._og_search_thumbnail(webpage), + 'formats': formats, + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/teachertube.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/teachertube.py new file mode 100644 index 0000000000..6c3445d792 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/teachertube.py @@ -0,0 +1,133 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor +from ..utils import ( + qualities, + determine_ext, +) + + +class TeacherTubeIE(InfoExtractor): + IE_NAME = 'teachertube' + IE_DESC = 'teachertube.com videos' + + _VALID_URL = r'https?://(?:www\.)?teachertube\.com/(viewVideo\.php\?video_id=|music\.php\?music_id=|video/(?:[\da-z-]+-)?|audio/)(?P\d+)' + + _TESTS = [{ + 'url': 'http://www.teachertube.com/viewVideo.php?video_id=339997', + 'md5': 'f9434ef992fd65936d72999951ee254c', + 'info_dict': { + 'id': '339997', + 'ext': 'mp4', + 'title': 'Measures of dispersion from a frequency table', + 'description': 'Measures of dispersion from a frequency table', + 'thumbnail': 're:http://.*\.jpg', + }, + }, { + 'url': 'http://www.teachertube.com/viewVideo.php?video_id=340064', + 'md5': '0d625ec6bc9bf50f70170942ad580676', + 'info_dict': { + 'id': '340064', + 'ext': 'mp4', + 'title': 'How to Make Paper Dolls _ Paper Art Projects', + 'description': 'Learn how to make paper dolls in this simple', + 'thumbnail': 're:http://.*\.jpg', + }, 
+ }, { + 'url': 'http://www.teachertube.com/music.php?music_id=8805', + 'md5': '01e8352006c65757caf7b961f6050e21', + 'info_dict': { + 'id': '8805', + 'ext': 'mp3', + 'title': 'PER ASPERA AD ASTRA', + 'description': 'RADIJSKA EMISIJA ZRAKOPLOVNE TEHNI?KE ?KOLE P', + }, + }, { + 'url': 'http://www.teachertube.com/video/intro-video-schleicher-297790', + 'md5': '9c79fbb2dd7154823996fc28d4a26998', + 'info_dict': { + 'id': '297790', + 'ext': 'mp4', + 'title': 'Intro Video - Schleicher', + 'description': 'Intro Video - Why to flip, how flipping will', + }, + }] + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group('id') + + webpage = self._download_webpage(url, video_id) + + title = self._html_search_meta('title', webpage, 'title', fatal=True) + TITLE_SUFFIX = ' - TeacherTube' + if title.endswith(TITLE_SUFFIX): + title = title[:-len(TITLE_SUFFIX)].strip() + + description = self._html_search_meta('description', webpage, 'description') + if description: + description = description.strip() + + quality = qualities(['mp3', 'flv', 'mp4']) + + media_urls = re.findall(r'data-contenturl="([^"]+)"', webpage) + media_urls.extend(re.findall(r'var\s+filePath\s*=\s*"([^"]+)"', webpage)) + media_urls.extend(re.findall(r'\'file\'\s*:\s*["\']([^"\']+)["\'],', webpage)) + + formats = [ + { + 'url': media_url, + 'quality': quality(determine_ext(media_url)) + } for media_url in set(media_urls) + ] + + self._sort_formats(formats) + + return { + 'id': video_id, + 'title': title, + 'thumbnail': self._html_search_regex(r'\'image\'\s*:\s*["\']([^"\']+)["\']', webpage, 'thumbnail'), + 'formats': formats, + 'description': description, + } + + +class TeacherTubeUserIE(InfoExtractor): + IE_NAME = 'teachertube:user:collection' + IE_DESC = 'teachertube.com user and collection videos' + + _VALID_URL = r'https?://(?:www\.)?teachertube\.com/(user/profile|collection)/(?P[0-9a-zA-Z]+)/?' 
+ + _MEDIA_RE = r'''(?sx) + class="?sidebar_thumb_time"?>[0-9:]+ + \s* + .+)' + + _TEST = { + 'url': 'https://www.teachingchannel.org/videos/teacher-teaming-evolution', + 'info_dict': { + 'id': 'F3bnlzbToeI6pLEfRyrlfooIILUjz4nM', + 'ext': 'mp4', + 'title': 'A History of Teaming', + 'description': 'md5:2a9033db8da81f2edffa4c99888140b3', + }, + 'params': { + # m3u8 download + 'skip_download': True, + }, + } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + title = mobj.group('title') + webpage = self._download_webpage(url, title) + ooyala_code = self._search_regex( + r'data-embed-code=\'(.+?)\'', webpage, 'ooyala code') + + return OoyalaIE._build_url_result(ooyala_code) diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/teamcoco.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/teamcoco.py new file mode 100644 index 0000000000..5fa67eb8d4 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/teamcoco.py @@ -0,0 +1,84 @@ +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor + + +class TeamcocoIE(InfoExtractor): + _VALID_URL = r'http://teamcoco\.com/video/(?P[0-9]+)?/?(?P.*)' + _TESTS = [ + { + 'url': 'http://teamcoco.com/video/80187/conan-becomes-a-mary-kay-beauty-consultant', + 'file': '80187.mp4', + 'md5': '3f7746aa0dc86de18df7539903d399ea', + 'info_dict': { + 'title': 'Conan Becomes A Mary Kay Beauty Consultant', + 'description': 'Mary Kay is perhaps the most trusted name in female beauty, so of course Conan is a natural choice to sell their products.' + } + }, { + 'url': 'http://teamcoco.com/video/louis-ck-interview-george-w-bush', + 'file': '19705.mp4', + 'md5': 'cde9ba0fa3506f5f017ce11ead928f9a', + 'info_dict': { + "description": "Louis C.K. got starstruck by George W. Bush, so what? Part one.", + "title": "Louis C.K. Interview Pt. 
1 11/3/11" + } + } + ] + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + + display_id = mobj.group('display_id') + webpage = self._download_webpage(url, display_id) + + video_id = mobj.group("video_id") + if not video_id: + video_id = self._html_search_regex( + r'data-node-id="(\d+?)"', + webpage, 'video id') + + data_url = 'http://teamcoco.com/cvp/2.0/%s.xml' % video_id + data = self._download_xml( + data_url, display_id, 'Downloading data webpage') + + qualities = ['500k', '480p', '1000k', '720p', '1080p'] + formats = [] + for filed in data.findall('files/file'): + if filed.attrib.get('playmode') == 'all': + # it just duplicates one of the entries + break + file_url = filed.text + m_format = re.search(r'(\d+(k|p))\.mp4', file_url) + if m_format is not None: + format_id = m_format.group(1) + else: + format_id = filed.attrib['bitrate'] + tbr = ( + int(filed.attrib['bitrate']) + if filed.attrib['bitrate'].isdigit() + else None) + + try: + quality = qualities.index(format_id) + except ValueError: + quality = -1 + formats.append({ + 'url': file_url, + 'ext': 'mp4', + 'tbr': tbr, + 'format_id': format_id, + 'quality': quality, + }) + + self._sort_formats(formats) + + return { + 'id': video_id, + 'display_id': display_id, + 'formats': formats, + 'title': self._og_search_title(webpage), + 'thumbnail': self._og_search_thumbnail(webpage), + 'description': self._og_search_description(webpage), + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/techtalks.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/techtalks.py new file mode 100644 index 0000000000..16e945d8e6 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/techtalks.py @@ -0,0 +1,79 @@ +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor +from ..utils import ( + get_element_by_attribute, + clean_html, +) + + +class TechTalksIE(InfoExtractor): + _VALID_URL = 
r'https?://techtalks\.tv/talks/[^/]*/(?P\d+)/' + + _TEST = { + 'url': 'http://techtalks.tv/talks/learning-topic-models-going-beyond-svd/57758/', + 'info_dict': { + 'id': '57758', + 'title': 'Learning Topic Models --- Going beyond SVD', + }, + 'playlist': [ + { + 'info_dict': { + 'id': '57758', + 'ext': 'flv', + 'title': 'Learning Topic Models --- Going beyond SVD', + }, + }, + { + 'info_dict': { + 'id': '57758-slides', + 'ext': 'flv', + 'title': 'Learning Topic Models --- Going beyond SVD', + }, + }, + ], + 'params': { + # rtmp download + 'skip_download': True, + }, + } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + talk_id = mobj.group('id') + webpage = self._download_webpage(url, talk_id) + rtmp_url = self._search_regex( + r'netConnectionUrl: \'(.*?)\'', webpage, 'rtmp url') + play_path = self._search_regex( + r'href=\'(.*?)\' [^>]*id="flowplayer_presenter"', + webpage, 'presenter play path') + title = clean_html(get_element_by_attribute('class', 'title', webpage)) + video_info = { + 'id': talk_id, + 'title': title, + 'url': rtmp_url, + 'play_path': play_path, + 'ext': 'flv', + } + m_slides = re.search(r'https?://) + (?Pwww|embed)(?P\.ted\.com/ + ( + (?Pplaylists(?:/\d+)?) # We have a playlist + | + ((?Ptalks)) # We have a simple talk + | + (?Pwatch)/[^/]+/[^/]+ + ) + (/lang/(.*?))? 
# The url may contain the language + /(?P[\w-]+) # Here goes the name and then ".html" + .*)$ + ''' + _TESTS = [{ + 'url': 'http://www.ted.com/talks/dan_dennett_on_our_consciousness.html', + 'md5': 'fc94ac279feebbce69f21c0c6ee82810', + 'info_dict': { + 'id': '102', + 'ext': 'mp4', + 'title': 'The illusion of consciousness', + 'description': ('Philosopher Dan Dennett makes a compelling ' + 'argument that not only don\'t we understand our own ' + 'consciousness, but that half the time our brains are ' + 'actively fooling us.'), + 'uploader': 'Dan Dennett', + 'width': 854, + 'duration': 1308, + } + }, { + 'url': 'http://www.ted.com/watch/ted-institute/ted-bcg/vishal-sikka-the-beauty-and-power-of-algorithms', + 'md5': '226f4fb9c62380d11b7995efa4c87994', + 'info_dict': { + 'id': 'vishal-sikka-the-beauty-and-power-of-algorithms', + 'ext': 'mp4', + 'title': 'Vishal Sikka: The beauty and power of algorithms', + 'thumbnail': 're:^https?://.+\.jpg', + 'description': 'Adaptive, intelligent, and consistent, algorithms are emerging as the ultimate app for everything from matching consumers to products to assessing medical diagnoses. Vishal Sikka shares his appreciation for the algorithm, charting both its inherent beauty and its growing power.', + } + }, { + 'url': 'http://www.ted.com/talks/gabby_giffords_and_mark_kelly_be_passionate_be_courageous_be_your_best', + 'info_dict': { + 'id': '1972', + 'ext': 'mp4', + 'title': 'Be passionate. Be courageous. 
Be your best.', + 'uploader': 'Gabby Giffords and Mark Kelly', + 'description': 'md5:5174aed4d0f16021b704120360f72b92', + 'duration': 1128, + }, + }, { + 'url': 'http://www.ted.com/playlists/who_are_the_hackers', + 'info_dict': { + 'id': '10', + 'title': 'Who are the hackers?', + }, + 'playlist_mincount': 6, + }, { + # contains a youtube video + 'url': 'https://www.ted.com/talks/douglas_adams_parrots_the_universe_and_everything', + 'add_ie': ['Youtube'], + 'info_dict': { + 'id': '_ZG8HBuDjgc', + 'ext': 'mp4', + 'title': 'Douglas Adams: Parrots the Universe and Everything', + 'description': 'md5:01ad1e199c49ac640cb1196c0e9016af', + 'uploader': 'University of California Television (UCTV)', + 'uploader_id': 'UCtelevision', + 'upload_date': '20080522', + }, + 'params': { + 'skip_download': True, + }, + }] + + _NATIVE_FORMATS = { + 'low': {'preference': 1, 'width': 320, 'height': 180}, + 'medium': {'preference': 2, 'width': 512, 'height': 288}, + 'high': {'preference': 3, 'width': 854, 'height': 480}, + } + + def _extract_info(self, webpage): + info_json = self._search_regex(r'q\("\w+.init",({.+})\)', + webpage, 'info json') + return json.loads(info_json) + + def _real_extract(self, url): + m = re.match(self._VALID_URL, url, re.VERBOSE) + if m.group('type') == 'embed': + desktop_url = m.group('proto') + 'www' + m.group('urlmain') + return self.url_result(desktop_url, 'TED') + name = m.group('name') + if m.group('type_talk'): + return self._talk_info(url, name) + elif m.group('type_watch'): + return self._watch_info(url, name) + else: + return self._playlist_videos_info(url, name) + + def _playlist_videos_info(self, url, name): + '''Returns the videos of the playlist''' + + webpage = self._download_webpage(url, name, + 'Downloading playlist webpage') + info = self._extract_info(webpage) + playlist_info = info['playlist'] + + playlist_entries = [ + self.url_result('http://www.ted.com/talks/' + talk['slug'], self.ie_key()) + for talk in info['talks'] + ] + return 
self.playlist_result( + playlist_entries, + playlist_id=compat_str(playlist_info['id']), + playlist_title=playlist_info['title']) + + def _talk_info(self, url, video_name): + webpage = self._download_webpage(url, video_name) + self.report_extraction(video_name) + + talk_info = self._extract_info(webpage)['talks'][0] + + if talk_info.get('external') is not None: + self.to_screen('Found video from %s' % talk_info['external']['service']) + return { + '_type': 'url', + 'url': talk_info['external']['uri'], + } + + formats = [{ + 'url': format_url, + 'format_id': format_id, + 'format': format_id, + } for (format_id, format_url) in talk_info['nativeDownloads'].items() if format_url is not None] + if formats: + for f in formats: + finfo = self._NATIVE_FORMATS.get(f['format_id']) + if finfo: + f.update(finfo) + else: + # Use rtmp downloads + formats = [{ + 'format_id': f['name'], + 'url': talk_info['streamer'], + 'play_path': f['file'], + 'ext': 'flv', + 'width': f['width'], + 'height': f['height'], + 'tbr': f['bitrate'], + } for f in talk_info['resources']['rtmp']] + self._sort_formats(formats) + + video_id = compat_str(talk_info['id']) + # subtitles + video_subtitles = self.extract_subtitles(video_id, talk_info) + if self._downloader.params.get('listsubtitles', False): + self._list_available_subtitles(video_id, talk_info) + return + + thumbnail = talk_info['thumb'] + if not thumbnail.startswith('http'): + thumbnail = 'http://' + thumbnail + return { + 'id': video_id, + 'title': talk_info['title'].strip(), + 'uploader': talk_info['speaker'], + 'thumbnail': thumbnail, + 'description': self._og_search_description(webpage), + 'subtitles': video_subtitles, + 'formats': formats, + 'duration': talk_info.get('duration'), + } + + def _get_available_subtitles(self, video_id, talk_info): + languages = [lang['languageCode'] for lang in talk_info.get('languages', [])] + if languages: + sub_lang_list = {} + for l in languages: + url = 
'http://www.ted.com/talks/subtitles/id/%s/lang/%s/format/srt' % (video_id, l) + sub_lang_list[l] = url + return sub_lang_list + else: + self._downloader.report_warning('video doesn\'t have subtitles') + return {} + + def _watch_info(self, url, name): + webpage = self._download_webpage(url, name) + + config_json = self._html_search_regex( + r'"pages\.jwplayer"\s*,\s*({.+?})\s*\)\s*', + webpage, 'config') + config = json.loads(config_json)['config'] + video_url = config['video']['url'] + thumbnail = config.get('image', {}).get('url') + + title = self._html_search_regex( + r"(?s)(.+?)", webpage, 'title') + description = self._html_search_regex( + [ + r'(?s)

    .*?

    (.*?)', + r'(?s)

    About this talk:\s+(.*?)

    ', + ], + webpage, 'description', fatal=False) + + return { + 'id': name, + 'url': video_url, + 'title': title, + 'thumbnail': thumbnail, + 'description': description, + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/telebruxelles.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/telebruxelles.py new file mode 100644 index 0000000000..a3d05f97d6 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/telebruxelles.py @@ -0,0 +1,60 @@ +# coding: utf-8 +from __future__ import unicode_literals + +from .common import InfoExtractor + + +class TeleBruxellesIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?telebruxelles\.be/(news|sport|dernier-jt)/?(?P[^/#?]+)' + _TESTS = [{ + 'url': 'http://www.telebruxelles.be/news/auditions-devant-parlement-francken-galant-tres-attendus/', + 'md5': '59439e568c9ee42fb77588b2096b214f', + 'info_dict': { + 'id': '11942', + 'display_id': 'auditions-devant-parlement-francken-galant-tres-attendus', + 'ext': 'flv', + 'title': 'Parlement : Francken et Galant rц╘pondent aux interpellations de lБ─≥opposition', + 'description': 're:Les auditions des ministres se poursuivent*' + }, + 'params': { + 'skip_download': 'requires rtmpdump' + }, + }, { + 'url': 'http://www.telebruxelles.be/sport/basket-brussels-bat-mons-80-74/', + 'md5': '181d3fbdcf20b909309e5aef5c6c6047', + 'info_dict': { + 'id': '10091', + 'display_id': 'basket-brussels-bat-mons-80-74', + 'ext': 'flv', + 'title': 'Basket : le Brussels bat Mons 80-74', + 'description': 're:^Ils l\u2019on fait ! En basket, le B*', + }, + 'params': { + 'skip_download': 'requires rtmpdump' + }, + }] + + def _real_extract(self, url): + display_id = self._match_id(url) + webpage = self._download_webpage(url, display_id) + + article_id = self._html_search_regex( + r"
    (.*?)', webpage, 'title') + description = self._og_search_description(webpage) + + rtmp_url = self._html_search_regex( + r"file: \"(rtmp://\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}:\d{1,5}/vod/mp4:\" \+ \"\w+\" \+ \".mp4)\"", + webpage, 'RTMP url') + rtmp_url = rtmp_url.replace("\" + \"", "") + + return { + 'id': article_id, + 'display_id': display_id, + 'title': title, + 'description': description, + 'url': rtmp_url, + 'ext': 'flv', + 'rtmp_live': True # if rtmpdump is not called with "--live" argument, the download is blocked and can be completed + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/telecinco.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/telecinco.py new file mode 100644 index 0000000000..be3f72df7c --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/telecinco.py @@ -0,0 +1,19 @@ +# coding: utf-8 +from __future__ import unicode_literals + +from .mitele import MiTeleIE + + +class TelecincoIE(MiTeleIE): + IE_NAME = 'telecinco.es' + _VALID_URL = r'https?://www\.telecinco\.es/[^/]+/[^/]+/[^/]+/(?P.*?)\.html' + + _TEST = { + 'url': 'http://www.telecinco.es/robinfood/temporada-01/t01xp14/Bacalao-cocochas-pil-pil_0_1876350223.html', + 'info_dict': { + 'id': 'MDSVID20141015_0058', + 'ext': 'mp4', + 'title': 'Con Martц╜n Berasategui, hacer un bacalao al ...', + 'duration': 662, + }, + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/telemb.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/telemb.py new file mode 100644 index 0000000000..1bbd0e7bdf --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/telemb.py @@ -0,0 +1,78 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor +from ..utils import remove_start + + +class TeleMBIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?telemb\.be/(?P.+?)_d_(?P\d+)\.html' + 
_TESTS = [ + { + 'url': 'http://www.telemb.be/mons-cook-with-danielle-des-cours-de-cuisine-en-anglais-_d_13466.html', + 'md5': 'f45ea69878516ba039835794e0f8f783', + 'info_dict': { + 'id': '13466', + 'display_id': 'mons-cook-with-danielle-des-cours-de-cuisine-en-anglais-', + 'ext': 'mp4', + 'title': 'Mons - Cook with Danielle : des cours de cuisine en anglais ! - Les reportages', + 'description': 'md5:bc5225f47b17c309761c856ad4776265', + 'thumbnail': 're:^http://.*\.(?:jpg|png)$', + } + }, + { + # non-ASCII characters in download URL + 'url': 'http://telemb.be/les-reportages-havre-incendie-mortel_d_13514.html', + 'md5': '6e9682736e5ccd4eab7f21e855350733', + 'info_dict': { + 'id': '13514', + 'display_id': 'les-reportages-havre-incendie-mortel', + 'ext': 'mp4', + 'title': 'Havrц╘ - Incendie mortel - Les reportages', + 'description': 'md5:5e54cb449acb029c2b7734e2d946bd4a', + 'thumbnail': 're:^http://.*\.(?:jpg|png)$', + } + }, + ] + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group('id') + display_id = mobj.group('display_id') + + webpage = self._download_webpage(url, display_id) + + formats = [] + for video_url in re.findall(r'file\s*:\s*"([^"]+)"', webpage): + fmt = { + 'url': video_url, + 'format_id': video_url.split(':')[0] + } + rtmp = re.search(r'^(?Prtmp://[^/]+/(?P.+))/(?Pmp4:.+)$', video_url) + if rtmp: + fmt.update({ + 'play_path': rtmp.group('playpath'), + 'app': rtmp.group('app'), + 'player_url': 'http://p.jwpcdn.com/6/10/jwplayer.flash.swf', + 'page_url': 'http://www.telemb.be', + 'preference': -1, + }) + formats.append(fmt) + self._sort_formats(formats) + + title = remove_start(self._og_search_title(webpage), 'Tц╘lц╘MB : ') + description = self._html_search_regex( + r'', + webpage, 'description', fatal=False) + thumbnail = self._og_search_thumbnail(webpage) + + return { + 'id': video_id, + 'display_id': display_id, + 'title': title, + 'description': description, + 'thumbnail': thumbnail, + 'formats': formats, 
+ } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/tenplay.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/tenplay.py new file mode 100644 index 0000000000..466155ef80 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/tenplay.py @@ -0,0 +1,81 @@ +# coding: utf-8 +from __future__ import unicode_literals + +from .common import InfoExtractor + + +class TenPlayIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?ten(play)?\.com\.au/.+' + _TEST = { + 'url': 'http://tenplay.com.au/ten-insider/extra/season-2013/tenplay-tv-your-way', + 'info_dict': { + 'id': '2695695426001', + 'ext': 'flv', + 'title': 'TENplay: TV your way', + 'description': 'Welcome to a new TV experience. Enjoy a taste of the TENplay benefits.', + 'timestamp': 1380150606.889, + 'upload_date': '20130925', + 'uploader': 'TENplay', + }, + 'params': { + 'skip_download': True, # Requires rtmpdump + } + } + + _video_fields = [ + "id", "name", "shortDescription", "longDescription", "creationDate", + "publishedDate", "lastModifiedDate", "customFields", "videoStillURL", + "thumbnailURL", "referenceId", "length", "playsTotal", + "playsTrailingWeek", "renditions", "captioning", "startDate", "endDate"] + + def _real_extract(self, url): + webpage = self._download_webpage(url, url) + video_id = self._html_search_regex( + r'videoID: "(\d+?)"', webpage, 'video_id') + api_token = self._html_search_regex( + r'apiToken: "([a-zA-Z0-9-_\.]+?)"', webpage, 'api_token') + title = self._html_search_regex( + r'', + webpage, 'title') + + json = self._download_json('https://api.brightcove.com/services/library?command=find_video_by_id&video_id=%s&token=%s&video_fields=%s' % (video_id, api_token, ','.join(self._video_fields)), title) + + formats = [] + for rendition in json['renditions']: + url = rendition['remoteUrl'] or rendition['url'] + protocol = 'rtmp' if url.startswith('rtmp') else 'http' + ext = 'flv' if protocol == 'rtmp' else 
rendition['videoContainer'].lower() + + if protocol == 'rtmp': + url = url.replace('&mp4:', '') + + formats.append({ + 'format_id': '_'.join(['rtmp', rendition['videoContainer'].lower(), rendition['videoCodec'].lower()]), + 'width': rendition['frameWidth'], + 'height': rendition['frameHeight'], + 'tbr': rendition['encodingRate'] / 1024, + 'filesize': rendition['size'], + 'protocol': protocol, + 'ext': ext, + 'vcodec': rendition['videoCodec'].lower(), + 'container': rendition['videoContainer'].lower(), + 'url': url, + }) + + return { + 'id': video_id, + 'display_id': json['referenceId'], + 'title': json['name'], + 'description': json['shortDescription'] or json['longDescription'], + 'formats': formats, + 'thumbnails': [{ + 'url': json['videoStillURL'] + }, { + 'url': json['thumbnailURL'] + }], + 'thumbnail': json['videoStillURL'], + 'duration': json['length'] / 1000, + 'timestamp': float(json['creationDate']) / 1000, + 'uploader': json['customFields']['production_company_distributor'] if 'production_company_distributor' in json['customFields'] else 'TENplay', + 'view_count': json['playsTotal'] + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/testurl.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/testurl.py new file mode 100644 index 0000000000..c7d559315b --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/testurl.py @@ -0,0 +1,68 @@ +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor +from ..utils import ExtractorError + + +class TestURLIE(InfoExtractor): + """ Allows adressing of the test cases as test:yout.*be_1 """ + + IE_DESC = False # Do not list + _VALID_URL = r'test(?:url)?:(?P(?P.+?)(?:_(?P[0-9]+))?)$' + + def _real_extract(self, url): + from ..extractor import gen_extractors + + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group('id') + extractor_id = mobj.group('extractor') + all_extractors = gen_extractors() 
+ + rex = re.compile(extractor_id, flags=re.IGNORECASE) + matching_extractors = [ + e for e in all_extractors if rex.search(e.IE_NAME)] + + if len(matching_extractors) == 0: + raise ExtractorError( + 'No extractors matching %r found' % extractor_id, + expected=True) + elif len(matching_extractors) > 1: + # Is it obvious which one to pick? + try: + extractor = next( + ie for ie in matching_extractors + if ie.IE_NAME.lower() == extractor_id.lower()) + except StopIteration: + raise ExtractorError( + ('Found multiple matching extractors: %s' % + ' '.join(ie.IE_NAME for ie in matching_extractors)), + expected=True) + else: + extractor = matching_extractors[0] + + num_str = mobj.group('num') + num = int(num_str) if num_str else 0 + + testcases = [] + t = getattr(extractor, '_TEST', None) + if t: + testcases.append(t) + testcases.extend(getattr(extractor, '_TESTS', [])) + + try: + tc = testcases[num] + except IndexError: + raise ExtractorError( + ('Test case %d not found, got only %d tests' % + (num, len(testcases))), + expected=True) + + self.to_screen('Test URL: %s' % tc['url']) + + return { + '_type': 'url', + 'url': tc['url'], + 'id': video_id, + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/tf1.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/tf1.py new file mode 100644 index 0000000000..6e61cc9e2e --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/tf1.py @@ -0,0 +1,37 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor + + +class TF1IE(InfoExtractor): + """TF1 uses the wat.tv player.""" + _VALID_URL = r'http://videos\.tf1\.fr/.*-(?P.*?)\.html' + _TEST = { + 'url': 'http://videos.tf1.fr/auto-moto/citroen-grand-c4-picasso-2013-presentation-officielle-8062060.html', + 'info_dict': { + 'id': '10635995', + 'ext': 'mp4', + 'title': 'Citroц╚n Grand C4 Picasso 2013 : prц╘sentation officielle', + 'description': 'Vidц╘o 
officielle du nouveau Citroц╚n Grand C4 Picasso, lancц╘ ц═ l\'automne 2013.', + }, + 'params': { + # Sometimes wat serves the whole file with the --test option + 'skip_download': True, + }, + } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group('id') + webpage = self._download_webpage(url, video_id) + embed_url = self._html_search_regex( + r'"(https://www.wat.tv/embedframe/.*?)"', webpage, 'embed url') + embed_page = self._download_webpage(embed_url, video_id, + 'Downloading embed player page') + wat_id = self._search_regex(r'UVID=(.*?)&', embed_page, 'wat id') + wat_info = self._download_json( + 'http://www.wat.tv/interface/contentv3/%s' % wat_id, video_id) + return self.url_result(wat_info['media']['url'], 'Wat') diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/theonion.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/theonion.py new file mode 100644 index 0000000000..b65d8e03f7 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/theonion.py @@ -0,0 +1,70 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor +from ..utils import ExtractorError + + +class TheOnionIE(InfoExtractor): + _VALID_URL = r'(?x)https?://(?:www\.)?theonion\.com/video/[^,]+,(?P[0-9]+)/?' 
+ _TEST = { + 'url': 'http://www.theonion.com/video/man-wearing-mm-jacket-gods-image,36918/', + 'md5': '19eaa9a39cf9b9804d982e654dc791ee', + 'info_dict': { + 'id': '2133', + 'ext': 'mp4', + 'title': 'Man Wearing M&M Jacket Apparently Made In God\'s Image', + 'description': 'md5:cc12448686b5600baae9261d3e180910', + 'thumbnail': 're:^https?://.*\.jpg\?\d+$', + } + } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + article_id = mobj.group('article_id') + + webpage = self._download_webpage(url, article_id) + + video_id = self._search_regex( + r'"videoId":\s(\d+),', webpage, 'video ID') + title = self._og_search_title(webpage) + description = self._og_search_description(webpage) + thumbnail = self._og_search_thumbnail(webpage) + + sources = re.findall(r'(?:[^/\?]+/(?:swf|config)|onsite)/select/)? + |theplatform:)(?P[^/\?&]+)''' + + _TEST = { + # from http://www.metacafe.com/watch/cb-e9I_cZgTgIPd/blackberrys_big_bold_z30/ + 'url': 'http://link.theplatform.com/s/dJ5BDC/e9I_cZgTgIPd/meta.smil?format=smil&Tracking=true&mbr=true', + 'info_dict': { + 'id': 'e9I_cZgTgIPd', + 'ext': 'flv', + 'title': 'Blackberry\'s big, bold Z30', + 'description': 'The Z30 is Blackberry\'s biggest, baddest mobile messaging device yet.', + 'duration': 247, + }, + 'params': { + # rtmp download + 'skip_download': True, + }, + } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group('id') + if mobj.group('config'): + config_url = url + '&form=json' + config_url = config_url.replace('swf/', 'config/') + config_url = config_url.replace('onsite/', 'onsite/config/') + config = self._download_json(config_url, video_id, 'Downloading config') + smil_url = config['releaseUrl'] + '&format=SMIL&formats=MPEG4&manifest=f4m' + else: + smil_url = ('http://link.theplatform.com/s/dJ5BDC/{0}/meta.smil?' 
+ 'format=smil&mbr=true'.format(video_id)) + + meta = self._download_xml(smil_url, video_id) + try: + error_msg = next( + n.attrib['abstract'] + for n in meta.findall(_x('.//smil:ref')) + if n.attrib.get('title') == 'Geographic Restriction') + except StopIteration: + pass + else: + raise ExtractorError(error_msg, expected=True) + + info_url = 'http://link.theplatform.com/s/dJ5BDC/{0}?format=preview'.format(video_id) + info_json = self._download_webpage(info_url, video_id) + info = json.loads(info_json) + + subtitles = {} + captions = info.get('captions') + if isinstance(captions, list): + for caption in captions: + lang, src = caption.get('lang'), caption.get('src') + if lang and src: + subtitles[lang] = src + + if self._downloader.params.get('listsubtitles', False): + self._list_available_subtitles(video_id, subtitles) + return + + subtitles = self.extract_subtitles(video_id, subtitles) + + head = meta.find(_x('smil:head')) + body = meta.find(_x('smil:body')) + + f4m_node = body.find(_x('smil:seq//smil:video')) + if f4m_node is not None and '.f4m' in f4m_node.attrib['src']: + f4m_url = f4m_node.attrib['src'] + if 'manifest.f4m?' not in f4m_url: + f4m_url += '?' 
+ # the parameters are from syfy.com, other sites may use others, + # they also work for nbc.com + f4m_url += '&g=UXWGVKRWHFSP&hdcore=3.0.3' + formats = self._extract_f4m_formats(f4m_url, video_id) + else: + formats = [] + switch = body.find(_x('smil:switch')) + if switch is not None: + base_url = head.find(_x('smil:meta')).attrib['base'] + for f in switch.findall(_x('smil:video')): + attr = f.attrib + width = int(attr['width']) + height = int(attr['height']) + vbr = int(attr['system-bitrate']) // 1000 + format_id = '%dx%d_%dk' % (width, height, vbr) + formats.append({ + 'format_id': format_id, + 'url': base_url, + 'play_path': 'mp4:' + attr['src'], + 'ext': 'flv', + 'width': width, + 'height': height, + 'vbr': vbr, + }) + else: + switch = body.find(_x('smil:seq//smil:switch')) + for f in switch.findall(_x('smil:video')): + attr = f.attrib + vbr = int(attr['system-bitrate']) // 1000 + ext = determine_ext(attr['src']) + if ext == 'once': + ext = 'mp4' + formats.append({ + 'format_id': compat_str(vbr), + 'url': attr['src'], + 'vbr': vbr, + 'ext': ext, + }) + self._sort_formats(formats) + + return { + 'id': video_id, + 'title': info['title'], + 'subtitles': subtitles, + 'formats': formats, + 'description': info['description'], + 'thumbnail': info['defaultThumbnailUrl'], + 'duration': info['duration'] // 1000, + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/thesixtyone.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/thesixtyone.py new file mode 100644 index 0000000000..a77c6a2fc9 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/thesixtyone.py @@ -0,0 +1,100 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import json +import re + +from .common import InfoExtractor +from ..utils import unified_strdate + + +class TheSixtyOneIE(InfoExtractor): + _VALID_URL = r'''(?x)https?://(?:www\.)?thesixtyone\.com/ + (?:.*?/)* + (?: + s| + song/comments/list| + song + 
)/(?P[A-Za-z0-9]+)/?$''' + _SONG_URL_TEMPLATE = 'http://thesixtyone.com/s/{0:}' + _SONG_FILE_URL_TEMPLATE = 'http://{audio_server:}.thesixtyone.com/thesixtyone_production/audio/{0:}_stream' + _THUMBNAIL_URL_TEMPLATE = '{photo_base_url:}_desktop' + _TESTS = [ + { + 'url': 'http://www.thesixtyone.com/s/SrE3zD7s1jt/', + 'md5': '821cc43b0530d3222e3e2b70bb4622ea', + 'info_dict': { + 'id': 'SrE3zD7s1jt', + 'ext': 'mp3', + 'title': 'CASIO - Unicorn War Mixtape', + 'thumbnail': 're:^https?://.*_desktop$', + 'upload_date': '20071217', + 'duration': 3208, + } + }, + { + 'url': 'http://www.thesixtyone.com/song/comments/list/SrE3zD7s1jt', + 'only_matching': True, + }, + { + 'url': 'http://www.thesixtyone.com/s/ULoiyjuJWli#/s/SrE3zD7s1jt/', + 'only_matching': True, + }, + { + 'url': 'http://www.thesixtyone.com/#/s/SrE3zD7s1jt/', + 'only_matching': True, + }, + { + 'url': 'http://www.thesixtyone.com/song/SrE3zD7s1jt/', + 'only_matching': True, + }, + ] + + _DECODE_MAP = { + "x": "a", + "m": "b", + "w": "c", + "q": "d", + "n": "e", + "p": "f", + "a": "0", + "h": "1", + "e": "2", + "u": "3", + "s": "4", + "i": "5", + "o": "6", + "y": "7", + "r": "8", + "c": "9" + } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + song_id = mobj.group('id') + + webpage = self._download_webpage( + self._SONG_URL_TEMPLATE.format(song_id), song_id) + + song_data = json.loads(self._search_regex( + r'"%s":\s(\{.*?\})' % song_id, webpage, 'song_data')) + keys = [self._DECODE_MAP.get(s, s) for s in song_data['key']] + url = self._SONG_FILE_URL_TEMPLATE.format( + "".join(reversed(keys)), **song_data) + + formats = [{ + 'format_id': 'sd', + 'url': url, + 'ext': 'mp3', + }] + + return { + 'id': song_id, + 'title': '{artist:} - {name:}'.format(**song_data), + 'formats': formats, + 'comment_count': song_data.get('comments_count'), + 'duration': song_data.get('play_time'), + 'like_count': song_data.get('score'), + 'thumbnail': self._THUMBNAIL_URL_TEMPLATE.format(**song_data), + 
'upload_date': unified_strdate(song_data.get('publish_date')), + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/thisav.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/thisav.py new file mode 100644 index 0000000000..7f323c9387 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/thisav.py @@ -0,0 +1,47 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor +from ..utils import determine_ext + + +class ThisAVIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?thisav\.com/video/(?P[0-9]+)/.*' + _TEST = { + 'url': 'http://www.thisav.com/video/47734/%98%26sup1%3B%83%9E%83%82---just-fit.html', + 'md5': '0480f1ef3932d901f0e0e719f188f19b', + 'info_dict': { + 'id': '47734', + 'ext': 'flv', + 'title': 'И╚≤Ф╗╧Ц┐·Ц┐╙Ц┌╒ - Just fit', + 'uploader': 'dj7970', + 'uploader_id': 'dj7970' + } + } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + + video_id = mobj.group('id') + webpage = self._download_webpage(url, video_id) + title = self._html_search_regex(r'

    ([^<]*)

    ', webpage, 'title') + video_url = self._html_search_regex( + r"addVariable\('file','([^']+)'\);", webpage, 'video url') + uploader = self._html_search_regex( + r':
    ([^<]+)', + webpage, 'uploader name', fatal=False) + uploader_id = self._html_search_regex( + r': (?:[^<]+)', + webpage, 'uploader id', fatal=False) + ext = determine_ext(video_url) + + return { + 'id': video_id, + 'url': video_url, + 'uploader': uploader, + 'uploader_id': uploader_id, + 'title': title, + 'ext': ext, + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/thvideo.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/thvideo.py new file mode 100644 index 0000000000..496f15d80b --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/thvideo.py @@ -0,0 +1,84 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor +from ..utils import ( + unified_strdate +) + + +class THVideoIE(InfoExtractor): + _VALID_URL = r'http://(?:www\.)?thvideo\.tv/(?:v/th|mobile\.php\?cid=)(?P[0-9]+)' + _TEST = { + 'url': 'http://thvideo.tv/v/th1987/', + 'md5': 'fa107b1f73817e325e9433505a70db50', + 'info_dict': { + 'id': '1987', + 'ext': 'mp4', + 'title': 'Ц─░Е┼╗Г■╩Ц─▒Г╖≤Е╟│Ф╢╩Е┼╗Х╝╟Е╫∙ О╫· The Sealed Esoteric History.Е┬├И∙°Г╗©И╒└Х╖┬', + 'display_id': 'th1987', + 'thumbnail': 'http://thvideo.tv/uploadfile/2014/0722/20140722013459856.jpg', + 'description': 'Г╓╬Е⌡╒Д╨╛И┐╫Е╧╩Ф┐ЁЕ┴╖Е⌡╒Г └Г╛╛Д╦─Д╦╙Д╦°Ф√╧Д╨▄Ф╛║Е░▄Д╨╨Е┼╗Г■╩Д╫°Е⌠│Ц─▄Г╖≤Е╟│Ф╢╩Е┼╗Х╝╟Е╫∙ О╫· The Sealed Esoteric History.Ц─█ Ф°╛Х╖├И╒▒Ф≤╞Х╞╔Е┼╗Г■╩Г╛╛Д╦─Ф°÷Г └Е┬├И∙°Х█┴Г╗©...', + 'upload_date': '20140722' + } + } + + def _real_extract(self, url): + video_id = self._match_id(url) + + # extract download link from mobile player page + webpage_player = self._download_webpage( + 'http://thvideo.tv/mobile.php?cid=%s-0' % (video_id), + video_id, note='Downloading video source page') + video_url = self._html_search_regex( + r'', webpage, + 'upload date', fatal=False)) + + return { + 'id': video_id, + 'ext': 'mp4', + 'url': video_url, + 'title': title, + 'display_id': display_id, + 'thumbnail': 
thumbnail, + 'description': description, + 'upload_date': upload_date + } + + +class THVideoPlaylistIE(InfoExtractor): + _VALID_URL = r'http?://(?:www\.)?thvideo\.tv/mylist(?P[0-9]+)' + _TEST = { + 'url': 'http://thvideo.tv/mylist2', + 'info_dict': { + 'id': '2', + 'title': 'Е╧╩Ф┐ЁД╦┤Х▐╞И▐║', + }, + 'playlist_mincount': 23, + } + + def _real_extract(self, url): + playlist_id = self._match_id(url) + + webpage = self._download_webpage(url, playlist_id) + list_title = self._html_search_regex( + r'

    (.*?)[^&]+)&s=\d+' + + _TEST = { + 'url': 'http://tinypic.com/player.php?v=6xw7tc%3E&s=5#.UtqZmbRFCM8', + 'md5': '609b74432465364e72727ebc6203f044', + 'info_dict': { + 'id': '6xw7tc', + 'ext': 'flv', + 'title': 'shadow phenomenon weird', + } + } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group('id') + + webpage = self._download_webpage(url, video_id, 'Downloading page') + + mobj = re.search(r'(?m)fo\.addVariable\("file",\s"(?P[\da-z]+)"\);\n' + '\s+fo\.addVariable\("s",\s"(?P\d+)"\);', webpage) + if mobj is None: + raise ExtractorError('Video %s does not exist' % video_id, expected=True) + + file_id = mobj.group('fileid') + server_id = mobj.group('serverid') + + KEYWORDS_SUFFIX = ', Video, images, photos, videos, myspace, ebay, video hosting, photo hosting' + keywords = self._html_search_meta('keywords', webpage, 'title') + title = keywords[:-len(KEYWORDS_SUFFIX)] if keywords.endswith(KEYWORDS_SUFFIX) else '' + + video_url = 'http://v%s.tinypic.com/%s.flv' % (server_id, file_id) + thumbnail = 'http://v%s.tinypic.com/%s_th.jpg' % (server_id, file_id) + + return { + 'id': file_id, + 'url': video_url, + 'thumbnail': thumbnail, + 'title': title + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/tlc.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/tlc.py new file mode 100644 index 0000000000..9f9e388c50 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/tlc.py @@ -0,0 +1,66 @@ +# encoding: utf-8 +from __future__ import unicode_literals +import re + +from .common import InfoExtractor +from .brightcove import BrightcoveIE +from .discovery import DiscoveryIE +from ..compat import compat_urlparse + + +class TlcIE(DiscoveryIE): + IE_NAME = 'tlc.com' + _VALID_URL = r'http://www\.tlc\.com\/[a-zA-Z0-9\-]*/[a-zA-Z0-9\-]*/videos/(?P[a-zA-Z0-9\-]*)(.htm)?' 
+ + _TEST = { + 'url': 'http://www.tlc.com/tv-shows/cake-boss/videos/too-big-to-fly.htm', + 'md5': 'c4038f4a9b44d0b5d74caaa64ed2a01a', + 'info_dict': { + 'id': '853232', + 'ext': 'mp4', + 'title': 'Cake Boss: Too Big to Fly', + 'description': 'Buddy has taken on a high flying task.', + 'duration': 119, + }, + } + + +class TlcDeIE(InfoExtractor): + IE_NAME = 'tlc.de' + _VALID_URL = r'http://www\.tlc\.de/sendungen/[^/]+/videos/(?P[^/?]+)' + + _TEST = { + 'url': 'http://www.tlc.de/sendungen/breaking-amish/videos/#3235167922001', + 'info_dict': { + 'id': '3235167922001', + 'ext': 'mp4', + 'title': 'Breaking Amish: Die Welt da drauц÷en', + 'uploader': 'Discovery Networks - Germany', + 'description': ( + 'Vier Amische und eine Mennonitin wagen in New York' + ' den Sprung in ein komplett anderes Leben. Begleitet sie auf' + ' ihrem spannenden Weg.'), + }, + } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + title = mobj.group('title') + webpage = self._download_webpage(url, title) + iframe_url = self._search_regex( + '<iframe src="(http://www\.tlc\.de/wp-content/.+?)"', webpage, + 'iframe url') + # Otherwise we don't get the correct 'BrightcoveExperience' element, + # example: http://www.tlc.de/sendungen/cake-boss/videos/cake-boss-cannoli-drama/ + iframe_url = iframe_url.replace('.htm?', '.php?') + url_fragment = compat_urlparse.urlparse(url).fragment + if url_fragment: + # Since the fragment is not send to the server, we always get the same iframe + iframe_url = re.sub(r'playlist=(\d+)', 'playlist=%s' % url_fragment, iframe_url) + iframe = self._download_webpage(iframe_url, title) + + return { + '_type': 'url', + 'url': BrightcoveIE._extract_brightcove_url(iframe), + 'ie': BrightcoveIE.ie_key(), + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/tmz.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/tmz.py new file mode 100644 index 0000000000..c5c6fdc51b --- /dev/null +++ 
b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/tmz.py @@ -0,0 +1,32 @@ +# coding: utf-8 +from __future__ import unicode_literals + +from .common import InfoExtractor + + +class TMZIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?tmz\.com/videos/(?P<id>[^/]+)/?' + _TEST = { + 'url': 'http://www.tmz.com/videos/0_okj015ty/', + 'md5': '791204e3bf790b1426cb2db0706184c0', + 'info_dict': { + 'id': '0_okj015ty', + 'url': 'http://tmz.vo.llnwd.net/o28/2014-03/13/0_okj015ty_0_rt8ro3si_2.mp4', + 'ext': 'mp4', + 'title': 'Kim Kardashian\'s Boobs Unlock a Mystery!', + 'description': 'Did Kim Kardasain try to one-up Khloe by one-upping Kylie??? Or is she just showing off her amazing boobs?', + 'thumbnail': r're:http://cdnbakmi\.kaltura\.com/.*thumbnail.*', + } + } + + def _real_extract(self, url): + video_id = self._match_id(url) + webpage = self._download_webpage(url, video_id) + + return { + 'id': video_id, + 'url': self._html_search_meta('VideoURL', webpage, fatal=True), + 'title': self._og_search_title(webpage), + 'description': self._og_search_description(webpage), + 'thumbnail': self._html_search_meta('ThumbURL', webpage), + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/tnaflix.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/tnaflix.py new file mode 100644 index 0000000000..0ecd695f85 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/tnaflix.py @@ -0,0 +1,84 @@ +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor +from ..utils import ( + parse_duration, + fix_xml_ampersands, +) + + +class TNAFlixIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?tnaflix\.com/(?P<cat_id>[\w-]+)/(?P<display_id>[\w-]+)/video(?P<id>\d+)' + + _TITLE_REGEX = None + _DESCRIPTION_REGEX = r'<h3 itemprop="description">([^<]+)</h3>' + _CONFIG_REGEX = r'flashvars\.config\s*=\s*escape\("([^"]+)"' + + _TEST = { + 'url': 
'http://www.tnaflix.com/porn-stars/Carmella-Decesare-striptease/video553878', + 'md5': 'ecf3498417d09216374fc5907f9c6ec0', + 'info_dict': { + 'id': '553878', + 'display_id': 'Carmella-Decesare-striptease', + 'ext': 'mp4', + 'title': 'Carmella Decesare - striptease', + 'description': '', + 'thumbnail': 're:https?://.*\.jpg$', + 'duration': 91, + 'age_limit': 18, + } + } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group('id') + display_id = mobj.group('display_id') + + webpage = self._download_webpage(url, display_id) + + title = self._html_search_regex( + self._TITLE_REGEX, webpage, 'title') if self._TITLE_REGEX else self._og_search_title(webpage) + description = self._html_search_regex( + self._DESCRIPTION_REGEX, webpage, 'description', fatal=False, default='') + + age_limit = self._rta_search(webpage) + + duration = self._html_search_meta('duration', webpage, 'duration', default=None) + if duration: + duration = parse_duration(duration[1:]) + + cfg_url = self._html_search_regex( + self._CONFIG_REGEX, webpage, 'flashvars.config') + + cfg_xml = self._download_xml( + cfg_url, display_id, note='Downloading metadata', + transform_source=fix_xml_ampersands) + + thumbnail = cfg_xml.find('./startThumb').text + + formats = [] + for item in cfg_xml.findall('./quality/item'): + video_url = re.sub('speed=\d+', 'speed=', item.find('videoLink').text) + format_id = item.find('res').text + fmt = { + 'url': video_url, + 'format_id': format_id, + } + m = re.search(r'^(\d+)', format_id) + if m: + fmt['height'] = int(m.group(1)) + formats.append(fmt) + self._sort_formats(formats) + + return { + 'id': video_id, + 'display_id': display_id, + 'title': title, + 'description': description, + 'thumbnail': thumbnail, + 'duration': duration, + 'age_limit': age_limit, + 'formats': formats, + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/toutv.py 
b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/toutv.py new file mode 100644 index 0000000000..2837f9c8e5 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/toutv.py @@ -0,0 +1,73 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor +from ..utils import ( + ExtractorError, + unified_strdate, +) + + +class TouTvIE(InfoExtractor): + IE_NAME = 'tou.tv' + _VALID_URL = r'https?://www\.tou\.tv/(?P<id>[a-zA-Z0-9_-]+(?:/(?P<episode>S[0-9]+E[0-9]+)))' + + _TEST = { + 'url': 'http://www.tou.tv/30-vies/S04E41', + 'file': '30-vies_S04E41.mp4', + 'info_dict': { + 'title': '30 vies Saison 4 / ц┴pisode 41', + 'description': 'md5:da363002db82ccbe4dafeb9cab039b09', + 'age_limit': 8, + 'uploader': 'Groupe des Nouveaux Mц╘dias', + 'duration': 1296, + 'upload_date': '20131118', + 'thumbnail': 'http://static.tou.tv/medias/images/2013-11-18_19_00_00_30VIES_0341_01_L.jpeg', + }, + 'params': { + 'skip_download': True, # Requires rtmpdump + }, + 'skip': 'Only available in Canada' + } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group('id') + webpage = self._download_webpage(url, video_id) + + mediaId = self._search_regex( + r'"idMedia":\s*"([^"]+)"', webpage, 'media ID') + + streams_url = 'http://release.theplatform.com/content.select?pid=' + mediaId + streams_doc = self._download_xml( + streams_url, video_id, note='Downloading stream list') + + video_url = next(n.text + for n in streams_doc.findall('.//choice/url') + if '//ad.doubleclick' not in n.text) + if video_url.endswith('/Unavailable.flv'): + raise ExtractorError( + 'Access to this video is blocked from outside of Canada', + expected=True) + + duration_str = self._html_search_meta( + 'video:duration', webpage, 'duration') + duration = int(duration_str) if duration_str else None + upload_date_str = self._html_search_meta( + 'video:release_date', webpage, 'upload date') + 
upload_date = unified_strdate(upload_date_str) if upload_date_str else None + + return { + 'id': video_id, + 'title': self._og_search_title(webpage), + 'url': video_url, + 'description': self._og_search_description(webpage), + 'uploader': self._dc_search_uploader(webpage), + 'thumbnail': self._og_search_thumbnail(webpage), + 'age_limit': self._media_rating_search(webpage), + 'duration': duration, + 'upload_date': upload_date, + 'ext': 'mp4', + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/toypics.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/toypics.py new file mode 100644 index 0000000000..2756f56d3a --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/toypics.py @@ -0,0 +1,85 @@ +# -*- coding:utf-8 -*- +from __future__ import unicode_literals + +from .common import InfoExtractor +import re + + +class ToypicsIE(InfoExtractor): + IE_DESC = 'Toypics user profile' + _VALID_URL = r'https?://videos\.toypics\.net/view/(?P<id>[0-9]+)/.*' + _TEST = { + 'url': 'http://videos.toypics.net/view/514/chancebulged,-2-1/', + 'md5': '16e806ad6d6f58079d210fe30985e08b', + 'info_dict': { + 'id': '514', + 'ext': 'mp4', + 'title': 'Chance-Bulge\'d, 2', + 'age_limit': 18, + 'uploader': 'kidsune', + } + } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group('id') + page = self._download_webpage(url, video_id) + video_url = self._html_search_regex( + r'src:\s+"(http://static[0-9]+\.toypics\.net/flvideo/[^"]+)"', page, 'video URL') + title = self._html_search_regex( + r'<title>Toypics - ([^<]+)', page, 'title') + username = self._html_search_regex( + r'toypics.net/([^/"]+)" class="user-name">', page, 'username') + return { + 'id': video_id, + 'url': video_url, + 'title': title, + 'uploader': username, + 'age_limit': 18, + } + + +class ToypicsUserIE(InfoExtractor): + IE_DESC = 'Toypics user profile' + _VALID_URL = 
r'http://videos\.toypics\.net/(?P[^/?]+)(?:$|[?#])' + _TEST = { + 'url': 'http://videos.toypics.net/Mikey', + 'info_dict': { + 'id': 'Mikey', + }, + 'playlist_mincount': 19, + } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + username = mobj.group('username') + + profile_page = self._download_webpage( + url, username, note='Retrieving profile page') + + video_count = int(self._search_regex( + r'public/">Public Videos \(([0-9]+)\)', profile_page, + 'video count')) + + PAGE_SIZE = 8 + urls = [] + page_count = (video_count + PAGE_SIZE + 1) // PAGE_SIZE + for n in range(1, page_count + 1): + lpage_url = url + '/public/%d' % n + lpage = self._download_webpage( + lpage_url, username, + note='Downloading page %d/%d' % (n, page_count)) + urls.extend( + re.findall( + r'

    \s+', + lpage)) + + return { + '_type': 'playlist', + 'id': username, + 'entries': [{ + '_type': 'url', + 'url': eurl, + 'ie_key': 'Toypics', + } for eurl in urls] + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/traileraddict.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/traileraddict.py new file mode 100644 index 0000000000..1c53a3fd09 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/traileraddict.py @@ -0,0 +1,64 @@ +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor + + +class TrailerAddictIE(InfoExtractor): + _WORKING = False + _VALID_URL = r'(?:http://)?(?:www\.)?traileraddict\.com/(?:trailer|clip)/(?P.+?)/(?P.+)' + _TEST = { + 'url': 'http://www.traileraddict.com/trailer/prince-avalanche/trailer', + 'md5': '41365557f3c8c397d091da510e73ceb4', + 'info_dict': { + 'id': '76184', + 'ext': 'mp4', + 'title': 'Prince Avalanche Trailer', + 'description': 'Trailer for Prince Avalanche.\n\nTwo highway road workers spend the summer of 1988 away from their city lives. 
The isolated landscape becomes a place of misadventure as the men find themselves at odds with each other and the women they left behind.', + } + } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + name = mobj.group('movie') + '/' + mobj.group('trailer_name') + webpage = self._download_webpage(url, name) + + title = self._search_regex(r'(.+?)', + webpage, 'video title').replace(' - Trailer Addict', '') + view_count_str = self._search_regex( + r'([0-9,.]+)', + webpage, 'view count', fatal=False) + view_count = ( + None if view_count_str is None + else int(view_count_str.replace(',', ''))) + video_id = self._search_regex( + r'', + webpage, 'video id') + + # Presence of (no)watchplus function indicates HD quality is available + if re.search(r'function (no)?watchplus()', webpage): + fvar = "fvarhd" + else: + fvar = "fvar" + + info_url = "http://www.traileraddict.com/%s.php?tid=%s" % (fvar, str(video_id)) + info_webpage = self._download_webpage(info_url, video_id, "Downloading the info webpage") + + final_url = self._search_regex(r'&fileurl=(.+)', + info_webpage, 'Download url').replace('%3F', '?') + thumbnail_url = self._search_regex(r'&image=(.+?)&', + info_webpage, 'thumbnail url') + + description = self._html_search_regex( + r'(?s)

    .*?
    ]*>(.*?)
    ', + webpage, 'description', fatal=False) + + return { + 'id': video_id, + 'url': final_url, + 'title': title, + 'thumbnail': thumbnail_url, + 'description': description, + 'view_count': view_count, + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/trilulilu.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/trilulilu.py new file mode 100644 index 0000000000..220a05b7b4 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/trilulilu.py @@ -0,0 +1,65 @@ +from __future__ import unicode_literals + +import json + +from .common import InfoExtractor + + +class TriluliluIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?trilulilu\.ro/video-[^/]+/(?P[^/]+)' + _TEST = { + 'url': 'http://www.trilulilu.ro/video-animatie/big-buck-bunny-1', + 'info_dict': { + 'id': 'big-buck-bunny-1', + 'ext': 'mp4', + 'title': 'Big Buck Bunny', + 'description': ':) pentru copilul din noi', + }, + # Server ignores Range headers (--test) + 'params': { + 'skip_download': True + } + } + + def _real_extract(self, url): + video_id = self._match_id(url) + webpage = self._download_webpage(url, video_id) + + title = self._og_search_title(webpage) + thumbnail = self._og_search_thumbnail(webpage) + description = self._og_search_description(webpage) + + log_str = self._search_regex( + r'block_flash_vars[ ]=[ ]({[^}]+})', webpage, 'log info') + log = json.loads(log_str) + + format_url = ('http://fs%(server)s.trilulilu.ro/%(hash)s/' + 'video-formats2' % log) + format_doc = self._download_xml( + format_url, video_id, + note='Downloading formats', + errnote='Error while downloading formats') + + video_url_template = ( + 'http://fs%(server)s.trilulilu.ro/stream.php?type=video' + '&source=site&hash=%(hash)s&username=%(userid)s&' + 'key=ministhebest&format=%%s&sig=&exp=' % + log) + formats = [ + { + 'format': fnode.text, + 'url': video_url_template % fnode.text, + 'ext': fnode.text.partition('-')[0] + } + + for 
fnode in format_doc.findall('./formats/format') + ] + + return { + '_type': 'video', + 'id': video_id, + 'formats': formats, + 'title': title, + 'description': description, + 'thumbnail': thumbnail, + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/trutube.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/trutube.py new file mode 100644 index 0000000000..e7b79243a8 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/trutube.py @@ -0,0 +1,40 @@ +from __future__ import unicode_literals + +from .common import InfoExtractor +from ..utils import xpath_text + + +class TruTubeIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?trutube\.tv/(?:video/|nuevo/player/embed\.php\?v=)(?P[0-9]+)' + _TESTS = [{ + 'url': 'http://trutube.tv/video/14880/Ramses-II-Proven-To-Be-A-Red-Headed-Caucasoid-', + 'md5': 'c5b6e301b0a2040b074746cbeaa26ca1', + 'info_dict': { + 'id': '14880', + 'ext': 'flv', + 'title': 'Ramses II - Proven To Be A Red Headed Caucasoid', + 'thumbnail': 're:^http:.*\.jpg$', + } + }, { + 'url': 'https://trutube.tv/nuevo/player/embed.php?v=14880', + 'only_matching': True, + }] + + def _real_extract(self, url): + video_id = self._match_id(url) + + config = self._download_xml( + 'https://trutube.tv/nuevo/player/config.php?v=%s' % video_id, + video_id, transform_source=lambda s: s.strip()) + + # filehd is always 404 + video_url = xpath_text(config, './file', 'video URL', fatal=True) + title = xpath_text(config, './title', 'title').strip() + thumbnail = xpath_text(config, './image', ' thumbnail') + + return { + 'id': video_id, + 'url': video_url, + 'title': title, + 'thumbnail': thumbnail, + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/tube8.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/tube8.py new file mode 100644 index 0000000000..d73ad3762a --- /dev/null +++ 
b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/tube8.py @@ -0,0 +1,95 @@ +from __future__ import unicode_literals + +import json +import re + +from .common import InfoExtractor +from ..compat import ( + compat_urllib_parse_urlparse, + compat_urllib_request, +) +from ..utils import ( + int_or_none, + str_to_int, +) +from ..aes import aes_decrypt_text + + +class Tube8IE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?tube8\.com/(?:[^/]+/)+(?P[^/]+)/(?P\d+)' + _TESTS = [ + { + 'url': 'http://www.tube8.com/teen/kasia-music-video/229795/', + 'md5': '44bf12b98313827dd52d35b8706a4ea0', + 'info_dict': { + 'id': '229795', + 'display_id': 'kasia-music-video', + 'ext': 'mp4', + 'description': 'hot teen Kasia grinding', + 'uploader': 'unknown', + 'title': 'Kasia music video', + 'age_limit': 18, + } + }, + { + 'url': 'http://www.tube8.com/shemale/teen/blonde-cd-gets-kidnapped-by-two-blacks-and-punished-for-being-a-slutty-girl/19569151/', + 'only_matching': True, + }, + ] + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group('id') + display_id = mobj.group('display_id') + + req = compat_urllib_request.Request(url) + req.add_header('Cookie', 'age_verified=1') + webpage = self._download_webpage(req, display_id) + + flashvars = json.loads(self._html_search_regex( + r'var flashvars\s*=\s*({.+?})', webpage, 'flashvars')) + + video_url = flashvars['video_url'] + if flashvars.get('encrypted') is True: + video_url = aes_decrypt_text(video_url, flashvars['video_title'], 32).decode('utf-8') + path = compat_urllib_parse_urlparse(video_url).path + format_id = '-'.join(path.split('/')[4].split('_')[:2]) + + thumbnail = flashvars.get('image_url') + + title = self._html_search_regex( + r'videotitle\s*=\s*"([^"]+)', webpage, 'title') + description = self._html_search_regex( + r'>Description:(.+?)<', webpage, 'description', fatal=False) + uploader = self._html_search_regex( + r'(?:
    )?([^<]+)(?:)?', + webpage, 'uploader', fatal=False) + + like_count = int_or_none(self._html_search_regex( + r"rupVar\s*=\s*'(\d+)'", webpage, 'like count', fatal=False)) + dislike_count = int_or_none(self._html_search_regex( + r"rdownVar\s*=\s*'(\d+)'", webpage, 'dislike count', fatal=False)) + view_count = self._html_search_regex( + r'Views: ([\d,\.]+)', webpage, 'view count', fatal=False) + if view_count: + view_count = str_to_int(view_count) + comment_count = self._html_search_regex( + r'(\d+)', webpage, 'comment count', fatal=False) + if comment_count: + comment_count = str_to_int(comment_count) + + return { + 'id': video_id, + 'display_id': display_id, + 'url': video_url, + 'title': title, + 'description': description, + 'thumbnail': thumbnail, + 'uploader': uploader, + 'format_id': format_id, + 'view_count': view_count, + 'like_count': like_count, + 'dislike_count': dislike_count, + 'comment_count': comment_count, + 'age_limit': 18, + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/tudou.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/tudou.py new file mode 100644 index 0000000000..161e47624b --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/tudou.py @@ -0,0 +1,90 @@ +# coding: utf-8 + +from __future__ import unicode_literals + +import re +import json + +from .common import InfoExtractor + + +class TudouIE(InfoExtractor): + _VALID_URL = r'(?:http://)?(?:www\.)?tudou\.com/(?:listplay|programs|albumplay)/(?:view|(.+?))/(?:([^/]+)|([^/]+))(?:\.html)?' 
+ _TESTS = [{ + 'url': 'http://www.tudou.com/listplay/zzdE77v6Mmo/2xN2duXMxmw.html', + 'md5': '140a49ed444bd22f93330985d8475fcb', + 'info_dict': { + 'id': '159448201', + 'ext': 'f4v', + 'title': 'Е█║И╘╛Д╧■Е⌡╫Х╤ЁЕ╪─Е╓╖Х└ И∙©Д╪═Е├╡Е░┼И⌡├И■╕', + 'thumbnail': 're:^https?://.*\.jpg$', + } + }, { + 'url': 'http://www.tudou.com/programs/view/ajX3gyhL0pc/', + 'info_dict': { + 'id': '117049447', + 'ext': 'f4v', + 'title': 'La Sylphide-Bolshoi-Ekaterina Krysanova & Vyacheslav Lopatin 2012', + 'thumbnail': 're:^https?://.*\.jpg$', + } + }, { + 'url': 'http://www.tudou.com/albumplay/TenTw_JgiPM/PzsAs5usU9A.html', + 'info_dict': { + 'title': 'todo.mp4', + }, + 'add_ie': ['Youku'], + 'skip': 'Only works from China' + }] + + def _url_for_id(self, id, quality=None): + info_url = "http://v2.tudou.com/f?id=" + str(id) + if quality: + info_url += '&hd' + quality + webpage = self._download_webpage(info_url, id, "Opening the info webpage") + final_url = self._html_search_regex('>(.+?)', webpage, 'video url') + return final_url + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group(2) + webpage = self._download_webpage(url, video_id) + + m = re.search(r'vcode:\s*[\'"](.+?)[\'"]', webpage) + if m and m.group(1): + return { + '_type': 'url', + 'url': 'youku:' + m.group(1), + 'ie_key': 'Youku' + } + + title = self._search_regex( + r",kw:\s*['\"](.+?)[\"']", webpage, 'title') + thumbnail_url = self._search_regex( + r",pic:\s*[\"'](.+?)[\"']", webpage, 'thumbnail URL', fatal=False) + + segs_json = self._search_regex(r'segs: \'(.*)\'', webpage, 'segments') + segments = json.loads(segs_json) + # It looks like the keys are the arguments that have to be passed as + # the hd field in the request url, we pick the higher + # Also, filter non-number qualities (see issue #3643). 
+ quality = sorted(filter(lambda k: k.isdigit(), segments.keys()), + key=lambda k: int(k))[-1] + parts = segments[quality] + result = [] + len_parts = len(parts) + if len_parts > 1: + self.to_screen('%s: found %s parts' % (video_id, len_parts)) + for part in parts: + part_id = part['k'] + final_url = self._url_for_id(part_id, quality) + ext = (final_url.split('?')[0]).split('.')[-1] + part_info = { + 'id': '%s' % part_id, + 'url': final_url, + 'ext': ext, + 'title': title, + 'thumbnail': thumbnail_url, + } + result.append(part_info) + + return result diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/tumblr.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/tumblr.py new file mode 100644 index 0000000000..2a1ae5a717 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/tumblr.py @@ -0,0 +1,61 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor + + +class TumblrIE(InfoExtractor): + _VALID_URL = r'http://(?P.*?)\.tumblr\.com/(?:post|video)/(?P[0-9]+)(?:$|[/?#])' + _TESTS = [{ + 'url': 'http://tatianamaslanydaily.tumblr.com/post/54196191430/orphan-black-dvd-extra-behind-the-scenes', + 'md5': '479bb068e5b16462f5176a6828829767', + 'info_dict': { + 'id': '54196191430', + 'ext': 'mp4', + 'title': 'tatiana maslany news, Orphan Black || DVD extra - behind the scenes Б├Ё...', + 'description': 'md5:37db8211e40b50c7c44e95da14f630b7', + 'thumbnail': 're:http://.*\.jpg', + } + }, { + 'url': 'http://5sostrum.tumblr.com/post/90208453769/yall-forgetting-the-greatest-keek-of-them-all', + 'md5': 'bf348ef8c0ef84fbf1cbd6fa6e000359', + 'info_dict': { + 'id': '90208453769', + 'ext': 'mp4', + 'title': '5SOS STRUM ;]', + 'description': 'md5:dba62ac8639482759c8eb10ce474586a', + 'thumbnail': 're:http://.*\.jpg', + } + }] + + def _real_extract(self, url): + m_url = re.match(self._VALID_URL, url) + video_id = m_url.group('id') + blog = 
m_url.group('blog_name') + + url = 'http://%s.tumblr.com/post/%s/' % (blog, video_id) + webpage = self._download_webpage(url, video_id) + + iframe_url = self._search_regex( + r'src=\'(https?://www\.tumblr\.com/video/[^\']+)\'', + webpage, 'iframe url') + iframe = self._download_webpage(iframe_url, video_id) + video_url = self._search_regex(r'(?P.*?)(?: \| Tumblr)?', + webpage, 'title') + + return { + 'id': video_id, + 'url': video_url, + 'ext': 'mp4', + 'title': video_title, + 'description': self._og_search_description(webpage), + 'thumbnail': self._og_search_thumbnail(webpage), + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/tunein.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/tunein.py new file mode 100644 index 0000000000..4ce5aeeba2 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/tunein.py @@ -0,0 +1,99 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import json +import re + +from .common import InfoExtractor +from ..utils import ExtractorError + + +class TuneInIE(InfoExtractor): + _VALID_URL = r'''(?x)https?://(?:www\.)? 
+ (?: + tunein\.com/ + (?: + radio/.*?-s| + station/.*?StationId\= + )(?P[0-9]+) + |tun\.in/(?P[A-Za-z0-9]+) + ) + ''' + _API_URL_TEMPLATE = 'http://tunein.com/tuner/tune/?stationId={0:}&tuneType=Station' + + _INFO_DICT = { + 'id': '34682', + 'title': 'Jazz 24 on 88.5 Jazz24 - KPLU-HD2', + 'ext': 'AAC', + 'thumbnail': 're:^https?://.*\.png$', + 'location': 'Tacoma, WA', + } + _TESTS = [ + { + 'url': 'http://tunein.com/radio/Jazz24-885-s34682/', + 'info_dict': _INFO_DICT, + 'params': { + 'skip_download': True, # live stream + }, + }, + { # test redirection + 'url': 'http://tun.in/ser7s', + 'info_dict': _INFO_DICT, + 'params': { + 'skip_download': True, # live stream + }, + }, + ] + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + redirect_id = mobj.group('redirect_id') + if redirect_id: + # The server doesn't support HEAD requests + urlh = self._request_webpage( + url, redirect_id, note='Downloading redirect page') + url = urlh.geturl() + self.to_screen('Following redirect: %s' % url) + mobj = re.match(self._VALID_URL, url) + station_id = mobj.group('id') + + station_info = self._download_json( + self._API_URL_TEMPLATE.format(station_id), + station_id, note='Downloading station JSON') + + title = station_info['Title'] + thumbnail = station_info.get('Logo') + location = station_info.get('Location') + streams_url = station_info.get('StreamUrl') + if not streams_url: + raise ExtractorError('No downloadable streams found', + expected=True) + stream_data = self._download_webpage( + streams_url, station_id, note='Downloading stream data') + streams = json.loads(self._search_regex( + r'\((.*)\);', stream_data, 'stream info'))['Streams'] + + is_live = None + formats = [] + for stream in streams: + if stream.get('Type') == 'Live': + is_live = True + formats.append({ + 'abr': stream.get('Bandwidth'), + 'ext': stream.get('MediaType'), + 'acodec': stream.get('MediaType'), + 'vcodec': 'none', + 'url': stream.get('Url'), + # Sometimes streams with the 
highest quality do not exist + 'preference': stream.get('Reliability'), + }) + self._sort_formats(formats) + + return { + 'id': station_id, + 'title': title, + 'formats': formats, + 'thumbnail': thumbnail, + 'location': location, + 'is_live': is_live, + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/turbo.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/turbo.py new file mode 100644 index 0000000000..29703a8a9a --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/turbo.py @@ -0,0 +1,67 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor +from ..utils import ( + ExtractorError, + int_or_none, + qualities, + xpath_text, +) + + +class TurboIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?turbo\.fr/videos-voiture/(?P[0-9]+)-' + _API_URL = 'http://www.turbo.fr/api/tv/xml.php?player_generique=player_generique&id={0:}' + _TEST = { + 'url': 'http://www.turbo.fr/videos-voiture/454443-turbo-du-07-09-2014-renault-twingo-3-bentley-continental-gt-speed-ces-guide-achat-dacia.html', + 'md5': '33f4b91099b36b5d5a91f84b5bcba600', + 'info_dict': { + 'id': '454443', + 'ext': 'mp4', + 'duration': 3715, + 'title': 'Turbo du 07/09/2014 : Renault Twingo 3, Bentley Continental GT Speed, CES, Guide Achat Dacia... ', + 'description': 'Retrouvez dans cette rubrique toutes les vidц╘os de l\'Turbo du 07/09/2014 : Renault Twingo 3, Bentley Continental GT Speed, CES, Guide Achat Dacia... 
', + 'thumbnail': 're:^https?://.*\.jpg$', + } + } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group('id') + + webpage = self._download_webpage(url, video_id) + + playlist = self._download_xml(self._API_URL.format(video_id), video_id) + item = playlist.find('./channel/item') + if item is None: + raise ExtractorError('Playlist item was not found', expected=True) + + title = xpath_text(item, './title', 'title') + duration = int_or_none(xpath_text(item, './durate', 'duration')) + thumbnail = xpath_text(item, './visuel_clip', 'thumbnail') + description = self._og_search_description(webpage) + + formats = [] + get_quality = qualities(['3g', 'sd', 'hq']) + for child in item: + m = re.search(r'url_video_(?P.+)', child.tag) + if m: + quality = m.group('quality') + formats.append({ + 'format_id': quality, + 'url': child.text, + 'quality': get_quality(quality), + }) + self._sort_formats(formats) + + return { + 'id': video_id, + 'title': title, + 'duration': duration, + 'thumbnail': thumbnail, + 'description': description, + 'formats': formats, + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/tutv.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/tutv.py new file mode 100644 index 0000000000..4de0aac523 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/tutv.py @@ -0,0 +1,35 @@ +from __future__ import unicode_literals + +import base64 + +from .common import InfoExtractor +from ..compat import compat_parse_qs + + +class TutvIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?tu\.tv/videos/(?P[^/?]+)' + _TEST = { + 'url': 'http://tu.tv/videos/robots-futbolistas', + 'md5': '627c7c124ac2a9b5ab6addb94e0e65f7', + 'info_dict': { + 'id': '2973058', + 'ext': 'flv', + 'title': 'Robots futbolistas', + }, + } + + def _real_extract(self, url): + video_id = self._match_id(url) + webpage = self._download_webpage(url, video_id) + + internal_id = 
self._search_regex(r'codVideo=([0-9]+)', webpage, 'internal video ID') + + data_content = self._download_webpage( + 'http://tu.tv/flvurl.php?codVideo=%s' % internal_id, video_id, 'Downloading video info') + video_url = base64.b64decode(compat_parse_qs(data_content)['kpt'][0]).decode('utf-8') + + return { + 'id': internal_id, + 'url': video_url, + 'title': self._og_search_title(webpage), + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/tvigle.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/tvigle.py new file mode 100644 index 0000000000..ba65996dc0 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/tvigle.py @@ -0,0 +1,84 @@ +# encoding: utf-8 +from __future__ import unicode_literals + +from .common import InfoExtractor +from ..utils import ( + float_or_none, + parse_age_limit, +) + + +class TvigleIE(InfoExtractor): + IE_NAME = 'tvigle' + IE_DESC = 'п≤п╫я┌п╣я─п╫п╣я┌-я┌п╣п╩п╣п╡п╦п╢п╣п╫п╦п╣ Tvigle.ru' + _VALID_URL = r'http://(?:www\.)?tvigle\.ru/(?:[^/]+/)+(?P[^/]+)/$' + + _TESTS = [ + { + 'url': 'http://www.tvigle.ru/video/sokrat/', + 'md5': '36514aed3657d4f70b4b2cef8eb520cd', + 'info_dict': { + 'id': '1848932', + 'display_id': 'sokrat', + 'ext': 'flv', + 'title': 'п║п╬п╨я─п╟я┌', + 'description': 'md5:a05bd01be310074d5833efc6743be95e', + 'duration': 6586, + 'age_limit': 0, + }, + }, + { + 'url': 'http://www.tvigle.ru/video/vladimir-vysotskii/vedushchii-teleprogrammy-60-minut-ssha-o-vladimire-vysotskom/', + 'md5': 'd9012d7c7c598fe7a11d7fb46dc1f574', + 'info_dict': { + 'id': '5142516', + 'ext': 'mp4', + 'title': 'п▓п╣п╢я┐я┴п╦п╧ я┌п╣п╩п╣п©я─п╬пЁя─п╟п╪п╪я▀ б╚60 п╪п╦п╫я┐я┌б╩ (п║п╗п░) п╬ п▓п╩п╟п╢п╦п╪п╦я─п╣ п▓я▀я│п╬я├п╨п╬п╪', + 'description': 'md5:027f7dc872948f14c96d19b4178428a4', + 'duration': 186.080, + 'age_limit': 0, + }, + }, + ] + + def _real_extract(self, url): + display_id = self._match_id(url) + + webpage = self._download_webpage(url, display_id) + + video_id = 
self._html_search_regex( + r'
  • ', webpage, 'video id') + + video_data = self._download_json( + 'http://cloud.tvigle.ru/api/play/video/%s/' % video_id, display_id) + + item = video_data['playlist']['items'][0] + + title = item['title'] + description = item['description'] + thumbnail = item['thumbnail'] + duration = float_or_none(item.get('durationMilliseconds'), 1000) + age_limit = parse_age_limit(item.get('ageRestrictions')) + + formats = [] + for vcodec, fmts in item['videos'].items(): + for quality, video_url in fmts.items(): + formats.append({ + 'url': video_url, + 'format_id': '%s-%s' % (vcodec, quality), + 'vcodec': vcodec, + 'height': int(quality[:-1]), + 'filesize': item['video_files_size'][vcodec][quality], + }) + self._sort_formats(formats) + + return { + 'id': video_id, + 'display_id': display_id, + 'title': title, + 'description': description, + 'thumbnail': thumbnail, + 'duration': duration, + 'age_limit': age_limit, + 'formats': formats, + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/tvp.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/tvp.py new file mode 100644 index 0000000000..a645800057 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/tvp.py @@ -0,0 +1,37 @@ +from __future__ import unicode_literals + +from .common import InfoExtractor + + +class TvpIE(InfoExtractor): + IE_NAME = 'tvp.pl' + _VALID_URL = r'https?://www\.tvp\.pl/.*?wideo/(?P\d+)/(?P\d+)' + + _TEST = { + 'url': 'http://www.tvp.pl/warszawa/magazyny/campusnews/wideo/31102013/12878238', + 'md5': '148408967a6a468953c0a75cbdaf0d7a', + 'info_dict': { + 'id': '12878238', + 'ext': 'wmv', + 'title': '31.10.2013 - Odcinek 2', + 'description': '31.10.2013 - Odcinek 2', + }, + 'skip': 'Download has to use same server IP as extraction. Therefore, a good (load-balancing) DNS resolver will make the download fail.' 
+ } + + def _real_extract(self, url): + video_id = self._match_id(url) + webpage = self._download_webpage(url, video_id) + json_url = 'http://www.tvp.pl/pub/stat/videofileinfo?video_id=%s' % video_id + params = self._download_json( + json_url, video_id, "Downloading video metadata") + video_url = params['video_url'] + + return { + 'id': video_id, + 'title': self._og_search_title(webpage), + 'ext': 'wmv', + 'url': video_url, + 'description': self._og_search_description(webpage), + 'thumbnail': self._og_search_thumbnail(webpage), + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/tvplay.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/tvplay.py new file mode 100644 index 0000000000..9a53a3c741 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/tvplay.py @@ -0,0 +1,230 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor +from ..compat import compat_str +from ..utils import ( + parse_iso8601, + qualities, +) + + +class TVPlayIE(InfoExtractor): + IE_DESC = 'TV3Play and related services' + _VALID_URL = r'''(?x)http://(?:www\.)? + (?:tvplay\.lv/parraides| + tv3play\.lt/programos| + tv3play\.ee/sisu| + tv3play\.se/program| + tv6play\.se/program| + tv8play\.se/program| + tv10play\.se/program| + tv3play\.no/programmer| + viasat4play\.no/programmer| + tv6play\.no/programmer| + tv3play\.dk/programmer| + )/[^/]+/(?P\d+) + ''' + _TESTS = [ + { + 'url': 'http://www.tvplay.lv/parraides/vinas-melo-labak/418113?autostart=true', + 'info_dict': { + 'id': '418113', + 'ext': 'flv', + 'title': 'Kд│di ir д╚ri? 
- Viе├as melo labд│k', + 'description': 'Baiba apsmej д╚rus, kд│di tie ir un ko viе├i dara.', + 'duration': 25, + 'timestamp': 1406097056, + 'upload_date': '20140723', + }, + 'params': { + # rtmp download + 'skip_download': True, + }, + }, + { + 'url': 'http://www.tv3play.lt/programos/moterys-meluoja-geriau/409229?autostart=true', + 'info_dict': { + 'id': '409229', + 'ext': 'flv', + 'title': 'Moterys meluoja geriau', + 'description': 'md5:9aec0fc68e2cbc992d2a140bd41fa89e', + 'duration': 1330, + 'timestamp': 1403769181, + 'upload_date': '20140626', + }, + 'params': { + # rtmp download + 'skip_download': True, + }, + }, + { + 'url': 'http://www.tv3play.ee/sisu/kodu-keset-linna/238551?autostart=true', + 'info_dict': { + 'id': '238551', + 'ext': 'flv', + 'title': 'Kodu keset linna 398537', + 'description': 'md5:7df175e3c94db9e47c0d81ffa5d68701', + 'duration': 1257, + 'timestamp': 1292449761, + 'upload_date': '20101215', + }, + 'params': { + # rtmp download + 'skip_download': True, + }, + }, + { + 'url': 'http://www.tv3play.se/program/husraddarna/395385?autostart=true', + 'info_dict': { + 'id': '395385', + 'ext': 'flv', + 'title': 'Husrц╓ddarna S02E07', + 'description': 'md5:f210c6c89f42d4fc39faa551be813777', + 'duration': 2574, + 'timestamp': 1400596321, + 'upload_date': '20140520', + }, + 'params': { + # rtmp download + 'skip_download': True, + }, + }, + { + 'url': 'http://www.tv6play.se/program/den-sista-dokusapan/266636?autostart=true', + 'info_dict': { + 'id': '266636', + 'ext': 'flv', + 'title': 'Den sista dokusц╔pan S01E08', + 'description': 'md5:295be39c872520221b933830f660b110', + 'duration': 1492, + 'timestamp': 1330522854, + 'upload_date': '20120229', + }, + 'params': { + # rtmp download + 'skip_download': True, + }, + }, + { + 'url': 'http://www.tv8play.se/program/antikjakten/282756?autostart=true', + 'info_dict': { + 'id': '282756', + 'ext': 'flv', + 'title': 'Antikjakten S01E10', + 'description': 'md5:1b201169beabd97e20c5ad0ad67b13b8', + 'duration': 2646, 
+ 'timestamp': 1348575868, + 'upload_date': '20120925', + }, + 'params': { + # rtmp download + 'skip_download': True, + }, + }, + { + 'url': 'http://www.tv3play.no/programmer/anna-anka-soker-assistent/230898?autostart=true', + 'info_dict': { + 'id': '230898', + 'ext': 'flv', + 'title': 'Anna Anka sц╦ker assistent - Ep. 8', + 'description': 'md5:f80916bf5bbe1c5f760d127f8dd71474', + 'duration': 2656, + 'timestamp': 1277720005, + 'upload_date': '20100628', + }, + 'params': { + # rtmp download + 'skip_download': True, + }, + }, + { + 'url': 'http://www.viasat4play.no/programmer/budbringerne/21873?autostart=true', + 'info_dict': { + 'id': '21873', + 'ext': 'flv', + 'title': 'Budbringerne program 10', + 'description': 'md5:4db78dc4ec8a85bb04fd322a3ee5092d', + 'duration': 1297, + 'timestamp': 1254205102, + 'upload_date': '20090929', + }, + 'params': { + # rtmp download + 'skip_download': True, + }, + }, + { + 'url': 'http://www.tv6play.no/programmer/hotelinspektor-alex-polizzi/361883?autostart=true', + 'info_dict': { + 'id': '361883', + 'ext': 'flv', + 'title': 'Hotelinspektц╦r Alex Polizzi - Ep. 
10', + 'description': 'md5:3ecf808db9ec96c862c8ecb3a7fdaf81', + 'duration': 2594, + 'timestamp': 1393236292, + 'upload_date': '20140224', + }, + 'params': { + # rtmp download + 'skip_download': True, + }, + }, + ] + + def _real_extract(self, url): + video_id = self._match_id(url) + + video = self._download_json( + 'http://playapi.mtgx.tv/v1/videos/%s' % video_id, video_id, 'Downloading video JSON') + + if video['is_geo_blocked']: + self.report_warning( + 'This content might not be available in your country due to copyright reasons') + + streams = self._download_json( + 'http://playapi.mtgx.tv/v1/videos/stream/%s' % video_id, video_id, 'Downloading streams JSON') + + quality = qualities(['hls', 'medium', 'high']) + formats = [] + for format_id, video_url in streams['streams'].items(): + if not video_url or not isinstance(video_url, compat_str): + continue + fmt = { + 'format_id': format_id, + 'preference': quality(format_id), + } + if video_url.startswith('rtmp'): + m = re.search(r'^(?Prtmp://[^/]+/(?P[^/]+))/(?P.+)$', video_url) + if not m: + continue + fmt.update({ + 'ext': 'flv', + 'url': m.group('url'), + 'app': m.group('app'), + 'play_path': m.group('playpath'), + }) + elif video_url.endswith('.f4m'): + formats.extend(self._extract_f4m_formats( + video_url + '?hdcore=3.5.0&plugin=aasp-3.5.0.151.81', video_id)) + continue + else: + fmt.update({ + 'url': video_url, + }) + formats.append(fmt) + + self._sort_formats(formats) + + return { + 'id': video_id, + 'title': video['title'], + 'description': video['description'], + 'duration': video['duration'], + 'timestamp': parse_iso8601(video['created_at']), + 'view_count': video['views']['total'], + 'age_limit': video.get('age_limit', 0), + 'formats': formats, + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/twentyfourvideo.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/twentyfourvideo.py new file mode 100644 index 0000000000..67e8bfea03 --- /dev/null +++ 
b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/twentyfourvideo.py @@ -0,0 +1,109 @@ +# coding: utf-8 +from __future__ import unicode_literals + +from .common import InfoExtractor +from ..utils import ( + parse_iso8601, + int_or_none, +) + + +class TwentyFourVideoIE(InfoExtractor): + IE_NAME = '24video' + _VALID_URL = r'https?://(?:www\.)?24video\.net/(?:video/(?:view|xml)/|player/new24_play\.swf\?id=)(?P\d+)' + + _TESTS = [ + { + 'url': 'http://www.24video.net/video/view/1044982', + 'md5': '48dd7646775690a80447a8dca6a2df76', + 'info_dict': { + 'id': '1044982', + 'ext': 'mp4', + 'title': 'п╜я─п╬я┌п╦п╨п╟ п╨п╟п╪п╣п╫п╫п╬пЁп╬ п╡п╣п╨п╟', + 'description': 'п п╟п╨ я│п╪п╬я┌я─п╣п╩п╦ п©п╬я─п╫п╬ п╡ п╨п╟п╪п╣п╫п╫п╬п╪ п╡п╣п╨п╣.', + 'thumbnail': 're:^https?://.*\.jpg$', + 'uploader': 'SUPERTELO', + 'duration': 31, + 'timestamp': 1275937857, + 'upload_date': '20100607', + 'age_limit': 18, + 'like_count': int, + 'dislike_count': int, + }, + }, + { + 'url': 'http://www.24video.net/player/new24_play.swf?id=1044982', + 'only_matching': True, + } + ] + + def _real_extract(self, url): + video_id = self._match_id(url) + + webpage = self._download_webpage( + 'http://www.24video.net/video/view/%s' % video_id, video_id) + + title = self._og_search_title(webpage) + description = self._html_search_regex( + r'([^<]+)', webpage, 'description', fatal=False) + thumbnail = self._og_search_thumbnail(webpage) + duration = int_or_none(self._og_search_property( + 'duration', webpage, 'duration', fatal=False)) + timestamp = parse_iso8601(self._search_regex( + r'
  • + ) + (.*?) + (?: + + ) + ''', webpage, 'links') + title = self._html_search_regex( + r'(.*?)-\s*Vuclip', webpage, 'title').strip() + + quality_order = qualities(['Reg', 'Hi']) + formats = [] + for url, q in re.findall( + r'[^"]+)".*?>(?:]*>)?(?P[^<]+)(?:)?', links_code): + format_id = compat_urllib_parse_urlparse(url).scheme + '-' + q + formats.append({ + 'format_id': format_id, + 'url': url, + 'quality': quality_order(q), + }) + self._sort_formats(formats) + + duration = parse_duration(self._search_regex( + r'\(([0-9:]+)\)', webpage, 'duration', fatal=False)) + + return { + 'id': video_id, + 'formats': formats, + 'title': title, + 'duration': duration, + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/vulture.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/vulture.py new file mode 100644 index 0000000000..1eb24a3d67 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/vulture.py @@ -0,0 +1,69 @@ +from __future__ import unicode_literals + +import json +import os.path +import re + +from .common import InfoExtractor +from ..utils import ( + int_or_none, + parse_iso8601, +) + + +class VultureIE(InfoExtractor): + IE_NAME = 'vulture.com' + _VALID_URL = r'https?://video\.vulture\.com/video/(?P[^/]+)/' + _TEST = { + 'url': 'http://video.vulture.com/video/Mindy-Kaling-s-Harvard-Speech/player?layout=compact&read_more=1', + 'md5': '8d997845642a2b5152820f7257871bc8', + 'info_dict': { + 'id': '6GHRQL3RV7MSD1H4', + 'ext': 'mp4', + 'title': 'kaling-speech-2-MAGNIFY STANDARD CONTAINER REVISED', + 'uploader_id': 'Sarah', + 'thumbnail': 're:^http://.*\.jpg$', + 'timestamp': 1401288564, + 'upload_date': '20140528', + 'description': 'Uplifting and witty, as predicted.', + 'duration': 1015, + } + } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + display_id = mobj.group('display_id') + + webpage = self._download_webpage(url, display_id) + query_string = 
self._search_regex( + r"queryString\s*=\s*'([^']+)'", webpage, 'query string') + video_id = self._search_regex( + r'content=([^&]+)', query_string, 'video ID') + query_url = 'http://video.vulture.com/embed/player/container/1000/1000/?%s' % query_string + + query_webpage = self._download_webpage( + query_url, display_id, note='Downloading query page') + params_json = self._search_regex( + r'(?sm)new MagnifyEmbeddablePlayer\({.*?contentItem:\s*(\{.*?\})\n,\n', + query_webpage, + 'player params') + params = json.loads(params_json) + + upload_timestamp = parse_iso8601(params['posted'].replace(' ', 'T')) + uploader_id = params.get('user', {}).get('handle') + + media_item = params['media_item'] + title = os.path.splitext(media_item['title'])[0] + duration = int_or_none(media_item.get('duration_seconds')) + + return { + 'id': video_id, + 'display_id': display_id, + 'url': media_item['pipeline_xid'], + 'title': title, + 'timestamp': upload_timestamp, + 'thumbnail': params.get('thumbnail_url'), + 'uploader_id': uploader_id, + 'description': params.get('description'), + 'duration': duration, + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/walla.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/walla.py new file mode 100644 index 0000000000..672bda7a7a --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/walla.py @@ -0,0 +1,89 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import re + +from .subtitles import SubtitlesInfoExtractor +from ..utils import ( + xpath_text, + int_or_none, +) + + +class WallaIE(SubtitlesInfoExtractor): + _VALID_URL = r'http://vod\.walla\.co\.il/[^/]+/(?P\d+)/(?P.+)' + _TEST = { + 'url': 'http://vod.walla.co.il/movie/2642630/one-direction-all-for-one', + 'info_dict': { + 'id': '2642630', + 'display_id': 'one-direction-all-for-one', + 'ext': 'flv', + 'title': 'в∙в∙в░в÷ в⌠в≥в≥в╗в╖в╘в÷: в■в■в≥в║в≤в╗в≥в■', + 'description': 
'md5:de9e2512a92442574cdb0913c49bc4d8', + 'thumbnail': 're:^https?://.*\.jpg', + 'duration': 3600, + }, + 'params': { + # rtmp download + 'skip_download': True, + } + } + + _SUBTITLE_LANGS = { + 'в╒в▒в╗в≥в╙': 'heb', + } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group('id') + display_id = mobj.group('display_id') + + video = self._download_xml( + 'http://video2.walla.co.il/?w=null/null/%s/@@/video/flv_pl' % video_id, + display_id) + + item = video.find('./items/item') + + title = xpath_text(item, './title', 'title') + description = xpath_text(item, './synopsis', 'description') + thumbnail = xpath_text(item, './preview_pic', 'thumbnail') + duration = int_or_none(xpath_text(item, './duration', 'duration')) + + subtitles = {} + for subtitle in item.findall('./subtitles/subtitle'): + lang = xpath_text(subtitle, './title') + subtitles[self._SUBTITLE_LANGS.get(lang, lang)] = xpath_text(subtitle, './src') + + if self._downloader.params.get('listsubtitles', False): + self._list_available_subtitles(video_id, subtitles) + return + + subtitles = self.extract_subtitles(video_id, subtitles) + + formats = [] + for quality in item.findall('./qualities/quality'): + format_id = xpath_text(quality, './title') + fmt = { + 'url': 'rtmp://wafla.walla.co.il/vod', + 'play_path': xpath_text(quality, './src'), + 'player_url': 'http://isc.walla.co.il/w9/swf/video_swf/vod/WallaMediaPlayerAvod.swf', + 'page_url': url, + 'ext': 'flv', + 'format_id': xpath_text(quality, './title'), + } + m = re.search(r'^(?P\d+)[Pp]', format_id) + if m: + fmt['height'] = int(m.group('height')) + formats.append(fmt) + self._sort_formats(formats) + + return { + 'id': video_id, + 'display_id': display_id, + 'title': title, + 'description': description, + 'thumbnail': thumbnail, + 'duration': duration, + 'formats': formats, + 'subtitles': subtitles, + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/washingtonpost.py 
b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/washingtonpost.py new file mode 100644 index 0000000000..88bbbb2196 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/washingtonpost.py @@ -0,0 +1,106 @@ +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor +from ..utils import ( + int_or_none, + strip_jsonp, +) + + +class WashingtonPostIE(InfoExtractor): + _VALID_URL = r'^https?://(?:www\.)?washingtonpost\.com/.*?/(?P[^/]+)/(?:$|[?#])' + _TEST = { + 'url': 'http://www.washingtonpost.com/sf/national/2014/03/22/sinkhole-of-bureaucracy/', + 'info_dict': { + 'title': 'Sinkhole of bureaucracy', + }, + 'playlist': [{ + 'md5': 'c3f4b4922ffa259243f68e928db2db8c', + 'info_dict': { + 'id': 'fc433c38-b146-11e3-b8b3-44b1d1cd4c1f', + 'ext': 'mp4', + 'title': 'Breaking Points: The Paper Mine', + 'duration': 1287, + 'description': 'Overly complicated paper pushing is nothing new to government bureaucracy. But the way federal retirement applications are filed may be the most outdated. David Fahrenthold explains.', + 'uploader': 'The Washington Post', + 'timestamp': 1395527908, + 'upload_date': '20140322', + }, + }, { + 'md5': 'f645a07652c2950cd9134bb852c5f5eb', + 'info_dict': { + 'id': '41255e28-b14a-11e3-b8b3-44b1d1cd4c1f', + 'ext': 'mp4', + 'title': 'The town bureaucracy sustains', + 'description': 'Underneath the friendly town of Boyers is a sea of government paperwork. In a disused limestone mine, hundreds of locals now track, file and process retirement applications for the federal government. 
We set out to find out what it\'s like to do paperwork 230 feet underground.', + 'duration': 2217, + 'timestamp': 1395528005, + 'upload_date': '20140322', + 'uploader': 'The Washington Post', + }, + }] + } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + page_id = mobj.group('id') + + webpage = self._download_webpage(url, page_id) + title = self._og_search_title(webpage) + uuids = re.findall(r'data-video-uuid="([^"]+)"', webpage) + entries = [] + for i, uuid in enumerate(uuids, start=1): + vinfo_all = self._download_json( + 'http://www.washingtonpost.com/posttv/c/videojson/%s?resType=jsonp' % uuid, + page_id, + transform_source=strip_jsonp, + note='Downloading information of video %d/%d' % (i, len(uuids)) + ) + vinfo = vinfo_all[0]['contentConfig'] + uploader = vinfo.get('credits', {}).get('source') + timestamp = int_or_none( + vinfo.get('dateConfig', {}).get('dateFirstPublished'), 1000) + + formats = [{ + 'format_id': ( + '%s-%s-%s' % (s.get('type'), s.get('width'), s.get('bitrate')) + if s.get('width') + else s.get('type')), + 'vbr': s.get('bitrate') if s.get('width') != 0 else None, + 'width': s.get('width'), + 'height': s.get('height'), + 'acodec': s.get('audioCodec'), + 'vcodec': s.get('videoCodec') if s.get('width') != 0 else 'none', + 'filesize': s.get('fileSize'), + 'url': s.get('url'), + 'ext': 'mp4', + 'protocol': { + 'MP4': 'http', + 'F4F': 'f4m', + }.get(s.get('type')) + } for s in vinfo.get('streams', [])] + source_media_url = vinfo.get('sourceMediaURL') + if source_media_url: + formats.append({ + 'format_id': 'source_media', + 'url': source_media_url, + }) + self._sort_formats(formats) + entries.append({ + 'id': uuid, + 'title': vinfo['title'], + 'description': vinfo.get('blurb'), + 'uploader': uploader, + 'formats': formats, + 'duration': int_or_none(vinfo.get('videoDuration'), 100), + 'timestamp': timestamp, + }) + + return { + '_type': 'playlist', + 'entries': entries, + 'id': page_id, + 'title': title, + } diff --git 
a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/wat.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/wat.py new file mode 100644 index 0000000000..bf9e40bad7 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/wat.py @@ -0,0 +1,137 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import re +import hashlib + +from .common import InfoExtractor +from ..utils import ( + ExtractorError, + unified_strdate, +) + + +class WatIE(InfoExtractor): + _VALID_URL = r'http://www\.wat\.tv/video/(?P.*)-(?P.*?)_.*?\.html' + IE_NAME = 'wat.tv' + _TESTS = [ + { + 'url': 'http://www.wat.tv/video/soupe-figues-l-orange-aux-epices-6z1uz_2hvf7_.html', + 'md5': 'ce70e9223945ed26a8056d413ca55dc9', + 'info_dict': { + 'id': '11713067', + 'display_id': 'soupe-figues-l-orange-aux-epices', + 'ext': 'mp4', + 'title': 'Soupe de figues ц═ l\'orange et aux ц╘pices', + 'description': 'Retrouvez l\'ц╘mission "Petits plats en ц╘quilibre", diffusц╘e le 18 aoц╩t 2014.', + 'upload_date': '20140819', + 'duration': 120, + }, + }, + { + 'url': 'http://www.wat.tv/video/gregory-lemarchal-voix-ange-6z1v7_6ygkj_.html', + 'md5': 'fbc84e4378165278e743956d9c1bf16b', + 'info_dict': { + 'id': '11713075', + 'display_id': 'gregory-lemarchal-voix-ange', + 'ext': 'mp4', + 'title': 'Grц╘gory Lemarchal, une voix d\'ange depuis 10 ans (1/3)', + 'description': 'md5:b7a849cf16a2b733d9cd10c52906dee3', + 'upload_date': '20140816', + 'duration': 2910, + }, + 'skip': "Ce contenu n'est pas disponible pour l'instant.", + }, + ] + + def download_video_info(self, real_id): + # 'contentv4' is used in the website, but it also returns the related + # videos, we don't need them + info = self._download_json('http://www.wat.tv/interface/contentv3/' + real_id, real_id) + return info['media'] + + def _real_extract(self, url): + def real_id_for_chapter(chapter): + return chapter['tc_start'].split('-')[0] + mobj = re.match(self._VALID_URL, url) + 
short_id = mobj.group('short_id') + display_id = mobj.group('display_id') + webpage = self._download_webpage(url, display_id or short_id) + real_id = self._search_regex(r'xtpage = ".*-(.*?)";', webpage, 'real id') + + video_info = self.download_video_info(real_id) + + error_desc = video_info.get('error_desc') + if error_desc: + raise ExtractorError( + '%s returned error: %s' % (self.IE_NAME, error_desc), expected=True) + + geo_list = video_info.get('geoList') + country = geo_list[0] if geo_list else '' + + chapters = video_info['chapters'] + first_chapter = chapters[0] + files = video_info['files'] + first_file = files[0] + + if real_id_for_chapter(first_chapter) != real_id: + self.to_screen('Multipart video detected') + chapter_urls = [] + for chapter in chapters: + chapter_id = real_id_for_chapter(chapter) + # Yes, when we this chapter is processed by WatIE, + # it will download the info again + chapter_info = self.download_video_info(chapter_id) + chapter_urls.append(chapter_info['url']) + entries = [self.url_result(chapter_url) for chapter_url in chapter_urls] + return self.playlist_result(entries, real_id, video_info['title']) + + upload_date = None + if 'date_diffusion' in first_chapter: + upload_date = unified_strdate(first_chapter['date_diffusion']) + # Otherwise we can continue and extract just one part, we have to use + # the short id for getting the video url + + formats = [{ + 'url': 'http://wat.tv/get/android5/%s.mp4' % real_id, + 'format_id': 'Mobile', + }] + + fmts = [('SD', 'web')] + if first_file.get('hasHD'): + fmts.append(('HD', 'webhd')) + + def compute_token(param): + timestamp = '%08x' % int(self._download_webpage( + 'http://www.wat.tv/servertime', real_id, + 'Downloading server time').split('|')[0]) + magic = '9b673b13fa4682ed14c3cfa5af5310274b514c4133e9b3a81e6e3aba009l2564' + return '%s/%s' % (hashlib.md5((magic + param + timestamp).encode('ascii')).hexdigest(), timestamp) + + for fmt in fmts: + webid = '/%s/%s' % (fmt[1], real_id) + 
video_url = self._download_webpage( + 'http://www.wat.tv/get%s?token=%s&getURL=1&country=%s' % (webid, compute_token(webid), country), + real_id, + 'Downloding %s video URL' % fmt[0], + 'Failed to download %s video URL' % fmt[0], + False) + if not video_url: + continue + formats.append({ + 'url': video_url, + 'ext': 'mp4', + 'format_id': fmt[0], + }) + + return { + 'id': real_id, + 'display_id': display_id, + 'title': first_chapter['title'], + 'thumbnail': first_chapter['preview'], + 'description': first_chapter['description'], + 'view_count': video_info['views'], + 'upload_date': upload_date, + 'duration': first_file['duration'], + 'formats': formats, + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/wayofthemaster.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/wayofthemaster.py new file mode 100644 index 0000000000..af7bb8b492 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/wayofthemaster.py @@ -0,0 +1,52 @@ +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor + + +class WayOfTheMasterIE(InfoExtractor): + _VALID_URL = r'https?://www\.wayofthemaster\.com/([^/?#]*/)*(?P[^/?#]+)\.s?html(?:$|[?#])' + + _TEST = { + 'url': 'http://www.wayofthemaster.com/hbks.shtml', + 'md5': '5316b57487ada8480606a93cb3d18d24', + 'info_dict': { + 'id': 'hbks', + 'ext': 'mp4', + 'title': 'Intelligent Design vs. 
Evolution', + }, + } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group('id') + + webpage = self._download_webpage(url, video_id) + + title = self._search_regex( + r'(.*?)', webpage, 'page title') + + url_base = self._search_regex( + r'https?://www\d?\.(?:wdr\d?|funkhauseuropa)\.de/)(?P.+?)(?P%s)?\.html' % _PLAYER_REGEX + + _TESTS = [ + { + 'url': 'http://www1.wdr.de/mediathek/video/sendungen/servicezeit/videoservicezeit560-videoplayer_size-L.html', + 'info_dict': { + 'id': 'mdb-362427', + 'ext': 'flv', + 'title': 'Servicezeit', + 'description': 'md5:c8f43e5e815eeb54d0b96df2fba906cb', + 'upload_date': '20140310', + }, + 'params': { + 'skip_download': True, + }, + }, + { + 'url': 'http://www1.wdr.de/themen/av/videomargaspiegelisttot101-videoplayer.html', + 'info_dict': { + 'id': 'mdb-363194', + 'ext': 'flv', + 'title': 'Marga Spiegel ist tot', + 'description': 'md5:2309992a6716c347891c045be50992e4', + 'upload_date': '20140311', + }, + 'params': { + 'skip_download': True, + }, + }, + { + 'url': 'http://www1.wdr.de/themen/kultur/audioerlebtegeschichtenmargaspiegel100-audioplayer.html', + 'md5': '83e9e8fefad36f357278759870805898', + 'info_dict': { + 'id': 'mdb-194332', + 'ext': 'mp3', + 'title': 'Erlebte Geschichten: Marga Spiegel (29.11.2009)', + 'description': 'md5:2309992a6716c347891c045be50992e4', + 'upload_date': '20091129', + }, + }, + { + 'url': 'http://www.funkhauseuropa.de/av/audioflaviacoelhoamaramar100-audioplayer.html', + 'md5': '99a1443ff29af19f6c52cf6f4dc1f4aa', + 'info_dict': { + 'id': 'mdb-478135', + 'ext': 'mp3', + 'title': 'Flavia Coelho: Amar ц╘ Amar', + 'description': 'md5:7b29e97e10dfb6e265238b32fa35b23a', + 'upload_date': '20140717', + }, + }, + ] + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + page_url = mobj.group('url') + page_id = mobj.group('id') + + webpage = self._download_webpage(url, page_id) + + if mobj.group('player') is None: + entries = [ + 
self.url_result(page_url + href, 'WDR') + for href in re.findall(r'[0-9]+) + /[0-9]+/[0-9]+/ + (?P[0-9]+)_(?P[0-9]+)''' + IE_NAME = 'wdr:mobile' + _TEST = { + 'url': 'http://mobile-ondemand.wdr.de/CMS2010/mdb/ondemand/weltweit/fsk0/42/421735/421735_4283021.mp4', + 'info_dict': { + 'title': '4283021', + 'id': '421735', + 'ext': 'mp4', + 'age_limit': 0, + }, + 'skip': 'Problems with loading data.' + } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + return { + 'id': mobj.group('id'), + 'title': mobj.group('title'), + 'age_limit': int(mobj.group('age_limit')), + 'url': url, + 'user_agent': 'mobile', + } + + +class WDRMausIE(InfoExtractor): + _VALID_URL = 'http://(?:www\.)?wdrmaus\.de/(?:[^/]+/){,2}(?P<id>[^/?#]+)(?:/index\.php5|(?<!index)\.php5|/(?:$|[?#]))' + IE_DESC = 'Sendung mit der Maus' + _TESTS = [{ + 'url': 'http://www.wdrmaus.de/aktuelle-sendung/index.php5', + 'info_dict': { + 'id': 'aktuelle-sendung', + 'ext': 'mp4', + 'thumbnail': 're:^http://.+\.jpg', + 'upload_date': 're:^[0-9]{8}$', + 'title': 're:^[0-9.]{10} - Aktuelle Sendung$', + } + }, { + 'url': 'http://www.wdrmaus.de/sachgeschichten/sachgeschichten/40_jahre_maus.php5', + 'md5': '3b1227ca3ed28d73ec5737c65743b2a3', + 'info_dict': { + 'id': '40_jahre_maus', + 'ext': 'mp4', + 'thumbnail': 're:^http://.+\.jpg', + 'upload_date': '20131007', + 'title': '12.03.2011 - 40 Jahre Maus', + } + }] + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group('id') + + webpage = self._download_webpage(url, video_id) + param_code = self._html_search_regex( + r'<a href="\?startVideo=1&([^"]+)"', webpage, 'parameters') + + title_date = self._search_regex( + r'<div class="sendedatum"><p>Sendedatum:\s*([0-9\.]+)</p>', + webpage, 'air date') + title_str = self._html_search_regex( + r'<h1>(.*?)</h1>', webpage, 'title') + title = '%s - %s' % (title_date, title_str) + upload_date = unified_strdate( + self._html_search_meta('dc.date', webpage)) + + fields = 
compat_parse_qs(param_code) + video_url = fields['firstVideo'][0] + thumbnail = compat_urlparse.urljoin(url, fields['startPicture'][0]) + + formats = [{ + 'format_id': 'rtmp', + 'url': video_url, + }] + + jscode = self._download_webpage( + 'http://www.wdrmaus.de/codebase/js/extended-medien.min.js', + video_id, fatal=False, + note='Downloading URL translation table', + errnote='Could not download URL translation table') + if jscode: + for m in re.finditer( + r"stream:\s*'dslSrc=(?P<stream>[^']+)',\s*download:\s*'(?P<dl>[^']+)'\s*\}", + jscode): + if video_url.startswith(m.group('stream')): + http_url = video_url.replace( + m.group('stream'), m.group('dl')) + formats.append({ + 'format_id': 'http', + 'url': http_url, + }) + break + + self._sort_formats(formats) + + return { + 'id': video_id, + 'title': title, + 'formats': formats, + 'thumbnail': thumbnail, + 'upload_date': upload_date, + } + +# TODO test _1 diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/weibo.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/weibo.py new file mode 100644 index 0000000000..20bb039d38 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/weibo.py @@ -0,0 +1,49 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor + + +class WeiboIE(InfoExtractor): + """ + The videos in Weibo come from different sites, this IE just finds the link + to the external video and returns it. 
+ """ + _VALID_URL = r'https?://video\.weibo\.com/v/weishipin/t_(?P<id>.+?)\.htm' + + _TEST = { + 'url': 'http://video.weibo.com/v/weishipin/t_zjUw2kZ.htm', + 'info_dict': { + 'id': '98322879', + 'ext': 'flv', + 'title': 'И╜■Её╟Х─ЁФ°╨Ф°─Ф√╟Е╧©Е▒┼Б─°All Eyes On UsБ─²', + }, + 'params': { + 'skip_download': True, + }, + 'add_ie': ['Sina'], + } + + # Additional example videos from different sites + # Youku: http://video.weibo.com/v/weishipin/t_zQGDWQ8.htm + # 56.com: http://video.weibo.com/v/weishipin/t_zQ44HxN.htm + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url, flags=re.VERBOSE) + video_id = mobj.group('id') + info_url = 'http://video.weibo.com/?s=v&a=play_list&format=json&mix_video_id=t_%s' % video_id + info = self._download_json(info_url, video_id) + + videos_urls = map(lambda v: v['play_page_url'], info['result']['data']) + # Prefer sina video since they have thumbnails + videos_urls = sorted(videos_urls, key=lambda u: 'video.sina.com' in u) + player_url = videos_urls[-1] + m_sina = re.match(r'https?://video\.sina\.com\.cn/v/b/(\d+)-\d+\.html', + player_url) + if m_sina is not None: + self.to_screen('Sina video detected') + sina_id = m_sina.group(1) + player_url = 'http://you.video.sina.com.cn/swf/quotePlayer.swf?vid=%s' % sina_id + return self.url_result(player_url) diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/wimp.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/wimp.py new file mode 100644 index 0000000000..d6dec25ca9 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/wimp.py @@ -0,0 +1,55 @@ +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor +from .youtube import YoutubeIE + + +class WimpIE(InfoExtractor): + _VALID_URL = r'http://(?:www\.)?wimp\.com/([^/]+)/' + _TESTS = [{ + 'url': 'http://www.wimp.com/maruexhausted/', + 'md5': 'f1acced123ecb28d9bb79f2479f2b6a1', + 'info_dict': { + 'id': 
'maruexhausted', + 'ext': 'flv', + 'title': 'Maru is exhausted.', + 'description': 'md5:57e099e857c0a4ea312542b684a869b8', + } + }, { + # youtube video + 'url': 'http://www.wimp.com/clowncar/', + 'info_dict': { + 'id': 'cG4CEr2aiSg', + 'ext': 'mp4', + 'title': 'Basset hound clown car...incredible!', + 'description': 'md5:8d228485e0719898c017203f900b3a35', + 'uploader': 'Gretchen Hoey', + 'uploader_id': 'gretchenandjeff1', + 'upload_date': '20140303', + }, + 'add_ie': ['Youtube'], + }] + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group(1) + webpage = self._download_webpage(url, video_id) + video_url = self._search_regex( + r"[\"']file[\"']\s*[:,]\s*[\"'](.+?)[\"']", webpage, 'video URL') + if YoutubeIE.suitable(video_url): + self.to_screen('Found YouTube video') + return { + '_type': 'url', + 'url': video_url, + 'ie_key': YoutubeIE.ie_key(), + } + + return { + 'id': video_id, + 'url': video_url, + 'title': self._og_search_title(webpage), + 'thumbnail': self._og_search_thumbnail(webpage), + 'description': self._og_search_description(webpage), + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/wistia.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/wistia.py new file mode 100644 index 0000000000..13a079151c --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/wistia.py @@ -0,0 +1,63 @@ +from __future__ import unicode_literals + +from .common import InfoExtractor +from ..compat import compat_urllib_request +from ..utils import ExtractorError + + +class WistiaIE(InfoExtractor): + _VALID_URL = r'https?://(?:fast\.)?wistia\.net/embed/iframe/(?P<id>[a-z0-9]+)' + _API_URL = 'http://fast.wistia.com/embed/medias/{0:}.json' + + _TEST = { + 'url': 'http://fast.wistia.net/embed/iframe/sh7fpupwlt', + 'md5': 'cafeb56ec0c53c18c97405eecb3133df', + 'info_dict': { + 'id': 'sh7fpupwlt', + 'ext': 'mov', + 'title': 'Being Resourceful', + 'duration': 
117, + }, + } + + def _real_extract(self, url): + video_id = self._match_id(url) + + request = compat_urllib_request.Request(self._API_URL.format(video_id)) + request.add_header('Referer', url) # Some videos require this. + data_json = self._download_json(request, video_id) + if data_json.get('error'): + raise ExtractorError('Error while getting the playlist', + expected=True) + data = data_json['media'] + + formats = [] + thumbnails = [] + for atype, a in data['assets'].items(): + if atype == 'still': + thumbnails.append({ + 'url': a['url'], + 'resolution': '%dx%d' % (a['width'], a['height']), + }) + continue + if atype == 'preview': + continue + formats.append({ + 'format_id': atype, + 'url': a['url'], + 'width': a['width'], + 'height': a['height'], + 'filesize': a['size'], + 'ext': a['ext'], + 'preference': 1 if atype == 'original' else None, + }) + + self._sort_formats(formats) + + return { + 'id': video_id, + 'title': data['name'], + 'formats': formats, + 'thumbnails': thumbnails, + 'duration': data.get('duration'), + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/worldstarhiphop.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/worldstarhiphop.py new file mode 100644 index 0000000000..d5c26a032b --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/worldstarhiphop.py @@ -0,0 +1,53 @@ +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor + + +class WorldStarHipHopIE(InfoExtractor): + _VALID_URL = r'https?://(?:www|m)\.worldstar(?:candy|hiphop)\.com/videos/video\.php\?v=(?P<id>.*)' + _TEST = { + "url": "http://www.worldstarhiphop.com/videos/video.php?v=wshh6a7q1ny0G34ZwuIO", + "md5": "9d04de741161603bf7071bbf4e883186", + "info_dict": { + "id": "wshh6a7q1ny0G34ZwuIO", + "ext": "mp4", + "title": "KO Of The Week: MMA Fighter Gets Knocked Out By Swift Head Kick!" 
+ } + } + + def _real_extract(self, url): + video_id = self._match_id(url) + webpage = self._download_webpage(url, video_id) + + m_vevo_id = re.search(r'videoId=(.*?)&?', webpage) + if m_vevo_id is not None: + return self.url_result('vevo:%s' % m_vevo_id.group(1), ie='Vevo') + + video_url = self._search_regex( + r'so\.addVariable\("file","(.*?)"\)', webpage, 'video URL') + + if 'youtube' in video_url: + return self.url_result(video_url, ie='Youtube') + + video_title = self._html_search_regex( + r'(?s)<div class="content-heading">\s*<h1>(.*?)</h1>', + webpage, 'title') + + # Getting thumbnail and if not thumbnail sets correct title for WSHH candy video. + thumbnail = self._html_search_regex( + r'rel="image_src" href="(.*)" />', webpage, 'thumbnail', + fatal=False) + if not thumbnail: + _title = r'candytitles.*>(.*)</span>' + mobj = re.search(_title, webpage) + if mobj is not None: + video_title = mobj.group(1) + + return { + 'id': video_id, + 'url': video_url, + 'title': video_title, + 'thumbnail': thumbnail, + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/wrzuta.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/wrzuta.py new file mode 100644 index 0000000000..c427649211 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/wrzuta.py @@ -0,0 +1,82 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor +from ..utils import ( + int_or_none, + qualities, +) + + +class WrzutaIE(InfoExtractor): + IE_NAME = 'wrzuta.pl' + + _VALID_URL = r'https?://(?P<uploader>[0-9a-zA-Z]+)\.wrzuta\.pl/(?P<typ>film|audio)/(?P<id>[0-9a-zA-Z]+)' + + _TESTS = [{ + 'url': 'http://laboratoriumdextera.wrzuta.pl/film/aq4hIZWrkBu/nike_football_the_last_game', + 'md5': '9e67e05bed7c03b82488d87233a9efe7', + 'info_dict': { + 'id': 'aq4hIZWrkBu', + 'ext': 'mp4', + 'title': 'Nike Football: The Last Game', + 'duration': 307, + 'uploader_id': 
'laboratoriumdextera', + 'description': 'md5:7fb5ef3c21c5893375fda51d9b15d9cd', + }, + }, { + 'url': 'http://jolka85.wrzuta.pl/audio/063jOPX5ue2/liber_natalia_szroeder_-_teraz_ty', + 'md5': 'bc78077859bea7bcfe4295d7d7fc9025', + 'info_dict': { + 'id': '063jOPX5ue2', + 'ext': 'ogg', + 'title': 'Liber & Natalia Szroeder - Teraz Ty', + 'duration': 203, + 'uploader_id': 'jolka85', + 'description': 'md5:2d2b6340f9188c8c4cd891580e481096', + }, + }] + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group('id') + typ = mobj.group('typ') + uploader = mobj.group('uploader') + + webpage = self._download_webpage(url, video_id) + + quality = qualities(['SD', 'MQ', 'HQ', 'HD']) + + audio_table = {'flv': 'mp3', 'webm': 'ogg', '???': 'mp3'} + + embedpage = self._download_json('http://www.wrzuta.pl/npp/embed/%s/%s' % (uploader, video_id), video_id) + + formats = [] + for media in embedpage['url']: + fmt = media['type'].split('@')[0] + if typ == 'audio': + ext = audio_table.get(fmt, fmt) + else: + ext = fmt + + formats.append({ + 'format_id': '%s_%s' % (ext, media['quality'].lower()), + 'url': media['url'], + 'ext': ext, + 'quality': quality(media['quality']), + }) + + self._sort_formats(formats) + + return { + 'id': video_id, + 'title': self._og_search_title(webpage), + 'thumbnail': self._og_search_thumbnail(webpage), + 'formats': formats, + 'duration': int_or_none(embedpage['duration']), + 'uploader_id': uploader, + 'description': self._og_search_description(webpage), + 'age_limit': embedpage.get('minimalAge', 0), + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/xbef.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/xbef.py new file mode 100644 index 0000000000..80c48c37d3 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/xbef.py @@ -0,0 +1,46 @@ +from __future__ import unicode_literals + +from .common import InfoExtractor +from ..compat import 
( + compat_urllib_parse, +) + + +class XBefIE(InfoExtractor): + _VALID_URL = r'http://(?:www\.)?xbef\.com/video/(?P<id>[0-9]+)' + _TEST = { + 'url': 'http://xbef.com/video/5119-glamourous-lesbians-smoking-drinking-and-fucking', + 'md5': 'a478b565baff61634a98f5e5338be995', + 'info_dict': { + 'id': '5119', + 'ext': 'mp4', + 'title': 'md5:7358a9faef8b7b57acda7c04816f170e', + 'age_limit': 18, + 'thumbnail': 're:^http://.*\.jpg', + } + } + + def _real_extract(self, url): + video_id = self._match_id(url) + webpage = self._download_webpage(url, video_id) + + title = self._html_search_regex( + r'<h1[^>]*>(.*?)</h1>', webpage, 'title') + + config_url_enc = self._download_webpage( + 'http://xbef.com/Main/GetVideoURLEncoded/%s' % video_id, video_id, + note='Retrieving config URL') + config_url = compat_urllib_parse.unquote(config_url_enc) + config = self._download_xml( + config_url, video_id, note='Retrieving config') + + video_url = config.find('./file').text + thumbnail = config.find('./image').text + + return { + 'id': video_id, + 'url': video_url, + 'title': title, + 'thumbnail': thumbnail, + 'age_limit': 18, + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/xboxclips.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/xboxclips.py new file mode 100644 index 0000000000..a9aa72e73c --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/xboxclips.py @@ -0,0 +1,57 @@ +# encoding: utf-8 +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor +from ..utils import ( + parse_iso8601, + float_or_none, + int_or_none, +) + + +class XboxClipsIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?xboxclips\.com/video\.php\?.*vid=(?P<id>[\w-]{36})' + _TEST = { + 'url': 'https://xboxclips.com/video.php?uid=2533274823424419&gamertag=Iabdulelah&vid=074a69a9-5faf-46aa-b93b-9909c1720325', + 'md5': 'fbe1ec805e920aeb8eced3c3e657df5d', + 'info_dict': { + 'id': 
'074a69a9-5faf-46aa-b93b-9909c1720325', + 'ext': 'mp4', + 'title': 'Iabdulelah playing Upload Studio', + 'filesize_approx': 28101836.8, + 'timestamp': 1407388500, + 'upload_date': '20140807', + 'duration': 56, + } + } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group('id') + + webpage = self._download_webpage(url, video_id) + + video_url = self._html_search_regex( + r'>Link: <a href="([^"]+)">', webpage, 'video URL') + title = self._html_search_regex( + r'<title>XboxClips \| ([^<]+)', webpage, 'title') + timestamp = parse_iso8601(self._html_search_regex( + r'>Recorded: ([^<]+)<', webpage, 'upload date', fatal=False)) + filesize = float_or_none(self._html_search_regex( + r'>Size: ([\d\.]+)MB<', webpage, 'file size', fatal=False), invscale=1024 * 1024) + duration = int_or_none(self._html_search_regex( + r'>Duration: (\d+) Seconds<', webpage, 'duration', fatal=False)) + view_count = int_or_none(self._html_search_regex( + r'>Views: (\d+)<', webpage, 'view count', fatal=False)) + + return { + 'id': video_id, + 'url': video_url, + 'title': title, + 'timestamp': timestamp, + 'filesize_approx': filesize, + 'duration': duration, + 'view_count': view_count, + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/xhamster.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/xhamster.py new file mode 100644 index 0000000000..6b37bcbc95 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/xhamster.py @@ -0,0 +1,130 @@ +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor +from ..utils import ( + ExtractorError, + unified_strdate, + str_to_int, + int_or_none, + parse_duration, +) + + +class XHamsterIE(InfoExtractor): + """Information Extractor for xHamster""" + _VALID_URL = r'http://(?:.+?\.)?xhamster\.com/movies/(?P[0-9]+)/(?P.+?)\.html(?:\?.*)?' 
+ _TESTS = [ + { + 'url': 'http://xhamster.com/movies/1509445/femaleagent_shy_beauty_takes_the_bait.html', + 'info_dict': { + 'id': '1509445', + 'ext': 'mp4', + 'title': 'FemaleAgent Shy beauty takes the bait', + 'upload_date': '20121014', + 'uploader_id': 'Ruseful2011', + 'duration': 893, + 'age_limit': 18, + } + }, + { + 'url': 'http://xhamster.com/movies/2221348/britney_spears_sexy_booty.html?hd', + 'info_dict': { + 'id': '2221348', + 'ext': 'mp4', + 'title': 'Britney Spears Sexy Booty', + 'upload_date': '20130914', + 'uploader_id': 'jojo747400', + 'duration': 200, + 'age_limit': 18, + } + } + ] + + def _real_extract(self, url): + def extract_video_url(webpage): + mp4 = re.search(r'', webpage) + if mp4 is None: + raise ExtractorError('Unable to extract media URL') + else: + return mp4.group(1) + + def is_hd(webpage): + return '', + webpage, 'duration', fatal=False)) + + view_count = self._html_search_regex(r'Views: ([^<]+)', webpage, 'view count', fatal=False) + if view_count: + view_count = str_to_int(view_count) + + mobj = re.search(r"hint='(?P\d+) Likes / (?P\d+) Dislikes'", webpage) + (like_count, dislike_count) = (mobj.group('likecount'), mobj.group('dislikecount')) if mobj else (None, None) + + mobj = re.search(r'Comments \((?P\d+)\)', webpage) + comment_count = mobj.group('commentcount') if mobj else 0 + + age_limit = self._rta_search(webpage) + + hd = is_hd(webpage) + + video_url = extract_video_url(webpage) + formats = [{ + 'url': video_url, + 'format_id': 'hd' if hd else 'sd', + 'preference': 1, + }] + + if not hd: + mrss_url = self._search_regex(r'[0-9]+)' + _TEST = { + 'url': 'http://x-minus.org/track/4542/%D0%BF%D0%B5%D1%81%D0%B5%D0%BD%D0%BA%D0%B0-%D1%88%D0%BE%D1%84%D0%B5%D1%80%D0%B0.html', + 'md5': '401a15f2d2dcf6d592cb95528d72a2a8', + 'info_dict': { + 'id': '4542', + 'ext': 'mp3', + 'title': 'п⌡п╣п╬п╫п╦п╢ п░пЁя┐я┌п╦п╫-п÷п╣я│п╣п╫п╨п╟ я┬п╬я└п╣я─п╟', + 'duration': 156, + 'tbr': 320, + 'filesize_approx': 5900000, + 'view_count': int, + 'description': 
'md5:03238c5b663810bc79cf42ef3c03e371', + } + } + + def _real_extract(self, url): + video_id = self._match_id(url) + webpage = self._download_webpage(url, video_id) + + artist = self._html_search_regex( + r'minus_track\.artist="(.+?)"', webpage, 'artist') + title = artist + '-' + self._html_search_regex( + r'minus_track\.title="(.+?)"', webpage, 'title') + duration = int_or_none(self._html_search_regex( + r'minus_track\.dur_sec=\'([0-9]*?)\'', + webpage, 'duration', fatal=False)) + filesize_approx = parse_filesize(self._html_search_regex( + r'
    \s*([0-9.]+\s*[a-zA-Z][bB])', + webpage, 'approximate filesize', fatal=False)) + tbr = int_or_none(self._html_search_regex( + r'
    \s*([0-9]+)\s*kbps', + webpage, 'bitrate', fatal=False)) + view_count = int_or_none(self._html_search_regex( + r'
    (.*?)
    [0-9]+)/(.*)' + _TEST = { + 'url': 'http://video.xnxx.com/video1135332/lida_naked_funny_actress_5_', + 'md5': '0831677e2b4761795f68d417e0b7b445', + 'info_dict': { + 'id': '1135332', + 'ext': 'flv', + 'title': 'lida б╩ Naked Funny Actress (5)', + 'age_limit': 18, + } + } + + def _real_extract(self, url): + video_id = self._match_id(url) + webpage = self._download_webpage(url, video_id) + + video_url = self._search_regex(r'flv_url=(.*?)&', + webpage, 'video URL') + video_url = compat_urllib_parse.unquote(video_url) + + video_title = self._html_search_regex(r'(.*?)\s+-\s+XNXX.COM', + webpage, 'title') + + video_thumbnail = self._search_regex(r'url_bigthumb=(.*?)&', + webpage, 'thumbnail', fatal=False) + + return { + 'id': video_id, + 'url': video_url, + 'title': video_title, + 'ext': 'flv', + 'thumbnail': video_thumbnail, + 'age_limit': 18, + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/xtube.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/xtube.py new file mode 100644 index 0000000000..cf74d4fd50 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/xtube.py @@ -0,0 +1,132 @@ +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor +from ..compat import ( + compat_urllib_request, + compat_urllib_parse, +) +from ..utils import ( + parse_duration, + str_to_int, +) + + +class XTubeIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?(?P<url>xtube\.com/watch\.php\?v=(?P<id>[^/?&#]+))' + _TEST = { + 'url': 'http://www.xtube.com/watch.php?v=kVTUy_G222_', + 'md5': '092fbdd3cbe292c920ef6fc6a8a9cdab', + 'info_dict': { + 'id': 'kVTUy_G222_', + 'ext': 'mp4', + 'title': 'strange erotica', + 'description': 'http://www.xtube.com an ET kind of thing', + 'uploader': 'greenshowers', + 'duration': 450, + 'age_limit': 18, + } + } + + def _real_extract(self, url): + video_id = self._match_id(url) + + req = compat_urllib_request.Request(url) + 
req.add_header('Cookie', 'age_verified=1') + webpage = self._download_webpage(req, video_id) + + video_title = self._html_search_regex( + r'<p class="title">([^<]+)', webpage, 'title') + video_uploader = self._html_search_regex( + [r"var\s+contentOwnerId\s*=\s*'([^']+)", + r'By:\s*<a href="/community/profile\.php?user=([^"]+)'], + webpage, 'uploader', fatal=False) + video_description = self._html_search_regex( + r'<p class="fieldsDesc">([^<]+)', + webpage, 'description', fatal=False) + duration = parse_duration(self._html_search_regex( + r'<span class="bold">Runtime:</span> ([^<]+)</p>', + webpage, 'duration', fatal=False)) + view_count = str_to_int(self._html_search_regex( + r'<span class="bold">Views:</span> ([\d,\.]+)</p>', + webpage, 'view count', fatal=False)) + comment_count = str_to_int(self._html_search_regex( + r'<div id="commentBar">([\d,\.]+) Comments</div>', + webpage, 'comment count', fatal=False)) + + formats = [] + for format_id, video_url in re.findall( + r'flashvars\.quality_(.+?)\s*=\s*"([^"]+)"', webpage): + fmt = { + 'url': compat_urllib_parse.unquote(video_url), + 'format_id': format_id, + } + m = re.search(r'^(?P<height>\d+)[pP]', format_id) + if m: + fmt['height'] = int(m.group('height')) + formats.append(fmt) + + if not formats: + video_url = compat_urllib_parse.unquote(self._search_regex( + r'flashvars\.video_url\s*=\s*"([^"]+)"', + webpage, 'video URL')) + formats.append({'url': video_url}) + + self._sort_formats(formats) + + return { + 'id': video_id, + 'title': video_title, + 'uploader': video_uploader, + 'description': video_description, + 'duration': duration, + 'view_count': view_count, + 'comment_count': comment_count, + 'formats': formats, + 'age_limit': 18, + } + + +class XTubeUserIE(InfoExtractor): + IE_DESC = 'XTube user profile' + _VALID_URL = r'https?://(?:www\.)?xtube\.com/community/profile\.php\?(.*?)user=(?P<username>[^&#]+)(?:$|[&#])' + _TEST = { + 'url': 'http://www.xtube.com/community/profile.php?user=greenshowers', + 
'info_dict': { + 'id': 'greenshowers', + }, + 'playlist_mincount': 155, + } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + username = mobj.group('username') + + profile_page = self._download_webpage( + url, username, note='Retrieving profile page') + + video_count = int(self._search_regex( + r'<strong>%s\'s Videos \(([0-9]+)\)</strong>' % username, profile_page, + 'video count')) + + PAGE_SIZE = 25 + urls = [] + page_count = (video_count + PAGE_SIZE + 1) // PAGE_SIZE + for n in range(1, page_count + 1): + lpage_url = 'http://www.xtube.com/user_videos.php?page=%d&u=%s' % (n, username) + lpage = self._download_webpage( + lpage_url, username, + note='Downloading page %d/%d' % (n, page_count)) + urls.extend( + re.findall(r'addthis:url="([^"]+)"', lpage)) + + return { + '_type': 'playlist', + 'id': username, + 'entries': [{ + '_type': 'url', + 'url': eurl, + 'ie_key': 'XTube', + } for eurl in urls] + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/xvideos.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/xvideos.py new file mode 100644 index 0000000000..2a45dc5742 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/xvideos.py @@ -0,0 +1,50 @@ +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor +from ..compat import ( + compat_urllib_parse, +) +from ..utils import ( + clean_html, + ExtractorError, +) + + +class XVideosIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?xvideos\.com/video(?P<id>[0-9]+)(?:.*)' + _TEST = { + 'url': 'http://www.xvideos.com/video4588838/biker_takes_his_girl', + 'md5': '4b46ae6ea5e6e9086e714d883313c0c9', + 'info_dict': { + 'id': '4588838', + 'ext': 'flv', + 'title': 'Biker Takes his Girl', + 'age_limit': 18, + } + } + + def _real_extract(self, url): + video_id = self._match_id(url) + webpage = self._download_webpage(url, video_id) + + mobj = re.search(r'<h1 
class="inlineError">(.+?)</h1>', webpage) + if mobj: + raise ExtractorError('%s said: %s' % (self.IE_NAME, clean_html(mobj.group(1))), expected=True) + + video_url = compat_urllib_parse.unquote( + self._search_regex(r'flv_url=(.+?)&', webpage, 'video URL')) + video_title = self._html_search_regex( + r'<title>(.*?)\s+-\s+XVID', webpage, 'title') + video_thumbnail = self._search_regex( + r'url_bigthumb=(.+?)&', webpage, 'thumbnail', fatal=False) + + return { + 'id': video_id, + 'url': video_url, + 'title': video_title, + 'ext': 'flv', + 'thumbnail': video_thumbnail, + 'age_limit': 18, + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/yahoo.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/yahoo.py new file mode 100644 index 0000000000..f8e7041a08 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/yahoo.py @@ -0,0 +1,262 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import itertools +import json +import re + +from .common import InfoExtractor, SearchInfoExtractor +from ..compat import ( + compat_urllib_parse, + compat_urlparse, +) +from ..utils import ( + clean_html, + unescapeHTML, + ExtractorError, + int_or_none, +) + + +class YahooIE(InfoExtractor): + IE_DESC = 'Yahoo screen and movies' + _VALID_URL = r'(?P<url>(?P<host>https?://(?:[a-zA-Z]{2}\.)?[\da-zA-Z_-]+\.yahoo\.com)/(?:[^/]+/)*(?P<display_id>.+?)-(?P<id>[0-9]+)(?:-[a-z]+)?\.html)' + _TESTS = [ + { + 'url': 'http://screen.yahoo.com/julian-smith-travis-legg-watch-214727115.html', + 'md5': '4962b075c08be8690a922ee026d05e69', + 'info_dict': { + 'id': '2d25e626-2378-391f-ada0-ddaf1417e588', + 'ext': 'mp4', + 'title': 'Julian Smith & Travis Legg Watch Julian Smith', + 'description': 'Julian and Travis watch Julian Smith', + 'duration': 6863, + }, + }, + { + 'url': 'http://screen.yahoo.com/wired/codefellas-s1-ep12-cougar-lies-103000935.html', + 'md5': 'd6e6fc6e1313c608f316ddad7b82b306', + 'info_dict': { 
+ 'id': 'd1dedf8c-d58c-38c3-8963-e899929ae0a9', + 'ext': 'mp4', + 'title': 'Codefellas - The Cougar Lies with Spanish Moss', + 'description': 'md5:66b627ab0a282b26352136ca96ce73c1', + 'duration': 151, + }, + }, + { + 'url': 'https://screen.yahoo.com/community/community-sizzle-reel-203225340.html?format=embed', + 'md5': '60e8ac193d8fb71997caa8fce54c6460', + 'info_dict': { + 'id': '4fe78544-8d48-39d8-97cd-13f205d9fcdb', + 'ext': 'mp4', + 'title': "Yahoo Saves 'Community'", + 'description': 'md5:4d4145af2fd3de00cbb6c1d664105053', + 'duration': 170, + } + }, + { + 'url': 'https://tw.screen.yahoo.com/election-2014-askmayor/Ф∙╒Е∙▐Е╦┌И∙╥-И╩┐Г╖─И°°Ф┴╧ХЁ╢Ф╦┘Е╬╥-И²·Е╦╦И╚≤Е┌╡-033009720.html', + 'md5': '3a09cf59349cfaddae1797acc3c087fc', + 'info_dict': { + 'id': 'cac903b3-fcf4-3c14-b632-643ab541712f', + 'ext': 'mp4', + 'title': 'Ф∙╒Е∙▐Е╦┌И∙╥О╪▐И╩┐Г╖─И°°Ф┴╧ХЁ╢Ф╦┘Е╬╥Ц─▄И²·Е╦╦И╚≤Е┌╡Ц─█', + 'description': 'Г⌡╢Х╗─Е▐╟Е█≈Ф╡▓Ф█╥И│▀ Д╨╓И─ Е╠┘Д╨■И┐╫Д╧▀Ф°╚', + 'duration': 396, + } + }, + { + 'url': 'https://uk.screen.yahoo.com/editor-picks/cute-raccoon-freed-drain-using-091756545.html', + 'md5': '0b51660361f0e27c9789e7037ef76f4b', + 'info_dict': { + 'id': 'b3affa53-2e14-3590-852b-0e0db6cd1a58', + 'ext': 'mp4', + 'title': 'Cute Raccoon Freed From Drain\u00a0Using Angle Grinder', + 'description': 'md5:f66c890e1490f4910a9953c941dee944', + 'duration': 97, + } + }, + { + 'url': 'https://ca.sports.yahoo.com/video/program-makes-hockey-more-affordable-013127711.html', + 'md5': '57e06440778b1828a6079d2f744212c4', + 'info_dict': { + 'id': 'c9fa2a36-0d4d-3937-b8f6-cc0fb1881e73', + 'ext': 'mp4', + 'title': 'Program that makes hockey more affordable not offered in Manitoba', + 'description': 'md5:c54a609f4c078d92b74ffb9bf1f496f4', + 'duration': 121, + } + }, { + 'url': 'https://ca.finance.yahoo.com/news/hackers-sony-more-trouble-well-154609075.html', + 'md5': '226a895aae7e21b0129e2a2006fe9690', + 'info_dict': { + 'id': 'e624c4bc-3389-34de-9dfc-025f74943409', + 'ext': 'mp4', + 'title': '\'The 
Interview\' TV Spot: War', + 'description': 'The Interview', + 'duration': 30, + } + }, { + 'url': 'http://news.yahoo.com/video/china-moses-crazy-blues-104538833.html', + 'md5': '67010fdf3a08d290e060a4dd96baa07b', + 'info_dict': { + 'id': 'f885cf7f-43d4-3450-9fac-46ac30ece521', + 'ext': 'mp4', + 'title': 'China Moses Is Crazy About the Blues', + 'description': 'md5:9900ab8cd5808175c7b3fe55b979bed0', + 'duration': 128, + } + }, { + 'url': 'https://in.lifestyle.yahoo.com/video/connect-dots-dark-side-virgo-090247395.html', + 'md5': 'd9a083ccf1379127bf25699d67e4791b', + 'info_dict': { + 'id': '52aeeaa3-b3d1-30d8-9ef8-5d0cf05efb7c', + 'ext': 'mp4', + 'title': 'Connect the Dots: Dark Side of Virgo', + 'description': 'md5:1428185051cfd1949807ad4ff6d3686a', + 'duration': 201, + } + }, { + 'url': 'https://www.yahoo.com/movies/v/true-story-trailer-173000497.html', + 'md5': '989396ae73d20c6f057746fb226aa215', + 'info_dict': { + 'id': '071c4013-ce30-3a93-a5b2-e0413cd4a9d1', + 'ext': 'mp4', + 'title': '\'True Story\' Trailer', + 'description': 'True Story', + 'duration': 150, + }, + }, { + 'url': 'https://gma.yahoo.com/pizza-delivery-man-surprised-huge-tip-college-kids-195200785.html', + 'only_matching': True, + } + ] + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + display_id = mobj.group('display_id') + page_id = mobj.group('id') + url = mobj.group('url') + host = mobj.group('host') + webpage = self._download_webpage(url, display_id) + + # Look for iframed media first + iframe_m = re.search(r'<iframe[^>]+src="(/video/.+?-\d+\.html\?format=embed.*?)"', webpage) + if iframe_m: + iframepage = self._download_webpage( + host + iframe_m.group(1), display_id, 'Downloading iframe webpage') + items_json = self._search_regex( + r'mediaItems: (\[.+?\])$', iframepage, 'items', flags=re.MULTILINE, default=None) + if items_json: + items = json.loads(items_json) + video_id = items[0]['id'] + return self._get_info(video_id, display_id, webpage) + + items_json = 
self._search_regex( + r'mediaItems: ({.*?})$', webpage, 'items', flags=re.MULTILINE, + default=None) + if items_json is None: + CONTENT_ID_REGEXES = [ + r'YUI\.namespace\("Media"\)\.CONTENT_ID\s*=\s*"([^"]+)"', + r'root\.App\.Cache\.context\.videoCache\.curVideo = \{"([^"]+)"', + r'"first_videoid"\s*:\s*"([^"]+)"', + r'%s[^}]*"ccm_id"\s*:\s*"([^"]+)"' % re.escape(page_id), + ] + video_id = self._search_regex(CONTENT_ID_REGEXES, webpage, 'content ID') + else: + items = json.loads(items_json) + info = items['mediaItems']['query']['results']['mediaObj'][0] + # The 'meta' field is not always in the video webpage, we request it + # from another page + video_id = info['id'] + return self._get_info(video_id, display_id, webpage) + + def _get_info(self, video_id, display_id, webpage): + region = self._search_regex( + r'\\?"region\\?"\s*:\s*\\?"([^"]+?)\\?"', + webpage, 'region', fatal=False, default='US') + data = compat_urllib_parse.urlencode({ + 'protocol': 'http', + 'region': region, + }) + query_url = ( + 'https://video.media.yql.yahoo.com/v1/video/sapi/streams/' + '{id}?{data}'.format(id=video_id, data=data)) + query_result = self._download_json( + query_url, display_id, 'Downloading video info') + + info = query_result['query']['results']['mediaObj'][0] + meta = info.get('meta') + + if not meta: + msg = info['status'].get('msg') + if msg: + raise ExtractorError( + '%s returned error: %s' % (self.IE_NAME, msg), expected=True) + raise ExtractorError('Unable to extract media object meta') + + formats = [] + for s in info['streams']: + format_info = { + 'width': int_or_none(s.get('width')), + 'height': int_or_none(s.get('height')), + 'tbr': int_or_none(s.get('bitrate')), + } + + host = s['host'] + path = s['path'] + if host.startswith('rtmp'): + format_info.update({ + 'url': host, + 'play_path': path, + 'ext': 'flv', + }) + else: + format_url = compat_urlparse.urljoin(host, path) + format_info['url'] = format_url + formats.append(format_info) + + 
self._sort_formats(formats) + + return { + 'id': video_id, + 'display_id': display_id, + 'title': unescapeHTML(meta['title']), + 'formats': formats, + 'description': clean_html(meta['description']), + 'thumbnail': meta['thumbnail'] if meta.get('thumbnail') else self._og_search_thumbnail(webpage), + 'duration': int_or_none(meta.get('duration')), + } + + +class YahooSearchIE(SearchInfoExtractor): + IE_DESC = 'Yahoo screen search' + _MAX_RESULTS = 1000 + IE_NAME = 'screen.yahoo:search' + _SEARCH_KEY = 'yvsearch' + + def _get_n_results(self, query, n): + """Get a specified number of results for a query""" + entries = [] + for pagenum in itertools.count(0): + result_url = 'http://video.search.yahoo.com/search/?p=%s&fr=screen&o=js&gs=0&b=%d' % (compat_urllib_parse.quote_plus(query), pagenum * 30) + info = self._download_json(result_url, query, + note='Downloading results page ' + str(pagenum + 1)) + m = info['m'] + results = info['results'] + + for (i, r) in enumerate(results): + if (pagenum * 30) + i >= n: + break + mobj = re.search(r'(?P<url>screen\.yahoo\.com/.*?-\d*?\.html)"', r) + e = self.url_result('http://' + mobj.group('url'), 'Yahoo') + entries.append(e) + if (pagenum * 30 + i >= n) or (m['last'] >= (m['total'] - 1)): + break + + return { + '_type': 'playlist', + 'id': query, + 'entries': entries, + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/yesjapan.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/yesjapan.py new file mode 100644 index 0000000000..112a6c0301 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/yesjapan.py @@ -0,0 +1,62 @@ +# coding: utf-8 +from __future__ import unicode_literals + +from .common import InfoExtractor +from ..utils import ( + HEADRequest, + get_element_by_attribute, + parse_iso8601, +) + + +class YesJapanIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?yesjapan\.com/video/(?P<slug>[A-Za-z0-9\-]*)_(?P<id>[A-Za-z0-9]+)\.html' + 
_TEST = { + 'url': 'http://www.yesjapan.com/video/japanese-in-5-20-wa-and-ga-particle-usages_726497834.html', + 'md5': 'f0be416314e5be21a12b499b330c21cf', + 'info_dict': { + 'id': '726497834', + 'title': 'Japanese in 5! #20 - WA And GA Particle Usages', + 'description': 'This should clear up some issues most students of Japanese encounter with WA and GA....', + 'ext': 'mp4', + 'timestamp': 1416391590, + 'upload_date': '20141119', + 'thumbnail': 're:^https?://.*\.jpg$', + } + } + + def _real_extract(self, url): + video_id = self._match_id(url) + + webpage = self._download_webpage(url, video_id) + title = self._og_search_title(webpage) + video_url = self._og_search_video_url(webpage) + description = self._og_search_description(webpage) + thumbnail = self._og_search_thumbnail(webpage) + + timestamp = None + submit_info = get_element_by_attribute('class', 'pm-submit-data', webpage) + if submit_info: + timestamp = parse_iso8601(self._search_regex( + r'datetime="([^"]+)"', submit_info, 'upload date', fatal=False, default=None)) + + # attempt to resolve the final URL in order to get a proper extension + redirect_req = HEADRequest(video_url) + req = self._request_webpage( + redirect_req, video_id, note='Resolving final URL', errnote='Could not resolve final URL', fatal=False) + if req: + video_url = req.geturl() + + formats = [{ + 'format_id': 'sd', + 'url': video_url, + }] + + return { + 'id': video_id, + 'title': title, + 'formats': formats, + 'description': description, + 'timestamp': timestamp, + 'thumbnail': thumbnail, + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/ynet.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/ynet.py new file mode 100644 index 0000000000..894678a23d --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/ynet.py @@ -0,0 +1,50 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import re +import json + +from .common import InfoExtractor 
+from ..compat import compat_urllib_parse + + +class YnetIE(InfoExtractor): + _VALID_URL = r'http://(?:.+?\.)?ynet\.co\.il/(?:.+?/)?0,7340,(?P<id>L(?:-[0-9]+)+),00\.html' + _TESTS = [ + { + 'url': 'http://hot.ynet.co.il/home/0,7340,L-11659-99244,00.html', + 'info_dict': { + 'id': 'L-11659-99244', + 'ext': 'flv', + 'title': 'в░в≥в╘ в°в░ в≥в∙в⌠в╒ в·в░в≥в╓в■ в▒в░в═в∙', + 'thumbnail': 're:^https?://.*\.jpg', + } + }, { + 'url': 'http://hot.ynet.co.il/home/0,7340,L-8859-84418,00.html', + 'info_dict': { + 'id': 'L-8859-84418', + 'ext': 'flv', + 'title': "в╕в╓в∙: в■в═в╘в≥в╖в■ в■в°в∙в■в≤в╙ в╘в° в╙в∙в╗в▓в≥' в∙в≥в∙в°в≥в■ в╓в°в∙в≤в╖в≥в÷", + 'thumbnail': 're:^https?://.*\.jpg', + } + } + ] + + def _real_extract(self, url): + video_id = self._match_id(url) + webpage = self._download_webpage(url, video_id) + + content = compat_urllib_parse.unquote_plus(self._og_search_video_url(webpage)) + config = json.loads(self._search_regex(r'config=({.+?})$', content, 'video config')) + f4m_url = config['clip']['url'] + title = self._og_search_title(webpage) + m = re.search(r'ynet - HOT -- (["\']+)(?P<title>.+?)\1', title) + if m: + title = m.group('title') + + return { + 'id': video_id, + 'title': title, + 'formats': self._extract_f4m_formats(f4m_url, video_id), + 'thumbnail': self._og_search_thumbnail(webpage), + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/youjizz.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/youjizz.py new file mode 100644 index 0000000000..c642075dcf --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/youjizz.py @@ -0,0 +1,61 @@ +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor +from ..utils import ( + ExtractorError, +) + + +class YouJizzIE(InfoExtractor): + _VALID_URL = r'https?://(?:\w+\.)?youjizz\.com/videos/[^/#?]+-(?P<id>[0-9]+)\.html(?:$|[?#])' + _TEST = { + 'url': 
'http://www.youjizz.com/videos/zeichentrick-1-2189178.html', + 'md5': '07e15fa469ba384c7693fd246905547c', + 'info_dict': { + 'id': '2189178', + 'ext': 'flv', + "title": "Zeichentrick 1", + "age_limit": 18, + } + } + + def _real_extract(self, url): + video_id = self._match_id(url) + webpage = self._download_webpage(url, video_id) + age_limit = self._rta_search(webpage) + video_title = self._html_search_regex( + r'<title>\s*(.*)\s*', webpage, 'title') + + embed_page_url = self._search_regex( + r'(https?://www.youjizz.com/videos/embed/[0-9]+)', + webpage, 'embed page') + webpage = self._download_webpage( + embed_page_url, video_id, note='downloading embed page') + + # Get the video URL + m_playlist = re.search(r'so.addVariable\("playlist", ?"(?P.+?)"\);', webpage) + if m_playlist is not None: + playlist_url = m_playlist.group('playlist') + playlist_page = self._download_webpage(playlist_url, video_id, + 'Downloading playlist page') + m_levels = list(re.finditer(r'[^"]+)"\)\);', + webpage, 'video URL') + + return { + 'id': video_id, + 'url': video_url, + 'title': video_title, + 'ext': 'flv', + 'format': 'flv', + 'player_url': embed_page_url, + 'age_limit': age_limit, + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/youku.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/youku.py new file mode 100644 index 0000000000..97b98bbe88 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/youku.py @@ -0,0 +1,123 @@ +# coding: utf-8 + +from __future__ import unicode_literals + +import math +import random +import re +import time + +from .common import InfoExtractor +from ..utils import ( + ExtractorError, +) + + +class YoukuIE(InfoExtractor): + _VALID_URL = r'''(?x) + (?: + http://(?:v|player)\.youku\.com/(?:v_show/id_|player\.php/sid/)| + youku:) + (?P[A-Za-z0-9]+)(?:\.html|/v\.swf|) + ''' + _TEST = { + 'url': 'http://v.youku.com/v_show/id_XNDgyMDQ2NTQw.html', + 'md5': 
'ffe3f2e435663dc2d1eea34faeff5b5b', + 'params': { + 'test': False + }, + 'info_dict': { + 'id': 'XNDgyMDQ2NTQw_part00', + 'ext': 'flv', + 'title': 'youtube-dl test video "\'/\\ц╓Б├╜П²∙░' + } + } + + def _gen_sid(self): + nowTime = int(time.time() * 1000) + random1 = random.randint(1000, 1998) + random2 = random.randint(1000, 9999) + + return "%d%d%d" % (nowTime, random1, random2) + + def _get_file_ID_mix_string(self, seed): + mixed = [] + source = list("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ/\:._-1234567890") + seed = float(seed) + for i in range(len(source)): + seed = (seed * 211 + 30031) % 65536 + index = math.floor(seed / 65536 * len(source)) + mixed.append(source[int(index)]) + source.remove(source[int(index)]) + # return ''.join(mixed) + return mixed + + def _get_file_id(self, fileId, seed): + mixed = self._get_file_ID_mix_string(seed) + ids = fileId.split('*') + realId = [] + for ch in ids: + if ch: + realId.append(mixed[int(ch)]) + return ''.join(realId) + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group('id') + + info_url = 'http://v.youku.com/player/getPlayList/VideoIDS/' + video_id + + config = self._download_json(info_url, video_id) + + error_code = config['data'][0].get('error_code') + if error_code: + # -8 means blocked outside China. + error = config['data'][0].get('error') # Chinese and English, separated by newline. 
+ raise ExtractorError(error or 'Server reported error %i' % error_code, + expected=True) + + video_title = config['data'][0]['title'] + seed = config['data'][0]['seed'] + + format = self._downloader.params.get('format', None) + supported_format = list(config['data'][0]['streamfileids'].keys()) + + # TODO proper format selection + if format is None or format == 'best': + if 'hd2' in supported_format: + format = 'hd2' + else: + format = 'flv' + ext = 'flv' + elif format == 'worst': + format = 'mp4' + ext = 'mp4' + else: + format = 'flv' + ext = 'flv' + + fileid = config['data'][0]['streamfileids'][format] + keys = [s['k'] for s in config['data'][0]['segs'][format]] + # segs is usually a dictionary, but an empty *list* if an error occured. + + files_info = [] + sid = self._gen_sid() + fileid = self._get_file_id(fileid, seed) + + # column 8,9 of fileid represent the segment number + # fileid[7:9] should be changed + for index, key in enumerate(keys): + temp_fileid = '%s%02X%s' % (fileid[0:8], index, fileid[10:]) + download_url = 'http://k.youku.com/player/getFlvPath/sid/%s_%02X/st/flv/fileid/%s?k=%s' % (sid, index, temp_fileid, key) + + info = { + 'id': '%s_part%02d' % (video_id, index), + 'url': download_url, + 'uploader': None, + 'upload_date': None, + 'title': video_title, + 'ext': ext, + } + files_info.append(info) + + return files_info diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/youporn.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/youporn.py new file mode 100644 index 0000000000..107c9ac36e --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/youporn.py @@ -0,0 +1,120 @@ +from __future__ import unicode_literals + + +import json +import re +import sys + +from .common import InfoExtractor +from ..compat import ( + compat_urllib_parse_urlparse, + compat_urllib_request, +) +from ..utils import ( + ExtractorError, + unescapeHTML, + unified_strdate, +) +from ..aes import 
( + aes_decrypt_text +) + + +class YouPornIE(InfoExtractor): + _VALID_URL = r'^(?Phttps?://)(?:www\.)?(?Pyouporn\.com/watch/(?P[0-9]+)/(?P[^/]+))' + _TEST = { + 'url': 'http://www.youporn.com/watch/505835/sex-ed-is-it-safe-to-masturbate-daily/', + 'info_dict': { + 'id': '505835', + 'ext': 'mp4', + 'upload_date': '20101221', + 'description': 'Love & Sex Answers: http://bit.ly/DanAndJenn -- Is It Unhealthy To Masturbate Daily?', + 'uploader': 'Ask Dan And Jennifer', + 'title': 'Sex Ed: Is It Safe To Masturbate Daily?', + 'age_limit': 18, + } + } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group('videoid') + url = mobj.group('proto') + 'www.' + mobj.group('url') + + req = compat_urllib_request.Request(url) + req.add_header('Cookie', 'age_verified=1') + webpage = self._download_webpage(req, video_id) + age_limit = self._rta_search(webpage) + + # Get JSON parameters + json_params = self._search_regex( + r'var currentVideo = new Video\((.*)\)[,;]', + webpage, 'JSON parameters') + try: + params = json.loads(json_params) + except: + raise ExtractorError('Invalid JSON') + + self.report_extraction(video_id) + try: + video_title = params['title'] + upload_date = unified_strdate(params['release_date_f']) + video_description = params['description'] + video_uploader = params['submitted_by'] + thumbnail = params['thumbnails'][0]['image'] + except KeyError: + raise ExtractorError('Missing JSON parameter: ' + sys.exc_info()[1]) + + # Get all of the links from the page + DOWNLOAD_LIST_RE = r'(?s)<ul class="downloadList">(?P<download_list>.*?)</ul>' + download_list_html = self._search_regex(DOWNLOAD_LIST_RE, + webpage, 'download list').strip() + LINK_RE = r'<a href="([^"]+)">' + links = re.findall(LINK_RE, download_list_html) + + # Get all encrypted links + encrypted_links = re.findall(r'var encryptedQuality[0-9]{3}URL = \'([a-zA-Z0-9+/]+={0,2})\';', webpage) + for encrypted_link in encrypted_links: + link = 
aes_decrypt_text(encrypted_link, video_title, 32).decode('utf-8') + links.append(link) + + formats = [] + for link in links: + # A link looks like this: + # http://cdn1.download.youporn.phncdn.com/201210/31/8004515/480p_370k_8004515/YouPorn%20-%20Nubile%20Films%20The%20Pillow%20Fight.mp4?nvb=20121113051249&nva=20121114051249&ir=1200&sr=1200&hash=014b882080310e95fb6a0 + # A path looks like this: + # /201210/31/8004515/480p_370k_8004515/YouPorn%20-%20Nubile%20Films%20The%20Pillow%20Fight.mp4 + video_url = unescapeHTML(link) + path = compat_urllib_parse_urlparse(video_url).path + format_parts = path.split('/')[4].split('_')[:2] + + dn = compat_urllib_parse_urlparse(video_url).netloc.partition('.')[0] + + resolution = format_parts[0] + height = int(resolution[:-len('p')]) + bitrate = int(format_parts[1][:-len('k')]) + format = '-'.join(format_parts) + '-' + dn + + formats.append({ + 'url': video_url, + 'format': format, + 'format_id': format, + 'height': height, + 'tbr': bitrate, + 'resolution': resolution, + }) + + self._sort_formats(formats) + + if not formats: + raise ExtractorError('ERROR: no known formats available for video') + + return { + 'id': video_id, + 'uploader': video_uploader, + 'upload_date': upload_date, + 'title': video_title, + 'thumbnail': thumbnail, + 'description': video_description, + 'age_limit': age_limit, + 'formats': formats, + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/yourupload.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/yourupload.py new file mode 100644 index 0000000000..40fc4165f4 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/yourupload.py @@ -0,0 +1,58 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor + + +class YourUploadIE(InfoExtractor): + _VALID_URL = r'''(?x)https?://(?:www\.)? 
+ (?:yourupload\.com/watch| + embed\.yourupload\.com| + embed\.yucache\.net + )/(?P<id>[A-Za-z0-9]+) + ''' + _TESTS = [ + { + 'url': 'http://yourupload.com/watch/14i14h', + 'md5': 'bf5c2f95c4c917536e80936af7bc51e1', + 'info_dict': { + 'id': '14i14h', + 'ext': 'mp4', + 'title': 'BigBuckBunny_320x180.mp4', + 'thumbnail': 're:^https?://.*\.jpe?g', + } + }, + { + 'url': 'http://embed.yourupload.com/14i14h', + 'only_matching': True, + }, + { + 'url': 'http://embed.yucache.net/14i14h?client_file_id=803349', + 'only_matching': True, + }, + ] + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group('id') + + url = 'http://embed.yucache.net/{0:}'.format(video_id) + webpage = self._download_webpage(url, video_id) + + title = self._og_search_title(webpage) + thumbnail = self._og_search_thumbnail(webpage) + url = self._og_search_video_url(webpage) + + formats = [{ + 'format_id': 'sd', + 'url': url, + }] + + return { + 'id': video_id, + 'title': title, + 'formats': formats, + 'thumbnail': thumbnail, + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/youtube.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/youtube.py new file mode 100644 index 0000000000..550e18733e --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/youtube.py @@ -0,0 +1,1689 @@ +# coding: utf-8 + +from __future__ import unicode_literals + + +import itertools +import json +import os.path +import re +import time +import traceback + +from .common import InfoExtractor, SearchInfoExtractor +from .subtitles import SubtitlesInfoExtractor +from ..jsinterp import JSInterpreter +from ..swfinterp import SWFInterpreter +from ..compat import ( + compat_chr, + compat_parse_qs, + compat_urllib_parse, + compat_urllib_request, + compat_urlparse, + compat_str, +) +from ..utils import ( + clean_html, + ExtractorError, + get_element_by_attribute, + get_element_by_id, + int_or_none, + 
OnDemandPagedList, + orderedSet, + unescapeHTML, + unified_strdate, + uppercase_escape, +) + + +class YoutubeBaseInfoExtractor(InfoExtractor): + """Provide base functions for Youtube extractors""" + _LOGIN_URL = 'https://accounts.google.com/ServiceLogin' + _TWOFACTOR_URL = 'https://accounts.google.com/SecondFactor' + _NETRC_MACHINE = 'youtube' + # If True it will raise an error if no login info is provided + _LOGIN_REQUIRED = False + + def _set_language(self): + self._set_cookie( + '.youtube.com', 'PREF', 'f1=50000000&hl=en', + # YouTube sets the expire time to about two months + expire_time=time.time() + 2 * 30 * 24 * 3600) + + def _login(self): + """ + Attempt to log in to YouTube. + True is returned if successful or skipped. + False is returned if login failed. + + If _LOGIN_REQUIRED is set and no authentication was provided, an error is raised. + """ + (username, password) = self._get_login_info() + # No authentication to be performed + if username is None: + if self._LOGIN_REQUIRED: + raise ExtractorError('No login info available, needed for using %s.' 
% self.IE_NAME, expected=True) + return True + + login_page = self._download_webpage( + self._LOGIN_URL, None, + note='Downloading login page', + errnote='unable to fetch login page', fatal=False) + if login_page is False: + return + + galx = self._search_regex(r'(?s)<input.+?name="GALX".+?value="(.+?)"', + login_page, 'Login GALX parameter') + + # Log in + login_form_strs = { + 'continue': 'https://www.youtube.com/signin?action_handle_signin=true&feature=sign_in_button&hl=en_US&nomobiletemp=1', + 'Email': username, + 'GALX': galx, + 'Passwd': password, + + 'PersistentCookie': 'yes', + '_utf8': '霱', + 'bgresponse': 'js_disabled', + 'checkConnection': '', + 'checkedDomains': 'youtube', + 'dnConn': '', + 'pstMsg': '0', + 'rmShown': '1', + 'secTok': '', + 'signIn': 'Sign in', + 'timeStmp': '', + 'service': 'youtube', + 'uilel': '3', + 'hl': 'en_US', + } + + # Convert to UTF-8 *before* urlencode because Python 2.x's urlencode + # chokes on unicode + login_form = dict((k.encode('utf-8'), v.encode('utf-8')) for k, v in login_form_strs.items()) + login_data = compat_urllib_parse.urlencode(login_form).encode('ascii') + + req = compat_urllib_request.Request(self._LOGIN_URL, login_data) + login_results = self._download_webpage( + req, None, + note='Logging in', errnote='unable to log in', fatal=False) + if login_results is False: + return False + + if re.search(r'id="errormsg_0_Passwd"', login_results) is not None: + raise ExtractorError('Please use your account password and a two-factor code instead of an application-specific password.', expected=True) + + # Two-Factor + # TODO add SMS and phone call support - these require making a request and then prompting the user + + if re.search(r'(?i)<form[^>]* id="gaia_secondfactorform"', login_results) is not None: + tfa_code = self._get_tfa_info() + + if tfa_code is None: + self._downloader.report_warning('Two-factor authentication required. 
Provide it with --twofactor <code>') + self._downloader.report_warning('(Note that only TOTP (Google Authenticator App) codes work at this time.)') + return False + + # Unlike the first login form, secTok and timeStmp are both required for the TFA form + + match = re.search(r'id="secTok"\n\s+value=\'(.+)\'/>', login_results, re.M | re.U) + if match is None: + self._downloader.report_warning('Failed to get secTok - did the page structure change?') + secTok = match.group(1) + match = re.search(r'id="timeStmp"\n\s+value=\'(.+)\'/>', login_results, re.M | re.U) + if match is None: + self._downloader.report_warning('Failed to get timeStmp - did the page structure change?') + timeStmp = match.group(1) + + tfa_form_strs = { + 'continue': 'https://www.youtube.com/signin?action_handle_signin=true&feature=sign_in_button&hl=en_US&nomobiletemp=1', + 'smsToken': '', + 'smsUserPin': tfa_code, + 'smsVerifyPin': 'Verify', + + 'PersistentCookie': 'yes', + 'checkConnection': '', + 'checkedDomains': 'youtube', + 'pstMsg': '1', + 'secTok': secTok, + 'timeStmp': timeStmp, + 'service': 'youtube', + 'hl': 'en_US', + } + tfa_form = dict((k.encode('utf-8'), v.encode('utf-8')) for k, v in tfa_form_strs.items()) + tfa_data = compat_urllib_parse.urlencode(tfa_form).encode('ascii') + + tfa_req = compat_urllib_request.Request(self._TWOFACTOR_URL, tfa_data) + tfa_results = self._download_webpage( + tfa_req, None, + note='Submitting TFA code', errnote='unable to submit tfa', fatal=False) + + if tfa_results is False: + return False + + if re.search(r'(?i)<form[^>]* id="gaia_secondfactorform"', tfa_results) is not None: + self._downloader.report_warning('Two-factor code expired. 
Please try again, or use a one-use backup code instead.') + return False + if re.search(r'(?i)<form[^>]* id="gaia_loginform"', tfa_results) is not None: + self._downloader.report_warning('unable to log in - did the page structure change?') + return False + if re.search(r'smsauth-interstitial-reviewsettings', tfa_results) is not None: + self._downloader.report_warning('Your Google account has a security notice. Please log in on your web browser, resolve the notice, and try again.') + return False + + if re.search(r'(?i)<form[^>]* id="gaia_loginform"', login_results) is not None: + self._downloader.report_warning('unable to log in: bad username or password') + return False + return True + + def _real_initialize(self): + if self._downloader is None: + return + self._set_language() + if not self._login(): + return + + +class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor): + IE_DESC = 'YouTube.com' + _VALID_URL = r"""(?x)^ + ( + (?:https?://|//) # http(s):// or protocol-independent URL + (?:(?:(?:(?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie)?\.com/| + (?:www\.)?deturl\.com/www\.youtube\.com/| + (?:www\.)?pwnyoutube\.com/| + (?:www\.)?yourepeat\.com/| + tube\.majestyc\.net/| + youtube\.googleapis\.com/) # the various hostnames, with wildcard subdomains + (?:.*?\#/)? # handle anchor (#/) redirect urls + (?: # the various things that can precede the ID: + (?:(?:v|embed|e)/(?!videoseries)) # v/ or embed/ or e/ + |(?: # or the v= param in all its forms + (?:(?:watch|movie)(?:_popup)?(?:\.php)?/?)? # preceding watch(_popup|.php) or nothing (like /?v=xxxx) + (?:\?|\#!?) # the params delimiter ? or # or #! + (?:.*?&)? # any other preceding param (like /?s=tuff&v=xxxx) + v= + ) + )) + |youtu\.be/ # just youtu.be/xxxx + |(?:www\.)?cleanvideosearch\.com/media/action/yt/watch\?videoId= + ) + )? # all until now is optional -> you can pass the naked ID + ([0-9A-Za-z_-]{11}) # here is it! 
the YouTube video ID + (?!.*?&list=) # combined list/video URLs are handled by the playlist IE + (?(1).+)? # if we found the ID, everything can follow + $""" + _NEXT_URL_RE = r'[\?&]next_url=([^&]+)' + _formats = { + '5': {'ext': 'flv', 'width': 400, 'height': 240}, + '6': {'ext': 'flv', 'width': 450, 'height': 270}, + '13': {'ext': '3gp'}, + '17': {'ext': '3gp', 'width': 176, 'height': 144}, + '18': {'ext': 'mp4', 'width': 640, 'height': 360}, + '22': {'ext': 'mp4', 'width': 1280, 'height': 720}, + '34': {'ext': 'flv', 'width': 640, 'height': 360}, + '35': {'ext': 'flv', 'width': 854, 'height': 480}, + '36': {'ext': '3gp', 'width': 320, 'height': 240}, + '37': {'ext': 'mp4', 'width': 1920, 'height': 1080}, + '38': {'ext': 'mp4', 'width': 4096, 'height': 3072}, + '43': {'ext': 'webm', 'width': 640, 'height': 360}, + '44': {'ext': 'webm', 'width': 854, 'height': 480}, + '45': {'ext': 'webm', 'width': 1280, 'height': 720}, + '46': {'ext': 'webm', 'width': 1920, 'height': 1080}, + + + # 3d videos + '82': {'ext': 'mp4', 'height': 360, 'format_note': '3D', 'preference': -20}, + '83': {'ext': 'mp4', 'height': 480, 'format_note': '3D', 'preference': -20}, + '84': {'ext': 'mp4', 'height': 720, 'format_note': '3D', 'preference': -20}, + '85': {'ext': 'mp4', 'height': 1080, 'format_note': '3D', 'preference': -20}, + '100': {'ext': 'webm', 'height': 360, 'format_note': '3D', 'preference': -20}, + '101': {'ext': 'webm', 'height': 480, 'format_note': '3D', 'preference': -20}, + '102': {'ext': 'webm', 'height': 720, 'format_note': '3D', 'preference': -20}, + + # Apple HTTP Live Streaming + '92': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'preference': -10}, + '93': {'ext': 'mp4', 'height': 360, 'format_note': 'HLS', 'preference': -10}, + '94': {'ext': 'mp4', 'height': 480, 'format_note': 'HLS', 'preference': -10}, + '95': {'ext': 'mp4', 'height': 720, 'format_note': 'HLS', 'preference': -10}, + '96': {'ext': 'mp4', 'height': 1080, 'format_note': 'HLS', 'preference': 
-10}, + '132': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'preference': -10}, + '151': {'ext': 'mp4', 'height': 72, 'format_note': 'HLS', 'preference': -10}, + + # DASH mp4 video + '133': {'ext': 'mp4', 'height': 240, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40}, + '134': {'ext': 'mp4', 'height': 360, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40}, + '135': {'ext': 'mp4', 'height': 480, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40}, + '136': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40}, + '137': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40}, + '138': {'ext': 'mp4', 'height': 2160, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40}, + '160': {'ext': 'mp4', 'height': 144, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40}, + '264': {'ext': 'mp4', 'height': 1440, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40}, + '298': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'fps': 60, 'vcodec': 'h264'}, + '299': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'fps': 60, 'vcodec': 'h264'}, + '266': {'ext': 'mp4', 'height': 2160, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'vcodec': 'h264'}, + + # Dash mp4 audio + '139': {'ext': 'm4a', 'format_note': 'DASH audio', 'vcodec': 'none', 'abr': 48, 'preference': -50}, + '140': {'ext': 'm4a', 'format_note': 'DASH audio', 'vcodec': 'none', 'abr': 128, 'preference': -50}, + '141': {'ext': 'm4a', 'format_note': 'DASH audio', 'vcodec': 'none', 'abr': 256, 'preference': -50}, + + # Dash webm + '167': {'ext': 'webm', 'height': 360, 'width': 640, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'VP8', 'preference': -40}, + '168': {'ext': 'webm', 'height': 480, 'width': 854, 
'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'VP8', 'preference': -40}, + '169': {'ext': 'webm', 'height': 720, 'width': 1280, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'VP8', 'preference': -40}, + '170': {'ext': 'webm', 'height': 1080, 'width': 1920, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'VP8', 'preference': -40}, + '218': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'VP8', 'preference': -40}, + '219': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'VP8', 'preference': -40}, + '278': {'ext': 'webm', 'height': 144, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'container': 'webm', 'vcodec': 'VP9'}, + '242': {'ext': 'webm', 'height': 240, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40}, + '243': {'ext': 'webm', 'height': 360, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40}, + '244': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40}, + '245': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40}, + '246': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40}, + '247': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40}, + '248': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40}, + '271': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40}, + '272': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40}, + '302': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'fps': 60, 'vcodec': 'VP9'}, + '303': 
{'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'fps': 60, 'vcodec': 'VP9'}, + '313': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'vcodec': 'VP9'}, + + # Dash webm audio + '171': {'ext': 'webm', 'vcodec': 'none', 'format_note': 'DASH audio', 'abr': 128, 'preference': -50}, + '172': {'ext': 'webm', 'vcodec': 'none', 'format_note': 'DASH audio', 'abr': 256, 'preference': -50}, + + # Dash webm audio with opus inside + '249': {'ext': 'webm', 'vcodec': 'none', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 50, 'preference': -50}, + '250': {'ext': 'webm', 'vcodec': 'none', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 70, 'preference': -50}, + '251': {'ext': 'webm', 'vcodec': 'none', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 160, 'preference': -50}, + + # RTMP (unnamed) + '_rtmp': {'protocol': 'rtmp'}, + } + + IE_NAME = 'youtube' + _TESTS = [ + { + 'url': 'http://www.youtube.com/watch?v=BaW_jenozKc', + 'info_dict': { + 'id': 'BaW_jenozKc', + 'ext': 'mp4', + 'title': 'youtube-dl test video "\'/\\ä↭𝕐', + 'uploader': 'Philipp Hagemeister', + 'uploader_id': 'phihag', + 'upload_date': '20121002', + 'description': 'test chars: "\'/\\ä↭𝕐\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact phihag@phihag.de .', + 'categories': ['Science & Technology'], + 'like_count': int, + 'dislike_count': int, + } + }, + { + 'url': 'http://www.youtube.com/watch?v=UxxajLWwzqY', + 'note': 'Test generic use_cipher_signature video (#897)', + 'info_dict': { + 'id': 'UxxajLWwzqY', + 'ext': 'mp4', + 'upload_date': '20120506', + 'title': 'Icona Pop - I Love It (feat. 
Charli XCX) [OFFICIAL VIDEO]', + 'description': 'md5:fea86fda2d5a5784273df5c7cc994d9f', + 'uploader': 'Icona Pop', + 'uploader_id': 'IconaPop', + } + }, + { + 'url': 'https://www.youtube.com/watch?v=07FYdnEawAQ', + 'note': 'Test VEVO video with age protection (#956)', + 'info_dict': { + 'id': '07FYdnEawAQ', + 'ext': 'mp4', + 'upload_date': '20130703', + 'title': 'Justin Timberlake - Tunnel Vision (Explicit)', + 'description': 'md5:64249768eec3bc4276236606ea996373', + 'uploader': 'justintimberlakeVEVO', + 'uploader_id': 'justintimberlakeVEVO', + } + }, + { + 'url': '//www.YouTube.com/watch?v=yZIXLfi8CZQ', + 'note': 'Embed-only video (#1746)', + 'info_dict': { + 'id': 'yZIXLfi8CZQ', + 'ext': 'mp4', + 'upload_date': '20120608', + 'title': 'Principal Sexually Assaults A Teacher - Episode 117 - 8th June 2012', + 'description': 'md5:09b78bd971f1e3e289601dfba15ca4f7', + 'uploader': 'SET India', + 'uploader_id': 'setindia' + } + }, + { + 'url': 'http://www.youtube.com/watch?v=a9LDPn-MO4I', + 'note': '256k DASH audio (format 141) via DASH manifest', + 'info_dict': { + 'id': 'a9LDPn-MO4I', + 'ext': 'm4a', + 'upload_date': '20121002', + 'uploader_id': '8KVIDEO', + 'description': '', + 'uploader': '8KVIDEO', + 'title': 'UHDTV TEST 8K VIDEO.mp4' + }, + 'params': { + 'youtube_include_dash_manifest': True, + 'format': '141', + }, + }, + # DASH manifest with encrypted signature + { + 'url': 'https://www.youtube.com/watch?v=IB3lcPjvWLA', + 'info_dict': { + 'id': 'IB3lcPjvWLA', + 'ext': 'm4a', + 'title': 'Afrojack, Spree Wilson - The Spark ft. 
Spree Wilson', + 'description': 'md5:12e7067fa6735a77bdcbb58cb1187d2d', + 'uploader': 'AfrojackVEVO', + 'uploader_id': 'AfrojackVEVO', + 'upload_date': '20131011', + }, + 'params': { + 'youtube_include_dash_manifest': True, + 'format': '141', + }, + }, + # Controversy video + { + 'url': 'https://www.youtube.com/watch?v=T4XJQO3qol8', + 'info_dict': { + 'id': 'T4XJQO3qol8', + 'ext': 'mp4', + 'upload_date': '20100909', + 'uploader': 'The Amazing Atheist', + 'uploader_id': 'TheAmazingAtheist', + 'title': 'Burning Everyone\'s Koran', + 'description': 'SUBSCRIBE: http://www.youtube.com/saturninefilms\n\nEven Obama has taken a stand against freedom on this issue: http://www.huffingtonpost.com/2010/09/09/obama-gma-interview-quran_n_710282.html', + } + }, + # Normal age-gate video (No vevo, embed allowed) + { + 'url': 'http://youtube.com/watch?v=HtVdAasjOgU', + 'info_dict': { + 'id': 'HtVdAasjOgU', + 'ext': 'mp4', + 'title': 'The Witcher 3: Wild Hunt - The Sword Of Destiny Trailer', + 'description': 'md5:eca57043abae25130f58f655ad9a7771', + 'uploader': 'The Witcher', + 'uploader_id': 'WitcherGame', + 'upload_date': '20140605', + }, + }, + # video_info is None (https://github.com/rg3/youtube-dl/issues/4421) + { + 'url': '__2ABJjxzNo', + 'info_dict': { + 'id': '__2ABJjxzNo', + 'ext': 'mp4', + 'upload_date': '20100430', + 'uploader_id': 'deadmau5', + 'description': 'md5:12c56784b8032162bb936a5f76d55360', + 'uploader': 'deadmau5', + 'title': 'Deadmau5 - Some Chords (HD)', + }, + 'expected_warnings': [ + 'DASH manifest missing', + ] + }, + # Olympics (https://github.com/rg3/youtube-dl/issues/4431) + { + 'url': 'lqQg6PlCWgI', + 'info_dict': { + 'id': 'lqQg6PlCWgI', + 'ext': 'mp4', + 'upload_date': '20120731', + 'uploader_id': 'olympic', + 'description': 'HO09 - Women - GER-AUS - Hockey - 31 July 2012 - London 2012 Olympic Games', + 'uploader': 'Olympics', + 'title': 'Hockey - Women - GER-AUS - London 2012 Olympic Games', + }, + 'params': { + 'skip_download': 'requires avconv', + 
} + }, + ] + + def __init__(self, *args, **kwargs): + super(YoutubeIE, self).__init__(*args, **kwargs) + self._player_cache = {} + + def report_video_info_webpage_download(self, video_id): + """Report attempt to download video info webpage.""" + self.to_screen('%s: Downloading video info webpage' % video_id) + + def report_information_extraction(self, video_id): + """Report attempt to extract video information.""" + self.to_screen('%s: Extracting video information' % video_id) + + def report_unavailable_format(self, video_id, format): + """Report extracted video URL.""" + self.to_screen('%s: Format %s not available' % (video_id, format)) + + def report_rtmp_download(self): + """Indicate the download will use the RTMP protocol.""" + self.to_screen('RTMP download detected') + + def _signature_cache_id(self, example_sig): + """ Return a string representation of a signature """ + return '.'.join(compat_str(len(part)) for part in example_sig.split('.')) + + def _extract_signature_function(self, video_id, player_url, example_sig): + id_m = re.match( + r'.*?-(?P<id>[a-zA-Z0-9_-]+)(?:/watch_as3|/html5player)?\.(?P<ext>[a-z]+)$', + player_url) + if not id_m: + raise ExtractorError('Cannot identify player %r' % player_url) + player_type = id_m.group('ext') + player_id = id_m.group('id') + + # Read from filesystem cache + func_id = '%s_%s_%s' % ( + player_type, player_id, self._signature_cache_id(example_sig)) + assert os.path.basename(func_id) == func_id + + cache_spec = self._downloader.cache.load('youtube-sigfuncs', func_id) + if cache_spec is not None: + return lambda s: ''.join(s[i] for i in cache_spec) + + if player_type == 'js': + code = self._download_webpage( + player_url, video_id, + note='Downloading %s player %s' % (player_type, player_id), + errnote='Download of %s failed' % player_url) + res = self._parse_sig_js(code) + elif player_type == 'swf': + urlh = self._request_webpage( + player_url, video_id, + note='Downloading %s player %s' % (player_type, player_id), 
+ errnote='Download of %s failed' % player_url) + code = urlh.read() + res = self._parse_sig_swf(code) + else: + assert False, 'Invalid player type %r' % player_type + + if cache_spec is None: + test_string = ''.join(map(compat_chr, range(len(example_sig)))) + cache_res = res(test_string) + cache_spec = [ord(c) for c in cache_res] + + self._downloader.cache.store('youtube-sigfuncs', func_id, cache_spec) + return res + + def _print_sig_code(self, func, example_sig): + def gen_sig_code(idxs): + def _genslice(start, end, step): + starts = '' if start == 0 else str(start) + ends = (':%d' % (end + step)) if end + step >= 0 else ':' + steps = '' if step == 1 else (':%d' % step) + return 's[%s%s%s]' % (starts, ends, steps) + + step = None + # Quelch pyflakes warnings - start will be set when step is set + start = '(Never used)' + for i, prev in zip(idxs[1:], idxs[:-1]): + if step is not None: + if i - prev == step: + continue + yield _genslice(start, prev, step) + step = None + continue + if i - prev in [-1, 1]: + step = i - prev + start = prev + continue + else: + yield 's[%d]' % prev + if step is None: + yield 's[%d]' % i + else: + yield _genslice(start, i, step) + + test_string = ''.join(map(compat_chr, range(len(example_sig)))) + cache_res = func(test_string) + cache_spec = [ord(c) for c in cache_res] + expr_code = ' + '.join(gen_sig_code(cache_spec)) + signature_id_tuple = '(%s)' % ( + ', '.join(compat_str(len(p)) for p in example_sig.split('.'))) + code = ('if tuple(len(p) for p in s.split(\'.\')) == %s:\n' + ' return %s\n') % (signature_id_tuple, expr_code) + self.to_screen('Extracted signature function:\n' + code) + + def _parse_sig_js(self, jscode): + funcname = self._search_regex( + r'\.sig\|\|([a-zA-Z0-9]+)\(', jscode, + 'Initial JS player signature function name') + + jsi = JSInterpreter(jscode) + initial_function = jsi.extract_function(funcname) + return lambda s: initial_function([s]) + + def _parse_sig_swf(self, file_contents): + swfi = 
SWFInterpreter(file_contents) + TARGET_CLASSNAME = 'SignatureDecipher' + searched_class = swfi.extract_class(TARGET_CLASSNAME) + initial_function = swfi.extract_function(searched_class, 'decipher') + return lambda s: initial_function([s]) + + def _decrypt_signature(self, s, video_id, player_url, age_gate=False): + """Turn the encrypted s field into a working signature""" + + if player_url is None: + raise ExtractorError('Cannot decrypt signature without player_url') + + if player_url.startswith('//'): + player_url = 'https:' + player_url + try: + player_id = (player_url, self._signature_cache_id(s)) + if player_id not in self._player_cache: + func = self._extract_signature_function( + video_id, player_url, s + ) + self._player_cache[player_id] = func + func = self._player_cache[player_id] + if self._downloader.params.get('youtube_print_sig_code'): + self._print_sig_code(func, s) + return func(s) + except Exception as e: + tb = traceback.format_exc() + raise ExtractorError( + 'Signature extraction failed: ' + tb, cause=e) + + def _get_available_subtitles(self, video_id, webpage): + try: + sub_list = self._download_webpage( + 'https://video.google.com/timedtext?hl=en&type=list&v=%s' % video_id, + video_id, note=False) + except ExtractorError as err: + self._downloader.report_warning('unable to download video subtitles: %s' % compat_str(err)) + return {} + lang_list = re.findall(r'name="([^"]*)"[^>]+lang_code="([\w\-]+)"', sub_list) + + sub_lang_list = {} + for l in lang_list: + lang = l[1] + if lang in sub_lang_list: + continue + params = compat_urllib_parse.urlencode({ + 'lang': lang, + 'v': video_id, + 'fmt': self._downloader.params.get('subtitlesformat', 'srt'), + 'name': unescapeHTML(l[0]).encode('utf-8'), + }) + url = 'https://www.youtube.com/api/timedtext?' 
+ params + sub_lang_list[lang] = url + if not sub_lang_list: + self._downloader.report_warning('video doesn\'t have subtitles') + return {} + return sub_lang_list + + def _get_available_automatic_caption(self, video_id, webpage): + """We need the webpage for getting the captions url, pass it as an + argument to speed up the process.""" + sub_format = self._downloader.params.get('subtitlesformat', 'srt') + self.to_screen('%s: Looking for automatic captions' % video_id) + mobj = re.search(r';ytplayer.config = ({.*?});', webpage) + err_msg = 'Couldn\'t find automatic captions for %s' % video_id + if mobj is None: + self._downloader.report_warning(err_msg) + return {} + player_config = json.loads(mobj.group(1)) + try: + args = player_config['args'] + caption_url = args['ttsurl'] + timestamp = args['timestamp'] + # We get the available subtitles + list_params = compat_urllib_parse.urlencode({ + 'type': 'list', + 'tlangs': 1, + 'asrs': 1, + }) + list_url = caption_url + '&' + list_params + caption_list = self._download_xml(list_url, video_id) + original_lang_node = caption_list.find('track') + if original_lang_node is None or original_lang_node.attrib.get('kind') != 'asr': + self._downloader.report_warning('Video doesn\'t have automatic captions') + return {} + original_lang = original_lang_node.attrib['lang_code'] + + sub_lang_list = {} + for lang_node in caption_list.findall('target'): + sub_lang = lang_node.attrib['lang_code'] + params = compat_urllib_parse.urlencode({ + 'lang': original_lang, + 'tlang': sub_lang, + 'fmt': sub_format, + 'ts': timestamp, + 'kind': 'asr', + }) + sub_lang_list[sub_lang] = caption_url + '&' + params + return sub_lang_list + # An extractor error can be raise by the download process if there are + # no automatic captions but there are subtitles + except (KeyError, ExtractorError): + self._downloader.report_warning(err_msg) + return {} + + @classmethod + def extract_id(cls, url): + mobj = re.match(cls._VALID_URL, url, re.VERBOSE) + if mobj 
is None: + raise ExtractorError('Invalid URL: %s' % url) + video_id = mobj.group(2) + return video_id + + def _extract_from_m3u8(self, manifest_url, video_id): + url_map = {} + + def _get_urls(_manifest): + lines = _manifest.split('\n') + urls = filter(lambda l: l and not l.startswith('#'), + lines) + return urls + manifest = self._download_webpage(manifest_url, video_id, 'Downloading formats manifest') + formats_urls = _get_urls(manifest) + for format_url in formats_urls: + itag = self._search_regex(r'itag/(\d+?)/', format_url, 'itag') + url_map[itag] = format_url + return url_map + + def _extract_annotations(self, video_id): + url = 'https://www.youtube.com/annotations_invideo?features=1&legacy=1&video_id=%s' % video_id + return self._download_webpage(url, video_id, note='Searching for annotations.', errnote='Unable to download video annotations.') + + def _parse_dash_manifest( + self, video_id, dash_manifest_url, player_url, age_gate): + def decrypt_sig(mobj): + s = mobj.group(1) + dec_s = self._decrypt_signature(s, video_id, player_url, age_gate) + return '/signature/%s' % dec_s + dash_manifest_url = re.sub(r'/s/([\w\.]+)', decrypt_sig, dash_manifest_url) + dash_doc = self._download_xml( + dash_manifest_url, video_id, + note='Downloading DASH manifest', + errnote='Could not download DASH manifest') + + formats = [] + for r in dash_doc.findall('.//{urn:mpeg:DASH:schema:MPD:2011}Representation'): + url_el = r.find('{urn:mpeg:DASH:schema:MPD:2011}BaseURL') + if url_el is None: + continue + format_id = r.attrib['id'] + video_url = url_el.text + filesize = int_or_none(url_el.attrib.get('{http://youtube.com/yt/2012/10/10}contentLength')) + f = { + 'format_id': format_id, + 'url': video_url, + 'width': int_or_none(r.attrib.get('width')), + 'tbr': int_or_none(r.attrib.get('bandwidth'), 1000), + 'asr': int_or_none(r.attrib.get('audioSamplingRate')), + 'filesize': filesize, + 'fps': int_or_none(r.attrib.get('frameRate')), + } + try: + existing_format = next( + fo for fo 
in formats + if fo['format_id'] == format_id) + except StopIteration: + f.update(self._formats.get(format_id, {})) + formats.append(f) + else: + existing_format.update(f) + return formats + + def _real_extract(self, url): + proto = ( + 'http' if self._downloader.params.get('prefer_insecure', False) + else 'https') + + # Extract original video URL from URL with redirection, like age verification, using next_url parameter + mobj = re.search(self._NEXT_URL_RE, url) + if mobj: + url = proto + '://www.youtube.com/' + compat_urllib_parse.unquote(mobj.group(1)).lstrip('/') + video_id = self.extract_id(url) + + # Get video webpage + url = proto + '://www.youtube.com/watch?v=%s&gl=US&hl=en&has_verified=1&bpctr=9999999999' % video_id + video_webpage = self._download_webpage(url, video_id) + + # Attempt to extract SWF player URL + mobj = re.search(r'swfConfig.*?"(https?:\\/\\/.*?watch.*?-.*?\.swf)"', video_webpage) + if mobj is not None: + player_url = re.sub(r'\\(.)', r'\1', mobj.group(1)) + else: + player_url = None + + # Get video info + if re.search(r'player-age-gate-content">', video_webpage) is not None: + age_gate = True + # We simulate the access to the video from www.youtube.com/v/{video_id} + # this can be viewed without login into Youtube + data = compat_urllib_parse.urlencode({ + 'video_id': video_id, + 'eurl': 'https://youtube.googleapis.com/v/' + video_id, + 'sts': self._search_regex( + r'"sts"\s*:\s*(\d+)', video_webpage, 'sts', default=''), + }) + video_info_url = proto + '://www.youtube.com/get_video_info?' 
+ data + video_info_webpage = self._download_webpage( + video_info_url, video_id, + note='Refetching age-gated info webpage', + errnote='unable to download video info webpage') + video_info = compat_parse_qs(video_info_webpage) + else: + age_gate = False + try: + # Try looking directly into the video webpage + mobj = re.search(r';ytplayer\.config\s*=\s*({.*?});', video_webpage) + if not mobj: + raise ValueError('Could not find ytplayer.config') # caught below + json_code = uppercase_escape(mobj.group(1)) + ytplayer_config = json.loads(json_code) + args = ytplayer_config['args'] + # Convert to the same format returned by compat_parse_qs + video_info = dict((k, [v]) for k, v in args.items()) + if 'url_encoded_fmt_stream_map' not in args: + raise ValueError('No stream_map present') # caught below + except ValueError: + # We fallback to the get_video_info pages (used by the embed page) + self.report_video_info_webpage_download(video_id) + for el_type in ['&el=embedded', '&el=detailpage', '&el=vevo', '']: + video_info_url = ( + '%s://www.youtube.com/get_video_info?&video_id=%s%s&ps=default&eurl=&gl=US&hl=en' + % (proto, video_id, el_type)) + video_info_webpage = self._download_webpage( + video_info_url, + video_id, note=False, + errnote='unable to download video info webpage') + video_info = compat_parse_qs(video_info_webpage) + if 'token' in video_info: + break + if 'token' not in video_info: + if 'reason' in video_info: + raise ExtractorError( + 'YouTube said: %s' % video_info['reason'][0], + expected=True, video_id=video_id) + else: + raise ExtractorError( + '"token" parameter not in video info for unknown reason', + video_id=video_id) + + if 'view_count' in video_info: + view_count = int(video_info['view_count'][0]) + else: + view_count = None + + # Check for "rental" videos + if 'ypc_video_rental_bar_text' in video_info and 'author' not in video_info: + raise ExtractorError('"rental" videos not supported') + + # Start extracting information + 
self.report_information_extraction(video_id) + + # uploader + if 'author' not in video_info: + raise ExtractorError('Unable to extract uploader name') + video_uploader = compat_urllib_parse.unquote_plus(video_info['author'][0]) + + # uploader_id + video_uploader_id = None + mobj = re.search(r'<link itemprop="url" href="http://www.youtube.com/(?:user|channel)/([^"]+)">', video_webpage) + if mobj is not None: + video_uploader_id = mobj.group(1) + else: + self._downloader.report_warning('unable to extract uploader nickname') + + # title + if 'title' in video_info: + video_title = video_info['title'][0] + else: + self._downloader.report_warning('Unable to extract video title') + video_title = '_' + + # thumbnail image + # We try first to get a high quality image: + m_thumb = re.search(r'<span itemprop="thumbnail".*?href="(.*?)">', + video_webpage, re.DOTALL) + if m_thumb is not None: + video_thumbnail = m_thumb.group(1) + elif 'thumbnail_url' not in video_info: + self._downloader.report_warning('unable to extract video thumbnail') + video_thumbnail = None + else: # don't panic if we can't find it + video_thumbnail = compat_urllib_parse.unquote_plus(video_info['thumbnail_url'][0]) + + # upload date + upload_date = None + mobj = re.search(r'(?s)id="eow-date.*?>(.*?)</span>', video_webpage) + if mobj is None: + mobj = re.search( + r'(?s)id="watch-uploader-info".*?>.*?(?:Published|Uploaded|Streamed live) on (.*?)</strong>', + video_webpage) + if mobj is not None: + upload_date = ' '.join(re.sub(r'[/,-]', r' ', mobj.group(1)).split()) + upload_date = unified_strdate(upload_date) + + m_cat_container = self._search_regex( + r'(?s)<h4[^>]*>\s*Category\s*</h4>\s*<ul[^>]*>(.*?)</ul>', + video_webpage, 'categories', default=None) + if m_cat_container: + category = self._html_search_regex( + r'(?s)<a[^<]+>(.*?)</a>', m_cat_container, 'category', + default=None) + video_categories = None if category is None else [category] + else: + video_categories = None + + # description + 
video_description = get_element_by_id("eow-description", video_webpage) + if video_description: + video_description = re.sub(r'''(?x) + <a\s+ + (?:[a-zA-Z-]+="[^"]+"\s+)*? + title="([^"]+)"\s+ + (?:[a-zA-Z-]+="[^"]+"\s+)*? + class="yt-uix-redirect-link"\s*> + [^<]+ + </a> + ''', r'\1', video_description) + video_description = clean_html(video_description) + else: + fd_mobj = re.search(r'<meta name="description" content="([^"]+)"', video_webpage) + if fd_mobj: + video_description = unescapeHTML(fd_mobj.group(1)) + else: + video_description = '' + + def _extract_count(count_name): + count = self._search_regex( + r'id="watch-%s"[^>]*>.*?([\d,]+)\s*</span>' % re.escape(count_name), + video_webpage, count_name, default=None) + if count is not None: + return int(count.replace(',', '')) + return None + like_count = _extract_count('like') + dislike_count = _extract_count('dislike') + + # subtitles + video_subtitles = self.extract_subtitles(video_id, video_webpage) + + if self._downloader.params.get('listsubtitles', False): + self._list_available_subtitles(video_id, video_webpage) + return + + if 'length_seconds' not in video_info: + self._downloader.report_warning('unable to extract video duration') + video_duration = None + else: + video_duration = int(compat_urllib_parse.unquote_plus(video_info['length_seconds'][0])) + + # annotations + video_annotations = None + if self._downloader.params.get('writeannotations', False): + video_annotations = self._extract_annotations(video_id) + + def _map_to_format_list(urlmap): + formats = [] + for itag, video_real_url in urlmap.items(): + dct = { + 'format_id': itag, + 'url': video_real_url, + 'player_url': player_url, + } + if itag in self._formats: + dct.update(self._formats[itag]) + formats.append(dct) + return formats + + if 'conn' in video_info and video_info['conn'][0].startswith('rtmp'): + self.report_rtmp_download() + formats = [{ + 'format_id': '_rtmp', + 'protocol': 'rtmp', + 'url': video_info['conn'][0], + 'player_url': 
player_url, + }] + elif len(video_info.get('url_encoded_fmt_stream_map', [''])[0]) >= 1 or len(video_info.get('adaptive_fmts', [''])[0]) >= 1: + encoded_url_map = video_info.get('url_encoded_fmt_stream_map', [''])[0] + ',' + video_info.get('adaptive_fmts', [''])[0] + if 'rtmpe%3Dyes' in encoded_url_map: + raise ExtractorError('rtmpe downloads are not supported, see https://github.com/rg3/youtube-dl/issues/343 for more information.', expected=True) + url_map = {} + for url_data_str in encoded_url_map.split(','): + url_data = compat_parse_qs(url_data_str) + if 'itag' not in url_data or 'url' not in url_data: + continue + format_id = url_data['itag'][0] + url = url_data['url'][0] + + if 'sig' in url_data: + url += '&signature=' + url_data['sig'][0] + elif 's' in url_data: + encrypted_sig = url_data['s'][0] + + if not age_gate: + jsplayer_url_json = self._search_regex( + r'"assets":.+?"js":\s*("[^"]+")', + video_webpage, 'JS player URL') + player_url = json.loads(jsplayer_url_json) + if player_url is None: + player_url_json = self._search_regex( + r'ytplayer\.config.*?"url"\s*:\s*("[^"]+")', + video_webpage, 'age gate player URL') + player_url = json.loads(player_url_json) + + if self._downloader.params.get('verbose'): + if player_url is None: + player_version = 'unknown' + player_desc = 'unknown' + else: + if player_url.endswith('swf'): + player_version = self._search_regex( + r'-(.+?)(?:/watch_as3)?\.swf$', player_url, + 'flash player', fatal=False) + player_desc = 'flash player %s' % player_version + else: + player_version = self._search_regex( + r'html5player-([^/]+?)(?:/html5player)?\.js', + player_url, + 'html5 player', fatal=False) + player_desc = 'html5 player %s' % player_version + + parts_sizes = self._signature_cache_id(encrypted_sig) + self.to_screen('{%s} signature length %s, %s' % + (format_id, parts_sizes, player_desc)) + + signature = self._decrypt_signature( + encrypted_sig, video_id, player_url, age_gate) + url += '&signature=' + signature + if 
'ratebypass' not in url: + url += '&ratebypass=yes' + url_map[format_id] = url + formats = _map_to_format_list(url_map) + elif video_info.get('hlsvp'): + manifest_url = video_info['hlsvp'][0] + url_map = self._extract_from_m3u8(manifest_url, video_id) + formats = _map_to_format_list(url_map) + else: + raise ExtractorError('no conn, hlsvp or url_encoded_fmt_stream_map information found in video info') + + # Look for the DASH manifest + if self._downloader.params.get('youtube_include_dash_manifest', True): + dash_mpd = video_info.get('dashmpd') + if dash_mpd: + dash_manifest_url = dash_mpd[0] + try: + dash_formats = self._parse_dash_manifest( + video_id, dash_manifest_url, player_url, age_gate) + except (ExtractorError, KeyError) as e: + self.report_warning( + 'Skipping DASH manifest: %r' % e, video_id) + else: + formats.extend(dash_formats) + + self._sort_formats(formats) + + return { + 'id': video_id, + 'uploader': video_uploader, + 'uploader_id': video_uploader_id, + 'upload_date': upload_date, + 'title': video_title, + 'thumbnail': video_thumbnail, + 'description': video_description, + 'categories': video_categories, + 'subtitles': video_subtitles, + 'duration': video_duration, + 'age_limit': 18 if age_gate else 0, + 'annotations': video_annotations, + 'webpage_url': proto + '://www.youtube.com/watch?v=%s' % video_id, + 'view_count': view_count, + 'like_count': like_count, + 'dislike_count': dislike_count, + 'formats': formats, + } + + +class YoutubePlaylistIE(YoutubeBaseInfoExtractor): + IE_DESC = 'YouTube.com playlists' + _VALID_URL = r"""(?x)(?: + (?:https?://)? + (?:\w+\.)? + youtube\.com/ + (?: + (?:course|view_play_list|my_playlists|artist|playlist|watch|embed/videoseries) + \? (?:.*?&)*? 
(?:p|a|list)= + | p/ + ) + ( + (?:PL|LL|EC|UU|FL|RD)?[0-9A-Za-z-_]{10,} + # Top tracks, they can also include dots + |(?:MC)[\w\.]* + ) + .* + | + ((?:PL|LL|EC|UU|FL|RD)[0-9A-Za-z-_]{10,}) + )""" + _TEMPLATE_URL = 'https://www.youtube.com/playlist?list=%s' + _VIDEO_RE = r'href="\s*/watch\?v=(?P<id>[0-9A-Za-z_-]{11})&[^"]*?index=(?P<index>\d+)' + IE_NAME = 'youtube:playlist' + _TESTS = [{ + 'url': 'https://www.youtube.com/playlist?list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re', + 'info_dict': { + 'title': 'ytdl test PL', + 'id': 'PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re', + }, + 'playlist_count': 3, + }, { + 'url': 'https://www.youtube.com/playlist?list=PLtPgu7CB4gbZDA7i_euNxn75ISqxwZPYx', + 'info_dict': { + 'title': 'YDL_Empty_List', + }, + 'playlist_count': 0, + }, { + 'note': 'Playlist with deleted videos (#651). As a bonus, the video #51 is also twice in this list.', + 'url': 'https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC', + 'info_dict': { + 'title': '29C3: Not my department', + }, + 'playlist_count': 95, + }, { + 'note': 'issue #673', + 'url': 'PLBB231211A4F62143', + 'info_dict': { + 'title': '[OLD]Team Fortress 2 (Class-based LP)', + }, + 'playlist_mincount': 26, + }, { + 'note': 'Large playlist', + 'url': 'https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q', + 'info_dict': { + 'title': 'Uploads from Cauchemar', + }, + 'playlist_mincount': 799, + }, { + 'url': 'PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl', + 'info_dict': { + 'title': 'YDL_safe_search', + }, + 'playlist_count': 2, + }, { + 'note': 'embedded', + 'url': 'http://www.youtube.com/embed/videoseries?list=PL6IaIsEjSbf96XFRuNccS_RuEXwNdsoEu', + 'playlist_count': 4, + 'info_dict': { + 'title': 'JODA15', + } + }, { + 'note': 'Embedded SWF player', + 'url': 'http://www.youtube.com/p/YN5VISEtHet5D4NEvfTd0zcgFk84NqFZ?hl=en_US&fs=1&rel=0', + 'playlist_count': 4, + 'info_dict': { + 'title': 'JODA7', + } + }, { + 'note': 'Buggy playlist: the webpage has a "Load more" button but it doesn\'t 
have more videos', + 'url': 'https://www.youtube.com/playlist?list=UUXw-G3eDE9trcvY2sBMM_aA', + 'info_dict': { + 'title': 'Uploads from Interstellar Movie', + }, + 'playlist_mincout': 21, + }] + + def _real_initialize(self): + self._login() + + def _ids_to_results(self, ids): + return [ + self.url_result(vid_id, 'Youtube', video_id=vid_id) + for vid_id in ids] + + def _extract_mix(self, playlist_id): + # The mixes are generated from a a single video + # the id of the playlist is just 'RD' + video_id + url = 'https://youtube.com/watch?v=%s&list=%s' % (playlist_id[-11:], playlist_id) + webpage = self._download_webpage( + url, playlist_id, 'Downloading Youtube mix') + search_title = lambda class_name: get_element_by_attribute('class', class_name, webpage) + title_span = ( + search_title('playlist-title') or + search_title('title long-title') or + search_title('title')) + title = clean_html(title_span) + ids = orderedSet(re.findall( + r'''(?xs)data-video-username=".*?".*? + href="/watch\?v=([0-9A-Za-z_-]{11})&[^"]*?list=%s''' % re.escape(playlist_id), + webpage)) + url_results = self._ids_to_results(ids) + + return self.playlist_result(url_results, playlist_id, title) + + def _real_extract(self, url): + # Extract playlist id + mobj = re.match(self._VALID_URL, url) + if mobj is None: + raise ExtractorError('Invalid URL: %s' % url) + playlist_id = mobj.group(1) or mobj.group(2) + + # Check if it's a video-specific URL + query_dict = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query) + if 'v' in query_dict: + video_id = query_dict['v'][0] + if self._downloader.params.get('noplaylist'): + self.to_screen('Downloading just video %s because of --no-playlist' % video_id) + return self.url_result(video_id, 'Youtube', video_id=video_id) + else: + self.to_screen('Downloading playlist %s - add --no-playlist to just download video %s' % (playlist_id, video_id)) + + if playlist_id.startswith('RD'): + # Mixes require a custom extraction process + return 
self._extract_mix(playlist_id) + if playlist_id.startswith('TL'): + raise ExtractorError('For downloading YouTube.com top lists, use ' + 'the "yttoplist" keyword, for example "youtube-dl \'yttoplist:music:Top Tracks\'"', expected=True) + + url = self._TEMPLATE_URL % playlist_id + page = self._download_webpage(url, playlist_id) + more_widget_html = content_html = page + + # Check if the playlist exists or is private + if re.search(r'<div class="yt-alert-message">[^<]*?(The|This) playlist (does not exist|is private)[^<]*?</div>', page) is not None: + raise ExtractorError( + 'The playlist doesn\'t exist or is private, use --username or ' + '--netrc to access it.', + expected=True) + + # Extract the video ids from the playlist pages + ids = [] + + for page_num in itertools.count(1): + matches = re.finditer(self._VIDEO_RE, content_html) + # We remove the duplicates and the link with index 0 + # (it's not the first video of the playlist) + new_ids = orderedSet(m.group('id') for m in matches if m.group('index') != '0') + ids.extend(new_ids) + + mobj = re.search(r'data-uix-load-more-href="/?(?P<more>[^"]+)"', more_widget_html) + if not mobj: + break + + more = self._download_json( + 'https://youtube.com/%s' % mobj.group('more'), playlist_id, + 'Downloading page #%s' % page_num, + transform_source=uppercase_escape) + content_html = more['content_html'] + if not content_html.strip(): + # Some webpages show a "Load more" button but they don't + # have more videos + break + more_widget_html = more['load_more_widget_html'] + + playlist_title = self._html_search_regex( + r'(?s)<h1 class="pl-header-title[^"]*">\s*(.*?)\s*</h1>', + page, 'title') + + url_results = self._ids_to_results(ids) + return self.playlist_result(url_results, playlist_id, playlist_title) + + +class YoutubeTopListIE(YoutubePlaylistIE): + IE_NAME = 'youtube:toplist' + IE_DESC = ('YouTube.com top lists, "yttoplist:{channel}:{list title}"' + ' (Example: "yttoplist:music:Top Tracks")') + _VALID_URL = 
r'yttoplist:(?P<chann>.*?):(?P<title>.*?)$' + _TESTS = [{ + 'url': 'yttoplist:music:Trending', + 'playlist_mincount': 5, + 'skip': 'Only works for logged-in users', + }] + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + channel = mobj.group('chann') + title = mobj.group('title') + query = compat_urllib_parse.urlencode({'title': title}) + channel_page = self._download_webpage( + 'https://www.youtube.com/%s' % channel, title) + link = self._html_search_regex( + r'''(?x) + <a\s+href="([^"]+)".*?>\s* + <span\s+class="branded-page-module-title-text">\s* + <span[^>]*>.*?%s.*?</span>''' % re.escape(query), + channel_page, 'list') + url = compat_urlparse.urljoin('https://www.youtube.com/', link) + + video_re = r'data-index="\d+".*?data-video-id="([0-9A-Za-z_-]{11})"' + ids = [] + # sometimes the webpage doesn't contain the videos + # retry until we get them + for i in itertools.count(0): + msg = 'Downloading Youtube mix' + if i > 0: + msg += ', retry #%d' % i + + webpage = self._download_webpage(url, title, msg) + ids = orderedSet(re.findall(video_re, webpage)) + if ids: + break + url_results = self._ids_to_results(ids) + return self.playlist_result(url_results, playlist_title=title) + + +class YoutubeChannelIE(InfoExtractor): + IE_DESC = 'YouTube.com channels' + _VALID_URL = r'https?://(?:youtu\.be|(?:\w+\.)?youtube(?:-nocookie)?\.com)/channel/(?P<id>[0-9A-Za-z_-]+)' + IE_NAME = 'youtube:channel' + _TESTS = [{ + 'note': 'paginated channel', + 'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w', + 'playlist_mincount': 91, + }] + + def extract_videos_from_page(self, page): + ids_in_page = [] + for mobj in re.finditer(r'href="/watch\?v=([0-9A-Za-z_-]+)&?', page): + if mobj.group(1) not in ids_in_page: + ids_in_page.append(mobj.group(1)) + return ids_in_page + + def _real_extract(self, url): + channel_id = self._match_id(url) + + video_ids = [] + url = 'https://www.youtube.com/channel/%s/videos' % channel_id + channel_page = 
self._download_webpage(url, channel_id) + autogenerated = re.search(r'''(?x) + class="[^"]*?(?: + channel-header-autogenerated-label| + yt-channel-title-autogenerated + )[^"]*"''', channel_page) is not None + + if autogenerated: + # The videos are contained in a single page + # the ajax pages can't be used, they are empty + video_ids = self.extract_videos_from_page(channel_page) + entries = [ + self.url_result(video_id, 'Youtube', video_id=video_id) + for video_id in video_ids] + return self.playlist_result(entries, channel_id) + + def _entries(): + more_widget_html = content_html = channel_page + for pagenum in itertools.count(1): + + ids_in_page = self.extract_videos_from_page(content_html) + for video_id in ids_in_page: + yield self.url_result( + video_id, 'Youtube', video_id=video_id) + + mobj = re.search( + r'data-uix-load-more-href="/?(?P<more>[^"]+)"', + more_widget_html) + if not mobj: + break + + more = self._download_json( + 'https://youtube.com/%s' % mobj.group('more'), channel_id, + 'Downloading page #%s' % (pagenum + 1), + transform_source=uppercase_escape) + content_html = more['content_html'] + more_widget_html = more['load_more_widget_html'] + + return self.playlist_result(_entries(), channel_id) + + +class YoutubeUserIE(InfoExtractor): + IE_DESC = 'YouTube.com user videos (URL or "ytuser" keyword)' + _VALID_URL = r'(?:(?:(?:https?://)?(?:\w+\.)?youtube\.com/(?:user/)?(?!(?:attribution_link|watch|results)(?:$|[^a-z_A-Z0-9-])))|ytuser:)(?!feed/)(?P<id>[A-Za-z0-9_-]+)' + _TEMPLATE_URL = 'https://gdata.youtube.com/feeds/api/users/%s' + _GDATA_PAGE_SIZE = 50 + _GDATA_URL = 'https://gdata.youtube.com/feeds/api/users/%s/uploads?max-results=%d&start-index=%d&alt=json' + IE_NAME = 'youtube:user' + + _TESTS = [{ + 'url': 'https://www.youtube.com/user/TheLinuxFoundation', + 'playlist_mincount': 320, + 'info_dict': { + 'title': 'TheLinuxFoundation', + } + }, { + 'url': 'ytuser:phihag', + 'only_matching': True, + }] + + @classmethod + def suitable(cls, url): + 
# Don't return True if the url can be extracted with other youtube + # extractor, the regex would is too permissive and it would match. + other_ies = iter(klass for (name, klass) in globals().items() if name.endswith('IE') and klass is not cls) + if any(ie.suitable(url) for ie in other_ies): + return False + else: + return super(YoutubeUserIE, cls).suitable(url) + + def _real_extract(self, url): + username = self._match_id(url) + + # Download video ids using YouTube Data API. Result size per + # query is limited (currently to 50 videos) so we need to query + # page by page until there are no video ids - it means we got + # all of them. + + def download_page(pagenum): + start_index = pagenum * self._GDATA_PAGE_SIZE + 1 + + gdata_url = self._GDATA_URL % (username, self._GDATA_PAGE_SIZE, start_index) + page = self._download_webpage( + gdata_url, username, + 'Downloading video ids from %d to %d' % ( + start_index, start_index + self._GDATA_PAGE_SIZE)) + + try: + response = json.loads(page) + except ValueError as err: + raise ExtractorError('Invalid JSON in API response: ' + compat_str(err)) + if 'entry' not in response['feed']: + return + + # Extract video identifiers + entries = response['feed']['entry'] + for entry in entries: + title = entry['title']['$t'] + video_id = entry['id']['$t'].split('/')[-1] + yield { + '_type': 'url', + 'url': video_id, + 'ie_key': 'Youtube', + 'id': video_id, + 'title': title, + } + url_results = OnDemandPagedList(download_page, self._GDATA_PAGE_SIZE) + + return self.playlist_result(url_results, playlist_title=username) + + +class YoutubeSearchIE(SearchInfoExtractor): + IE_DESC = 'YouTube.com searches' + _API_URL = 'https://gdata.youtube.com/feeds/api/videos?q=%s&start-index=%i&max-results=50&v=2&alt=jsonc' + _MAX_RESULTS = 1000 + IE_NAME = 'youtube:search' + _SEARCH_KEY = 'ytsearch' + + def _get_n_results(self, query, n): + """Get a specified number of results for a query""" + + video_ids = [] + pagenum = 0 + limit = n + PAGE_SIZE = 50 
+ + while (PAGE_SIZE * pagenum) < limit: + result_url = self._API_URL % ( + compat_urllib_parse.quote_plus(query.encode('utf-8')), + (PAGE_SIZE * pagenum) + 1) + data_json = self._download_webpage( + result_url, video_id='query "%s"' % query, + note='Downloading page %s' % (pagenum + 1), + errnote='Unable to download API page') + data = json.loads(data_json) + api_response = data['data'] + + if 'items' not in api_response: + raise ExtractorError( + '[youtube] No video results', expected=True) + + new_ids = list(video['id'] for video in api_response['items']) + video_ids += new_ids + + limit = min(n, api_response['totalItems']) + pagenum += 1 + + if len(video_ids) > n: + video_ids = video_ids[:n] + videos = [self.url_result(video_id, 'Youtube', video_id=video_id) + for video_id in video_ids] + return self.playlist_result(videos, query) + + +class YoutubeSearchDateIE(YoutubeSearchIE): + IE_NAME = YoutubeSearchIE.IE_NAME + ':date' + _API_URL = 'https://gdata.youtube.com/feeds/api/videos?q=%s&start-index=%i&max-results=50&v=2&alt=jsonc&orderby=published' + _SEARCH_KEY = 'ytsearchdate' + IE_DESC = 'YouTube.com searches, newest videos first' + + +class YoutubeSearchURLIE(InfoExtractor): + IE_DESC = 'YouTube.com search URLs' + IE_NAME = 'youtube:search_url' + _VALID_URL = r'https?://(?:www\.)?youtube\.com/results\?(.*?&)?search_query=(?P<query>[^&]+)(?:[&]|$)' + _TESTS = [{ + 'url': 'https://www.youtube.com/results?baz=bar&search_query=youtube-dl+test+video&filters=video&lclk=video', + 'playlist_mincount': 5, + 'info_dict': { + 'title': 'youtube-dl test video', + } + }] + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + query = compat_urllib_parse.unquote_plus(mobj.group('query')) + + webpage = self._download_webpage(url, query) + result_code = self._search_regex( + r'(?s)<ol class="item-section"(.*?)</ol>', webpage, 'result HTML') + + part_codes = re.findall( + r'(?s)<h3 class="yt-lockup-title">(.*?)</h3>', result_code) + entries = [] + for 
part_code in part_codes: + part_title = self._html_search_regex( + [r'(?s)title="([^"]+)"', r'>([^<]+)</a>'], part_code, 'item title', fatal=False) + part_url_snippet = self._html_search_regex( + r'(?s)href="([^"]+)"', part_code, 'item URL') + part_url = compat_urlparse.urljoin( + 'https://www.youtube.com/', part_url_snippet) + entries.append({ + '_type': 'url', + 'url': part_url, + 'title': part_title, + }) + + return { + '_type': 'playlist', + 'entries': entries, + 'title': query, + } + + +class YoutubeShowIE(InfoExtractor): + IE_DESC = 'YouTube.com (multi-season) shows' + _VALID_URL = r'https?://www\.youtube\.com/show/(?P<id>[^?#]*)' + IE_NAME = 'youtube:show' + _TESTS = [{ + 'url': 'http://www.youtube.com/show/airdisasters', + 'playlist_mincount': 3, + 'info_dict': { + 'id': 'airdisasters', + 'title': 'Air Disasters', + } + }] + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + playlist_id = mobj.group('id') + webpage = self._download_webpage( + url, playlist_id, 'Downloading show webpage') + # There's one playlist for each season of the show + m_seasons = list(re.finditer(r'href="(/playlist\?list=.*?)"', webpage)) + self.to_screen('%s: Found %s seasons' % (playlist_id, len(m_seasons))) + entries = [ + self.url_result( + 'https://www.youtube.com' + season.group(1), 'YoutubePlaylist') + for season in m_seasons + ] + title = self._og_search_title(webpage, fatal=False) + + return { + '_type': 'playlist', + 'id': playlist_id, + 'title': title, + 'entries': entries, + } + + +class YoutubeFeedsInfoExtractor(YoutubeBaseInfoExtractor): + """ + Base class for extractors that fetch info from + http://www.youtube.com/feed_ajax + Subclasses must define the _FEED_NAME and _PLAYLIST_TITLE properties. 
+ """ + _LOGIN_REQUIRED = True + # use action_load_personal_feed instead of action_load_system_feed + _PERSONAL_FEED = False + + @property + def _FEED_TEMPLATE(self): + action = 'action_load_system_feed' + if self._PERSONAL_FEED: + action = 'action_load_personal_feed' + return 'https://www.youtube.com/feed_ajax?%s=1&feed_name=%s&paging=%%s' % (action, self._FEED_NAME) + + @property + def IE_NAME(self): + return 'youtube:%s' % self._FEED_NAME + + def _real_initialize(self): + self._login() + + def _real_extract(self, url): + feed_entries = [] + paging = 0 + for i in itertools.count(1): + info = self._download_json( + self._FEED_TEMPLATE % paging, + '%s feed' % self._FEED_NAME, + 'Downloading page %s' % i, + transform_source=uppercase_escape) + feed_html = info.get('feed_html') or info.get('content_html') + load_more_widget_html = info.get('load_more_widget_html') or feed_html + m_ids = re.finditer(r'"/watch\?v=(.*?)["&]', feed_html) + ids = orderedSet(m.group(1) for m in m_ids) + feed_entries.extend( + self.url_result(video_id, 'Youtube', video_id=video_id) + for video_id in ids) + mobj = re.search( + r'data-uix-load-more-href="/?[^"]+paging=(?P<paging>\d+)', + load_more_widget_html) + if mobj is None: + break + paging = mobj.group('paging') + return self.playlist_result(feed_entries, playlist_title=self._PLAYLIST_TITLE) + + +class YoutubeRecommendedIE(YoutubeFeedsInfoExtractor): + IE_DESC = 'YouTube.com recommended videos, ":ytrec" for short (requires authentication)' + _VALID_URL = r'https?://www\.youtube\.com/feed/recommended|:ytrec(?:ommended)?' 
+ _FEED_NAME = 'recommended' + _PLAYLIST_TITLE = 'Youtube Recommended videos' + + +class YoutubeWatchLaterIE(YoutubeFeedsInfoExtractor): + IE_DESC = 'Youtube watch later list, ":ytwatchlater" for short (requires authentication)' + _VALID_URL = r'https?://www\.youtube\.com/feed/watch_later|:ytwatchlater' + _FEED_NAME = 'watch_later' + _PLAYLIST_TITLE = 'Youtube Watch Later' + _PERSONAL_FEED = True + + +class YoutubeHistoryIE(YoutubeFeedsInfoExtractor): + IE_DESC = 'Youtube watch history, ":ythistory" for short (requires authentication)' + _VALID_URL = 'https?://www\.youtube\.com/feed/history|:ythistory' + _FEED_NAME = 'history' + _PERSONAL_FEED = True + _PLAYLIST_TITLE = 'Youtube Watch History' + + +class YoutubeFavouritesIE(YoutubeBaseInfoExtractor): + IE_NAME = 'youtube:favorites' + IE_DESC = 'YouTube.com favourite videos, ":ytfav" for short (requires authentication)' + _VALID_URL = r'https?://www\.youtube\.com/my_favorites|:ytfav(?:ou?rites)?' + _LOGIN_REQUIRED = True + + def _real_extract(self, url): + webpage = self._download_webpage('https://www.youtube.com/my_favorites', 'Youtube Favourites videos') + playlist_id = self._search_regex(r'list=(.+?)["&]', webpage, 'favourites playlist id') + return self.url_result(playlist_id, 'YoutubePlaylist') + + +class YoutubeSubscriptionsIE(YoutubePlaylistIE): + IE_NAME = 'youtube:subscriptions' + IE_DESC = 'YouTube.com subscriptions feed, "ytsubs" keyword (requires authentication)' + _VALID_URL = r'https?://www\.youtube\.com/feed/subscriptions|:ytsubs(?:criptions)?' 
+ _TESTS = [] + + def _real_extract(self, url): + title = 'Youtube Subscriptions' + page = self._download_webpage('https://www.youtube.com/feed/subscriptions', title) + + # The extraction process is the same as for playlists, but the regex + # for the video ids doesn't contain an index + ids = [] + more_widget_html = content_html = page + + for page_num in itertools.count(1): + matches = re.findall(r'href="\s*/watch\?v=([0-9A-Za-z_-]{11})', content_html) + new_ids = orderedSet(matches) + ids.extend(new_ids) + + mobj = re.search(r'data-uix-load-more-href="/?(?P<more>[^"]+)"', more_widget_html) + if not mobj: + break + + more = self._download_json( + 'https://youtube.com/%s' % mobj.group('more'), title, + 'Downloading page #%s' % page_num, + transform_source=uppercase_escape) + content_html = more['content_html'] + more_widget_html = more['load_more_widget_html'] + + return { + '_type': 'playlist', + 'title': title, + 'entries': self._ids_to_results(ids), + } + + +class YoutubeTruncatedURLIE(InfoExtractor): + IE_NAME = 'youtube:truncated_url' + IE_DESC = False # Do not list + _VALID_URL = r'''(?x) + (?:https?://)?[^/]+/watch\?(?: + feature=[a-z_]+| + annotation_id=annotation_[^&]+ + )?$| + (?:https?://)?(?:www\.)?youtube\.com/attribution_link\?a=[^&]+$ + ''' + + _TESTS = [{ + 'url': 'http://www.youtube.com/watch?annotation_id=annotation_3951667041', + 'only_matching': True, + }, { + 'url': 'http://www.youtube.com/watch?', + 'only_matching': True, + }] + + def _real_extract(self, url): + raise ExtractorError( + 'Did you forget to quote the URL? 
Remember that & is a meta ' + 'character in most shells, so you want to put the URL in quotes, ' + 'like youtube-dl ' + '"http://www.youtube.com/watch?feature=foo&v=BaW_jenozKc" ' + ' or simply youtube-dl BaW_jenozKc .', + expected=True) diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/zdf.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/zdf.py new file mode 100644 index 0000000000..74c76a9a04 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/zdf.py @@ -0,0 +1,159 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import functools +import re + +from .common import InfoExtractor +from ..utils import ( + int_or_none, + unified_strdate, + OnDemandPagedList, +) + + +def extract_from_xml_url(ie, video_id, xml_url): + doc = ie._download_xml( + xml_url, video_id, + note='Downloading video info', + errnote='Failed to download video info') + + title = doc.find('.//information/title').text + description = doc.find('.//information/detail').text + duration = int(doc.find('.//details/lengthSec').text) + uploader_node = doc.find('.//details/originChannelTitle') + uploader = None if uploader_node is None else uploader_node.text + uploader_id_node = doc.find('.//details/originChannelId') + uploader_id = None if uploader_id_node is None else uploader_id_node.text + upload_date = unified_strdate(doc.find('.//details/airtime').text) + + def xml_to_format(fnode): + video_url = fnode.find('url').text + is_available = 'http://www.metafilegenerator' not in video_url + + format_id = fnode.attrib['basetype'] + format_m = re.match(r'''(?x) + (?P<vcodec>[^_]+)_(?P<acodec>[^_]+)_(?P<container>[^_]+)_ + (?P<proto>[^_]+)_(?P<index>[^_]+)_(?P<indexproto>[^_]+) + ''', format_id) + + ext = format_m.group('container') + proto = format_m.group('proto').lower() + + quality = fnode.find('./quality').text + abr = int(fnode.find('./audioBitrate').text) // 1000 + vbr_node = 
fnode.find('./videoBitrate') + vbr = None if vbr_node is None else int(vbr_node.text) // 1000 + + width_node = fnode.find('./width') + width = None if width_node is None else int_or_none(width_node.text) + height_node = fnode.find('./height') + height = None if height_node is None else int_or_none(height_node.text) + + format_note = '' + if not format_note: + format_note = None + + return { + 'format_id': format_id + '-' + quality, + 'url': video_url, + 'ext': ext, + 'acodec': format_m.group('acodec'), + 'vcodec': format_m.group('vcodec'), + 'abr': abr, + 'vbr': vbr, + 'width': width, + 'height': height, + 'filesize': int_or_none(fnode.find('./filesize').text), + 'format_note': format_note, + 'protocol': proto, + '_available': is_available, + } + + format_nodes = doc.findall('.//formitaeten/formitaet') + formats = list(filter( + lambda f: f['_available'], + map(xml_to_format, format_nodes))) + ie._sort_formats(formats) + + return { + 'id': video_id, + 'title': title, + 'description': description, + 'duration': duration, + 'uploader': uploader, + 'uploader_id': uploader_id, + 'upload_date': upload_date, + 'formats': formats, + } + + +class ZDFIE(InfoExtractor): + _VALID_URL = r'(?:zdf:|zdf:video:|https?://www\.zdf\.de/ZDFmediathek(?:#)?/(.*beitrag/(?:video/)?))(?P<id>[0-9]+)(?:/[^/?]+)?(?:\?.*)?' + + _TEST = { + 'url': 'http://www.zdf.de/ZDFmediathek/beitrag/video/2037704/ZDFspezial---Ende-des-Machtpokers--?bc=sts;stt', + 'info_dict': { + 'id': '2037704', + 'ext': 'webm', + 'title': 'ZDFspezial - Ende des Machtpokers', + 'description': 'Union und SPD haben sich auf einen Koalitionsvertrag geeinigt. Aber was bedeutet das fц╪r die Bц╪rger? 
Sehen Sie hierzu das ZDFspezial "Ende des Machtpokers - Groц÷e Koalition fц╪r Deutschland".', + 'duration': 1022, + 'uploader': 'spezial', + 'uploader_id': '225948', + 'upload_date': '20131127', + }, + 'skip': 'Videos on ZDF.de are depublicised in short order', + } + + def _real_extract(self, url): + video_id = self._match_id(url) + xml_url = 'http://www.zdf.de/ZDFmediathek/xmlservice/web/beitragsDetails?ak=web&id=%s' % video_id + return extract_from_xml_url(self, video_id, xml_url) + + +class ZDFChannelIE(InfoExtractor): + _VALID_URL = r'(?:zdf:topic:|https?://www\.zdf\.de/ZDFmediathek(?:#)?/.*kanaluebersicht/)(?P<id>[0-9]+)' + _TEST = { + 'url': 'http://www.zdf.de/ZDFmediathek#/kanaluebersicht/1586442/sendung/Titanic', + 'info_dict': { + 'id': '1586442', + }, + 'playlist_count': 4, + } + _PAGE_SIZE = 50 + + def _fetch_page(self, channel_id, page): + offset = page * self._PAGE_SIZE + xml_url = ( + 'http://www.zdf.de/ZDFmediathek/xmlservice/web/aktuellste?ak=web&offset=%d&maxLength=%d&id=%s' + % (offset, self._PAGE_SIZE, channel_id)) + doc = self._download_xml( + xml_url, channel_id, + note='Downloading channel info', + errnote='Failed to download channel info') + + title = doc.find('.//information/title').text + description = doc.find('.//information/detail').text + for asset in doc.findall('.//teasers/teaser'): + a_type = asset.find('./type').text + a_id = asset.find('./details/assetId').text + if a_type not in ('video', 'topic'): + continue + yield { + '_type': 'url', + 'playlist_title': title, + 'playlist_description': description, + 'url': 'zdf:%s:%s' % (a_type, a_id), + } + + def _real_extract(self, url): + channel_id = self._match_id(url) + entries = OnDemandPagedList( + functools.partial(self._fetch_page, channel_id), self._PAGE_SIZE) + + return { + '_type': 'playlist', + 'id': channel_id, + 'entries': entries, + } diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/zingmp3.py 
class ZingMp3BaseInfoExtractor(InfoExtractor):
    """Shared logic for the mp3.zing.vn song and album extractors."""

    @staticmethod
    def _extract_item(item):
        """Map one <item> element of the player XML onto an info dict."""
        return {
            'title': item.find('./title').text.strip(),
            'url': item.find('./source').text,
            'ext': item.attrib['type'],
            'thumbnail': item.find('./backimage').text,
        }

    def _extract_player_xml(self, player_xml_url, id, playlist_title=None):
        """Download the player XML and return either a single-song info dict
        or a playlist dict, depending on how many <item> elements it holds."""
        player_xml = self._download_xml(player_xml_url, id, 'Downloading Player XML')
        items = player_xml.findall('./item')

        if len(items) == 1:
            # A single song: return its info dict directly.
            info = self._extract_item(items[0])
            info['id'] = id
            return info

        # Several songs: wrap them in a playlist, numbering each entry.
        entries = []
        for position, item in enumerate(items, 1):
            entry = self._extract_item(item)
            entry['id'] = '%s-%d' % (id, position)
            entries.append(entry)

        return {
            '_type': 'playlist',
            'id': id,
            'title': playlist_title,
            'entries': entries,
        }
class ZingMp3AlbumIE(ZingMp3BaseInfoExtractor):
    """Extractor for mp3.zing.vn album pages (returns a playlist)."""

    IE_NAME = 'zingmp3:album'
    IE_DESC = 'mp3.zing.vn albums'

    _VALID_URL = r'https?://mp3\.zing\.vn/album/(?P<slug>[^/]+)/(?P<album_id>\w+)\.html'
    _TESTS = [{
        'url': 'http://mp3.zing.vn/album/Lau-Dai-Tinh-Ai-Bang-Kieu-Minh-Tuyet/ZWZBWDAF.html',
        'info_dict': {
            '_type': 'playlist',
            'id': 'ZWZBWDAF',
            'title': 'Lц╒u д░ц═i Tц╛nh ц│i - BА╨╠ng KiА╩│u ft. Minh TuyА╨©t | Album 320 lossless',
        },
        'playlist_count': 10,
    }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        album_id = mobj.group('album_id')

        page = self._download_webpage(
            'http://mp3.zing.vn/album/%s/%s.html' % (mobj.group('slug'), album_id),
            album_id)
        # The flash player is configured through an xmlURL query parameter.
        xml_url = self._search_regex(
            r'&xmlURL=(?P<xml_url>[^&]+)&', page, 'player xml url')

        return self._extract_player_xml(
            xml_url, album_id,
            playlist_title=self._og_search_title(page))
class JSInterpreter(object):
    """Interpreter for a restricted JavaScript subset.

    Supported constructs (and nothing more): integer and JSON literals,
    local variables, plain and indexed assignment, `return`, member access
    (including `.length`), the built-in methods split/join/reverse/slice/
    splice, the `%` operator, and calls to functions defined in `code`.
    Anything else raises ExtractorError.
    """

    def __init__(self, code):
        # code: JavaScript source from which functions/objects are extracted.
        self.code = code
        self._functions = {}  # cache: function name -> compiled callable
        self._objects = {}    # cache: object name -> {member: callable}

    def interpret_statement(self, stmt, local_vars, allow_recursion=20):
        """Execute a single statement and return its value."""
        if allow_recursion < 0:
            raise ExtractorError('Recursion limit reached')

        # 'var x = ...' is handled exactly like 'x = ...'.
        if stmt.startswith('var '):
            stmt = stmt[len('var '):]
        ass_m = re.match(r'^(?P<out>[a-z]+)(?:\[(?P<index>[^\]]+)\])?' +
                         r'=(?P<expr>.*)$', stmt)
        if ass_m:
            if ass_m.groupdict().get('index'):
                # Indexed assignment: out[index] = expr
                def assign(val):
                    lvar = local_vars[ass_m.group('out')]
                    idx = self.interpret_expression(
                        ass_m.group('index'), local_vars, allow_recursion)
                    assert isinstance(idx, int)
                    lvar[idx] = val
                    return val
                expr = ass_m.group('expr')
            else:
                # Plain assignment: out = expr
                def assign(val):
                    local_vars[ass_m.group('out')] = val
                    return val
                expr = ass_m.group('expr')
        elif stmt.startswith('return '):
            assign = lambda v: v
            expr = stmt[len('return '):]
        else:
            # Try interpreting it as an expression
            expr = stmt
            assign = lambda v: v

        v = self.interpret_expression(expr, local_vars, allow_recursion)
        return assign(v)

    def interpret_expression(self, expr, local_vars, allow_recursion):
        """Evaluate an expression; tried forms fall through in order."""
        if expr.isdigit():
            return int(expr)

        if expr.isalpha():
            return local_vars[expr]

        # String/array literals are valid JSON and parsed as such.
        try:
            return json.loads(expr)
        except ValueError:
            pass

        m = re.match(
            r'^(?P<var>[$a-zA-Z0-9_]+)\.(?P<member>[^(]+)(?:\(+(?P<args>[^()]*)\))?$',
            expr)
        if m:
            variable = m.group('var')
            member = m.group('member')
            arg_str = m.group('args')

            # The receiver is either a local variable or an object literal
            # extracted (and cached) from self.code.
            if variable in local_vars:
                obj = local_vars[variable]
            else:
                if variable not in self._objects:
                    self._objects[variable] = self.extract_object(variable)
                obj = self._objects[variable]

            if arg_str is None:
                # Member access
                if member == 'length':
                    return len(obj)
                return obj[member]

            assert expr.endswith(')')
            # Function call
            if arg_str == '':
                argvals = tuple()
            else:
                argvals = tuple([
                    self.interpret_expression(v, local_vars, allow_recursion)
                    for v in arg_str.split(',')])

            # Built-in string/array methods, emulated with Python equivalents.
            if member == 'split':
                assert argvals == ('',)
                return list(obj)
            if member == 'join':
                assert len(argvals) == 1
                return argvals[0].join(obj)
            if member == 'reverse':
                assert len(argvals) == 0
                obj.reverse()
                return obj
            if member == 'slice':
                assert len(argvals) == 1
                return obj[argvals[0]:]
            if member == 'splice':
                assert isinstance(obj, list)
                index, howMany = argvals
                res = []
                for i in range(index, min(index + howMany, len(obj))):
                    res.append(obj.pop(index))
                return res

            # Fallback: call a member function with the whole argval tuple.
            return obj[member](argvals)

        m = re.match(
            r'^(?P<in>[a-z]+)\[(?P<idx>.+)\]$', expr)
        if m:
            # Array indexing: in[idx]
            val = local_vars[m.group('in')]
            idx = self.interpret_expression(
                m.group('idx'), local_vars, allow_recursion - 1)
            return val[idx]

        m = re.match(r'^(?P<a>.+?)(?P<op>[%])(?P<b>.+?)$', expr)
        if m:
            # The only binary operator supported is %.
            a = self.interpret_expression(
                m.group('a'), local_vars, allow_recursion)
            b = self.interpret_expression(
                m.group('b'), local_vars, allow_recursion)
            return a % b

        m = re.match(
            r'^(?P<func>[a-zA-Z$]+)\((?P<args>[a-z0-9,]+)\)$', expr)
        if m:
            # Call to a named function defined in self.code.
            fname = m.group('func')
            argvals = tuple([
                int(v) if v.isdigit() else local_vars[v]
                for v in m.group('args').split(',')])
            if fname not in self._functions:
                self._functions[fname] = self.extract_function(fname)
            return self._functions[fname](argvals)
        raise ExtractorError('Unsupported JS expression %r' % expr)

    def extract_object(self, objname):
        """Extract an object literal with function-valued members from the code."""
        obj = {}
        obj_m = re.search(
            (r'(?:var\s+)?%s\s*=\s*\{' % re.escape(objname)) +
            r'\s*(?P<fields>([a-zA-Z$0-9]+\s*:\s*function\(.*?\)\s*\{.*?\})*)' +
            r'\}\s*;',
            self.code)
        fields = obj_m.group('fields')
        # Currently, it only supports function definitions
        fields_m = re.finditer(
            r'(?P<key>[a-zA-Z$0-9]+)\s*:\s*function'
            r'\((?P<args>[a-z,]+)\){(?P<code>[^}]+)}',
            fields)
        for f in fields_m:
            argnames = f.group('args').split(',')
            obj[f.group('key')] = self.build_function(argnames, f.group('code'))

        return obj

    def extract_function(self, funcname):
        """Locate a function (definition or assignment form) by name and compile it."""
        func_m = re.search(
            (r'(?:function %s|[{;]%s\s*=\s*function)' % (
                re.escape(funcname), re.escape(funcname))) +
            r'\((?P<args>[a-z,]+)\){(?P<code>[^}]+)}',
            self.code)
        if func_m is None:
            raise ExtractorError('Could not find JS function %r' % funcname)
        argnames = func_m.group('args').split(',')

        return self.build_function(argnames, func_m.group('code'))

    def build_function(self, argnames, code):
        """Compile a parsed function body into a Python callable.

        The callable takes a single sequence of argument values; statements
        are split on ';' and the value of the LAST statement executed is
        returned (there is no early return).
        """
        def resf(args):
            local_vars = dict(zip(argnames, args))
            for stmt in code.split(';'):
                res = self.interpret_statement(stmt, local_vars)
            return res
        return resf
def parseOpts(overrideArguments=None):
    """Build the youtube-dl optparse parser and parse the argument vector.

    When *overrideArguments* is given, only those arguments are parsed;
    otherwise options are accumulated from /etc/youtube-dl.conf, the user
    configuration file and sys.argv[1:] (unless --ignore-config is present,
    which disables the earlier layers).

    Returns the tuple (parser, opts, args).
    """
    def _readOptions(filename_bytes, default=[]):
        # Read one config file and split it into an argv-style list.
        # NOTE(review): mutable default is shared across calls; callers only
        # concatenate the result, so this is benign, but default=None would
        # be the safer idiom.
        try:
            optionf = open(filename_bytes)
        except IOError:
            return default  # silently skip if file is not present
        try:
            res = []
            for l in optionf:
                res += shlex.split(l, comments=True)
        finally:
            optionf.close()
        return res

    def _readUserConf():
        # Configuration lookup cascade: $XDG_CONFIG_HOME, ~/.config,
        # %APPDATA% (Windows), then dotfiles in the home directory.
        xdg_config_home = compat_getenv('XDG_CONFIG_HOME')
        if xdg_config_home:
            userConfFile = os.path.join(xdg_config_home, 'youtube-dl', 'config')
            if not os.path.isfile(userConfFile):
                userConfFile = os.path.join(xdg_config_home, 'youtube-dl.conf')
        else:
            userConfFile = os.path.join(compat_expanduser('~'), '.config', 'youtube-dl', 'config')
            if not os.path.isfile(userConfFile):
                userConfFile = os.path.join(compat_expanduser('~'), '.config', 'youtube-dl.conf')
        userConf = _readOptions(userConfFile, None)

        if userConf is None:
            appdata_dir = compat_getenv('appdata')
            if appdata_dir:
                userConf = _readOptions(
                    os.path.join(appdata_dir, 'youtube-dl', 'config'),
                    default=None)
                if userConf is None:
                    userConf = _readOptions(
                        os.path.join(appdata_dir, 'youtube-dl', 'config.txt'),
                        default=None)

        if userConf is None:
            userConf = _readOptions(
                os.path.join(compat_expanduser('~'), 'youtube-dl.conf'),
                default=None)
        if userConf is None:
            userConf = _readOptions(
                os.path.join(compat_expanduser('~'), 'youtube-dl.conf.txt'),
                default=None)

        if userConf is None:
            userConf = []

        return userConf

    def _format_option_string(option):
        ''' ('-o', '--option') -> -o, --format METAVAR'''

        opts = []

        if option._short_opts:
            opts.append(option._short_opts[0])
        if option._long_opts:
            opts.append(option._long_opts[0])
        if len(opts) > 1:
            opts.insert(1, ', ')

        if option.takes_value():
            opts.append(' %s' % option.metavar)

        return "".join(opts)

    def _comma_separated_values_options_callback(option, opt_str, value, parser):
        # optparse callback: store 'a,b,c' as the list ['a', 'b', 'c'].
        setattr(parser.values, option.dest, value.split(','))

    def _hide_login_info(opts):
        # Replace credential values with 'PRIVATE' before they are logged.
        opts = list(opts)
        for private_opt in ['-p', '--password', '-u', '--username', '--video-password']:
            try:
                i = opts.index(private_opt)
                opts[i + 1] = 'PRIVATE'
            except ValueError:
                pass
        return opts

    # No need to wrap help messages if we're on a wide console
    columns = get_term_width()
    max_width = columns if columns else 80
    max_help_position = 80

    fmt = optparse.IndentedHelpFormatter(width=max_width, max_help_position=max_help_position)
    fmt.format_option_strings = _format_option_string

    kw = {
        'version': __version__,
        'formatter': fmt,
        'usage': '%prog [options] url [url...]',
        'conflict_handler': 'resolve',
    }

    parser = optparse.OptionParser(**compat_kwargs(kw))

    general = optparse.OptionGroup(parser, 'General Options')
    general.add_option(
        '-h', '--help',
        action='help',
        help='print this help text and exit')
    general.add_option(
        '-v', '--version',
        action='version',
        help='print program version and exit')
    general.add_option(
        '-U', '--update',
        action='store_true', dest='update_self',
        help='update this program to latest version. Make sure that you have sufficient permissions (run with sudo if needed)')
    general.add_option(
        '-i', '--ignore-errors',
        action='store_true', dest='ignoreerrors', default=False,
        help='continue on download errors, for example to skip unavailable videos in a playlist')
    general.add_option(
        '--abort-on-error',
        action='store_false', dest='ignoreerrors',
        help='Abort downloading of further videos (in the playlist or the command line) if an error occurs')
    general.add_option(
        '--dump-user-agent',
        action='store_true', dest='dump_user_agent', default=False,
        help='display the current browser identification')
    general.add_option(
        '--list-extractors',
        action='store_true', dest='list_extractors', default=False,
        help='List all supported extractors and the URLs they would handle')
    general.add_option(
        '--extractor-descriptions',
        action='store_true', dest='list_extractor_descriptions', default=False,
        help='Output descriptions of all supported extractors')
    general.add_option(
        '--proxy', dest='proxy',
        default=None, metavar='URL',
        help='Use the specified HTTP/HTTPS proxy. Pass in an empty string (--proxy "") for direct connection')
    general.add_option(
        '--socket-timeout',
        dest='socket_timeout', type=float, default=None,
        help='Time to wait before giving up, in seconds')
    general.add_option(
        '--default-search',
        dest='default_search', metavar='PREFIX',
        help='Use this prefix for unqualified URLs. For example "gvsearch2:" downloads two videos from google videos for youtube-dl "large apple". Use the value "auto" to let youtube-dl guess ("auto_warning" to emit a warning when guessing). "error" just throws an error. The default value "fixup_error" repairs broken URLs, but emits an error if this is not possible instead of searching.')
    general.add_option(
        '--ignore-config',
        action='store_true',
        help='Do not read configuration files. '
        'When given in the global configuration file /etc/youtube-dl.conf: '
        'Do not read the user configuration in ~/.config/youtube-dl/config '
        '(%APPDATA%/youtube-dl/config.txt on Windows)')
    general.add_option(
        '--flat-playlist',
        action='store_const', dest='extract_flat', const='in_playlist',
        default=False,
        help='Do not extract the videos of a playlist, only list them.')

    selection = optparse.OptionGroup(parser, 'Video Selection')
    selection.add_option(
        '--playlist-start',
        dest='playliststart', metavar='NUMBER', default=1, type=int,
        help='playlist video to start at (default is %default)')
    selection.add_option(
        '--playlist-end',
        dest='playlistend', metavar='NUMBER', default=None, type=int,
        help='playlist video to end at (default is last)')
    selection.add_option(
        '--match-title',
        dest='matchtitle', metavar='REGEX',
        help='download only matching titles (regex or caseless sub-string)')
    selection.add_option(
        '--reject-title',
        dest='rejecttitle', metavar='REGEX',
        help='skip download for matching titles (regex or caseless sub-string)')
    selection.add_option(
        '--max-downloads',
        dest='max_downloads', metavar='NUMBER', type=int, default=None,
        help='Abort after downloading NUMBER files')
    selection.add_option(
        '--min-filesize',
        metavar='SIZE', dest='min_filesize', default=None,
        help='Do not download any videos smaller than SIZE (e.g. 50k or 44.6m)')
    selection.add_option(
        '--max-filesize',
        metavar='SIZE', dest='max_filesize', default=None,
        help='Do not download any videos larger than SIZE (e.g. 50k or 44.6m)')
    selection.add_option(
        '--date',
        metavar='DATE', dest='date', default=None,
        help='download only videos uploaded in this date')
    selection.add_option(
        '--datebefore',
        metavar='DATE', dest='datebefore', default=None,
        help='download only videos uploaded on or before this date (i.e. inclusive)')
    selection.add_option(
        '--dateafter',
        metavar='DATE', dest='dateafter', default=None,
        help='download only videos uploaded on or after this date (i.e. inclusive)')
    selection.add_option(
        '--min-views',
        metavar='COUNT', dest='min_views', default=None, type=int,
        help='Do not download any videos with less than COUNT views',)
    selection.add_option(
        '--max-views',
        metavar='COUNT', dest='max_views', default=None, type=int,
        help='Do not download any videos with more than COUNT views')
    selection.add_option(
        '--no-playlist',
        action='store_true', dest='noplaylist', default=False,
        help='If the URL refers to a video and a playlist, download only the video.')
    selection.add_option(
        '--age-limit',
        metavar='YEARS', dest='age_limit', default=None, type=int,
        help='download only videos suitable for the given age')
    selection.add_option(
        '--download-archive', metavar='FILE',
        dest='download_archive',
        help='Download only videos not listed in the archive file. Record the IDs of all downloaded videos in it.')
    selection.add_option(
        '--include-ads',
        dest='include_ads', action='store_true',
        help='Download advertisements as well (experimental)')

    authentication = optparse.OptionGroup(parser, 'Authentication Options')
    authentication.add_option(
        '-u', '--username',
        dest='username', metavar='USERNAME',
        help='login with this account ID')
    authentication.add_option(
        '-p', '--password',
        dest='password', metavar='PASSWORD',
        help='account password')
    authentication.add_option(
        '-2', '--twofactor',
        dest='twofactor', metavar='TWOFACTOR',
        help='two-factor auth code')
    authentication.add_option(
        '-n', '--netrc',
        action='store_true', dest='usenetrc', default=False,
        help='use .netrc authentication data')
    authentication.add_option(
        '--video-password',
        dest='videopassword', metavar='PASSWORD',
        help='video password (vimeo, smotri)')

    video_format = optparse.OptionGroup(parser, 'Video Format Options')
    video_format.add_option(
        '-f', '--format',
        action='store', dest='format', metavar='FORMAT', default=None,
        help=(
            'video format code, specify the order of preference using'
            ' slashes: -f 22/17/18 . -f mp4 , -f m4a and -f flv are also'
            ' supported. You can also use the special names "best",'
            ' "bestvideo", "bestaudio", "worst", "worstvideo" and'
            ' "worstaudio". By default, youtube-dl will pick the best quality.'
            ' Use commas to download multiple audio formats, such as'
            ' -f 136/137/mp4/bestvideo,140/m4a/bestaudio.'
            ' You can merge the video and audio of two formats into a single'
            ' file using -f <video-format>+<audio-format> (requires ffmpeg or'
            ' avconv), for example -f bestvideo+bestaudio.'))
    video_format.add_option(
        '--all-formats',
        action='store_const', dest='format', const='all',
        help='download all available video formats')
    video_format.add_option(
        '--prefer-free-formats',
        action='store_true', dest='prefer_free_formats', default=False,
        help='prefer free video formats unless a specific one is requested')
    video_format.add_option(
        '--max-quality',
        action='store', dest='format_limit', metavar='FORMAT',
        help='highest quality format to download')
    video_format.add_option(
        '-F', '--list-formats',
        action='store_true', dest='listformats',
        help='list all available formats')
    video_format.add_option(
        '--youtube-include-dash-manifest',
        action='store_true', dest='youtube_include_dash_manifest', default=True,
        help=optparse.SUPPRESS_HELP)
    video_format.add_option(
        '--youtube-skip-dash-manifest',
        action='store_false', dest='youtube_include_dash_manifest',
        help='Do not download the DASH manifest on YouTube videos')

    subtitles = optparse.OptionGroup(parser, 'Subtitle Options')
    subtitles.add_option(
        '--write-sub', '--write-srt',
        action='store_true', dest='writesubtitles', default=False,
        help='write subtitle file')
    subtitles.add_option(
        '--write-auto-sub', '--write-automatic-sub',
        action='store_true', dest='writeautomaticsub', default=False,
        help='write automatic subtitle file (youtube only)')
    subtitles.add_option(
        '--all-subs',
        action='store_true', dest='allsubtitles', default=False,
        help='downloads all the available subtitles of the video')
    subtitles.add_option(
        '--list-subs',
        action='store_true', dest='listsubtitles', default=False,
        help='lists all available subtitles for the video')
    subtitles.add_option(
        '--sub-format',
        action='store', dest='subtitlesformat', metavar='FORMAT', default='srt',
        help='subtitle format (default=srt) ([sbv/vtt] youtube only)')
    subtitles.add_option(
        '--sub-lang', '--sub-langs', '--srt-lang',
        action='callback', dest='subtitleslangs', metavar='LANGS', type='str',
        default=[], callback=_comma_separated_values_options_callback,
        help='languages of the subtitles to download (optional) separated by commas, use IETF language tags like \'en,pt\'')

    downloader = optparse.OptionGroup(parser, 'Download Options')
    downloader.add_option(
        '-r', '--rate-limit',
        dest='ratelimit', metavar='LIMIT',
        help='maximum download rate in bytes per second (e.g. 50K or 4.2M)')
    downloader.add_option(
        '-R', '--retries',
        dest='retries', metavar='RETRIES', default=10,
        help='number of retries (default is %default)')
    downloader.add_option(
        '--buffer-size',
        dest='buffersize', metavar='SIZE', default='1024',
        help='size of download buffer (e.g. 1024 or 16K) (default is %default)')
    downloader.add_option(
        '--no-resize-buffer',
        action='store_true', dest='noresizebuffer', default=False,
        help='do not automatically adjust the buffer size. By default, the buffer size is automatically resized from an initial value of SIZE.')
    downloader.add_option(
        '--test',
        action='store_true', dest='test', default=False,
        help=optparse.SUPPRESS_HELP)
    downloader.add_option(
        '--playlist-reverse',
        action='store_true',
        help='Download playlist videos in reverse order')

    workarounds = optparse.OptionGroup(parser, 'Workarounds')
    workarounds.add_option(
        '--encoding',
        dest='encoding', metavar='ENCODING',
        help='Force the specified encoding (experimental)')
    workarounds.add_option(
        '--no-check-certificate',
        action='store_true', dest='no_check_certificate', default=False,
        help='Suppress HTTPS certificate validation.')
    workarounds.add_option(
        '--prefer-insecure',
        '--prefer-unsecure', action='store_true', dest='prefer_insecure',
        help='Use an unencrypted connection to retrieve information about the video. (Currently supported only for YouTube)')
    workarounds.add_option(
        '--user-agent',
        metavar='UA', dest='user_agent',
        help='specify a custom user agent')
    workarounds.add_option(
        '--referer',
        metavar='URL', dest='referer', default=None,
        help='specify a custom referer, use if the video access is restricted to one domain',
    )
    workarounds.add_option(
        '--add-header',
        metavar='FIELD:VALUE', dest='headers', action='append',
        help='specify a custom HTTP header and its value, separated by a colon \':\'. You can use this option multiple times',
    )
    workarounds.add_option(
        '--bidi-workaround',
        dest='bidi_workaround', action='store_true',
        help='Work around terminals that lack bidirectional text support. Requires bidiv or fribidi executable in PATH')

    verbosity = optparse.OptionGroup(parser, 'Verbosity / Simulation Options')
    verbosity.add_option(
        '-q', '--quiet',
        action='store_true', dest='quiet', default=False,
        help='activates quiet mode')
    verbosity.add_option(
        '--no-warnings',
        dest='no_warnings', action='store_true', default=False,
        help='Ignore warnings')
    verbosity.add_option(
        '-s', '--simulate',
        action='store_true', dest='simulate', default=False,
        help='do not download the video and do not write anything to disk',)
    verbosity.add_option(
        '--skip-download',
        action='store_true', dest='skip_download', default=False,
        help='do not download the video',)
    verbosity.add_option(
        '-g', '--get-url',
        action='store_true', dest='geturl', default=False,
        help='simulate, quiet but print URL')
    verbosity.add_option(
        '-e', '--get-title',
        action='store_true', dest='gettitle', default=False,
        help='simulate, quiet but print title')
    verbosity.add_option(
        '--get-id',
        action='store_true', dest='getid', default=False,
        help='simulate, quiet but print id')
    verbosity.add_option(
        '--get-thumbnail',
        action='store_true', dest='getthumbnail', default=False,
        help='simulate, quiet but print thumbnail URL')
    verbosity.add_option(
        '--get-description',
        action='store_true', dest='getdescription', default=False,
        help='simulate, quiet but print video description')
    verbosity.add_option(
        '--get-duration',
        action='store_true', dest='getduration', default=False,
        help='simulate, quiet but print video length')
    verbosity.add_option(
        '--get-filename',
        action='store_true', dest='getfilename', default=False,
        help='simulate, quiet but print output filename')
    verbosity.add_option(
        '--get-format',
        action='store_true', dest='getformat', default=False,
        help='simulate, quiet but print output format')
    verbosity.add_option(
        '-j', '--dump-json',
        action='store_true', dest='dumpjson', default=False,
        help='simulate, quiet but print JSON information. See --output for a description of available keys.')
    verbosity.add_option(
        '-J', '--dump-single-json',
        action='store_true', dest='dump_single_json', default=False,
        help='simulate, quiet but print JSON information for each command-line argument. If the URL refers to a playlist, dump the whole playlist information in a single line.')
    verbosity.add_option(
        '--newline',
        action='store_true', dest='progress_with_newline', default=False,
        help='output progress bar as new lines')
    verbosity.add_option(
        '--no-progress',
        action='store_true', dest='noprogress', default=False,
        help='do not print progress bar')
    verbosity.add_option(
        '--console-title',
        action='store_true', dest='consoletitle', default=False,
        help='display progress in console titlebar')
    verbosity.add_option(
        '-v', '--verbose',
        action='store_true', dest='verbose', default=False,
        help='print various debugging information')
    verbosity.add_option(
        '--dump-intermediate-pages',
        action='store_true', dest='dump_intermediate_pages', default=False,
        help='print downloaded pages to debug problems (very verbose)')
    verbosity.add_option(
        '--write-pages',
        action='store_true', dest='write_pages', default=False,
        help='Write downloaded intermediary pages to files in the current directory to debug problems')
    verbosity.add_option(
        '--youtube-print-sig-code',
        action='store_true', dest='youtube_print_sig_code', default=False,
        help=optparse.SUPPRESS_HELP)
    verbosity.add_option(
        '--print-traffic',
        dest='debug_printtraffic', action='store_true', default=False,
        help='Display sent and read HTTP traffic')

    filesystem = optparse.OptionGroup(parser, 'Filesystem Options')
    filesystem.add_option(
        '-a', '--batch-file',
        dest='batchfile', metavar='FILE',
        help='file containing URLs to download (\'-\' for stdin)')
    filesystem.add_option(
        '--id', default=False,
        action='store_true', dest='useid', help='use only video ID in file name')
    filesystem.add_option(
        '-o', '--output',
        dest='outtmpl', metavar='TEMPLATE',
        help=('output filename template. Use %(title)s to get the title, '
              '%(uploader)s for the uploader name, %(uploader_id)s for the uploader nickname if different, '
              '%(autonumber)s to get an automatically incremented number, '
              '%(ext)s for the filename extension, '
              '%(format)s for the format description (like "22 - 1280x720" or "HD"), '
              '%(format_id)s for the unique id of the format (like Youtube\'s itags: "137"), '
              '%(upload_date)s for the upload date (YYYYMMDD), '
              '%(extractor)s for the provider (youtube, metacafe, etc), '
              '%(id)s for the video id, '
              '%(playlist_title)s, %(playlist_id)s, or %(playlist)s (=title if present, ID otherwise) for the playlist the video is in, '
              '%(playlist_index)s for the position in the playlist. '
              '%(height)s and %(width)s for the width and height of the video format. '
              '%(resolution)s for a textual description of the resolution of the video format. '
              '%% for a literal percent. '
              'Use - to output to stdout. Can also be used to download to a different directory, '
              'for example with -o \'/my/downloads/%(uploader)s/%(title)s-%(id)s.%(ext)s\' .'))
    filesystem.add_option(
        '--autonumber-size',
        dest='autonumber_size', metavar='NUMBER',
        help='Specifies the number of digits in %(autonumber)s when it is present in output filename template or --auto-number option is given')
    filesystem.add_option(
        '--restrict-filenames',
        action='store_true', dest='restrictfilenames', default=False,
        help='Restrict filenames to only ASCII characters, and avoid "&" and spaces in filenames')
    filesystem.add_option(
        '-A', '--auto-number',
        action='store_true', dest='autonumber', default=False,
        help='[deprecated; use -o "%(autonumber)s-%(title)s.%(ext)s" ] number downloaded files starting from 00000')
    filesystem.add_option(
        '-t', '--title',
        action='store_true', dest='usetitle', default=False,
        help='[deprecated] use title in file name (default)')
    filesystem.add_option(
        '-l', '--literal', default=False,
        action='store_true', dest='usetitle',
        help='[deprecated] alias of --title')
    filesystem.add_option(
        '-w', '--no-overwrites',
        action='store_true', dest='nooverwrites', default=False,
        help='do not overwrite files')
    filesystem.add_option(
        '-c', '--continue',
        action='store_true', dest='continue_dl', default=True,
        help='force resume of partially downloaded files. By default, youtube-dl will resume downloads if possible.')
    filesystem.add_option(
        '--no-continue',
        action='store_false', dest='continue_dl',
        help='do not resume partially downloaded files (restart from beginning)')
    filesystem.add_option(
        '--no-part',
        action='store_true', dest='nopart', default=False,
        help='do not use .part files - write directly into output file')
    filesystem.add_option(
        '--no-mtime',
        action='store_false', dest='updatetime', default=True,
        help='do not use the Last-modified header to set the file modification time')
    filesystem.add_option(
        '--write-description',
        action='store_true', dest='writedescription', default=False,
        help='write video description to a .description file')
    filesystem.add_option(
        '--write-info-json',
        action='store_true', dest='writeinfojson', default=False,
        help='write video metadata to a .info.json file')
    filesystem.add_option(
        '--write-annotations',
        action='store_true', dest='writeannotations', default=False,
        help='write video annotations to a .annotation file')
    filesystem.add_option(
        '--write-thumbnail',
        action='store_true', dest='writethumbnail', default=False,
        help='write thumbnail image to disk')
    filesystem.add_option(
        '--load-info',
        dest='load_info_filename', metavar='FILE',
        help='json file containing the video information (created with the "--write-json" option)')
    filesystem.add_option(
        '--cookies',
        dest='cookiefile', metavar='FILE',
        help='file to read cookies from and dump cookie jar in')
    filesystem.add_option(
        '--cache-dir', dest='cachedir', default=None, metavar='DIR',
        help='Location in the filesystem where youtube-dl can store some downloaded information permanently. By default $XDG_CACHE_HOME/youtube-dl or ~/.cache/youtube-dl . At the moment, only YouTube player files (for videos with obfuscated signatures) are cached, but that may change.')
    filesystem.add_option(
        '--no-cache-dir', action='store_const', const=False, dest='cachedir',
        help='Disable filesystem caching')
    filesystem.add_option(
        '--rm-cache-dir',
        action='store_true', dest='rm_cachedir',
        help='Delete all filesystem cache files')

    postproc = optparse.OptionGroup(parser, 'Post-processing Options')
    postproc.add_option(
        '-x', '--extract-audio',
        action='store_true', dest='extractaudio', default=False,
        help='convert video files to audio-only files (requires ffmpeg or avconv and ffprobe or avprobe)')
    postproc.add_option(
        '--audio-format', metavar='FORMAT', dest='audioformat', default='best',
        help='"best", "aac", "vorbis", "mp3", "m4a", "opus", or "wav"; "%default" by default')
    postproc.add_option(
        '--audio-quality', metavar='QUALITY',
        dest='audioquality', default='5',
        help='ffmpeg/avconv audio quality specification, insert a value between 0 (better) and 9 (worse) for VBR or a specific bitrate like 128K (default %default)')
    postproc.add_option(
        '--recode-video',
        metavar='FORMAT', dest='recodevideo', default=None,
        help='Encode the video to another format if necessary (currently supported: mp4|flv|ogg|webm|mkv)')
    postproc.add_option(
        '-k', '--keep-video',
        action='store_true', dest='keepvideo', default=False,
        help='keeps the video file on disk after the post-processing; the video is erased by default')
    postproc.add_option(
        '--no-post-overwrites',
        action='store_true', dest='nopostoverwrites', default=False,
        help='do not overwrite post-processed files; the post-processed files are overwritten by default')
    postproc.add_option(
        '--embed-subs',
        action='store_true', dest='embedsubtitles', default=False,
        help='embed subtitles in the video (only for mp4 videos)')
    postproc.add_option(
        '--embed-thumbnail',
        action='store_true', dest='embedthumbnail', default=False,
        help='embed thumbnail in the audio as cover art')
    postproc.add_option(
        '--add-metadata',
        action='store_true', dest='addmetadata', default=False,
        help='write metadata to the video file')
    postproc.add_option(
        '--xattrs',
        action='store_true', dest='xattrs', default=False,
        help='write metadata to the video file\'s xattrs (using dublin core and xdg standards)')
    postproc.add_option(
        '--prefer-avconv',
        action='store_false', dest='prefer_ffmpeg',
        help='Prefer avconv over ffmpeg for running the postprocessors (default)')
    postproc.add_option(
        '--prefer-ffmpeg',
        action='store_true', dest='prefer_ffmpeg',
        help='Prefer ffmpeg over avconv for running the postprocessors')
    postproc.add_option(
        '--exec',
        metavar='CMD', dest='exec_cmd',
        help='Execute a command on the file after downloading, similar to find\'s -exec syntax. Example: --exec \'adb push {} /sdcard/Music/ && rm {}\'')

    parser.add_option_group(general)
    parser.add_option_group(selection)
    parser.add_option_group(downloader)
    parser.add_option_group(filesystem)
    parser.add_option_group(verbosity)
    parser.add_option_group(workarounds)
    parser.add_option_group(video_format)
    parser.add_option_group(subtitles)
    parser.add_option_group(authentication)
    parser.add_option_group(postproc)

    if overrideArguments is not None:
        # Caller-supplied argv replaces every configuration layer.
        opts, args = parser.parse_args(overrideArguments)
        if opts.verbose:
            write_string('[debug] Override config: ' + repr(overrideArguments) + '\n')
    else:
        # Layering: system config < user config < command line; --ignore-config
        # in a layer disables the layers that would come before it.
        commandLineConf = sys.argv[1:]
        if '--ignore-config' in commandLineConf:
            systemConf = []
            userConf = []
        else:
            systemConf = _readOptions('/etc/youtube-dl.conf')
            if '--ignore-config' in systemConf:
                userConf = []
            else:
                userConf = _readUserConf()
        argv = systemConf + userConf + commandLineConf

        opts, args = parser.parse_args(argv)
        if opts.verbose:
            write_string('[debug] System config: ' + repr(_hide_login_info(systemConf)) + '\n')
            write_string('[debug] User config: ' + repr(_hide_login_info(userConf)) + '\n')
            write_string('[debug] Command-line args: ' + repr(_hide_login_info(commandLineConf)) + '\n')

    return parser, opts, args
write_string('[debug] System config: ' + repr(_hide_login_info(systemConf)) + '\n') + write_string('[debug] User config: ' + repr(_hide_login_info(userConf)) + '\n') + write_string('[debug] Command-line args: ' + repr(_hide_login_info(commandLineConf)) + '\n') + + return parser, opts, args diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/postprocessor/__init__.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/postprocessor/__init__.py new file mode 100644 index 0000000000..7f505b58e2 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/postprocessor/__init__.py @@ -0,0 +1,32 @@ +from __future__ import unicode_literals + +from .atomicparsley import AtomicParsleyPP +from .ffmpeg import ( + FFmpegPostProcessor, + FFmpegAudioFixPP, + FFmpegEmbedSubtitlePP, + FFmpegExtractAudioPP, + FFmpegMergerPP, + FFmpegMetadataPP, + FFmpegVideoConvertorPP, +) +from .xattrpp import XAttrMetadataPP +from .execafterdownload import ExecAfterDownloadPP + + +def get_postprocessor(key): + return globals()[key + 'PP'] + + +__all__ = [ + 'AtomicParsleyPP', + 'ExecAfterDownloadPP', + 'FFmpegAudioFixPP', + 'FFmpegEmbedSubtitlePP', + 'FFmpegExtractAudioPP', + 'FFmpegMergerPP', + 'FFmpegMetadataPP', + 'FFmpegPostProcessor', + 'FFmpegVideoConvertorPP', + 'XAttrMetadataPP', +] diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/postprocessor/atomicparsley.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/postprocessor/atomicparsley.py new file mode 100644 index 0000000000..448ccc5f34 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/postprocessor/atomicparsley.py @@ -0,0 +1,57 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + + +import os +import subprocess + +from .common import PostProcessor +from ..compat import ( + compat_urlretrieve, +) +from ..utils import ( + check_executable, + encodeFilename, + PostProcessingError, + prepend_extension, + shell_quote 
+) + + +class AtomicParsleyPPError(PostProcessingError): + pass + + +class AtomicParsleyPP(PostProcessor): + def run(self, info): + if not check_executable('AtomicParsley', ['-v']): + raise AtomicParsleyPPError('AtomicParsley was not found. Please install.') + + filename = info['filepath'] + temp_filename = prepend_extension(filename, 'temp') + temp_thumbnail = prepend_extension(filename, 'thumb') + + if not info.get('thumbnail'): + raise AtomicParsleyPPError('Thumbnail was not found. Nothing to do.') + + compat_urlretrieve(info['thumbnail'], temp_thumbnail) + + cmd = ['AtomicParsley', filename, '--artwork', temp_thumbnail, '-o', temp_filename] + + self._downloader.to_screen('[atomicparsley] Adding thumbnail to "%s"' % filename) + + if self._downloader.params.get('verbose', False): + self._downloader.to_screen('[debug] AtomicParsley command line: %s' % shell_quote(cmd)) + + p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + stdout, stderr = p.communicate() + + if p.returncode != 0: + msg = stderr.decode('utf-8', 'replace').strip() + raise AtomicParsleyPPError(msg) + + os.remove(encodeFilename(filename)) + os.remove(encodeFilename(temp_thumbnail)) + os.rename(encodeFilename(temp_filename), encodeFilename(filename)) + + return True, info diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/postprocessor/common.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/postprocessor/common.py new file mode 100644 index 0000000000..e54ae678da --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/postprocessor/common.py @@ -0,0 +1,51 @@ +from __future__ import unicode_literals + +from ..utils import PostProcessingError + + +class PostProcessor(object): + """Post Processor class. + + PostProcessor objects can be added to downloaders with their + add_post_processor() method. 
When the downloader has finished a + successful download, it will take its internal chain of PostProcessors + and start calling the run() method on each one of them, first with + an initial argument and then with the returned value of the previous + PostProcessor. + + The chain will be stopped if one of them ever returns None or the end + of the chain is reached. + + PostProcessor objects follow a "mutual registration" process similar + to InfoExtractor objects. + """ + + _downloader = None + + def __init__(self, downloader=None): + self._downloader = downloader + + def set_downloader(self, downloader): + """Sets the downloader for this PP.""" + self._downloader = downloader + + def run(self, information): + """Run the PostProcessor. + + The "information" argument is a dictionary like the ones + composed by InfoExtractors. The only difference is that this + one has an extra field called "filepath" that points to the + downloaded file. + + This method returns a tuple, the first element of which describes + whether the original file should be kept (i.e. not deleted - None for + no preference), and the second of which is the updated information. + + In addition, this method may raise a PostProcessingError + exception if post processing fails. 
+ """ + return None, information # by default, keep file and do nothing + + +class AudioConversionError(PostProcessingError): + pass diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/postprocessor/execafterdownload.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/postprocessor/execafterdownload.py new file mode 100644 index 0000000000..75c0f7bbe8 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/postprocessor/execafterdownload.py @@ -0,0 +1,28 @@ +from __future__ import unicode_literals + +import subprocess + +from .common import PostProcessor +from ..compat import shlex_quote +from ..utils import PostProcessingError + + +class ExecAfterDownloadPP(PostProcessor): + def __init__(self, downloader=None, verboseOutput=None, exec_cmd=None): + self.verboseOutput = verboseOutput + self.exec_cmd = exec_cmd + + def run(self, information): + cmd = self.exec_cmd + if '{}' not in cmd: + cmd += ' {}' + + cmd = cmd.replace('{}', shlex_quote(information['filepath'])) + + self._downloader.to_screen("[exec] Executing command: %s" % cmd) + retCode = subprocess.call(cmd, shell=True) + if retCode != 0: + raise PostProcessingError( + 'Command returned error code %d' % retCode) + + return None, information # by default, keep file and do nothing diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/postprocessor/ffmpeg.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/postprocessor/ffmpeg.py new file mode 100644 index 0000000000..048525efca --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/postprocessor/ffmpeg.py @@ -0,0 +1,541 @@ +from __future__ import unicode_literals + +import os +import subprocess +import sys +import time + + +from .common import AudioConversionError, PostProcessor + +from ..compat import ( + compat_subprocess_get_DEVNULL, +) +from ..utils import ( + encodeArgument, + encodeFilename, + get_exe_version, + is_outdated_version, + PostProcessingError, 
+ prepend_extension, + shell_quote, + subtitles_filename, +) + + +class FFmpegPostProcessorError(PostProcessingError): + pass + + +class FFmpegPostProcessor(PostProcessor): + def __init__(self, downloader=None, deletetempfiles=False): + PostProcessor.__init__(self, downloader) + self._versions = self.get_versions() + self._deletetempfiles = deletetempfiles + + def check_version(self): + if not self._executable: + raise FFmpegPostProcessorError('ffmpeg or avconv not found. Please install one.') + + required_version = '10-0' if self._uses_avconv() else '1.0' + if is_outdated_version( + self._versions[self._executable], required_version): + warning = 'Your copy of %s is outdated, update %s to version %s or newer if you encounter any errors.' % ( + self._executable, self._executable, required_version) + if self._downloader: + self._downloader.report_warning(warning) + + @staticmethod + def get_versions(): + programs = ['avprobe', 'avconv', 'ffmpeg', 'ffprobe'] + return dict((p, get_exe_version(p, args=['-version'])) for p in programs) + + @property + def _executable(self): + if self._downloader.params.get('prefer_ffmpeg', False): + prefs = ('ffmpeg', 'avconv') + else: + prefs = ('avconv', 'ffmpeg') + for p in prefs: + if self._versions[p]: + return p + return None + + @property + def _probe_executable(self): + if self._downloader.params.get('prefer_ffmpeg', False): + prefs = ('ffprobe', 'avprobe') + else: + prefs = ('avprobe', 'ffprobe') + for p in prefs: + if self._versions[p]: + return p + return None + + def _uses_avconv(self): + return self._executable == 'avconv' + + def run_ffmpeg_multiple_files(self, input_paths, out_path, opts): + self.check_version() + + files_cmd = [] + for path in input_paths: + files_cmd.extend(['-i', encodeFilename(path, True)]) + cmd = ([self._executable, '-y'] + files_cmd + + [encodeArgument(o) for o in opts] + + [encodeFilename(self._ffmpeg_filename_argument(out_path), True)]) + + if self._downloader.params.get('verbose', False): + 
self._downloader.to_screen('[debug] ffmpeg command line: %s' % shell_quote(cmd)) + p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + stdout, stderr = p.communicate() + if p.returncode != 0: + stderr = stderr.decode('utf-8', 'replace') + msg = stderr.strip().split('\n')[-1] + raise FFmpegPostProcessorError(msg) + if self._deletetempfiles: + for ipath in input_paths: + os.remove(ipath) + + def run_ffmpeg(self, path, out_path, opts): + self.run_ffmpeg_multiple_files([path], out_path, opts) + + def _ffmpeg_filename_argument(self, fn): + # ffmpeg broke --, see https://ffmpeg.org/trac/ffmpeg/ticket/2127 for details + if fn.startswith('-'): + return './' + fn + return fn + + +class FFmpegExtractAudioPP(FFmpegPostProcessor): + def __init__(self, downloader=None, preferredcodec=None, preferredquality=None, nopostoverwrites=False): + FFmpegPostProcessor.__init__(self, downloader) + if preferredcodec is None: + preferredcodec = 'best' + self._preferredcodec = preferredcodec + self._preferredquality = preferredquality + self._nopostoverwrites = nopostoverwrites + + def get_audio_codec(self, path): + + if not self._probe_executable: + raise PostProcessingError('ffprobe or avprobe not found. 
Please install one.') + try: + cmd = [ + self._probe_executable, + '-show_streams', + encodeFilename(self._ffmpeg_filename_argument(path), True)] + handle = subprocess.Popen(cmd, stderr=compat_subprocess_get_DEVNULL(), stdout=subprocess.PIPE) + output = handle.communicate()[0] + if handle.wait() != 0: + return None + except (IOError, OSError): + return None + audio_codec = None + for line in output.decode('ascii', 'ignore').split('\n'): + if line.startswith('codec_name='): + audio_codec = line.split('=')[1].strip() + elif line.strip() == 'codec_type=audio' and audio_codec is not None: + return audio_codec + return None + + def run_ffmpeg(self, path, out_path, codec, more_opts): + if codec is None: + acodec_opts = [] + else: + acodec_opts = ['-acodec', codec] + opts = ['-vn'] + acodec_opts + more_opts + try: + FFmpegPostProcessor.run_ffmpeg(self, path, out_path, opts) + except FFmpegPostProcessorError as err: + raise AudioConversionError(err.msg) + + def run(self, information): + path = information['filepath'] + + filecodec = self.get_audio_codec(path) + if filecodec is None: + raise PostProcessingError('WARNING: unable to obtain file audio codec with ffprobe') + + uses_avconv = self._uses_avconv() + more_opts = [] + if self._preferredcodec == 'best' or self._preferredcodec == filecodec or (self._preferredcodec == 'm4a' and filecodec == 'aac'): + if filecodec == 'aac' and self._preferredcodec in ['m4a', 'best']: + # Lossless, but in another container + acodec = 'copy' + extension = 'm4a' + more_opts = ['-bsf:a' if uses_avconv else '-absf', 'aac_adtstoasc'] + elif filecodec in ['aac', 'mp3', 'vorbis', 'opus']: + # Lossless if possible + acodec = 'copy' + extension = filecodec + if filecodec == 'aac': + more_opts = ['-f', 'adts'] + if filecodec == 'vorbis': + extension = 'ogg' + else: + # MP3 otherwise. 
+ acodec = 'libmp3lame' + extension = 'mp3' + more_opts = [] + if self._preferredquality is not None: + if int(self._preferredquality) < 10: + more_opts += ['-q:a' if uses_avconv else '-aq', self._preferredquality] + else: + more_opts += ['-b:a' if uses_avconv else '-ab', self._preferredquality + 'k'] + else: + # We convert the audio (lossy) + acodec = {'mp3': 'libmp3lame', 'aac': 'aac', 'm4a': 'aac', 'opus': 'opus', 'vorbis': 'libvorbis', 'wav': None}[self._preferredcodec] + extension = self._preferredcodec + more_opts = [] + if self._preferredquality is not None: + # The opus codec doesn't support the -aq option + if int(self._preferredquality) < 10 and extension != 'opus': + more_opts += ['-q:a' if uses_avconv else '-aq', self._preferredquality] + else: + more_opts += ['-b:a' if uses_avconv else '-ab', self._preferredquality + 'k'] + if self._preferredcodec == 'aac': + more_opts += ['-f', 'adts'] + if self._preferredcodec == 'm4a': + more_opts += ['-bsf:a' if uses_avconv else '-absf', 'aac_adtstoasc'] + if self._preferredcodec == 'vorbis': + extension = 'ogg' + if self._preferredcodec == 'wav': + extension = 'wav' + more_opts += ['-f', 'wav'] + + prefix, sep, ext = path.rpartition('.') # not os.path.splitext, since the latter does not work on unicode in all setups + new_path = prefix + sep + extension + + # If we download foo.mp3 and convert it to... foo.mp3, then don't delete foo.mp3, silly. 
+ if new_path == path: + self._nopostoverwrites = True + + try: + if self._nopostoverwrites and os.path.exists(encodeFilename(new_path)): + self._downloader.to_screen('[youtube] Post-process file %s exists, skipping' % new_path) + else: + self._downloader.to_screen('[' + self._executable + '] Destination: ' + new_path) + self.run_ffmpeg(path, new_path, acodec, more_opts) + except: + etype, e, tb = sys.exc_info() + if isinstance(e, AudioConversionError): + msg = 'audio conversion failed: ' + e.msg + else: + msg = 'error running ' + self._executable + raise PostProcessingError(msg) + + # Try to update the date time for extracted audio file. + if information.get('filetime') is not None: + try: + os.utime(encodeFilename(new_path), (time.time(), information['filetime'])) + except: + self._downloader.report_warning('Cannot update utime of audio file') + + information['filepath'] = new_path + return self._nopostoverwrites, information + + +class FFmpegVideoConvertorPP(FFmpegPostProcessor): + def __init__(self, downloader=None, preferedformat=None): + super(FFmpegVideoConvertorPP, self).__init__(downloader) + self._preferedformat = preferedformat + + def run(self, information): + path = information['filepath'] + prefix, sep, ext = path.rpartition('.') + outpath = prefix + sep + self._preferedformat + if information['ext'] == self._preferedformat: + self._downloader.to_screen('[ffmpeg] Not converting video file %s - already is in target format %s' % (path, self._preferedformat)) + return True, information + self._downloader.to_screen('[' + 'ffmpeg' + '] Converting video from %s to %s, Destination: ' % (information['ext'], self._preferedformat) + outpath) + self.run_ffmpeg(path, outpath, []) + information['filepath'] = outpath + information['format'] = self._preferedformat + information['ext'] = self._preferedformat + return False, information + + +class FFmpegEmbedSubtitlePP(FFmpegPostProcessor): + # See http://www.loc.gov/standards/iso639-2/ISO-639-2_utf-8.txt + _lang_map 
= { + 'aa': 'aar', + 'ab': 'abk', + 'ae': 'ave', + 'af': 'afr', + 'ak': 'aka', + 'am': 'amh', + 'an': 'arg', + 'ar': 'ara', + 'as': 'asm', + 'av': 'ava', + 'ay': 'aym', + 'az': 'aze', + 'ba': 'bak', + 'be': 'bel', + 'bg': 'bul', + 'bh': 'bih', + 'bi': 'bis', + 'bm': 'bam', + 'bn': 'ben', + 'bo': 'bod', + 'br': 'bre', + 'bs': 'bos', + 'ca': 'cat', + 'ce': 'che', + 'ch': 'cha', + 'co': 'cos', + 'cr': 'cre', + 'cs': 'ces', + 'cu': 'chu', + 'cv': 'chv', + 'cy': 'cym', + 'da': 'dan', + 'de': 'deu', + 'dv': 'div', + 'dz': 'dzo', + 'ee': 'ewe', + 'el': 'ell', + 'en': 'eng', + 'eo': 'epo', + 'es': 'spa', + 'et': 'est', + 'eu': 'eus', + 'fa': 'fas', + 'ff': 'ful', + 'fi': 'fin', + 'fj': 'fij', + 'fo': 'fao', + 'fr': 'fra', + 'fy': 'fry', + 'ga': 'gle', + 'gd': 'gla', + 'gl': 'glg', + 'gn': 'grn', + 'gu': 'guj', + 'gv': 'glv', + 'ha': 'hau', + 'he': 'heb', + 'hi': 'hin', + 'ho': 'hmo', + 'hr': 'hrv', + 'ht': 'hat', + 'hu': 'hun', + 'hy': 'hye', + 'hz': 'her', + 'ia': 'ina', + 'id': 'ind', + 'ie': 'ile', + 'ig': 'ibo', + 'ii': 'iii', + 'ik': 'ipk', + 'io': 'ido', + 'is': 'isl', + 'it': 'ita', + 'iu': 'iku', + 'ja': 'jpn', + 'jv': 'jav', + 'ka': 'kat', + 'kg': 'kon', + 'ki': 'kik', + 'kj': 'kua', + 'kk': 'kaz', + 'kl': 'kal', + 'km': 'khm', + 'kn': 'kan', + 'ko': 'kor', + 'kr': 'kau', + 'ks': 'kas', + 'ku': 'kur', + 'kv': 'kom', + 'kw': 'cor', + 'ky': 'kir', + 'la': 'lat', + 'lb': 'ltz', + 'lg': 'lug', + 'li': 'lim', + 'ln': 'lin', + 'lo': 'lao', + 'lt': 'lit', + 'lu': 'lub', + 'lv': 'lav', + 'mg': 'mlg', + 'mh': 'mah', + 'mi': 'mri', + 'mk': 'mkd', + 'ml': 'mal', + 'mn': 'mon', + 'mr': 'mar', + 'ms': 'msa', + 'mt': 'mlt', + 'my': 'mya', + 'na': 'nau', + 'nb': 'nob', + 'nd': 'nde', + 'ne': 'nep', + 'ng': 'ndo', + 'nl': 'nld', + 'nn': 'nno', + 'no': 'nor', + 'nr': 'nbl', + 'nv': 'nav', + 'ny': 'nya', + 'oc': 'oci', + 'oj': 'oji', + 'om': 'orm', + 'or': 'ori', + 'os': 'oss', + 'pa': 'pan', + 'pi': 'pli', + 'pl': 'pol', + 'ps': 'pus', + 'pt': 'por', + 'qu': 'que', + 'rm': 'roh', 
+ 'rn': 'run', + 'ro': 'ron', + 'ru': 'rus', + 'rw': 'kin', + 'sa': 'san', + 'sc': 'srd', + 'sd': 'snd', + 'se': 'sme', + 'sg': 'sag', + 'si': 'sin', + 'sk': 'slk', + 'sl': 'slv', + 'sm': 'smo', + 'sn': 'sna', + 'so': 'som', + 'sq': 'sqi', + 'sr': 'srp', + 'ss': 'ssw', + 'st': 'sot', + 'su': 'sun', + 'sv': 'swe', + 'sw': 'swa', + 'ta': 'tam', + 'te': 'tel', + 'tg': 'tgk', + 'th': 'tha', + 'ti': 'tir', + 'tk': 'tuk', + 'tl': 'tgl', + 'tn': 'tsn', + 'to': 'ton', + 'tr': 'tur', + 'ts': 'tso', + 'tt': 'tat', + 'tw': 'twi', + 'ty': 'tah', + 'ug': 'uig', + 'uk': 'ukr', + 'ur': 'urd', + 'uz': 'uzb', + 've': 'ven', + 'vi': 'vie', + 'vo': 'vol', + 'wa': 'wln', + 'wo': 'wol', + 'xh': 'xho', + 'yi': 'yid', + 'yo': 'yor', + 'za': 'zha', + 'zh': 'zho', + 'zu': 'zul', + } + + def __init__(self, downloader=None, subtitlesformat='srt'): + super(FFmpegEmbedSubtitlePP, self).__init__(downloader) + self._subformat = subtitlesformat + + @classmethod + def _conver_lang_code(cls, code): + """Convert language code from ISO 639-1 to ISO 639-2/T""" + return cls._lang_map.get(code[:2]) + + def run(self, information): + if information['ext'] != 'mp4': + self._downloader.to_screen('[ffmpeg] Subtitles can only be embedded in mp4 files') + return True, information + if not information.get('subtitles'): + self._downloader.to_screen('[ffmpeg] There aren\'t any subtitles to embed') + return True, information + + sub_langs = [key for key in information['subtitles']] + filename = information['filepath'] + input_files = [filename] + [subtitles_filename(filename, lang, self._subformat) for lang in sub_langs] + + opts = ['-map', '0:0', '-map', '0:1', '-c:v', 'copy', '-c:a', 'copy'] + for (i, lang) in enumerate(sub_langs): + opts.extend(['-map', '%d:0' % (i + 1), '-c:s:%d' % i, 'mov_text']) + lang_code = self._conver_lang_code(lang) + if lang_code is not None: + opts.extend(['-metadata:s:s:%d' % i, 'language=%s' % lang_code]) + opts.extend(['-f', 'mp4']) + + temp_filename = filename + '.temp' + 
self._downloader.to_screen('[ffmpeg] Embedding subtitles in \'%s\'' % filename) + self.run_ffmpeg_multiple_files(input_files, temp_filename, opts) + os.remove(encodeFilename(filename)) + os.rename(encodeFilename(temp_filename), encodeFilename(filename)) + + return True, information + + +class FFmpegMetadataPP(FFmpegPostProcessor): + def run(self, info): + metadata = {} + if info.get('title') is not None: + metadata['title'] = info['title'] + if info.get('upload_date') is not None: + metadata['date'] = info['upload_date'] + if info.get('uploader') is not None: + metadata['artist'] = info['uploader'] + elif info.get('uploader_id') is not None: + metadata['artist'] = info['uploader_id'] + + if not metadata: + self._downloader.to_screen('[ffmpeg] There isn\'t any metadata to add') + return True, info + + filename = info['filepath'] + temp_filename = prepend_extension(filename, 'temp') + + if info['ext'] == 'm4a': + options = ['-vn', '-acodec', 'copy'] + else: + options = ['-c', 'copy'] + + for (name, value) in metadata.items(): + options.extend(['-metadata', '%s=%s' % (name, value)]) + + self._downloader.to_screen('[ffmpeg] Adding metadata to \'%s\'' % filename) + self.run_ffmpeg(filename, temp_filename, options) + os.remove(encodeFilename(filename)) + os.rename(encodeFilename(temp_filename), encodeFilename(filename)) + return True, info + + +class FFmpegMergerPP(FFmpegPostProcessor): + def run(self, info): + filename = info['filepath'] + args = ['-c', 'copy', '-map', '0:v:0', '-map', '1:a:0', '-shortest'] + self._downloader.to_screen('[ffmpeg] Merging formats into "%s"' % filename) + self.run_ffmpeg_multiple_files(info['__files_to_merge'], filename, args) + return True, info + + +class FFmpegAudioFixPP(FFmpegPostProcessor): + def run(self, info): + filename = info['filepath'] + temp_filename = prepend_extension(filename, 'temp') + + options = ['-vn', '-acodec', 'copy'] + self._downloader.to_screen('[ffmpeg] Fixing audio file "%s"' % filename) + 
self.run_ffmpeg(filename, temp_filename, options) + + os.remove(encodeFilename(filename)) + os.rename(encodeFilename(temp_filename), encodeFilename(filename)) + + return True, info diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/postprocessor/xattrpp.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/postprocessor/xattrpp.py new file mode 100644 index 0000000000..f6c63fe975 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/postprocessor/xattrpp.py @@ -0,0 +1,112 @@ +from __future__ import unicode_literals + +import os +import subprocess +import sys + +from .common import PostProcessor +from ..compat import ( + subprocess_check_output +) +from ..utils import ( + check_executable, + hyphenate_date, +) + + +class XAttrMetadataPP(PostProcessor): + + # + # More info about extended attributes for media: + # http://freedesktop.org/wiki/CommonExtendedAttributes/ + # http://www.freedesktop.org/wiki/PhreedomDraft/ + # http://dublincore.org/documents/usageguide/elements.shtml + # + # TODO: + # * capture youtube keywords and put them in 'user.dublincore.subject' (comma-separated) + # * figure out which xattrs can be used for 'duration', 'thumbnail', 'resolution' + # + + def run(self, info): + """ Set extended attributes on downloaded file (if xattr support is found). """ + + # This mess below finds the best xattr tool for the job and creates a + # "write_xattr" function. + try: + # try the pyxattr module... 
+ import xattr + + def write_xattr(path, key, value): + return xattr.setxattr(path, key, value) + + except ImportError: + if os.name == 'nt': + # Write xattrs to NTFS Alternate Data Streams: + # http://en.wikipedia.org/wiki/NTFS#Alternate_data_streams_.28ADS.29 + def write_xattr(path, key, value): + assert ':' not in key + assert os.path.exists(path) + + ads_fn = path + ":" + key + with open(ads_fn, "wb") as f: + f.write(value) + else: + user_has_setfattr = check_executable("setfattr", ['--version']) + user_has_xattr = check_executable("xattr", ['-h']) + + if user_has_setfattr or user_has_xattr: + + def write_xattr(path, key, value): + if user_has_setfattr: + cmd = ['setfattr', '-n', key, '-v', value, path] + elif user_has_xattr: + cmd = ['xattr', '-w', key, value, path] + + subprocess_check_output(cmd) + + else: + # On Unix, and can't find pyxattr, setfattr, or xattr. + if sys.platform.startswith('linux'): + self._downloader.report_error( + "Couldn't find a tool to set the xattrs. " + "Install either the python 'pyxattr' or 'xattr' " + "modules, or the GNU 'attr' package " + "(which contains the 'setfattr' tool).") + else: + self._downloader.report_error( + "Couldn't find a tool to set the xattrs. 
" + "Install either the python 'xattr' module, " + "or the 'xattr' binary.") + + # Write the metadata to the file's xattrs + self._downloader.to_screen('[metadata] Writing metadata to file\'s xattrs') + + filename = info['filepath'] + + try: + xattr_mapping = { + 'user.xdg.referrer.url': 'webpage_url', + # 'user.xdg.comment': 'description', + 'user.dublincore.title': 'title', + 'user.dublincore.date': 'upload_date', + 'user.dublincore.description': 'description', + 'user.dublincore.contributor': 'uploader', + 'user.dublincore.format': 'format', + } + + for xattrname, infoname in xattr_mapping.items(): + + value = info.get(infoname) + + if value: + if infoname == "upload_date": + value = hyphenate_date(value) + + byte_value = value.encode('utf-8') + write_xattr(filename, xattrname, byte_value) + + return True, info + + except (subprocess.CalledProcessError, OSError): + self._downloader.report_error("This filesystem doesn't support extended attributes. (You may have to enable them in your /etc/fstab)") + return False, info diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/swfinterp.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/swfinterp.py new file mode 100644 index 0000000000..e60505ace8 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/swfinterp.py @@ -0,0 +1,829 @@ +from __future__ import unicode_literals + +import collections +import io +import zlib + +from .compat import compat_str +from .utils import ( + ExtractorError, + struct_unpack, +) + + +def _extract_tags(file_contents): + if file_contents[1:3] != b'WS': + raise ExtractorError( + 'Not an SWF file; header is %r' % file_contents[:3]) + if file_contents[:1] == b'C': + content = zlib.decompress(file_contents[8:]) + else: + raise NotImplementedError( + 'Unsupported compression format %r' % + file_contents[:1]) + + # Determine number of bits in framesize rectangle + framesize_nbits = struct_unpack('!B', content[:1])[0] >> 3 + framesize_len = (5 
+ 4 * framesize_nbits + 7) // 8 + + pos = framesize_len + 2 + 2 + while pos < len(content): + header16 = struct_unpack('<H', content[pos:pos + 2])[0] + pos += 2 + tag_code = header16 >> 6 + tag_len = header16 & 0x3f + if tag_len == 0x3f: + tag_len = struct_unpack('<I', content[pos:pos + 4])[0] + pos += 4 + assert pos + tag_len <= len(content), \ + ('Tag %d ends at %d+%d - that\'s longer than the file (%d)' + % (tag_code, pos, tag_len, len(content))) + yield (tag_code, content[pos:pos + tag_len]) + pos += tag_len + + +class _AVMClass_Object(object): + def __init__(self, avm_class): + self.avm_class = avm_class + + def __repr__(self): + return '%s#%x' % (self.avm_class.name, id(self)) + + +class _ScopeDict(dict): + def __init__(self, avm_class): + super(_ScopeDict, self).__init__() + self.avm_class = avm_class + + def __repr__(self): + return '%s__Scope(%s)' % ( + self.avm_class.name, + super(_ScopeDict, self).__repr__()) + + +class _AVMClass(object): + def __init__(self, name_idx, name, static_properties=None): + self.name_idx = name_idx + self.name = name + self.method_names = {} + self.method_idxs = {} + self.methods = {} + self.method_pyfunctions = {} + self.static_properties = static_properties if static_properties else {} + + self.variables = _ScopeDict(self) + self.constants = {} + + def make_object(self): + return _AVMClass_Object(self) + + def __repr__(self): + return '_AVMClass(%s)' % (self.name) + + def register_methods(self, methods): + self.method_names.update(methods.items()) + self.method_idxs.update(dict( + (idx, name) + for name, idx in methods.items())) + + +class _Multiname(object): + def __init__(self, kind): + self.kind = kind + + def __repr__(self): + return '[MULTINAME kind: 0x%x]' % self.kind + + +def _read_int(reader): + res = 0 + shift = 0 + for _ in range(5): + buf = reader.read(1) + assert len(buf) == 1 + b = struct_unpack('<B', buf)[0] + res = res | ((b & 0x7f) << shift) + if b & 0x80 == 0: + break + shift += 7 + return res + + +def 
_u30(reader): + res = _read_int(reader) + assert res & 0xf0000000 == 0 + return res +_u32 = _read_int + + +def _s32(reader): + v = _read_int(reader) + if v & 0x80000000 != 0: + v = - ((v ^ 0xffffffff) + 1) + return v + + +def _s24(reader): + bs = reader.read(3) + assert len(bs) == 3 + last_byte = b'\xff' if (ord(bs[2:3]) >= 0x80) else b'\x00' + return struct_unpack('<i', bs + last_byte)[0] + + +def _read_string(reader): + slen = _u30(reader) + resb = reader.read(slen) + assert len(resb) == slen + return resb.decode('utf-8') + + +def _read_bytes(count, reader): + assert count >= 0 + resb = reader.read(count) + assert len(resb) == count + return resb + + +def _read_byte(reader): + resb = _read_bytes(1, reader=reader) + res = struct_unpack('<B', resb)[0] + return res + + +StringClass = _AVMClass('(no name idx)', 'String') +ByteArrayClass = _AVMClass('(no name idx)', 'ByteArray') +TimerClass = _AVMClass('(no name idx)', 'Timer') +TimerEventClass = _AVMClass('(no name idx)', 'TimerEvent', {'TIMER': 'timer'}) +_builtin_classes = { + StringClass.name: StringClass, + ByteArrayClass.name: ByteArrayClass, + TimerClass.name: TimerClass, + TimerEventClass.name: TimerEventClass, +} + + +class _Undefined(object): + def __bool__(self): + return False + __nonzero__ = __bool__ + + def __hash__(self): + return 0 + + def __str__(self): + return 'undefined' + __repr__ = __str__ + +undefined = _Undefined() + + +class SWFInterpreter(object): + def __init__(self, file_contents): + self._patched_functions = { + (TimerClass, 'addEventListener'): lambda params: undefined, + } + code_tag = next(tag + for tag_code, tag in _extract_tags(file_contents) + if tag_code == 82) + p = code_tag.index(b'\0', 4) + 1 + code_reader = io.BytesIO(code_tag[p:]) + + # Parse ABC (AVM2 ByteCode) + + # Define a couple convenience methods + u30 = lambda *args: _u30(*args, reader=code_reader) + s32 = lambda *args: _s32(*args, reader=code_reader) + u32 = lambda *args: _u32(*args, reader=code_reader) + read_bytes = 
lambda *args: _read_bytes(*args, reader=code_reader) + read_byte = lambda *args: _read_byte(*args, reader=code_reader) + + # minor_version + major_version + read_bytes(2 + 2) + + # Constant pool + int_count = u30() + self.constant_ints = [0] + for _c in range(1, int_count): + self.constant_ints.append(s32()) + self.constant_uints = [0] + uint_count = u30() + for _c in range(1, uint_count): + self.constant_uints.append(u32()) + double_count = u30() + read_bytes(max(0, (double_count - 1)) * 8) + string_count = u30() + self.constant_strings = [''] + for _c in range(1, string_count): + s = _read_string(code_reader) + self.constant_strings.append(s) + namespace_count = u30() + for _c in range(1, namespace_count): + read_bytes(1) # kind + u30() # name + ns_set_count = u30() + for _c in range(1, ns_set_count): + count = u30() + for _c2 in range(count): + u30() + multiname_count = u30() + MULTINAME_SIZES = { + 0x07: 2, # QName + 0x0d: 2, # QNameA + 0x0f: 1, # RTQName + 0x10: 1, # RTQNameA + 0x11: 0, # RTQNameL + 0x12: 0, # RTQNameLA + 0x09: 2, # Multiname + 0x0e: 2, # MultinameA + 0x1b: 1, # MultinameL + 0x1c: 1, # MultinameLA + } + self.multinames = [''] + for _c in range(1, multiname_count): + kind = u30() + assert kind in MULTINAME_SIZES, 'Invalid multiname kind %r' % kind + if kind == 0x07: + u30() # namespace_idx + name_idx = u30() + self.multinames.append(self.constant_strings[name_idx]) + elif kind == 0x09: + name_idx = u30() + u30() + self.multinames.append(self.constant_strings[name_idx]) + else: + self.multinames.append(_Multiname(kind)) + for _c2 in range(MULTINAME_SIZES[kind]): + u30() + + # Methods + method_count = u30() + MethodInfo = collections.namedtuple( + 'MethodInfo', + ['NEED_ARGUMENTS', 'NEED_REST']) + method_infos = [] + for method_id in range(method_count): + param_count = u30() + u30() # return type + for _ in range(param_count): + u30() # param type + u30() # name index (always 0 for youtube) + flags = read_byte() + if flags & 0x08 != 0: + # 
Options present + option_count = u30() + for c in range(option_count): + u30() # val + read_bytes(1) # kind + if flags & 0x80 != 0: + # Param names present + for _ in range(param_count): + u30() # param name + mi = MethodInfo(flags & 0x01 != 0, flags & 0x04 != 0) + method_infos.append(mi) + + # Metadata + metadata_count = u30() + for _c in range(metadata_count): + u30() # name + item_count = u30() + for _c2 in range(item_count): + u30() # key + u30() # value + + def parse_traits_info(): + trait_name_idx = u30() + kind_full = read_byte() + kind = kind_full & 0x0f + attrs = kind_full >> 4 + methods = {} + constants = None + if kind == 0x00: # Slot + u30() # Slot id + u30() # type_name_idx + vindex = u30() + if vindex != 0: + read_byte() # vkind + elif kind == 0x06: # Const + u30() # Slot id + u30() # type_name_idx + vindex = u30() + vkind = 'any' + if vindex != 0: + vkind = read_byte() + if vkind == 0x03: # Constant_Int + value = self.constant_ints[vindex] + elif vkind == 0x04: # Constant_UInt + value = self.constant_uints[vindex] + else: + return {}, None # Ignore silently for now + constants = {self.multinames[trait_name_idx]: value} + elif kind in (0x01, 0x02, 0x03): # Method / Getter / Setter + u30() # disp_id + method_idx = u30() + methods[self.multinames[trait_name_idx]] = method_idx + elif kind == 0x04: # Class + u30() # slot_id + u30() # classi + elif kind == 0x05: # Function + u30() # slot_id + function_idx = u30() + methods[function_idx] = self.multinames[trait_name_idx] + else: + raise ExtractorError('Unsupported trait kind %d' % kind) + + if attrs & 0x4 != 0: # Metadata present + metadata_count = u30() + for _c3 in range(metadata_count): + u30() # metadata index + + return methods, constants + + # Classes + class_count = u30() + classes = [] + for class_id in range(class_count): + name_idx = u30() + + cname = self.multinames[name_idx] + avm_class = _AVMClass(name_idx, cname) + classes.append(avm_class) + + u30() # super_name idx + flags = read_byte() + if 
flags & 0x08 != 0: # Protected namespace is present + u30() # protected_ns_idx + intrf_count = u30() + for _c2 in range(intrf_count): + u30() + u30() # iinit + trait_count = u30() + for _c2 in range(trait_count): + trait_methods, trait_constants = parse_traits_info() + avm_class.register_methods(trait_methods) + if trait_constants: + avm_class.constants.update(trait_constants) + + assert len(classes) == class_count + self._classes_by_name = dict((c.name, c) for c in classes) + + for avm_class in classes: + avm_class.cinit_idx = u30() + trait_count = u30() + for _c2 in range(trait_count): + trait_methods, trait_constants = parse_traits_info() + avm_class.register_methods(trait_methods) + if trait_constants: + avm_class.constants.update(trait_constants) + + # Scripts + script_count = u30() + for _c in range(script_count): + u30() # init + trait_count = u30() + for _c2 in range(trait_count): + parse_traits_info() + + # Method bodies + method_body_count = u30() + Method = collections.namedtuple('Method', ['code', 'local_count']) + self._all_methods = [] + for _c in range(method_body_count): + method_idx = u30() + u30() # max_stack + local_count = u30() + u30() # init_scope_depth + u30() # max_scope_depth + code_length = u30() + code = read_bytes(code_length) + m = Method(code, local_count) + self._all_methods.append(m) + for avm_class in classes: + if method_idx in avm_class.method_idxs: + avm_class.methods[avm_class.method_idxs[method_idx]] = m + exception_count = u30() + for _c2 in range(exception_count): + u30() # from + u30() # to + u30() # target + u30() # exc_type + u30() # var_name + trait_count = u30() + for _c2 in range(trait_count): + parse_traits_info() + + assert p + code_reader.tell() == len(code_tag) + + def patch_function(self, avm_class, func_name, f): + self._patched_functions[(avm_class, func_name)] = f + + def extract_class(self, class_name, call_cinit=True): + try: + res = self._classes_by_name[class_name] + except KeyError: + raise 
ExtractorError('Class %r not found' % class_name) + + if call_cinit and hasattr(res, 'cinit_idx'): + res.register_methods({'$cinit': res.cinit_idx}) + res.methods['$cinit'] = self._all_methods[res.cinit_idx] + cinit = self.extract_function(res, '$cinit') + cinit([]) + + return res + + def extract_function(self, avm_class, func_name): + p = self._patched_functions.get((avm_class, func_name)) + if p: + return p + if func_name in avm_class.method_pyfunctions: + return avm_class.method_pyfunctions[func_name] + if func_name in self._classes_by_name: + return self._classes_by_name[func_name].make_object() + if func_name not in avm_class.methods: + raise ExtractorError('Cannot find function %s.%s' % ( + avm_class.name, func_name)) + m = avm_class.methods[func_name] + + def resfunc(args): + # Helper functions + coder = io.BytesIO(m.code) + s24 = lambda: _s24(coder) + u30 = lambda: _u30(coder) + + registers = [avm_class.variables] + list(args) + [None] * m.local_count + stack = [] + scopes = collections.deque([ + self._classes_by_name, avm_class.constants, avm_class.variables]) + while True: + opcode = _read_byte(coder) + if opcode == 9: # label + pass # Spec says: "Do nothing." 
+ elif opcode == 16: # jump + offset = s24() + coder.seek(coder.tell() + offset) + elif opcode == 17: # iftrue + offset = s24() + value = stack.pop() + if value: + coder.seek(coder.tell() + offset) + elif opcode == 18: # iffalse + offset = s24() + value = stack.pop() + if not value: + coder.seek(coder.tell() + offset) + elif opcode == 19: # ifeq + offset = s24() + value2 = stack.pop() + value1 = stack.pop() + if value2 == value1: + coder.seek(coder.tell() + offset) + elif opcode == 20: # ifne + offset = s24() + value2 = stack.pop() + value1 = stack.pop() + if value2 != value1: + coder.seek(coder.tell() + offset) + elif opcode == 21: # iflt + offset = s24() + value2 = stack.pop() + value1 = stack.pop() + if value1 < value2: + coder.seek(coder.tell() + offset) + elif opcode == 32: # pushnull + stack.append(None) + elif opcode == 33: # pushundefined + stack.append(undefined) + elif opcode == 36: # pushbyte + v = _read_byte(coder) + stack.append(v) + elif opcode == 37: # pushshort + v = u30() + stack.append(v) + elif opcode == 38: # pushtrue + stack.append(True) + elif opcode == 39: # pushfalse + stack.append(False) + elif opcode == 40: # pushnan + stack.append(float('NaN')) + elif opcode == 42: # dup + value = stack[-1] + stack.append(value) + elif opcode == 44: # pushstring + idx = u30() + stack.append(self.constant_strings[idx]) + elif opcode == 48: # pushscope + new_scope = stack.pop() + scopes.append(new_scope) + elif opcode == 66: # construct + arg_count = u30() + args = list(reversed( + [stack.pop() for _ in range(arg_count)])) + obj = stack.pop() + res = obj.avm_class.make_object() + stack.append(res) + elif opcode == 70: # callproperty + index = u30() + mname = self.multinames[index] + arg_count = u30() + args = list(reversed( + [stack.pop() for _ in range(arg_count)])) + obj = stack.pop() + + if obj == StringClass: + if mname == 'String': + assert len(args) == 1 + assert isinstance(args[0], ( + int, compat_str, _Undefined)) + if args[0] == undefined: + res = 
'undefined' + else: + res = compat_str(args[0]) + stack.append(res) + continue + else: + raise NotImplementedError( + 'Function String.%s is not yet implemented' + % mname) + elif isinstance(obj, _AVMClass_Object): + func = self.extract_function(obj.avm_class, mname) + res = func(args) + stack.append(res) + continue + elif isinstance(obj, _AVMClass): + func = self.extract_function(obj, mname) + res = func(args) + stack.append(res) + continue + elif isinstance(obj, _ScopeDict): + if mname in obj.avm_class.method_names: + func = self.extract_function(obj.avm_class, mname) + res = func(args) + else: + res = obj[mname] + stack.append(res) + continue + elif isinstance(obj, compat_str): + if mname == 'split': + assert len(args) == 1 + assert isinstance(args[0], compat_str) + if args[0] == '': + res = list(obj) + else: + res = obj.split(args[0]) + stack.append(res) + continue + elif mname == 'charCodeAt': + assert len(args) <= 1 + idx = 0 if len(args) == 0 else args[0] + assert isinstance(idx, int) + res = ord(obj[idx]) + stack.append(res) + continue + elif isinstance(obj, list): + if mname == 'slice': + assert len(args) == 1 + assert isinstance(args[0], int) + res = obj[args[0]:] + stack.append(res) + continue + elif mname == 'join': + assert len(args) == 1 + assert isinstance(args[0], compat_str) + res = args[0].join(obj) + stack.append(res) + continue + raise NotImplementedError( + 'Unsupported property %r on %r' + % (mname, obj)) + elif opcode == 71: # returnvoid + res = undefined + return res + elif opcode == 72: # returnvalue + res = stack.pop() + return res + elif opcode == 73: # constructsuper + # Not yet implemented, just hope it works without it + arg_count = u30() + args = list(reversed( + [stack.pop() for _ in range(arg_count)])) + obj = stack.pop() + elif opcode == 74: # constructproperty + index = u30() + arg_count = u30() + args = list(reversed( + [stack.pop() for _ in range(arg_count)])) + obj = stack.pop() + + mname = self.multinames[index] + assert 
isinstance(obj, _AVMClass) + + # We do not actually call the constructor for now; + # we just pretend it does nothing + stack.append(obj.make_object()) + elif opcode == 79: # callpropvoid + index = u30() + mname = self.multinames[index] + arg_count = u30() + args = list(reversed( + [stack.pop() for _ in range(arg_count)])) + obj = stack.pop() + if isinstance(obj, _AVMClass_Object): + func = self.extract_function(obj.avm_class, mname) + res = func(args) + assert res is undefined + continue + if isinstance(obj, _ScopeDict): + assert mname in obj.avm_class.method_names + func = self.extract_function(obj.avm_class, mname) + res = func(args) + assert res is undefined + continue + if mname == 'reverse': + assert isinstance(obj, list) + obj.reverse() + else: + raise NotImplementedError( + 'Unsupported (void) property %r on %r' + % (mname, obj)) + elif opcode == 86: # newarray + arg_count = u30() + arr = [] + for i in range(arg_count): + arr.append(stack.pop()) + arr = arr[::-1] + stack.append(arr) + elif opcode == 93: # findpropstrict + index = u30() + mname = self.multinames[index] + for s in reversed(scopes): + if mname in s: + res = s + break + else: + res = scopes[0] + if mname not in res and mname in _builtin_classes: + stack.append(_builtin_classes[mname]) + else: + stack.append(res[mname]) + elif opcode == 94: # findproperty + index = u30() + mname = self.multinames[index] + for s in reversed(scopes): + if mname in s: + res = s + break + else: + res = avm_class.variables + stack.append(res) + elif opcode == 96: # getlex + index = u30() + mname = self.multinames[index] + for s in reversed(scopes): + if mname in s: + scope = s + break + else: + scope = avm_class.variables + + if mname in scope: + res = scope[mname] + elif mname in _builtin_classes: + res = _builtin_classes[mname] + else: + # Assume unitialized + # TODO warn here + res = undefined + stack.append(res) + elif opcode == 97: # setproperty + index = u30() + value = stack.pop() + idx = 
self.multinames[index] + if isinstance(idx, _Multiname): + idx = stack.pop() + obj = stack.pop() + obj[idx] = value + elif opcode == 98: # getlocal + index = u30() + stack.append(registers[index]) + elif opcode == 99: # setlocal + index = u30() + value = stack.pop() + registers[index] = value + elif opcode == 102: # getproperty + index = u30() + pname = self.multinames[index] + if pname == 'length': + obj = stack.pop() + assert isinstance(obj, (compat_str, list)) + stack.append(len(obj)) + elif isinstance(pname, compat_str): # Member access + obj = stack.pop() + if isinstance(obj, _AVMClass): + res = obj.static_properties[pname] + stack.append(res) + continue + + assert isinstance(obj, (dict, _ScopeDict)),\ + 'Accessing member %r on %r' % (pname, obj) + res = obj.get(pname, undefined) + stack.append(res) + else: # Assume attribute access + idx = stack.pop() + assert isinstance(idx, int) + obj = stack.pop() + assert isinstance(obj, list) + stack.append(obj[idx]) + elif opcode == 104: # initproperty + index = u30() + value = stack.pop() + idx = self.multinames[index] + if isinstance(idx, _Multiname): + idx = stack.pop() + obj = stack.pop() + obj[idx] = value + elif opcode == 115: # convert_ + value = stack.pop() + intvalue = int(value) + stack.append(intvalue) + elif opcode == 128: # coerce + u30() + elif opcode == 130: # coerce_a + value = stack.pop() + # um, yes, it's any value + stack.append(value) + elif opcode == 133: # coerce_s + assert isinstance(stack[-1], (type(None), compat_str)) + elif opcode == 147: # decrement + value = stack.pop() + assert isinstance(value, int) + stack.append(value - 1) + elif opcode == 149: # typeof + value = stack.pop() + return { + _Undefined: 'undefined', + compat_str: 'String', + int: 'Number', + float: 'Number', + }[type(value)] + elif opcode == 160: # add + value2 = stack.pop() + value1 = stack.pop() + res = value1 + value2 + stack.append(res) + elif opcode == 161: # subtract + value2 = stack.pop() + value1 = stack.pop() + res = 
value1 - value2 + stack.append(res) + elif opcode == 162: # multiply + value2 = stack.pop() + value1 = stack.pop() + res = value1 * value2 + stack.append(res) + elif opcode == 164: # modulo + value2 = stack.pop() + value1 = stack.pop() + res = value1 % value2 + stack.append(res) + elif opcode == 168: # bitand + value2 = stack.pop() + value1 = stack.pop() + assert isinstance(value1, int) + assert isinstance(value2, int) + res = value1 & value2 + stack.append(res) + elif opcode == 171: # equals + value2 = stack.pop() + value1 = stack.pop() + result = value1 == value2 + stack.append(result) + elif opcode == 175: # greaterequals + value2 = stack.pop() + value1 = stack.pop() + result = value1 >= value2 + stack.append(result) + elif opcode == 192: # increment_i + value = stack.pop() + assert isinstance(value, int) + stack.append(value + 1) + elif opcode == 208: # getlocal_0 + stack.append(registers[0]) + elif opcode == 209: # getlocal_1 + stack.append(registers[1]) + elif opcode == 210: # getlocal_2 + stack.append(registers[2]) + elif opcode == 211: # getlocal_3 + stack.append(registers[3]) + elif opcode == 212: # setlocal_0 + registers[0] = stack.pop() + elif opcode == 213: # setlocal_1 + registers[1] = stack.pop() + elif opcode == 214: # setlocal_2 + registers[2] = stack.pop() + elif opcode == 215: # setlocal_3 + registers[3] = stack.pop() + else: + raise NotImplementedError( + 'Unsupported opcode %d' % opcode) + + avm_class.method_pyfunctions[func_name] = resfunc + return resfunc diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/update.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/update.py new file mode 100644 index 0000000000..2d2703368d --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/update.py @@ -0,0 +1,208 @@ +from __future__ import unicode_literals + +import io +import json +import traceback +import hashlib +import os +import subprocess +import sys +from zipimport import zipimporter + +from 
def rsa_verify(message, signature, key):
    """Verify an RSA PKCS#1 v1.5 SHA-256 signature over *message*.

    message   -- the signed data, as bytes
    signature -- hex string of the RSA signature integer
    key       -- (modulus, public_exponent) tuple of ints

    Returns True iff the signature decrypts to a well-formed
    EMSA-PKCS1-v1_5 block whose embedded SHA-256 digest matches
    sha256(message).
    """
    from struct import pack
    from hashlib import sha256

    assert isinstance(message, bytes)
    # The byte length of the modulus determines the padded block size.
    block_size = 0
    n = key[0]
    while n:
        block_size += 1
        n >>= 8
    # "Decrypt" the signature with the public key.
    signature = pow(int(signature, 16), key[1], key[0])
    raw_bytes = []
    while signature:
        raw_bytes.insert(0, pack("B", signature & 0xFF))
        signature >>= 8
    # Left-pad with zero bytes back up to the modulus size.
    signature = (block_size - len(raw_bytes)) * b'\x00' + b''.join(raw_bytes)
    # EMSA-PKCS1-v1_5 layout: 00 01 <0xff padding> 00 <DigestInfo> <digest>
    if signature[0:2] != b'\x00\x01':
        return False
    signature = signature[2:]
    if b'\x00' not in signature:
        return False
    signature = signature[signature.index(b'\x00') + 1:]
    # ASN.1 DigestInfo prefix identifying SHA-256.
    if not signature.startswith(b'\x30\x31\x30\x0D\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x01\x05\x00\x04\x20'):
        return False
    signature = signature[19:]
    if signature != sha256(message).digest():
        return False
    return True


def update_self(to_screen, verbose):
    """Update the program file with the latest version from the repository.

    to_screen -- callable used for user-visible output
    verbose   -- when true, print tracebacks for failures

    Only updates py2exe-frozen binaries and zipimport (single-file) unix
    packages; other install methods are refused with a hint.
    """

    UPDATE_URL = "http://rg3.github.io/youtube-dl/update/"
    VERSION_URL = UPDATE_URL + 'LATEST_VERSION'
    JSON_URL = UPDATE_URL + 'versions.json'
    UPDATES_RSA_KEY = (0x9d60ee4d8f805312fdb15a62f87b95bd66177b91df176765d13514a0f1754bcd2057295c5b6f1d35daa6742c3ffc9a82d3e118861c207995a8031e151d863c9927e304576bc80692bc8e094896fcf11b66f3e29e04e3a71e9a11558558acea1840aec37fc396fb6b65dc81a1c4144e03bd1c011de62e3f1357b327d08426fe93, 65537)

    if not isinstance(globals().get('__loader__'), zipimporter) and not hasattr(sys, "frozen"):
        to_screen('It looks like you installed youtube-dl with a package manager, pip, setup.py or a tarball. Please use that to update.')
        return

    # Check if there is a new version
    try:
        newversion = compat_urllib_request.urlopen(VERSION_URL).read().decode('utf-8').strip()
    except Exception:
        # Was a bare "except:"; narrowed so Ctrl-C / SystemExit still propagate.
        if verbose:
            to_screen(compat_str(traceback.format_exc()))
        to_screen('ERROR: can\'t find the current version. Please try again later.')
        return
    if newversion == __version__:
        to_screen('youtube-dl is up-to-date (' + __version__ + ')')
        return

    # Download and check versions info
    try:
        versions_info = compat_urllib_request.urlopen(JSON_URL).read().decode('utf-8')
        versions_info = json.loads(versions_info)
    except Exception:
        # Was a bare "except:"; narrowed so Ctrl-C / SystemExit still propagate.
        if verbose:
            to_screen(compat_str(traceback.format_exc()))
        to_screen('ERROR: can\'t obtain versions info. Please try again later.')
        return
    if 'signature' not in versions_info:
        to_screen('ERROR: the versions file is not signed or corrupted. Aborting.')
        return
    signature = versions_info['signature']
    del versions_info['signature']
    # The signature covers the canonical (sorted-keys) JSON serialization.
    if not rsa_verify(json.dumps(versions_info, sort_keys=True).encode('utf-8'), signature, UPDATES_RSA_KEY):
        to_screen('ERROR: the versions file signature is invalid. Aborting.')
        return

    version_id = versions_info['latest']

    def version_tuple(version_str):
        return tuple(map(int, version_str.split('.')))
    if version_tuple(__version__) >= version_tuple(version_id):
        to_screen('youtube-dl is up to date (%s)' % __version__)
        return

    to_screen('Updating to version ' + version_id + ' ...')
    version = versions_info['versions'][version_id]

    print_notes(to_screen, versions_info['versions'])

    filename = sys.argv[0]
    # Py2EXE: Filename could be different
    if hasattr(sys, "frozen") and not os.path.isfile(filename):
        if os.path.isfile(filename + '.exe'):
            filename += '.exe'

    if not os.access(filename, os.W_OK):
        to_screen('ERROR: no write permissions on %s' % filename)
        return

    # Py2EXE
    if hasattr(sys, "frozen"):
        exe = os.path.abspath(filename)
        directory = os.path.dirname(exe)
        if not os.access(directory, os.W_OK):
            to_screen('ERROR: no write permissions on %s' % directory)
            return

        try:
            urlh = compat_urllib_request.urlopen(version['exe'][0])
            newcontent = urlh.read()
            urlh.close()
        except (IOError, OSError):
            if verbose:
                to_screen(compat_str(traceback.format_exc()))
            to_screen('ERROR: unable to download latest version')
            return

        newcontent_hash = hashlib.sha256(newcontent).hexdigest()
        if newcontent_hash != version['exe'][1]:
            to_screen('ERROR: the downloaded file hash does not match. Aborting.')
            return

        try:
            with open(exe + '.new', 'wb') as outf:
                outf.write(newcontent)
        except (IOError, OSError):
            if verbose:
                to_screen(compat_str(traceback.format_exc()))
            to_screen('ERROR: unable to write the new version')
            return

        try:
            # A running .exe cannot overwrite itself; a throwaway batch file
            # waits for the handle to close, swaps the files, then deletes itself.
            bat = os.path.join(directory, 'youtube-dl-updater.bat')
            with io.open(bat, 'w') as batfile:
                batfile.write('''
@echo off
echo Waiting for file handle to be closed ...
ping 127.0.0.1 -n 5 -w 1000 > NUL
move /Y "%s.new" "%s" > NUL
echo Updated youtube-dl to version %s.
start /b "" cmd /c del "%%~f0"&exit /b"
    \n''' % (exe, exe, version_id))

            subprocess.Popen([bat])  # Continues to run in the background
            return  # Do not show premature success messages
        except (IOError, OSError):
            if verbose:
                to_screen(compat_str(traceback.format_exc()))
            to_screen('ERROR: unable to overwrite current version')
            return

    # Zip unix package
    elif isinstance(globals().get('__loader__'), zipimporter):
        try:
            urlh = compat_urllib_request.urlopen(version['bin'][0])
            newcontent = urlh.read()
            urlh.close()
        except (IOError, OSError):
            if verbose:
                to_screen(compat_str(traceback.format_exc()))
            to_screen('ERROR: unable to download latest version')
            return

        newcontent_hash = hashlib.sha256(newcontent).hexdigest()
        if newcontent_hash != version['bin'][1]:
            to_screen('ERROR: the downloaded file hash does not match. Aborting.')
            return

        try:
            with open(filename, 'wb') as outf:
                outf.write(newcontent)
        except (IOError, OSError):
            if verbose:
                to_screen(compat_str(traceback.format_exc()))
            to_screen('ERROR: unable to overwrite current version')
            return

    to_screen('Updated youtube-dl. Restart youtube-dl to use the new version.')
Restart youtube-dl to use the new version.') + + +def get_notes(versions, fromVersion): + notes = [] + for v, vdata in sorted(versions.items()): + if v > fromVersion: + notes.extend(vdata.get('notes', [])) + return notes + + +def print_notes(to_screen, versions, fromVersion=__version__): + notes = get_notes(versions, fromVersion) + if notes: + to_screen('PLEASE NOTE:') + for note in notes: + to_screen(note) diff --git a/couchpotato/core/media/movie/providers/trailer/youtube_dl/utils.py b/couchpotato/core/media/movie/providers/trailer/youtube_dl/utils.py new file mode 100644 index 0000000000..43b7c94ba4 --- /dev/null +++ b/couchpotato/core/media/movie/providers/trailer/youtube_dl/utils.py @@ -0,0 +1,1545 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +from __future__ import unicode_literals + +import calendar +import codecs +import contextlib +import ctypes +import datetime +import email.utils +import errno +import gzip +import itertools +import io +import json +import locale +import math +import os +import pipes +import platform +import re +import ssl +import socket +import struct +import subprocess +import sys +import tempfile +import traceback +import xml.etree.ElementTree +import zlib + +from .compat import ( + compat_chr, + compat_getenv, + compat_html_entities, + compat_parse_qs, + compat_str, + compat_urllib_error, + compat_urllib_parse, + compat_urllib_parse_urlparse, + compat_urllib_request, + compat_urlparse, + shlex_quote, +) + + +# This is not clearly defined otherwise +compiled_regex_type = type(re.compile('')) + +std_headers = { + 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) Gecko/20100101 Firefox/10.0 (Chrome)', + 'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7', + 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', + 'Accept-Encoding': 'gzip, deflate', + 'Accept-Language': 'en-us,en;q=0.5', +} + + +def preferredencoding(): + """Get preferred encoding. 
def preferredencoding():
    """Get preferred encoding.

    Returns the best encoding scheme for the system, based on
    locale.getpreferredencoding() and some further tweaks.
    """
    try:
        pref = locale.getpreferredencoding()
        'TEST'.encode(pref)
    except Exception:
        # Unknown or broken locale (was a bare "except:"): fall back to UTF-8.
        pref = 'UTF-8'

    return pref


def write_json_file(obj, fn):
    """Encode obj as JSON and write it to fn, atomically if possible."""

    fn = encodeFilename(fn)
    if sys.version_info < (3, 0) and sys.platform != 'win32':
        encoding = get_filesystem_encoding()
        # os.path.basename returns a bytes object, but NamedTemporaryFile
        # will fail if the filename contains non ascii characters unless we
        # use a unicode object
        path_basename = lambda f: os.path.basename(fn).decode(encoding)
        # the same for os.path.dirname
        path_dirname = lambda f: os.path.dirname(fn).decode(encoding)
    else:
        path_basename = os.path.basename
        path_dirname = os.path.dirname

    args = {
        'suffix': '.tmp',
        'prefix': path_basename(fn) + '.',
        'dir': path_dirname(fn),
        'delete': False,
    }

    # In Python 2.x, json.dump expects a bytestream.
    # In Python 3.x, it writes to a character stream
    if sys.version_info < (3, 0):
        args['mode'] = 'wb'
    else:
        args.update({
            'mode': 'w',
            'encoding': 'utf-8',
        })

    tf = tempfile.NamedTemporaryFile(**args)

    try:
        with tf:
            json.dump(obj, tf)
        if sys.platform == 'win32':
            # Need to remove existing file on Windows, else os.rename raises
            # WindowsError or FileExistsError.
            try:
                os.unlink(fn)
            except OSError:
                pass
        os.rename(tf.name, fn)
    except:  # noqa: E722 -- deliberately bare: clean up the temp file, then re-raise
        try:
            os.remove(tf.name)
        except OSError:
            pass
        raise


if sys.version_info >= (2, 7):
    def find_xpath_attr(node, xpath, key, val):
        """ Find the xpath xpath[@key=val] """
        assert re.match(r'^[a-zA-Z-]+$', key)
        assert re.match(r'^[a-zA-Z0-9@\s:._-]*$', val)
        expr = xpath + "[@%s='%s']" % (key, val)
        return node.find(expr)
else:
    def find_xpath_attr(node, xpath, key, val):
        # Here comes the crazy part: In 2.6, if the xpath is a unicode,
        # .//node does not match if a node is a direct child of . !
        if isinstance(xpath, unicode):
            xpath = xpath.encode('ascii')

        for f in node.findall(xpath):
            if f.attrib.get(key) == val:
                return f
        return None

# On python2.6 the xml.etree.ElementTree.Element methods don't support
# the namespace parameter


def xpath_with_ns(path, ns_map):
    """Expand 'ns:tag' steps of *path* into '{uri}tag' using *ns_map*."""
    components = [c.split(':') for c in path.split('/')]
    replaced = []
    for c in components:
        if len(c) == 1:
            replaced.append(c[0])
        else:
            ns, tag = c
            replaced.append('{%s}%s' % (ns_map[ns], tag))
    return '/'.join(replaced)


def xpath_text(node, xpath, name=None, fatal=False):
    """Return the text of the first element matching *xpath*, or None.

    With fatal=True, a missing element raises ExtractorError instead.
    """
    if sys.version_info < (2, 7):  # Crazy 2.6
        xpath = xpath.encode('ascii')

    n = node.find(xpath)
    if n is None or n.text is None:
        if fatal:
            name = xpath if name is None else name
            raise ExtractorError('Could not find XML element %s' % name)
        else:
            return None
    return n.text


def get_element_by_id(id, html):
    """Return the content of the tag with the specified ID in the passed HTML document"""
    return get_element_by_attribute("id", id, html)


def get_element_by_attribute(attribute, value, html):
    """Return the content of the tag with the specified attribute in the passed HTML document"""

    m = re.search(r'''(?xs)
        <([a-zA-Z0-9:._-]+)
         (?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]+|="[^"]+"|='[^']+'))*?
         \s+%s=['"]?%s['"]?
         (?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]+|="[^"]+"|='[^']+'))*?
        \s*>
        (?P<content>.*?)
        </\1>
    ''' % (re.escape(attribute), re.escape(value)), html)

    if not m:
        return None
    res = m.group('content')

    if res.startswith('"') or res.startswith("'"):
        res = res[1:-1]

    return unescapeHTML(res)


def clean_html(html):
    """Clean an HTML snippet into a readable string"""
    # Newline vs <br />
    html = html.replace('\n', ' ')
    html = re.sub(r'\s*<\s*br\s*/?\s*>\s*', '\n', html)
    html = re.sub(r'<\s*/\s*p\s*>\s*<\s*p[^>]*>', '\n', html)
    # Strip html tags
    html = re.sub('<.*?>', '', html)
    # Replace html entities
    html = unescapeHTML(html)
    return html.strip()


def sanitize_open(filename, open_mode):
    """Try to open the given filename, and slightly tweak it if this fails.

    Attempts to open the given filename. If this fails, it tries to change
    the filename slightly, step by step, until it's either able to open it
    or it fails and raises a final exception, like the standard open()
    function.

    It returns the tuple (stream, definitive_file_name).
    """
    try:
        if filename == '-':
            if sys.platform == 'win32':
                import msvcrt
                msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
            return (sys.stdout.buffer if hasattr(sys.stdout, 'buffer') else sys.stdout, filename)
        stream = open(encodeFilename(filename), open_mode)
        return (stream, filename)
    except (IOError, OSError) as err:
        if err.errno in (errno.EACCES,):
            raise

        # In case of error, try to remove win32 forbidden chars.
        # Bug fix: os.path.join() was called with a single generator
        # argument, which raises TypeError instead of joining the
        # sanitized parts -- unpack them instead.
        # NOTE(review): the substitution also touches the directory part,
        # preserved from the original char class; verify on POSIX paths.
        alt_filename = os.path.join(*(
            re.sub('[/<>:"\\|\\\\?\\*]', '#', path_part)
            for path_part in os.path.split(filename)))
        if alt_filename == filename:
            raise
        else:
            # An exception here should be caught in the caller.
            # Bug fix: retry with the sanitized alt_filename -- re-opening
            # the original filename would just fail again.
            stream = open(encodeFilename(alt_filename), open_mode)
            return (stream, alt_filename)


def timeconvert(timestr):
    """Convert RFC 2822 defined time string into system timestamp"""
    timestamp = None
    timetuple = email.utils.parsedate_tz(timestr)
    if timetuple is not None:
        timestamp = email.utils.mktime_tz(timetuple)
    return timestamp


def sanitize_filename(s, restricted=False, is_id=False):
    """Sanitizes a string so it could be used as part of a filename.
    If restricted is set, use a stricter subset of allowed characters.
    Set is_id if this is not an arbitrary string, but an ID that should be kept if possible
    """
    def replace_insane(char):
        if char == '?' or ord(char) < 32 or ord(char) == 127:
            return ''
        elif char == '"':
            return '' if restricted else '\''
        elif char == ':':
            return '_-' if restricted else ' -'
        elif char in '\\/|*<>':
            return '_'
        if restricted and (char in '!&\'()[]{}$;`^,#' or char.isspace()):
            return '_'
        if restricted and ord(char) > 127:
            return '_'
        return char

    result = ''.join(map(replace_insane, s))
    if not is_id:
        while '__' in result:
            result = result.replace('__', '_')
        result = result.strip('_')
        # Common case of "Foreign band name - English song title"
        if restricted and result.startswith('-_'):
            result = result[2:]
        if not result:
            result = '_'
    return result


def orderedSet(iterable):
    """ Remove all duplicates from the input iterable, keeping order """
    # Deliberately O(n^2): a membership set would require hashable
    # elements, and callers pass unhashable items (e.g. dicts).
    res = []
    for el in iterable:
        if el not in res:
            res.append(el)
    return res


def _htmlentity_transform(entity):
    """Transforms an HTML entity to a character."""
    # Known non-numeric HTML entity
    if entity in compat_html_entities.name2codepoint:
        return compat_chr(compat_html_entities.name2codepoint[entity])

    # Numeric references: &#123; (decimal) or &#x7F; (hexadecimal).
    # Bug fix: the old pattern '#(x?[0-9]+)' could not match the hex
    # digits a-f, so entities such as &#xFF; fell through to the
    # literal branch below.
    mobj = re.match(r'#(x[0-9a-fA-F]+|[0-9]+)', entity)
    if mobj is not None:
        numstr = mobj.group(1)
        if numstr.startswith('x'):
            base = 16
            numstr = '0%s' % numstr
        else:
            base = 10
        return compat_chr(int(numstr, base))

    # Unknown entity in name, return its literal representation
    return ('&%s;' % entity)


def unescapeHTML(s):
    """Replace all HTML entities in *s* with the characters they denote."""
    if s is None:
        return None
    assert type(s) == compat_str

    return re.sub(
        r'&([^;]+);', lambda m: _htmlentity_transform(m.group(1)), s)


def encodeFilename(s, for_subprocess=False):
    """
    @param s The name of the file
    """

    assert type(s) == compat_str

    # Python 3 has a Unicode API
    if sys.version_info >= (3, 0):
        return s

    if sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5:
        # Pass '' directly to use Unicode APIs on Windows 2000 and up
        # (Detecting Windows NT 4 is tricky because 'major >= 4' would
        # match Windows 9x series as well. Besides, NT 4 is obsolete.)
        if not for_subprocess:
            return s
        else:
            # For subprocess calls, encode with locale encoding
            # Refer to http://stackoverflow.com/a/9951851/35070
            encoding = preferredencoding()
    else:
        encoding = sys.getfilesystemencoding()
    if encoding is None:
        encoding = 'utf-8'
    return s.encode(encoding, 'ignore')
def encodeArgument(s):
    """Encode a command-line argument for subprocess use (via encodeFilename)."""
    if not isinstance(s, compat_str):
        # Legacy code that uses byte strings
        # Uncomment the following line after fixing all post processors
        # assert False, 'Internal error: %r should be of type %r, is %r' % (s, compat_str, type(s))
        s = s.decode('ascii')
    return encodeFilename(s, True)


def decodeOption(optval):
    """Decode a (possibly byte-string) option value into text."""
    if optval is None:
        return optval
    if isinstance(optval, bytes):
        optval = optval.decode(preferredencoding())

    assert isinstance(optval, compat_str)
    return optval


def formatSeconds(secs):
    """Format a duration in seconds as 'S', 'M:SS' or 'H:MM:SS'.

    Fix: exact boundaries previously fell into the shorter format —
    3600 rendered as '60:00' and 60 as '60'. Using >= makes them roll
    over to '1:00:00' and '1:00' respectively.
    """
    if secs >= 3600:
        return '%d:%02d:%02d' % (secs // 3600, (secs % 3600) // 60, secs % 60)
    elif secs >= 60:
        return '%d:%02d' % (secs // 60, secs % 60)
    else:
        return '%d' % secs
ssl_version=ssl.PROTOCOL_TLSv1) + except ssl.SSLError: + self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file, ssl_version=ssl.PROTOCOL_SSLv23) + + class HTTPSHandlerV3(compat_urllib_request.HTTPSHandler): + def https_open(self, req): + return self.do_open(HTTPSConnectionV3, req) + return HTTPSHandlerV3(**kwargs) + else: # Python < 3.4 + context = ssl.SSLContext(ssl.PROTOCOL_SSLv23) + context.verify_mode = (ssl.CERT_NONE + if opts_no_check_certificate + else ssl.CERT_REQUIRED) + context.set_default_verify_paths() + return compat_urllib_request.HTTPSHandler(context=context, **kwargs) + + +class ExtractorError(Exception): + """Error during info extraction.""" + + def __init__(self, msg, tb=None, expected=False, cause=None, video_id=None): + """ tb, if given, is the original traceback (so that it can be printed out). + If expected is set, this is a normal error message and most likely not a bug in youtube-dl. + """ + + if sys.exc_info()[0] in (compat_urllib_error.URLError, socket.timeout, UnavailableVideoError): + expected = True + if video_id is not None: + msg = video_id + ': ' + msg + if cause: + msg += ' (caused by %r)' % cause + if not expected: + if ytdl_is_updateable(): + update_cmd = 'type youtube-dl -U to update' + else: + update_cmd = 'see https://yt-dl.org/update on how to update' + msg += '; please report this issue on https://yt-dl.org/bug .' + msg += ' Make sure you are using the latest version; %s.' % update_cmd + msg += ' Be sure to call youtube-dl with the --verbose flag and include its complete output.' 
class RegexNotFoundError(ExtractorError):
    """Raised when an expected regular expression did not match."""


class DownloadError(Exception):
    """Download Error exception.

    Thrown by FileDownloader objects that are not configured to continue
    on errors; carries the appropriate error message.
    """

    def __init__(self, msg, exc_info=None):
        """exc_info, if given, is the original sys.exc_info() triple."""
        super(DownloadError, self).__init__(msg)
        self.exc_info = exc_info


class SameFileError(Exception):
    """Raised when multiple downloads would target the same file on disk."""


class PostProcessingError(Exception):
    """Raised by a PostProcessor's .run() to signal a postprocessing error."""

    def __init__(self, msg):
        self.msg = msg


class MaxDownloadsReached(Exception):
    """Raised once the --max-downloads limit has been reached."""


class UnavailableVideoError(Exception):
    """Raised when a video is requested in a format it is not available in."""
+ """ + # Both in bytes + downloaded = None + expected = None + + def __init__(self, downloaded, expected): + self.downloaded = downloaded + self.expected = expected + + +class YoutubeDLHandler(compat_urllib_request.HTTPHandler): + """Handler for HTTP requests and responses. + + This class, when installed with an OpenerDirector, automatically adds + the standard headers to every HTTP request and handles gzipped and + deflated responses from web servers. If compression is to be avoided in + a particular request, the original request in the program code only has + to include the HTTP header "Youtubedl-No-Compression", which will be + removed before making the real request. + + Part of this code was copied from: + + http://techknack.net/python-urllib2-handlers/ + + Andrew Rowls, the author of that code, agreed to release it to the + public domain. + """ + + @staticmethod + def deflate(data): + try: + return zlib.decompress(data, -zlib.MAX_WBITS) + except zlib.error: + return zlib.decompress(data) + + @staticmethod + def addinfourl_wrapper(stream, headers, url, code): + if hasattr(compat_urllib_request.addinfourl, 'getcode'): + return compat_urllib_request.addinfourl(stream, headers, url, code) + ret = compat_urllib_request.addinfourl(stream, headers, url) + ret.code = code + return ret + + def http_request(self, req): + for h, v in std_headers.items(): + if h not in req.headers: + req.add_header(h, v) + if 'Youtubedl-no-compression' in req.headers: + if 'Accept-encoding' in req.headers: + del req.headers['Accept-encoding'] + del req.headers['Youtubedl-no-compression'] + if 'Youtubedl-user-agent' in req.headers: + if 'User-agent' in req.headers: + del req.headers['User-agent'] + req.headers['User-agent'] = req.headers['Youtubedl-user-agent'] + del req.headers['Youtubedl-user-agent'] + + if sys.version_info < (2, 7) and '#' in req.get_full_url(): + # Python 2.6 is brain-dead when it comes to fragments + req._Request__original = req._Request__original.partition('#')[0] + 
def parse_iso8601(date_str, delimiter='T'):
    """Return the UNIX timestamp for an ISO 8601 date string (or None)."""

    if date_str is None:
        return None

    offset = datetime.timedelta()
    tz_match = re.search(
        r'(\.[0-9]+)?(?:Z$| ?(?P<sign>\+|-)(?P<hours>[0-9]{2}):?(?P<minutes>[0-9]{2})$)',
        date_str)
    if tz_match:
        # Strip the (fractional seconds +) timezone suffix before parsing.
        date_str = date_str[:-len(tz_match.group(0))]
        if tz_match.group('sign'):
            direction = 1 if tz_match.group('sign') == '+' else -1
            offset = datetime.timedelta(
                hours=direction * int(tz_match.group('hours')),
                minutes=direction * int(tz_match.group('minutes')))

    naive = datetime.datetime.strptime(
        date_str, '%Y-%m-%d{0}%H:%M:%S'.format(delimiter))
    return calendar.timegm((naive - offset).timetuple())
def determine_ext(url, default_ext='unknown_video'):
    """Guess a file extension from a URL; fall back to *default_ext*."""
    if url is None:
        return default_ext
    candidate = url.partition('?')[0].rpartition('.')[2]
    # Only accept a purely alphanumeric trailing component as an extension.
    return candidate if re.match(r'^[A-Za-z0-9]+$', candidate) else default_ext
def date_from_str(date_str):
    """Parse a date spec into a datetime.date.

    Accepts 'YYYYMMDD', 'now'/'today', 'yesterday', or a relative form
    matching (now|today)[+-][0-9](day|week|month|year)(s)?.
    """
    today = datetime.date.today()
    if date_str in ('now', 'today'):
        return today
    if date_str == 'yesterday':
        return today - datetime.timedelta(days=1)
    rel = re.match(
        '(now|today)(?P<sign>[+-])(?P<time>\\d+)(?P<unit>day|week|month|year)(s)?',
        date_str)
    if rel is not None:
        amount = int(rel.group('time'))
        if rel.group('sign') == '-':
            amount = -amount
        unit = rel.group('unit')
        # timedelta has no month/year units; approximate them in days.
        if unit == 'month':
            unit, amount = 'day', amount * 30
        elif unit == 'year':
            unit, amount = 'day', amount * 365
        return today + datetime.timedelta(**{unit + 's': amount})
    return datetime.datetime.strptime(date_str, "%Y%m%d").date()


def hyphenate_date(date_str):
    """Convert 'YYYYMMDD' to 'YYYY-MM-DD'; return input unchanged otherwise."""
    parts = re.match(r'^(\d\d\d\d)(\d\d)(\d\d)$', date_str)
    return '-'.join(parts.groups()) if parts is not None else date_str
def platform_name():
    """Return the platform name as a compat_str."""
    name = platform.platform()
    if isinstance(name, bytes):
        # Python 2 may hand back a byte string; normalise to text.
        name = name.decode(preferredencoding())

    assert isinstance(name, compat_str)
    return name
def write_string(s, out=None, encoding=None):
    """Write the text *s* to *out* (default stderr), coping with the
    bytes/unicode stream flavours across Python 2/3 and Windows consoles."""
    if out is None:
        out = sys.stderr
    assert type(s) == compat_str

    if sys.platform == 'win32' and encoding is None and hasattr(out, 'fileno'):
        if _windows_write_string(s, out):
            return

    if ('b' in getattr(out, 'mode', '') or
            sys.version_info[0] < 3):  # Python 2 lies about mode of sys.stderr
        out.write(s.encode(encoding or preferredencoding(), 'ignore'))
    elif hasattr(out, 'buffer'):
        enc = encoding or getattr(out, 'encoding', None) or preferredencoding()
        out.buffer.write(s.encode(enc, 'ignore'))
    else:
        out.write(s)
    out.flush()


def bytes_to_intlist(bs):
    """Return the byte values of *bs* as a list of ints (Python 2/3 safe)."""
    if not bs:
        return []
    if isinstance(bs[0], int):  # Python 3: indexing bytes yields ints already
        return list(bs)
    return [ord(c) for c in bs]


def intlist_to_bytes(xs):
    """Inverse of bytes_to_intlist: pack a list of ints into a bytes object."""
    if not xs:
        return b''
    return struct_pack('%dB' % len(xs), *xs)
class locked_file(object):
    """File wrapper holding an advisory lock for the life of the context.

    Opens *filename* via io.open and takes a shared lock for 'r', an
    exclusive lock for 'a'/'w' (delegating to the platform _lock_file /
    _unlock_file helpers).
    """

    def __init__(self, filename, mode, encoding=None):
        assert mode in ['r', 'a', 'w']
        self.f = io.open(filename, mode, encoding=encoding)
        self.mode = mode

    def __enter__(self):
        want_exclusive = self.mode != 'r'
        try:
            _lock_file(self.f, want_exclusive)
        except IOError:
            # Could not take the lock: don't leak the open file object.
            self.f.close()
            raise
        return self

    def __exit__(self, etype, value, traceback):
        try:
            _unlock_file(self.f)
        finally:
            self.f.close()

    def __iter__(self):
        return iter(self.f)

    def write(self, *args):
        return self.f.write(*args)

    def read(self, *args):
        return self.f.read(*args)
def takewhile_inclusive(pred, seq):
    """Like itertools.takewhile, but also yield the first failing element."""
    for item in seq:
        yield item
        if not pred(item):
            return


def format_bytes(bytes):
    """Render a byte count with a binary-prefix suffix, e.g. '1.00MiB'."""
    if bytes is None:
        return 'N/A'
    if type(bytes) is str:
        bytes = float(bytes)
    exponent = 0 if bytes == 0.0 else int(math.log(bytes, 1024.0))
    suffix = ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB'][exponent]
    return '%.2f%s' % (float(bytes) / float(1024 ** exponent), suffix)
def get_term_width():
    """Best-effort terminal width: $COLUMNS, then `stty size`, else None."""
    columns = compat_getenv('COLUMNS', None)
    if columns:
        return int(columns)

    try:
        sp = subprocess.Popen(
            ['stty', 'size'],
            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        out, err = sp.communicate()
        return int(out.split()[1])
    except Exception:
        # Fix: was a bare `except:` which also swallowed
        # KeyboardInterrupt / SystemExit.
        pass
    return None


def month_by_name(name):
    """Return the 1-based month number for an English month name, else None."""

    ENGLISH_NAMES = [
        'January', 'February', 'March', 'April', 'May', 'June',
        'July', 'August', 'September', 'October', 'November', 'December']
    try:
        return ENGLISH_NAMES.index(name) + 1
    except ValueError:
        return None


def fix_xml_ampersands(xml_str):
    """Replace all bare '&' by '&amp;' in XML, leaving existing entities alone.

    Fix: the replacement string had been garbled to a plain '&', turning
    the function into a no-op; restore the '&amp;' entity.
    """
    return re.sub(
        r'&(?!amp;|lt;|gt;|apos;|quot;|#x[0-9a-fA-F]{,4};|#[0-9]{,4};)',
        '&amp;',
        xml_str)


def setproctitle(title):
    """Set the process title via libc prctl (Linux only); no-op elsewhere."""
    assert isinstance(title, compat_str)
    try:
        libc = ctypes.cdll.LoadLibrary("libc.so.6")
    except OSError:
        return
    title_bytes = title.encode('utf-8')
    buf = ctypes.create_string_buffer(len(title_bytes))
    buf.value = title_bytes
    try:
        libc.prctl(15, buf, 0, 0, 0)
    except AttributeError:
        return  # Strange libc, just skip this


def remove_start(s, start):
    """Strip *start* from the beginning of *s* if present."""
    if s.startswith(start):
        return s[len(start):]
    return s


def remove_end(s, end):
    """Strip *end* from the end of *s* if present."""
    if s.endswith(end):
        return s[:-len(end)]
    return s


def url_basename(url):
    """Return the last path component of *url* (query/fragment excluded)."""
    path = compat_urlparse.urlparse(url).path
    return path.strip('/').split('/')[-1]
def int_or_none(v, scale=1, default=None, get_attr=None, invscale=1):
    """int(v) * invscale // scale, or *default* when v is None or ''."""
    if get_attr and v is not None:
        v = getattr(v, get_attr, None)
    if v == '':
        v = None
    return default if v is None else int(v) * invscale // scale


def str_or_none(v, default=None):
    """compat_str(v), or *default* when v is None."""
    return default if v is None else compat_str(v)


def str_to_int(int_str):
    """A more relaxed version of int_or_none: drops ',', '.' and '+'."""
    if int_str is None:
        return None
    return int(re.sub(r'[,\.\+]', '', int_str))


def float_or_none(v, scale=1, invscale=1, default=None):
    """float(v) * invscale / scale, or *default* when v is None."""
    return default if v is None else float(v) * invscale / scale


def parse_duration(s):
    """Parse a human duration ('1:05:30', '3 mins', '90s', ...) to seconds."""
    if s is None:
        return None

    m = re.match(
        r'''(?ix)T?
        (?:
            (?P<only_mins>[0-9.]+)\s*(?:mins?|minutes?)\s*|
            (?P<only_hours>[0-9.]+)\s*(?:hours?)|

            (?:
                (?:(?P<hours>[0-9]+)\s*(?:[:h]|hours?)\s*)?
                (?P<mins>[0-9]+)\s*(?:[:m]|mins?|minutes?)\s*
            )?
            (?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?\s*(?:s|secs?|seconds?)?
        )$''', s.strip())
    if not m:
        return None
    if m.group('only_mins'):
        return float_or_none(m.group('only_mins'), invscale=60)
    if m.group('only_hours'):
        return float_or_none(m.group('only_hours'), invscale=60 * 60)
    total = 0
    for group, factor in (('secs', 1), ('mins', 60), ('hours', 3600)):
        if m.group(group):
            total += int(m.group(group)) * factor
    if m.group('ms'):
        total += float(m.group('ms'))
    return total


def prepend_extension(filename, ext):
    """Insert *ext* before the real extension: 'a.mp4' -> 'a.<ext>.mp4'."""
    name, real_ext = os.path.splitext(filename)
    return '{0}.{1}{2}'.format(name, ext, real_ext)
def get_exe_version(exe, args=['--version'],
                    version_re=None, unrecognized='present'):
    """Return the version of the specified executable,
    or False when the executable is not present."""
    try:
        out, _ = subprocess.Popen(
            [exe] + args,
            stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()
    except OSError:
        return False
    if isinstance(out, bytes):  # Python 2.x
        out = out.decode('ascii', 'ignore')
    return detect_exe_version(out, version_re, unrecognized)


def detect_exe_version(output, version_re=None, unrecognized='present'):
    """Extract a version number from *output*; *unrecognized* when absent."""
    assert isinstance(output, compat_str)
    pattern = version_re if version_re is not None else r'version\s+([-0-9._a-zA-Z]+)'
    found = re.search(pattern, output)
    return found.group(1) if found else unrecognized


class PagedList(object):
    """Lazily paged sequence; subclasses provide getslice()."""

    def __len__(self):
        # This is only useful for tests
        return len(self.getslice())
def uppercase_escape(s):
    """Decode uppercase \\UXXXXXXXX escape sequences in *s* to characters."""
    decode = codecs.getdecoder('unicode_escape')

    def _expand(match):
        return decode(match.group(0))[0]

    return re.sub(r'\\U[0-9a-fA-F]{8}', _expand, s)


def escape_rfc3986(s):
    """Percent-escape non-ASCII characters as suggested by RFC 3986."""
    if sys.version_info < (3, 0) and isinstance(s, unicode):
        s = s.encode('utf-8')
    return compat_urllib_parse.quote(s, b"%/;:@&=+$,!~*'()?#[]")


def escape_url(url):
    """Escape each component of *url* per RFC 3986 and reassemble it."""
    parsed = compat_urllib_parse_urlparse(url)
    return parsed._replace(
        path=escape_rfc3986(parsed.path),
        params=escape_rfc3986(parsed.params),
        query=escape_rfc3986(parsed.query),
        fragment=escape_rfc3986(parsed.fragment)
    ).geturl()
def read_batch_urls(batch_fd):
    """Read URLs from a batch file object, skipping comments and blank lines.

    Fix: the UTF-8 BOM check compared against the raw byte triplet
    '\\xef\\xbb\\xbf', but after .decode('utf-8') a BOM arrives as the
    single character U+FEFF, so it was never stripped. Both spellings
    are handled now.
    """
    def fixup(url):
        if not isinstance(url, compat_str):
            url = url.decode('utf-8', 'replace')
        # A BOM may survive as the mis-decoded byte triplet or as U+FEFF.
        for bom in ('\xef\xbb\xbf', '\ufeff'):
            if url.startswith(bom):
                url = url[len(bom):]
        url = url.strip()
        if url.startswith(('#', ';', ']')):
            return False
        return url

    with contextlib.closing(batch_fd) as fd:
        return [url for url in map(fixup, fd) if url]


def urlencode_postdata(*args, **kargs):
    """urlencode and ASCII-encode POST data for urllib."""
    return compat_urllib_parse.urlencode(*args, **kargs).encode('ascii')


try:
    etree_iter = xml.etree.ElementTree.Element.iter
except AttributeError:  # Python <=2.6
    etree_iter = lambda n: n.findall('.//*')


def parse_xml(s):
    """Parse the XML document in string *s*, ignoring any doctype."""
    class TreeBuilder(xml.etree.ElementTree.TreeBuilder):
        def doctype(self, name, pubid, system):
            pass  # Ignore doctypes

    parser = xml.etree.ElementTree.XMLParser(target=TreeBuilder())
    kwargs = {'parser': parser} if sys.version_info >= (2, 7) else {}
    tree = xml.etree.ElementTree.XML(s.encode('utf-8'), **kwargs)
    # Fix up XML parser in Python 2.x: promote text nodes to unicode
    if sys.version_info < (3, 0):
        for n in etree_iter(tree):
            if n.text is not None:
                if not isinstance(n.text, compat_str):
                    n.text = n.text.decode('utf-8')
    return tree


# US MPAA rating -> minimum age, used by parse_age_limit().
US_RATINGS = {
    'G': 0,
    'PG': 10,
    'PG-13': 13,
    'R': 16,
    'NC': 18,
}


def parse_age_limit(s):
    """Parse an age limit like '18', '16+' or a US rating into an int."""
    if s is None:
        return None
    m = re.match(r'^(?P<age>\d{1,2})\+?$', s)
    return int(m.group('age')) if m else US_RATINGS.get(s, None)


def strip_jsonp(code):
    """Strip a JSONP callback wrapper, returning the inner JSON text."""
    return re.sub(
        r'(?s)^[a-zA-Z0-9_]+\s*\(\s*(.*)\);?\s*?(?://[^\n]*)*$', r'\1', code)
def qualities(quality_ids):
    """Return a scorer mapping a quality id to its list index (unknown -> -1)."""
    def q(qid):
        try:
            return quality_ids.index(qid)
        except ValueError:
            return -1
    return q


DEFAULT_OUTTMPL = '%(title)s-%(id)s.%(ext)s'


def limit_length(s, length):
    """Truncate *s* to at most *length* characters, ending with an ellipsis."""
    if s is None:
        return None
    ELLIPSES = '...'
    if len(s) <= length:
        return s
    return s[:length - len(ELLIPSES)] + ELLIPSES


def version_tuple(v):
    """Split a dotted/dashed version string into a tuple of ints."""
    return tuple(int(part) for part in re.split(r'[-.]', v))


def is_outdated_version(version, limit, assume_new=True):
    """True when *version* is older than *limit* (unparsable -> assume_new)."""
    if not version:
        return not assume_new
    try:
        return version_tuple(version) < version_tuple(limit)
    except ValueError:
        return not assume_new


def ytdl_is_updateable():
    """Returns if youtube-dl can be updated with -U"""
    from zipimport import zipimporter

    return isinstance(globals().get('__loader__'), zipimporter) or hasattr(sys, 'frozen')


def args_to_str(args):
    # Get a short string representation for a subprocess command
    return ' '.join(shlex_quote(a) for a in args)
b/couchpotato/core/media/movie/providers/userscript/__init__.py similarity index 100% rename from libs/migrate/versioning/templates/script/__init__.py rename to couchpotato/core/media/movie/providers/userscript/__init__.py diff --git a/couchpotato/core/media/movie/providers/userscript/allocine.py b/couchpotato/core/media/movie/providers/userscript/allocine.py new file mode 100644 index 0000000000..238a6b54dc --- /dev/null +++ b/couchpotato/core/media/movie/providers/userscript/allocine.py @@ -0,0 +1,36 @@ +import traceback + +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.userscript.base import UserscriptBase + + +log = CPLog(__name__) + +autoload = 'AlloCine' + + +class AlloCine(UserscriptBase): + + includes = ['http://www.allocine.fr/film/*'] + + def getMovie(self, url): + + if not 'fichefilm_gen_cfilm' in url: + return 'Url isn\'t from a movie' + + try: + data = self.getUrl(url) + except: + return + + try: + start = data.find('<title>') + end = data.find('', start) + page_title = data[start + len(''):end].strip().split('-') + + name = page_title[0].strip() + year = page_title[1].strip()[-4:] + return self.search(name, year) + except: + log.error('Failed parsing page for title and year: %s', traceback.format_exc()) + diff --git a/couchpotato/core/media/movie/providers/userscript/appletrailers.py b/couchpotato/core/media/movie/providers/userscript/appletrailers.py new file mode 100644 index 0000000000..23df222f9e --- /dev/null +++ b/couchpotato/core/media/movie/providers/userscript/appletrailers.py @@ -0,0 +1,35 @@ +import re +import traceback + +from couchpotato import tryInt, CPLog +from couchpotato.core.media._base.providers.userscript.base import UserscriptBase + +log = CPLog(__name__) + +autoload = 'AppleTrailers' + + +class AppleTrailers(UserscriptBase): + + includes = ['http://trailers.apple.com/trailers/*'] + + def getMovie(self, url): + + try: + data = self.getUrl(url) + except: + return + + try: + id = 
re.search("FilmId.*=.*\'(?P<id>.*)\';", data) + id = id.group('id') + + data = self.getJsonData('https://trailers.apple.com/trailers/feeds/data/%s.json' % id) + + name = data['page']['movie_title'] + year = tryInt(data['page']['release_date'][0:4]) + + return self.search(name, year) + except: + log.error('Failed getting apple trailer info: %s', traceback.format_exc()) + return None diff --git a/couchpotato/core/media/movie/providers/userscript/criticker.py b/couchpotato/core/media/movie/providers/userscript/criticker.py new file mode 100644 index 0000000000..cc0bee844b --- /dev/null +++ b/couchpotato/core/media/movie/providers/userscript/criticker.py @@ -0,0 +1,8 @@ +from couchpotato.core.media._base.providers.userscript.base import UserscriptBase + +autoload = 'Criticker' + + +class Criticker(UserscriptBase): + + includes = ['http://www.criticker.com/film/*'] diff --git a/couchpotato/core/media/movie/providers/userscript/filmcentrum.py b/couchpotato/core/media/movie/providers/userscript/filmcentrum.py new file mode 100644 index 0000000000..b2b15a9e23 --- /dev/null +++ b/couchpotato/core/media/movie/providers/userscript/filmcentrum.py @@ -0,0 +1,8 @@ +from couchpotato.core.media._base.providers.userscript.base import UserscriptBase + +autoload = 'FilmCentrum' + + +class FilmCentrum(UserscriptBase): + + includes = ['*://filmcentrum.nl/films/*'] diff --git a/couchpotato/core/media/movie/providers/userscript/filmstarts.py b/couchpotato/core/media/movie/providers/userscript/filmstarts.py new file mode 100644 index 0000000000..5201ce0336 --- /dev/null +++ b/couchpotato/core/media/movie/providers/userscript/filmstarts.py @@ -0,0 +1,31 @@ +from bs4 import BeautifulSoup +from couchpotato.core.media._base.providers.userscript.base import UserscriptBase +import re + +autoload = 'Filmstarts' + + +class Filmstarts(UserscriptBase): + + includes = ['*://www.filmstarts.de/kritiken/*'] + + def getMovie(self, url): + try: + data = self.getUrl(url) + except: + return + + html = 
BeautifulSoup(data) + table = html.find("section", attrs={"class": "section ovw ovw-synopsis", "id": "synopsis-details"}) + + if table.find(text=re.compile('Originaltitel')): #some trailing whitespaces on some pages + # Get original film title from the table specified above + name = name = table.find("span", text=re.compile("Originaltitel")).findNext('h2').text + else: + # If none is available get the title from the meta data + name = html.find("meta", {"property":"og:title"})['content'] + + # Year of production is not available in the meta data, so get it from the table + year = table.find("span", text=re.compile("Produktionsjahr")).findNext('span').text + + return self.search(name, year) diff --git a/couchpotato/core/media/movie/providers/userscript/filmweb.py b/couchpotato/core/media/movie/providers/userscript/filmweb.py new file mode 100644 index 0000000000..1f7b371e7d --- /dev/null +++ b/couchpotato/core/media/movie/providers/userscript/filmweb.py @@ -0,0 +1,30 @@ +from bs4 import BeautifulSoup +from couchpotato import fireEvent + +from couchpotato.core.media._base.providers.userscript.base import UserscriptBase + +autoload = 'Filmweb' + + +class Filmweb(UserscriptBase): + + version = 3 + + includes = ['http://www.filmweb.pl/film/*'] + + def getMovie(self, url): + + cookie = {'Cookie': 'welcomeScreen=welcome_screen'} + + try: + data = self.urlopen(url, headers = cookie) + except: + return + + html = BeautifulSoup(data) + name = html.find('meta', {'name': 'title'})['content'][:-9].strip() + name_year = fireEvent('scanner.name_year', name, single = True) + name = name_year.get('name') + year = name_year.get('year') + + return self.search(name, year) diff --git a/couchpotato/core/media/movie/providers/userscript/flickchart.py b/couchpotato/core/media/movie/providers/userscript/flickchart.py new file mode 100644 index 0000000000..8b48ad7c14 --- /dev/null +++ b/couchpotato/core/media/movie/providers/userscript/flickchart.py @@ -0,0 +1,36 @@ +import traceback + 
+from couchpotato.core.event import fireEvent +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.userscript.base import UserscriptBase + + +log = CPLog(__name__) + +autoload = 'Flickchart' + + +class Flickchart(UserscriptBase): + + version = 2 + + includes = ['http://www.flickchart.com/movie/*'] + + def getMovie(self, url): + + try: + data = self.getUrl(url) + except: + return + + try: + start = data.find('<title>') + end = data.find('', start) + page_title = data[start + len(''):end].strip().split('- Flick') + + year_name = fireEvent('scanner.name_year', page_title[0], single = True) + + return self.search(year_name.get('name'), year_name.get('year')) + except: + log.error('Failed parsing page for title and year: %s', traceback.format_exc()) + diff --git a/couchpotato/core/media/movie/providers/userscript/imdb.py b/couchpotato/core/media/movie/providers/userscript/imdb.py new file mode 100644 index 0000000000..dccc4832e0 --- /dev/null +++ b/couchpotato/core/media/movie/providers/userscript/imdb.py @@ -0,0 +1,12 @@ +from couchpotato.core.helpers.variable import getImdb +from couchpotato.core.media._base.providers.userscript.base import UserscriptBase + +autoload = 'IMDB' + + +class IMDB(UserscriptBase): + + includes = ['*://*.imdb.com/title/tt*', '*://imdb.com/title/tt*'] + + def getMovie(self, url): + return self.getInfo(getImdb(url)) diff --git a/couchpotato/core/media/movie/providers/userscript/letterboxd.py b/couchpotato/core/media/movie/providers/userscript/letterboxd.py new file mode 100644 index 0000000000..43b5d309c7 --- /dev/null +++ b/couchpotato/core/media/movie/providers/userscript/letterboxd.py @@ -0,0 +1,8 @@ +from couchpotato.core.media._base.providers.userscript.base import UserscriptBase + +autoload = 'Letterboxd' + + +class Letterboxd(UserscriptBase): + + includes = ['*://letterboxd.com/film/*'] diff --git a/couchpotato/core/media/movie/providers/userscript/moviemeter.py 
b/couchpotato/core/media/movie/providers/userscript/moviemeter.py new file mode 100644 index 0000000000..bd2d6fb39c --- /dev/null +++ b/couchpotato/core/media/movie/providers/userscript/moviemeter.py @@ -0,0 +1,22 @@ +from couchpotato.core.helpers.variable import getImdb +from couchpotato.core.media._base.providers.userscript.base import UserscriptBase + +autoload = 'MovieMeter' + + +class MovieMeter(UserscriptBase): + + includes = ['*://*.moviemeter.nl/film/*', '*://moviemeter.nl/film/*'] + + version = 3 + + def getMovie(self, url): + + cookie = {'Cookie': 'cok=1'} + + try: + data = self.urlopen(url, headers = cookie) + except: + return + + return self.getInfo(getImdb(data)) diff --git a/couchpotato/core/media/movie/providers/userscript/moviesio.py b/couchpotato/core/media/movie/providers/userscript/moviesio.py new file mode 100644 index 0000000000..0381d64ab1 --- /dev/null +++ b/couchpotato/core/media/movie/providers/userscript/moviesio.py @@ -0,0 +1,8 @@ +from couchpotato.core.media._base.providers.userscript.base import UserscriptBase + +autoload = 'MoviesIO' + + +class MoviesIO(UserscriptBase): + + includes = ['*://movies.io/m/*'] diff --git a/couchpotato/core/media/movie/providers/userscript/reddit.py b/couchpotato/core/media/movie/providers/userscript/reddit.py new file mode 100644 index 0000000000..8cb810797a --- /dev/null +++ b/couchpotato/core/media/movie/providers/userscript/reddit.py @@ -0,0 +1,20 @@ +from couchpotato import fireEvent +from couchpotato.core.helpers.variable import splitString +from couchpotato.core.media._base.providers.userscript.base import UserscriptBase + +autoload = 'Reddit' + + +class Reddit(UserscriptBase): + + includes = ['*://www.reddit.com/r/Ijustwatched/comments/*'] + + def getMovie(self, url): + name = splitString(splitString(url, '/ijw_')[-1], '/')[0] + + if name.startswith('ijw_'): + name = name[4:] + + year_name = fireEvent('scanner.name_year', name, single = True) + + return self.search(year_name.get('name'), 
year_name.get('year')) diff --git a/couchpotato/core/media/movie/providers/userscript/rottentomatoes.py b/couchpotato/core/media/movie/providers/userscript/rottentomatoes.py new file mode 100644 index 0000000000..e6ff262c23 --- /dev/null +++ b/couchpotato/core/media/movie/providers/userscript/rottentomatoes.py @@ -0,0 +1,40 @@ +import re +import traceback + +from couchpotato import fireEvent +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.userscript.base import UserscriptBase + + +log = CPLog(__name__) + +autoload = 'RottenTomatoes' + + +class RottenTomatoes(UserscriptBase): + + includes = ['*://www.rottentomatoes.com/m/*'] + excludes = ['*://www.rottentomatoes.com/m/*/*/'] + + version = 4 + + def getMovie(self, url): + + try: + data = self.getUrl(url) + except: + return + + try: + title = re.findall("<title>(.*)", data) + title = title[0].split(' - Rotten')[0].replace(' ', ' ').decode('unicode_escape') + name_year = fireEvent('scanner.name_year', title, single = True) + + name = name_year.get('name') + year = name_year.get('year') + + if name and year: + return self.search(name, year) + + except: + log.error('Failed parsing page for title and year: %s', traceback.format_exc()) diff --git a/couchpotato/core/media/movie/providers/userscript/tmdb.py b/couchpotato/core/media/movie/providers/userscript/tmdb.py new file mode 100644 index 0000000000..fe7b139374 --- /dev/null +++ b/couchpotato/core/media/movie/providers/userscript/tmdb.py @@ -0,0 +1,22 @@ +import re + +from couchpotato.core.event import fireEvent +from couchpotato.core.media._base.providers.userscript.base import UserscriptBase + + +autoload = 'TMDB' + + +class TMDB(UserscriptBase): + + version = 2 + + includes = ['*://www.themoviedb.org/movie/*'] + + def getMovie(self, url): + match = re.search('(?P\d+)', url) + movie = fireEvent('movie.info_by_tmdb', identifier = match.group('id'), extended = False, merge = True) + + if movie['imdb']: + return 
self.getInfo(movie['imdb']) + diff --git a/couchpotato/core/media/movie/providers/userscript/trakt.py b/couchpotato/core/media/movie/providers/userscript/trakt.py new file mode 100644 index 0000000000..1d7f0531e4 --- /dev/null +++ b/couchpotato/core/media/movie/providers/userscript/trakt.py @@ -0,0 +1,11 @@ +from couchpotato.core.media._base.providers.userscript.base import UserscriptBase + +autoload = 'Trakt' + + +class Trakt(UserscriptBase): + + version = 2 + + includes = ['*://trakt.tv/movies/*', '*://*.trakt.tv/movies/*'] + excludes = ['*://trakt.tv/movies/*/*', '*://*.trakt.tv/movies/*/*'] diff --git a/couchpotato/core/media/movie/providers/userscript/youteather.py b/couchpotato/core/media/movie/providers/userscript/youteather.py new file mode 100644 index 0000000000..e7e63b464c --- /dev/null +++ b/couchpotato/core/media/movie/providers/userscript/youteather.py @@ -0,0 +1,17 @@ +import re + +from couchpotato.core.media._base.providers.userscript.base import UserscriptBase + + +autoload = 'YouTheater' + + +class YouTheater(UserscriptBase): + id_re = re.compile("view\.php\?id=(\d+)") + includes = ['http://www.youtheater.com/view.php?id=*', 'http://youtheater.com/view.php?id=*', + 'http://www.sratim.co.il/view.php?id=*', 'http://sratim.co.il/view.php?id=*'] + + def getMovie(self, url): + id = self.id_re.findall(url)[0] + url = 'http://www.youtheater.com/view.php?id=%s' % id + return super(YouTheater, self).getMovie(url) diff --git a/couchpotato/core/media/movie/searcher.py b/couchpotato/core/media/movie/searcher.py new file mode 100755 index 0000000000..a65b95d9c4 --- /dev/null +++ b/couchpotato/core/media/movie/searcher.py @@ -0,0 +1,483 @@ +from datetime import date +import random +import re +import time +import traceback + +from couchpotato import get_db +from couchpotato.api import addApiView +from couchpotato.core.event import addEvent, fireEvent, fireEventAsync +from couchpotato.core.helpers.encoding import simplifyString +from 
couchpotato.core.helpers.variable import getTitle, possibleTitles, getImdb, getIdentifier, tryInt +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.searcher.base import SearcherBase +from couchpotato.core.media.movie import MovieTypeBase +from couchpotato.environment import Env + + +log = CPLog(__name__) + +autoload = 'MovieSearcher' + + +class MovieSearcher(SearcherBase, MovieTypeBase): + + in_progress = False + + def __init__(self): + super(MovieSearcher, self).__init__() + + addEvent('movie.searcher.all', self.searchAll) + addEvent('movie.searcher.all_view', self.searchAllView) + addEvent('movie.searcher.single', self.single) + addEvent('movie.searcher.try_next_release', self.tryNextRelease) + addEvent('movie.searcher.could_be_released', self.couldBeReleased) + addEvent('searcher.correct_release', self.correctRelease) + addEvent('searcher.get_search_title', self.getSearchTitle) + + addApiView('movie.searcher.try_next', self.tryNextReleaseView, docs = { + 'desc': 'Marks the snatched results as ignored and try the next best release', + 'params': { + 'media_id': {'desc': 'The id of the media'}, + }, + }) + + addApiView('movie.searcher.full_search', self.searchAllView, docs = { + 'desc': 'Starts a full search for all wanted movies', + }) + + addApiView('movie.searcher.progress', self.getProgress, docs = { + 'desc': 'Get the progress of current full search', + 'return': {'type': 'object', 'example': """{ + 'progress': False || object, total & to_go, +}"""}, + }) + + if self.conf('run_on_launch'): + addEvent('app.load', self.searchAll) + + def searchAllView(self, **kwargs): + + fireEventAsync('movie.searcher.all', manual = True) + + return { + 'success': not self.in_progress + } + + def searchAll(self, manual = False): + + if self.in_progress: + log.info('Search already in progress') + fireEvent('notify.frontend', type = 'movie.searcher.already_started', data = True, message = 'Full search already in progress') + return + + self.in_progress 
= True + fireEvent('notify.frontend', type = 'movie.searcher.started', data = True, message = 'Full search started') + + medias = [x['_id'] for x in fireEvent('media.with_status', 'active', types = 'movie', with_doc = False, single = True)] + random.shuffle(medias) + + total = len(medias) + self.in_progress = { + 'total': total, + 'to_go': total, + } + + try: + search_protocols = fireEvent('searcher.protocols', single = True) + + for media_id in medias: + + media = fireEvent('media.get', media_id, single = True) + if not media: continue + + try: + self.single(media, search_protocols, manual = manual) + except IndexError: + log.error('Forcing library update for %s, if you see this often, please report: %s', (getIdentifier(media), traceback.format_exc())) + fireEvent('movie.update', media_id) + except: + log.error('Search failed for %s: %s', (getIdentifier(media), traceback.format_exc())) + + self.in_progress['to_go'] -= 1 + + # Break if CP wants to shut down + if self.shuttingDown(): + break + + except SearchSetupError: + pass + + self.in_progress = False + + def single(self, movie, search_protocols = None, manual = False, force_download = False): + + # Find out search type + try: + if not search_protocols: + search_protocols = fireEvent('searcher.protocols', single = True) + except SearchSetupError: + return + + if not movie['profile_id'] or (movie['status'] == 'done' and not manual): + log.debug('Movie doesn\'t have a profile or already done, assuming in manage tab.') + fireEvent('media.restatus', movie['_id'], single = True) + return + + default_title = getTitle(movie) + if not default_title: + log.error('No proper info found for movie, removing it from library to stop it from causing more issues.') + fireEvent('media.delete', movie['_id'], single = True) + return + + # Update media status and check if it is still not done (due to the stop searching after feature + if fireEvent('media.restatus', movie['_id'], single = True) == 'done': + log.debug('No better 
quality found, marking movie %s as done.', default_title) + + pre_releases = fireEvent('quality.pre_releases', single = True) + release_dates = fireEvent('movie.update_release_dates', movie['_id'], merge = True) + + found_releases = [] + previous_releases = movie.get('releases', []) + too_early_to_search = [] + outside_eta_results = 0 + always_search = self.conf('always_search') + ignore_eta = manual + total_result_count = 0 + + fireEvent('notify.frontend', type = 'movie.searcher.started', data = {'_id': movie['_id']}, message = 'Searching for "%s"' % default_title) + + # Ignore eta once every 7 days + if not always_search: + prop_name = 'last_ignored_eta.%s' % movie['_id'] + last_ignored_eta = float(Env.prop(prop_name, default = 0)) + if last_ignored_eta < time.time() - 604800: + ignore_eta = True + Env.prop(prop_name, value = time.time()) + + db = get_db() + + profile = db.get('id', movie['profile_id']) + ret = False + + for index, q_identifier in enumerate(profile.get('qualities', [])): + quality_custom = { + 'index': index, + 'quality': q_identifier, + 'finish': profile['finish'][index], + 'wait_for': tryInt(profile['wait_for'][index]), + '3d': profile['3d'][index] if profile.get('3d') else False, + 'minimum_score': profile.get('minimum_score', 1), + } + + could_not_be_released = not self.couldBeReleased(q_identifier in pre_releases, release_dates, movie['info']['year']) + if not always_search and could_not_be_released: + too_early_to_search.append(q_identifier) + + # Skip release, if ETA isn't ignored + if not ignore_eta: + continue + + has_better_quality = 0 + + # See if better quality is available + for release in movie.get('releases', []): + if release['status'] not in ['available', 'ignored', 'failed']: + is_higher = fireEvent('quality.ishigher', \ + {'identifier': q_identifier, 'is_3d': quality_custom.get('3d', 0)}, \ + {'identifier': release['quality'], 'is_3d': release.get('is_3d', 0)}, \ + profile, single = True) + if is_higher != 'higher': + 
has_better_quality += 1 + + # Don't search for quality lower then already available. + if has_better_quality > 0: + log.info('Better quality (%s) already available or snatched for %s', (q_identifier, default_title)) + fireEvent('media.restatus', movie['_id'], single = True) + break + + quality = fireEvent('quality.single', identifier = q_identifier, single = True) + log.info('Search for %s in %s%s', (default_title, quality['label'], ' ignoring ETA' if always_search or ignore_eta else '')) + + # Extend quality with profile customs + quality['custom'] = quality_custom + + results = fireEvent('searcher.search', search_protocols, movie, quality, single = True) or [] + + # Check if movie isn't deleted while searching + if not fireEvent('media.get', movie.get('_id'), single = True): + break + + # Add them to this movie releases list + found_releases += fireEvent('release.create_from_search', results, movie, quality, single = True) + results_count = len(found_releases) + total_result_count += results_count + if results_count == 0: + log.debug('Nothing found for %s in %s', (default_title, quality['label'])) + + # Keep track of releases found outside ETA window + outside_eta_results += results_count if could_not_be_released else 0 + + # Don't trigger download, but notify user of available releases + if could_not_be_released and results_count > 0: + log.debug('Found %s releases for "%s", but ETA isn\'t correct yet.', (results_count, default_title)) + + # Try find a valid result and download it + if (force_download or not could_not_be_released or always_search) and fireEvent('release.try_download_result', results, movie, quality_custom, single = True): + ret = True + + # Remove releases that aren't found anymore + temp_previous_releases = [] + for release in previous_releases: + if release.get('status') == 'available' and release.get('identifier') not in found_releases: + fireEvent('release.delete', release.get('_id'), single = True) + else: + 
temp_previous_releases.append(release) + previous_releases = temp_previous_releases + del temp_previous_releases + + # Break if CP wants to shut down + if self.shuttingDown() or ret: + break + + if total_result_count > 0: + fireEvent('media.tag', movie['_id'], 'recent', update_edited = True, single = True) + + if len(too_early_to_search) > 0: + log.info2('Too early to search for %s, %s', (too_early_to_search, default_title)) + + if outside_eta_results > 0: + message = 'Found %s releases for "%s" before ETA. Select and download via the dashboard.' % (outside_eta_results, default_title) + log.info(message) + + if not manual: + fireEvent('media.available', message = message, data = {}) + + fireEvent('notify.frontend', type = 'movie.searcher.ended', data = {'_id': movie['_id']}) + + return ret + + def correctRelease(self, nzb = None, media = None, quality = None, **kwargs): + + if media.get('type') != 'movie': return + + media_title = fireEvent('searcher.get_search_title', media, single = True) + + imdb_results = kwargs.get('imdb_results', False) + retention = Env.setting('retention', section = 'nzb') + + if nzb.get('seeders') is None and 0 < retention < nzb.get('age', 0): + log.info2('Wrong: Outside retention, age is %s, needs %s or lower: %s', (nzb['age'], retention, nzb['name'])) + return False + + # Check for Original Version flag + if not fireEvent('searcher.correct_language', nzb['name'], media, single = True): + log.info2('Wrong: requested language configuration not found') + return False + + # Check for required and ignored words + if not fireEvent('searcher.correct_words', nzb['name'], media, single = True): + return False + + preferred_quality = quality if quality else fireEvent('quality.single', identifier = quality['identifier'], single = True) + + # Contains lower quality string + contains_other = fireEvent('searcher.contains_other_quality', nzb, movie_year = media['info']['year'], preferred_quality = preferred_quality, single = True) + if contains_other 
and isinstance(contains_other, dict): + log.info2('Wrong: %s, looking for %s, found %s', (nzb['name'], quality['label'], [x for x in contains_other] if contains_other else 'no quality')) + return False + + # Contains lower quality string + if not fireEvent('searcher.correct_3d', nzb, preferred_quality = preferred_quality, single = True): + log.info2('Wrong: %s, %slooking for %s in 3D', (nzb['name'], ('' if preferred_quality['custom'].get('3d') else 'NOT '), quality['label'])) + return False + + # File to small + if nzb['size'] and tryInt(preferred_quality['size_min']) > tryInt(nzb['size']): + log.info2('Wrong: "%s" is too small to be %s. %sMB instead of the minimal of %sMB.', (nzb['name'], preferred_quality['label'], nzb['size'], preferred_quality['size_min'])) + return False + + # File to large + if nzb['size'] and tryInt(preferred_quality['size_max']) < tryInt(nzb['size']): + log.info2('Wrong: "%s" is too large to be %s. %sMB instead of the maximum of %sMB.', (nzb['name'], preferred_quality['label'], nzb['size'], preferred_quality['size_max'])) + return False + + # Provider specific functions + get_more = nzb.get('get_more_info') + if get_more: + get_more(nzb) + + extra_check = nzb.get('extra_check') + if extra_check and not extra_check(nzb): + return False + + + if imdb_results: + return True + + # Check if nzb contains imdb link + if getImdb(nzb.get('description', '')) == getIdentifier(media): + return True + + for raw_title in media['info']['titles']: + for movie_title in possibleTitles(raw_title): + movie_words = re.split('\W+', simplifyString(movie_title)) + + if fireEvent('searcher.correct_name', nzb['name'], movie_title, single = True): + # if no IMDB link, at least check year range 1 + if len(movie_words) > 2 and fireEvent('searcher.correct_year', nzb['name'], media['info']['year'], 1, single = True): + return True + + # if no IMDB link, at least check year + if len(movie_words) <= 2 and fireEvent('searcher.correct_year', nzb['name'], 
media['info']['year'], 0, single = True): + return True + + log.info("Wrong: %s, undetermined naming. Looking for '%s (%s)'", (nzb['name'], media_title, media['info']['year'])) + return False + + def couldBeReleased(self, is_pre_release, dates, year = None): + + now = int(time.time()) + now_year = date.today().year + now_month = date.today().month + + if (year is None or year < now_year - 1 or (year <= now_year - 1 and now_month > 4)) and (not dates or (dates.get('theater', 0) == 0 and dates.get('dvd', 0) == 0)): + return True + else: + + # Don't allow movies with years to far in the future + add_year = 1 if now_month > 10 else 0 # Only allow +1 year if end of the year + if year is not None and year > (now_year + add_year): + return False + + # For movies before 1972 + if not dates or dates.get('theater', 0) < 0 or dates.get('dvd', 0) < 0: + return True + + if is_pre_release: + # Prerelease 1 week before theaters + if dates.get('theater') - 604800 < now: + return True + else: + # 12 weeks after theater release + if dates.get('theater') > 0 and dates.get('theater') + 7257600 < now: + return True + + if dates.get('dvd') > 0: + + # 4 weeks before dvd release + if dates.get('dvd') - 2419200 < now: + return True + + # Dvd should be released + if dates.get('dvd') < now: + return True + + + return False + + def tryNextReleaseView(self, media_id = None, **kwargs): + + trynext = self.tryNextRelease(media_id, manual = True, force_download = True) + + return { + 'success': trynext + } + + def tryNextRelease(self, media_id, manual = False, force_download = False): + + try: + + rels = fireEvent('release.for_media', media_id, single = True) + + for rel in rels: + if rel.get('status') in ['snatched', 'done']: + fireEvent('release.update_status', rel.get('_id'), status = 'ignored') + + media = fireEvent('media.get', media_id, single = True) + if media: + log.info('Trying next release for: %s', getTitle(media)) + self.single(media, manual = manual, force_download = force_download) 
+ + return True + + return False + except: + log.error('Failed searching for next release: %s', traceback.format_exc()) + return False + + def getSearchTitle(self, media): + if media['type'] == 'movie': + return getTitle(media) + +class SearchSetupError(Exception): + pass + + +config = [{ + 'name': 'moviesearcher', + 'order': 20, + 'groups': [ + { + 'tab': 'searcher', + 'name': 'movie_searcher', + 'label': 'Movie search', + 'description': 'Search options for movies', + 'advanced': True, + 'options': [ + { + 'name': 'always_search', + 'default': False, + 'migrate_from': 'searcher', + 'type': 'bool', + 'label': 'Always search', + 'description': 'Search for movies even before there is a ETA. Enabling this will probably get you a lot of fakes.', + }, + { + 'name': 'run_on_launch', + 'migrate_from': 'searcher', + 'label': 'Run on launch', + 'advanced': True, + 'default': 0, + 'type': 'bool', + 'description': 'Force run the searcher after (re)start.', + }, + { + 'name': 'search_on_add', + 'label': 'Search after add', + 'advanced': True, + 'default': 1, + 'type': 'bool', + 'description': 'Disable this to only search for movies on cron.', + }, + { + 'name': 'cron_day', + 'migrate_from': 'searcher', + 'label': 'Day', + 'advanced': True, + 'default': '*', + 'type': 'string', + 'description': '*: Every day, */2: Every 2 days, 1: Every first of the month. See
    APScheduler for details.', + }, + { + 'name': 'cron_hour', + 'migrate_from': 'searcher', + 'label': 'Hour', + 'advanced': True, + 'default': random.randint(0, 23), + 'type': 'string', + 'description': '*: Every hour, */8: Every 8 hours, 3: At 3, midnight.', + }, + { + 'name': 'cron_minute', + 'migrate_from': 'searcher', + 'label': 'Minute', + 'advanced': True, + 'default': random.randint(0, 59), + 'type': 'string', + 'description': "Just keep it random, so the providers don't get DDOSed by every CP user on a 'full' hour." + }, + ], + }, + ], +}] diff --git a/couchpotato/core/media/movie/suggestion.py b/couchpotato/core/media/movie/suggestion.py new file mode 100644 index 0000000000..04cda51d10 --- /dev/null +++ b/couchpotato/core/media/movie/suggestion.py @@ -0,0 +1,165 @@ +import time +from couchpotato.api import addApiView +from couchpotato.core.event import fireEvent, addEvent +from couchpotato.core.helpers.variable import splitString, removeDuplicate, getIdentifier, getTitle +from couchpotato.core.plugins.base import Plugin +from couchpotato.environment import Env + + +autoload = 'Suggestion' + + +class Suggestion(Plugin): + + def __init__(self): + + addApiView('suggestion.view', self.suggestView) + addApiView('suggestion.ignore', self.ignoreView) + + def test(): + time.sleep(1) + self.suggestView() + + addEvent('app.load', test) + + def suggestView(self, limit = 6, **kwargs): + if self.isDisabled(): + return { + 'success': True, + 'movies': [] + } + + movies = splitString(kwargs.get('movies', '')) + ignored = splitString(kwargs.get('ignored', '')) + seen = splitString(kwargs.get('seen', '')) + + cached_suggestion = self.getCache('suggestion_cached') + if cached_suggestion: + suggestions = cached_suggestion + else: + + if not movies or len(movies) == 0: + active_movies = fireEvent('media.with_status', ['active', 'done'], types = 'movie', single = True) + movies = [getIdentifier(x) for x in active_movies] + + if not ignored or len(ignored) == 0: + ignored = 
splitString(Env.prop('suggest_ignore', default = '')) + if not seen or len(seen) == 0: + movies.extend(splitString(Env.prop('suggest_seen', default = ''))) + + suggestions = fireEvent('movie.suggest', movies = movies, ignore = ignored, single = True) + self.setCache('suggestion_cached', suggestions, timeout = 6048000) # Cache for 10 weeks + + medias = [] + for suggestion in suggestions[:int(limit)]: + + # Cache poster + posters = suggestion.get('images', {}).get('poster', []) + poster = [x for x in posters if 'tmdb' in x] + posters = poster if len(poster) > 0 else posters + + cached_poster = fireEvent('file.download', url = posters[0], single = True) if len(posters) > 0 else False + files = {'image_poster': [cached_poster] } if cached_poster else {} + + medias.append({ + 'status': 'suggested', + 'title': getTitle(suggestion), + 'type': 'movie', + 'info': suggestion, + 'files': files, + 'identifiers': { + 'imdb': suggestion.get('imdb') + } + }) + + return { + 'success': True, + 'movies': medias + } + + def ignoreView(self, imdb = None, limit = 6, remove_only = False, mark_seen = False, **kwargs): + + ignored = splitString(Env.prop('suggest_ignore', default = '')) + seen = splitString(Env.prop('suggest_seen', default = '')) + + new_suggestions = [] + if imdb: + if mark_seen: + seen.append(imdb) + Env.prop('suggest_seen', ','.join(set(seen))) + elif not remove_only: + ignored.append(imdb) + Env.prop('suggest_ignore', ','.join(set(ignored))) + + new_suggestions = self.updateSuggestionCache(ignore_imdb = imdb, limit = limit, ignored = ignored, seen = seen) + + if len(new_suggestions) <= limit: + return { + 'result': False + } + + # Only return new (last) item + media = { + 'status': 'suggested', + 'title': getTitle(new_suggestions[limit]), + 'type': 'movie', + 'info': new_suggestions[limit], + 'identifiers': { + 'imdb': new_suggestions[limit].get('imdb') + } + } + + return { + 'result': True, + 'movie': media + } + + def updateSuggestionCache(self, ignore_imdb = None, 
limit = 6, ignored = None, seen = None): + + # Combine with previous suggestion_cache + cached_suggestion = self.getCache('suggestion_cached') or [] + new_suggestions = [] + ignored = [] if not ignored else ignored + seen = [] if not seen else seen + + if ignore_imdb: + suggested_imdbs = [] + for cs in cached_suggestion: + if cs.get('imdb') != ignore_imdb and cs.get('imdb') not in suggested_imdbs: + suggested_imdbs.append(cs.get('imdb')) + new_suggestions.append(cs) + + # Get new results and add them + if len(new_suggestions) - 1 < limit: + active_movies = fireEvent('media.with_status', ['active', 'done'], single = True) + movies = [getIdentifier(x) for x in active_movies] + movies.extend(seen) + + ignored.extend([x.get('imdb') for x in cached_suggestion]) + suggestions = fireEvent('movie.suggest', movies = movies, ignore = removeDuplicate(ignored), single = True) + + if suggestions: + new_suggestions.extend(suggestions) + + self.setCache('suggestion_cached', new_suggestions, timeout = 3024000) + + return new_suggestions + +config = [{ + 'name': 'suggestion', + 'groups': [ + { + 'label': 'Suggestions', + 'description': 'Displays suggestions on the home page', + 'name': 'suggestions', + 'tab': 'display', + 'options': [ + { + 'name': 'enabled', + 'default': True, + 'type': 'enabler', + }, + ], + }, + ], +}] diff --git a/couchpotato/core/migration/migrate.cfg b/couchpotato/core/migration/migrate.cfg deleted file mode 100644 index f17e967a6e..0000000000 --- a/couchpotato/core/migration/migrate.cfg +++ /dev/null @@ -1,4 +0,0 @@ -[db_settings] -repository_id = CouchPotato -version_table = migrate_version -required_dbs = ['sqlite'] diff --git a/couchpotato/core/migration/versions/__init__.py b/couchpotato/core/migration/versions/__init__.py deleted file mode 100755 index 7e6e44bf0f..0000000000 --- a/couchpotato/core/migration/versions/__init__.py +++ /dev/null @@ -1,30 +0,0 @@ -""" - Examples - - Adding a column: - - from migrate import * - from migrate.changeset.schema 
import create_column - from sqlalchemy import * - - meta = MetaData() - - def upgrade(migrate_engine): - meta.bind = migrate_engine - - #print changeset.schema - path_column = Column('path', String) - resource = Table('resource', meta, path_column) - - create_column(path_column, resource) - - - - Adding Relation table: http://www.mail-archive.com/sqlelixir@googlegroups.com/msg02061.html - - person = Table('person', metadata, Column('id', Integer)) - person_column = Column('person_id', Integer, ForeignKey('person.id'), nullable=False) - movie = Table('movie', metadata, person_column) - person_constraint = ForeignKeyConstraint(['person_id'], ['person.id'], ondelete="restrict", table=movie) - -""" diff --git a/couchpotato/core/notifications/__init__.py b/couchpotato/core/notifications/__init__.py index 8ac24dfbeb..5958fe66d7 100644 --- a/couchpotato/core/notifications/__init__.py +++ b/couchpotato/core/notifications/__init__.py @@ -1,4 +1,4 @@ -config = { +config = [{ 'name': 'notification_providers', 'groups': [ { @@ -10,4 +10,4 @@ 'options': [], }, ], -} +}] diff --git a/couchpotato/core/notifications/androidpn.py b/couchpotato/core/notifications/androidpn.py new file mode 100644 index 0000000000..318b6e1f50 --- /dev/null +++ b/couchpotato/core/notifications/androidpn.py @@ -0,0 +1,81 @@ +import traceback + +from couchpotato.core.helpers.encoding import toUnicode +from couchpotato.core.logger import CPLog +from couchpotato.core.notifications.base import Notification + + +log = CPLog(__name__) + +autoload = 'AndroidPN' + + +class AndroidPN(Notification): + + def notify(self, message = '', data = None, listener = None): + if not data: data = {} + + data = { + 'action': "send", + 'uri': "", + 'title': self.default_title, + 'message': toUnicode(message), + 'broadcast': self.conf('broadcast'), + 'username': self.conf('username'), + } + + headers = { + 'Content-type': 'application/x-www-form-urlencoded' + } + + try: + self.urlopen(self.conf('url'), headers = headers, data 
= data, show_error = False) + return True + except: + log.error('AndroidPN failed: %s', traceback.format_exc()) + + return False + + +config = [{ + 'name': 'androidpn', + 'groups': [ + { + 'tab': 'notifications', + 'list': 'notification_providers', + 'name': 'androidpn', + 'description': 'Self hosted Android push notification server', + 'options': [ + { + 'name': 'enabled', + 'default': 0, + 'type': 'enabler', + }, + { + 'name': 'broadcast', + 'label': 'Broadcast', + 'default': 1, + 'type': 'bool', + 'description': 'Send notification to all users', + }, + { + 'name': 'username', + 'label': 'Username', + 'description': 'Required if broadcast not selected', + }, + { + 'name': 'url', + 'label': 'Url', + 'description': 'URL of server', + }, + { + 'name': 'on_snatch', + 'default': 0, + 'type': 'bool', + 'advanced': True, + 'description': 'Also send message when movie is snatched.', + }, + ], + } + ], +}] diff --git a/couchpotato/core/notifications/base.py b/couchpotato/core/notifications/base.py index 7e98fa513c..761411e3c2 100644 --- a/couchpotato/core/notifications/base.py +++ b/couchpotato/core/notifications/base.py @@ -1,26 +1,29 @@ from couchpotato.api import addApiView from couchpotato.core.event import addEvent -from couchpotato.core.helpers.request import jsonified from couchpotato.core.logger import CPLog -from couchpotato.core.plugins.base import Plugin +from couchpotato.core.media._base.providers.base import Provider from couchpotato.environment import Env log = CPLog(__name__) -class Notification(Plugin): +class Notification(Provider): + + type = 'notification' default_title = Env.get('appname') test_message = 'ZOMG Lazors Pewpewpew!' 
listen_to = [ + 'media.available', 'renamer.after', 'movie.snatched', 'updater.available', 'updater.updated', + 'core.message.important', ] dont_listen_to = [] def __init__(self): - addEvent('notify.%s' % self.getName().lower(), self.notify) + addEvent('notify.%s' % self.getName().lower(), self._notify) addApiView(self.testNotifyName(), self.test) @@ -30,29 +33,41 @@ def __init__(self): addEvent(listener, self.createNotifyHandler(listener)) def createNotifyHandler(self, listener): - def notify(message = None, group = {}, data = None): + def notify(message = None, group = None, data = None): + if not group: group = {} + if not self.conf('on_snatch', default = True) and listener == 'movie.snatched': return - return self.notify(message = message, data = data if data else group, listener = listener) + return self._notify(message = message, data = data if data else group, listener = listener) return notify - def notify(self, message = '', data = {}, listener = None): - pass + def getNotificationImage(self, size = 'small'): + return 'https://raw.github.com/CouchPotato/CouchPotatoServer/master/couchpotato/static/images/notify.couch.%s.png' % size + + def _notify(self, *args, **kwargs): + if self.isEnabled(): + return self.notify(*args, **kwargs) + return False + + def notify(self, message = '', data = None, listener = None): + if not data: data = {} - def test(self): + def test(self, **kwargs): test_type = self.testNotifyName() log.info('Sending test to %s', test_type) - success = self.notify( + success = self._notify( message = self.test_message, data = {}, listener = 'test' ) - return jsonified({'success': success}) + return { + 'success': success + } def testNotifyName(self): return 'notify.%s.test' % self.getName().lower() diff --git a/couchpotato/core/notifications/boxcar/__init__.py b/couchpotato/core/notifications/boxcar/__init__.py deleted file mode 100644 index ab244c3248..0000000000 --- a/couchpotato/core/notifications/boxcar/__init__.py +++ /dev/null @@ -1,33 
+0,0 @@ -from .main import Boxcar - -def start(): - return Boxcar() - -config = [{ - 'name': 'boxcar', - 'groups': [ - { - 'tab': 'notifications', - 'list': 'notification_providers', - 'name': 'boxcar', - 'options': [ - { - 'name': 'enabled', - 'default': 0, - 'type': 'enabler', - }, - { - 'name': 'email', - 'description': 'Your Boxcar registration emailaddress.' - }, - { - 'name': 'on_snatch', - 'default': 0, - 'type': 'bool', - 'advanced': True, - 'description': 'Also send message when movie is snatched.', - }, - ], - } - ], -}] diff --git a/couchpotato/core/notifications/boxcar/main.py b/couchpotato/core/notifications/boxcar/main.py deleted file mode 100644 index 3135614a9d..0000000000 --- a/couchpotato/core/notifications/boxcar/main.py +++ /dev/null @@ -1,35 +0,0 @@ -from couchpotato.core.helpers.encoding import toUnicode -from couchpotato.core.logger import CPLog -from couchpotato.core.notifications.base import Notification -import time - -log = CPLog(__name__) - - -class Boxcar(Notification): - - url = 'https://boxcar.io/devices/providers/7MNNXY3UIzVBwvzkKwkC/notifications' - - def notify(self, message = '', data = {}, listener = None): - if self.isDisabled(): return - - try: - message = message.strip() - - params = { - 'email': self.conf('email'), - 'notification[from_screen_name]': self.default_title, - 'notification[message]': toUnicode(message), - 'notification[from_remote_service_id]': int(time.time()), - } - - self.urlopen(self.url, params = params) - except: - log.error('Check your email and added services on boxcar.io') - return False - - log.info('Boxcar notification successful.') - return True - - def isEnabled(self): - return super(Boxcar, self).isEnabled() and self.conf('email') diff --git a/couchpotato/core/notifications/boxcar2.py b/couchpotato/core/notifications/boxcar2.py new file mode 100644 index 0000000000..c6f49e6bd9 --- /dev/null +++ b/couchpotato/core/notifications/boxcar2.py @@ -0,0 +1,74 @@ +from couchpotato.core.helpers.encoding 
import toUnicode +from couchpotato.core.logger import CPLog +from couchpotato.core.notifications.base import Notification + +log = CPLog(__name__) + +autoload = 'Boxcar2' + + +class Boxcar2(Notification): + + url = 'https://new.boxcar.io/api/notifications' + LOGO_URL = 'https://raw.githubusercontent.com/CouchPotato/CouchPotatoServer/master/couchpotato/static/images/notify.couch.small.png' + + def notify(self, message = '', data = None, listener = None): + if not data: data = {} + + try: + message = message.strip() + + long_message = '' + if listener == 'test': + long_message = 'This is a test message' + elif data.get('identifier'): + long_message = 'More movie info on IMDB' % data['identifier'] + + data = { + 'user_credentials': self.conf('token'), + 'notification[title]': toUnicode('%s - %s' % (self.default_title, message)), + 'notification[long_message]': toUnicode(long_message), + 'notification[icon_url]': self.LOGO_URL, + 'notification[source_name]': 'CouchPotato', + } + + self.urlopen(self.url, data = data) + except: + log.error('Make sure the token provided is for the correct device') + return False + + log.info('Boxcar notification successful.') + return True + + def isEnabled(self): + return super(Boxcar2, self).isEnabled() and self.conf('token') + + +config = [{ + 'name': 'boxcar2', + 'groups': [ + { + 'tab': 'notifications', + 'list': 'notification_providers', + 'name': 'boxcar2', + 'options': [ + { + 'name': 'enabled', + 'default': 0, + 'type': 'enabler', + }, + { + 'name': 'token', + 'description': ('Your Boxcar access token.', 'Can be found in the app under settings') + }, + { + 'name': 'on_snatch', + 'default': 0, + 'type': 'bool', + 'advanced': True, + 'description': 'Also send message when movie is snatched.', + }, + ], + } + ], +}] diff --git a/couchpotato/core/notifications/core/__init__.py b/couchpotato/core/notifications/core/__init__.py index 6e923dac54..9c4fb37347 100644 --- a/couchpotato/core/notifications/core/__init__.py +++ 
b/couchpotato/core/notifications/core/__init__.py @@ -1,6 +1,5 @@ from .main import CoreNotifier -def start(): - return CoreNotifier() -config = [] +def autoload(): + return CoreNotifier() diff --git a/couchpotato/core/notifications/core/index.py b/couchpotato/core/notifications/core/index.py new file mode 100644 index 0000000000..c7985b5b05 --- /dev/null +++ b/couchpotato/core/notifications/core/index.py @@ -0,0 +1,37 @@ +from CodernityDB.tree_index import TreeBasedIndex + + +class NotificationIndex(TreeBasedIndex): + _version = 1 + + custom_header = """from CodernityDB.tree_index import TreeBasedIndex +import time""" + + def __init__(self, *args, **kwargs): + kwargs['key_format'] = 'I' + super(NotificationIndex, self).__init__(*args, **kwargs) + + def make_key(self, key): + return key + + def make_key_value(self, data): + if data.get('_t') == 'notification': + return data.get('time'), None + + +class NotificationUnreadIndex(TreeBasedIndex): + _version = 1 + + custom_header = """from CodernityDB.tree_index import TreeBasedIndex +import time""" + + def __init__(self, *args, **kwargs): + kwargs['key_format'] = 'I' + super(NotificationUnreadIndex, self).__init__(*args, **kwargs) + + def make_key(self, key): + return key + + def make_key_value(self, data): + if data.get('_t') == 'notification' and not data.get('read'): + return data.get('time'), None diff --git a/couchpotato/core/notifications/core/main.py b/couchpotato/core/notifications/core/main.py index bc8bd86066..4c39fb775e 100644 --- a/couchpotato/core/notifications/core/main.py +++ b/couchpotato/core/notifications/core/main.py @@ -1,29 +1,39 @@ -from couchpotato import get_session +from operator import itemgetter +import threading +import time +import traceback +import uuid +from CodernityDB.database import RecordDeleted + +from couchpotato import get_db from couchpotato.api import addApiView, addNonBlockApiView -from couchpotato.core.event import addEvent +from couchpotato.core.event import addEvent, 
fireEvent from couchpotato.core.helpers.encoding import toUnicode -from couchpotato.core.helpers.request import jsonified, getParam from couchpotato.core.helpers.variable import tryInt, splitString from couchpotato.core.logger import CPLog from couchpotato.core.notifications.base import Notification -from couchpotato.core.settings.model import Notification as Notif -from sqlalchemy.sql.expression import or_ -import threading -import time -import uuid +from .index import NotificationIndex, NotificationUnreadIndex +from couchpotato.environment import Env +from tornado.ioloop import IOLoop + log = CPLog(__name__) class CoreNotifier(Notification): - m_lock = threading.Lock() - messages = [] - listeners = [] + _database = { + 'notification': NotificationIndex, + 'notification_unread': NotificationUnreadIndex + } + + m_lock = None listen_to = [ + 'media.available', 'renamer.after', 'movie.snatched', 'updater.available', 'updater.updated', + 'core.message', 'core.message.important', ] def __init__(self): @@ -54,86 +64,118 @@ def __init__(self): addNonBlockApiView('notification.listener', (self.addListener, self.removeListener)) addApiView('notification.listener', self.listener) - addEvent('app.load', self.clean) - - def clean(self): - - db = get_session() - db.query(Notif).filter(Notif.added <= (int(time.time()) - 2419200)).delete() - db.commit() - - - def markAsRead(self): - - ids = None - if getParam('ids'): - ids = splitString(getParam('ids')) - - db = get_session() - - if ids: - q = db.query(Notif).filter(or_(*[Notif.id == tryInt(s) for s in ids])) - else: - q = db.query(Notif).filter_by(read = False) + fireEvent('schedule.interval', 'core.check_messages', self.checkMessages, hours = 12, single = True) + fireEvent('schedule.interval', 'core.clean_messages', self.cleanMessages, seconds = 15, single = True) - q.update({Notif.read: True}) + addEvent('app.load', self.clean) - db.commit() + if not Env.get('dev'): + addEvent('app.load', self.checkMessages) - return 
jsonified({ - 'success': True - }) + self.messages = [] + self.listeners = [] + self.m_lock = threading.Lock() - def listView(self): + def clean(self): + try: + db = get_db() + for n in db.all('notification', with_doc = True): + if n['doc'].get('time', 0) <= (int(time.time()) - 2419200): + db.delete(n['doc']) + except: + log.error('Failed cleaning notification: %s', traceback.format_exc()) + + def markAsRead(self, ids = None, **kwargs): + + ids = splitString(ids) if ids else None + + try: + db = get_db() + for x in db.all('notification_unread', with_doc = True): + if not ids or x['_id'] in ids: + x['doc']['read'] = True + db.update(x['doc']) + return { + 'success': True + } + except: + log.error('Failed mark as read: %s', traceback.format_exc()) + + return { + 'success': False + } - db = get_session() - limit_offset = getParam('limit_offset', None) + def listView(self, limit_offset = None, **kwargs): - q = db.query(Notif) + db = get_db() if limit_offset: splt = splitString(limit_offset) - limit = splt[0] - offset = 0 if len(splt) is 1 else splt[1] - q = q.limit(limit).offset(offset) + limit = tryInt(splt[0]) + offset = tryInt(0 if len(splt) is 1 else splt[1]) + results = db.all('notification', limit = limit, offset = offset, with_doc = True) else: - q = q.limit(200) + results = db.all('notification', limit = 200, with_doc = True) - results = q.all() notifications = [] for n in results: - ndict = n.to_dict() - ndict['type'] = 'notification' - notifications.append(ndict) + notifications.append(n['doc']) - return jsonified({ + return { 'success': True, 'empty': len(notifications) == 0, 'notifications': notifications - }) + } + + def checkMessages(self): + + prop_name = 'messages.last_check' + last_check = tryInt(Env.prop(prop_name, default = 0)) + + messages = fireEvent('cp.messages', last_check = last_check, single = True) or [] + + for message in messages: + if message.get('time') > last_check: + message['sticky'] = True # Always sticky core messages + + 
message_type = 'core.message.important' if message.get('important') else 'core.message' + fireEvent(message_type, message = message.get('message'), data = message) - def notify(self, message = '', data = {}, listener = None): + if last_check < message.get('time'): + last_check = message.get('time') - db = get_session() + Env.prop(prop_name, value = last_check) - data['notification_type'] = listener if listener else 'unknown' + def notify(self, message = '', data = None, listener = None): + if not data: data = {} - n = Notif( - message = toUnicode(message), - data = data - ) - db.add(n) - db.commit() + n = { + '_t': 'notification', + 'time': int(time.time()), + } + + try: + db = get_db() + + n['message'] = toUnicode(message) - ndict = n.to_dict() - ndict['type'] = 'notification' - ndict['time'] = time.time() + if data.get('sticky'): + n['sticky'] = True + if data.get('important'): + n['important'] = True - self.frontend(type = listener, data = data) + db.insert(n) - return True + self.frontend(type = listener, data = n) - def frontend(self, type = 'notification', data = {}, message = None): + return True + except: + log.error('Failed notify "%s": %s', (n, traceback.format_exc())) + + def frontend(self, type = 'notification', data = None, message = None): + if not data: data = {} + + log.debug('Notifying frontend') self.m_lock.acquire() notification = { @@ -148,15 +190,17 @@ def frontend(self, type = 'notification', data = {}, message = None): while len(self.listeners) > 0 and not self.shuttingDown(): try: listener, last_id = self.listeners.pop() - listener({ + IOLoop.current().add_callback(listener, { 'success': True, 'result': [notification], }) except: - break + log.debug('Failed sending to listener: %s', traceback.format_exc()) + self.listeners = [] self.m_lock.release() - self.cleanMessages() + + log.debug('Done notifying frontend') def addListener(self, callback, last_id = None): @@ -168,59 +212,77 @@ def addListener(self, callback, last_id = None): 'result': 
messages, }) + self.m_lock.acquire() self.listeners.append((callback, last_id)) + self.m_lock.release() def removeListener(self, callback): + self.m_lock.acquire() + new_listeners = [] for list_tuple in self.listeners: try: listener, last_id = list_tuple - if listener == callback: - self.listeners.remove(list_tuple) + if listener != callback: + new_listeners.append(list_tuple) except: - pass + log.debug('Failed removing listener: %s', traceback.format_exc()) + + self.listeners = new_listeners + self.m_lock.release() def cleanMessages(self): + + if len(self.messages) == 0: + return + + log.debug('Cleaning messages') self.m_lock.acquire() - for message in self.messages: - if message['time'] < (time.time() - 15): - self.messages.remove(message) + time_ago = (time.time() - 15) + self.messages[:] = [m for m in self.messages if (m['time'] > time_ago)] self.m_lock.release() + log.debug('Done cleaning messages') def getMessages(self, last_id): + + log.debug('Getting messages with id: %s', last_id) self.m_lock.acquire() recent = [] - index = 0 - for i in xrange(len(self.messages)): - index = len(self.messages) - i - 1 - if self.messages[index]["message_id"] == last_id: break - recent = self.messages[index:] + try: + index = map(itemgetter('message_id'), self.messages).index(last_id) + recent = self.messages[index + 1:] + except: + pass self.m_lock.release() + log.debug('Returning for %s %s messages', (last_id, len(recent))) - return recent or [] + return recent - def listener(self): + def listener(self, init = False, **kwargs): messages = [] - # Get unread - if getParam('init'): - db = get_session() + # Get last message + if init: + db = get_db() + + notifications = db.all('notification') - notifications = db.query(Notif) \ - .filter(or_(Notif.read == False, Notif.added > (time.time() - 259200))) \ - .all() for n in notifications: - ndict = n.to_dict() - ndict['type'] = 'notification' - messages.append(ndict) - return jsonified({ + try: + doc = db.get('id', n.get('_id')) + 
if doc.get('time') > (time.time() - 604800): + messages.append(doc) + except RecordDeleted: + pass + + return { 'success': True, 'result': messages, - }) + } diff --git a/couchpotato/core/notifications/core/static/notification.js b/couchpotato/core/notifications/core/static/notification.js index 52062e93d1..9cdd215356 100644 --- a/couchpotato/core/notifications/core/static/notification.js +++ b/couchpotato/core/notifications/core/static/notification.js @@ -2,6 +2,7 @@ var NotificationBase = new Class({ Extends: BlockBase, Implements: [Options, Events], + stopped: false, initialize: function(options){ var self = this; @@ -10,78 +11,84 @@ var NotificationBase = new Class({ // Listener App.addEvent('unload', self.stopPoll.bind(self)); App.addEvent('reload', self.startInterval.bind(self, [true])); - App.addEvent('notification', self.notify.bind(self)); - App.addEvent('message', self.showMessage.bind(self)); + App.on('notification', self.notify.bind(self)); + App.on('message', self.showMessage.bind(self)); // Add test buttons to settings page - App.addEvent('load', self.addTestButtons.bind(self)); + App.addEvent('loadSettings', self.addTestButtons.bind(self)); // Notification bar - self.notifications = [] + self.notifications = []; App.addEvent('load', function(){ - App.block.notification = new Block.Menu(self, { + App.block.notification = new BlockMenu(self, { + 'button_class': 'icon-notifications', 'class': 'notification_menu', 'onOpen': self.markAsRead.bind(self) - }) + }); $(App.block.notification).inject(App.getBlock('search'), 'after'); self.badge = new Element('div.badge').inject(App.block.notification, 'top').hide(); - /* App.getBlock('notification').addLink(new Element('a.more', { - 'href': App.createUrl('notifications'), - 'text': 'Show older notifications' - })); */ - }); + requestTimeout(function(){ + self.startInterval(); + }, $(window).getSize().x <= 480 ? 2000 : 100); - window.addEvent('load', function(){ - self.startInterval.delay(Browser.safari ? 
100 : 0, self) }); - }, notify: function(result){ var self = this; var added = new Date(); - added.setTime(result.added*1000) + added.setTime(result.added*1000); result.el = App.getBlock('notification').addLink( new Element('span.'+(result.read ? 'read' : '' )).adopt( - new Element('span.message', {'text': result.message}), + new Element('span.message', {'html': result.message}), new Element('span.added', {'text': added.timeDiffInWords(), 'title': added}) - ) - , 'top'); + ), 'top'); self.notifications.include(result); - if(!result.read) - self.setBadge(self.notifications.filter(function(n){ return !n.read}).length) + if((result.important !== undefined || result.sticky !== undefined) && !result.read){ + var sticky = true; + App.trigger('message', [result.message, sticky, result]); + } + else if(!result.read){ + self.setBadge(self.notifications.filter(function(n){ return !n.read; }).length); + } }, setBadge: function(value){ var self = this; - self.badge.set('text', value) - self.badge[value ? 'show' : 'hide']() + self.badge.set('text', value); + self.badge[value ? 
'show' : 'hide'](); }, - markAsRead: function(){ - var self = this; + markAsRead: function(force_ids){ + var self = this, + ids = force_ids; - var rn = self.notifications.filter(function(n){ - return !n.read - }) + if(!force_ids) { + var rn = self.notifications.filter(function(n){ + return !n.read && n.important === undefined; + }); - var ids = [] - rn.each(function(n){ - ids.include(n.id) - }) + ids = []; + rn.each(function(n){ + ids.include(n._id); + }); + } if(ids.length > 0) Api.request('notification.markread', { + 'data': { + 'ids': ids.join(',') + }, 'onSuccess': function(){ - self.setBadge('') + self.setBadge(''); } - }) + }); }, @@ -93,76 +100,107 @@ var NotificationBase = new Class({ return; } - Api.request('notification.listener', { - 'data': {'init':true}, - 'onSuccess': self.processData.bind(self) - }).send() + self.request = Api.request('notification.listener', { + 'data': {'init':true}, + 'onSuccess': function(json){ + self.processData(json, true); + } + }).send(); + + requestInterval(function(){ + + if(self.request && self.request.isRunning()){ + self.request.cancel(); + self.startPoll(); + } + + }, 120000); }, startPoll: function(){ var self = this; - if(self.stopped || (self.request && self.request.isRunning())) + if(self.stopped) return; + if(self.request && self.request.isRunning()) + self.request.cancel(); + self.request = Api.request('nonblock/notification.listener', { - 'onSuccess': self.processData.bind(self), - 'data': { - 'last_id': self.last_id - }, - 'onFailure': function(){ - self.startPoll.delay(2000, self) - } - }).send() + 'onSuccess': function(json){ + self.processData(json, false); + }, + 'data': { + 'last_id': self.last_id + }, + 'onFailure': function(){ + requestTimeout(self.startPoll.bind(self), 2000); + } + }).send(); }, stopPoll: function(){ if(this.request) - this.request.cancel() + this.request.cancel(); this.stopped = true; }, - processData: function(json){ + processData: function(json, init){ var self = this; // Process 
data - if(json){ + if(json && json.result){ Array.each(json.result, function(result){ - App.fireEvent(result.type, result); - if(result.message && result.read === undefined) + App.trigger(result._t || result.type, [result]); + if(result.message && result.read === undefined && !init) self.showMessage(result.message); - }) + }); if(json.result.length > 0) - self.last_id = json.result.getLast().message_id + self.last_id = json.result.getLast().message_id; } // Restart poll - self.startPoll() + requestTimeout(self.startPoll.bind(self), 1500); }, - showMessage: function(message){ + showMessage: function(message, sticky, data){ var self = this; if(!self.message_container) self.message_container = new Element('div.messages').inject(document.body); - var new_message = new Element('div.message', { - 'text': message - }).inject(self.message_container); + var new_message = new Element('div', { + 'class': 'message' + (sticky ? ' sticky' : ''), + 'html': '
    ' + message + '
    ' + }).inject(self.message_container, 'top'); - setTimeout(function(){ - new_message.addClass('show') + requestTimeout(function(){ + new_message.addClass('show'); }, 10); - setTimeout(function(){ - new_message.addClass('hide') - setTimeout(function(){ + var hide_message = function(){ + new_message.addClass('hide'); + requestTimeout(function(){ new_message.destroy(); }, 1000); - }, 4000); + }; + + if(sticky) + new_message.grab( + new Element('a.icon-cancel', { + 'events': { + 'click': function(){ + self.markAsRead([data._id]); + hide_message(); + } + } + }) + ); + else + requestTimeout(hide_message, 4000); }, @@ -170,10 +208,10 @@ var NotificationBase = new Class({ addTestButtons: function(){ var self = this; - var setting_page = App.getPage('Settings') + var setting_page = App.getPage('Settings'); setting_page.addEvent('create', function(){ - Object.each(setting_page.tabs.notifications.groups, self.addTestButton.bind(self)) - }) + Object.each(setting_page.tabs.notifications.groups, self.addTestButton.bind(self)); + }); }, @@ -183,7 +221,7 @@ var NotificationBase = new Class({ if(button_name.contains('Notifications')) return; - new Element('.ctrlHolder.test_button').adopt( + new Element('.ctrlHolder.test_button').grab( new Element('a.button', { 'text': button_name, 'events': { @@ -196,20 +234,21 @@ var NotificationBase = new Class({ button.set('text', button_name); + var message; if(json.success){ - var message = new Element('span.success', { + message = new Element('span.success', { 'text': 'Notification successful' - }).inject(button, 'after') + }).inject(button, 'after'); } else { - var message = new Element('span.failed', { + message = new Element('span.failed', { 'text': 'Notification failed. Check logs for details.' 
- }).inject(button, 'after') + }).inject(button, 'after'); } - (function(){ + requestTimeout(function(){ message.destroy(); - }).delay(3000) + }, 3000); } }); } @@ -219,7 +258,7 @@ var NotificationBase = new Class({ }, testButtonName: function(fieldset){ - var name = String(fieldset.getElement('h2').innerHTML).substring(0,String(fieldset.getElement('h2').innerHTML).indexOf(" 0: - log.debug("Logging on to SMTP server using username \'%s\'%s", (smtp_user, " and a password" if len(smtp_pass) > 0 else "")) - mailserver.login(smtp_user, smtp_pass) - - # Send the e-mail - log.debug("Sending the email") - mailserver.sendmail(from_address, to_address, message.as_string()) - - # Close the SMTP connection - mailserver.quit() - - log.info('Email notification sent') - - return True - except: - log.error('E-mail failed: %s', traceback.format_exc()) - return False - - return False diff --git a/couchpotato/core/notifications/email_.py b/couchpotato/core/notifications/email_.py new file mode 100644 index 0000000000..533f3bb7ff --- /dev/null +++ b/couchpotato/core/notifications/email_.py @@ -0,0 +1,137 @@ +from email.mime.text import MIMEText +from email.utils import formatdate, make_msgid +import smtplib +import traceback + +from couchpotato.core.helpers.encoding import toUnicode +from couchpotato.core.helpers.variable import splitString +from couchpotato.core.logger import CPLog +from couchpotato.core.notifications.base import Notification +from couchpotato.environment import Env + + +log = CPLog(__name__) + +autoload = 'Email' + + +class Email(Notification): + + def notify(self, message = '', data = None, listener = None): + if not data: data = {} + + # Extract all the settings from settings + from_address = self.conf('from') + to_address = self.conf('to') + ssl = self.conf('ssl') + smtp_server = self.conf('smtp_server') + smtp_user = self.conf('smtp_user') + smtp_pass = self.conf('smtp_pass') + smtp_port = self.conf('smtp_port') + starttls = self.conf('starttls') + + # Make the 
basic message + email = MIMEText(toUnicode(message), _charset = Env.get('encoding')) + email['Subject'] = '%s: %s' % (self.default_title, toUnicode(message)) + email['From'] = from_address + email['To'] = to_address + email['Date'] = formatdate(localtime = 1) + email['Message-ID'] = make_msgid() + + try: + # Open the SMTP connection, via SSL if requested + log.debug("Connecting to host %s on port %s" % (smtp_server, smtp_port)) + log.debug("SMTP over SSL %s", ("enabled" if ssl == 1 else "disabled")) + mailserver = smtplib.SMTP_SSL(smtp_server, smtp_port) if ssl == 1 else smtplib.SMTP(smtp_server, smtp_port) + + if starttls: + log.debug("Using StartTLS to initiate the connection with the SMTP server") + mailserver.starttls() + + # Say hello to the server + mailserver.ehlo() + + # Check too see if an login attempt should be attempted + if len(smtp_user) > 0: + log.debug("Logging on to SMTP server using username \'%s\'%s", (smtp_user, " and a password" if len(smtp_pass) > 0 else "")) + mailserver.login(smtp_user.encode('utf-8'), smtp_pass.encode('utf-8')) + + # Send the e-mail + log.debug("Sending the email") + mailserver.sendmail(from_address, splitString(to_address), email.as_string()) + + # Close the SMTP connection + mailserver.quit() + + log.info('Email notification sent') + + return True + except: + log.error('E-mail failed: %s', traceback.format_exc()) + + return False + + +config = [{ + 'name': 'email', + 'groups': [ + { + 'tab': 'notifications', + 'list': 'notification_providers', + 'name': 'email', + 'options': [ + { + 'name': 'enabled', + 'default': 0, + 'type': 'enabler', + }, + { + 'name': 'from', + 'label': 'Send e-mail from', + }, + { + 'name': 'to', + 'label': 'Send e-mail to', + }, + { + 'name': 'smtp_server', + 'label': 'SMTP server', + }, + { + 'name': 'smtp_port', + 'label': 'SMTP server port', + 'default': '25', + 'type': 'int', + }, + { + 'name': 'ssl', + 'label': 'Enable SSL', + 'default': 0, + 'type': 'bool', + }, + { + 'name': 'starttls', + 
'label': 'Enable StartTLS', + 'default': 0, + 'type': 'bool', + }, + { + 'name': 'smtp_user', + 'label': 'SMTP user', + }, + { + 'name': 'smtp_pass', + 'label': 'SMTP password', + 'type': 'password', + }, + { + 'name': 'on_snatch', + 'default': 0, + 'type': 'bool', + 'advanced': True, + 'description': 'Also send message when movie is snatched.', + }, + ], + } + ], +}] diff --git a/couchpotato/core/notifications/emby.py b/couchpotato/core/notifications/emby.py new file mode 100644 index 0000000000..baba521fdf --- /dev/null +++ b/couchpotato/core/notifications/emby.py @@ -0,0 +1,89 @@ +import json +import urllib, urllib2 + +from couchpotato.core.helpers.variable import cleanHost +from couchpotato.core.logger import CPLog +from couchpotato.core.notifications.base import Notification + + +log = CPLog(__name__) + +autoload = 'Emby' + + +class Emby(Notification): + + def notify(self, message = '', data = None, listener = None): + host = self.conf('host') + apikey = self.conf('apikey') + + host = cleanHost(host) + url = '%semby/Library/Movies/Updated' % (host) + values = {} + data = urllib.urlencode(values) + + try: + req = urllib2.Request(url, data) + req.add_header('X-MediaBrowser-Token', apikey) + + response = urllib2.urlopen(req) + result = response.read() + response.close() + return True + + except (urllib2.URLError, IOError), e: + return False + + def test(self, **kwargs): + host = self.conf('host') + apikey = self.conf('apikey') + message = self.test_message + + host = cleanHost(host) + url = '%semby/Notifications/Admin' % (host) + values = {'Name': 'CouchPotato', 'Description': message, 'ImageUrl': 'https://raw.githubusercontent.com/CouchPotato/CouchPotatoServer/master/couchpotato/static/images/notify.couch.small.png'} + data = json.dumps(values) + + try: + req = urllib2.Request(url, data) + req.add_header('X-MediaBrowser-Token', apikey) + req.add_header('Content-Type', 'application/json') + + response = urllib2.urlopen(req) + result = response.read() + 
response.close() + return { + 'success': True + } + + except (urllib2.URLError, IOError), e: + return False + + +config = [{ + 'name': 'emby', + 'groups': [ + { + 'tab': 'notifications', + 'list': 'notification_providers', + 'name': 'emby', + 'options': [ + { + 'name': 'enabled', + 'default': 0, + 'type': 'enabler', + }, + { + 'name': 'host', + 'default': 'localhost:8096', + 'description': 'IP:Port, default localhost:8096' + }, + { + 'name': 'apikey', + 'label': 'API Key', + 'default': '', + }, + ], + } + ], +}] diff --git a/couchpotato/core/notifications/growl.py b/couchpotato/core/notifications/growl.py new file mode 100644 index 0000000000..a0081a235a --- /dev/null +++ b/couchpotato/core/notifications/growl.py @@ -0,0 +1,113 @@ +import traceback + +from couchpotato.core.event import fireEvent, addEvent +from couchpotato.core.logger import CPLog +from couchpotato.core.notifications.base import Notification +from couchpotato.environment import Env +from gntp import notifier + + +log = CPLog(__name__) + +autoload = 'Growl' + + +class Growl(Notification): + + registered = False + + def __init__(self): + super(Growl, self).__init__() + + self.growl = None + + if self.isEnabled(): + addEvent('app.load', self.register) + + def register(self): + if self.registered: return + try: + + hostname = self.conf('hostname') + password = self.conf('password') + port = self.conf('port') + + self.growl = notifier.GrowlNotifier( + applicationName = Env.get('appname'), + notifications = ['Updates'], + defaultNotifications = ['Updates'], + applicationIcon = self.getNotificationImage('medium'), + hostname = hostname if hostname else 'localhost', + password = password if password else None, + port = port if port else 23053 + ) + self.growl.register() + self.registered = True + except Exception as e: + if 'timed out' in str(e): + self.registered = True + else: + log.error('Failed register of growl: %s', traceback.format_exc()) + + def notify(self, message = '', data = None, listener = 
None): + if not data: data = {} + + self.register() + + try: + self.growl.notify( + noteType = 'Updates', + title = self.default_title, + description = message, + sticky = False, + priority = 1, + ) + + log.info('Growl notifications sent.') + return True + except: + log.error('Failed growl notification.') + + return False + + +config = [{ + 'name': 'growl', + 'groups': [ + { + 'tab': 'notifications', + 'list': 'notification_providers', + 'name': 'growl', + 'description': 'Version 1.4+', + 'options': [ + { + 'name': 'enabled', + 'default': 0, + 'type': 'enabler', + }, + { + 'name': 'on_snatch', + 'default': False, + 'type': 'bool', + 'advanced': True, + 'description': 'Also send message when movie is snatched.', + }, + { + 'name': 'hostname', + 'description': 'Notify growl over network. Needs restart.', + 'advanced': True, + }, + { + 'name': 'port', + 'type': 'int', + 'advanced': True, + }, + { + 'name': 'password', + 'type': 'password', + 'advanced': True, + }, + ], + } + ], +}] diff --git a/couchpotato/core/notifications/growl/__init__.py b/couchpotato/core/notifications/growl/__init__.py deleted file mode 100644 index 8e462236b5..0000000000 --- a/couchpotato/core/notifications/growl/__init__.py +++ /dev/null @@ -1,45 +0,0 @@ -from .main import Growl - -def start(): - return Growl() - -config = [{ - 'name': 'growl', - 'groups': [ - { - 'tab': 'notifications', - 'list': 'notification_providers', - 'name': 'growl', - 'description': 'Version 1.4+', - 'options': [ - { - 'name': 'enabled', - 'default': 0, - 'type': 'enabler', - }, - { - 'name': 'on_snatch', - 'default': False, - 'type': 'bool', - 'advanced': True, - 'description': 'Also send message when movie is snatched.', - }, - { - 'name': 'hostname', - 'description': 'Notify growl over network. 
Needs restart.', - 'advanced': True, - }, - { - 'name': 'port', - 'type': 'int', - 'advanced': True, - }, - { - 'name': 'password', - 'type': 'password', - 'advanced': True, - }, - ], - } - ], -}] diff --git a/couchpotato/core/notifications/growl/main.py b/couchpotato/core/notifications/growl/main.py deleted file mode 100644 index 7f1398d9a9..0000000000 --- a/couchpotato/core/notifications/growl/main.py +++ /dev/null @@ -1,66 +0,0 @@ -from couchpotato.core.event import fireEvent, addEvent -from couchpotato.core.logger import CPLog -from couchpotato.core.notifications.base import Notification -from couchpotato.environment import Env -from gntp import notifier -import traceback - -log = CPLog(__name__) - - -class Growl(Notification): - - registered = False - - def __init__(self): - super(Growl, self).__init__() - - if self.isEnabled(): - addEvent('app.load', self.register) - - def register(self): - if self.registered: return - try: - - hostname = self.conf('hostname') - password = self.conf('password') - port = self.conf('port') - - self.growl = notifier.GrowlNotifier( - applicationName = Env.get('appname'), - notifications = ["Updates"], - defaultNotifications = ["Updates"], - applicationIcon = '%s/static/images/couch.png' % fireEvent('app.api_url', single = True), - hostname = hostname if hostname else 'localhost', - password = password if password else None, - port = port if port else 23053 - ) - self.growl.register() - self.registered = True - except Exception, e: - if 'timed out' in str(e): - self.registered = True - else: - log.error('Failed register of growl: %s', traceback.format_exc()) - - def notify(self, message = '', data = {}, listener = None): - if self.isDisabled(): return - - self.register() - - try: - self.growl.notify( - noteType = "Updates", - title = self.default_title, - description = message, - sticky = False, - priority = 1, - ) - - log.info('Growl notifications sent.') - return True - except: - log.error('Failed growl notification.') - - 
return False - diff --git a/couchpotato/core/notifications/homey.py b/couchpotato/core/notifications/homey.py new file mode 100644 index 0000000000..9418ccc356 --- /dev/null +++ b/couchpotato/core/notifications/homey.py @@ -0,0 +1,64 @@ +import traceback + +from couchpotato.core.helpers.encoding import toUnicode +from couchpotato.core.helpers.variable import getIdentifier, getTitle +from couchpotato.core.logger import CPLog +from couchpotato.core.notifications.base import Notification + +log = CPLog(__name__) + +autoload = 'Homey' + +class Homey(Notification): + + listen_to = [ + 'media.available', + 'renamer.after', 'movie.snatched', + ] + + def notify(self, message = '', data = None, listener = None): + if not data: data = {} + + url = self.conf('url') + + if not url: + log.error('Please provide the URL') + return False + + post_data = { + 'type': listener, + 'movie': getTitle(data) if listener != 'test' else 'Test Movie Title (2016)', + 'message': toUnicode(message) + } + + try: + self.urlopen(url, data = post_data, show_error = False) + return True + except: + log.error('Webhook notification failed: %s', traceback.format_exc()) + + return False + + +config = [{ + 'name': 'homey', + 'groups': [ + { + 'tab': 'notifications', + 'list': 'notification_providers', + 'name': 'homey', + 'label': 'Homey', + 'options': [ + { + 'name': 'enabled', + 'default': 0, + 'type': 'enabler', + }, + { + 'name': 'url', + 'description': 'Create a new one at webhooks.athom.com and add to to Homey Settings' + } + ] + } + ] +}] diff --git a/couchpotato/core/notifications/join.py b/couchpotato/core/notifications/join.py new file mode 100644 index 0000000000..06bf4e5dd4 --- /dev/null +++ b/couchpotato/core/notifications/join.py @@ -0,0 +1,84 @@ +from couchpotato.core.helpers.encoding import toUnicode +from couchpotato.core.helpers.encoding import tryUrlencode +from couchpotato.core.helpers.variable import splitString +from couchpotato.core.logger import CPLog +from 
couchpotato.core.notifications.base import Notification + + +log = CPLog(__name__) + +autoload = 'Join' + + +class Join(Notification): + + # URL for request + url = 'https://joinjoaomgcd.appspot.com/_ah/api/messaging/v1/sendPush?title=%s&text=%s&deviceId=%s&icon=%s' + + # URL for notification icon + icon = tryUrlencode('https://raw.githubusercontent.com/CouchPotato/CouchPotatoServer/master/couchpotato/static/images/icons/android.png') + + def notify(self, message = '', data = None, listener = None): + if not data: data = {} + + # default for devices + device_default = [None] + + apikey = self.conf('apikey') + if apikey is not None: + # Add apikey to request url + self.url = self.url + '&apikey=' + apikey + # If api key is present, default to sending to all devices + device_default = ['group.all'] + + devices = self.getDevices() or device_default + successful = 0 + for device in devices: + response = self.urlopen(self.url % (self.default_title, tryUrlencode(toUnicode(message)), device, self.icon)) + + if response: + successful += 1 + else: + log.error('Unable to push notification to Join device with ID %s' % device) + + return successful == len(devices) + + def getDevices(self): + return splitString(self.conf('devices')) + + +config = [{ + 'name': 'join', + 'groups': [ + { + 'tab': 'notifications', + 'list': 'notification_providers', + 'name': 'join', + 'options': [ + { + 'name': 'enabled', + 'default': 0, + 'type': 'enabler', + }, + { + 'name': 'devices', + 'default': '', + 'description': 'IDs of devices to notify, or group to send to if API key is specified (ex: group.all)' + }, + { + 'name': 'apikey', + 'default': '', + 'advanced': True, + 'description': 'API Key for sending to all devices, or group' + }, + { + 'name': 'on_snatch', + 'default': 0, + 'type': 'bool', + 'advanced': True, + 'description': 'Also send message when movie is snatched.', + }, + ], + } + ], +}] diff --git a/couchpotato/core/notifications/nmj.py b/couchpotato/core/notifications/nmj.py new 
file mode 100644 index 0000000000..665837f183 --- /dev/null +++ b/couchpotato/core/notifications/nmj.py @@ -0,0 +1,154 @@ +import re +import telnetlib + +from couchpotato.api import addApiView +from couchpotato.core.event import addEvent +from couchpotato.core.helpers.encoding import tryUrlencode +from couchpotato.core.logger import CPLog +from couchpotato.core.notifications.base import Notification + + +try: + import xml.etree.cElementTree as etree +except ImportError: + import xml.etree.ElementTree as etree + +log = CPLog(__name__) + +autoload = 'NMJ' + + +class NMJ(Notification): + + # noinspection PyMissingConstructor + def __init__(self): + addApiView(self.testNotifyName(), self.test) + addApiView('notify.nmj.auto_config', self.autoConfig) + + addEvent('renamer.after', self.addToLibrary) + + def autoConfig(self, host = 'localhost', **kwargs): + + mount = '' + + try: + terminal = telnetlib.Telnet(host) + except Exception: + log.error('Warning: unable to get a telnet session to %s', host) + return self.failed() + + log.debug('Connected to %s via telnet', host) + terminal.read_until('sh-3.00# ') + terminal.write('cat /tmp/source\n') + terminal.write('cat /tmp/netshare\n') + terminal.write('exit\n') + tnoutput = terminal.read_all() + + match = re.search(r'(.+\.db)\r\n?(.+)(?=sh-3.00# cat /tmp/netshare)', tnoutput) + + if match: + database = match.group(1) + device = match.group(2) + log.info('Found NMJ database %s on device %s', (database, device)) + else: + log.error('Could not get current NMJ database on %s, NMJ is probably not running!', host) + return self.failed() + + if device.startswith('NETWORK_SHARE/'): + match = re.search('.*(?=\r\n?%s)' % (re.escape(device[14:])), tnoutput) + + if match: + mount = match.group().replace('127.0.0.1', host) + log.info('Found mounting url on the Popcorn Hour in configuration: %s', mount) + else: + log.error('Detected a network share on the Popcorn Hour, but could not get the mounting url') + return self.failed() + + return 
{ + 'success': True, + 'database': database, + 'mount': mount, + } + + def addToLibrary(self, message = None, group = None): + if self.isDisabled(): return + if not group: group = {} + + host = self.conf('host') + mount = self.conf('mount') + database = self.conf('database') + + if mount: + log.debug('Try to mount network drive via url: %s', mount) + try: + self.urlopen(mount) + except: + return False + + params = { + 'arg0': 'scanner_start', + 'arg1': database, + 'arg2': 'background', + 'arg3': '', + } + params = tryUrlencode(params) + update_url = 'http://%(host)s:8008/metadata_database?%(params)s' % {'host': host, 'params': params} + + try: + response = self.urlopen(update_url) + except: + return False + + try: + et = etree.fromstring(response) + result = et.findtext('returnValue') + except SyntaxError as e: + log.error('Unable to parse XML returned from the Popcorn Hour: %s', e) + return False + + if int(result) > 0: + log.error('Popcorn Hour returned an errorcode: %s', result) + return False + else: + log.info('NMJ started background scan') + return True + + def failed(self): + return { + 'success': False + } + + def test(self, **kwargs): + return { + 'success': self.addToLibrary() + } + + +config = [{ + 'name': 'nmj', + 'groups': [ + { + 'tab': 'notifications', + 'list': 'notification_providers', + 'name': 'nmj', + 'label': 'NMJ', + 'options': [ + { + 'name': 'enabled', + 'default': 0, + 'type': 'enabler', + }, + { + 'name': 'host', + 'default': 'localhost', + }, + { + 'name': 'database', + }, + { + 'name': 'mount', + }, + ], + } + ], +}] diff --git a/couchpotato/core/notifications/nmj/__init__.py b/couchpotato/core/notifications/nmj/__init__.py deleted file mode 100644 index 08a21a3ed7..0000000000 --- a/couchpotato/core/notifications/nmj/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -from .main import NMJ - -def start(): - return NMJ() - -config = [{ - 'name': 'nmj', - 'groups': [ - { - 'tab': 'notifications', - 'list': 'notification_providers', - 'name': 
'nmj', - 'label': 'NMJ', - 'options': [ - { - 'name': 'enabled', - 'default': 0, - 'type': 'enabler', - }, - { - 'name': 'host', - 'default': 'localhost', - }, - { - 'name': 'database', - }, - { - 'name': 'mount', - }, - ], - } - ], -}] diff --git a/couchpotato/core/notifications/nmj/main.py b/couchpotato/core/notifications/nmj/main.py deleted file mode 100644 index cdf531dc29..0000000000 --- a/couchpotato/core/notifications/nmj/main.py +++ /dev/null @@ -1,121 +0,0 @@ -from couchpotato.api import addApiView -from couchpotato.core.event import addEvent -from couchpotato.core.helpers.encoding import tryUrlencode -from couchpotato.core.helpers.request import getParams, jsonified -from couchpotato.core.logger import CPLog -from couchpotato.core.notifications.base import Notification -import re -import telnetlib - -try: - import xml.etree.cElementTree as etree -except ImportError: - import xml.etree.ElementTree as etree - -log = CPLog(__name__) - - -class NMJ(Notification): - - def __init__(self): - addEvent('renamer.after', self.addToLibrary) - addApiView(self.testNotifyName(), self.test) - addApiView('notify.nmj.auto_config', self.autoConfig) - - def autoConfig(self): - - params = getParams() - host = params.get('host', 'localhost') - - database = '' - mount = '' - - try: - terminal = telnetlib.Telnet(host) - except Exception: - log.error('Warning: unable to get a telnet session to %s', (host)) - return self.failed() - - log.debug('Connected to %s via telnet', (host)) - terminal.read_until('sh-3.00# ') - terminal.write('cat /tmp/source\n') - terminal.write('cat /tmp/netshare\n') - terminal.write('exit\n') - tnoutput = terminal.read_all() - - match = re.search(r'(.+\.db)\r\n?(.+)(?=sh-3.00# cat /tmp/netshare)', tnoutput) - - if match: - database = match.group(1) - device = match.group(2) - log.info('Found NMJ database %s on device %s', (database, device)) - else: - log.error('Could not get current NMJ database on %s, NMJ is probably not running!', (host)) - return 
self.failed() - - if device.startswith('NETWORK_SHARE/'): - match = re.search('.*(?=\r\n?%s)' % (re.escape(device[14:])), tnoutput) - - if match: - mount = match.group().replace('127.0.0.1', host) - log.info('Found mounting url on the Popcorn Hour in configuration: %s', (mount)) - else: - log.error('Detected a network share on the Popcorn Hour, but could not get the mounting url') - return self.failed() - - return jsonified({ - 'success': True, - 'database': database, - 'mount': mount, - }) - - def addToLibrary(self, message = None, group = {}): - if self.isDisabled(): return - - host = self.conf('host') - mount = self.conf('mount') - database = self.conf('database') - - if mount: - log.debug('Try to mount network drive via url: %s', (mount)) - try: - data = self.urlopen(mount) - except: - return False - - params = { - 'arg0': 'scanner_start', - 'arg1': database, - 'arg2': 'background', - 'arg3': '', - } - params = tryUrlencode(params) - UPDATE_URL = 'http://%(host)s:8008/metadata_database?%(params)s' - updateUrl = UPDATE_URL % {'host': host, 'params': params} - - try: - response = self.urlopen(updateUrl) - except: - return False - - try: - et = etree.fromstring(response) - result = et.findtext('returnValue') - except SyntaxError, e: - log.error('Unable to parse XML returned from the Popcorn Hour: %s', (e)) - return False - - if int(result) > 0: - log.error('Popcorn Hour returned an errorcode: %s', (result)) - return False - else: - log.info('NMJ started background scan') - return True - - def failed(self): - return jsonified({'success': False}) - - def test(self): - return jsonified({'success': self.addToLibrary()}) - - diff --git a/couchpotato/core/notifications/notifo/__init__.py b/couchpotato/core/notifications/notifo/__init__.py deleted file mode 100644 index 941246cc32..0000000000 --- a/couchpotato/core/notifications/notifo/__init__.py +++ /dev/null @@ -1,36 +0,0 @@ -from .main import Notifo - -def start(): - return Notifo() - -config = [{ - 'name': 'notifo', 
- 'groups': [ - { - 'tab': 'notifications', - 'list': 'notification_providers', - 'name': 'notifo', - 'description': 'Keep in mind that Notifo service will end soon.', - 'options': [ - { - 'name': 'enabled', - 'default': 0, - 'type': 'enabler', - }, - { - 'name': 'username', - }, - { - 'name': 'api_key', - }, - { - 'name': 'on_snatch', - 'default': 0, - 'type': 'bool', - 'advanced': True, - 'description': 'Also send message when movie is snatched.', - }, - ], - } - ], -}] diff --git a/couchpotato/core/notifications/notifo/main.py b/couchpotato/core/notifications/notifo/main.py deleted file mode 100644 index 64eee5eccc..0000000000 --- a/couchpotato/core/notifications/notifo/main.py +++ /dev/null @@ -1,39 +0,0 @@ -from couchpotato.core.helpers.encoding import toUnicode -from couchpotato.core.logger import CPLog -from couchpotato.core.notifications.base import Notification -from flask.helpers import json -import base64 -import traceback - -log = CPLog(__name__) - - -class Notifo(Notification): - - url = 'https://api.notifo.com/v1/send_notification' - - def notify(self, message = '', data = {}, listener = None): - if self.isDisabled(): return - - try: - params = { - 'label': self.default_title, - 'msg': toUnicode(message), - } - - headers = { - 'Authorization': "Basic %s" % base64.encodestring('%s:%s' % (self.conf('username'), self.conf('api_key')))[:-1] - } - - handle = self.urlopen(self.url, params = params, headers = headers) - result = json.loads(handle) - - if result['status'] != 'success' or result['response_message'] != 'OK': - raise Exception - - except: - log.error('Notification failed: %s', traceback.format_exc()) - return False - - log.info('Notifo notification successful.') - return True diff --git a/couchpotato/core/notifications/notifymyandroid.py b/couchpotato/core/notifications/notifymyandroid.py new file mode 100644 index 0000000000..ed7a24c8db --- /dev/null +++ b/couchpotato/core/notifications/notifymyandroid.py @@ -0,0 +1,78 @@ +from 
couchpotato.core.helpers.variable import splitString +from couchpotato.core.logger import CPLog +from couchpotato.core.notifications.base import Notification +import pynma +import six + +log = CPLog(__name__) + +autoload = 'NotifyMyAndroid' + + +class NotifyMyAndroid(Notification): + + def notify(self, message = '', data = None, listener = None): + if not data: data = {} + + nma = pynma.PyNMA() + keys = splitString(self.conf('api_key')) + nma.addkey(keys) + nma.developerkey(self.conf('dev_key')) + + response = nma.push( + application = self.default_title, + event = message.split(' ')[0], + description = message, + priority = self.conf('priority'), + batch_mode = len(keys) > 1 + ) + + successful = 0 + for key in keys: + if not response[str(key)]['code'] == six.u('200'): + log.error('Could not send notification to NotifyMyAndroid (%s). %s', (key, response[key]['message'])) + else: + successful += 1 + + return successful == len(keys) + + +config = [{ + 'name': 'notifymyandroid', + 'groups': [ + { + 'tab': 'notifications', + 'list': 'notification_providers', + 'name': 'notifymyandroid', + 'label': 'Notify My Android', + 'options': [ + { + 'name': 'enabled', + 'default': 0, + 'type': 'enabler', + }, + { + 'name': 'api_key', + 'description': 'Multiple keys seperated by a comma. Maximum of 5.' 
+ }, + { + 'name': 'dev_key', + 'advanced': True, + }, + { + 'name': 'priority', + 'default': 0, + 'type': 'dropdown', + 'values': [('Very Low', -2), ('Moderate', -1), ('Normal', 0), ('High', 1), ('Emergency', 2)], + }, + { + 'name': 'on_snatch', + 'default': 0, + 'type': 'bool', + 'advanced': True, + 'description': 'Also send message when movie is snatched.', + }, + ], + } + ], +}] diff --git a/couchpotato/core/notifications/notifymyandroid/__init__.py b/couchpotato/core/notifications/notifymyandroid/__init__.py deleted file mode 100644 index 9ee5d90a61..0000000000 --- a/couchpotato/core/notifications/notifymyandroid/__init__.py +++ /dev/null @@ -1,44 +0,0 @@ -from .main import NotifyMyAndroid - -def start(): - return NotifyMyAndroid() - -config = [{ - 'name': 'notifymyandroid', - 'groups': [ - { - 'tab': 'notifications', - 'list': 'notification_providers', - 'name': 'notifymyandroid', - 'label': 'Notify My Android', - 'options': [ - { - 'name': 'enabled', - 'default': 0, - 'type': 'enabler', - }, - { - 'name': 'api_key', - 'description': 'Multiple keys seperated by a comma. Maximum of 5.' 
- }, - { - 'name': 'dev_key', - 'advanced': True, - }, - { - 'name': 'priority', - 'default': 0, - 'type': 'dropdown', - 'values': [('Very Low', -2), ('Moderate', -1), ('Normal', 0), ('High', 1), ('Emergency', 2)], - }, - { - 'name': 'on_snatch', - 'default': 0, - 'type': 'bool', - 'advanced': True, - 'description': 'Also send message when movie is snatched.', - }, - ], - } - ], -}] diff --git a/couchpotato/core/notifications/notifymyandroid/main.py b/couchpotato/core/notifications/notifymyandroid/main.py deleted file mode 100644 index cc6ef6623a..0000000000 --- a/couchpotato/core/notifications/notifymyandroid/main.py +++ /dev/null @@ -1,37 +0,0 @@ -from couchpotato.core.helpers.variable import splitString -from couchpotato.core.logger import CPLog -from couchpotato.core.notifications.base import Notification -import pynma - -log = CPLog(__name__) - - -class NotifyMyAndroid(Notification): - - def notify(self, message = '', data = {}, listener = None): - if self.isDisabled(): return - - nma = pynma.PyNMA() - keys = splitString(self.conf('api_key')) - nma.addkey(keys) - nma.developerkey(self.conf('dev_key')) - - # hacky fix for the event type - # as it seems to be part of the message now - self.event = message.split(' ')[0] - response = nma.push( - application = self.default_title, - event = self.event, - description = message, - priority = self.conf('priority'), - batch_mode = len(keys) > 1 - ) - - successful = 0 - for key in keys: - if not response[str(key)]['code'] == u'200': - log.error('Could not send notification to NotifyMyAndroid (%s). 
%s', (key, response[key]['message'])) - else: - successful += 1 - - return successful == len(keys) diff --git a/couchpotato/core/notifications/notifymywp/__init__.py b/couchpotato/core/notifications/notifymywp/__init__.py deleted file mode 100644 index 6e0bd06d7f..0000000000 --- a/couchpotato/core/notifications/notifymywp/__init__.py +++ /dev/null @@ -1,44 +0,0 @@ -from .main import NotifyMyWP - -def start(): - return NotifyMyWP() - -config = [{ - 'name': 'notifymywp', - 'groups': [ - { - 'tab': 'notifications', - 'list': 'notification_providers', - 'name': 'notifymywp', - 'label': 'Windows Phone', - 'options': [ - { - 'name': 'enabled', - 'default': 0, - 'type': 'enabler', - }, - { - 'name': 'api_key', - 'description': 'Multiple keys seperated by a comma. Maximum of 5.' - }, - { - 'name': 'dev_key', - 'advanced': True, - }, - { - 'name': 'priority', - 'default': 0, - 'type': 'dropdown', - 'values': [('Very Low', -2), ('Moderate', -1), ('Normal', 0), ('High', 1), ('Emergency', 2)], - }, - { - 'name': 'on_snatch', - 'default': 0, - 'type': 'bool', - 'advanced': True, - 'description': 'Also send message when movie is snatched.', - }, - ], - } - ], -}] diff --git a/couchpotato/core/notifications/notifymywp/main.py b/couchpotato/core/notifications/notifymywp/main.py deleted file mode 100644 index 3258e85fe1..0000000000 --- a/couchpotato/core/notifications/notifymywp/main.py +++ /dev/null @@ -1,24 +0,0 @@ -from couchpotato.core.helpers.variable import splitString -from couchpotato.core.logger import CPLog -from couchpotato.core.notifications.base import Notification -from pynmwp import PyNMWP - -log = CPLog(__name__) - - -class NotifyMyWP(Notification): - - def notify(self, message = '', data = {}, listener = None): - if self.isDisabled(): return - - keys = splitString(self.conf('api_key')) - p = PyNMWP(keys, self.conf('dev_key')) - - response = p.push(application = self.default_title, event = message, description = message, priority = self.conf('priority'), batch_mode 
= len(keys) > 1) - - for key in keys: - if not response[key]['Code'] == u'200': - log.error('Could not send notification to NotifyMyWindowsPhone (%s). %s', (key, response[key]['message'])) - return False - - return response diff --git a/couchpotato/core/notifications/plex/__init__.py b/couchpotato/core/notifications/plex/__init__.py old mode 100644 new mode 100755 index 8d89a40f1a..3f4051657e --- a/couchpotato/core/notifications/plex/__init__.py +++ b/couchpotato/core/notifications/plex/__init__.py @@ -1,6 +1,7 @@ from .main import Plex -def start(): + +def autoload(): return Plex() config = [{ @@ -17,10 +18,42 @@ def start(): 'type': 'enabler', }, { - 'name': 'host', + 'name': 'media_server', + 'label': 'Media Server', 'default': 'localhost', - 'description': 'Default should be on localhost', + 'description': 'Comma separated list of hostnames/IPs, default localhost' + }, + { + 'name': 'username', + 'label': 'Username', + 'default': '', + 'description': 'Required for myPlex' + }, + { + 'name': 'password', + 'label': 'Password', + 'default': '', + 'type': 'password', + 'description': 'Required for myPlex' + }, + { + 'name': 'auth_token', + 'label': 'Auth Token', + 'default': '', + 'advanced': True, + 'description': 'Required for myPlex' + }, + { + 'name': 'clients', + 'default': '', + 'description': 'Comma separated list of client names\'s (computer names). 
Top right when you start Plex' + }, + { + 'name': 'on_snatch', + 'default': 0, + 'type': 'bool', 'advanced': True, + 'description': 'Also send message when movie is snatched.', }, ], } diff --git a/couchpotato/core/notifications/plex/client.py b/couchpotato/core/notifications/plex/client.py new file mode 100644 index 0000000000..84cf7af6a9 --- /dev/null +++ b/couchpotato/core/notifications/plex/client.py @@ -0,0 +1,87 @@ +import json + +from couchpotato import CPLog +from couchpotato.core.event import addEvent +from couchpotato.core.helpers.encoding import tryUrlencode +import requests + + +log = CPLog(__name__) + + +class PlexClientProtocol(object): + def __init__(self, plex): + self.plex = plex + + addEvent('notify.plex.notifyClient', self.notify) + + def notify(self, client, message): + raise NotImplementedError() + + +class PlexClientHTTP(PlexClientProtocol): + def request(self, command, client): + url = 'http://%s:%s/xbmcCmds/xbmcHttp/?%s' % ( + client['address'], + client['port'], + tryUrlencode(command) + ) + + headers = {} + + try: + self.plex.urlopen(url, headers = headers, timeout = 3, show_error = False) + except Exception as err: + log.error("Couldn't sent command to Plex: %s", err) + return False + + return True + + def notify(self, client, message): + if client.get('protocol') != 'xbmchttp': + return None + + data = { + 'command': 'ExecBuiltIn', + 'parameter': 'Notification(CouchPotato, %s)' % message + } + + return self.request(data, client) + + +class PlexClientJSON(PlexClientProtocol): + def request(self, method, params, client): + log.debug('sendJSON("%s", %s, %s)', (method, params, client)) + url = 'http://%s:%s/jsonrpc' % ( + client['address'], + client['port'] + ) + + headers = { + 'Content-Type': 'application/json' + } + + request = { + 'id': 1, + 'jsonrpc': '2.0', + 'method': method, + 'params': params + } + + try: + requests.post(url, headers = headers, timeout = 3, data = json.dumps(request)) + except Exception as err: + log.error("Couldn't 
sent command to Plex: %s", err) + return False + + return True + + def notify(self, client, message): + if client.get('protocol') not in ['xbmcjson', 'plex']: + return None + + params = { + 'title': 'CouchPotato', + 'message': message + } + return self.request('GUI.ShowNotification', params, client) diff --git a/couchpotato/core/notifications/plex/main.py b/couchpotato/core/notifications/plex/main.py old mode 100644 new mode 100755 index 9790ccfef4..a6853b2f83 --- a/couchpotato/core/notifications/plex/main.py +++ b/couchpotato/core/notifications/plex/main.py @@ -1,90 +1,78 @@ -from couchpotato.core.event import addEvent -from couchpotato.core.helpers.encoding import tryUrlencode -from couchpotato.core.helpers.request import jsonified -from couchpotato.core.helpers.variable import cleanHost +from couchpotato.core.event import addEvent, fireEvent from couchpotato.core.logger import CPLog from couchpotato.core.notifications.base import Notification -from urllib2 import URLError -from xml.dom import minidom -import traceback +from .client import PlexClientHTTP, PlexClientJSON +from .server import PlexServer log = CPLog(__name__) class Plex(Notification): + http_time_between_calls = 0 + def __init__(self): super(Plex, self).__init__() - addEvent('renamer.after', self.addToLibrary) - - def addToLibrary(self, message = None, group = {}): - if self.isDisabled(): return - - log.info('Sending notification to Plex') - hosts = [cleanHost(x.strip() + ':32400') for x in self.conf('host').split(",")] - for host in hosts: + self.server = PlexServer(self) - source_type = ['movie'] - base_url = '%slibrary/sections' % host - refresh_url = '%s/%%s/refresh' % base_url + self.client_protocols = { + 'http': PlexClientHTTP(self), + 'json': PlexClientJSON(self) + } - try: - sections_xml = self.urlopen(base_url) - xml_sections = minidom.parseString(sections_xml) - sections = xml_sections.getElementsByTagName('Directory') + addEvent('renamer.after', self.addToLibrary) - for s in sections: - 
if s.getAttribute('type') in source_type: - url = refresh_url % s.getAttribute('key') - x = self.urlopen(url) + def addToLibrary(self, message = None, group = None): + if self.isDisabled(): return + if not group: group = {} - except: - log.error('Plex library update failed for %s, Media Server not running: %s', (host, traceback.format_exc(1))) - return False + return self.server.refresh() - return True + def getClientNames(self): + return [ + x.strip().lower() + for x in self.conf('clients').split(',') + ] - def notify(self, message = '', data = {}, listener = None): - if self.isDisabled(): return + def notifyClients(self, message, client_names): + success = True - hosts = [x.strip() + ':3000' for x in self.conf('host').split(",")] - successful = 0 - for host in hosts: - if self.send({'command': 'ExecBuiltIn', 'parameter': 'Notification(CouchPotato, %s)' % message}, host): - successful += 1 + for client_name in client_names: - return successful == len(hosts) + client_success = False + client = self.server.clients.get(client_name) - def send(self, command, host): + if client and client['found']: + client_success = fireEvent('notify.plex.notifyClient', client, message, single = True) - url = 'http://%s/xbmcCmds/xbmcHttp/?%s' % (host, tryUrlencode(command)) + if not client_success: + if self.server.staleClients() or not client: + log.info('Failed to send notification to client "%s". 
' + 'Client list is stale, updating the client list and retrying.', client_name) + self.server.updateClients(self.getClientNames()) + else: + log.warning('Failed to send notification to client %s, skipping this time', client_name) + success = False - headers = {} + return success - try: - self.urlopen(url, headers = headers, show_error = False) - except URLError: - log.error("Couldn't sent command to Plex, probably just running Media Server") - return False - except: - log.error("Couldn't sent command to Plex: %s", traceback.format_exc()) - return False + def notify(self, message = '', data = None, listener = None): + if not data: data = {} + return self.notifyClients(message, self.getClientNames()) - log.info('Plex notification to %s successful.', host) - return True - - def test(self): + def test(self, **kwargs): test_type = self.testNotifyName() log.info('Sending test to %s', test_type) - success = self.notify( + notify_success = self.notify( message = self.test_message, data = {}, listener = 'test' ) - success2 = self.addToLibrary() - return jsonified({'success': success or success2}) + refresh_success = self.addToLibrary() + + return {'success': notify_success or refresh_success} diff --git a/couchpotato/core/notifications/plex/server.py b/couchpotato/core/notifications/plex/server.py new file mode 100644 index 0000000000..51f52f99da --- /dev/null +++ b/couchpotato/core/notifications/plex/server.py @@ -0,0 +1,153 @@ +from datetime import timedelta, datetime +from urlparse import urlparse +import traceback + +from couchpotato.core.helpers.variable import cleanHost +from couchpotato import CPLog + + +try: + import xml.etree.cElementTree as etree +except ImportError: + import xml.etree.ElementTree as etree + +log = CPLog(__name__) + + +class PlexServer(object): + def __init__(self, plex): + self.plex = plex + + self.clients = {} + self.last_clients_update = None + + def staleClients(self): + if not self.last_clients_update: + return True + + return 
self.last_clients_update + timedelta(minutes=15) < datetime.now() + + def request(self, hostname, path, data_type='xml'): + if not self.plex.conf('media_server'): + log.warning("Plex media server hostname is required") + return None + + if path.startswith('/'): + path = path[1:] + + #Maintain support for older Plex installations without myPlex + if not self.plex.conf('auth_token') and not self.plex.conf('username') and not self.plex.conf('password'): + data = self.plex.urlopen('%s/%s' % ( + self.createHost(hostname, port = 32400), + path + )) + else: + #Fetch X-Plex-Token if it doesn't exist but a username/password do + if not self.plex.conf('auth_token') and (self.plex.conf('username') and self.plex.conf('password')): + import urllib2, base64 + log.info("Fetching a new X-Plex-Token from plex.tv") + username = self.plex.conf('username') + password = self.plex.conf('password') + req = urllib2.Request("https://plex.tv/users/sign_in.xml", data="") + authheader = "Basic %s" % base64.encodestring('%s:%s' % (username, password))[:-1] + req.add_header("Authorization", authheader) + req.add_header("X-Plex-Device-Name", "CouchPotato") + req.add_header("X-Plex-Product", "CouchPotato Notifier") + req.add_header("X-Plex-Client-Identifier", "b3a6b24dcab2224bdb101fc6aa08ea5e2f3147d6") + req.add_header("X-Plex-Version", "1.0") + + try: + response = urllib2.urlopen(req) + except urllib2.URLError, e: + log.info('Error fetching token from plex.tv: %s', traceback.format_exc()) + + try: + auth_tree = etree.parse(response) + token = auth_tree.findall(".//authentication-token")[0].text + self.plex.conf('auth_token', token) + + except (ValueError, IndexError) as e: + log.info("Error parsing plex.tv response: " + repr(e)) + + #Add X-Plex-Token header for myPlex support workaround + data = self.plex.urlopen('%s/%s?X-Plex-Token=%s' % ( + self.createHost(hostname, port = 32400), + path, + self.plex.conf('auth_token') + )) + + if data_type == 'xml': + return etree.fromstring(data) + else: + 
return data + + def updateClients(self, client_names): + log.info('Searching for clients on Plex Media Server') + + self.clients = {} + + for hostname in self.plex.conf('media_server').split(','): + result = self.request(hostname, 'clients') + if not result: + return + + found_clients = [ + c for c in result.findall('Server') + if c.get('name') and c.get('name').lower() in client_names + ] + + # Store client details in cache + for client in found_clients: + name = client.get('name').lower() + + self.clients[name] = { + 'name': client.get('name'), + 'found': True, + 'address': client.get('address'), + 'port': client.get('port'), + 'protocol': client.get('protocol', 'xbmchttp') + } + + client_names.remove(name) + + # Store dummy info for missing clients + for client_name in client_names: + self.clients[client_name] = { + 'found': False + } + + if len(client_names) > 0: + log.debug('Unable to find clients: %s', ', '.join(client_names)) + + self.last_clients_update = datetime.now() + + def refresh(self, section_types=None): + if not section_types: + section_types = ['movie'] + + for hostname in self.plex.conf('media_server').split(','): + sections = self.request(hostname, 'library/sections') + + try: + for section in sections.findall('Directory'): + if section.get('type') not in section_types: + continue + + self.request(hostname, 'library/sections/%s/refresh' % section.get('key'), 'text') + except: + log.error('Plex library update failed for %s, Media Server not running: %s', + (hostname, traceback.format_exc(1))) + return False + + return True + + def createHost(self, host, port = None): + + h = cleanHost(host) + p = urlparse(h) + h = h.rstrip('/') + + if port and not p.port: + h += ':%s' % port + + return h diff --git a/couchpotato/core/notifications/prowl.py b/couchpotato/core/notifications/prowl.py new file mode 100644 index 0000000000..fdece326c6 --- /dev/null +++ b/couchpotato/core/notifications/prowl.py @@ -0,0 +1,75 @@ +import traceback + +from 
couchpotato.core.helpers.encoding import toUnicode +from couchpotato.core.logger import CPLog +from couchpotato.core.notifications.base import Notification + + +log = CPLog(__name__) + +autoload = 'Prowl' + + +class Prowl(Notification): + + urls = { + 'api': 'https://api.prowlapp.com/publicapi/add' + } + + def notify(self, message = '', data = None, listener = None): + if not data: data = {} + + data = { + 'apikey': self.conf('api_key'), + 'application': self.default_title, + 'description': toUnicode(message), + 'priority': self.conf('priority'), + } + headers = { + 'Content-type': 'application/x-www-form-urlencoded' + } + + try: + self.urlopen(self.urls['api'], headers = headers, data = data, show_error = False) + log.info('Prowl notifications sent.') + return True + except: + log.error('Prowl failed: %s', traceback.format_exc()) + + return False + + +config = [{ + 'name': 'prowl', + 'groups': [ + { + 'tab': 'notifications', + 'list': 'notification_providers', + 'name': 'prowl', + 'options': [ + { + 'name': 'enabled', + 'default': 0, + 'type': 'enabler', + }, + { + 'name': 'api_key', + 'label': 'Api key', + }, + { + 'name': 'priority', + 'default': '0', + 'type': 'dropdown', + 'values': [('Very Low', -2), ('Moderate', -1), ('Normal', 0), ('High', 1), ('Emergency', 2)] + }, + { + 'name': 'on_snatch', + 'default': 0, + 'type': 'bool', + 'advanced': True, + 'description': 'Also send message when movie is snatched.', + }, + ], + } + ], +}] diff --git a/couchpotato/core/notifications/prowl/__init__.py b/couchpotato/core/notifications/prowl/__init__.py deleted file mode 100644 index e056428907..0000000000 --- a/couchpotato/core/notifications/prowl/__init__.py +++ /dev/null @@ -1,39 +0,0 @@ -from .main import Prowl - -def start(): - return Prowl() - -config = [{ - 'name': 'prowl', - 'groups': [ - { - 'tab': 'notifications', - 'list': 'notification_providers', - 'name': 'prowl', - 'options': [ - { - 'name': 'enabled', - 'default': 0, - 'type': 'enabler', - }, - { - 
'name': 'api_key', - 'label': 'Api key', - }, - { - 'name': 'priority', - 'default': '0', - 'type': 'dropdown', - 'values': [('Very Low', -2), ('Moderate', -1), ('Normal', 0), ('High', 1), ('Emergency', 2)] - }, - { - 'name': 'on_snatch', - 'default': 0, - 'type': 'bool', - 'advanced': True, - 'description': 'Also send message when movie is snatched.', - }, - ], - } - ], -}] diff --git a/couchpotato/core/notifications/prowl/main.py b/couchpotato/core/notifications/prowl/main.py deleted file mode 100644 index 0990d0d1ea..0000000000 --- a/couchpotato/core/notifications/prowl/main.py +++ /dev/null @@ -1,35 +0,0 @@ -from couchpotato.core.helpers.encoding import toUnicode -from couchpotato.core.logger import CPLog -from couchpotato.core.notifications.base import Notification -import traceback - -log = CPLog(__name__) - - -class Prowl(Notification): - - urls = { - 'api': 'https://api.prowlapp.com/publicapi/add' - } - - def notify(self, message = '', data = {}, listener = None): - if self.isDisabled(): return - - data = { - 'apikey': self.conf('api_key'), - 'application': self.default_title, - 'description': toUnicode(message), - 'priority': self.conf('priority'), - } - headers = { - 'Content-type': 'application/x-www-form-urlencoded' - } - - try: - self.urlopen(self.urls['api'], headers = headers, params = data, multipart = True, show_error = False) - log.info('Prowl notifications sent.') - return True - except: - log.error('Prowl failed: %s', traceback.format_exc()) - - return False diff --git a/couchpotato/core/notifications/pushalot.py b/couchpotato/core/notifications/pushalot.py new file mode 100644 index 0000000000..fa781bc5f1 --- /dev/null +++ b/couchpotato/core/notifications/pushalot.py @@ -0,0 +1,87 @@ +import traceback + +from couchpotato.core.helpers.encoding import toUnicode +from couchpotato.core.logger import CPLog +from couchpotato.core.notifications.base import Notification + + +log = CPLog(__name__) + +autoload = 'Pushalot' + + +class 
Pushalot(Notification): + + urls = { + 'api': 'https://pushalot.com/api/sendmessage' + } + + def notify(self, message = '', data = None, listener = None): + if not data: data = {} + + data = { + 'AuthorizationToken': self.conf('auth_token'), + 'Title': self.default_title, + 'Body': toUnicode(message), + 'IsImportant': self.conf('important'), + 'IsSilent': self.conf('silent'), + 'Image': toUnicode(self.getNotificationImage('medium') + '?1'), + 'Source': toUnicode(self.default_title) + } + + headers = { + 'Content-type': 'application/x-www-form-urlencoded' + } + + try: + self.urlopen(self.urls['api'], headers = headers, data = data, show_error = False) + return True + except: + log.error('PushAlot failed: %s', traceback.format_exc()) + + return False + + +config = [{ + 'name': 'pushalot', + 'groups': [ + { + 'tab': 'notifications', + 'list': 'notification_providers', + 'name': 'pushalot', + 'description': 'for Windows Phone and Windows 8', + 'options': [ + { + 'name': 'enabled', + 'default': 0, + 'type': 'enabler', + }, + { + 'name': 'auth_token', + 'label': 'Auth Token', + }, + { + 'name': 'silent', + 'label': 'Silent', + 'default': 0, + 'type': 'bool', + 'description': 'Don\'t send Toast notifications. 
Only update Live Tile', + }, + { + 'name': 'important', + 'label': 'High Priority', + 'default': 0, + 'type': 'bool', + 'description': 'Send message with High priority.', + }, + { + 'name': 'on_snatch', + 'default': 0, + 'type': 'bool', + 'advanced': True, + 'description': 'Also send message when movie is snatched.', + }, + ], + } + ], +}] diff --git a/couchpotato/core/notifications/pushalot/__init__.py b/couchpotato/core/notifications/pushalot/__init__.py deleted file mode 100644 index a2a297a39c..0000000000 --- a/couchpotato/core/notifications/pushalot/__init__.py +++ /dev/null @@ -1,48 +0,0 @@ -from .main import Pushalot - -def start(): - return Pushalot() - -config = [{ - 'name': 'pushalot', - 'groups': [ - { - 'tab': 'notifications', - 'list': 'notification_providers', - 'name': 'pushalot', - 'description': 'for Windows Phone and Windows 8', - 'options': [ - { - 'name': 'enabled', - 'default': 0, - 'type': 'enabler', - }, - { - 'name': 'auth_token', - 'label': 'Auth Token', - }, - { - 'name': 'silent', - 'label': 'Silent', - 'default': 0, - 'type': 'bool', - 'description': 'Don\'t send Toast notifications. 
Only update Live Tile', - }, - { - 'name': 'important', - 'label': 'High Priority', - 'default': 0, - 'type': 'bool', - 'description': 'Send message with High priority.', - }, - { - 'name': 'on_snatch', - 'default': 0, - 'type': 'bool', - 'advanced': True, - 'description': 'Also send message when movie is snatched.', - }, - ], - } - ], -}] diff --git a/couchpotato/core/notifications/pushalot/main.py b/couchpotato/core/notifications/pushalot/main.py deleted file mode 100644 index 3b11331127..0000000000 --- a/couchpotato/core/notifications/pushalot/main.py +++ /dev/null @@ -1,37 +0,0 @@ -from couchpotato.core.helpers.encoding import toUnicode -from couchpotato.core.logger import CPLog -from couchpotato.core.notifications.base import Notification -import traceback - -log = CPLog(__name__) - -class Pushalot(Notification): - - urls = { - 'api': 'https://pushalot.com/api/sendmessage' - } - - def notify(self, message = '', data = {}, listener = None): - if self.isDisabled(): return - - data = { - 'AuthorizationToken': self.conf('auth_token'), - 'Title': self.default_title, - 'Body': toUnicode(message), - 'LinkTitle': toUnicode("CouchPotato"), - 'link': toUnicode("https://couchpota.to/"), - 'IsImportant': self.conf('important'), - 'IsSilent': self.conf('silent'), - } - - headers = { - 'Content-type': 'application/x-www-form-urlencoded' - } - - try: - self.urlopen(self.urls['api'], headers = headers, params = data, multipart = True, show_error = False) - return True - except: - log.error('PushAlot failed: %s', traceback.format_exc()) - - return False diff --git a/couchpotato/core/notifications/pushbullet.py b/couchpotato/core/notifications/pushbullet.py new file mode 100644 index 0000000000..1b8ebe6e1a --- /dev/null +++ b/couchpotato/core/notifications/pushbullet.py @@ -0,0 +1,112 @@ +from couchpotato.core.helpers.encoding import toUnicode +from couchpotato.core.helpers.variable import splitString +from couchpotato.core.logger import CPLog +from 
couchpotato.core.notifications.base import Notification + + +log = CPLog(__name__) + +autoload = 'Pushbullet' + + +class Pushbullet(Notification): + + url = 'https://api.pushbullet.com/v2/%s' + + def notify(self, message = '', data = None, listener = None): + if not data: data = {} + + # Get all the device IDs linked to this user + devices = self.getDevices() or [None] + successful = 0 + for device in devices: + response = self.request( + 'pushes', + device_iden = device, + type = 'note', + title = self.default_title, + body = toUnicode(message) + ) + + if response: + successful += 1 + else: + log.error('Unable to push notification to Pushbullet device with ID %s' % device) + + for channel in self.getChannels(): + self.request( + 'pushes', + channel_tag = channel, + type = 'note', + title = self.default_title, + body = toUnicode(message) + ) + + return successful == len(devices) + + def getDevices(self): + return splitString(self.conf('devices')) + + def getChannels(self): + return splitString(self.conf('channels')) + + def request(self, method, **kwargs): + try: + headers = { + 'Access-Token': self.conf('api_key') + } + + if kwargs.get('device_iden') is None: + try: del kwargs['device_iden'] + except: pass + + return self.getJsonData(self.url % method, cache_timeout = -1, headers = headers, data = kwargs) + + except Exception as ex: + log.error('Pushbullet request failed') + log.debug(ex) + + return None + + +config = [{ + 'name': 'pushbullet', + 'groups': [ + { + 'tab': 'notifications', + 'list': 'notification_providers', + 'name': 'pushbullet', + 'options': [ + { + 'name': 'enabled', + 'default': 0, + 'type': 'enabler', + }, + { + 'name': 'api_key', + 'label': 'Access Token', + 'description': 'Can be found on Account Settings', + }, + { + 'name': 'devices', + 'default': '', + 'advanced': True, + 'description': 'IDs of devices to send notifications to, empty = all devices' + }, + { + 'name': 'channels', + 'default': '', + 'advanced': True, + 'description': 'IDs 
of channels to send notifications to, empty = no channels' + }, + { + 'name': 'on_snatch', + 'default': 0, + 'type': 'bool', + 'advanced': True, + 'description': 'Also send message when movie is snatched.', + }, + ], + } + ], +}] diff --git a/couchpotato/core/notifications/pushover.py b/couchpotato/core/notifications/pushover.py new file mode 100644 index 0000000000..ebce97c1ff --- /dev/null +++ b/couchpotato/core/notifications/pushover.py @@ -0,0 +1,88 @@ +from couchpotato.core.helpers.encoding import toUnicode +from couchpotato.core.helpers.variable import getTitle, getIdentifier +from couchpotato.core.logger import CPLog +from couchpotato.core.notifications.base import Notification + + +log = CPLog(__name__) + +autoload = 'Pushover' + + +class Pushover(Notification): + + api_url = 'https://api.pushover.net' + + def notify(self, message = '', data = None, listener = None): + if not data: data = {} + + api_data = { + 'user': self.conf('user_key'), + 'token': self.conf('api_token'), + 'message': toUnicode(message), + 'priority': self.conf('priority'), + 'sound': self.conf('sound'), + } + + if data and getIdentifier(data): + api_data.update({ + 'url': toUnicode('http://www.imdb.com/title/%s/' % getIdentifier(data)), + 'url_title': toUnicode('%s on IMDb' % getTitle(data)), + }) + + try: + data = self.urlopen('%s/%s' % (self.api_url, '1/messages.json'), + headers = {'Content-type': 'application/x-www-form-urlencoded'}, + data = api_data) + log.info2('Pushover responded with: %s', data) + return True + except: + return False + + + +config = [{ + 'name': 'pushover', + 'groups': [ + { + 'tab': 'notifications', + 'list': 'notification_providers', + 'name': 'pushover', + 'options': [ + { + 'name': 'enabled', + 'default': 0, + 'type': 'enabler', + }, + { + 'name': 'user_key', + 'description': 'Register on pushover.net to get one.' 
+ }, + { + 'name': 'api_token', + 'description': 'Register on pushover.net to get one.', + 'advanced': True, + 'default': 'YkxHMYDZp285L265L3IwH3LmzkTaCy', + }, + { + 'name': 'priority', + 'default': 0, + 'type': 'dropdown', + 'values': [('Lowest', -2), ('Low', -1), ('Normal', 0), ('High', 1)], + }, + { + 'name': 'on_snatch', + 'default': 0, + 'type': 'bool', + 'advanced': True, + 'description': 'Also send message when movie is snatched.', + }, + { + 'name': 'sound', + 'advanced': True, + 'description': 'Define custom sound for Pushover alert.' + }, + ], + } + ], +}] diff --git a/couchpotato/core/notifications/pushover/__init__.py b/couchpotato/core/notifications/pushover/__init__.py deleted file mode 100644 index 1ea1d5c014..0000000000 --- a/couchpotato/core/notifications/pushover/__init__.py +++ /dev/null @@ -1,39 +0,0 @@ -from .main import Pushover - -def start(): - return Pushover() - -config = [{ - 'name': 'pushover', - 'groups': [ - { - 'tab': 'notifications', - 'list': 'notification_providers', - 'name': 'pushover', - 'options': [ - { - 'name': 'enabled', - 'default': 0, - 'type': 'enabler', - }, - { - 'name': 'user_key', - 'description': 'Register on pushover.net to get one.' 
- }, - { - 'name': 'priority', - 'default': 0, - 'type': 'dropdown', - 'values': [('Normal', 0), ('High', 1)], - }, - { - 'name': 'on_snatch', - 'default': 0, - 'type': 'bool', - 'advanced': True, - 'description': 'Also send message when movie is snatched.', - }, - ], - } - ], -}] diff --git a/couchpotato/core/notifications/pushover/main.py b/couchpotato/core/notifications/pushover/main.py deleted file mode 100644 index bcd3245ce4..0000000000 --- a/couchpotato/core/notifications/pushover/main.py +++ /dev/null @@ -1,42 +0,0 @@ -from couchpotato.core.helpers.encoding import toUnicode, tryUrlencode -from couchpotato.core.logger import CPLog -from couchpotato.core.notifications.base import Notification -from httplib import HTTPSConnection - -log = CPLog(__name__) - - -class Pushover(Notification): - - app_token = 'YkxHMYDZp285L265L3IwH3LmzkTaCy' - - def notify(self, message = '', data = {}, listener = None): - if self.isDisabled(): return - - http_handler = HTTPSConnection("api.pushover.net:443") - - data = { - 'user': self.conf('user_key'), - 'token': self.app_token, - 'message': toUnicode(message), - 'priority': self.conf('priority') - } - - http_handler.request('POST', - "/1/messages.json", - headers = {'Content-type': 'application/x-www-form-urlencoded'}, - body = tryUrlencode(data) - ) - - response = http_handler.getresponse() - request_status = response.status - - if request_status == 200: - log.info('Pushover notifications sent.') - return True - elif request_status == 401: - log.error('Pushover auth failed: %s', response.reason) - return False - else: - log.error('Pushover notification failed.') - return False diff --git a/couchpotato/core/notifications/script.py b/couchpotato/core/notifications/script.py new file mode 100644 index 0000000000..1736c33b56 --- /dev/null +++ b/couchpotato/core/notifications/script.py @@ -0,0 +1,68 @@ +import traceback +import subprocess +import os + +from couchpotato.core.helpers.encoding import toUnicode +from 
couchpotato.core.helpers.variable import getIdentifier +from couchpotato.api import addApiView +from couchpotato.core.event import addEvent +from couchpotato.core.logger import CPLog +from couchpotato.core.notifications.base import Notification + + + + +log = CPLog(__name__) + +autoload = 'Script' + +class Script(Notification): + + def __init__(self): + addApiView(self.testNotifyName(), self.test) + + addEvent('renamer.after', self.runScript) + + def runScript(self, message = None, group = None): + if self.isDisabled(): return + if not group: group = {} + + command = [self.conf('path'), group.get('destination_dir')] + log.info('Executing script command: %s ', command) + try: + p = subprocess.Popen(command, stdout = subprocess.PIPE, stderr = subprocess.STDOUT) + out = p.communicate() + log.info('Result from script: %s', str(out)) + return True + except OSError as e: + log.error('Unable to run script: %s', e) + + return False + + def test(self, **kwargs): + return { + 'success': os.path.isfile(self.conf('path')) + } + +config = [{ + 'name': 'script', + 'groups': [ + { + 'tab': 'notifications', + 'list': 'notification_providers', + 'name': 'script', + 'label': 'Script', + 'options': [ + { + 'name': 'enabled', + 'default': 0, + 'type': 'enabler', + }, + { + 'name': 'path', + 'description': 'The path to the script to execute.' 
+ } + ] + } + ] +}] diff --git a/couchpotato/core/notifications/slack.py b/couchpotato/core/notifications/slack.py new file mode 100644 index 0000000000..5da8456871 --- /dev/null +++ b/couchpotato/core/notifications/slack.py @@ -0,0 +1,126 @@ +import json +from couchpotato.core.logger import CPLog +from couchpotato.core.notifications.base import Notification + +log = CPLog(__name__) +autoload = 'Slack' + + +class Slack(Notification): + url = 'https://slack.com/api/chat.postMessage' + required_confs = ('token', 'channels',) + + def notify(self, message='', data=None, listener=None): + for key in self.required_confs: + if not self.conf(key): + log.warning('Slack notifications are enabled, but ' + '"{0}" is not specified.'.format(key)) + return False + + data = data or {} + message = message.strip() + + if self.conf('include_imdb') and 'identifier' in data: + template = ' http://www.imdb.com/title/{0[identifier]}/' + message += template.format(data) + + payload = { + 'token': self.conf('token'), + 'text': message, + 'username': self.conf('bot_name'), + 'unfurl_links': self.conf('include_imdb'), + 'as_user': self.conf('as_user'), + 'icon_url': self.conf('icon_url'), + 'icon_emoji': self.conf('icon_emoji') + } + + channels = self.conf('channels').split(',') + for channel in channels: + payload['channel'] = channel.strip() + response = self.urlopen(self.url, data=payload) + response = json.loads(response) + if not response['ok']: + log.warning('Notification sending to Slack has failed. 
Error ' + 'code: %s.', response['error']) + return False + return True + + +config = [{ + 'name': 'slack', + 'groups': [ + { + 'tab': 'notifications', + 'list': 'notification_providers', + 'name': 'slack', + 'options': [ + { + 'name': 'enabled', + 'default': 0, + 'type': 'enabler', + }, + { + 'name': 'token', + 'description': ( + 'Your Slack authentication token.', + 'Can be created at https://api.slack.com/web' + ) + }, + { + 'name': 'channels', + 'description': ( + 'Channel to send notifications to.', + 'Can be a public channel, private group or IM ' + 'channel. Can be an encoded ID or a name ' + '(staring with a hashtag, e.g. #general). ' + 'Separate with commas in order to notify multiple ' + 'channels. It is however recommended to send ' + 'notifications to only one channel due to ' + 'the Slack API rate limits.' + ) + }, + { + 'name': 'include_imdb', + 'default': True, + 'type': 'bool', + 'descrpition': 'Include a link to the movie page on IMDB.' + }, + { + 'name': 'bot_name', + 'description': 'Name of bot.', + 'default': 'CouchPotato', + 'advanced': True, + }, + { + 'name': 'as_user', + 'description': 'Send message as the authentication token ' + ' user.', + 'default': False, + 'type': 'bool', + 'advanced': True + }, + { + 'name': 'icon_url', + 'description': 'URL to an image to use as the icon for ' + 'notifications.', + 'advanced': True, + }, + { + 'name': 'icon_emoji', + 'description': ( + 'Emoji to use as the icon for notifications.', + 'Overrides icon_url' + ), + 'advanced': True, + }, + { + 'name': 'on_snatch', + 'default': 0, + 'type': 'bool', + 'advanced': True, + 'description': 'Also send message when movie is snatched.', + }, + ], + } + ], +}] diff --git a/couchpotato/core/notifications/synoindex.py b/couchpotato/core/notifications/synoindex.py new file mode 100644 index 0000000000..b14e1a0345 --- /dev/null +++ b/couchpotato/core/notifications/synoindex.py @@ -0,0 +1,63 @@ +import os +import subprocess + +from couchpotato.api import addApiView 
+from couchpotato.core.event import addEvent +from couchpotato.core.logger import CPLog +from couchpotato.core.notifications.base import Notification + + +log = CPLog(__name__) + +autoload = 'Synoindex' + + +class Synoindex(Notification): + + index_path = '/usr/syno/bin/synoindex' + + def __init__(self): + addApiView(self.testNotifyName(), self.test) + + addEvent('renamer.after', self.addToLibrary) + + def addToLibrary(self, message = None, group = None): + if self.isDisabled(): return + if not group: group = {} + + command = [self.index_path, '-A', group.get('destination_dir')] + log.info('Executing synoindex command: %s ', command) + try: + p = subprocess.Popen(command, stdout = subprocess.PIPE, stderr = subprocess.STDOUT) + out = p.communicate() + log.info('Result from synoindex: %s', str(out)) + return True + except OSError as e: + log.error('Unable to run synoindex: %s', e) + + return False + + def test(self, **kwargs): + return { + 'success': os.path.isfile(self.index_path) + } + + +config = [{ + 'name': 'synoindex', + 'groups': [ + { + 'tab': 'notifications', + 'list': 'notification_providers', + 'name': 'synoindex', + 'description': 'Automaticly adds index to Synology Media Server.', + 'options': [ + { + 'name': 'enabled', + 'default': 0, + 'type': 'enabler', + } + ], + } + ], +}] diff --git a/couchpotato/core/notifications/synoindex/__init__.py b/couchpotato/core/notifications/synoindex/__init__.py deleted file mode 100644 index eb3a793f80..0000000000 --- a/couchpotato/core/notifications/synoindex/__init__.py +++ /dev/null @@ -1,23 +0,0 @@ -from .main import Synoindex - -def start(): - return Synoindex() - -config = [{ - 'name': 'synoindex', - 'groups': [ - { - 'tab': 'notifications', - 'list': 'notification_providers', - 'name': 'synoindex', - 'description': 'Automaticly adds index to Synology Media Server.', - 'options': [ - { - 'name': 'enabled', - 'default': 0, - 'type': 'enabler', - } - ], - } - ], -}] diff --git 
a/couchpotato/core/notifications/synoindex/main.py b/couchpotato/core/notifications/synoindex/main.py deleted file mode 100644 index 01bd2fc819..0000000000 --- a/couchpotato/core/notifications/synoindex/main.py +++ /dev/null @@ -1,36 +0,0 @@ -from couchpotato.core.event import addEvent -from couchpotato.core.helpers.request import jsonified -from couchpotato.core.logger import CPLog -from couchpotato.core.notifications.base import Notification -import os -import subprocess - -log = CPLog(__name__) - - -class Synoindex(Notification): - - index_path = '/usr/syno/bin/synoindex' - - def __init__(self): - super(Synoindex, self).__init__() - addEvent('renamer.after', self.addToLibrary) - - def addToLibrary(self, message = None, group = {}): - if self.isDisabled(): return - - command = [self.index_path, '-A', group.get('destination_dir')] - log.info('Executing synoindex command: %s ', command) - try: - p = subprocess.Popen(command, stdout = subprocess.PIPE, stderr = subprocess.STDOUT) - out = p.communicate() - log.info('Result from synoindex: %s', str(out)) - return True - except OSError, e: - log.error('Unable to run synoindex: %s', e) - return False - - return True - - def test(self): - return jsonified({'success': os.path.isfile(self.index_path)}) diff --git a/couchpotato/core/notifications/telegrambot.py b/couchpotato/core/notifications/telegrambot.py new file mode 100644 index 0000000000..3b59f2cb68 --- /dev/null +++ b/couchpotato/core/notifications/telegrambot.py @@ -0,0 +1,78 @@ +from couchpotato.core.helpers.variable import getIdentifier +from couchpotato.core.logger import CPLog +from couchpotato.core.notifications.base import Notification +import requests +import six + +log = CPLog(__name__) + +autoload = 'TelegramBot' + +class TelegramBot(Notification): + + TELEGRAM_API = "https://api.telegram.org/bot%s/%s" + + def notify(self, message = '', data = None, listener = None): + if not data: data = {} + + # Get configuration data + token = self.conf('bot_token') + 
usr_id = self.conf('receiver_user_id') + + # Add IMDB url to message: + if data: + imdb_id = getIdentifier(data) + if imdb_id: + url = 'http://www.imdb.com/title/{0}/'.format(imdb_id) + message = '{0}\n{1}'.format(message, url) + + # Cosntruct message + payload = {'chat_id': usr_id, 'text': message, 'parse_mode': 'Markdown'} + + # Send message user Telegram's Bot API + response = requests.post(self.TELEGRAM_API % (token, "sendMessage"), data=payload) + + # Error logging + sent_successfuly = True + if not response.status_code == 200: + log.error('Could not send notification to TelegramBot (token=%s). Response: [%s]', (token, response.text)) + sent_successfuly = False + + return sent_successfuly + + +config = [{ + 'name': 'telegrambot', + 'groups': [ + { + 'tab': 'notifications', + 'list': 'notification_providers', + 'name': 'telegrambot', + 'label': 'Telegram Bot', + 'description': 'Notification provider which utilizes the bot API of the famous Telegram IM.', + 'options': [ + { + 'name': 'enabled', + 'default': 0, + 'type': 'enabler', + }, + { + 'name': 'bot_token', + 'description': 'Your bot token. Contact @BotFather on Telegram to get one.' + }, + { + 'name': 'receiver_user_id', + 'label': 'Recieving User/Group ID', + 'description': 'Receiving user/group - notifications will be sent to this user or group. Contact @myidbot on Telegram to get an ID.' 
+ }, + { + 'name': 'on_snatch', + 'default': 0, + 'type': 'bool', + 'advanced': True, + 'description': 'Also send message when movie is snatched.', + }, + ], + } + ], +}] diff --git a/couchpotato/core/notifications/toasty.py b/couchpotato/core/notifications/toasty.py new file mode 100644 index 0000000000..118043e833 --- /dev/null +++ b/couchpotato/core/notifications/toasty.py @@ -0,0 +1,65 @@ +import traceback + +from couchpotato.core.helpers.encoding import toUnicode, tryUrlencode +from couchpotato.core.logger import CPLog +from couchpotato.core.notifications.base import Notification + + +log = CPLog(__name__) + +autoload = 'Toasty' + + +class Toasty(Notification): + + urls = { + 'api': 'http://api.supertoasty.com/notify/%s?%s' + } + + def notify(self, message = '', data = None, listener = None): + if not data: data = {} + + data = { + 'title': self.default_title, + 'text': toUnicode(message), + 'sender': toUnicode("CouchPotato"), + 'image': 'https://raw.github.com/CouchPotato/CouchPotatoServer/master/couchpotato/static/images/homescreen.png', + } + + try: + self.urlopen(self.urls['api'] % (self.conf('api_key'), tryUrlencode(data)), show_error = False) + return True + except: + log.error('Toasty failed: %s', traceback.format_exc()) + + return False + + +config = [{ + 'name': 'toasty', + 'groups': [ + { + 'tab': 'notifications', + 'list': 'notification_providers', + 'name': 'toasty', + 'options': [ + { + 'name': 'enabled', + 'default': 0, + 'type': 'enabler', + }, + { + 'name': 'api_key', + 'label': 'Device ID', + }, + { + 'name': 'on_snatch', + 'default': 0, + 'type': 'bool', + 'advanced': True, + 'description': 'Also send message when movie is snatched.', + }, + ], + } + ], +}] diff --git a/couchpotato/core/notifications/toasty/__init__.py b/couchpotato/core/notifications/toasty/__init__.py deleted file mode 100644 index 8e2dae76b6..0000000000 --- a/couchpotato/core/notifications/toasty/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -from .main import Toasty - -def 
start(): - return Toasty() - -config = [{ - 'name': 'toasty', - 'groups': [ - { - 'tab': 'notifications', - 'list': 'notification_providers', - 'name': 'toasty', - 'options': [ - { - 'name': 'enabled', - 'default': 0, - 'type': 'enabler', - }, - { - 'name': 'api_key', - 'label': 'Device ID', - }, - { - 'name': 'on_snatch', - 'default': 0, - 'type': 'bool', - 'advanced': True, - 'description': 'Also send message when movie is snatched.', - }, - ], - } - ], -}] diff --git a/couchpotato/core/notifications/toasty/main.py b/couchpotato/core/notifications/toasty/main.py deleted file mode 100644 index 638c75dd52..0000000000 --- a/couchpotato/core/notifications/toasty/main.py +++ /dev/null @@ -1,30 +0,0 @@ -from couchpotato.core.helpers.encoding import toUnicode, tryUrlencode -from couchpotato.core.logger import CPLog -from couchpotato.core.notifications.base import Notification -import traceback - -log = CPLog(__name__) - -class Toasty(Notification): - - urls = { - 'api': 'http://api.supertoasty.com/notify/%s?%s' - } - - def notify(self, message = '', data = {}, listener = None): - if self.isDisabled(): return - - data = { - 'title': self.default_title, - 'text': toUnicode(message), - 'sender': toUnicode("CouchPotato"), - 'image': 'https://raw.github.com/RuudBurger/CouchPotatoServer/master/couchpotato/static/images/homescreen.png', - } - - try: - self.urlopen(self.urls['api'] % (self.conf('api_key'), tryUrlencode(data)), show_error = False) - return True - except: - log.error('Toasty failed: %s', traceback.format_exc()) - - return False diff --git a/couchpotato/core/notifications/trakt.py b/couchpotato/core/notifications/trakt.py new file mode 100644 index 0000000000..e3ae3d3929 --- /dev/null +++ b/couchpotato/core/notifications/trakt.py @@ -0,0 +1,67 @@ +from couchpotato.core.helpers.variable import getTitle, getIdentifier +from couchpotato.core.logger import CPLog +from couchpotato.core.media.movie.providers.automation.trakt.main import TraktBase +from 
couchpotato.core.notifications.base import Notification + +log = CPLog(__name__) + +autoload = 'Trakt' + + +class Trakt(Notification, TraktBase): + + urls = { + 'library': 'sync/collection', + 'unwatchlist': 'sync/watchlist/remove', + 'test': 'sync/last_activities', + } + + listen_to = ['renamer.after'] + enabled_option = 'notification_enabled' + + def notify(self, message = '', data = None, listener = None): + if not data: data = {} + + if listener == 'test': + result = self.call((self.urls['test'])) + + return result + + else: + + post_data = { + 'movies': [{'ids': {'imdb': getIdentifier(data)}}] if data else [] + } + + result = self.call((self.urls['library']), post_data) + if self.conf('remove_watchlist_enabled'): + result = result and self.call((self.urls['unwatchlist']), post_data) + + return result + + +config = [{ + 'name': 'trakt', + 'groups': [ + { + 'tab': 'notifications', + 'list': 'notification_providers', + 'name': 'trakt', + 'label': 'Trakt', + 'description': 'add movies to your collection once downloaded. 
Connect your account in Automation Trakt settings', + 'options': [ + { + 'name': 'notification_enabled', + 'default': False, + 'type': 'enabler', + }, + { + 'name': 'remove_watchlist_enabled', + 'label': 'Remove from watchlist', + 'default': False, + 'type': 'bool', + }, + ], + } + ], +}] diff --git a/couchpotato/core/notifications/twitter/__init__.py b/couchpotato/core/notifications/twitter/__init__.py index 9db8dcb878..b6b42bb5d8 100644 --- a/couchpotato/core/notifications/twitter/__init__.py +++ b/couchpotato/core/notifications/twitter/__init__.py @@ -1,6 +1,7 @@ from .main import Twitter -def start(): + +def autoload(): return Twitter() config = [{ diff --git a/couchpotato/core/notifications/twitter/main.py b/couchpotato/core/notifications/twitter/main.py index bbb9262b76..d760b0b2a4 100644 --- a/couchpotato/core/notifications/twitter/main.py +++ b/couchpotato/core/notifications/twitter/main.py @@ -1,21 +1,22 @@ +from urlparse import parse_qsl + from couchpotato.api import addApiView from couchpotato.core.helpers.encoding import tryUrlencode -from couchpotato.core.helpers.request import jsonified, getParam from couchpotato.core.helpers.variable import cleanHost from couchpotato.core.logger import CPLog from couchpotato.core.notifications.base import Notification -from flask.helpers import url_for -from pytwitter import Api, parse_qsl -from werkzeug.utils import redirect +from couchpotato.environment import Env +from pytwitter import Api import oauth2 + log = CPLog(__name__) class Twitter(Notification): - consumer_key = '3POVsO3KW90LKZXyzPOjQ' - consumer_secret = 'Qprb94hx9ucXvD4Wvg2Ctsk4PDK7CcQAKgCELXoyIjE' + consumer_key = 'xcVNnQ7VjAB7DyuKXREkyLHy5' + consumer_secret = 'iYeY4i5haITAsHToDJgv1VlLE2H1xnYuXKzZof7OKCOIIPGCLT' request_token = None @@ -31,8 +32,8 @@ def __init__(self): addApiView('notify.%s.auth_url' % self.getName().lower(), self.getAuthorizationUrl) addApiView('notify.%s.credentials' % self.getName().lower(), self.getCredentials) - def 
notify(self, message = '', data = {}, listener = None): - if self.isDisabled(): return + def notify(self, message = '', data = None, listener = None): + if not data: data = {} api = Api(self.consumer_key, self.consumer_secret, self.conf('access_token_key'), self.conf('access_token_secret')) @@ -53,7 +54,7 @@ def notify(self, message = '', data = {}, listener = None): try: if direct_message: for user in direct_message_users.split(): - api.PostDirectMessage(user, '[%s] %s' % (self.default_title, message)) + api.PostDirectMessage('[%s] %s' % (self.default_title, message), screen_name = user) else: update_message = '[%s] %s' % (self.default_title, message) if len(update_message) > 140: @@ -65,16 +66,15 @@ def notify(self, message = '', data = {}, listener = None): api.PostUpdate(update_message[135:] + ' 2/2') else: api.PostUpdate(update_message) - except Exception, e: + except Exception as e: log.error('Error sending tweet: %s', e) return False return True - def getAuthorizationUrl(self): + def getAuthorizationUrl(self, host = None, **kwargs): - referer = getParam('host') - callback_url = cleanHost(referer) + '%snotify.%s.credentials/' % (url_for('api.index').lstrip('/'), self.getName().lower()) + callback_url = cleanHost(host) + '%snotify.%s.credentials/' % (Env.get('api_base').lstrip('/'), self.getName().lower()) oauth_consumer = oauth2.Consumer(self.consumer_key, self.consumer_secret) oauth_client = oauth2.Client(oauth_consumer) @@ -83,31 +83,29 @@ def getAuthorizationUrl(self): if resp['status'] != '200': log.error('Invalid response from Twitter requesting temp token: %s', resp['status']) - return jsonified({ + return { 'success': False, - }) + } else: self.request_token = dict(parse_qsl(content)) auth_url = self.urls['authorize'] + ("?oauth_token=%s" % self.request_token['oauth_token']) log.info('Redirecting to "%s"', auth_url) - return jsonified({ + return { 'success': True, 'url': auth_url, - }) - - def getCredentials(self): + } - key = 
getParam('oauth_verifier') + def getCredentials(self, oauth_verifier, **kwargs): token = oauth2.Token(self.request_token['oauth_token'], self.request_token['oauth_token_secret']) - token.set_verifier(key) + token.set_verifier(oauth_verifier) oauth_consumer = oauth2.Consumer(key = self.consumer_key, secret = self.consumer_secret) oauth_client = oauth2.Client(oauth_consumer, token) - resp, content = oauth_client.request(self.urls['access'], method = 'POST', body = 'oauth_verifier=%s' % key) + resp, content = oauth_client.request(self.urls['access'], method = 'POST', body = 'oauth_verifier=%s' % oauth_verifier) access_token = dict(parse_qsl(content)) if resp['status'] != '200': @@ -122,4 +120,4 @@ def getCredentials(self): self.request_token = None - return redirect(url_for('web.index') + 'settings/notifications/') + return 'redirect', Env.get('web_base') + 'settings/notifications/' diff --git a/couchpotato/core/notifications/twitter/static/twitter.js b/couchpotato/core/notifications/twitter/static/twitter.js index 2c4e6e316b..97465b64bf 100644 --- a/couchpotato/core/notifications/twitter/static/twitter.js +++ b/couchpotato/core/notifications/twitter/static/twitter.js @@ -2,7 +2,7 @@ var TwitterNotification = new Class({ initialize: function(){ var self = this; - App.addEvent('load', self.addRegisterButton.bind(self)); + App.addEvent('loadSettings', self.addRegisterButton.bind(self)); }, addRegisterButton: function(){ @@ -16,7 +16,7 @@ var TwitterNotification = new Class({ var twitter_set = 0; fieldset.getElements('input[type=text]').each(function(el){ - twitter_set += +(el.get('value') != ''); + twitter_set += +(el.get('value') !== ''); }); @@ -57,9 +57,9 @@ var TwitterNotification = new Class({ } }) ).inject(fieldset.getElement('.test_button'), 'before'); - }) + }); - }, + } }); diff --git a/couchpotato/core/notifications/webhook.py b/couchpotato/core/notifications/webhook.py new file mode 100644 index 0000000000..82fb4b1be6 --- /dev/null +++ 
b/couchpotato/core/notifications/webhook.py @@ -0,0 +1,68 @@ +import traceback + +from couchpotato.core.helpers.encoding import toUnicode +from couchpotato.core.helpers.variable import getIdentifier +from couchpotato.core.logger import CPLog +from couchpotato.core.notifications.base import Notification + + +log = CPLog(__name__) + +autoload = 'Webhook' + +class Webhook(Notification): + + def notify(self, message = '', data = None, listener = None): + if not data: data = {} + + post_data = { + 'message': toUnicode(message) + } + + if getIdentifier(data): + post_data.update({ + 'imdb_id': getIdentifier(data) + }) + + headers = { + 'Content-type': 'application/x-www-form-urlencoded' + } + + try: + self.urlopen(self.conf('url'), headers = headers, data = post_data, show_error = False) + return True + except: + log.error('Webhook notification failed: %s', traceback.format_exc()) + + return False + + +config = [{ + 'name': 'webhook', + 'groups': [ + { + 'tab': 'notifications', + 'list': 'notification_providers', + 'name': 'webhook', + 'label': 'Webhook', + 'options': [ + { + 'name': 'enabled', + 'default': 0, + 'type': 'enabler', + }, + { + 'name': 'url', + 'description': 'The URL to send notification data to when something happens' + }, + { + 'name': 'on_snatch', + 'default': 0, + 'type': 'bool', + 'advanced': True, + 'description': 'Also send message when movie is snatched.', + } + ] + } + ] +}] diff --git a/couchpotato/core/notifications/xbmc.py b/couchpotato/core/notifications/xbmc.py new file mode 100644 index 0000000000..670d72b6d8 --- /dev/null +++ b/couchpotato/core/notifications/xbmc.py @@ -0,0 +1,280 @@ +import base64 +import json +import socket +import traceback +import urllib + +from couchpotato.core.helpers.variable import splitString, getTitle +from couchpotato.core.logger import CPLog +from couchpotato.core.notifications.base import Notification +from requests.exceptions import ConnectionError, Timeout +from requests.packages.urllib3.exceptions import 
MaxRetryError + + +log = CPLog(__name__) + +autoload = 'XBMC' + + +class XBMC(Notification): + + listen_to = ['renamer.after', 'movie.snatched'] + use_json_notifications = {} + http_time_between_calls = 0 + + def notify(self, message = '', data = None, listener = None): + if not data: data = {} + + hosts = splitString(self.conf('host')) + + successful = 0 + max_successful = 0 + for host in hosts: + + if self.use_json_notifications.get(host) is None: + self.getXBMCJSONversion(host, message = message) + + if self.use_json_notifications.get(host): + calls = [ + ('GUI.ShowNotification', None, {'title': self.default_title, 'message': message, 'image': self.getNotificationImage('small')}), + ] + + if data and data.get('destination_dir') and (not self.conf('only_first') or hosts.index(host) == 0): + param = {} + if not self.conf('force_full_scan') and (self.conf('remote_dir_scan') or socket.getfqdn('localhost') == socket.getfqdn(host.split(':')[0])): + param = {'directory': data['destination_dir']} + + calls.append(('VideoLibrary.Scan', None, param)) + + max_successful += len(calls) + response = self.request(host, calls) + else: + response = self.notifyXBMCnoJSON(host, {'title': self.default_title, 'message': message}) + + if data and data.get('destination_dir') and (not self.conf('only_first') or hosts.index(host) == 0): + response += self.request(host, [('VideoLibrary.Scan', None, {})]) + max_successful += 1 + + max_successful += 1 + + try: + for result in response: + if result.get('result') and result['result'] == 'OK': + successful += 1 + elif result.get('error'): + log.error('Kodi error; %s: %s (%s)', (result['id'], result['error']['message'], result['error']['code'])) + + except: + log.error('Failed parsing results: %s', traceback.format_exc()) + + return successful == max_successful + + def getXBMCJSONversion(self, host, message = ''): + + success = False + + # Kodi JSON-RPC version request + response = self.request(host, [ + ('JSONRPC.Version', None, {}) + ]) + 
for result in response: + if result.get('result') and type(result['result']['version']).__name__ == 'int': + # only v2 and v4 return an int object + # v6 (as of XBMC v12(Frodo)) is required to send notifications + xbmc_rpc_version = str(result['result']['version']) + + log.debug('Kodi JSON-RPC Version: %s ; Notifications by JSON-RPC only supported for v6 [as of XBMC v12(Frodo)]', xbmc_rpc_version) + + # disable JSON use + self.use_json_notifications[host] = False + + # send the text message + resp = self.notifyXBMCnoJSON(host, {'title': self.default_title, 'message': message}) + for r in resp: + if r.get('result') and r['result'] == 'OK': + log.debug('Message delivered successfully!') + success = True + break + elif r.get('error'): + log.error('Kodi error; %s: %s (%s)', (r['id'], r['error']['message'], r['error']['code'])) + break + + elif result.get('result') and type(result['result']['version']).__name__ == 'dict': + # Kodi JSON-RPC v6 returns an array object containing + # major, minor and patch number + xbmc_rpc_version = str(result['result']['version']['major']) + xbmc_rpc_version += '.' + str(result['result']['version']['minor']) + xbmc_rpc_version += '.' 
+ str(result['result']['version']['patch']) + + log.debug('Kodi JSON-RPC Version: %s', xbmc_rpc_version) + + # ok, Kodi version is supported + self.use_json_notifications[host] = True + + # send the text message + resp = self.request(host, [('GUI.ShowNotification', None, {'title':self.default_title, 'message':message, 'image': self.getNotificationImage('small')})]) + for r in resp: + if r.get('result') and r['result'] == 'OK': + log.debug('Message delivered successfully!') + success = True + break + elif r.get('error'): + log.error('Kodi error; %s: %s (%s)', (r['id'], r['error']['message'], r['error']['code'])) + break + + # error getting version info (we do have contact with Kodi though) + elif result.get('error'): + log.error('Kodi error; %s: %s (%s)', (result['id'], result['error']['message'], result['error']['code'])) + + log.debug('Use JSON notifications: %s ', self.use_json_notifications) + + return success + + def notifyXBMCnoJSON(self, host, data): + + server = 'http://%s/xbmcCmds/' % host + + # Notification(title, message [, timeout , image]) + cmd = "xbmcHttp?command=ExecBuiltIn(Notification(%s,%s,'',%s))" % (urllib.quote(getTitle(data)), urllib.quote(data['message']), urllib.quote(self.getNotificationImage('medium'))) + server += cmd + + # I have no idea what to set to, just tried text/plain and seems to be working :) + headers = { + 'Content-Type': 'text/plain', + } + + # authentication support + if self.conf('password'): + base64string = base64.encodestring('%s:%s' % (self.conf('username'), self.conf('password'))).replace('\n', '') + headers['Authorization'] = 'Basic %s' % base64string + + try: + log.debug('Sending non-JSON-type request to %s: %s', (host, data)) + + # response wil either be 'OK': + # + #
  • OK + # + # + # or 'Error': + # + #
  • Error: + # + # + response = self.urlopen(server, headers = headers, timeout = 3, show_error = False) + + if 'OK' in response: + log.debug('Returned from non-JSON-type request %s: %s', (host, response)) + # manually fake expected response array + return [{'result': 'OK'}] + else: + log.error('Returned from non-JSON-type request %s: %s', (host, response)) + # manually fake expected response array + return [{'result': 'Error'}] + + except (MaxRetryError, Timeout, ConnectionError): + log.info2('Couldn\'t send request to Kodi, assuming it\'s turned off') + return [{'result': 'Error'}] + except: + log.error('Failed sending non-JSON-type request to Kodi: %s', traceback.format_exc()) + return [{'result': 'Error'}] + + def request(self, host, do_requests): + server = 'http://%s/jsonrpc' % host + + data = [] + for req in do_requests: + method, id, kwargs = req + + data.append({ + 'method': method, + 'params': kwargs, + 'jsonrpc': '2.0', + 'id': id if id else method, + }) + data = json.dumps(data) + + headers = { + 'Content-Type': 'application/json', + } + + if self.conf('password'): + base64string = base64.encodestring('%s:%s' % (self.conf('username'), self.conf('password'))).replace('\n', '') + headers['Authorization'] = 'Basic %s' % base64string + + try: + log.debug('Sending request to %s: %s', (host, data)) + response = self.getJsonData(server, headers = headers, data = data, timeout = 3, show_error = False) + log.debug('Returned from request %s: %s', (host, response)) + + return response + except (MaxRetryError, Timeout, ConnectionError): + log.info2('Couldn\'t send request to Kodi, assuming it\'s turned off') + return [] + except: + log.error('Failed sending request to Kodi: %s', traceback.format_exc()) + return [] + + +config = [{ + 'name': 'xbmc', + 'groups': [ + { + 'tab': 'notifications', + 'list': 'notification_providers', + 'name': 'xbmc', + 'label': 'Kodi', + 'description': 'v14 (Helix), v15 (Isengard)', + 'options': [ + { + 'name': 'enabled', + 'default': 0, 
+ 'type': 'enabler', + }, + { + 'name': 'host', + 'default': 'localhost:8080', + }, + { + 'name': 'username', + 'default': 'xbmc', + }, + { + 'name': 'password', + 'default': '', + 'type': 'password', + }, + { + 'name': 'only_first', + 'default': 0, + 'type': 'bool', + 'advanced': True, + 'description': 'Only update the first host when movie snatched, useful for synced Kodi', + }, + { + 'name': 'remote_dir_scan', + 'label': 'Remote Folder Scan', + 'default': 0, + 'type': 'bool', + 'advanced': True, + 'description': ('Only scan new movie folder at remote Kodi servers.', 'Useful if the Kodi path is different from the path CPS uses.'), + }, + { + 'name': 'force_full_scan', + 'label': 'Always do a full scan', + 'default': 0, + 'type': 'bool', + 'advanced': True, + 'description': ('Do a full scan instead of only the new movie.', 'Useful if the Kodi path is different from the path CPS uses.'), + }, + { + 'name': 'on_snatch', + 'default': False, + 'type': 'bool', + 'advanced': True, + 'description': 'Also send message when movie is snatched.', + }, + ], + } + ], +}] diff --git a/couchpotato/core/notifications/xbmc/__init__.py b/couchpotato/core/notifications/xbmc/__init__.py deleted file mode 100644 index 0753c82aa5..0000000000 --- a/couchpotato/core/notifications/xbmc/__init__.py +++ /dev/null @@ -1,37 +0,0 @@ -from .main import XBMC - -def start(): - return XBMC() - -config = [{ - 'name': 'xbmc', - 'groups': [ - { - 'tab': 'notifications', - 'list': 'notification_providers', - 'name': 'xbmc', - 'label': 'XBMC', - 'description': 'v11 (Eden) and v12 (Frodo)', - 'options': [ - { - 'name': 'enabled', - 'default': 0, - 'type': 'enabler', - }, - { - 'name': 'host', - 'default': 'localhost:8080', - }, - { - 'name': 'username', - 'default': 'xbmc', - }, - { - 'name': 'password', - 'default': '', - 'type': 'password', - }, - ], - } - ], -}] diff --git a/couchpotato/core/notifications/xbmc/main.py b/couchpotato/core/notifications/xbmc/main.py deleted file mode 100755 index 
a1987bfa13..0000000000 --- a/couchpotato/core/notifications/xbmc/main.py +++ /dev/null @@ -1,189 +0,0 @@ -from couchpotato.core.helpers.variable import splitString -from couchpotato.core.logger import CPLog -from couchpotato.core.notifications.base import Notification -from flask.helpers import json -import base64 -import traceback -import urllib - -log = CPLog(__name__) - - -class XBMC(Notification): - - listen_to = ['renamer.after'] - use_json_notifications = {} - couch_logo_url = 'https://raw.github.com/RuudBurger/CouchPotatoServer/master/couchpotato/static/images/xbmc-notify.png' - - def notify(self, message = '', data = {}, listener = None): - if self.isDisabled(): return - - hosts = splitString(self.conf('host')) - - successful = 0 - for host in hosts: - - if self.use_json_notifications.get(host) is None: - self.getXBMCJSONversion(host, message = message) - - if self.use_json_notifications.get(host): - response = self.request(host, [ - ('GUI.ShowNotification', {'title': self.default_title, 'message': message, 'image': self.couch_logo_url}), - ('VideoLibrary.Scan', {}), - ]) - else: - response = self.notifyXBMCnoJSON(host, {'title':self.default_title, 'message':message}) - response += self.request(host, [('VideoLibrary.Scan', {})]) - - try: - for result in response: - if (result.get('result') and result['result'] == 'OK'): - successful += 1 - elif (result.get('error')): - log.error('XBMC error; %s: %s (%s)', (result['id'], result['error']['message'], result['error']['code'])) - - except: - log.error('Failed parsing results: %s', traceback.format_exc()) - - return successful == len(hosts) * 2 - - def getXBMCJSONversion(self, host, message = ''): - - success = False - - # XBMC JSON-RPC version request - response = self.request(host, [ - ('JSONRPC.Version', {}) - ]) - for result in response: - if (result.get('result') and type(result['result']['version']).__name__ == 'int'): - # only v2 and v4 return an int object - # v6 (as of XBMC v12(Frodo)) is required to 
send notifications - xbmc_rpc_version = str(result['result']['version']) - - log.debug('XBMC JSON-RPC Version: %s ; Notifications by JSON-RPC only supported for v6 [as of XBMC v12(Frodo)]', xbmc_rpc_version) - - # disable JSON use - self.use_json_notifications[host] = False - - # send the text message - resp = self.notifyXBMCnoJSON(host, {'title':self.default_title, 'message':message}) - for result in resp: - if (result.get('result') and result['result'] == 'OK'): - log.debug('Message delivered successfully!') - success = True - break - elif (result.get('error')): - log.error('XBMC error; %s: %s (%s)', (result['id'], result['error']['message'], result['error']['code'])) - break - - elif (result.get('result') and type(result['result']['version']).__name__ == 'dict'): - # XBMC JSON-RPC v6 returns an array object containing - # major, minor and patch number - xbmc_rpc_version = str(result['result']['version']['major']) - xbmc_rpc_version += '.' + str(result['result']['version']['minor']) - xbmc_rpc_version += '.' 
+ str(result['result']['version']['patch']) - - log.debug('XBMC JSON-RPC Version: %s', xbmc_rpc_version) - - # ok, XBMC version is supported - self.use_json_notifications[host] = True - - # send the text message - resp = self.request(host, [('GUI.ShowNotification', {'title':self.default_title, 'message':message, 'image':self.couch_logo_url})]) - for result in resp: - if (result.get('result') and result['result'] == 'OK'): - log.debug('Message delivered successfully!') - success = True - break - elif (result.get('error')): - log.error('XBMC error; %s: %s (%s)', (result['id'], result['error']['message'], result['error']['code'])) - break - - # error getting version info (we do have contact with XBMC though) - elif (result.get('error')): - log.error('XBMC error; %s: %s (%s)', (result['id'], result['error']['message'], result['error']['code'])) - - log.debug('Use JSON notifications: %s ', self.use_json_notifications) - - return success - - def notifyXBMCnoJSON(self, host, data): - - server = 'http://%s/xbmcCmds/' % host - - # Notification(title, message [, timeout , image]) - cmd = "xbmcHttp?command=ExecBuiltIn(Notification(%s,%s,'',%s))" % (urllib.quote(data['title']), urllib.quote(data['message']), urllib.quote(self.couch_logo_url)) - server += cmd - - # I have no idea what to set to, just tried text/plain and seems to be working :) - headers = { - 'Content-Type': 'text/plain', - } - - # authentication support - if self.conf('password'): - base64string = base64.encodestring('%s:%s' % (self.conf('username'), self.conf('password'))).replace('\n', '') - headers['Authorization'] = 'Basic %s' % base64string - - try: - log.debug('Sending non-JSON-type request to %s: %s', (host, data)) - - # response wil either be 'OK': - # - #
  • OK - # - # - # or 'Error': - # - #
  • Error: - # - # - response = self.urlopen(server, headers = headers) - - if 'OK' in response: - log.debug('Returned from non-JSON-type request %s: %s', (host, response)) - # manually fake expected response array - return [{'result': 'OK'}] - else: - log.error('Returned from non-JSON-type request %s: %s', (host, response)) - # manually fake expected response array - return [{'result': 'Error'}] - - except: - log.error('Failed sending non-JSON-type request to XBMC: %s', traceback.format_exc()) - return [{'result': 'Error'}] - - def request(self, host, requests): - server = 'http://%s/jsonrpc' % host - - data = [] - for req in requests: - method, kwargs = req - data.append({ - 'method': method, - 'params': kwargs, - 'jsonrpc': '2.0', - 'id': method, - }) - data = json.dumps(data) - - headers = { - 'Content-Type': 'application/json', - } - - if self.conf('password'): - base64string = base64.encodestring('%s:%s' % (self.conf('username'), self.conf('password'))).replace('\n', '') - headers['Authorization'] = 'Basic %s' % base64string - - try: - log.debug('Sending request to %s: %s', (host, data)) - rdata = self.urlopen(server, headers = headers, params = data, multipart = True) - response = json.loads(rdata) - log.debug('Returned from request %s: %s', (host, response)) - - return response - except: - log.error('Failed sending request to XBMC: %s', traceback.format_exc()) - return [] - diff --git a/couchpotato/core/notifications/xmpp_.py b/couchpotato/core/notifications/xmpp_.py new file mode 100644 index 0000000000..f9916cd020 --- /dev/null +++ b/couchpotato/core/notifications/xmpp_.py @@ -0,0 +1,96 @@ +from time import sleep +import traceback + +from couchpotato.core.logger import CPLog +from couchpotato.core.notifications.base import Notification +import xmpp + + +log = CPLog(__name__) + +autoload = 'Xmpp' + + +class Xmpp(Notification): + + def notify(self, message = '', data = None, listener = None): + if not data: data = {} + + try: + jid = 
xmpp.protocol.JID(self.conf('username')) + client = xmpp.Client(jid.getDomain(), debug = []) + + # Connect + if not client.connect(server = (self.conf('hostname'), self.conf('port'))): + log.error('XMPP failed: Connection to server failed.') + return False + + # Authenticate + if not client.auth(jid.getNode(), self.conf('password'), resource = jid.getResource()): + log.error('XMPP failed: Failed to authenticate.') + return False + + # Send message + client.send(xmpp.protocol.Message(to = self.conf('to'), body = message, typ = 'chat')) + + # Disconnect + # some older servers will not send the message if you disconnect immediately after sending + sleep(1) + client.disconnect() + + log.info('XMPP notifications sent.') + return True + + except: + log.error('XMPP failed: %s', traceback.format_exc()) + + return False + + +config = [{ + 'name': 'xmpp', + 'groups': [ + { + 'tab': 'notifications', + 'list': 'notification_providers', + 'name': 'xmpp', + 'label': 'XMPP', + 'description`': 'for Jabber, Hangouts (Google Talk), AIM...', + 'options': [ + { + 'name': 'enabled', + 'default': 0, + 'type': 'enabler', + }, + { + 'name': 'username', + 'description': 'User sending the message. 
For Hangouts, e-mail of a single-step authentication Google account.', + }, + { + 'name': 'password', + 'type': 'Password', + }, + { + 'name': 'hostname', + 'default': 'talk.google.com', + }, + { + 'name': 'to', + 'description': 'Username (or e-mail for Hangouts) of the person to send the messages to.', + }, + { + 'name': 'port', + 'type': 'int', + 'default': 5222, + }, + { + 'name': 'on_snatch', + 'default': 0, + 'type': 'bool', + 'advanced': True, + 'description': 'Also send message when movie is snatched.', + }, + ], + } + ], +}] diff --git a/couchpotato/core/plugins/automation.py b/couchpotato/core/plugins/automation.py new file mode 100644 index 0000000000..e98a00a619 --- /dev/null +++ b/couchpotato/core/plugins/automation.py @@ -0,0 +1,105 @@ +from couchpotato.core.event import addEvent, fireEvent +from couchpotato.core.logger import CPLog +from couchpotato.core.plugins.base import Plugin +from couchpotato.environment import Env + +log = CPLog(__name__) + +autoload = 'Automation' + + +class Automation(Plugin): + + def __init__(self): + + addEvent('app.load', self.setCrons) + + if not Env.get('dev'): + addEvent('app.load', self.addMovies) + + addEvent('setting.save.automation.hour.after', self.setCrons) + + def setCrons(self): + fireEvent('schedule.interval', 'automation.add_movies', self.addMovies, hours = self.conf('hour', default = 12)) + + def addMovies(self): + + movies = fireEvent('automation.get_movies', merge = True) + movie_ids = [] + + for imdb_id in movies: + + if self.shuttingDown(): + break + + prop_name = 'automation.added.%s' % imdb_id + added = Env.prop(prop_name, default = False) + if not added: + added_movie = fireEvent('movie.add', params = {'identifier': imdb_id}, force_readd = False, search_after = False, update_after = True, single = True) + if added_movie: + movie_ids.append(added_movie['_id']) + Env.prop(prop_name, True) + + for movie_id in movie_ids: + + if self.shuttingDown(): + break + + movie_dict = fireEvent('media.get', movie_id, 
single = True) + if movie_dict: + fireEvent('movie.searcher.single', movie_dict) + + return True + + +config = [{ + 'name': 'automation', + 'order': 101, + 'groups': [ + { + 'tab': 'automation', + 'name': 'automation', + 'label': 'Minimal movie requirements', + 'options': [ + { + 'name': 'year', + 'default': 2011, + 'type': 'int', + }, + { + 'name': 'votes', + 'default': 1000, + 'type': 'int', + }, + { + 'name': 'rating', + 'default': 7.0, + 'type': 'float', + }, + { + 'name': 'hour', + 'advanced': True, + 'default': 12, + 'label': 'Check every', + 'type': 'int', + 'unit': 'hours', + 'description': 'hours', + }, + { + 'name': 'required_genres', + 'label': 'Required Genres', + 'default': '', + 'placeholder': 'Example: Action, Crime & Drama', + 'description': ('Ignore movies that don\'t contain at least one set of genres.', 'Sets are separated by "," and each word within a set must be separated with "&"') + }, + { + 'name': 'ignored_genres', + 'label': 'Ignored Genres', + 'default': '', + 'placeholder': 'Example: Horror, Comedy & Drama & Romance', + 'description': 'Ignore movies that contain at least one set of genres. Sets work the same as above.' 
+ }, + ], + }, + ], +}] diff --git a/couchpotato/core/plugins/automation/__init__.py b/couchpotato/core/plugins/automation/__init__.py deleted file mode 100644 index b7b1ab2869..0000000000 --- a/couchpotato/core/plugins/automation/__init__.py +++ /dev/null @@ -1,42 +0,0 @@ -from .main import Automation - -def start(): - return Automation() - -config = [{ - 'name': 'automation', - 'order': 101, - 'groups': [ - { - 'tab': 'automation', - 'name': 'automation', - 'label': 'Minimal movie requirements', - 'options': [ - { - 'name': 'year', - 'default': 2011, - 'type': 'int', - }, - { - 'name': 'votes', - 'default': 1000, - 'type': 'int', - }, - { - 'name': 'rating', - 'default': 7.0, - 'type': 'float', - }, - { - 'name': 'hour', - 'advanced': True, - 'default': 12, - 'label': 'Check every', - 'type': 'int', - 'unit': 'hours', - 'description': 'hours', - }, - ], - }, - ], -}] diff --git a/couchpotato/core/plugins/automation/main.py b/couchpotato/core/plugins/automation/main.py deleted file mode 100644 index f4ede40dd4..0000000000 --- a/couchpotato/core/plugins/automation/main.py +++ /dev/null @@ -1,34 +0,0 @@ -from couchpotato.core.event import addEvent, fireEvent -from couchpotato.core.logger import CPLog -from couchpotato.core.plugins.base import Plugin -from couchpotato.environment import Env - -log = CPLog(__name__) - - -class Automation(Plugin): - - def __init__(self): - - fireEvent('schedule.interval', 'automation.add_movies', self.addMovies, hours = self.conf('hour', default = 12)) - - if not Env.get('dev'): - addEvent('app.load', self.addMovies) - - def addMovies(self): - - movies = fireEvent('automation.get_movies', merge = True) - movie_ids = [] - - for imdb_id in movies: - prop_name = 'automation.added.%s' % imdb_id - added = Env.prop(prop_name, default = False) - if not added: - added_movie = fireEvent('movie.add', params = {'identifier': imdb_id}, force_readd = False, search_after = False, update_library = True, single = True) - if added_movie: - 
movie_ids.append(added_movie['id']) - Env.prop(prop_name, True) - - for movie_id in movie_ids: - movie_dict = fireEvent('movie.get', movie_id, single = True) - fireEvent('searcher.single', movie_dict) diff --git a/couchpotato/core/plugins/base.py b/couchpotato/core/plugins/base.py index 9330631de9..6f6cf5fe3f 100644 --- a/couchpotato/core/plugins/base.py +++ b/couchpotato/core/plugins/base.py @@ -1,174 +1,254 @@ -from StringIO import StringIO -from couchpotato import addView -from couchpotato.core.event import fireEvent, addEvent -from couchpotato.core.helpers.encoding import tryUrlencode, ss, toSafeString -from couchpotato.core.helpers.variable import getExt, md5 -from couchpotato.core.logger import CPLog -from couchpotato.environment import Env -from flask.templating import render_template_string -from multipartpost import MultipartPostHandler +import threading +from urllib import quote, getproxies from urlparse import urlparse -import cookielib -import glob -import gzip -import math import os.path -import re import time import traceback -import urllib2 + +from couchpotato.core.event import fireEvent, addEvent +from couchpotato.core.helpers.encoding import ss, toSafeString, \ + toUnicode, sp +from couchpotato.core.helpers.variable import md5, isLocalIP, scanForPassword, tryInt, getIdentifier, \ + randomString +from couchpotato.core.logger import CPLog +from couchpotato.environment import Env +import requests +from requests.packages.urllib3 import Timeout +from requests.packages.urllib3.exceptions import MaxRetryError +from tornado import template log = CPLog(__name__) class Plugin(object): + _class_name = None + _database = None + plugin_path = None + enabled_option = 'enabled' - auto_register_static = True _needs_shutdown = False + _running = None + _locks = {} + + user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.11; rv:45.0) Gecko/20100101 Firefox/45.0' http_last_use = {} + http_last_use_queue = {} http_time_between_calls = 0 http_failed_request = {} 
http_failed_disabled = {} + def __new__(cls, *args, **kwargs): + new_plugin = super(Plugin, cls).__new__(cls) + new_plugin.registerPlugin() + + return new_plugin + def registerPlugin(self): addEvent('app.do_shutdown', self.doShutdown) addEvent('plugin.running', self.isRunning) + self._running = [] - def conf(self, attr, value = None, default = None): - return Env.setting(attr, self.getName().lower(), value = value, default = default) - - def getName(self): - return self.__class__.__name__ + # Setup database + if self._database: + addEvent('database.setup', self.databaseSetup) - def renderTemplate(self, parent_file, template, **params): + def databaseSetup(self): - template = open(os.path.join(os.path.dirname(parent_file), template), 'r').read() - return render_template_string(template, **params) + for index_name in self._database: + klass = self._database[index_name] - def registerStatic(self, plugin_file, add_to_head = True): + fireEvent('database.setup_index', index_name, klass) - # Register plugin path - self.plugin_path = os.path.dirname(plugin_file) + def conf(self, attr, value = None, default = None, section = None): + class_name = self.getName().lower().split(':')[0].lower() + return Env.setting(attr, section = section if section else class_name, value = value, default = default) - # Get plugin_name from PluginName - s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', self.__class__.__name__) - class_name = re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower() + def deleteConf(self, attr): + return Env._settings.delete(attr, section = self.getName().lower().split(':')[0].lower()) - path = 'api/%s/static/%s/' % (Env.setting('api_key'), class_name) - addView(path + '', self.showStatic, static = True) + def getName(self): + return self._class_name or self.__class__.__name__ - if add_to_head: - for f in glob.glob(os.path.join(self.plugin_path, 'static', '*')): - ext = getExt(f) - if ext in ['js', 'css']: - fireEvent('register_%s' % ('script' if ext in 'js' else 'style'), path 
+ os.path.basename(f), f) + def setName(self, name): + self._class_name = name - def showStatic(self, filename): - d = os.path.join(self.plugin_path, 'static') + def renderTemplate(self, parent_file, templ, **params): - from flask.helpers import send_from_directory - return send_from_directory(d, filename) + t = template.Template(open(os.path.join(os.path.dirname(parent_file), templ), 'r').read()) + return t.generate(**params) def createFile(self, path, content, binary = False): - path = ss(path) + path = sp(path) self.makeDir(os.path.dirname(path)) - try: - f = open(path, 'w+' if not binary else 'w+b') - f.write(content) - f.close() - os.chmod(path, Env.getPermission('file')) - except Exception, e: - log.error('Unable writing to file "%s": %s', (path, e)) + if os.path.exists(path): + log.debug('%s already exists, overwriting file with new version', path) + + write_type = 'w+' if not binary else 'w+b' + + # Stream file using response object + if isinstance(content, requests.models.Response): + + # Write file to temp + with open('%s.tmp' % path, write_type) as f: + for chunk in content.iter_content(chunk_size = 1048576): + if chunk: # filter out keep-alive new chunks + f.write(chunk) + f.flush() + + # Rename to destination + os.rename('%s.tmp' % path, path) + + else: + try: + f = open(path, write_type) + f.write(content) + f.close() + + try: + os.chmod(path, Env.getPermission('file')) + except: + log.error('Failed writing permission to file "%s": %s', (path, traceback.format_exc())) + + except: + log.error('Unable to write file "%s": %s', (path, traceback.format_exc())) + if os.path.isfile(path): + os.remove(path) def makeDir(self, path): - path = ss(path) + path = sp(path) try: if not os.path.isdir(path): os.makedirs(path, Env.getPermission('folder')) + os.chmod(path, Env.getPermission('folder')) return True - except Exception, e: + except Exception as e: log.error('Unable to create folder "%s": %s', (path, e)) return False + def deleteEmptyFolder(self, folder, 
show_error = True, only_clean = None): + folder = sp(folder) + + for item in os.listdir(folder): + full_folder = sp(os.path.join(folder, item)) + + if not only_clean or (item in only_clean and os.path.isdir(full_folder)): + + for subfolder, dirs, files in os.walk(full_folder, topdown = False): + + try: + os.rmdir(subfolder) + except: + if show_error: + log.info2('Couldn\'t remove directory %s: %s', (subfolder, traceback.format_exc())) + + try: + os.rmdir(folder) + except: + if show_error: + log.error('Couldn\'t remove empty directory %s: %s', (folder, traceback.format_exc())) + # http request - def urlopen(self, url, timeout = 30, params = None, headers = None, opener = None, multipart = False, show_error = True): - url = ss(url) + def urlopen(self, url, timeout = 30, data = None, headers = None, files = None, show_error = True, stream = False): + url = quote(ss(url), safe = "%/:=&?~#+!$,;'@()*[]") if not headers: headers = {} - if not params: params = {} + if not data: data = {} # Fill in some headers - headers['Referer'] = headers.get('Referer', urlparse(url).hostname) - headers['Host'] = headers.get('Host', urlparse(url).hostname) - headers['User-Agent'] = headers.get('User-Agent', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:10.0.2) Gecko/20100101 Firefox/10.0.2') + parsed_url = urlparse(url) + host = '%s%s' % (parsed_url.hostname, (':' + str(parsed_url.port) if parsed_url.port else '')) + + headers['Referer'] = headers.get('Referer', '%s://%s' % (parsed_url.scheme, host)) + headers['Host'] = headers.get('Host', None) + headers['User-Agent'] = headers.get('User-Agent', self.user_agent) headers['Accept-encoding'] = headers.get('Accept-encoding', 'gzip') + headers['Connection'] = headers.get('Connection', 'keep-alive') + headers['Cache-Control'] = headers.get('Cache-Control', 'max-age=0') + + if headers.get('Authorization', '') != '': + headers['Authorization'] = headers.get('Authorization', '') + + use_proxy = Env.setting('use_proxy') + proxy_url = None + + 
if use_proxy: + proxy_server = Env.setting('proxy_server') + proxy_username = Env.setting('proxy_username') + proxy_password = Env.setting('proxy_password') + + if proxy_server: + loc = "{0}:{1}@{2}".format(proxy_username, proxy_password, proxy_server) if proxy_username else proxy_server + proxy_url = { + "http": "http://"+loc, + "https": "https://"+loc, + } + else: + proxy_url = getproxies() - host = urlparse(url).hostname + r = Env.get('http_opener') # Don't try for failed requests if self.http_failed_disabled.get(host, 0) > 0: if self.http_failed_disabled[host] > (time.time() - 900): log.info2('Disabled calls to %s for 15 minutes because so many failed requests.', host) if not show_error: - raise + raise Exception('Disabled calls to %s for 15 minutes because so many failed requests' % host) else: return '' else: del self.http_failed_request[host] del self.http_failed_disabled[host] - self.wait(host) + self.wait(host, url) + status_code = None try: - if multipart: - log.info('Opening multipart url: %s, params: %s', (url, [x for x in params.iterkeys()] if isinstance(params, dict) else 'with data')) - request = urllib2.Request(url, params, headers) - - if opener: - opener.add_handler(MultipartPostHandler()) - else: - cookies = cookielib.CookieJar() - opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookies), MultipartPostHandler) - - response = opener.open(request, timeout = timeout) + kwargs = { + 'headers': headers, + 'data': data if len(data) > 0 else None, + 'timeout': timeout, + 'files': files, + 'verify': False, #verify_ssl, Disable for now as to many wrongly implemented certificates.. 
+ 'stream': stream, + 'proxies': proxy_url, + } + method = 'post' if len(data) > 0 or files else 'get' + + log.info('Opening url: %s %s, data: %s', (method, url, [x for x in data.keys()] if isinstance(data, dict) else 'with data')) + response = r.request(method, url, **kwargs) + + status_code = response.status_code + if response.status_code == requests.codes.ok: + data = response if stream else response.content else: - log.info('Opening url: %s, params: %s', (url, [x for x in params.iterkeys()])) - data = tryUrlencode(params) if len(params) > 0 else None - request = urllib2.Request(url, data, headers) - - if opener: - response = opener.open(request, timeout = timeout) - else: - response = urllib2.urlopen(request, timeout = timeout) - - # unzip if needed - if response.info().get('Content-Encoding') == 'gzip': - buf = StringIO(response.read()) - f = gzip.GzipFile(fileobj = buf) - data = f.read() - else: - data = response.read() + response.raise_for_status() self.http_failed_request[host] = 0 - except IOError: + except (IOError, MaxRetryError, Timeout): if show_error: - log.error('Failed opening url in %s: %s %s', (self.getName(), url, traceback.format_exc(1))) + log.error('Failed opening url in %s: %s %s', (self.getName(), url, traceback.format_exc(0))) # Save failed requests by hosts try: + + # To many requests + if status_code in [429]: + self.http_failed_request[host] = 1 + self.http_failed_disabled[host] = time.time() + if not self.http_failed_request.get(host): self.http_failed_request[host] = 1 else: self.http_failed_request[host] += 1 # Disable temporarily - if self.http_failed_request[host] > 5: + if self.http_failed_request[host] > 5 and not isLocalIP(host): self.http_failed_disabled[host] = time.time() except: @@ -180,16 +260,34 @@ def urlopen(self, url, timeout = 30, params = None, headers = None, opener = Non return data - def wait(self, host = ''): - now = time.time() + def wait(self, host = '', url = ''): + if self.http_time_between_calls == 0: + return 
+ + try: + if host not in self.http_last_use_queue: + self.http_last_use_queue[host] = [] - last_use = self.http_last_use.get(host, 0) + self.http_last_use_queue[host].append(url) - wait = math.ceil(last_use - now + self.http_time_between_calls) + while True and not self.shuttingDown(): + wait = (self.http_last_use.get(host, 0) - time.time()) + self.http_time_between_calls + + if self.http_last_use_queue[host][0] != url: + time.sleep(.1) + continue + + if wait > 0: + log.debug('Waiting for %s, %d seconds', (self.getName(), max(1, wait))) + time.sleep(min(wait, 30)) + else: + self.http_last_use_queue[host] = self.http_last_use_queue[host][1:] + self.http_last_use[host] = time.time() + break + except: + log.error('Failed handling waiting call: %s', traceback.format_exc()) + time.sleep(self.http_time_between_calls) - if wait > 0: - log.debug('Waiting for %s, %d seconds', (self.getName(), wait)) - time.sleep(last_use - now + self.http_time_between_calls) def beforeCall(self, handler): self.isRunning('%s.%s' % (self.getName(), handler.__name__)) @@ -197,7 +295,7 @@ def beforeCall(self, handler): def afterCall(self, handler): self.isRunning('%s.%s' % (self.getName(), handler.__name__), False) - def doShutdown(self): + def doShutdown(self, *args, **kwargs): self.shuttingDown(True) return True @@ -209,9 +307,6 @@ def shuttingDown(self, value = None): def isRunning(self, value = None, boolean = True): - if not hasattr(self, '_running'): - self._running = [] - if value is None: return self._running @@ -223,55 +318,134 @@ def isRunning(self, value = None, boolean = True): except: log.error("Something went wrong when finishing the plugin function. 
Could not find the 'is_running' key") - def getCache(self, cache_key, url = None, **kwargs): - cache_key = md5(ss(cache_key)) - cache = Env.get('cache').get(cache_key) - if cache: - if not Env.get('dev'): log.debug('Getting cache %s', cache_key) - return cache + + use_cache = not len(kwargs.get('data', {})) > 0 and not kwargs.get('files') + + if use_cache: + cache_key_md5 = md5(cache_key) + cache = Env.get('cache').get(cache_key_md5) + if cache: + if not Env.get('dev'): log.debug('Getting cache %s', cache_key) + return cache if url: try: cache_timeout = 300 - if kwargs.get('cache_timeout'): + if 'cache_timeout' in kwargs: cache_timeout = kwargs.get('cache_timeout') del kwargs['cache_timeout'] data = self.urlopen(url, **kwargs) - if data: + if data and cache_timeout > 0 and use_cache: self.setCache(cache_key, data, timeout = cache_timeout) return data except: if not kwargs.get('show_error', True): raise + log.debug('Failed getting cache: %s', (traceback.format_exc(0))) return '' def setCache(self, cache_key, value, timeout = 300): + cache_key_md5 = md5(cache_key) log.debug('Setting cache %s', cache_key) - Env.get('cache').set(cache_key, value, timeout) + Env.get('cache').set(cache_key_md5, value, timeout) return value - def createNzbName(self, data, movie): - tag = self.cpTag(movie) - return '%s%s' % (toSafeString(data.get('name')[:127 - len(tag)]), tag) + def createNzbName(self, data, media, unique_tag = False): + release_name = data.get('name') + tag = self.cpTag(media, unique_tag = unique_tag) + + # Check if password is filename + name_password = scanForPassword(data.get('name')) + if name_password: + release_name, password = name_password + tag += '{{%s}}' % password + elif data.get('password'): + tag += '{{%s}}' % data.get('password') + + max_length = 127 - len(tag) # Some filesystems don't support 128+ long filenames + return '%s%s' % (toSafeString(toUnicode(release_name)[:max_length]), tag) - def createFileName(self, data, filedata, movie): - name = 
os.path.join(self.createNzbName(data, movie)) - if data.get('type') == 'nzb' and 'DOCTYPE nzb' not in filedata and '' not in filedata: + def createFileName(self, data, filedata, media, unique_tag = False): + name = self.createNzbName(data, media, unique_tag = unique_tag) + if data.get('protocol') == 'nzb' and 'DOCTYPE nzb' not in filedata and '' not in filedata: return '%s.%s' % (name, 'rar') - return '%s.%s' % (name, data.get('type')) + return '%s.%s' % (name, data.get('protocol')) - def cpTag(self, movie): - if Env.setting('enabled', 'renamer'): - return '.cp(' + movie['library'].get('identifier') + ')' if movie['library'].get('identifier') else '' + def cpTag(self, media, unique_tag = False): - return '' + tag = '' + if Env.setting('enabled', 'renamer') or unique_tag: + identifier = getIdentifier(media) or '' + unique_tag = ', ' + randomString() if unique_tag else '' + + tag = '.cp(' + tag += identifier + tag += ', ' if unique_tag and identifier else '' + tag += randomString() if unique_tag else '' + tag += ')' + + return tag if len(tag) > 7 else '' + + def checkFilesChanged(self, files, unchanged_for = 60): + now = time.time() + file_too_new = False + + file_time = [] + for cur_file in files: + + # File got removed while checking + if not os.path.isfile(cur_file): + file_too_new = now + break + + # File has changed in last 60 seconds + file_time = self.getFileTimes(cur_file) + for t in file_time: + if t > now - unchanged_for: + file_too_new = tryInt(time.time() - t) + break + + if file_too_new: + break + + if file_too_new: + try: + time_string = time.ctime(file_time[0]) + except: + try: + time_string = time.ctime(file_time[1]) + except: + time_string = 'unknown' + + return file_too_new, time_string + + return False, None + + def getFileTimes(self, file_path): + return [os.path.getmtime(file_path), os.path.getctime(file_path) if os.name != 'posix' else 0] def isDisabled(self): return not self.isEnabled() def isEnabled(self): - return 
self.conf(self.enabled_option) or self.conf(self.enabled_option) == None + return self.conf(self.enabled_option) or self.conf(self.enabled_option) is None + + def acquireLock(self, key): + + lock = self._locks.get(key) + if not lock: + self._locks[key] = threading.RLock() + + log.debug('Acquiring lock: %s', key) + self._locks.get(key).acquire() + + def releaseLock(self, key): + + lock = self._locks.get(key) + if lock: + log.debug('Releasing lock: %s', key) + self._locks.get(key).release() diff --git a/couchpotato/core/plugins/browser.py b/couchpotato/core/plugins/browser.py new file mode 100644 index 0000000000..14b48f34e3 --- /dev/null +++ b/couchpotato/core/plugins/browser.py @@ -0,0 +1,150 @@ +import ctypes +import os +import string +import traceback +import time + +from couchpotato import CPLog +from couchpotato.api import addApiView +from couchpotato.core.event import addEvent +from couchpotato.core.helpers.encoding import sp, ss, toUnicode +from couchpotato.core.helpers.variable import getUserDir +from couchpotato.core.plugins.base import Plugin + +from couchpotato.environment import Env + +log = CPLog(__name__) + +if os.name == 'nt': + import imp + try: + imp.find_module('win32file') + except: + # todo:: subclass ImportError for missing dependencies, vs. broken plugins? + raise ImportError("Missing the win32file module, which is a part of the prerequisite \ + pywin32 package. 
You can get it from http://sourceforge.net/projects/pywin32/files/pywin32/") + else: + # noinspection PyUnresolvedReferences + import win32file + +autoload = 'FileBrowser' + +class FileBrowser(Plugin): + + def __init__(self): + addApiView('directory.list', self.view, docs = { + 'desc': 'Return the directory list of a given directory', + 'params': { + 'path': {'desc': 'The directory to scan'}, + 'show_hidden': {'desc': 'Also show hidden files'} + }, + 'return': {'type': 'object', 'example': """{ + 'is_root': bool, //is top most folder + 'parent': string, //parent folder of requested path + 'home': string, //user home folder + 'empty': bool, //directory is empty + 'dirs': array, //directory names +}"""} + }) + + def getDirectories(self, path = '/', show_hidden = True): + + # Return driveletters or root if path is empty + if path == '/' or not path or path == '\\': + if os.name == 'nt': + return self.getDriveLetters() + path = '/' + + dirs = [] + path = sp(path) + for f in os.listdir(path): + p = sp(os.path.join(path, f)) + if os.path.isdir(p) and ((self.is_hidden(p) and bool(int(show_hidden))) or not self.is_hidden(p)): + dirs.append(toUnicode('%s%s' % (p, os.path.sep))) + + return sorted(dirs) + + def getFiles(self): + pass + + def getDriveLetters(self): + + driveletters = [] + for drive in string.ascii_uppercase: + if win32file.GetDriveType(drive + ':') in [win32file.DRIVE_FIXED, win32file.DRIVE_REMOTE, win32file.DRIVE_RAMDISK, win32file.DRIVE_REMOVABLE]: + driveletters.append(drive + ':\\') + + return driveletters + + def view(self, path = '/', show_hidden = True, **kwargs): + + soft_chroot = Env.get('softchroot') + + home = getUserDir() + if soft_chroot.enabled: + if not soft_chroot.is_subdir(home): + home = soft_chroot.get_chroot() + + if not path: + path = home + if path.endswith(os.path.sep): + path = path.rstrip(os.path.sep) + else: + path = soft_chroot.chroot2abs(path) + + try: + dirs = self.getDirectories(path = path, show_hidden = show_hidden) + except: + 
log.error('Failed getting directory "%s" : %s', (path, traceback.format_exc())) + dirs = [] + + if soft_chroot.enabled: + dirs = map(soft_chroot.abs2chroot, dirs) + + parent = os.path.dirname(path.rstrip(os.path.sep)) + if parent == path.rstrip(os.path.sep): + parent = '/' + elif parent != '/' and parent[-2:] != ':\\': + parent += os.path.sep + + # TODO : check on windows: + is_root = path == '/' + + if soft_chroot.enabled: + is_root = soft_chroot.is_root_abs(path) + + # fix paths: + if soft_chroot.is_subdir(parent): + parent = soft_chroot.abs2chroot(parent) + else: + parent = os.path.sep + + home = soft_chroot.abs2chroot(home) + + return { + 'is_root': is_root, + 'empty': len(dirs) == 0, + 'parent': parent, + 'home': home, + 'platform': os.name, + 'dirs': dirs, + } + + + def is_hidden(self, filepath): + name = ss(os.path.basename(os.path.abspath(filepath))) + return name.startswith('.') or self.has_hidden_attribute(filepath) + + def has_hidden_attribute(self, filepath): + + result = False + try: + attrs = ctypes.windll.kernel32.GetFileAttributesW(sp(filepath)) #@UndefinedVariable + assert attrs != -1 + result = bool(attrs & 2) + except (AttributeError, AssertionError): + pass + except: + log.error('Failed getting hidden attribute: %s', traceback.format_exc()) + + return result diff --git a/couchpotato/core/plugins/browser/__init__.py b/couchpotato/core/plugins/browser/__init__.py deleted file mode 100644 index 976fcd102c..0000000000 --- a/couchpotato/core/plugins/browser/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -from .main import FileBrowser - -def start(): - return FileBrowser() - -config = [] diff --git a/couchpotato/core/plugins/browser/main.py b/couchpotato/core/plugins/browser/main.py deleted file mode 100644 index 3eee85bb91..0000000000 --- a/couchpotato/core/plugins/browser/main.py +++ /dev/null @@ -1,106 +0,0 @@ -from couchpotato.api import addApiView -from couchpotato.core.helpers.request import getParam, jsonified -from couchpotato.core.helpers.variable 
import getUserDir -from couchpotato.core.plugins.base import Plugin -import ctypes -import os -import string - -if os.name == 'nt': - import imp - try: - imp.find_module('win32file') - except: - # todo:: subclass ImportError for missing dependencies, vs. broken plugins? - raise ImportError("Missing the win32file module, which is a part of the prerequisite \ - pywin32 package. You can get it from http://sourceforge.net/projects/pywin32/files/pywin32/"); - else: - import win32file #@UnresolvedImport - -class FileBrowser(Plugin): - - def __init__(self): - addApiView('directory.list', self.view, docs = { - 'desc': 'Return the directory list of a given directory', - 'params': { - 'path': {'desc': 'The directory to scan'}, - 'show_hidden': {'desc': 'Also show hidden files'} - }, - 'return': {'type': 'object', 'example': """{ - 'is_root': bool, //is top most folder - 'parent': string, //parent folder of requested path - 'home': string, //user home folder - 'empty': bool, //directory is empty - 'dirs': array, //directory names -}"""} - }) - - def getDirectories(self, path = '/', show_hidden = True): - - # Return driveletters or root if path is empty - if path == '/' or not path or path == '\\': - if os.name == 'nt': - return self.getDriveLetters() - path = '/' - - dirs = [] - for f in os.listdir(path): - p = os.path.join(path, f) - if os.path.isdir(p) and ((self.is_hidden(p) and bool(int(show_hidden))) or not self.is_hidden(p)): - dirs.append(p + os.path.sep) - - return sorted(dirs) - - def getFiles(self): - pass - - def getDriveLetters(self): - - driveletters = [] - for drive in string.ascii_uppercase: - if win32file.GetDriveType(drive + ":") in [win32file.DRIVE_FIXED, win32file.DRIVE_REMOTE, win32file.DRIVE_RAMDISK, win32file.DRIVE_REMOVABLE]: - driveletters.append(drive + ":\\") - - return driveletters - - def view(self): - - path = getParam('path', '/') - home = getUserDir() - - if not path: - path = home - - try: - dirs = self.getDirectories(path = path, show_hidden = 
getParam('show_hidden', True)) - except: - dirs = [] - - parent = os.path.dirname(path.rstrip(os.path.sep)) - if parent == path.rstrip(os.path.sep): - parent = '/' - elif parent != '/' and parent[-2:] != ':\\': - parent += os.path.sep - - return jsonified({ - 'is_root': path == '/', - 'empty': len(dirs) == 0, - 'parent': parent, - 'home': home + os.path.sep, - 'platform': os.name, - 'dirs': dirs, - }) - - - def is_hidden(self, filepath): - name = os.path.basename(os.path.abspath(filepath)) - return name.startswith('.') or self.has_hidden_attribute(filepath) - - def has_hidden_attribute(self, filepath): - try: - attrs = ctypes.windll.kernel32.GetFileAttributesW(unicode(filepath)) #@UndefinedVariable - assert attrs != -1 - result = bool(attrs & 2) - except (AttributeError, AssertionError): - result = False - return result diff --git a/couchpotato/core/plugins/browser_test.py b/couchpotato/core/plugins/browser_test.py new file mode 100644 index 0000000000..d1ac3bc903 --- /dev/null +++ b/couchpotato/core/plugins/browser_test.py @@ -0,0 +1,52 @@ +#import sys +import os + +import mock +import unittest +from unittest import TestCase + + +from couchpotato.core.plugins.browser import FileBrowser +from couchpotato.core.softchroot import SoftChroot + + +CHROOT_DIR = '/tmp/' + +# 'couchpotato.core.plugins.browser.Env', +@mock.patch('couchpotato.core.plugins.browser.Env', name='EnvMock') +class FileBrowserChrootedTest(TestCase): + + def setUp(self): + self.b = FileBrowser() + + def tuneMock(self, env): + #set up mock: + sc = SoftChroot() + sc.initialize(CHROOT_DIR) + env.get.return_value = sc + + + def test_view__chrooted_path_none(self, env): + #def view(self, path = '/', show_hidden = True, **kwargs): + + self.tuneMock(env) + + r = self.b.view(None) + self.assertEqual(r['home'], '/') + self.assertEqual(r['parent'], '/') + self.assertTrue(r['is_root']) + + def test_view__chrooted_path_chroot(self, env): + #def view(self, path = '/', show_hidden = True, **kwargs): + + 
self.tuneMock(env) + + for path, parent in [('/asdf','/'), (CHROOT_DIR, '/'), ('/mnk/123/t', '/mnk/123/')]: + r = self.b.view(path) + path_strip = path + if (path.endswith(os.path.sep)): + path_strip = path_strip.rstrip(os.path.sep) + + self.assertEqual(r['home'], '/') + self.assertEqual(r['parent'], parent) + self.assertFalse(r['is_root']) diff --git a/couchpotato/core/plugins/category/__init__.py b/couchpotato/core/plugins/category/__init__.py new file mode 100644 index 0000000000..d147092f3d --- /dev/null +++ b/couchpotato/core/plugins/category/__init__.py @@ -0,0 +1,5 @@ +from .main import CategoryPlugin + + +def autoload(): + return CategoryPlugin() diff --git a/couchpotato/core/plugins/category/index.py b/couchpotato/core/plugins/category/index.py new file mode 100644 index 0000000000..6445de3ca8 --- /dev/null +++ b/couchpotato/core/plugins/category/index.py @@ -0,0 +1,31 @@ +from CodernityDB.tree_index import TreeBasedIndex + + +class CategoryIndex(TreeBasedIndex): + _version = 1 + + def __init__(self, *args, **kwargs): + kwargs['key_format'] = 'i' + super(CategoryIndex, self).__init__(*args, **kwargs) + + def make_key(self, key): + return key + + def make_key_value(self, data): + if data.get('_t') == 'category': + return data.get('order', -99), None + + +class CategoryMediaIndex(TreeBasedIndex): + _version = 1 + + def __init__(self, *args, **kwargs): + kwargs['key_format'] = '32s' + super(CategoryMediaIndex, self).__init__(*args, **kwargs) + + def make_key(self, key): + return str(key) + + def make_key_value(self, data): + if data.get('_t') == 'media' and data.get('category_id'): + return str(data.get('category_id')), None diff --git a/couchpotato/core/plugins/category/main.py b/couchpotato/core/plugins/category/main.py new file mode 100644 index 0000000000..2a8d847d5c --- /dev/null +++ b/couchpotato/core/plugins/category/main.py @@ -0,0 +1,151 @@ +import traceback + +from couchpotato import get_db +from couchpotato.api import addApiView +from 
couchpotato.core.event import addEvent +from couchpotato.core.helpers.encoding import toUnicode +from couchpotato.core.logger import CPLog +from couchpotato.core.plugins.base import Plugin +from .index import CategoryIndex, CategoryMediaIndex + + +log = CPLog(__name__) + + +class CategoryPlugin(Plugin): + + _database = { + 'category': CategoryIndex, + 'category_media': CategoryMediaIndex, + } + + def __init__(self): + addApiView('category.save', self.save) + addApiView('category.save_order', self.saveOrder) + addApiView('category.delete', self.delete) + addApiView('category.list', self.allView, docs = { + 'desc': 'List all available categories', + 'return': {'type': 'object', 'example': """{ + 'success': True, + 'categories': array, categories +}"""} + }) + + addEvent('category.all', self.all) + + def allView(self, **kwargs): + + return { + 'success': True, + 'categories': self.all() + } + + def all(self): + + db = get_db() + categories = db.all('category', with_doc = True) + + return [x['doc'] for x in categories] + + def save(self, **kwargs): + + try: + db = get_db() + + category = { + '_t': 'category', + 'order': kwargs.get('order', 999), + 'label': toUnicode(kwargs.get('label', '')), + 'ignored': toUnicode(kwargs.get('ignored', '')), + 'preferred': toUnicode(kwargs.get('preferred', '')), + 'required': toUnicode(kwargs.get('required', '')), + 'dubbed_version': kwargs.get('dubbed_version', False), + 'destination': toUnicode(kwargs.get('destination', '')), + } + + try: + c = db.get('id', kwargs.get('id')) + category['order'] = c.get('order', category['order']) + c.update(category) + + db.update(c) + except: + c = db.insert(category) + c.update(category) + + return { + 'success': True, + 'category': c + } + except: + log.error('Failed: %s', traceback.format_exc()) + + return { + 'success': False, + 'category': None + } + + def saveOrder(self, **kwargs): + + try: + db = get_db() + + order = 0 + for category_id in kwargs.get('ids', []): + c = db.get('id', 
category_id) + c['order'] = order + db.update(c) + + order += 1 + + return { + 'success': True + } + except: + log.error('Failed: %s', traceback.format_exc()) + + return { + 'success': False + } + + def delete(self, id = None, **kwargs): + + try: + db = get_db() + + success = False + message = '' + try: + c = db.get('id', id) + db.delete(c) + + # Force defaults on all empty category movies + self.removeFromMovie(id) + + success = True + except: + message = log.error('Failed deleting category: %s', traceback.format_exc()) + + return { + 'success': success, + 'message': message + } + except: + log.error('Failed: %s', traceback.format_exc()) + + return { + 'success': False + } + + def removeFromMovie(self, category_id): + + try: + db = get_db() + movies = [x['doc'] for x in db.get_many('category_media', category_id, with_doc = True)] + + if len(movies) > 0: + for movie in movies: + movie['category_id'] = None + db.update(movie) + except: + log.error('Failed: %s', traceback.format_exc()) diff --git a/couchpotato/core/plugins/category/static/category.js b/couchpotato/core/plugins/category/static/category.js new file mode 100644 index 0000000000..8038d47132 --- /dev/null +++ b/couchpotato/core/plugins/category/static/category.js @@ -0,0 +1,337 @@ +var CategoryListBase = new Class({ + + initialize: function(){ + var self = this; + + App.addEvent('loadSettings', self.addSettings.bind(self)); + }, + + setup: function(categories){ + var self = this; + + self.categories = []; + Array.each(categories, self.createCategory.bind(self)); + + }, + + addSettings: function(){ + var self = this; + + self.settings = App.getPage('Settings'); + self.settings.addEvent('create', function(){ + var tab = self.settings.createSubTab('category', { + 'label': 'Categories', + 'name': 'category', + 'subtab_label': 'Category & filtering' + }, self.settings.tabs.searcher ,'searcher'); + + self.tab = tab.tab; + self.content = tab.content; + + self.createList(); + self.createOrdering(); + + }); + + // 
Add categories in renamer + self.settings.addEvent('create', function(){ + var renamer_group = self.settings.tabs.renamer.groups.renamer; + + self.categories.each(function(category){ + + var input = new Option.Directory('section_name', 'option.name', category.get('destination'), { + 'name': category.get('label') + }); + input.inject(renamer_group.getElement('.renamer_to')); + input.fireEvent('injected'); + + input.save = function(){ + category.data.destination = input.getValue(); + category.save(); + }; + + }); + + }); + + }, + + createList: function(){ + var self = this; + + var count = self.categories.length; + + self.settings.createGroup({ + 'label': 'Categories', + 'description': 'Create categories, each one extending global filters. (Needs refresh \'' +(App.isMac() ? 'CMD+R' : 'F5')+ '\' after editing)' + }).inject(self.content).adopt( + self.category_container = new Element('div.container'), + new Element('a.add_new_category', { + 'text': count > 0 ? 'Create another category' : 'Click here to create a category.', + 'events': { + 'click': function(){ + var category = self.createCategory(); + $(category).inject(self.category_container); + } + } + }) + ); + + // Add categories, that aren't part of the core (for editing) + Array.each(self.categories, function(category){ + $(category).inject(self.category_container); + }); + + }, + + getCategory: function(id){ + return this.categories.filter(function(category){ + return category.data._id == id; + }).pick(); + }, + + getAll: function(){ + return this.categories; + }, + + createCategory: function(data){ + var self = this; + + data = data || {'id': randomString()}; + var category = new Category(data); + self.categories.include(category); + + return category; + }, + + createOrdering: function(){ + var self = this; + + var category_list; + self.settings.createGroup({ + 'label': 'Category ordering' + }).adopt( + new Element('.ctrlHolder#category_ordering').adopt( + new Element('label[text=Order]'), + category_list = new 
Element('ul'), + new Element('p.formHint', { + 'html': 'Change the order the categories are in the dropdown list.' + }) + ) + ).inject(self.content); + + Array.each(self.categories, function(category){ + new Element('li', {'data-id': category.data._id}).adopt( + new Element('span.category_label', { + 'text': category.data.label + }), + new Element('span.handle.icon-handle') + ).inject(category_list); + + }); + + // Sortable + self.category_sortable = new Sortables(category_list, { + 'revert': true, + 'handle': '', + 'opacity': 0.5, + 'onComplete': self.saveOrdering.bind(self) + }); + + }, + + saveOrdering: function(){ + var self = this; + + var ids = []; + + self.category_sortable.list.getElements('li').each(function(el){ + ids.include(el.get('data-id')); + }); + + Api.request('category.save_order', { + 'data': { + 'ids': ids + } + }); + + } + +}); + +window.CategoryList = new CategoryListBase(); + +var Category = new Class({ + + data: {}, + + initialize: function(data){ + var self = this; + + self.data = data; + + self.create(); + + self.el.addEvents({ + 'change:relay(select)': self.save.bind(self, 0), + 'keyup:relay(input[type=text])': self.save.bind(self, [300]), + "change:relay(input[type=checkbox])": self.save.bind(self, 0) + }); + + }, + + create: function(){ + var self = this; + + var data = self.data; + + self.el = new Element('div.category').adopt( + self.delete_button = new Element('span.delete.icon-delete', { + 'events': { + 'click': self.del.bind(self) + } + }), + new Element('.category_label.ctrlHolder').adopt( + new Element('label', {'text':'Name'}), + new Element('input', { + 'type':'text', + 'value': data.label, + 'placeholder': 'Example: Kids, Horror or His' + }), + new Element('p.formHint', {'text': 'See global filters for explanation.'}) + ), + new Element('.category_preferred.ctrlHolder').adopt( + new Element('label', {'text':'Preferred'}), + new Element('input', { + 'type':'text', + 'value': data.preferred, + 'placeholder': 'Blu-ray, DTS' + }) 
+ ), + new Element('.category_required.ctrlHolder').adopt( + new Element('label', {'text':'Required'}), + new Element('input', { + 'type':'text', + 'value': data.required, + 'placeholder': 'Example: DTS, AC3 & English' + }) + ), + new Element('.category_ignored.ctrlHolder').adopt( + new Element('label', {'text':'Ignored'}), + new Element('input', { + 'type':'text', + 'value': data.ignored, + 'placeholder': 'Example: dubbed, swesub, french' + }) + ), + new Element('.category_dubbed_version.ctrlHolder').adopt( + new Element('label', { 'text': 'Dubbed version' }), + new Element('input', { + 'type': 'checkbox', + 'checked': data.dubbed_version + }) + ) + ); + + self.makeSortable(); + + }, + + save: function(delay){ + var self = this; + + if(self.save_timer) clearRequestTimeout(self.save_timer); + self.save_timer = requestTimeout(function(){ + + Api.request('category.save', { + 'data': self.getData(), + 'useSpinner': true, + 'spinnerOptions': { + 'target': self.el + }, + 'onComplete': function(json){ + if(json.success){ + self.data = json.category; + } + } + }); + + }, delay || 0); + + }, + + getData: function(){ + var self = this; + + return { + 'id' : self.data._id, + 'label' : self.el.getElement('.category_label input').get('value'), + 'required' : self.el.getElement('.category_required input').get('value'), + 'preferred' : self.el.getElement('.category_preferred input').get('value'), + 'ignored': self.el.getElement('.category_ignored input').get('value'), + 'dubbed_version': self.el.getElement(".category_dubbed_version input").get("checked"), + 'destination': self.data.destination + }; + }, + + del: function(){ + var self = this; + + if(self.data.label === undefined){ + self.el.destroy(); + return; + } + + var label = self.el.getElement('.category_label input').get('value'); + var qObj = new Question('Are you sure you want to delete "'+label+'"?', '', [{ + 'text': 'Delete "'+label+'"', + 'class': 'delete', + 'events': { + 'click': function(e){ + 
(e).preventDefault(); + Api.request('category.delete', { + 'data': { + 'id': self.data._id + }, + 'useSpinner': true, + 'spinnerOptions': { + 'target': self.el + }, + 'onComplete': function(json){ + if(json.success) { + qObj.close(); + self.el.destroy(); + } else { + alert(json.message); + } + } + }); + } + } + }, { + 'text': 'Cancel', + 'cancel': true + }]); + + }, + + makeSortable: function(){ + var self = this; + + self.sortable = new Sortables(self.category_container, { + 'revert': true, + 'handle': '.handle', + 'opacity': 0.5, + 'onComplete': self.save.bind(self, 300) + }); + }, + + get: function(attr){ + return this.data[attr]; + }, + + toElement: function(){ + return this.el; + } + +}); diff --git a/couchpotato/core/plugins/category/static/category.scss b/couchpotato/core/plugins/category/static/category.scss new file mode 100644 index 0000000000..f1568f07a4 --- /dev/null +++ b/couchpotato/core/plugins/category/static/category.scss @@ -0,0 +1,78 @@ +@import "_mixins"; + +.add_new_category { + padding: 20px; + display: block; + text-align: center; + font-size: 20px; +} + +.category { + margin-bottom: 20px; + position: relative; + + > .delete { + position: absolute; + padding: $padding/3 $padding; + right: 0; + cursor: pointer; + opacity: 0.6; + color: #fd5353; + font-size: 1.5em; + z-index: 2; + + &:hover { + opacity: 1; + } + } + + .ctrlHolder:hover { + background: none; + } + + .formHint { + opacity: 0.1; + } + + &:hover .formHint { + opacity: 1; + } +} + +#category_ordering { + + ul { + float: left; + margin: 0; + width: 275px; + padding: 0; + } + + li { + cursor: grab; + border-bottom: 1px solid transparent; + @include theme(border-color, off); + padding: 5px; + list-style: none; + + &:last-child { border: 0; } + + .check { + margin: 2px 10px 0 0; + vertical-align: top; + } + + > span { + display: inline-block; + height: 20px; + vertical-align: top; + line-height: 20px; + } + + .handle { + width: 20px; + float: right; + } + } + +} diff --git 
a/couchpotato/core/plugins/profile/static/handle.png b/couchpotato/core/plugins/category/static/handle.png similarity index 100% rename from couchpotato/core/plugins/profile/static/handle.png rename to couchpotato/core/plugins/category/static/handle.png diff --git a/couchpotato/core/plugins/custom.py b/couchpotato/core/plugins/custom.py new file mode 100644 index 0000000000..20b4c3f7a7 --- /dev/null +++ b/couchpotato/core/plugins/custom.py @@ -0,0 +1,25 @@ +import os + +from couchpotato.core.event import addEvent +from couchpotato.core.logger import CPLog +from couchpotato.core.plugins.base import Plugin +from couchpotato.environment import Env + + +log = CPLog(__name__) + +autoload = 'Custom' + + +class Custom(Plugin): + + def __init__(self): + addEvent('app.load', self.createStructure) + + def createStructure(self): + + custom_dir = os.path.join(Env.get('data_dir'), 'custom_plugins') + + if not os.path.isdir(custom_dir): + self.makeDir(custom_dir) + self.createFile(os.path.join(custom_dir, '__init__.py'), '# Don\'t remove this file') diff --git a/couchpotato/core/plugins/dashboard.py b/couchpotato/core/plugins/dashboard.py new file mode 100644 index 0000000000..1c58d89db6 --- /dev/null +++ b/couchpotato/core/plugins/dashboard.py @@ -0,0 +1,116 @@ +import random as rndm +import time +from CodernityDB.database import RecordDeleted, RecordNotFound + +from couchpotato import get_db +from couchpotato.api import addApiView +from couchpotato.core.event import fireEvent +from couchpotato.core.helpers.variable import splitString, tryInt +from couchpotato.core.logger import CPLog +from couchpotato.core.plugins.base import Plugin + + +log = CPLog(__name__) + +autoload = 'Dashboard' + + +class Dashboard(Plugin): + + def __init__(self): + addApiView('dashboard.soon', self.getSoonView) + + def getSoonView(self, limit_offset = None, random = False, late = False, **kwargs): + + db = get_db() + now = time.time() + + # Get profiles first, determine pre or post theater + profiles = 
fireEvent('profile.all', single = True) + pre_releases = fireEvent('quality.pre_releases', single = True) + + # See what the profile contain and cache it + profile_pre = {} + for profile in profiles: + contains = {} + for q_identifier in profile.get('qualities', []): + contains['theater' if q_identifier in pre_releases else 'dvd'] = True + + profile_pre[profile.get('_id')] = contains + + # Add limit + limit = 12 + if limit_offset: + splt = splitString(limit_offset) if isinstance(limit_offset, (str, unicode)) else limit_offset + limit = tryInt(splt[0]) + + # Get all active medias + active_ids = [x['_id'] for x in fireEvent('media.with_status', 'active', with_doc = False, single = True)] + + medias = [] + + if len(active_ids) > 0: + + # Order by title or randomize + if not random: + orders_ids = db.all('media_title') + active_ids = [x['_id'] for x in orders_ids if x['_id'] in active_ids] + else: + rndm.shuffle(active_ids) + + for media_id in active_ids: + try: + media = fireEvent('media.get', media_id, single = True) + except RecordDeleted: + log.debug('Record already deleted: %s', media_id) + continue + + except RecordNotFound: + log.debug('Record not found: %s', media_id) + continue + + pp = profile_pre.get(media.get('profile_id')) + if not pp: continue + + eta = media['info'].get('release_date', {}) or {} + coming_soon = False + + # Theater quality + if pp.get('theater') and fireEvent('movie.searcher.could_be_released', True, eta, media['info']['year'], single = True): + coming_soon = 'theater' + elif pp.get('dvd') and fireEvent('movie.searcher.could_be_released', False, eta, media['info']['year'], single = True): + coming_soon = 'dvd' + + if coming_soon: + + # Don't list older movies + eta_date = eta.get(coming_soon) + eta_3month_passed = eta_date < (now - 7862400) # Release was more than 3 months ago + + if (not late and not eta_3month_passed) or \ + (late and eta_3month_passed): + + add = True + + # Check if it doesn't have any releases + if late: + 
media['releases'] = fireEvent('release.for_media', media['_id'], single = True) + + for release in media.get('releases', []): + if release.get('status') in ['snatched', 'available', 'seeding', 'downloaded']: + add = False + break + + if add: + medias.append(media) + + if len(medias) >= limit: + break + + return { + 'success': True, + 'empty': len(medias) == 0, + 'movies': medias, + } + + getLateView = getSoonView diff --git a/couchpotato/core/plugins/dashboard/__init__.py b/couchpotato/core/plugins/dashboard/__init__.py deleted file mode 100644 index 81279291c0..0000000000 --- a/couchpotato/core/plugins/dashboard/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -from .main import Dashboard - -def start(): - return Dashboard() - -config = [] diff --git a/couchpotato/core/plugins/dashboard/main.py b/couchpotato/core/plugins/dashboard/main.py deleted file mode 100644 index d5f9ef0be8..0000000000 --- a/couchpotato/core/plugins/dashboard/main.py +++ /dev/null @@ -1,120 +0,0 @@ -from couchpotato import get_session -from couchpotato.api import addApiView -from couchpotato.core.event import fireEvent -from couchpotato.core.helpers.request import jsonified, getParams -from couchpotato.core.helpers.variable import splitString, tryInt -from couchpotato.core.logger import CPLog -from couchpotato.core.plugins.base import Plugin -from couchpotato.core.settings.model import Movie -from sqlalchemy.sql.expression import or_ -import random - -log = CPLog(__name__) - - -class Dashboard(Plugin): - - def __init__(self): - - addApiView('dashboard.suggestions', self.suggestView) - addApiView('dashboard.soon', self.getSoonView) - - def newSuggestions(self): - - movies = fireEvent('movie.list', status = ['active', 'done'], limit_offset = (20, 0), single = True) - movie_identifiers = [m['library']['identifier'] for m in movies[1]] - - ignored_movies = fireEvent('movie.list', status = ['ignored', 'deleted'], limit_offset = (100, 0), single = True) - ignored_identifiers = [m['library']['identifier'] 
for m in ignored_movies[1]] - - suggestions = fireEvent('movie.suggest', movies = movie_identifiers, ignore = ignored_identifiers, single = True) - suggest_status = fireEvent('status.get', 'suggest', single = True) - - for suggestion in suggestions: - fireEvent('movie.add', params = {'identifier': suggestion}, force_readd = False, search_after = False, status_id = suggest_status.get('id')) - - def suggestView(self): - - db = get_session() - - movies = db.query(Movie).limit(20).all() - identifiers = [m.library.identifier for m in movies] - - suggestions = fireEvent('movie.suggest', movies = identifiers, single = True) - print suggestions - - return jsonified({ - 'result': True, - 'suggestions': suggestions - }) - - def getSoonView(self): - - params = getParams() - db = get_session() - - # Get profiles first, determine pre or post theater - profiles = fireEvent('profile.all', single = True) - qualities = fireEvent('quality.all', single = True) - pre_releases = fireEvent('quality.pre_releases', single = True) - - id_pre = {} - for quality in qualities: - id_pre[quality.get('id')] = quality.get('identifier') in pre_releases - - # See what the profile contain and cache it - profile_pre = {} - for profile in profiles: - contains = {} - for profile_type in profile.get('types', []): - contains['theater' if id_pre.get(profile_type.get('quality_id')) else 'dvd'] = True - - profile_pre[profile.get('id')] = contains - - # Get all active movies - q = db.query(Movie) \ - .join(Movie.profile, Movie.library) \ - .filter(or_(*[Movie.status.has(identifier = s) for s in ['active']])) \ - .group_by(Movie.id) - - # Add limit - limit_offset = params.get('limit_offset') - limit = 12 - if limit_offset: - splt = splitString(limit_offset) if isinstance(limit_offset, (str, unicode)) else limit_offset - limit = tryInt(splt[0]) - - all_movies = q.all() - - if params.get('random', False): - random.shuffle(all_movies) - - movies = [] - for movie in all_movies: - pp = 
profile_pre.get(movie.profile.id) - eta = movie.library.info.get('release_date', {}) - coming_soon = False - - # Theater quality - if pp.get('theater') and fireEvent('searcher.could_be_released', True, eta, single = True): - coming_soon = True - if pp.get('dvd') and fireEvent('searcher.could_be_released', False, eta, single = True): - coming_soon = True - - if coming_soon: - temp = movie.to_dict({ - 'profile': {'types': {}}, - 'releases': {'files':{}, 'info': {}}, - 'library': {'titles': {}, 'files':{}}, - 'files': {}, - }) - movies.append(temp) - - if len(movies) >= limit: - break - - return jsonified({ - 'success': True, - 'empty': len(movies) == 0, - 'movies': movies, - }) diff --git a/couchpotato/core/plugins/file.py b/couchpotato/core/plugins/file.py new file mode 100644 index 0000000000..56c5230ddc --- /dev/null +++ b/couchpotato/core/plugins/file.py @@ -0,0 +1,115 @@ +import os.path +import traceback + +from couchpotato import get_db +from couchpotato.api import addApiView +from couchpotato.core.event import addEvent, fireEvent +from couchpotato.core.helpers.encoding import toUnicode, ss, sp +from couchpotato.core.helpers.variable import md5, getExt, isSubFolder +from couchpotato.core.logger import CPLog +from couchpotato.core.plugins.base import Plugin +from couchpotato.environment import Env +from tornado.web import StaticFileHandler + + +log = CPLog(__name__) + +autoload = 'FileManager' + + +class FileManager(Plugin): + + def __init__(self): + addEvent('file.download', self.download) + + addApiView('file.cache/(.*)', self.showCacheFile, static = True, docs = { + 'desc': 'Return a file from the cp_data/cache directory', + 'params': { + 'filename': {'desc': 'path/filename of the wanted file'} + }, + 'return': {'type': 'file'} + }) + + fireEvent('schedule.interval', 'file.cleanup', self.cleanup, hours = 24) + + addEvent('app.test', self.doSubfolderTest) + + def cleanup(self): + + # Wait a bit after starting before cleanup + log.debug('Cleaning up unused 
files') + + try: + db = get_db() + cache_dir = Env.get('cache_dir') + medias = db.all('media', with_doc = True) + + files = [] + for media in medias: + file_dict = media['doc'].get('files', {}) + for x in file_dict.keys(): + files.extend(file_dict[x]) + + for f in os.listdir(cache_dir): + if os.path.splitext(f)[1] in ['.png', '.jpg', '.jpeg']: + file_path = os.path.join(cache_dir, f) + if toUnicode(file_path) not in files: + os.remove(file_path) + except: + log.error('Failed removing unused file: %s', traceback.format_exc()) + + def showCacheFile(self, route, **kwargs): + Env.get('app').add_handlers(".*$", [('%s%s' % (Env.get('api_base'), route), StaticFileHandler, {'path': toUnicode(Env.get('cache_dir'))})]) + + def download(self, url = '', dest = None, overwrite = False, urlopen_kwargs = None): + if not urlopen_kwargs: urlopen_kwargs = {} + + # Return response object to stream download + urlopen_kwargs['stream'] = True + + if not dest: # to Cache + dest = os.path.join(Env.get('cache_dir'), ss('%s.%s' % (md5(url), getExt(url)))) + + dest = sp(dest) + + if not overwrite and os.path.isfile(dest): + return dest + + try: + filedata = self.urlopen(url, **urlopen_kwargs) + except: + log.error('Failed downloading file %s: %s', (url, traceback.format_exc())) + return False + + self.createFile(dest, filedata, binary = True) + return dest + + def doSubfolderTest(self): + + tests = { + ('/test/subfolder', '/test/sub'): False, + ('/test/sub/folder', '/test/sub'): True, + ('/test/sub/folder', '/test/sub2'): False, + ('/sub/fold', '/test/sub/fold'): False, + ('/sub/fold', '/test/sub/folder'): False, + ('/opt/couchpotato', '/var/opt/couchpotato'): False, + ('/var/opt', '/var/opt/couchpotato'): False, + ('/CapItaLs/Are/OK', '/CapItaLs/Are/OK'): True, + ('/CapItaLs/Are/OK', '/CapItaLs/Are/OK2'): False, + ('/capitals/are/not/OK', '/capitals/are/NOT'): False, + ('\\\\Mounted\\Volume\\Test', '\\\\Mounted\\Volume'): True, + ('C:\\\\test\\path', 'C:\\\\test2'): False + } + + failed = 0 
+ for x in tests: + if isSubFolder(x[0], x[1]) is not tests[x]: + log.error('Failed subfolder test %s %s', x) + failed += 1 + + if failed > 0: + log.error('Subfolder test failed %s tests', failed) + else: + log.info('Subfolder test succeeded') + + return failed == 0 diff --git a/couchpotato/core/plugins/file/__init__.py b/couchpotato/core/plugins/file/__init__.py deleted file mode 100644 index 54d9cbe542..0000000000 --- a/couchpotato/core/plugins/file/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -from .main import FileManager - -def start(): - return FileManager() - -config = [] diff --git a/couchpotato/core/plugins/file/main.py b/couchpotato/core/plugins/file/main.py deleted file mode 100644 index 0dc0178340..0000000000 --- a/couchpotato/core/plugins/file/main.py +++ /dev/null @@ -1,161 +0,0 @@ -from couchpotato import get_session -from couchpotato.api import addApiView -from couchpotato.core.event import addEvent -from couchpotato.core.helpers.encoding import toUnicode -from couchpotato.core.helpers.request import jsonified -from couchpotato.core.helpers.variable import md5, getExt -from couchpotato.core.logger import CPLog -from couchpotato.core.plugins.base import Plugin -from couchpotato.core.plugins.scanner.main import Scanner -from couchpotato.core.settings.model import FileType, File -from couchpotato.environment import Env -import os.path -import time -import traceback - -log = CPLog(__name__) - - -class FileManager(Plugin): - - def __init__(self): - addEvent('file.add', self.add) - addEvent('file.download', self.download) - addEvent('file.types', self.getTypes) - - addApiView('file.cache/', self.showCacheFile, static = True, docs = { - 'desc': 'Return a file from the cp_data/cache directory', - 'params': { - 'filename': {'desc': 'path/filename of the wanted file'} - }, - 'return': {'type': 'file'} - }) - - addApiView('file.types', self.getTypesView, docs = { - 'desc': 'Return a list of all the file types and their ids.', - 'return': {'type': 'object', 
'example': """{ - 'types': [ - { - "identifier": "poster_original", - "type": "image", - "id": 1, - "name": "Poster_original" - }, - { - "identifier": "poster", - "type": "image", - "id": 2, - "name": "Poster" - }, - etc - ] -}"""} - }) - - addEvent('app.load', self.cleanup) - addEvent('app.load', self.init) - - def init(self): - - for type_tuple in Scanner.file_types.values(): - self.getType(type_tuple) - - def cleanup(self): - - # Wait a bit after starting before cleanup - time.sleep(3) - log.debug('Cleaning up unused files') - - python_cache = Env.get('cache')._path - try: - db = get_session() - for root, dirs, walk_files in os.walk(Env.get('cache_dir')): - for filename in walk_files: - if root == python_cache or 'minified' in filename: continue - file_path = os.path.join(root, filename) - f = db.query(File).filter(File.path == toUnicode(file_path)).first() - if not f: - os.remove(file_path) - except: - log.error('Failed removing unused file: %s', traceback.format_exc()) - - def showCacheFile(self, filename = ''): - - cache_dir = Env.get('cache_dir') - filename = os.path.basename(filename) - - from flask.helpers import send_from_directory - return send_from_directory(cache_dir, filename) - - def download(self, url = '', dest = None, overwrite = False, urlopen_kwargs = {}): - - if not dest: # to Cache - dest = os.path.join(Env.get('cache_dir'), '%s.%s' % (md5(url), getExt(url))) - - if not overwrite and os.path.isfile(dest): - return dest - - try: - filedata = self.urlopen(url, **urlopen_kwargs) - except: - log.error('Failed downloading file %s: %s', (url, traceback.format_exc())) - return False - - self.createFile(dest, filedata, binary = True) - return dest - - def add(self, path = '', part = 1, type_tuple = (), available = 1, properties = {}): - type_id = self.getType(type_tuple).get('id') - db = get_session() - - f = db.query(File).filter(File.path == toUnicode(path)).first() - if not f: - f = File() - db.add(f) - - f.path = toUnicode(path) - f.part = part - 
f.available = available - f.type_id = type_id - - db.commit() - - file_dict = f.to_dict() - - return file_dict - - def getType(self, type_tuple): - - db = get_session() - type_type, type_identifier = type_tuple - - ft = db.query(FileType).filter_by(identifier = type_identifier).first() - if not ft: - ft = FileType( - type = toUnicode(type_type), - identifier = type_identifier, - name = toUnicode(type_identifier[0].capitalize() + type_identifier[1:]) - ) - db.add(ft) - db.commit() - - type_dict = ft.to_dict() - return type_dict - - def getTypes(self): - - db = get_session() - - results = db.query(FileType).all() - - types = [] - for type_object in results: - types.append(type_object.to_dict()) - - return types - - def getTypesView(self): - - return jsonified({ - 'types': self.getTypes() - }) diff --git a/couchpotato/core/plugins/file/static/file.js b/couchpotato/core/plugins/file/static/file.js deleted file mode 100644 index 7b893e88e6..0000000000 --- a/couchpotato/core/plugins/file/static/file.js +++ /dev/null @@ -1,83 +0,0 @@ -var File = new Class({ - - initialize: function(file){ - var self = this; - - if(!file){ - self.empty = true; - self.el = new Element('div'); - return - } - - self.data = file; - self.type = File.Type.get(file.type_id); - - self['create'+(self.type.type).capitalize()]() - - }, - - createImage: function(){ - var self = this; - - var file_name = self.data.path.replace(/^.*[\\\/]/, ''); - - self.el = new Element('div', { - 'class': 'type_image ' + self.type.identifier - }).adopt( - new Element('img', { - 'src': Api.createUrl('file.cache') + file_name - }) - ) - }, - - toElement: function(){ - return this.el; - } - -}); - -var FileSelect = new Class({ - - multiple: function(type, files, single){ - - var results = files.filter(function(file){ - return file.type_id == File.Type.get(type).id; - }); - - if(single) - return new File(results.pop()); - - return results; - - }, - - single: function(type, files){ - return this.multiple(type, files, 
true); - } - -}); -window.File.Select = new FileSelect(); - -var FileTypeBase = new Class({ - - setup: function(types){ - var self = this; - - self.typesById = {}; - self.typesByKey = {}; - Object.each(types, function(type){ - self.typesByKey[type.identifier] = type; - self.typesById[type.id] = type; - }); - - }, - - get: function(identifier){ - if(typeOf(identifier) == 'number') - return this.typesById[identifier] - else - return this.typesByKey[identifier] - } - -}); -window.File.Type = new FileTypeBase(); diff --git a/couchpotato/core/plugins/library/__init__.py b/couchpotato/core/plugins/library/__init__.py deleted file mode 100644 index f597032999..0000000000 --- a/couchpotato/core/plugins/library/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -from .main import LibraryPlugin - -def start(): - return LibraryPlugin() - -config = [] diff --git a/couchpotato/core/plugins/library/main.py b/couchpotato/core/plugins/library/main.py deleted file mode 100644 index aa1611dddc..0000000000 --- a/couchpotato/core/plugins/library/main.py +++ /dev/null @@ -1,166 +0,0 @@ -from couchpotato import get_session -from couchpotato.core.event import addEvent, fireEventAsync, fireEvent -from couchpotato.core.helpers.encoding import toUnicode, simplifyString -from couchpotato.core.helpers.variable import mergeDicts -from couchpotato.core.logger import CPLog -from couchpotato.core.plugins.base import Plugin -from couchpotato.core.settings.model import Library, LibraryTitle, File -from string import ascii_letters -import time -import traceback - -log = CPLog(__name__) - -class LibraryPlugin(Plugin): - - default_dict = {'titles': {}, 'files':{}} - - def __init__(self): - addEvent('library.add', self.add) - addEvent('library.update', self.update) - addEvent('library.update_release_date', self.updateReleaseDate) - - - def add(self, attrs = {}, update_after = True): - - db = get_session() - - l = db.query(Library).filter_by(identifier = attrs.get('identifier')).first() - if not l: - status = 
fireEvent('status.get', 'needs_update', single = True) - l = Library( - year = attrs.get('year'), - identifier = attrs.get('identifier'), - plot = toUnicode(attrs.get('plot')), - tagline = toUnicode(attrs.get('tagline')), - status_id = status.get('id') - ) - - title = LibraryTitle( - title = toUnicode(attrs.get('title')), - simple_title = self.simplifyTitle(attrs.get('title')) - ) - - l.titles.append(title) - - db.add(l) - db.commit() - - # Update library info - if update_after is not False: - handle = fireEventAsync if update_after is 'async' else fireEvent - handle('library.update', identifier = l.identifier, default_title = toUnicode(attrs.get('title', ''))) - - library_dict = l.to_dict(self.default_dict) - - return library_dict - - def update(self, identifier, default_title = '', force = False): - - db = get_session() - library = db.query(Library).filter_by(identifier = identifier).first() - done_status = fireEvent('status.get', 'done', single = True) - - if library: - library_dict = library.to_dict(self.default_dict) - - do_update = True - - if library.status_id == done_status.get('id') and not force: - do_update = False - else: - info = fireEvent('movie.info', merge = True, identifier = identifier) - - # Don't need those here - try: del info['in_wanted'] - except: pass - try: del info['in_library'] - except: pass - - if not info or len(info) == 0: - log.error('Could not update, no movie info to work with: %s', identifier) - return False - - # Main info - if do_update: - library.plot = toUnicode(info.get('plot', '')) - library.tagline = toUnicode(info.get('tagline', '')) - library.year = info.get('year', 0) - library.status_id = done_status.get('id') - library.info = info - db.commit() - - # Titles - [db.delete(title) for title in library.titles] - db.commit() - - titles = info.get('titles', []) - log.debug('Adding titles: %s', titles) - for title in titles: - if not title: - continue - title = toUnicode(title) - t = LibraryTitle( - title = title, - 
simple_title = self.simplifyTitle(title), - default = title.lower() == toUnicode(default_title.lower()) or (toUnicode(default_title) == u'' and toUnicode(titles[0]) == title) - ) - library.titles.append(t) - - db.commit() - - # Files - images = info.get('images', []) - for image_type in ['poster']: - for image in images.get(image_type, []): - if not isinstance(image, (str, unicode)): - continue - - file_path = fireEvent('file.download', url = image, single = True) - if file_path: - file_obj = fireEvent('file.add', path = file_path, type_tuple = ('image', image_type), single = True) - try: - file_obj = db.query(File).filter_by(id = file_obj.get('id')).one() - library.files.append(file_obj) - db.commit() - - break - except: - log.debug('Failed to attach to library: %s', traceback.format_exc()) - - library_dict = library.to_dict(self.default_dict) - - return library_dict - - def updateReleaseDate(self, identifier): - - db = get_session() - library = db.query(Library).filter_by(identifier = identifier).first() - - if not library.info: - library_dict = self.update(identifier, force = True) - dates = library_dict.get('info', {}).get('release_date') - else: - dates = library.info.get('release_date') - - if dates and dates.get('expires', 0) < time.time() or not dates: - dates = fireEvent('movie.release_date', identifier = identifier, merge = True) - library.info = mergeDicts(library.info, {'release_date': dates }) - db.commit() - - return dates - - - def simplifyTitle(self, title): - - title = toUnicode(title) - - nr_prefix = '' if title[0] in ascii_letters else '#' - title = simplifyString(title) - - for prefix in ['the ']: - if prefix == title[:len(prefix)]: - title = title[len(prefix):] - break - - return nr_prefix + title diff --git a/couchpotato/core/plugins/log/__init__.py b/couchpotato/core/plugins/log/__init__.py index 33dcf338a3..3760b5675f 100644 --- a/couchpotato/core/plugins/log/__init__.py +++ b/couchpotato/core/plugins/log/__init__.py @@ -1,6 +1,5 @@ from 
.main import Logging -def start(): - return Logging() -config = [] +def autoload(): + return Logging() diff --git a/couchpotato/core/plugins/log/main.py b/couchpotato/core/plugins/log/main.py index e6ff1133c0..4bf7cf3e55 100644 --- a/couchpotato/core/plugins/log/main.py +++ b/couchpotato/core/plugins/log/main.py @@ -1,12 +1,14 @@ +import os +import re +import traceback + from couchpotato.api import addApiView from couchpotato.core.helpers.encoding import toUnicode -from couchpotato.core.helpers.request import jsonified, getParam, getParams -from couchpotato.core.helpers.variable import tryInt +from couchpotato.core.helpers.variable import tryInt, splitString from couchpotato.core.logger import CPLog from couchpotato.core.plugins.base import Plugin from couchpotato.environment import Env -import os -import traceback + log = CPLog(__name__) @@ -21,7 +23,11 @@ def __init__(self): }, 'return': {'type': 'object', 'example': """{ 'success': True, - 'log': string, //Log file + 'log': [{ + 'time': '03-12 09:12:59', + 'type': 'INFO', + 'message': 'Log message' + }, ..], //Log file 'total': int, //Total log files available }"""} }) @@ -33,7 +39,11 @@ def __init__(self): }, 'return': {'type': 'object', 'example': """{ 'success': True, - 'log': string, //Log file + 'log': [{ + 'time': '03-12 09:12:59', + 'type': 'INFO', + 'message': 'Log message' + }, ..] 
}"""} }) addApiView('logging.clear', self.clear, docs = { @@ -43,13 +53,13 @@ def __init__(self): 'desc': 'Log errors', 'params': { 'type': {'desc': 'Type of logging, default "error"'}, - '**kwargs': {'type':'object', 'desc': 'All other params will be printed in the log string.'}, + '**kwargs': {'type': 'object', 'desc': 'All other params will be printed in the log string.'}, } }) - def get(self): + def get(self, nr = 0, **kwargs): - nr = int(getParam('nr', 0)) + nr = tryInt(nr) current_path = None total = 1 @@ -66,21 +76,22 @@ def get(self): if x is nr: current_path = path - log = '' + log_content = '' if current_path: f = open(current_path, 'r') - log = f.read() + log_content = f.read() + logs = self.toList(log_content) - return jsonified({ + return { 'success': True, - 'log': toUnicode(log), + 'log': logs, 'total': total, - }) + } - def partial(self): + def partial(self, type = 'all', lines = 30, offset = 0, **kwargs): - log_type = getParam('type', 'all') - total_lines = tryInt(getParam('lines', 30)) + total_lines = tryInt(lines) + offset = tryInt(offset) log_lines = [] @@ -92,37 +103,66 @@ def partial(self): if not os.path.isfile(path): break - reversed_lines = [] f = open(path, 'r') - reversed_lines = toUnicode(f.read()).split('[0m\n') - reversed_lines.reverse() + log_content = toUnicode(f.read()) + raw_lines = self.toList(log_content) + raw_lines.reverse() brk = False - for line in reversed_lines: + for line in raw_lines: - if log_type == 'all' or '%s ' % log_type.upper() in line: + if type == 'all' or line.get('type') == type.upper(): log_lines.append(line) - if len(log_lines) >= total_lines: + if len(log_lines) >= (total_lines + offset): brk = True break if brk: break + log_lines = log_lines[offset:] log_lines.reverse() - return jsonified({ + + return { 'success': True, - 'log': '[0m\n'.join(log_lines), - }) + 'log': log_lines, + } + + def toList(self, log_content = ''): - def clear(self): + logs_raw = re.split(r'\[0m\n', toUnicode(log_content)) + + logs = 
[] + re_split = r'\x1b' + for log_line in logs_raw: + split = re.split(re_split, log_line) + if split and len(split) == 3: + try: + date, time, log_type = splitString(split[0], ' ') + timestamp = '%s %s' % (date, time) + except: + timestamp = 'UNKNOWN' + log_type = 'UNKNOWN' + + message = ''.join(split[1]) if len(split) > 1 else split[0] + message = re.sub('\[\d+m\[', '[', message) + + logs.append({ + 'time': timestamp, + 'type': log_type, + 'message': message + }) + + return logs + + def clear(self, **kwargs): for x in range(0, 50): path = '%s%s' % (Env.get('log_path'), '.%s' % x if x > 0 else '') if not os.path.isfile(path): - break + continue try: @@ -135,24 +175,21 @@ def clear(self): except: log.error('Couldn\'t delete file "%s": %s', (path, traceback.format_exc())) - return jsonified({ + return { 'success': True - }) - - def log(self): + } - params = getParams() + def log(self, type = 'error', **kwargs): try: - log_message = 'API log: %s' % params + log_message = 'API log: %s' % kwargs try: - getattr(log, params.get('type', 'error'))(log_message) + getattr(log, type)(log_message) except: log.error(log_message) except: - log.error('Couldn\'t log via API: %s', params) + log.error('Couldn\'t log via API: %s', kwargs) - - return jsonified({ + return { 'success': True - }) + } diff --git a/couchpotato/core/plugins/log/static/log.css b/couchpotato/core/plugins/log/static/log.css deleted file mode 100644 index 222b8efa03..0000000000 --- a/couchpotato/core/plugins/log/static/log.css +++ /dev/null @@ -1,66 +0,0 @@ -.page.log .nav { - display: block; - text-align: center; - padding: 20px 0; - margin: 0; - font-size: 20px; - position: fixed; - width: 960px; - bottom: 0; - background: #4E5969; -} - - .page.log .nav li { - display: inline; - padding: 5px 10px; - margin: 0; - cursor: pointer; - } - - .page.log .nav li:hover:not(.active) { - background: rgba(255, 255, 255, 0.1); - } - - .page.log .nav li.active { - font-weight: bold; - cursor: default; - font-size: 30px; - 
} - -.page.log .loading { - text-align: center; - font-size: 20px; - padding: 50px; -} - -.page.log .container { - padding: 30px 0 60px; - overflow: hidden; -} - -.page.log .container span { - float: left; - width: 86%; - line-height: 150%; - padding: 3px 0; - border-top: 1px solid rgba(255, 255, 255, 0.2); - font-size: 11px; - font-family: Lucida Console, Monaco, Nimbus Mono L; -} - - .page.log .container .error { - color: #FFA4A4; - white-space: pre-wrap; - } - .page.log .container .debug { color: lightgrey; } - - .page.log .container .time { - clear: both; - width: 14%; - color: lightgrey; - padding: 3px 0; - font-size: 10px; - } - - .page.log .container .time:last-child { display: none; } - diff --git a/couchpotato/core/plugins/log/static/log.js b/couchpotato/core/plugins/log/static/log.js index 0e276b5838..997ec87b4a 100644 --- a/couchpotato/core/plugins/log/static/log.js +++ b/couchpotato/core/plugins/log/static/log.js @@ -2,94 +2,302 @@ Page.Log = new Class({ Extends: PageBase, + disable_pointer_onscroll: false, + order: 60, name: 'log', title: 'Show recent logs.', has_tab: false, - initialize: function(options){ - var self = this; - self.parent(options) - + navigation: null, + log_items: [], + report_text: '### Steps to reproduce:\n'+ + '1. ..\n'+ + '2. 
..\n'+ + '\n'+ + '### Information:\n'+ + 'Movie(s) I have this with: ...\n'+ + 'Quality of the movie being searched: ...\n'+ + 'Providers I use: ...\n'+ + 'Version of CouchPotato: {version}\n'+ + 'Running on: ...\n'+ + '\n'+ + '### Logs:\n'+ + '```\n{issue}```', - App.getBlock('more').addLink(new Element('a', { - 'href': App.createUrl(self.name), - 'text': self.name.capitalize(), - 'title': self.title - })) - - }, - - indexAction: function(){ + indexAction: function () { var self = this; self.getLogs(0); }, - getLogs: function(nr){ + getLogs: function (nr) { var self = this; - if(self.log) self.log.destroy(); + if (self.log) self.log.destroy(); + self.log = new Element('div.container.loading', { - 'text': 'loading...' - }).inject(self.el); + 'text': 'loading...', + 'events': { + 'mouseup:relay(.time)': function(e){ + requestTimeout(function(){ + self.showSelectionButton(e); + }, 100); + } + } + }).inject(self.content); - Api.request('logging.get', { + if(self.navigation){ + var nav = self.navigation.getElement('.nav'); + nav.getElements('.active').removeClass('active'); + + self.navigation.getElements('li')[nr+1].addClass('active'); + } + + if(self.request && self.request.running) self.request.cancel(); + self.request = Api.request('logging.get', { 'data': { 'nr': nr }, - 'onComplete': function(json){ - self.log.set('html', self.addColors(json.log)); + 'onComplete': function (json) { + self.log.set('text', ''); + self.log_items = self.createLogElements(json.log); + self.log.adopt(self.log_items); self.log.removeClass('loading'); + self.scrollToBottom(); - new Fx.Scroll(window, {'duration': 0}).toBottom(); + if(!self.navigation){ + self.navigation = new Element('div.navigation').adopt( + new Element('h2[text=Logs]'), + new Element('div.hint', { + 'text': 'Select multiple lines & report an issue' + }) + ); - var nav = new Element('ul.nav').inject(self.log, 'top'); - for (var i = 0; i <= json.total; i++) { - new Element('li', { - 'text': i+1, - 'class': nr == i ? 
'active': '', + var nav = new Element('ul.nav', { 'events': { - 'click': function(e){ - self.getLogs(e.target.get('text')-1); + 'click:relay(li.select)': function (e, el) { + self.getLogs(parseInt(el.get('text')) - 1); } } - }).inject(nav); - }; + }).inject(self.navigation); - new Element('li', { - 'text': 'clear', - 'events': { - 'click': function(){ - Api.request('logging.clear', { - 'onComplete': function(){ - self.getLogs(0); + // Type selection + new Element('li.filter').grab( + new Element('select', { + 'events': { + 'change': function () { + var type_filter = this.getSelected()[0].get('value'); + self.content.set('data-filter', type_filter); + self.scrollToBottom(); } - }); + } + }).adopt( + new Element('option', {'value': 'ALL', 'text': 'Show all logs'}), + new Element('option', {'value': 'INFO', 'text': 'Show only INFO'}), + new Element('option', {'value': 'DEBUG', 'text': 'Show only DEBUG'}), + new Element('option', {'value': 'ERROR', 'text': 'Show only ERROR'}) + ) + ).inject(nav); - } + // Selections + for (var i = 0; i <= json.total; i++) { + new Element('li', { + 'text': i + 1, + 'class': 'select ' + (nr == i ? 
'active' : '') + }).inject(nav); } - }).inject(nav) + + // Clear button + new Element('li.clear', { + 'text': 'clear', + 'events': { + 'click': function () { + Api.request('logging.clear', { + 'onComplete': function () { + self.getLogs(0); + } + }); + + } + } + }).inject(nav); + + // Add to page + self.navigation.inject(self.content, 'top'); + } } }); }, - addColors: function(text){ - var self = this; + createLogElements: function (logs) { + + var elements = []; + + logs.each(function (log) { + elements.include(new Element('div', { + 'class': 'time ' + log.type.toLowerCase() + }).adopt( + new Element('span.time_type', { + 'text': log.time + ' ' + log.type + }), + new Element('span.message', { + 'text': log.message + }) + )); + }); + + return elements; + }, + + scrollToBottom: function () { + new Fx.Scroll(this.content, {'duration': 0}).toBottom(); + }, + + showSelectionButton: function(e){ + var self = this, + selection = self.getSelected(), + start_node = selection.anchorNode, + parent_start = start_node.parentNode.getParent('.time'), + end_node = selection.focusNode.parentNode.getParent('.time'), + text = ''; + + var remove_button = function(){ + self.log.getElements('.highlight').removeClass('highlight'); + if(self.do_report) + self.do_report.destroy(); + document.body.removeEvent('click', remove_button); + }; + remove_button(); + + if(parent_start) + start_node = parent_start; + + var index = { + 'start': self.log_items.indexOf(start_node), + 'end': self.log_items.indexOf(end_node) + }; + + if(index.start > index.end){ + index = { + 'start': index.end, + 'end': index.start + }; + } + + var nodes = self.log_items.slice(index.start, index.end + 1); + + nodes.each(function(node, nr){ + node.addClass('highlight'); + node.getElements('span').each(function(span){ + text += self.spaceFill(span.get('text') + ' ', 6); + }); + text += '\n'; + }); + + self.do_report = new Element('a.do_report.button', { + 'text': 'Report issue', + 'styles': { + 'top': e.page.y, + 'left': 
e.page.x + }, + 'events': { + 'click': function(e){ + (e).stop(); + + self.showReport(text); + } + } + }).inject(document.body); + + requestTimeout(function(){ + document.body.addEvent('click', remove_button); + }, 0); + + }, + + showReport: function(text){ + var self = this, + version = Updater.getInfo(), + body = self.report_text + .replace('{issue}', text) + .replace('{version}', version ? version.version.repr : '...'), + textarea; + + var overlay = new Element('div.mask.report_popup', { + 'method': 'post', + 'events': { + 'click': function(e){ + overlay.destroy(); + } + } + }).grab( + new Element('div.bug', { + 'events': { + 'click': function(e){ + (e).stopPropagation(); + } + } + }).adopt( + new Element('h1', { + 'text': 'Report a bug' + }), + new Element('span').adopt( + new Element('span', { + 'text': 'Read ' + }), + new Element('a.button', { + 'target': '_blank', + 'text': 'the contributing guide', + 'href': 'https://github.com/CouchPotato/CouchPotatoServer/wiki/Developer-branch' + }), + new Element('span', { + 'html': ' before posting, then copy the text below and FILL IN the dots.' + }) + ), + textarea = new Element('textarea', { + 'text': body + }), + new Element('a.button', { + 'target': '_blank', + 'text': 'Create a new issue on GitHub with the text above', + 'href': 'https://github.com/CouchPotato/CouchPotatoServer/issues/new', + 'events': { + 'click': function(e){ + (e).stop(); + + var body = textarea.get('value'), + bdy = '?body=' + (body.length < 2000 ? 
encodeURIComponent(body) : 'Paste the text here'), + win = window.open(e.target.get('href') + bdy, '_blank'); + win.focus(); + } + } + }) + ) + ); + + overlay.inject(document.body); + }, + + getSelected: function(){ + if (window.getSelection) + return window.getSelection(); + else if (document.getSelection) + return document.getSelection(); + else { + var selection = document.selection && document.selection.createRange(); + if (selection.text) + return selection.text; + } + return false; + + }, - text = text - .replace(/&/g, '&') - .replace(//g, '>') - .replace(/"/g, '"') - .replace(/\u001b\[31m/gi, '') - .replace(/\u001b\[36m/gi, '') - .replace(/\u001b\[33m/gi, '') - .replace(/\u001b\[0m\n/gi, '') - .replace(/\u001b\[0m/gi, '') - - return '' + text + ''; + spaceFill: function( number, width ){ + if ( number.toString().length >= width ) + return number; + return ( new Array( width ).join( ' ' ) + number.toString() ).substr( -width ); } -}) \ No newline at end of file +}); diff --git a/couchpotato/core/plugins/log/static/log.scss b/couchpotato/core/plugins/log/static/log.scss new file mode 100644 index 0000000000..2f15274ea6 --- /dev/null +++ b/couchpotato/core/plugins/log/static/log.scss @@ -0,0 +1,190 @@ +@import "_mixins"; + +.page.log { + + .nav { + text-align: right; + padding: 0; + margin: 0; + + li { + display: inline-block; + padding: 5px 10px; + margin: 0; + + @include media-tablet { + &.filter, &:nth-child(7),&:nth-child(8),&:nth-child(9),&:nth-child(10),&:nth-child(11),&:nth-child(12) { + display: none; + } + + &:last-child { + display: inline-block; + } + } + + &.select, &.clear { + cursor: pointer; + } + + &:hover:not(.active):not(.filter) { + background: rgba(255,255,255,.1); + } + + &.active { + font-weight: bold; + cursor: default; + background: rgba(255,255,255,.1); + } + } + } + + .hint { + font-style: italic; + opacity: .5; + margin-top: 3px; + + @include media-tablet { + display: none; + } + } + + .container { + padding: $padding; + overflow: 
hidden; + line-height: 150%; + transform: rotateZ(360deg); + + @include media-phablet { + padding: $padding $padding/2; + } + + &.loading { + text-align: center; + font-size: 20px; + padding: 100px 50px; + } + + select { + vertical-align: top; + } + + .time { + font-size: .75em; + border-top: 1px solid transparent; + @include theme(border-color, off); + padding: 0 3px; + font-family: Lucida Console, Monaco, Nimbus Mono L, monospace, serif; + display: block; + cursor: pointer; + position: relative; + + &:hover { + @include theme(background, off); + } + + &.highlight { + @include theme(background, off); + } + + span { + display: inline-block; + padding: 5px 0 3px; + } + + .time_type { + position: absolute; + width: 130px; + top: 0; + left: 0; + + @include media-tablet { + position: static; + width: auto; + } + } + + .message { + //white-space: pre-wrap; + display: block; + margin: 0 0 0 130px; + + @include media-tablet { + display: inline; + margin: 0 0 0 $padding/3; + padding: 0; + } + } + + } + + .error { color: #FFA4A4; } + .debug span { opacity: .6; } + } + + + + [data-filter=INFO] .error, + [data-filter=INFO] .debug, + [data-filter=ERROR] .debug, + [data-filter=ERROR] .info, + [data-filter=DEBUG] .info, + [data-filter=DEBUG] .error { + display: none; + } +} + +.report_popup.report_popup { + position: fixed; + left: 0; + right: 0; + bottom: 0; + top: 0; + z-index: 99999; + font-size: 14px; + display: flex; + justify-content: center; + align-items: center; + opacity: 1; + color: #FFF; + pointer-events: auto; + + .button { + margin: 10px 0; + padding: 10px; + color: #FFF; + @include theme(background, primary); + } + + .bug { + width: 80%; + height: 80%; + max-height: 800px; + max-width: 800px; + + display: flex; + flex-flow: column nowrap; + + > span { + margin: $padding/2 0 $padding 0; + } + + textarea { + display: block; + width: 100%; + background: #FFF; + padding: 20px; + overflow: auto; + color: #666; + height: 70%; + font-size: 12px; + } + } +} + 
+.do_report.do_report { + z-index: 10000; + position: absolute; + padding: 10px; + @include theme(background, primary); + color: #FFF !important; +} diff --git a/couchpotato/core/plugins/manage.py b/couchpotato/core/plugins/manage.py new file mode 100755 index 0000000000..c639354c6e --- /dev/null +++ b/couchpotato/core/plugins/manage.py @@ -0,0 +1,322 @@ +import os +import time +import traceback + +from couchpotato import get_db +from couchpotato.api import addApiView +from couchpotato.core.event import fireEvent, addEvent, fireEventAsync +from couchpotato.core.helpers.encoding import sp +from couchpotato.core.helpers.variable import splitString, getTitle, tryInt, getIdentifier, getFreeSpace +from couchpotato.core.logger import CPLog +from couchpotato.core.plugins.base import Plugin +from couchpotato.environment import Env + + +log = CPLog(__name__) + +autoload = 'Manage' + + +class Manage(Plugin): + + in_progress = False + + def __init__(self): + + fireEvent('scheduler.interval', identifier = 'manage.update_library', handle = self.updateLibrary, hours = 2) + + addEvent('manage.update', self.updateLibrary) + addEvent('manage.diskspace', self.getDiskSpace) + + # Add files after renaming + def after_rename(message = None, group = None): + if not group: group = {} + return self.scanFilesToLibrary(folder = group['destination_dir'], files = group['renamed_files'], release_download = group['release_download']) + addEvent('renamer.after', after_rename, priority = 110) + + addApiView('manage.update', self.updateLibraryView, docs = { + 'desc': 'Update the library by scanning for new movies', + 'params': { + 'full': {'desc': 'Do a full update or just recently changed/added movies.'}, + } + }) + + addApiView('manage.progress', self.getProgress, docs = { + 'desc': 'Get the progress of current manage update', + 'return': {'type': 'object', 'example': """{ + 'progress': False || object, total & to_go, +}"""}, + }) + + if not Env.get('dev') and self.conf('startup_scan'): + 
addEvent('app.load', self.updateLibraryQuick) + + addEvent('app.load', self.setCrons) + + # Enable / disable interval + addEvent('setting.save.manage.library_refresh_interval.after', self.setCrons) + + def setCrons(self): + + fireEvent('schedule.remove', 'manage.update_library') + refresh = tryInt(self.conf('library_refresh_interval')) + if refresh > 0: + fireEvent('schedule.interval', 'manage.update_library', self.updateLibrary, hours = refresh, single = True) + + return True + + def getProgress(self, **kwargs): + return { + 'progress': self.in_progress + } + + def updateLibraryView(self, full = 1, **kwargs): + + fireEventAsync('manage.update', full = True if full == '1' else False) + + return { + 'progress': self.in_progress, + 'success': True + } + + def updateLibraryQuick(self): + return self.updateLibrary(full = False) + + def updateLibrary(self, full = True): + last_update_key = 'manage.last_update%s' % ('_full' if full else '') + last_update = float(Env.prop(last_update_key, default = 0)) + + if self.in_progress: + log.info('Already updating library: %s', self.in_progress) + return + elif self.isDisabled() or (last_update > time.time() - 20): + return + + self.in_progress = {} + fireEvent('notify.frontend', type = 'manage.updating', data = True) + + try: + + directories = self.directories() + directories.sort() + added_identifiers = [] + + # Add some progress + for directory in directories: + self.in_progress[os.path.normpath(directory)] = { + 'started': False, + 'eta': -1, + 'total': None, + 'to_go': None, + } + + for directory in directories: + folder = os.path.normpath(directory) + self.in_progress[os.path.normpath(directory)]['started'] = tryInt(time.time()) + + if not os.path.isdir(folder): + if len(directory) > 0: + log.error('Directory doesn\'t exist: %s', folder) + continue + + log.info('Updating manage library: %s', folder) + fireEvent('notify.frontend', type = 'manage.update', data = True, message = 'Scanning for movies in "%s"' % folder) + + 
onFound = self.createAddToLibrary(folder, added_identifiers) + fireEvent('scanner.scan', folder = folder, simple = True, newer_than = last_update if not full else 0, check_file_date = False, on_found = onFound, single = True) + + # Break if CP wants to shut down + if self.shuttingDown(): + break + + # If cleanup option is enabled, remove offline files from database + if self.conf('cleanup') and full and not self.shuttingDown(): + + # Get movies with done status + total_movies, done_movies = fireEvent('media.list', types = 'movie', status = 'done', release_status = 'done', status_or = True, single = True) + + deleted_releases = [] + for done_movie in done_movies: + if getIdentifier(done_movie) not in added_identifiers: + fireEvent('media.delete', media_id = done_movie['_id'], delete_from = 'all') + else: + + releases = done_movie.get('releases', []) + + for release in releases: + if release.get('files'): + brk = False + for file_type in release.get('files', {}): + for release_file in release['files'][file_type]: + # Remove release not available anymore + if not os.path.isfile(sp(release_file)): + fireEvent('release.clean', release['_id']) + brk = True + break + if brk: + break + + # Check if there are duplicate releases (different quality) use the last one, delete the rest + if len(releases) > 1: + used_files = {} + for release in releases: + for file_type in release.get('files', {}): + for release_file in release['files'][file_type]: + already_used = used_files.get(release_file) + + if already_used: + release_id = release['_id'] if already_used.get('last_edit', 0) > release.get('last_edit', 0) else already_used['_id'] + if release_id not in deleted_releases: + fireEvent('release.delete', release_id, single = True) + deleted_releases.append(release_id) + break + else: + used_files[release_file] = release + del used_files + + # Break if CP wants to shut down + if self.shuttingDown(): + break + + if not self.shuttingDown(): + db = get_db() + db.reindex() + + 
Env.prop(last_update_key, time.time()) + except: + log.error('Failed updating library: %s', (traceback.format_exc())) + + while self.in_progress and len(self.in_progress) > 0 and not self.shuttingDown(): + + delete_me = {} + + # noinspection PyTypeChecker + for folder in self.in_progress: + if self.in_progress[folder]['to_go'] <= 0: + delete_me[folder] = True + + for delete in delete_me: + del self.in_progress[delete] + + time.sleep(1) + + fireEvent('notify.frontend', type = 'manage.updating', data = False) + self.in_progress = False + + # noinspection PyDefaultArgument + def createAddToLibrary(self, folder, added_identifiers = []): + + def addToLibrary(group, total_found, to_go): + if self.in_progress[folder]['total'] is None: + self.in_progress[folder].update({ + 'total': total_found, + 'to_go': total_found, + }) + + self.updateProgress(folder, to_go) + + if group['media'] and group['identifier']: + added_identifiers.append(group['identifier']) + + # Add it to release and update the info + fireEvent('release.add', group = group, update_info = False) + fireEvent('movie.update', identifier = group['identifier'], on_complete = self.createAfterUpdate(folder, group['identifier'])) + + return addToLibrary + + def createAfterUpdate(self, folder, identifier): + + # Notify frontend + def afterUpdate(): + if not self.in_progress or self.shuttingDown(): + return + + total = self.in_progress[folder]['total'] + movie_dict = fireEvent('media.get', identifier, single = True) + + if movie_dict: + fireEvent('notify.frontend', type = 'movie.added', data = movie_dict, message = None if total > 5 else 'Added "%s" to manage.' 
% getTitle(movie_dict)) + + return afterUpdate + + def updateProgress(self, folder, to_go): + + pr = self.in_progress[folder] + if to_go < pr['to_go']: + pr['to_go'] = to_go + + avg = (time.time() - pr['started']) / (pr['total'] - pr['to_go']) + pr['eta'] = tryInt(avg * pr['to_go']) + + + def directories(self): + try: + return self.conf('library', default = []) + except: + pass + + return [] + + def scanFilesToLibrary(self, folder = None, files = None, release_download = None): + + folder = os.path.normpath(folder) + + groups = fireEvent('scanner.scan', folder = folder, files = files, single = True) + + if groups: + for group in groups.values(): + if group.get('media'): + if release_download and release_download.get('release_id'): + fireEvent('release.add', group = group, update_id = release_download.get('release_id')) + else: + fireEvent('release.add', group = group) + + def getDiskSpace(self): + return getFreeSpace(self.directories()) + + +config = [{ + 'name': 'manage', + 'groups': [ + { + 'tab': 'manage', + 'label': 'Movie Library Manager', + 'description': 'Add your existing movie folders.', + 'options': [ + { + 'name': 'enabled', + 'default': False, + 'type': 'enabler', + }, + { + 'name': 'library', + 'type': 'directories', + 'description': 'Folder where the movies should be moved to.', + }, + { + 'label': 'Cleanup After', + 'name': 'cleanup', + 'type': 'bool', + 'description': 'Remove movie from db if it can\'t be found after re-scan.', + 'default': True, + }, + { + 'label': 'Scan at startup', + 'name': 'startup_scan', + 'type': 'bool', + 'default': True, + 'advanced': True, + 'description': 'Do a quick scan on startup. On slow systems better disable this.', + }, + { + 'label': 'Full library refresh', + 'name': 'library_refresh_interval', + 'type': 'int', + 'default': 0, + 'advanced': True, + 'description': 'Do a full scan every X hours. 
(0 is disabled)', + }, + ], + }, + ], +}] diff --git a/couchpotato/core/plugins/manage/__init__.py b/couchpotato/core/plugins/manage/__init__.py deleted file mode 100644 index 30f6ea686d..0000000000 --- a/couchpotato/core/plugins/manage/__init__.py +++ /dev/null @@ -1,34 +0,0 @@ -from .main import Manage - -def start(): - return Manage() - -config = [{ - 'name': 'manage', - 'groups': [ - { - 'tab': 'manage', - 'label': 'movie library manager', - 'description': 'Add your existing movie folders.', - 'options': [ - { - 'name': 'enabled', - 'default': False, - 'type': 'enabler', - }, - { - 'name': 'library', - 'type': 'directories', - 'description': 'Folder where the movies should be moved to.', - }, - { - 'label': 'Cleanup After', - 'name': 'cleanup', - 'type': 'bool', - 'description': 'Remove movie from db if it can\'t be found after re-scan.', - 'default': True, - }, - ], - }, - ], -}] diff --git a/couchpotato/core/plugins/manage/main.py b/couchpotato/core/plugins/manage/main.py deleted file mode 100644 index 51094899d4..0000000000 --- a/couchpotato/core/plugins/manage/main.py +++ /dev/null @@ -1,248 +0,0 @@ -from couchpotato.api import addApiView -from couchpotato.core.event import fireEvent, addEvent, fireEventAsync -from couchpotato.core.helpers.encoding import ss -from couchpotato.core.helpers.request import jsonified, getParam -from couchpotato.core.helpers.variable import splitString, getTitle -from couchpotato.core.logger import CPLog -from couchpotato.core.plugins.base import Plugin -from couchpotato.environment import Env -import ctypes -import os -import sys -import time -import traceback - - -log = CPLog(__name__) - -class Manage(Plugin): - - in_progress = False - - def __init__(self): - - fireEvent('scheduler.interval', identifier = 'manage.update_library', handle = self.updateLibrary, hours = 2) - - addEvent('manage.update', self.updateLibrary) - addEvent('manage.diskspace', self.getDiskSpace) - - # Add files after renaming - def after_rename(message = 
None, group = {}): - return self.scanFilesToLibrary(folder = group['destination_dir'], files = group['renamed_files']) - addEvent('renamer.after', after_rename, priority = 110) - - addApiView('manage.update', self.updateLibraryView, docs = { - 'desc': 'Update the library by scanning for new movies', - 'params': { - 'full': {'desc': 'Do a full update or just recently changed/added movies.'}, - } - }) - - addApiView('manage.progress', self.getProgress, docs = { - 'desc': 'Get the progress of current manage update', - 'return': {'type': 'object', 'example': """{ - 'progress': False || object, total & to_go, -}"""}, - }) - - if not Env.get('dev'): - def updateLibrary(): - self.updateLibrary(full = False) - addEvent('app.load', updateLibrary) - - def getProgress(self): - return jsonified({ - 'progress': self.in_progress - }) - - def updateLibraryView(self): - - full = getParam('full', default = 1) - fireEventAsync('manage.update', full = True if full == '1' else False) - - return jsonified({ - 'success': True - }) - - - def updateLibrary(self, full = True): - last_update = float(Env.prop('manage.last_update', default = 0)) - - if self.in_progress: - log.info('Already updating library: %s', self.in_progress) - return - elif self.isDisabled() or (last_update > time.time() - 20): - return - - self.in_progress = {} - fireEvent('notify.frontend', type = 'manage.updating', data = True) - - try: - - directories = self.directories() - added_identifiers = [] - - # Add some progress - self.in_progress = {} - for directory in directories: - self.in_progress[os.path.normpath(directory)] = { - 'total': None, - 'to_go': None, - } - - for directory in directories: - folder = os.path.normpath(directory) - - if not os.path.isdir(folder): - if len(directory) > 0: - log.error('Directory doesn\'t exist: %s', folder) - continue - - log.info('Updating manage library: %s', folder) - fireEvent('notify.frontend', type = 'manage.update', data = True, message = 'Scanning for movies in "%s"' % 
folder) - - onFound = self.createAddToLibrary(folder, added_identifiers) - fireEvent('scanner.scan', folder = folder, simple = True, newer_than = last_update if not full else 0, on_found = onFound, single = True) - - # Break if CP wants to shut down - if self.shuttingDown(): - break - - # If cleanup option is enabled, remove offline files from database - if self.conf('cleanup') and full and not self.shuttingDown(): - - # Get movies with done status - total_movies, done_movies = fireEvent('movie.list', status = 'done', single = True) - - for done_movie in done_movies: - if done_movie['library']['identifier'] not in added_identifiers: - fireEvent('movie.delete', movie_id = done_movie['id'], delete_from = 'all') - else: - - for release in done_movie.get('releases', []): - if len(release.get('files', [])) == 0: - fireEvent('release.delete', release['id']) - else: - for release_file in release.get('files', []): - # Remove release not available anymore - if not os.path.isfile(ss(release_file['path'])): - fireEvent('release.clean', release['id']) - break - - # Check if there are duplicate releases (different quality) use the last one, delete the rest - if len(done_movie.get('releases', [])) > 1: - used_files = {} - for release in done_movie.get('releases', []): - - for release_file in release.get('files', []): - already_used = used_files.get(release_file['path']) - - if already_used: - if already_used < release['id']: - fireEvent('release.delete', release['id'], single = True) # delete this one - else: - fireEvent('release.delete', already_used, single = True) # delete previous one - break - else: - used_files[release_file['path']] = release.get('id') - del used_files - - Env.prop('manage.last_update', time.time()) - except: - log.error('Failed updating library: %s', (traceback.format_exc())) - - while True and not self.shuttingDown(): - - delete_me = {} - - for folder in self.in_progress: - if self.in_progress[folder]['to_go'] <= 0: - delete_me[folder] = True - - for 
delete in delete_me: - del self.in_progress[delete] - - if len(self.in_progress) == 0: - break - - time.sleep(1) - - fireEvent('notify.frontend', type = 'manage.updating', data = False) - self.in_progress = False - - def createAddToLibrary(self, folder, added_identifiers = []): - def addToLibrary(group, total_found, to_go): - if self.in_progress[folder]['total'] is None: - self.in_progress[folder] = { - 'total': total_found, - 'to_go': total_found, - } - - if group['library'] and group['library'].get('identifier'): - identifier = group['library'].get('identifier') - added_identifiers.append(identifier) - - # Add it to release and update the info - fireEvent('release.add', group = group) - fireEventAsync('library.update', identifier = identifier, on_complete = self.createAfterUpdate(folder, identifier)) - - return addToLibrary - - def createAfterUpdate(self, folder, identifier): - - # Notify frontend - def afterUpdate(): - self.in_progress[folder]['to_go'] = self.in_progress[folder]['to_go'] - 1 - total = self.in_progress[folder]['total'] - movie_dict = fireEvent('movie.get', identifier, single = True) - - fireEvent('notify.frontend', type = 'movie.added', data = movie_dict, message = None if total > 5 else 'Added "%s" to manage.' 
% getTitle(movie_dict['library'])) - - return afterUpdate - - def directories(self): - try: - if self.conf('library', default = '').strip(): - return splitString(self.conf('library', default = ''), '::') - except: - pass - - return [] - - def scanFilesToLibrary(self, folder = None, files = None): - - folder = os.path.normpath(folder) - - groups = fireEvent('scanner.scan', folder = folder, files = files, single = True) - - for group in groups.itervalues(): - if group['library'] and group['library'].get('identifier'): - fireEvent('release.add', group = group) - - def getDiskSpace(self): - - free_space = {} - for folder in self.directories(): - - size = None - if os.path.isdir(folder): - if os.name == 'nt': - _, total, free = ctypes.c_ulonglong(), ctypes.c_ulonglong(), \ - ctypes.c_ulonglong() - if sys.version_info >= (3,) or isinstance(folder, unicode): - fun = ctypes.windll.kernel32.GetDiskFreeSpaceExW #@UndefinedVariable - else: - fun = ctypes.windll.kernel32.GetDiskFreeSpaceExA #@UndefinedVariable - ret = fun(folder, ctypes.byref(_), ctypes.byref(total), ctypes.byref(free)) - if ret == 0: - raise ctypes.WinError() - used = total.value - free.value - return [total.value, used, free.value] - else: - s = os.statvfs(folder) - size = [s.f_blocks * s.f_frsize / (1024 * 1024), (s.f_bavail * s.f_frsize) / (1024 * 1024)] - - free_space[folder] = size - - return free_space - diff --git a/couchpotato/core/plugins/movie/__init__.py b/couchpotato/core/plugins/movie/__init__.py deleted file mode 100644 index 4df29ad88a..0000000000 --- a/couchpotato/core/plugins/movie/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -from .main import MoviePlugin - -def start(): - return MoviePlugin() - -config = [] diff --git a/couchpotato/core/plugins/movie/main.py b/couchpotato/core/plugins/movie/main.py deleted file mode 100644 index c708ec4e04..0000000000 --- a/couchpotato/core/plugins/movie/main.py +++ /dev/null @@ -1,587 +0,0 @@ -from couchpotato import get_session -from couchpotato.api import 
addApiView -from couchpotato.core.event import fireEvent, fireEventAsync, addEvent -from couchpotato.core.helpers.encoding import toUnicode, simplifyString -from couchpotato.core.helpers.request import getParams, jsonified, getParam -from couchpotato.core.helpers.variable import getImdb, splitString -from couchpotato.core.logger import CPLog -from couchpotato.core.plugins.base import Plugin -from couchpotato.core.settings.model import Library, LibraryTitle, Movie, \ - Release -from couchpotato.environment import Env -from sqlalchemy.orm import joinedload_all -from sqlalchemy.sql.expression import or_, asc, not_, desc -from string import ascii_lowercase - -log = CPLog(__name__) - - -class MoviePlugin(Plugin): - - default_dict = { - 'profile': {'types': {'quality': {}}}, - 'releases': {'status': {}, 'quality': {}, 'files':{}, 'info': {}}, - 'library': {'titles': {}, 'files':{}}, - 'files': {}, - 'status': {} - } - - def __init__(self): - addApiView('movie.search', self.search, docs = { - 'desc': 'Search the movie providers for a movie', - 'params': { - 'q': {'desc': 'The (partial) movie name you want to search for'}, - }, - 'return': {'type': 'object', 'example': """{ - 'success': True, - 'empty': bool, any movies returned or not, - 'movies': array, movies found, -}"""} - }) - addApiView('movie.list', self.listView, docs = { - 'desc': 'List movies in wanted list', - 'params': { - 'status': {'type': 'array or csv', 'desc': 'Filter movie by status. Example:"active,done"'}, - 'release_status': {'type': 'array or csv', 'desc': 'Filter movie by status of its releases. Example:"snatched,available"'}, - 'limit_offset': {'desc': 'Limit and offset the movie list. Examples: "50" or "50,30"'}, - 'starts_with': {'desc': 'Starts with these characters. 
Example: "a" returns all movies starting with the letter "a"'}, - 'search': {'desc': 'Search movie title'}, - }, - 'return': {'type': 'object', 'example': """{ - 'success': True, - 'empty': bool, any movies returned or not, - 'movies': array, movies found, -}"""} - }) - addApiView('movie.get', self.getView, docs = { - 'desc': 'Get a movie by id', - 'params': { - 'id': {'desc': 'The id of the movie'}, - } - }) - addApiView('movie.refresh', self.refresh, docs = { - 'desc': 'Refresh a movie by id', - 'params': { - 'id': {'desc': 'Movie ID(s) you want to refresh.', 'type': 'int (comma separated)'}, - } - }) - addApiView('movie.available_chars', self.charView) - addApiView('movie.add', self.addView, docs = { - 'desc': 'Add new movie to the wanted list', - 'params': { - 'identifier': {'desc': 'IMDB id of the movie your want to add.'}, - 'profile_id': {'desc': 'ID of quality profile you want the add the movie in. If empty will use the default profile.'}, - 'title': {'desc': 'Movie title to use for searches. Has to be one of the titles returned by movie.search.'}, - } - }) - addApiView('movie.edit', self.edit, docs = { - 'desc': 'Add new movie to the wanted list', - 'params': { - 'id': {'desc': 'Movie ID(s) you want to edit.', 'type': 'int (comma separated)'}, - 'profile_id': {'desc': 'ID of quality profile you want the edit the movie to.'}, - 'default_title': {'desc': 'Movie title to use for searches. 
Has to be one of the titles returned by movie.search.'}, - } - }) - addApiView('movie.delete', self.deleteView, docs = { - 'desc': 'Delete a movie from the wanted list', - 'params': { - 'id': {'desc': 'Movie ID(s) you want to delete.', 'type': 'int (comma separated)'}, - 'delete_from': {'desc': 'Delete movie from this page', 'type': 'string: all (default), wanted, manage'}, - } - }) - - addEvent('movie.add', self.add) - addEvent('movie.delete', self.delete) - addEvent('movie.get', self.get) - addEvent('movie.list', self.list) - addEvent('movie.restatus', self.restatus) - - addEvent('app.load', self.cleanReleases) - - def cleanReleases(self): - - prop_name = 'cleaned_releases' - already_cleaned = Env.prop(prop_name, default = False) - if already_cleaned: - return True - - log.info('Removing releases from library movies') - - db = get_session() - - movies = db.query(Movie).all() - - done_status = fireEvent('status.get', 'done', single = True) - available_status = fireEvent('status.get', 'available', single = True) - snatched_status = fireEvent('status.get', 'snatched', single = True) - - for movie in movies: - if movie.status_id == done_status.get('id'): - for rel in movie.releases: - if rel.status_id in [available_status.get('id'), snatched_status.get('id')]: - fireEvent('release.delete', id = rel.id, single = True) - - Env.prop(prop_name, True) - - def getView(self): - - movie_id = getParam('id') - movie = self.get(movie_id) if movie_id else None - - return jsonified({ - 'success': movie is not None, - 'movie': movie, - }) - - def get(self, movie_id): - - db = get_session() - - imdb_id = getImdb(str(movie_id)) - - if(imdb_id): - m = db.query(Movie).filter(Movie.library.has(identifier = imdb_id)).first() - else: - m = db.query(Movie).filter_by(id = movie_id).first() - - results = None - if m: - results = m.to_dict(self.default_dict) - - return results - - def list(self, status = None, release_status = None, limit_offset = None, starts_with = None, search = None, 
order = None): - - db = get_session() - - # Make a list from string - if status and not isinstance(status, (list, tuple)): - status = [status] - if release_status and not isinstance(release_status, (list, tuple)): - release_status = [release_status] - - q = db.query(Movie) \ - .outerjoin(Movie.releases, Movie.library, Library.titles) \ - .filter(LibraryTitle.default == True) \ - .group_by(Movie.id) - - # Filter on movie status - if status and len(status) > 0: - q = q.filter(or_(*[Movie.status.has(identifier = s) for s in status])) - - # Filter on release status - if release_status and len(release_status) > 0: - q = q.filter(or_(*[Release.status.has(identifier = s) for s in release_status])) - - total_count = q.count() - - filter_or = [] - if starts_with: - starts_with = toUnicode(starts_with.lower()) - if starts_with in ascii_lowercase: - filter_or.append(LibraryTitle.simple_title.startswith(starts_with)) - else: - ignore = [] - for letter in ascii_lowercase: - ignore.append(LibraryTitle.simple_title.startswith(toUnicode(letter))) - filter_or.append(not_(or_(*ignore))) - - if search: - filter_or.append(LibraryTitle.simple_title.like('%%' + search + '%%')) - - if filter_or: - q = q.filter(or_(*filter_or)) - - if order == 'release_order': - q = q.order_by(desc(Release.last_edit)) - else: - q = q.order_by(asc(LibraryTitle.simple_title)) - - q = q.subquery() - q2 = db.query(Movie).join((q, q.c.id == Movie.id)) \ - .options(joinedload_all('releases')) \ - .options(joinedload_all('profile.types')) \ - .options(joinedload_all('library.titles')) \ - .options(joinedload_all('library.files')) \ - .options(joinedload_all('status')) \ - .options(joinedload_all('files')) - - if limit_offset: - splt = splitString(limit_offset) if isinstance(limit_offset, (str, unicode)) else limit_offset - limit = splt[0] - offset = 0 if len(splt) is 1 else splt[1] - q2 = q2.limit(limit).offset(offset) - - results = q2.all() - movies = [] - for movie in results: - temp = movie.to_dict({ - 
'profile': {'types': {}}, - 'releases': {'files':{}, 'info': {}}, - 'library': {'titles': {}, 'files':{}}, - 'files': {}, - }) - movies.append(temp) - - #db.close() - return (total_count, movies) - - def availableChars(self, status = None, release_status = None): - - chars = '' - - db = get_session() - - # Make a list from string - if not isinstance(status, (list, tuple)): - status = [status] - if release_status and not isinstance(release_status, (list, tuple)): - release_status = [release_status] - - q = db.query(Movie) \ - .outerjoin(Movie.releases, Movie.library, Library.titles, Movie.status) \ - .options(joinedload_all('library.titles')) - - # Filter on movie status - if status and len(status) > 0: - q = q.filter(or_(*[Movie.status.has(identifier = s) for s in status])) - - # Filter on release status - if release_status and len(release_status) > 0: - q = q.filter(or_(*[Release.status.has(identifier = s) for s in release_status])) - - results = q.all() - - for movie in results: - char = movie.library.titles[0].simple_title[0] - char = char if char in ascii_lowercase else '#' - if char not in chars: - chars += str(char) - - #db.close() - return ''.join(sorted(chars, key = str.lower)) - - def listView(self): - - params = getParams() - status = splitString(params.get('status', None)) - release_status = splitString(params.get('release_status', None)) - limit_offset = params.get('limit_offset', None) - starts_with = params.get('starts_with', None) - search = params.get('search', None) - order = params.get('order', None) - - total_movies, movies = self.list( - status = status, - release_status = release_status, - limit_offset = limit_offset, - starts_with = starts_with, - search = search, - order = order - ) - - return jsonified({ - 'success': True, - 'empty': len(movies) == 0, - 'total': total_movies, - 'movies': movies, - }) - - def charView(self): - - params = getParams() - status = splitString(params.get('status', None)) - release_status = 
splitString(params.get('release_status', None)) - chars = self.availableChars(status, release_status) - - return jsonified({ - 'success': True, - 'empty': len(chars) == 0, - 'chars': chars, - }) - - def refresh(self): - - db = get_session() - - for id in splitString(getParam('id')): - movie = db.query(Movie).filter_by(id = id).first() - - if movie: - - # Get current selected title - default_title = '' - for title in movie.library.titles: - if title.default: default_title = title.title - - fireEvent('notify.frontend', type = 'movie.busy.%s' % id, data = True, message = 'Updating "%s"' % default_title) - fireEventAsync('library.update', identifier = movie.library.identifier, default_title = default_title, force = True, on_complete = self.createOnComplete(id)) - - - #db.close() - return jsonified({ - 'success': True, - }) - - def search(self): - - q = getParam('q') - cache_key = u'%s/%s' % (__name__, simplifyString(q)) - movies = Env.get('cache').get(cache_key) - - if not movies: - - if getImdb(q): - movies = [fireEvent('movie.info', identifier = q, merge = True)] - else: - movies = fireEvent('movie.search', q = q, merge = True) - Env.get('cache').set(cache_key, movies) - - return jsonified({ - 'success': True, - 'empty': len(movies) == 0 if movies else 0, - 'movies': movies, - }) - - def add(self, params = {}, force_readd = True, search_after = True, update_library = False, status_id = None): - - if not params.get('identifier'): - msg = 'Can\'t add movie without imdb identifier.' - log.error(msg) - fireEvent('notify.frontend', type = 'movie.is_tvshow', message = msg) - return False - else: - try: - is_movie = fireEvent('movie.is_movie', identifier = params.get('identifier'), single = True) - if not is_movie: - msg = 'Can\'t add movie, seems to be a TV show.' 
- log.error(msg) - fireEvent('notify.frontend', type = 'movie.is_tvshow', message = msg) - return False - except: - pass - - - library = fireEvent('library.add', single = True, attrs = params, update_after = update_library) - - # Status - status_active = fireEvent('status.add', 'active', single = True) - status_snatched = fireEvent('status.add', 'snatched', single = True) - - default_profile = fireEvent('profile.default', single = True) - - db = get_session() - m = db.query(Movie).filter_by(library_id = library.get('id')).first() - added = True - do_search = False - if not m: - m = Movie( - library_id = library.get('id'), - profile_id = params.get('profile_id', default_profile.get('id')), - status_id = status_id if status_id else status_active.get('id'), - ) - db.add(m) - db.commit() - - onComplete = None - if search_after: - onComplete = self.createOnComplete(m.id) - - fireEventAsync('library.update', params.get('identifier'), default_title = params.get('title', ''), on_complete = onComplete) - search_after = False - elif force_readd: - # Clean snatched history - for release in m.releases: - if release.status_id == status_snatched.get('id'): - release.delete() - - m.profile_id = params.get('profile_id', default_profile.get('id')) - else: - log.debug('Movie already exists, not updating: %s', params) - added = False - - if force_readd: - m.status_id = status_id if status_id else status_active.get('id') - do_search = True - - db.commit() - - # Remove releases - available_status = fireEvent('status.get', 'available', single = True) - for rel in m.releases: - if rel.status_id is available_status.get('id'): - db.delete(rel) - db.commit() - - movie_dict = m.to_dict(self.default_dict) - - if do_search and search_after: - onComplete = self.createOnComplete(m.id) - onComplete() - - if added: - fireEvent('notify.frontend', type = 'movie.added', data = movie_dict, message = 'Successfully added "%s" to your wanted list.' 
% params.get('title', '')) - - #db.close() - return movie_dict - - - def addView(self): - - params = getParams() - - movie_dict = self.add(params) - - return jsonified({ - 'success': True, - 'added': True if movie_dict else False, - 'movie': movie_dict, - }) - - def edit(self): - - params = getParams() - db = get_session() - - available_status = fireEvent('status.get', 'available', single = True) - - ids = splitString(params.get('id')) - for movie_id in ids: - - m = db.query(Movie).filter_by(id = movie_id).first() - if not m: - continue - - m.profile_id = params.get('profile_id') - - # Remove releases - for rel in m.releases: - if rel.status_id is available_status.get('id'): - db.delete(rel) - db.commit() - - # Default title - if params.get('default_title'): - for title in m.library.titles: - title.default = toUnicode(params.get('default_title', '')).lower() == toUnicode(title.title).lower() - - db.commit() - - fireEvent('movie.restatus', m.id) - - movie_dict = m.to_dict(self.default_dict) - fireEventAsync('searcher.single', movie_dict, on_complete = self.createNotifyFront(movie_id)) - - #db.close() - return jsonified({ - 'success': True, - }) - - def deleteView(self): - - params = getParams() - - ids = splitString(params.get('id')) - for movie_id in ids: - self.delete(movie_id, delete_from = params.get('delete_from', 'all')) - - return jsonified({ - 'success': True, - }) - - def delete(self, movie_id, delete_from = None): - - db = get_session() - - movie = db.query(Movie).filter_by(id = movie_id).first() - if movie: - deleted = False - if delete_from == 'all': - db.delete(movie) - db.commit() - deleted = True - else: - done_status = fireEvent('status.get', 'done', single = True) - - total_releases = len(movie.releases) - total_deleted = 0 - new_movie_status = None - for release in movie.releases: - if delete_from in ['wanted', 'snatched']: - if release.status_id != done_status.get('id'): - db.delete(release) - total_deleted += 1 - new_movie_status = 'done' - elif 
delete_from == 'manage': - if release.status_id == done_status.get('id'): - db.delete(release) - total_deleted += 1 - new_movie_status = 'active' - db.commit() - - if total_releases == total_deleted: - db.delete(movie) - db.commit() - deleted = True - elif new_movie_status: - new_status = fireEvent('status.get', new_movie_status, single = True) - movie.profile_id = None - movie.status_id = new_status.get('id') - db.commit() - else: - fireEvent('movie.restatus', movie.id, single = True) - - if deleted: - fireEvent('notify.frontend', type = 'movie.deleted', data = movie.to_dict()) - - #db.close() - return True - - def restatus(self, movie_id): - - active_status = fireEvent('status.get', 'active', single = True) - done_status = fireEvent('status.get', 'done', single = True) - - db = get_session() - - m = db.query(Movie).filter_by(id = movie_id).first() - if not m or len(m.library.titles) == 0: - log.debug('Can\'t restatus movie, doesn\'t seem to exist.') - return False - - log.debug('Changing status for %s', (m.library.titles[0].title)) - if not m.profile: - m.status_id = done_status.get('id') - else: - move_to_wanted = True - - for t in m.profile.types: - for release in m.releases: - if t.quality.identifier is release.quality.identifier and (release.status_id is done_status.get('id') and t.finish): - move_to_wanted = False - - m.status_id = active_status.get('id') if move_to_wanted else done_status.get('id') - - db.commit() - #db.close() - - return True - - def createOnComplete(self, movie_id): - - def onComplete(): - db = get_session() - movie = db.query(Movie).filter_by(id = movie_id).first() - fireEventAsync('searcher.single', movie.to_dict(self.default_dict), on_complete = self.createNotifyFront(movie_id)) - - return onComplete - - - def createNotifyFront(self, movie_id): - - def notifyFront(): - db = get_session() - movie = db.query(Movie).filter_by(id = movie_id).first() - fireEvent('notify.frontend', type = 'movie.update.%s' % movie.id, data = 
movie.to_dict(self.default_dict)) - - return notifyFront diff --git a/couchpotato/core/plugins/movie/static/list.js b/couchpotato/core/plugins/movie/static/list.js deleted file mode 100644 index 9e76fad3da..0000000000 --- a/couchpotato/core/plugins/movie/static/list.js +++ /dev/null @@ -1,534 +0,0 @@ -var MovieList = new Class({ - - Implements: [Options], - - options: { - navigation: true, - limit: 50, - load_more: true, - menu: [], - add_new: false - }, - - movies: [], - movies_added: {}, - letters: {}, - filter: null, - - initialize: function(options){ - var self = this; - self.setOptions(options); - - self.offset = 0; - self.filter = self.options.filter || { - 'startswith': null, - 'search': null - } - - self.el = new Element('div.movies').adopt( - self.title = self.options.title ? new Element('h2', { - 'text': self.options.title - }) : null, - self.movie_list = new Element('div'), - self.load_more = self.options.load_more ? new Element('a.load_more', { - 'events': { - 'click': self.loadMore.bind(self) - } - }) : null - ); - - self.changeView(self.options.view || 'details'); - - self.getMovies(); - - App.addEvent('movie.added', self.movieAdded.bind(self)) - App.addEvent('movie.deleted', self.movieDeleted.bind(self)) - }, - - movieDeleted: function(notification){ - var self = this; - - if(self.movies_added[notification.data.id]){ - self.movies.each(function(movie){ - if(movie.get('id') == notification.data.id){ - movie.destroy(); - delete self.movies_added[notification.data.id] - } - }) - } - - self.checkIfEmpty(); - }, - - movieAdded: function(notification){ - var self = this; - - if(self.options.add_new && !self.movies_added[notification.data.id] && notification.data.status.identifier == self.options.status){ - window.scroll(0,0); - self.createMovie(notification.data, 'top'); - - self.checkIfEmpty(); - } - }, - - create: function(){ - var self = this; - - // Create the alphabet nav - if(self.options.navigation) - self.createNavigation(); - - 
if(self.options.load_more) - self.scrollspy = new ScrollSpy({ - min: function(){ - var c = self.load_more.getCoordinates() - return c.top - window.document.getSize().y - 300 - }, - onEnter: self.loadMore.bind(self) - }); - - self.created = true; - }, - - addMovies: function(movies, total){ - var self = this; - - if(!self.created) self.create(); - - // do scrollspy - if(movies.length < self.options.limit && self.scrollspy){ - self.load_more.hide(); - self.scrollspy.stop(); - } - - Object.each(movies, function(movie){ - self.createMovie(movie); - }); - - self.total_movies = total; - self.setCounter(total); - - }, - - setCounter: function(count){ - var self = this; - - if(!self.navigation_counter) return; - - self.navigation_counter.set('text', (count || 0)); - - }, - - createMovie: function(movie, inject_at){ - var self = this; - - // Attach proper actions - var a = self.options.actions, - status = Status.get(movie.status_id), - actions = a ? a[status.identifier.capitalize()] || a.Wanted : {}; - - var m = new Movie(self, { - 'actions': actions, - 'view': self.current_view, - 'onSelect': self.calculateSelected.bind(self) - }, movie); - $(m).inject(self.movie_list, inject_at || 'bottom'); - m.fireEvent('injected'); - - self.movies.include(m) - self.movies_added[movie.id] = true; - }, - - createNavigation: function(){ - var self = this; - var chars = '#ABCDEFGHIJKLMNOPQRSTUVWXYZ'; - - self.current_view = self.getSavedView(); - self.el.addClass(self.current_view+'_list') - - self.navigation = new Element('div.alph_nav').adopt( - self.navigation_actions = new Element('ul.inlay.actions.reversed'), - self.navigation_counter = new Element('span.counter[title=Total]'), - self.navigation_alpha = new Element('ul.numbers', { - 'events': { - 'click:relay(li)': function(e, el){ - self.movie_list.empty() - self.activateLetter(el.get('data-letter')) - self.getMovies() - } - } - }), - self.navigation_search_input = new Element('input.inlay', { - 'placeholder': 'Search', - 'events': { 
- 'keyup': self.search.bind(self), - 'change': self.search.bind(self) - } - }), - self.navigation_menu = new Block.Menu(self), - self.mass_edit_form = new Element('div.mass_edit_form').adopt( - new Element('span.select').adopt( - self.mass_edit_select = new Element('input[type=checkbox].inlay', { - 'events': { - 'change': self.massEditToggleAll.bind(self) - } - }), - self.mass_edit_selected = new Element('span.count', {'text': 0}), - self.mass_edit_selected_label = new Element('span', {'text': 'selected'}) - ), - new Element('div.quality').adopt( - self.mass_edit_quality = new Element('select'), - new Element('a.button.orange', { - 'text': 'Change quality', - 'events': { - 'click': self.changeQualitySelected.bind(self) - } - }) - ), - new Element('div.delete').adopt( - new Element('span[text=or]'), - new Element('a.button.red', { - 'text': 'Delete', - 'events': { - 'click': self.deleteSelected.bind(self) - } - }) - ), - new Element('div.refresh').adopt( - new Element('span[text=or]'), - new Element('a.button.green', { - 'text': 'Refresh', - 'events': { - 'click': self.refreshSelected.bind(self) - } - }) - ) - ) - ).inject(self.el, 'top'); - - // Mass edit - self.mass_edit_select_class = new Form.Check(self.mass_edit_select); - Quality.getActiveProfiles().each(function(profile){ - new Element('option', { - 'value': profile.id ? profile.id : profile.data.id, - 'text': profile.label ? profile.label : profile.data.label - }).inject(self.mass_edit_quality) - }); - - // Actions - ['mass_edit', 'details', 'list'].each(function(view){ - self.navigation_actions.adopt( - new Element('li.'+view+(self.current_view == view ? 
'.active' : '')+'[data-view='+view+']', { - 'events': { - 'click': function(e){ - var a = 'active'; - self.navigation_actions.getElements('.'+a).removeClass(a); - self.changeView(this.get('data-view')); - this.addClass(a); - } - } - }).adopt(new Element('span')) - ) - }); - - // All - self.letters['all'] = new Element('li.letter_all.available.active', { - 'text': 'ALL', - }).inject(self.navigation_alpha); - - // Chars - chars.split('').each(function(c){ - self.letters[c] = new Element('li', { - 'text': c, - 'class': 'letter_'+c, - 'data-letter': c - }).inject(self.navigation_alpha); - }); - - // Get available chars and highlight - Api.request('movie.available_chars', { - 'data': Object.merge({ - 'status': self.options.status - }, self.filter), - 'onComplete': function(json){ - - json.chars.split('').each(function(c){ - self.letters[c.capitalize()].addClass('available') - }) - - } - }); - - // Add menu or hide - if (self.options.menu.length > 0) - self.options.menu.each(function(menu_item){ - self.navigation_menu.addLink(menu_item); - }) - else - self.navigation_menu.hide() - - self.nav_scrollspy = new ScrollSpy({ - min: 10, - onEnter: function(){ - self.navigation.addClass('float') - }, - onLeave: function(){ - self.navigation.removeClass('float') - } - }); - - }, - - calculateSelected: function(){ - var self = this; - - var selected = 0, - movies = self.movies.length; - self.movies.each(function(movie){ - selected += movie.isSelected() ? 1 : 0 - }) - - var indeterminate = selected > 0 && selected < movies, - checked = selected == movies && selected > 0; - - self.mass_edit_select.set('indeterminate', indeterminate) - - self.mass_edit_select_class[checked ? 'check' : 'uncheck']() - self.mass_edit_select_class.element[indeterminate ? 'addClass' : 'removeClass']('indeterminate') - - self.mass_edit_selected.set('text', selected); - }, - - deleteSelected: function(){ - var self = this, - ids = self.getSelectedMovies(), - help_msg = self.identifier == 'wanted' ? 
'If you do, you won\'t be able to watch them, as they won\'t get downloaded!' : 'Your files will be safe, this will only delete the reference from the CouchPotato manage list'; - - var qObj = new Question('Are you sure you want to delete '+ids.length+' movie'+ (ids.length != 1 ? 's' : '') +'?', help_msg, [{ - 'text': 'Yes, delete '+(ids.length != 1 ? 'them' : 'it'), - 'class': 'delete', - 'events': { - 'click': function(e){ - (e).preventDefault(); - this.set('text', 'Deleting..') - Api.request('movie.delete', { - 'data': { - 'id': ids.join(','), - 'delete_from': self.options.identifier - }, - 'onSuccess': function(){ - qObj.close(); - - var erase_movies = []; - self.movies.each(function(movie){ - if (movie.isSelected()){ - $(movie).destroy() - erase_movies.include(movie) - } - }); - - erase_movies.each(function(movie){ - self.movies.erase(movie); - - movie.destroy() - }); - - self.calculateSelected(); - } - }); - - } - } - }, { - 'text': 'Cancel', - 'cancel': true - }]); - - }, - - changeQualitySelected: function(){ - var self = this; - var ids = self.getSelectedMovies() - - Api.request('movie.edit', { - 'data': { - 'id': ids.join(','), - 'profile_id': self.mass_edit_quality.get('value') - }, - 'onSuccess': self.search.bind(self) - }); - }, - - refreshSelected: function(){ - var self = this; - var ids = self.getSelectedMovies() - - Api.request('movie.refresh', { - 'data': { - 'id': ids.join(','), - } - }); - }, - - getSelectedMovies: function(){ - var self = this; - - var ids = [] - self.movies.each(function(movie){ - if (movie.isSelected()) - ids.include(movie.get('id')) - }); - - return ids - }, - - massEditToggleAll: function(){ - var self = this; - - var select = self.mass_edit_select.get('checked'); - - self.movies.each(function(movie){ - movie.select(select) - }); - - self.calculateSelected() - }, - - reset: function(){ - var self = this; - - self.movies = [] - self.calculateSelected() - self.navigation_alpha.getElements('.active').removeClass('active') - 
self.offset = 0; - if(self.scrollspy){ - self.load_more.show(); - self.scrollspy.start(); - } - }, - - activateLetter: function(letter){ - var self = this; - - self.reset() - - self.letters[letter || 'all'].addClass('active'); - self.filter.starts_with = letter; - - }, - - changeView: function(new_view){ - var self = this; - - self.el - .removeClass(self.current_view+'_list') - .addClass(new_view+'_list') - - self.current_view = new_view; - Cookie.write(self.options.identifier+'_view', new_view, {duration: 1000}); - }, - - getSavedView: function(){ - var self = this; - return Cookie.read(self.options.identifier+'_view') || 'thumbs'; - }, - - search: function(){ - var self = this; - - if(self.search_timer) clearTimeout(self.search_timer); - self.search_timer = (function(){ - var search_value = self.navigation_search_input.get('value'); - if (search_value == self.last_search_value) return - - self.reset() - - self.activateLetter(); - self.filter.search = search_value; - - self.movie_list.empty(); - self.getMovies(); - - self.last_search_value = search_value; - - }).delay(250); - - }, - - update: function(){ - var self = this; - - self.reset(); - self.movie_list.empty(); - self.getMovies(); - }, - - getMovies: function(){ - var self = this; - - if(self.scrollspy){ - self.scrollspy.stop(); - self.load_more.set('text', 'loading...'); - } - - Api.request(self.options.api_call || 'movie.list', { - 'data': Object.merge({ - 'status': self.options.status, - 'limit_offset': self.options.limit + ',' + self.offset - }, self.filter), - 'onComplete': function(json){ - self.store(json.movies); - self.addMovies(json.movies, json.total); - if(self.scrollspy) { - self.load_more.set('text', 'load more movies'); - self.scrollspy.start(); - } - - self.checkIfEmpty() - } - }); - }, - - loadMore: function(){ - var self = this; - if(self.offset >= self.options.limit) - self.getMovies() - }, - - store: function(movies){ - var self = this; - - self.offset += movies.length; - - }, - - 
checkIfEmpty: function(){ - var self = this; - - var is_empty = self.movies.length == 0 && self.total_movies == 0; - - if(self.title) - self.title[is_empty ? 'hide' : 'show']() - - if(is_empty && self.options.on_empty_element){ - self.el.grab(self.options.on_empty_element); - - if(self.navigation) - self.navigation.hide(); - - self.empty_element = self.options.on_empty_element; - } - else if(self.empty_element){ - self.empty_element.destroy(); - - if(self.navigation) - self.navigation.show(); - } - - }, - - toElement: function(){ - return this.el; - } - -}); \ No newline at end of file diff --git a/couchpotato/core/plugins/movie/static/movie.css b/couchpotato/core/plugins/movie/static/movie.css deleted file mode 100644 index 9d67e62826..0000000000 --- a/couchpotato/core/plugins/movie/static/movie.css +++ /dev/null @@ -1,726 +0,0 @@ -.movies { - padding: 60px 0 20px; -} - - .movies h2 { - margin-bottom: 20px; - } - - .movies.thumbs_list { - padding: 20px 0 20px; - } - - .home .movies { - padding-top: 6px; - } - - .movies.mass_edit_list { - padding-top: 90px; - } - - .movies .movie { - position: relative; - border-radius: 4px; - margin: 10px 0; - overflow: hidden; - width: 100%; - height: 180px; - transition: all 0.2s linear; - } - - .movies.list_list .movie:not(.details_view), - .movies.mass_edit_list .movie { - height: 32px; - } - - .movies.thumbs_list .movie { - width: 153px; - height: 230px; - display: inline-block; - margin: 0 8px 0 0; - } - .movies.thumbs_list .movie:nth-child(6n+6) { - margin: 0; - } - - .movies .movie .mask { - position: absolute; - top: 0; - left: 0; - height: 100%; - width: 100%; - } - - .movies.list_list .movie:not(.details_view), - .movies.mass_edit_list .movie { - margin: 1px 0; - border-radius: 0; - background: no-repeat; - box-shadow: none; - border-bottom: 1px solid rgba(255,255,255,0.05); - } - - .movies.list_list .movie:hover:not(.details_view), - .movies.mass_edit_list .movie { - background: rgba(255,255,255,0.03); - } - - .movies 
.data { - padding: 20px; - height: 100%; - width: 840px; - position: absolute; - right: 0; - border-radius: 0; - transition: all .6s cubic-bezier(0.9,0,0.1,1); - } - .movies.list_list .movie:not(.details_view) .data, - .movies.mass_edit_list .movie .data { - height: 30px; - padding: 3px 0 3px 10px; - width: 938px; - box-shadow: none; - border: 0; - background: none; - } - - .movies.thumbs_list .data { - left: 0; - width: 100%; - padding: 10px; - height: 100%; - background: none; - transition: none; - } - - .movies.thumbs_list .movie.no_thumbnail .data { background-image: linear-gradient(-30deg, rgba(255, 0, 85, .2) 0,rgba(125, 185, 235, .2) 100%); - } - .movies.thumbs_list .movie.no_thumbnail:nth-child(2n+6) .data { background-image: linear-gradient(-20deg, rgba(125, 0, 215, .2) 0, rgba(4, 55, 5, .7) 100%); } - .movies.thumbs_list .movie.no_thumbnail:nth-child(3n+6) .data { background-image: linear-gradient(-30deg, rgba(155, 0, 85, .2) 0,rgba(25, 185, 235, .7) 100%); } - .movies.thumbs_list .movie.no_thumbnail:nth-child(4n+6) .data { background-image: linear-gradient(-30deg, rgba(115, 5, 235, .2) 0, rgba(55, 180, 5, .7) 100%); } - .movies.thumbs_list .movie.no_thumbnail:nth-child(5n+6) .data { background-image: linear-gradient(-30deg, rgba(35, 15, 215, .2) 0, rgba(135, 215, 115, .7) 100%); } - .movies.thumbs_list .movie.no_thumbnail:nth-child(6n+6) .data { background-image: linear-gradient(-30deg, rgba(35, 15, 215, .2) 0, rgba(135, 15, 115, .7) 100%); } - - .movies.thumbs_list .movie:hover .data { - background: rgba(0,0,0,0.9); - } - - .movies .data.hide_right { - right: -100%; - } - - .movies .movie .check { - display: none; - } - - .movies.mass_edit_list .movie .check { - position: absolute; - left: 0; - top: 0; - display: block; - margin: 7px 0 0 5px; - } - - .movies .poster { - position: absolute; - left: 0; - width: 120px; - line-height: 0; - overflow: hidden; - height: 100%; - border-radius: 4px 0 0 4px; - transition: all .6s cubic-bezier(0.9,0,0.1,1); - - } 
- .movies.list_list .movie:not(.details_view) .poster, - .movies.mass_edit_list .poster { - width: 20px; - height: 30px; - border-radius: 1px 0 0 1px; - } - .movies.mass_edit_list .poster { - display: none; - } - - .movies.thumbs_list .poster { - width: 100%; - height: 100%; - } - - .movies .poster img, - .options .poster img { - width: 101%; - height: 101%; - } - - .movies .info { - position: relative; - height: 100%; - } - - .movies .info .title { - display: inline; - position: absolute; - font-size: 28px; - font-weight: bold; - margin-bottom: 10px; - left: 0; - top: 0; - width: 90%; - transition: all 0.2s linear; - } - .movies.list_list .movie:not(.details_view) .info .title, - .movies.mass_edit_list .info .title { - font-size: 16px; - font-weight: normal; - text-overflow: ellipsis; - width: auto; - overflow: hidden; - - } - - .movies.thumbs_list .movie:not(.no_thumbnail) .info { - display: none; - } - .movies.thumbs_list .movie:hover .info { - display: block; - } - - .movies.thumbs_list .info .title { - font-size: 21px; - text-shadow: 0 0 10px #000; - word-wrap: break-word; - } - - .movies .info .year { - position: absolute; - font-size: 30px; - margin-bottom: 10px; - color: #bbb; - width: 10%; - right: 0; - top: 0; - text-align: right; - transition: all 0.2s linear; - } - .movies.list_list .movie:not(.details_view) .info .year, - .movies.mass_edit_list .info .year { - font-size: 16px; - width: 6%; - right: 10px; - } - - .movies.thumbs_list .info .year { - font-size: 23px; - margin: 0; - bottom: 0; - left: 0; - top: auto; - right: auto; - color: #FFF; - text-shadow: none; - text-shadow: 0 0 6px #000; - } - - .movies .info .description { - position: absolute; - top: 30px; - clear: both; - height: 80px; - overflow: hidden; - } - .movies .data:hover .description { - overflow: auto; - } - .movies.list_list .movie:not(.details_view) .info .description, - .movies.mass_edit_list .info .description, - .movies.thumbs_list .info .description { - display: none; - } - - 
.movies .data .quality { - position: absolute; - bottom: 0; - display: block; - min-height: 20px; - vertical-align: mid; - } - - .movies .status_suggest .data .quality, - .movies.thumbs_list .data .quality { - display: none; - } - - .movies .data .quality span { - padding: 2px 3px; - font-weight: bold; - opacity: 0.5; - font-size: 10px; - height: 16px; - line-height: 12px; - vertical-align: middle; - display: inline-block; - text-transform: uppercase; - text-shadow: none; - font-weight: normal; - margin: 0 2px; - border-radius: 2px; - background-color: rgba(255,255,255,0.1); - } - .movies.list_list .data .quality, - .movies.mass_edit_list .data .quality { - text-align: right; - right: 0; - margin-right: 50px; - z-index: 2; - } - - .movies .data .quality .available, - .movies .data .quality .snatched { - opacity: 1; - box-shadow: 1px 1px 0 rgba(0,0,0,0.2); - cursor: pointer; - } - - .movies .data .quality .available { background-color: #578bc3; } - .movies .data .quality .snatched { background-color: #369545; } - .movies .data .quality .done { - background-color: #369545; - opacity: 1; - } - .movies .data .quality .finish { - background-image: url('../images/sprite.png'); - background-repeat: no-repeat; - background-position: 0 2px; - padding-left: 14px; - background-size: 14px - } - - .movies .data .actions { - position: absolute; - bottom: 20px; - right: 20px; - line-height: 0; - margin-top: -25px; - } - .movies .data:hover .action { opacity: 0.6; } - .movies .data:hover .action:hover { opacity: 1; } - .movies.mass_edit_list .data .actions { - display: none; - } - - .movies .data .action { - background-repeat: no-repeat; - background-position: center; - display: inline-block; - width: 26px; - height: 26px; - padding: 3px; - opacity: 0; - } - - .movies.list_list .movie:not(.details_view) .data:hover .actions, - .movies.mass_edit_list .data:hover .actions { - margin: 0; - background: #4e5969; - top: 2px; - bottom: 2px; - right: 5px; - z-index: 3; - } - - .movies 
.delete_container { - clear: both; - text-align: center; - font-size: 20px; - position: absolute; - padding: 70px 0 0; - width: 100%; - } - .movies .delete_container .cancel { - } - .movies .delete_container .or { - padding: 10px; - } - .movies .delete_container .delete { - background-color: #ff321c; - font-weight: normal; - } - .movies .delete_container .delete:hover { - color: #fff; - background-color: #d32917; - } - - .movies .options { - position: absolute; - margin-left: 120px; - width: 840px; - } - - .movies .options .form { - margin: 70px 20px 0; - float: left; - font-size: 20px; - } - - .movies .options .form select { - margin-right: 20px; - } - - .movies .options .table { - height: 180px; - overflow: auto; - } - .movies .options .table .item { - border-bottom: 1px solid rgba(255,255,255,0.1); - } - .movies .options .table .item.ignored span { - text-decoration: line-through; - color: rgba(255,255,255,0.4); - text-shadow: none; - } - .movies .options .table .item.ignored .delete { - background-image: url('../images/icon.undo.png'); - } - - .movies .options .table .item:last-child { border: 0; } - .movies .options .table .item:nth-child(even) { - background: rgba(255,255,255,0.05); - } - .movies .options .table .item:not(.head):hover { - background: rgba(255,255,255,0.03); - } - - .movies .options .table .item > * { - display: inline-block; - padding: 0 5px; - width: 60px; - min-height: 24px; - white-space: nowrap; - text-overflow: ellipsis; - text-align: center; - vertical-align: top; - border-left: 1px solid rgba(255, 255, 255, 0.1); - } - .movies .options .table .item > *:first-child { - border: 0; - } - .movies .options .table .provider { - width: 120px; - text-overflow: ellipsis; - overflow: hidden; - } - .movies .options .table .name { - width: 350px; - overflow: hidden; - text-align: left; - padding: 0 10px; - } - .movies .options .table.files .name { width: 590px; } - .movies .options .table .type { width: 130px; } - .movies .options .table 
.is_available { width: 90px; } - .movies .options .table .age, - .movies .options .table .size { width: 40px; } - - .movies .options .table a { - width: 30px !important; - height: 20px; - opacity: 0.8; - } - .movies .options .table a:hover { - opacity: 1; - } - - .movies .options .table .head > * { - font-weight: bold; - font-size: 14px; - padding-top: 4px; - padding-bottom: 4px; - height: auto; - } - - .movies .movie .trailer_container { - width: 100%; - background: #000; - text-align: center; - transition: all .6s cubic-bezier(0.9,0,0.1,1); - overflow: hidden; - } - .movies .movie .trailer_container.hide { - height: 0 !important; - } - - .movies .movie .hide_trailer { - position: absolute; - top: 0; - left: 50%; - margin-left: -50px; - width: 100px; - text-align: center; - padding: 3px 10px; - background: #4e5969; - border-radius: 0 0 2px 2px; - transition: all .2s cubic-bezier(0.9,0,0.1,1) .2s; - } - .movies .movie .hide_trailer.hide { - top: -30px; - } - - .movies .movie .try_container { - padding: 5px 10px; - text-align: center; - } - - .movies .movie .try_container a { - margin: 0 5px; - padding: 2px 5px; - } - - .movies .movie .releases .next_release { - border-left: 6px solid #2aa300; - } - - .movies .movie .releases .next_release > :first-child { - margin-left: -6px; - } - - .movies .movie .releases .last_release { - border-left: 6px solid #ffa200; - } - - .movies .movie .releases .last_release > :first-child { - margin-left: -6px; - } - - .movies .load_more { - display: block; - padding: 10px; - text-align: center; - font-size: 20px; - } - .movies .load_more.loading { - opacity: .5; - } - -.movies .alph_nav { - transition: box-shadow .4s linear; - position: fixed; - z-index: 2; - top: 0; - padding: 100px 60px 7px; - width: 1080px; - margin: 0 -60px; - box-shadow: 0 20px 20px -22px rgba(0,0,0,0.1); - background: #4e5969; -} - - .movies .alph_nav.float { - box-shadow: 0 30px 30px -32px rgba(0,0,0,0.5); - border-radius: 0; - } - -.movies .alph_nav 
ul.numbers, -.movies .alph_nav .counter, -.movies .alph_nav ul.actions { - list-style: none; - padding: 0 0 1px; - margin: 0; - float: left; - user-select: none; -} - - .movies .alph_nav .counter { - width: 60px; - text-align: center; - } - - .movies .alph_nav .numbers li, - .movies .alph_nav .actions li { - display: inline-block; - vertical-align: top; - width: 20px; - height: 24px; - line-height: 26px; - text-align: center; - cursor: pointer; - color: rgba(255,255,255,0.2); - border: 1px solid transparent; - transition: all 0.1s ease-in-out; - text-shadow: none; - } - .movies .alph_nav .numbers li:first-child { - width: 43px; - } - .movies .alph_nav li.available { - color: rgba(255,255,255,0.8); - font-weight: bolder; - - } - .movies .alph_nav li.active.available, .movies .alph_nav li.available:hover { - color: #fff; - font-size: 20px; - line-height: 20px; - } - - .movies .alph_nav input { - padding: 6px 5px; - margin: 0 0 0 6px; - float: left; - width: 155px; - height: 25px; - } - - .movies .alph_nav .actions { - margin: 0 6px 0 0; - -moz-user-select: none; - } - .movies .alph_nav .actions li { - border-radius: 1px; - width: auto; - } - .movies .alph_nav .actions li.active { - background: none; - border: 1px solid transparent; - box-shadow: none; - } - .movies .alph_nav .actions li span { - display: block; - background: url('../images/sprite.png') no-repeat; - width: 25px; - height: 100%; - } - - .movies .alph_nav .actions li.mass_edit span { - background-position: 3px 3px; - } - - .movies .alph_nav .actions li.list span { - background-position: 3px -95px; - } - - .movies .alph_nav .actions li.details span { - background-position: 3px -74px; - } - - .movies .alph_nav .actions li:first-child { - border-radius: 3px 0 0 3px; - } - .movies .alph_nav .actions li:last-child { - border-radius: 0 3px 3px 0; - } - - .movies .alph_nav .mass_edit_form { - clear: both; - text-align: center; - display: none; - } - .movies.mass_edit_list .mass_edit_form { - display: block; - 
} - .movies.mass_edit_list .mass_edit_form .select { - float: left; - margin: 5px 0 0 5px; - font-size: 14px; - } - .movies.mass_edit_list .mass_edit_form .select span { - vertical-align: middle; - opacity: 0.7; - } - .movies.mass_edit_list .mass_edit_form .select .count { - font-weight: bold; - margin: 0 3px 0 10px; - } - - .movies .alph_nav .mass_edit_form .quality { - float: left; - padding: 8px 0 0; - margin: 0 0 0 16px; - } - .movies .alph_nav .mass_edit_form .quality select { - width: 120px; - margin-right: 5px; - } - .movies .alph_nav .mass_edit_form .button { - padding: 3px 7px; - } - - .movies .alph_nav .mass_edit_form .refresh, - .movies .alph_nav .mass_edit_form .delete { - float: left; - padding: 8px 0 0 8px; - } - - .movies .alph_nav .mass_edit_form .refresh span, - .movies .alph_nav .mass_edit_form .delete span { - margin: 0 10px 0 0; - } - - .movies .alph_nav .more_menu { - margin-left: 48px; - } - - .movies .alph_nav .more_menu > a { - background-position: center -158px; - } - -.movies .empty_wanted { - background-image: url('../images/emptylist.png'); - height: 750px; - width: 800px; - padding-top: 260px; - margin-top: -50px; -} - -.movies .empty_manage { - text-align: center; - font-size: 25px; - line-height: 150%; -} - - .movies .empty_manage .after_manage { - margin-top: 30px; - font-size: 16px; - } - - .movies .progress { - border-radius: 2px; - padding: 10px; - margin: 5px 0; - text-align: left; - } - - .movies .progress > div { - padding: 5px 10px; - font-size: 12px; - line-height: 12px; - text-align: left; - display: inline-block; - width: 49%; - background: rgba(255, 255, 255, 0.05); - margin: 2px 0.5%; - border-radius: 3px; - } - - .movies .progress > div .folder { - display: inline-block; - padding: 5px 20px 5px 0; - white-space: nowrap; - text-overflow: ellipsis; - overflow: hidden; - width: 85%; - direction: rtl; - vertical-align: middle; - } - - .movies .progress > div .percentage { - font-weight: bold; - display: inline-block; - 
text-transform: uppercase; - text-shadow: none; - font-weight: normal; - font-size: 20px; - border-left: 1px solid rgba(255, 255, 255, .2); - width: 15%; - text-align: right; - vertical-align: middle; - } diff --git a/couchpotato/core/plugins/movie/static/movie.js b/couchpotato/core/plugins/movie/static/movie.js deleted file mode 100644 index 283bb41415..0000000000 --- a/couchpotato/core/plugins/movie/static/movie.js +++ /dev/null @@ -1,684 +0,0 @@ -var Movie = new Class({ - - Extends: BlockBase, - - action: {}, - - initialize: function(list, options, data){ - var self = this; - - self.data = data; - self.view = options.view || 'details'; - self.list = list; - - self.el = new Element('div.movie.inlay'); - - self.profile = Quality.getProfile(data.profile_id) || {}; - self.parent(self, options); - - self.addEvents(); - }, - - addEvents: function(){ - var self = this; - - App.addEvent('movie.update.'+self.data.id, self.update.bind(self)); - - ['movie.busy', 'searcher.started'].each(function(listener){ - App.addEvent(listener+'.'+self.data.id, function(notification){ - if(notification.data) - self.busy(true) - }); - }) - - App.addEvent('searcher.ended.'+self.data.id, function(notification){ - if(notification.data) - self.busy(false) - }); - }, - - destroy: function(){ - var self = this; - - self.el.destroy(); - delete self.list.movies_added[self.get('id')]; - self.list.movies.erase(self) - - self.list.checkIfEmpty(); - - // Remove events - App.removeEvents('movie.update.'+self.data.id); - ['movie.busy', 'searcher.started'].each(function(listener){ - App.removeEvents(listener+'.'+self.data.id); - }) - }, - - busy: function(set_busy){ - var self = this; - - if(!set_busy){ - if(self.spinner){ - self.mask.fade('out'); - setTimeout(function(){ - if(self.mask) - self.mask.destroy(); - if(self.spinner) - self.spinner.el.destroy(); - self.spinner = null; - self.mask = null; - }, 400); - } - } - else if(!self.spinner) { - self.createMask(); - self.spinner = 
createSpinner(self.mask); - self.mask.fade('in'); - } - }, - - createMask: function(){ - var self = this; - self.mask = new Element('div.mask', { - 'styles': { - 'z-index': '1' - } - }).inject(self.el, 'top').fade('hide'); - }, - - positionMask: function(){ - var self = this, - s = self.el.getSize() - - return self.mask.setStyles({ - 'width': s.x, - 'height': s.y - }).position({ - 'relativeTo': self.el - }) - }, - - update: function(notification){ - var self = this; - - self.data = notification.data; - self.el.empty(); - - self.profile = Quality.getProfile(self.data.profile_id) || {}; - self.create(); - - self.busy(false); - }, - - create: function(){ - var self = this; - - var s = Status.get(self.get('status_id')); - self.el.addClass('status_'+s.identifier); - - self.el.adopt( - self.select_checkbox = new Element('input[type=checkbox].inlay', { - 'events': { - 'change': function(){ - self.fireEvent('select') - } - } - }), - self.thumbnail = File.Select.single('poster', self.data.library.files), - self.data_container = new Element('div.data.inlay.light').adopt( - self.info_container = new Element('div.info').adopt( - self.title = new Element('div.title', { - 'text': self.getTitle() || 'n/a' - }), - self.year = new Element('div.year', { - 'text': self.data.library.year || 'n/a' - }), - self.rating = new Element('div.rating.icon', { - 'text': self.data.library.rating - }), - self.description = new Element('div.description', { - 'text': self.data.library.plot - }), - self.quality = new Element('div.quality', { - 'events': { - 'click': function(e){ - var releases = self.el.getElement('.actions .releases'); - if(releases) - releases.fireEvent('click', [e]) - } - } - }) - ), - self.actions = new Element('div.actions') - ) - ); - - if(self.thumbnail.empty) - self.el.addClass('no_thumbnail'); - - //self.changeView(self.view); - self.select_checkbox_class = new Form.Check(self.select_checkbox); - - // Add profile - if(self.profile.data) - 
self.profile.getTypes().each(function(type){ - - var q = self.addQuality(type.quality_id || type.get('quality_id')); - if((type.finish == true || type.get('finish')) && !q.hasClass('finish')){ - q.addClass('finish'); - q.set('title', q.get('title') + ' Will finish searching for this movie if this quality is found.') - } - - }); - - // Add releases - self.data.releases.each(function(release){ - - var q = self.quality.getElement('.q_id'+ release.quality_id), - status = Status.get(release.status_id); - - if(!q && (status.identifier == 'snatched' || status.identifier == 'done')) - var q = self.addQuality(release.quality_id) - - if (status && q && !q.hasClass(status.identifier)){ - q.addClass(status.identifier); - q.set('title', (q.get('title') ? q.get('title') : '') + ' status: '+ status.label) - } - - }); - - Object.each(self.options.actions, function(action, key){ - self.action[key.toLowerCase()] = action = new self.options.actions[key](self) - if(action.el) - self.actions.adopt(action) - }); - - if(!self.data.library.rating) - self.rating.hide(); - - }, - - addQuality: function(quality_id){ - var self = this; - - var q = Quality.getQuality(quality_id); - return new Element('span', { - 'text': q.label, - 'class': 'q_'+q.identifier + ' q_id' + q.id, - 'title': '' - }).inject(self.quality); - - }, - - getTitle: function(){ - var self = this; - - var titles = self.data.library.titles; - - var title = titles.filter(function(title){ - return title['default'] - }).pop() - - if(title) - return self.getUnprefixedTitle(title.title) - else if(titles.length > 0) - return self.getUnprefixedTitle(titles[0].title) - - return 'Unknown movie' - }, - - getUnprefixedTitle: function(t){ - if(t.substr(0, 4).toLowerCase() == 'the ') - t = t.substr(4) + ', The'; - return t; - }, - - slide: function(direction, el){ - var self = this; - - if(direction == 'in'){ - self.temp_view = self.view; - self.changeView('details') - - self.el.addEvent('outerClick', function(){ - self.removeView() - 
self.slide('out') - }) - el.show(); - self.data_container.addClass('hide_right'); - } - else { - self.el.removeEvents('outerClick') - - setTimeout(function(){ - self.el.getElements('> :not(.data):not(.poster):not(.movie_container)').hide(); - }, 600); - - self.data_container.removeClass('hide_right'); - } - }, - - changeView: function(new_view){ - var self = this; - - self.el - .removeClass(self.view+'_view') - .addClass(new_view+'_view') - - self.view = new_view; - }, - - removeView: function(){ - var self = this; - - self.el.removeClass(self.view+'_view') - }, - - get: function(attr){ - return this.data[attr] || this.data.library[attr] - }, - - select: function(bool){ - var self = this; - self.select_checkbox_class[bool ? 'check' : 'uncheck']() - }, - - isSelected: function(){ - return this.select_checkbox.get('checked'); - }, - - toElement: function(){ - return this.el; - } - -}); - -var MovieAction = new Class({ - - class_name: 'action icon', - - initialize: function(movie){ - var self = this; - self.movie = movie; - - self.create(); - if(self.el) - self.el.addClass(self.class_name) - }, - - create: function(){}, - - disable: function(){ - this.el.addClass('disable') - }, - - enable: function(){ - this.el.removeClass('disable') - }, - - createMask: function(){ - var self = this; - self.mask = new Element('div.mask', { - 'styles': { - 'z-index': '1' - } - }).inject(self.movie, 'top').fade('hide'); - //self.positionMask(); - }, - - positionMask: function(){ - var self = this, - movie = $(self.movie), - s = movie.getSize() - - return; - - return self.mask.setStyles({ - 'width': s.x, - 'height': s.y - }).position({ - 'relativeTo': movie - }) - }, - - toElement: function(){ - return this.el || null - } - -}); - -var IMDBAction = new Class({ - - Extends: MovieAction, - id: null, - - create: function(){ - var self = this; - - self.id = self.movie.get('identifier'); - - self.el = new Element('a.imdb', { - 'title': 'Go to the IMDB page of ' + self.movie.getTitle(), - 
'href': 'http://www.imdb.com/title/'+self.id+'/', - 'target': '_blank' - }); - - if(!self.id) self.disable(); - } - -}); - -var ReleaseAction = new Class({ - - Extends: MovieAction, - - create: function(){ - var self = this; - - self.el = new Element('a.releases.icon.download', { - 'title': 'Show the releases that are available for ' + self.movie.getTitle(), - 'events': { - 'click': self.show.bind(self) - } - }); - - if(self.movie.data.releases.length == 0){ - self.el.hide() - } - else { - - var buttons_done = false; - - self.movie.data.releases.sortBy('-info.score').each(function(release){ - if(buttons_done) return; - - var status = Status.get(release.status_id); - - if((self.next_release && (status.identifier == 'ignored' || status.identifier == 'failed')) || (!self.next_release && status.identifier == 'available')){ - self.hide_on_click = false; - self.show(); - buttons_done = true; - } - - }); - - } - - }, - - show: function(e){ - var self = this; - if(e) - (e).preventDefault(); - - if(!self.options_container){ - self.options_container = new Element('div.options').adopt( - self.release_container = new Element('div.releases.table').adopt( - self.trynext_container = new Element('div.buttons.try_container') - ) - ).inject(self.movie, 'top'); - - // Header - new Element('div.item.head').adopt( - new Element('span.name', {'text': 'Release name'}), - new Element('span.status', {'text': 'Status'}), - new Element('span.quality', {'text': 'Quality'}), - new Element('span.size', {'text': 'Size'}), - new Element('span.age', {'text': 'Age'}), - new Element('span.score', {'text': 'Score'}), - new Element('span.provider', {'text': 'Provider'}) - ).inject(self.release_container) - - self.movie.data.releases.sortBy('-info.score').each(function(release){ - - var status = Status.get(release.status_id), - quality = Quality.getProfile(release.quality_id) || {}, - info = release.info, - provider = self.get(release, 'provider') + (release.info['provider_extra'] ? 
self.get(release, 'provider_extra') : ''); - release.status = status; - - // Create release - new Element('div', { - 'class': 'item '+status.identifier, - 'id': 'release_'+release.id - }).adopt( - new Element('span.name', {'text': self.get(release, 'name'), 'title': self.get(release, 'name')}), - new Element('span.status', {'text': status.identifier, 'class': 'release_status '+status.identifier}), - new Element('span.quality', {'text': quality.get('label') || 'n/a'}), - new Element('span.size', {'text': release.info['size'] ? Math.floor(self.get(release, 'size')) : 'n/a'}), - new Element('span.age', {'text': self.get(release, 'age')}), - new Element('span.score', {'text': self.get(release, 'score')}), - new Element('span.provider', { 'text': provider, 'title': provider }), - release.info['detail_url'] ? new Element('a.info.icon', { - 'href': release.info['detail_url'], - 'target': '_blank' - }) : null, - new Element('a.download.icon', { - 'events': { - 'click': function(e){ - (e).preventDefault(); - if(!this.hasClass('completed')) - self.download(release); - } - } - }), - new Element('a.delete.icon', { - 'events': { - 'click': function(e){ - (e).preventDefault(); - self.ignore(release); - this.getParent('.item').toggleClass('ignored') - } - } - }) - ).inject(self.release_container) - - if(status.identifier == 'ignored' || status.identifier == 'failed' || status.identifier == 'snatched'){ - if(!self.last_release || (self.last_release && self.last_release.status.identifier != 'snatched' && status.identifier == 'snatched')) - self.last_release = release; - } - else if(!self.next_release && status.identifier == 'available'){ - self.next_release = release; - } - }); - - if(self.last_release){ - self.release_container.getElement('#release_'+self.last_release.id).addClass('last_release'); - } - - if(self.next_release){ - self.release_container.getElement('#release_'+self.next_release.id).addClass('next_release'); - } - - if(self.next_release || self.last_release){ - - 
self.trynext_container.adopt( - new Element('span.or', { - 'text': 'This movie is snatched, if anything went wrong, download' - }), - self.last_release ? new Element('a.button.orange', { - 'text': 'the same release again', - 'events': { - 'click': self.trySameRelease.bind(self) - } - }) : null, - self.next_release && self.last_release ? new Element('span.or', { - 'text': ',' - }) : null, - self.next_release ? [new Element('a.button.green', { - 'text': self.last_release ? 'another release' : 'the best release', - 'events': { - 'click': self.tryNextRelease.bind(self) - } - }), - new Element('span.or', { - 'text': 'or pick one below' - })] : null - ) - } - - } - - self.movie.slide('in', self.options_container); - }, - - get: function(release, type){ - return release.info[type] || 'n/a' - }, - - download: function(release){ - var self = this; - - var release_el = self.release_container.getElement('#release_'+release.id), - icon = release_el.getElement('.download.icon'); - - icon.addClass('spinner'); - - Api.request('release.download', { - 'data': { - 'id': release.id - }, - 'onComplete': function(json){ - icon.removeClass('spinner') - if(json.success) - icon.addClass('completed'); - else - icon.addClass('attention').set('title', 'Something went wrong when downloading, please check logs.'); - } - }); - }, - - ignore: function(release){ - var self = this; - - Api.request('release.ignore', { - 'data': { - 'id': release.id - } - }) - - }, - - tryNextRelease: function(movie_id){ - var self = this; - - if(self.last_release) - self.ignore(self.last_release); - - if(self.next_release) - self.download(self.next_release); - - }, - - trySameRelease: function(movie_id){ - var self = this; - - if(self.last_release) - self.download(self.last_release); - - } - -}); - -var TrailerAction = new Class({ - - Extends: MovieAction, - id: null, - - create: function(){ - var self = this; - - self.el = new Element('a.trailer', { - 'title': 'Watch the trailer of ' + self.movie.getTitle(), - 
'events': { - 'click': self.watch.bind(self) - } - }); - - }, - - watch: function(offset){ - var self = this; - - var data_url = 'http://gdata.youtube.com/feeds/videos?vq="{title}" {year} trailer&max-results=1&alt=json-in-script&orderby=relevance&sortorder=descending&format=5&fmt=18' - var url = data_url.substitute({ - 'title': encodeURI(self.movie.getTitle()), - 'year': self.movie.get('year'), - 'offset': offset || 1 - }), - size = $(self.movie).getSize(), - height = (size.x/16)*9, - id = 'trailer-'+randomString(); - - self.player_container = new Element('div[id='+id+']'); - self.container = new Element('div.hide.trailer_container') - .adopt(self.player_container) - .inject($(self.movie), 'top'); - - self.container.setStyle('height', 0); - self.container.removeClass('hide'); - - self.close_button = new Element('a.hide.hide_trailer', { - 'text': 'Hide trailer', - 'events': { - 'click': self.stop.bind(self) - } - }).inject(self.movie); - - self.container.setStyle('height', height); - $(self.movie).setStyle('height', height); - - new Request.JSONP({ - 'url': url, - 'onComplete': function(json){ - var video_url = json.feed.entry[0].id.$t.split('/'), - video_id = video_url[video_url.length-1]; - - self.player = new YT.Player(id, { - 'height': height, - 'width': size.x, - 'videoId': video_id, - 'playerVars': { - 'autoplay': 1, - 'showsearch': 0, - 'wmode': 'transparent', - 'iv_load_policy': 3 - } - }); - - self.close_button.removeClass('hide'); - - var quality_set = false; - var change_quality = function(state){ - if(!quality_set && (state.data == 1 || state.data || 2)){ - try { - self.player.setPlaybackQuality('hd720'); - quality_set = true; - } - catch(e){ - - } - } - } - self.player.addEventListener('onStateChange', change_quality); - - } - }).send() - - }, - - stop: function(){ - var self = this; - - self.player.stopVideo(); - self.container.addClass('hide'); - self.close_button.addClass('hide'); - $(self.movie).setStyle('height', null); - - setTimeout(function(){ - 
self.container.destroy() - self.close_button.destroy(); - }, 1800) - } - - -}); \ No newline at end of file diff --git a/couchpotato/core/plugins/movie/static/search.css b/couchpotato/core/plugins/movie/static/search.css deleted file mode 100644 index 391e34763d..0000000000 --- a/couchpotato/core/plugins/movie/static/search.css +++ /dev/null @@ -1,212 +0,0 @@ -.search_form { - display: inline-block; - vertical-align: middle; - width: 25%; -} - - .search_form input { - padding: 4px 20px 4px 4px; - margin: 0; - font-size: 14px; - width: 100%; - height: 24px; - } - .search_form input:focus { - padding-right: 83px; - } - - .search_form .input .enter { - background: #369545 url('../images/sprite.png') right -188px no-repeat; - padding: 0 20px 0 4px; - border-radius: 2px; - text-transform: uppercase; - font-size: 10px; - margin-left: -78px; - display: inline-block; - opacity: 0; - position: relative; - top: -2px; - cursor: pointer; - vertical-align: middle; - visibility: hidden; - } - .search_form.focused .input .enter { - visibility: visible; - } - .search_form.focused.filled .input .enter { - opacity: 1; - } - - .search_form .input a { - width: 17px; - height: 20px; - display: inline-block; - margin: -2px 0 0 2px; - top: 4px; - right: 5px; - background: url('../images/sprite.png') left -37px no-repeat; - cursor: pointer; - opacity: 0; - transition: all 0.2s ease-in-out; - vertical-align: middle; - } - - .search_form.filled .input a { - opacity: 1; - } - - .search_form .results_container { - position: absolute; - background: #5c697b; - margin: 6px 0 0 -230px; - width: 470px; - min-height: 140px; - border-radius: 3px; - box-shadow: 0 20px 20px -10px rgba(0,0,0,0.55); - display: none; - } - .search_form.shown.filled .results_container { - display: block; - } - - .search_form .results_container:before { - content: ' '; - height: 0; - position: relative; - width: 0; - border: 10px solid transparent; - border-bottom-color: #5c697b; - display: block; - top: -20px; - left: 
346px; - } - - .search_form .results { - max-height: 570px; - overflow-x: hidden; - padding: 10px 0; - margin-top: -18px; - } - - .movie_result { - overflow: hidden; - height: 140px; - position: relative; - } - - .movie_result .options { - position: absolute; - height: 100%; - width: 100%; - top: 0; - left: 0; - border: 1px solid transparent; - border-width: 1px 0; - border-radius: 0; - box-shadow: inset 0 1px 8px rgba(0,0,0,0.25); - } - - .movie_result .options > div { - padding: 0 15px; - border: 0; - } - - .movie_result .options .thumbnail { - vertical-align: middle; - } - - .movie_result .options select { - vertical-align: middle; - display: inline-block; - margin-right: 10px; - } - .movie_result .options select[name=title] { width: 180px; } - .movie_result .options select[name=profile] { width: 90px; } - - .movie_result .options .button { - vertical-align: middle; - display: inline-block; - } - - .movie_result .options .message { - height: 100%; - line-height: 140px; - font-size: 20px; - text-align: center; - color: #fff; - } - - .movie_result .data { - padding: 0 15px; - position: absolute; - height: 100%; - width: 100%; - top: 0; - left: 0; - background: #5c697b; - cursor: pointer; - - border-bottom: 1px solid #333; - border-top: 1px solid rgba(255,255,255, 0.15); - transition: all .6s cubic-bezier(0.9,0,0.1,1); - } - .movie_result .data.open { - left: 100%; - } - - .movie_result:last-child .data { border-bottom: 0; } - - .movie_result .in_wanted, .movie_result .in_library { - position: absolute; - margin-top: 105px; - } - - .movie_result .thumbnail { - width: 17%; - display: inline-block; - margin: 15px 3% 15px 0; - vertical-align: top; - border-radius: 3px; - box-shadow: 0 0 3px rgba(0,0,0,0.35); - } - - .movie_result .info { - width: 80%; - display: inline-block; - vertical-align: top; - padding: 15px 0; - height: 120px; - overflow: hidden; - } - - .movie_result .info .tagline { - max-height: 70px; - overflow: hidden; - display: inline-block; - } - - 
.movie_result .add +.info { - margin-left: 20%; - } - - .movie_result .info h2 { - margin: 0; - font-size: 17px; - line-height: 20px; - } - - .movie_result .info h2 span { - padding: 0 5px; - } - - .movie_result .info h2 span:before { content: "("; } - .movie_result .info h2 span:after { content: ")"; } - -.search_form .mask { - border-radius: 3px; - position: absolute; - height: 100%; - width: 100%; - left: 0; - top: 0; -} \ No newline at end of file diff --git a/couchpotato/core/plugins/movie/static/search.js b/couchpotato/core/plugins/movie/static/search.js deleted file mode 100644 index ba8b547ea0..0000000000 --- a/couchpotato/core/plugins/movie/static/search.js +++ /dev/null @@ -1,380 +0,0 @@ -Block.Search = new Class({ - - Extends: BlockBase, - - cache: {}, - - create: function(){ - var self = this; - - self.el = new Element('div.search_form').adopt( - new Element('div.input').adopt( - self.input = new Element('input.inlay', { - 'placeholder': 'Search & add a new movie', - 'events': { - 'keyup': self.keyup.bind(self), - 'focus': function(){ - self.el.addClass('focused') - if(this.get('value')) - self.hideResults(false) - }, - 'blur': function(){ - (function(){ - self.el.removeClass('focused') - }).delay(2000); - } - } - }), - new Element('span.enter', { - 'events': { - 'click': self.keyup.bind(self) - }, - 'text':'Enter' - }), - new Element('a', { - 'events': { - 'click': self.clear.bind(self) - } - }) - ), - self.result_container = new Element('div.results_container', { - 'tween': { - 'duration': 200 - }, - 'events': { - 'mousewheel': function(e){ - (e).stopPropagation(); - } - } - }).adopt( - self.results = new Element('div.results') - ) - ); - - self.mask = new Element('div.mask').inject(self.result_container).fade('hide'); - - }, - - clear: function(e){ - var self = this; - (e).preventDefault(); - - self.last_q = ''; - self.input.set('value', ''); - self.input.focus() - - self.movies = [] - self.results.empty() - self.el.removeClass('filled') - }, - - 
hideResults: function(bool){ - var self = this; - - if(self.hidden == bool) return; - - self.el[bool ? 'removeClass' : 'addClass']('shown'); - - if(bool){ - History.removeEvent('change', self.hideResults.bind(self, !bool)); - self.el.removeEvent('outerClick', self.hideResults.bind(self, !bool)); - } - else { - History.addEvent('change', self.hideResults.bind(self, !bool)); - self.el.addEvent('outerClick', self.hideResults.bind(self, !bool)); - } - - self.hidden = bool; - }, - - keyup: function(e){ - var self = this; - - self.el[self.q() ? 'addClass' : 'removeClass']('filled') - - if(self.q() != self.last_q && (['enter'].indexOf(e.key) > -1 || e.type == 'click')) - self.autocomplete() - - }, - - autocomplete: function(){ - var self = this; - - if(!self.q()){ - self.hideResults(true) - return - } - - self.list() - }, - - list: function(){ - var self = this; - - if(self.api_request && self.api_request.running) return - - var q = self.q(); - var cache = self.cache[q]; - - self.hideResults(false); - - if(!cache){ - self.mask.fade('in'); - - if(!self.spinner) - self.spinner = createSpinner(self.mask); - - self.api_request = Api.request('movie.search', { - 'data': { - 'q': q - }, - 'onComplete': self.fill.bind(self, q) - }) - } - else - self.fill(q, cache) - - self.last_q = q; - - }, - - fill: function(q, json){ - var self = this; - - self.cache[q] = json - - self.movies = {} - self.results.empty() - - Object.each(json.movies, function(movie){ - - var m = new Block.Search.Item(movie); - $(m).inject(self.results) - self.movies[movie.imdb || 'r-'+Math.floor(Math.random()*10000)] = m - - if(q == movie.imdb) - m.showOptions() - - }); - - if(q != self.q()) - self.list() - - // Calculate result heights - var w = window.getSize(), - rc = self.result_container.getCoordinates(); - - self.results.setStyle('max-height', (w.y - rc.top - 50) + 'px') - self.mask.fade('out') - - }, - - loading: function(bool){ - this.el[bool ? 
'addClass' : 'removeClass']('loading') - }, - - q: function(){ - return this.input.get('value').trim(); - } - -}); - -Block.Search.Item = new Class({ - - initialize: function(info, options){ - var self = this; - - self.info = info; - self.alternative_titles = []; - - self.create(); - }, - - create: function(){ - var self = this, - info = self.info; - - self.el = new Element('div.movie_result', { - 'id': info.imdb - }).adopt( - self.options_el = new Element('div.options.inlay'), - self.data_container = new Element('div.data', { - 'tween': { - duration: 400, - transition: 'quint:in:out' - }, - 'events': { - 'click': self.showOptions.bind(self) - } - }).adopt( - self.thumbnail = info.images && info.images.poster.length > 0 ? new Element('img.thumbnail', { - 'src': info.images.poster[0], - 'height': null, - 'width': null - }) : null, - new Element('div.info').adopt( - self.title = new Element('h2', { - 'text': info.titles[0] - }).adopt( - self.year = info.year ? new Element('span.year', { - 'text': info.year - }) : null - ), - self.tagline = new Element('span.tagline', { - 'text': info.tagline ? info.tagline : info.plot, - 'title': info.tagline ? info.tagline : info.plot - }), - self.director = self.info.director ? new Element('span.director', { - 'text': 'Director:' + info.director - }) : null, - self.starring = info.actors ? 
new Element('span.actors', { - 'text': 'Starring:' - }) : null - ) - ) - ) - - if(info.actors){ - Object.each(info.actors, function(actor){ - new Element('span', { - 'text': actor - }).inject(self.starring) - }) - } - - info.titles.each(function(title){ - self.alternativeTitle({ - 'title': title - }); - }) - }, - - alternativeTitle: function(alternative){ - var self = this; - - self.alternative_titles.include(alternative); - }, - - showOptions: function(){ - var self = this; - - self.createOptions(); - - self.data_container.addClass('open'); - self.el.addEvent('outerClick', self.closeOptions.bind(self)) - - }, - - closeOptions: function(){ - var self = this; - - self.data_container.removeClass('open'); - self.el.removeEvents('outerClick') - }, - - add: function(e){ - var self = this; - (e).preventDefault(); - - self.loadingMask(); - - Api.request('movie.add', { - 'data': { - 'identifier': self.info.imdb, - 'title': self.title_select.get('value'), - 'profile_id': self.profile_select.get('value') - }, - 'onComplete': function(json){ - self.options_el.empty(); - self.options_el.adopt( - new Element('div.message', { - 'text': json.added ? 'Movie successfully added.' : 'Movie didn\'t add properly. Check logs' - }) - ); - self.mask.fade('out'); - }, - 'onFailure': function(){ - self.options_el.empty(); - self.options_el.adopt( - new Element('div.message', { - 'text': 'Something went wrong, check the logs for more info.' - }) - ); - self.mask.fade('out'); - } - }); - }, - - createOptions: function(){ - var self = this, - info = self.info; - - if(!self.options_el.hasClass('set')){ - - if(self.info.in_library){ - var in_library = []; - self.info.in_library.releases.each(function(release){ - in_library.include(release.quality.label) - }); - } - - self.options_el.grab( - new Element('div').adopt( - self.thumbnail = (info.images && info.images.poster.length > 0) ? 
new Element('img.thumbnail', { - 'src': info.images.poster[0], - 'height': null, - 'width': null - }) : null, - self.info.in_wanted && self.info.in_wanted.profile ? new Element('span.in_wanted', { - 'text': 'Already in wanted list: ' + self.info.in_wanted.profile.label - }) : (in_library ? new Element('span.in_library', { - 'text': 'Already in library: ' + in_library.join(', ') - }) : null), - self.title_select = new Element('select', { - 'name': 'title' - }), - self.profile_select = new Element('select', { - 'name': 'profile' - }), - new Element('a.button', { - 'text': 'Add', - 'events': { - 'click': self.add.bind(self) - } - }) - ) - ); - - Array.each(self.alternative_titles, function(alt){ - new Element('option', { - 'text': alt.title - }).inject(self.title_select) - }) - - Quality.getActiveProfiles().each(function(profile){ - new Element('option', { - 'value': profile.id ? profile.id : profile.data.id, - 'text': profile.label ? profile.label : profile.data.label - }).inject(self.profile_select) - }); - - self.options_el.addClass('set'); - } - - }, - - loadingMask: function(){ - var self = this; - - self.mask = new Element('span.mask').inject(self.el).fade('hide') - - createSpinner(self.mask) - self.mask.fade('in') - - }, - - toElement: function(){ - return this.el - } - -}); diff --git a/couchpotato/core/plugins/profile/__init__.py b/couchpotato/core/plugins/profile/__init__.py index ac19b01812..15a74eee51 100644 --- a/couchpotato/core/plugins/profile/__init__.py +++ b/couchpotato/core/plugins/profile/__init__.py @@ -1,6 +1,5 @@ from .main import ProfilePlugin -def start(): - return ProfilePlugin() -config = [] +def autoload(): + return ProfilePlugin() diff --git a/couchpotato/core/plugins/profile/index.py b/couchpotato/core/plugins/profile/index.py new file mode 100644 index 0000000000..c2bf9445b8 --- /dev/null +++ b/couchpotato/core/plugins/profile/index.py @@ -0,0 +1,16 @@ +from CodernityDB.tree_index import TreeBasedIndex + + +class 
ProfileIndex(TreeBasedIndex): + _version = 1 + + def __init__(self, *args, **kwargs): + kwargs['key_format'] = 'i' + super(ProfileIndex, self).__init__(*args, **kwargs) + + def make_key(self, key): + return key + + def make_key_value(self, data): + if data.get('_t') == 'profile': + return data.get('order', 99), None diff --git a/couchpotato/core/plugins/profile/main.py b/couchpotato/core/plugins/profile/main.py index 4caa54f701..aa112c512f 100644 --- a/couchpotato/core/plugins/profile/main.py +++ b/couchpotato/core/plugins/profile/main.py @@ -1,18 +1,22 @@ -from couchpotato import get_session +import traceback + +from couchpotato import get_db, tryInt from couchpotato.api import addApiView from couchpotato.core.event import addEvent, fireEvent from couchpotato.core.helpers.encoding import toUnicode -from couchpotato.core.helpers.request import jsonified, getParams, getParam from couchpotato.core.logger import CPLog from couchpotato.core.plugins.base import Plugin -from couchpotato.core.settings.model import Profile, ProfileType +from .index import ProfileIndex + log = CPLog(__name__) class ProfilePlugin(Plugin): - to_dict = {'types': {}} + _database = { + 'profile': ProfileIndex + } def __init__(self): addEvent('profile.all', self.all) @@ -30,155 +34,224 @@ def __init__(self): }) addEvent('app.initialize', self.fill, priority = 90) + addEvent('app.load', self.forceDefaults, priority = 110) - def allView(self): + def forceDefaults(self): - return jsonified({ - 'success': True, - 'list': self.all() - }) + db = get_db() - def all(self): + # Fill qualities and profiles if they are empty somehow.. 
+ if db.count(db.all, 'profile') == 0: - db = get_session() - profiles = db.query(Profile).all() + if db.count(db.all, 'quality') == 0: + fireEvent('quality.fill', single = True) - temp = [] - for profile in profiles: - temp.append(profile.to_dict(self.to_dict)) + self.fill() - return temp - - def save(self): - - params = getParams() - - db = get_session() + # Get all active movies without profile + try: + medias = fireEvent('media.with_status', 'active', single = True) - p = db.query(Profile).filter_by(id = params.get('id')).first() - if not p: - p = Profile() - db.add(p) + profile_ids = [x.get('_id') for x in self.all()] + default_id = profile_ids[0] - p.label = toUnicode(params.get('label')) - p.order = params.get('order', p.order if p.order else 0) - p.core = params.get('core', False) + for media in medias: + if media.get('profile_id') not in profile_ids: + media['profile_id'] = default_id + db.update(media) + except: + log.error('Failed: %s', traceback.format_exc()) - #delete old types - [db.delete(t) for t in p.types] + # Cleanup profiles that have empty qualites + profiles = self.all() + for profile in profiles: + try: + if '' in profile.get('qualities') or '-1' in profile.get('qualities'): + log.warning('Found profile with empty qualities, cleaning it up') + p = db.get('id', profile.get('_id')) + p['qualities'] = [x for x in p['qualities'] if (x != '' and x != '-1')] + db.update(p) + except: + log.error('Failed: %s', traceback.format_exc()) + + def allView(self, **kwargs): + + return { + 'success': True, + 'list': self.all() + } - order = 0 - for type in params.get('types', []): - t = ProfileType( - order = order, - finish = type.get('finish') if order > 0 else 1, - wait_for = params.get('wait_for'), - quality_id = type.get('quality_id') - ) - p.types.append(t) + def all(self): - order += 1 + db = get_db() + profiles = db.all('profile', with_doc = True) - db.commit() + return [x['doc'] for x in profiles] - profile_dict = p.to_dict(self.to_dict) + def 
save(self, **kwargs): - return jsonified({ - 'success': True, - 'profile': profile_dict - }) + try: + db = get_db() + + profile = { + '_t': 'profile', + 'label': toUnicode(kwargs.get('label')), + 'order': tryInt(kwargs.get('order', 999)), + 'core': kwargs.get('core', False), + 'minimum_score': tryInt(kwargs.get('minimum_score', 1)), + 'qualities': [], + 'wait_for': [], + 'stop_after': [], + 'finish': [], + '3d': [] + } + + # Update types + order = 0 + for type in kwargs.get('types', []): + profile['qualities'].append(type.get('quality')) + profile['wait_for'].append(tryInt(kwargs.get('wait_for', 0))) + profile['stop_after'].append(tryInt(kwargs.get('stop_after', 0))) + profile['finish'].append((tryInt(type.get('finish')) == 1) if order > 0 else True) + profile['3d'].append(tryInt(type.get('3d'))) + order += 1 + + id = kwargs.get('id') + try: + p = db.get('id', id) + profile['order'] = tryInt(kwargs.get('order', p.get('order', 999))) + except: + p = db.insert(profile) + + p.update(profile) + db.update(p) + + return { + 'success': True, + 'profile': p + } + except: + log.error('Failed: %s', traceback.format_exc()) + + return { + 'success': False + } def default(self): + db = get_db() + return list(db.all('profile', limit = 1, with_doc = True))[0]['doc'] - db = get_session() - default = db.query(Profile).first() - default_dict = default.to_dict(self.to_dict) + def saveOrder(self, **kwargs): - return default_dict + try: + db = get_db() - def saveOrder(self): + order = 0 - params = getParams() - db = get_session() + for profile_id in kwargs.get('ids', []): + p = db.get('id', profile_id) + p['hide'] = tryInt(kwargs.get('hidden')[order]) == 1 + p['order'] = order + db.update(p) - order = 0 - for profile in params.get('ids', []): - p = db.query(Profile).filter_by(id = profile).first() - p.hide = params.get('hidden')[order] - p.order = order + order += 1 - order += 1 + return { + 'success': True + } + except: + log.error('Failed: %s', traceback.format_exc()) - db.commit() + 
return { + 'success': False + } - return jsonified({ - 'success': True - }) + def delete(self, id = None, **kwargs): - def delete(self): + try: + db = get_db() - id = getParam('id') + success = False + message = '' - db = get_session() + try: + p = db.get('id', id) + db.delete(p) - success = False - message = '' - try: - p = db.query(Profile).filter_by(id = id).first() + # Force defaults on all empty profile movies + self.forceDefaults() - db.delete(p) - db.commit() + success = True + except Exception as e: + message = log.error('Failed deleting Profile: %s', e) - success = True - except Exception, e: - message = log.error('Failed deleting Profile: %s', e) + return { + 'success': success, + 'message': message + } + except: + log.error('Failed: %s', traceback.format_exc()) - return jsonified({ - 'success': success, - 'message': message - }) + return { + 'success': False + } def fill(self): - db = get_session(); - - profiles = [{ - 'label': 'Best', - 'qualities': ['720p', '1080p', 'brrip', 'dvdrip'] - }, { - 'label': 'HD', - 'qualities': ['720p', '1080p'] - }, { - 'label': 'SD', - 'qualities': ['dvdrip', 'dvdr'] - }] - - # Create default quality profile - order = -2 - for profile in profiles: - log.info('Creating default profile: %s', profile.get('label')) - p = Profile( - label = toUnicode(profile.get('label')), - order = order - ) - db.add(p) - - quality_order = 0 - for quality in profile.get('qualities'): - quality = fireEvent('quality.single', identifier = quality, single = True) - profile_type = ProfileType( - quality_id = quality.get('id'), - profile = p, - finish = True, - wait_for = 0, - order = quality_order - ) - p.types.append(profile_type) - - quality_order += 1 - - order += 1 - - db.commit() - - return True + try: + db = get_db() + + profiles = [{ + 'label': 'Best', + 'qualities': ['720p', '1080p', 'brrip', 'dvdrip'] + }, { + 'label': 'HD', + 'qualities': ['720p', '1080p'] + }, { + 'label': 'SD', + 'qualities': ['dvdrip', 'dvdr'] + }, { + 'label': 
'Prefer 3D HD', + 'qualities': ['1080p', '720p', '720p', '1080p'], + '3d': [True, True] + }, { + 'label': '3D HD', + 'qualities': ['1080p', '720p'], + '3d': [True, True] + }, { + 'label': 'UHD 4K', + 'qualities': ['720p', '1080p', '2160p'] + }] + + # Create default quality profile + order = 0 + for profile in profiles: + log.info('Creating default profile: %s', profile.get('label')) + + pro = { + '_t': 'profile', + 'label': toUnicode(profile.get('label')), + 'order': order, + 'qualities': profile.get('qualities'), + 'minimum_score': 1, + 'finish': [], + 'wait_for': [], + 'stop_after': [], + '3d': [] + } + + threed = profile.get('3d', []) + for q in profile.get('qualities'): + pro['finish'].append(True) + pro['wait_for'].append(0) + pro['stop_after'].append(0) + pro['3d'].append(threed.pop() if threed else False) + + db.insert(pro) + order += 1 + + return True + except: + log.error('Failed: %s', traceback.format_exc()) + + return False diff --git a/couchpotato/core/plugins/profile/static/profile.css b/couchpotato/core/plugins/profile/static/profile.css deleted file mode 100644 index 9d50d2fd00..0000000000 --- a/couchpotato/core/plugins/profile/static/profile.css +++ /dev/null @@ -1,139 +0,0 @@ -.add_new_profile { - padding: 20px; - display: block; - text-align: center; - font-size: 20px; - border-bottom: 1px solid rgba(255,255,255,0.2); -} - -.profile { border-bottom: 1px solid rgba(255,255,255,0.2) } - - .profile > .delete { - height: 20px; - width: 20px; - position: absolute; - margin-left: 690px; - padding: 14px; - background-position: center; - } - - .profile .qualities { - min-height: 80px; - } - - .profile .formHint { - width: 250px !important; - vertical-align: top !important; - margin: 0 !important; - padding-left: 3px !important; - opacity: 0.1; - } - .profile:hover .formHint { - opacity: 1; - } - - .profile .wait_for { - position: absolute; - margin: -45px 0 0 437px; - } - - .profile .wait_for input { - margin: 0 5px !important; - } - - .profile .types { - 
padding: 0; - margin: 0 20px 0 -4px; - display: inline-block; - } - - .profile .types li { - padding: 3px 5px; - border-bottom: 1px solid rgba(255,255,255,0.2); - list-style: none; - } - .profile .types li:last-child { border: 0; } - - .profile .types li > * { - display: inline-block; - vertical-align: middle; - line-height: 0; - margin-right: 10px; - } - - .profile .quality_type select { - width: 186px; - margin-left: -1px; - } - - .profile .types li.is_empty .check, .profile .types li.is_empty .delete, .profile .types li.is_empty .handle { - visibility: hidden; - } - - .profile .types .type .handle { - background: url('./handle.png') center; - display: inline-block; - height: 20px; - width: 20px; - cursor: grab; - cursor: -moz-grab; - cursor: -webkit-grab; - margin: 0; - } - - .profile .types .type .delete { - background-position: left center; - height: 20px; - width: 20px; - visibility: hidden; - cursor: pointer; - } - - .profile .types .type:hover:not(.is_empty) .delete { - visibility: visible; - } - -#profile_ordering { - -} - - #profile_ordering ul { - float: left; - margin: 0; - width: 275px; - padding: 0; - } - - #profile_ordering li { - cursor: grab; - cursor: -moz-grab; - cursor: -webkit-grab; - border-bottom: 1px solid rgba(255,255,255,0.2); - padding: 0 5px; - } - #profile_ordering li:last-child { border: 0; } - - #profile_ordering li .check { - margin: 2px 10px 0 0; - vertical-align: top; - } - - #profile_ordering li > span { - display: inline-block; - height: 20px; - vertical-align: top; - line-height: 20px; - } - - #profile_ordering li .handle { - background: url('./handle.png') center; - width: 20px; - float: right; - } - - #profile_ordering .formHint { - clear: none; - float: right; - width: 250px; - margin: 0; - } \ No newline at end of file diff --git a/couchpotato/core/plugins/profile/static/profile.js b/couchpotato/core/plugins/profile/static/profile.js index 3bb44989cf..457aaa94ed 100644 --- a/couchpotato/core/plugins/profile/static/profile.js 
+++ b/couchpotato/core/plugins/profile/static/profile.js @@ -24,40 +24,69 @@ var Profile = new Class({ var data = self.data; self.el = new Element('div.profile').adopt( - self.delete_button = new Element('span.delete.icon', { + self.delete_button = new Element('span.delete.icon-delete', { 'events': { 'click': self.del.bind(self) } }), new Element('.quality_label.ctrlHolder').adopt( new Element('label', {'text':'Name'}), - new Element('input.inlay', { + new Element('input', { 'type':'text', 'value': data.label, 'placeholder': 'Profile name' }) ), - new Element('div.wait_for.ctrlHolder').adopt( - new Element('span', {'text':'Wait'}), - new Element('input.inlay.xsmall', { - 'type':'text', - 'value': data.types && data.types.length > 0 ? data.types[0].wait_for : 0 - }), - new Element('span', {'text':'day(s) for a better quality.'}) - ), new Element('div.qualities.ctrlHolder').adopt( new Element('label', {'text': 'Search for'}), self.type_container = new Element('ol.types'), new Element('div.formHint', { 'html': "Search these qualities (2 minimum), from top to bottom. Use the checkbox, to stop searching after it found this quality." }) + ), + new Element('div.wait_for.ctrlHolder').adopt( + // "Wait the entered number of days for a checked quality, before downloading a lower quality release." + new Element('span', {'text':'Wait'}), + new Element('input.wait_for_input.xsmall', { + 'type':'text', + 'value': data.wait_for && data.wait_for.length > 0 ? data.wait_for[0] : 0 + }), + new Element('span', {'text':'day(s) for a better quality '}), + new Element('span.advanced', {'text':'and keep searching'}), + + // "After a checked quality is found and downloaded, continue searching for even better quality releases for the entered number of days." + new Element('input.xsmall.stop_after_input.advanced', { + 'type':'text', + 'value': data.stop_after && data.stop_after.length > 0 ? 
data.stop_after[0] : 0 + }), + new Element('span.advanced', {'text':'day(s) for a better (checked) quality.'}), + + // Minimum score of + new Element('span.advanced', {'html':'
    Releases need a minimum score of'}), + new Element('input.advanced.xsmall.minimum_score_input', { + 'size': 4, + 'type':'text', + 'value': data.minimum_score || 1 + }) ) ); - self.makeSortable() + self.makeSortable(); + + // Combine qualities and properties into types + if(data.qualities){ + data.types = []; + data.qualities.each(function(quality, nr){ + data.types.include({ + 'quality': quality, + 'finish': data.finish[nr] || false, + '3d': data['3d'] ? data['3d'][nr] || false : false + }); + }); + } if(data.types) - Object.each(data.types, self.addType.bind(self)) + data.types.each(self.addType.bind(self)); else self.delete_button.hide(); @@ -67,8 +96,8 @@ var Profile = new Class({ save: function(delay){ var self = this; - if(self.save_timer) clearTimeout(self.save_timer); - self.save_timer = (function(){ + if(self.save_timer) clearRequestTimeout(self.save_timer); + self.save_timer = requestTimeout(function(){ self.addType(); @@ -87,14 +116,14 @@ var Profile = new Class({ 'onComplete': function(json){ if(json.success){ self.data = json.profile; - self.type_container.getElement('li:first-child input[type=checkbox]') + self.type_container.getElement('li:first-child input.finish[type=checkbox]') .set('checked', true) .getParent().addClass('checked'); } } }); - }).delay(delay, self) + }, delay); }, @@ -102,21 +131,24 @@ var Profile = new Class({ var self = this; var data = { - 'id' : self.data.id, + 'id' : self.data._id, 'label' : self.el.getElement('.quality_label input').get('value'), - 'wait_for' : self.el.getElement('.wait_for input').get('value'), + 'wait_for' : self.el.getElement('.wait_for_input').get('value'), + 'stop_after' : self.el.getElement('.stop_after_input').get('value'), + 'minimum_score' : self.el.getElement('.minimum_score_input').get('value'), 'types': [] - } + }; Array.each(self.type_container.getElements('.type'), function(type){ - if(!type.hasClass('deleted') && type.getElement('select').get('value') > 0) + if(!type.hasClass('deleted') && 
type.getElement('select').get('value') != -1 && type.getElement('select').get('value') != "") data.types.include({ - 'quality_id': type.getElement('select').get('value'), - 'finish': +type.getElement('input[type=checkbox]').checked + 'quality': type.getElement('select').get('value'), + 'finish': +type.getElement('input.finish[type=checkbox]').checked, + '3d': +type.getElement('input.3d[type=checkbox]').checked }); - }) + }); - return data + return data; }, addType: function(data){ @@ -145,7 +177,7 @@ var Profile = new Class({ var self = this; return self.types.filter(function(type){ - return type.get('quality_id') + return type.get('quality'); }); }, @@ -162,7 +194,7 @@ var Profile = new Class({ (e).preventDefault(); Api.request('profile.delete', { 'data': { - 'id': self.data.id + 'id': self.data._id }, 'useSpinner': true, 'spinnerOptions': { @@ -199,15 +231,15 @@ var Profile = new Class({ }, get: function(attr){ - return this.data[attr] + return this.data[attr]; }, isCore: function(){ - return this.data.core + return this.data.core; }, toElement: function(){ - return this.el + return this.el; } }); @@ -226,8 +258,10 @@ Profile.Type = new Class({ self.create(); self.addEvent('change', function(){ - self.el[self.qualities.get('value') == '-1' ? 'addClass' : 'removeClass']('is_empty'); - self.deleted = self.qualities.get('value') == '-1'; + var has_quality = !(self.qualities.get('value') == '-1' || self.qualities.get('value') == ''); + self.el[!has_quality ? 'addClass' : 'removeClass']('is_empty'); + self.el[has_quality && Quality.getQuality(self.qualities.get('value')).allow_3d ? 
'addClass': 'removeClass']('allow_3d'); + self.deleted = !has_quality; }); }, @@ -237,36 +271,48 @@ Profile.Type = new Class({ var data = self.data; self.el = new Element('li.type').adopt( - new Element('span.quality_type').adopt( + new Element('span.quality_type.select_wrapper.icon-dropdown').grab( self.fillQualities() ), - new Element('span.finish').adopt( - self.finish = new Element('input.inlay.finish[type=checkbox]', { + self.finish_container = new Element('label.finish').adopt( + self.finish = new Element('input.finish[type=checkbox]', { 'checked': data.finish !== undefined ? data.finish : 1, 'events': { - 'change': function(e){ + 'change': function(){ if(self.el == self.el.getParent().getElement(':first-child')){ - self.finish_class.check(); - alert('Top quality always finishes the search') + alert('Top quality always finishes the search'); return; } self.fireEvent('change'); } } - }) + }), + new Element('span.check_label[text=finish]') ), - new Element('span.delete.icon', { + self['3d_container'] = new Element('label.threed').adopt( + self['3d'] = new Element('input.3d[type=checkbox]', { + 'checked': data['3d'] !== undefined ? data['3d'] : 0, + 'events': { + 'change': function(){ + self.fireEvent('change'); + } + } + }), + new Element('span.check_label[text=3D]') + ), + new Element('span.delete.icon-cancel', { 'events': { 'click': self.del.bind(self) } }), - new Element('span.handle') + new Element('span.handle.icon-handle') ); - self.el[self.data.quality_id > 0 ? 'removeClass' : 'addClass']('is_empty'); + self.el[self.data.quality ? 
'removeClass' : 'addClass']('is_empty'); - self.finish_class = new Form.Check(self.finish); + if(self.data.quality && Quality.getQuality(self.data.quality).allow_3d) + self.el.addClass('allow_3d'); }, @@ -277,7 +323,7 @@ Profile.Type = new Class({ 'events': { 'change': self.fireEvent.bind(self, 'change') } - }).adopt( + }).grab( new Element('option', { 'text': '+ Add another quality', 'value': -1 @@ -287,11 +333,12 @@ Profile.Type = new Class({ Object.each(Quality.qualities, function(q){ new Element('option', { 'text': q.label, - 'value': q.id - }).inject(self.qualities) + 'value': q.identifier, + 'data-allow_3d': q.allow_3d + }).inject(self.qualities); }); - self.qualities.set('value', self.data.quality_id); + self.qualities.set('value', self.data.quality || -1); return self.qualities; @@ -301,9 +348,10 @@ Profile.Type = new Class({ var self = this; return { - 'quality_id': self.qualities.get('value'), - 'finish': +self.finish.checked - } + 'quality': self.qualities.get('value'), + 'finish': +self.finish.checked, + '3d': +self['3d'].checked + }; }, get: function(key){ @@ -324,4 +372,4 @@ Profile.Type = new Class({ return this.el; } -}) \ No newline at end of file +}); diff --git a/couchpotato/core/plugins/profile/static/profile.scss b/couchpotato/core/plugins/profile/static/profile.scss new file mode 100644 index 0000000000..a48f09ae95 --- /dev/null +++ b/couchpotato/core/plugins/profile/static/profile.scss @@ -0,0 +1,167 @@ +@import "_mixins"; + +.add_new_profile { + padding: 20px; + display: block; + text-align: center; + font-size: 20px; + border-bottom: 1px solid transparent; + @include theme(border-color, off); +} + +.profile { + margin-bottom: 20px; + + .quality_label input { + font-weight: bold; + } + + > .delete { + position: absolute; + padding: $padding/3 $padding; + right: 0; + cursor: pointer; + opacity: 0.6; + color: #fd5353; + font-size: 1.5em; + z-index: 2; + + &:hover { + opacity: 1; + } + } + + .ctrlHolder { + + .types { + flex: 1 1 auto; + 
min-width: 360px; + + .type { + display: flex; + flex-row: row nowrap; + align-items: center; + padding: 2px 0; + + label { + min-width: 0; + margin-left: $padding/2; + + span { + font-size: .9em; + } + } + + input[type=checkbox] { + margin-right: 3px; + } + + .delete, .handle { + margin-left: $padding/4; + width: 20px; + font-size: 20px; + opacity: .1; + text-align: center; + cursor: pointer; + + &.handle { + cursor: move; + cursor: grab; + } + + &:hover { + opacity: 1; + } + } + + &.is_empty { + .delete, .handle { + display: none; + } + } + } + + } + + &.wait_for.wait_for { + display: block; + + input { + min-width: 0; + width: 40px; + text-align: center; + margin: 0 2px; + } + + .advanced { + display: none; + @include theme(color, primary); + + .show_advanced & { + display: inline; + } + } + + } + + .formHint { + } + + } +} + +#profile_ordering { + ul { + list-style: none; + margin: 0; + width: 275px; + padding: 0; + } + + li { + border-bottom: 1px solid transparent; + @include theme(border-color, off); + padding: 5px; + display: flex; + align-items: center; + + &:hover { + @include theme(background, off); + } + + &:last-child { border: 0; } + + input[type=checkbox] { + margin: 2px 10px 0 0; + vertical-align: top; + } + + > span { + display: inline-block; + height: 20px; + vertical-align: top; + line-height: 20px; + + &.profile_label { + flex: 1 1 auto; + } + } + + .handle { + font-size: 20px; + width: 20px; + float: right; + cursor: move; + cursor: grab; + opacity: .5; + text-align: center; + + &:hover { + opacity: 1; + } + } + } + + .formHint { + } +} diff --git a/couchpotato/core/plugins/quality/__init__.py b/couchpotato/core/plugins/quality/__init__.py index e1b97ad0d8..7710251c7c 100644 --- a/couchpotato/core/plugins/quality/__init__.py +++ b/couchpotato/core/plugins/quality/__init__.py @@ -1,6 +1,5 @@ from .main import QualityPlugin -def start(): - return QualityPlugin() -config = [] +def autoload(): + return QualityPlugin() diff --git 
a/couchpotato/core/plugins/quality/index.py b/couchpotato/core/plugins/quality/index.py new file mode 100644 index 0000000000..7804397216 --- /dev/null +++ b/couchpotato/core/plugins/quality/index.py @@ -0,0 +1,18 @@ +from hashlib import md5 + +from CodernityDB.hash_index import HashIndex + + +class QualityIndex(HashIndex): + _version = 1 + + def __init__(self, *args, **kwargs): + kwargs['key_format'] = '32s' + super(QualityIndex, self).__init__(*args, **kwargs) + + def make_key(self, key): + return md5(key).hexdigest() + + def make_key_value(self, data): + if data.get('_t') == 'quality' and data.get('identifier'): + return md5(data.get('identifier')).hexdigest(), None diff --git a/couchpotato/core/plugins/quality/main.py b/couchpotato/core/plugins/quality/main.py index 4cd5b8de68..ea8e15eb03 100644 --- a/couchpotato/core/plugins/quality/main.py +++ b/couchpotato/core/plugins/quality/main.py @@ -1,42 +1,60 @@ -from couchpotato import get_session +О╩©from math import fabs, ceil +import traceback +import re + +from CodernityDB.database import RecordNotFound +from couchpotato import get_db from couchpotato.api import addApiView -from couchpotato.core.event import addEvent -from couchpotato.core.helpers.encoding import toUnicode -from couchpotato.core.helpers.request import jsonified, getParams -from couchpotato.core.helpers.variable import mergeDicts, md5, getExt +from couchpotato.core.event import addEvent, fireEvent +from couchpotato.core.helpers.encoding import toUnicode, ss +from couchpotato.core.helpers.variable import mergeDicts, getExt, tryInt, splitString, tryFloat from couchpotato.core.logger import CPLog from couchpotato.core.plugins.base import Plugin -from couchpotato.core.settings.model import Quality, Profile, ProfileType -from sqlalchemy.sql.expression import or_ -import os.path -import re -import time +from couchpotato.core.plugins.quality.index import QualityIndex + log = CPLog(__name__) class QualityPlugin(Plugin): + _database = { + 'quality': 
QualityIndex + } + qualities = [ - {'identifier': 'bd50', 'hd': True, 'size': (15000, 60000), 'label': 'BR-Disk', 'alternative': ['bd25'], 'allow': ['1080p'], 'ext':[], 'tags': ['bdmv', 'certificate', ('complete', 'bluray')]}, - {'identifier': '1080p', 'hd': True, 'size': (5000, 20000), 'label': '1080P', 'width': 1920, 'height': 1080, 'alternative': [], 'allow': [], 'ext':['mkv', 'm2ts'], 'tags': ['m2ts']}, - {'identifier': '720p', 'hd': True, 'size': (3500, 10000), 'label': '720P', 'width': 1280, 'height': 720, 'alternative': [], 'allow': [], 'ext':['mkv', 'ts']}, - {'identifier': 'brrip', 'hd': True, 'size': (700, 7000), 'label': 'BR-Rip', 'alternative': ['bdrip'], 'allow': ['720p', '1080p'], 'ext':['avi']}, - {'identifier': 'dvdr', 'size': (3000, 10000), 'label': 'DVD-R', 'alternative': [], 'allow': [], 'ext':['iso', 'img'], 'tags': ['pal', 'ntsc', 'video_ts', 'audio_ts']}, - {'identifier': 'dvdrip', 'size': (600, 2400), 'label': 'DVD-Rip', 'width': 720, 'alternative': ['dvdrip'], 'allow': [], 'ext':['avi', 'mpg', 'mpeg'], 'tags': [('dvd', 'rip'), ('dvd', 'xvid'), ('dvd', 'divx')]}, - {'identifier': 'scr', 'size': (600, 1600), 'label': 'Screener', 'alternative': ['screener', 'dvdscr', 'ppvrip'], 'allow': ['dvdr', 'dvd'], 'ext':['avi', 'mpg', 'mpeg']}, - {'identifier': 'r5', 'size': (600, 1000), 'label': 'R5', 'alternative': [], 'allow': ['dvdr'], 'ext':['avi', 'mpg', 'mpeg']}, - {'identifier': 'tc', 'size': (600, 1000), 'label': 'TeleCine', 'alternative': ['telecine'], 'allow': [], 'ext':['avi', 'mpg', 'mpeg']}, - {'identifier': 'ts', 'size': (600, 1000), 'label': 'TeleSync', 'alternative': ['telesync', 'hdts'], 'allow': [], 'ext':['avi', 'mpg', 'mpeg']}, - {'identifier': 'cam', 'size': (600, 1000), 'label': 'Cam', 'alternative': ['camrip', 'hdcam'], 'allow': [], 'ext':['avi', 'mpg', 'mpeg']} + {'identifier': '2160p', 'hd': True, 'allow_3d': True, 'size': (10000, 650000), 'median_size': 20000, 'label': '2160p', 'width': 3840, 'height': 2160, 'alternative': [], 
'allow': [], 'ext':['mkv'], 'tags': ['x264', 'h264', '2160', 'UHD', 'x265', 'HDR']}, + {'identifier': 'bd50', 'hd': True, 'allow_3d': True, 'size': (20000, 60000), 'median_size': 40000, 'label': 'BR-Disk', 'alternative': ['bd25', ('br', 'disk')], 'allow': ['1080p'], 'ext':['iso', 'img'], 'tags': ['bdmv', 'certificate', ('complete', 'bluray'), 'avc', 'mvc']}, + {'identifier': '1080p', 'hd': True, 'allow_3d': True, 'size': (4000, 20000), 'median_size': 10000, 'label': '1080p', 'width': 1920, 'height': 1080, 'alternative': [], 'allow': [], 'ext':['mkv', 'm2ts', 'ts'], 'tags': ['m2ts', 'x264', 'h264', '1080']}, + {'identifier': '720p', 'hd': True, 'allow_3d': True, 'size': (3000, 10000), 'median_size': 5500, 'label': '720p', 'width': 1280, 'height': 720, 'alternative': [], 'allow': [], 'ext':['mkv', 'ts'], 'tags': ['x264', 'h264', '720']}, + {'identifier': 'brrip', 'hd': True, 'allow_3d': True, 'size': (700, 7000), 'median_size': 2000, 'label': 'BR-Rip', 'alternative': ['bdrip', ('br', 'rip'), 'hdtv', 'hdrip'], 'allow': ['720p', '1080p', '2160p'], 'ext':['mp4', 'avi', 'mkv'], 'tags': ['brrip']}, + {'identifier': 'dvdr', 'size': (3000, 10000), 'median_size': 4500, 'label': 'DVD-R', 'alternative': ['br2dvd', ('dvd', 'r')], 'allow': [], 'ext':['iso', 'img', 'vob'], 'tags': ['pal', 'ntsc', 'video_ts', 'audio_ts', ('dvd', 'r'), 'dvd9']}, + {'identifier': 'dvdrip', 'size': (600, 2400), 'median_size': 1500, 'label': 'DVD-Rip', 'width': 720, 'alternative': [('dvd', 'rip')], 'allow': [], 'ext':['avi'], 'tags': [('dvd', 'rip'), ('dvd', 'xvid'), ('dvd', 'divx')]}, + {'identifier': 'scr', 'size': (600, 1600), 'median_size': 700, 'label': 'Screener', 'alternative': ['screener', 'dvdscr', 'ppvrip', 'dvdscreener', 'hdscr', 'webrip', ('web', 'rip')], 'allow': ['dvdr', 'dvdrip', '720p', '1080p'], 'ext':[], 'tags': []}, + {'identifier': 'r5', 'size': (600, 1000), 'median_size': 700, 'label': 'R5', 'alternative': ['r6'], 'allow': ['dvdr', '720p', '1080p'], 'ext':[]}, + {'identifier': 
'tc', 'size': (600, 1000), 'median_size': 700, 'label': 'TeleCine', 'alternative': ['telecine'], 'allow': ['720p', '1080p'], 'ext':[]}, + {'identifier': 'ts', 'size': (600, 1000), 'median_size': 700, 'label': 'TeleSync', 'alternative': ['telesync', 'hdts'], 'allow': ['720p', '1080p'], 'ext':[]}, + {'identifier': 'cam', 'size': (600, 1000), 'median_size': 700, 'label': 'Cam', 'alternative': ['camrip', 'hdcam'], 'allow': ['720p', '1080p'], 'ext':[]} ] pre_releases = ['cam', 'ts', 'tc', 'r5', 'scr'] + threed_tags = { + 'sbs': [('half', 'sbs'), 'hsbs', ('full', 'sbs'), 'fsbs'], + 'ou': [('half', 'ou'), 'hou', ('full', 'ou'), 'fou'], + '3d': ['2d3d', '3d2d', '3d'], + } + + cached_qualities = None + cached_order = None def __init__(self): addEvent('quality.all', self.all) addEvent('quality.single', self.single) addEvent('quality.guess', self.guess) addEvent('quality.pre_releases', self.preReleases) + addEvent('quality.order', self.getOrder) + addEvent('quality.ishigher', self.isHigher) + addEvent('quality.isfinish', self.isFinish) + addEvent('quality.fill', self.fill) addApiView('quality.size.save', self.saveSize) addApiView('quality.list', self.allView, docs = { @@ -48,38 +66,62 @@ def __init__(self): }) addEvent('app.initialize', self.fill, priority = 10) + addEvent('app.load', self.fillBlank, priority = 120) + + addEvent('app.test', self.doTest) + + self.order = [] + self.addOrder() + + def addOrder(self): + self.order = [] + for q in self.qualities: + self.order.append(q.get('identifier')) + + def getOrder(self): + return self.order def preReleases(self): return self.pre_releases - def allView(self): + def allView(self, **kwargs): - return jsonified({ + return { 'success': True, 'list': self.all() - }) + } def all(self): - db = get_session() + if self.cached_qualities: + return self.cached_qualities - qualities = db.query(Quality).all() + db = get_db() temp = [] - for quality in qualities: - q = mergeDicts(self.getQuality(quality.identifier), quality.to_dict()) + for 
quality in self.qualities: + quality_doc = db.get('quality', quality.get('identifier'), with_doc = True)['doc'] + q = mergeDicts(quality, quality_doc) temp.append(q) + if len(temp) == len(self.qualities): + self.cached_qualities = temp + return temp def single(self, identifier = ''): - db = get_session() + db = get_db() quality_dict = {} - quality = db.query(Quality).filter(or_(Quality.identifier == identifier, Quality.id == identifier)).first() + try: + quality = db.get('quality', identifier, with_doc = True)['doc'] + except RecordNotFound: + log.error("Unable to find '%s' in the quality DB", identifier) + quality = None + if quality: - quality_dict = dict(self.getQuality(quality.identifier), **quality.to_dict()) + quality_dict = mergeDicts(self.getQuality(quality['identifier']), quality) return quality_dict @@ -89,128 +131,411 @@ def getQuality(self, identifier): if identifier == q.get('identifier'): return q - def saveSize(self): + def saveSize(self, **kwargs): + + try: + db = get_db() + quality = db.get('quality', kwargs.get('identifier'), with_doc = True) + + if quality: + quality['doc'][kwargs.get('value_type')] = tryInt(kwargs.get('value')) + db.update(quality['doc']) + + self.cached_qualities = None + + return { + 'success': True + } + except: + log.error('Failed: %s', traceback.format_exc()) + + return { + 'success': False + } + + def fillBlank(self): + db = get_db() + + try: + existing = list(db.all('quality')) + if len(self.qualities) > len(existing): + log.error('Filling in new qualities') + self.fill(reorder = True) + except: + log.error('Failed filling quality database with new qualities: %s', traceback.format_exc()) + + def fill(self, reorder = False): + + try: + db = get_db() + + order = 0 + for q in self.qualities: + + existing = None + try: + existing = db.get('quality', q.get('identifier'), with_doc = reorder) + except RecordNotFound: + pass + + if not existing: + db.insert({ + '_t': 'quality', + 'order': order, + 'identifier': 
q.get('identifier'), + 'size_min': tryInt(q.get('size')[0]), + 'size_max': tryInt(q.get('size')[1]), + }) + + log.info('Creating profile: %s', q.get('label')) + db.insert({ + '_t': 'profile', + 'order': order + 20, # Make sure it goes behind other profiles + 'core': True, + 'qualities': [q.get('identifier')], + 'label': toUnicode(q.get('label')), + 'finish': [True], + 'wait_for': [0], + }) + elif reorder: + log.info2('Updating quality order') + existing['doc']['order'] = order + db.update(existing['doc']) + + order += 1 + + return True + except: + log.error('Failed: %s', traceback.format_exc()) + + return False + + def guess(self, files, extra = None, size = None, use_cache = True): + if not extra: extra = {} - params = getParams() + # Create hash for cache + cache_key = str([f.replace('.' + getExt(f), '') if len(getExt(f)) < 4 else f for f in files]) + if use_cache: + cached = self.getCache(cache_key) + if cached and len(extra) == 0: + return cached - db = get_session() - quality = db.query(Quality).filter_by(identifier = params.get('identifier')).first() + qualities = self.all() - if quality: - setattr(quality, params.get('value_type'), params.get('value')) - db.commit() + # Start with 0 + score = {} + for quality in qualities: + score[quality.get('identifier')] = { + 'score': 0, + '3d': {} + } - return jsonified({ - 'success': True - }) + # Use metadata titles as extra check + if extra and extra.get('titles'): + files.extend(extra.get('titles')) - def fill(self): + for cur_file in files: + words = re.split('\W+', cur_file.lower()) + name_year = fireEvent('scanner.name_year', cur_file, file_name = cur_file, single = True) + threed_words = words + if name_year and name_year.get('name'): + split_name = splitString(name_year.get('name'), ' ') + threed_words = [x for x in words if x not in split_name] - db = get_session(); + for quality in qualities: + contains_score = self.containsTagScore(quality, words, cur_file) + threedscore = self.contains3D(quality, 
threed_words, cur_file) if quality.get('allow_3d') else (0, None) - order = 0 - for q in self.qualities: + self.calcScore(score, quality, contains_score, threedscore, penalty = contains_score) - # Create quality - qual = db.query(Quality).filter_by(identifier = q.get('identifier')).first() + size_scores = [] + for quality in qualities: - if not qual: - log.info('Creating quality: %s', q.get('label')) - qual = Quality() - qual.order = order - qual.identifier = q.get('identifier') - qual.label = toUnicode(q.get('label')) - qual.size_min, qual.size_max = q.get('size') + # Evaluate score based on size + size_score = self.guessSizeScore(quality, size = size) + loose_score = self.guessLooseScore(quality, extra = extra) - db.add(qual) + if size_score > 0: + size_scores.append(quality) - # Create single quality profile - prof = db.query(Profile).filter( - Profile.core == True - ).filter( - Profile.types.any(quality = qual) - ).all() + self.calcScore(score, quality, size_score + loose_score) - if not prof: - log.info('Creating profile: %s', q.get('label')) - prof = Profile( - core = True, - label = toUnicode(qual.label), - order = order - ) - db.add(prof) + # Add additional size score if only 1 size validated + if len(size_scores) == 1: + self.calcScore(score, size_scores[0], 7) + del size_scores - profile_type = ProfileType( - quality = qual, - profile = prof, - finish = True, - order = 0 - ) - prof.types.append(profile_type) + # Return nothing if all scores are <= 0 + has_non_zero = 0 + for s in score: + if score[s]['score'] > 0: + has_non_zero += 1 - order += 1 + if not has_non_zero: + return None - db.commit() + heighest_quality = max(score, key = lambda p: score[p]['score']) + if heighest_quality: + for quality in qualities: + if quality.get('identifier') == heighest_quality: + quality['is_3d'] = False + if score[heighest_quality].get('3d'): + quality['is_3d'] = True + return self.setCache(cache_key, quality) - time.sleep(0.3) # Wait a moment + return None - return 
True + def containsTagScore(self, quality, words, cur_file = ''): + cur_file = ss(cur_file) + score = 0.0 - def guess(self, files, extra = {}): + extension = words[-1] + words = words[:-1] - # Create hash for cache - hash = md5(str([f.replace('.' + getExt(f), '') for f in files])) - cached = self.getCache(hash) - if cached and extra is {}: return cached + points = { + 'identifier': 25, + 'label': 25, + 'alternative': 20, + 'tags': 11, + 'ext': 5, + } - for cur_file in files: - size = (os.path.getsize(cur_file) / 1024 / 1024) if os.path.isfile(cur_file) else 0 - words = re.split('\W+', cur_file.lower()) + scored_on = [] - for quality in self.all(): + # Check alt and tags + for tag_type in ['identifier', 'alternative', 'tags', 'label']: + qualities = quality.get(tag_type, []) + qualities = [qualities] if isinstance(qualities, (str, unicode)) else qualities - # Check tags - if quality['identifier'] in words: - log.debug('Found via identifier "%s" in %s', (quality['identifier'], cur_file)) - return self.setCache(hash, quality) + for alt in qualities: + if isinstance(alt, tuple): + if len(set(words) & set(alt)) == len(alt): + log.debug('Found %s via %s %s in %s', (quality['identifier'], tag_type, quality.get(tag_type), cur_file)) + score += points.get(tag_type) - if list(set(quality.get('alternative', [])) & set(words)): - log.debug('Found %s via alt %s in %s', (quality['identifier'], quality.get('alternative'), cur_file)) - return self.setCache(hash, quality) + if isinstance(alt, (str, unicode)) and ss(alt.lower()) in words and ss(alt.lower()) not in scored_on: + log.debug('Found %s via %s %s in %s', (quality['identifier'], tag_type, quality.get(tag_type), cur_file)) + score += points.get(tag_type) - for tag in quality.get('tags', []): - if isinstance(tag, tuple) and '.'.join(tag) in '.'.join(words): - log.debug('Found %s via tag %s in %s', (quality['identifier'], quality.get('tags'), cur_file)) - return self.setCache(hash, quality) + # Don't score twice on same tag + 
scored_on.append(ss(alt).lower()) - if list(set(quality.get('tags', [])) & set(words)): - log.debug('Found %s via tag %s in %s', (quality['identifier'], quality.get('tags'), cur_file)) - return self.setCache(hash, quality) + # Check extension + for ext in quality.get('ext', []): + if ext == extension: + log.debug('Found %s with .%s extension in %s', (quality['identifier'], ext, cur_file)) + score += points['ext'] - # Try again with loose testing - quality = self.guessLoose(hash, extra = extra) - if quality: - return self.setCache(hash, quality) + return score - log.debug('Could not identify quality for: %s', files) - return None + def contains3D(self, quality, words, cur_file = ''): + cur_file = ss(cur_file) + + for key in self.threed_tags: + tags = self.threed_tags.get(key, []) + + for tag in tags: + if isinstance(tag, tuple): + if len(set(words) & set(tag)) == len(tag): + log.debug('Found %s in %s', (tag, cur_file)) + return 1, key + elif tag in words: + log.debug('Found %s in %s', (tag, cur_file)) + return 1, key - def guessLoose(self, hash, extra): + return 0, None - for quality in self.all(): + def guessLooseScore(self, quality, extra = None): + + score = 0 + + if extra: # Check width resolution, range 20 - if (quality.get('width', 720) - 20) <= extra.get('resolution_width', 0) <= (quality.get('width', 720) + 20): - log.debug('Found %s via resolution_width: %s == %s', (quality['identifier'], quality.get('width', 720), extra.get('resolution_width', 0))) - return self.setCache(hash, quality) + if quality.get('width') and (quality.get('width') - 20) <= extra.get('resolution_width', 0) <= (quality.get('width') + 20): + log.debug('Found %s via resolution_width: %s == %s', (quality['identifier'], quality.get('width'), extra.get('resolution_width', 0))) + score += 10 # Check height resolution, range 20 - if (quality.get('height', 480) - 20) <= extra.get('resolution_height', 0) <= (quality.get('height', 480) + 20): - log.debug('Found %s via resolution_height: %s == 
%s', (quality['identifier'], quality.get('height', 480), extra.get('resolution_height', 0))) - return self.setCache(hash, quality) + if quality.get('height') and (quality.get('height') - 20) <= extra.get('resolution_height', 0) <= (quality.get('height') + 20): + log.debug('Found %s via resolution_height: %s == %s', (quality['identifier'], quality.get('height'), extra.get('resolution_height', 0))) + score += 5 + + if quality.get('identifier') == 'dvdrip' and 480 <= extra.get('resolution_width', 0) <= 720: + log.debug('Add point for correct dvdrip resolutions') + score += 1 + + return score + + + def guessSizeScore(self, quality, size = None): + + score = 0 + + if size: + + size = tryFloat(size) + size_min = tryFloat(quality['size_min']) + size_max = tryFloat(quality['size_max']) + + if size_min <= size <= size_max: + log.debug('Found %s via release size: %s MB < %s MB < %s MB', (quality['identifier'], size_min, size, size_max)) + + proc_range = size_max - size_min + size_diff = size - size_min + size_proc = (size_diff / proc_range) + + median_diff = quality['median_size'] - size_min + median_proc = (median_diff / proc_range) + + max_points = 8 + score += ceil(max_points - (fabs(size_proc - median_proc) * max_points)) + else: + score -= 5 + + return score + + def calcScore(self, score, quality, add_score, threedscore = (0, None), penalty = 0): + + score[quality['identifier']]['score'] += add_score + + threedscore, threedtag = threedscore + if threedscore and threedtag: + if threedscore not in score[quality['identifier']]['3d']: + score[quality['identifier']]['3d'][threedtag] = 0 + + score[quality['identifier']]['3d'][threedtag] += threedscore + + # Set order for allow calculation (and cache) + if not self.cached_order: + self.cached_order = {} + for q in self.qualities: + self.cached_order[q.get('identifier')] = self.qualities.index(q) + + if penalty and add_score != 0: + for allow in quality.get('allow', []): + score[allow]['score'] -= ((penalty * 2) if 
self.cached_order[allow] < self.cached_order[quality['identifier']] else penalty) * 2 + + # Give panelty for all other qualities + for q in self.qualities: + if quality.get('identifier') != q.get('identifier') and score.get(q.get('identifier')): + score[q.get('identifier')]['score'] -= 1 + + def isFinish(self, quality, profile, release_age = 0): + if not isinstance(profile, dict) or not profile.get('qualities'): + # No profile so anything (scanned) is good enough + return True + + try: + index = [i for i, identifier in enumerate(profile['qualities']) if identifier == quality['identifier'] and bool(profile['3d'][i] if profile.get('3d') else False) == bool(quality.get('is_3d', False))][0] + + if index == 0 or (profile['finish'][index] and int(release_age) >= int(profile.get('stop_after', [0])[0])): + return True + + return False + except: + return False + + def isHigher(self, quality, compare_with, profile = None): + if not isinstance(profile, dict) or not profile.get('qualities'): + profile = fireEvent('profile.default', single = True) + + # Try to find quality in profile, if not found: a quality we do not want is lower than anything else + try: + quality_order = [i for i, identifier in enumerate(profile['qualities']) if identifier == quality['identifier'] and bool(profile['3d'][i] if profile.get('3d') else 0) == bool(quality.get('is_3d', 0))][0] + except: + log.debug('Quality %s not found in profile identifiers %s', (quality['identifier'] + (' 3D' if quality.get('is_3d', 0) else ''), \ + [identifier + (' 3D' if (profile['3d'][i] if profile.get('3d') else 0) else '') for i, identifier in enumerate(profile['qualities'])])) + return 'lower' + + # Try to find compare quality in profile, if not found: anything is higher than a not wanted quality + try: + compare_order = [i for i, identifier in enumerate(profile['qualities']) if identifier == compare_with['identifier'] and bool(profile['3d'][i] if profile.get('3d') else 0) == bool(compare_with.get('is_3d', 0))][0] + 
except: + log.debug('Compare quality %s not found in profile identifiers %s', (compare_with['identifier'] + (' 3D' if compare_with.get('is_3d', 0) else ''), \ + [identifier + (' 3D' if (profile['3d'][i] if profile.get('3d') else 0) else '') for i, identifier in enumerate(profile['qualities'])])) + return 'higher' + + # Note to self: a lower number means higher quality + if quality_order > compare_order: + return 'lower' + elif quality_order == compare_order: + return 'equal' + else: + return 'higher' + + def doTest(self): + + tests = { + 'Movie Name (1999)-DVD-Rip.avi': {'size': 700, 'quality': 'dvdrip'}, + 'Movie Name 1999 720p Bluray.mkv': {'size': 4200, 'quality': '720p'}, + 'Movie Name 1999 BR-Rip 720p.avi': {'size': 1000, 'quality': 'brrip'}, + 'Movie Name 1999 720p Web Rip.avi': {'size': 1200, 'quality': 'scr'}, + 'Movie Name 1999 Web DL.avi': {'size': 800, 'quality': 'brrip'}, + 'Movie.Name.1999.1080p.WEBRip.H264-Group': {'size': 1500, 'quality': 'scr'}, + 'Movie.Name.1999.DVDRip-Group': {'size': 750, 'quality': 'dvdrip'}, + 'Movie.Name.1999.DVD-Rip-Group': {'size': 700, 'quality': 'dvdrip'}, + 'Movie.Name.1999.DVD-R-Group': {'size': 4500, 'quality': 'dvdr'}, + 'Movie.Name.Camelie.1999.720p.BluRay.x264-Group': {'size': 5500, 'quality': '720p'}, + 'Movie.Name.2008.German.DL.AC3.1080p.BluRay.x264-Group': {'size': 8500, 'extra': {'resolution_width': 1920, 'resolution_height': 1080} , 'quality': '1080p'}, + 'Movie.Name.2004.GERMAN.AC3D.DL.1080p.BluRay.x264-Group': {'size': 8000, 'quality': '1080p'}, + 'Movie.Name.2013.BR-Disk-Group.iso': {'size': 48000, 'quality': 'bd50'}, + 'Movie.Name.2013.2D+3D.BR-Disk-Group.iso': {'size': 52000, 'quality': 'bd50', 'is_3d': True}, + 'Movie.Rising.Name.Girl.2011.NTSC.DVD9-GroupDVD': {'size': 7200, 'quality': 'dvdr'}, + 'Movie Name (2013) 2D + 3D': {'size': 49000, 'quality': 'bd50', 'is_3d': True}, + 'Movie Monuments 2013 BrRip 1080p': {'size': 1800, 'quality': 'brrip'}, + 'Movie Monuments 2013 BrRip 720p': {'size': 1300, 
'quality': 'brrip'}, + 'The.Movie.2014.3D.1080p.BluRay.AVC.DTS-HD.MA.5.1-GroupName': {'size': 30000, 'quality': 'bd50', 'is_3d': True}, + '/home/namehou/Movie Monuments (2012)/Movie Monuments.mkv': {'size': 5500, 'quality': '720p', 'is_3d': False}, + '/home/namehou/Movie Monuments (2012)/Movie Monuments Full-OU.mkv': {'size': 5500, 'quality': '720p', 'is_3d': True}, + '/home/namehou/Movie Monuments (2013)/Movie Monuments.mkv': {'size': 10000, 'quality': '1080p', 'is_3d': False}, + '/home/namehou/Movie Monuments (2013)/Movie Monuments Full-OU.mkv': {'size': 10000, 'quality': '1080p', 'is_3d': True}, + '/volume1/Public/3D/Moviename/Moviename (2009).3D.SBS.ts': {'size': 7500, 'quality': '1080p', 'is_3d': True}, + '/volume1/Public/Moviename/Moviename (2009).ts': {'size': 7500, 'quality': '1080p'}, + '/movies/BluRay HDDVD H.264 MKV 720p EngSub/QuiQui le fou (criterion collection #123, 1915)/QuiQui le fou (1915) 720p x264 BluRay.mkv': {'size': 5500, 'quality': '720p'}, + 'C:\\movies\QuiQui le fou (collection #123, 1915)\QuiQui le fou (1915) 720p x264 BluRay.mkv': {'size': 5500, 'quality': '720p'}, + 'C:\\movies\QuiQui le fou (collection #123, 1915)\QuiQui le fou (1915) half-sbs 720p x264 BluRay.mkv': {'size': 5500, 'quality': '720p', 'is_3d': True}, + 'Moviename 2014 720p HDCAM XviD DualAudio': {'size': 4000, 'quality': 'cam'}, + 'Moviename (2014) - 720p CAM x264': {'size': 2250, 'quality': 'cam'}, + 'Movie Name (2014).mp4': {'size': 750, 'quality': 'brrip'}, + 'Moviename.2014.720p.R6.WEB-DL.x264.AC3-xyz': {'size': 750, 'quality': 'r5'}, + 'Movie name 2014 New Source 720p HDCAM x264 AC3 xyz': {'size': 750, 'quality': 'cam'}, + 'Movie.Name.2014.720p.HD.TS.AC3.x264': {'size': 750, 'quality': 'ts'}, + 'Movie.Name.2014.1080p.HDrip.x264.aac-ReleaseGroup': {'size': 7000, 'quality': 'brrip'}, + 'Movie.Name.2014.HDCam.Chinese.Subs-ReleaseGroup': {'size': 15000, 'quality': 'cam'}, + 'Movie Name 2014 HQ DVDRip X264 AC3 (bla)': {'size': 0, 'quality': 'dvdrip'}, + 'Movie Name1 
(2012).mkv': {'size': 4500, 'quality': '720p'}, + 'Movie Name (2013).mkv': {'size': 8500, 'quality': '1080p'}, + 'Movie Name (2014).mkv': {'size': 4500, 'quality': '720p', 'extra': {'titles': ['Movie Name 2014 720p Bluray']}}, + 'Movie Name (2015).mkv': {'size': 500, 'quality': '1080p', 'extra': {'resolution_width': 1920}}, + 'Movie Name (2015).mp4': {'size': 6500, 'quality': 'brrip'}, + 'Movie Name.2014.720p Web-Dl Aac2.0 h264-ReleaseGroup': {'size': 3800, 'quality': 'brrip'}, + 'Movie Name.2014.720p.WEBRip.x264.AC3-ReleaseGroup': {'size': 3000, 'quality': 'scr'}, + 'Movie.Name.2014.1080p.HDCAM.-.ReleaseGroup': {'size': 5300, 'quality': 'cam'}, + 'Movie.Name.2014.720p.HDSCR.4PARTS.MP4.AAC.ReleaseGroup': {'size': 2401, 'quality': 'scr'}, + 'Movie.Name.2014.720p.BluRay.x264-ReleaseGroup': {'size': 10300, 'quality': '720p'}, + 'Movie.Name.2014.720.Bluray.x264.DTS-ReleaseGroup': {'size': 9700, 'quality': '720p'}, + 'Movie Name 2015 2160p SourceSite WEBRip DD5 1 x264-ReleaseGroup': {'size': 21800, 'quality': '2160p'}, + 'Movie Name 2012 2160p WEB-DL FLAC 5 1 x264-ReleaseGroup': {'size': 59650, 'quality': '2160p'}, + 'Movie.Name.2015.FRENCH.1080p.WebHD.H264-SiGeRiS.mkv' : {'size': 3060, 'quality': 'brrip'} + } + + correct = 0 + for name in tests: + test_quality = self.guess(files = [name], extra = tests[name].get('extra', None), size = tests[name].get('size', None), use_cache = False) or {} + success = test_quality.get('identifier') == tests[name]['quality'] and test_quality.get('is_3d') == tests[name].get('is_3d', False) + if not success: + log.error('%s failed check, thinks it\'s "%s" expecting "%s"', (name, + test_quality.get('identifier') + (' 3D' if test_quality.get('is_3d') else ''), + tests[name]['quality'] + (' 3D' if tests[name].get('is_3d') else '') + )) + + correct += success + + if correct == len(tests): + log.info('Quality test successful') + return True + else: + log.error('Quality test failed: %s out of %s succeeded', (correct, len(tests))) - if 480 <= 
extra.get('resolution_width', 0) <= 720: - log.debug('Found as dvdrip') - return self.setCache(hash, self.single('dvdrip')) - return None diff --git a/couchpotato/core/plugins/quality/static/quality.css b/couchpotato/core/plugins/quality/static/quality.css deleted file mode 100644 index f71f007e8c..0000000000 --- a/couchpotato/core/plugins/quality/static/quality.css +++ /dev/null @@ -1,26 +0,0 @@ -.group_sizes { - -} - - .group_sizes .head { - font-weight: bold; - } - - .group_sizes .ctrlHolder { - padding-top: 4px !important; - padding-bottom: 4px !important; - font-size: 12px; - } - - .group_sizes .label { - max-width: 120px; - } - - .group_sizes .min, .group_sizes .max { - text-align: center; - width: 50px; - max-width: 50px; - margin: 0 5px !important; - padding: 0 3px; - display: inline-block; - } \ No newline at end of file diff --git a/couchpotato/core/plugins/quality/static/quality.js b/couchpotato/core/plugins/quality/static/quality.js index bd2ff2acbc..d9aaf581e9 100644 --- a/couchpotato/core/plugins/quality/static/quality.js +++ b/couchpotato/core/plugins/quality/static/quality.js @@ -8,40 +8,47 @@ var QualityBase = new Class({ self.qualities = data.qualities; - self.profiles = [] + self.profiles_list = null; + self.profiles = []; Array.each(data.profiles, self.createProfilesClass.bind(self)); - App.addEvent('load', self.addSettings.bind(self)) + App.addEvent('loadSettings', self.addSettings.bind(self)); }, getProfile: function(id){ return this.profiles.filter(function(profile){ - return profile.data.id == id - }).pick() + return profile.data._id == id; + }).pick(); }, // Hide items when getting profiles getActiveProfiles: function(){ return Array.filter(this.profiles, function(profile){ - return !profile.data.hide + return !profile.data.hide; }); }, - getQuality: function(id){ - return this.qualities.filter(function(q){ - return q.id == id; - }).pick(); + getQuality: function(identifier){ + try { + return (this.qualities.filter(function(q){ + return 
q.identifier == identifier; + }).pick() || {}); + } + catch(e){} + + return {}; }, addSettings: function(){ var self = this; - self.settings = App.getPage('Settings') + self.settings = App.getPage('Settings'); self.settings.addEvent('create', function(){ var tab = self.settings.createSubTab('profile', { 'label': 'Quality', - 'name': 'profile' + 'name': 'profile', + 'subtab_label': 'Qualities' }, self.settings.tabs.searcher ,'searcher'); self.tab = tab.tab; @@ -51,7 +58,7 @@ var QualityBase = new Class({ self.createProfileOrdering(); self.createSizes(); - }) + }); }, @@ -61,7 +68,7 @@ var QualityBase = new Class({ createProfiles: function(){ var self = this; - var non_core_profiles = Array.filter(self.profiles, function(profile){ return !profile.isCore() }); + var non_core_profiles = Array.filter(self.profiles, function(profile){ return !profile.isCore(); }); var count = non_core_profiles.length; self.settings.createGroup({ @@ -74,7 +81,7 @@ var QualityBase = new Class({ 'events': { 'click': function(){ var profile = self.createProfilesClass(); - $(profile).inject(self.profile_container) + $(profile).inject(self.profile_container); } } }) @@ -82,7 +89,7 @@ var QualityBase = new Class({ // Add profiles, that aren't part of the core (for editing) Array.each(non_core_profiles, function(profile){ - $(profile).inject(self.profile_container) + $(profile).inject(self.profile_container); }); }, @@ -90,9 +97,9 @@ var QualityBase = new Class({ createProfilesClass: function(data){ var self = this; - var data = data || {'id': randomString()} - var profile = new Profile(data) - self.profiles.include(profile) + data = data || {'id': randomString()}; + var profile = new Profile(data); + self.profiles.include(profile); return profile; }, @@ -100,23 +107,23 @@ var QualityBase = new Class({ createProfileOrdering: function(){ var self = this; - var profile_list; - var group = self.settings.createGroup({ - 'label': 'Profile Defaults' - }).adopt( + self.settings.createGroup({ + 'label': 
'Profile Defaults', + 'description': '(Needs refresh \'' +(App.isMac() ? 'CMD+R' : 'F5')+ '\' after editing)' + }).grab( new Element('.ctrlHolder#profile_ordering').adopt( new Element('label[text=Order]'), - profile_list = new Element('ul'), + self.profiles_list = new Element('ul'), new Element('p.formHint', { 'html': 'Change the order the profiles are in the dropdown list. Uncheck to hide it completely.
    First one will be default.' }) ) - ).inject(self.content) + ).inject(self.content); Array.each(self.profiles, function(profile){ var check; - new Element('li', {'data-id': profile.data.id}).adopt( - check = new Element('input.inlay[type=checkbox]', { + new Element('li', {'data-id': profile.data._id}).adopt( + check = new Element('input[type=checkbox]', { 'checked': !profile.data.hide, 'events': { 'change': self.saveProfileOrdering.bind(self) @@ -125,30 +132,35 @@ var QualityBase = new Class({ new Element('span.profile_label', { 'text': profile.data.label }), - new Element('span.handle') - ).inject(profile_list); - - new Form.Check(check); - + new Element('span.handle.icon-handle') + ).inject(self.profiles_list); }); // Sortable - self.profile_sortable = new Sortables(profile_list, { + var sorted_changed = false; + self.profile_sortable = new Sortables(self.profiles_list, { 'revert': true, - 'handle': '', + 'handle': '.handle', 'opacity': 0.5, - 'onComplete': self.saveProfileOrdering.bind(self) + 'onSort': function(){ + sorted_changed = true; + }, + 'onComplete': function(){ + if(sorted_changed){ + self.saveProfileOrdering(); + sorted_changed = false; + } + } }); }, saveProfileOrdering: function(){ - var self = this; - - var ids = []; - var hidden = []; + var self = this, + ids = [], + hidden = []; - self.profile_sortable.list.getElements('li').each(function(el, nr){ + self.profiles_list.getElements('li').each(function(el, nr){ ids.include(el.get('data-id')); hidden[nr] = +!el.getElement('input[type=checkbox]').get('checked'); }); @@ -173,35 +185,34 @@ var QualityBase = new Class({ 'description': 'Edit the minimal and maximum sizes (in MB) for each quality.', 'advanced': true, 'name': 'sizes' - }).inject(self.content) - + }).inject(self.content); new Element('div.item.head.ctrlHolder').adopt( new Element('span.label', {'text': 'Quality'}), new Element('span.min', {'text': 'Min'}), new Element('span.max', {'text': 'Max'}) - ).inject(group) + ).inject(group); 
Array.each(self.qualities, function(quality){ new Element('div.ctrlHolder.item').adopt( new Element('span.label', {'text': quality.label}), - new Element('input.min.inlay[type=text]', { + new Element('input.min[type=text]', { 'value': quality.size_min, 'events': { 'keyup': function(e){ - self.changeSize(quality.identifier, 'size_min', e.target.get('value')) + self.changeSize(quality.identifier, 'size_min', e.target.get('value')); } } }), - new Element('input.max.inlay[type=text]', { + new Element('input.max[type=text]', { 'value': quality.size_max, 'events': { 'keyup': function(e){ - self.changeSize(quality.identifier, 'size_max', e.target.get('value')) + self.changeSize(quality.identifier, 'size_max', e.target.get('value')); } } }) - ).inject(group) + ).inject(group); }); }, @@ -210,9 +221,9 @@ var QualityBase = new Class({ changeSize: function(identifier, type, value){ var self = this; - if(self.size_timer[identifier + type]) clearTimeout(self.size_timer[identifier + type]); + if(self.size_timer[identifier + type]) clearRequestTimeout(self.size_timer[identifier + type]); - self.size_timer[identifier + type] = (function(){ + self.size_timer[identifier + type] = requestTimeout(function(){ Api.request('quality.size.save', { 'data': { 'identifier': identifier, @@ -220,7 +231,7 @@ var QualityBase = new Class({ 'value': value } }); - }).delay(300) + }, 300); } diff --git a/couchpotato/core/plugins/quality/static/quality.scss b/couchpotato/core/plugins/quality/static/quality.scss new file mode 100644 index 0000000000..c2aa9f99b7 --- /dev/null +++ b/couchpotato/core/plugins/quality/static/quality.scss @@ -0,0 +1,19 @@ +@import "_mixins"; + +.group_sizes { + + .item { + .label { + min-width: 150px; + } + + .min, .max { + display: inline-block; + width: 70px !important; + min-width: 0 !important; + margin-right: $padding/2; + text-align: center; + } + } + +} diff --git a/couchpotato/core/plugins/release/__init__.py b/couchpotato/core/plugins/release/__init__.py index 
b6a667c219..e6e60c4bbd 100644 --- a/couchpotato/core/plugins/release/__init__.py +++ b/couchpotato/core/plugins/release/__init__.py @@ -1,6 +1,5 @@ from .main import Release -def start(): - return Release() -config = [] +def autoload(): + return Release() diff --git a/couchpotato/core/plugins/release/index.py b/couchpotato/core/plugins/release/index.py new file mode 100644 index 0000000000..8265fe332d --- /dev/null +++ b/couchpotato/core/plugins/release/index.py @@ -0,0 +1,64 @@ +from hashlib import md5 + +from CodernityDB.hash_index import HashIndex +from CodernityDB.tree_index import TreeBasedIndex + + +class ReleaseIndex(TreeBasedIndex): + _version = 1 + + def __init__(self, *args, **kwargs): + kwargs['key_format'] = '32s' + super(ReleaseIndex, self).__init__(*args, **kwargs) + + def make_key(self, key): + return key + + def make_key_value(self, data): + if data.get('_t') == 'release' and data.get('media_id'): + return data['media_id'], None + + +class ReleaseStatusIndex(TreeBasedIndex): + _version = 1 + + def __init__(self, *args, **kwargs): + kwargs['key_format'] = '32s' + super(ReleaseStatusIndex, self).__init__(*args, **kwargs) + + def make_key(self, key): + return md5(key).hexdigest() + + def make_key_value(self, data): + if data.get('_t') == 'release' and data.get('status'): + return md5(data.get('status')).hexdigest(), {'media_id': data.get('media_id')} + + +class ReleaseIDIndex(HashIndex): + _version = 1 + + def __init__(self, *args, **kwargs): + kwargs['key_format'] = '32s' + super(ReleaseIDIndex, self).__init__(*args, **kwargs) + + def make_key(self, key): + return md5(key).hexdigest() + + def make_key_value(self, data): + if data.get('_t') == 'release' and data.get('identifier'): + return md5(data.get('identifier')).hexdigest(), {'media_id': data.get('media_id')} + + +class ReleaseDownloadIndex(HashIndex): + _version = 2 + + def __init__(self, *args, **kwargs): + kwargs['key_format'] = '32s' + super(ReleaseDownloadIndex, self).__init__(*args, 
**kwargs) + + def make_key(self, key): + return md5(key.lower()).hexdigest() + + def make_key_value(self, data): + if data.get('_t') == 'release' and data.get('download_info') and data['download_info']['id'] and data['download_info']['downloader']: + return md5(('%s-%s' % (data['download_info']['downloader'], data['download_info']['id'])).lower()).hexdigest(), None diff --git a/couchpotato/core/plugins/release/main.py b/couchpotato/core/plugins/release/main.py index 02843f855a..83ec777268 100644 --- a/couchpotato/core/plugins/release/main.py +++ b/couchpotato/core/plugins/release/main.py @@ -1,24 +1,34 @@ -from couchpotato import get_session +from inspect import ismethod, isfunction +import os +import time +import traceback + +from CodernityDB.database import RecordDeleted, RecordNotFound +from couchpotato import md5, get_db from couchpotato.api import addApiView from couchpotato.core.event import fireEvent, addEvent -from couchpotato.core.helpers.encoding import ss -from couchpotato.core.helpers.request import getParam, jsonified +from couchpotato.core.helpers.encoding import toUnicode, sp +from couchpotato.core.helpers.variable import getTitle, tryInt from couchpotato.core.logger import CPLog from couchpotato.core.plugins.base import Plugin -from couchpotato.core.plugins.scanner.main import Scanner -from couchpotato.core.settings.model import File, Release as Relea, Movie -from sqlalchemy.sql.expression import and_, or_ -import os +from .index import ReleaseIndex, ReleaseStatusIndex, ReleaseIDIndex, ReleaseDownloadIndex +from couchpotato.environment import Env + log = CPLog(__name__) class Release(Plugin): - def __init__(self): - addEvent('release.add', self.add) + _database = { + 'release': ReleaseIndex, + 'release_status': ReleaseStatusIndex, + 'release_identifier': ReleaseIDIndex, + 'release_download': ReleaseDownloadIndex + } - addApiView('release.download', self.download, docs = { + def __init__(self): + addApiView('release.manual_download', 
self.manualDownload, docs = { 'desc': 'Send a release manually to the downloaders', 'params': { 'id': {'type': 'id', 'desc': 'ID of the release object in release-table'} @@ -37,160 +47,522 @@ def __init__(self): } }) + addEvent('release.add', self.add) + addEvent('release.download', self.download) + addEvent('release.try_download_result', self.tryDownloadResult) + addEvent('release.create_from_search', self.createFromSearch) addEvent('release.delete', self.delete) addEvent('release.clean', self.clean) + addEvent('release.update_status', self.updateStatus) + addEvent('release.with_status', self.withStatus) + addEvent('release.for_media', self.forMedia) - def add(self, group): - db = get_session() - - identifier = '%s.%s.%s' % (group['library']['identifier'], group['meta_data'].get('audio', 'unknown'), group['meta_data']['quality']['identifier']) - - # Add movie - done_status = fireEvent('status.get', 'done', single = True) - movie = db.query(Movie).filter_by(library_id = group['library'].get('id')).first() - if not movie: - movie = Movie( - library_id = group['library'].get('id'), - profile_id = 0, - status_id = done_status.get('id') - ) - db.add(movie) - db.commit() - - # Add Release - snatched_status = fireEvent('status.get', 'snatched', single = True) - rel = db.query(Relea).filter( - or_( - Relea.identifier == identifier, - and_(Relea.identifier.startswith(group['library']['identifier']), Relea.status_id == snatched_status.get('id')) - ) - ).first() - if not rel: - rel = Relea( - identifier = identifier, - movie = movie, - quality_id = group['meta_data']['quality'].get('id'), - status_id = done_status.get('id') - ) - db.add(rel) - db.commit() - - # Add each file type - for type in group['files']: - for cur_file in group['files'][type]: - added_file = self.saveFile(cur_file, type = type, include_media_info = type is 'movie') - try: - added_file = db.query(File).filter_by(id = added_file.get('id')).one() - rel.files.append(added_file) - db.commit() - except 
Exception, e: - log.debug('Failed to attach "%s" to release: %s', (cur_file, e)) + # Clean releases that didn't have activity in the last week + addEvent('app.load', self.cleanDone, priority = 1000) + fireEvent('schedule.interval', 'movie.clean_releases', self.cleanDone, hours = 12) - fireEvent('movie.restatus', movie.id) + def cleanDone(self): + log.debug('Removing releases from dashboard') - return True + now = time.time() + week = 604800 + db = get_db() - def saveFile(self, filepath, type = 'unknown', include_media_info = False): + # Get (and remove) parentless releases + releases = db.all('release', with_doc = False) + media_exist = [] + reindex = 0 + for release in releases: + if release.get('key') in media_exist: + continue - properties = {} + try: - # Get media info for files - if include_media_info: - properties = {} + try: + doc = db.get('id', release.get('_id')) + except RecordDeleted: + reindex += 1 + continue - # Check database and update/insert if necessary - return fireEvent('file.add', path = filepath, part = fireEvent('scanner.partnumber', file, single = True), type_tuple = Scanner.file_types.get(type), properties = properties, single = True) + db.get('id', release.get('key')) + media_exist.append(release.get('key')) - def deleteView(self): + try: + if doc.get('status') == 'ignore': + doc['status'] = 'ignored' + db.update(doc) + except: + log.error('Failed fixing mis-status tag: %s', traceback.format_exc()) + except ValueError: + fireEvent('database.delete_corrupted', release.get('key'), traceback_error = traceback.format_exc(0)) + reindex += 1 + except RecordDeleted: + db.delete(doc) + log.debug('Deleted orphaned release: %s', doc) + reindex += 1 + except: + log.debug('Failed cleaning up orphaned releases: %s', traceback.format_exc()) + + if reindex > 0: + db.reindex() + + del media_exist + + # get movies last_edit more than a week ago + medias = fireEvent('media.with_status', ['done', 'active'], single = True) + + for media in medias: + if 
media.get('last_edit', 0) > (now - week): + continue + + for rel in self.forMedia(media['_id']): + + # Remove all available releases + if rel['status'] in ['available']: + self.delete(rel['_id']) + + # Set all snatched and downloaded releases to ignored to make sure they are ignored when re-adding the media + elif rel['status'] in ['snatched', 'downloaded']: + self.updateStatus(rel['_id'], status = 'ignored') + + if 'recent' in media.get('tags', []): + fireEvent('media.untag', media.get('_id'), 'recent', single = True) + + def add(self, group, update_info = True, update_id = None): + + try: + db = get_db() + + release_identifier = '%s.%s.%s' % (group['identifier'], group['meta_data'].get('audio', 'unknown'), group['meta_data']['quality']['identifier']) + + # Add movie if it doesn't exist + try: + media = db.get('media', 'imdb-%s' % group['identifier'], with_doc = True)['doc'] + except: + media = fireEvent('movie.add', params = { + 'identifier': group['identifier'], + 'profile_id': None, + }, search_after = False, update_after = update_info, notify_after = False, status = 'done', single = True) + + release = None + if update_id: + try: + release = db.get('id', update_id) + release.update({ + 'identifier': release_identifier, + 'last_edit': int(time.time()), + 'status': 'done', + }) + except: + log.error('Failed updating existing release: %s', traceback.format_exc()) + else: + + # Add Release + if not release: + release = { + '_t': 'release', + 'media_id': media['_id'], + 'identifier': release_identifier, + 'quality': group['meta_data']['quality'].get('identifier'), + 'is_3d': group['meta_data']['quality'].get('is_3d', 0), + 'last_edit': int(time.time()), + 'status': 'done' + } - release_id = getParam('id') + try: + r = db.get('release_identifier', release_identifier, with_doc = True)['doc'] + r['media_id'] = media['_id'] + except: + log.debug('Failed updating release by identifier "%s". 
Inserting new.', release_identifier) + r = db.insert(release) - return jsonified({ - 'success': self.delete(release_id) - }) + # Update with ref and _id + release.update({ + '_id': r['_id'], + '_rev': r['_rev'], + }) - def delete(self, id): + # Empty out empty file groups + release['files'] = dict((k, [toUnicode(x) for x in v]) for k, v in group['files'].items() if v) + db.update(release) - db = get_session() + fireEvent('media.restatus', media['_id'], allowed_restatus = ['done'], single = True) - rel = db.query(Relea).filter_by(id = id).first() - if rel: - rel.delete() - db.commit() return True + except: + log.error('Failed: %s', traceback.format_exc()) return False - def clean(self, id): + def deleteView(self, id = None, **kwargs): + + return { + 'success': self.delete(id) + } - db = get_session() + def delete(self, release_id): + + try: + db = get_db() + rel = db.get('id', release_id) + db.delete(rel) + return True + except RecordDeleted: + log.debug('Already deleted: %s', release_id) + return True + except: + log.error('Failed: %s', traceback.format_exc()) + + return False - rel = db.query(Relea).filter_by(id = id).first() - if rel: - for release_file in rel.files: - if not os.path.isfile(ss(release_file.path)): - db.delete(release_file) - db.commit() + def clean(self, release_id): - if len(rel.files) == 0: - self.delete(id) + try: + db = get_db() + rel = db.get('id', release_id) + raw_files = rel.get('files') + + if len(raw_files) == 0: + self.delete(rel['_id']) + else: + + files = {} + for file_type in raw_files: + + for release_file in raw_files.get(file_type, []): + if os.path.isfile(sp(release_file)): + if file_type not in files: + files[file_type] = [] + files[file_type].append(release_file) + + rel['files'] = files + db.update(rel) return True + except: + log.error('Failed: %s', traceback.format_exc()) return False - def ignore(self): + def ignore(self, id = None, **kwargs): - db = get_session() - id = getParam('id') + db = get_db() - rel = 
db.query(Relea).filter_by(id = id).first() - if rel: - ignored_status = fireEvent('status.get', 'ignored', single = True) - available_status = fireEvent('status.get', 'available', single = True) - rel.status_id = available_status.get('id') if rel.status_id is ignored_status.get('id') else ignored_status.get('id') - db.commit() + try: + if id: + rel = db.get('id', id, with_doc = True) + self.updateStatus(id, 'available' if rel['status'] in ['ignored', 'failed'] else 'ignored') - return jsonified({ - 'success': True - }) + return { + 'success': True + } + except: + log.error('Failed: %s', traceback.format_exc()) - def download(self): + return { + 'success': False + } + + def manualDownload(self, id = None, **kwargs): - db = get_session() - id = getParam('id') - status_snatched = fireEvent('status.add', 'snatched', single = True) + db = get_db() - rel = db.query(Relea).filter_by(id = id).first() - if rel: - item = {} - for info in rel.info: - item[info.identifier] = info.value + try: + release = db.get('id', id) + item = release['info'] + movie = db.get('id', release['media_id']) + + fireEvent('notify.frontend', type = 'release.manual_download', data = True, message = 'Snatching "%s"' % item['name']) # Get matching provider provider = fireEvent('provider.belongs_to', item['url'], provider = item.get('provider'), single = True) - if item['type'] != 'torrent_magnet': - item['download'] = provider.download + if item.get('protocol') != 'torrent_magnet': + item['download'] = provider.loginDownload if provider.urls.get('login') else provider.download - success = fireEvent('searcher.download', data = item, movie = rel.movie.to_dict({ - 'profile': {'types': {'quality': {}}}, - 'releases': {'status': {}, 'quality': {}}, - 'library': {'titles': {}, 'files':{}}, - 'files': {} - }), manual = True, single = True) + success = self.download(data = item, media = movie, manual = True) if success: - rel.status_id = status_snatched.get('id') - db.commit() + fireEvent('notify.frontend', 
type = 'release.manual_download', data = True, message = 'Successfully snatched "%s"' % item['name']) - return jsonified({ - 'success': success - }) - else: - log.error('Couldn\'t find release with id: %s', id) + return { + 'success': success == True + } - return jsonified({ - 'success': False - }) + except: + log.error('Couldn\'t find release with id: %s: %s', (id, traceback.format_exc())) + return { + 'success': False + } + + def download(self, data, media, manual = False): + + # Test to see if any downloaders are enabled for this type + downloader_enabled = fireEvent('download.enabled', manual, data, single = True) + if not downloader_enabled: + log.info('Tried to download, but none of the "%s" downloaders are enabled or gave an error', data.get('protocol')) + return False + + # Download NZB or torrent file + filedata = None + if data.get('download') and (ismethod(data.get('download')) or isfunction(data.get('download'))): + try: + filedata = data.get('download')(url = data.get('url'), nzb_id = data.get('id')) + except: + log.error('Tried to download, but the "%s" provider gave an error: %s', (data.get('protocol'), traceback.format_exc())) + return False + + if filedata == 'try_next': + return filedata + elif not filedata: + return False + + # Send NZB or torrent file to downloader + download_result = fireEvent('download', data = data, media = media, manual = manual, filedata = filedata, single = True) + if not download_result: + log.info('Tried to download, but the "%s" downloader gave an error', data.get('protocol')) + return False + log.debug('Downloader result: %s', download_result) + + try: + db = get_db() + + try: + rls = db.get('release_identifier', md5(data['url']), with_doc = True)['doc'] + except: + log.error('No release found to store download information in') + return False + + renamer_enabled = Env.setting('enabled', 'renamer') + + # Save download-id info if returned + if isinstance(download_result, dict): + rls['download_info'] = download_result + 
db.update(rls) + + log_movie = '%s (%s) in %s' % (getTitle(media), media['info'].get('year'), rls['quality']) + snatch_message = 'Snatched "%s": %s from %s' % (data.get('name'), log_movie, (data.get('provider', '') + data.get('provider_extra', ''))) + log.info(snatch_message) + fireEvent('%s.snatched' % data['type'], message = snatch_message, data = media) + + # Mark release as snatched + if renamer_enabled: + self.updateStatus(rls['_id'], status = 'snatched') + + # If renamer isn't used, mark media done if finished or release downloaded + else: + + if media['status'] == 'active': + profile = db.get('id', media['profile_id']) + if fireEvent('quality.isfinish', {'identifier': rls['quality'], 'is_3d': rls.get('is_3d', False)}, profile, single = True): + log.info('Renamer disabled, marking media as finished: %s', log_movie) + + # Mark release done + self.updateStatus(rls['_id'], status = 'done') + + # Mark media done + fireEvent('media.restatus', media['_id'], single = True) + + return True + + # Assume release downloaded + self.updateStatus(rls['_id'], status = 'downloaded') + + except: + log.error('Failed storing download status: %s', traceback.format_exc()) + return False + + return True + + def tryDownloadResult(self, results, media, quality_custom): + + wait_for = False + let_through = False + filtered_results = [] + minimum_seeders = tryInt(Env.setting('minimum_seeders', section = 'torrent', default = 1)) + + # Filter out ignored and other releases we don't want + for rel in results: + + if rel['status'] in ['ignored', 'failed']: + log.info('Ignored: %s', rel['name']) + continue + + if rel['score'] < quality_custom.get('minimum_score'): + log.info('Ignored, score "%s" too low, need at least "%s": %s', (rel['score'], quality_custom.get('minimum_score'), rel['name'])) + continue + + if rel['size'] <= 50: + log.info('Ignored, size "%sMB" too low: %s', (rel['size'], rel['name'])) + continue + + if 'seeders' in rel and rel.get('seeders') < minimum_seeders: + 
log.info('Ignored, not enough seeders, has %s needs %s: %s', (rel.get('seeders'), minimum_seeders, rel['name'])) + continue + + # If a single release comes through the "wait for", let through all + rel['wait_for'] = False + if quality_custom.get('index') != 0 and quality_custom.get('wait_for', 0) > 0 and rel.get('age') <= quality_custom.get('wait_for', 0): + rel['wait_for'] = True + else: + let_through = True + + filtered_results.append(rel) + + # Loop through filtered results + for rel in filtered_results: + + # Only wait if not a single release is old enough + if rel.get('wait_for') and not let_through: + log.info('Ignored, waiting %s days: %s', (quality_custom.get('wait_for') - rel.get('age'), rel['name'])) + wait_for = True + continue + + downloaded = fireEvent('release.download', data = rel, media = media, single = True) + if downloaded is True: + return True + elif downloaded != 'try_next': + break + + return wait_for + + def createFromSearch(self, search_results, media, quality): + + try: + db = get_db() + + found_releases = [] + + is_3d = False + try: is_3d = quality['custom']['3d'] + except: pass + + for rel in search_results: + + rel_identifier = md5(rel['url']) + + release = { + '_t': 'release', + 'identifier': rel_identifier, + 'media_id': media.get('_id'), + 'quality': quality.get('identifier'), + 'is_3d': is_3d, + 'status': rel.get('status', 'available'), + 'last_edit': int(time.time()), + 'info': {} + } + + # Add downloader info if provided + try: + release['download_info'] = rel['download_info'] + del rel['download_info'] + except: + pass + + try: + rls = db.get('release_identifier', rel_identifier, with_doc = True)['doc'] + except: + rls = db.insert(release) + rls.update(release) + + # Update info, but filter out functions + for info in rel: + try: + if not isinstance(rel[info], (str, unicode, int, long, float)): + continue + + rls['info'][info] = toUnicode(rel[info]) if isinstance(rel[info], (str, unicode)) else rel[info] + except: + 
log.debug('Couldn\'t add %s to ReleaseInfo: %s', (info, traceback.format_exc())) + + db.update(rls) + + # Update release in search_results + rel['status'] = rls.get('status') + + if rel['status'] == 'available': + found_releases.append(rel_identifier) + + return found_releases + except: + log.error('Failed: %s', traceback.format_exc()) + + return [] + + def updateStatus(self, release_id, status = None): + if not status: return False + + try: + db = get_db() + + rel = db.get('id', release_id) + if rel and rel.get('status') != status: + + release_name = None + if rel.get('files'): + for file_type in rel.get('files', {}): + if file_type == 'movie': + for release_file in rel['files'][file_type]: + release_name = os.path.basename(release_file) + break + + if not release_name and rel.get('info'): + release_name = rel['info'].get('name') + + #update status in Db + log.debug('Marking release %s as %s', (release_name, status)) + rel['status'] = status + rel['last_edit'] = int(time.time()) + + db.update(rel) + + #Update all movie info as there is no release update function + fireEvent('notify.frontend', type = 'release.update_status', data = rel) + + return True + except: + log.error('Failed: %s', traceback.format_exc()) + + return False + + def withStatus(self, status, with_doc = True): + + db = get_db() + + status = list(status if isinstance(status, (list, tuple)) else [status]) + + for s in status: + for ms in db.get_many('release_status', s): + if with_doc: + try: + doc = db.get('id', ms['_id']) + yield doc + except RecordNotFound: + log.debug('Record not found, skipping: %s', ms['_id']) + else: + yield ms + + def forMedia(self, media_id): + + db = get_db() + raw_releases = db.get_many('release', media_id) + + releases = [] + for r in raw_releases: + try: + doc = db.get('id', r.get('_id')) + releases.append(doc) + except RecordDeleted: + pass + except (ValueError, EOFError): + fireEvent('database.delete_corrupted', r.get('_id'), traceback_error = traceback.format_exc(0)) 
+ + releases = sorted(releases, key = lambda k: k.get('info', {}).get('score', 0), reverse = True) + + # Sort based on preferred search method + download_preference = self.conf('preferred_method', section = 'searcher') + if download_preference != 'both': + releases = sorted(releases, key = lambda k: k.get('info', {}).get('protocol', '')[:3], reverse = (download_preference == 'torrent')) + + return releases or [] diff --git a/couchpotato/core/plugins/renamer.py b/couchpotato/core/plugins/renamer.py new file mode 100755 index 0000000000..fa410098ec --- /dev/null +++ b/couchpotato/core/plugins/renamer.py @@ -0,0 +1,1587 @@ +import fnmatch +import os +import re +import shutil +import time +import traceback + +from couchpotato import get_db +from couchpotato.api import addApiView +from couchpotato.core.event import addEvent, fireEvent, fireEventAsync +from couchpotato.core.helpers.encoding import toUnicode, ss, sp +from couchpotato.core.helpers.variable import getExt, mergeDicts, getTitle, \ + getImdb, link, symlink, tryInt, splitString, fnEscape, isSubFolder, \ + getIdentifier, randomString, getFreeSpace, getSize +from couchpotato.core.logger import CPLog +from couchpotato.core.plugins.base import Plugin +from couchpotato.environment import Env +from unrar2 import RarFile +import six +from six.moves import filter + + +log = CPLog(__name__) + +autoload = 'Renamer' + + +class Renamer(Plugin): + + renaming_started = False + checking_snatched = False + + def __init__(self): + addApiView('renamer.scan', self.scanView, docs = { + 'desc': 'For the renamer to check for new files to rename in a folder', + 'params': { + 'async': {'desc': 'Optional: Set to 1 if you dont want to fire the renamer.scan asynchronous.'}, + 'to_folder': {'desc': 'Optional: The folder to move releases to. Leave empty for default folder.'}, + 'media_folder': {'desc': 'Optional: The folder of the media to scan. 
Keep empty for default renamer folder.'}, + 'files': {'desc': 'Optional: Provide the release files if more releases are in the same media_folder, delimited with a \'|\'. Note that no dedicated release folder is expected for releases with one file.'}, + 'base_folder': {'desc': 'Optional: The folder to find releases in. Leave empty for default folder.'}, + 'downloader': {'desc': 'Optional: The downloader the release has been downloaded with. \'download_id\' is required with this option.'}, + 'download_id': {'desc': 'Optional: The nzb/torrent ID of the release in media_folder. \'downloader\' is required with this option.'}, + 'status': {'desc': 'Optional: The status of the release: \'completed\' (default) or \'seeding\''}, + }, + }) + + addApiView('renamer.progress', self.getProgress, docs = { + 'desc': 'Get the progress of current renamer scan', + 'return': {'type': 'object', 'example': """{ + 'progress': False || True, +}"""}, + }) + + addEvent('renamer.scan', self.scan) + addEvent('renamer.check_snatched', self.checkSnatched) + + addEvent('app.load', self.scan) + addEvent('app.load', self.setCrons) + + # Enable / disable interval + addEvent('setting.save.renamer.enabled.after', self.setCrons) + addEvent('setting.save.renamer.run_every.after', self.setCrons) + addEvent('setting.save.renamer.force_every.after', self.setCrons) + + def setCrons(self): + + fireEvent('schedule.remove', 'renamer.check_snatched') + if self.isEnabled() and self.conf('run_every') > 0: + fireEvent('schedule.interval', 'renamer.check_snatched', self.checkSnatched, minutes = self.conf('run_every'), single = True) + + fireEvent('schedule.remove', 'renamer.check_snatched_forced') + if self.isEnabled() and self.conf('force_every') > 0: + fireEvent('schedule.interval', 'renamer.check_snatched_forced', self.scan, hours = self.conf('force_every'), single = True) + + return True + + def getProgress(self, **kwargs): + return { + 'progress': self.renaming_started + } + + def scanView(self, **kwargs): + 
+ async = tryInt(kwargs.get('async', 0)) + base_folder = kwargs.get('base_folder') + media_folder = sp(kwargs.get('media_folder')) + to_folder = kwargs.get('to_folder') + + # Backwards compatibility, to be removed after a few versions :) + if not media_folder: + media_folder = sp(kwargs.get('movie_folder')) + + downloader = kwargs.get('downloader') + download_id = kwargs.get('download_id') + files = [sp(filename) for filename in splitString(kwargs.get('files'), '|')] + status = kwargs.get('status', 'completed') + + release_download = None + if not base_folder and media_folder: + release_download = {'folder': media_folder} + + if download_id: + release_download.update({ + 'id': download_id, + 'downloader': downloader, + 'status': status, + 'files': files + }) + + fire_handle = fireEvent if not async else fireEventAsync + fire_handle('renamer.scan', base_folder = base_folder, release_download = release_download, to_folder = to_folder) + + return { + 'success': True + } + + def scan(self, base_folder = None, release_download = None, to_folder = None): + if not release_download: release_download = {} + + if self.isDisabled(): + return + + if self.renaming_started is True: + log.info('Renamer is already running, if you see this often, check the logs above for errors.') + return + + if not base_folder: + base_folder = sp(self.conf('from')) + + from_folder = sp(self.conf('from')) + + if not to_folder: + to_folder = sp(self.conf('to')) + + # Get media folder to process + media_folder = sp(release_download.get('folder')) + + # Get all folders that should not be processed + no_process = [to_folder] + cat_list = fireEvent('category.all', single = True) or [] + no_process.extend([item['destination'] for item in cat_list]) + + # Don't continue if from-folder doesn't exist + if not os.path.isdir(base_folder): + log.error('The from folder "%s" doesn\'t exist. 
Please create it.', base_folder) + return + # Don't continue if to-folder doesn't exist + elif not os.path.isdir(to_folder): + log.error('The to folder "%s" doesn\'t exist. Please create it.', to_folder) + return + else: + # Check to see if the no_process folders are inside the "from" folder. + for item in no_process: + if isSubFolder(item, base_folder): + log.error('To protect your data, the media libraries can\'t be inside of or the same as the "from" folder. "%s" in "%s"', (item, base_folder)) + return + + # Check to see if the no_process folders are inside the provided media_folder + if media_folder and not os.path.isdir(media_folder): + log.debug('The provided media folder %s does not exist. Trying to find it in the \'from\' folder.', media_folder) + + # Update to the from folder + if len(release_download.get('files', [])) == 1: + new_media_folder = sp(from_folder) + else: + new_media_folder = sp(os.path.join(from_folder, os.path.basename(media_folder))) + + if not os.path.isdir(new_media_folder): + log.error('The provided media folder %s does not exist and could also not be found in the \'from\' folder.', media_folder) + return + + # Update the files + new_files = [os.path.join(new_media_folder, os.path.relpath(filename, media_folder)) for filename in release_download.get('files', [])] + if new_files and not os.path.isfile(new_files[0]): + log.error('The provided media folder %s does not exist and its files could also not be found in the \'from\' folder.', media_folder) + return + + # Update release_download info to the from folder + log.debug('Release %s found in the \'from\' folder.', media_folder) + release_download['folder'] = new_media_folder + release_download['files'] = new_files + media_folder = new_media_folder + + if media_folder: + for item in no_process: + if isSubFolder(item, media_folder): + log.error('To protect your data, the media libraries can\'t be inside of or the same as the provided media folder. 
"%s" in "%s"', (item, media_folder)) + return + + # Make sure a checkSnatched marked all downloads/seeds as such + if not release_download and self.conf('run_every') > 0: + self.checkSnatched(fire_scan = False) + + self.renaming_started = True + + # make sure the media folder name is included in the search + folder = None + files = [] + if media_folder: + log.info('Scanning media folder %s...', media_folder) + folder = os.path.dirname(media_folder) + + release_files = release_download.get('files', []) + if release_files: + files = release_files + + # If there is only one file in the torrent, the downloader did not create a subfolder + if len(release_files) == 1: + folder = media_folder + else: + # Get all files from the specified folder + try: + for root, folders, names in os.walk(media_folder): + files.extend([sp(os.path.join(root, name)) for name in names]) + except: + log.error('Failed getting files from %s: %s', (media_folder, traceback.format_exc())) + + # post_filter files from configuration; this is a ":"-separated list of globs + files = self.filesAfterIgnoring(files) + + db = get_db() + + # Extend the download info with info stored in the downloaded release + keep_original = self.moveTypeIsLinked() + is_torrent = False + if release_download: + release_download = self.extendReleaseDownload(release_download) + is_torrent = self.downloadIsTorrent(release_download) + keep_original = True if is_torrent and self.conf('file_action') not in ['move'] else keep_original + + # Unpack any archives + extr_files = None + if self.conf('unrar'): + folder, media_folder, files, extr_files = self.extractFiles(folder = folder, media_folder = media_folder, files = files, + cleanup = self.conf('cleanup') and not keep_original) + + groups = fireEvent('scanner.scan', folder = folder if folder else base_folder, + files = files, release_download = release_download, return_ignored = False, single = True) or [] + + folder_name = self.conf('folder_name') + file_name = 
self.conf('file_name') + trailer_name = self.conf('trailer_name') + nfo_name = self.conf('nfo_name') + separator = self.conf('separator') + + if len(file_name) == 0: + log.error('Please fill in the filename option under renamer settings. Forcing it on . to keep the same name as source file.') + file_name = '.' + + cd_keys = ['','', ''] + if not any(x in folder_name for x in cd_keys) and not any(x in file_name for x in cd_keys): + log.error('Missing `cd` or `cd_nr` in the renamer. This will cause multi-file releases of being renamed to the same file. ' + 'Please add it in the renamer settings. Force adding it for now.') + file_name = '%s %s' % ('', file_name) + + # Tag release folder as failed_rename in case no groups were found. This prevents check_snatched from removing the release from the downloader. + if not groups and self.statusInfoComplete(release_download): + self.tagRelease(release_download = release_download, tag = 'failed_rename') + + for group_identifier in groups: + + group = groups[group_identifier] + group['release_download'] = None + rename_files = {} + remove_files = [] + remove_releases = [] + + media_title = getTitle(group) + + # Add _UNKNOWN_ if no library item is connected + if not group.get('media') or not media_title: + self.tagRelease(group = group, tag = 'unknown') + continue + # Rename the files using the library data + else: + + # Media not in library, add it first + if not group['media'].get('_id'): + group['media'] = fireEvent('movie.add', params = { + 'identifier': group['identifier'], + 'profile_id': None + }, search_after = False, status = 'done', single = True) + else: + group['media'] = fireEvent('movie.update', media_id = group['media'].get('_id'), single = True) + + if not group['media'] or not group['media'].get('_id'): + log.error('Could not rename, no library item to work with: %s', group_identifier) + continue + + media = group['media'] + media_title = getTitle(media) + fr_media_title = media_title + + res = 
fireEvent('movie.getfrenchtitle', movie = media) + + if res != None and len(res) > 0: + fr_media_title = res[0] + + # Overwrite destination when set in category + destination = to_folder + category_label = '' + + if media.get('category_id') and media.get('category_id') != '-1': + try: + category = db.get('id', media['category_id']) + category_label = category['label'] + + if category['destination'] and len(category['destination']) > 0 and category['destination'] != 'None': + destination = sp(category['destination']) + log.debug('Setting category destination for "%s": %s' % (media_title, destination)) + else: + log.debug('No category destination found for "%s"' % media_title) + except: + log.error('Failed getting category label: %s', traceback.format_exc()) + + + # Find subtitle for renaming + group['before_rename'] = [] + fireEvent('renamer.before', group) + + # Add extracted files to the before_rename list + if extr_files: + group['before_rename'].extend(extr_files) + + # Remove weird chars from movie name + movie_name = re.sub(r"[\x00\/\\:\*\?\"<>\|]", '', media_title) + fr_movie_name = re.sub(r"[\x00\/\\:\*\?\"<>\|]", '', fr_media_title) + + # Put 'The' at the end + name_the = movie_name + for prefix in ['the ', 'an ', 'a ']: + if prefix == movie_name[:len(prefix)].lower(): + name_the = movie_name[len(prefix):] + ', ' + prefix.strip().capitalize() + break + + fr_name_the = fr_movie_name + for prefix in ['le ', 'la ']: + if prefix == fr_movie_name[:len(prefix)].lower(): + name_the = fr_movie_name[len(prefix):] + ', ' + prefix.strip().capitalize() + break + + replacements = { + 'ext': 'mkv', + 'namethe': name_the.strip(), + 'thename': movie_name.strip(), + 'frnamethe': fr_name_the.strip(), + 'frthename': fr_movie_name.strip(), + 'year': media['info']['year'], + 'first': name_the[0].upper(), + 'quality': group['meta_data']['quality']['label'], + 'quality_type': group['meta_data']['quality_type'], + 'video': group['meta_data'].get('video'), + 'audio': 
group['meta_data'].get('audio'), + 'group': group['meta_data']['group'], + 'source': group['meta_data']['source'], + 'resolution_width': group['meta_data'].get('resolution_width'), + 'resolution_height': group['meta_data'].get('resolution_height'), + 'audio_channels': group['meta_data'].get('audio_channels'), + 'imdb_id': group['identifier'], + 'cd': '', + 'cd_nr': '', + 'mpaa': media['info'].get('mpaa', ''), + 'mpaa_only': media['info'].get('mpaa', ''), + 'category': category_label, + '3d': '3D' if group['meta_data']['quality'].get('is_3d', 0) else '', + '3d_type': group['meta_data'].get('3d_type'), + '3d_type_short': group['meta_data'].get('3d_type'), + } + + if replacements['mpaa_only'] not in ('G', 'PG', 'PG-13', 'R', 'NC-17'): + replacements['mpaa_only'] = 'Not Rated' + + if replacements['3d_type_short']: + replacements['3d_type_short'] = replacements['3d_type_short'].replace('Half ', 'H').replace('Full ', '') + if self.conf('use_tab_threed') and replacements['3d_type']: + if 'OU' in replacements['3d_type']: + replacements['3d_type'] = replacements['3d_type'].replace('OU','TAB') + if self.conf('use_tab_threed') and replacements['3d_type_short']: + if 'OU' in replacements['3d_type_short']: + replacements['3d_type_short'] = replacements['3d_type_short'].replace('OU','TAB') + + + for file_type in group['files']: + + # Move nfo depending on settings + if file_type is 'nfo' and not self.conf('rename_nfo'): + log.debug('Skipping, renaming of %s disabled', file_type) + for current_file in group['files'][file_type]: + if self.conf('cleanup') and (not keep_original or self.fileIsAdded(current_file, group)): + remove_files.append(current_file) + continue + + # Subtitle extra + if file_type is 'subtitle_extra': + continue + + # Move other files + multiple = len(group['files'][file_type]) > 1 and not group['is_dvd'] + cd = 1 if multiple else 0 + + for current_file in sorted(list(group['files'][file_type])): + current_file = sp(current_file) + + # Original filename + 
replacements['original'] = os.path.splitext(os.path.basename(current_file))[0] + replacements['original_folder'] = fireEvent('scanner.remove_cptag', group['dirname'], single = True) + + if not replacements['original_folder'] or len(replacements['original_folder']) == 0: + replacements['original_folder'] = replacements['original'] + + # Extension + replacements['ext'] = getExt(current_file) + + # cd # + replacements['cd'] = ' cd%d' % cd if multiple else '' + replacements['cd_nr'] = cd if multiple else '' + + # Naming + final_folder_name = self.doReplace(folder_name, replacements, folder = True) + final_file_name = self.doReplace(file_name, replacements) + replacements['filename'] = final_file_name[:-(len(getExt(final_file_name)) + 1)] + + # Meta naming + if file_type is 'trailer': + final_file_name = self.doReplace(trailer_name, replacements, remove_multiple = True) + elif file_type is 'nfo': + final_file_name = self.doReplace(nfo_name, replacements, remove_multiple = True) + + # Move DVD files (no structure renaming) + if group['is_dvd'] and file_type is 'movie': + found = False + for top_dir in ['video_ts', 'audio_ts', 'bdmv', 'certificate']: + has_string = current_file.lower().find(os.path.sep + top_dir + os.path.sep) + if has_string >= 0: + structure_dir = current_file[has_string:].lstrip(os.path.sep) + rename_files[current_file] = os.path.join(destination, final_folder_name, structure_dir) + found = True + break + + if not found: + log.error('Could not determine dvd structure for: %s', current_file) + + # Do rename others + else: + if file_type is 'leftover': + if self.conf('move_leftover'): + rename_files[current_file] = os.path.join(destination, final_folder_name, os.path.basename(current_file)) + elif file_type not in ['subtitle']: + rename_files[current_file] = os.path.join(destination, final_folder_name, final_file_name) + + # Check for extra subtitle files + if file_type is 'subtitle': + + remove_multiple = False + if len(group['files']['movie']) == 1: + 
remove_multiple = True + + sub_langs = group['subtitle_language'].get(current_file, []) + + # rename subtitles with or without language + sub_name = self.doReplace(file_name, replacements, remove_multiple = remove_multiple) + rename_files[current_file] = os.path.join(destination, final_folder_name, sub_name) + + rename_extras = self.getRenameExtras( + extra_type = 'subtitle_extra', + replacements = replacements, + folder_name = folder_name, + file_name = file_name, + destination = destination, + group = group, + current_file = current_file, + remove_multiple = remove_multiple, + ) + + # Don't add language if multiple languages in 1 subtitle file + if len(sub_langs) == 1: + sub_suffix = '%s.%s' % (sub_langs[0], replacements['ext']) + + # Don't add language to subtitle file it it's already there + if not sub_name.endswith(sub_suffix): + sub_name = sub_name.replace(replacements['ext'], sub_suffix) + rename_files[current_file] = os.path.join(destination, final_folder_name, sub_name) + + rename_files = mergeDicts(rename_files, rename_extras) + + # Filename without cd etc + elif file_type is 'movie': + rename_extras = self.getRenameExtras( + extra_type = 'movie_extra', + replacements = replacements, + folder_name = folder_name, + file_name = file_name, + destination = destination, + group = group, + current_file = current_file + ) + rename_files = mergeDicts(rename_files, rename_extras) + + group['filename'] = self.doReplace(file_name, replacements, remove_multiple = True)[:-(len(getExt(final_file_name)) + 1)] + group['destination_dir'] = os.path.join(destination, final_folder_name) + + if multiple: + cd += 1 + + # Before renaming, remove the lower quality files + remove_leftovers = True + + # Get media quality profile + profile = None + if media.get('profile_id'): + try: + profile = db.get('id', media['profile_id']) + except: + # Set profile to None as it does not exist anymore + mdia = db.get('id', media['_id']) + mdia['profile_id'] = None + db.update(mdia) + 
log.error('Error getting quality profile for %s: %s', (media_title, traceback.format_exc())) + else: + log.debug('Media has no quality profile: %s', media_title) + + # Mark media for dashboard + mark_as_recent = False + + # Go over current movie releases + for release in fireEvent('release.for_media', media['_id'], single = True): + + # When a release already exists + if release.get('status') == 'done': + + # This is where CP removes older, lesser quality releases or releases that are not wanted anymore + is_higher = fireEvent('quality.ishigher', \ + group['meta_data']['quality'], {'identifier': release['quality'], 'is_3d': release.get('is_3d', False)}, profile, single = True) + + if is_higher == 'higher': + if self.conf('remove_lower_quality_copies'): + log.info('Removing lesser or not wanted quality %s for %s.', (media_title, release.get('quality'))) + for file_type in release.get('files', {}): + for release_file in release['files'][file_type]: + remove_files.append(release_file) + remove_releases.append(release) + + # Same quality, but still downloaded, so maybe repack/proper/unrated/directors cut etc + elif is_higher == 'equal': + if self.conf('remove_lower_quality_copies'): + log.info('Same quality release already exists for %s, with quality %s. Assuming repack.', (media_title, release.get('quality'))) + for file_type in release.get('files', {}): + for release_file in release['files'][file_type]: + remove_files.append(release_file) + remove_releases.append(release) + + # Downloaded a lower quality, rename the newly downloaded files/folder to exclude them from scan + else: + log.info('Better quality release already exists for %s, with quality %s', (media_title, release.get('quality'))) + + # Add exists tag to the .ignore file + self.tagRelease(group = group, tag = 'exists') + + # Notify on rename fail + download_message = 'Renaming of %s (%s) cancelled, exists in %s already.' 
% (media_title, group['meta_data']['quality']['label'], release.get('quality')) + fireEvent('movie.renaming.canceled', message = download_message, data = group) + remove_leftovers = False + + break + + elif release.get('status') in ['snatched', 'seeding']: + if release_download and release_download.get('release_id'): + if release_download['release_id'] == release['_id']: + if release_download['status'] == 'completed': + # Set the release to downloaded + fireEvent('release.update_status', release['_id'], status = 'downloaded', single = True) + group['release_download'] = release_download + mark_as_recent = True + elif release_download['status'] == 'seeding': + # Set the release to seeding + fireEvent('release.update_status', release['_id'], status = 'seeding', single = True) + mark_as_recent = True + + elif release.get('quality') == group['meta_data']['quality']['identifier']: + # Set the release to downloaded + fireEvent('release.update_status', release['_id'], status = 'downloaded', single = True) + group['release_download'] = release_download + mark_as_recent = True + + # Mark media for dashboard + if mark_as_recent: + fireEvent('media.tag', group['media'].get('_id'), 'recent', update_edited = True, single = True) + + # Remove leftover files + if not remove_leftovers: # Don't remove anything + continue + + log.debug('Removing leftover files') + for current_file in group['files']['leftover']: + if self.conf('cleanup') and not self.conf('move_leftover') and \ + (not keep_original or self.fileIsAdded(current_file, group)): + remove_files.append(current_file) + + if self.conf('check_space'): + total_space, available_space = getFreeSpace(destination) + renaming_size = getSize(rename_files.keys()) + if renaming_size > available_space: + log.error('Not enough space left, need %s MB but only %s MB available', (renaming_size, available_space)) + self.tagRelease(group = group, tag = 'not_enough_space') + continue + + # Remove files + delete_folders = [] + for src in 
remove_files: + + if rename_files.get(src): + log.debug('Not removing file that will be renamed: %s', src) + continue + + log.info('Removing "%s"', src) + try: + src = sp(src) + if os.path.isfile(src): + os.remove(src) + + parent_dir = os.path.dirname(src) + if parent_dir not in delete_folders and os.path.isdir(parent_dir) and \ + not isSubFolder(destination, parent_dir) and not isSubFolder(media_folder, parent_dir) and \ + isSubFolder(parent_dir, base_folder): + + delete_folders.append(parent_dir) + + except: + log.error('Failed removing %s: %s', (src, traceback.format_exc())) + self.tagRelease(group = group, tag = 'failed_remove') + + # Delete leftover folder from older releases + delete_folders = sorted(delete_folders, key = len, reverse = True) + for delete_folder in delete_folders: + try: + self.deleteEmptyFolder(delete_folder, show_error = False) + except Exception as e: + log.error('Failed to delete folder: %s %s', (e, traceback.format_exc())) + + # Rename all files marked + group['renamed_files'] = [] + failed_rename = False + for src in rename_files: + if rename_files[src]: + dst = rename_files[src] + + if dst in group['renamed_files']: + log.error('File "%s" already renamed once, adding random string at the end to prevent data loss', dst) + dst = '%s.random-%s' % (dst, randomString()) + + # Create dir + self.makeDir(os.path.dirname(dst)) + + try: + self.moveFile(src, dst, use_default = not is_torrent or self.fileIsAdded(src, group)) + group['renamed_files'].append(dst) + except: + log.error('Failed renaming the file "%s" : %s', (os.path.basename(src), traceback.format_exc())) + failed_rename = True + break + + # If renaming failed tag the release folder as failed and continue with next group. Note that all old files have already been deleted. 
def getRenameExtras(self, extra_type = '', replacements = None, folder_name = '', file_name = '', destination = '', group = None, current_file = '', remove_multiple = False):
    """Build the rename mapping for extra files (e.g. subtitle/movie extras)
    that belong to current_file, mirroring its final folder and file name.

    Returns a dict of {source_path: destination_path} entries."""
    group = group or {}
    replacements = dict(replacements or {})

    renames = {}
    # An extra belongs to the current file when it shares its path-stem
    stem = current_file[:-len(replacements['ext'])]

    for extra_file in set(x for x in group['files'][extra_type] if stem in sp(x)):
        # Each extra keeps its own extension in the substituted template
        replacements['ext'] = getExt(extra_file)

        new_dir = self.doReplace(folder_name, replacements, remove_multiple = remove_multiple, folder = True)
        new_name = self.doReplace(file_name, replacements, remove_multiple = remove_multiple)
        renames[extra_file] = os.path.join(destination, new_dir, new_name)

    return renames

# Tag a release by dropping ".<tag>.ignore" marker files next to it,
# which hides the release from subsequent renamer scans until removed.
def tagRelease(self, tag, group = None, release_download = None):
    if not tag:
        return

    text = """This file is from CouchPotato
It has marked this release as "%s"
This file hides the release from the renamer
Remove it if you want it to be renamed (again, or at least let it try again)
""" % tag

    targets = []

    if isinstance(group, dict):
        # Known scan group: tagging the first movie file is enough
        targets = [sorted(list(group['files']['movie']))[0]]
    elif isinstance(release_download, dict):
        if release_download.get('files', []):
            # Tag the known downloaded files that still exist on disk
            targets = [filename for filename in release_download.get('files', []) if os.path.exists(filename)]
        elif release_download['folder']:
            # Otherwise tag everything inside the release folder
            for root, folders, names in os.walk(sp(release_download['folder'])):
                targets.extend([os.path.join(root, name) for name in names])

    for target in targets:
        # Never tag the marker files themselves
        if os.path.splitext(target)[1] == '.ignore':
            continue

        marker = '%s.%s.ignore' % (os.path.splitext(target)[0], tag)
        if not os.path.isfile(marker):
            self.createFile(marker, text)
def untagRelease(self, group = None, release_download = None, tag = ''):
    """Delete ".<tag>.ignore" marker files belonging to a release.

    Accepts either a scanner group (uses its first movie file and parent
    dir) or a release_download dict (uses its folder/files).  An empty tag
    removes any matching ".*.ignore" marker.  Returns False when the
    release folder can't be located; None otherwise.
    """
    # BUG FIX: the guard used to be `if not release_download: return`, which
    # made group-only calls — e.g. untagRelease(group = group,
    # tag = 'failed_rename') from the renamer scan — a silent no-op even
    # though a dedicated group branch exists below.
    if not release_download and not group:
        return

    tag_files = []
    folder = None

    # Known scan group: only its first movie file carries the tag
    if isinstance(group, dict):
        tag_files = [sorted(list(group['files']['movie']))[0]]

        folder = sp(group['parentdir'])
        if not group.get('dirname') or not os.path.isdir(folder):
            return False

    elif isinstance(release_download, dict):

        folder = sp(release_download['folder'])
        if not os.path.isdir(folder):
            return False

        # Untag download files when they are known
        if release_download.get('files'):
            tag_files = release_download.get('files', [])

        # Otherwise untag every (non-marker) file in the release folder
        else:
            for root, folders, names in os.walk(folder):
                tag_files.extend([sp(os.path.join(root, name)) for name in names if not os.path.splitext(name)[1] == '.ignore'])

    if not folder:
        return False

    # Collect all matching .ignore markers below the folder
    ignore_files = []
    for root, dirnames, filenames in os.walk(folder):
        ignore_files.extend(fnmatch.filter([sp(os.path.join(root, filename)) for filename in filenames], '*%s.ignore' % tag))

    # Delete every marker that belongs to one of the tagged files
    for tag_file in tag_files:
        ignore_file = fnmatch.filter(ignore_files, fnEscape('%s.%s.ignore' % (os.path.splitext(tag_file)[0], tag if tag else '*')))
        for filename in ignore_file:
            try:
                os.remove(filename)
            except:
                log.debug('Unable to remove ignore file: %s. Error: %s.' % (filename, traceback.format_exc()))
def hastagRelease(self, release_download, tag = ''):
    """Return True when a ".<tag>.ignore" marker exists for any file of the
    given download (empty tag matches any marker)."""
    if not release_download:
        return False

    folder = sp(release_download['folder'])
    if not os.path.isdir(folder):
        return False

    candidates = []
    markers = []

    # Prefer the explicitly known download files
    if release_download.get('files'):
        candidates = release_download.get('files', [])
    # Fall back to every non-marker file inside the release folder
    else:
        for root, folders, names in os.walk(folder):
            candidates.extend([sp(os.path.join(root, name)) for name in names if not os.path.splitext(name)[1] == '.ignore'])

    # All markers below the folder that carry this tag
    for root, dirnames, filenames in os.walk(folder):
        markers.extend(fnmatch.filter([sp(os.path.join(root, filename)) for filename in filenames], '*%s.ignore' % tag))

    # A single path may have been handed over as a bare string
    if isinstance(candidates, str):
        candidates = [candidates]

    for candidate in candidates:
        if fnmatch.filter(markers, fnEscape('%s.%s.ignore' % (os.path.splitext(candidate)[0], tag if tag else '*'))):
            return True

    return False
def moveFile(self, old, dest, use_default = False):
    """Transfer `old` to `dest` using the configured file action
    (move / copy / link / symlink_reversed).

    use_default switches to the 'default_file_action' setting (used for
    files that must not stay behind for seeding).  Raises when the
    destination already exists or the transfer fails; returns True on
    success.  Permissions are applied best-effort afterwards."""
    dest = sp(dest)
    try:

        # Never overwrite an existing destination file
        if os.path.exists(dest) and os.path.isfile(dest):
            raise Exception('Destination "%s" already exists' % dest)

        action = self.conf('file_action')
        if use_default:
            action = self.conf('default_file_action')

        if action not in ['copy', 'link', 'symlink_reversed']:
            # Plain move is the default action
            try:
                log.info('Moving "%s" to "%s"', (old, dest))
                shutil.move(old, dest)
            except:
                dest_present = os.path.exists(dest)
                if dest_present and os.path.getsize(old) == os.path.getsize(dest):
                    # The move actually completed despite the error; drop the source
                    log.error('Successfully moved file "%s", but something went wrong: %s', (dest, traceback.format_exc()))
                    os.unlink(old)
                else:
                    # Remove a half-copied destination before bailing out
                    if dest_present:
                        os.unlink(dest)
                    raise
        elif action == 'copy':
            log.info('Copying "%s" to "%s"', (old, dest))
            shutil.copy(old, dest)
        elif action == 'symlink_reversed':
            # Move the file, then leave a symlink at the old location
            log.info('Reverse symlink "%s" to "%s"', (old, dest))
            try:
                shutil.move(old, dest)
            except:
                log.error('Moving "%s" to "%s" went wrong: %s', (old, dest, traceback.format_exc()))
            try:
                symlink(dest, old)
            except:
                log.error('Error while linking "%s" back to "%s": %s', (dest, old, traceback.format_exc()))
        else:
            # 'link': try a hardlink first, then fall back to copy + symlink swap
            log.info('Linking "%s" to "%s"', (old, dest))
            try:
                log.debug('Hardlinking file "%s" to "%s"...', (old, dest))
                link(old, dest)
            except:
                log.debug('Couldn\'t hardlink file "%s" to "%s". Symlinking instead. Error: %s.', (old, dest, traceback.format_exc()))
                shutil.copy(old, dest)
                try:
                    # Swap the original for a symlink pointing at the new copy
                    link_tmp = '%s.link' % sp(old)
                    symlink(dest, link_tmp)
                    os.unlink(old)
                    os.rename(link_tmp, old)
                except:
                    log.error('Couldn\'t symlink file "%s" to "%s". Copied instead. Error: %s. ', (old, dest, traceback.format_exc()))

        try:
            # Apply configured permissions; on Windows optionally reset NTFS ACLs
            os.chmod(dest, Env.getPermission('file'))
            if os.name == 'nt' and self.conf('ntfs_permission'):
                os.popen('icacls "' + dest + '"* /reset /T')
        except:
            log.debug('Failed setting permissions for file: %s, %s', (dest, traceback.format_exc(1)))
    except:
        log.error('Couldn\'t move file "%s" to "%s": %s', (old, dest, traceback.format_exc()))
        raise

    return True
def doReplace(self, string, replacements, remove_multiple = False, folder = False):
    """Substitute <tag> placeholders in a naming template with real values
    and sanitize the result for use as a file/folder name."""
    replacements = replacements.copy()
    if remove_multiple:
        # Strip the multi-part (cd) markers for single-file releases
        replacements['cd'] = ''
        replacements['cd_nr'] = ''

    result = toUnicode(string)

    # First pass: everything except the 'name' tags (inserted after cleanup)
    for key, value in replacements.items():
        if key in ['thename', 'namethe']:
            continue
        if value is not None:
            result = result.replace(six.u('<%s>') % toUnicode(key), toUnicode(value))
        else:
            # Unknown value: drop the tag from the name entirely
            result = result.replace('<' + key + '>', '')

    if self.conf('replace_doubles'):
        result = self.replaceDoubles(result.lstrip('. '))

    # Second pass: the name tags, so cleanup can't mangle the title itself
    for key, value in replacements.items():
        if key in ['thename', 'namethe']:
            result = result.replace(six.u('<%s>') % toUnicode(key), toUnicode(value))

    # Strip characters that are illegal in file names
    result = re.sub(r"[\x00:\*\?\"<>\|]", '', result)

    sep = self.conf('foldersep') if folder else self.conf('separator')
    return ss(result.replace(' ', ' ' if not sep else sep))

def replaceDoubles(self, string):
    """Collapse runs of separator characters left behind by empty tags and
    strip trailing separators."""
    rules = [
        (r'\.+', '.'), (r'_+', '_'), (r'-+', '-'), (r'\s+', ' '), (r' \\', r'\\'), (r' /', '/'),
        (r'(\s\.)+', '.'), (r'(-\.)+', '.'), (r'(\s-[^\s])+', '-'), (r' ]', ']'),
    ]

    for pattern, repl in rules:
        string = re.sub(pattern, repl, string)

    return string.rstrip(',_-/\\ ')
def checkSnatched(self, fire_scan = True):
    """Poll the downloaders for the status of every snatched/seeding/missing
    release and update release statuses accordingly.

    Queues finished items for a renamer scan and (optionally) fires a full
    scan afterwards.  Returns True on success, False when a check is
    already running or collecting download IDs failed."""

    log.info('Check snatched event triggered')

    # Only one status check may run at a time
    if self.checking_snatched:
        log.debug('Already checking snatched')
        return False

    self.checking_snatched = True

    try:
        db = get_db()

        watched_rels = list(fireEvent('release.with_status', ['snatched', 'seeding', 'missing'], single = True))

        if not watched_rels:
            # Nothing needs status checking
            self.checking_snatched = False
            return True

        # Gather the download IDs stored with each release, and note any
        # downloaders that can't report status at all
        known_ids = []
        statusless = []
        try:
            for rel in watched_rels:
                dl_info = rel.get('download_info')
                if not dl_info: continue

                if dl_info.get('id') and dl_info.get('downloader'):
                    known_ids.append(dl_info)

                support_flag = dl_info.get('status_support')
                if support_flag is False or support_flag == 'False':
                    statusless.append(ss(dl_info.get('downloader')))
        except:
            log.error('Error getting download IDs from database')
            self.checking_snatched = False
            return False

        downloader_items = fireEvent('download.status', known_ids, merge = True) if known_ids else []

        if statusless:
            log.debug('Download status functionality is not implemented for one of the active downloaders: %s', list(set(statusless)))

        if not downloader_items:
            if fire_scan:
                self.scan()

            self.checking_snatched = False
            return True

        to_process = []
        needs_scan = False

        log.debug('Checking status snatched releases...')

        try:
            for rel in watched_rels:
                if not rel.get('media_id'): continue
                media_doc = db.get('id', rel.get('media_id'))
                dl_info = rel.get('download_info')

                if not isinstance(dl_info, dict):
                    log.error('Faulty release found without any info, ignoring.')
                    fireEvent('release.update_status', rel.get('_id'), status = 'ignored', single = True)
                    continue

                # Without both an ID and a downloader there is nothing to poll
                if not dl_info.get('id') or not dl_info.get('downloader'):
                    log.debug('Download status functionality is not implemented for downloader (%s) of release %s.', (dl_info.get('downloader', 'unknown'), rel['info']['name']))
                    needs_scan = True
                    continue

                nzbname = self.createNzbName(rel['info'], media_doc)

                # Try to match this release against the downloader reports
                matched = False
                for release_download in downloader_items:
                    matched = False
                    if dl_info.get('id'):
                        if release_download['id'] == dl_info['id'] and release_download['downloader'] == dl_info['downloader']:
                            log.debug('Found release by id: %s', release_download['id'])
                            matched = True
                            break
                    else:
                        if release_download['name'] == nzbname or rel['info']['name'] in release_download['name'] or getImdb(release_download['name']) == getIdentifier(media_doc):
                            log.debug('Found release by release name or imdb ID: %s', release_download['name'])
                            matched = True
                            break

                if not matched:
                    # Already missing for over a week -> ignore it, otherwise mark missing
                    if rel.get('status') == 'missing':
                        if rel.get('last_edit') < int(time.time()) - 7 * 24 * 60 * 60:
                            log.info('%s not found in downloaders after 7 days, setting status to ignored', nzbname)
                            fireEvent('release.update_status', rel.get('_id'), status = 'ignored', single = True)
                    else:
                        log.info('%s not found in downloaders, setting status to missing', nzbname)
                        fireEvent('release.update_status', rel.get('_id'), status = 'missing', single = True)

                    # Continue with next release
                    continue

                timeleft = 'N/A' if release_download['timeleft'] == -1 else release_download['timeleft']
                log.debug('Found %s: %s, time to go: %s', (release_download['name'], release_download['status'].upper(), timeleft))

                if release_download['status'] == 'busy':
                    # Re-mark as snatched in case it was missing before
                    fireEvent('release.update_status', rel.get('_id'), status = 'snatched', single = True)

                    # Tag it so a folder inside 'from' isn't processed while still downloading
                    if self.movieInFromFolder(release_download['folder']):
                        self.tagRelease(release_download = release_download, tag = 'downloading')

                elif release_download['status'] == 'seeding':
                    # With linking enabled the release can be processed while it keeps seeding
                    if self.conf('file_action') != 'move' and not rel.get('status') == 'seeding' and self.statusInfoComplete(release_download):
                        log.info('Download of %s completed! It is now being processed while leaving the original files alone for seeding. Current ratio: %s.', (release_download['name'], release_download['seed_ratio']))

                        self.untagRelease(release_download = release_download, tag = 'downloading')

                        # Scan now; pause the torrent while the renamer works
                        release_download.update({'pause': True, 'scan': True, 'process_complete': False})
                        to_process.append(release_download)
                    else:
                        # Leave it seeding
                        log.debug('%s is seeding with ratio: %s', (release_download['name'], release_download['seed_ratio']))

                        fireEvent('release.update_status', rel.get('_id'), status = 'seeding', single = True)

                elif release_download['status'] == 'failed':
                    fireEvent('release.update_status', rel.get('_id'), status = 'failed', single = True)

                    fireEvent('download.remove_failed', release_download, single = True)

                    if self.conf('next_on_failed'):
                        fireEvent('movie.searcher.try_next_release', media_id = rel.get('media_id'))

                elif release_download['status'] == 'completed':
                    log.info('Download of %s completed!', release_download['name'])

                    # The downloader must have told us where to look
                    if self.statusInfoComplete(release_download):

                        # If it has been seeding, process now that seeding is done
                        if rel.get('status') == 'seeding':
                            if self.conf('file_action') != 'move':
                                # Already renamed while seeding; just let the downloader clean up
                                fireEvent('release.update_status', rel.get('_id'), status = 'downloaded', single = True)

                                release_download.update({'pause': False, 'scan': False, 'process_complete': True})
                                to_process.append(release_download)
                            else:
                                # Scan, then let the downloader clean up
                                release_download.update({'pause': False, 'scan': True, 'process_complete': True})
                                to_process.append(release_download)

                        else:
                            # Re-mark as snatched in case it was missing before
                            fireEvent('release.update_status', rel.get('_id'), status = 'snatched', single = True)

                            self.untagRelease(release_download = release_download, tag = 'downloading')

                            # Scan, then let the downloader clean up
                            release_download.update({'pause': False, 'scan': True, 'process_complete': True})
                            to_process.append(release_download)
                    else:
                        needs_scan = True

        except:
            log.error('Failed checking for release in downloader: %s', traceback.format_exc())

        # Run the renamer over everything queued above
        for release_download in to_process:
            if release_download['scan']:
                if release_download['pause'] and self.conf('file_action') in ['link', "symlink_reversed"]:
                    fireEvent('download.pause', release_download = release_download, pause = True, single = True)
                self.scan(release_download = release_download)
                if release_download['pause'] and self.conf('file_action') in ['link', "symlink_reversed"]:
                    fireEvent('download.pause', release_download = release_download, pause = False, single = True)
            if release_download['process_complete']:
                # Only hand back to the downloader when renaming succeeded
                if not self.hastagRelease(release_download = release_download, tag = 'failed_rename'):
                    # Remove the seeding tag if it exists
                    self.untagRelease(release_download = release_download, tag = 'renamed_already')
                    fireEvent('download.process_complete', release_download = release_download, single = True)

        if fire_scan and (needs_scan or len(statusless) > 0):
            self.scan()

        self.checking_snatched = False
        return True
    except:
        log.error('Failed checking snatched: %s', traceback.format_exc())

    self.checking_snatched = False
    return False
release_download.get('id')), with_doc = True)['doc'] + except: + log.error('Download ID %s from downloader %s not found in releases', (release_download.get('id'), release_download.get('downloader'))) + + if rls: + media = db.get('id', rls['media_id']) + release_download.update({ + 'imdb_id': getIdentifier(media), + 'quality': rls['quality'], + 'is_3d': rls['is_3d'], + 'protocol': rls.get('info', {}).get('protocol') or rls.get('info', {}).get('type'), + 'release_id': rls['_id'], + }) + + return release_download + + def downloadIsTorrent(self, release_download): + return release_download and release_download.get('protocol') in ['torrent', 'torrent_magnet'] + + def fileIsAdded(self, src, group): + if not group or not group.get('before_rename'): + return False + return src in group['before_rename'] + + def moveTypeIsLinked(self): + return self.conf('default_file_action') in ['copy', 'link', "symlink_reversed"] + + def statusInfoComplete(self, release_download): + return release_download.get('id') and release_download.get('downloader') and release_download.get('folder') + + def movieInFromFolder(self, media_folder): + return media_folder and isSubFolder(media_folder, sp(self.conf('from'))) or not media_folder + + @property + def ignored_in_path(self): + return self.conf('ignored_in_path').split(":") if self.conf('ignored_in_path') else [] + + def filesAfterIgnoring(self, original_file_list): + kept_files = [] + for path in original_file_list: + if self.keepFile(path): + kept_files.append(path) + else: + log.debug('Ignored "%s" during renaming', path) + return kept_files + + def keepFile(self, filename): + + # ignoredpaths + for i in self.ignored_in_path: + if i in filename.lower(): + log.debug('Ignored "%s" contains "%s".', (filename, i)) + return False + + # All is OK + return True + + def extractFiles(self, folder = None, media_folder = None, files = None, cleanup = False): + if not files: files = [] + + # RegEx for finding rar files + archive_regex = 
'(?P^(?P(?:(?!\.part\d+\.rar$).)*)\.(?:(?:part0*1\.)?rar)$)' + restfile_regex = '(^%s\.(?:part(?!0*1\.rar$)\d+\.rar$|[rstuvw]\d+$))' + extr_files = [] + + from_folder = sp(self.conf('from')) + + # Check input variables + if not folder: + folder = from_folder + + check_file_date = True + if media_folder: + check_file_date = False + + if not files: + for root, folders, names in os.walk(folder): + files.extend([sp(os.path.join(root, name)) for name in names]) + + # Find all archive files + archives = [re.search(archive_regex, name).groupdict() for name in files if re.search(archive_regex, name)] + + #Extract all found archives + for archive in archives: + # Check if it has already been processed by CPS + if self.hastagRelease(release_download = {'folder': os.path.dirname(archive['file']), 'files': archive['file']}): + continue + + # Find all related archive files + archive['files'] = [name for name in files if re.search(restfile_regex % re.escape(archive['base']), name)] + archive['files'].append(archive['file']) + + # Check if archive is fresh and maybe still copying/moving/downloading, ignore files newer than 1 minute + if check_file_date: + files_too_new, time_string = self.checkFilesChanged(archive['files']) + + if files_too_new: + log.info('Archive seems to be still copying/moving/downloading or just copied/moved/downloaded (created on %s), ignoring for now: %s', (time_string, os.path.basename(archive['file']))) + continue + + log.info('Archive %s found. 
Extracting...', os.path.basename(archive['file'])) + try: + unrar_path = self.conf('unrar_path') + unrar_path = unrar_path if unrar_path and (os.path.isfile(unrar_path) or re.match('^[a-zA-Z0-9_/\.\-]+$', unrar_path)) else None + + rar_handle = RarFile(archive['file'], custom_path = unrar_path) + extr_path = os.path.join(from_folder, os.path.relpath(os.path.dirname(archive['file']), folder)) + self.makeDir(extr_path) + for packedinfo in rar_handle.infolist(): + extr_file_path = sp(os.path.join(extr_path, os.path.basename(packedinfo.filename))) + if not packedinfo.isdir and not os.path.isfile(extr_file_path): + log.debug('Extracting %s...', packedinfo.filename) + rar_handle.extract(condition = [packedinfo.index], path = extr_path, withSubpath = False, overwrite = False) + if self.conf('unrar_modify_date'): + try: + os.utime(extr_file_path, (os.path.getatime(archive['file']), os.path.getmtime(archive['file']))) + except: + log.error('Rar modify date enabled, but failed: %s', traceback.format_exc()) + extr_files.append(extr_file_path) + del rar_handle + # Tag archive as extracted if no cleanup. 
+ if not cleanup and os.path.isfile(extr_file_path): + self.tagRelease(release_download = {'folder': os.path.dirname(archive['file']), 'files': [archive['file']]}, tag = 'extracted') + except Exception as e: + log.error('Failed to extract %s: %s %s', (archive['file'], e, traceback.format_exc())) + continue + + # Delete the archive files + for filename in archive['files']: + if cleanup: + try: + os.remove(filename) + except Exception as e: + log.error('Failed to remove %s: %s %s', (filename, e, traceback.format_exc())) + continue + files.remove(filename) + + # Move the rest of the files and folders if any files are extracted to the from folder (only if folder was provided) + if extr_files and folder != from_folder: + for leftoverfile in list(files): + move_to = os.path.join(from_folder, os.path.relpath(leftoverfile, folder)) + + try: + self.makeDir(os.path.dirname(move_to)) + self.moveFile(leftoverfile, move_to, cleanup) + except Exception as e: + log.error('Failed moving left over file %s to %s: %s %s', (leftoverfile, move_to, e, traceback.format_exc())) + # As we probably tried to overwrite the nfo file, check if it exists and then remove the original + if os.path.isfile(move_to) and os.path.getsize(leftoverfile) == os.path.getsize(move_to): + if cleanup: + log.info('Deleting left over file %s instead...', leftoverfile) + os.unlink(leftoverfile) + else: + continue + + files.remove(leftoverfile) + extr_files.append(move_to) + + if cleanup: + # Remove all left over folders + log.debug('Removing old movie folder %s...', media_folder) + self.deleteEmptyFolder(media_folder) + + media_folder = os.path.join(from_folder, os.path.relpath(media_folder, folder)) + folder = from_folder + + if extr_files: + files.extend(extr_files) + + # Cleanup files and folder if media_folder was not provided + if not media_folder: + files = [] + folder = None + + return folder, media_folder, files, extr_files + + +rename_options = { + 'pre': '<', + 'post': '>', + 'choices': { + 'ext': 
'Extension (mkv)', + 'namethe': 'Moviename, The', + 'thename': 'The Moviename', + 'frnamethe': 'NomDuFilm, Le', + 'frthename': 'Le NomDuFilm', + 'year': 'Year (2011)', + 'first': 'First letter (M)', + 'quality': 'Quality (720p)', + 'quality_type': '(HD) or (SD)', + '3d': '3D', + '3d_type': '3D Type (Full SBS)', + '3d_type_short' : 'Short 3D Type (FSBS)', + 'video': 'Video (x264)', + 'audio': 'Audio (DTS)', + 'group': 'Releasegroup name', + 'source': 'Source media (Bluray)', + 'resolution_width': 'resolution width (1280)', + 'resolution_height': 'resolution height (720)', + 'audio_channels': 'audio channels (7.1)', + 'original': 'Original filename', + 'original_folder': 'Original foldername', + 'imdb_id': 'IMDB id (tt0123456)', + 'cd': 'CD number (cd1)', + 'cd_nr': 'Just the cd nr. (1)', + 'mpaa': 'MPAA or other certification', + 'mpaa_only': 'MPAA only certification (G|PG|PG-13|R|NC-17|Not Rated)', + 'category': 'Category label', + }, +} + +config = [{ + 'name': 'renamer', + 'order': 40, + 'description': 'Move and rename your downloaded movies to your movie directory.', + 'groups': [ + { + 'tab': 'renamer', + 'name': 'renamer', + 'label': 'Rename downloaded movies', + 'wizard': True, + 'options': [ + { + 'name': 'enabled', + 'default': False, + 'type': 'enabler', + }, + { + 'name': 'from', + 'type': 'directory', + 'description': 'Folder where CP searches for movies.', + }, + { + 'name': 'to', + 'type': 'directory', + 'description': 'Default folder where the movies are moved/copied/linked to.', + }, + { + 'name': 'folder_name', + 'label': 'Folder naming', + 'description': 'Name of the folder. 
Keep empty for no folder.', + 'default': ' ()', + 'type': 'choice', + 'options': rename_options + }, + { + 'name': 'file_name', + 'label': 'File naming', + 'description': 'Name of the file', + 'default': '.', + 'type': 'choice', + 'options': rename_options + }, + { + 'advanced': True, + 'name': 'use_tab_threed', + 'type': 'bool', + 'label': 'Use TAB 3D', + 'description': ('Use TAB (Top And Bottom) instead of OU (Over Under).','This will allow Kodi to recognize vertical formatted 3D movies properly.'), + 'default': True + }, + { + 'advanced': True, + 'name': 'replace_doubles', + 'type': 'bool', + 'label': 'Clean Name', + 'description': ('Attempt to clean up double separaters due to missing data for fields.','Sometimes this eliminates wanted white space (see #2782).'), + 'default': True + }, + { + 'name': 'ignored_in_path', + 'label': 'Ignored file patterns', + 'description': ('A list of globs to path match when scanning, separated by ":"', 'anything on this list will be skipped during rename operations'), + 'default': '*/.sync/*', + }, + { + 'name': 'unrar', + 'type': 'bool', + 'description': 'Extract rar files if found.', + 'default': False, + }, + { + 'advanced': True, + 'name': 'unrar_path', + 'description': 'Custom path to unrar bin', + }, + { + 'advanced': True, + 'name': 'unrar_modify_date', + 'type': 'bool', + 'description': ('Set modify date of unrar-ed files to the rar-file\'s date.', 'This will allow Kodi to recognize extracted files as recently added even if the movie was released some time ago.'), + 'default': False, + }, + { + 'name': 'cleanup', + 'type': 'bool', + 'description': 'Cleanup leftover files after successful rename.', + 'default': False, + }, + { + 'name': 'remove_lower_quality_copies', + 'type': 'bool', + 'label': 'Delete Others', + 'description': 'Remove lower/equal quality copies of a release after downloading.', + 'default': True, + }, + { + 'advanced': True, + 'name': 'run_every', + 'label': 'Run every', + 'default': 1, + 'type': 'int', 
+ 'unit': 'min(s)', + 'description': ('Detect movie status every X minutes.', 'Will start the renamer if movie is completed or handle failed download if these options are enabled'), + }, + { + 'advanced': True, + 'name': 'force_every', + 'label': 'Force every', + 'default': 2, + 'type': 'int', + 'unit': 'hour(s)', + 'description': 'Forces the renamer to scan every X hours', + }, + { + 'advanced': True, + 'name': 'next_on_failed', + 'default': True, + 'type': 'bool', + 'description': 'Try the next best release for a movie after a download failed.', + }, + { + 'name': 'move_leftover', + 'type': 'bool', + 'description': 'Move all leftover file after renaming, to the movie folder.', + 'default': False, + 'advanced': True, + }, + { + 'advanced': True, + 'name': 'separator', + 'label': 'File-Separator', + 'description': ('Replace all the spaces with a character.', 'Example: ".", "-" (without quotes). Leave empty to use spaces.'), + }, + { + 'advanced': True, + 'name': 'foldersep', + 'label': 'Folder-Separator', + 'description': ('Replace all the spaces with a character.', 'Example: ".", "-" (without quotes). Leave empty to use spaces.'), + }, + { + 'name': 'check_space', + 'label': 'Check space', + 'default': True, + 'type': 'bool', + 'description': ('Check if there\'s enough available space to rename the files', 'Disable when the filesystem doesn\'t return the proper value'), + 'advanced': True, + }, + { + 'name': 'default_file_action', + 'label': 'Default File Action', + 'default': 'move', + 'type': 'dropdown', + 'values': [('Link', 'link'), ('Copy', 'copy'), ('Move', 'move'), ('Reverse Symlink', 'symlink_reversed')], + 'description': ('Link, Copy or Move after download completed.', + 'Link first tries hard link, then sym link and falls back to Copy. 
Reverse Symlink moves the file and creates symlink to it in the original location'), + 'advanced': True, + }, + { + 'name': 'file_action', + 'label': 'Torrent File Action', + 'default': 'link', + 'type': 'dropdown', + 'values': [('Link', 'link'), ('Copy', 'copy'), ('Move', 'move'), ('Reverse Symlink', 'symlink_reversed')], + 'description': 'See above. It is prefered to use link when downloading torrents as it will save you space, while still being able to seed.', + 'advanced': True, + }, + { + 'advanced': True, + 'name': 'ntfs_permission', + 'label': 'NTFS Permission', + 'type': 'bool', + 'hidden': os.name != 'nt', + 'description': 'Set permission of moved files to that of destination folder (Windows NTFS only).', + 'default': False, + }, + ], + }, { + 'tab': 'renamer', + 'name': 'meta_renamer', + 'label': 'Advanced renaming', + 'description': 'Meta data file renaming. Use <filename> to use the above "File naming" settings, without the file extension.', + 'advanced': True, + 'options': [ + { + 'name': 'rename_nfo', + 'label': 'Rename .NFO', + 'description': 'Rename original .nfo file', + 'type': 'bool', + 'default': True, + }, + { + 'name': 'nfo_name', + 'label': 'NFO naming', + 'default': '.orig.', + 'type': 'choice', + 'options': rename_options + }, + ], + }, + ], +}] diff --git a/couchpotato/core/plugins/renamer/__init__.py b/couchpotato/core/plugins/renamer/__init__.py deleted file mode 100644 index 6ce21922d5..0000000000 --- a/couchpotato/core/plugins/renamer/__init__.py +++ /dev/null @@ -1,149 +0,0 @@ -from couchpotato.core.plugins.renamer.main import Renamer -import os - -def start(): - return Renamer() - -rename_options = { - 'pre': '<', - 'post': '>', - 'choices': { - 'ext': 'Extention (mkv)', - 'namethe': 'Moviename, The', - 'thename': 'The Moviename', - 'year': 'Year (2011)', - 'first': 'First letter (M)', - 'quality': 'Quality (720P)', - 'video': 'Video (x264)', - 'audio': 'Audio (DTS)', - 'group': 'Releasegroup name', - 'source': 'Source media 
(Bluray)', - 'original': 'Original filename', - 'original_folder': 'Original foldername', - 'imdb_id': 'IMDB id (tt0123456)', - 'cd': 'CD number (cd1)', - 'cd_nr': 'Just the cd nr. (1)', - }, -} - -config = [{ - 'name': 'renamer', - 'order': 40, - 'description': 'Move and rename your downloaded movies to your movie directory.', - 'groups': [ - { - 'tab': 'renamer', - 'name': 'renamer', - 'label': 'Rename downloaded movies', - 'wizard': True, - 'options': [ - { - 'name': 'enabled', - 'default': False, - 'type': 'enabler', - }, - { - 'name': 'from', - 'type': 'directory', - 'description': 'Folder where CP searches for movies.', - }, - { - 'name': 'to', - 'type': 'directory', - 'description': 'Folder where the movies should be moved to.', - }, - { - 'name': 'folder_name', - 'label': 'Folder naming', - 'description': 'Name of the folder. Keep empty for no folder.', - 'default': ' ()', - 'type': 'choice', - 'options': rename_options - }, - { - 'name': 'file_name', - 'label': 'File naming', - 'description': 'Name of the file', - 'default': '.', - 'type': 'choice', - 'options': rename_options - }, - { - 'name': 'cleanup', - 'type': 'bool', - 'description': 'Cleanup leftover files after successful rename.', - 'default': False, - }, - { - 'advanced': True, - 'name': 'run_every', - 'label': 'Run every', - 'default': 1, - 'type': 'int', - 'unit': 'min(s)', - 'description': 'Detect movie status every X minutes. 
Will start the renamer if movie is completed or handle failed download if these options are enabled', - }, - { - 'advanced': True, - 'name': 'force_every', - 'label': 'Force every', - 'default': 2, - 'type': 'int', - 'unit': 'hour(s)', - 'description': 'Forces the renamer to scan every X hours', - }, - { - 'advanced': True, - 'name': 'next_on_failed', - 'default': True, - 'type': 'bool', - 'description': 'Try the next best release for a movie after a download failed.', - }, - { - 'name': 'move_leftover', - 'type': 'bool', - 'description': 'Move all leftover file after renaming, to the movie folder.', - 'default': False, - 'advanced': True, - }, - { - 'advanced': True, - 'name': 'separator', - 'label': 'Separator', - 'description': 'Replace all the spaces with a character. Example: ".", "-" (without quotes). Leave empty to use spaces.', - }, - { - 'advanced': True, - 'name': 'ntfs_permission', - 'label': 'NTFS Permission', - 'type': 'bool', - 'hidden': os.name != 'nt', - 'description': 'Set permission of moved files to that of destination folder (Windows NTFS only).', - 'default': False, - }, - ], - }, { - 'tab': 'renamer', - 'name': 'meta_renamer', - 'label': 'Advanced renaming', - 'description': 'Meta data file renaming. 
Use <filename> to use the above "File naming" settings, without the file extention.', - 'advanced': True, - 'options': [ - { - 'name': 'rename_nfo', - 'label': 'Rename .NFO', - 'description': 'Rename original .nfo file', - 'type': 'bool', - 'default': True, - }, - { - 'name': 'nfo_name', - 'label': 'NFO naming', - 'default': '.orig.', - 'type': 'choice', - 'options': rename_options - }, - ], - }, - ], -}] diff --git a/couchpotato/core/plugins/renamer/main.py b/couchpotato/core/plugins/renamer/main.py deleted file mode 100644 index 7df93b83bd..0000000000 --- a/couchpotato/core/plugins/renamer/main.py +++ /dev/null @@ -1,602 +0,0 @@ -from couchpotato import get_session -from couchpotato.api import addApiView -from couchpotato.core.event import addEvent, fireEvent, fireEventAsync -from couchpotato.core.helpers.encoding import toUnicode, ss -from couchpotato.core.helpers.request import jsonified -from couchpotato.core.helpers.variable import getExt, mergeDicts, getTitle, \ - getImdb -from couchpotato.core.logger import CPLog -from couchpotato.core.plugins.base import Plugin -from couchpotato.core.settings.model import Library, File, Profile, Release -from couchpotato.environment import Env -import errno -import os -import re -import shutil -import traceback - -log = CPLog(__name__) - - -class Renamer(Plugin): - - renaming_started = False - checking_snatched = False - - def __init__(self): - - addApiView('renamer.scan', self.scanView, docs = { - 'desc': 'For the renamer to check for new files to rename', - }) - - addEvent('renamer.scan', self.scan) - addEvent('renamer.check_snatched', self.checkSnatched) - - addEvent('app.load', self.scan) - addEvent('app.load', self.checkSnatched) - - if self.conf('run_every') > 0: - fireEvent('schedule.interval', 'renamer.check_snatched', self.checkSnatched, minutes = self.conf('run_every')) - - if self.conf('force_every') > 0: - fireEvent('schedule.interval', 'renamer.check_snatched_forced', self.scan, hours = 
self.conf('force_every')) - - def scanView(self): - - fireEventAsync('renamer.scan') - - return jsonified({ - 'success': True - }) - - def scan(self): - - if self.isDisabled(): - return - - if self.renaming_started is True: - log.info('Renamer is already running, if you see this often, check the logs above for errors.') - return - - # Check to see if the "to" folder is inside the "from" folder. - if not os.path.isdir(self.conf('from')) or not os.path.isdir(self.conf('to')): - log.debug('"To" and "From" have to exist.') - return - elif self.conf('from') in self.conf('to'): - log.error('The "to" can\'t be inside of the "from" folder. You\'ll get an infinite loop.') - return - - groups = fireEvent('scanner.scan', folder = self.conf('from'), single = True) - - self.renaming_started = True - - destination = self.conf('to') - folder_name = self.conf('folder_name') - file_name = self.conf('file_name') - trailer_name = self.conf('trailer_name') - nfo_name = self.conf('nfo_name') - separator = self.conf('separator') - - # Statusses - done_status = fireEvent('status.get', 'done', single = True) - active_status = fireEvent('status.get', 'active', single = True) - downloaded_status = fireEvent('status.get', 'downloaded', single = True) - snatched_status = fireEvent('status.get', 'snatched', single = True) - - db = get_session() - - for group_identifier in groups: - - group = groups[group_identifier] - rename_files = {} - remove_files = [] - remove_releases = [] - - movie_title = getTitle(group['library']) - - # Add _UNKNOWN_ if no library item is connected - if not group['library'] or not movie_title: - self.tagDir(group, 'unknown') - continue - # Rename the files using the library data - else: - group['library'] = fireEvent('library.update', identifier = group['library']['identifier'], single = True) - if not group['library']: - log.error('Could not rename, no library item to work with: %s', group_identifier) - continue - - library = group['library'] - movie_title = 
getTitle(library) - - # Find subtitle for renaming - fireEvent('renamer.before', group) - - # Remove weird chars from moviename - movie_name = re.sub(r"[\x00\/\\:\*\?\"<>\|]", '', movie_title) - - # Put 'The' at the end - name_the = movie_name - if movie_name[:4].lower() == 'the ': - name_the = movie_name[4:] + ', The' - - replacements = { - 'ext': 'mkv', - 'namethe': name_the.strip(), - 'thename': movie_name.strip(), - 'year': library['year'], - 'first': name_the[0].upper(), - 'quality': group['meta_data']['quality']['label'], - 'quality_type': group['meta_data']['quality_type'], - 'video': group['meta_data'].get('video'), - 'audio': group['meta_data'].get('audio'), - 'group': group['meta_data']['group'], - 'source': group['meta_data']['source'], - 'resolution_width': group['meta_data'].get('resolution_width'), - 'resolution_height': group['meta_data'].get('resolution_height'), - 'imdb_id': library['identifier'], - 'cd': '', - 'cd_nr': '', - } - - for file_type in group['files']: - - # Move nfo depending on settings - if file_type is 'nfo' and not self.conf('rename_nfo'): - log.debug('Skipping, renaming of %s disabled', file_type) - if self.conf('cleanup'): - for current_file in group['files'][file_type]: - remove_files.append(current_file) - continue - - # Subtitle extra - if file_type is 'subtitle_extra': - continue - - # Move other files - multiple = len(group['files'][file_type]) > 1 and not group['is_dvd'] - cd = 1 if multiple else 0 - - for current_file in sorted(list(group['files'][file_type])): - - # Original filename - replacements['original'] = os.path.splitext(os.path.basename(current_file))[0] - replacements['original_folder'] = fireEvent('scanner.remove_cptag', group['dirname'], single = True) - - # Extension - replacements['ext'] = getExt(current_file) - - # cd # - replacements['cd'] = ' cd%d' % cd if multiple else '' - replacements['cd_nr'] = cd if multiple else '' - - # Naming - final_folder_name = self.doReplace(folder_name, 
replacements).lstrip('. ') - final_file_name = self.doReplace(file_name, replacements).lstrip('. ') - replacements['filename'] = final_file_name[:-(len(getExt(final_file_name)) + 1)] - - # Meta naming - if file_type is 'trailer': - final_file_name = self.doReplace(trailer_name, replacements, remove_multiple = True).lstrip('. ') - elif file_type is 'nfo': - final_file_name = self.doReplace(nfo_name, replacements, remove_multiple = True).lstrip('. ') - - # Seperator replace - if separator: - final_file_name = final_file_name.replace(' ', separator) - - # Move DVD files (no structure renaming) - if group['is_dvd'] and file_type is 'movie': - found = False - for top_dir in ['video_ts', 'audio_ts', 'bdmv', 'certificate']: - has_string = current_file.lower().find(os.path.sep + top_dir + os.path.sep) - if has_string >= 0: - structure_dir = current_file[has_string:].lstrip(os.path.sep) - rename_files[current_file] = os.path.join(destination, final_folder_name, structure_dir) - found = True - break - - if not found: - log.error('Could not determine dvd structure for: %s', current_file) - - # Do rename others - else: - if file_type is 'leftover': - if self.conf('move_leftover'): - rename_files[current_file] = os.path.join(destination, final_folder_name, os.path.basename(current_file)) - elif file_type not in ['subtitle']: - rename_files[current_file] = os.path.join(destination, final_folder_name, final_file_name) - - # Check for extra subtitle files - if file_type is 'subtitle': - - remove_multiple = False - if len(group['files']['movie']) == 1: - remove_multiple = True - - sub_langs = group['subtitle_language'].get(current_file, []) - - # rename subtitles with or without language - sub_name = self.doReplace(file_name, replacements, remove_multiple = remove_multiple) - rename_files[current_file] = os.path.join(destination, final_folder_name, sub_name) - - rename_extras = self.getRenameExtras( - extra_type = 'subtitle_extra', - replacements = replacements, - folder_name = 
folder_name, - file_name = file_name, - destination = destination, - group = group, - current_file = current_file, - remove_multiple = remove_multiple, - ) - - # Don't add language if multiple languages in 1 subtitle file - if len(sub_langs) == 1: - sub_name = final_file_name.replace(replacements['ext'], '%s.%s' % (sub_langs[0], replacements['ext'])) - rename_files[current_file] = os.path.join(destination, final_folder_name, sub_name) - - rename_files = mergeDicts(rename_files, rename_extras) - - # Filename without cd etc - elif file_type is 'movie': - rename_extras = self.getRenameExtras( - extra_type = 'movie_extra', - replacements = replacements, - folder_name = folder_name, - file_name = file_name, - destination = destination, - group = group, - current_file = current_file - ) - rename_files = mergeDicts(rename_files, rename_extras) - - group['filename'] = self.doReplace(file_name, replacements, remove_multiple = True)[:-(len(getExt(final_file_name)) + 1)] - group['destination_dir'] = os.path.join(destination, final_folder_name) - - if multiple: - cd += 1 - - # Before renaming, remove the lower quality files - library = db.query(Library).filter_by(identifier = group['library']['identifier']).first() - remove_leftovers = True - - # Add it to the wanted list before we continue - if len(library.movies) == 0: - profile = db.query(Profile).filter_by(core = True, label = group['meta_data']['quality']['label']).first() - fireEvent('movie.add', params = {'identifier': group['library']['identifier'], 'profile_id': profile.id}, search_after = False) - db.expire_all() - library = db.query(Library).filter_by(identifier = group['library']['identifier']).first() - - for movie in library.movies: - - # Mark movie "done" onces it found the quality with the finish check - try: - if movie.status_id == active_status.get('id') and movie.profile: - for profile_type in movie.profile.types: - if profile_type.quality_id == group['meta_data']['quality']['id'] and profile_type.finish: - 
movie.status_id = done_status.get('id') - db.commit() - except Exception, e: - log.error('Failed marking movie finished: %s %s', (e, traceback.format_exc())) - - # Go over current movie releases - for release in movie.releases: - - # When a release already exists - if release.status_id is done_status.get('id'): - - # This is where CP removes older, lesser quality releases - if release.quality.order > group['meta_data']['quality']['order']: - log.info('Removing lesser quality %s for %s.', (movie.library.titles[0].title, release.quality.label)) - for current_file in release.files: - remove_files.append(current_file) - remove_releases.append(release) - # Same quality, but still downloaded, so maybe repack/proper/unrated/directors cut etc - elif release.quality.order is group['meta_data']['quality']['order']: - log.info('Same quality release already exists for %s, with quality %s. Assuming repack.', (movie.library.titles[0].title, release.quality.label)) - for current_file in release.files: - remove_files.append(current_file) - remove_releases.append(release) - - # Downloaded a lower quality, rename the newly downloaded files/folder to exclude them from scan - else: - log.info('Better quality release already exists for %s, with quality %s', (movie.library.titles[0].title, release.quality.label)) - - # Add _EXISTS_ to the parent dir - self.tagDir(group, 'exists') - - # Notify on rename fail - download_message = 'Renaming of %s (%s) canceled, exists in %s already.' 
% (movie.library.titles[0].title, group['meta_data']['quality']['label'], release.quality.label) - fireEvent('movie.renaming.canceled', message = download_message, data = group) - remove_leftovers = False - - break - elif release.status_id is snatched_status.get('id'): - if release.quality.id is group['meta_data']['quality']['id']: - log.debug('Marking release as downloaded') - try: - release.status_id = downloaded_status.get('id') - except Exception, e: - log.error('Failed marking release as finished: %s %s', (e, traceback.format_exc())) - db.commit() - - # Remove leftover files - if self.conf('cleanup') and not self.conf('move_leftover') and remove_leftovers: - log.debug('Removing leftover files') - for current_file in group['files']['leftover']: - remove_files.append(current_file) - elif not remove_leftovers: # Don't remove anything - break - - # Remove files - delete_folders = [] - for src in remove_files: - - if isinstance(src, File): - src = src.path - - if rename_files.get(src): - log.debug('Not removing file that will be renamed: %s', src) - continue - - log.info('Removing "%s"', src) - try: - src = ss(src) - if os.path.isfile(src): - os.remove(src) - - parent_dir = os.path.normpath(os.path.dirname(src)) - if delete_folders.count(parent_dir) == 0 and os.path.isdir(parent_dir) and destination != parent_dir: - delete_folders.append(parent_dir) - - except: - log.error('Failed removing %s: %s', (src, traceback.format_exc())) - self.tagDir(group, 'failed_remove') - - # Delete leftover folder from older releases - for delete_folder in delete_folders: - try: - self.deleteEmptyFolder(delete_folder, show_error = False) - except Exception, e: - log.error('Failed to delete folder: %s %s', (e, traceback.format_exc())) - - # Rename all files marked - group['renamed_files'] = [] - for src in rename_files: - if rename_files[src]: - dst = rename_files[src] - log.info('Renaming "%s" to "%s"', (src, dst)) - - # Create dir - self.makeDir(os.path.dirname(dst)) - - try: - 
self.moveFile(src, dst) - group['renamed_files'].append(dst) - except: - log.error('Failed moving the file "%s" : %s', (os.path.basename(src), traceback.format_exc())) - self.tagDir(group, 'failed_rename') - - # Remove matching releases - for release in remove_releases: - log.debug('Removing release %s', release.identifier) - try: - db.delete(release) - except: - log.error('Failed removing %s: %s', (release.identifier, traceback.format_exc())) - - if group['dirname'] and group['parentdir']: - try: - log.info('Deleting folder: %s', group['parentdir']) - self.deleteEmptyFolder(group['parentdir']) - except: - log.error('Failed removing %s: %s', (group['parentdir'], traceback.format_exc())) - - # Notify on download, search for trailers etc - download_message = 'Downloaded %s (%s)' % (movie_title, replacements['quality']) - try: - fireEvent('renamer.after', message = download_message, group = group, in_order = True) - except: - log.error('Failed firing (some) of the renamer.after events: %s', traceback.format_exc()) - - # Break if CP wants to shut down - if self.shuttingDown(): - break - - self.renaming_started = False - - def getRenameExtras(self, extra_type = '', replacements = {}, folder_name = '', file_name = '', destination = '', group = {}, current_file = '', remove_multiple = False): - - replacements = replacements.copy() - rename_files = {} - - def test(s): - return current_file[:-len(replacements['ext'])] in s - - for extra in set(filter(test, group['files'][extra_type])): - replacements['ext'] = getExt(extra) - - final_folder_name = self.doReplace(folder_name, replacements, remove_multiple = remove_multiple) - final_file_name = self.doReplace(file_name, replacements, remove_multiple = remove_multiple) - rename_files[extra] = os.path.join(destination, final_folder_name, final_file_name) - - return rename_files - - def tagDir(self, group, tag): - - rename_files = {} - - if group['dirname']: - rename_files[group['parentdir']] = 
group['parentdir'].replace(group['dirname'], '_%s_%s' % (tag.upper(), group['dirname'])) - else: # Add it to filename - for file_type in group['files']: - for rename_me in group['files'][file_type]: - filename = os.path.basename(rename_me) - rename_files[rename_me] = rename_me.replace(filename, '_%s_%s' % (tag.upper(), filename)) - - for src in rename_files: - if rename_files[src]: - dst = rename_files[src] - log.info('Renaming "%s" to "%s"', (src, dst)) - - # Create dir - self.makeDir(os.path.dirname(dst)) - - try: - self.moveFile(src, dst) - except: - log.error('Failed moving the file "%s" : %s', (os.path.basename(src), traceback.format_exc())) - raise - - def moveFile(self, old, dest): - dest = ss(dest) - try: - shutil.move(old, dest) - - try: - os.chmod(dest, Env.getPermission('file')) - if os.name == 'nt' and self.conf('ntfs_permission'): - os.popen('icacls "' + dest + '"* /reset /T') - except: - log.error('Failed setting permissions for file: %s, %s', (dest, traceback.format_exc(1))) - - except OSError, err: - # Copying from a filesystem with octal permission to an NTFS file system causes a permission error. In this case ignore it. 
- if not hasattr(os, 'chmod') or err.errno != errno.EPERM: - raise - else: - if os.path.exists(dest): - os.unlink(old) - - except: - log.error('Couldn\'t move file "%s" to "%s": %s', (old, dest, traceback.format_exc())) - raise - - return True - - def doReplace(self, string, replacements, remove_multiple = False): - ''' - replace confignames with the real thing - ''' - - replacements = replacements.copy() - if remove_multiple: - replacements['cd'] = '' - replacements['cd_nr'] = '' - - replaced = toUnicode(string) - for x, r in replacements.iteritems(): - if r is not None: - replaced = replaced.replace(u'<%s>' % toUnicode(x), toUnicode(r)) - else: - #If information is not available, we don't want the tag in the filename - replaced = replaced.replace('<' + x + '>', '') - - replaced = re.sub(r"[\x00:\*\?\"<>\|]", '', replaced) - - sep = self.conf('separator') - return self.replaceDoubles(replaced).replace(' ', ' ' if not sep else sep) - - def replaceDoubles(self, string): - return string.replace(' ', ' ').replace(' .', '.') - - def deleteEmptyFolder(self, folder, show_error = True): - folder = ss(folder) - - loge = log.error if show_error else log.debug - for root, dirs, files in os.walk(folder): - - for dir_name in dirs: - full_path = os.path.join(root, dir_name) - if len(os.listdir(full_path)) == 0: - try: - os.rmdir(full_path) - except: - loge('Couldn\'t remove empty directory %s: %s', (full_path, traceback.format_exc())) - - try: - os.rmdir(folder) - except: - loge('Couldn\'t remove empty directory %s: %s', (folder, traceback.format_exc())) - - def checkSnatched(self): - if self.checking_snatched: - log.debug('Already checking snatched') - - self.checking_snatched = True - - snatched_status = fireEvent('status.get', 'snatched', single = True) - ignored_status = fireEvent('status.get', 'ignored', single = True) - failed_status = fireEvent('status.get', 'failed', single = True) - - done_status = fireEvent('status.get', 'done', single = True) - - db = get_session() - 
rels = db.query(Release).filter_by(status_id = snatched_status.get('id')).all() - - scan_required = False - - if rels: - self.checking_snatched = True - log.debug('Checking status snatched releases...') - - statuses = fireEvent('download.status', merge = True) - if not statuses: - log.debug('Download status functionality is not implemented for active downloaders.') - scan_required = True - else: - try: - for rel in rels: - rel_dict = rel.to_dict({'info': {}}) - - # Get current selected title - default_title = getTitle(rel.movie.library) - - # Check if movie has already completed and is manage tab (legacy db correction) - if rel.movie.status_id == done_status.get('id'): - log.debug('Found a completed movie with a snatched release : %s. Setting release status to ignored...' , default_title) - rel.status_id = ignored_status.get('id') - db.commit() - continue - - movie_dict = fireEvent('movie.get', rel.movie_id, single = True) - - # check status - nzbname = self.createNzbName(rel_dict['info'], movie_dict) - - found = False - for item in statuses: - if item['name'] == nzbname or rel_dict['info']['name'] in item['name'] or getImdb(item['name']) == movie_dict['library']['identifier']: - - timeleft = 'N/A' if item['timeleft'] == -1 else item['timeleft'] - log.debug('Found %s: %s, time to go: %s', (item['name'], item['status'].upper(), timeleft)) - - if item['status'] == 'busy': - pass - elif item['status'] == 'failed': - fireEvent('download.remove_failed', item, single = True) - - if self.conf('next_on_failed'): - fireEvent('searcher.try_next_release', movie_id = rel.movie_id) - else: - rel.status_id = failed_status.get('id') - db.commit() - elif item['status'] == 'completed': - log.info('Download of %s completed!', item['name']) - scan_required = True - - found = True - break - - if not found: - log.info('%s not found in downloaders', nzbname) - - except: - log.error('Failed checking for release in downloader: %s', traceback.format_exc()) - - if scan_required: - 
fireEvent('renamer.scan') - - self.checking_snatched = False - - return True diff --git a/couchpotato/core/plugins/scanner.py b/couchpotato/core/plugins/scanner.py new file mode 100644 index 0000000000..9233d28ecc --- /dev/null +++ b/couchpotato/core/plugins/scanner.py @@ -0,0 +1,974 @@ +import os +import re +import threading +import time +import traceback + +from couchpotato import get_db +from couchpotato.core.event import fireEvent, addEvent +from couchpotato.core.helpers.encoding import toUnicode, simplifyString, sp, ss +from couchpotato.core.helpers.variable import getExt, getImdb, tryInt, \ + splitString, getIdentifier +from couchpotato.core.logger import CPLog +from couchpotato.core.plugins.base import Plugin +from guessit import guess_movie_info +from subliminal.videos import Video +import enzyme +from six.moves import filter, map, zip + + +log = CPLog(__name__) + +autoload = 'Scanner' + + +class Scanner(Plugin): + + ignored_in_path = [os.path.sep + 'extracted' + os.path.sep, 'extracting', '_unpack', '_failed_', '_unknown_', '_exists_', '_failed_remove_', + '_failed_rename_', '.appledouble', '.appledb', '.appledesktop', os.path.sep + '._', '.ds_store', 'cp.cpnfo', + 'thumbs.db', 'ehthumbs.db', 'desktop.ini'] # unpacking, smb-crap, hidden files + ignore_names = ['extract', 'extracting', 'extracted', 'movie', 'movies', 'film', 'films', 'download', 'downloads', 'video_ts', 'audio_ts', 'bdmv', 'certificate'] + ignored_extensions = ['ignore', 'lftp-pget-status'] + extensions = { + 'movie': ['mkv', 'wmv', 'avi', 'mpg', 'mpeg', 'mp4', 'm2ts', 'iso', 'img', 'mdf', 'ts', 'm4v', 'flv'], + 'movie_extra': ['mds'], + 'dvd': ['vts_*', 'vob'], + 'nfo': ['nfo', 'txt', 'tag'], + 'subtitle': ['sub', 'srt', 'ssa', 'ass'], + 'subtitle_extra': ['idx'], + 'trailer': ['mov', 'mp4', 'flv'] + } + + threed_types = { + 'Half SBS': [('half', 'sbs'), ('h', 'sbs'), 'hsbs'], + 'Full SBS': [('full', 'sbs'), ('f', 'sbs'), 'fsbs'], + 'SBS': ['sbs'], + 'Half OU': [('half', 'ou'), ('h', 
'ou'), ('half', 'tab'), ('h', 'tab'), 'htab', 'hou'], + 'Full OU': [('full', 'ou'), ('f', 'ou'), ('full', 'tab'), ('f', 'tab'), 'ftab', 'fou'], + 'OU': ['ou', 'tab'], + 'Frame Packed': ['mvc', ('complete', 'bluray')], + '3D': ['3d'] + } + + file_types = { + 'subtitle': ('subtitle', 'subtitle'), + 'subtitle_extra': ('subtitle', 'subtitle_extra'), + 'trailer': ('video', 'trailer'), + 'nfo': ('nfo', 'nfo'), + 'movie': ('video', 'movie'), + 'movie_extra': ('movie', 'movie_extra'), + 'backdrop': ('image', 'backdrop'), + 'poster': ('image', 'poster'), + 'thumbnail': ('image', 'thumbnail'), + 'leftover': ('leftover', 'leftover'), + } + + file_sizes = { # in MB + 'movie': {'min': 200}, + 'trailer': {'min': 2, 'max': 199}, + 'backdrop': {'min': 0, 'max': 5}, + } + + codecs = { + 'audio': ['DTS', 'AC3', 'AC3D', 'MP3'], + 'video': ['x264', 'H264', 'x265', 'H265', 'DivX', 'Xvid'] + } + + resolutions = { + '2160p': {'resolution_width': 3840, 'resolution_height': 2160, 'aspect': 1.78}, + '1080p': {'resolution_width': 1920, 'resolution_height': 1080, 'aspect': 1.78}, + '1080i': {'resolution_width': 1920, 'resolution_height': 1080, 'aspect': 1.78}, + '720p': {'resolution_width': 1280, 'resolution_height': 720, 'aspect': 1.78}, + '720i': {'resolution_width': 1280, 'resolution_height': 720, 'aspect': 1.78}, + '480p': {'resolution_width': 640, 'resolution_height': 480, 'aspect': 1.33}, + '480i': {'resolution_width': 640, 'resolution_height': 480, 'aspect': 1.33}, + 'default': {'resolution_width': 0, 'resolution_height': 0, 'aspect': 1}, + } + + audio_codec_map = { + 0x2000: 'AC3', + 0x2001: 'DTS', + 0x0055: 'MP3', + 0x0050: 'MP2', + 0x0001: 'PCM', + 0x003: 'WAV', + 0x77a1: 'TTA1', + 0x5756: 'WAV', + 0x6750: 'Vorbis', + 0xF1AC: 'FLAC', + 0x00ff: 'AAC', + } + + source_media = { + 'Blu-ray': ['bluray', 'blu-ray', 'brrip', 'br-rip'], + 'HD DVD': ['hddvd', 'hd-dvd'], + 'DVD': ['dvd'], + 'HDTV': ['hdtv'] + } + + clean = '([ 
_\,\.\(\)\[\]\-]|^)(3d|hsbs|sbs|half.sbs|full.sbs|ou|half.ou|full.ou|extended|extended.cut|directors.cut|french|fr|swedisch|sw|danish|dutch|nl|swesub|subs|spanish|german|ac3|dts|custom|dc|divx|divx5|dsr|dsrip|dutch|dvd|dvdr|dvdrip|dvdscr|dvdscreener|screener|dvdivx|cam|fragment|fs|hdtv|hdrip' \ + '|hdtvrip|webdl|web.dl|webrip|web.rip|internal|limited|multisubs|ntsc|ogg|ogm|pal|pdtv|proper|repack|rerip|retail|r3|r5|bd5|se|svcd|swedish|german|read.nfo|nfofix|unrated|ws|telesync|ts|telecine|tc|brrip|bdrip|video_ts|audio_ts|480p|480i|576p|576i|720p|720i|1080p|1080i|hrhd|hrhdtv|hddvd|bluray|x264|h264|x265|h265|xvid|xvidvd|xxx|www.www|hc|\[.*\])(?=[ _\,\.\(\)\[\]\-]|$)' + multipart_regex = [ + '[ _\.-]+cd[ _\.-]*([0-9a-d]+)', #*cd1 + '[ _\.-]+dvd[ _\.-]*([0-9a-d]+)', #*dvd1 + '[ _\.-]+part[ _\.-]*([0-9a-d]+)', #*part1 + '[ _\.-]+dis[ck][ _\.-]*([0-9a-d]+)', #*disk1 + 'cd[ _\.-]*([0-9a-d]+)$', #cd1.ext + 'dvd[ _\.-]*([0-9a-d]+)$', #dvd1.ext + 'part[ _\.-]*([0-9a-d]+)$', #part1.mkv + 'dis[ck][ _\.-]*([0-9a-d]+)$', #disk1.mkv + '()[ _\.-]+([0-9]*[abcd]+)(\.....?)$', + '([a-z])([0-9]+)(\.....?)$', + '()([ab])(\.....?)$' #*a.mkv + ] + + cp_imdb = '\.cp\((?Ptt[0-9]+),?\s?(?P[A-Za-z0-9]+)?\)' + + def __init__(self): + + addEvent('scanner.create_file_identifier', self.createStringIdentifier) + addEvent('scanner.remove_cptag', self.removeCPTag) + + addEvent('scanner.scan', self.scan) + addEvent('scanner.name_year', self.getReleaseNameYear) + addEvent('scanner.partnumber', self.getPartNumber) + + def scan(self, folder = None, files = None, release_download = None, simple = False, newer_than = 0, return_ignored = True, check_file_date = True, on_found = None): + + folder = sp(folder) + + if not folder or not os.path.isdir(folder): + log.error('Folder doesn\'t exists: %s', folder) + return {} + + # Get movie "master" files + movie_files = {} + leftovers = [] + + # Scan all files of the folder if no files are set + if not files: + try: + files = [] + for root, dirs, walk_files in 
os.walk(folder, followlinks=True): + files.extend([sp(os.path.join(sp(root), ss(filename))) for filename in walk_files]) + + # Break if CP wants to shut down + if self.shuttingDown(): + break + + except: + log.error('Failed getting files from %s: %s', (folder, traceback.format_exc())) + + log.debug('Found %s files to scan and group in %s', (len(files), folder)) + else: + check_file_date = False + files = [sp(x) for x in files] + + for file_path in files: + + if not os.path.exists(file_path): + continue + + # Remove ignored files + if self.isSampleFile(file_path): + leftovers.append(file_path) + continue + elif not self.keepFile(file_path): + continue + + is_dvd_file = self.isDVDFile(file_path) + if self.filesizeBetween(file_path, self.file_sizes['movie']) or is_dvd_file: # Minimal 300MB files or is DVD file + + # Normal identifier + identifier = self.createStringIdentifier(file_path, folder, exclude_filename = is_dvd_file) + identifiers = [identifier] + + # Identifier with quality + quality = fireEvent('quality.guess', files = [file_path], size = self.getFileSize(file_path), single = True) if not is_dvd_file else {'identifier':'dvdr'} + if quality: + identifier_with_quality = '%s %s' % (identifier, quality.get('identifier', '')) + identifiers = [identifier_with_quality, identifier] + + if not movie_files.get(identifier): + movie_files[identifier] = { + 'unsorted_files': [], + 'identifiers': identifiers, + 'is_dvd': is_dvd_file, + } + + movie_files[identifier]['unsorted_files'].append(file_path) + else: + leftovers.append(file_path) + + # Break if CP wants to shut down + if self.shuttingDown(): + break + + # Cleanup + del files + + # Sort reverse, this prevents "Iron man 2" from getting grouped with "Iron man" as the "Iron Man 2" + # files will be grouped first. 
+ leftovers = set(sorted(leftovers, reverse = True)) + + # Group files minus extension + ignored_identifiers = [] + for identifier, group in movie_files.items(): + if identifier not in group['identifiers'] and len(identifier) > 0: group['identifiers'].append(identifier) + + log.debug('Grouping files: %s', identifier) + + has_ignored = 0 + for file_path in list(group['unsorted_files']): + ext = getExt(file_path) + wo_ext = file_path[:-(len(ext) + 1)] + found_files = set([i for i in leftovers if wo_ext in i]) + group['unsorted_files'].extend(found_files) + leftovers = leftovers - found_files + + has_ignored += 1 if ext in self.ignored_extensions else 0 + + if has_ignored == 0: + for file_path in list(group['unsorted_files']): + ext = getExt(file_path) + has_ignored += 1 if ext in self.ignored_extensions else 0 + + if has_ignored > 0: + ignored_identifiers.append(identifier) + + # Break if CP wants to shut down + if self.shuttingDown(): + break + + + # Create identifiers for all leftover files + path_identifiers = {} + for file_path in leftovers: + identifier = self.createStringIdentifier(file_path, folder) + + if not path_identifiers.get(identifier): + path_identifiers[identifier] = [] + + path_identifiers[identifier].append(file_path) + + + # Group the files based on the identifier + delete_identifiers = [] + for identifier, found_files in path_identifiers.items(): + log.debug('Grouping files on identifier: %s', identifier) + + group = movie_files.get(identifier) + if group: + group['unsorted_files'].extend(found_files) + delete_identifiers.append(identifier) + + # Remove the found files from the leftover stack + leftovers = leftovers - set(found_files) + + # Break if CP wants to shut down + if self.shuttingDown(): + break + + # Cleaning up used + for identifier in delete_identifiers: + if path_identifiers.get(identifier): + del path_identifiers[identifier] + del delete_identifiers + + # Group based on folder + delete_identifiers = [] + for identifier, found_files 
in path_identifiers.items(): + log.debug('Grouping files on foldername: %s', identifier) + + for ff in found_files: + new_identifier = self.createStringIdentifier(os.path.dirname(ff), folder) + + group = movie_files.get(new_identifier) + if group: + group['unsorted_files'].extend([ff]) + delete_identifiers.append(identifier) + + # Remove the found files from the leftover stack + leftovers -= leftovers - set([ff]) + + # Break if CP wants to shut down + if self.shuttingDown(): + break + + # leftovers should be empty + if leftovers: + log.debug('Some files are still left over: %s', leftovers) + + # Cleaning up used + for identifier in delete_identifiers: + if path_identifiers.get(identifier): + del path_identifiers[identifier] + del delete_identifiers + + # Make sure we remove older / still extracting files + valid_files = {} + while True and not self.shuttingDown(): + try: + identifier, group = movie_files.popitem() + except: + break + + # Check if movie is fresh and maybe still unpacking, ignore files newer than 1 minute + if check_file_date: + files_too_new, time_string = self.checkFilesChanged(group['unsorted_files']) + if files_too_new: + log.info('Files seem to be still unpacking or just unpacked (created on %s), ignoring for now: %s', (time_string, identifier)) + + # Delete the unsorted list + del group['unsorted_files'] + + continue + + # Only process movies newer than x + if newer_than and newer_than > 0: + has_new_files = False + for cur_file in group['unsorted_files']: + file_time = self.getFileTimes(cur_file) + if file_time[0] > newer_than or file_time[1] > newer_than: + has_new_files = True + break + + if not has_new_files: + log.debug('None of the files have changed since %s for %s, skipping.', (time.ctime(newer_than), identifier)) + + # Delete the unsorted list + del group['unsorted_files'] + + continue + + valid_files[identifier] = group + + del movie_files + + total_found = len(valid_files) + + # Make sure only one movie was found if a download ID is 
provided + if release_download and total_found == 0: + log.info('Download ID provided (%s), but no groups found! Make sure the download contains valid media files (fully extracted).', release_download.get('imdb_id')) + elif release_download and total_found > 1: + log.info('Download ID provided (%s), but more than one group found (%s). Ignoring Download ID...', (release_download.get('imdb_id'), len(valid_files))) + release_download = None + + # Determine file types + processed_movies = {} + while True and not self.shuttingDown(): + try: + identifier, group = valid_files.popitem() + except: + break + + if return_ignored is False and identifier in ignored_identifiers: + log.debug('Ignore file found, ignoring release: %s', identifier) + total_found -= 1 + continue + + # Group extra (and easy) files first + group['files'] = { + 'movie_extra': self.getMovieExtras(group['unsorted_files']), + 'subtitle': self.getSubtitles(group['unsorted_files']), + 'subtitle_extra': self.getSubtitlesExtras(group['unsorted_files']), + 'nfo': self.getNfo(group['unsorted_files']), + 'trailer': self.getTrailers(group['unsorted_files']), + 'leftover': set(group['unsorted_files']), + } + + # Media files + if group['is_dvd']: + group['files']['movie'] = self.getDVDFiles(group['unsorted_files']) + else: + group['files']['movie'] = self.getMediaFiles(group['unsorted_files']) + + if len(group['files']['movie']) == 0: + log.error('Couldn\'t find any movie files for %s', identifier) + total_found -= 1 + continue + + log.debug('Getting metadata for %s', identifier) + group['meta_data'] = self.getMetaData(group, folder = folder, release_download = release_download) + + # Subtitle meta + group['subtitle_language'] = self.getSubtitleLanguage(group) if not simple else {} + + # Get parent dir from movie files + for movie_file in group['files']['movie']: + group['parentdir'] = os.path.dirname(movie_file) + group['dirname'] = None + + folder_names = group['parentdir'].replace(folder, '').split(os.path.sep) + 
folder_names.reverse() + + # Try and get a proper dirname, so no "A", "Movie", "Download" etc + for folder_name in folder_names: + if folder_name.lower() not in self.ignore_names and len(folder_name) > 2: + group['dirname'] = folder_name + break + + break + + # Leftover "sorted" files + for file_type in group['files']: + if not file_type is 'leftover': + group['files']['leftover'] -= set(group['files'][file_type]) + group['files'][file_type] = list(group['files'][file_type]) + group['files']['leftover'] = list(group['files']['leftover']) + + # Delete the unsorted list + del group['unsorted_files'] + + # Determine movie + group['media'] = self.determineMedia(group, release_download = release_download) + if not group['media']: + log.error('Unable to determine media: %s', group['identifiers']) + else: + group['identifier'] = getIdentifier(group['media']) or group['media']['info'].get('imdb') + + processed_movies[identifier] = group + + # Notify parent & progress on something found + if on_found: + on_found(group, total_found, len(valid_files)) + + # Wait for all the async events calm down a bit + while threading.activeCount() > 100 and not self.shuttingDown(): + log.debug('Too many threads active, waiting a few seconds') + time.sleep(10) + + if len(processed_movies) > 0: + log.info('Found %s movies in the folder %s', (len(processed_movies), folder)) + else: + log.debug('Found no movies in the folder %s', folder) + + return processed_movies + + def getMetaData(self, group, folder = '', release_download = None): + + data = {} + files = list(group['files']['movie']) + + for cur_file in files: + if not self.filesizeBetween(cur_file, self.file_sizes['movie']): continue # Ignore smaller files + + if not data.get('audio'): # Only get metadata from first media file + meta = self.getMeta(cur_file) + + try: + data['titles'] = meta.get('titles', []) + data['video'] = meta.get('video', self.getCodec(cur_file, self.codecs['video'])) + data['audio'] = meta.get('audio', 
self.getCodec(cur_file, self.codecs['audio'])) + data['audio_channels'] = meta.get('audio_channels', 2.0) + if meta.get('resolution_width'): + data['resolution_width'] = meta.get('resolution_width') + data['resolution_height'] = meta.get('resolution_height') + data['aspect'] = round(float(meta.get('resolution_width')) / meta.get('resolution_height', 1), 2) + else: + data.update(self.getResolution(cur_file)) + except: + log.debug('Error parsing metadata: %s %s', (cur_file, traceback.format_exc())) + pass + + data['size'] = data.get('size', 0) + self.getFileSize(cur_file) + + data['quality'] = None + quality = fireEvent('quality.guess', size = data.get('size'), files = files, extra = data, single = True) + + # Use the quality that we snatched but check if it matches our guess + if release_download and release_download.get('quality'): + data['quality'] = fireEvent('quality.single', release_download.get('quality'), single = True) + data['quality']['is_3d'] = release_download.get('is_3d', 0) + if data['quality']['identifier'] != quality['identifier']: + log.info('Different quality snatched than detected for %s: %s vs. %s. Assuming snatched quality is correct.', (files[0], data['quality']['identifier'], quality['identifier'])) + if data['quality']['is_3d'] != quality['is_3d']: + log.info('Different 3d snatched than detected for %s: %s vs. %s. 
Assuming snatched 3d is correct.', (files[0], data['quality']['is_3d'], quality['is_3d'])) + + if not data['quality']: + data['quality'] = quality + + if not data['quality']: + data['quality'] = fireEvent('quality.single', 'dvdr' if group['is_dvd'] else 'dvdrip', single = True) + + data['quality_type'] = 'HD' if data.get('resolution_width', 0) >= 1280 or data['quality'].get('hd') else 'SD' + + filename = re.sub(self.cp_imdb, '', files[0]) + data['group'] = self.getGroup(filename[len(folder):]) + data['source'] = self.getSourceMedia(filename) + if data['quality'].get('is_3d', 0): + data['3d_type'] = self.get3dType(filename) + return data + + def get3dType(self, filename): + filename = ss(filename) + + words = re.split('\W+', filename.lower()) + + for key in self.threed_types: + tags = self.threed_types.get(key, []) + + for tag in tags: + if (isinstance(tag, tuple) and '.'.join(tag) in '.'.join(words)) or (isinstance(tag, (str, unicode)) and ss(tag.lower()) in words): + log.debug('Found %s in %s', (tag, filename)) + return key + + return '' + + def getMeta(self, filename): + + try: + p = enzyme.parse(filename) + + # Video codec + vc = ('H264' if p.video[0].codec == 'AVC1' else 'x265' if p.video[0].codec == 'HEVC' else p.video[0].codec) + + # Audio codec + ac = p.audio[0].codec + try: ac = self.audio_codec_map.get(p.audio[0].codec) + except: pass + + # Find title in video headers + titles = [] + + try: + if p.title and self.findYear(p.title): + titles.append(ss(p.title)) + except: + log.error('Failed getting title from meta: %s', traceback.format_exc()) + + for video in p.video: + try: + if video.title and self.findYear(video.title): + titles.append(ss(video.title)) + except: + log.error('Failed getting title from meta: %s', traceback.format_exc()) + + return { + 'titles': list(set(titles)), + 'video': vc, + 'audio': ac, + 'resolution_width': tryInt(p.video[0].width), + 'resolution_height': tryInt(p.video[0].height), + 'audio_channels': p.audio[0].channels, + } + 
except enzyme.exceptions.ParseError: + log.debug('Failed to parse meta for %s', filename) + except enzyme.exceptions.NoParserError: + log.debug('No parser found for %s', filename) + except: + log.debug('Failed parsing %s', filename) + + return {} + + def getSubtitleLanguage(self, group): + detected_languages = {} + + # Subliminal scanner + paths = None + try: + paths = group['files']['movie'] + scan_result = [] + for p in paths: + if not group['is_dvd']: + video = Video.from_path(toUnicode(sp(p))) + video_result = [(video, video.scan())] + scan_result.extend(video_result) + + for video, detected_subtitles in scan_result: + for s in detected_subtitles: + if s.language and s.path not in paths: + detected_languages[s.path] = [s.language] + except: + log.debug('Failed parsing subtitle languages for %s: %s', (paths, traceback.format_exc())) + + # IDX + for extra in group['files']['subtitle_extra']: + try: + if os.path.isfile(extra): + output = open(extra, 'r') + txt = output.read() + output.close() + + idx_langs = re.findall('\nid: (\w+)', txt) + + sub_file = '%s.sub' % os.path.splitext(extra)[0] + if len(idx_langs) > 0 and os.path.isfile(sub_file): + detected_languages[sub_file] = idx_langs + except: + log.error('Failed parsing subtitle idx for %s: %s', (extra, traceback.format_exc())) + + return detected_languages + + def determineMedia(self, group, release_download = None): + + # Get imdb id from downloader + imdb_id = release_download and release_download.get('imdb_id') + if imdb_id: + log.debug('Found movie via imdb id from it\'s download id: %s', release_download.get('imdb_id')) + + files = group['files'] + + # Check for CP(imdb_id) string in the file paths + if not imdb_id: + for cur_file in files['movie']: + imdb_id = self.getCPImdb(cur_file) + if imdb_id: + log.debug('Found movie via CP tag: %s', cur_file) + break + + # Check and see if nfo contains the imdb-id + nfo_file = None + if not imdb_id: + try: + for nf in files['nfo']: + imdb_id = getImdb(nf, 
check_inside = True) + if imdb_id: + log.debug('Found movie via nfo file: %s', nf) + nfo_file = nf + break + except: + pass + + # Check and see if filenames contains the imdb-id + if not imdb_id: + try: + for filetype in files: + for filetype_file in files[filetype]: + imdb_id = getImdb(filetype_file) + if imdb_id: + log.debug('Found movie via imdb in filename: %s', nfo_file) + break + except: + pass + + # Search based on identifiers + if not imdb_id: + for identifier in group['identifiers']: + + if len(identifier) > 2: + try: filename = list(group['files'].get('movie'))[0] + except: filename = None + + name_year = self.getReleaseNameYear(identifier, file_name = filename if not group['is_dvd'] else None) + if name_year.get('name') and name_year.get('year'): + search_q = '%(name)s %(year)s' % name_year + movie = fireEvent('movie.search', q = search_q, merge = True, limit = 1) + + # Try with other + if len(movie) == 0 and name_year.get('other') and name_year['other'].get('name') and name_year['other'].get('year'): + search_q2 = '%(name)s %(year)s' % name_year.get('other') + if search_q2 != search_q: + movie = fireEvent('movie.search', q = search_q2, merge = True, limit = 1) + + if len(movie) > 0: + imdb_id = movie[0].get('imdb') + log.debug('Found movie via search: %s', identifier) + if imdb_id: break + else: + log.debug('Identifier to short to use for search: %s', identifier) + + if imdb_id: + try: + db = get_db() + return db.get('media', 'imdb-%s' % imdb_id, with_doc = True)['doc'] + except: + log.debug('Movie "%s" not in library, just getting info', imdb_id) + return { + 'identifier': imdb_id, + 'info': fireEvent('movie.info', identifier = imdb_id, merge = True, extended = False) + } + + log.error('No imdb_id found for %s. 
Add a NFO file with IMDB id or add the year to the filename.', group['identifiers']) + return {} + + def getCPImdb(self, string): + + try: + m = re.search(self.cp_imdb, string.lower()) + id = m.group('id') + if id: return id + except AttributeError: + pass + + return False + + def removeCPTag(self, name): + try: + return re.sub(self.cp_imdb, '', name).strip() + except: + pass + return name + + def getSamples(self, files): + return set(filter(lambda s: self.isSampleFile(s), files)) + + def getMediaFiles(self, files): + + def test(s): + return self.filesizeBetween(s, self.file_sizes['movie']) and getExt(s.lower()) in self.extensions['movie'] and not self.isSampleFile(s) + + return set(filter(test, files)) + + def getMovieExtras(self, files): + return set(filter(lambda s: getExt(s.lower()) in self.extensions['movie_extra'], files)) + + def getDVDFiles(self, files): + def test(s): + return self.isDVDFile(s) + + return set(filter(test, files)) + + def getSubtitles(self, files): + return set(filter(lambda s: getExt(s.lower()) in self.extensions['subtitle'], files)) + + def getSubtitlesExtras(self, files): + return set(filter(lambda s: getExt(s.lower()) in self.extensions['subtitle_extra'], files)) + + def getNfo(self, files): + return set(filter(lambda s: getExt(s.lower()) in self.extensions['nfo'], files)) + + def getTrailers(self, files): + + def test(s): + return re.search('(^|[\W_])trailer\d*[\W_]', s.lower()) and self.filesizeBetween(s, self.file_sizes['trailer']) + + return set(filter(test, files)) + + def getImages(self, files): + + def test(s): + return getExt(s.lower()) in ['jpg', 'jpeg', 'png', 'gif', 'bmp', 'tbn'] + files = set(filter(test, files)) + + images = { + 'backdrop': set(filter(lambda s: re.search('(^|[\W_])fanart|backdrop\d*[\W_]', s.lower()) and self.filesizeBetween(s, self.file_sizes['backdrop']), files)) + } + + # Rest + images['rest'] = files - images['backdrop'] + + return images + + + def isDVDFile(self, file_name): + + if 
list(set(file_name.lower().split(os.path.sep)) & set(['video_ts', 'audio_ts'])): + return True + + for needle in ['vts_', 'video_ts', 'audio_ts', 'bdmv', 'certificate']: + if needle in file_name.lower(): + return True + + return False + + def keepFile(self, filename): + + # ignoredpaths + for i in self.ignored_in_path: + if i in filename.lower(): + log.debug('Ignored "%s" contains "%s".', (filename, i)) + return False + + # All is OK + return True + + def isSampleFile(self, filename): + is_sample = re.search('(^|[\W_])sample\d*[\W_]', filename.lower()) + if is_sample: log.debug('Is sample file: %s', filename) + return is_sample + + def filesizeBetween(self, file, file_size = None): + if not file_size: file_size = [] + + try: + return file_size.get('min', 0) < self.getFileSize(file) < file_size.get('max', 100000) + except: + log.error('Couldn\'t get filesize of %s.', file) + + return False + + def getFileSize(self, file): + try: + return os.path.getsize(file) / 1024 / 1024 + except: + return None + + def createStringIdentifier(self, file_path, folder = '', exclude_filename = False): + + identifier = file_path.replace(folder, '').lstrip(os.path.sep) # root folder + identifier = os.path.splitext(identifier)[0] # ext + + # Exclude file name path if needed (f.e. 
for DVD files) + if exclude_filename: + identifier = identifier[:len(identifier) - len(os.path.split(identifier)[-1])] + + # Make sure the identifier is lower case as all regex is with lower case tags + identifier = identifier.lower() + + try: + path_split = splitString(identifier, os.path.sep) + identifier = path_split[-2] if len(path_split) > 1 and len(path_split[-2]) > len(path_split[-1]) else path_split[-1] # Only get filename + except: pass + + # multipart + identifier = self.removeMultipart(identifier) + + # remove cptag + identifier = self.removeCPTag(identifier) + + # simplify the string + identifier = simplifyString(identifier) + + year = self.findYear(file_path) + + # groups, release tags, scenename cleaner + identifier = re.sub(self.clean, '::', identifier).strip(':') + + # Year + if year and identifier[:4] != year: + split_by = ':::' if ':::' in identifier else year + identifier = '%s %s' % (identifier.split(split_by)[0].strip(), year) + else: + identifier = identifier.split('::')[0] + + # Remove duplicates + out = [] + for word in identifier.split(): + if not word in out: + out.append(word) + + identifier = ' '.join(out) + + return simplifyString(identifier) + + + def removeMultipart(self, name): + for regex in self.multipart_regex: + try: + found = re.sub(regex, '', name) + if found != name: + name = found + except: + pass + return name + + def getPartNumber(self, name): + for regex in self.multipart_regex: + try: + found = re.search(regex, name) + if found: + return found.group(1) + return 1 + except: + pass + return 1 + + def getCodec(self, filename, codecs): + codecs = map(re.escape, codecs) + try: + codec = re.search('[^A-Z0-9](?P' + '|'.join(codecs) + ')[^A-Z0-9]', filename, re.I) + return (codec and codec.group('codec')) or '' + except: + return '' + + def getResolution(self, filename): + try: + for key in self.resolutions: + if key in filename.lower() and key != 'default': + return self.resolutions[key] + except: + pass + + return 
self.resolutions['default'] + + def getGroup(self, file): + try: + match = re.findall('\-([A-Z0-9]+)[\.\/]', file, re.I) + return match[-1] or '' + except: + return '' + + def getSourceMedia(self, file): + for media in self.source_media: + for alias in self.source_media[media]: + if alias in file.lower(): + return media + + return None + + def findYear(self, text): + + # Search year inside () or [] first + matches = re.findall('(\(|\[)(?P19[0-9]{2}|20[0-9]{2})(\]|\))', text) + if matches: + return matches[-1][1] + + # Search normal + matches = re.findall('(?P19[0-9]{2}|20[0-9]{2})', text) + if matches: + return matches[-1] + + return '' + + def getReleaseNameYear(self, release_name, file_name = None): + + release_name = release_name.strip(' .-_') + + # Use guessit first + guess = {} + if file_name: + try: + guessit = guess_movie_info(toUnicode(file_name)) + if guessit.get('title') and guessit.get('year'): + guess = { + 'name': guessit.get('title'), + 'year': guessit.get('year'), + } + except: + log.debug('Could not detect via guessit "%s": %s', (file_name, traceback.format_exc())) + + # Backup to simple + release_name = os.path.basename(release_name.replace('\\', '/')) + cleaned = ' '.join(re.split('\W+', simplifyString(release_name))) + cleaned = re.sub(self.clean, ' ', cleaned) + + year = None + for year_str in [file_name, release_name, cleaned]: + if not year_str: continue + year = self.findYear(year_str) + if year: + break + + cp_guess = {} + + if year: # Split name on year + try: + movie_name = cleaned.rsplit(year, 1).pop(0).strip() + if movie_name: + cp_guess = { + 'name': movie_name, + 'year': int(year), + } + except: + pass + + if not cp_guess: # Split name on multiple spaces + try: + movie_name = cleaned.split(' ').pop(0).strip() + cp_guess = { + 'name': movie_name, + 'year': int(year) if movie_name[:4] != year else 0, + } + except: + pass + + if cp_guess.get('year') == guess.get('year') and len(cp_guess.get('name', '')) > len(guess.get('name', '')): + 
cp_guess['other'] = guess + return cp_guess + elif guess == {}: + cp_guess['other'] = guess + return cp_guess + + guess['other'] = cp_guess + return guess diff --git a/couchpotato/core/plugins/scanner/__init__.py b/couchpotato/core/plugins/scanner/__init__.py deleted file mode 100644 index 3d640465bd..0000000000 --- a/couchpotato/core/plugins/scanner/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -from .main import Scanner - -def start(): - return Scanner() - -config = [] diff --git a/couchpotato/core/plugins/scanner/main.py b/couchpotato/core/plugins/scanner/main.py deleted file mode 100644 index b822bc0f9a..0000000000 --- a/couchpotato/core/plugins/scanner/main.py +++ /dev/null @@ -1,829 +0,0 @@ -from couchpotato import get_session -from couchpotato.core.event import fireEvent, addEvent -from couchpotato.core.helpers.encoding import toUnicode, simplifyString, ss -from couchpotato.core.helpers.variable import getExt, getImdb, tryInt -from couchpotato.core.logger import CPLog -from couchpotato.core.plugins.base import Plugin -from couchpotato.core.settings.model import File, Movie -from enzyme.exceptions import NoParserError, ParseError -from guessit import guess_movie_info -from subliminal.videos import Video -import enzyme -import os -import re -import time -import traceback - -log = CPLog(__name__) - - -class Scanner(Plugin): - - minimal_filesize = { - 'media': 314572800, # 300MB - 'trailer': 1048576, # 1MB - } - ignored_in_path = [os.path.sep + 'extracted' + os.path.sep, 'extracting', '_unpack', '_failed_', '_unknown_', '_exists_', '_failed_remove_', '_failed_rename_', '.appledouble', '.appledb', '.appledesktop', os.path.sep + '._', '.ds_store', 'cp.cpnfo'] #unpacking, smb-crap, hidden files - ignore_names = ['extract', 'extracting', 'extracted', 'movie', 'movies', 'film', 'films', 'download', 'downloads', 'video_ts', 'audio_ts', 'bdmv', 'certificate'] - extensions = { - 'movie': ['mkv', 'wmv', 'avi', 'mpg', 'mpeg', 'mp4', 'm2ts', 'iso', 'img', 'mdf', 'ts', 'm4v'], 
- 'movie_extra': ['mds'], - 'dvd': ['vts_*', 'vob'], - 'nfo': ['nfo', 'txt', 'tag'], - 'subtitle': ['sub', 'srt', 'ssa', 'ass'], - 'subtitle_extra': ['idx'], - 'trailer': ['mov', 'mp4', 'flv'] - } - - file_types = { - 'subtitle': ('subtitle', 'subtitle'), - 'subtitle_extra': ('subtitle', 'subtitle_extra'), - 'trailer': ('video', 'trailer'), - 'nfo': ('nfo', 'nfo'), - 'movie': ('video', 'movie'), - 'movie_extra': ('movie', 'movie_extra'), - 'backdrop': ('image', 'backdrop'), - 'poster': ('image', 'poster'), - 'thumbnail': ('image', 'thumbnail'), - 'leftover': ('leftover', 'leftover'), - } - - codecs = { - 'audio': ['dts', 'ac3', 'ac3d', 'mp3'], - 'video': ['x264', 'h264', 'divx', 'xvid'] - } - - audio_codec_map = { - 0x2000: 'ac3', - 0x2001: 'dts', - 0x0055: 'mp3', - 0x0050: 'mp2', - 0x0001: 'pcm', - 0x003: 'pcm', - 0x77a1: 'tta1', - 0x5756: 'wav', - 0x6750: 'vorbis', - 0xF1AC: 'flac', - 0x00ff: 'aac', - } - - source_media = { - 'bluray': ['bluray', 'blu-ray', 'brrip', 'br-rip'], - 'hddvd': ['hddvd', 'hd-dvd'], - 'dvd': ['dvd'], - 'hdtv': ['hdtv'] - } - - clean = '[ _\,\.\(\)\[\]\-](french|swedisch|danish|dutch|swesub|spanish|german|ac3|dts|custom|dc|divx|divx5|dsr|dsrip|dutch|dvd|dvdr|dvdrip|dvdscr|dvdscreener|screener|dvdivx|cam|fragment|fs|hdtv|hdrip|hdtvrip|internal|limited|multisubs|ntsc|ogg|ogm|pal|pdtv|proper|repack|rerip|retail|r3|r5|bd5|se|svcd|swedish|german|read.nfo|nfofix|unrated|ws|telesync|ts|telecine|tc|brrip|bdrip|video_ts|audio_ts|480p|480i|576p|576i|720p|720i|1080p|1080i|hrhd|hrhdtv|hddvd|bluray|x264|h264|xvid|xvidvd|xxx|www.www|cd[1-9]|\[.*\])([ _\,\.\(\)\[\]\-]|$)' - multipart_regex = [ - '[ _\.-]+cd[ _\.-]*([0-9a-d]+)', #*cd1 - '[ _\.-]+dvd[ _\.-]*([0-9a-d]+)', #*dvd1 - '[ _\.-]+part[ _\.-]*([0-9a-d]+)', #*part1 - '[ _\.-]+dis[ck][ _\.-]*([0-9a-d]+)', #*disk1 - 'cd[ _\.-]*([0-9a-d]+)$', #cd1.ext - 'dvd[ _\.-]*([0-9a-d]+)$', #dvd1.ext - 'part[ _\.-]*([0-9a-d]+)$', #part1.mkv - 'dis[ck][ _\.-]*([0-9a-d]+)$', #disk1.mkv - '()[ 
_\.-]+([0-9]*[abcd]+)(\.....?)$', - '([a-z])([0-9]+)(\.....?)$', - '()([ab])(\.....?)$' #*a.mkv - ] - - cp_imdb = '(.cp.(?Ptt[0-9{7}]+).)' - - def __init__(self): - - addEvent('scanner.create_file_identifier', self.createStringIdentifier) - addEvent('scanner.remove_cptag', self.removeCPTag) - - addEvent('scanner.scan', self.scan) - addEvent('scanner.name_year', self.getReleaseNameYear) - addEvent('scanner.partnumber', self.getPartNumber) - - def scan(self, folder = None, files = None, simple = False, newer_than = 0, on_found = None): - - folder = ss(os.path.normpath(folder)) - - if not folder or not os.path.isdir(folder): - log.error('Folder doesn\'t exists: %s', folder) - return {} - - # Get movie "master" files - movie_files = {} - leftovers = [] - - # Scan all files of the folder if no files are set - if not files: - check_file_date = True - try: - files = [] - for root, dirs, walk_files in os.walk(folder): - for filename in walk_files: - files.append(os.path.join(root, filename)) - except: - log.error('Failed getting files from %s: %s', (folder, traceback.format_exc())) - else: - check_file_date = False - files = [ss(x) for x in files] - - db = get_session() - - for file_path in files: - - if not os.path.exists(file_path): - continue - - # Remove ignored files - if self.isSampleFile(file_path): - leftovers.append(file_path) - continue - elif not self.keepFile(file_path): - continue - - is_dvd_file = self.isDVDFile(file_path) - if os.path.getsize(file_path) > self.minimal_filesize['media'] or is_dvd_file: # Minimal 300MB files or is DVD file - - # Normal identifier - identifier = self.createStringIdentifier(file_path, folder, exclude_filename = is_dvd_file) - identifiers = [identifier] - - # Identifier with quality - quality = fireEvent('quality.guess', [file_path], single = True) if not is_dvd_file else {'identifier':'dvdr'} - if quality: - identifier_with_quality = '%s %s' % (identifier, quality.get('identifier', '')) - identifiers = [identifier_with_quality, 
identifier] - - if not movie_files.get(identifier): - movie_files[identifier] = { - 'unsorted_files': [], - 'identifiers': identifiers, - 'is_dvd': is_dvd_file, - } - - movie_files[identifier]['unsorted_files'].append(file_path) - else: - leftovers.append(file_path) - - # Break if CP wants to shut down - if self.shuttingDown(): - break - - # Cleanup - del files - - # Sort reverse, this prevents "Iron man 2" from getting grouped with "Iron man" as the "Iron Man 2" - # files will be grouped first. - leftovers = set(sorted(leftovers, reverse = True)) - - - # Group files minus extension - for identifier, group in movie_files.iteritems(): - if identifier not in group['identifiers'] and len(identifier) > 0: group['identifiers'].append(identifier) - - log.debug('Grouping files: %s', identifier) - - for file_path in group['unsorted_files']: - wo_ext = file_path[:-(len(getExt(file_path)) + 1)] - found_files = set([i for i in leftovers if wo_ext in i]) - group['unsorted_files'].extend(found_files) - leftovers = leftovers - found_files - - # Break if CP wants to shut down - if self.shuttingDown(): - break - - - # Create identifiers for all leftover files - path_identifiers = {} - for file_path in leftovers: - identifier = self.createStringIdentifier(file_path, folder) - - if not path_identifiers.get(identifier): - path_identifiers[identifier] = [] - - path_identifiers[identifier].append(file_path) - - - # Group the files based on the identifier - delete_identifiers = [] - for identifier, found_files in path_identifiers.iteritems(): - log.debug('Grouping files on identifier: %s', identifier) - - group = movie_files.get(identifier) - if group: - group['unsorted_files'].extend(found_files) - delete_identifiers.append(identifier) - - # Remove the found files from the leftover stack - leftovers = leftovers - set(found_files) - - # Break if CP wants to shut down - if self.shuttingDown(): - break - - # Cleaning up used - for identifier in delete_identifiers: - if 
path_identifiers.get(identifier): - del path_identifiers[identifier] - del delete_identifiers - - # Group based on folder - delete_identifiers = [] - for identifier, found_files in path_identifiers.iteritems(): - log.debug('Grouping files on foldername: %s', identifier) - - for ff in found_files: - new_identifier = self.createStringIdentifier(os.path.dirname(ff), folder) - - group = movie_files.get(new_identifier) - if group: - group['unsorted_files'].extend([ff]) - delete_identifiers.append(identifier) - - # Remove the found files from the leftover stack - leftovers = leftovers - set([ff]) - - # Break if CP wants to shut down - if self.shuttingDown(): - break - - # Cleaning up used - for identifier in delete_identifiers: - if path_identifiers.get(identifier): - del path_identifiers[identifier] - del delete_identifiers - - # Make sure we remove older / still extracting files - valid_files = {} - while True and not self.shuttingDown(): - try: - identifier, group = movie_files.popitem() - except: - break - - # Check if movie is fresh and maybe still unpacking, ignore files new then 1 minute - file_too_new = False - for cur_file in group['unsorted_files']: - if not os.path.isfile(cur_file): - file_too_new = time.time() - break - file_time = [os.path.getmtime(cur_file), os.path.getctime(cur_file)] - for t in file_time: - if t > time.time() - 60: - file_too_new = tryInt(time.time() - t) - break - - if file_too_new: - break - - if check_file_date and file_too_new: - try: - time_string = time.ctime(file_time[0]) - except: - try: - time_string = time.ctime(file_time[1]) - except: - time_string = 'unknown' - - log.info('Files seem to be still unpacking or just unpacked (created on %s), ignoring for now: %s', (time_string, identifier)) - - # Delete the unsorted list - del group['unsorted_files'] - - continue - - # Only process movies newer than x - if newer_than and newer_than > 0: - has_new_files = False - for cur_file in group['unsorted_files']: - file_time = 
[os.path.getmtime(cur_file), os.path.getctime(cur_file)] - if file_time[0] > newer_than or file_time[1] > newer_than: - has_new_files = True - break - - if not has_new_files: - log.debug('None of the files have changed since %s for %s, skipping.', (time.ctime(newer_than), identifier)) - - # Delete the unsorted list - del group['unsorted_files'] - - continue - - valid_files[identifier] = group - - del movie_files - - # Determine file types - processed_movies = {} - total_found = len(valid_files) - while True and not self.shuttingDown(): - try: - identifier, group = valid_files.popitem() - except: - break - - # Group extra (and easy) files first - # images = self.getImages(group['unsorted_files']) - group['files'] = { - 'movie_extra': self.getMovieExtras(group['unsorted_files']), - 'subtitle': self.getSubtitles(group['unsorted_files']), - 'subtitle_extra': self.getSubtitlesExtras(group['unsorted_files']), - 'nfo': self.getNfo(group['unsorted_files']), - 'trailer': self.getTrailers(group['unsorted_files']), - #'backdrop': images['backdrop'], - 'leftover': set(group['unsorted_files']), - } - - # Media files - if group['is_dvd']: - group['files']['movie'] = self.getDVDFiles(group['unsorted_files']) - else: - group['files']['movie'] = self.getMediaFiles(group['unsorted_files']) - - if len(group['files']['movie']) == 0: - log.error('Couldn\'t find any movie files for %s', identifier) - continue - - log.debug('Getting metadata for %s', identifier) - group['meta_data'] = self.getMetaData(group, folder = folder) - - # Subtitle meta - group['subtitle_language'] = self.getSubtitleLanguage(group) if not simple else {} - - # Get parent dir from movie files - for movie_file in group['files']['movie']: - group['parentdir'] = os.path.dirname(movie_file) - group['dirname'] = None - - folder_names = group['parentdir'].replace(folder, '').split(os.path.sep) - folder_names.reverse() - - # Try and get a proper dirname, so no "A", "Movie", "Download" etc - for folder_name in 
folder_names: - if folder_name.lower() not in self.ignore_names and len(folder_name) > 2: - group['dirname'] = folder_name - break - - break - - # Leftover "sorted" files - for file_type in group['files']: - if not file_type is 'leftover': - group['files']['leftover'] -= set(group['files'][file_type]) - - # Delete the unsorted list - del group['unsorted_files'] - - # Determine movie - group['library'] = self.determineMovie(group) - if not group['library']: - log.error('Unable to determine movie: %s', group['identifiers']) - else: - movie = db.query(Movie).filter_by(library_id = group['library']['id']).first() - group['movie_id'] = None if not movie else movie.id - - processed_movies[identifier] = group - - # Notify parent & progress on something found - if on_found: - on_found(group, total_found, total_found - len(processed_movies)) - - if len(processed_movies) > 0: - log.info('Found %s movies in the folder %s', (len(processed_movies), folder)) - else: - log.debug('Found no movies in the folder %s', (folder)) - - return processed_movies - - def getMetaData(self, group, folder = ''): - - data = {} - files = list(group['files']['movie']) - - for cur_file in files: - if os.path.getsize(cur_file) < self.minimal_filesize['media']: continue # Ignore smaller files - - meta = self.getMeta(cur_file) - - try: - data['video'] = meta.get('video', self.getCodec(cur_file, self.codecs['video'])) - data['audio'] = meta.get('audio', self.getCodec(cur_file, self.codecs['audio'])) - data['resolution_width'] = meta.get('resolution_width', 720) - data['resolution_height'] = meta.get('resolution_height', 480) - data['aspect'] = meta.get('resolution_width', 720) / meta.get('resolution_height', 480) - except: - log.debug('Error parsing metadata: %s %s', (cur_file, traceback.format_exc())) - pass - - if data.get('audio'): break - - data['quality'] = fireEvent('quality.guess', files = files, extra = data, single = True) - if not data['quality']: - data['quality'] = 
fireEvent('quality.single', 'dvdr' if group['is_dvd'] else 'dvdrip', single = True) - - data['quality_type'] = 'HD' if data.get('resolution_width', 0) >= 1280 or data['quality'].get('hd') else 'SD' - - filename = re.sub('(.cp\(tt[0-9{7}]+\))', '', files[0]) - data['group'] = self.getGroup(filename[len(folder):]) - data['source'] = self.getSourceMedia(filename) - - return data - - def getMeta(self, filename): - - try: - p = enzyme.parse(filename) - - # Video codec - vc = ('h264' if p.video[0].codec == 'AVC1' else p.video[0].codec).lower() - - # Audio codec - ac = p.audio[0].codec - try: ac = self.audio_codec_map.get(p.audio[0].codec) - except: pass - - return { - 'video': vc, - 'audio': ac, - 'resolution_width': tryInt(p.video[0].width), - 'resolution_height': tryInt(p.video[0].height), - } - except ParseError: - log.debug('Failed to parse meta for %s', filename) - except NoParserError: - log.debug('No parser found for %s', filename) - except: - log.debug('Failed parsing %s', filename) - - return {} - - def getSubtitleLanguage(self, group): - detected_languages = {} - - # Subliminal scanner - try: - paths = group['files']['movie'] - scan_result = [] - for p in paths: - if not group['is_dvd']: - video = Video.from_path(toUnicode(p)) - video_result = [(video, video.scan())] - scan_result.extend(video_result) - - for video, detected_subtitles in scan_result: - for s in detected_subtitles: - if s.language and s.path not in paths: - detected_languages[s.path] = [s.language] - except: - log.debug('Failed parsing subtitle languages for %s: %s', (paths, traceback.format_exc())) - - # IDX - for extra in group['files']['subtitle_extra']: - try: - if os.path.isfile(extra): - output = open(extra, 'r') - txt = output.read() - output.close() - - idx_langs = re.findall('\nid: (\w+)', txt) - - sub_file = '%s.sub' % os.path.splitext(extra)[0] - if len(idx_langs) > 0 and os.path.isfile(sub_file): - detected_languages[sub_file] = idx_langs - except: - log.error('Failed parsing 
subtitle idx for %s: %s', (extra, traceback.format_exc())) - - return detected_languages - - def determineMovie(self, group): - imdb_id = None - - files = group['files'] - - # Check for CP(imdb_id) string in the file paths - for cur_file in files['movie']: - imdb_id = self.getCPImdb(cur_file) - if imdb_id: - log.debug('Found movie via CP tag: %s', cur_file) - break - - # Check and see if nfo contains the imdb-id - if not imdb_id: - try: - for nfo_file in files['nfo']: - imdb_id = getImdb(nfo_file) - if imdb_id: - log.debug('Found movie via nfo file: %s', nfo_file) - break - except: - pass - - # Check and see if filenames contains the imdb-id - if not imdb_id: - try: - for filetype in files: - for filetype_file in files[filetype]: - imdb_id = getImdb(filetype_file, check_inside = False) - if imdb_id: - log.debug('Found movie via imdb in filename: %s', nfo_file) - break - except: - pass - - # Check if path is already in db - if not imdb_id: - db = get_session() - for cur_file in files['movie']: - f = db.query(File).filter_by(path = toUnicode(cur_file)).first() - try: - imdb_id = f.library[0].identifier - log.debug('Found movie via database: %s', cur_file) - break - except: - pass - - # Search based on OpenSubtitleHash - if not imdb_id and not group['is_dvd']: - for cur_file in files['movie']: - movie = fireEvent('movie.by_hash', file = cur_file, merge = True) - - if len(movie) > 0: - imdb_id = movie[0]['imdb'] - if imdb_id: - log.debug('Found movie via OpenSubtitleHash: %s', cur_file) - break - - # Search based on identifiers - if not imdb_id: - for identifier in group['identifiers']: - - if len(identifier) > 2: - try: filename = list(group['files'].get('movie'))[0] - except: filename = None - - name_year = self.getReleaseNameYear(identifier, file_name = filename if not group['is_dvd'] else None) - if name_year.get('name') and name_year.get('year'): - movie = fireEvent('movie.search', q = '%(name)s %(year)s' % name_year, merge = True, limit = 1) - - if len(movie) > 
0: - imdb_id = movie[0]['imdb'] - log.debug('Found movie via search: %s', cur_file) - if imdb_id: break - else: - log.debug('Identifier to short to use for search: %s', identifier) - - if imdb_id: - return fireEvent('library.add', attrs = { - 'identifier': imdb_id - }, update_after = False, single = True) - - log.error('No imdb_id found for %s. Add a NFO file with IMDB id or add the year to the filename.', group['identifiers']) - return {} - - def getCPImdb(self, string): - - try: - m = re.search(self.cp_imdb, string.lower()) - id = m.group('id') - if id: return id - except AttributeError: - pass - - return False - - def removeCPTag(self, name): - try: - return re.sub(self.cp_imdb, '', name) - except: - pass - return name - - def getSamples(self, files): - return set(filter(lambda s: self.isSampleFile(s), files)) - - def getMediaFiles(self, files): - - def test(s): - return self.filesizeBetween(s, 300, 100000) and getExt(s.lower()) in self.extensions['movie'] and not self.isSampleFile(s) - - return set(filter(test, files)) - - def getMovieExtras(self, files): - return set(filter(lambda s: getExt(s.lower()) in self.extensions['movie_extra'], files)) - - def getDVDFiles(self, files): - def test(s): - return self.isDVDFile(s) - - return set(filter(test, files)) - - def getSubtitles(self, files): - return set(filter(lambda s: getExt(s.lower()) in self.extensions['subtitle'], files)) - - def getSubtitlesExtras(self, files): - return set(filter(lambda s: getExt(s.lower()) in self.extensions['subtitle_extra'], files)) - - def getNfo(self, files): - return set(filter(lambda s: getExt(s.lower()) in self.extensions['nfo'], files)) - - def getTrailers(self, files): - - def test(s): - return re.search('(^|[\W_])trailer\d*[\W_]', s.lower()) and self.filesizeBetween(s, 2, 250) - - return set(filter(test, files)) - - def getImages(self, files): - - def test(s): - return getExt(s.lower()) in ['jpg', 'jpeg', 'png', 'gif', 'bmp', 'tbn'] - files = set(filter(test, files)) - - images 
= {} - - # Fanart - images['backdrop'] = set(filter(lambda s: re.search('(^|[\W_])fanart|backdrop\d*[\W_]', s.lower()) and self.filesizeBetween(s, 0, 5), files)) - - # Rest - images['rest'] = files - images['backdrop'] - - return images - - - def isDVDFile(self, file_name): - - if list(set(file_name.lower().split(os.path.sep)) & set(['video_ts', 'audio_ts'])): - return True - - for needle in ['vts_', 'video_ts', 'audio_ts', 'bdmv', 'certificate']: - if needle in file_name.lower(): - return True - - return False - - def keepFile(self, filename): - - # ignoredpaths - for i in self.ignored_in_path: - if i in filename.lower(): - log.debug('Ignored "%s" contains "%s".', (filename, i)) - return False - - # Sample file - if self.isSampleFile(filename): - log.debug('Is sample file "%s".', filename) - return False - - # Minimal size - if self.filesizeBetween(filename, self.minimal_filesize['media']): - log.debug('File to small: %s', filename) - return False - - # All is OK - return True - - def isSampleFile(self, filename): - is_sample = re.search('(^|[\W_])sample\d*[\W_]', filename.lower()) - if is_sample: log.debug('Is sample file: %s', filename) - return is_sample - - def filesizeBetween(self, file, min = 0, max = 100000): - try: - return (min * 1048576) < os.path.getsize(file) < (max * 1048576) - except: - log.error('Couldn\'t get filesize of %s.', file) - - return False - - def createStringIdentifier(self, file_path, folder = '', exclude_filename = False): - - identifier = file_path.replace(folder, '') # root folder - identifier = os.path.splitext(identifier)[0] # ext - - if exclude_filename: - identifier = identifier[:len(identifier) - len(os.path.split(identifier)[-1])] - - # multipart - identifier = self.removeMultipart(identifier) - - # remove cptag - identifier = self.removeCPTag(identifier) - - # groups, release tags, scenename cleaner, regex isn't correct - identifier = re.sub(self.clean, '::', simplifyString(identifier)).strip(':') - - # Year - year = 
self.findYear(identifier) - if year: - identifier = '%s %s' % (identifier.split(year)[0].strip(), year) - else: - identifier = identifier.split('::')[0] - - # Remove duplicates - out = [] - for word in identifier.split(): - if not word in out: - out.append(word) - - identifier = ' '.join(out) - - return simplifyString(identifier) - - - def removeMultipart(self, name): - for regex in self.multipart_regex: - try: - found = re.sub(regex, '', name) - if found != name: - name = found - except: - pass - return name - - def getPartNumber(self, name): - for regex in self.multipart_regex: - try: - found = re.search(regex, name) - if found: - return found.group(1) - return 1 - except: - pass - return 1 - - def getCodec(self, filename, codecs): - codecs = map(re.escape, codecs) - try: - codec = re.search('[^A-Z0-9](?P' + '|'.join(codecs) + ')[^A-Z0-9]', filename, re.I) - return (codec and codec.group('codec')) or '' - except: - return '' - - def getGroup(self, file): - try: - match = re.findall('\-([A-Z0-9]+)[\.\/]', file, re.I) - return match[-1] or '' - except: - return '' - - def getSourceMedia(self, file): - for media in self.source_media: - for alias in self.source_media[media]: - if alias in file.lower(): - return media - - return None - - def findYear(self, text): - matches = re.search('(?P19[0-9]{2}|20[0-9]{2})', text) - if matches: - return matches.group('year') - - return '' - - def getReleaseNameYear(self, release_name, file_name = None): - - # Use guessit first - guess = {} - if file_name: - try: - guess = guess_movie_info(toUnicode(file_name)) - if guess.get('title') and guess.get('year'): - guess = { - 'name': guess.get('title'), - 'year': guess.get('year'), - } - except: - log.debug('Could not detect via guessit "%s": %s', (file_name, traceback.format_exc())) - - # Backup to simple - cleaned = ' '.join(re.split('\W+', simplifyString(release_name))) - cleaned = re.sub(self.clean, ' ', cleaned) - year = self.findYear(cleaned) - cp_guess = {} - - if year: # Split 
name on year - try: - movie_name = cleaned.split(year).pop(0).strip() - cp_guess = { - 'name': movie_name, - 'year': int(year), - } - except: - pass - else: # Split name on multiple spaces - try: - movie_name = cleaned.split(' ').pop(0).strip() - cp_guess = { - 'name': movie_name, - 'year': int(year), - } - except: - pass - - if cp_guess.get('year') == guess.get('year') and len(cp_guess.get('name', '')) > len(guess.get('name', '')): - return cp_guess - elif guess == {}: - return cp_guess - - return guess diff --git a/couchpotato/core/plugins/score/__init__.py b/couchpotato/core/plugins/score/__init__.py index 2c367f896e..65cadd9919 100644 --- a/couchpotato/core/plugins/score/__init__.py +++ b/couchpotato/core/plugins/score/__init__.py @@ -1,6 +1,5 @@ from .main import Score -def start(): - return Score() -config = [] +def autoload(): + return Score() diff --git a/couchpotato/core/plugins/score/main.py b/couchpotato/core/plugins/score/main.py index f853be95ad..e6fef25324 100644 --- a/couchpotato/core/plugins/score/main.py +++ b/couchpotato/core/plugins/score/main.py @@ -1,11 +1,12 @@ from couchpotato.core.event import addEvent from couchpotato.core.helpers.encoding import toUnicode -from couchpotato.core.helpers.variable import getTitle +from couchpotato.core.helpers.variable import getTitle, splitString, removeDuplicate from couchpotato.core.logger import CPLog from couchpotato.core.plugins.base import Plugin from couchpotato.core.plugins.score.scores import nameScore, nameRatioScore, \ sizeScore, providerScore, duplicateScore, partialIgnoredScore, namePositionScore, \ - halfMultipartScore + halfMultipartScore, sceneScore +from couchpotato.environment import Env log = CPLog(__name__) @@ -16,21 +17,26 @@ def __init__(self): addEvent('score.calculate', self.calculate) def calculate(self, nzb, movie): - ''' Calculate the score of a NZB, used for sorting later ''' + """ Calculate the score of a NZB, used for sorting later """ - score = nameScore(toUnicode(nzb['name']), 
movie['library']['year']) + # Merge global and category + preferred_words = splitString(Env.setting('preferred_words', section = 'searcher').lower()) + try: preferred_words = removeDuplicate(preferred_words + splitString(movie['category']['preferred'].lower())) + except: pass - for movie_title in movie['library']['titles']: - score += nameRatioScore(toUnicode(nzb['name']), toUnicode(movie_title['title'])) - score += namePositionScore(toUnicode(nzb['name']), toUnicode(movie_title['title'])) + score = nameScore(toUnicode(nzb['name']), movie['info']['year'], preferred_words) + + for movie_title in movie['info']['titles']: + score += nameRatioScore(toUnicode(nzb['name']), toUnicode(movie_title)) + score += namePositionScore(toUnicode(nzb['name']), toUnicode(movie_title)) score += sizeScore(nzb['size']) # Torrents only if nzb.get('seeders'): try: - score += nzb.get('seeders') / 5 - score += nzb.get('leechers') / 10 + score += nzb.get('seeders') * 100 / 15 + score += nzb.get('leechers') * 100 / 30 except: pass @@ -38,10 +44,15 @@ def calculate(self, nzb, movie): score += providerScore(nzb['provider']) # Duplicates in name - score += duplicateScore(nzb['name'], getTitle(movie['library'])) + score += duplicateScore(nzb['name'], getTitle(movie)) + + # Merge global and category + ignored_words = splitString(Env.setting('ignored_words', section = 'searcher').lower()) + try: ignored_words = removeDuplicate(ignored_words + splitString(movie['category']['ignored'].lower())) + except: pass # Partial ignored words - score += partialIgnoredScore(nzb['name'], getTitle(movie['library'])) + score += partialIgnoredScore(nzb['name'], getTitle(movie), ignored_words) # Ignore single downloads from multipart score += halfMultipartScore(nzb['name']) @@ -51,4 +62,7 @@ def calculate(self, nzb, movie): if extra_score: score += extra_score(nzb) + # Scene / Nuke scoring + score += sceneScore(nzb['name']) + return score diff --git a/couchpotato/core/plugins/score/scores.py 
b/couchpotato/core/plugins/score/scores.py index 3d5a9e69b6..4a4d1892c5 100644 --- a/couchpotato/core/plugins/score/scores.py +++ b/couchpotato/core/plugins/score/scores.py @@ -1,8 +1,15 @@ +import re +import traceback + from couchpotato.core.event import fireEvent from couchpotato.core.helpers.encoding import simplifyString from couchpotato.core.helpers.variable import tryInt +from couchpotato.core.logger import CPLog from couchpotato.environment import Env -import re + + +log = CPLog(__name__) + name_scores = [ # Tags @@ -12,7 +19,7 @@ # Audio 'dts:4', 'ac3:2', # Quality - '720p:10', '1080p:10', 'bluray:10', 'dvd:1', 'dvdrip:1', 'brrip:1', 'bdrip:1', 'bd50:1', 'bd25:1', + '720p:10', '1080p:10', '2160p:10', 'bluray:10', 'dvd:1', 'dvdrip:1', 'brrip:1', 'bdrip:1', 'bd50:1', 'bd25:1', # Language / Subs 'german:-10', 'french:-10', 'spanish:-10', 'swesub:-20', 'danish:-10', 'dutch:-10', # Release groups @@ -23,39 +30,46 @@ ] -def nameScore(name, year): - ''' Calculate score for words in the NZB name ''' +def nameScore(name, year, preferred_words): + """ Calculate score for words in the NZB name """ - score = 0 - name = name.lower() - - # give points for the cool stuff - for value in name_scores: - v = value.split(':') - add = int(v.pop()) - if v.pop() in name: - score = score + add - - # points if the year is correct - if str(year) in name: - score = score + 5 - - # Contains preferred word - nzb_words = re.split('\W+', simplifyString(name)) - preferred_words = [x.strip() for x in Env.setting('preferred_words', section = 'searcher').split(',')] - for word in preferred_words: - if word.strip() and word.strip().lower() in nzb_words: - score = score + 100 + try: + score = 0 + name = name.lower() - return score + # give points for the cool stuff + for value in name_scores: + v = value.split(':') + add = int(v.pop()) + if v.pop() in name: + score += add + + # points if the year is correct + if str(year) in name: + score += 5 + + # Contains preferred word + nzb_words = 
re.split('\W+', simplifyString(name)) + score += 100 * len(list(set(nzb_words) & set(preferred_words))) + + return score + except: + log.error('Failed doing nameScore: %s', traceback.format_exc()) + + return 0 def nameRatioScore(nzb_name, movie_name): - nzb_words = re.split('\W+', fireEvent('scanner.create_file_identifier', nzb_name, single = True)) - movie_words = re.split('\W+', simplifyString(movie_name)) + try: + nzb_words = re.split('\W+', fireEvent('scanner.create_file_identifier', nzb_name, single = True)) + movie_words = re.split('\W+', simplifyString(movie_name)) + + left_over = set(nzb_words) - set(movie_words) + return 10 - len(left_over) + except: + log.error('Failed doing nameRatioScore: %s', traceback.format_exc()) - left_over = set(nzb_words) - set(movie_words) - return 10 - len(left_over) + return 0 def namePositionScore(nzb_name, movie_name): @@ -72,9 +86,12 @@ def namePositionScore(nzb_name, movie_name): name_year = fireEvent('scanner.name_year', nzb_name, single = True) # Give points for movies beginning with the correct name - name_split = simplifyString(nzb_name).split(simplifyString(movie_name)) - if name_split[0].strip() == '': - score += 10 + split_by = simplifyString(movie_name) + name_split = [] + if len(split_by) > 0: + name_split = simplifyString(nzb_name).split(split_by) + if name_split[0].strip() == '': + score += 10 # If year is second in line, give more points if len(name_split) > 1 and name_year: @@ -116,49 +133,98 @@ def sizeScore(size): def providerScore(provider): - if provider in ['OMGWTFNZBs', 'PassThePopcorn', 'SceneAccess', 'TorrentLeech']: - return 20 - if provider in ['Newznab']: - return 10 + try: + score = tryInt(Env.setting('extra_score', section = provider.lower(), default = 0)) + except: + score = 0 - return 0 + return score def duplicateScore(nzb_name, movie_name): - nzb_words = re.split('\W+', simplifyString(nzb_name)) - movie_words = re.split('\W+', simplifyString(movie_name)) + try: + nzb_words = re.split('\W+', 
simplifyString(nzb_name)) + movie_words = re.split('\W+', simplifyString(movie_name)) - # minus for duplicates - duplicates = [x for i, x in enumerate(nzb_words) if nzb_words[i:].count(x) > 1] + # minus for duplicates + duplicates = [x for i, x in enumerate(nzb_words) if nzb_words[i:].count(x) > 1] - return len(list(set(duplicates) - set(movie_words))) * -4 + return len(list(set(duplicates) - set(movie_words))) * -4 + except: + log.error('Failed doing duplicateScore: %s', traceback.format_exc()) + return 0 -def partialIgnoredScore(nzb_name, movie_name): - nzb_name = nzb_name.lower() - movie_name = movie_name.lower() +def partialIgnoredScore(nzb_name, movie_name, ignored_words): - ignored_words = [x.strip().lower() for x in Env.setting('ignored_words', section = 'searcher').split(',')] + try: + nzb_name = nzb_name.lower() + movie_name = movie_name.lower() - score = 0 - for ignored_word in ignored_words: - if ignored_word in nzb_name and ignored_word not in movie_name: - score -= 5 + score = 0 + for ignored_word in ignored_words: + if ignored_word in nzb_name and ignored_word not in movie_name: + score -= 5 + + return score + except: + log.error('Failed doing partialIgnoredScore: %s', traceback.format_exc()) + + return 0 - return score def halfMultipartScore(nzb_name): - wrong_found = 0 - for nr in [1, 2, 3, 4, 5, 'i', 'ii', 'iii', 'iv', 'v', 'a', 'b', 'c', 'd', 'e']: - for wrong in ['cd', 'part', 'dis', 'disc', 'dvd']: - if '%s%s' % (wrong, nr) in nzb_name.lower(): - wrong_found += 1 + try: + wrong_found = 0 + for nr in [1, 2, 3, 4, 5, 'i', 'ii', 'iii', 'iv', 'v', 'a', 'b', 'c', 'd', 'e']: + for wrong in ['cd', 'part', 'dis', 'disc', 'dvd']: + if '%s%s' % (wrong, nr) in nzb_name.lower(): + wrong_found += 1 + + if wrong_found == 1: + return -30 + + return 0 + except: + log.error('Failed doing halfMultipartScore: %s', traceback.format_exc()) + + return 0 + + +def sceneScore(nzb_name): + + check_names = [nzb_name] + + # Match names between " + try: 
check_names.append(re.search(r'([\'"])[^\1]*\1', nzb_name).group(0)) + except: pass + + # Match longest name between [] + try: check_names.append(max(re.findall(r'[^[]*\[([^]]*)\]', nzb_name), key = len).strip()) + except: pass + + for name in check_names: + + # Strip twice, remove possible file extensions + name = name.lower().strip(' "\'\.-_\[\]') + name = re.sub('\.([a-z0-9]{0,4})$', '', name) + name = name.strip(' "\'\.-_\[\]') + + # Make sure year and groupname is in there + year = re.findall('(?P19[0-9]{2}|20[0-9]{2})', name) + group = re.findall('\-([a-z0-9]+)$', name) - if wrong_found == 1: - return -30 + if len(year) > 0 and len(group) > 0: + try: + validate = fireEvent('release.validate', name, single = True) + if validate and tryInt(validate.get('score')) != 0: + log.debug('Release "%s" scored %s, reason: %s', (nzb_name, validate['score'], validate['reasons'])) + return tryInt(validate.get('score')) + except: + log.error('Failed scoring scene: %s', traceback.format_exc()) return 0 diff --git a/couchpotato/core/plugins/searcher/__init__.py b/couchpotato/core/plugins/searcher/__init__.py deleted file mode 100644 index a6dd6913ab..0000000000 --- a/couchpotato/core/plugins/searcher/__init__.py +++ /dev/null @@ -1,96 +0,0 @@ -from .main import Searcher -import random - -def start(): - return Searcher() - -config = [{ - 'name': 'searcher', - 'order': 20, - 'groups': [ - { - 'tab': 'searcher', - 'name': 'searcher', - 'label': 'Search', - 'description': 'Options for the searchers', - 'options': [ - { - 'name': 'preferred_words', - 'label': 'Preferred words', - 'default': '', - 'description': 'These words will give the releases a higher score.' - }, - { - 'name': 'required_words', - 'label': 'Required words', - 'default': '', - 'placeholder': 'Example: DTS, AC3 & English', - 'description': 'Ignore releases that don\'t contain at least one set of words. 
Sets are separated by "," and each word within a set must be separated with "&"' - }, - { - 'name': 'ignored_words', - 'label': 'Ignored words', - 'default': 'german, dutch, french, truefrench, danish, swedish, spanish, italian, korean, dubbed, swesub, korsub, dksubs', - }, - { - 'name': 'preferred_method', - 'label': 'First search', - 'description': 'Which of the methods do you prefer', - 'default': 'both', - 'type': 'dropdown', - 'values': [('usenet & torrents', 'both'), ('usenet', 'nzb'), ('torrents', 'torrent')], - }, - ], - }, { - 'tab': 'searcher', - 'name': 'cronjob', - 'label': 'Cronjob', - 'advanced': True, - 'description': 'Cron settings for the searcher see: APScheduler for details.', - 'options': [ - { - 'name': 'cron_day', - 'label': 'Day', - 'advanced': True, - 'default': '*', - 'type': 'string', - 'description': '*: Every day, */2: Every 2 days, 1: Every first of the month.', - }, - { - 'name': 'cron_hour', - 'label': 'Hour', - 'advanced': True, - 'default': random.randint(0, 23), - 'type': 'string', - 'description': '*: Every hour, */8: Every 8 hours, 3: At 3, midnight.', - }, - { - 'name': 'cron_minute', - 'label': 'Minute', - 'advanced': True, - 'default': random.randint(0, 59), - 'type': 'string', - 'description': "Just keep it random, so the providers don't get DDOSed by every CP user on a 'full' hour." 
- }, - ], - }, - ], -}, { - 'name': 'nzb', - 'groups': [ - { - 'tab': 'searcher', - 'name': 'nzb', - 'label': 'NZB', - 'wizard': True, - 'options': [ - { - 'name': 'retention', - 'default': 1000, - 'type': 'int', - 'unit': 'days' - }, - ], - }, - ], -}] diff --git a/couchpotato/core/plugins/searcher/main.py b/couchpotato/core/plugins/searcher/main.py deleted file mode 100644 index c5ed13800d..0000000000 --- a/couchpotato/core/plugins/searcher/main.py +++ /dev/null @@ -1,605 +0,0 @@ -from couchpotato import get_session -from couchpotato.api import addApiView -from couchpotato.core.event import addEvent, fireEvent, fireEventAsync -from couchpotato.core.helpers.encoding import simplifyString, toUnicode -from couchpotato.core.helpers.request import jsonified, getParam -from couchpotato.core.helpers.variable import md5, getTitle, splitString, \ - possibleTitles -from couchpotato.core.logger import CPLog -from couchpotato.core.plugins.base import Plugin -from couchpotato.core.settings.model import Movie, Release, ReleaseInfo -from couchpotato.environment import Env -from inspect import ismethod, isfunction -from sqlalchemy.exc import InterfaceError -import datetime -import random -import re -import time -import traceback - -log = CPLog(__name__) - - -class Searcher(Plugin): - - in_progress = False - - def __init__(self): - addEvent('searcher.all', self.allMovies) - addEvent('searcher.single', self.single) - addEvent('searcher.correct_movie', self.correctMovie) - addEvent('searcher.download', self.download) - addEvent('searcher.try_next_release', self.tryNextRelease) - addEvent('searcher.could_be_released', self.couldBeReleased) - - addApiView('searcher.try_next', self.tryNextReleaseView, docs = { - 'desc': 'Marks the snatched results as ignored and try the next best release', - 'params': { - 'id': {'desc': 'The id of the movie'}, - }, - }) - - addApiView('searcher.full_search', self.allMoviesView, docs = { - 'desc': 'Starts a full search for all wanted movies', - }) - - 
addApiView('searcher.progress', self.getProgress, docs = { - 'desc': 'Get the progress of current full search', - 'return': {'type': 'object', 'example': """{ - 'progress': False || object, total & to_go, -}"""}, - }) - - # Schedule cronjob - fireEvent('schedule.cron', 'searcher.all', self.allMovies, day = self.conf('cron_day'), hour = self.conf('cron_hour'), minute = self.conf('cron_minute')) - - def allMoviesView(self): - - in_progress = self.in_progress - if not in_progress: - fireEventAsync('searcher.all') - fireEvent('notify.frontend', type = 'searcher.started', data = True, message = 'Full search started') - else: - fireEvent('notify.frontend', type = 'searcher.already_started', data = True, message = 'Full search already in progress') - - return jsonified({ - 'success': not in_progress - }) - - def getProgress(self): - - return jsonified({ - 'progress': self.in_progress - }) - - def allMovies(self): - - if self.in_progress: - log.info('Search already in progress') - return - - self.in_progress = True - - db = get_session() - - movies = db.query(Movie).filter( - Movie.status.has(identifier = 'active') - ).all() - random.shuffle(movies) - - self.in_progress = { - 'total': len(movies), - 'to_go': len(movies), - } - - try: - search_types = self.getSearchTypes() - - for movie in movies: - movie_dict = movie.to_dict({ - 'profile': {'types': {'quality': {}}}, - 'releases': {'status': {}, 'quality': {}}, - 'library': {'titles': {}, 'files':{}}, - 'files': {} - }) - - try: - self.single(movie_dict, search_types) - except IndexError: - log.error('Forcing library update for %s, if you see this often, please report: %s', (movie_dict['library']['identifier'], traceback.format_exc())) - fireEvent('library.update', movie_dict['library']['identifier'], force = True) - except: - log.error('Search failed for %s: %s', (movie_dict['library']['identifier'], traceback.format_exc())) - - self.in_progress['to_go'] -= 1 - - # Break if CP wants to shut down - if self.shuttingDown(): 
- break - - except SearchSetupError: - pass - - self.in_progress = False - - def single(self, movie, search_types = None): - - # Find out search type - try: - if not search_types: - search_types = self.getSearchTypes() - except SearchSetupError: - return - - done_status = fireEvent('status.get', 'done', single = True) - - if not movie['profile'] or movie['status_id'] == done_status.get('id'): - log.debug('Movie doesn\'t have a profile or already done, assuming in manage tab.') - return - - db = get_session() - - pre_releases = fireEvent('quality.pre_releases', single = True) - release_dates = fireEvent('library.update_release_date', identifier = movie['library']['identifier'], merge = True) - available_status = fireEvent('status.get', 'available', single = True) - ignored_status = fireEvent('status.get', 'ignored', single = True) - - found_releases = [] - - default_title = getTitle(movie['library']) - if not default_title: - log.error('No proper info found for movie, removing it from library to cause it from having more issues.') - fireEvent('movie.delete', movie['id'], single = True) - return - - fireEvent('notify.frontend', type = 'searcher.started.%s' % movie['id'], data = True, message = 'Searching for "%s"' % default_title) - - - ret = False - for quality_type in movie['profile']['types']: - if not self.couldBeReleased(quality_type['quality']['identifier'] in pre_releases, release_dates): - log.info('Too early to search for %s, %s', (quality_type['quality']['identifier'], default_title)) - continue - - has_better_quality = 0 - - # See if better quality is available - for release in movie['releases']: - if release['quality']['order'] <= quality_type['quality']['order'] and release['status_id'] not in [available_status.get('id'), ignored_status.get('id')]: - has_better_quality += 1 - - # Don't search for quality lower then already available. 
- if has_better_quality is 0: - - log.info('Search for %s in %s', (default_title, quality_type['quality']['label'])) - quality = fireEvent('quality.single', identifier = quality_type['quality']['identifier'], single = True) - - results = [] - for search_type in search_types: - type_results = fireEvent('%s.search' % search_type, movie, quality, merge = True) - if type_results: - results += type_results - - sorted_results = sorted(results, key = lambda k: k['score'], reverse = True) - if len(sorted_results) == 0: - log.debug('Nothing found for %s in %s', (default_title, quality_type['quality']['label'])) - - download_preference = self.conf('preferred_method') - if download_preference != 'both': - sorted_results = sorted(sorted_results, key = lambda k: k['type'], reverse = (download_preference == 'torrent')) - - # Check if movie isn't deleted while searching - if not db.query(Movie).filter_by(id = movie.get('id')).first(): - break - - # Add them to this movie releases list - for nzb in sorted_results: - - nzb_identifier = md5(nzb['url']) - found_releases.append(nzb_identifier) - - rls = db.query(Release).filter_by(identifier = nzb_identifier).first() - if not rls: - rls = Release( - identifier = nzb_identifier, - movie_id = movie.get('id'), - quality_id = quality_type.get('quality_id'), - status_id = available_status.get('id') - ) - db.add(rls) - else: - [db.delete(old_info) for old_info in rls.info] - - db.commit() - - for info in nzb: - try: - if not isinstance(nzb[info], (str, unicode, int, long, float)): - continue - - rls_info = ReleaseInfo( - identifier = info, - value = toUnicode(nzb[info]) - ) - rls.info.append(rls_info) - except InterfaceError: - log.debug('Couldn\'t add %s to ReleaseInfo: %s', (info, traceback.format_exc())) - - db.commit() - - nzb['status_id'] = rls.status_id - - - for nzb in sorted_results: - if not quality_type.get('finish', False) and quality_type.get('wait_for', 0) > 0 and nzb.get('age') <= quality_type.get('wait_for', 0): - 
log.info('Ignored, waiting %s days: %s', (quality_type.get('wait_for'), nzb['name'])) - continue - - if nzb['status_id'] == ignored_status.get('id'): - log.info('Ignored: %s', nzb['name']) - continue - - if nzb['score'] <= 0: - log.info('Ignored, score to low: %s', nzb['name']) - continue - - downloaded = self.download(data = nzb, movie = movie) - if downloaded is True: - ret = True - break - elif downloaded != 'try_next': - break - - # Remove releases that aren't found anymore - for release in movie.get('releases', []): - if release.get('status_id') == available_status.get('id') and release.get('identifier') not in found_releases: - fireEvent('release.delete', release.get('id'), single = True) - - else: - log.info('Better quality (%s) already available or snatched for %s', (quality_type['quality']['label'], default_title)) - fireEvent('movie.restatus', movie['id']) - break - - # Break if CP wants to shut down - if self.shuttingDown() or ret: - break - - fireEvent('notify.frontend', type = 'searcher.ended.%s' % movie['id'], data = True) - - return ret - - def download(self, data, movie, manual = False): - - # Test to see if any downloaders are enabled for this type - downloader_enabled = fireEvent('download.enabled', manual, data, single = True) - - if downloader_enabled: - - snatched_status = fireEvent('status.get', 'snatched', single = True) - - # Download movie to temp - filedata = None - if data.get('download') and (ismethod(data.get('download')) or isfunction(data.get('download'))): - filedata = data.get('download')(url = data.get('url'), nzb_id = data.get('id')) - if filedata == 'try_next': - return filedata - - successful = fireEvent('download', data = data, movie = movie, manual = manual, filedata = filedata, single = True) - - if successful: - - try: - # Mark release as snatched - db = get_session() - rls = db.query(Release).filter_by(identifier = md5(data['url'])).first() - if rls: - rls.status_id = snatched_status.get('id') - db.commit() - - log_movie = 
'%s (%s) in %s' % (getTitle(movie['library']), movie['library']['year'], rls.quality.label) - snatch_message = 'Snatched "%s": %s' % (data.get('name'), log_movie) - log.info(snatch_message) - fireEvent('movie.snatched', message = snatch_message, data = rls.to_dict()) - - # If renamer isn't used, mark movie done - if not Env.setting('enabled', 'renamer'): - active_status = fireEvent('status.get', 'active', single = True) - done_status = fireEvent('status.get', 'done', single = True) - try: - if movie['status_id'] == active_status.get('id'): - for profile_type in movie['profile']['types']: - if rls and profile_type['quality_id'] == rls.quality.id and profile_type['finish']: - log.info('Renamer disabled, marking movie as finished: %s', log_movie) - - # Mark release done - rls.status_id = done_status.get('id') - db.commit() - - # Mark movie done - mvie = db.query(Movie).filter_by(id = movie['id']).first() - mvie.status_id = done_status.get('id') - db.commit() - except: - log.error('Failed marking movie finished, renamer disabled: %s', traceback.format_exc()) - - except: - log.error('Failed marking movie finished: %s', traceback.format_exc()) - - return True - - log.info('Tried to download, but none of the "%s" downloaders are enabled', (data.get('type', ''))) - - return False - - def getSearchTypes(self): - - download_types = fireEvent('download.enabled_types', merge = True) - provider_types = fireEvent('provider.enabled_types', merge = True) - - if download_types and len(list(set(provider_types) & set(download_types))) == 0: - log.error('There aren\'t any providers enabled for your downloader (%s). Check your settings.', ','.join(download_types)) - raise NoProviders - - for useless_provider in list(set(provider_types) - set(download_types)): - log.debug('Provider for "%s" enabled, but no downloader.', useless_provider) - - search_types = download_types - - if len(search_types) == 0: - log.error('There aren\'t any downloaders enabled. 
Please pick one in settings.') - raise NoDownloaders - - return search_types - - def correctMovie(self, nzb = {}, movie = {}, quality = {}, **kwargs): - - imdb_results = kwargs.get('imdb_results', False) - retention = Env.setting('retention', section = 'nzb') - - if nzb.get('seeders') is None and 0 < retention < nzb.get('age', 0): - log.info2('Wrong: Outside retention, age is %s, needs %s or lower: %s', (nzb['age'], retention, nzb['name'])) - return False - - movie_name = getTitle(movie['library']) - movie_words = re.split('\W+', simplifyString(movie_name)) - nzb_name = simplifyString(nzb['name']) - nzb_words = re.split('\W+', nzb_name) - required_words = splitString(self.conf('required_words').lower()) - - req_match = 0 - for req_set in required_words: - req = splitString(req_set, '&') - req_match += len(list(set(nzb_words) & set(req))) == len(req) - - if self.conf('required_words') and req_match == 0: - log.info2("Wrong: Required word missing: %s" % nzb['name']) - return False - - ignored_words = splitString(self.conf('ignored_words').lower()) - blacklisted = list(set(nzb_words) & set(ignored_words) - set(movie_words)) - if self.conf('ignored_words') and blacklisted: - log.info2("Wrong: '%s' blacklisted words: %s" % (nzb['name'], ", ".join(blacklisted))) - return False - - pron_tags = ['xxx', 'sex', 'anal', 'tits', 'fuck', 'porn', 'orgy', 'milf', 'boobs', 'erotica', 'erotic'] - pron_words = list(set(nzb_words) & set(pron_tags) - set(movie_words)) - if pron_words: - log.info('Wrong: %s, probably pr0n', (nzb['name'])) - return False - - #qualities = fireEvent('quality.all', single = True) - preferred_quality = fireEvent('quality.single', identifier = quality['identifier'], single = True) - - # Contains lower quality string - if self.containsOtherQuality(nzb, movie_year = movie['library']['year'], preferred_quality = preferred_quality): - log.info2('Wrong: %s, looking for %s', (nzb['name'], quality['label'])) - return False - - - # File to small - if nzb['size'] and 
preferred_quality['size_min'] > nzb['size']: - log.info2('Wrong: "%s" is too small to be %s. %sMB instead of the minimal of %sMB.', (nzb['name'], preferred_quality['label'], nzb['size'], preferred_quality['size_min'])) - return False - - # File to large - if nzb['size'] and preferred_quality.get('size_max') < nzb['size']: - log.info2('Wrong: "%s" is too large to be %s. %sMB instead of the maximum of %sMB.', (nzb['name'], preferred_quality['label'], nzb['size'], preferred_quality['size_max'])) - return False - - - # Provider specific functions - get_more = nzb.get('get_more_info') - if get_more: - get_more(nzb) - - extra_check = nzb.get('extra_check') - if extra_check and not extra_check(nzb): - return False - - - if imdb_results: - return True - - # Check if nzb contains imdb link - if self.checkIMDB([nzb.get('description', '')], movie['library']['identifier']): - return True - - for raw_title in movie['library']['titles']: - for movie_title in possibleTitles(raw_title['title']): - movie_words = re.split('\W+', simplifyString(movie_title)) - - if self.correctName(nzb['name'], movie_title): - # if no IMDB link, at least check year range 1 - if len(movie_words) > 2 and self.correctYear([nzb['name']], movie['library']['year'], 1): - return True - - # if no IMDB link, at least check year - if len(movie_words) <= 2 and self.correctYear([nzb['name']], movie['library']['year'], 0): - return True - - log.info("Wrong: %s, undetermined naming. 
Looking for '%s (%s)'" % (nzb['name'], movie_name, movie['library']['year'])) - return False - - def containsOtherQuality(self, nzb, movie_year = None, preferred_quality = {}): - - name = nzb['name'] - size = nzb.get('size', 0) - nzb_words = re.split('\W+', simplifyString(name)) - - qualities = fireEvent('quality.all', single = True) - - found = {} - for quality in qualities: - # Main in words - if quality['identifier'] in nzb_words: - found[quality['identifier']] = True - - # Alt in words - if list(set(nzb_words) & set(quality['alternative'])): - found[quality['identifier']] = True - - # Try guessing via quality tags - guess = fireEvent('quality.guess', [nzb.get('name')], single = True) - if guess: - found[guess['identifier']] = True - - # Hack for older movies that don't contain quality tag - year_name = fireEvent('scanner.name_year', name, single = True) - if len(found) == 0 and movie_year < datetime.datetime.now().year - 3 and not year_name.get('year', None): - if size > 3000: # Assume dvdr - log.info('Quality was missing in name, assuming it\'s a DVD-R based on the size: %s', (size)) - found['dvdr'] = True - else: # Assume dvdrip - log.info('Quality was missing in name, assuming it\'s a DVD-Rip based on the size: %s', (size)) - found['dvdrip'] = True - - # Allow other qualities - for allowed in preferred_quality.get('allow'): - if found.get(allowed): - del found[allowed] - - return not (found.get(preferred_quality['identifier']) and len(found) == 1) - - def checkIMDB(self, haystack, imdbId): - - for string in haystack: - if 'imdb.com/title/' + imdbId in string: - return True - - return False - - def correctYear(self, haystack, year, year_range): - - for string in haystack: - - year_name = fireEvent('scanner.name_year', string, single = True) - - if year_name and ((year - year_range) <= year_name.get('year') <= (year + year_range)): - log.debug('Movie year matches range: %s looking for %s', (year_name.get('year'), year)) - return True - - log.debug('Movie year 
doesn\'t matche range: %s looking for %s', (year_name.get('year'), year)) - return False - - def correctName(self, check_name, movie_name): - - check_names = [check_name] - - # Match names between " - try: check_names.append(re.search(r'([\'"])[^\1]*\1', check_name).group(0)) - except: pass - - # Match longest name between [] - try: check_names.append(max(check_name.split('['), key = len)) - except: pass - - for check_name in list(set(check_names)): - check_movie = fireEvent('scanner.name_year', check_name, single = True) - - try: - check_words = filter(None, re.split('\W+', check_movie.get('name', ''))) - movie_words = filter(None, re.split('\W+', simplifyString(movie_name))) - - if len(check_words) > 0 and len(movie_words) > 0 and len(list(set(check_words) - set(movie_words))) == 0: - return True - except: - pass - - return False - - def couldBeReleased(self, is_pre_release, dates): - - now = int(time.time()) - - if not dates or (dates.get('theater', 0) == 0 and dates.get('dvd', 0) == 0): - return True - else: - - # For movies before 1972 - if dates.get('theater', 0) < 0 or dates.get('dvd', 0) < 0: - return True - - if is_pre_release: - # Prerelease 1 week before theaters - if dates.get('theater') - 604800 < now: - return True - else: - # 12 weeks after theater release - if dates.get('theater') > 0 and dates.get('theater') + 7257600 < now: - return True - - if dates.get('dvd') > 0: - - # 4 weeks before dvd release - if dates.get('dvd') - 2419200 < now: - return True - - # Dvd should be released - if dates.get('dvd') < now: - return True - - - return False - - def tryNextReleaseView(self): - - trynext = self.tryNextRelease(getParam('id')) - - return jsonified({ - 'success': trynext - }) - - def tryNextRelease(self, movie_id, manual = False): - - snatched_status = fireEvent('status.get', 'snatched', single = True) - ignored_status = fireEvent('status.get', 'ignored', single = True) - - try: - db = get_session() - rels = db.query(Release).filter_by( - status_id = 
snatched_status.get('id'), - movie_id = movie_id - ).all() - - for rel in rels: - rel.status_id = ignored_status.get('id') - db.commit() - - movie_dict = fireEvent('movie.get', movie_id, single = True) - log.info('Trying next release for: %s', getTitle(movie_dict['library'])) - fireEvent('searcher.single', movie_dict) - - return True - - except: - log.error('Failed searching for next release: %s', traceback.format_exc()) - return False - -class SearchSetupError(Exception): - pass - -class NoDownloaders(SearchSetupError): - pass - -class NoProviders(SearchSetupError): - pass diff --git a/couchpotato/core/plugins/status/__init__.py b/couchpotato/core/plugins/status/__init__.py deleted file mode 100644 index fb5b4cc79b..0000000000 --- a/couchpotato/core/plugins/status/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -from .main import StatusPlugin - -def start(): - return StatusPlugin() - -config = [] diff --git a/couchpotato/core/plugins/status/main.py b/couchpotato/core/plugins/status/main.py deleted file mode 100644 index c01caef597..0000000000 --- a/couchpotato/core/plugins/status/main.py +++ /dev/null @@ -1,109 +0,0 @@ -from couchpotato import get_session -from couchpotato.api import addApiView -from couchpotato.core.event import addEvent -from couchpotato.core.helpers.encoding import toUnicode -from couchpotato.core.helpers.request import jsonified -from couchpotato.core.logger import CPLog -from couchpotato.core.plugins.base import Plugin -from couchpotato.core.settings.model import Status - -log = CPLog(__name__) - - -class StatusPlugin(Plugin): - - statuses = { - 'needs_update': 'Needs update', - 'active': 'Active', - 'done': 'Done', - 'downloaded': 'Downloaded', - 'wanted': 'Wanted', - 'snatched': 'Snatched', - 'failed': 'Failed', - 'deleted': 'Deleted', - 'ignored': 'Ignored', - 'available': 'Available', - 'suggest': 'Suggest', - } - - def __init__(self): - addEvent('status.add', self.add) - addEvent('status.get', self.add) # Alias for .add - 
addEvent('status.get_by_id', self.getById) - addEvent('status.all', self.all) - addEvent('app.initialize', self.fill) - - addApiView('status.list', self.list, docs = { - 'desc': 'Check for available update', - 'return': {'type': 'object', 'example': """{ - 'success': True, - 'list': array, statuses -}"""} - }) - - def list(self): - - return jsonified({ - 'success': True, - 'list': self.all() - }) - - def getById(self, id): - db = get_session() - status = db.query(Status).filter_by(id = id).first() - status_dict = status.to_dict() - #db.close() - - return status_dict - - def all(self): - - db = get_session() - - statuses = db.query(Status).all() - - temp = [] - for status in statuses: - s = status.to_dict() - temp.append(s) - - #db.close() - return temp - - def add(self, identifier): - - db = get_session() - - s = db.query(Status).filter_by(identifier = identifier).first() - if not s: - s = Status( - identifier = identifier, - label = toUnicode(identifier.capitalize()) - ) - db.add(s) - db.commit() - - status_dict = s.to_dict() - - #db.close() - return status_dict - - def fill(self): - - db = get_session() - - for identifier, label in self.statuses.iteritems(): - s = db.query(Status).filter_by(identifier = identifier).first() - if not s: - log.info('Creating status: %s', label) - s = Status( - identifier = identifier, - label = toUnicode(label) - ) - db.add(s) - - s.label = toUnicode(label) - db.commit() - - #db.close() - diff --git a/couchpotato/core/plugins/status/static/status.js b/couchpotato/core/plugins/status/static/status.js deleted file mode 100644 index 2b8d30f348..0000000000 --- a/couchpotato/core/plugins/status/static/status.js +++ /dev/null @@ -1,17 +0,0 @@ -var StatusBase = new Class({ - - setup: function(statuses){ - var self = this; - - self.statuses = statuses; - - }, - - get: function(id){ - return this.statuses.filter(function(status){ - return status.id == id - }).pick() - }, - -}); -window.Status = new StatusBase(); diff --git 
a/couchpotato/core/plugins/subtitle.py b/couchpotato/core/plugins/subtitle.py new file mode 100644 index 0000000000..110fe11348 --- /dev/null +++ b/couchpotato/core/plugins/subtitle.py @@ -0,0 +1,86 @@ +import traceback + +from couchpotato.core.event import addEvent +from couchpotato.core.helpers.encoding import toUnicode, sp +from couchpotato.core.helpers.variable import splitString +from couchpotato.core.logger import CPLog +from couchpotato.core.plugins.base import Plugin +from couchpotato.environment import Env +import subliminal + + +log = CPLog(__name__) + +autoload = 'Subtitle' + + +class Subtitle(Plugin): + + services = ['opensubtitles', 'thesubdb', 'subswiki', 'subscenter', 'wizdom'] + + def __init__(self): + addEvent('renamer.before', self.searchSingle) + + def searchSingle(self, group): + if self.isDisabled(): return + + try: + available_languages = sum(group['subtitle_language'].values(), []) + downloaded = [] + files = [toUnicode(x) for x in group['files']['movie']] + log.debug('Searching for subtitles for: %s', files) + + for lang in self.getLanguages(): + if lang not in available_languages: + download = subliminal.download_subtitles(files, multi = True, force = self.conf('force'), languages = [lang], services = self.services, cache_dir = Env.get('cache_dir')) + for subtitle in download: + downloaded.extend(download[subtitle]) + + for d_sub in downloaded: + log.info('Found subtitle (%s): %s', (d_sub.language.alpha2, files)) + group['files']['subtitle'].append(sp(d_sub.path)) + group['before_rename'].append(sp(d_sub.path)) + group['subtitle_language'][sp(d_sub.path)] = [d_sub.language.alpha2] + + return True + + except: + log.error('Failed searching for subtitle: %s', (traceback.format_exc())) + + return False + + def getLanguages(self): + return splitString(self.conf('languages')) + + +config = [{ + 'name': 'subtitle', + 'groups': [ + { + 'tab': 'renamer', + 'name': 'subtitle', + 'label': 'Download subtitles', + 'description': 'after rename', + 
'options': [ + { + 'name': 'enabled', + 'label': 'Search and download subtitles', + 'default': False, + 'type': 'enabler', + }, + { + 'name': 'languages', + 'description': ('Comma separated, 2 letter country code.', 'Example: en, nl. See the codes at on Wikipedia'), + }, + { + 'advanced': True, + 'name': 'force', + 'label': 'Force', + 'description': ('Force download all languages (including embedded).', 'This will also overwrite all existing subtitles.'), + 'default': False, + 'type': 'bool', + }, + ], + }, + ], +}] diff --git a/couchpotato/core/plugins/subtitle/__init__.py b/couchpotato/core/plugins/subtitle/__init__.py deleted file mode 100644 index 686d385e6c..0000000000 --- a/couchpotato/core/plugins/subtitle/__init__.py +++ /dev/null @@ -1,34 +0,0 @@ -from .main import Subtitle - -def start(): - return Subtitle() - -config = [{ - 'name': 'subtitle', - 'groups': [ - { - 'tab': 'renamer', - 'name': 'subtitle', - 'label': 'Download subtitles', - 'description': 'after rename', - 'options': [ - { - 'name': 'enabled', - 'label': 'Search and download subtitles', - 'default': False, - 'type': 'enabler', - }, - { - 'name': 'languages', - 'description': 'Comma separated, 2 letter country code. 
Example: en, nl', - }, -# { -# 'name': 'automatic', -# 'default': True, -# 'type': 'bool', -# 'description': 'Automaticly search & download for movies in library', -# }, - ], - }, - ], -}] diff --git a/couchpotato/core/plugins/subtitle/main.py b/couchpotato/core/plugins/subtitle/main.py deleted file mode 100644 index 73ead0871f..0000000000 --- a/couchpotato/core/plugins/subtitle/main.py +++ /dev/null @@ -1,73 +0,0 @@ -from couchpotato import get_session -from couchpotato.core.event import addEvent, fireEvent -from couchpotato.core.helpers.encoding import toUnicode -from couchpotato.core.helpers.variable import splitString -from couchpotato.core.logger import CPLog -from couchpotato.core.plugins.base import Plugin -from couchpotato.core.settings.model import Library, FileType -from couchpotato.environment import Env -import subliminal -import traceback - -log = CPLog(__name__) - - -class Subtitle(Plugin): - - services = ['opensubtitles', 'thesubdb', 'subswiki', 'podnapisi'] - - def __init__(self): - addEvent('renamer.before', self.searchSingle) - - def searchLibrary(self): - - # Get all active and online movies - db = get_session() - - library = db.query(Library).all() - done_status = fireEvent('status.get', 'done', single = True) - - for movie in library.movies: - - for release in movie.releases: - - # get releases and their movie files - if release.status_id is done_status.get('id'): - - files = [] - for file in release.files.filter(FileType.status.has(identifier = 'movie')).all(): - files.append(file.path); - - # get subtitles for those files - subliminal.list_subtitles(files, cache_dir = Env.get('cache_dir'), multi = True, languages = self.getLanguages(), services = self.services) - - def searchSingle(self, group): - - if self.isDisabled(): return - - try: - available_languages = sum(group['subtitle_language'].itervalues(), []) - downloaded = [] - files = [toUnicode(x) for x in group['files']['movie']] - log.debug('Searching for subtitles for: %s', files) - - 
for lang in self.getLanguages(): - if lang not in available_languages: - download = subliminal.download_subtitles(files, multi = True, force = False, languages = [lang], services = self.services, cache_dir = Env.get('cache_dir')) - for subtitle in download: - downloaded.extend(download[subtitle]) - - for d_sub in downloaded: - log.info('Found subtitle (%s): %s', (d_sub.language.alpha2, files)) - group['files']['subtitle'].add(d_sub.path) - group['subtitle_language'][d_sub.path] = [d_sub.language.alpha2] - - return True - - except: - log.error('Failed searching for subtitle: %s', (traceback.format_exc())) - - return False - - def getLanguages(self): - return splitString(self.conf('languages')) diff --git a/couchpotato/core/plugins/suggestion/__init__.py b/couchpotato/core/plugins/suggestion/__init__.py deleted file mode 100644 index b63b5b13ef..0000000000 --- a/couchpotato/core/plugins/suggestion/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -from .main import Suggestion - -def start(): - return Suggestion() - -config = [] diff --git a/couchpotato/core/plugins/suggestion/main.py b/couchpotato/core/plugins/suggestion/main.py deleted file mode 100644 index 2c31ca32ad..0000000000 --- a/couchpotato/core/plugins/suggestion/main.py +++ /dev/null @@ -1,22 +0,0 @@ -from couchpotato.api import addApiView -from couchpotato.core.event import fireEvent -from couchpotato.core.helpers.request import jsonified, getParam -from couchpotato.core.plugins.base import Plugin - -class Suggestion(Plugin): - - def __init__(self): - - addApiView('suggestion.view', self.getView) - - def getView(self): - - limit_offset = getParam('limit_offset', None) - total_movies, movies = fireEvent('movie.list', status = 'suggest', limit_offset = limit_offset, single = True) - - return jsonified({ - 'success': True, - 'empty': len(movies) == 0, - 'total': total_movies, - 'movies': movies, - }) diff --git a/couchpotato/core/plugins/trailer.py b/couchpotato/core/plugins/trailer.py new file mode 100644 index 
0000000000..f0e9b3365e --- /dev/null +++ b/couchpotato/core/plugins/trailer.py @@ -0,0 +1,104 @@ +import os + +from couchpotato.core.event import addEvent, fireEvent +from couchpotato.core.helpers.variable import getExt, getTitle +from couchpotato.core.logger import CPLog +from couchpotato.core.plugins.base import Plugin + + +log = CPLog(__name__) + +autoload = 'Trailer' + + +class Trailer(Plugin): + + def __init__(self): + addEvent('renamer.after', self.searchSingle) + + def searchSingle(self, message = None, group = None): + if not group: group = {} + if self.isDisabled() or len(group['files']['trailer']) > 0: return + + if self.conf('usevf'): + try: + filename = self.conf('name').replace('', group['filename'].decode('latin-1')) + except: + filename = self.conf('name').replace('', group['filename']) + try: + destination = os.path.join(group['destination_dir'].decode('latin-1'), filename) + except: + destination = os.path.join(group['destination_dir'], filename) + trailers = fireEvent('vftrailer.search', group = group, filename=filename, destination=destination, merge = True) + else : + trailers = fireEvent('trailer.search', group = group, merge = True) + if not trailers or trailers == []: + log.info('No trailers found for: %s', getTitle(group)) + return False + + if self.conf('usevf'): + log.info('Trailers found in VF for: %s', getTitle(group)) + return True + else: + + for trailer in trailers.get(self.conf('quality'), []): + + ext = getExt(trailer) + filename = self.conf('name').replace('', group['filename']) + ('.%s' % ('mp4' if len(ext) > 5 else ext)) + destination = os.path.join(group['destination_dir'], filename) + if not os.path.isfile(destination): + trailer_file = fireEvent('file.download', url = trailer, dest = destination, urlopen_kwargs = {'headers': {'User-Agent': 'Quicktime'}}, single = True) + if os.path.getsize(trailer_file) < (1024 * 1024): # Don't trust small trailers (1MB), try next one + os.unlink(trailer_file) + continue + else: + 
log.debug('Trailer already exists: %s', destination) + + group['renamed_files'].append(destination) + + # Download first and break + break + + return True + + + +config = [{ + 'name': 'trailer', + 'groups': [ + { + 'tab': 'renamer', + 'name': 'trailer', + 'label': 'Download trailer', + 'description': 'after rename', + 'options': [ + { + 'name': 'enabled', + 'label': 'Search and download trailers', + 'default': False, + 'type': 'enabler', + }, + { + 'name': 'usevf', + 'label' : 'Search french Trailers (beta)', + 'default': False, + 'advanced': True, + 'type': 'bool', + }, + { + 'name': 'quality', + 'default': '720p', + 'type': 'dropdown', + 'values': [('1080p', '1080p'), ('720p', '720p'), ('480P', '480p')], + }, + { + 'name': 'name', + 'label': 'Naming', + 'default': '-trailer', + 'advanced': True, + 'description': 'Use <filename> to use above settings.' + }, + ], + }, + ], +}] diff --git a/couchpotato/core/plugins/trailer/__init__.py b/couchpotato/core/plugins/trailer/__init__.py deleted file mode 100644 index 282b3482a9..0000000000 --- a/couchpotato/core/plugins/trailer/__init__.py +++ /dev/null @@ -1,37 +0,0 @@ -from .main import Trailer - -def start(): - return Trailer() - -config = [{ - 'name': 'trailer', - 'groups': [ - { - 'tab': 'renamer', - 'name': 'trailer', - 'label': 'Download trailer', - 'description': 'after rename', - 'options': [ - { - 'name': 'enabled', - 'label': 'Search and download trailers', - 'default': False, - 'type': 'enabler', - }, - { - 'name': 'quality', - 'default': '720p', - 'type': 'dropdown', - 'values': [('1080P', '1080p'), ('720P', '720p'), ('480P', '480p')], - }, - { - 'name': 'name', - 'label': 'Naming', - 'default': '-trailer', - 'advanced': True, - 'description': 'Use to use above settings.' 
- }, - ], - }, - ], -}] diff --git a/couchpotato/core/plugins/trailer/main.py b/couchpotato/core/plugins/trailer/main.py deleted file mode 100644 index 4ab51e788d..0000000000 --- a/couchpotato/core/plugins/trailer/main.py +++ /dev/null @@ -1,38 +0,0 @@ -from couchpotato.core.event import addEvent, fireEvent -from couchpotato.core.helpers.variable import getExt, getTitle -from couchpotato.core.logger import CPLog -from couchpotato.core.plugins.base import Plugin -import os - -log = CPLog(__name__) - - -class Trailer(Plugin): - - def __init__(self): - addEvent('renamer.after', self.searchSingle) - - def searchSingle(self, message = None, group = {}): - - if self.isDisabled() or len(group['files']['trailer']) > 0: return - - trailers = fireEvent('trailer.search', group = group, merge = True) - if not trailers or trailers == []: - log.info('No trailers found for: %s', getTitle(group['library'])) - return False - - for trailer in trailers.get(self.conf('quality'), []): - filename = self.conf('name').replace('', group['filename']) + ('.%s' % getExt(trailer)) - destination = os.path.join(group['destination_dir'], filename) - if not os.path.isfile(destination): - fireEvent('file.download', url = trailer, dest = destination, urlopen_kwargs = {'headers': {'User-Agent': 'Quicktime'}}, single = True) - else: - log.debug('Trailer already exists: %s', destination) - - group['renamed_files'].append(destination) - - # Download first and break - break - - return True - diff --git a/couchpotato/core/plugins/userscript/__init__.py b/couchpotato/core/plugins/userscript/__init__.py index 5df5a801f1..9d708593ba 100644 --- a/couchpotato/core/plugins/userscript/__init__.py +++ b/couchpotato/core/plugins/userscript/__init__.py @@ -1,6 +1,5 @@ from .main import Userscript -def start(): - return Userscript() -config = [] +def autoload(): + return Userscript() diff --git a/couchpotato/core/plugins/userscript/bookmark.js b/couchpotato/core/plugins/userscript/bookmark.js deleted file mode 
100644 index 5ee8c376cc..0000000000 --- a/couchpotato/core/plugins/userscript/bookmark.js +++ /dev/null @@ -1,43 +0,0 @@ -var includes = {{includes|tojson}}; -var excludes = {{excludes|tojson}}; - -var specialChars = '\\{}+.():-|^$'; -var makeRegex = function(pattern) { - pattern = pattern.split(''); - var i, len = pattern.length; - for( i = 0; i < len; i++) { - var character = pattern[i]; - if(specialChars.indexOf(character) > -1) { - pattern[i] = '\\' + character; - } else if(character === '?') { - pattern[i] = '.'; - } else if(character === '*') { - pattern[i] = '.*'; - } - } - return new RegExp('^' + pattern.join('') + '$'); -}; - -var isCorrectUrl = function() { - for(i in includes) { - var reg = includes[i] - if (makeRegex(reg).test(document.location.href)) - return true; - } - return false; -} -var addUserscript = function() { - // Add window param - document.body.setAttribute('cp_auto_open', true) - - // Load userscript - var e = document.createElement('script'); - e.setAttribute('type', 'text/javascript'); - e.setAttribute('charset', 'UTF-8'); - e.setAttribute('src', '{{host}}couchpotato.js?r=' + Math.random() * 99999999); - document.body.appendChild(e) -} -if(isCorrectUrl()) - addUserscript() -else - alert('Can\'t find a proper movie on this page..') diff --git a/couchpotato/core/plugins/userscript/bookmark.js_tmpl b/couchpotato/core/plugins/userscript/bookmark.js_tmpl new file mode 100644 index 0000000000..cc04baf7f1 --- /dev/null +++ b/couchpotato/core/plugins/userscript/bookmark.js_tmpl @@ -0,0 +1,47 @@ +{% autoescape None %} + +var includes = {{ json_encode(includes) }}; +var excludes = {{ json_encode(excludes) }}; + +var specialChars = '\\{}+.():-|^$'; +var makeRegex = function(pattern) { + pattern = pattern.split(''); + var i, len = pattern.length; + for( i = 0; i < len; i++) { + var character = pattern[i]; + if(specialChars.indexOf(character) > -1) { + pattern[i] = '\\' + character; + } else if(character === '?') { + pattern[i] = '.'; + } else 
if(character === '*') { + pattern[i] = '.*'; + } + } + return new RegExp('^' + pattern.join('') + '$'); +}; + +var isCorrectUrl = function() { + for(i in includes) { + if(!includes.hasOwnProperty(i)) continue; + + var reg = includes[i] + if (makeRegex(reg).test(document.location.href)) + return true; + } + return false; +} +var addUserscript = function() { + // Add window param + document.body.setAttribute('cp_auto_open', 'true') + + // Load userscript + var e = document.createElement('script'); + e.setAttribute('type', 'text/javascript'); + e.setAttribute('charset', 'UTF-8'); + e.setAttribute('src', '{{host}}couchpotato.js?r=' + Math.random() * 99999999); + document.body.appendChild(e) +} +if(isCorrectUrl()) + addUserscript() +else + alert('Can\'t find a proper movie on this page..') diff --git a/couchpotato/core/plugins/userscript/main.py b/couchpotato/core/plugins/userscript/main.py index 3ff7fb072b..baa7906bbb 100644 --- a/couchpotato/core/plugins/userscript/main.py +++ b/couchpotato/core/plugins/userscript/main.py @@ -1,63 +1,78 @@ +import os +import traceback +import time +from base64 import b64encode, b64decode + +from couchpotato import index from couchpotato.api import addApiView from couchpotato.core.event import fireEvent, addEvent -from couchpotato.core.helpers.request import getParam, jsonified from couchpotato.core.helpers.variable import isDict from couchpotato.core.logger import CPLog from couchpotato.core.plugins.base import Plugin from couchpotato.environment import Env -from flask.globals import request -from flask.helpers import url_for -from flask.templating import render_template -import os +from tornado.web import RequestHandler + log = CPLog(__name__) class Userscript(Plugin): - version = 3 + version = 8 def __init__(self): - addApiView('userscript.get//', self.getUserScript, static = True) + addApiView('userscript.get/(.*)/(.*)', self.getUserScript, static = True) + addApiView('userscript', self.iFrame) addApiView('userscript.add_via_url', 
self.getViaUrl) addApiView('userscript.includes', self.getIncludes) addApiView('userscript.bookmark', self.bookmark) addEvent('userscript.get_version', self.getVersion) + addEvent('app.test', self.doTest) - def bookmark(self): + def bookmark(self, host = None, **kwargs): params = { 'includes': fireEvent('userscript.get_includes', merge = True), 'excludes': fireEvent('userscript.get_excludes', merge = True), - 'host': getParam('host', None), + 'host': host, } - return self.renderTemplate(__file__, 'bookmark.js', **params) + return self.renderTemplate(__file__, 'bookmark.js_tmpl', **params) - def getIncludes(self): + def getIncludes(self, **kwargs): - return jsonified({ + return { 'includes': fireEvent('userscript.get_includes', merge = True), 'excludes': fireEvent('userscript.get_excludes', merge = True), - }) + } - def getUserScript(self, random = '', filename = ''): + def getUserScript(self, script_route, **kwargs): - params = { - 'includes': fireEvent('userscript.get_includes', merge = True), - 'excludes': fireEvent('userscript.get_excludes', merge = True), - 'version': self.getVersion(), - 'api': '%suserscript/' % url_for('api.index').lstrip('/'), - 'host': request.host_url, - } + klass = self + + class UserscriptHandler(RequestHandler): + + def get(self, random, route): - script = self.renderTemplate(__file__, 'template.js', **params) - self.createFile(os.path.join(Env.get('cache_dir'), 'couchpotato.user.js'), script) + bookmarklet_host = Env.setting('bookmarklet_host') + loc = bookmarklet_host if bookmarklet_host else "{0}://{1}".format(self.request.protocol, self.request.headers.get('X-Forwarded-Host') or self.request.headers.get('host')) - from flask.helpers import send_from_directory - return send_from_directory(Env.get('cache_dir'), 'couchpotato.user.js') + params = { + 'includes': fireEvent('userscript.get_includes', merge = True), + 'excludes': fireEvent('userscript.get_excludes', merge = True), + 'version': klass.getVersion(), + 'api': '%suserscript/' % 
Env.get('api_base'), + 'host': loc, + } + + script = klass.renderTemplate(__file__, 'template.js_tmpl', **params) + klass.createFile(os.path.join(Env.get('cache_dir'), 'couchpotato.user.js'), script) + + self.redirect(Env.get('api_base') + 'file.cache/couchpotato.user.js') + + Env.get('app').add_handlers(".*$", [('%s%s' % (Env.get('api_base'), script_route), UserscriptHandler)]) def getVersion(self): @@ -69,12 +84,10 @@ def getVersion(self): return version - def iFrame(self): - return render_template('index.html', sep = os.sep, fireEvent = fireEvent, env = Env) - - def getViaUrl(self): + def iFrame(self, **kwargs): + return index() - url = getParam('url') + def getViaUrl(self, url = None, **kwargs): params = { 'url': url, @@ -84,4 +97,47 @@ def getViaUrl(self): log.error('Failed adding movie via url: %s', url) params['error'] = params['movie'] if params['movie'] else 'Failed getting movie info' - return jsonified(params) + return params + + def doTest(self): + time.sleep(1) + + tests = [ + 'aHR0cDovL3d3dy5hbGxvY2luZS5mci9maWxtL2ZpY2hlZmlsbV9nZW5fY2ZpbG09MjAxMTA1Lmh0bWw=', + 'aHR0cDovL3RyYWlsZXJzLmFwcGxlLmNvbS90cmFpbGVycy9wYXJhbW91bnQvbWlzc2lvbmltcG9zc2libGVyb2d1ZW5hdGlvbi8=', + 'aHR0cDovL3d3dy55b3V0aGVhdGVyLmNvbS92aWV3LnBocD9pZD0xMTI2Mjk5', + 'aHR0cDovL3RyYWt0LnR2L21vdmllcy9taXNzaW9uLWltcG9zc2libGUtcm9ndWUtbmF0aW9uLTIwMTU=', + 'aHR0cHM6Ly93d3cucmVkZGl0LmNvbS9yL0lqdXN0d2F0Y2hlZC9jb21tZW50cy8zZjk3bzYvaWp3X21pc3Npb25faW1wb3NzaWJsZV9yb2d1ZV9uYXRpb25fMjAxNS8=', + 'aHR0cDovL3d3dy5yb3R0ZW50b21hdG9lcy5jb20vbS9taXNzaW9uX2ltcG9zc2libGVfcm9ndWVfbmF0aW9uLw==', + 'aHR0cHM6Ly93d3cudGhlbW92aWVkYi5vcmcvbW92aWUvMTc3Njc3LW1pc3Npb24taW1wb3NzaWJsZS01', + 'aHR0cDovL3d3dy5jcml0aWNrZXIuY29tL2ZpbG0vTWlzc2lvbl9JbXBvc3NpYmxlX1JvZ3VlLw==', + 'aHR0cDovL2ZpbG1jZW50cnVtLm5sL2ZpbG1zLzE4MzIzL21pc3Npb24taW1wb3NzaWJsZS1yb2d1ZS1uYXRpb24v', + 'aHR0cDovL3d3dy5maWxtc3RhcnRzLmRlL2tyaXRpa2VuLzIwMTEwNS5odG1s', + 
'aHR0cDovL3d3dy5maWxtd2ViLnBsL2ZpbG0vTWlzc2lvbiUzQStJbXBvc3NpYmxlKy0rUm9ndWUrTmF0aW9uLTIwMTUtNjU1MDQ4', + 'aHR0cDovL3d3dy5mbGlja2NoYXJ0LmNvbS9tb3ZpZS8zM0NFMzEyNUJB', + 'aHR0cDovL3d3dy5pbWRiLmNvbS90aXRsZS90dDIzODEyNDkv', + 'aHR0cDovL2xldHRlcmJveGQuY29tL2ZpbG0vbWlzc2lvbi1pbXBvc3NpYmxlLXJvZ3VlLW5hdGlvbi8=', + 'aHR0cDovL3d3dy5tb3ZpZW1ldGVyLm5sL2ZpbG0vMTA0MTcw', + 'aHR0cDovL21vdmllcy5pby9tLzMxL2Vu', + ] + + success = 0 + for x in tests: + x = b64decode(x) + try: + movie = self.getViaUrl(x) + movie = movie.get('movie', {}) or {} + imdb = movie.get('imdb') + + if imdb and b64encode(imdb) in ['dHQxMjI5MjM4', 'dHQyMzgxMjQ5']: + success += 1 + continue + except: + log.error('Failed userscript test "%s": %s', (x, traceback.format_exc())) + + log.error('Failed userscript test "%s"', x) + + if success == len(tests): + log.debug('All userscript tests successful') + else: + log.error('Failed userscript tests, %s out of %s', (success, len(tests))) diff --git a/couchpotato/core/plugins/userscript/static/userscript.css b/couchpotato/core/plugins/userscript/static/userscript.css deleted file mode 100644 index 304dfa74f0..0000000000 --- a/couchpotato/core/plugins/userscript/static/userscript.css +++ /dev/null @@ -1,14 +0,0 @@ -.page.userscript { - position: absolute; - width: 100%; - top: 0; - bottom: 0; - left: 0; - right: 0; -} - - .page.userscript .frame.loading { - text-align: center; - font-size: 20px; - padding: 20px; - } diff --git a/couchpotato/core/plugins/userscript/static/userscript.js b/couchpotato/core/plugins/userscript/static/userscript.js index d6d5983c24..15e018dc1a 100644 --- a/couchpotato/core/plugins/userscript/static/userscript.js +++ b/couchpotato/core/plugins/userscript/static/userscript.js @@ -2,6 +2,7 @@ Page.Userscript = new Class({ Extends: PageBase, + order: 80, name: 'userscript', has_tab: false, @@ -12,10 +13,10 @@ Page.Userscript = new Class({ } }, - indexAction: function(param){ + indexAction: function(){ var self = this; - self.el.adopt( + 
self.content.grab( self.frame = new Element('div.frame.loading', { 'text': 'Loading...' }) @@ -34,7 +35,7 @@ Page.Userscript = new Class({ if(json.error) self.frame.set('html', json.error); else { - var item = new Block.Search.Item(json.movie); + var item = new BlockSearchMovieItem(json.movie); self.frame.adopt(item); item.showOptions(); } @@ -53,60 +54,56 @@ var UserscriptSettingTab = new Class({ initialize: function(){ var self = this; - App.addEvent('load', self.addSettings.bind(self)) + App.addEvent('loadSettings', self.addSettings.bind(self)); }, addSettings: function(){ var self = this; - self.settings = App.getPage('Settings') + self.settings = App.getPage('Settings'); self.settings.addEvent('create', function(){ - // See if userscript can be installed - var userscript = false; - try { - if(Components.interfaces.gmIGreasemonkeyService) - userscript = true - } - catch(e){ - userscript = Browser.chrome === true; - } - var host_url = window.location.protocol + '//' + window.location.host; self.settings.createGroup({ 'name': 'userscript', - 'label': 'Install the bookmarklet' + (userscript ? ' or userscript' : ''), + 'label': 'Install the browser extension or bookmarklet', 'description': 'Easily add movies via imdb.com, appletrailers and more' }).inject(self.settings.tabs.automation.content, 'top').adopt( - (userscript ? 
[new Element('a.userscript.button', { - 'text': 'Install userscript', - 'href': Api.createUrl('userscript.get')+randomString()+'/couchpotato.user.js', - 'target': '_self' - }), new Element('span.or[text=or]')] : null), - new Element('span.bookmarklet').adopt( - new Element('a.button.green', { - 'text': '+CouchPotato', - 'href': "javascript:void((function(){var e=document.createElement('script');e.setAttribute('type','text/javascript');e.setAttribute('charset','UTF-8');e.setAttribute('src','" + - host_url + Api.createUrl('userscript.bookmark') + - "?host="+ encodeURI(host_url + Api.createUrl('userscript.get')+randomString()+'/') + - "&r='+Math.random()*99999999);document.body.appendChild(e)})());", - 'target': '', - 'events': { - 'click': function(e){ - (e).stop() - alert('Drag it to your bookmark ;)') - } - } + new Element('div').adopt( + new Element('a.userscript.button', { + 'text': 'Install extension', + 'href': 'https://couchpota.to/extension/', + 'target': '_blank' }), - new Element('span', { - 'text': 'Б┤╫ Drag this to your bookmarks' - }) - ) - ).setStyles({ - 'background-image': "url('"+Api.createUrl('static/userscript/userscript.png')+"')" - }); + new Element('span.or[text=or]'), + new Element('span.bookmarklet').adopt( + new Element('a.button.green', { + 'text': '+CouchPotato', + /* jshint ignore:start */ + 'href': "javascript:void((function(){var e=document.createElement('script');e.setAttribute('type','text/javascript');e.setAttribute('charset','UTF-8');e.setAttribute('src','" + + host_url + Api.createUrl('userscript.bookmark') + + "?host="+ encodeURI(host_url + Api.createUrl('userscript.get')+randomString()+'/') + + "&r='+Math.random()*99999999);document.body.appendChild(e)})());", + /* jshint ignore:end */ + 'target': '', + 'events': { + 'click': function(e){ + (e).stop(); + alert('Drag it to your bookmark ;)'); + } + } + }), + new Element('span', { + 'text': 'Б┤╫ Drag this to your bookmarks' + }) + ) + ), + new Element('img', { + 'src': 
'https://couchpota.to/media/images/userscript.gif' + }) + ); }); diff --git a/couchpotato/core/plugins/userscript/static/userscript.png b/couchpotato/core/plugins/userscript/static/userscript.png deleted file mode 100644 index c8e7657783..0000000000 Binary files a/couchpotato/core/plugins/userscript/static/userscript.png and /dev/null differ diff --git a/couchpotato/core/plugins/userscript/static/userscript.scss b/couchpotato/core/plugins/userscript/static/userscript.scss new file mode 100644 index 0000000000..12f4fd2c66 --- /dev/null +++ b/couchpotato/core/plugins/userscript/static/userscript.scss @@ -0,0 +1,125 @@ +@import "_mixins"; + +.page.userscript { + position: absolute; + width: 100%; + top: 0; + bottom: 0; + left: 0; + right: 0; + padding: 0; + + .frame.loading { + text-align: center; + font-size: 20px; + padding: 20px; + } + + .media_result { + height: 140px; + display: flex; + } + + .thumbnail { + width: 90px; + } + + .options { + left: 90px; + display: flex; + align-items: center; + padding: $padding/2; + + > div { + display: flex; + flex-wrap: wrap; + + div { + flex: 1 auto; + margin: 0; + padding: 0 $padding/4; + } + + .title { + min-width: 100%; + margin-bottom: $padding; + } + + .add { + text-align: right; + + a { + display: block; + text-align: center; + } + } + + select { + width: 100%; + } + } + } + + .message { + font-size: 1.5em; + } + + .year, + .data { + display: none; + } + +} + + +.group_userscript.group_userscript { + display: block; + + .empty_wanted & { + padding: $padding 0; + } + + .wgroup_automation & { + padding: $padding/2 0; + margin-left: 0; + } + + h2 { + margin: 0 0 $padding/2; + } + + .userscript { + margin-left: $padding; + + @include media-tablet { + margin-left: $padding/2; + } + + .wgroup_automation & { + margin-left: 0; + } + } + + .bookmarklet { + + span { + margin-left: 10px; + display: inline-block; + } + } + + img { + clear: both; + margin: $padding; + width: 100%; + max-width: 600px; + + @include media-tablet { + 
margin: $padding/2; + } + + .wgroup_automation & { + margin-left: 0; + } + } +} diff --git a/couchpotato/core/plugins/userscript/template.js b/couchpotato/core/plugins/userscript/template.js deleted file mode 100644 index c30fad5bab..0000000000 --- a/couchpotato/core/plugins/userscript/template.js +++ /dev/null @@ -1,138 +0,0 @@ -// ==UserScript== -// @name CouchPotato UserScript -// @description Add movies like a real CouchPotato -// @grant none -// @version {{version}} - -// @match {{host}}* -{% for include in includes %} -// @match {{include}}{% endfor %} -{% for exclude in excludes %} -// @exclude {{exclude}}{% endfor %} -// @exclude {{host}}{{api.rstrip('/')}}* - -// ==/UserScript== - -if (window.top == window.self){ // Only run on top window - -var version = {{version}}, - host = '{{host}}', - api = '{{api}}'; - -function create() { - switch (arguments.length) { - case 1: - var A = document.createTextNode(arguments[0]); - break; - default: - var A = document.createElement(arguments[0]), B = arguments[1]; - for ( var b in B) { - if (b.indexOf("on") == 0){ - A.addEventListener(b.substring(2), B[b], false); - } - else if (",style,accesskey,id,name,src,href,which".indexOf("," - + b.toLowerCase()) != -1){ - A.setAttribute(b, B[b]); - } - else{ - A[b] = B[b]; - } - } - for ( var i = 2, len = arguments.length; i < len; ++i){ - A.appendChild(arguments[i]); - } - } - return A; -} - -var addStyle = function(css) { - var head = document.getElementsByTagName('head')[0], - style = document.createElement('style'); - if (!head) - return; - - style.type = 'text/css'; - style.textContent = css; - head.appendChild(style); -} - -// Styles -addStyle('\ - #cp_popup { font-family: "Helvetica Neue", Helvetica, Arial, Geneva, sans-serif; -moz-border-radius: 6px 0px 0px 6px; -webkit-border-radius: 6px 0px 0px 6px; border-radius: 6px 0px 0px 6px; -moz-box-shadow: 0 0 20px rgba(0,0,0,0.5); -webkit-box-shadow: 0 0 20px rgba(0,0,0,0.5); box-shadow: 0 0 20px rgba(0,0,0,0.5); 
position:fixed; z-index:9999; bottom:0; right:0; font-size:15px; margin: 20px 0; display: block; background:#4E5969; } \ - #cp_popup.opened { width: 492px; } \ - #cp_popup a#add_to { cursor:pointer; text-align:center; text-decoration:none; color: #000; display:block; padding:5px 0 5px 5px; } \ - #cp_popup a#close_button { cursor:pointer; float: right; padding:120px 10px 10px; } \ - #cp_popup a img { vertical-align: middle; } \ - #cp_popup a:hover { color:#000; } \ - #cp_popup iframe{ background:#4E5969; margin:6px 0 2px 6px; height:140px; width:450px; overflow:hidden; border:none; } \ -'); - -var cp_icon = 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAACgAAAAoCAYAAACM/rhtAAADHmlDQ1BJQ0MgUHJvZmlsZQAAeAGFVN9r01AU/tplnbDhizpnEQk+aJFuZFN0Q5y2a1e6zVrqNrchSJumbVyaxiTtfrAH2YtvOsV38Qc++QcM2YNve5INxhRh+KyIIkz2IrOemzRNJ1MDufe73/nuOSfn5F6g+XFa0xQvDxRVU0/FwvzE5BTf8gFeHEMr/GhNi4YWSiZHQA/Tsnnvs/MOHsZsdO5v36v+Y9WalQwR8BwgvpQ1xCLhWaBpXNR0E+DWie+dMTXCzUxzWKcECR9nOG9jgeGMjSOWZjQ1QJoJwgfFQjpLuEA4mGng8w3YzoEU5CcmqZIuizyrRVIv5WRFsgz28B9zg/JfsKiU6Zut5xCNbZoZTtF8it4fOX1wjOYA1cE/Xxi9QbidcFg246M1fkLNJK4RJr3n7nRpmO1lmpdZKRIlHCS8YlSuM2xp5gsDiZrm0+30UJKwnzS/NDNZ8+PtUJUE6zHF9fZLRvS6vdfbkZMH4zU+pynWf0D+vff1corleZLw67QejdX0W5I6Vtvb5M2mI8PEd1E/A0hCgo4cZCjgkUIMYZpjxKr4TBYZIkqk0ml0VHmyONY7KJOW7RxHeMlfDrheFvVbsrj24Pue3SXXjrwVhcW3o9hR7bWB6bqyE5obf3VhpaNu4Te55ZsbbasLCFH+iuWxSF5lyk+CUdd1NuaQU5f8dQvPMpTuJXYSWAy6rPBe+CpsCk+FF8KXv9TIzt6tEcuAcSw+q55TzcbsJdJM0utkuL+K9ULGGPmQMUNanb4kTZyKOfLaUAsnBneC6+biXC/XB567zF3h+rkIrS5yI47CF/VFfCHwvjO+Pl+3b4hhp9u+02TrozFa67vTkbqisXqUj9sn9j2OqhMZsrG+sX5WCCu0omNqSrN0TwADJW1Ol/MFk+8RhAt8iK4tiY+rYleQTysKb5kMXpcMSa9I2S6wO4/tA7ZT1l3maV9zOfMqcOkb/cPrLjdVBl4ZwNFzLhegM3XkCbB8XizrFdsfPJ63gJE722OtPW1huos+VqvbdC5bHgG7D6vVn8+q1d3n5H8LeKP8BqkjCtbCoV8yAAAACXBIWXMAAAsTAAALEwEAmpwYAAABZGlUWHRYTUw6Y29tLmFkb2JlLnhtcAAAAAAAPHg6eG1wbWV0YSB4bWxuczp4PSJhZG9iZTpuczptZXRhLyIgeDp4bXB0az0iWE1QIENvcmUgNC40LjAiPgogICA8cmRmOlJERiB4bWxuczpyZGY9Imh0dHA6Ly93d3cudzMub3JnLzE5OTkvMDIvMjItcmRmLXN5bnRheC1ucyM
iPgogICAgICA8cmRmOkRlc2NyaXB0aW9uIHJkZjphYm91dD0iIgogICAgICAgICAgICB4bWxuczp4bXA9Imh0dHA6Ly9ucy5hZG9iZS5jb20veGFwLzEuMC8iPgogICAgICAgICA8eG1wOkNyZWF0b3JUb29sPkFkb2JlIEltYWdlUmVhZHk8L3htcDpDcmVhdG9yVG9vbD4KICAgICAgPC9yZGY6RGVzY3JpcHRpb24+CiAgIDwvcmRmOlJERj4KPC94OnhtcG1ldGE+Chvleg4AAAdrSURBVFgJzZfPb1VVEMfn3PfaUgotVIpi+a3BWETFRIkYEzTRtRv8BxSiCRp1JStkBXGhC42J7Ay4wGrcqHHhAjfEaNSoESOoVQIWBIGW/qB9797r93Pem8d9zwe4KJFJpnPOnDkz35k559zXkOe5XS8KFsKW3VbC/8CQ5cNbjWAxYG7/LXC4XgABdzUQIYREsbNrFee6AAy7FXxXnt25N6wWgBdL8+1oeaH9Uh6wP5Zutj8/6c/HrwXM18s+mEu5xSyRv8yCbe5cbM9nM2bphHjcqidO2OmN88KZ8iI79NUT9pI6nquaFKrtWbsuAC8uEzRRKNtawGWzNqPwnVKVbdYGBX0wndHfOjhsr0RkOue0cLR2EZKSrUu6ItBS0mFBnCdAFGn8ZQy81TiLbasX7aLRHP859IqluAwlu41aClTQmDkyqYP+DZst62vVZtyO5rzF8faqIptfD92h21YE9SgHINFrj0yIJzSxY+0AtermvsW7axWZWGTLdAaXxgrSXpUiqMXSlfKqxmYjgDmk97EVVHE+ZxX0R3n9UKzPrACs1vPSJTB6DmugGQlkyCp2LpmykxHIkasDnJN3sN2jvHF/2FZeYPuyaUtVolIdRqrLUapO2A/fPWX3cDmu9sSQQNsK7tYrPxS7UCy22XDzNM4Wb9ctfDuvbHotrKv22gMdK+2nLx63b8rdNiQwRgX9/Omu5sk8OZ6wkQjua9XzPcuGn5RFO+eK8K8KDuuebc3zeAvb4LmiSlWcv+Gg/T6vywYU9nRWtR69dAsaG2oXpFLqsY6p47b3++35zsbaVQZNAPeF0LE9zysHensfzi5VdiRZVglpzkUMmaXp16aGiXhxVZzYNWWSp7dYuetU3/ljz409evQx21jWJYg/EYJMhNxPmb68aqmFypiNJKkdXpLa1Noxs01vylxXhhz844ykSo0Wq63lXQJ3sK/v/uzixUNKPWGDX/M+jZeLz4oXidkIT4ovncL5mA2+EezMUJ5dWKUE9KHLefe0DvEUK8uQCWT3YltT7bA1I9I9/LNZ/0gNmKaRkDAgI0DOnMBVPxwYWJePj3+mxz/R13xG4EoEoFoYax6ffOY6SlFfkZRtmO3TcRsrl279qJKM75BSnhOyqyPUTxsTOOusWpjKLUunLXvhfcvXv6sEZeaAiAP7PALUHFfZ1NkLr/aY9SrgrBa6+CGHgQDHDZSc9mKsb79N1Zlv16xaNdNfsdLH3bbokWkb3yQ7FjAWkVmnspmQs65pS545YMkdH5hNL5T+4mVADo5T0mixbiyAlUleriddAgjJs6DvfQRKtYiJExwwJ3v5j1I/AOR01rrekf1dUirbmmfNFW18vtlNSuTpt8xWfqoEexVD1QAIcZCtXM9PKyIFIzbnO6eNDhJQgKy3M4JhbYl4pXiVuF+c6kBeWJra5A89VvpcxeNJkbMORZkU2JUXzLbtMVsmcJM6yPwqdED4bmWK4C3WMILQOY5d0UtR606rgzPS03KYzdgxBuiAePQvvmGTdnJP2Xoe1Ftzq0AL5OBxsyd2KukjZqcXa8/52n5AeYyiBAfzJoAoYq/rkhbDEFVknWrJf9zIGXUbWqGbb7eIN8hg9HzJDg9XbfRls/sE6qFndSz0BIxqLRE4AKiAjTPNfvflMZFNACkpzAISqlfURjWmpSpITKLojDEBnACwSodizX6zX5eb3SvZIXBV3iqtQfj
niULFXpJFtnJbgBhTLYwBSPXk3+4We4UdYNFhK9BB2a/YUwOT6Rx0jl1ODv+6wNYtbufL/TYBrMiUM8EFABhZAohMaR+bWEeixzHSq4yesVPsgm5q7KVumSeHBCC+sGFfO1/omwBizCY2eyAkTBV5TnBOhZ08e5foGTu1+/+NdXySOL4AARjI/bhsAZhHgA4KCQEYwlk7gKwVQV1r7MEBhU+X6PHv6xrWgA8zEqVa8rJj6EAByFeDjJH8YqCiVBx2O/ZASOeoqM/xgz17YXzRFferd7jh07vYUsHL54KgBMEQ/lZ8Wsy7R9beGrKlAkXWNO5FOviixC+gRsRLxbyj7s/f32IMLdfIq+cSLZky56vlPxAIALHG2IOjc8DFgFw6QBQllXlQTPL4xxdfq6Jk3FRBAhVboGncQOlvFpMtALyKbPZMXaIrMnqvrp8tl1qK/ogLIYsJA74JYEU7q7IgI7KBPCNA8gsG5w7Aq+RzpOuKgAHooJBXIsA5+9FqAujlBhztgLz8rJEhARgXyZ2yjkOkA6Qj6LyKDlaqJsIH+2AHh2wCKGcBhw5Kw8YYPQxAB1R06qBw6uAAwxh/SAfn1ZQqUtEf+4tAmwCmiVUrshCQKq2FHBgSHc69Su6oVXrmgGINYOiKADVtIk8WWQQZAa6vFcWm0mo/H29l3IURYAjAmPcKY4IgCd4q0Tm7LXP8sK8IEJ1TsYKtyTb+q9M/0B2368euAK7Qc1LRGYw+2HBO/LeYn1lOHtAleh+7dF1xj4+vJInnxLgB0JU3mqQTDeK/ux/rlWso/+fBDV9BjssNTf8AmPnhttjsZCIAAAAASUVORK5CYII='; -var close_img = 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAACXBIWXMAAAsTAAALEwEAmpwYAAAABGdBTUEAALGOfPtRkwAAACBjSFJNAAB6JQAAgIMAAPn/AACA6QAAdTAAAOpgAAA6mAAAF2+SX8VGAAAA5ElEQVR42tRTQYoEIQwsl/2Bl3gQoY9eBKEf5kvyG8G7h4Z+S38gIu5lp5lZ2R7YPm1BDhZJSFWiGmPgDj5wE7cbfD4/mBkAHprUj9yTTyn9OsGIMSLG+Fxwxc8SiAi9d4QQHskjhIDeO4jorQcq5wwiQmsN3nt479FaAxEh5zxJmyZIKalSClprL1FKQUpJXZr4DBH52xqZeRhjICKw1sJaCxGBMQbMPN41GFpriAicc6i1otYK5xxEBFrraQuThGVZAADbtp2amXms6woAOI7j0gO17/t5MN+HNfEvBf//M30NAKe7aRqUOIlfAAAAAElFTkSuQmCC'; - -var osd = function(){ - var navbar, newElement; - - var createApiUrl = function(url){ - return host + api + "?url=" + escape(url) - }; - - var iframe = create('iframe', { - 'src': createApiUrl(document.location.href), - 'frameborder': 0, - 'scrolling': 'no' - }); - - var popup = create('div', { - 'id': 'cp_popup' - }); - - var onclick = function(){ - - // Try and get imdb url - try { - var regex = new RegExp(/tt(\d{7})/); - var imdb_id = document.body.innerHTML.match(regex)[0]; - if (imdb_id) - iframe.setAttribute('src', createApiUrl('http://imdb.com/title/'+imdb_id+'/')) - } - catch(e){} - - popup.innerHTML = 
''; - popup.setAttribute('class', 'opened'); - popup.appendChild(create('a', { - 'innerHTML': '', - 'id': 'close_button', - 'onclick': function(){ - popup.innerHTML = ''; - popup.appendChild(add_button); - popup.setAttribute('class', ''); - } - })); - popup.appendChild(iframe) - } - - var add_button = create('a', { - 'innerHTML': '', - 'id': 'add_to', - 'onclick': onclick - }); - popup.appendChild(add_button); - - document.body.parentNode.insertBefore(popup, document.body); - - // Auto fold open - if(document.body.getAttribute('cp_auto_open')) - onclick() -}; - -var setVersion = function(){ - document.body.setAttribute('data-userscript_version', version) -}; - -if(document.location.href.indexOf(host) == -1) - osd(); -else - setVersion(); - -} \ No newline at end of file diff --git a/couchpotato/core/plugins/userscript/template.js_tmpl b/couchpotato/core/plugins/userscript/template.js_tmpl new file mode 100644 index 0000000000..25e184202d --- /dev/null +++ b/couchpotato/core/plugins/userscript/template.js_tmpl @@ -0,0 +1,147 @@ +// ==UserScript== +// +// If you can read this, you need to enable or install the Greasemonkey add-on for firefox +// If you are using Chrome, download this file and drag it to the extensions tab +// Other browsers, use the bookmarklet +// +// @name CouchPotato UserScript +// @description Add movies like a real CouchPotato +// @grant none +// @version {{version}} + +// @match {{host}}/* +{% for include in includes %} +// @match {{include}}{% end %} +{% for exclude in excludes %} +// @exclude {{exclude}}{% end %} +// @exclude {{host}}{{api.rstrip('/')}}* + +// ==/UserScript== + +{% autoescape None %} +if (window.top == window.self){ // Only run on top window + +var version = {{version}}, + host = '{{host}}', + api = '{{api}}'; + +var create = function() { + var A, B; + switch (arguments.length) { + case 1: + A = document.createTextNode(arguments[0]); + break; + default: + A = document.createElement(arguments[0]); + B = arguments[1]; + + for ( 
var b in B) { + if (b.indexOf("on") == 0){ + A.addEventListener(b.substring(2), B[b], false); + } + else if (",style,accesskey,id,name,src,href,which".indexOf("," + + b.toLowerCase()) != -1){ + A.setAttribute(b, B[b]); + } + else{ + A[b] = B[b]; + } + } + for ( var i = 2, len = arguments.length; i < len; ++i){ + A.appendChild(arguments[i]); + } + } + return A; +} + +var addStyle = function(css) { + var head = document.getElementsByTagName('head')[0], + style = document.createElement('style'); + if (!head) + return; + + style.type = 'text/css'; + style.textContent = css; + head.appendChild(style); +} + +// Styles +addStyle('\ + #cp_popup { font-family: "Helvetica Neue", Helvetica, Arial, Geneva, sans-serif; -moz-border-radius: 6px 0px 0px 6px; -webkit-border-radius: 6px 0px 0px 6px; border-radius: 6px 0px 0px 6px; -moz-box-shadow: 0 0 20px rgba(0,0,0,0.5); -webkit-box-shadow: 0 0 20px rgba(0,0,0,0.5); box-shadow: 0 0 20px rgba(0,0,0,0.5); position:fixed; z-index:20000; bottom:0; right:0; font-size:15px; margin: 20px 0; display: block; background:#FFF; } \ + #cp_popup.opened { width: 492px; } \ + #cp_popup a#add_to { cursor:pointer; text-align:center; text-decoration:none; color: #000; display:block; padding:5px 0 5px 5px; } \ + #cp_popup a#close_button { cursor:pointer; float: right; padding:120px 10px 10px; } \ + #cp_popup a img { vertical-align: middle; } \ + #cp_popup a:hover { color:#000; } \ + #cp_popup iframe{ background:#FFF; margin:6px 0 2px 6px; height:140px; width:450px; overflow:hidden; border:none; } \ +'); + +var cp_icon = 
'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAACgAAAAoCAYAAACM/rhtAAADHmlDQ1BJQ0MgUHJvZmlsZQAAeAGFVN9r01AU/tplnbDhizpnEQk+aJFuZFN0Q5y2a1e6zVrqNrchSJumbVyaxiTtfrAH2YtvOsV38Qc++QcM2YNve5INxhRh+KyIIkz2IrOemzRNJ1MDufe73/nuOSfn5F6g+XFa0xQvDxRVU0/FwvzE5BTf8gFeHEMr/GhNi4YWSiZHQA/Tsnnvs/MOHsZsdO5v36v+Y9WalQwR8BwgvpQ1xCLhWaBpXNR0E+DWie+dMTXCzUxzWKcECR9nOG9jgeGMjSOWZjQ1QJoJwgfFQjpLuEA4mGng8w3YzoEU5CcmqZIuizyrRVIv5WRFsgz28B9zg/JfsKiU6Zut5xCNbZoZTtF8it4fOX1wjOYA1cE/Xxi9QbidcFg246M1fkLNJK4RJr3n7nRpmO1lmpdZKRIlHCS8YlSuM2xp5gsDiZrm0+30UJKwnzS/NDNZ8+PtUJUE6zHF9fZLRvS6vdfbkZMH4zU+pynWf0D+vff1corleZLw67QejdX0W5I6Vtvb5M2mI8PEd1E/A0hCgo4cZCjgkUIMYZpjxKr4TBYZIkqk0ml0VHmyONY7KJOW7RxHeMlfDrheFvVbsrj24Pue3SXXjrwVhcW3o9hR7bWB6bqyE5obf3VhpaNu4Te55ZsbbasLCFH+iuWxSF5lyk+CUdd1NuaQU5f8dQvPMpTuJXYSWAy6rPBe+CpsCk+FF8KXv9TIzt6tEcuAcSw+q55TzcbsJdJM0utkuL+K9ULGGPmQMUNanb4kTZyKOfLaUAsnBneC6+biXC/XB567zF3h+rkIrS5yI47CF/VFfCHwvjO+Pl+3b4hhp9u+02TrozFa67vTkbqisXqUj9sn9j2OqhMZsrG+sX5WCCu0omNqSrN0TwADJW1Ol/MFk+8RhAt8iK4tiY+rYleQTysKb5kMXpcMSa9I2S6wO4/tA7ZT1l3maV9zOfMqcOkb/cPrLjdVBl4ZwNFzLhegM3XkCbB8XizrFdsfPJ63gJE722OtPW1huos+VqvbdC5bHgG7D6vVn8+q1d3n5H8LeKP8BqkjCtbCoV8yAAAACXBIWXMAAAsTAAALEwEAmpwYAAABZGlUWHRYTUw6Y29tLmFkb2JlLnhtcAAAAAAAPHg6eG1wbWV0YSB4bWxuczp4PSJhZG9iZTpuczptZXRhLyIgeDp4bXB0az0iWE1QIENvcmUgNC40LjAiPgogICA8cmRmOlJERiB4bWxuczpyZGY9Imh0dHA6Ly93d3cudzMub3JnLzE5OTkvMDIvMjItcmRmLXN5bnRheC1ucyMiPgogICAgICA8cmRmOkRlc2NyaXB0aW9uIHJkZjphYm91dD0iIgogICAgICAgICAgICB4bWxuczp4bXA9Imh0dHA6Ly9ucy5hZG9iZS5jb20veGFwLzEuMC8iPgogICAgICAgICA8eG1wOkNyZWF0b3JUb29sPkFkb2JlIEltYWdlUmVhZHk8L3htcDpDcmVhdG9yVG9vbD4KICAgICAgPC9yZGY6RGVzY3JpcHRpb24+CiAgIDwvcmRmOlJERj4KPC94OnhtcG1ldGE+Chvleg4AAAdrSURBVFgJzZfPb1VVEMfn3PfaUgotVIpi+a3BWETFRIkYEzTRtRv8BxSiCRp1JStkBXGhC42J7Ay4wGrcqHHhAjfEaNSoESOoVQIWBIGW/qB9797r93Pem8d9zwe4KJFJpnPOnDkz35k559zXkOe5XS8KFsKW3VbC/8CQ5cNbjWAxYG7/LXC4XgABdzUQIYREsbNrFee6AAy7FXxXnt25N6wWgBdL8+1oeaH9Uh6wP5Zutj8/6c/HrwXM18s+mEu5xSyRv8yCbe5cbM9nM2bphHjcqidO2OmN88KZ8iI79NUT9pI6nquaFKrtWbsuAC8uEzRRKNtaw
GWzNqPwnVKVbdYGBX0wndHfOjhsr0RkOue0cLR2EZKSrUu6ItBS0mFBnCdAFGn8ZQy81TiLbasX7aLRHP859IqluAwlu41aClTQmDkyqYP+DZst62vVZtyO5rzF8faqIptfD92h21YE9SgHINFrj0yIJzSxY+0AtermvsW7axWZWGTLdAaXxgrSXpUiqMXSlfKqxmYjgDmk97EVVHE+ZxX0R3n9UKzPrACs1vPSJTB6DmugGQlkyCp2LpmykxHIkasDnJN3sN2jvHF/2FZeYPuyaUtVolIdRqrLUapO2A/fPWX3cDmu9sSQQNsK7tYrPxS7UCy22XDzNM4Wb9ctfDuvbHotrKv22gMdK+2nLx63b8rdNiQwRgX9/Omu5sk8OZ6wkQjua9XzPcuGn5RFO+eK8K8KDuuebc3zeAvb4LmiSlWcv+Gg/T6vywYU9nRWtR69dAsaG2oXpFLqsY6p47b3++35zsbaVQZNAPeF0LE9zysHensfzi5VdiRZVglpzkUMmaXp16aGiXhxVZzYNWWSp7dYuetU3/ljz409evQx21jWJYg/EYJMhNxPmb68aqmFypiNJKkdXpLa1Noxs01vylxXhhz844ykSo0Wq63lXQJ3sK/v/uzixUNKPWGDX/M+jZeLz4oXidkIT4ovncL5mA2+EezMUJ5dWKUE9KHLefe0DvEUK8uQCWT3YltT7bA1I9I9/LNZ/0gNmKaRkDAgI0DOnMBVPxwYWJePj3+mxz/R13xG4EoEoFoYax6ffOY6SlFfkZRtmO3TcRsrl279qJKM75BSnhOyqyPUTxsTOOusWpjKLUunLXvhfcvXv6sEZeaAiAP7PALUHFfZ1NkLr/aY9SrgrBa6+CGHgQDHDZSc9mKsb79N1Zlv16xaNdNfsdLH3bbokWkb3yQ7FjAWkVmnspmQs65pS545YMkdH5hNL5T+4mVADo5T0mixbiyAlUleriddAgjJs6DvfQRKtYiJExwwJ3v5j1I/AOR01rrekf1dUirbmmfNFW18vtlNSuTpt8xWfqoEexVD1QAIcZCtXM9PKyIFIzbnO6eNDhJQgKy3M4JhbYl4pXiVuF+c6kBeWJra5A89VvpcxeNJkbMORZkU2JUXzLbtMVsmcJM6yPwqdED4bmWK4C3WMILQOY5d0UtR606rgzPS03KYzdgxBuiAePQvvmGTdnJP2Xoe1Ftzq0AL5OBxsyd2KukjZqcXa8/52n5AeYyiBAfzJoAoYq/rkhbDEFVknWrJf9zIGXUbWqGbb7eIN8hg9HzJDg9XbfRls/sE6qFndSz0BIxqLRE4AKiAjTPNfvflMZFNACkpzAISqlfURjWmpSpITKLojDEBnACwSodizX6zX5eb3SvZIXBV3iqtQfjniULFXpJFtnJbgBhTLYwBSPXk3+4We4UdYNFhK9BB2a/YUwOT6Rx0jl1ODv+6wNYtbufL/TYBrMiUM8EFABhZAohMaR+bWEeixzHSq4yesVPsgm5q7KVumSeHBCC+sGFfO1/omwBizCY2eyAkTBV5TnBOhZ08e5foGTu1+/+NdXySOL4AARjI/bhsAZhHgA4KCQEYwlk7gKwVQV1r7MEBhU+X6PHv6xrWgA8zEqVa8rJj6EAByFeDjJH8YqCiVBx2O/ZASOeoqM/xgz17YXzRFferd7jh07vYUsHL54KgBMEQ/lZ8Wsy7R9beGrKlAkXWNO5FOviixC+gRsRLxbyj7s/f32IMLdfIq+cSLZky56vlPxAIALHG2IOjc8DFgFw6QBQllXlQTPL4xxdfq6Jk3FRBAhVboGncQOlvFpMtALyKbPZMXaIrMnqvrp8tl1qK/ogLIYsJA74JYEU7q7IgI7KBPCNA8gsG5w7Aq+RzpOuKgAHooJBXIsA5+9FqAujlBhztgLz8rJEhARgXyZ2yjkOkA6Qj6LyKDlaqJsIH+2AHh2wCKGcBhw5Kw8YYPQxAB1R06qBw6uAAwxh/SAfn1ZQqUtEf+4tAmwCmi
VUrshCQKq2FHBgSHc69Su6oVXrmgGINYOiKADVtIk8WWQQZAa6vFcWm0mo/H29l3IURYAjAmPcKY4IgCd4q0Tm7LXP8sK8IEJ1TsYKtyTb+q9M/0B2368euAK7Qc1LRGYw+2HBO/LeYn1lOHtAleh+7dF1xj4+vJInnxLgB0JU3mqQTDeK/ux/rlWso/+fBDV9BjssNTf8AmPnhttjsZCIAAAAASUVORK5CYII='; +var close_img = 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAACXBIWXMAAAsTAAALEwEAmpwYAAAABGdBTUEAALGOfPtRkwAAACBjSFJNAAB6JQAAgIMAAPn/AACA6QAAdTAAAOpgAAA6mAAAF2+SX8VGAAAA5ElEQVR42tRTQYoEIQwsl/2Bl3gQoY9eBKEf5kvyG8G7h4Z+S38gIu5lp5lZ2R7YPm1BDhZJSFWiGmPgDj5wE7cbfD4/mBkAHprUj9yTTyn9OsGIMSLG+Fxwxc8SiAi9d4QQHskjhIDeO4jorQcq5wwiQmsN3nt479FaAxEh5zxJmyZIKalSClprL1FKQUpJXZr4DBH52xqZeRhjICKw1sJaCxGBMQbMPN41GFpriAicc6i1otYK5xxEBFrraQuThGVZAADbtp2amXms6woAOI7j0gO17/t5MN+HNfEvBf//M30NAKe7aRqUOIlfAAAAAElFTkSuQmCC'; + +var osd = function(){ + var navbar, newElement; + + var createApiUrl = function(url){ + return host + api + "?url=" + escape(url) + }; + + var iframe = create('iframe', { + 'src': createApiUrl(document.location.href), + 'frameborder': 0, + 'scrolling': 'no' + }); + + var popup = create('div', { + 'id': 'cp_popup' + }); + + var onclick = function(){ + + // Try and get imdb url + try { + var regex = new RegExp(/tt(\d{7})/); + var imdb_id = document.body.innerHTML.match(regex)[0]; + if (imdb_id) + iframe.setAttribute('src', createApiUrl('http://imdb.com/title/'+imdb_id+'/')) + } + catch(e){} + + popup.innerHTML = ''; + popup.setAttribute('class', 'opened'); + popup.appendChild(create('a', { + 'innerHTML': '', + 'id': 'close_button', + 'onclick': function(){ + popup.innerHTML = ''; + popup.appendChild(add_button); + popup.setAttribute('class', ''); + } + })); + popup.appendChild(iframe) + } + + var add_button = create('a', { + 'innerHTML': '', + 'id': 'add_to', + 'onclick': onclick + }); + popup.appendChild(add_button); + + document.body.parentNode.insertBefore(popup, document.body); + + // Auto fold open + if(document.body.getAttribute('cp_auto_open')) + onclick() +}; + +var setVersion = function(){ + 
document.body.setAttribute('data-userscript_version', version) +}; + +if(document.location.href.indexOf(host) == -1) + osd(); +else + setVersion(); + +} diff --git a/couchpotato/core/plugins/v1importer/__init__.py b/couchpotato/core/plugins/v1importer/__init__.py deleted file mode 100644 index 40c1434b4c..0000000000 --- a/couchpotato/core/plugins/v1importer/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -from .main import V1Importer - -def start(): - return V1Importer() - -config = [] diff --git a/couchpotato/core/plugins/v1importer/form.html b/couchpotato/core/plugins/v1importer/form.html deleted file mode 100644 index e27d1c7228..0000000000 --- a/couchpotato/core/plugins/v1importer/form.html +++ /dev/null @@ -1,30 +0,0 @@ - - - - - - - - - - - - - {% if message: %} - {{ message }} - {% else: %} - - - - {% endif %} - - \ No newline at end of file diff --git a/couchpotato/core/plugins/v1importer/main.py b/couchpotato/core/plugins/v1importer/main.py deleted file mode 100644 index 08f8ba986f..0000000000 --- a/couchpotato/core/plugins/v1importer/main.py +++ /dev/null @@ -1,56 +0,0 @@ -from couchpotato.api import addApiView -from couchpotato.core.event import fireEventAsync -from couchpotato.core.helpers.variable import getImdb -from couchpotato.core.logger import CPLog -from couchpotato.core.plugins.base import Plugin -from couchpotato.environment import Env -from flask.globals import request -from flask.helpers import url_for -import os - -log = CPLog(__name__) - - -class V1Importer(Plugin): - - def __init__(self): - addApiView('v1.import', self.fromOld, methods = ['GET', 'POST']) - - def fromOld(self): - - if request.method != 'POST': - return self.renderTemplate(__file__, 'form.html', url_for = url_for) - - file = request.files['old_db'] - - uploaded_file = os.path.join(Env.get('cache_dir'), 'v1_database.db') - - if os.path.isfile(uploaded_file): - os.remove(uploaded_file) - - file.save(uploaded_file) - - try: - import sqlite3 - conn = sqlite3.connect(uploaded_file) - - 
wanted = [] - - t = ('want',) - cur = conn.execute('SELECT status, imdb FROM Movie WHERE status=?', t) - for row in cur: - status, imdb = row - if getImdb(imdb): - wanted.append(imdb) - conn.close() - - wanted = set(wanted) - for imdb in wanted: - fireEventAsync('movie.add', {'identifier': imdb}, search_after = False) - - message = 'Successfully imported %s movie(s)' % len(wanted) - except Exception, e: - message = 'Failed: %s' % e - - return self.renderTemplate(__file__, 'form.html', url_for = url_for, message = message) - diff --git a/couchpotato/core/plugins/wizard/__init__.py b/couchpotato/core/plugins/wizard/__init__.py index 78876470ee..7a272b44d0 100644 --- a/couchpotato/core/plugins/wizard/__init__.py +++ b/couchpotato/core/plugins/wizard/__init__.py @@ -1,6 +1,7 @@ from .main import Wizard -def start(): + +def autoload(): return Wizard() config = [{ diff --git a/couchpotato/core/plugins/wizard/static/wizard.css b/couchpotato/core/plugins/wizard/static/wizard.css deleted file mode 100644 index 8d50d9de08..0000000000 --- a/couchpotato/core/plugins/wizard/static/wizard.css +++ /dev/null @@ -1,88 +0,0 @@ -.page.wizard .uniForm { - width: 80%; - margin: 0 auto 30px; -} - -.page.wizard h1 { - padding: 10px 30px; - margin: 0; - display: block; - font-size: 30px; - margin-top: 80px; -} - -.page.wizard .description { - padding: 10px 30px; - font-size: 18px; - display: block; -} - -.page.wizard .tab_wrapper { - background: #5c697b; - padding: 10px 0; - font-size: 18px; - position: fixed; - top: 0; - margin: 0; - width: 100%; - min-width: 960px; - left: 0; - z-index: 2; - box-shadow: 0 0 50px rgba(0,0,0,0.55); -} - - .page.wizard .tab_wrapper .tabs { - text-align: center; - padding: 0; - margin: 0; - display: block; - } - - .page.wizard .tabs li { - display: inline-block; - } - .page.wizard .tabs li a { - padding: 20px 10px; - } - - .page.wizard .tab_wrapper .pointer { - border-right: 10px solid transparent; - border-left: 10px solid transparent; - border-top: 10px 
solid #5c697b; - display: block; - position: absolute; - top: 44px; - } - -.page.wizard .tab_content { - margin: 20px 0 160px; -} - -.page.wizard form > div { - min-height: 300px; -} -.page.wizard .wgroup_finish { - height: 300px; -} - .page.wizard .wgroup_finish h1 { - text-align: center; - } - .page.wizard .wgroup_finish .wizard_support, - .page.wizard .wgroup_finish .description { - font-size: 25px; - line-height: 120%; - margin: 20px 0; - text-align: center; - } - - .page.wizard .button.green { - padding: 20px; - font-size: 25px; - margin: 10px 30px 80px; - display: block; - text-align: center; - } - -.page.wizard .tab_nzb_providers { - margin: 20px 0 0 0; -} diff --git a/couchpotato/core/plugins/wizard/static/wizard.js b/couchpotato/core/plugins/wizard/static/wizard.js index eb41cb591c..78b9e76029 100644 --- a/couchpotato/core/plugins/wizard/static/wizard.js +++ b/couchpotato/core/plugins/wizard/static/wizard.js @@ -1,263 +1,200 @@ -Page.Wizard = new Class({ - - Extends: Page.Settings, - - name: 'wizard', - has_tab: false, - wizard_only: true, - - headers: { - 'welcome': { - 'title': 'Welcome to the new CouchPotato', - 'description': 'To get started, fill in each of the following settings as much as you can.
    Maybe first start with importing your movies from the previous CouchPotato', - 'content': new Element('div', { - 'styles': { - 'margin': '0 0 0 30px' - } - }).adopt( - new Element('div', { - 'html': 'Select the data.db. It should be in your CouchPotato root directory.' - }), - self.import_iframe = new Element('iframe', { - 'styles': { - 'height': 40, - 'width': 300, - 'border': 0, - 'overflow': 'hidden' - } - }) - ), - 'event': function(){ - self.import_iframe.set('src', Api.createUrl('v1.import')) - } - }, - 'general': { - 'title': 'General', - 'description': 'If you want to access CP from outside your local network, you better secure it a bit with a username & password.' - }, - 'downloaders': { - 'title': 'What download apps are you using?', - 'description': 'CP needs an external download app to work with. Choose one below. For more downloaders check settings after you have filled in the wizard. If your download app isn\'t in the list, use the default Blackhole.' - }, - 'providers': { - 'title': 'Are you registered at any of these sites?', - 'description': 'CP uses these sites to search for movies. A few free are enabled by default, but it\'s always better to have a few more. Check settings for the full list of available providers.' - }, - 'renamer': { - 'title': 'Move & rename the movies after downloading?', - 'description': 'The coolest part of CP is that it can move and organize your downloaded movies automagically. Check settings and you can even download trailers, subtitles and other data when it has finished downloading. It\'s awesome!' - }, - 'automation': { - 'title': 'Easily add movies to your wanted list!', - 'description': 'You can easily add movies from your favorite movie site, like IMDB, Rotten Tomatoes, Apple Trailers and more. Just install the userscript or drag the bookmarklet to your browsers bookmarks.' + - '
    Once installed, just click the bookmarklet on a movie page and watch the magic happen ;)', - 'content': function(){ - return App.createUserscriptButtons().setStyles({ - 'background-image': "url('"+Api.createUrl('static/userscript/userscript.png')+"')" - }) - } - }, - 'finish': { - 'title': 'Finishing Up', - 'description': 'Are you done? Did you fill in everything as much as possible?' + - '
    Be sure to check the settings to see what more CP can do!

    ' + - '
    After you\'ve used CP for a while, and you like it (which of course you will), consider supporting CP. Maybe even by writing some code.
    Or by getting a subscription at Usenet Server or Newshosting.
    ', - 'content': new Element('div').adopt( - new Element('a.button.green', { - 'styles': { - 'margin-top': 20 - }, - 'text': 'I\'m ready to start the awesomeness, wow this button is big and green!', - 'events': { - 'click': function(e){ - (e).preventDefault(); - Api.request('settings.save', { - 'data': { - 'section': 'core', - 'name': 'show_wizard', - 'value': 0 - }, - 'useSpinner': true, - 'spinnerOptions': { - 'target': self.el - }, - 'onComplete': function(){ - window.location = App.createUrl(); - } - }); - } - } - }) - ) - } - }, - groups: ['welcome', 'general', 'downloaders', 'searcher', 'providers', 'renamer', 'automation', 'finish'], - - open: function(action, params){ - var self = this; - - if(!self.initialized){ - App.fireEvent('unload'); - App.getBlock('header').hide(); - - self.parent(action, params); - - self.addEvent('create', function(){ - self.order(); - }); - - self.initialized = true; - - self.scroll = new Fx.Scroll(document.body, { - 'transition': 'quint:in:out' - }); - } - else - (function(){ - var sc = self.el.getElement('.wgroup_'+action); - self.scroll.start(0, sc.getCoordinates().top-80); - }).delay(1) - }, - - order: function(){ - var self = this; - - var form = self.el.getElement('.uniForm'); - var tabs = self.el.getElement('.tabs'); - - self.groups.each(function(group, nr){ - - if(self.headers[group]){ - group_container = new Element('.wgroup_'+group, { - 'styles': { - 'opacity': 0.2 - }, - 'tween': { - 'duration': 350 - } - }); - - if(self.headers[group].include){ - self.headers[group].include.each(function(inc){ - group_container.addClass('wgroup_'+inc); - }) - } - - var content = self.headers[group].content - group_container.adopt( - new Element('h1', { - 'text': self.headers[group].title - }), - self.headers[group].description ? new Element('span.description', { - 'html': self.headers[group].description - }) : null, - content ? (typeOf(content) == 'function' ? 
content() : content) : null - ).inject(form); - } - - var tab_navigation = tabs.getElement('.t_'+group); - - if(!tab_navigation && self.headers[group] && self.headers[group].include){ - tab_navigation = [] - self.headers[group].include.each(function(inc){ - tab_navigation.include(tabs.getElement('.t_'+inc)); - }) - } - - if(tab_navigation && group_container){ - tabs.adopt(tab_navigation); // Tab navigation - - if(self.headers[group] && self.headers[group].include){ - - self.headers[group].include.each(function(inc){ - self.el.getElement('.tab_'+inc).inject(group_container); - }) - - new Element('li.t_'+group).adopt( - new Element('a', { - 'href': App.createUrl('wizard/'+group), - 'text': (self.headers[group].label || group).capitalize() - }) - ).inject(tabs); - - } - else - self.el.getElement('.tab_'+group).inject(group_container); // Tab content - - if(tab_navigation.getElement && self.headers[group]){ - var a = tab_navigation.getElement('a'); - a.set('text', (self.headers[group].label || group).capitalize()); - var url_split = a.get('href').split('wizard')[1].split('/'); - if(url_split.length > 3) - a.set('href', a.get('href').replace(url_split[url_split.length-3]+'/', '')); - - } - } - else { - new Element('li.t_'+group).adopt( - new Element('a', { - 'href': App.createUrl('wizard/'+group), - 'text': (self.headers[group].label || group).capitalize() - }) - ).inject(tabs); - } - - if(self.headers[group] && self.headers[group].event) - self.headers[group].event.call() - }); - - // Remove toggle - self.el.getElement('.advanced_toggle').destroy(); - - // Hide retention - self.el.getElement('.tab_searcher').hide(); - self.el.getElement('.t_searcher').hide(); - - // Add pointer - new Element('.tab_wrapper').wraps(tabs).adopt( - self.pointer = new Element('.pointer', { - 'tween': { - 'transition': 'quint:in:out' - } - }) - ); - - // Add nav - var minimum = self.el.getSize().y-window.getSize().y; - self.groups.each(function(group, nr){ - - var g = 
self.el.getElement('.wgroup_'+group); - if(!g || !g.isVisible()) return; - var t = self.el.getElement('.t_'+group); - if(!t) return; - - var func = function(){ - var ct = t.getCoordinates(); - self.pointer.tween('left', ct.left+(ct.width/2)-(self.pointer.getWidth()/2)); - g.tween('opacity', 1); - } - - if(nr == 0) - func(); - - - var ss = new ScrollSpy( { - min: function(){ - var c = g.getCoordinates(); - var top = c.top-(window.getSize().y/2); - return top > minimum ? minimum : top - }, - max: function(){ - var c = g.getCoordinates(); - return c.top+(c.height/2) - }, - onEnter: func, - onLeave: function(){ - g.tween('opacity', 0.2) - } - }); - }); - - } - -}); \ No newline at end of file +Page.Wizard = new Class({ + + Extends: Page.Settings, + + order: 70, + name: 'wizard', + current: 'welcome', + has_tab: false, + wizard_only: true, + + headers: { + 'welcome': { + 'title': 'Welcome to the new CouchPotato', + 'description': 'To get started, fill in each of the following settings as much as you can.', + 'content': new Element('div', { + 'styles': { + 'margin': '0 0 0 30px' + } + }) + }, + 'general': { + 'title': 'General', + 'description': 'If you want to access CP from outside your local network, you better secure it a bit with a username & password.' + }, + 'downloaders': { + 'title': 'What download apps are you using?', + 'description': 'CP needs an external download app to work with. Choose one below. For more downloaders check settings after you have filled in the wizard. If your download app isn\'t in the list, use the default Blackhole.' + }, + 'searcher': { + 'label': 'Providers', + 'title': 'Are you registered at any of these sites?', + 'description': 'CP uses these sites to search for movies. A few free are enabled by default, but it\'s always better to have more.' + }, + 'renamer': { + 'title': 'Move & rename the movies after downloading?', + 'description': 'The coolest part of CP is that it can move and organize your downloaded movies automagically. 
Check settings and you can even download trailers, subtitles and other data when it has finished downloading. It\'s awesome!' + }, + 'automation': { + 'title': 'Easily add movies to your wanted list!', + 'description': 'You can easily add movies from your favorite movie site, like IMDB, Rotten Tomatoes, Apple Trailers and more. Just install the extension or drag the bookmarklet to your bookmarks.' + + '
    Once installed, just click the bookmarklet on a movie page and watch the magic happen ;)', + 'content': function(){ + return App.createUserscriptButtons(); + } + }, + 'finish': { + 'title': 'Finishing Up', + 'description': 'Are you done? Did you fill in everything as much as possible?' + + '
    Be sure to check the settings to see what more CP can do!

    ' + + '
    After you\'ve used CP for a while, and you like it (which of course you will), consider supporting CP. Maybe even by writing some code.
    Or by getting a subscription at Usenet Server or Newshosting.
    ', + 'content': new Element('div').grab( + new Element('a.button.green', { + 'styles': { + 'margin-top': 20 + }, + 'text': 'I\'m ready to start the awesomeness!', + 'events': { + 'click': function(e){ + (e).preventDefault(); + Api.request('settings.save', { + 'data': { + 'section': 'core', + 'name': 'show_wizard', + 'value': 0 + }, + 'useSpinner': true, + 'spinnerOptions': { + 'target': self.el + }, + 'onComplete': function(){ + window.location = App.createUrl('wanted'); + } + }); + } + } + }) + ) + } + }, + + groups: ['welcome', 'general', 'downloaders', 'searcher', 'renamer', 'automation', 'finish'], + + open: function(action, params){ + var self = this; + + if(!self.initialized){ + App.fireEvent('unload'); + App.getBlock('header').hide(); + + self.parent(action, params); + + self.el.addClass('settings'); + + self.addEvent('create', function(){ + self.orderGroups(); + }); + + self.initialized = true; + + self.scroll = new Fx.Scroll(document.body, { + 'transition': 'quint:in:out' + }); + } + else + requestTimeout(function(){ + var sc = self.el.getElement('.wgroup_'+action); + self.scroll.start(0, sc.getCoordinates().top-80); + }, 1); + }, + + orderGroups: function(){ + var self = this; + + var form = self.el.getElement('.uniForm'); + var tabs = self.el.getElement('.tabs').hide(); + + self.groups.each(function(group){ + + var group_container; + if(self.headers[group]){ + group_container = new Element('.wgroup_'+group); + + if(self.headers[group].include){ + self.headers[group].include.each(function(inc){ + group_container.addClass('wgroup_'+inc); + }); + } + + var content = self.headers[group].content; + group_container.adopt( + new Element('h1', { + 'text': self.headers[group].title + }), + self.headers[group].description ? new Element('span.description', { + 'html': self.headers[group].description + }) : null, + content ? (typeOf(content) == 'function' ? 
content() : content) : null + ).inject(form); + } + + var tab_navigation = tabs.getElement('.t_'+group); + + if(!tab_navigation && self.headers[group] && self.headers[group].include){ + tab_navigation = []; + self.headers[group].include.each(function(inc){ + tab_navigation.include(tabs.getElement('.t_'+inc)); + }); + } + + if(tab_navigation && group_container){ + tabs.adopt(tab_navigation); // Tab navigation + + if(self.headers[group] && self.headers[group].include){ + + self.headers[group].include.each(function(inc){ + self.el.getElement('.tab_'+inc).inject(group_container); + }); + + new Element('li.t_'+group).grab( + new Element('a', { + 'href': App.createUrl('wizard/'+group), + 'text': (self.headers[group].label || group).capitalize() + }) + ).inject(tabs); + + } + else + self.el.getElement('.tab_'+group).inject(group_container); // Tab content + + if(tab_navigation.getElement && self.headers[group]){ + var a = tab_navigation.getElement('a'); + a.set('text', (self.headers[group].label || group).capitalize()); + var url_split = a.get('href').split('wizard')[1].split('/'); + if(url_split.length > 3) + a.set('href', a.get('href').replace(url_split[url_split.length-3]+'/', '')); + + } + } + else { + new Element('li.t_'+group).grab( + new Element('a', { + 'href': App.createUrl('wizard/'+group), + 'text': (self.headers[group].label || group).capitalize() + }) + ).inject(tabs); + } + + if(self.headers[group] && self.headers[group].event) + self.headers[group].event.call(); + }); + + // Remove toggle + self.el.getElement('.advanced_toggle').destroy(); + + // Hide retention + self.el.getElement('.section_nzb').hide(); + + } + +}); diff --git a/couchpotato/core/plugins/wizard/static/wizard.scss b/couchpotato/core/plugins/wizard/static/wizard.scss new file mode 100644 index 0000000000..20b01d8019 --- /dev/null +++ b/couchpotato/core/plugins/wizard/static/wizard.scss @@ -0,0 +1,63 @@ +@import "_mixins"; + +.page.wizard { + top: 0 !important; + + .navigation.navigation { + 
display: none; + } + + .tab_content.tab_content { + display: block; + + fieldset { + + .ctrlHolder, h2 { + padding: $padding/4; + } + + } + } + + h1 { + padding: 10px 0; + display: block; + font-size: 30px; + margin: $header_height 5px 0; + font-weight: 300; + } + + .description { + padding: $padding/2 $padding/4; + font-size: 1.45em; + line-height: 1.4em; + display: block; + } + + form.uniForm.containers { + margin: 0; + } + + form > div { + min-height: 300px; + max-width: $mq-desktop; + padding: $padding; + margin: 0 auto; + + @include media-phablet { + padding: $padding/2; + } + } + + .button.green { + padding: 20px; + font-size: 25px; + margin: 10px 0 $header_height; + display: inline-block; + } + + .tab_nzb_providers { + margin: 20px 0 0 0; + } + +} diff --git a/couchpotato/core/providers/automation/__init__.py b/couchpotato/core/providers/automation/__init__.py deleted file mode 100644 index a217948a42..0000000000 --- a/couchpotato/core/providers/automation/__init__.py +++ /dev/null @@ -1,21 +0,0 @@ -config = { - 'name': 'automation_providers', - 'groups': [ - { - 'label': 'Watchlists', - 'description': 'Check watchlists for new movies', - 'type': 'list', - 'name': 'watchlist_providers', - 'tab': 'automation', - 'options': [], - }, - { - 'label': 'Automated', - 'description': 'Uses minimal requirements', - 'type': 'list', - 'name': 'automation_providers', - 'tab': 'automation', - 'options': [], - }, - ], -} diff --git a/couchpotato/core/providers/automation/base.py b/couchpotato/core/providers/automation/base.py deleted file mode 100644 index 8d08f9136d..0000000000 --- a/couchpotato/core/providers/automation/base.py +++ /dev/null @@ -1,74 +0,0 @@ -from couchpotato.core.event import addEvent, fireEvent -from couchpotato.core.logger import CPLog -from couchpotato.core.providers.base import Provider -from couchpotato.environment import Env -import time - -log = CPLog(__name__) - - -class Automation(Provider): - - enabled_option = 'automation_enabled' - - 
interval = 86400 - last_checked = 0 - - def __init__(self): - addEvent('automation.get_movies', self._getMovies) - - def _getMovies(self): - - if self.isDisabled(): - return - - if not self.canCheck(): - log.debug('Just checked, skipping %s', self.getName()) - return [] - - self.last_checked = time.time() - - return self.getIMDBids() - - def search(self, name, year = None, imdb_only = False): - - prop_name = 'automation.cached.%s.%s' % (name, year) - cached_imdb = Env.prop(prop_name, default = False) - if cached_imdb and imdb_only: - return cached_imdb - - result = fireEvent('movie.search', q = '%s %s' % (name, year if year else ''), limit = 1, merge = True) - - if len(result) > 0: - if imdb_only and result[0].get('imdb'): - Env.prop(prop_name, result[0].get('imdb')) - - return result[0].get('imdb') if imdb_only else result[0] - else: - return None - - def isMinimalMovie(self, movie): - if not movie.get('rating'): - return False - - if movie['rating'] and movie['rating'].get('imdb'): - movie['votes'] = movie['rating']['imdb'][1] - movie['rating'] = movie['rating']['imdb'][0] - - for minimal_type in ['year', 'rating', 'votes']: - type_value = movie.get(minimal_type, 0) - type_min = self.getMinimal(minimal_type) - if type_value < type_min: - log.info('%s too low for %s, need %s has %s', (minimal_type, movie['imdb'], type_min, type_value)) - return False - - return True - - def getMinimal(self, min_type): - return Env.setting(min_type, 'automation') - - def getIMDBids(self): - return [] - - def canCheck(self): - return time.time() > self.last_checked + self.interval diff --git a/couchpotato/core/providers/automation/bluray/__init__.py b/couchpotato/core/providers/automation/bluray/__init__.py deleted file mode 100644 index b916b0af60..0000000000 --- a/couchpotato/core/providers/automation/bluray/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -from .main import Bluray - -def start(): - return Bluray() - -config = [{ - 'name': 'bluray', - 'groups': [ - { - 'tab': 
'automation', - 'list': 'automation_providers', - 'name': 'bluray_automation', - 'label': 'Blu-ray.com', - 'description': 'Imports movies from blu-ray.com. (uses minimal requirements)', - 'options': [ - { - 'name': 'automation_enabled', - 'default': False, - 'type': 'enabler', - }, - ], - }, - ], -}] diff --git a/couchpotato/core/providers/automation/bluray/main.py b/couchpotato/core/providers/automation/bluray/main.py deleted file mode 100644 index 235a1e5f95..0000000000 --- a/couchpotato/core/providers/automation/bluray/main.py +++ /dev/null @@ -1,36 +0,0 @@ -from couchpotato.core.helpers.rss import RSS -from couchpotato.core.helpers.variable import tryInt -from couchpotato.core.logger import CPLog -from couchpotato.core.providers.automation.base import Automation - -log = CPLog(__name__) - - -class Bluray(Automation, RSS): - - interval = 1800 - rss_url = 'http://www.blu-ray.com/rss/newreleasesfeed.xml' - - def getIMDBids(self): - - movies = [] - - rss_movies = self.getRSSData(self.rss_url) - - for movie in rss_movies: - name = self.getTextElement(movie, 'title').lower().split('blu-ray')[0].strip('(').rstrip() - year = self.getTextElement(movie, 'description').split('|')[1].strip('(').strip() - - if not name.find('/') == -1: # make sure it is not a double movie release - continue - - if tryInt(year) < self.getMinimal('year'): - continue - - imdb = self.search(name, year) - - if imdb: - if self.isMinimalMovie(imdb): - movies.append(imdb['imdb']) - - return movies diff --git a/couchpotato/core/providers/automation/cp/__init__.py b/couchpotato/core/providers/automation/cp/__init__.py deleted file mode 100644 index a4b55a83c5..0000000000 --- a/couchpotato/core/providers/automation/cp/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -from .main import CP - -def start(): - return CP() - -config = [] diff --git a/couchpotato/core/providers/automation/cp/main.py b/couchpotato/core/providers/automation/cp/main.py deleted file mode 100644 index 22b7942a5a..0000000000 --- 
a/couchpotato/core/providers/automation/cp/main.py +++ /dev/null @@ -1,11 +0,0 @@ -from couchpotato.core.logger import CPLog -from couchpotato.core.providers.automation.base import Automation - -log = CPLog(__name__) - - -class CP(Automation): - - def getMovies(self): - - return [] diff --git a/couchpotato/core/providers/automation/goodfilms/__init__.py b/couchpotato/core/providers/automation/goodfilms/__init__.py deleted file mode 100644 index 795e21da49..0000000000 --- a/couchpotato/core/providers/automation/goodfilms/__init__.py +++ /dev/null @@ -1,28 +0,0 @@ -from .main import Goodfilms - -def start(): - return Goodfilms() - -config = [{ - 'name': 'goodfilms', - 'groups': [ - { - 'tab': 'automation', - 'list': 'watchlist_providers', - 'name': 'goodfilms_automation', - 'label': 'Goodfilms', - 'description': 'import movies from your Goodfilms queue', - 'options': [ - { - 'name': 'automation_enabled', - 'default': False, - 'type': 'enabler', - }, - { - 'name': 'automation_username', - 'label': 'Username', - }, - ], - }, - ], -}] \ No newline at end of file diff --git a/couchpotato/core/providers/automation/goodfilms/main.py b/couchpotato/core/providers/automation/goodfilms/main.py deleted file mode 100644 index dd4b1aef64..0000000000 --- a/couchpotato/core/providers/automation/goodfilms/main.py +++ /dev/null @@ -1,36 +0,0 @@ -from couchpotato.core.logger import CPLog -from couchpotato.core.providers.automation.base import Automation -from bs4 import BeautifulSoup - -log = CPLog(__name__) - - -class Goodfilms(Automation): - - url = 'http://goodfil.ms/%s/queue' - - def getIMDBids(self): - - if not self.conf('automation_username'): - log.error('Please fill in your username') - return [] - - movies = [] - - for movie in self.getWatchlist(): - imdb_id = self.search(movie.get('title'), movie.get('year'), imdb_only = True) - movies.append(imdb_id) - - return movies - - def getWatchlist(self): - - url = self.url % self.conf('automation_username') - soup = 
BeautifulSoup(self.getHTMLData(url)) - - movies = [] - - for movie in soup.find_all('div', attrs = { 'class': 'movie', 'data-film-title': True }): - movies.append({ 'title': movie['data-film-title'], 'year': movie['data-film-year'] }) - - return movies diff --git a/couchpotato/core/providers/automation/imdb/__init__.py b/couchpotato/core/providers/automation/imdb/__init__.py deleted file mode 100644 index 8a91d42e30..0000000000 --- a/couchpotato/core/providers/automation/imdb/__init__.py +++ /dev/null @@ -1,34 +0,0 @@ -from .main import IMDB - -def start(): - return IMDB() - -config = [{ - 'name': 'imdb', - 'groups': [ - { - 'tab': 'automation', - 'list': 'watchlist_providers', - 'name': 'imdb_automation', - 'label': 'IMDB', - 'description': 'From any public IMDB watchlists. Url should be the RSS link.', - 'options': [ - { - 'name': 'automation_enabled', - 'default': False, - 'type': 'enabler', - }, - { - 'name': 'automation_urls_use', - 'label': 'Use', - }, - { - 'name': 'automation_urls', - 'label': 'url', - 'type': 'combined', - 'combine': ['automation_urls_use', 'automation_urls'], - }, - ], - }, - ], -}] diff --git a/couchpotato/core/providers/automation/imdb/main.py b/couchpotato/core/providers/automation/imdb/main.py deleted file mode 100644 index 75a2d75c52..0000000000 --- a/couchpotato/core/providers/automation/imdb/main.py +++ /dev/null @@ -1,38 +0,0 @@ -from couchpotato.core.helpers.rss import RSS -from couchpotato.core.helpers.variable import getImdb, splitString, tryInt -from couchpotato.core.logger import CPLog -from couchpotato.core.providers.automation.base import Automation -import traceback - -log = CPLog(__name__) - - -class IMDB(Automation, RSS): - - interval = 1800 - - def getIMDBids(self): - - movies = [] - - enablers = [tryInt(x) for x in splitString(self.conf('automation_urls_use'))] - urls = splitString(self.conf('automation_urls')) - - index = -1 - for url in urls: - - index += 1 - if not enablers[index]: - continue - - try: - rss_data = 
self.getHTMLData(url) - imdbs = getImdb(rss_data, multiple = True) if rss_data else [] - - for imdb in imdbs: - movies.append(imdb) - - except: - log.error('Failed loading IMDB watchlist: %s %s', (url, traceback.format_exc())) - - return movies diff --git a/couchpotato/core/providers/automation/itunes/__init__.py b/couchpotato/core/providers/automation/itunes/__init__.py deleted file mode 100644 index b5c565f6a2..0000000000 --- a/couchpotato/core/providers/automation/itunes/__init__.py +++ /dev/null @@ -1,36 +0,0 @@ -from .main import ITunes - -def start(): - return ITunes() - -config = [{ - 'name': 'itunes', - 'groups': [ - { - 'tab': 'automation', - 'list': 'automation_providers', - 'name': 'itunes_automation', - 'label': 'iTunes', - 'description': 'From any iTunes Store feed. Url should be the RSS link. (uses minimal requirements)', - 'options': [ - { - 'name': 'automation_enabled', - 'default': False, - 'type': 'enabler', - }, - { - 'name': 'automation_urls_use', - 'label': 'Use', - 'default': ',', - }, - { - 'name': 'automation_urls', - 'label': 'url', - 'type': 'combined', - 'combine': ['automation_urls_use', 'automation_urls'], - 'default': 'https://itunes.apple.com/rss/topmovies/limit=25/xml,', - }, - ], - }, - ], -}] diff --git a/couchpotato/core/providers/automation/itunes/main.py b/couchpotato/core/providers/automation/itunes/main.py deleted file mode 100644 index 14ca2a8234..0000000000 --- a/couchpotato/core/providers/automation/itunes/main.py +++ /dev/null @@ -1,63 +0,0 @@ -from couchpotato.core.helpers.rss import RSS -from couchpotato.core.helpers.variable import md5, splitString, tryInt -from couchpotato.core.logger import CPLog -from couchpotato.core.providers.automation.base import Automation -from xml.etree.ElementTree import QName -import datetime -import traceback -import xml.etree.ElementTree as XMLTree - -log = CPLog(__name__) - - -class ITunes(Automation, RSS): - - interval = 1800 - - def getIMDBids(self): - - if self.isDisabled(): - return - 
- movies = [] - - enablers = [tryInt(x) for x in splitString(self.conf('automation_urls_use'))] - urls = splitString(self.conf('automation_urls')) - - namespace = 'http://www.w3.org/2005/Atom' - namespaceIM = 'http://itunes.apple.com/rss' - - index = -1 - for url in urls: - - index += 1 - if not enablers[index]: - continue - - try: - cache_key = 'itunes.rss.%s' % md5(url) - rss_data = self.getCache(cache_key, url) - - data = XMLTree.fromstring(rss_data) - - if data is not None: - entry_tag = str(QName(namespace, 'entry')) - rss_movies = self.getElements(data, entry_tag) - - for movie in rss_movies: - name_tag = str(QName(namespaceIM, 'name')) - name = self.getTextElement(movie, name_tag) - - releaseDate_tag = str(QName(namespaceIM, 'releaseDate')) - releaseDateText = self.getTextElement(movie, releaseDate_tag) - year = datetime.datetime.strptime(releaseDateText, '%Y-%m-%dT00:00:00-07:00').strftime("%Y") - - imdb = self.search(name, year) - - if imdb and self.isMinimalMovie(imdb): - movies.append(imdb['imdb']) - - except: - log.error('Failed loading iTunes rss feed: %s %s', (url, traceback.format_exc())) - - return movies diff --git a/couchpotato/core/providers/automation/kinepolis/__init__.py b/couchpotato/core/providers/automation/kinepolis/__init__.py deleted file mode 100644 index d3b8e89852..0000000000 --- a/couchpotato/core/providers/automation/kinepolis/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -from .main import Kinepolis - -def start(): - return Kinepolis() - -config = [{ - 'name': 'kinepolis', - 'groups': [ - { - 'tab': 'automation', - 'list': 'automation_providers', - 'name': 'kinepolis_automation', - 'label': 'Kinepolis', - 'description': 'Imports movies from the current top 10 of kinepolis. 
(uses minimal requirements)', - 'options': [ - { - 'name': 'automation_enabled', - 'default': False, - 'type': 'enabler', - }, - ], - }, - ], -}] diff --git a/couchpotato/core/providers/automation/kinepolis/main.py b/couchpotato/core/providers/automation/kinepolis/main.py deleted file mode 100644 index 4158d4885a..0000000000 --- a/couchpotato/core/providers/automation/kinepolis/main.py +++ /dev/null @@ -1,29 +0,0 @@ -from couchpotato.core.helpers.rss import RSS -from couchpotato.core.logger import CPLog -from couchpotato.core.providers.automation.base import Automation -import datetime - -log = CPLog(__name__) - - -class Kinepolis(Automation, RSS): - - interval = 1800 - rss_url = 'http://kinepolis.be/nl/top10-box-office/feed' - - def getIMDBids(self): - - movies = [] - - rss_movies = self.getRSSData(self.rss_url) - - for movie in rss_movies: - name = self.getTextElement(movie, 'title') - year = datetime.datetime.now().strftime('%Y') - - imdb = self.search(name, year) - - if imdb and self.isMinimalMovie(imdb): - movies.append(imdb['imdb']) - - return movies diff --git a/couchpotato/core/providers/automation/moviemeter/__init__.py b/couchpotato/core/providers/automation/moviemeter/__init__.py deleted file mode 100644 index 773bed45ef..0000000000 --- a/couchpotato/core/providers/automation/moviemeter/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -from .main import Moviemeter - -def start(): - return Moviemeter() - -config = [{ - 'name': 'moviemeter', - 'groups': [ - { - 'tab': 'automation', - 'list': 'automation_providers', - 'name': 'moviemeter_automation', - 'label': 'Moviemeter', - 'description': 'Imports movies from the current top 10 of moviemeter.nl. 
(uses minimal requirements)', - 'options': [ - { - 'name': 'automation_enabled', - 'default': False, - 'type': 'enabler', - }, - ], - }, - ], -}] diff --git a/couchpotato/core/providers/automation/moviemeter/main.py b/couchpotato/core/providers/automation/moviemeter/main.py deleted file mode 100644 index dae764bf56..0000000000 --- a/couchpotato/core/providers/automation/moviemeter/main.py +++ /dev/null @@ -1,28 +0,0 @@ -from couchpotato.core.event import fireEvent -from couchpotato.core.helpers.rss import RSS -from couchpotato.core.logger import CPLog -from couchpotato.core.providers.automation.base import Automation - -log = CPLog(__name__) - - -class Moviemeter(Automation, RSS): - - interval = 1800 - rss_url = 'http://www.moviemeter.nl/rss/cinema' - - def getIMDBids(self): - - movies = [] - - rss_movies = self.getRSSData(self.rss_url) - - for movie in rss_movies: - - name_year = fireEvent('scanner.name_year', self.getTextElement(movie, 'title'), single = True) - imdb = self.search(name_year.get('name'), name_year.get('year')) - - if imdb and self.isMinimalMovie(imdb): - movies.append(imdb['imdb']) - - return movies diff --git a/couchpotato/core/providers/automation/movies_io/__init__.py b/couchpotato/core/providers/automation/movies_io/__init__.py deleted file mode 100644 index 9b28093060..0000000000 --- a/couchpotato/core/providers/automation/movies_io/__init__.py +++ /dev/null @@ -1,34 +0,0 @@ -from .main import MoviesIO - -def start(): - return MoviesIO() - -config = [{ - 'name': 'moviesio', - 'groups': [ - { - 'tab': 'automation', - 'list': 'watchlist_providers', - 'name': 'moviesio', - 'label': 'Movies.IO', - 'description': 'Imports movies from Movies.io RSS watchlists', - 'options': [ - { - 'name': 'automation_enabled', - 'default': False, - 'type': 'enabler', - }, - { - 'name': 'automation_urls_use', - 'label': 'Use', - }, - { - 'name': 'automation_urls', - 'label': 'url', - 'type': 'combined', - 'combine': ['automation_urls_use', 'automation_urls'], - }, 
- ], - }, - ], -}] diff --git a/couchpotato/core/providers/automation/movies_io/main.py b/couchpotato/core/providers/automation/movies_io/main.py deleted file mode 100644 index 0737e2e696..0000000000 --- a/couchpotato/core/providers/automation/movies_io/main.py +++ /dev/null @@ -1,39 +0,0 @@ -from couchpotato.core.event import fireEvent -from couchpotato.core.helpers.rss import RSS -from couchpotato.core.helpers.variable import tryInt, splitString -from couchpotato.core.logger import CPLog -from couchpotato.core.providers.automation.base import Automation - -log = CPLog(__name__) - - -class MoviesIO(Automation, RSS): - - interval = 1800 - - def getIMDBids(self): - - movies = [] - - enablers = [tryInt(x) for x in splitString(self.conf('automation_urls_use'))] - - index = -1 - for rss_url in splitString(self.conf('automation_urls')): - - index += 1 - if not enablers[index]: - continue - - rss_movies = self.getRSSData(rss_url, headers = {'Referer': ''}) - - for movie in rss_movies: - - nameyear = fireEvent('scanner.name_year', self.getTextElement(movie, 'title'), single = True) - imdb = self.search(nameyear.get('name'), nameyear.get('year'), imdb_only = True) - - if not imdb: - continue - - movies.append(imdb) - - return movies diff --git a/couchpotato/core/providers/automation/rottentomatoes/__init__.py b/couchpotato/core/providers/automation/rottentomatoes/__init__.py deleted file mode 100644 index dd96fe456e..0000000000 --- a/couchpotato/core/providers/automation/rottentomatoes/__init__.py +++ /dev/null @@ -1,29 +0,0 @@ -from .main import Rottentomatoes - -def start(): - return Rottentomatoes() - -config = [{ - 'name': 'rottentomatoes', - 'groups': [ - { - 'tab': 'automation', - 'list': 'automation_providers', - 'name': 'rottentomatoes_automation', - 'label': 'Rottentomatoes', - 'description': 'Imports movies from the rottentomatoes "in theaters"-feed.', - 'options': [ - { - 'name': 'automation_enabled', - 'default': False, - 'type': 'enabler', - }, - { - 'name': 
'tomatometer_percent', - 'default': '80', - 'label': 'Tomatometer' - } - ], - }, - ], -}] diff --git a/couchpotato/core/providers/automation/rottentomatoes/main.py b/couchpotato/core/providers/automation/rottentomatoes/main.py deleted file mode 100644 index 053b79e839..0000000000 --- a/couchpotato/core/providers/automation/rottentomatoes/main.py +++ /dev/null @@ -1,48 +0,0 @@ -from couchpotato.core.helpers.rss import RSS -from couchpotato.core.helpers.variable import tryInt -from couchpotato.core.logger import CPLog -from couchpotato.core.providers.automation.base import Automation -from xml.etree.ElementTree import QName -import datetime -import re - -log = CPLog(__name__) - -class Rottentomatoes(Automation, RSS): - - interval = 1800 - urls = { - 'namespace': 'http://www.rottentomatoes.com/xmlns/rtmovie/', - 'theater': 'http://www.rottentomatoes.com/syndication/rss/in_theaters.xml', - } - - def getIMDBids(self): - - movies = [] - - rss_movies = self.getRSSData(self.urls['theater']) - rating_tag = str(QName(self.urls['namespace'], 'tomatometer_percent')) - - for movie in rss_movies: - - value = self.getTextElement(movie, "title") - result = re.search('(?<=%\s).*', value) - - if result: - - log.info2('Something smells...') - rating = tryInt(self.getTextElement(movie, rating_tag)) - name = result.group(0) - - if rating < tryInt(self.conf('tomatometer_percent')): - log.info2('%s seems to be rotten...' 
% name) - else: - - log.info2('Found %s fresh enough movies, enqueuing: %s' % (rating, name)) - year = datetime.datetime.now().strftime("%Y") - imdb = self.search(name, year) - - if imdb: - movies.append(imdb['imdb']) - - return movies diff --git a/couchpotato/core/providers/automation/trakt/__init__.py b/couchpotato/core/providers/automation/trakt/__init__.py deleted file mode 100644 index cbaaece3ec..0000000000 --- a/couchpotato/core/providers/automation/trakt/__init__.py +++ /dev/null @@ -1,38 +0,0 @@ -from .main import Trakt - -def start(): - return Trakt() - -config = [{ - 'name': 'trakt', - 'groups': [ - { - 'tab': 'automation', - 'list': 'watchlist_providers', - 'name': 'trakt_automation', - 'label': 'Trakt', - 'description': 'import movies from your own watchlist', - 'options': [ - { - 'name': 'automation_enabled', - 'default': False, - 'type': 'enabler', - }, - { - 'name': 'automation_api_key', - 'label': 'Apikey', - }, - { - 'name': 'automation_username', - 'label': 'Username', - }, - { - 'name': 'automation_password', - 'label': 'Password', - 'type': 'password', - 'description': 'When you have "Protect my data" checked on trakt.', - }, - ], - }, - ], -}] diff --git a/couchpotato/core/providers/automation/trakt/main.py b/couchpotato/core/providers/automation/trakt/main.py deleted file mode 100644 index 0109daf382..0000000000 --- a/couchpotato/core/providers/automation/trakt/main.py +++ /dev/null @@ -1,44 +0,0 @@ -from couchpotato.core.event import addEvent -from couchpotato.core.helpers.variable import sha1 -from couchpotato.core.logger import CPLog -from couchpotato.core.providers.automation.base import Automation -import base64 - -log = CPLog(__name__) - - -class Trakt(Automation): - - urls = { - 'base': 'http://api.trakt.tv/', - 'watchlist': 'user/watchlist/movies.json/%s/', - } - - def __init__(self): - super(Trakt, self).__init__() - - addEvent('setting.save.trakt.automation_password', self.sha1Password) - - def sha1Password(self, value): - return 
sha1(value) if value else '' - - def getIMDBids(self): - - movies = [] - for movie in self.getWatchlist(): - movies.append(movie.get('imdb_id')) - - return movies - - def getWatchlist(self): - method = (self.urls['watchlist'] % self.conf('automation_api_key')) + self.conf('automation_username') - return self.call(method) - - def call(self, method_url): - - headers = {} - if self.conf('automation_password'): - headers['Authorization'] = 'Basic %s' % base64.encodestring('%s:%s' % (self.conf('automation_username'), self.conf('automation_password')))[:-1] - - data = self.getJsonData(self.urls['base'] + method_url, headers = headers) - return data if data else [] diff --git a/couchpotato/core/providers/base.py b/couchpotato/core/providers/base.py deleted file mode 100644 index 855f3c7295..0000000000 --- a/couchpotato/core/providers/base.py +++ /dev/null @@ -1,274 +0,0 @@ -from couchpotato.core.event import addEvent, fireEvent -from couchpotato.core.helpers.variable import tryFloat, mergeDicts, md5, \ - possibleTitles, getTitle -from couchpotato.core.logger import CPLog -from couchpotato.core.plugins.base import Plugin -from couchpotato.environment import Env -from urlparse import urlparse -import cookielib -import json -import re -import time -import traceback -import urllib2 -import xml.etree.ElementTree as XMLTree - - -log = CPLog(__name__) - - -class Provider(Plugin): - - type = None # movie, nzb, torrent, subtitle, trailer - http_time_between_calls = 10 # Default timeout for url requests - - last_available_check = {} - is_available = {} - - def isAvailable(self, test_url): - - if Env.get('dev'): return True - - now = time.time() - host = urlparse(test_url).hostname - - if self.last_available_check.get(host) < now - 900: - self.last_available_check[host] = now - - try: - self.urlopen(test_url, 30) - self.is_available[host] = True - except: - log.error('"%s" unavailable, trying again in an 15 minutes.', host) - self.is_available[host] = False - - return 
self.is_available.get(host, False) - - def getJsonData(self, url, **kwargs): - - cache_key = '%s%s' % (md5(url), md5('%s' % kwargs.get('params', {}))) - data = self.getCache(cache_key, url, **kwargs) - - if data: - try: - return json.loads(data) - except: - log.error('Failed to parsing %s: %s', (self.getName(), traceback.format_exc())) - - return [] - - def getRSSData(self, url, item_path = 'channel/item', **kwargs): - - cache_key = '%s%s' % (md5(url), md5('%s' % kwargs.get('params', {}))) - data = self.getCache(cache_key, url, **kwargs) - - if data: - try: - data = XMLTree.fromstring(data) - return self.getElements(data, item_path) - except: - log.error('Failed to parsing %s: %s', (self.getName(), traceback.format_exc())) - - return [] - - def getHTMLData(self, url, **kwargs): - - cache_key = '%s%s' % (md5(url), md5('%s' % kwargs.get('params', {}))) - return self.getCache(cache_key, url, **kwargs) - - -class YarrProvider(Provider): - - cat_ids = [] - - sizeGb = ['gb', 'gib'] - sizeMb = ['mb', 'mib'] - sizeKb = ['kb', 'kib'] - - login_opener = None - - def __init__(self): - addEvent('provider.enabled_types', self.getEnabledProviderType) - addEvent('provider.belongs_to', self.belongsTo) - addEvent('yarr.search', self.search) - addEvent('%s.search' % self.type, self.search) - - def getEnabledProviderType(self): - if self.isEnabled(): - return self.type - else: - return [] - - def login(self): - - try: - cookiejar = cookielib.CookieJar() - opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookiejar)) - urllib2.install_opener(opener) - log.info2('Logging into %s', self.urls['login']) - f = opener.open(self.urls['login'], self.getLoginParams()) - output = f.read() - f.close() - - if self.loginSuccess(output): - self.login_opener = opener - return True - except: - log.error('Failed to login %s: %s', (self.getName(), traceback.format_exc())) - - return False - - def loginSuccess(self, output): - return True - - def loginDownload(self, url = '', nzb_id = ''): - 
try: - if not self.login_opener and not self.login(): - log.error('Failed downloading from %s', self.getName()) - return self.urlopen(url, opener = self.login_opener) - except: - log.error('Failed downloading from %s: %s', (self.getName(), traceback.format_exc())) - - def getLoginParams(self): - return '' - - def download(self, url = '', nzb_id = ''): - try: - return self.urlopen(url, headers = {'User-Agent': Env.getIdentifier()}, show_error = False) - except: - log.error('Failed getting nzb from %s: %s', (self.getName(), traceback.format_exc())) - - return 'try_next' - - def search(self, movie, quality): - - if self.isDisabled(): - return [] - - # Login if needed - if self.urls.get('login') and (not self.login_opener and not self.login()): - log.error('Failed to login to: %s', self.getName()) - return [] - - # Create result container - imdb_results = hasattr(self, '_search') - results = ResultList(self, movie, quality, imdb_results = imdb_results) - - # Do search based on imdb id - if imdb_results: - self._search(movie, quality, results) - # Search possible titles - else: - for title in possibleTitles(getTitle(movie['library'])): - self._searchOnTitle(title, movie, quality, results) - - return results - - def belongsTo(self, url, provider = None, host = None): - try: - if provider and provider == self.getName(): - return self - - hostname = urlparse(url).hostname - if host and hostname in host: - return self - else: - for url_type in self.urls: - download_url = self.urls[url_type] - if hostname in download_url: - return self - except: - log.debug('Url % s doesn\'t belong to %s', (url, self.getName())) - - return - - def parseSize(self, size): - - sizeRaw = size.lower() - size = tryFloat(re.sub(r'[^0-9.]', '', size).strip()) - - for s in self.sizeGb: - if s in sizeRaw: - return size * 1024 - - for s in self.sizeMb: - if s in sizeRaw: - return size - - for s in self.sizeKb: - if s in sizeRaw: - return size / 1024 - - return 0 - - def getCatId(self, identifier): - - 
for cats in self.cat_ids: - ids, qualities = cats - if identifier in qualities: - return ids - - return [self.cat_backup_id] - - -class ResultList(list): - - result_ids = None - provider = None - movie = None - quality = None - - def __init__(self, provider, movie, quality, **kwargs): - - self.result_ids = [] - self.provider = provider - self.movie = movie - self.quality = quality - self.kwargs = kwargs - - super(ResultList, self).__init__() - - def extend(self, results): - for r in results: - self.append(r) - - def append(self, result): - - new_result = self.fillResult(result) - - is_correct_movie = fireEvent('searcher.correct_movie', - nzb = new_result, movie = self.movie, quality = self.quality, - imdb_results = self.kwargs.get('imdb_results', False), single = True) - - if is_correct_movie and new_result['id'] not in self.result_ids: - new_result['score'] += fireEvent('score.calculate', new_result, self.movie, single = True) - - self.found(new_result) - self.result_ids.append(result['id']) - - super(ResultList, self).append(new_result) - - def fillResult(self, result): - - defaults = { - 'id': 0, - 'type': self.provider.type, - 'provider': self.provider.getName(), - 'download': self.provider.download, - 'url': '', - 'name': '', - 'age': 0, - 'size': 0, - 'description': '', - 'score': 0 - } - - return mergeDicts(defaults, result) - - def found(self, new_result): - if not new_result.get('provider_extra'): - new_result['provider_extra'] = '' - else: - new_result['provider_extra'] = ', %s' % new_result['provider_extra'] - - log.info('Found: score(%(score)s) on %(provider)s%(provider_extra)s: %(name)s', new_result) diff --git a/couchpotato/core/providers/metadata/base.py b/couchpotato/core/providers/metadata/base.py deleted file mode 100644 index b41960a05c..0000000000 --- a/couchpotato/core/providers/metadata/base.py +++ /dev/null @@ -1,98 +0,0 @@ -from couchpotato.core.event import addEvent, fireEvent -from couchpotato.core.helpers.variable import mergeDicts -from 
couchpotato.core.logger import CPLog -from couchpotato.core.plugins.base import Plugin -from couchpotato.environment import Env -import os -import shutil -import traceback - -log = CPLog(__name__) - - -class MetaDataBase(Plugin): - - enabled_option = 'meta_enabled' - - def __init__(self): - addEvent('renamer.after', self.create) - - def create(self, message = None, group = {}): - if self.isDisabled(): return - - log.info('Creating %s metadata.', self.getName()) - - # Update library to get latest info - try: - updated_library = fireEvent('library.update', group['library']['identifier'], force = True, single = True) - group['library'] = mergeDicts(group['library'], updated_library) - except: - log.error('Failed to update movie, before creating metadata: %s', traceback.format_exc()) - - root_name = self.getRootName(group) - meta_name = os.path.basename(root_name) - root = os.path.dirname(root_name) - - movie_info = group['library'].get('info') - - for file_type in ['nfo', 'thumbnail', 'fanart']: - try: - # Get file path - name = getattr(self, 'get' + file_type.capitalize() + 'Name')(meta_name, root) - - if name and self.conf('meta_' + file_type): - - # Get file content - content = getattr(self, 'get' + file_type.capitalize())(movie_info = movie_info, data = group) - if content: - log.debug('Creating %s file: %s', (file_type, name)) - if os.path.isfile(content): - shutil.copy2(content, name) - else: - self.createFile(name, content) - group['renamed_files'].append(name) - - try: - os.chmod(name, Env.getPermission('file')) - except: - log.debug('Failed setting permissions for %s: %s', (name, traceback.format_exc())) - - except: - log.error('Unable to create %s file: %s', (file_type, traceback.format_exc())) - - def getRootName(self, data): - return - - def getFanartName(self, name, root): - return - - def getThumbnailName(self, name, root): - return - - def getNfoName(self, name, root): - return - - def getNfo(self, movie_info = {}, data = {}): - return - - def 
getThumbnail(self, movie_info = {}, data = {}, wanted_file_type = 'poster_original'): - file_types = fireEvent('file.types', single = True) - for file_type in file_types: - if file_type.get('identifier') == wanted_file_type: - break - - # See if it is in current files - for cur_file in data['library'].get('files', []): - if cur_file.get('type_id') is file_type.get('id') and os.path.isfile(cur_file.get('path')): - return cur_file.get('path') - - # Download using existing info - try: - images = data['library']['info']['images'][wanted_file_type] - file_path = fireEvent('file.download', url = images[0], single = True) - return file_path - except: - pass - - def getFanart(self, movie_info = {}, data = {}): - return self.getThumbnail(movie_info = movie_info, data = data, wanted_file_type = 'backdrop_original') diff --git a/couchpotato/core/providers/metadata/xbmc/__init__.py b/couchpotato/core/providers/metadata/xbmc/__init__.py deleted file mode 100644 index ea426dba66..0000000000 --- a/couchpotato/core/providers/metadata/xbmc/__init__.py +++ /dev/null @@ -1,69 +0,0 @@ -from .main import XBMC - -def start(): - return XBMC() - -config = [{ - 'name': 'xbmc', - 'groups': [ - { - 'tab': 'renamer', - 'subtab': 'metadata', - 'name': 'xbmc_metadata', - 'label': 'XBMC', - 'description': 'Enable metadata XBMC can understand', - 'options': [ - { - 'name': 'meta_enabled', - 'default': False, - 'type': 'enabler', - }, - { - 'name': 'meta_nfo', - 'label': 'NFO', - 'default': True, - 'type': 'bool', - }, - { - 'name': 'meta_nfo_name', - 'label': 'NFO filename', - 'default': '%s.nfo', - 'advanced': True, - 'description': '%s is the rootname of the movie. 
For example "/path/to/movie cd1.mkv" will be "/path/to/movie"' - }, - { - 'name': 'meta_url_only', - 'label': 'Only IMDB URL', - 'default': False, - 'advanced': True, - 'description': 'Create a nfo with only the IMDB url inside', - 'type': 'bool', - }, - { - 'name': 'meta_fanart', - 'label': 'Fanart', - 'default': True, - 'type': 'bool', - }, - { - 'name': 'meta_fanart_name', - 'label': 'Fanart filename', - 'default': '%s-fanart.jpg', - 'advanced': True, - }, - { - 'name': 'meta_thumbnail', - 'label': 'Thumbnail', - 'default': True, - 'type': 'bool', - }, - { - 'name': 'meta_thumbnail_name', - 'label': 'Thumbnail filename', - 'default': '%s.tbn', - 'advanced': True, - }, - ], - }, - ], -}] diff --git a/couchpotato/core/providers/metadata/xbmc/main.py b/couchpotato/core/providers/metadata/xbmc/main.py deleted file mode 100644 index 1fd95846ad..0000000000 --- a/couchpotato/core/providers/metadata/xbmc/main.py +++ /dev/null @@ -1,115 +0,0 @@ -from couchpotato.core.helpers.encoding import toUnicode -from couchpotato.core.helpers.variable import getTitle -from couchpotato.core.logger import CPLog -from couchpotato.core.providers.metadata.base import MetaDataBase -from xml.etree.ElementTree import Element, SubElement, tostring -import os -import re -import traceback -import xml.dom.minidom - -log = CPLog(__name__) - -class XBMC(MetaDataBase): - - def getRootName(self, data = {}): - return os.path.join(data['destination_dir'], data['filename']) - - def getFanartName(self, name, root): - return self.createMetaName(self.conf('meta_fanart_name'), name, root) - - def getThumbnailName(self, name, root): - return self.createMetaName(self.conf('meta_thumbnail_name'), name, root) - - def getNfoName(self, name, root): - return self.createMetaName(self.conf('meta_nfo_name'), name, root) - - def createMetaName(self, basename, name, root): - return os.path.join(root, basename.replace('%s', name)) - - def getNfo(self, movie_info = {}, data = {}): - - # return imdb url only - if 
self.conf('meta_url_only'): - return 'http://www.imdb.com/title/%s/' % toUnicode(data['library']['identifier']) - - nfoxml = Element('movie') - - # Title - try: - el = SubElement(nfoxml, 'title') - el.text = toUnicode(getTitle(data['library'])) - except: - pass - - # IMDB id - try: - el = SubElement(nfoxml, 'id') - el.text = toUnicode(data['library']['identifier']) - except: - pass - - # Runtime - try: - runtime = SubElement(nfoxml, 'runtime') - runtime.text = '%s min' % movie_info.get('runtime') - except: - pass - - # Other values - types = ['year', 'mpaa', 'originaltitle:original_title', 'outline', 'plot', 'tagline', 'premiered:released'] - for type in types: - - if ':' in type: - name, type = type.split(':') - else: - name = type - - try: - if data['library'].get(type): - el = SubElement(nfoxml, name) - el.text = toUnicode(movie_info.get(type, '')) - except: - pass - - # Rating - for rating_type in ['imdb', 'rotten', 'tmdb']: - try: - r, v = movie_info['rating'][rating_type] - rating = SubElement(nfoxml, 'rating') - rating.text = str(r) - votes = SubElement(nfoxml, 'votes') - votes.text = str(v) - break - except: - log.debug('Failed adding rating info from %s: %s', (rating_type, traceback.format_exc())) - - # Genre - for genre in movie_info.get('genres', []): - genres = SubElement(nfoxml, 'genre') - genres.text = toUnicode(genre) - - # Actors - for actor in movie_info.get('actors', []): - actors = SubElement(nfoxml, 'actor') - name = SubElement(actors, 'name') - name.text = toUnicode(actor) - - # Directors - for director_name in movie_info.get('directors', []): - director = SubElement(nfoxml, 'director') - director.text = toUnicode(director_name) - - # Writers - for writer in movie_info.get('writers', []): - writers = SubElement(nfoxml, 'credits') - writers.text = toUnicode(writer) - - - # Clean up the xml and return it - nfoxml = xml.dom.minidom.parseString(tostring(nfoxml)) - xml_string = nfoxml.toprettyxml(indent = ' ') - text_re = 
re.compile('>\n\s+([^<>\s].*?)\n\s+\g<1> 0: - log.info('Found: %s', result['titles'][0] + ' (' + str(result['year']) + ')') - return [result] - - return [] - - return [] - - def getInfo(self, identifier = None): - - if not identifier: - return {} - - cache_key = 'omdbapi.cache.%s' % identifier - cached = self.getCache(cache_key, self.urls['info'] % identifier, timeout = 3) - - if cached: - result = self.parseMovie(cached) - if result.get('titles') and len(result.get('titles')) > 0: - log.info('Found: %s', result['titles'][0] + ' (' + str(result['year']) + ')') - return result - - return {} - - def parseMovie(self, movie): - - movie_data = {} - try: - - try: - if isinstance(movie, (str, unicode)): - movie = json.loads(movie) - except ValueError: - log.info('No proper json to decode') - return movie_data - - if movie.get('Response') == 'Parse Error' or movie.get('Response') == 'False': - return movie_data - - tmp_movie = movie.copy() - for key in tmp_movie: - if tmp_movie.get(key).lower() == 'n/a': - del movie[key] - - year = tryInt(movie.get('Year', '')) - - movie_data = { - 'via_imdb': True, - 'titles': [movie.get('Title')] if movie.get('Title') else [], - 'original_title': movie.get('Title', ''), - 'images': { - 'poster': [movie.get('Poster', '')] if movie.get('Poster') and len(movie.get('Poster', '')) > 4 else [], - }, - 'rating': { - 'imdb': (tryFloat(movie.get('imdbRating', 0)), tryInt(movie.get('imdbVotes', '').replace(',', ''))), - #'rotten': (tryFloat(movie.get('tomatoRating', 0)), tryInt(movie.get('tomatoReviews', '').replace(',', ''))), - }, - 'imdb': str(movie.get('imdbID', '')), - 'runtime': self.runtimeToMinutes(movie.get('Runtime', '')), - 'released': movie.get('Released', ''), - 'year': year if isinstance(year, (int)) else None, - 'plot': movie.get('Plot', ''), - 'genres': splitString(movie.get('Genre', '')), - 'directors': splitString(movie.get('Director', '')), - 'writers': splitString(movie.get('Writer', '')), - 'actors': 
splitString(movie.get('Actors', '')), - } - except: - log.error('Failed parsing IMDB API json: %s', traceback.format_exc()) - - return movie_data - - def runtimeToMinutes(self, runtime_str): - runtime = 0 - - regex = '(\d*.?\d+).(h|hr|hrs|mins|min)+' - matches = re.findall(regex, runtime_str) - for match in matches: - nr, size = match - runtime += tryInt(nr) * (60 if 'h' is str(size)[0] else 1) - - return runtime diff --git a/couchpotato/core/providers/movie/themoviedb/__init__.py b/couchpotato/core/providers/movie/themoviedb/__init__.py deleted file mode 100644 index 66ac536a3f..0000000000 --- a/couchpotato/core/providers/movie/themoviedb/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -from .main import TheMovieDb - -def start(): - return TheMovieDb() - -config = [{ - 'name': 'themoviedb', - 'groups': [ - { - 'tab': 'providers', - 'name': 'tmdb', - 'label': 'TheMovieDB', - 'hidden': True, - 'description': 'Used for all calls to TheMovieDB.', - 'options': [ - { - 'name': 'api_key', - 'default': '9b939aee0aaafc12a65bf448e4af9543', - 'label': 'Api Key', - }, - ], - }, - ], -}] diff --git a/couchpotato/core/providers/movie/themoviedb/main.py b/couchpotato/core/providers/movie/themoviedb/main.py deleted file mode 100644 index 0554a9015c..0000000000 --- a/couchpotato/core/providers/movie/themoviedb/main.py +++ /dev/null @@ -1,214 +0,0 @@ -from couchpotato.core.event import addEvent -from couchpotato.core.helpers.encoding import simplifyString, toUnicode -from couchpotato.core.logger import CPLog -from couchpotato.core.providers.movie.base import MovieProvider -from libs.themoviedb import tmdb -import traceback - -log = CPLog(__name__) - - -class TheMovieDb(MovieProvider): - - def __init__(self): - addEvent('movie.by_hash', self.byHash) - addEvent('movie.search', self.search, priority = 2) - addEvent('movie.info', self.getInfo, priority = 2) - addEvent('movie.info_by_tmdb', self.getInfoByTMDBId) - - # Use base wrapper - tmdb.configure(self.conf('api_key')) - - def 
byHash(self, file): - ''' Find movie by hash ''' - - if self.isDisabled(): - return False - - cache_key = 'tmdb.cache.%s' % simplifyString(file) - results = self.getCache(cache_key) - - if not results: - log.debug('Searching for movie by hash: %s', file) - try: - raw = tmdb.searchByHashingFile(file) - - results = [] - if raw: - try: - results = self.parseMovie(raw) - log.info('Found: %s', results['titles'][0] + ' (' + str(results['year']) + ')') - - self.setCache(cache_key, results) - return results - except SyntaxError, e: - log.error('Failed to parse XML response: %s', e) - return False - except: - log.debug('No movies known by hash for: %s', file) - pass - - return results - - def search(self, q, limit = 12): - ''' Find movie by name ''' - - if self.isDisabled(): - return False - - search_string = simplifyString(q) - cache_key = 'tmdb.cache.%s.%s' % (search_string, limit) - results = self.getCache(cache_key) - - if not results: - log.debug('Searching for movie: %s', q) - - raw = None - try: - raw = tmdb.search(search_string) - except: - log.error('Failed searching TMDB for "%s": %s', (search_string, traceback.format_exc())) - - results = [] - if raw: - try: - nr = 0 - - for movie in raw: - results.append(self.parseMovie(movie)) - - nr += 1 - if nr == limit: - break - - log.info('Found: %s', [result['titles'][0] + ' (' + str(result['year']) + ')' for result in results]) - - self.setCache(cache_key, results) - return results - except SyntaxError, e: - log.error('Failed to parse XML response: %s', e) - return False - - return results - - def getInfo(self, identifier = None): - - if not identifier: - return {} - - cache_key = 'tmdb.cache.%s' % identifier - result = self.getCache(cache_key) - - if not result: - result = {} - movie = None - - try: - log.debug('Getting info: %s', cache_key) - movie = tmdb.imdbLookup(id = identifier) - except: - pass - - if movie: - result = self.parseMovie(movie[0]) - self.setCache(cache_key, result) - - return result - - def 
getInfoByTMDBId(self, id = None): - - cache_key = 'tmdb.cache.%s' % id - result = self.getCache(cache_key) - - if not result: - result = {} - movie = None - - try: - log.debug('Getting info: %s', cache_key) - movie = tmdb.getMovieInfo(id = id) - except: - pass - - if movie: - result = self.parseMovie(movie) - self.setCache(cache_key, result) - - return result - - def parseMovie(self, movie): - - # Images - poster = self.getImage(movie, type = 'poster', size = 'cover') - #backdrop = self.getImage(movie, type = 'backdrop', size = 'w1280') - poster_original = self.getImage(movie, type = 'poster', size = 'original') - backdrop_original = self.getImage(movie, type = 'backdrop', size = 'original') - - # Genres - try: - genres = self.getCategory(movie, 'genre') - except: - genres = [] - - # 1900 is the same as None - year = str(movie.get('released', 'none'))[:4] - if year == '1900' or year.lower() == 'none': - year = None - - movie_data = { - 'via_tmdb': True, - 'tmdb_id': int(movie.get('id', 0)), - 'titles': [toUnicode(movie.get('name'))], - 'original_title': movie.get('original_name'), - 'images': { - 'poster': [poster] if poster else [], - #'backdrop': [backdrop] if backdrop else [], - 'poster_original': [poster_original] if poster_original else [], - 'backdrop_original': [backdrop_original] if backdrop_original else [], - }, - 'imdb': movie.get('imdb_id'), - 'runtime': movie.get('runtime'), - 'released': movie.get('released'), - 'year': year, - 'plot': movie.get('overview', ''), - 'tagline': '', - 'genres': genres, - } - - # Add alternative names - for alt in ['original_name', 'alternative_name']: - alt_name = toUnicode(movie.get(alt)) - if alt_name and not alt_name in movie_data['titles'] and alt_name.lower() != 'none' and alt_name != None: - movie_data['titles'].append(alt_name) - - return movie_data - - def getImage(self, movie, type = 'poster', size = 'cover'): - - image_url = '' - for image in movie.get('images', []): - if(image.get('type') == type) and 
image.get(size): - image_url = image.get(size) - break - - return image_url - - def getCategory(self, movie, type = 'genre'): - - cats = movie.get('categories', {}).get(type) - - categories = [] - for category in cats: - try: - categories.append(category) - except: - pass - - return categories - - def isDisabled(self): - if self.conf('api_key') == '': - log.error('No API key provided.') - True - else: - False diff --git a/couchpotato/core/providers/nzb/__init__.py b/couchpotato/core/providers/nzb/__init__.py deleted file mode 100644 index 651ae8b9f5..0000000000 --- a/couchpotato/core/providers/nzb/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -config = { - 'name': 'nzb_providers', - 'groups': [ - { - 'label': 'Usenet', - 'description': 'Providers searching usenet for new releases', - 'wizard': True, - 'type': 'list', - 'name': 'nzb_providers', - 'tab': 'searcher', - 'subtab': 'providers', - 'options': [], - }, - ], -} diff --git a/couchpotato/core/providers/nzb/base.py b/couchpotato/core/providers/nzb/base.py deleted file mode 100644 index f11382ba0b..0000000000 --- a/couchpotato/core/providers/nzb/base.py +++ /dev/null @@ -1,9 +0,0 @@ -from couchpotato.core.providers.base import YarrProvider -import time - - -class NZBProvider(YarrProvider): - type = 'nzb' - - def calculateAge(self, unix): - return int(time.time() - unix) / 24 / 60 / 60 diff --git a/couchpotato/core/providers/nzb/binsearch/__init__.py b/couchpotato/core/providers/nzb/binsearch/__init__.py deleted file mode 100644 index f4288b11f5..0000000000 --- a/couchpotato/core/providers/nzb/binsearch/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -from .main import BinSearch - -def start(): - return BinSearch() - -config = [{ - 'name': 'binsearch', - 'groups': [ - { - 'tab': 'searcher', - 'subtab': 'providers', - 'list': 'nzb_providers', - 'name': 'binsearch', - 'description': 'Free provider, less accurate. 
See BinSearch', - 'wizard': True, - 'options': [ - { - 'name': 'enabled', - 'type': 'enabler', - }, - ], - }, - ], -}] diff --git a/couchpotato/core/providers/nzb/binsearch/main.py b/couchpotato/core/providers/nzb/binsearch/main.py deleted file mode 100644 index 1d86300283..0000000000 --- a/couchpotato/core/providers/nzb/binsearch/main.py +++ /dev/null @@ -1,98 +0,0 @@ -from bs4 import BeautifulSoup -from couchpotato.core.helpers.encoding import tryUrlencode -from couchpotato.core.helpers.variable import tryInt -from couchpotato.core.logger import CPLog -from couchpotato.core.providers.nzb.base import NZBProvider -from couchpotato.environment import Env -import re -import traceback - -log = CPLog(__name__) - - -class BinSearch(NZBProvider): - - urls = { - 'download': 'https://www.binsearch.info/fcgi/nzb.fcgi?q=%s', - 'detail': 'https://www.binsearch.info%s', - 'search': 'https://www.binsearch.info/index.php?%s', - } - - http_time_between_calls = 4 # Seconds - - def _search(self, movie, quality, results): - - arguments = tryUrlencode({ - 'q': movie['library']['identifier'], - 'm': 'n', - 'max': 400, - 'adv_age': Env.setting('retention', 'nzb'), - 'adv_sort': 'date', - 'adv_col': 'on', - 'adv_nfo': 'on', - 'minsize': quality.get('size_min'), - 'maxsize': quality.get('size_max'), - }) - - data = self.getHTMLData(self.urls['search'] % arguments) - - if data: - try: - - html = BeautifulSoup(data) - main_table = html.find('table', attrs = {'id':'r2'}) - - if not main_table: - return - - items = main_table.find_all('tr') - - for row in items: - title = row.find('span', attrs = {'class':'s'}) - - if not title: continue - - nzb_id = row.find('input', attrs = {'type':'checkbox'})['name'] - info = row.find('span', attrs = {'class':'d'}) - size_match = re.search('size:.(?P[0-9\.]+.[GMB]+)', info.text) - - def extra_check(item): - parts = re.search('available:.(?P\d+)./.(?P\d+)', info.text) - total = tryInt(parts.group('total')) - parts = tryInt(parts.group('parts')) - - if 
(total / parts) < 0.95 or ((total / parts) >= 0.95 and not 'par2' in info.text.lower()): - log.info2('Wrong: \'%s\', not complete: %s out of %s', (item['name'], parts, total)) - return False - - if 'requires password' in info.text.lower(): - log.info2('Wrong: \'%s\', passworded', (item['name'])) - return False - - return True - - results.append({ - 'id': nzb_id, - 'name': title.text, - 'age': tryInt(re.search('(?P\d+d)', row.find_all('td')[-1:][0].text).group('size')[:-1]), - 'size': self.parseSize(size_match.group('size')), - 'url': self.urls['download'] % nzb_id, - 'detail_url': self.urls['detail'] % info.find('a')['href'], - 'extra_check': extra_check - }) - - except: - log.error('Failed to parse HTML response from BinSearch: %s', traceback.format_exc()) - - def download(self, url = '', nzb_id = ''): - - params = {'action': 'nzb'} - params[nzb_id] = 'on' - - try: - return self.urlopen(url, params = params, show_error = False) - except: - log.error('Failed getting nzb from %s: %s', (self.getName(), traceback.format_exc())) - - return 'try_next' - diff --git a/couchpotato/core/providers/nzb/ftdworld/__init__.py b/couchpotato/core/providers/nzb/ftdworld/__init__.py deleted file mode 100644 index ca60ac4dcc..0000000000 --- a/couchpotato/core/providers/nzb/ftdworld/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -from .main import FTDWorld - -def start(): - return FTDWorld() - -config = [{ - 'name': 'ftdworld', - 'groups': [ - { - 'tab': 'searcher', - 'subtab': 'providers', - 'list': 'nzb_providers', - 'name': 'FTDWorld', - 'description': 'Free provider, less accurate. 
See FTDWorld', - 'wizard': True, - 'options': [ - { - 'name': 'enabled', - 'type': 'enabler', - }, - { - 'name': 'username', - 'default': '', - }, - { - 'name': 'password', - 'default': '', - 'type': 'password', - }, - ], - }, - ], -}] diff --git a/couchpotato/core/providers/nzb/ftdworld/main.py b/couchpotato/core/providers/nzb/ftdworld/main.py deleted file mode 100644 index bd2a3ee281..0000000000 --- a/couchpotato/core/providers/nzb/ftdworld/main.py +++ /dev/null @@ -1,81 +0,0 @@ -from couchpotato.core.helpers.encoding import toUnicode, tryUrlencode -from couchpotato.core.helpers.variable import tryInt -from couchpotato.core.logger import CPLog -from couchpotato.core.providers.nzb.base import NZBProvider -from couchpotato.environment import Env -from dateutil.parser import parse -import json -import traceback - -log = CPLog(__name__) - - -class FTDWorld(NZBProvider): - - urls = { - 'search': 'http://ftdworld.net/api/index.php?%s', - 'detail': 'http://ftdworld.net/spotinfo.php?id=%s', - 'download': 'http://ftdworld.net/cgi-bin/nzbdown.pl?fileID=%s', - 'login': 'http://ftdworld.net/api/login.php', - } - - http_time_between_calls = 3 #seconds - - cat_ids = [ - ([4, 11], ['dvdr']), - ([1], ['cam', 'ts', 'dvdrip', 'tc', 'r5', 'scr', 'brrip']), - ([7, 10, 13, 14], ['bd50', '720p', '1080p']), - ] - cat_backup_id = 1 - - def _searchOnTitle(self, title, movie, quality, results): - - q = '"%s" %s' % (title, movie['library']['year']) - - params = tryUrlencode({ - 'ctitle': q, - 'customQuery': 'usr', - 'cage': Env.setting('retention', 'nzb'), - 'csizemin': quality.get('size_min'), - 'csizemax': quality.get('size_max'), - 'ccategory': 14, - 'ctype': ','.join([str(x) for x in self.getCatId(quality['identifier'])]), - }) - - data = self.getJsonData(self.urls['search'] % params, opener = self.login_opener) - - if data: - try: - - if data.get('numRes') == 0: - return - - for item in data.get('data'): - - nzb_id = tryInt(item.get('id')) - results.append({ - 'id': nzb_id, - 'name': 
toUnicode(item.get('Title')), - 'age': self.calculateAge(tryInt(item.get('Created'))), - 'size': item.get('Size', 0), - 'url': self.urls['download'] % nzb_id, - 'download': self.loginDownload, - 'detail_url': self.urls['detail'] % nzb_id, - 'score': (tryInt(item.get('webPlus', 0)) - tryInt(item.get('webMin', 0))) * 3, - }) - - except: - log.error('Failed to parse HTML response from FTDWorld: %s', traceback.format_exc()) - - def getLoginParams(self): - return tryUrlencode({ - 'userlogin': self.conf('username'), - 'passlogin': self.conf('password'), - 'submit': 'Log In', - }) - - def loginSuccess(self, output): - try: - return json.loads(output).get('goodToGo', False) - except: - return False diff --git a/couchpotato/core/providers/nzb/newznab/__init__.py b/couchpotato/core/providers/nzb/newznab/__init__.py deleted file mode 100644 index 9047d20035..0000000000 --- a/couchpotato/core/providers/nzb/newznab/__init__.py +++ /dev/null @@ -1,44 +0,0 @@ -from .main import Newznab - -def start(): - return Newznab() - -config = [{ - 'name': 'newznab', - 'groups': [ - { - 'tab': 'searcher', - 'subtab': 'providers', - 'list': 'nzb_providers', - 'name': 'newznab', - 'order': 10, - 'description': 'Enable NewzNab providers such as NZB.su, \ - NZBs.org, DOGnzb.cr, \ - Spotweb or NZBGeek', - 'wizard': True, - 'options': [ - { - 'name': 'enabled', - 'type': 'enabler', - }, - { - 'name': 'use', - 'default': '0,0,0,0' - }, - { - 'name': 'host', - 'default': 'nzb.su,dognzb.cr,nzbs.org,https://index.nzbgeek.info', - 'description': 'The hostname of your newznab provider', - }, - { - 'name': 'api_key', - 'default': ',,,', - 'label': 'Api Key', - 'description': 'Can be found on your profile page', - 'type': 'combined', - 'combine': ['use', 'host', 'api_key'], - }, - ], - }, - ], -}] diff --git a/couchpotato/core/providers/nzb/newznab/main.py b/couchpotato/core/providers/nzb/newznab/main.py deleted file mode 100644 index 414e711293..0000000000 --- 
a/couchpotato/core/providers/nzb/newznab/main.py +++ /dev/null @@ -1,152 +0,0 @@ -from couchpotato.core.helpers.encoding import tryUrlencode -from couchpotato.core.helpers.rss import RSS -from couchpotato.core.helpers.variable import cleanHost, splitString -from couchpotato.core.logger import CPLog -from couchpotato.core.providers.base import ResultList -from couchpotato.core.providers.nzb.base import NZBProvider -from couchpotato.environment import Env -from dateutil.parser import parse -from urllib2 import HTTPError -from urlparse import urlparse -import time -import traceback - -log = CPLog(__name__) - - -class Newznab(NZBProvider, RSS): - - urls = { - 'download': 'get&id=%s', - 'detail': 'details&id=%s', - 'search': 'movie', - } - - limits_reached = {} - - http_time_between_calls = 1 # Seconds - - def search(self, movie, quality): - hosts = self.getHosts() - - results = ResultList(self, movie, quality, imdb_results = True) - - for host in hosts: - if self.isDisabled(host): - continue - - self._searchOnHost(host, movie, quality, results) - - return results - - def _searchOnHost(self, host, movie, quality, results): - - arguments = tryUrlencode({ - 'imdbid': movie['library']['identifier'].replace('tt', ''), - 'apikey': host['api_key'], - 'extended': 1 - }) - url = '%s&%s' % (self.getUrl(host['host'], self.urls['search']), arguments) - - nzbs = self.getRSSData(url, cache_timeout = 1800, headers = {'User-Agent': Env.getIdentifier()}) - - for nzb in nzbs: - - date = None - for item in nzb: - if item.attrib.get('name') == 'usenetdate': - date = item.attrib.get('value') - break - - if not date: - date = self.getTextElement(nzb, 'pubDate') - - nzb_id = self.getTextElement(nzb, 'guid').split('/')[-1:].pop() - name = self.getTextElement(nzb, 'title') - - if not name: - continue - - results.append({ - 'id': nzb_id, - 'provider_extra': urlparse(host['host']).hostname or host['host'], - 'name': self.getTextElement(nzb, 'title'), - 'age': 
self.calculateAge(int(time.mktime(parse(date).timetuple()))), - 'size': int(self.getElement(nzb, 'enclosure').attrib['length']) / 1024 / 1024, - 'url': (self.getUrl(host['host'], self.urls['download']) % tryUrlencode(nzb_id)) + self.getApiExt(host), - 'detail_url': '%sdetails/%s' % (cleanHost(host['host']), tryUrlencode(nzb_id)), - 'content': self.getTextElement(nzb, 'description'), - }) - - def getHosts(self): - - uses = splitString(str(self.conf('use'))) - hosts = splitString(self.conf('host')) - api_keys = splitString(self.conf('api_key')) - - list = [] - for nr in range(len(hosts)): - list.append({ - 'use': uses[nr], - 'host': hosts[nr], - 'api_key': api_keys[nr] - }) - - return list - - def belongsTo(self, url, provider = None): - - hosts = self.getHosts() - - for host in hosts: - result = super(Newznab, self).belongsTo(url, host = host['host'], provider = provider) - if result: - return result - - def getUrl(self, host, type): - if '?page=newznabapi' in host: - return cleanHost(host)[:-1] + '&t=' + type - - return cleanHost(host) + 'api?t=' + type - - def isDisabled(self, host = None): - return not self.isEnabled(host) - - def isEnabled(self, host = None): - - # Return true if at least one is enabled and no host is given - if host is None: - for host in self.getHosts(): - if self.isEnabled(host): - return True - return False - - return NZBProvider.isEnabled(self) and host['host'] and host['api_key'] and int(host['use']) - - def getApiExt(self, host): - return '&apikey=%s' % host['api_key'] - - def download(self, url = '', nzb_id = ''): - host = urlparse(url).hostname - - if self.limits_reached.get(host): - # Try again in 3 hours - if self.limits_reached[host] > time.time() - 10800: - return 'try_next' - - try: - data = self.urlopen(url, show_error = False) - self.limits_reached[host] = False - return data - except HTTPError, e: - if e.code == 503: - response = e.read().lower() - if 'maximum api' in response or 'download limit' in response: - if not 
self.limits_reached.get(host): - log.error('Limit reached for newznab provider: %s', host) - self.limits_reached[host] = time.time() - return 'try_next' - - log.error('Failed download from %s: %s', (host, traceback.format_exc())) - - return 'try_next' diff --git a/couchpotato/core/providers/nzb/nzbclub/__init__.py b/couchpotato/core/providers/nzb/nzbclub/__init__.py deleted file mode 100644 index 7859fe9c46..0000000000 --- a/couchpotato/core/providers/nzb/nzbclub/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -from .main import NZBClub - -def start(): - return NZBClub() - -config = [{ - 'name': 'nzbclub', - 'groups': [ - { - 'tab': 'searcher', - 'subtab': 'providers', - 'list': 'nzb_providers', - 'name': 'NZBClub', - 'description': 'Free provider, less accurate. See NZBClub', - 'wizard': True, - 'options': [ - { - 'name': 'enabled', - 'type': 'enabler', - }, - ], - }, - ], -}] diff --git a/couchpotato/core/providers/nzb/nzbclub/main.py b/couchpotato/core/providers/nzb/nzbclub/main.py deleted file mode 100644 index 59382dfdc2..0000000000 --- a/couchpotato/core/providers/nzb/nzbclub/main.py +++ /dev/null @@ -1,80 +0,0 @@ -from bs4 import BeautifulSoup -from couchpotato.core.helpers.encoding import toUnicode, tryUrlencode -from couchpotato.core.helpers.rss import RSS -from couchpotato.core.helpers.variable import tryInt -from couchpotato.core.logger import CPLog -from couchpotato.core.providers.nzb.base import NZBProvider -from dateutil.parser import parse -import time - -log = CPLog(__name__) - - -class NZBClub(NZBProvider, RSS): - - urls = { - 'search': 'http://www.nzbclub.com/nzbfeed.aspx?%s', - } - - http_time_between_calls = 4 #seconds - - def _searchOnTitle(self, title, movie, quality, results): - - q = '"%s %s"' % (title, movie['library']['year']) - - params = tryUrlencode({ - 'q': q, - 'ig': 1, - 'rpp': 200, - 'st': 5, - 'sp': 1, - 'ns': 1, - }) - - nzbs = self.getRSSData(self.urls['search'] % params) - - for nzb in nzbs: - - nzbclub_id = 
tryInt(self.getTextElement(nzb, "link").split('/nzb_view/')[1].split('/')[0]) - enclosure = self.getElement(nzb, "enclosure").attrib - size = enclosure['length'] - date = self.getTextElement(nzb, "pubDate") - - def extra_check(item): - full_description = self.getCache('nzbclub.%s' % nzbclub_id, item['detail_url'], cache_timeout = 25920000) - - for ignored in ['ARCHIVE inside ARCHIVE', 'Incomplete', 'repair impossible']: - if ignored in full_description: - log.info('Wrong: Seems to be passworded or corrupted files: %s', item['name']) - return False - - return True - - results.append({ - 'id': nzbclub_id, - 'name': toUnicode(self.getTextElement(nzb, "title")), - 'age': self.calculateAge(int(time.mktime(parse(date).timetuple()))), - 'size': tryInt(size) / 1024 / 1024, - 'url': enclosure['url'].replace(' ', '_'), - 'detail_url': self.getTextElement(nzb, "link"), - 'get_more_info': self.getMoreInfo, - 'extra_check': extra_check - }) - - def getMoreInfo(self, item): - full_description = self.getCache('nzbclub.%s' % item['id'], item['detail_url'], cache_timeout = 25920000) - html = BeautifulSoup(full_description) - nfo_pre = html.find('pre', attrs = {'class':'nfo'}) - description = toUnicode(nfo_pre.text) if nfo_pre else '' - - item['description'] = description - return item - - def extraCheck(self, item): - full_description = self.getCache('nzbclub.%s' % item['id'], item['detail_url'], cache_timeout = 25920000) - - if 'ARCHIVE inside ARCHIVE' in full_description: - log.info('Wrong: Seems to be passworded files: %s', item['name']) - return False - - return True diff --git a/couchpotato/core/providers/nzb/nzbindex/__init__.py b/couchpotato/core/providers/nzb/nzbindex/__init__.py deleted file mode 100644 index 29eb0d38bb..0000000000 --- a/couchpotato/core/providers/nzb/nzbindex/__init__.py +++ /dev/null @@ -1,25 +0,0 @@ -from .main import NzbIndex - -def start(): - return NzbIndex() - -config = [{ - 'name': 'nzbindex', - 'groups': [ - { - 'tab': 'searcher', - 'subtab': 
'providers', - 'list': 'nzb_providers', - 'name': 'nzbindex', - 'description': 'Free provider, less accurate. See NZBIndex', - 'wizard': True, - 'options': [ - { - 'name': 'enabled', - 'type': 'enabler', - 'default': True, - }, - ], - }, - ], -}] diff --git a/couchpotato/core/providers/nzb/nzbindex/main.py b/couchpotato/core/providers/nzb/nzbindex/main.py deleted file mode 100644 index 3643f55b1e..0000000000 --- a/couchpotato/core/providers/nzb/nzbindex/main.py +++ /dev/null @@ -1,79 +0,0 @@ -from bs4 import BeautifulSoup -from couchpotato.core.helpers.encoding import toUnicode, tryUrlencode -from couchpotato.core.helpers.rss import RSS -from couchpotato.core.helpers.variable import tryInt -from couchpotato.core.logger import CPLog -from couchpotato.core.providers.nzb.base import NZBProvider -from couchpotato.environment import Env -from dateutil.parser import parse -import re -import time - -log = CPLog(__name__) - - -class NzbIndex(NZBProvider, RSS): - - urls = { - 'download': 'https://www.nzbindex.com/download/', - 'search': 'https://www.nzbindex.com/rss/?%s', - } - - http_time_between_calls = 1 # Seconds - - def _searchOnTitle(self, title, movie, quality, results): - - q = '"%s %s"' % (title, movie['library']['year']) - arguments = tryUrlencode({ - 'q': q, - 'age': Env.setting('retention', 'nzb'), - 'sort': 'agedesc', - 'minsize': quality.get('size_min'), - 'maxsize': quality.get('size_max'), - 'rating': 1, - 'max': 250, - 'more': 1, - 'complete': 1, - }) - - nzbs = self.getRSSData(self.urls['search'] % arguments) - - for nzb in nzbs: - - enclosure = self.getElement(nzb, 'enclosure').attrib - nzbindex_id = int(self.getTextElement(nzb, "link").split('/')[4]) - - try: - description = self.getTextElement(nzb, "description") - except: - description = '' - - def extra_check(item): - if '#c20000' in item['description'].lower(): - log.info('Wrong: Seems to be passworded: %s', item['name']) - return False - - return True - - results.append({ - 'id': nzbindex_id, - 
'name': self.getTextElement(nzb, "title"), - 'age': self.calculateAge(int(time.mktime(parse(self.getTextElement(nzb, "pubDate")).timetuple()))), - 'size': tryInt(enclosure['length']) / 1024 / 1024, - 'url': enclosure['url'], - 'detail_url': enclosure['url'].replace('/download/', '/release/'), - 'description': description, - 'get_more_info': self.getMoreInfo, - 'extra_check': extra_check, - }) - - def getMoreInfo(self, item): - try: - if '/nfo/' in item['description'].lower(): - nfo_url = re.search('href=\"(?P.+)\" ', item['description']).group('nfo') - full_description = self.getCache('nzbindex.%s' % item['id'], url = nfo_url, cache_timeout = 25920000) - html = BeautifulSoup(full_description) - item['description'] = toUnicode(html.find('pre', attrs = {'id':'nfo0'}).text) - except: - pass - diff --git a/couchpotato/core/providers/nzb/nzbsrus/__init__.py b/couchpotato/core/providers/nzb/nzbsrus/__init__.py deleted file mode 100644 index 3a04278475..0000000000 --- a/couchpotato/core/providers/nzb/nzbsrus/__init__.py +++ /dev/null @@ -1,41 +0,0 @@ -from .main import Nzbsrus - -def start(): - return Nzbsrus() - -config = [{ - 'name': 'nzbsrus', - 'groups': [ - { - 'tab': 'searcher', - 'subtab': 'providers', - 'list': 'nzb_providers', - 'name': 'nzbsrus', - 'label': 'Nzbsrus', - 'description': 'See NZBsRus. 
You need a VIP account!', - 'wizard': True, - 'options': [ - { - 'name': 'enabled', - 'type': 'enabler', - }, - { - 'name': 'userid', - 'label': 'User ID', - }, - { - 'name': 'api_key', - 'default': '', - 'label': 'Api Key', - }, - { - 'name': 'english_only', - 'default': 1, - 'type': 'bool', - 'label': 'English only', - 'description': 'Only search for English spoken movies on Nzbsrus', - }, - ], - }, - ], -}] diff --git a/couchpotato/core/providers/nzb/nzbsrus/main.py b/couchpotato/core/providers/nzb/nzbsrus/main.py deleted file mode 100644 index d52212a7c8..0000000000 --- a/couchpotato/core/providers/nzb/nzbsrus/main.py +++ /dev/null @@ -1,62 +0,0 @@ -from couchpotato.core.helpers.encoding import tryUrlencode -from couchpotato.core.helpers.rss import RSS -from couchpotato.core.logger import CPLog -from couchpotato.core.providers.nzb.base import NZBProvider -from couchpotato.environment import Env -import time - -log = CPLog(__name__) - -class Nzbsrus(NZBProvider, RSS): - - urls = { - 'download': 'https://www.nzbsrus.com/nzbdownload_rss.php/%s', - 'detail': 'https://www.nzbsrus.com/nzbdetails.php?id=%s', - 'search': 'https://www.nzbsrus.com/api.php?extended=1&xml=1&listname={date,grabs}', - } - - cat_ids = [ - ([90, 45, 51], ['720p', '1080p', 'brrip', 'bd50', 'dvdr']), - ([48, 51], ['cam', 'ts', 'dvdrip', 'tc', 'r5', 'scr']), - ] - cat_backup_id = 240 - - def _search(self, movie, quality, results): - - cat_id_string = '&'.join(['c%s=1' % x for x in self.getCatId(quality.get('identifier'))]) - arguments = tryUrlencode({ - 'searchtext': 'imdb:' + movie['library']['identifier'][2:], - 'uid': self.conf('userid'), - 'key': self.conf('api_key'), - 'age': Env.setting('retention', section = 'nzb'), - - }) - - # check for english_only - if self.conf('english_only'): - arguments += '&lang0=1&lang3=1&lang1=1' - - url = '%s&%s&%s' % (self.urls['search'], arguments , cat_id_string) - nzbs = self.getRSSData(url, item_path = 'results/result', cache_timeout = 1800, headers = 
{'User-Agent': Env.getIdentifier()}) - - for nzb in nzbs: - - title = self.getTextElement(nzb, 'name') - if 'error' in title.lower(): continue - - nzb_id = self.getTextElement(nzb, 'id') - size = int(round(int(self.getTextElement(nzb, 'size')) / 1048576)) - age = int(round((time.time() - int(self.getTextElement(nzb, 'postdate'))) / 86400)) - - results.append({ - 'id': nzb_id, - 'name': title, - 'age': age, - 'size': size, - 'url': self.urls['download'] % nzb_id + self.getApiExt() + self.getTextElement(nzb, 'key'), - 'detail_url': self.urls['detail'] % nzb_id, - 'description': self.getTextElement(nzb, 'addtext'), - }) - - def getApiExt(self): - return '/%s/' % (self.conf('userid')) diff --git a/couchpotato/core/providers/nzb/nzbx/__init__.py b/couchpotato/core/providers/nzb/nzbx/__init__.py deleted file mode 100644 index 9ce9226b46..0000000000 --- a/couchpotato/core/providers/nzb/nzbx/__init__.py +++ /dev/null @@ -1,25 +0,0 @@ -from .main import Nzbx - -def start(): - return Nzbx() - -config = [{ - 'name': 'nzbx', - 'groups': [ - { - 'tab': 'searcher', - 'subtab': 'providers', - 'list': 'nzb_providers', - 'name': 'nzbX', - 'description': 'Free provider. 
See nzbX', - 'wizard': True, - 'options': [ - { - 'name': 'enabled', - 'type': 'enabler', - 'default': True, - }, - ], - }, - ], -}] diff --git a/couchpotato/core/providers/nzb/nzbx/main.py b/couchpotato/core/providers/nzb/nzbx/main.py deleted file mode 100644 index ec7fbfe2c6..0000000000 --- a/couchpotato/core/providers/nzb/nzbx/main.py +++ /dev/null @@ -1,38 +0,0 @@ -from couchpotato.core.helpers.encoding import tryUrlencode -from couchpotato.core.helpers.variable import tryInt -from couchpotato.core.logger import CPLog -from couchpotato.core.providers.nzb.base import NZBProvider -from couchpotato.environment import Env - -log = CPLog(__name__) - - -class Nzbx(NZBProvider): - - urls = { - 'search': 'https://nzbx.co/api/search?%s', - 'details': 'https://nzbx.co/api/details?guid=%s', - } - - http_time_between_calls = 1 # Seconds - - def _search(self, movie, quality, results): - - # Get nbzs - arguments = tryUrlencode({ - 'q': movie['library']['identifier'].replace('tt', ''), - 'sf': quality.get('size_min'), - }) - nzbs = self.getJsonData(self.urls['search'] % arguments, headers = {'User-Agent': Env.getIdentifier()}) - - for nzb in nzbs: - - results.append({ - 'id': nzb['guid'], - 'url': nzb['nzb'], - 'detail_url': self.urls['details'] % nzb['guid'], - 'name': nzb['name'], - 'age': self.calculateAge(int(nzb['postdate'])), - 'size': tryInt(nzb['size']) / 1024 / 1024, - 'score': 5 if nzb['votes']['upvotes'] > nzb['votes']['downvotes'] else 0 - }) diff --git a/couchpotato/core/providers/nzb/omgwtfnzbs/__init__.py b/couchpotato/core/providers/nzb/omgwtfnzbs/__init__.py deleted file mode 100644 index 287ced49bf..0000000000 --- a/couchpotato/core/providers/nzb/omgwtfnzbs/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -from .main import OMGWTFNZBs - -def start(): - return OMGWTFNZBs() - -config = [{ - 'name': 'omgwtfnzbs', - 'groups': [ - { - 'tab': 'searcher', - 'subtab': 'providers', - 'list': 'nzb_providers', - 'name': 'OMGWTFNZBs', - 'description': 'See OMGWTFNZBs', - 
'wizard': True, - 'options': [ - { - 'name': 'enabled', - 'type': 'enabler', - }, - { - 'name': 'username', - 'default': '', - }, - { - 'name': 'api_key', - 'label': 'Api Key', - 'default': '', - }, - ], - }, - ], -}] diff --git a/couchpotato/core/providers/nzb/omgwtfnzbs/main.py b/couchpotato/core/providers/nzb/omgwtfnzbs/main.py deleted file mode 100644 index 0a18b8f4b8..0000000000 --- a/couchpotato/core/providers/nzb/omgwtfnzbs/main.py +++ /dev/null @@ -1,61 +0,0 @@ -from couchpotato.core.event import fireEvent -from couchpotato.core.helpers.encoding import toUnicode, tryUrlencode -from couchpotato.core.helpers.rss import RSS -from couchpotato.core.helpers.variable import tryInt -from couchpotato.core.logger import CPLog -from couchpotato.core.providers.nzb.base import NZBProvider -from dateutil.parser import parse -from urlparse import urlparse, parse_qs -import time - -log = CPLog(__name__) - - -class OMGWTFNZBs(NZBProvider, RSS): - - urls = { - 'search': 'http://rss.omgwtfnzbs.org/rss-search.php?%s', - } - - http_time_between_calls = 1 #seconds - - cat_ids = [ - ([15], ['dvdrip']), - ([15, 16], ['brrip']), - ([16], ['720p', '1080p', 'bd50']), - ([17], ['dvdr']), - ] - cat_backup_id = 'movie' - - def search(self, movie, quality): - - if quality['identifier'] in fireEvent('quality.pre_releases', single = True): - return [] - - return super(OMGWTFNZBs, self).search(movie, quality) - - def _searchOnTitle(self, title, movie, quality, results): - - q = '%s %s' % (title, movie['library']['year']) - params = tryUrlencode({ - 'search': q, - 'catid': ','.join([str(x) for x in self.getCatId(quality['identifier'])]), - 'user': self.conf('username', default = ''), - 'api': self.conf('api_key', default = ''), - }) - - nzbs = self.getRSSData(self.urls['search'] % params) - - for nzb in nzbs: - - enclosure = self.getElement(nzb, 'enclosure').attrib - - results.append({ - 'id': parse_qs(urlparse(self.getTextElement(nzb, 'link')).query).get('id')[0], - 'name': 
toUnicode(self.getTextElement(nzb, 'title')), - 'age': self.calculateAge(int(time.mktime(parse(self.getTextElement(nzb, 'pubDate')).timetuple()))), - 'size': tryInt(enclosure['length']) / 1024 / 1024, - 'url': enclosure['url'], - 'detail_url': self.getTextElement(nzb, 'link'), - 'description': self.getTextElement(nzb, 'description') - }) diff --git a/couchpotato/core/providers/torrent/__init__.py b/couchpotato/core/providers/torrent/__init__.py deleted file mode 100644 index 191e132e0f..0000000000 --- a/couchpotato/core/providers/torrent/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -config = { - 'name': 'torrent_providers', - 'groups': [ - { - 'label': 'Torrent', - 'description': 'Providers searching torrent sites for new releases', - 'wizard': True, - 'type': 'list', - 'name': 'torrent_providers', - 'tab': 'searcher', - 'subtab': 'providers', - 'options': [], - }, - ], -} diff --git a/couchpotato/core/providers/torrent/base.py b/couchpotato/core/providers/torrent/base.py deleted file mode 100644 index 453954c920..0000000000 --- a/couchpotato/core/providers/torrent/base.py +++ /dev/null @@ -1,32 +0,0 @@ -from couchpotato.core.helpers.variable import getImdb, md5 -from couchpotato.core.logger import CPLog -from couchpotato.core.providers.base import YarrProvider - -log = CPLog(__name__) - - -class TorrentProvider(YarrProvider): - - type = 'torrent' - - def imdbMatch(self, url, imdbId): - if getImdb(url) == imdbId: - return True - - if url[:4] == 'http': - try: - cache_key = md5(url) - data = self.getCache(cache_key, url) - except IOError: - log.error('Failed to open %s.', url) - return False - - return getImdb(data) == imdbId - - return False - -class TorrentMagnetProvider(TorrentProvider): - - type = 'torrent_magnet' - - download = None diff --git a/couchpotato/core/providers/torrent/iptorrents/__init__.py b/couchpotato/core/providers/torrent/iptorrents/__init__.py deleted file mode 100644 index bca8ce6914..0000000000 --- 
a/couchpotato/core/providers/torrent/iptorrents/__init__.py +++ /dev/null @@ -1,40 +0,0 @@ -from .main import IPTorrents - -def start(): - return IPTorrents() - -config = [{ - 'name': 'iptorrents', - 'groups': [ - { - 'tab': 'searcher', - 'subtab': 'providers', - 'list': 'torrent_providers', - 'name': 'IPTorrents', - 'description': 'See IPTorrents', - 'wizard': True, - 'options': [ - { - 'name': 'enabled', - 'type': 'enabler', - 'default': False, - }, - { - 'name': 'username', - 'default': '', - }, - { - 'name': 'password', - 'default': '', - 'type': 'password', - }, - { - 'name': 'freeleech', - 'default': 0, - 'type': 'bool', - 'description': 'Only search for [FreeLeech] torrents.', - }, - ], - }, - ], -}] diff --git a/couchpotato/core/providers/torrent/iptorrents/main.py b/couchpotato/core/providers/torrent/iptorrents/main.py deleted file mode 100644 index 75dc6f12af..0000000000 --- a/couchpotato/core/providers/torrent/iptorrents/main.py +++ /dev/null @@ -1,83 +0,0 @@ -from bs4 import BeautifulSoup -from couchpotato.core.helpers.encoding import tryUrlencode -from couchpotato.core.helpers.variable import tryInt -from couchpotato.core.logger import CPLog -from couchpotato.core.providers.torrent.base import TorrentProvider -import traceback - - -log = CPLog(__name__) - - -class IPTorrents(TorrentProvider): - - urls = { - 'test' : 'http://www.iptorrents.com/', - 'base_url' : 'http://www.iptorrents.com', - 'login' : 'http://www.iptorrents.com/torrents/', - 'search' : 'http://www.iptorrents.com/torrents/?l%d=1%s&q=%s&qf=ti', - } - - cat_ids = [ - ([48], ['720p', '1080p', 'bd50']), - ([72], ['cam', 'ts', 'tc', 'r5', 'scr']), - ([7], ['dvdrip', 'brrip']), - ([6], ['dvdr']), - ] - - http_time_between_calls = 1 #seconds - cat_backup_id = None - - def _searchOnTitle(self, title, movie, quality, results): - - freeleech = '' if not self.conf('freeleech') else '&free=on' - - url = self.urls['search'] % (self.getCatId(quality['identifier'])[0], freeleech, tryUrlencode('%s %s' % 
(title.replace(':', ''), movie['library']['year']))) - data = self.getHTMLData(url, opener = self.login_opener) - - if data: - html = BeautifulSoup(data) - - try: - result_table = html.find('table', attrs = {'class' : 'torrents'}) - - if not result_table or 'nothing found!' in data.lower(): - return - - entries = result_table.find_all('tr') - - for result in entries[1:]: - - torrent = result.find_all('td')[1].find('a') - - torrent_id = torrent['href'].replace('/details.php?id=', '') - torrent_name = torrent.string - torrent_download_url = self.urls['base_url'] + (result.find_all('td')[3].find('a'))['href'].replace(' ', '.') - torrent_details_url = self.urls['base_url'] + torrent['href'] - torrent_size = self.parseSize(result.find_all('td')[5].string) - torrent_seeders = tryInt(result.find('td', attrs = {'class' : 'ac t_seeders'}).string) - torrent_leechers = tryInt(result.find('td', attrs = {'class' : 'ac t_leechers'}).string) - - results.append({ - 'id': torrent_id, - 'name': torrent_name, - 'url': torrent_download_url, - 'detail_url': torrent_details_url, - 'download': self.loginDownload, - 'size': torrent_size, - 'seeders': torrent_seeders, - 'leechers': torrent_leechers, - }) - - except: - log.error('Failed to parsing %s: %s', (self.getName(), traceback.format_exc())) - - def loginSuccess(self, output): - return 'don\'t have an account' not in output.lower() - - def getLoginParams(self): - return tryUrlencode({ - 'username': self.conf('username'), - 'password': self.conf('password'), - 'login': 'submit', - }) diff --git a/couchpotato/core/providers/torrent/kickasstorrents/__init__.py b/couchpotato/core/providers/torrent/kickasstorrents/__init__.py deleted file mode 100644 index 8ddb1f4a15..0000000000 --- a/couchpotato/core/providers/torrent/kickasstorrents/__init__.py +++ /dev/null @@ -1,25 +0,0 @@ -from .main import KickAssTorrents - -def start(): - return KickAssTorrents() - -config = [{ - 'name': 'kickasstorrents', - 'groups': [ - { - 'tab': 'searcher', - 
'subtab': 'providers', - 'list': 'torrent_providers', - 'name': 'KickAssTorrents', - 'description': 'See KickAssTorrents', - 'wizard': True, - 'options': [ - { - 'name': 'enabled', - 'type': 'enabler', - 'default': True, - }, - ], - }, - ], -}] diff --git a/couchpotato/core/providers/torrent/kickasstorrents/main.py b/couchpotato/core/providers/torrent/kickasstorrents/main.py deleted file mode 100644 index d9980d84ad..0000000000 --- a/couchpotato/core/providers/torrent/kickasstorrents/main.py +++ /dev/null @@ -1,102 +0,0 @@ -from bs4 import BeautifulSoup -from couchpotato.core.helpers.variable import tryInt -from couchpotato.core.logger import CPLog -from couchpotato.core.providers.torrent.base import TorrentMagnetProvider -import re -import traceback - -log = CPLog(__name__) - - -class KickAssTorrents(TorrentMagnetProvider): - - urls = { - 'test': 'https://kat.ph/', - 'detail': 'https://kat.ph/%s', - 'search': 'https://kat.ph/%s-i%s/', - } - - cat_ids = [ - (['cam'], ['cam']), - (['telesync'], ['ts', 'tc']), - (['screener', 'tvrip'], ['screener']), - (['x264', '720p', '1080p', 'blu-ray', 'hdrip'], ['bd50', '1080p', '720p', 'brrip']), - (['dvdrip'], ['dvdrip']), - (['dvd'], ['dvdr']), - ] - - http_time_between_calls = 1 #seconds - cat_backup_id = None - - def _search(self, movie, quality, results): - - data = self.getHTMLData(self.urls['search'] % ('m', movie['library']['identifier'].replace('tt', ''))) - - if data: - - cat_ids = self.getCatId(quality['identifier']) - table_order = ['name', 'size', None, 'age', 'seeds', 'leechers'] - - try: - html = BeautifulSoup(data) - resultdiv = html.find('div', attrs = {'class':'tabs'}) - for result in resultdiv.find_all('div', recursive = False): - if result.get('id').lower() not in cat_ids: - continue - - try: - for temp in result.find_all('tr'): - if temp['class'] is 'firstr' or not temp.get('id'): - continue - - new = {} - - nr = 0 - for td in temp.find_all('td'): - column_name = table_order[nr] - if column_name: - - if 
column_name is 'name': - link = td.find('div', {'class': 'torrentname'}).find_all('a')[1] - new['id'] = temp.get('id')[-8:] - new['name'] = link.text - new['url'] = td.find('a', 'imagnet')['href'] - new['detail_url'] = self.urls['detail'] % link['href'][1:] - new['score'] = 20 if td.find('a', 'iverif') else 0 - elif column_name is 'size': - new['size'] = self.parseSize(td.text) - elif column_name is 'age': - new['age'] = self.ageToDays(td.text) - elif column_name is 'seeds': - new['seeders'] = tryInt(td.text) - elif column_name is 'leechers': - new['leechers'] = tryInt(td.text) - - nr += 1 - - results.append(new) - except: - log.error('Failed parsing KickAssTorrents: %s', traceback.format_exc()) - - except AttributeError: - log.debug('No search results found.') - - def ageToDays(self, age_str): - age = 0 - age_str = age_str.replace(' ', ' ') - - regex = '(\d*.?\d+).(sec|hour|day|week|month|year)+' - matches = re.findall(regex, age_str) - for match in matches: - nr, size = match - mult = 1 - if size == 'week': - mult = 7 - elif size == 'month': - mult = 30.5 - elif size == 'year': - mult = 365 - - age += tryInt(nr) * mult - - return tryInt(age) diff --git a/couchpotato/core/providers/torrent/passthepopcorn/__init__.py b/couchpotato/core/providers/torrent/passthepopcorn/__init__.py deleted file mode 100644 index 06be7a890f..0000000000 --- a/couchpotato/core/providers/torrent/passthepopcorn/__init__.py +++ /dev/null @@ -1,44 +0,0 @@ -from main import PassThePopcorn - -def start(): - return PassThePopcorn() - -config = [{ - 'name': 'passthepopcorn', - 'groups': [ - { - 'tab': 'searcher', - 'subtab': 'providers', - 'list': 'torrent_providers', - 'name': 'PassThePopcorn', - 'description': 'See PassThePopcorn.me', - 'wizard': True, - 'options': [ - { - 'name': 'enabled', - 'type': 'enabler', - 'default': False - }, - { - 'name': 'domain', - 'advanced': True, - 'label': 'Proxy server', - 'description': 'Domain for requests (HTTPS only!), keep empty to use default 
(tls.passthepopcorn.me).', - }, - { - 'name': 'username', - 'default': '', - }, - { - 'name': 'password', - 'default': '', - 'type': 'password', - }, - { - 'name': 'passkey', - 'default': '', - } - ], -} - ] -}] diff --git a/couchpotato/core/providers/torrent/passthepopcorn/main.py b/couchpotato/core/providers/torrent/passthepopcorn/main.py deleted file mode 100644 index 9abe51fbdc..0000000000 --- a/couchpotato/core/providers/torrent/passthepopcorn/main.py +++ /dev/null @@ -1,229 +0,0 @@ -from couchpotato.core.helpers.encoding import tryUrlencode -from couchpotato.core.helpers.variable import getTitle, tryInt, mergeDicts -from couchpotato.core.logger import CPLog -from couchpotato.core.providers.torrent.base import TorrentProvider -from dateutil.parser import parse -import cookielib -import htmlentitydefs -import json -import re -import time -import traceback -import urllib2 - -log = CPLog(__name__) - - -class PassThePopcorn(TorrentProvider): - - urls = { - 'domain': 'https://tls.passthepopcorn.me', - 'detail': 'https://tls.passthepopcorn.me/torrents.php?torrentid=%s', - 'torrent': 'https://tls.passthepopcorn.me/torrents.php', - 'login': 'https://tls.passthepopcorn.me/ajax.php?action=login', - 'search': 'https://tls.passthepopcorn.me/search/%s/0/7/%d' - } - - quality_search_params = { - 'bd50': {'media': 'Blu-ray', 'format': 'BD50'}, - '1080p': {'resolution': '1080p'}, - '720p': {'resolution': '720p'}, - 'brrip': {'media': 'Blu-ray'}, - 'dvdr': {'resolution': 'anysd'}, - 'dvdrip': {'media': 'DVD'}, - 'scr': {'media': 'DVD-Screener'}, - 'r5': {'media': 'R5'}, - 'tc': {'media': 'TC'}, - 'ts': {'media': 'TS'}, - 'cam': {'media': 'CAM'} - } - - post_search_filters = { - 'bd50': {'Codec': ['BD50']}, - '1080p': {'Resolution': ['1080p']}, - '720p': {'Resolution': ['720p']}, - 'brrip': {'Source': ['Blu-ray'], 'Quality': ['High Definition'], 'Container': ['!ISO']}, - 'dvdr': {'Codec': ['DVD5', 'DVD9']}, - 'dvdrip': {'Source': ['DVD'], 'Codec': ['!DVD5', '!DVD9']}, - 'scr': 
{'Source': ['DVD-Screener']}, - 'r5': {'Source': ['R5']}, - 'tc': {'Source': ['TC']}, - 'ts': {'Source': ['TS']}, - 'cam': {'Source': ['CAM']} - } - - class NotLoggedInHTTPError(urllib2.HTTPError): - def __init__(self, url, code, msg, headers, fp): - urllib2.HTTPError.__init__(self, url, code, msg, headers, fp) - - class PTPHTTPRedirectHandler(urllib2.HTTPRedirectHandler): - def http_error_302(self, req, fp, code, msg, headers): - log.debug("302 detected; redirected to %s" % headers['Location']) - if (headers['Location'] != 'login.php'): - return urllib2.HTTPRedirectHandler.http_error_302(self, req, fp, code, msg, headers) - else: - raise PassThePopcorn.NotLoggedInHTTPError(req.get_full_url(), code, msg, headers, fp) - - def _search(self, movie, quality, results): - - movie_title = getTitle(movie['library']) - quality_id = quality['identifier'] - - params = mergeDicts(self.quality_search_params[quality_id].copy(), { - 'order_by': 'relevance', - 'order_way': 'descending', - 'searchstr': movie['library']['identifier'] - }) - - # Do login for the cookies - if not self.login_opener and not self.login(): - return - - try: - url = '%s?json=noredirect&%s' % (self.urls['torrent'], tryUrlencode(params)) - txt = self.urlopen(url, opener = self.login_opener) - res = json.loads(txt) - except: - log.error('Search on PassThePopcorn.me (%s) failed (could not decode JSON)' % params) - return - - try: - if not 'Movies' in res: - return - - authkey = res['AuthKey'] - passkey = res['PassKey'] - - for ptpmovie in res['Movies']: - if not 'Torrents' in ptpmovie: - log.debug('Movie %s (%s) has NO torrents' % (ptpmovie['Title'], ptpmovie['Year'])) - continue - - log.debug('Movie %s (%s) has %d torrents' % (ptpmovie['Title'], ptpmovie['Year'], len(ptpmovie['Torrents']))) - for torrent in ptpmovie['Torrents']: - torrent_id = tryInt(torrent['Id']) - torrentdesc = '%s %s %s' % (torrent['Resolution'], torrent['Source'], torrent['Codec']) - - if 'GoldenPopcorn' in torrent and 
torrent['GoldenPopcorn']: - torrentdesc += ' HQ' - if 'Scene' in torrent and torrent['Scene']: - torrentdesc += ' Scene' - if 'RemasterTitle' in torrent and torrent['RemasterTitle']: - torrentdesc += self.htmlToASCII(' %s' % torrent['RemasterTitle']) - - torrentdesc += ' (%s)' % quality_id - torrent_name = re.sub('[^A-Za-z0-9\-_ \(\).]+', '', '%s (%s) - %s' % (movie_title, ptpmovie['Year'], torrentdesc)) - - def extra_check(item): - return self.torrentMeetsQualitySpec(item, type) - - results.append({ - 'id': torrent_id, - 'name': torrent_name, - 'url': '%s?action=download&id=%d&authkey=%s&torrent_pass=%s' % (self.urls['torrent'], torrent_id, authkey, passkey), - 'detail_url': self.urls['detail'] % torrent_id, - 'date': tryInt(time.mktime(parse(torrent['UploadTime']).timetuple())), - 'size': tryInt(torrent['Size']) / (1024 * 1024), - 'seeders': tryInt(torrent['Seeders']), - 'leechers': tryInt(torrent['Leechers']), - 'score': 50 if torrent['GoldenPopcorn'] else 0, - 'extra_check': extra_check, - 'download': self.loginDownload, - }) - - except: - log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc())) - - def login(self): - - cookieprocessor = urllib2.HTTPCookieProcessor(cookielib.CookieJar()) - opener = urllib2.build_opener(cookieprocessor, PassThePopcorn.PTPHTTPRedirectHandler()) - opener.addheaders = [ - ('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.75 Safari/537.1'), - ('Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'), - ('Accept-Language', 'en-gb,en;q=0.5'), - ('Accept-Charset', 'ISO-8859-1,utf-8;q=0.7,*;q=0.7'), - ('Keep-Alive', '115'), - ('Connection', 'keep-alive'), - ('Cache-Control', 'max-age=0'), - ] - - try: - response = opener.open(self.urls['login'], self.getLoginParams()) - except urllib2.URLError as e: - log.error('Login to PassThePopcorn failed: %s' % e) - return False - - if response.getcode() == 200: - log.debug('Login HTTP 
status 200; seems successful') - self.login_opener = opener - return True - else: - log.error('Login to PassThePopcorn failed: returned code %d' % response.getcode()) - return False - - def torrentMeetsQualitySpec(self, torrent, quality): - - if not quality in self.post_search_filters: - return True - - for field, specs in self.post_search_filters[quality].items(): - matches_one = False - seen_one = False - - if not field in torrent: - log.debug('Torrent with ID %s has no field "%s"; cannot apply post-search-filter for quality "%s"' % (torrent['Id'], field, quality)) - continue - - for spec in specs: - if len(spec) > 0 and spec[0] == '!': - # a negative rule; if the field matches, return False - if torrent[field] == spec[1:]: - return False - else: - # a positive rule; if any of the possible positive values match the field, return True - seen_one = True - if torrent[field] == spec: - matches_one = True - - if seen_one and not matches_one: - return False - - return True - - def htmlToUnicode(self, text): - def fixup(m): - text = m.group(0) - if text[:2] == "&#": - # character reference - try: - if text[:3] == "&#x": - return unichr(int(text[3:-1], 16)) - else: - return unichr(int(text[2:-1])) - except ValueError: - pass - else: - # named entity - try: - text = unichr(htmlentitydefs.name2codepoint[text[1:-1]]) - except KeyError: - pass - return text # leave as is - return re.sub("&#?\w+;", fixup, u'%s' % text) - - def unicodeToASCII(self, text): - import unicodedata - return ''.join(c for c in unicodedata.normalize('NFKD', text) if unicodedata.category(c) != 'Mn') - - def htmlToASCII(self, text): - return self.unicodeToASCII(self.htmlToUnicode(text)) - - def getLoginParams(self): - return tryUrlencode({ - 'username': self.conf('username'), - 'password': self.conf('password'), - 'passkey': self.conf('passkey'), - 'keeplogged': '1', - 'login': 'Login' - }) diff --git a/couchpotato/core/providers/torrent/publichd/__init__.py 
b/couchpotato/core/providers/torrent/publichd/__init__.py deleted file mode 100644 index 2c356e20e9..0000000000 --- a/couchpotato/core/providers/torrent/publichd/__init__.py +++ /dev/null @@ -1,25 +0,0 @@ -from .main import PublicHD - -def start(): - return PublicHD() - -config = [{ - 'name': 'publichd', - 'groups': [ - { - 'tab': 'searcher', - 'subtab': 'providers', - 'list': 'torrent_providers', - 'name': 'PublicHD', - 'description': 'Public Torrent site with only HD content. See PublicHD', - 'wizard': True, - 'options': [ - { - 'name': 'enabled', - 'type': 'enabler', - 'default': True, - }, - ], - }, - ], -}] diff --git a/couchpotato/core/providers/torrent/publichd/main.py b/couchpotato/core/providers/torrent/publichd/main.py deleted file mode 100644 index 2043f8c426..0000000000 --- a/couchpotato/core/providers/torrent/publichd/main.py +++ /dev/null @@ -1,76 +0,0 @@ -from bs4 import BeautifulSoup -from couchpotato.core.helpers.encoding import tryUrlencode, toUnicode -from couchpotato.core.helpers.variable import tryInt -from couchpotato.core.logger import CPLog -from couchpotato.core.providers.torrent.base import TorrentMagnetProvider -from urlparse import parse_qs -import re -import traceback - -log = CPLog(__name__) - - -class PublicHD(TorrentMagnetProvider): - - urls = { - 'test': 'https://publichd.se', - 'detail': 'https://publichd.se/index.php?page=torrent-details&id=%s', - 'search': 'https://publichd.se/index.php', - } - http_time_between_calls = 0 - - def search(self, movie, quality): - - if not quality.get('hd', False): - return [] - - return super(PublicHD, self).search(movie, quality) - - def _searchOnTitle(self, title, movie, quality, results): - - params = tryUrlencode({ - 'page':'torrents', - 'search': '%s %s' % (title, movie['library']['year']), - 'active': 1, - }) - - data = self.getHTMLData('%s?%s' % (self.urls['search'], params)) - - if data: - - try: - soup = BeautifulSoup(data) - - results_table = soup.find('table', attrs = {'id': 
'bgtorrlist2'}) - entries = results_table.find_all('tr') - - for result in entries[2:len(entries) - 1]: - info_url = result.find(href = re.compile('torrent-details')) - download = result.find(href = re.compile('magnet:')) - - if info_url and download: - - url = parse_qs(info_url['href']) - - results.append({ - 'id': url['id'][0], - 'name': info_url.string, - 'url': download['href'], - 'detail_url': self.urls['detail'] % url['id'][0], - 'size': self.parseSize(result.find_all('td')[7].string), - 'seeders': tryInt(result.find_all('td')[4].string), - 'leechers': tryInt(result.find_all('td')[5].string), - 'get_more_info': self.getMoreInfo - }) - - except: - log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc())) - - def getMoreInfo(self, item): - full_description = self.getCache('publichd.%s' % item['id'], item['detail_url'], cache_timeout = 25920000) - html = BeautifulSoup(full_description) - nfo_pre = html.find('div', attrs = {'id':'torrmain'}) - description = toUnicode(nfo_pre.text) if nfo_pre else '' - - item['description'] = description - return item diff --git a/couchpotato/core/providers/torrent/sceneaccess/__init__.py b/couchpotato/core/providers/torrent/sceneaccess/__init__.py deleted file mode 100644 index e12bf8bd50..0000000000 --- a/couchpotato/core/providers/torrent/sceneaccess/__init__.py +++ /dev/null @@ -1,34 +0,0 @@ -from .main import SceneAccess - -def start(): - return SceneAccess() - -config = [{ - 'name': 'sceneaccess', - 'groups': [ - { - 'tab': 'searcher', - 'subtab': 'providers', - 'list': 'torrent_providers', - 'name': 'SceneAccess', - 'description': 'See SceneAccess', - 'wizard': True, - 'options': [ - { - 'name': 'enabled', - 'type': 'enabler', - 'default': False, - }, - { - 'name': 'username', - 'default': '', - }, - { - 'name': 'password', - 'default': '', - 'type': 'password', - }, - ], - }, - ], -}] diff --git a/couchpotato/core/providers/torrent/sceneaccess/main.py 
b/couchpotato/core/providers/torrent/sceneaccess/main.py deleted file mode 100644 index 904bafb98b..0000000000 --- a/couchpotato/core/providers/torrent/sceneaccess/main.py +++ /dev/null @@ -1,93 +0,0 @@ -from bs4 import BeautifulSoup -from couchpotato.core.helpers.encoding import tryUrlencode, toUnicode -from couchpotato.core.helpers.variable import tryInt -from couchpotato.core.logger import CPLog -from couchpotato.core.providers.torrent.base import TorrentProvider -import traceback - -log = CPLog(__name__) - - -class SceneAccess(TorrentProvider): - - urls = { - 'test': 'https://www.sceneaccess.eu/', - 'login' : 'https://www.sceneaccess.eu/login', - 'detail': 'https://www.sceneaccess.eu/details?id=%s', - 'search': 'https://www.sceneaccess.eu/browse?method=2&c%d=%d', - 'download': 'https://www.sceneaccess.eu/%s', - } - - cat_ids = [ - ([22], ['720p', '1080p']), - ([7], ['cam', 'ts', 'dvdrip', 'tc', 'r5', 'scr', 'brrip']), - ([8], ['dvdr']), - ] - - http_time_between_calls = 1 #seconds - - def _search(self, movie, quality, results): - - url = self.urls['search'] % ( - self.getCatId(quality['identifier'])[0], - self.getCatId(quality['identifier'])[0] - ) - - arguments = tryUrlencode({ - 'search': movie['library']['identifier'], - 'method': 1, - }) - url = "%s&%s" % (url, arguments) - - # Do login for the cookies - if not self.login_opener and not self.login(): - return - - data = self.getHTMLData(url, opener = self.login_opener) - - if data: - html = BeautifulSoup(data) - - try: - resultsTable = html.find('table', attrs = {'id' : 'torrents-table'}) - if resultsTable is None: - return - - entries = resultsTable.find_all('tr', attrs = {'class' : 'tt_row'}) - for result in entries: - - link = result.find('td', attrs = {'class' : 'ttr_name'}).find('a') - url = result.find('td', attrs = {'class' : 'td_dl'}).find('a') - leechers = result.find('td', attrs = {'class' : 'ttr_leechers'}).find('a') - torrent_id = link['href'].replace('details?id=', '') - - results.append({ - 
'id': torrent_id, - 'name': link['title'], - 'url': self.urls['download'] % url['href'], - 'detail_url': self.urls['detail'] % torrent_id, - 'size': self.parseSize(result.find('td', attrs = {'class' : 'ttr_size'}).contents[0]), - 'seeders': tryInt(result.find('td', attrs = {'class' : 'ttr_seeders'}).find('a').string), - 'leechers': tryInt(leechers.string) if leechers else 0, - 'download': self.loginDownload, - 'get_more_info': self.getMoreInfo, - }) - - except: - log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc())) - - def getLoginParams(self): - return tryUrlencode({ - 'username': self.conf('username'), - 'password': self.conf('password'), - 'submit': 'come on in', - }) - - def getMoreInfo(self, item): - full_description = self.getCache('sceneaccess.%s' % item['id'], item['detail_url'], cache_timeout = 25920000) - html = BeautifulSoup(full_description) - nfo_pre = html.find('div', attrs = {'id':'details_table'}) - description = toUnicode(nfo_pre.text) if nfo_pre else '' - - item['description'] = description - return item diff --git a/couchpotato/core/providers/torrent/scenehd/__init__.py b/couchpotato/core/providers/torrent/scenehd/__init__.py deleted file mode 100644 index 10c5e385f7..0000000000 --- a/couchpotato/core/providers/torrent/scenehd/__init__.py +++ /dev/null @@ -1,34 +0,0 @@ -from .main import SceneHD - -def start(): - return SceneHD() - -config = [{ - 'name': 'scenehd', - 'groups': [ - { - 'tab': 'searcher', - 'subtab': 'providers', - 'list': 'torrent_providers', - 'name': 'SceneHD', - 'description': 'See SceneHD', - 'wizard': True, - 'options': [ - { - 'name': 'enabled', - 'type': 'enabler', - 'default': False, - }, - { - 'name': 'username', - 'default': '', - }, - { - 'name': 'password', - 'default': '', - 'type': 'password', - }, - ], - }, - ], -}] diff --git a/couchpotato/core/providers/torrent/scenehd/main.py b/couchpotato/core/providers/torrent/scenehd/main.py deleted file mode 100644 index 
93897c67e2..0000000000 --- a/couchpotato/core/providers/torrent/scenehd/main.py +++ /dev/null @@ -1,77 +0,0 @@ -from bs4 import BeautifulSoup -from couchpotato.core.helpers.encoding import simplifyString, tryUrlencode -from couchpotato.core.helpers.variable import tryInt -from couchpotato.core.logger import CPLog -from couchpotato.core.providers.torrent.base import TorrentProvider -import traceback - -log = CPLog(__name__) - - -class SceneHD(TorrentProvider): - - urls = { - 'test': 'https://scenehd.org/', - 'login' : 'https://scenehd.org/takelogin.php', - 'detail': 'https://scenehd.org/details.php?id=%s', - 'search': 'https://scenehd.org/browse.php?ajax', - 'download': 'https://scenehd.org/download.php?id=%s', - } - - http_time_between_calls = 1 #seconds - - def _searchOnTitle(self, title, movie, quality, results): - - q = '"%s %s"' % (simplifyString(title), movie['library']['year']) - arguments = tryUrlencode({ - 'search': q, - }) - url = "%s&%s" % (self.urls['search'], arguments) - - # Cookie login - if not self.login_opener and not self.login(): - return - - data = self.getHTMLData(url, opener = self.login_opener) - - if data: - html = BeautifulSoup(data) - - try: - resultsTable = html.find_all('table')[6] - entries = resultsTable.find_all('tr') - for result in entries[1:]: - - all_cells = result.find_all('td') - - detail_link = all_cells[2].find('a') - details = detail_link['href'] - torrent_id = details.replace('details.php?id=', '') - - leechers = all_cells[11].find('a') - if leechers: - leechers = leechers.string - else: - leechers = all_cells[11].string - - results.append({ - 'id': torrent_id, - 'name': detail_link['title'], - 'size': self.parseSize(all_cells[7].string), - 'seeders': tryInt(all_cells[10].find('a').string), - 'leechers': tryInt(leechers), - 'url': self.urls['download'] % torrent_id, - 'download': self.loginDownload, - 'description': all_cells[1].find('a')['href'], - }) - - except: - log.error('Failed getting results from %s: %s', 
(self.getName(), traceback.format_exc())) - - - def getLoginParams(self, params): - return tryUrlencode({ - 'username': self.conf('username'), - 'password': self.conf('password'), - 'ssl': 'yes', - }) diff --git a/couchpotato/core/providers/torrent/thepiratebay/__init__.py b/couchpotato/core/providers/torrent/thepiratebay/__init__.py deleted file mode 100644 index 38169084e9..0000000000 --- a/couchpotato/core/providers/torrent/thepiratebay/__init__.py +++ /dev/null @@ -1,31 +0,0 @@ -from main import ThePirateBay - -def start(): - return ThePirateBay() - -config = [{ - 'name': 'thepiratebay', - 'groups': [ - { - 'tab': 'searcher', - 'subtab': 'providers', - 'list': 'torrent_providers', - 'name': 'ThePirateBay', - 'description': 'The world\'s largest bittorrent tracker. See ThePirateBay', - 'wizard': True, - 'options': [ - { - 'name': 'enabled', - 'type': 'enabler', - 'default': True - }, - { - 'name': 'domain', - 'advanced': True, - 'label': 'Proxy server', - 'description': 'Domain for requests, keep empty to let CouchPotato pick.', - } - ], - } - ] -}] diff --git a/couchpotato/core/providers/torrent/thepiratebay/main.py b/couchpotato/core/providers/torrent/thepiratebay/main.py deleted file mode 100644 index 1e47a710dd..0000000000 --- a/couchpotato/core/providers/torrent/thepiratebay/main.py +++ /dev/null @@ -1,146 +0,0 @@ -from bs4 import BeautifulSoup -from couchpotato.core.helpers.encoding import toUnicode, tryUrlencode -from couchpotato.core.helpers.variable import tryInt, cleanHost -from couchpotato.core.logger import CPLog -from couchpotato.core.providers.torrent.base import TorrentMagnetProvider -from couchpotato.environment import Env -import re -import time -import traceback - -log = CPLog(__name__) - - -class ThePirateBay(TorrentMagnetProvider): - - urls = { - 'detail': '%s/torrent/%s', - 'search': '%s/search/%s/%s/7/%d' - } - - cat_ids = [ - ([207], ['720p', '1080p']), - ([201], ['cam', 'ts', 'dvdrip', 'tc', 'r5', 'scr', 'brrip']), - ([202], ['dvdr']) - ] 
- - cat_backup_id = 200 - disable_provider = False - http_time_between_calls = 0 - - proxy_list = [ - 'https://thepiratebay.se', - 'https://tpb.ipredator.se', - 'https://depiraatbaai.be', - 'https://piratereverse.info', - 'https://tpb.pirateparty.org.uk', - 'https://argumentomteemigreren.nl', - 'https://livepirate.com/', - 'https://www.getpirate.com/', - ] - - def __init__(self): - self.domain = self.conf('domain') - super(ThePirateBay, self).__init__() - - def _searchOnTitle(self, title, movie, quality, results): - - page = 0 - total_pages = 1 - - while page < total_pages: - - search_url = self.urls['search'] % (self.getDomain(), tryUrlencode('"%s %s"' % (title, movie['library']['year'])), page, self.getCatId(quality['identifier'])[0]) - page += 1 - - data = self.getHTMLData(search_url) - - if data: - try: - soup = BeautifulSoup(data) - results_table = soup.find('table', attrs = {'id': 'searchResult'}) - - if not results_table: - return - - try: - total_pages = len(soup.find('div', attrs = {'align': 'center'}).find_all('a')) - except: - pass - - entries = results_table.find_all('tr') - for result in entries[2:]: - link = result.find(href = re.compile('torrent\/\d+\/')) - download = result.find(href = re.compile('magnet:')) - - try: - size = re.search('Size (?P.+),', unicode(result.select('font.detDesc')[0])).group('size') - except: - continue - - if link and download: - - def extra_score(item): - trusted = (0, 10)[result.find('img', alt = re.compile('Trusted')) != None] - vip = (0, 20)[result.find('img', alt = re.compile('VIP')) != None] - confirmed = (0, 30)[result.find('img', alt = re.compile('Helpers')) != None] - moderated = (0, 50)[result.find('img', alt = re.compile('Moderator')) != None] - - return confirmed + trusted + vip + moderated - - results.append({ - 'id': re.search('/(?P\d+)/', link['href']).group('id'), - 'name': link.string, - 'url': download['href'], - 'detail_url': self.getDomain(link['href']), - 'size': self.parseSize(size), - 'seeders': 
tryInt(result.find_all('td')[2].string), - 'leechers': tryInt(result.find_all('td')[3].string), - 'extra_score': extra_score, - 'get_more_info': self.getMoreInfo - }) - - except: - log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc())) - - - def isEnabled(self): - return super(ThePirateBay, self).isEnabled() and self.getDomain() - - def getDomain(self, url = ''): - - if not self.domain: - for proxy in self.proxy_list: - - prop_name = 'tpb_proxy.%s' % proxy - last_check = float(Env.prop(prop_name, default = 0)) - if last_check > time.time() - 1209600: - continue - - data = '' - try: - data = self.urlopen(proxy, timeout = 3, show_error = False) - except: - log.debug('Failed tpb proxy %s', proxy) - - if 'title="Pirate Search"' in data: - log.debug('Using proxy: %s', proxy) - self.domain = proxy - break - - Env.prop(prop_name, time.time()) - - if not self.domain: - log.error('No TPB proxies left, please add one in settings, or let us know which one to add on the forum.') - return None - - return cleanHost(self.domain).rstrip('/') + url - - def getMoreInfo(self, item): - full_description = self.getCache('tpb.%s' % item['id'], item['detail_url'], cache_timeout = 25920000) - html = BeautifulSoup(full_description) - nfo_pre = html.find('div', attrs = {'class':'nfo'}) - description = toUnicode(nfo_pre.text) if nfo_pre else '' - - item['description'] = description - return item diff --git a/couchpotato/core/providers/torrent/torrentday/__init__.py b/couchpotato/core/providers/torrent/torrentday/__init__.py deleted file mode 100644 index 1a4d3c7f63..0000000000 --- a/couchpotato/core/providers/torrent/torrentday/__init__.py +++ /dev/null @@ -1,34 +0,0 @@ -from .main import TorrentDay - -def start(): - return TorrentDay() - -config = [{ - 'name': 'torrentday', - 'groups': [ - { - 'tab': 'searcher', - 'subtab': 'providers', - 'list': 'torrent_providers', - 'name': 'TorrentDay', - 'description': 'See TorrentDay', - 'wizard': True, - 'options': [ 
- { - 'name': 'enabled', - 'type': 'enabler', - 'default': False, - }, - { - 'name': 'username', - 'default': '', - }, - { - 'name': 'password', - 'default': '', - 'type': 'password', - }, - ], - }, - ], -}] diff --git a/couchpotato/core/providers/torrent/torrentday/main.py b/couchpotato/core/providers/torrent/torrentday/main.py deleted file mode 100644 index 5e207c1e2a..0000000000 --- a/couchpotato/core/providers/torrent/torrentday/main.py +++ /dev/null @@ -1,64 +0,0 @@ -from couchpotato.core.helpers.encoding import tryUrlencode -from couchpotato.core.helpers.variable import tryInt -from couchpotato.core.logger import CPLog -from couchpotato.core.providers.torrent.base import TorrentProvider - -log = CPLog(__name__) - - -class TorrentDay(TorrentProvider): - - urls = { - 'test': 'http://www.td.af/', - 'login' : 'http://www.td.af/torrents/', - 'detail': 'http://www.td.af/details.php?id=%s', - 'search': 'http://www.td.af/V3/API/API.php', - 'download': 'http://www.td.af/download.php/%s/%s', - } - - cat_ids = [ - ([11], ['720p', '1080p']), - ([1, 21, 25], ['cam', 'ts', 'dvdrip', 'tc', 'r5', 'scr', 'brrip']), - ([3], ['dvdr']), - ([5], ['bd50']), - ] - - http_time_between_calls = 1 #seconds - - def _searchOnTitle(self, title, movie, quality, results): - - q = '"%s %s"' % (title, movie['library']['year']) - - params = { - '/browse.php?': None, - 'cata': 'yes', - 'jxt': 8, - 'jxw': 'b', - 'search': q, - } - - data = self.getJsonData(self.urls['search'], params = params, opener = self.login_opener) - try: torrents = data.get('Fs', [])[0].get('Cn', {}).get('torrents', []) - except: return - - for torrent in torrents: - results.append({ - 'id': torrent['id'], - 'name': torrent['name'], - 'url': self.urls['download'] % (torrent['id'], torrent['fname']), - 'detail_url': self.urls['detail'] % torrent['id'], - 'size': self.parseSize(torrent.get('size')), - 'seeders': tryInt(torrent.get('seed')), - 'leechers': tryInt(torrent.get('leech')), - 'download': self.loginDownload, - }) - 
- def getLoginParams(self): - return tryUrlencode({ - 'username': self.conf('username'), - 'password': self.conf('password'), - 'submit': 'submit', - }) - - def loginSuccess(self, output): - return 'Password not correct' not in output diff --git a/couchpotato/core/providers/torrent/torrentleech/__init__.py b/couchpotato/core/providers/torrent/torrentleech/__init__.py deleted file mode 100644 index d96ac06481..0000000000 --- a/couchpotato/core/providers/torrent/torrentleech/__init__.py +++ /dev/null @@ -1,34 +0,0 @@ -from .main import TorrentLeech - -def start(): - return TorrentLeech() - -config = [{ - 'name': 'torrentleech', - 'groups': [ - { - 'tab': 'searcher', - 'subtab': 'providers', - 'list': 'torrent_providers', - 'name': 'TorrentLeech', - 'description': 'See TorrentLeech', - 'wizard': True, - 'options': [ - { - 'name': 'enabled', - 'type': 'enabler', - 'default': False, - }, - { - 'name': 'username', - 'default': '', - }, - { - 'name': 'password', - 'default': '', - 'type': 'password', - }, - ], - }, - ], -}] diff --git a/couchpotato/core/providers/torrent/torrentleech/main.py b/couchpotato/core/providers/torrent/torrentleech/main.py deleted file mode 100644 index 6de18fbdf3..0000000000 --- a/couchpotato/core/providers/torrent/torrentleech/main.py +++ /dev/null @@ -1,76 +0,0 @@ -from bs4 import BeautifulSoup -from couchpotato.core.helpers.encoding import tryUrlencode -from couchpotato.core.helpers.variable import tryInt -from couchpotato.core.logger import CPLog -from couchpotato.core.providers.torrent.base import TorrentProvider -import traceback - - -log = CPLog(__name__) - - -class TorrentLeech(TorrentProvider): - - urls = { - 'test' : 'http://www.torrentleech.org/', - 'login' : 'http://www.torrentleech.org/user/account/login/', - 'detail' : 'http://www.torrentleech.org/torrent/%s', - 'search' : 'http://www.torrentleech.org/torrents/browse/index/query/%s/categories/%d', - 'download' : 'http://www.torrentleech.org%s', - } - - cat_ids = [ - ([13], ['720p', 
'1080p']), - ([8], ['cam']), - ([9], ['ts', 'tc']), - ([10], ['r5', 'scr']), - ([11], ['dvdrip']), - ([14], ['brrip']), - ([12], ['dvdr']), - ] - - http_time_between_calls = 1 #seconds - cat_backup_id = None - - def _searchOnTitle(self, title, movie, quality, results): - - url = self.urls['search'] % (tryUrlencode('%s %s' % (title.replace(':', ''), movie['library']['year'])), self.getCatId(quality['identifier'])[0]) - data = self.getHTMLData(url, opener = self.login_opener) - - if data: - html = BeautifulSoup(data) - - try: - result_table = html.find('table', attrs = {'id' : 'torrenttable'}) - if not result_table: - return - - entries = result_table.find_all('tr') - - for result in entries[1:]: - - link = result.find('td', attrs = {'class' : 'name'}).find('a') - url = result.find('td', attrs = {'class' : 'quickdownload'}).find('a') - details = result.find('td', attrs = {'class' : 'name'}).find('a') - - results.append({ - 'id': link['href'].replace('/torrent/', ''), - 'name': link.string, - 'url': self.urls['download'] % url['href'], - 'detail_url': self.urls['download'] % details['href'], - 'download': self.loginDownload, - 'size': self.parseSize(result.find_all('td')[4].string), - 'seeders': tryInt(result.find('td', attrs = {'class' : 'seeders'}).string), - 'leechers': tryInt(result.find('td', attrs = {'class' : 'leechers'}).string), - }) - - except: - log.error('Failed to parsing %s: %s', (self.getName(), traceback.format_exc())) - - def getLoginParams(self): - return tryUrlencode({ - 'username': self.conf('username'), - 'password': self.conf('password'), - 'remember_me': 'on', - 'login': 'submit', - }) diff --git a/couchpotato/core/providers/trailer/base.py b/couchpotato/core/providers/trailer/base.py deleted file mode 100644 index 338ca9b329..0000000000 --- a/couchpotato/core/providers/trailer/base.py +++ /dev/null @@ -1,13 +0,0 @@ -from couchpotato.core.event import addEvent -from couchpotato.core.logger import CPLog -from couchpotato.core.providers.base 
import Provider - -log = CPLog(__name__) - - -class TrailerProvider(Provider): - - type = 'trailer' - - def __init__(self): - addEvent('trailer.search', self.search) diff --git a/couchpotato/core/providers/trailer/hdtrailers/__init__.py b/couchpotato/core/providers/trailer/hdtrailers/__init__.py deleted file mode 100644 index 016db7a261..0000000000 --- a/couchpotato/core/providers/trailer/hdtrailers/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -from .main import HDTrailers - -def start(): - return HDTrailers() - -config = [] diff --git a/couchpotato/core/providers/trailer/hdtrailers/main.py b/couchpotato/core/providers/trailer/hdtrailers/main.py deleted file mode 100644 index 320a5835ec..0000000000 --- a/couchpotato/core/providers/trailer/hdtrailers/main.py +++ /dev/null @@ -1,124 +0,0 @@ -from bs4 import SoupStrainer, BeautifulSoup -from couchpotato.core.helpers.encoding import tryUrlencode -from couchpotato.core.helpers.variable import mergeDicts, getTitle -from couchpotato.core.logger import CPLog -from couchpotato.core.providers.trailer.base import TrailerProvider -from string import digits, ascii_letters -from urllib2 import HTTPError -import re - -log = CPLog(__name__) - - -class HDTrailers(TrailerProvider): - - urls = { - 'api': 'http://www.hd-trailers.net/movie/%s/', - 'backup': 'http://www.hd-trailers.net/blog/', - } - providers = ['apple.ico', 'yahoo.ico', 'moviefone.ico', 'myspace.ico', 'favicon.ico'] - - def search(self, group): - - movie_name = getTitle(group['library']) - - url = self.urls['api'] % self.movieUrlName(movie_name) - try: - data = self.getCache('hdtrailers.%s' % group['library']['identifier'], url, show_error = False) - except HTTPError: - log.debug('No page found for: %s', movie_name) - data = None - - result_data = {'480p':[], '720p':[], '1080p':[]} - - if not data: - return result_data - - did_alternative = False - for provider in self.providers: - results = self.findByProvider(data, provider) - - # Find alternative - if 
results.get('404') and not did_alternative: - results = self.findViaAlternative(group) - did_alternative = True - - result_data = mergeDicts(result_data, results) - - return result_data - - def findViaAlternative(self, group): - results = {'480p':[], '720p':[], '1080p':[]} - - movie_name = getTitle(group['library']) - - url = "%s?%s" % (self.urls['backup'], tryUrlencode({'s':movie_name})) - try: - data = self.getCache('hdtrailers.alt.%s' % group['library']['identifier'], url, show_error = False) - except HTTPError: - log.debug('No alternative page found for: %s', movie_name) - data = None - - if not data: - return results - - try: - tables = SoupStrainer('div') - html = BeautifulSoup(data, parse_only = tables) - result_table = html.find_all('h2', text = re.compile(movie_name)) - - for h2 in result_table: - if 'trailer' in h2.lower(): - parent = h2.parent.parent.parent - trailerLinks = parent.find_all('a', text = re.compile('480p|720p|1080p')) - try: - for trailer in trailerLinks: - results[trailer].insert(0, trailer.parent['href']) - except: - pass - - except AttributeError: - log.debug('No trailers found in via alternative.') - - return results - - def findByProvider(self, data, provider): - - results = {'480p':[], '720p':[], '1080p':[]} - try: - tables = SoupStrainer('table') - html = BeautifulSoup(data, parse_only = tables) - result_table = html.find('table', attrs = {'class':'bottomTable'}) - - - for tr in result_table.find_all('tr'): - trtext = str(tr).lower() - if 'clips' in trtext: - break - if 'trailer' in trtext and not 'clip' in trtext and provider in trtext: - nr = 0 - if 'trailer' not in tr.find('span', 'standardTrailerName').text.lower(): - continue - resolutions = tr.find_all('td', attrs = {'class':'bottomTableResolution'}) - for res in resolutions: - results[str(res.a.contents[0])].insert(0, res.a['href']) - nr += 1 - - return results - - except AttributeError: - log.debug('No trailers found in provider %s.', provider) - results['404'] = True - - 
return results - - def movieUrlName(self, string): - safe_chars = ascii_letters + digits + ' ' - r = ''.join([char if char in safe_chars else ' ' for char in string]) - name = re.sub('\s+' , '-', r).lower() - - try: - int(name) - return '-' + name - except: - return name diff --git a/couchpotato/core/providers/userscript/allocine/__init__.py b/couchpotato/core/providers/userscript/allocine/__init__.py deleted file mode 100644 index e451996f22..0000000000 --- a/couchpotato/core/providers/userscript/allocine/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -from .main import AlloCine - -def start(): - return AlloCine() - -config = [] diff --git a/couchpotato/core/providers/userscript/allocine/main.py b/couchpotato/core/providers/userscript/allocine/main.py deleted file mode 100644 index 8cc889eee5..0000000000 --- a/couchpotato/core/providers/userscript/allocine/main.py +++ /dev/null @@ -1,35 +0,0 @@ -from couchpotato.core.logger import CPLog -from couchpotato.core.providers.userscript.base import UserscriptBase -import traceback - -log = CPLog(__name__) - - -class AlloCine(UserscriptBase): - - includes = ['http://www.allocine.fr/film/*'] - - def getMovie(self, url): - - if not 'fichefilm_gen_cfilm' in url: - return 'Url isn\'t from a movie' - - try: - data = self.getUrl(url) - except: - return - - name = None - year = None - - try: - start = data.find('') - end = data.find('', start) - page_title = data[start + len(''):end].strip().split('-') - - name = page_title[0].strip() - year = page_title[1].strip()[-4:] - return self.search(name, year) - except: - log.error('Failed parsing page for title and year: %s', traceback.format_exc()) - diff --git a/couchpotato/core/providers/userscript/appletrailers/__init__.py b/couchpotato/core/providers/userscript/appletrailers/__init__.py deleted file mode 100644 index e8078f470b..0000000000 --- a/couchpotato/core/providers/userscript/appletrailers/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -from .main import AppleTrailers - -def start(): 
- return AppleTrailers() - -config = [] diff --git a/couchpotato/core/providers/userscript/appletrailers/main.py b/couchpotato/core/providers/userscript/appletrailers/main.py deleted file mode 100644 index 693065d104..0000000000 --- a/couchpotato/core/providers/userscript/appletrailers/main.py +++ /dev/null @@ -1,22 +0,0 @@ -from couchpotato.core.providers.userscript.base import UserscriptBase -import re - - -class AppleTrailers(UserscriptBase): - - includes = ['http://trailers.apple.com/trailers/*'] - - def getMovie(self, url): - - try: - data = self.getUrl(url) - except: - return - - name = re.search("trailerTitle.*=.*\'(?P<name>.*)\';", data) - name = name.group('name').decode('string_escape') - - date = re.search("releaseDate.*=.*\'(?P<date>.*)\';", data) - year = date.group('date')[:4] - - return self.search(name, year) diff --git a/couchpotato/core/providers/userscript/base.py b/couchpotato/core/providers/userscript/base.py deleted file mode 100644 index 571b76c002..0000000000 --- a/couchpotato/core/providers/userscript/base.py +++ /dev/null @@ -1,66 +0,0 @@ -from couchpotato.core.event import addEvent, fireEvent -from couchpotato.core.helpers.encoding import simplifyString -from couchpotato.core.helpers.variable import getImdb, md5 -from couchpotato.core.logger import CPLog -from couchpotato.core.plugins.base import Plugin -from urlparse import urlparse - -log = CPLog(__name__) - - -class UserscriptBase(Plugin): - - version = 1 - - includes = [] - excludes = [] - - def __init__(self): - addEvent('userscript.get_includes', self.getInclude) - addEvent('userscript.get_excludes', self.getExclude) - addEvent('userscript.get_provider_version', self.getVersion) - addEvent('userscript.get_movie_via_url', self.belongsTo) - - def search(self, name, year = None): - result = fireEvent('movie.search', q = '%s %s' % (name, year), limit = 1, merge = True) - - if len(result) > 0: - movie = fireEvent('movie.info', identifier = result[0].get('imdb'), merge = True) - return 
movie - else: - return None - - def belongsTo(self, url): - - host = urlparse(url).hostname - host_split = host.split('.') - if len(host_split) > 2: - host = host[len(host_split[0]):] - - for include in self.includes: - if host in include: - return self.getMovie(url) - - return - - def getUrl(self, url): - return self.getCache(md5(simplifyString(url)), url = url) - - def getMovie(self, url): - try: - data = self.getUrl(url) - except: - data = '' - return self.getInfo(getImdb(data)) - - def getInfo(self, identifier): - return fireEvent('movie.info', identifier = identifier, merge = True) - - def getInclude(self): - return self.includes - - def getExclude(self): - return self.excludes - - def getVersion(self): - return self.version diff --git a/couchpotato/core/providers/userscript/filmweb/__init__.py b/couchpotato/core/providers/userscript/filmweb/__init__.py deleted file mode 100644 index 8ead54d65d..0000000000 --- a/couchpotato/core/providers/userscript/filmweb/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -from .main import Filmweb - -def start(): - return Filmweb() - -config = [] diff --git a/couchpotato/core/providers/userscript/filmweb/main.py b/couchpotato/core/providers/userscript/filmweb/main.py deleted file mode 100644 index 5e6adbe131..0000000000 --- a/couchpotato/core/providers/userscript/filmweb/main.py +++ /dev/null @@ -1,28 +0,0 @@ -from couchpotato.core.providers.userscript.base import UserscriptBase -import re - - -class Filmweb(UserscriptBase): - - includes = ['http://www.filmweb.pl/*'] - - def getMovie(self, url): - - cookie = {'Cookie': 'welcomeScreen=welcome_screen'} - - try: - data = self.urlopen(url, headers = cookie) - except: - return - - name = re.search("<h2.*?class=\"text-large caption\">(?P<name>[^<]+)</h2>", data) - - if name is None: - name = re.search("<a.*?property=\"v:name\".*?>(?P<name>[^<]+)</a>", data) - - name = name.group('name').decode('string_escape') - - year = re.search("<span.*?id=filmYear.*?>\((?P<year>[^\)]+)\).*?</span>", 
data) - year = year.group('year') - - return self.search(name, year) diff --git a/couchpotato/core/providers/userscript/imdb/__init__.py b/couchpotato/core/providers/userscript/imdb/__init__.py deleted file mode 100644 index f10505da7d..0000000000 --- a/couchpotato/core/providers/userscript/imdb/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -from .main import IMDB - -def start(): - return IMDB() - -config = [] diff --git a/couchpotato/core/providers/userscript/imdb/main.py b/couchpotato/core/providers/userscript/imdb/main.py deleted file mode 100644 index 24278b1a5a..0000000000 --- a/couchpotato/core/providers/userscript/imdb/main.py +++ /dev/null @@ -1,11 +0,0 @@ -from couchpotato.core.event import fireEvent -from couchpotato.core.helpers.variable import getImdb -from couchpotato.core.providers.userscript.base import UserscriptBase - - -class IMDB(UserscriptBase): - - includes = ['*://*.imdb.com/title/tt*', '*://imdb.com/title/tt*'] - - def getMovie(self, url): - return fireEvent('movie.info', identifier = getImdb(url), merge = True) diff --git a/couchpotato/core/providers/userscript/letterboxd/__init__.py b/couchpotato/core/providers/userscript/letterboxd/__init__.py deleted file mode 100644 index c8c17977e4..0000000000 --- a/couchpotato/core/providers/userscript/letterboxd/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -from .main import Letterboxd - -def start(): - return Letterboxd() - -config = [] diff --git a/couchpotato/core/providers/userscript/letterboxd/main.py b/couchpotato/core/providers/userscript/letterboxd/main.py deleted file mode 100644 index c0d91d79b9..0000000000 --- a/couchpotato/core/providers/userscript/letterboxd/main.py +++ /dev/null @@ -1,6 +0,0 @@ -from couchpotato.core.providers.userscript.base import UserscriptBase - - -class Letterboxd(UserscriptBase): - - includes = ['*://letterboxd.com/film/*'] diff --git a/couchpotato/core/providers/userscript/moviemeter/__init__.py b/couchpotato/core/providers/userscript/moviemeter/__init__.py deleted file 
mode 100644 index 5e3813c413..0000000000 --- a/couchpotato/core/providers/userscript/moviemeter/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -from .main import MovieMeter - -def start(): - return MovieMeter() - -config = [] diff --git a/couchpotato/core/providers/userscript/moviemeter/main.py b/couchpotato/core/providers/userscript/moviemeter/main.py deleted file mode 100644 index 3593d432f1..0000000000 --- a/couchpotato/core/providers/userscript/moviemeter/main.py +++ /dev/null @@ -1,6 +0,0 @@ -from couchpotato.core.providers.userscript.base import UserscriptBase - - -class MovieMeter(UserscriptBase): - - includes = ['http://*.moviemeter.nl/film/*', 'http://moviemeter.nl/film/*'] diff --git a/couchpotato/core/providers/userscript/moviesio/__init__.py b/couchpotato/core/providers/userscript/moviesio/__init__.py deleted file mode 100644 index 473f847db3..0000000000 --- a/couchpotato/core/providers/userscript/moviesio/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -from .main import MoviesIO - -def start(): - return MoviesIO() - -config = [] diff --git a/couchpotato/core/providers/userscript/moviesio/main.py b/couchpotato/core/providers/userscript/moviesio/main.py deleted file mode 100644 index 5dab618380..0000000000 --- a/couchpotato/core/providers/userscript/moviesio/main.py +++ /dev/null @@ -1,6 +0,0 @@ -from couchpotato.core.providers.userscript.base import UserscriptBase - - -class MoviesIO(UserscriptBase): - - includes = ['*://movies.io/m/*'] diff --git a/couchpotato/core/providers/userscript/rottentomatoes/__init__.py b/couchpotato/core/providers/userscript/rottentomatoes/__init__.py deleted file mode 100644 index ee8266ebdd..0000000000 --- a/couchpotato/core/providers/userscript/rottentomatoes/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -from .main import RottenTomatoes - -def start(): - return RottenTomatoes() - -config = [] diff --git a/couchpotato/core/providers/userscript/rottentomatoes/main.py b/couchpotato/core/providers/userscript/rottentomatoes/main.py deleted 
file mode 100644 index 0b16a441ca..0000000000 --- a/couchpotato/core/providers/userscript/rottentomatoes/main.py +++ /dev/null @@ -1,39 +0,0 @@ -from couchpotato.core.logger import CPLog -from couchpotato.core.providers.userscript.base import UserscriptBase -import re -import traceback - -log = CPLog(__name__) - - -class RottenTomatoes(UserscriptBase): - - includes = ['*://www.rottentomatoes.com/m/*/'] - excludes = ['*://www.rottentomatoes.com/m/*/*/'] - - version = 2 - - def getMovie(self, url): - - try: - data = self.getUrl(url) - except: - return - - try: - name = None - year = None - metas = re.findall("property=\"(video:release_date|og:title)\" content=\"([^\"]*)\"", data) - - for meta in metas: - mname, mvalue = meta - if mname == 'og:title': - name = mvalue.decode('unicode_escape') - elif mname == 'video:release_date': - year = mvalue[:4] - - if name and year: - return self.search(name, year) - - except: - log.error('Failed parsing page for title and year: %s', traceback.format_exc()) diff --git a/couchpotato/core/providers/userscript/sharethe/__init__.py b/couchpotato/core/providers/userscript/sharethe/__init__.py deleted file mode 100644 index 7661f76117..0000000000 --- a/couchpotato/core/providers/userscript/sharethe/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -from .main import ShareThe - -def start(): - return ShareThe() - -config = [] diff --git a/couchpotato/core/providers/userscript/sharethe/main.py b/couchpotato/core/providers/userscript/sharethe/main.py deleted file mode 100644 index d22b67ebe2..0000000000 --- a/couchpotato/core/providers/userscript/sharethe/main.py +++ /dev/null @@ -1,6 +0,0 @@ -from couchpotato.core.providers.userscript.base import UserscriptBase - - -class ShareThe(UserscriptBase): - - includes = ['http://*.sharethe.tv/movies/*', 'http://sharethe.tv/movies/*'] diff --git a/couchpotato/core/providers/userscript/tmdb/__init__.py b/couchpotato/core/providers/userscript/tmdb/__init__.py deleted file mode 100644 index 
be33372c41..0000000000 --- a/couchpotato/core/providers/userscript/tmdb/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -from .main import TMDB - -def start(): - return TMDB() - -config = [] diff --git a/couchpotato/core/providers/userscript/tmdb/main.py b/couchpotato/core/providers/userscript/tmdb/main.py deleted file mode 100644 index 6205851ef6..0000000000 --- a/couchpotato/core/providers/userscript/tmdb/main.py +++ /dev/null @@ -1,16 +0,0 @@ -from couchpotato.core.event import fireEvent -from couchpotato.core.providers.userscript.base import UserscriptBase -import re - - -class TMDB(UserscriptBase): - - includes = ['http://www.themoviedb.org/movie/*'] - - def getMovie(self, url): - match = re.search('(?P<id>\d+)', url) - movie = fireEvent('movie.info_by_tmdb', id = match.group('id'), merge = True) - - if movie['imdb']: - return self.getInfo(movie['imdb']) - diff --git a/couchpotato/core/providers/userscript/trakt/__init__.py b/couchpotato/core/providers/userscript/trakt/__init__.py deleted file mode 100644 index ff67c1ecf2..0000000000 --- a/couchpotato/core/providers/userscript/trakt/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -from .main import Trakt - -def start(): - return Trakt() - -config = [] diff --git a/couchpotato/core/providers/userscript/trakt/main.py b/couchpotato/core/providers/userscript/trakt/main.py deleted file mode 100644 index 43e06deefa..0000000000 --- a/couchpotato/core/providers/userscript/trakt/main.py +++ /dev/null @@ -1,7 +0,0 @@ -from couchpotato.core.providers.userscript.base import UserscriptBase - - -class Trakt(UserscriptBase): - - includes = ['http://trakt.tv/movie/*', 'http://*.trakt.tv/movie/*'] - excludes = ['http://trakt.tv/movie/*/*', 'http://*.trakt.tv/movie/*/*'] diff --git a/couchpotato/core/providers/userscript/whiwa/__init__.py b/couchpotato/core/providers/userscript/whiwa/__init__.py deleted file mode 100644 index 6577ae33b7..0000000000 --- a/couchpotato/core/providers/userscript/whiwa/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ 
-from .main import WHiWA - -def start(): - return WHiWA() - -config = [] diff --git a/couchpotato/core/providers/userscript/whiwa/main.py b/couchpotato/core/providers/userscript/whiwa/main.py deleted file mode 100644 index 40ffa2a943..0000000000 --- a/couchpotato/core/providers/userscript/whiwa/main.py +++ /dev/null @@ -1,6 +0,0 @@ -from couchpotato.core.providers.userscript.base import UserscriptBase - - -class WHiWA(UserscriptBase): - - includes = ['http://whiwa.net/stats/movie/*'] diff --git a/couchpotato/core/providers/userscript/youteather/__init__.py b/couchpotato/core/providers/userscript/youteather/__init__.py deleted file mode 100644 index a07bf56b45..0000000000 --- a/couchpotato/core/providers/userscript/youteather/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -from .main import YouTheater - -def start(): - return YouTheater() - -config = [] diff --git a/couchpotato/core/providers/userscript/youteather/main.py b/couchpotato/core/providers/userscript/youteather/main.py deleted file mode 100644 index 3efd36867c..0000000000 --- a/couchpotato/core/providers/userscript/youteather/main.py +++ /dev/null @@ -1,12 +0,0 @@ -from couchpotato.core.providers.userscript.base import UserscriptBase -import re - -class YouTheater(UserscriptBase): - id_re = re.compile("view\.php\?id=(\d+)") - includes = ['http://www.youtheater.com/view.php?id=*', 'http://youtheater.com/view.php?id=*', - 'http://www.sratim.co.il/view.php?id=*', 'http://sratim.co.il/view.php?id=*'] - - def getMovie(self, url): - id = self.id_re.findall(url)[0] - url = 'http://www.youtheater.com/view.php?id=%s' % id - return super(YouTheater, self).getMovie(url) diff --git a/couchpotato/core/settings.py b/couchpotato/core/settings.py new file mode 100644 index 0000000000..9e4bd1d9f2 --- /dev/null +++ b/couchpotato/core/settings.py @@ -0,0 +1,464 @@ +from __future__ import with_statement +import ConfigParser +import traceback +from hashlib import md5 + +from CodernityDB.hash_index import HashIndex +from 
couchpotato.api import addApiView +from couchpotato.core.event import addEvent, fireEvent +from couchpotato.core.helpers.encoding import toUnicode +from couchpotato.core.helpers.variable import mergeDicts, tryInt, tryFloat + +class Settings(object): + + options = {} + types = {} + + def __init__(self): + + addApiView('settings', self.view, docs = { + 'desc': 'Return the options and its values of settings.conf. Including the default values and group ordering used on the settings page.', + 'return': {'type': 'object', 'example': """{ + // objects like in __init__.py of plugin + "options": { + "moovee" : { + "groups" : [{ + "description" : "SD movies only", + "name" : "#alt.binaries.moovee", + "options" : [{ + "default" : false, + "name" : "enabled", + "type" : "enabler" + }], + "tab" : "providers" + }], + "name" : "moovee" + } + }, + // object structured like settings.conf + "values": { + "moovee": { + "enabled": false + } + } +}"""} + }) + + addApiView('settings.save', self.saveView, docs = { + 'desc': 'Save setting to config file (settings.conf)', + 'params': { + 'section': {'desc': 'The section name in settings.conf'}, + 'name': {'desc': 'The option name'}, + 'value': {'desc': 'The value you want to save'}, + } + }) + + addEvent('database.setup', self.databaseSetup) + + self.file = None + self.p = None + self.log = None + self.directories_delimiter = "::" + + def setFile(self, config_file): + self.file = config_file + + self.p = ConfigParser.RawConfigParser() + self.p.read(config_file) + + from couchpotato.core.logger import CPLog + self.log = CPLog(__name__) + + self.connectEvents() + + def databaseSetup(self): + fireEvent('database.setup_index', 'property', PropertyIndex) + + def parser(self): + return self.p + + def sections(self): + res = filter( self.isSectionReadable, self.p.sections()) + return res + + def connectEvents(self): + addEvent('settings.options', self.addOptions) + addEvent('settings.register', self.registerDefaults) + addEvent('settings.save', 
self.save) + + def registerDefaults(self, section_name, options = None, save = True): + if not options: options = {} + + self.addSection(section_name) + + for option_name, option in options.items(): + self.setDefault(section_name, option_name, option.get('default', '')) + + # Set UI-meta for option (hidden/ro/rw) + if option.get('ui-meta'): + value = option.get('ui-meta') + if value: + value = value.lower() + if value in ['hidden', 'rw', 'ro']: + meta_option_name = option_name + self.optionMetaSuffix() + self.setDefault(section_name, meta_option_name, value) + else: + self.log.warning('Wrong value for option %s.%s : ui-meta can not be equal to "%s"', (section_name, option_name, value)) + + # Migrate old settings from old location to the new location + if option.get('migrate_from'): + if self.p.has_option(option.get('migrate_from'), option_name): + previous_value = self.p.get(option.get('migrate_from'), option_name) + self.p.set(section_name, option_name, previous_value) + self.p.remove_option(option.get('migrate_from'), option_name) + + if option.get('type'): + self.setType(section_name, option_name, option.get('type')) + + if save: + self.save() + + def set(self, section, option, value): + if not self.isOptionWritable(section, option): + self.log.warning('set::option "%s.%s" isn\'t writable', (section, option)) + return None + if self.isOptionMeta(section, option): + self.log.warning('set::option "%s.%s" cancelled, since it is a META option', (section, option)) + return None + + return self.p.set(section, option, value) + + def get(self, option = '', section = 'core', default = None, type = None): + if self.isOptionMeta(section, option): + self.log.warning('get::option "%s.%s" cancelled, since it is a META option', (section, option)) + return None + + tp = type + try: + tp = self.getType(section, option) if not tp else tp + + if hasattr(self, 'get%s' % tp.capitalize()): + return getattr(self, 'get%s' % tp.capitalize())(section, option) + else: + return 
self.getUnicode(section, option) + + except: + return default + + def delete(self, option = '', section = 'core'): + if not self.isOptionWritable(section, option): + self.log.warning('delete::option "%s.%s" isn\'t writable', (section, option)) + return None + + if self.isOptionMeta(section, option): + self.log.warning('set::option "%s.%s" cancelled, since it is a META option', (section, option)) + return None + + self.p.remove_option(section, option) + self.save() + + def getEnabler(self, section, option): + return self.getBool(section, option) + + def getBool(self, section, option): + try: + return self.p.getboolean(section, option) + except: + return self.p.get(section, option) == 1 + + def getInt(self, section, option): + try: + return self.p.getint(section, option) + except: + return tryInt(self.p.get(section, option)) + + def getFloat(self, section, option): + try: + return self.p.getfloat(section, option) + except: + return tryFloat(self.p.get(section, option)) + + def getDirectories(self, section, option): + value = self.p.get(section, option) + + if value: + return map(str.strip, str.split(value, self.directories_delimiter)) + return [] + + def getUnicode(self, section, option): + value = self.p.get(section, option).decode('unicode_escape') + return toUnicode(value).strip() + + def getValues(self): + from couchpotato.environment import Env + + values = {} + soft_chroot = Env.get('softchroot') + + # TODO : There is two commented "continue" blocks (# COMMENTED_SKIPPING). They both are good... + # ... but, they omit output of values of hidden and non-readable options + # Currently, such behaviour could break the Web UI of CP... + # So, currently this two blocks are commented (but they are required to + # provide secure hidding of options. 
+ for section in self.sections(): + + # COMMENTED_SKIPPING + #if not self.isSectionReadable(section): + # continue + + values[section] = {} + for option in self.p.items(section): + (option_name, option_value) = option + + #skip meta options: + if self.isOptionMeta(section, option_name): + continue + + # COMMENTED_SKIPPING + #if not self.isOptionReadable(section, option_name): + # continue + + value = self.get(option_name, section) + + is_password = self.getType(section, option_name) == 'password' + if is_password and value: + value = len(value) * '*' + + # chrootify directory before sending to UI: + if (self.getType(section, option_name) == 'directory') and value: + try: value = soft_chroot.abs2chroot(value) + except: value = "" + # chrootify directories before sending to UI: + if (self.getType(section, option_name) == 'directories'): + if (not value): + value = [] + try : value = map(soft_chroot.abs2chroot, value) + except : value = [] + + values[section][option_name] = value + + return values + + def save(self): + with open(self.file, 'wb') as configfile: + self.p.write(configfile) + + def addSection(self, section): + if not self.p.has_section(section): + self.p.add_section(section) + + def setDefault(self, section, option, value): + if not self.p.has_option(section, option): + self.p.set(section, option, value) + + def setType(self, section, option, type): + if not self.types.get(section): + self.types[section] = {} + + self.types[section][option] = type + + def getType(self, section, option): + tp = None + try: tp = self.types[section][option] + except: tp = 'unicode' if not tp else tp + return tp + + def addOptions(self, section_name, options): + # no additional actions (related to ro-rw options) are required here + if not self.options.get(section_name): + self.options[section_name] = options + else: + self.options[section_name] = mergeDicts(self.options[section_name], options) + + def getOptions(self): + """Returns dict of UI-readable options + + To check, 
whether the option is readable self.isOptionReadable() is used + """ + + res = {} + + # it is required to filter invisible options for UI, but also we should + # preserve original tree for server's purposes. + # So, next loops do one thing: copy options to res and in the process + # 1. omit NON-READABLE (for UI) options, and + # 2. put flags on READONLY options + for section_key in self.options.keys(): + section_orig = self.options[section_key] + section_name = section_orig.get('name') if 'name' in section_orig else section_key + if self.isSectionReadable(section_name): + section_copy = {} + section_copy_groups = [] + for section_field in section_orig: + if section_field.lower() != 'groups': + section_copy[section_field] = section_orig[section_field] + else: + for group_orig in section_orig['groups']: + group_copy = {} + group_copy_options = [] + for group_field in group_orig: + if group_field.lower() != 'options': + group_copy[group_field] = group_orig[group_field] + else: + for option in group_orig[group_field]: + option_name = option.get('name') + # You should keep in mind, that READONLY = !IS_WRITABLE + # and IS_READABLE is a different thing + if self.isOptionReadable(section_name, option_name): + group_copy_options.append(option) + if not self.isOptionWritable(section_name, option_name): + option['readonly'] = True + if len(group_copy_options)>0: + group_copy['options'] = group_copy_options + section_copy_groups.append(group_copy) + if len(section_copy_groups)>0: + section_copy['groups'] = section_copy_groups + res[section_key] = section_copy + + return res + + def view(self, **kwargs): + return { + 'options': self.getOptions(), + 'values': self.getValues() + } + + def saveView(self, **kwargs): + + section = kwargs.get('section') + option = kwargs.get('name') + value = kwargs.get('value') + + if not self.isOptionWritable(section, option): + self.log.warning('Option "%s.%s" isn\'t writable', (section, option)) + return { + 'success' : False, + } + + from 
couchpotato.environment import Env + soft_chroot = Env.get('softchroot') + + if self.getType(section, option) == 'directory': + value = soft_chroot.chroot2abs(value) + + if self.getType(section, option) == 'directories': + import json + value = json.loads(value) + if not (value and isinstance(value, list)): + value = [] + value = map(soft_chroot.chroot2abs, value) + value = self.directories_delimiter.join(value) + + # See if a value handler is attached, use that as value + new_value = fireEvent('setting.save.%s.%s' % (section, option), value, single = True) + + self.set(section, option, (new_value if new_value else value).encode('unicode_escape')) + self.save() + + # After save (for re-interval etc) + fireEvent('setting.save.%s.%s.after' % (section, option), single = True) + fireEvent('setting.save.%s.*.after' % section, single = True) + + return { + 'success': True + } + + def isSectionReadable(self, section): + meta = 'section_hidden' + self.optionMetaSuffix() + try: + return not self.p.getboolean(section, meta) + except: pass + + # by default - every section is readable: + return True + + def isOptionReadable(self, section, option): + meta = option + self.optionMetaSuffix() + if self.p.has_option(section, meta): + meta_v = self.p.get(section, meta).lower() + return (meta_v == 'rw') or (meta_v == 'ro') + + # by default - all is writable: + return True + + def optionReadableCheckAndWarn(self, section, option): + x = self.isOptionReadable(section, option) + if not x: + self.log.warning('Option "%s.%s" isn\'t readable', (section, option)) + return x + + def isOptionWritable(self, section, option): + meta = option + self.optionMetaSuffix() + if self.p.has_option(section, meta): + return self.p.get(section, meta).lower() == 'rw' + + # by default - all is writable: + return True + + def optionMetaSuffix(self): + return '_internal_meta' + + def isOptionMeta(self, section, option): + """ A helper method for detecting internal-meta options in the ini-file + + For a meta 
options used following names: + * section_hidden_internal_meta = (True | False) - for section visibility + * <OPTION>_internal_meta = (ro|rw|hidden) - for section visibility + + """ + + suffix = self.optionMetaSuffix() + return option.endswith(suffix) + + def getProperty(self, identifier): + from couchpotato import get_db + + db = get_db() + prop = None + + identifier = identifier.encode("ascii","ignore") # if identifier is not ascii it crashes below in the db access + + try: + propert = db.get('property', identifier, with_doc = True) + prop = propert['doc']['value'] + except ValueError: + propert = db.get('property', identifier) + fireEvent('database.delete_corrupted', propert.get('_id')) + except: + self.log.debug('Property "%s" doesn\'t exist: %s', (identifier, traceback.format_exc(0))) + + return prop + + def setProperty(self, identifier, value = ''): + from couchpotato import get_db + + db = get_db() + + try: + p = db.get('property', identifier, with_doc = True) + p['doc'].update({ + 'identifier': identifier, + 'value': toUnicode(value), + }) + db.update(p['doc']) + except: + db.insert({ + '_t': 'property', + 'identifier': identifier, + 'value': toUnicode(value), + }) + + +class PropertyIndex(HashIndex): + _version = 1 + + def __init__(self, *args, **kwargs): + kwargs['key_format'] = '32s' + super(PropertyIndex, self).__init__(*args, **kwargs) + + def make_key(self, key): + return md5(key).hexdigest() + + def make_key_value(self, data): + if data.get('_t') == 'property': + return md5(data['identifier']).hexdigest(), None diff --git a/couchpotato/core/settings/__init__.py b/couchpotato/core/settings/__init__.py deleted file mode 100644 index 00f77a647f..0000000000 --- a/couchpotato/core/settings/__init__.py +++ /dev/null @@ -1,222 +0,0 @@ -from __future__ import with_statement -from couchpotato.api import addApiView -from couchpotato.core.event import addEvent, fireEvent -from couchpotato.core.helpers.encoding import isInt, toUnicode -from 
couchpotato.core.helpers.request import getParams, jsonified -from couchpotato.core.helpers.variable import mergeDicts, tryInt -from couchpotato.core.settings.model import Properties -import ConfigParser -import os.path -import time -import traceback - - -class Settings(object): - - options = {} - types = {} - - def __init__(self): - - addApiView('settings', self.view, docs = { - 'desc': 'Return the options and its values of settings.conf. Including the default values and group ordering used on the settings page.', - 'return': {'type': 'object', 'example': """{ - // objects like in __init__.py of plugin - "options": { - "moovee" : { - "groups" : [{ - "description" : "SD movies only", - "name" : "#alt.binaries.moovee", - "options" : [{ - "default" : false, - "name" : "enabled", - "type" : "enabler" - }], - "tab" : "providers" - }], - "name" : "moovee" - } - }, - // object structured like settings.conf - "values": { - "moovee": { - "enabled": false - } - } -}"""} - }) - addApiView('settings.save', self.saveView, docs = { - 'desc': 'Save setting to config file (settings.conf)', - 'params': { - 'section': {'desc': 'The section name in settings.conf'}, - 'option': {'desc': 'The option name'}, - 'value': {'desc': 'The value you want to save'}, - } - }) - - def setFile(self, config_file): - self.file = config_file - - self.p = ConfigParser.RawConfigParser() - self.p.read(config_file) - - from couchpotato.core.logger import CPLog - self.log = CPLog(__name__) - - self.connectEvents() - - def parser(self): - return self.p - - def sections(self): - return self.p.sections() - - def connectEvents(self): - addEvent('settings.options', self.addOptions) - addEvent('settings.register', self.registerDefaults) - addEvent('settings.save', self.save) - - def registerDefaults(self, section_name, options = {}, save = True): - self.addSection(section_name) - for option_name, option in options.iteritems(): - self.setDefault(section_name, option_name, option.get('default', '')) - - if 
option.get('type'): - self.setType(section_name, option_name, option.get('type')) - - if save: - self.save(self) - - def set(self, section, option, value): - return self.p.set(section, option, value) - - def get(self, option = '', section = 'core', default = None, type = None): - try: - - try: type = self.types[section][option] - except: type = 'unicode' if not type else type - - if hasattr(self, 'get%s' % type.capitalize()): - return getattr(self, 'get%s' % type.capitalize())(section, option) - else: - return self.getUnicode(section, option) - - except: - return default - - def getEnabler(self, section, option): - return self.getBool(section, option) - - def getBool(self, section, option): - try: - return self.p.getboolean(section, option) - except: - return self.p.get(section, option) == 1 - - def getInt(self, section, option): - try: - return self.p.getint(section, option) - except: - return tryInt(self.p.get(section, option)) - - def getFloat(self, section, option): - try: - return self.p.getfloat(section, option) - except: - return tryInt(self.p.get(section, option)) - - def getUnicode(self, section, option): - value = self.p.get(section, option).decode('unicode_escape') - return toUnicode(value).strip() - - def getValues(self): - values = {} - for section in self.sections(): - values[section] = {} - for option in self.p.items(section): - (option_name, option_value) = option - values[section][option_name] = self.get(option_name, section) - return values - - def save(self): - with open(self.file, 'wb') as configfile: - self.p.write(configfile) - - self.log.debug('Saved settings') - - def addSection(self, section): - if not self.p.has_section(section): - self.p.add_section(section) - - def setDefault(self, section, option, value): - if not self.p.has_option(section, option): - self.p.set(section, option, value) - - def setType(self, section, option, type): - if not self.types.get(section): - self.types[section] = {} - - self.types[section][option] = type - - def 
addOptions(self, section_name, options): - - if not self.options.get(section_name): - self.options[section_name] = options - else: - self.options[section_name] = mergeDicts(self.options[section_name], options) - - def getOptions(self): - return self.options - - - def view(self): - return jsonified({ - 'options': self.getOptions(), - 'values': self.getValues() - }) - - def saveView(self): - - params = getParams() - - section = params.get('section') - option = params.get('name') - value = params.get('value') - - # See if a value handler is attached, use that as value - new_value = fireEvent('setting.save.%s.%s' % (section, option), value, single = True) - - self.set(section, option, (new_value if new_value else value).encode('unicode_escape')) - self.save() - - return jsonified({ - 'success': True, - }) - - def getProperty(self, identifier): - from couchpotato import get_session - - db = get_session() - prop = None - try: - propert = db.query(Properties).filter_by(identifier = identifier).first() - prop = propert.value - except: - pass - - return prop - - def setProperty(self, identifier, value = ''): - from couchpotato import get_session - - db = get_session() - - p = db.query(Properties).filter_by(identifier = identifier).first() - if not p: - p = Properties() - db.add(p) - - p.identifier = identifier - p.value = toUnicode(value) - - db.commit() diff --git a/couchpotato/core/settings/model.py b/couchpotato/core/settings/model.py deleted file mode 100644 index 64117fb48d..0000000000 --- a/couchpotato/core/settings/model.py +++ /dev/null @@ -1,262 +0,0 @@ -from couchpotato.core.helpers.encoding import toUnicode -from elixir.entity import Entity -from elixir.fields import Field -from elixir.options import options_defaults, using_options -from elixir.relationships import ManyToMany, OneToMany, ManyToOne -from sqlalchemy.types import Integer, Unicode, UnicodeText, Boolean, String, \ - TypeDecorator -import json -import time - -options_defaults["shortnames"] = True - -# 
We would like to be able to create this schema in a specific database at -# will, so we can test it easily. -# Make elixir not bind to any session to make this possible. -# -# http://elixir.ematia.de/trac/wiki/Recipes/MultipleDatabasesOneMetadata -__session__ = None - -class SetEncoder(json.JSONEncoder): - def default(self, obj): - if isinstance(obj, set): - return list(obj) - return json.JSONEncoder.default(self, obj) - - -class JsonType(TypeDecorator): - impl = UnicodeText - - def process_bind_param(self, value, dialect): - try: - return toUnicode(json.dumps(value, cls = SetEncoder)) - except: - try: - return toUnicode(json.dumps(value, cls = SetEncoder, encoding = 'latin-1')) - except: - raise - - def process_result_value(self, value, dialect): - return json.loads(value if value else '{}') - - -class Movie(Entity): - """Movie Resource a movie could have multiple releases - The files belonging to the movie object are global for the whole movie - such as trailers, nfo, thumbnails""" - - last_edit = Field(Integer, default = lambda: int(time.time())) - - library = ManyToOne('Library', cascade = 'delete, delete-orphan', single_parent = True) - status = ManyToOne('Status') - profile = ManyToOne('Profile') - releases = OneToMany('Release', cascade = 'all, delete-orphan') - files = ManyToMany('File', cascade = 'all, delete-orphan', single_parent = True) - - -class Library(Entity): - """""" - - year = Field(Integer) - identifier = Field(String(20), index = True) - - plot = Field(UnicodeText) - tagline = Field(UnicodeText(255)) - info = Field(JsonType) - - status = ManyToOne('Status') - movies = OneToMany('Movie', cascade = 'all, delete-orphan') - titles = OneToMany('LibraryTitle', cascade = 'all, delete-orphan') - files = ManyToMany('File', cascade = 'all, delete-orphan', single_parent = True) - - -class LibraryTitle(Entity): - """""" - using_options(order_by = '-default') - - title = Field(Unicode) - simple_title = Field(Unicode, index = True) - default = Field(Boolean, 
index = True) - - language = OneToMany('Language') - libraries = ManyToOne('Library') - - -class Language(Entity): - """""" - - identifier = Field(String(20), index = True) - label = Field(Unicode) - - titles = ManyToOne('LibraryTitle') - - -class Release(Entity): - """Logically groups all files that belong to a certain release, such as - parts of a movie, subtitles.""" - - identifier = Field(String(100), index = True) - - movie = ManyToOne('Movie') - status = ManyToOne('Status') - quality = ManyToOne('Quality') - files = ManyToMany('File') - info = OneToMany('ReleaseInfo', cascade = 'all, delete-orphan') - - def to_dict(self, deep = {}, exclude = []): - orig_dict = super(Release, self).to_dict(deep = deep, exclude = exclude) - - new_info = {} - for info in orig_dict.get('info', []): - - value = info['value'] - try: value = int(info['value']) - except: pass - - new_info[info['identifier']] = value - - orig_dict['info'] = new_info - - return orig_dict - - -class ReleaseInfo(Entity): - """Properties that can be bound to a file for off-line usage""" - - identifier = Field(String(50), index = True) - value = Field(Unicode(255), nullable = False) - - release = ManyToOne('Release') - - -class Status(Entity): - """The status of a release, such as Downloaded, Deleted, Wanted etc""" - - identifier = Field(String(20), unique = True) - label = Field(Unicode(20)) - - releases = OneToMany('Release') - movies = OneToMany('Movie') - - -class Quality(Entity): - """Quality name of a release, DVD, 720P, DVD-Rip etc""" - using_options(order_by = 'order') - - identifier = Field(String(20), unique = True) - label = Field(Unicode(20)) - order = Field(Integer, index = True) - - size_min = Field(Integer) - size_max = Field(Integer) - - releases = OneToMany('Release') - profile_types = OneToMany('ProfileType') - - -class Profile(Entity): - """""" - using_options(order_by = 'order') - - label = Field(Unicode(50)) - order = Field(Integer, index = True) - core = Field(Boolean) - hide = 
Field(Boolean) - - movie = OneToMany('Movie') - types = OneToMany('ProfileType', cascade = 'all, delete-orphan') - - -class ProfileType(Entity): - """""" - using_options(order_by = 'order') - - order = Field(Integer, index = True) - finish = Field(Boolean) - wait_for = Field(Integer) - - quality = ManyToOne('Quality') - profile = ManyToOne('Profile') - - -class File(Entity): - """File that belongs to a release.""" - - path = Field(Unicode(255), nullable = False, unique = True) - part = Field(Integer, default = 1) - available = Field(Boolean) - - type = ManyToOne('FileType') - properties = OneToMany('FileProperty') - - history = OneToMany('RenameHistory') - movie = ManyToMany('Movie') - release = ManyToMany('Release') - library = ManyToMany('Library') - - -class FileType(Entity): - """Types could be trailer, subtitle, movie, partial movie etc.""" - - identifier = Field(String(20), unique = True) - type = Field(Unicode(20)) - name = Field(Unicode(50), nullable = False) - - files = OneToMany('File') - - -class FileProperty(Entity): - """Properties that can be bound to a file for off-line usage""" - - identifier = Field(String(20), index = True) - value = Field(Unicode(255), nullable = False) - - file = ManyToOne('File') - - -class RenameHistory(Entity): - """Remembers from where to where files have been moved.""" - - old = Field(Unicode(255)) - new = Field(Unicode(255)) - - file = ManyToOne('File') - - -class Notification(Entity): - using_options(order_by = 'added') - - added = Field(Integer, default = lambda: int(time.time())) - read = Field(Boolean, default = False) - message = Field(Unicode(255)) - data = Field(JsonType) - - -class Folder(Entity): - """Renamer destination folders.""" - - path = Field(Unicode(255)) - label = Field(Unicode(255)) - - -class Properties(Entity): - - identifier = Field(String(50), index = True) - value = Field(Unicode(255), nullable = False) - - -def setup(): - """Setup the database and create the tables that don't exists yet""" - from 
elixir import setup_all, create_all - from couchpotato.environment import Env - - engine = Env.getEngine() - - setup_all() - create_all(engine) - - try: - engine.execute("PRAGMA journal_mode = WAL") - engine.execute("PRAGMA temp_store = MEMORY") - except: - pass diff --git a/couchpotato/core/settings_test.py b/couchpotato/core/settings_test.py new file mode 100644 index 0000000000..67aca4f7fc --- /dev/null +++ b/couchpotato/core/settings_test.py @@ -0,0 +1,173 @@ +import mock +from mock import patch, Mock, MagicMock +import unittest +from unittest import TestCase + +from couchpotato.core.settings import Settings + + +class DoNotUseMe: + """ Do not use this class, it is just for storing Mock ' s of Settings-class + + Usage: + Select appropriate Mocks and copy-paste them to your test-method + """ + + def __do_not_call(self): + # s = Settings + s = Mock() + + # methods: + s.isOptionWritable = Mock(return_value=True) + s.set = Mock(return_value=None) + s.save = Mock() + + # props: + s.log = Mock() + + # subobjects + s.p = Mock() + s.p.getboolean = Mock(return_value=True) + s.p.has_option = Mock + + +class SettingsCommon(TestCase): + + def setUp(self): + self.s = Settings() + + def test_get_directories(self): + s = self.s + raw = ' /some/directory ::/another/dir ' + exp = ['/some/directory', '/another/dir'] + + sec = 'sec' + opt = 'opt' + s.types[sec] = {} + s.types[sec][opt] = 'directories' + + s.p = MagicMock() + s.p.get.return_value = raw + + act = s.get(option = opt, section = sec) + + self.assertEqual(act, exp) + +class SettingsSaveWritableNonWritable(TestCase): + + def setUp(self): + self.s = Settings() + + def test_save_writable(self): + s = self.s + + # set up Settings-mocks : + # lets assume, that option is writable: + mock_isOptionWritable = s.isOptionWritable = Mock(return_value=True) + mock_set = s.set = Mock(return_value=None) + mock_p_save = s.save = Mock() + + section = 'core' + option = 'option_non_exist_be_sure' + value = "1000" + params = {'section': 
section, 'name': option, 'value': value} + + # call method: + env_mock = Mock() + + # HERE is an example of mocking LOCAL 'import' + with patch.dict('sys.modules', {'couchpotato.environment.Env': env_mock}): + result = s.saveView(**params) + + self.assertIsInstance(s, Settings) + self.assertIsInstance(result, dict) + self.assertTrue(result['success']) + + # ----------------------------------------- + # check mock + # ----------------------------------------- + mock_isOptionWritable.assert_called_with(section, option) + + # check, that Settings tried to save my value: + mock_set.assert_called_with(section, option, value) + + def test_save_non_writable(self): + s = self.s + + # set up Settings-mocks : + # lets assume, that option is not writable: + mock_is_w = s.isOptionWritable = Mock(return_value=False) + mock_set = s.set = Mock(return_value=None) + mock_p_save = s.save = Mock() + mock_log_s = s.log = Mock() + + section = 'core' + option = 'option_non_exist_be_sure' + value = "1000" + params = {'section': section, 'name': option, 'value': value} + + # call method: + env_mock = Mock() + + # HERE is an example of mocking LOCAL 'import' + with patch.dict('sys.modules', {'couchpotato.environment.Env': env_mock}): + result = s.saveView(**params) + + self.assertIsInstance(s, Settings) + self.assertIsInstance(result, dict) + self.assertFalse(result['success']) + + # ----------------------------------------- + # check mock + # ----------------------------------------- + # lets check, that 'set'-method was not called: + self.assertFalse(mock_set.called, 'Method `set` was called') + mock_is_w.assert_called_with(section, option) + + +class OptionMetaSuite(TestCase): + """ tests for ro rw hidden options """ + + def setUp(self): + self.s = Settings() + self.meta = self.s.optionMetaSuffix() + + # hide real config-parser: + self.s.p = Mock() + + def test_no_meta_option(self): + s = self.s + + section = 'core' + option = 'url' + + option_meta = option + self.meta + # setup mock + 
s.p.getboolean = Mock(return_value=True) + + # there is no META-record for our option: + s.p.has_option = Mock(side_effect=lambda s, o: not (s == section and o == option_meta)) + + # by default all options are writable and readable + self.assertTrue(s.isOptionWritable(section, option)) + self.assertTrue(s.isOptionReadable(section, option)) + + def test_non_writable(self): + s = self.s + + section = 'core' + option = 'url' + + def mock_get_meta_ro(s, o): + if (s == section and o == option_meta): + return 'ro' + return 11 + + option_meta = option + self.meta + # setup mock + s.p.has_option = Mock(return_value=True) + s.p.get = Mock(side_effect=mock_get_meta_ro) + + # by default all options are writable and readable + self.assertFalse(s.isOptionWritable(section, option)) + self.assertTrue(s.isOptionReadable(section, option)) diff --git a/couchpotato/core/softchroot.py b/couchpotato/core/softchroot.py new file mode 100644 index 0000000000..ef05ef4fc7 --- /dev/null +++ b/couchpotato/core/softchroot.py @@ -0,0 +1,134 @@ +import os +import sys + + +class SoftChrootInitError(IOError): + """Error during soft-chroot initialization""" + pass + +class SoftChroot: + """Soft Chroot module + + Provides chroot feature for interation with Web-UI. Since it is not real chroot, so the name is SOFT CHROOT. + The module prevents access to entire file-system, allowing access only to subdirs of SOFT-CHROOT directory. 
+ """ + def __init__(self): + self.enabled = None + self.chdir = None + + def initialize(self, chdir): + """ initialize module, by setting soft-chroot-directory + + Sets soft-chroot directory and 'enabled'-flag + + Args: + self (SoftChroot) : self + chdir (string) : absolute path to soft-chroot + + Raises: + SoftChrootInitError: when chdir doesn't exist + """ + + orig_chdir = chdir + + if chdir: + chdir = chdir.strip() + + if (chdir): + # enabling soft-chroot: + if not os.path.isdir(chdir): + raise SoftChrootInitError(2, 'SOFT-CHROOT is requested, but the folder doesn\'t exist', orig_chdir) + + self.enabled = True + self.chdir = chdir.rstrip(os.path.sep) + os.path.sep + else: + self.enabled = False + + def get_chroot(self): + """Returns root in chrooted environment + + Raises: + RuntimeError: when `SoftChroot` is not initialized OR enabled + """ + if None == self.enabled: + raise RuntimeError('SoftChroot is not initialized') + if not self.enabled: + raise RuntimeError('SoftChroot is not enabled') + + return self.chdir + + def is_root_abs(self, abspath): + """ Checks whether absolute path @abspath is the root in the soft-chrooted environment""" + if None == self.enabled: + raise RuntimeError('SoftChroot is not initialized') + + if None == abspath: + raise ValueError('abspath can not be None') + + if not self.enabled: + # if not chroot environment : check, whether parent is the same dir: + parent = os.path.dirname(abspath.rstrip(os.path.sep)) + return parent==abspath + + # in soft-chrooted env: check, that path == chroot + path = abspath.rstrip(os.path.sep) + os.path.sep + return self.chdir == path + + def is_subdir(self, abspath): + """ Checks whether @abspath is subdir (on any level) of soft-chroot""" + if None == self.enabled: + raise RuntimeError('SoftChroot is not initialized') + + if None == abspath: + return False + + if not self.enabled: + return True + + if not abspath.endswith(os.path.sep): + abspath += os.path.sep + + return abspath.startswith(self.chdir) 
# NOTE(review): reconstructed post-patch bodies of SoftChroot.chroot2abs /
# SoftChroot.abs2chroot (class header outside this chunk; `self` explicit).
# BUG FIX: the original contained `resulst = None` — a typo'd dead store
# before the branch that assigns `result`.  Also: `is None` instead of
# `None ==`, and `(force)` parentheses dropped.
# (The remainder of this span — the diff header and the opening of
# softchroot_test.py — is cut at the chunk boundary and left untouched.)

def chroot2abs(self, path):
    """Convert a chrooted path to an absolute path.

    Raises:
        RuntimeError: when SoftChroot is not initialized.
    """
    if self.enabled is None:
        raise RuntimeError('SoftChroot is not initialized')
    if not self.enabled:
        return path

    # Empty / missing path maps to the chroot root itself.
    if path is None or len(path) == 0:
        return self.chdir

    if not path.startswith(os.path.sep):
        path = os.path.sep + path

    # self.chdir always ends with a separator; drop it before joining.
    return self.chdir[:-1] + path

def abs2chroot(self, path, force = False):
    """Convert an absolute path to a chrooted path.

    Args:
        path (str): absolute path; must lie inside the chroot unless *force*.
        force (bool): when True, paths outside the chroot map to the chroot
            root instead of raising.

    Raises:
        RuntimeError: when SoftChroot is not initialized.
        ValueError: when *path* is None, or lies outside the chroot and
            *force* is not set.
    """
    if self.enabled is None:
        raise RuntimeError('SoftChroot is not initialized')

    if path is None:
        raise ValueError('path is empty')

    if not self.enabled:
        return path

    # The chroot directory itself maps to '/'.
    if path == self.chdir.rstrip(os.path.sep):
        return '/'

    if not path.startswith(self.chdir):
        if force:
            result = self.get_chroot()
        else:
            raise ValueError("path must starts with 'chdir': %s" % path)
    else:
        # Keep the leading separator: chdir ends with one, so slice from
        # len(chdir) - 1.
        l = len(self.chdir) - 1
        result = path[l:]

    return result
# NOTE(review): reconstructed post-patch SoftChrootEnabledTest from
# couchpotato/core/softchroot_test.py (the only definition fully contained in
# this span; neighbouring fragments are cut at the chunk boundaries).

class SoftChrootEnabledTest(TestCase):
    """Exercise SoftChroot initialized with an existing directory (enabled mode)."""

    def setUp(self):
        self.b = SoftChroot()
        self.b.initialize(CHROOT_DIR)

    def test_enabled(self):
        self.assertTrue(self.b.enabled)

    def test_is_subdir(self):
        # Falsy / missing paths are never subdirs.
        self.assertFalse(self.b.is_subdir(''))
        self.assertFalse(self.b.is_subdir(None))

        # The chroot itself counts, with or without trailing slash.
        self.assertTrue(self.b.is_subdir(CHROOT_DIR))
        without_slash = CHROOT_DIR[:-1]
        self.assertTrue(self.b.is_subdir(without_slash))

        # A child of the chroot counts too.
        self.assertTrue(self.b.is_subdir(CHROOT_DIR + 'come'))

    def test_is_root_abs_none(self):
        with self.assertRaises(ValueError):
            self.assertFalse(self.b.is_root_abs(None))

    def test_is_root_abs(self):
        self.assertFalse(self.b.is_root_abs(''))

        # Exactly the chroot dir is root, slash-insensitive.
        self.assertTrue(self.b.is_root_abs(CHROOT_DIR))
        without_slash = CHROOT_DIR[:-1]
        self.assertTrue(self.b.is_root_abs(without_slash))

        # Children are not the root.
        self.assertFalse(self.b.is_root_abs(CHROOT_DIR + 'come'))

    def test_chroot2abs_noleading_slash(self):
        relative = 'no_leading_slash'
        expected = CHROOT_DIR + relative
        #with self.assertRaises(ValueError):
        #    self.b.chroot2abs('no_leading_slash')
        self.assertEqual(self.b.chroot2abs(relative), expected)

    def test_chroot2abs(self):
        # Empty / missing chrooted paths resolve to the chroot root.
        self.assertEqual(self.b.chroot2abs(None), CHROOT_DIR)
        self.assertEqual(self.b.chroot2abs(''), CHROOT_DIR)

        self.assertEqual(self.b.chroot2abs('/asdf'), CHROOT_DIR + 'asdf')

    def test_abs2chroot_raise_on_empty(self):
        with self.assertRaises(ValueError):
            self.b.abs2chroot(None)
        with self.assertRaises(ValueError):
            self.b.abs2chroot('')

    def test_abs2chroot(self):
        self.assertEqual(self.b.abs2chroot(CHROOT_DIR + 'asdf'), '/asdf')
        # The chroot dir itself maps to '/', slash-insensitive.
        self.assertEqual(self.b.abs2chroot(CHROOT_DIR), '/')
        self.assertEqual(self.b.abs2chroot(CHROOT_DIR.rstrip(os.path.sep)), '/')

    def test_get_root(self):
        self.assertEqual(self.b.get_chroot(), CHROOT_DIR)
# NOTE(review): reconstructed post-patch EnvironmentBaseTest from
# couchpotato/environment_test.py (the only definition fully contained in
# this span; the surrounding environment.py hunks are cut/elided).

class EnvironmentBaseTest(TestCase):
    """Smoke tests for the Env static get/set accessors."""

    def test_appname(self):
        self.assertEqual('CouchPotato', Env.get('appname'))

    def test_set_get_appname(self):
        # set() should round-trip through get().
        new_name = 'NEWVALUE'
        Env.set('appname', new_name)
        self.assertEqual(new_name, Env.get('appname'))

    def test_get_softchroot(self):
        from couchpotato.core.softchroot import SoftChroot
        chroot = Env.get('softchroot')
        self.assertIsInstance(chroot, SoftChroot)
-from argparse import ArgumentParser -from couchpotato import web -from couchpotato.api import api, NonBlockHandler -from couchpotato.core.event import fireEventAsync, fireEvent -from couchpotato.core.helpers.variable import getDataDir, tryInt from logging import handlers -from tornado.httpserver import HTTPServer -from tornado.web import Application, FallbackHandler -from tornado.wsgi import WSGIContainer -from werkzeug.contrib.cache import FileSystemCache +from uuid import uuid4 import locale import logging import os.path -import shutil import sys import time +import traceback import warnings +import re +import tarfile +import shutil -def getOptions(base_path, args): +from CodernityDB.database_super_thread_safe import SuperThreadSafeDatabase +from argparse import ArgumentParser +from cache import FileSystemCache +from couchpotato import KeyHandler, LoginHandler, LogoutHandler +from couchpotato.api import NonBlockHandler, ApiHandler +from couchpotato.core.event import fireEventAsync, fireEvent +from couchpotato.core.helpers.encoding import sp +from couchpotato.core.helpers.variable import getDataDir, tryInt, getFreeSpace +import requests +from requests.packages.urllib3 import disable_warnings +from tornado.httpserver import HTTPServer +from tornado.web import Application, StaticFileHandler, RedirectHandler +from couchpotato.core.softchroot import SoftChrootInitError +try: from tornado.netutil import bind_unix_socket +except: pass + +def getOptions(args): # Options parser = ArgumentParser(prog = 'CouchPotato.py') @@ -50,6 +61,7 @@ def getOptions(base_path, args): return options + # Tornado monkey patch logging.. 
def _log(status_code, request): @@ -75,46 +87,84 @@ def runCouchPotato(options, base_path, args, data_dir = None, log_dir = None, En if not encoding or encoding in ('ANSI_X3.4-1968', 'US-ASCII', 'ASCII'): encoding = 'UTF-8' + Env.set('encoding', encoding) + # Do db stuff - db_path = os.path.join(data_dir, 'couchpotato.db') - - # Backup before start and cleanup old databases - new_backup = os.path.join(data_dir, 'db_backup', str(int(time.time()))) - - # Create path and copy - if not os.path.isdir(new_backup): os.makedirs(new_backup) - src_files = [options.config_file, db_path, db_path + '-shm', db_path + '-wal'] - for src_file in src_files: - if os.path.isfile(src_file): - shutil.copy2(src_file, os.path.join(new_backup, os.path.basename(src_file))) - - # Remove older backups, keep backups 3 days or at least 3 - backups = [] - for directory in os.listdir(os.path.dirname(new_backup)): - backup = os.path.join(os.path.dirname(new_backup), directory) - if os.path.isdir(backup): - backups.append(backup) - - total_backups = len(backups) - for backup in backups: - if total_backups > 3: - if tryInt(os.path.basename(backup)) < time.time() - 259200: - for src_file in src_files: - b_file = os.path.join(backup, os.path.basename(src_file)) - if os.path.isfile(b_file): - os.remove(b_file) - os.rmdir(backup) - total_backups -= 1 + db_path = sp(os.path.join(data_dir, 'database')) + old_db_path = os.path.join(data_dir, 'couchpotato.db') + + # Remove database folder if both exists + if os.path.isdir(db_path) and os.path.isfile(old_db_path): + db = SuperThreadSafeDatabase(db_path) + db.open() + db.destroy() + # Check if database exists + db = SuperThreadSafeDatabase(db_path) + db_exists = db.exists() + if db_exists: + + # Backup before start and cleanup old backups + backup_path = sp(os.path.join(data_dir, 'db_backup')) + backup_count = 5 + existing_backups = [] + if not os.path.isdir(backup_path): os.makedirs(backup_path) + + for root, dirs, files in os.walk(backup_path): + # Only 
consider files being a direct child of the backup_path + if root == backup_path: + for backup_file in sorted(files): + ints = re.findall('\d+', backup_file) + + # Delete non zip files + if len(ints) != 1: + try: os.remove(os.path.join(root, backup_file)) + except: pass + else: + existing_backups.append((int(ints[0]), backup_file)) + else: + # Delete stray directories. + shutil.rmtree(root) + + # Remove all but the last 5 + for eb in existing_backups[:-backup_count]: + os.remove(os.path.join(backup_path, eb[1])) + + # Create new backup + new_backup = sp(os.path.join(backup_path, '%s.tar.gz' % int(time.time()))) + zipf = tarfile.open(new_backup, 'w:gz') + for root, dirs, files in os.walk(db_path): + for zfilename in files: + zipf.add(os.path.join(root, zfilename), arcname = 'database/%s' % os.path.join(root[len(db_path) + 1:], zfilename)) + zipf.close() + + # Open last + db.open() + + else: + db.create() + + # Force creation of cachedir + log_dir = sp(log_dir) + cache_dir = sp(os.path.join(data_dir, 'cache')) + python_cache = sp(os.path.join(cache_dir, 'python')) + + if not os.path.exists(cache_dir): + os.mkdir(cache_dir) + if not os.path.exists(python_cache): + os.mkdir(python_cache) + + session = requests.Session() + session.max_redirects = 5 # Register environment settings - Env.set('encoding', encoding) - Env.set('app_dir', base_path) - Env.set('data_dir', data_dir) - Env.set('log_path', os.path.join(log_dir, 'CouchPotato.log')) - Env.set('db_path', 'sqlite:///' + db_path) - Env.set('cache_dir', os.path.join(data_dir, 'cache')) - Env.set('cache', FileSystemCache(os.path.join(Env.get('cache_dir'), 'python'))) + Env.set('app_dir', sp(base_path)) + Env.set('data_dir', sp(data_dir)) + Env.set('log_path', sp(os.path.join(log_dir, 'CouchPotato.log'))) + Env.set('db', db) + Env.set('http_opener', session) + Env.set('cache_dir', cache_dir) + Env.set('cache', FileSystemCache(python_cache)) Env.set('console_log', options.console_log) Env.set('quiet', options.quiet) 
Env.set('desktop', desktop) @@ -131,12 +181,15 @@ def runCouchPotato(options, base_path, args, data_dir = None, log_dir = None, En Env.set('dev', development) # Disable logging for some modules - for logger_name in ['enzyme', 'guessit', 'subliminal', 'apscheduler']: + for logger_name in ['enzyme', 'guessit', 'subliminal', 'apscheduler', 'tornado', 'requests']: logging.getLogger(logger_name).setLevel(logging.ERROR) - for logger_name in ['gntp', 'migrate']: + for logger_name in ['gntp']: logging.getLogger(logger_name).setLevel(logging.WARNING) + # Disable SSL warning + disable_warnings() + # Use reloader reloader = debug is True and development and not Env.get('desktop') and not options.daemon @@ -154,121 +207,165 @@ def runCouchPotato(options, base_path, args, data_dir = None, log_dir = None, En logger.addHandler(hdlr) # To file - hdlr2 = handlers.RotatingFileHandler(Env.get('log_path'), 'a', 500000, 10) + hdlr2 = handlers.RotatingFileHandler(Env.get('log_path'), 'a', 500000, 10, encoding = Env.get('encoding')) hdlr2.setFormatter(formatter) logger.addHandler(hdlr2) # Start logging & enable colors + # noinspection PyUnresolvedReferences import color_logs from couchpotato.core.logger import CPLog log = CPLog(__name__) log.debug('Started with options %s', options) + # Check soft-chroot dir exists: + try: + # Load Soft-Chroot + soft_chroot = Env.get('softchroot') + soft_chroot_dir = Env.setting('soft_chroot', section = 'core', default = None, type='unicode' ) + soft_chroot.initialize(soft_chroot_dir) + except SoftChrootInitError as exc: + log.error(exc) + return + except: + log.error('Unable to check whether SOFT-CHROOT is defined') + return + + # Check available space + try: + total_space, available_space = getFreeSpace(data_dir) + if available_space < 100: + log.error('Shutting down as CP needs some space to work. You\'ll get corrupted data otherwise. 
Only %sMB left', available_space) + return + except: + log.error('Failed getting diskspace: %s', traceback.format_exc()) + def customwarn(message, category, filename, lineno, file = None, line = None): log.warning('%s %s %s line:%s', (category, message, filename, lineno)) warnings.showwarning = customwarn - # Check if database exists - db = Env.get('db_path') - db_exists = os.path.isfile(db_path) - - # Load configs & plugins - loader = Env.get('loader') - loader.preload(root = base_path) - loader.run() - - # Load migrations - if db_exists: - - from migrate.versioning.api import version_control, db_version, version, upgrade - repo = os.path.join(base_path, 'couchpotato', 'core', 'migration') - - latest_db_version = version(repo) - try: - current_db_version = db_version(db, repo) - except: - version_control(db, repo, version = latest_db_version) - current_db_version = db_version(db, repo) - - if current_db_version < latest_db_version and not development: - log.info('Doing database upgrade. 
From %d to %d', (current_db_version, latest_db_version)) - upgrade(db, repo) - - # Configure Database - from couchpotato.core.settings.model import setup - setup() - - # Fill database with needed stuff - if not db_exists: - fireEvent('app.initialize', in_order = True) - # Create app - from couchpotato import app + from couchpotato import WebHandler + web_base = ('/' + Env.setting('url_base').lstrip('/') + '/') if Env.setting('url_base') else '/' + Env.set('web_base', web_base) + api_key = Env.setting('api_key') - url_base = '/' + Env.setting('url_base').lstrip('/') if Env.setting('url_base') else '' + if not api_key: + api_key = uuid4().hex + Env.setting('api_key', value = api_key) + + api_base = r'%sapi/%s/' % (web_base, api_key) + Env.set('api_base', api_base) # Basic config - app.secret_key = api_key host = Env.setting('host', default = '0.0.0.0') - # app.debug = development + host6 = Env.setting('host6', default = '::') + config = { 'use_reloader': reloader, - 'port': tryInt(Env.setting('port', default = 5000)), + 'port': tryInt(Env.setting('port', default = 5050)), 'host': host if host and len(host) > 0 else '0.0.0.0', + 'host6': host6 if host6 and len(host6) > 0 else '::', 'ssl_cert': Env.setting('ssl_cert', default = None), 'ssl_key': Env.setting('ssl_key', default = None), } - # Static path - app.static_folder = os.path.join(base_path, 'couchpotato', 'static') - web.add_url_rule('api/%s/static/<path:filename>' % api_key, - endpoint = 'static', - view_func = app.send_static_file) + # Load the app + application = Application( + [], + log_function = lambda x: None, + debug = config['use_reloader'], + gzip = True, + cookie_secret = api_key, + login_url = '%slogin/' % web_base, + ) + Env.set('app', application) + + # Request handlers + application.add_handlers(".*$", [ + (r'%snonblock/(.*)(/?)' % api_base, NonBlockHandler), + + # API handlers + (r'%s(.*)(/?)' % api_base, ApiHandler), # Main API handler + (r'%sgetkey(/?)' % web_base, KeyHandler), # Get API key + 
(r'%s' % api_base, RedirectHandler, {"url": web_base + 'docs/'}), # API docs + + # Login handlers + (r'%slogin(/?)' % web_base, LoginHandler), + (r'%slogout(/?)' % web_base, LogoutHandler), + + # Catch all webhandlers + (r'%s(.*)(/?)' % web_base, WebHandler), + (r'(.*)', WebHandler), + ]) + + # Static paths + static_path = '%sstatic/' % web_base + for dir_name in ['fonts', 'images', 'scripts', 'style']: + application.add_handlers(".*$", [ + ('%s%s/(.*)' % (static_path, dir_name), StaticFileHandler, {'path': sp(os.path.join(base_path, 'couchpotato', 'static', dir_name))}) + ]) + Env.set('static_path', static_path) - # Register modules - app.register_blueprint(web, url_prefix = '%s/' % url_base) - app.register_blueprint(api, url_prefix = '%s/api/%s/' % (url_base, api_key)) + # Load configs & plugins + loader = Env.get('loader') + loader.preload(root = sp(base_path)) + loader.run() + + # Fill database with needed stuff + fireEvent('database.setup') + if not db_exists: + fireEvent('app.initialize', in_order = True) + fireEvent('app.migrate') + + # Go go go! + from tornado.ioloop import IOLoop + from tornado.autoreload import add_reload_hook + loop = IOLoop.current() + + # Reload hook + def reload_hook(): + fireEvent('app.shutdown') + add_reload_hook(reload_hook) # Some logging and fire load event try: log.info('Starting server on port %(port)s', config) except: pass fireEventAsync('app.load') - # Go go go! 
- from tornado.ioloop import IOLoop - web_container = WSGIContainer(app) - web_container._log = _log - loop = IOLoop.instance() - - application = Application([ - (r'%s/api/%s/nonblock/(.*)/' % (url_base, api_key), NonBlockHandler), - (r'.*', FallbackHandler, dict(fallback = web_container)), - ], - log_function = lambda x : None, - debug = config['use_reloader'], - gzip = True, - ) - + ssl_options = None if config['ssl_cert'] and config['ssl_key']: - server = HTTPServer(application, no_keep_alive = True, ssl_options = { - "certfile": config['ssl_cert'], - "keyfile": config['ssl_key'], - }) - else: - server = HTTPServer(application, no_keep_alive = True) + ssl_options = { + 'certfile': config['ssl_cert'], + 'keyfile': config['ssl_key'], + } + + server = HTTPServer(application, no_keep_alive = True, ssl_options = ssl_options) try_restart = True restart_tries = 5 while try_restart: try: - server.listen(config['port'], config['host']) + if config['host'].startswith('unix:'): + server.add_socket(bind_unix_socket(config['host'][5:])) + else: + server.listen(config['port'], config['host']) + + if Env.setting('ipv6', default = False): + try: server.listen(config['port'], config['host6']) + except: log.info2('Tried to bind to IPV6 but failed') + loop.start() - except Exception, e: + server.close_all_connections() + server.stop() + loop.close(all_fds = True) + except Exception as e: + log.error('Failed starting: %s', traceback.format_exc()) try: nr, msg = e if nr == 48: - log.info('Already in use, try %s more time after few seconds', restart_tries) + log.info('Port (%s) needed for CouchPotato is already in use, try %s more time after few seconds', (config.get('port'), restart_tries)) time.sleep(1) restart_tries -= 1 @@ -276,6 +373,8 @@ def customwarn(message, category, filename, lineno, file = None, line = None): continue else: return + except ValueError: + return except: pass diff --git a/couchpotato/static/fonts/Lobster-webfont.eot 
b/couchpotato/static/fonts/Lobster-webfont.eot new file mode 100755 index 0000000000..bf219fff86 Binary files /dev/null and b/couchpotato/static/fonts/Lobster-webfont.eot differ diff --git a/couchpotato/static/fonts/Lobster-webfont.svg b/couchpotato/static/fonts/Lobster-webfont.svg new file mode 100755 index 0000000000..5ea8bc565b --- /dev/null +++ b/couchpotato/static/fonts/Lobster-webfont.svg @@ -0,0 +1,38 @@ +<?xml version="1.0" standalone="no"?> +<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd" > +<svg xmlns="http://www.w3.org/2000/svg"> +<metadata></metadata> +<defs> +<font id="lobster_14regular" horiz-adv-x="2048" > +<font-face units-per-em="2048" ascent="1638" descent="-410" /> +<missing-glyph horiz-adv-x="444" /> +<glyph /> +<glyph /> +<glyph unicode=" " /> +<glyph unicode=" " horiz-adv-x="444" /> +<glyph unicode=" " horiz-adv-x="444" /> +<glyph unicode=" " horiz-adv-x="444" /> +<glyph unicode="C" horiz-adv-x="1017" d="M39 471q0 83 11 176t37 197.5t64 202t96.5 187t129.5 156.5t168.5 106.5t208.5 39.5q75 0 135.5 -15t107.5 -47t73 -85.5t26 -126.5q0 -87 -35 -138t-109 -51q-70 0 -116 53q37 19 65.5 75t28.5 114q0 55 -30 90t-95 35q-108 0 -208.5 -152t-159 -374t-58.5 -431 q0 -59 6.5 -105.5t23.5 -87.5t44.5 -68t70.5 -42.5t101 -15.5q119 0 223.5 54.5t175.5 150.5l47 -21q-39 -98 -110 -174.5t-154 -119.5t-164.5 -65t-155.5 -22q-232 0 -340 116.5t-108 387.5z" /> +<glyph unicode="P" horiz-adv-x="1173" d="M12 1004q0 132 108 256.5t275 200t336 75.5q122 0 216.5 -33t151.5 -90t86 -128t29 -152q0 -99 -41.5 -196t-115 -176t-187.5 -128t-247 -49h-11l-125 -584h-294l278 1303l303 40l-145 -684q75 6 143 55t114 121t73.5 159t27.5 169q0 123 -63 201t-191 78 q-249 0 -389.5 -132.5t-140.5 -365.5q0 -46 6.5 -76t14 -44.5t7.5 -18.5q-110 0 -164.5 46t-54.5 153z" /> +<glyph unicode="a" horiz-adv-x="1077" d="M-27 317q0 71 14.5 149.5t42.5 159t73.5 153.5t101.5 130t132.5 91t162.5 34q84 0 125 -28.5t41 -75.5v-15l22 109h295l-147 -696q-9 -40 -9 -66q0 -88 84 -88q56 0 98 
54t68 139h86q-133 -379 -411 -379q-87 0 -139.5 48.5t-61.5 139.5q-126 -188 -305 -188 q-43 0 -82 10.5t-74 35t-61 62t-41 94t-15 127.5zM276 346q0 -56 10 -91.5t28.5 -48.5t31.5 -16.5t33 -3.5q49 0 101 49t69 132l98 462q0 26 -20 52t-64 26q-62 0 -118 -60t-91.5 -148.5t-56.5 -183t-21 -169.5z" /> +<glyph unicode="c" horiz-adv-x="786" d="M-27 313q0 54 8.5 115.5t28.5 133t49 138.5t74 130t99.5 109.5t130 74.5t161.5 28q125 0 176.5 -53t51.5 -135q0 -72 -31 -110.5t-78 -38.5q-35 0 -72 24q25 70 25 121q0 84 -57 84q-61 0 -124 -103.5t-101 -244t-38 -254.5q0 -100 35 -136t113 -36q77 0 144.5 29.5 t113 69.5t104.5 108h70q-236 -379 -563 -379q-151 0 -235.5 79t-84.5 246z" /> +<glyph unicode="h" horiz-adv-x="1034" d="M-78 0l297 1393l303 41l-110 -516q100 114 239 114q104 0 166 -56.5t62 -174.5q0 -89 -49.5 -283.5t-49.5 -245.5q0 -86 82 -86q61 0 96.5 43t75.5 138h86q-33 -97 -73.5 -168t-78.5 -110.5t-81.5 -63t-76.5 -30.5t-71 -7q-124 0 -182.5 66.5t-58.5 164.5q0 71 46 264 t46 258q0 105 -76 105q-53 0 -98.5 -63t-57.5 -117l-141 -666h-295z" /> +<glyph unicode="o" horiz-adv-x="899" d="M-29 315q0 53 8.5 114.5t29 132.5t50 138.5t75.5 130t102 109t132.5 74.5t163.5 28q263 0 263 -327q8 -4 22 -4q66 0 155 36.5t161 86.5l18 -56q-58 -62 -153.5 -106.5t-209.5 -63.5q-24 -280 -160.5 -448t-328.5 -168q-152 0 -240 78t-88 245zM276 342 q0 -99 22.5 -132.5t82.5 -33.5q75 0 143.5 122.5t93.5 303.5q-55 13 -55 86q0 83 64 111q-3 65 -20.5 92t-57.5 27q-68 0 -133.5 -103t-102.5 -236.5t-37 -236.5z" /> +<glyph unicode="t" horiz-adv-x="546" d="M-16 233q0 56 16 134l123 575h-68l17 82h67l62 283l303 41l-70 -324h123l-16 -82h-123l-131 -614q-8 -36 -8 -66q0 -42 19.5 -59t63.5 -17q59 0 110 51.5t75 129.5h86q-33 -95 -79 -167t-90.5 -111t-95.5 -63t-88.5 -31t-76.5 -7q-98 0 -158.5 61t-60.5 184z" /> +<glyph unicode="u" horiz-adv-x="1034" d="M-16 233q0 56 16 134l139 657h295l-147 -696q-6 -30 -6 -58q0 -84 61 -84q109 0 164 181l139 657h295l-147 -696q-9 -40 -9 -66q0 -42 20 -59t64 -17q57 0 99 49.5t67 131.5h86q-133 -379 -411 -379q-88 0 -141 49.5t-62 144.5q-118 -194 -303 
-194q-98 0 -158.5 61 t-60.5 184z" /> +<glyph unicode=" " horiz-adv-x="768" /> +<glyph unicode=" " horiz-adv-x="1536" /> +<glyph unicode=" " horiz-adv-x="768" /> +<glyph unicode=" " horiz-adv-x="1536" /> +<glyph unicode=" " horiz-adv-x="512" /> +<glyph unicode=" " horiz-adv-x="384" /> +<glyph unicode=" " horiz-adv-x="256" /> +<glyph unicode=" " horiz-adv-x="256" /> +<glyph unicode=" " horiz-adv-x="192" /> +<glyph unicode=" " horiz-adv-x="307" /> +<glyph unicode=" " horiz-adv-x="85" /> +<glyph unicode=" " horiz-adv-x="307" /> +<glyph unicode=" " horiz-adv-x="384" /> +<glyph unicode="◼" horiz-adv-x="1140" d="M0 0v1140h1140v-1140h-1140z" /> +</font> +</defs></svg> \ No newline at end of file diff --git a/couchpotato/static/fonts/Lobster-webfont.ttf b/couchpotato/static/fonts/Lobster-webfont.ttf new file mode 100755 index 0000000000..6661b92f56 Binary files /dev/null and b/couchpotato/static/fonts/Lobster-webfont.ttf differ diff --git a/couchpotato/static/fonts/Lobster-webfont.woff b/couchpotato/static/fonts/Lobster-webfont.woff new file mode 100755 index 0000000000..69b19660a6 Binary files /dev/null and b/couchpotato/static/fonts/Lobster-webfont.woff differ diff --git a/couchpotato/static/fonts/Lobster-webfont.woff2 b/couchpotato/static/fonts/Lobster-webfont.woff2 new file mode 100644 index 0000000000..67014bef71 Binary files /dev/null and b/couchpotato/static/fonts/Lobster-webfont.woff2 differ diff --git a/couchpotato/static/fonts/OpenSans-Bold-webfont.eot b/couchpotato/static/fonts/OpenSans-Bold-webfont.eot new file mode 100755 index 0000000000..e1c7674430 Binary files /dev/null and b/couchpotato/static/fonts/OpenSans-Bold-webfont.eot differ diff --git a/couchpotato/static/fonts/OpenSans-Bold-webfont.svg b/couchpotato/static/fonts/OpenSans-Bold-webfont.svg new file mode 100755 index 0000000000..364b368678 --- /dev/null +++ b/couchpotato/static/fonts/OpenSans-Bold-webfont.svg @@ -0,0 +1,146 @@ +<?xml version="1.0" standalone="no"?> +<!DOCTYPE svg PUBLIC "-//W3C//DTD 
SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd" > +<svg xmlns="http://www.w3.org/2000/svg"> +<metadata> +This is a custom SVG webfont generated by Font Squirrel. +Copyright : Digitized data copyright 20102011 Google Corporation +Foundry : Ascender Corporation +Foundry URL : httpwwwascendercorpcom +</metadata> +<defs> +<font id="OpenSansBold" horiz-adv-x="1169" > +<font-face units-per-em="2048" ascent="1638" descent="-410" /> +<missing-glyph horiz-adv-x="532" /> +<glyph unicode=" " horiz-adv-x="532" /> +<glyph unicode=" " horiz-adv-x="532" /> +<glyph unicode=" " horiz-adv-x="532" /> +<glyph unicode="!" horiz-adv-x="586" d="M117 143q0 84 45 127t131 43q83 0 128.5 -44t45.5 -126q0 -79 -46 -124.5t-128 -45.5q-84 0 -130 44.5t-46 125.5zM121 1462h346l-51 -977h-244z" /> +<glyph unicode=""" horiz-adv-x="967" d="M133 1462h279l-41 -528h-197zM555 1462h279l-41 -528h-197z" /> +<glyph unicode="#" horiz-adv-x="1323" d="M45 406v206h277l47 232h-252v209h289l77 407h219l-77 -407h198l78 407h215l-78 -407h240v-209h-279l-47 -232h258v-206h-297l-77 -406h-220l78 406h-194l-76 -406h-215l74 406h-238zM539 612h196l47 232h-196z" /> +<glyph unicode="$" d="M88 1049q0 145 113.5 238.5t316.5 113.5v153h137v-149q229 -10 414 -92l-94 -234q-156 64 -320 78v-295q195 -75 277.5 -130t121 -121t38.5 -154q0 -159 -115 -255.5t-322 -115.5v-205h-137v201q-244 5 -428 86v264q87 -43 209.5 -76t218.5 -39v310l-67 26 q-198 78 -280.5 169.5t-82.5 226.5zM389 1049q0 -44 30.5 -72.5t98.5 -58.5v235q-129 -19 -129 -104zM655 324q136 23 136 118q0 42 -34 71t-102 60v-249z" /> +<glyph unicode="%" horiz-adv-x="1845" d="M63 1026q0 457 345 457q169 0 259.5 -118.5t90.5 -338.5q0 -230 -89 -345.5t-261 -115.5q-165 0 -255 118.5t-90 342.5zM315 1024q0 -127 22.5 -189.5t72.5 -62.5q96 0 96 252q0 250 -96 250q-50 0 -72.5 -61.5t-22.5 -188.5zM395 0l811 1462h240l-811 -1462h-240z M1087 442q0 457 345 457q169 0 259.5 -118.5t90.5 -338.5q0 -229 -89 -344.5t-261 -115.5q-165 0 -255 118.5t-90 341.5zM1339 440q0 -127 22.5 -189.5t72.5 -62.5q96 0 96 252q0 250 
-96 250q-50 0 -72.5 -61.5t-22.5 -188.5z" /> +<glyph unicode="&" horiz-adv-x="1536" d="M82 395q0 137 60.5 233.5t207.5 180.5q-75 86 -109 164.5t-34 171.5q0 152 116.5 245t311.5 93q186 0 297.5 -86.5t111.5 -231.5q0 -119 -69 -217.5t-223 -187.5l284 -277q71 117 123 301h318q-36 -135 -99 -263.5t-143 -227.5l301 -293h-377l-115 113 q-191 -133 -432 -133q-244 0 -387 112t-143 303zM403 424q0 -86 64.5 -137t165.5 -51q126 0 227 61l-332 330q-58 -44 -91.5 -92t-33.5 -111zM489 1124q0 -88 95 -194q86 48 132 94.5t46 108.5q0 53 -36 83.5t-93 30.5q-67 0 -105.5 -32t-38.5 -91z" /> +<glyph unicode="'" horiz-adv-x="545" d="M133 1462h279l-41 -528h-197z" /> +<glyph unicode="(" horiz-adv-x="694" d="M82 561q0 265 77.5 496t223.5 405h250q-141 -193 -213 -424t-72 -475q0 -245 73.5 -473.5t209.5 -413.5h-248q-147 170 -224 397t-77 488z" /> +<glyph unicode=")" horiz-adv-x="694" d="M61 1462h250q147 -175 224 -406.5t77 -494.5t-77.5 -490t-223.5 -395h-248q135 184 209 412.5t74 474.5q0 244 -72 475t-213 424z" /> +<glyph unicode="*" horiz-adv-x="1116" d="M63 1042l39 250l365 -104l-41 368h262l-41 -368l373 104l33 -252l-340 -24l223 -297l-227 -121l-156 313l-137 -311l-236 119l221 297z" /> +<glyph unicode="+" d="M88 612v219h387v390h219v-390h387v-219h-387v-385h-219v385h-387z" /> +<glyph unicode="," horiz-adv-x="594" d="M63 -264q65 266 101 502h280l15 -23q-52 -202 -176 -479h-220z" /> +<glyph unicode="-" horiz-adv-x="659" d="M61 424v250h537v-250h-537z" /> +<glyph unicode="." 
horiz-adv-x="584" d="M117 143q0 84 45 127t131 43q83 0 128.5 -44t45.5 -126q0 -79 -46 -124.5t-128 -45.5q-84 0 -130 44.5t-46 125.5z" /> +<glyph unicode="/" horiz-adv-x="846" d="M14 0l545 1462h277l-545 -1462h-277z" /> +<glyph unicode="0" d="M74 731q0 387 125 570.5t385 183.5q253 0 382.5 -192t129.5 -562q0 -383 -125.5 -567t-386.5 -184q-253 0 -381.5 190t-128.5 561zM381 731q0 -269 46.5 -385.5t156.5 -116.5q108 0 156 118t48 384q0 269 -48.5 386.5t-155.5 117.5q-109 0 -156 -117.5t-47 -386.5z" /> +<glyph unicode="1" d="M121 1087l471 375h254v-1462h-309v846l3 139l5 152q-77 -77 -107 -101l-168 -135z" /> +<glyph unicode="2" d="M78 1274q108 92 179 130t155 58.5t188 20.5q137 0 242 -50t163 -140t58 -206q0 -101 -35.5 -189.5t-110 -181.5t-262.5 -265l-188 -177v-14h637v-260h-1022v215l367 371q163 167 213 231.5t72 119.5t22 114q0 88 -48.5 131t-129.5 43q-85 0 -165 -39t-167 -111z" /> +<glyph unicode="3" d="M78 59v263q85 -43 187 -70t202 -27q153 0 226 52t73 167q0 103 -84 146t-268 43h-111v237h113q170 0 248.5 44.5t78.5 152.5q0 166 -208 166q-72 0 -146.5 -24t-165.5 -83l-143 213q200 144 477 144q227 0 358.5 -92t131.5 -256q0 -137 -83 -233t-233 -132v-6 q177 -22 268 -107.5t91 -230.5q0 -211 -153 -328.5t-437 -117.5q-238 0 -422 79z" /> +<glyph unicode="4" d="M35 303v215l641 944h285v-919h176v-240h-176v-303h-302v303h-624zM307 543h352v248q0 62 5 180t8 137h-8q-37 -82 -89 -160z" /> +<glyph unicode="5" d="M100 59v267q79 -42 184 -68.5t199 -26.5q283 0 283 232q0 221 -293 221q-53 0 -117 -10.5t-104 -22.5l-123 66l55 745h793v-262h-522l-27 -287l35 7q61 14 151 14q212 0 337.5 -119t125.5 -326q0 -245 -151 -377t-432 -132q-244 0 -394 79z" /> +<glyph unicode="6" d="M72 621q0 434 183.5 646t549.5 212q125 0 196 -15v-247q-89 20 -176 20q-159 0 -259.5 -48t-150.5 -142t-59 -267h13q99 170 317 170q196 0 307 -123t111 -340q0 -234 -132 -370.5t-366 -136.5q-162 0 -282.5 75t-186 219t-65.5 347zM379 510q0 -119 62.5 -201t158.5 -82 q99 0 152 66.5t53 189.5q0 107 -49.5 168.5t-149.5 61.5q-94 0 -160.5 -61t-66.5 -142z" /> +<glyph unicode="7" d="M55 
1200v260h1049v-194l-553 -1266h-324l549 1200h-721z" /> +<glyph unicode="8" d="M72 371q0 125 66.5 222t213.5 171q-125 79 -180 169t-55 197q0 157 130 254t339 97q210 0 338.5 -95.5t128.5 -257.5q0 -112 -62 -199.5t-200 -156.5q164 -88 235.5 -183.5t71.5 -209.5q0 -180 -141 -289.5t-371 -109.5q-240 0 -377 102t-137 289zM358 389q0 -86 60 -134 t164 -48q115 0 172 49.5t57 130.5q0 67 -56.5 125.5t-183.5 124.5q-213 -98 -213 -248zM408 1106q0 -60 38.5 -107.5t139.5 -97.5q98 46 137 94t39 111q0 69 -50 109t-128 40q-79 0 -127.5 -40.5t-48.5 -108.5z" /> +<glyph unicode="9" d="M66 971q0 235 133.5 371.5t363.5 136.5q162 0 283.5 -76t186.5 -220.5t65 -344.5q0 -432 -182 -645t-551 -213q-130 0 -197 14v248q84 -21 176 -21q155 0 255 45.5t153 143t61 268.5h-12q-58 -94 -134 -132t-190 -38q-191 0 -301 122.5t-110 340.5zM365 975 q0 -106 49 -168t149 -62q94 0 161 61.5t67 141.5q0 119 -62.5 201t-159.5 82q-96 0 -150 -66t-54 -190z" /> +<glyph unicode=":" horiz-adv-x="584" d="M117 143q0 84 45 127t131 43q83 0 128.5 -44t45.5 -126q0 -79 -46 -124.5t-128 -45.5q-84 0 -130 44.5t-46 125.5zM117 969q0 84 45 127t131 43q83 0 128.5 -44t45.5 -126q0 -81 -46.5 -125.5t-127.5 -44.5q-84 0 -130 44t-46 126z" /> +<glyph unicode=";" horiz-adv-x="594" d="M63 -264q65 266 101 502h280l15 -23q-52 -202 -176 -479h-220zM117 969q0 84 45 127t131 43q83 0 128.5 -44t45.5 -126q0 -81 -46.5 -125.5t-127.5 -44.5q-84 0 -130 44t-46 126z" /> +<glyph unicode="<" d="M88 641v143l993 496v-240l-684 -317l684 -281v-239z" /> +<glyph unicode="=" d="M88 418v219h993v-219h-993zM88 805v219h993v-219h-993z" /> +<glyph unicode=">" d="M88 203v239l684 281l-684 317v240l993 -496v-143z" /> +<glyph unicode="?" 
horiz-adv-x="977" d="M6 1358q223 125 473 125q206 0 327.5 -99t121.5 -264q0 -110 -50 -190t-190 -180q-96 -71 -121.5 -108t-25.5 -97v-60h-265v74q0 96 41 167t150 151q105 75 138.5 122t33.5 105q0 65 -48 99t-134 34q-150 0 -342 -98zM244 143q0 84 45 127t131 43q83 0 128.5 -44 t45.5 -126q0 -79 -46 -124.5t-128 -45.5q-84 0 -130 44.5t-46 125.5z" /> +<glyph unicode="@" horiz-adv-x="1837" d="M102 602q0 247 108.5 448.5t309 316t461.5 114.5q220 0 393 -90t267 -256t94 -383q0 -144 -46 -263.5t-130 -187.5t-195 -68q-74 0 -131 35.5t-82 93.5h-16q-108 -129 -275 -129q-177 0 -279 106.5t-102 291.5q0 211 134 340t350 129q86 0 189.5 -16.5t170.5 -39.5 l-23 -489q0 -139 76 -139q64 0 102 93.5t38 244.5q0 161 -67 284.5t-188.5 188.5t-277.5 65q-202 0 -351 -83t-228.5 -239.5t-79.5 -361.5q0 -276 147.5 -423.5t427.5 -147.5q106 0 233 23.5t250 68.5v-192q-214 -91 -475 -91q-380 0 -592.5 200t-212.5 556zM711 627 q0 -211 172 -211q90 0 137 63.5t57 206.5l13 221q-51 11 -115 11q-125 0 -194.5 -78t-69.5 -213z" /> +<glyph unicode="A" horiz-adv-x="1413" d="M0 0l516 1468h379l518 -1468h-334l-106 348h-533l-106 -348h-334zM518 608h381q-147 473 -165.5 535t-26.5 98q-33 -128 -189 -633z" /> +<glyph unicode="B" horiz-adv-x="1376" d="M184 0v1462h455q311 0 451.5 -88.5t140.5 -281.5q0 -131 -61.5 -215t-163.5 -101v-10q139 -31 200.5 -116t61.5 -226q0 -200 -144.5 -312t-392.5 -112h-547zM494 256h202q128 0 189 49t61 150q0 182 -260 182h-192v-381zM494 883h180q126 0 182.5 39t56.5 129 q0 84 -61.5 120.5t-194.5 36.5h-163v-325z" /> +<glyph unicode="C" horiz-adv-x="1305" d="M119 729q0 228 83 399.5t238.5 263t364.5 91.5q213 0 428 -103l-100 -252q-82 39 -165 68t-163 29q-175 0 -271 -131.5t-96 -366.5q0 -489 367 -489q154 0 373 77v-260q-180 -75 -402 -75q-319 0 -488 193.5t-169 555.5z" /> +<glyph unicode="D" horiz-adv-x="1516" d="M184 0v1462h459q358 0 556 -189t198 -528q0 -361 -205.5 -553t-593.5 -192h-414zM494 256h133q448 0 448 481q0 471 -416 471h-165v-952z" /> +<glyph unicode="E" horiz-adv-x="1147" d="M184 0v1462h842v-254h-532v-321h495v-254h-495v-377h532v-256h-842z" 
/> +<glyph unicode="F" horiz-adv-x="1124" d="M184 0v1462h838v-254h-533v-377h496v-253h-496v-578h-305z" /> +<glyph unicode="G" horiz-adv-x="1483" d="M119 733q0 354 202.5 552t561.5 198q225 0 434 -90l-103 -248q-160 80 -333 80q-201 0 -322 -135t-121 -363q0 -238 97.5 -363.5t283.5 -125.5q97 0 197 20v305h-277v258h580v-758q-141 -46 -265.5 -64.5t-254.5 -18.5q-331 0 -505.5 194.5t-174.5 558.5z" /> +<glyph unicode="H" horiz-adv-x="1567" d="M184 0v1462h310v-573h579v573h309v-1462h-309v631h-579v-631h-310z" /> +<glyph unicode="I" horiz-adv-x="678" d="M184 0v1462h310v-1462h-310z" /> +<glyph unicode="J" horiz-adv-x="678" d="M-152 -150q80 -20 146 -20q102 0 146 63.5t44 198.5v1370h310v-1368q0 -256 -117 -390t-346 -134q-105 0 -183 22v258z" /> +<glyph unicode="K" horiz-adv-x="1360" d="M184 0v1462h310v-669l122 172l396 497h344l-510 -647l514 -815h-352l-383 616l-131 -94v-522h-310z" /> +<glyph unicode="L" horiz-adv-x="1157" d="M184 0v1462h310v-1206h593v-256h-903z" /> +<glyph unicode="M" horiz-adv-x="1931" d="M184 0v1462h422l346 -1118h6l367 1118h422v-1462h-289v692q0 49 1.5 113t13.5 340h-9l-377 -1145h-284l-352 1147h-9q19 -350 19 -467v-680h-277z" /> +<glyph unicode="N" horiz-adv-x="1665" d="M184 0v1462h391l635 -1095h7q-15 285 -15 403v692h279v-1462h-394l-636 1106h-9q19 -293 19 -418v-688h-277z" /> +<glyph unicode="O" horiz-adv-x="1630" d="M119 735q0 365 180.5 557.5t517.5 192.5t515.5 -194t178.5 -558q0 -363 -180 -558t-516 -195t-516 195t-180 560zM444 733q0 -245 93 -369t278 -124q371 0 371 493q0 494 -369 494q-185 0 -279 -124.5t-94 -369.5z" /> +<glyph unicode="P" horiz-adv-x="1286" d="M184 0v1462h467q266 0 404.5 -114.5t138.5 -341.5q0 -236 -147.5 -361t-419.5 -125h-133v-520h-310zM494 774h102q143 0 214 56.5t71 164.5q0 109 -59.5 161t-186.5 52h-141v-434z" /> +<glyph unicode="Q" horiz-adv-x="1630" d="M119 735q0 365 180.5 557.5t517.5 192.5t515.5 -194t178.5 -558q0 -258 -91.5 -432.5t-268.5 -255.5l352 -393h-397l-268 328h-23q-336 0 -516 195t-180 560zM444 733q0 -245 93 -369t278 -124q371 0 371 493q0 494 -369 494q-185 0 
-279 -124.5t-94 -369.5z" /> +<glyph unicode="R" horiz-adv-x="1352" d="M184 0v1462h426q298 0 441 -108.5t143 -329.5q0 -129 -71 -229.5t-201 -157.5q330 -493 430 -637h-344l-349 561h-165v-561h-310zM494 813h100q147 0 217 49t70 154q0 104 -71.5 148t-221.5 44h-94v-395z" /> +<glyph unicode="S" horiz-adv-x="1128" d="M94 68v288q148 -66 250.5 -93t187.5 -27q102 0 156.5 39t54.5 116q0 43 -24 76.5t-70.5 64.5t-189.5 99q-134 63 -201 121t-107 135t-40 180q0 194 131.5 305t363.5 111q114 0 217.5 -27t216.5 -76l-100 -241q-117 48 -193.5 67t-150.5 19q-88 0 -135 -41t-47 -107 q0 -41 19 -71.5t60.5 -59t196.5 -102.5q205 -98 281 -196.5t76 -241.5q0 -198 -142.5 -312t-396.5 -114q-234 0 -414 88z" /> +<glyph unicode="T" horiz-adv-x="1186" d="M41 1204v258h1104v-258h-397v-1204h-310v1204h-397z" /> +<glyph unicode="U" horiz-adv-x="1548" d="M174 520v942h309v-895q0 -169 68 -248t225 -79q152 0 220.5 79.5t68.5 249.5v893h309v-946q0 -162 -72.5 -284t-209.5 -187t-324 -65q-282 0 -438 144.5t-156 395.5z" /> +<glyph unicode="V" horiz-adv-x="1331" d="M0 1462h313l275 -870q23 -77 47.5 -179.5t30.5 -142.5q11 92 75 322l277 870h313l-497 -1462h-338z" /> +<glyph unicode="W" horiz-adv-x="1980" d="M0 1462h305l187 -798q49 -221 71 -383q6 57 27.5 176.5t40.5 185.5l213 819h293l213 -819q14 -55 35 -168t32 -194q10 78 32 194.5t40 188.5l186 798h305l-372 -1462h-353l-198 768q-11 41 -37.5 169.5t-30.5 172.5q-6 -54 -30 -173.5t-37 -170.5l-197 -766h-352z" /> +<glyph unicode="X" horiz-adv-x="1366" d="M0 0l485 754l-454 708h342l315 -526l309 526h334l-459 -725l494 -737h-354l-340 553l-340 -553h-332z" /> +<glyph unicode="Y" horiz-adv-x="1278" d="M0 1462h336l303 -602l305 602h334l-485 -893v-569h-308v559z" /> +<glyph unicode="Z" horiz-adv-x="1186" d="M49 0v201l701 1005h-682v256h1050v-200l-700 -1006h719v-256h-1088z" /> +<glyph unicode="[" horiz-adv-x="678" d="M143 -324v1786h484v-211h-224v-1364h224v-211h-484z" /> +<glyph unicode="\" horiz-adv-x="846" d="M12 1462h277l545 -1462h-277z" /> +<glyph unicode="]" horiz-adv-x="678" d="M51 
-113h223v1364h-223v211h484v-1786h-484v211z" /> +<glyph unicode="^" horiz-adv-x="1090" d="M8 520l438 950h144l495 -950h-239l-322 643l-280 -643h-236z" /> +<glyph unicode="_" horiz-adv-x="842" d="M-4 -184h850v-140h-850v140z" /> +<glyph unicode="`" horiz-adv-x="1243" d="M332 1548v21h342q63 -101 235 -301v-27h-202q-63 44 -185 142.5t-190 164.5z" /> +<glyph unicode="a" horiz-adv-x="1237" d="M86 334q0 178 124.5 262.5t375.5 93.5l194 6v49q0 170 -174 170q-134 0 -315 -81l-101 206q193 101 428 101q225 0 345 -98t120 -298v-745h-213l-59 152h-8q-77 -97 -158.5 -134.5t-212.5 -37.5q-161 0 -253.5 92t-92.5 262zM399 332q0 -129 148 -129q106 0 169.5 61 t63.5 162v92l-118 -4q-133 -4 -198 -48t-65 -134z" /> +<glyph unicode="b" horiz-adv-x="1296" d="M160 0v1556h305v-362q0 -69 -12 -221h12q107 166 317 166q198 0 310 -154.5t112 -423.5q0 -277 -115.5 -429t-314.5 -152q-197 0 -309 143h-21l-51 -123h-233zM465 563q0 -180 53.5 -258t169.5 -78q94 0 149.5 86.5t55.5 251.5t-56 247.5t-153 82.5q-113 0 -165 -69.5 t-54 -229.5v-33z" /> +<glyph unicode="c" horiz-adv-x="1053" d="M92 553q0 285 142 435.5t407 150.5q194 0 348 -76l-90 -236q-72 29 -134 47.5t-124 18.5q-238 0 -238 -338q0 -328 238 -328q88 0 163 23.5t150 73.5v-261q-74 -47 -149.5 -65t-190.5 -18q-522 0 -522 573z" /> +<glyph unicode="d" horiz-adv-x="1296" d="M92 557q0 275 114.5 428.5t315.5 153.5q211 0 322 -164h10q-23 125 -23 223v358h306v-1556h-234l-59 145h-13q-104 -165 -317 -165q-197 0 -309.5 153t-112.5 424zM401 553q0 -165 57 -247.5t163 -82.5q117 0 171.5 68t59.5 231v33q0 180 -55.5 258t-180.5 78 q-102 0 -158.5 -86.5t-56.5 -251.5z" /> +<glyph unicode="e" horiz-adv-x="1210" d="M92 551q0 281 140.5 434.5t388.5 153.5q237 0 369 -135t132 -373v-148h-721q5 -130 77 -203t202 -73q101 0 191 21t188 67v-236q-80 -40 -171 -59.5t-222 -19.5q-270 0 -422 149t-152 422zM408 686h428q-2 113 -59 174.5t-154 61.5t-152 -61.5t-63 -174.5z" /> +<glyph unicode="f" horiz-adv-x="793" d="M41 889v147l168 82v82q0 191 94 279t301 88q158 0 281 -47l-78 -224q-92 29 -170 29q-65 0 -94 -38.5t-29 
-98.5v-70h264v-229h-264v-889h-305v889h-168z" /> +<glyph unicode="g" horiz-adv-x="1157" d="M6 -182q0 101 63 169t185 97q-47 20 -82 65.5t-35 96.5q0 64 37 106.5t107 83.5q-88 38 -139.5 122t-51.5 198q0 183 119 283t340 100q47 0 111.5 -8.5t82.5 -12.5h390v-155l-175 -45q48 -75 48 -168q0 -180 -125.5 -280.5t-348.5 -100.5l-55 3l-45 5q-47 -36 -47 -80 q0 -66 168 -66h190q184 0 280.5 -79t96.5 -232q0 -196 -163.5 -304t-469.5 -108q-234 0 -357.5 81.5t-123.5 228.5zM270 -158q0 -63 60.5 -99t169.5 -36q164 0 257 45t93 123q0 63 -55 87t-170 24h-158q-84 0 -140.5 -39.5t-56.5 -104.5zM381 752q0 -91 41.5 -144t126.5 -53 q86 0 126 53t40 144q0 202 -166 202q-168 0 -168 -202z" /> +<glyph unicode="h" horiz-adv-x="1346" d="M160 0v1556h305v-317q0 -37 -7 -174l-7 -90h16q102 164 324 164q197 0 299 -106t102 -304v-729h-305v653q0 242 -180 242q-128 0 -185 -87t-57 -282v-526h-305z" /> +<glyph unicode="i" horiz-adv-x="625" d="M147 1407q0 149 166 149t166 -149q0 -71 -41.5 -110.5t-124.5 -39.5q-166 0 -166 150zM160 0v1118h305v-1118h-305z" /> +<glyph unicode="j" horiz-adv-x="625" d="M-131 -227q70 -19 143 -19q77 0 112.5 43t35.5 127v1194h305v-1239q0 -178 -103 -274.5t-292 -96.5q-117 0 -201 25v240zM147 1407q0 149 166 149t166 -149q0 -71 -41.5 -110.5t-124.5 -39.5q-166 0 -166 150z" /> +<glyph unicode="k" horiz-adv-x="1270" d="M160 0v1556h305v-694l-16 -254h4l133 170l313 340h344l-444 -485l471 -633h-352l-322 453l-131 -105v-348h-305z" /> +<glyph unicode="l" horiz-adv-x="625" d="M160 0v1556h305v-1556h-305z" /> +<glyph unicode="m" horiz-adv-x="2011" d="M160 0v1118h233l41 -143h17q45 77 130 120.5t195 43.5q251 0 340 -164h27q45 78 132.5 121t197.5 43q190 0 287.5 -97.5t97.5 -312.5v-729h-306v653q0 121 -40.5 181.5t-127.5 60.5q-112 0 -167.5 -80t-55.5 -254v-561h-305v653q0 121 -40.5 181.5t-127.5 60.5 q-117 0 -170 -86t-53 -283v-526h-305z" /> +<glyph unicode="n" horiz-adv-x="1346" d="M160 0v1118h233l41 -143h17q51 81 140.5 122.5t203.5 41.5q195 0 296 -105.5t101 -304.5v-729h-305v653q0 121 -43 181.5t-137 60.5q-128 0 -185 -85.5t-57 -283.5v-526h-305z" 
/> +<glyph unicode="o" horiz-adv-x="1268" d="M92 561q0 274 143 426t402 152q161 0 284 -70t189 -201t66 -307q0 -273 -144 -427t-401 -154q-161 0 -284 70.5t-189 202.5t-66 308zM403 561q0 -166 54.5 -251t177.5 -85q122 0 175.5 84.5t53.5 251.5q0 166 -54 249t-177 83q-122 0 -176 -82.5t-54 -249.5z" /> +<glyph unicode="p" horiz-adv-x="1296" d="M160 -492v1610h248l43 -145h14q107 166 317 166q198 0 310 -153t112 -425q0 -179 -52.5 -311t-149.5 -201t-228 -69q-197 0 -309 143h-16q16 -140 16 -162v-453h-305zM465 563q0 -180 53.5 -258t169.5 -78q205 0 205 338q0 165 -50.5 247.5t-158.5 82.5 q-113 0 -165 -69.5t-54 -229.5v-33z" /> +<glyph unicode="q" horiz-adv-x="1296" d="M92 557q0 274 114.5 428t313.5 154q106 0 185 -40t139 -124h8l27 143h258v-1610h-306v469q0 61 13 168h-13q-49 -81 -130 -123t-187 -42q-198 0 -310 152.5t-112 424.5zM403 553q0 -168 53.5 -251t166.5 -83q116 0 170 66.5t59 232.5v37q0 180 -55.5 258t-178.5 78 q-215 0 -215 -338z" /> +<glyph unicode="r" horiz-adv-x="930" d="M160 0v1118h231l45 -188h15q52 94 140.5 151.5t192.5 57.5q62 0 103 -9l-23 -286q-37 10 -90 10q-146 0 -227.5 -75t-81.5 -210v-569h-305z" /> +<glyph unicode="s" horiz-adv-x="1018" d="M92 827q0 149 115.5 230.5t327.5 81.5q202 0 393 -88l-92 -220q-84 36 -157 59t-149 23q-135 0 -135 -73q0 -41 43.5 -71t190.5 -89q131 -53 192 -99t90 -106t29 -143q0 -172 -119.5 -262t-357.5 -90q-122 0 -208 16.5t-161 48.5v252q85 -40 191.5 -67t187.5 -27 q166 0 166 96q0 36 -22 58.5t-76 51t-144 66.5q-129 54 -189.5 100t-88 105.5t-27.5 146.5z" /> +<glyph unicode="t" horiz-adv-x="889" d="M47 889v129l168 102l88 236h195v-238h313v-229h-313v-539q0 -65 36.5 -96t96.5 -31q80 0 192 35v-227q-114 -51 -280 -51q-183 0 -266.5 92.5t-83.5 277.5v539h-146z" /> +<glyph unicode="u" horiz-adv-x="1346" d="M154 389v729h305v-653q0 -121 43 -181.5t137 -60.5q128 0 185 85.5t57 283.5v526h305v-1118h-234l-41 143h-16q-49 -78 -139 -120.5t-205 -42.5q-197 0 -297 105.5t-100 303.5z" /> +<glyph unicode="v" horiz-adv-x="1165" d="M0 1118h319l216 -637q36 -121 45 -229h6q5 96 45 229l215 637h319l-426 
-1118h-313z" /> +<glyph unicode="w" horiz-adv-x="1753" d="M20 1118h304l129 -495q31 -133 63 -367h6q4 76 35 241l16 85l138 536h336l131 -536q4 -22 12.5 -65t16.5 -91.5t14.5 -95t7.5 -74.5h6q9 72 32 197.5t33 169.5l134 495h299l-322 -1118h-332l-86 391l-116 494h-7l-204 -885h-328z" /> +<glyph unicode="x" horiz-adv-x="1184" d="M10 0l379 571l-360 547h346l217 -356l219 356h346l-364 -547l381 -571h-347l-235 383l-236 -383h-346z" /> +<glyph unicode="y" horiz-adv-x="1165" d="M0 1118h334l211 -629q27 -82 37 -194h6q11 103 43 194l207 629h327l-473 -1261q-65 -175 -185.5 -262t-281.5 -87q-79 0 -155 17v242q55 -13 120 -13q81 0 141.5 49.5t94.5 149.5l18 55z" /> +<glyph unicode="z" horiz-adv-x="999" d="M55 0v180l518 705h-487v233h834v-198l-504 -687h522v-233h-883z" /> +<glyph unicode="{" horiz-adv-x="807" d="M31 449v239q126 0 191 44t65 126v8v318q0 153 97 215.5t341 62.5v-225q-99 -3 -136.5 -38t-37.5 -103v-299q-6 -188 -234 -222v-12q234 -35 234 -212v-9v-299q0 -68 37 -103t137 -38v-226q-244 0 -341 62.5t-97 216.5v315q0 87 -65.5 133t-190.5 46z" /> +<glyph unicode="|" horiz-adv-x="1128" d="M455 -465v2015h219v-2015h-219z" /> +<glyph unicode="}" horiz-adv-x="807" d="M82 -98q99 2 136.5 36t37.5 105v299v11q0 86 59 139.5t174 70.5v12q-227 34 -233 222v299q0 70 -37 104t-137 37v225q167 0 262 -26.5t135.5 -84t40.5 -167.5v-318v-10q0 -84 61.5 -126t194.5 -42v-239q-125 0 -190.5 -41t-65.5 -138v-315q0 -112 -41 -169t-135.5 -83.5 t-261.5 -26.5v226z" /> +<glyph unicode="~" d="M88 551v231q103 109 256 109q73 0 137.5 -16t139.5 -48q129 -55 227 -55q53 0 116 32t117 89v-231q-101 -109 -256 -109q-66 0 -126 13t-150 50q-131 56 -227 56q-55 0 -117.5 -33.5t-116.5 -87.5z" /> +<glyph unicode="¢" d="M143 741q0 261 104.5 403t315.5 173v166h178v-158q166 -9 299 -74l-90 -235q-72 29 -134 47t-124 18q-121 0 -179 -83.5t-58 -254.5q0 -327 237 -327q82 0 148 15.5t166 60.5v-254q-127 -61 -265 -70v-188h-178v196q-420 59 -420 565z" /> +<glyph unicode="£" d="M82 0v248q103 44 141.5 101t38.5 157v145h-178v219h178v195q0 201 114.5 309.5t323.5 108.5q195 0 390 -82l-93 
-230q-157 64 -272 64q-78 0 -120 -44.5t-42 -127.5v-193h375v-219h-375v-143q0 -170 -151 -248h718v-260h-1048z" /> +<glyph unicode="¥" d="M6 1462h316l262 -602l264 602h313l-383 -747h195v-178h-246v-138h246v-178h-246v-221h-287v221h-247v178h247v138h-247v178h190z" /> +<glyph unicode="©" horiz-adv-x="1704" d="M100 731q0 200 100 375t275 276t377 101q200 0 375 -100t276 -275t101 -377q0 -197 -97 -370t-272 -277t-383 -104q-207 0 -382 103.5t-272.5 276.5t-97.5 371zM242 731q0 -164 82 -305.5t224 -223t304 -81.5q164 0 305.5 82t223 224t81.5 304q0 164 -82 305.5t-224 223 t-304 81.5q-164 0 -305.5 -82t-223 -224t-81.5 -304zM461 733q0 220 110.5 342.5t309.5 122.5q149 0 305 -78l-74 -168q-113 58 -217 58q-97 0 -150 -74t-53 -205q0 -280 203 -280q57 0 123 15t123 44v-191q-120 -57 -252 -57q-204 0 -316 125t-112 346z" /> +<glyph unicode="­" horiz-adv-x="659" d="M61 424v250h537v-250h-537z" /> +<glyph unicode="®" horiz-adv-x="1704" d="M100 731q0 200 100 375t275 276t377 101q200 0 375 -100t276 -275t101 -377q0 -197 -97 -370t-272 -277t-383 -104q-207 0 -382 103.5t-272.5 276.5t-97.5 371zM242 731q0 -164 82 -305.5t224 -223t304 -81.5q164 0 305.5 82t223 224t81.5 304q0 164 -82 305.5t-224 223 t-304 81.5q-164 0 -305.5 -82t-223 -224t-81.5 -304zM543 272v916h264q181 0 265.5 -70t84.5 -213q0 -170 -143 -233l237 -400h-254l-178 338h-47v-338h-229zM772 778h31q66 0 94.5 28.5t28.5 94.5q0 65 -28 92t-97 27h-29v-242z" /> +<glyph unicode="´" horiz-adv-x="1243" d="M332 1241v27q172 200 235 301h342v-21q-52 -52 -177.5 -154.5t-196.5 -152.5h-203z" /> +<glyph unicode=" " horiz-adv-x="784" /> +<glyph unicode=" " horiz-adv-x="1569" /> +<glyph unicode=" " horiz-adv-x="784" /> +<glyph unicode=" " horiz-adv-x="1569" /> +<glyph unicode=" " horiz-adv-x="523" /> +<glyph unicode=" " horiz-adv-x="392" /> +<glyph unicode=" " horiz-adv-x="261" /> +<glyph unicode=" " horiz-adv-x="261" /> +<glyph unicode=" " horiz-adv-x="196" /> +<glyph unicode=" " horiz-adv-x="313" /> +<glyph unicode=" " horiz-adv-x="87" /> +<glyph unicode="‐" horiz-adv-x="659" d="M61 
424v250h537v-250h-537z" /> +<glyph unicode="‑" horiz-adv-x="659" d="M61 424v250h537v-250h-537z" /> +<glyph unicode="‒" horiz-adv-x="659" d="M61 424v250h537v-250h-537z" /> +<glyph unicode="–" horiz-adv-x="1024" d="M82 436v230h860v-230h-860z" /> +<glyph unicode="—" horiz-adv-x="2048" d="M82 436v230h1884v-230h-1884z" /> +<glyph unicode="‘" horiz-adv-x="444" d="M25 983q22 91 72.5 228.5t103.5 250.5h219q-66 -267 -101 -501h-280z" /> +<glyph unicode="’" horiz-adv-x="444" d="M25 961q69 296 100 501h281l14 -22q-50 -197 -176 -479h-219z" /> +<glyph unicode="“" horiz-adv-x="911" d="M25 983q22 91 72.5 228.5t103.5 250.5h219q-66 -267 -101 -501h-280zM492 983q22 91 72.5 228.5t103.5 250.5h219q-66 -267 -101 -501h-280z" /> +<glyph unicode="”" horiz-adv-x="911" d="M25 961q69 296 100 501h281l14 -22q-50 -197 -176 -479h-219zM492 961q69 296 100 501h280l15 -22q-50 -197 -176 -479h-219z" /> +<glyph unicode="•" horiz-adv-x="770" d="M98 748q0 154 74 235.5t213 81.5q137 0 212 -82t75 -235q0 -152 -75.5 -235t-211.5 -83q-138 0 -212.5 83t-74.5 235z" /> +<glyph unicode="…" horiz-adv-x="1751" d="M117 143q0 84 45 127t131 43q83 0 128.5 -44t45.5 -126q0 -79 -46 -124.5t-128 -45.5q-84 0 -130 44.5t-46 125.5zM700 143q0 84 45 127t132 43q83 0 128.5 -44t45.5 -126q0 -79 -46 -124.5t-128 -45.5q-85 0 -131 44.5t-46 125.5zM1284 143q0 84 45 127t131 43 q83 0 128.5 -44t45.5 -126q0 -79 -46 -124.5t-128 -45.5q-84 0 -130 44.5t-46 125.5z" /> +<glyph unicode=" " horiz-adv-x="313" /> +<glyph unicode=" " horiz-adv-x="392" /> +<glyph unicode="€" d="M66 481v178h118q-4 23 -4 62l2 53h-116v176h133q37 242 199 382.5t405 140.5q188 0 352 -82l-98 -232q-69 31 -129 48.5t-125 17.5q-122 0 -201 -70.5t-102 -204.5h403v-176h-418l-2 -35v-47l2 -33h355v-178h-338q51 -243 321 -243q143 0 275 57v-256q-116 -59 -293 -59 q-245 0 -403 133t-199 368h-137z" /> +<glyph unicode="™" horiz-adv-x="1534" d="M16 1313v149h564v-149h-199v-572h-168v572h-197zM625 741v721h247l160 -510l170 510h240v-721h-168v408l4 121h-6l-174 -529h-142l-165 529h-7l4 -111v-418h-163z" /> +<glyph 
unicode="" horiz-adv-x="1120" d="M0 1120h1120v-1120h-1120v1120z" /> +</font> +</defs></svg> \ No newline at end of file diff --git a/couchpotato/static/fonts/OpenSans-Bold-webfont.ttf b/couchpotato/static/fonts/OpenSans-Bold-webfont.ttf new file mode 100755 index 0000000000..2d94f0629d Binary files /dev/null and b/couchpotato/static/fonts/OpenSans-Bold-webfont.ttf differ diff --git a/couchpotato/static/fonts/OpenSans-Bold-webfont.woff b/couchpotato/static/fonts/OpenSans-Bold-webfont.woff new file mode 100755 index 0000000000..cd86852d0a Binary files /dev/null and b/couchpotato/static/fonts/OpenSans-Bold-webfont.woff differ diff --git a/couchpotato/static/fonts/OpenSans-BoldItalic-webfont.eot b/couchpotato/static/fonts/OpenSans-BoldItalic-webfont.eot new file mode 100755 index 0000000000..f44ac9a331 Binary files /dev/null and b/couchpotato/static/fonts/OpenSans-BoldItalic-webfont.eot differ diff --git a/couchpotato/static/fonts/OpenSans-BoldItalic-webfont.svg b/couchpotato/static/fonts/OpenSans-BoldItalic-webfont.svg new file mode 100755 index 0000000000..8392240a1d --- /dev/null +++ b/couchpotato/static/fonts/OpenSans-BoldItalic-webfont.svg @@ -0,0 +1,146 @@ +<?xml version="1.0" standalone="no"?> +<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd" > +<svg xmlns="http://www.w3.org/2000/svg"> +<metadata> +This is a custom SVG webfont generated by Font Squirrel. +Copyright : Digitized data copyright 20102011 Google Corporation +Foundry : Ascender Corporation +Foundry URL : httpwwwascendercorpcom +</metadata> +<defs> +<font id="OpenSansBoldItalic" horiz-adv-x="1128" > +<font-face units-per-em="2048" ascent="1638" descent="-410" /> +<missing-glyph horiz-adv-x="532" /> +<glyph unicode=" " horiz-adv-x="532" /> +<glyph unicode=" " horiz-adv-x="532" /> +<glyph unicode=" " horiz-adv-x="532" /> +<glyph unicode="!" 
horiz-adv-x="586" d="M25 115q0 90 53.5 144t150.5 54q68 0 109 -38t41 -107q0 -87 -55 -141t-144 -54q-73 0 -114 37.5t-41 104.5zM150 485l157 977h340l-256 -977h-241z" /> +<glyph unicode=""" horiz-adv-x="928" d="M201 934l71 528h277l-152 -528h-196zM604 934l74 528h276l-151 -528h-199z" /> +<glyph unicode="#" horiz-adv-x="1323" d="M41 408l18 206h277l70 232h-252l18 209h289l119 407h217l-117 -407h199l116 407h215l-116 -407h239l-18 -209h-279l-69 -232h258l-19 -206h-297l-116 -408h-220l117 408h-194l-115 -408h-215l113 408h-238zM553 614h197l69 232h-196z" /> +<glyph unicode="$" d="M51 168v266q198 -107 404 -117l71 322q-163 61 -241 151t-78 214q0 173 127 279.5t350 121.5l35 151h139l-33 -151q166 -22 295 -90l-106 -232q-132 65 -242 74l-63 -299q131 -51 195 -99.5t97 -113t33 -149.5q0 -184 -125.5 -291.5t-367.5 -124.5l-39 -199h-140l44 201 q-209 12 -355 86zM502 1022q0 -79 80 -111l51 246q-62 -7 -96.5 -41t-34.5 -94zM594 322q63 9 102 45t39 98q0 46 -24.5 75.5t-59.5 43.5z" /> +<glyph unicode="%" horiz-adv-x="1753" d="M115 885q0 169 55.5 311.5t148.5 214.5t216 72q137 0 211.5 -80t74.5 -238q0 -166 -56 -310t-151 -217t-217 -73q-139 0 -210.5 83.5t-71.5 236.5zM231 0l1088 1462h235l-1083 -1462h-240zM360 868q0 -96 56 -96q65 0 112 131t47 275q0 96 -57 96q-63 0 -110.5 -128.5 t-47.5 -277.5zM973 283q0 177 53 322.5t148 219.5t219 74q137 0 211.5 -78.5t74.5 -230.5q0 -167 -54 -313.5t-148 -220.5t-215 -74q-144 0 -216.5 78.5t-72.5 222.5zM1219 285q0 -97 55 -97q41 0 77 55t59.5 154.5t23.5 196.5q0 96 -58 96q-39 0 -75 -56t-59 -154t-23 -195 z" /> +<glyph unicode="&" horiz-adv-x="1450" d="M68 358q0 145 78.5 248.5t273.5 200.5q-76 130 -76 258q0 195 117.5 307.5t316.5 112.5q169 0 266 -82.5t97 -224.5q0 -280 -365 -426l195 -263q44 57 80.5 121.5t78.5 173.5h300q-133 -313 -310 -497l205 -287h-350l-72 98q-175 -118 -403 -118q-209 0 -320.5 97.5 t-111.5 280.5zM383 387q0 -65 45.5 -108t116.5 -43q115 0 221 59l-225 328q-88 -51 -123 -104.5t-35 -131.5zM621 1085q0 -46 12 -92t29 -73q113 59 155.5 111t42.5 112q0 57 -30 82.5t-70 25.5q-66 0 -102.5 -46.5t-36.5 
-119.5z" /> +<glyph unicode="'" horiz-adv-x="522" d="M201 934l71 528h277l-152 -528h-196z" /> +<glyph unicode="(" horiz-adv-x="694" d="M74 281q0 339 122.5 626.5t381.5 554.5h262q-255 -278 -377.5 -573.5t-122.5 -618.5q0 -308 117 -594h-234q-149 266 -149 605z" /> +<glyph unicode=")" horiz-adv-x="694" d="M-147 -324q499 545 499 1192q0 307 -116 594h233q149 -264 149 -604q0 -342 -124 -630.5t-379 -551.5h-262z" /> +<glyph unicode="*" horiz-adv-x="1116" d="M172 1141l86 237l338 -174l33 369l256 -51l-113 -353l387 29l-18 -254l-338 43l160 -336l-246 -73l-90 337l-197 -278l-207 164l275 248z" /> +<glyph unicode="+" d="M109 612v219h366v369h219v-369h367v-219h-367v-364h-219v364h-366z" /> +<glyph unicode="," horiz-adv-x="569" d="M-102 -264q74 167 194 502h285l8 -23q-118 -255 -262 -479h-225z" /> +<glyph unicode="-" horiz-adv-x="659" d="M41 424l53 250h524l-53 -250h-524z" /> +<glyph unicode="." horiz-adv-x="584" d="M25 115q0 90 53.5 144t150.5 54q68 0 109 -38t41 -107q0 -87 -55 -141t-144 -54q-73 0 -114 37.5t-41 104.5z" /> +<glyph unicode="/" horiz-adv-x="862" d="M-90 0l809 1462h295l-809 -1462h-295z" /> +<glyph unicode="0" d="M66 467q0 297 84 537t228 360.5t333 120.5q399 0 399 -473q0 -470 -168.5 -751t-472.5 -281q-198 0 -300.5 122t-102.5 365zM369 461q0 -115 27.5 -173.5t97.5 -58.5q81 0 150.5 106t116 301t46.5 386q0 111 -30.5 162t-92.5 51q-80 0 -149.5 -104t-117.5 -302t-48 -368z " /> +<glyph unicode="1" d="M182 1114l566 348h249l-309 -1462h-305l180 829q35 152 76 287q-9 -8 -61.5 -47t-262.5 -170z" /> +<glyph unicode="2" d="M-49 0l43 213l477 424q180 159 248.5 254.5t68.5 179.5q0 75 -41 114.5t-110 39.5q-66 0 -135.5 -33.5t-171.5 -118.5l-146 203q132 112 252 159.5t250 47.5q190 0 301 -98t111 -259q0 -107 -41 -201t-122.5 -188t-266.5 -245l-269 -222v-10h568l-54 -260h-962z" /> +<glyph unicode="3" d="M14 59v267q84 -50 182 -75.5t191 -25.5q158 0 243 63.5t85 176.5q0 172 -258 172h-138l46 221h73q167 0 263 62t96 172q0 67 -43 104t-121 37q-134 0 -287 -100l-127 204q124 81 232.5 113.5t246.5 32.5q190 0 298 -90.5t108 -243.5q0 -156 
-94.5 -262t-261.5 -135v-4 q131 -26 198.5 -106.5t67.5 -201.5q0 -133 -74 -238t-212 -163.5t-327 -58.5q-239 0 -387 79z" /> +<glyph unicode="4" d="M-25 303l48 234l770 925h311l-195 -919h170l-51 -240h-170l-63 -303h-293l63 303h-590zM305 543h311l58 248q12 58 40 164t42 141h-6q-35 -63 -132 -181z" /> +<glyph unicode="5" d="M27 61v269q174 -99 352 -99q154 0 241 71t87 194q0 94 -57.5 141t-166.5 47q-102 0 -213 -33l-104 78l207 733h755l-55 -262h-489l-88 -293q72 15 127 15q183 0 289 -103t106 -287q0 -167 -71.5 -292t-208.5 -192.5t-330 -67.5q-117 0 -218.5 23t-162.5 58z" /> +<glyph unicode="6" d="M88 469q0 202 61 395.5t167.5 335t256.5 213.5t357 72q125 0 223 -27l-51 -246q-84 25 -191 25q-194 0 -313.5 -108t-185.5 -345h4q115 166 311 166q157 0 242.5 -97t85.5 -273q0 -169 -71 -313.5t-190.5 -215.5t-277.5 -71q-212 0 -320 127t-108 362zM383 422 q0 -91 40 -143t107 -52q99 0 161.5 94t62.5 236q0 71 -33.5 113.5t-102.5 42.5q-60 0 -114.5 -35.5t-87.5 -95.5t-33 -160z" /> +<glyph unicode="7" d="M78 0l737 1202h-629l56 260h975l-41 -194l-752 -1268h-346z" /> +<glyph unicode="8" d="M55 350q0 298 348 426q-165 132 -165 299q0 119 58 212.5t168 145.5t257 52q123 0 215.5 -42t141 -118t48.5 -174q0 -134 -80.5 -233.5t-230.5 -151.5q217 -141 217 -365q0 -122 -63.5 -218.5t-181 -149.5t-273.5 -53q-214 0 -336.5 100t-122.5 270zM352 383 q0 -81 50 -128.5t135 -47.5q93 0 147.5 53.5t54.5 138.5q0 73 -36.5 131.5t-120.5 112.5q-116 -45 -173 -107t-57 -153zM528 1094q0 -132 123 -201q185 72 185 221q0 68 -39.5 107t-102.5 39q-76 0 -121 -46.5t-45 -119.5z" /> +<glyph unicode="9" d="M86 12v256q111 -41 227 -41q121 0 207.5 49t144 138.5t99.5 257.5h-4q-111 -158 -295 -158q-163 0 -252.5 103.5t-89.5 285.5q0 166 73 305.5t196 208t286 68.5q203 0 308.5 -123t105.5 -361q0 -280 -99 -533t-264 -370.5t-403 -117.5q-128 0 -240 32zM424 928 q0 -87 37.5 -131.5t105.5 -44.5q60 0 111.5 36.5t82 100t30.5 158.5q0 84 -35.5 137t-110.5 53q-65 0 -115.5 -42t-78 -114t-27.5 -153z" /> +<glyph unicode=":" horiz-adv-x="584" d="M25 115q0 90 53.5 144t150.5 54q68 0 109 -38t41 -107q0 -87 -55 
-141t-144 -54q-73 0 -114 37.5t-41 104.5zM207 940q0 92 55.5 145.5t149.5 53.5q68 0 108.5 -38.5t40.5 -107.5q0 -86 -54.5 -140t-144.5 -54q-72 0 -113.5 36.5t-41.5 104.5z" /> +<glyph unicode=";" horiz-adv-x="584" d="M-102 -264q74 167 194 502h285l8 -23q-118 -255 -262 -479h-225zM207 940q0 92 55.5 145.5t149.5 53.5q68 0 108.5 -38.5t40.5 -107.5q0 -86 -54.5 -140t-144.5 -54q-72 0 -113.5 36.5t-41.5 104.5z" /> +<glyph unicode="<" d="M109 641v143l952 496v-240l-643 -317l643 -281v-239z" /> +<glyph unicode="=" d="M109 418v219h952v-219h-952zM109 807v217h952v-217h-952z" /> +<glyph unicode=">" d="M109 203v239l643 281l-643 317v240l952 -496v-143z" /> +<glyph unicode="?" horiz-adv-x="940" d="M166 115q0 91 55 144.5t150 53.5q68 0 108.5 -38t40.5 -107q0 -87 -55 -141t-143 -54q-74 0 -115 38t-41 104zM178 1358q230 125 445 125q177 0 280 -87.5t103 -244.5q0 -83 -28.5 -149.5t-82.5 -123t-190 -147.5q-64 -43 -96.5 -73t-52.5 -64.5t-38 -108.5h-258l14 78 q19 103 73.5 177t172.5 155q124 84 157.5 127t33.5 96q0 119 -133 119q-50 0 -106.5 -16t-201.5 -84z" /> +<glyph unicode="@" horiz-adv-x="1753" d="M92 500q0 279 120.5 497t343 341.5t497.5 123.5q318 0 499 -163.5t181 -458.5q0 -173 -64 -321t-177.5 -231t-254.5 -83q-88 0 -144.5 38.5t-72.5 108.5h-6q-50 -77 -113 -112t-147 -35q-127 0 -198 79.5t-71 229.5q0 147 67.5 276.5t187.5 205t268 75.5q185 0 327 -55 l-106 -420q-11 -44 -19 -76.5t-8 -64.5q0 -68 58 -68q66 0 124 64t92.5 171t34.5 214q0 213 -123.5 325.5t-359.5 112.5q-203 0 -366.5 -94t-255 -266t-91.5 -392q0 -243 134 -380.5t376 -137.5q117 0 219.5 20t221.5 66v-186q-230 -90 -465 -90q-217 0 -378 85.5 t-246 241.5t-85 359zM713 526q0 -65 24.5 -102t69.5 -37q141 0 213 270l57 222q-36 10 -82 10q-82 0 -145.5 -51.5t-100 -137t-36.5 -174.5z" /> +<glyph unicode="A" horiz-adv-x="1286" d="M-123 0l766 1468h373l147 -1468h-297l-24 348h-473l-172 -348h-320zM494 608h333l-26 350q-10 131 -10 253v36q-44 -120 -109 -254z" /> +<glyph unicode="B" horiz-adv-x="1270" d="M53 0l309 1462h426q229 0 346 -81.5t117 -243.5q0 -150 -83 -247.5t-236 -129.5v-6q100 -26 
159.5 -96.5t59.5 -180.5q0 -229 -153 -353t-423 -124h-522zM412 256h180q117 0 183.5 58t66.5 161q0 162 -183 162h-165zM545 883h149q121 0 181.5 48.5t60.5 139.5 q0 137 -170 137h-152z" /> +<glyph unicode="C" horiz-adv-x="1253" d="M123 553q0 262 104 482.5t278 335t400 114.5q125 0 222 -22.5t208 -82.5l-118 -250q-106 59 -175 78t-137 19q-132 0 -237.5 -81t-169.5 -238.5t-64 -338.5q0 -167 68.5 -248t218.5 -81q146 0 338 77v-260q-199 -77 -400 -77q-254 0 -395 149.5t-141 423.5z" /> +<glyph unicode="D" horiz-adv-x="1386" d="M53 0l309 1462h396q270 0 417.5 -143t147.5 -410q0 -280 -98 -486.5t-283.5 -314.5t-437.5 -108h-451zM412 256h106q148 0 258 76t172 223.5t62 337.5q0 154 -72.5 234.5t-208.5 80.5h-115z" /> +<glyph unicode="E" horiz-adv-x="1110" d="M53 0l309 1462h818l-54 -254h-512l-67 -321h477l-55 -254h-477l-80 -377h512l-54 -256h-817z" /> +<glyph unicode="F" horiz-adv-x="1087" d="M53 0l309 1462h814l-54 -254h-508l-79 -377h473l-56 -253h-473l-121 -578h-305z" /> +<glyph unicode="G" horiz-adv-x="1413" d="M123 549q0 268 107 484.5t301 334t448 117.5q218 0 410 -99l-115 -251q-74 40 -148 64t-161 24q-153 0 -273.5 -83t-189 -236.5t-68.5 -330.5q0 -172 72.5 -252.5t222.5 -80.5q76 0 170 24l66 299h-267l56 258h563l-162 -762q-134 -46 -248.5 -62.5t-242.5 -16.5 q-259 0 -400 147t-141 422z" /> +<glyph unicode="H" horiz-adv-x="1434" d="M53 0l309 1462h306l-121 -573h471l121 573h305l-309 -1462h-306l134 631h-471l-134 -631h-305z" /> +<glyph unicode="I" horiz-adv-x="659" d="M53 0l312 1462h305l-312 -1462h-305z" /> +<glyph unicode="J" horiz-adv-x="678" d="M-322 -150q88 -20 164 -20q99 0 160.5 60.5t89.5 191.5l293 1380h305l-303 -1423q-52 -245 -175.5 -357t-346.5 -112q-94 0 -187 27v253z" /> +<glyph unicode="K" horiz-adv-x="1255" d="M53 0l309 1462h306l-152 -702l158 205l409 497h361l-594 -700l291 -762h-338l-211 592l-125 -70l-109 -522h-305z" /> +<glyph unicode="L" horiz-adv-x="1061" d="M53 0l309 1462h306l-256 -1206h512l-54 -256h-817z" /> +<glyph unicode="M" horiz-adv-x="1802" d="M53 0l309 1462h404l68 -1093h4l551 1093h423l-309 
-1462h-280l145 692q53 247 105 441h-5l-569 -1133h-281l-61 1133h-4q-11 -88 -38 -231t-187 -902h-275z" /> +<glyph unicode="N" horiz-adv-x="1546" d="M53 0l309 1462h357l340 -1077h4q12 76 39 217t180 860h274l-309 -1462h-342l-356 1106h-6l-4 -32q-32 -216 -66 -386l-145 -688h-275z" /> +<glyph unicode="O" horiz-adv-x="1495" d="M123 537q0 265 99 487.5t273 341.5t402 119q255 0 395 -144t140 -403q0 -283 -99 -506.5t-271 -337.5t-396 -114q-256 0 -399.5 147.5t-143.5 409.5zM434 537q0 -147 66.5 -222t187.5 -75t220.5 87t155.5 246t56 357q0 142 -65 219.5t-183 77.5q-121 0 -222 -91.5 t-158.5 -251.5t-57.5 -347z" /> +<glyph unicode="P" horiz-adv-x="1188" d="M53 0l309 1462h338q242 0 366 -106.5t124 -319.5q0 -241 -169.5 -378.5t-467.5 -137.5h-86l-109 -520h-305zM522 774h56q142 0 223.5 69t81.5 185q0 180 -195 180h-74z" /> +<glyph unicode="Q" horiz-adv-x="1495" d="M123 537q0 265 99 487.5t273 341.5t402 119q255 0 395 -144t140 -403q0 -316 -122.5 -555.5t-334.5 -337.5l254 -393h-359l-178 328h-26q-256 0 -399.5 147.5t-143.5 409.5zM434 537q0 -147 66.5 -222t187.5 -75t220.5 87t155.5 246t56 357q0 142 -65 219.5t-183 77.5 q-121 0 -222 -91.5t-158.5 -251.5t-57.5 -347z" /> +<glyph unicode="R" horiz-adv-x="1247" d="M53 0l309 1462h359q237 0 356 -102t119 -299q0 -158 -83 -271.5t-239 -168.5l261 -621h-332l-207 561h-119l-119 -561h-305zM530 813h78q131 0 204 57t73 174q0 82 -47.5 123t-149.5 41h-74z" /> +<glyph unicode="S" horiz-adv-x="1085" d="M41 70v274q193 -108 358 -108q112 0 175 42.5t63 116.5q0 43 -13.5 75.5t-38.5 60.5t-124 102q-138 99 -194 196t-56 209q0 129 62 230.5t176.5 158t263.5 56.5q217 0 397 -99l-109 -233q-156 74 -288 74q-83 0 -136 -45t-53 -119q0 -61 33 -106.5t148 -120.5 q121 -80 181 -176.5t60 -225.5q0 -209 -148 -330.5t-401 -121.5q-221 0 -356 90z" /> +<glyph unicode="T" horiz-adv-x="1087" d="M168 1204l55 258h1010l-55 -258h-353l-254 -1204h-305l254 1204h-352z" /> +<glyph unicode="U" horiz-adv-x="1415" d="M141 401q0 72 15 138l196 923h305l-194 -919q-17 -74 -17 -125q0 -178 189 -178q123 0 195 76.5t104 228.5l194 917h306l-201 
-946q-57 -266 -218 -401t-419 -135q-212 0 -333.5 113.5t-121.5 307.5z" /> +<glyph unicode="V" horiz-adv-x="1208" d="M184 1462h295l51 -880q4 -45 4 -133q-2 -103 -6 -150h7q78 221 110 283l432 880h316l-748 -1462h-334z" /> +<glyph unicode="W" horiz-adv-x="1831" d="M184 1462h287l6 -798q0 -52 -4 -173t-10 -174h6q22 64 67 180.5t60 145.5l369 819h270l21 -873q0 -146 -9 -272h6q43 129 131 349l330 796h309l-647 -1462h-346l-22 721l-2 139q0 88 4 158h-4q-46 -146 -115 -299l-324 -719h-338z" /> +<glyph unicode="X" horiz-adv-x="1241" d="M-117 0l576 764l-238 698h320l153 -518l363 518h344l-545 -725l268 -737h-331l-172 543l-396 -543h-342z" /> +<glyph unicode="Y" horiz-adv-x="1155" d="M186 1462h312l129 -592l374 592h342l-618 -903l-119 -559h-303l119 559z" /> +<glyph unicode="Z" horiz-adv-x="1098" d="M-61 0l38 201l777 1005h-543l53 256h936l-41 -202l-782 -1004h596l-53 -256h-981z" /> +<glyph unicode="[" horiz-adv-x="678" d="M-37 -324l381 1786h473l-45 -211h-215l-291 -1364h215l-45 -211h-473z" /> +<glyph unicode="\" horiz-adv-x="862" d="M221 1462h260l224 -1462h-267z" /> +<glyph unicode="]" horiz-adv-x="678" d="M-137 -324l45 211h213l291 1364h-215l45 211h473l-381 -1786h-471z" /> +<glyph unicode="^" horiz-adv-x="1081" d="M20 520l619 950h147l277 -950h-223l-174 633l-402 -633h-244z" /> +<glyph unicode="_" horiz-adv-x="819" d="M-186 -324l30 140h822l-31 -140h-821z" /> +<glyph unicode="`" horiz-adv-x="1135" d="M508 1548v21h311q36 -148 115 -303v-25h-184q-71 69 -138.5 153.5t-103.5 153.5z" /> +<glyph unicode="a" horiz-adv-x="1217" d="M90 385q0 198 72 377.5t189 278t257 98.5q97 0 167.5 -42t109.5 -122h8l57 143h232l-238 -1118h-229l14 145h-4q-134 -165 -319 -165q-147 0 -231.5 106.5t-84.5 298.5zM395 399q0 -88 33.5 -132t95.5 -44q69 0 133 67t103 181.5t39 259.5q0 71 -38.5 117.5t-101.5 46.5 q-68 0 -129.5 -72t-98 -190t-36.5 -234z" /> +<glyph unicode="b" horiz-adv-x="1219" d="M37 0l330 1556h301l-62 -288q-41 -182 -84 -299h8q78 98 142.5 134t140.5 36q146 0 230.5 -108t84.5 -298t-68 -367.5t-187 -281.5t-263 -104q-194 0 -276 163h-8l-58 
-143h-231zM420 399q0 -80 37 -128t102 -48q67 0 128 69t98.5 189.5t37.5 237.5q0 176 -131 176 q-68 0 -130 -65t-102 -180.5t-40 -250.5z" /> +<glyph unicode="c" horiz-adv-x="989" d="M90 391q0 212 74.5 385.5t209.5 268t308 94.5q182 0 328 -72l-92 -229q-54 23 -106 40t-118 17q-85 0 -153.5 -64t-107 -175.5t-38.5 -239.5q0 -96 45.5 -144.5t126.5 -48.5q76 0 141 23.5t134 58.5v-246q-152 -79 -336 -79q-201 0 -308.5 107.5t-107.5 303.5z" /> +<glyph unicode="d" horiz-adv-x="1217" d="M90 387q0 196 71.5 374.5t188.5 278t258 99.5q82 0 141.5 -37t112.5 -127h8l2 28q6 110 25 195l76 358h301l-330 -1556h-229l14 145h-4q-71 -87 -148.5 -126t-170.5 -39q-147 0 -231.5 107t-84.5 300zM395 399q0 -176 137 -176q66 0 128.5 68.5t100.5 182.5t38 245 q0 80 -37.5 128t-102.5 48q-68 0 -129.5 -72t-98 -190t-36.5 -234z" /> +<glyph unicode="e" horiz-adv-x="1141" d="M90 412q0 207 82.5 377.5t223.5 260t319 89.5q177 0 276 -81.5t99 -223.5q0 -187 -167 -288.5t-477 -101.5h-51l-2 -21v-20q0 -91 51.5 -143.5t147.5 -52.5q87 0 158 19t172 67v-227q-172 -86 -390 -86q-210 0 -326 113t-116 319zM428 647h45q155 0 241.5 48.5 t86.5 131.5q0 95 -105 95q-88 0 -166 -80t-102 -195z" /> +<glyph unicode="f" horiz-adv-x="764" d="M-219 -225q61 -21 115 -21q61 0 107 40t65 130l204 965h-163l30 145l183 84l18 84q41 190 138.5 277.5t273.5 87.5q131 0 235 -49l-80 -224q-69 31 -133 31q-57 0 -92 -40t-47 -105l-12 -62h219l-49 -229h-220l-215 -1010q-77 -371 -403 -371q-104 0 -174 25v242z" /> +<glyph unicode="g" horiz-adv-x="1108" d="M-115 -209q0 102 68.5 175.5t214.5 121.5q-74 47 -74 133q0 71 44.5 122.5t146.5 98.5q-65 49 -96 112t-31 153q0 199 125.5 315.5t341.5 116.5q83 0 166 -23h395l-35 -166l-174 -41q16 -52 16 -118q0 -195 -121 -308.5t-329 -113.5q-59 0 -99 10q-84 -27 -84 -78 q0 -34 30 -49t89 -23l137 -18q163 -21 237.5 -84.5t74.5 -183.5q0 -211 -156 -323t-446 -112q-208 0 -324.5 75.5t-116.5 207.5zM150 -172q0 -115 194 -115q151 0 228 45t77 127q0 39 -32.5 60t-137.5 35l-114 14q-106 -14 -160.5 -57t-54.5 -109zM442 680q0 -119 103 -119 q75 0 121.5 76.5t46.5 193.5t-99 117q-77 0 -124.5 
-76.5t-47.5 -191.5z" /> +<glyph unicode="h" horiz-adv-x="1237" d="M37 0l330 1556h301q-39 -181 -60 -278t-86 -309h8q62 77 138 123.5t176 46.5q138 0 213.5 -83.5t75.5 -238.5q0 -73 -23 -180l-133 -637h-301l137 653q16 68 16 119q0 123 -108 123q-92 0 -167 -114t-118 -318l-98 -463h-301z" /> +<glyph unicode="i" horiz-adv-x="608" d="M37 0l237 1118h301l-237 -1118h-301zM322 1380q0 87 47.5 131.5t134.5 44.5q73 0 111 -31t38 -89q0 -80 -44 -129.5t-136 -49.5q-151 0 -151 123z" /> +<glyph unicode="j" horiz-adv-x="608" d="M-264 -225q61 -21 114 -21q137 0 173 170l253 1194h302l-265 -1239q-77 -371 -403 -371q-104 0 -174 25v242zM324 1380q0 87 47.5 131.5t134.5 44.5q73 0 111 -31t38 -89q0 -80 -44 -129.5t-136 -49.5q-151 0 -151 123z" /> +<glyph unicode="k" horiz-adv-x="1163" d="M37 0l330 1556h301l-148 -694q-8 -41 -29 -117l-28 -102h4l453 475h344l-498 -504l285 -614h-336l-183 420l-120 -72l-74 -348h-301z" /> +<glyph unicode="l" horiz-adv-x="608" d="M37 0l330 1556h301l-330 -1556h-301z" /> +<glyph unicode="m" horiz-adv-x="1853" d="M37 0l237 1118h230l-21 -207h6q146 228 355 228q219 0 262 -228h6q68 110 160.5 169t197.5 59q136 0 207.5 -85t71.5 -237q0 -76 -23 -180l-133 -637h-301l138 653q16 68 16 119q0 123 -98 123q-92 0 -166.5 -112t-118.5 -318l-96 -465h-301l137 653q16 68 16 119 q0 123 -98 123q-92 0 -167 -114t-118 -318l-98 -463h-301z" /> +<glyph unicode="n" horiz-adv-x="1237" d="M37 0l237 1118h230l-21 -207h6q146 228 355 228q138 0 213.5 -83.5t75.5 -238.5q0 -73 -23 -180l-133 -637h-301l137 653q16 68 16 119q0 123 -108 123q-92 0 -167 -114t-118 -318l-98 -463h-301z" /> +<glyph unicode="o" horiz-adv-x="1198" d="M90 410q0 213 71.5 379.5t206.5 258t316 91.5q196 0 310 -118t114 -325q0 -211 -70.5 -374t-203.5 -252.5t-316 -89.5q-195 0 -311.5 117.5t-116.5 312.5zM393 410q0 -185 150 -185q75 0 135 61.5t93.5 171t33.5 238.5q0 197 -143 197q-75 0 -134.5 -61t-97 -179 t-37.5 -243z" /> +<glyph unicode="p" horiz-adv-x="1219" d="M-68 -492l342 1610h230l-17 -170h9q138 191 317 191q146 0 230.5 -107.5t84.5 -300.5q0 -191 -68.5 -367.5t-187.5 
-280t-262 -103.5q-83 0 -143 37t-111 126h-8q-12 -159 -43 -295l-72 -340h-301zM420 399q0 -80 37 -128t102 -48q67 0 128 69t98.5 189.5t37.5 237.5 q0 176 -131 176q-68 0 -131.5 -67.5t-102 -180t-38.5 -248.5z" /> +<glyph unicode="q" horiz-adv-x="1217" d="M90 385q0 198 72 377.5t189 278t257 98.5q86 0 152.5 -37.5t124.5 -126.5h8l57 143h232l-342 -1610h-301q47 218 73 337.5t84 304.5h-8q-72 -94 -143 -132t-154 -38q-88 0 -156 47.5t-106.5 138.5t-38.5 219zM395 399q0 -88 36.5 -132t103.5 -44q64 0 127.5 70t100 181 t36.5 245q0 80 -37.5 128t-102.5 48q-68 0 -129.5 -72t-98 -190t-36.5 -234z" /> +<glyph unicode="r" horiz-adv-x="862" d="M37 0l237 1118h230l-21 -207h6q147 228 353 228q59 0 96 -11l-66 -290q-45 16 -100 16q-116 0 -203.5 -91.5t-124.5 -262.5l-106 -500h-301z" /> +<glyph unicode="s" horiz-adv-x="969" d="M23 45v248q157 -90 319 -90q80 0 131 32.5t51 88.5q0 43 -37 77t-131 86q-121 68 -169 135.5t-48 159.5q0 170 110.5 263.5t315.5 93.5q201 0 363 -95l-99 -215q-140 84 -258 84q-57 0 -92 -25.5t-35 -68.5q0 -39 32 -68.5t120 -74.5q123 -63 178 -137t55 -170 q0 -188 -124.5 -288.5t-346.5 -100.5q-107 0 -186.5 15t-148.5 50z" /> +<glyph unicode="t" horiz-adv-x="840" d="M94 889l29 147l196 84l132 236h194l-49 -238h283l-50 -229h-282l-115 -539q-6 -30 -6 -53q0 -74 88 -74q65 0 162 35v-225q-111 -53 -266 -53q-150 0 -220.5 63t-70.5 195q0 50 12 112l115 539h-152z" /> +<glyph unicode="u" horiz-adv-x="1237" d="M111 301q0 93 24 213l127 604h301l-137 -653q-16 -68 -16 -119q0 -123 108 -123q92 0 167 114t118 318l98 463h301l-237 -1118h-230l21 207h-6q-145 -227 -355 -227q-138 0 -211 82.5t-73 238.5z" /> +<glyph unicode="v" horiz-adv-x="1049" d="M102 1118h295l45 -586q7 -133 7 -231h6q55 153 92 223l297 594h323l-604 -1118h-323z" /> +<glyph unicode="w" horiz-adv-x="1614" d="M125 1118h281l4 -495l-4 -167l-7 -171h4q6 20 14 41.5t51 136.5t46 119l231 536h328v-536q0 -142 -10 -297h6l28 80q73 208 95 258l219 495h307l-530 -1118h-330l-6 520q0 155 10 340h-6q-62 -178 -123 -319l-233 -541h-324z" /> +<glyph unicode="x" horiz-adv-x="1087" d="M-100 0l479 
573l-225 545h321l115 -334l244 334h354l-467 -561l244 -557h-326l-125 342l-264 -342h-350z" /> +<glyph unicode="y" horiz-adv-x="1063" d="M-141 -233q68 -13 116 -13q84 0 147.5 48t117.5 149l26 49l-164 1118h295l56 -518q14 -122 14 -293h6q20 51 44 119.5t65 153.5l260 538h327l-680 -1278q-177 -332 -483 -332q-90 0 -147 19v240z" /> +<glyph unicode="z" horiz-adv-x="932" d="M-47 0l35 180l575 705h-397l51 233h750l-43 -200l-566 -685h439l-49 -233h-795z" /> +<glyph unicode="{" horiz-adv-x="727" d="M-8 459l45 229q122 0 192.5 41.5t92.5 138.5l61 285q38 170 131 239.5t270 69.5h84l-49 -225q-90 -2 -130.5 -34.5t-55.5 -106.5l-66 -297q-45 -207 -276 -236v-8q85 -26 126.5 -82.5t41.5 -134.5q0 -44 -15 -113l-36 -178q-7 -28 -7 -51q0 -54 33.5 -74t91.5 -20v-226 h-53q-167 0 -253.5 63.5t-86.5 184.5q0 57 14 125l39 184q15 69 15 86q0 140 -209 140z" /> +<glyph unicode="|" d="M455 -465v2015h219v-2015h-219z" /> +<glyph unicode="}" horiz-adv-x="727" d="M-100 -98q93 3 137 35.5t59 105.5l66 297q25 111 95 166t181 69v9q-168 51 -168 217q0 43 15 112l37 179q6 30 6 51q0 54 -36.5 74t-109.5 20l41 225h33q340 0 340 -248q0 -56 -14 -124l-39 -185q-15 -69 -15 -86q0 -139 209 -139l-45 -229q-122 0 -192.5 -42t-91.5 -139 l-62 -284q-37 -170 -130.5 -240t-270.5 -70h-45v226z" /> +<glyph unicode="~" d="M109 551v231q101 109 256 109q64 0 117 -14t139 -50q64 -27 111 -41t95 -14q51 0 112 30.5t122 90.5v-231q-103 -109 -256 -109q-59 0 -109 11.5t-147 51.5q-89 38 -127 47t-80 9q-54 0 -116.5 -33t-116.5 -88z" /> +<glyph unicode="¢" d="M164 584q0 193 62.5 355t178 262.5t267.5 123.5l33 158h188l-35 -158q118 -14 225 -65l-92 -230q-53 23 -105 40t-118 17q-133 0 -216 -143t-83 -336q0 -96 45 -144t127 -48q75 0 140 23.5t134 58.5v-246q-136 -71 -299 -80l-41 -192h-188l49 210q-134 36 -203 136 t-69 258z" /> +<glyph unicode="£" d="M-12 0l49 246q196 48 244 264l22 104h-192l45 220h192l49 247q41 197 162 300.5t313 103.5q195 0 369 -86l-113 -232q-141 68 -237 68q-75 0 -123 -39.5t-68 -132.5l-47 -229h299l-45 -220h-299l-18 -84q-42 -195 -209 -270h655l-55 -260h-993z" /> +<glyph 
unicode="¥" d="M88 221l37 178h252l29 138h-252l39 178h196l-192 747h297l114 -590l371 590h311l-506 -747h203l-39 -178h-252l-28 -138h252l-37 -178h-252l-47 -221h-291l47 221h-252z" /> +<glyph unicode="©" horiz-adv-x="1704" d="M125 731q0 200 100 375t275 276t377 101q199 0 373.5 -99t276 -275.5t101.5 -377.5q0 -199 -98.5 -373t-272.5 -276t-380 -102q-207 0 -382 103.5t-272.5 276.5t-97.5 371zM266 731q0 -164 81.5 -305t224 -223t305.5 -82q167 0 308 83t221.5 223.5t80.5 303.5 t-80.5 303.5t-222 223.5t-307.5 83q-164 0 -306.5 -82.5t-223.5 -223.5t-81 -304zM485 721q0 225 117.5 351t325.5 126q142 0 284 -72l-75 -174q-114 58 -205 58q-111 0 -163 -73t-52 -214q0 -134 55.5 -203t159.5 -69q43 0 108.5 15.5t124.5 43.5v-191q-131 -57 -262 -57 q-196 0 -307 122.5t-111 336.5z" /> +<glyph unicode="­" horiz-adv-x="659" d="M41 424l53 250h524l-53 -250h-524z" /> +<glyph unicode="®" horiz-adv-x="1704" d="M125 731q0 200 100 375t275 276t377 101q199 0 373.5 -99t276 -275.5t101.5 -377.5q0 -199 -98.5 -373t-272.5 -276t-380 -102q-207 0 -382 103.5t-272.5 276.5t-97.5 371zM266 731q0 -164 81.5 -305t224 -223t305.5 -82q167 0 308 83t221.5 223.5t80.5 303.5 t-80.5 303.5t-222 223.5t-307.5 83q-164 0 -306.5 -82.5t-223.5 -223.5t-81 -304zM571 293v874h308q173 0 265.5 -67.5t92.5 -200.5q0 -86 -44 -149.5t-130 -96.5l197 -360h-254l-138 297h-67v-297h-230zM801 758h51q72 0 113 31t41 92q0 59 -35.5 88.5t-116.5 29.5h-53 v-241z" /> +<glyph unicode="´" horiz-adv-x="1135" d="M483 1241v25q79 88 222 303h335v-17q-46 -56 -154 -152.5t-194 -158.5h-209z" /> +<glyph unicode=" " horiz-adv-x="786" /> +<glyph unicode=" " horiz-adv-x="1573" /> +<glyph unicode=" " horiz-adv-x="786" /> +<glyph unicode=" " horiz-adv-x="1573" /> +<glyph unicode=" " horiz-adv-x="524" /> +<glyph unicode=" " horiz-adv-x="393" /> +<glyph unicode=" " horiz-adv-x="262" /> +<glyph unicode=" " horiz-adv-x="262" /> +<glyph unicode=" " horiz-adv-x="196" /> +<glyph unicode=" " horiz-adv-x="314" /> +<glyph unicode=" " horiz-adv-x="87" /> +<glyph unicode="‐" horiz-adv-x="659" d="M41 424l53 
250h524l-53 -250h-524z" /> +<glyph unicode="‑" horiz-adv-x="659" d="M41 424l53 250h524l-53 -250h-524z" /> +<glyph unicode="‒" horiz-adv-x="659" d="M41 424l53 250h524l-53 -250h-524z" /> +<glyph unicode="–" horiz-adv-x="983" d="M41 436l49 230h852l-49 -230h-852z" /> +<glyph unicode="—" horiz-adv-x="1966" d="M41 436l49 230h1835l-49 -230h-1835z" /> +<glyph unicode="‘" horiz-adv-x="440" d="M115 983q103 227 262 479h225q-91 -213 -194 -501h-285z" /> +<glyph unicode="’" horiz-adv-x="440" d="M106 961q89 206 195 501h285l8 -22q-103 -227 -262 -479h-226z" /> +<glyph unicode="“" horiz-adv-x="887" d="M115 983q103 227 262 479h225q-91 -213 -194 -501h-285zM561 983q103 227 262 479h226q-97 -227 -195 -501h-285z" /> +<glyph unicode="”" horiz-adv-x="887" d="M106 961q89 206 195 501h285l8 -22q-103 -227 -262 -479h-226zM553 961q23 53 46.5 111t148.5 390h284l8 -22q-103 -227 -262 -479h-225z" /> +<glyph unicode="•" horiz-adv-x="739" d="M104 686q0 106 42.5 194t120 136.5t182.5 48.5q120 0 182.5 -67t62.5 -191q0 -177 -91.5 -277t-248.5 -100q-117 0 -183.5 67t-66.5 189z" /> +<glyph unicode="…" horiz-adv-x="1706" d="M25 115q0 90 53.5 144t150.5 54q68 0 109 -38t41 -107q0 -87 -55 -141t-144 -54q-73 0 -114 37.5t-41 104.5zM586 115q0 90 53.5 144t150.5 54q68 0 109 -38t41 -107q0 -87 -55 -141t-144 -54q-73 0 -114 37.5t-41 104.5zM1147 115q0 90 53.5 144t150.5 54q68 0 109 -38 t41 -107q0 -87 -55 -141t-144 -54q-73 0 -114 37.5t-41 104.5z" /> +<glyph unicode=" " horiz-adv-x="314" /> +<glyph unicode=" " horiz-adv-x="393" /> +<glyph unicode="€" d="M41 481l37 178h127q9 67 22 115h-125l39 176h135q87 252 250.5 393.5t374.5 141.5q100 0 179 -23t165 -80l-125 -223q-87 49 -131 63.5t-90 14.5q-97 0 -176 -74.5t-135 -212.5h348l-39 -176h-360q-11 -34 -25 -115h299l-37 -178h-280q0 -120 44.5 -181.5t147.5 -61.5 q133 0 283 63v-258q-126 -63 -330 -63q-446 0 -446 501h-152z" /> +<glyph unicode="™" horiz-adv-x="1534" d="M106 1313v149h564v-149h-199v-572h-168v572h-197zM715 741v721h248l159 -510l170 510h240v-721h-168v408l4 121h-6l-174 -529h-141l-166 
529h-7l5 -111v-418h-164z" /> +<glyph unicode="" horiz-adv-x="1120" d="M0 1120h1120v-1120h-1120v1120z" /> +</font> +</defs></svg> \ No newline at end of file diff --git a/couchpotato/static/fonts/OpenSans-BoldItalic-webfont.ttf b/couchpotato/static/fonts/OpenSans-BoldItalic-webfont.ttf new file mode 100755 index 0000000000..f74e0e3ca7 Binary files /dev/null and b/couchpotato/static/fonts/OpenSans-BoldItalic-webfont.ttf differ diff --git a/couchpotato/static/fonts/OpenSans-BoldItalic-webfont.woff b/couchpotato/static/fonts/OpenSans-BoldItalic-webfont.woff new file mode 100755 index 0000000000..f3248c1142 Binary files /dev/null and b/couchpotato/static/fonts/OpenSans-BoldItalic-webfont.woff differ diff --git a/couchpotato/static/fonts/OpenSans-Italic-webfont.eot b/couchpotato/static/fonts/OpenSans-Italic-webfont.eot new file mode 100755 index 0000000000..277c1899cd Binary files /dev/null and b/couchpotato/static/fonts/OpenSans-Italic-webfont.eot differ diff --git a/couchpotato/static/fonts/OpenSans-Italic-webfont.svg b/couchpotato/static/fonts/OpenSans-Italic-webfont.svg new file mode 100755 index 0000000000..29c7497fed --- /dev/null +++ b/couchpotato/static/fonts/OpenSans-Italic-webfont.svg @@ -0,0 +1,146 @@ +<?xml version="1.0" standalone="no"?> +<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd" > +<svg xmlns="http://www.w3.org/2000/svg"> +<metadata> +This is a custom SVG webfont generated by Font Squirrel. +Copyright : Digitized data copyright 20102011 Google Corporation +Foundry : Ascender Corporation +Foundry URL : httpwwwascendercorpcom +</metadata> +<defs> +<font id="OpenSansItalic" horiz-adv-x="1128" > +<font-face units-per-em="2048" ascent="1638" descent="-410" /> +<missing-glyph horiz-adv-x="532" /> +<glyph unicode=" " horiz-adv-x="532" /> +<glyph unicode=" " horiz-adv-x="532" /> +<glyph unicode=" " horiz-adv-x="532" /> +<glyph unicode="!" 
horiz-adv-x="530" d="M43 78q0 76 39.5 120t107.5 44q45 0 73 -27.5t28 -81.5q0 -68 -39 -115t-105 -47q-49 0 -76.5 28t-27.5 79zM172 403q49 307 176 1059h207l-274 -1059h-109z" /> +<glyph unicode=""" horiz-adv-x="791" d="M225 934l72 528h188l-153 -528h-107zM573 934l72 528h189l-154 -528h-107z" /> +<glyph unicode="#" horiz-adv-x="1323" d="M63 430l13 129h284l101 340h-277l13 127h301l123 436h139l-125 -436h305l127 436h133l-125 -436h264l-12 -127h-291l-98 -340h285l-13 -129h-309l-125 -430h-139l129 430h-303l-127 -430h-133l121 430h-261zM500 559h303l96 340h-303z" /> +<glyph unicode="$" d="M72 176v154q82 -41 175.5 -63.5t166.5 -22.5l98 452q-139 49 -201.5 123.5t-62.5 188.5q0 159 108 255t299 113l39 176h133l-39 -178q159 -12 283 -76l-63 -135q-121 63 -248 72l-94 -440q149 -55 212.5 -125t63.5 -178q0 -162 -112.5 -263t-309.5 -123l-49 -225h-133 l49 223q-195 14 -315 72zM401 1010q0 -53 34.5 -97.5t107.5 -70.5l84 393q-108 -11 -167 -69t-59 -156zM549 250q107 13 170 75t63 154q0 54 -33 96t-114 74z" /> +<glyph unicode="%" horiz-adv-x="1624" d="M168 860q0 166 50.5 318.5t136.5 228.5t200 76q116 0 176 -72t60 -205q0 -108 -32 -237.5t-82.5 -217.5t-120.5 -137t-157 -49q-109 0 -170 75t-61 220zM231 0l1086 1462h151l-1085 -1462h-152zM307 864q0 -172 107 -172q52 0 94 39.5t73.5 114t50.5 175t19 171.5 q0 166 -108 166q-66 0 -119 -63t-85 -187.5t-32 -243.5zM909 274q0 166 50.5 318.5t136.5 228.5t200 76q116 0 176 -71.5t60 -204.5q0 -107 -31.5 -236t-82 -217.5t-121 -138t-156.5 -49.5q-110 0 -171 74.5t-61 219.5zM1049 279q0 -173 106 -173q65 0 117 65t86.5 198.5 t34.5 236.5q0 166 -109 166q-67 0 -119.5 -64.5t-84 -188.5t-31.5 -240z" /> +<glyph unicode="&" horiz-adv-x="1372" d="M66 342q0 148 90 257.5t303 211.5q-103 165 -103 309q0 164 106 264.5t281 100.5q149 0 236.5 -79t87.5 -212q0 -78 -32.5 -137t-87.5 -108t-127.5 -90t-153.5 -83l278 -389q127 110 199 295h168q-101 -236 -283 -412l203 -270h-201l-117 166q-120 -100 -230 -143 t-247 -43q-168 0 -269 96t-101 266zM229 354q0 -106 66.5 -170.5t175.5 -64.5q87 0 168 33t195 124l-306 433q-128 -67 -184 
-116t-85.5 -107.5t-29.5 -131.5zM516 1118q0 -120 82 -235q139 71 191 110t83 85t31 104q0 77 -42.5 121.5t-123.5 44.5q-105 0 -163 -60t-58 -170 z" /> +<glyph unicode="'" horiz-adv-x="444" d="M225 934l72 528h188l-153 -528h-107z" /> +<glyph unicode="(" horiz-adv-x="584" d="M82 272q0 339 120 627t384 563h157q-246 -270 -371.5 -570t-125.5 -618q0 -339 114 -598h-131q-147 266 -147 596z" /> +<glyph unicode=")" horiz-adv-x="584" d="M-160 -324q496 551 496 1188q0 341 -113 598h131q146 -269 146 -598q0 -341 -121.5 -629.5t-382.5 -558.5h-156z" /> +<glyph unicode="*" horiz-adv-x="1130" d="M215 1194l55 154l371 -185l41 400l172 -35l-123 -383l422 18l-8 -157l-393 47l180 -383l-166 -52l-113 406l-258 -344l-116 121l309 284z" /> +<glyph unicode="+" d="M127 651v142h389v391h141v-391h390v-142h-390v-387h-141v387h-389z" /> +<glyph unicode="," horiz-adv-x="492" d="M-100 -264q126 286 204 502h187l8 -23q-113 -235 -270 -479h-129z" /> +<glyph unicode="-" horiz-adv-x="639" d="M55 469l35 158h479l-34 -158h-480z" /> +<glyph unicode="." horiz-adv-x="518" d="M43 74q0 77 40.5 122.5t111.5 45.5q43 0 69.5 -26t26.5 -79q0 -71 -40 -118.5t-108 -47.5q-46 0 -73 26t-27 77z" /> +<glyph unicode="/" horiz-adv-x="717" d="M-94 0l813 1462h174l-813 -1462h-174z" /> +<glyph unicode="0" d="M121 477q0 270 82 514.5t216.5 369t307.5 124.5q365 0 365 -471q0 -295 -78.5 -539t-214 -369.5t-314.5 -125.5q-176 0 -270 127.5t-94 369.5zM293 479q0 -172 50 -264t161 -92q115 0 209 114t150.5 328t56.5 453q0 323 -203 323q-113 0 -209 -115.5t-155.5 -323 t-59.5 -423.5z" /> +<glyph unicode="1" d="M303 1178l449 284h149l-313 -1462h-172l196 913q59 261 88 359q-50 -53 -139 -111l-178 -110z" /> +<glyph unicode="2" d="M12 0l31 147l465 420q102 93 176.5 163.5t123 133t72 124t23.5 136.5q0 99 -60 157t-163 58q-77 0 -150.5 -28.5t-162.5 -96.5l-82 115q191 154 413 154q176 0 278.5 -88.5t102.5 -243.5q0 -111 -39.5 -204t-131 -197t-294.5 -281l-352 -307v-8h678l-29 -154h-899z" /> +<glyph unicode="3" d="M47 59v164q94 -49 199 -75.5t190 -26.5q162 0 252 79.5t90 217.5q0 131 -79 198.5t-220 
67.5h-131l31 143h139q165 0 274 87t109 227q0 92 -58 146t-157 54q-80 0 -157 -27t-175 -93l-80 118q195 144 424 144q179 0 277 -87t98 -237q0 -156 -101 -264.5t-280 -140.5v-9 q124 -23 195 -106.5t71 -208.5q0 -133 -62 -234.5t-181 -158.5t-283 -57q-210 0 -385 79z" /> +<glyph unicode="4" d="M16 334l29 158l834 978h196l-207 -983h232l-33 -153h-233l-72 -334h-164l74 334h-656zM219 487h486q46 220 78 373t116 445h-8q-17 -29 -66.5 -96.5t-72.5 -96.5z" /> +<glyph unicode="5" d="M80 59v164q164 -102 334 -102q191 0 298 96t107 268q0 126 -73.5 199.5t-204.5 73.5q-48 0 -97 -6.5t-139 -30.5l-74 57l197 684h668l-33 -153h-522l-127 -439q87 23 184 23q182 0 289.5 -104.5t107.5 -282.5q0 -161 -73 -283t-204 -182.5t-308 -60.5q-193 0 -330 79z " /> +<glyph unicode="6" d="M133 424q0 209 60.5 415t163.5 351.5t246 219t327 73.5q111 0 184 -23l-35 -145q-68 22 -170 22q-212 0 -356.5 -149t-212.5 -443h8q59 79 146.5 126t193.5 47q154 0 244 -98.5t90 -270.5q0 -161 -66.5 -294.5t-180.5 -204t-261 -70.5q-182 0 -281.5 115t-99.5 329z M299 416q0 -137 60.5 -216t172.5 -79q94 0 167.5 54t114 149t40.5 208q0 248 -221 248q-66 0 -128 -28.5t-110 -76t-72 -104.5t-24 -155z" /> +<glyph unicode="7" d="M174 0l768 1313h-719l31 149h891l-27 -139l-764 -1323h-180z" /> +<glyph unicode="8" d="M96 346q0 148 95 256t296 184q-95 69 -135.5 144.5t-40.5 171.5q0 111 54.5 198.5t153.5 136t222 48.5q174 0 271.5 -86.5t97.5 -235.5q0 -129 -78 -225t-266 -176q127 -78 180 -165t53 -202q0 -122 -60 -217.5t-172.5 -146.5t-264.5 -51q-190 0 -298 98.5t-108 267.5z M270 354q0 -107 69 -170t181 -63q139 0 222 74t83 196q0 99 -52 174t-165 135q-185 -60 -261.5 -143.5t-76.5 -202.5zM479 1100q0 -82 39 -144t127 -116q161 60 228 131.5t67 173.5q0 90 -57.5 143t-153.5 53q-114 0 -182 -65.5t-68 -175.5z" /> +<glyph unicode="9" d="M98 14v158q134 -47 246 -47q202 0 327 141t189 441h-10q-51 -75 -132.5 -118.5t-180.5 -43.5q-169 0 -261 98.5t-92 288.5q0 153 64.5 280.5t180 199t259.5 71.5q180 0 279.5 -114.5t99.5 -334.5q0 -194 -56 -406.5t-147.5 -360t-221.5 -217.5t-302 -70q-136 0 -242 34z M350 938q0 -124 54.5 
-190t162.5 -66q76 0 140 28.5t108.5 81.5t65 114t20.5 151q0 131 -59 207.5t-160 76.5q-150 0 -241 -113t-91 -290z" /> +<glyph unicode=":" horiz-adv-x="518" d="M43 74q0 77 40.5 122.5t111.5 45.5q43 0 69.5 -26t26.5 -79q0 -71 -40 -118.5t-108 -47.5q-46 0 -73 26t-27 77zM203 956q0 77 40 122.5t111 45.5q97 0 97 -104q0 -73 -41.5 -119.5t-106.5 -46.5q-46 0 -73 26.5t-27 75.5z" /> +<glyph unicode=";" horiz-adv-x="518" d="M-100 -264q126 286 204 502h187l8 -23q-113 -235 -270 -479h-129zM203 956q0 77 40 122.5t111 45.5q97 0 97 -104q0 -73 -41.5 -119.5t-106.5 -46.5q-46 0 -73 26.5t-27 75.5z" /> +<glyph unicode="<" d="M121 664v98l919 479v-149l-747 -371l747 -328v-151z" /> +<glyph unicode="=" d="M127 444v142h920v-142h-920zM127 858v139h920v-139h-920z" /> +<glyph unicode=">" d="M121 242v151l745 328l-745 371v149l919 -479v-98z" /> +<glyph unicode="?" horiz-adv-x="874" d="M158 74q0 77 40 122.5t111 45.5q44 0 70.5 -26t26.5 -79q0 -73 -41.5 -119.5t-106.5 -46.5q-46 0 -73 26t-27 77zM197 1382q92 51 192 76t182 25q167 0 259 -84t92 -238q0 -123 -65.5 -226.5t-225.5 -223.5q-125 -91 -169 -147.5t-67 -160.5h-135q22 130 72.5 213.5 t165.5 174.5q128 100 168 144t63 94t23 112q0 93 -51.5 143.5t-147.5 50.5q-81 0 -155 -25.5t-140 -56.5z" /> +<glyph unicode="@" horiz-adv-x="1735" d="M111 504q0 261 126.5 485.5t343.5 347.5t486 123q191 0 329 -75.5t210.5 -213.5t72.5 -319q0 -179 -55 -324t-155 -227t-222 -82q-197 0 -213 184h-8q-111 -184 -291 -184q-115 0 -180.5 75.5t-65.5 209.5q0 157 68 284t188.5 199t260.5 72q65 0 127.5 -12t150.5 -48 q-64 -242 -98 -368t-31 -172q0 -117 102 -117q78 0 141.5 67t100.5 183.5t37 243.5q0 239 -128 367t-370 128q-228 0 -406.5 -107t-277 -295.5t-98.5 -416.5q0 -270 143.5 -418.5t409.5 -148.5q197 0 420 86v-127q-219 -90 -443 -90q-314 0 -494.5 184.5t-180.5 505.5z M639 518q0 -93 33 -134.5t98 -41.5q187 0 272 315l70 258q-63 23 -127 23q-94 0 -174 -55t-126 -153t-46 -212z" /> +<glyph unicode="A" horiz-adv-x="1137" d="M-117 0l799 1462h174l184 -1462h-170l-57 465h-496l-245 -465h-189zM401 621h394l-35 299q-24 179 -29 350q-37 -88 
-80.5 -175t-249.5 -474z" /> +<glyph unicode="B" horiz-adv-x="1225" d="M86 0l309 1462h375q432 0 432 -336q0 -141 -87 -238t-245 -126v-10q115 -32 176.5 -110.5t61.5 -188.5q0 -212 -152 -332.5t-407 -120.5h-463zM287 145h266q181 0 278 80.5t97 227.5q0 116 -74.5 177.5t-214.5 61.5h-236zM434 836h248q156 0 249 73t93 199 q0 104 -66.5 155.5t-209.5 51.5h-211z" /> +<glyph unicode="C" horiz-adv-x="1198" d="M150 537q0 261 105.5 485.5t283.5 342.5t403 118q197 0 348 -80l-69 -141q-138 69 -279 69q-174 0 -311.5 -97t-218 -284.5t-80.5 -408.5q0 -187 97.5 -298.5t268.5 -111.5q139 0 322 57v-149q-86 -31 -164 -45t-188 -14q-242 0 -380 149.5t-138 407.5z" /> +<glyph unicode="D" horiz-adv-x="1364" d="M86 0l309 1462h342q276 0 419.5 -149.5t143.5 -435.5q0 -261 -105 -461t-300 -308t-457 -108h-352zM287 147h162q202 0 355 91.5t234.5 258.5t81.5 382t-103 325.5t-302 110.5h-178z" /> +<glyph unicode="E" horiz-adv-x="1047" d="M86 0l309 1462h735l-32 -153h-566l-98 -469h527l-29 -152h-529l-114 -536h565l-33 -152h-735z" /> +<glyph unicode="F" horiz-adv-x="967" d="M86 0l309 1462h735l-30 -153h-568l-110 -533h528l-32 -153h-529l-131 -623h-172z" /> +<glyph unicode="G" horiz-adv-x="1386" d="M150 528q0 269 101.5 489.5t281.5 343t399 122.5q117 0 219.5 -20t206.5 -64l-66 -152q-77 34 -165.5 59t-194.5 25q-169 0 -307.5 -101.5t-215.5 -283.5t-77 -407q0 -190 102.5 -299t286.5 -109q154 0 260 39l96 444h-289l33 152h459l-154 -711q-216 -75 -419 -75 q-264 0 -410.5 144.5t-146.5 403.5z" /> +<glyph unicode="H" horiz-adv-x="1389" d="M86 0l309 1462h170l-131 -622h660l133 622h168l-310 -1462h-167l143 688h-660l-145 -688h-170z" /> +<glyph unicode="I" horiz-adv-x="559" d="M86 0l311 1462h168l-311 -1462h-168z" /> +<glyph unicode="J" horiz-adv-x="547" d="M-319 -360l6 147q69 -20 145 -20q100 0 165.5 62.5t90.5 182.5l307 1450h170l-309 -1468q-79 -379 -422 -379q-105 0 -153 25z" /> +<glyph unicode="K" horiz-adv-x="1141" d="M86 0l309 1462h170l-151 -710l700 710h209l-639 -637l350 -825h-186q-72 181 -146.5 359.5t-146.5 361.5l-174 -131l-125 -590h-170z" /> +<glyph 
unicode="L" horiz-adv-x="971" d="M86 0l309 1462h170l-276 -1308h565l-33 -154h-735z" /> +<glyph unicode="M" horiz-adv-x="1714" d="M84 0l309 1462h244l149 -1204h9l659 1204h266l-303 -1462h-174q126 590 193 905.5t94 392.5h-6l-717 -1298h-131l-166 1296h-8q-7 -72 -28.5 -197.5t-37.5 -199.5l-190 -899h-162z" /> +<glyph unicode="N" horiz-adv-x="1438" d="M84 0l309 1462h180l459 -1220h6q30 224 72 405l174 815h164l-309 -1462h-181l-460 1223h-6q-32 -221 -74 -418l-172 -805h-162z" /> +<glyph unicode="O" horiz-adv-x="1475" d="M150 549q0 264 96 482t263.5 336t377.5 118q244 0 384 -154t140 -424q0 -269 -88 -481.5t-252 -329t-379 -116.5q-256 0 -399 149.5t-143 419.5zM332 553q0 -199 98 -310.5t266 -111.5q152 0 272.5 97.5t190.5 279.5t70 403q0 199 -94 310.5t-261 111.5q-157 0 -281 -101 t-192.5 -281t-68.5 -398z" /> +<glyph unicode="P" horiz-adv-x="1159" d="M86 0l309 1462h330q214 0 324 -94.5t110 -282.5q0 -248 -164 -379t-481 -131h-135l-123 -575h-170zM410 721h133q216 0 328 91t112 267q0 125 -69.5 180.5t-213.5 55.5h-163z" /> +<glyph unicode="Q" horiz-adv-x="1475" d="M150 549q0 264 96 482t263.5 336t377.5 118q244 0 384 -154t140 -424q0 -333 -139 -576t-375 -321l274 -358h-219l-227 330l-17 -2h-16q-256 0 -399 149.5t-143 419.5zM332 553q0 -199 98 -310.5t266 -111.5q158 0 279 100t187.5 280.5t66.5 399.5q0 199 -94 310.5 t-261 111.5q-157 0 -281 -101t-192.5 -281t-68.5 -398z" /> +<glyph unicode="R" horiz-adv-x="1165" d="M86 0l309 1462h320q446 0 446 -366q0 -348 -368 -449l239 -647h-186l-209 608h-252l-129 -608h-170zM416 754h168q193 0 297 85t104 244q0 121 -67.5 175.5t-219.5 54.5h-166q-102 -494 -116 -559z" /> +<glyph unicode="S" horiz-adv-x="1028" d="M39 43v170q162 -84 340 -84q162 0 257 75.5t95 207.5q0 78 -52.5 137.5t-195.5 140.5q-151 85 -209.5 170t-58.5 201q0 187 132 304.5t347 117.5q99 0 184.5 -19t180.5 -65l-66 -150q-66 38 -148 60t-151 22q-134 0 -215.5 -69.5t-81.5 -188.5q0 -54 17 -92.5t54 -72.5 t142 -95q147 -88 198.5 -138t78 -110.5t26.5 -140.5q0 -211 -140.5 -327.5t-395.5 -116.5q-106 0 -186.5 14.5t-151.5 48.5z" /> +<glyph 
unicode="T" horiz-adv-x="1020" d="M186 1311l33 151h985l-30 -151h-408l-279 -1311h-172l277 1311h-406z" /> +<glyph unicode="U" horiz-adv-x="1384" d="M164 383q0 81 24 201l189 878h170l-191 -891q-22 -106 -22 -188q0 -117 73 -184.5t218 -67.5q172 0 267.5 87.5t139.5 289.5l205 954h170l-205 -966q-55 -263 -197.5 -389.5t-388.5 -126.5q-230 0 -341 104t-111 299z" /> +<glyph unicode="V" horiz-adv-x="1122" d="M188 1462h170l97 -930q20 -196 20 -335h4q61 144 162 338l479 927h191l-781 -1462h-180z" /> +<glyph unicode="W" horiz-adv-x="1745" d="M223 1462h170l31 -901l2 -88q0 -98 -10 -258h6q89 243 156 383l405 864h178l43 -860q9 -153 9 -304l-1 -83h9q75 224 131 354l387 893h182l-664 -1462h-170l-49 965q-8 136 -8 282h-6q-25 -72 -61 -154.5t-504 -1092.5h-174z" /> +<glyph unicode="X" horiz-adv-x="1063" d="M-104 0l596 776l-263 686h172l203 -563l443 563h186l-555 -694l278 -768h-180l-213 641l-481 -641h-186z" /> +<glyph unicode="Y" horiz-adv-x="1030" d="M188 1462h170l179 -747l489 747h193l-627 -921l-113 -541h-172l119 549z" /> +<glyph unicode="Z" horiz-adv-x="1087" d="M-16 0l28 137l924 1170h-655l32 155h858l-26 -139l-924 -1169h697l-33 -154h-901z" /> +<glyph unicode="[" horiz-adv-x="586" d="M-16 -324l381 1786h387l-31 -141h-227l-318 -1503h227l-32 -142h-387z" /> +<glyph unicode="\" horiz-adv-x="717" d="M221 1462h154l217 -1462h-154z" /> +<glyph unicode="]" horiz-adv-x="586" d="M-150 -324l31 142h225l320 1503h-227l30 141h389l-380 -1786h-388z" /> +<glyph unicode="^" horiz-adv-x="1059" d="M53 553l598 920h109l266 -920h-145l-201 747l-467 -747h-160z" /> +<glyph unicode="_" horiz-adv-x="807" d="M-188 -324l30 140h811l-30 -140h-811z" /> +<glyph unicode="`" horiz-adv-x="1135" d="M575 1548v21h181q43 -136 147 -303v-25h-104q-61 61 -128.5 154t-95.5 153z" /> +<glyph unicode="a" horiz-adv-x="1157" d="M98 350q0 208 71 386t196 279t274 101q92 0 164 -49.5t112 -142.5h11l67 172h127l-233 -1096h-133l26 209h-8q-179 -229 -377 -229q-139 0 -218 99t-79 271zM270 346q0 -114 47 -170.5t132 -56.5q97 0 193 92.5t156 241t60 297.5q0 103 -56 164t-147 61 
q-104 0 -193.5 -86t-140.5 -233t-51 -310z" /> +<glyph unicode="b" horiz-adv-x="1182" d="M59 0l330 1556h168q-51 -242 -78.5 -370.5t-75.5 -300.5h9q93 118 183.5 173.5t186.5 55.5q141 0 220 -99t79 -272q0 -209 -68.5 -386.5t-191 -277t-276.5 -99.5q-97 0 -170.5 51t-110.5 139h-10l-70 -170h-125zM319 346q0 -110 55.5 -168.5t160.5 -58.5q99 0 184.5 81 t137.5 230.5t52 317.5q0 227 -178 227q-96 0 -195.5 -95t-158 -239t-58.5 -295z" /> +<glyph unicode="c" horiz-adv-x="922" d="M98 389q0 200 74 369t204.5 263.5t293.5 94.5q137 0 268 -51l-47 -141q-120 51 -219 51q-112 0 -204.5 -76.5t-145 -213t-52.5 -296.5q0 -128 66.5 -199t183.5 -71q72 0 136 20t126 47v-143q-124 -63 -276 -63q-194 0 -301 107t-107 302z" /> +<glyph unicode="d" horiz-adv-x="1182" d="M98 350q0 214 72 392t194.5 275t274.5 97q194 0 281 -190h10q17 155 45 274l78 358h166l-330 -1556h-139l22 209h-8q-101 -125 -189 -177t-182 -52q-139 0 -217 98t-78 272zM270 346q0 -227 179 -227q94 0 194 93.5t158.5 239t58.5 296.5q0 111 -54 169t-157 58 q-101 0 -187.5 -82.5t-139 -232t-52.5 -314.5z" /> +<glyph unicode="e" horiz-adv-x="1010" d="M98 391q0 188 74.5 360.5t197.5 268.5t271 96q153 0 230 -66.5t77 -185.5q0 -180 -166 -282.5t-475 -102.5h-33l-4 -80q0 -131 61.5 -204.5t190.5 -73.5q63 0 129.5 18t165.5 66v-146q-94 -44 -166 -61.5t-159 -17.5q-184 0 -289 109t-105 302zM299 618h12 q228 0 349.5 59.5t121.5 172.5q0 53 -36.5 88t-114.5 35q-103 0 -193.5 -94t-138.5 -261z" /> +<glyph unicode="f" horiz-adv-x="641" d="M-229 -330q64 -22 112 -22q76 0 117 62t66 177l227 1082h-193l13 67l206 66l23 100q46 200 127.5 282.5t241.5 82.5q40 0 98 -11.5t90 -25.5l-43 -129q-76 29 -137 29q-87 0 -133.5 -48.5t-75.5 -177.5l-25 -108h238l-25 -127h-237l-232 -1098q-39 -189 -120 -276 t-213 -87q-69 0 -125 21v141z" /> +<glyph unicode="g" horiz-adv-x="1026" d="M-127 -211q0 105 72 182t233 131q-78 41 -78 121q0 69 51 118.5t142 92.5q-63 32 -103 94.5t-40 145.5q0 194 119.5 318t305.5 124q78 0 154 -20h371l-25 -107l-211 -24q41 -62 41 -158q0 -191 -116.5 -304.5t-311.5 -113.5q-55 0 -84 8q-139 -53 -139 -131 q0 -41 33 
-54.5t96 -21.5l117 -14q181 -22 262.5 -88t81.5 -194q0 -184 -146 -285t-411 -101q-194 0 -304 73.5t-110 207.5zM35 -195q0 -77 65 -122t193 -45q182 0 284.5 63.5t102.5 179.5q0 62 -54 98t-184 50l-159 16q-120 -25 -184 -88t-64 -152zM313 680 q0 -85 45 -129.5t125 -44.5q79 0 138 42t90.5 115.5t31.5 159.5q0 82 -44 125t-126 43q-78 0 -136.5 -40.5t-91 -113t-32.5 -157.5z" /> +<glyph unicode="h" horiz-adv-x="1182" d="M59 0l330 1556h168q-18 -82 -34.5 -159t-34 -156.5t-38 -166.5t-47.5 -189h11q94 123 185.5 176t191.5 53q131 0 202.5 -72t71.5 -204q0 -62 -23 -166q-39 -193 -145 -672h-168l148 692q18 94 18 135q0 148 -147 148q-89 0 -173.5 -59t-149 -171.5t-97.5 -271.5 l-101 -473h-168z" /> +<glyph unicode="i" horiz-adv-x="520" d="M59 0l234 1096h168l-234 -1096h-168zM340 1376q0 56 32 91.5t83 35.5q88 0 88 -90q0 -55 -33.5 -93t-77.5 -38q-40 0 -66 24.5t-26 69.5z" /> +<glyph unicode="j" horiz-adv-x="520" d="M-258 -330q61 -22 119 -22q125 0 168 205l264 1243h166l-266 -1258q-36 -171 -114.5 -250.5t-213.5 -79.5q-69 0 -123 21v141zM340 1376q0 56 32 91.5t83 35.5q86 0 86 -90q0 -55 -33.5 -93t-77.5 -38q-38 0 -64 24.5t-26 69.5z" /> +<glyph unicode="k" horiz-adv-x="999" d="M57 0l330 1556h170l-129 -602q-57 -266 -102 -395h4l526 537h201l-469 -467l295 -629h-187l-235 524l-152 -123l-82 -401h-170z" /> +<glyph unicode="l" horiz-adv-x="520" d="M57 0l332 1556h168l-332 -1556h-168z" /> +<glyph unicode="m" horiz-adv-x="1786" d="M59 0l234 1096h139l-22 -203h10q87 119 173.5 171t178.5 52q113 0 174 -65t72 -181h8q86 125 183 185.5t196 60.5q127 0 196.5 -68t69.5 -198q0 -68 -22 -178l-144 -672h-170l148 692q20 104 20 146q0 62 -34.5 99.5t-108.5 37.5q-81 0 -160 -58t-138.5 -164.5 t-90.5 -252.5l-107 -500h-168l148 692q18 94 18 135q0 70 -31 109t-106 39q-84 0 -163.5 -60t-140 -171.5t-93.5 -268.5l-101 -475h-168z" /> +<glyph unicode="n" horiz-adv-x="1182" d="M59 0l234 1096h139l-22 -203h10q96 122 185.5 172.5t185.5 50.5q127 0 200.5 -69.5t73.5 -194.5q0 -79 -23 -180l-143 -672h-170l148 692q20 104 20 144q0 63 -35.5 101t-113.5 38q-89 0 -173.5 -60t-149 
-171t-97.5 -269l-101 -475h-168z" /> +<glyph unicode="o" horiz-adv-x="1149" d="M98 406q0 190 73 357.5t197 257t275 89.5q190 0 300 -112.5t110 -309.5q0 -188 -72 -355t-195 -258t-278 -91q-192 0 -301 113t-109 309zM270 397q0 -131 63.5 -202.5t182.5 -71.5q104 0 187 73t129.5 207.5t46.5 307.5q0 115 -62.5 186.5t-169.5 71.5q-109 0 -195.5 -74 t-134 -205.5t-47.5 -292.5z" /> +<glyph unicode="p" horiz-adv-x="1182" d="M-43 -492l336 1588h139l-26 -209h8q179 227 372 227q137 0 216 -97.5t79 -273.5q0 -212 -69 -389t-191 -275.5t-276 -98.5q-97 0 -170 50t-113 140h-10l-4 -38q-3 -25 -10.5 -70t-114.5 -554h-166zM319 346q0 -110 55.5 -168.5t160.5 -58.5q99 0 184.5 81t137.5 230.5 t52 317.5q0 227 -178 227q-96 0 -195.5 -95t-158 -239t-58.5 -295z" /> +<glyph unicode="q" horiz-adv-x="1182" d="M98 350q0 212 72.5 392t196 277t274.5 97q94 0 165.5 -50.5t108.5 -141.5h13l67 172h125l-336 -1588h-166l101 480q9 45 57 221h-8q-95 -121 -185 -175t-186 -54q-140 0 -219.5 97.5t-79.5 272.5zM270 346q0 -227 179 -227q92 0 190 92t158.5 237t60.5 300 q0 105 -54.5 166t-152.5 61q-101 0 -189 -84.5t-140 -233t-52 -311.5z" /> +<glyph unicode="r" horiz-adv-x="811" d="M59 0l234 1096h139l-22 -203h10q72 95 119 136.5t98.5 64t114.5 22.5q69 0 120 -14l-36 -150q-53 13 -105 13q-91 0 -170.5 -60t-139 -166.5t-87.5 -236.5l-107 -502h-168z" /> +<glyph unicode="s" horiz-adv-x="877" d="M8 49v158q70 -42 151 -65t150 -23q126 0 190 50t64 128q0 57 -35 96t-151 107q-130 73 -184 143t-54 166q0 138 101 222.5t266 84.5q171 0 330 -74l-54 -137l-56 25q-101 43 -220 43q-93 0 -146 -43.5t-53 -112.5q0 -56 35.5 -96t146.5 -103q107 -60 153.5 -103 t69.5 -92.5t23 -111.5q0 -156 -110.5 -243.5t-311.5 -87.5q-169 0 -305 69z" /> +<glyph unicode="t" horiz-adv-x="664" d="M90 969l14 73l185 78l125 228h98l-55 -252h274l-26 -127h-273l-129 -604q-18 -87 -18 -132q0 -56 29 -86t81 -30q55 0 144 26v-129q-34 -14 -84 -24t-80 -10q-125 0 -191.5 59.5t-66.5 177.5q0 66 18 150l127 602h-172z" /> +<glyph unicode="u" horiz-adv-x="1182" d="M113 248q0 62 22 172l146 676h170l-150 -695q-18 -89 -18 -139q0 -143 147 
-143q88 0 173 60t150 172t99 270l100 475h166l-231 -1096h-139l22 203h-12q-98 -125 -187 -174t-184 -49q-128 0 -201 69.5t-73 198.5z" /> +<glyph unicode="v" horiz-adv-x="946" d="M98 1096h168l64 -613q24 -258 24 -362h6q127 275 179 371l325 604h178l-591 -1096h-228z" /> +<glyph unicode="w" horiz-adv-x="1468" d="M117 1096h164l18 -594v-88q0 -147 -8 -269h6q47 124 137 322l295 629h182l37 -594q6 -168 6 -262v-53l-2 -42h6q28 86 83 218.5t323 732.5h178l-506 -1096h-205l-32 602q-4 94 -4 172v156h-9l-50 -118l-83 -189l-291 -623h-202z" /> +<glyph unicode="x" horiz-adv-x="979" d="M-74 0l475 565l-239 531h170l174 -412l330 412h194l-455 -539l252 -557h-168l-192 434l-346 -434h-195z" /> +<glyph unicode="y" horiz-adv-x="946" d="M-197 -336q63 -18 131 -18q82 0 140.5 50.5t113.5 149.5l76 136l-166 1114h168l74 -545q10 -69 19.5 -203.5t9.5 -216.5h6q35 87 87 200t77 156l325 609h178l-696 -1282q-93 -172 -184 -239t-219 -67q-72 0 -140 21v135z" /> +<glyph unicode="z" horiz-adv-x="909" d="M-29 0l23 117l694 854h-479l27 125h657l-29 -140l-680 -831h531l-25 -125h-719z" /> +<glyph unicode="{" horiz-adv-x="715" d="M27 514l32 143q118 0 189.5 43.5t93.5 147.5l68 326q34 160 117.5 224t254.5 64h33l-31 -141q-105 0 -151 -36.5t-66 -123.5l-71 -321q-28 -123 -91 -184t-167 -78v-5q151 -41 151 -213q0 -59 -18 -131l-47 -211q-15 -58 -15 -98q0 -53 36.5 -77.5t119.5 -24.5v-142h-23 q-141 0 -216.5 52.5t-75.5 171.5q0 52 20 141q33 146 51.5 227.5t14.5 102.5q0 143 -209 143z" /> +<glyph unicode="|" d="M541 -496v2052h139v-2052h-139z" /> +<glyph unicode="}" horiz-adv-x="715" d="M-74 -182q115 0 167 36t71 123l72 322q25 117 88 179.5t170 80.5v6q-150 42 -150 211q0 59 18 131l50 213q14 65 14 99q0 53 -40.5 77.5t-139.5 24.5l28 141h11q144 0 220.5 -52.5t76.5 -170.5q0 -48 -21 -141l-49 -219q-16 -68 -16 -111q0 -143 209 -143l-33 -144 q-119 0 -190 -43t-93 -147l-67 -326q-36 -164 -119 -226.5t-264 -62.5h-13v142z" /> +<glyph unicode="~" d="M115 592v151q98 109 243 109q69 0 127 -14.5t144 -51.5q64 -27 112.5 -41t98.5 -14q55 0 119.5 33t115.5 88v-150q-100 -110 -244 -110q-72 0 
-135 16.5t-135 48.5q-75 32 -120 44t-93 12q-54 0 -118.5 -34.5t-114.5 -86.5z" /> +<glyph unicode="¢" d="M225 590q0 185 63.5 344t178.5 258.5t260 120.5l35 170h123l-37 -168q119 -9 217 -49l-47 -142q-109 52 -219 52q-112 0 -204.5 -76.5t-145 -213t-52.5 -296.5q0 -125 66 -198t184 -73q72 0 136 20t126 48v-143q-123 -62 -286 -66l-41 -198h-125l43 215 q-132 34 -203.5 137.5t-71.5 257.5z" /> +<glyph unicode="£" d="M-23 0l27 141q205 46 258 289l47 221h-200l26 127h201l76 350q75 353 430 353q184 0 336 -86l-66 -133q-146 79 -278 79q-213 0 -263 -237l-69 -326h370l-26 -127h-371l-47 -219q-22 -98 -66 -166.5t-124 -111.5h725l-33 -154h-953z" /> +<glyph unicode="¥" d="M127 266l29 133h290l33 160h-291l29 133h225l-202 770h163l179 -747l491 747h187l-533 -770h231l-28 -133h-297l-33 -160h297l-29 -133h-295l-57 -266h-154l56 266h-291z" /> +<glyph unicode="©" horiz-adv-x="1704" d="M139 731q0 200 100 375t275 276t377 101q197 0 370 -97t277 -272t104 -383q0 -204 -100.5 -376.5t-273 -273.5t-377.5 -101q-207 0 -382 103.5t-272.5 276.5t-97.5 371zM244 731q0 -173 87 -323.5t237.5 -237t322.5 -86.5q174 0 323 87t236.5 235.5t87.5 324.5 q0 174 -87 323t-235.5 236.5t-324.5 87.5q-174 0 -323 -87t-236.5 -235.5t-87.5 -324.5zM520 733q0 208 110 330.5t300 122.5q130 0 248 -60l-60 -120q-106 53 -190 53q-125 0 -191.5 -87t-66.5 -241q0 -169 65 -249.5t193 -80.5q82 0 211 43v-122q-66 -28 -113 -38 t-104 -10q-192 0 -297 119.5t-105 339.5z" /> +<glyph unicode="­" horiz-adv-x="639" d="M55 469l35 158h479l-34 -158h-480z" /> +<glyph unicode="®" horiz-adv-x="1704" d="M139 731q0 200 100 375t275 276t377 101q197 0 370 -97t277 -272t104 -383q0 -204 -100.5 -376.5t-273 -273.5t-377.5 -101q-207 0 -382 103.5t-272.5 276.5t-97.5 371zM244 731q0 -173 87 -323.5t237.5 -237t322.5 -86.5q174 0 323 87t236.5 235.5t87.5 324.5 q0 174 -87 323t-235.5 236.5t-324.5 87.5q-174 0 -323 -87t-236.5 -235.5t-87.5 -324.5zM645 291v880h229q163 0 241.5 -63t78.5 -193q0 -78 -47.5 -141t-132.5 -98l227 -385h-149l-207 352h-113v-352h-127zM772 762h92q195 0 195 149q0 76 -47.5 107t-149.5 31h-90v-287z " 
/> +<glyph unicode="´" horiz-adv-x="1135" d="M532 1241v27q56 60 125.5 151.5t106.5 149.5h190v-21q-38 -49 -140 -151t-177 -156h-105z" /> +<glyph unicode=" " horiz-adv-x="784" /> +<glyph unicode=" " horiz-adv-x="1569" /> +<glyph unicode=" " horiz-adv-x="784" /> +<glyph unicode=" " horiz-adv-x="1569" /> +<glyph unicode=" " horiz-adv-x="523" /> +<glyph unicode=" " horiz-adv-x="392" /> +<glyph unicode=" " horiz-adv-x="261" /> +<glyph unicode=" " horiz-adv-x="261" /> +<glyph unicode=" " horiz-adv-x="196" /> +<glyph unicode=" " horiz-adv-x="313" /> +<glyph unicode=" " horiz-adv-x="87" /> +<glyph unicode="‐" horiz-adv-x="639" d="M55 469l35 158h479l-34 -158h-480z" /> +<glyph unicode="‑" horiz-adv-x="639" d="M55 469l35 158h479l-34 -158h-480z" /> +<glyph unicode="‒" horiz-adv-x="639" d="M55 469l35 158h479l-34 -158h-480z" /> +<glyph unicode="–" horiz-adv-x="983" d="M55 469l35 160h823l-34 -160h-824z" /> +<glyph unicode="—" horiz-adv-x="1966" d="M55 469l35 160h1806l-34 -160h-1807z" /> +<glyph unicode="‘" horiz-adv-x="348" d="M123 983q98 211 270 479h127q-147 -345 -203 -501h-188z" /> +<glyph unicode="’" horiz-adv-x="348" d="M125 961q134 298 203 501h188l8 -22q-40 -91 -111 -218.5t-159 -260.5h-129z" /> +<glyph unicode="“" horiz-adv-x="719" d="M123 983q98 211 270 479h127q-147 -345 -203 -501h-188zM492 983q80 181 272 479h127q-162 -379 -203 -501h-188z" /> +<glyph unicode="”" horiz-adv-x="719" d="M125 961q134 298 203 501h188l8 -22q-40 -91 -111 -218.5t-159 -260.5h-129zM494 961q57 126 115.5 272.5t86.5 228.5h189l10 -22q-94 -206 -274 -479h-127z" /> +<glyph unicode="•" horiz-adv-x="774" d="M199 684q0 145 73.5 231t198.5 86q92 0 139 -49t47 -141q0 -141 -74 -230t-202 -89q-89 0 -135.5 49.5t-46.5 142.5z" /> +<glyph unicode="…" horiz-adv-x="1563" d="M563 74q0 77 40.5 122.5t111.5 45.5q43 0 69.5 -26t26.5 -79q0 -71 -40 -118.5t-108 -47.5q-46 0 -73 26t-27 77zM1085 74q0 77 40.5 122.5t111.5 45.5q43 0 69.5 -26t26.5 -79q0 -71 -40 -118.5t-108 -47.5q-46 0 -73 26t-27 77zM43 74q0 77 40.5 122.5t111.5 45.5 q43 0 69.5 
-26t26.5 -79q0 -71 -40 -118.5t-108 -47.5q-46 0 -73 26t-27 77z" /> +<glyph unicode=" " horiz-adv-x="313" /> +<glyph unicode=" " horiz-adv-x="392" /> +<glyph unicode="€" d="M63 504l27 131h154q8 80 30 164h-151l27 133h159q97 267 259.5 408t369.5 141q89 0 160 -21.5t141 -70.5l-80 -138q-113 78 -231 78q-140 0 -254 -99t-189 -298h426l-26 -133h-441q-21 -65 -32 -164h381l-29 -131h-361q0 -373 297 -373q123 0 256 55v-147 q-127 -59 -278 -59q-212 0 -328.5 133.5t-116.5 378.5v12h-170z" /> +<glyph unicode="™" horiz-adv-x="1534" d="M121 1358v104h516v-104h-199v-617h-121v617h-196zM705 741v721h180l182 -557l193 557h170v-721h-121v430q0 73 4 121h-6l-197 -551h-96l-189 551h-6q4 -52 4 -121v-430h-118z" /> +<glyph unicode="" horiz-adv-x="1095" d="M0 1095h1095v-1095h-1095v1095z" /> +</font> +</defs></svg> \ No newline at end of file diff --git a/couchpotato/static/fonts/OpenSans-Italic-webfont.ttf b/couchpotato/static/fonts/OpenSans-Italic-webfont.ttf new file mode 100755 index 0000000000..63f187e984 Binary files /dev/null and b/couchpotato/static/fonts/OpenSans-Italic-webfont.ttf differ diff --git a/couchpotato/static/fonts/OpenSans-Italic-webfont.woff b/couchpotato/static/fonts/OpenSans-Italic-webfont.woff new file mode 100755 index 0000000000..469a29bbfb Binary files /dev/null and b/couchpotato/static/fonts/OpenSans-Italic-webfont.woff differ diff --git a/couchpotato/static/fonts/OpenSans-Light-webfont.eot b/couchpotato/static/fonts/OpenSans-Light-webfont.eot new file mode 100644 index 0000000000..14868406aa Binary files /dev/null and b/couchpotato/static/fonts/OpenSans-Light-webfont.eot differ diff --git a/couchpotato/static/fonts/OpenSans-Light-webfont.svg b/couchpotato/static/fonts/OpenSans-Light-webfont.svg new file mode 100644 index 0000000000..11a472ca8a --- /dev/null +++ b/couchpotato/static/fonts/OpenSans-Light-webfont.svg @@ -0,0 +1,1831 @@ +<?xml version="1.0" standalone="no"?> +<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd" > +<svg 
xmlns="http://www.w3.org/2000/svg"> +<metadata></metadata> +<defs> +<font id="open_sanslight" horiz-adv-x="1169" > +<font-face units-per-em="2048" ascent="1638" descent="-410" /> +<missing-glyph horiz-adv-x="532" /> +<glyph unicode="fi" horiz-adv-x="1077" d="M29 0zM586 1001h-256v-1001h-99v1001h-202v58l202 37v84q0 200 73.5 293.5t240.5 93.5q90 0 180 -27l-23 -86q-80 25 -159 25q-116 0 -164.5 -68.5t-48.5 -222.5v-101h256v-86zM895 0h-99v1087h99v-1087zM782 1389q0 96 63 96q31 0 48.5 -25t17.5 -71q0 -45 -17.5 -71 t-48.5 -26q-63 0 -63 97z" /> +<glyph unicode="fl" horiz-adv-x="1077" d="M29 0zM586 1001h-256v-1001h-99v1001h-202v58l202 37v84q0 200 73.5 293.5t240.5 93.5q90 0 180 -27l-23 -86q-80 25 -159 25q-116 0 -164.5 -68.5t-48.5 -222.5v-101h256v-86zM895 0h-99v1556h99v-1556z" /> +<glyph unicode="ffi" horiz-adv-x="1692" d="M29 0zM586 1001h-256v-1001h-99v1001h-202v58l202 37v84q0 200 73.5 293.5t240.5 93.5q90 0 180 -27l-23 -86q-80 25 -159 25q-116 0 -164.5 -68.5t-48.5 -222.5v-101h256v-86zM1200 1001h-256v-1001h-99v1001h-202v58l202 37v84q0 200 73.5 293.5t240.5 93.5 q90 0 180 -27l-23 -86q-80 25 -159 25q-116 0 -164.5 -68.5t-48.5 -222.5v-101h256v-86zM1510 0h-99v1087h99v-1087zM1397 1389q0 96 63 96q31 0 48.5 -25t17.5 -71q0 -45 -17.5 -71t-48.5 -26q-63 0 -63 97z" /> +<glyph unicode="ffl" horiz-adv-x="1692" d="M29 0zM586 1001h-256v-1001h-99v1001h-202v58l202 37v84q0 200 73.5 293.5t240.5 93.5q90 0 180 -27l-23 -86q-80 25 -159 25q-116 0 -164.5 -68.5t-48.5 -222.5v-101h256v-86zM1200 1001h-256v-1001h-99v1001h-202v58l202 37v84q0 200 73.5 293.5t240.5 93.5 q90 0 180 -27l-23 -86q-80 25 -159 25q-116 0 -164.5 -68.5t-48.5 -222.5v-101h256v-86zM1510 0h-99v1556h99v-1556z" /> +<glyph horiz-adv-x="2048" /> +<glyph horiz-adv-x="2048" /> +<glyph unicode=" " horiz-adv-x="1044" /> +<glyph unicode=" " horiz-adv-x="532" /> +<glyph unicode=" " horiz-adv-x="532" /> +<glyph unicode=" " horiz-adv-x="532" /> +<glyph unicode="!" 
horiz-adv-x="492" d="M276 377h-61l-29 1085h119zM164 78q0 98 80 98q82 0 82 -98t-82 -98q-80 0 -80 98z" /> +<glyph unicode=""" horiz-adv-x="723" d="M260 1462l-33 -528h-61l-33 528h127zM590 1462l-33 -528h-61l-33 528h127z" /> +<glyph unicode="#" horiz-adv-x="1323" d="M967 928l-76 -398h303v-79h-320l-86 -451h-90l88 451h-360l-86 -451h-88l86 451h-283v79h299l76 398h-297v80h311l86 454h91l-89 -454h365l88 454h86l-88 -454h285v-80h-301zM440 530h363l78 398h-363z" /> +<glyph unicode="$" d="M991 440q0 -133 -99 -217t-274 -106v-236h-81v232q-92 2 -200.5 22.5t-172.5 50.5v103q75 -36 179.5 -61t193.5 -25v508q-145 44 -215 88t-102 104t-32 146q0 124 94.5 208.5t254.5 104.5v192h81v-190q197 -9 351 -72l-33 -90q-141 62 -318 72v-486q213 -66 293 -144 t80 -204zM881 444q0 85 -63 140.5t-200 95.5v-471q122 13 192.5 75t70.5 160zM297 1049q0 -86 57 -141t183 -93v453q-119 -16 -179.5 -76t-60.5 -143z" /> +<glyph unicode="%" horiz-adv-x="1653" d="M211 1026q0 -186 45 -279.5t141 -93.5q193 0 193 373q0 184 -49.5 276.5t-143.5 92.5q-96 0 -141 -92.5t-45 -276.5zM688 1026q0 -226 -75 -343.5t-216 -117.5q-133 0 -208.5 120.5t-75.5 340.5q0 223 72 340t212 117q139 0 215 -120.5t76 -336.5zM1063 438 q0 -185 45 -277.5t141 -92.5q193 0 193 370q0 369 -193 369q-96 0 -141 -91.5t-45 -277.5zM1540 438q0 -226 -74 -343.5t-215 -117.5q-136 0 -211 121.5t-75 339.5q0 225 73.5 341t212.5 116q137 0 213 -120t76 -337zM1280 1462l-811 -1462h-96l811 1462h96z" /> +<glyph unicode="&" horiz-adv-x="1460" d="M123 371q0 138 73.5 235t274.5 205l-75 82q-66 71 -98 139t-32 142q0 143 95.5 227t256.5 84q155 0 245.5 -81t90.5 -224q0 -105 -70 -192.5t-253 -194.5l452 -457q61 72 104 157t75 201h96q-63 -246 -209 -426l266 -268h-135l-193 197q-92 -90 -164 -131.5t-157.5 -63.5 t-194.5 -22q-209 0 -328.5 103t-119.5 288zM578 70q128 0 234.5 43.5t209.5 146.5l-483 485q-136 -72 -196.5 -122.5t-88 -109.5t-27.5 -138q0 -143 93 -224t258 -81zM373 1176q0 -79 40 -146t152 -174q159 85 221 159t62 169q0 94 -62 152.5t-168 58.5q-114 0 -179.5 -58 t-65.5 -161z" /> +<glyph unicode="'" horiz-adv-x="393" 
d="M260 1462l-33 -528h-61l-33 528h127z" /> +<glyph unicode="(" horiz-adv-x="557" d="M82 561q0 265 77.5 496t223.5 405h113q-148 -182 -227 -412.5t-79 -486.5q0 -483 304 -887h-111q-147 170 -224 397t-77 488z" /> +<glyph unicode=")" horiz-adv-x="557" d="M475 561q0 -263 -77.5 -490t-223.5 -395h-111q304 404 304 887q0 257 -79 487.5t-227 411.5h113q147 -175 224 -406.5t77 -494.5z" /> +<glyph unicode="*" horiz-adv-x="1128" d="M631 1556l-37 -405l405 104l21 -131l-395 -39l247 -340l-124 -71l-191 379l-180 -379l-125 71l242 340l-390 39l19 131l401 -104l-39 405h146z" /> +<glyph unicode="+" d="M625 764h434v-82h-434v-432h-82v432h-432v82h432v434h82v-434z" /> +<glyph unicode="," horiz-adv-x="440" d="M295 238l12 -21q-75 -265 -174 -481h-65q77 275 110 502h117z" /> +<glyph unicode="-" horiz-adv-x="659" d="M92 512v82h475v-82h-475z" /> +<glyph unicode="." horiz-adv-x="487" d="M162 78q0 98 80 98q82 0 82 -98t-82 -98q-80 0 -80 98z" /> +<glyph unicode="/" horiz-adv-x="698" d="M674 1462l-545 -1462h-104l544 1462h105z" /> +<glyph unicode="0" d="M1055 735q0 -385 -117.5 -570t-355.5 -185q-229 0 -348 190.5t-119 564.5q0 382 115.5 566t351.5 184q231 0 352 -190.5t121 -559.5zM223 735q0 -340 89 -502.5t270 -162.5q189 0 275.5 168t86.5 497q0 324 -86.5 492t-275.5 168t-274 -168t-85 -492z" /> +<glyph unicode="1" d="M682 0h-98v1065q0 145 12 301q-15 -15 -31 -29t-309 -243l-57 71l397 297h86v-1462z" /> +<glyph unicode="2" d="M1028 0h-915v88l389 406q164 170 230 260t97 172t31 172q0 131 -86 213t-223 82q-183 0 -350 -133l-54 69q183 154 406 154q191 0 300.5 -102t109.5 -281q0 -145 -73.5 -280.5t-268.5 -334.5l-375 -385v-4h782v-96z" /> +<glyph unicode="3" d="M979 1118q0 -136 -85.5 -229t-229.5 -119v-6q176 -22 268 -112t92 -242q0 -205 -139.5 -317.5t-401.5 -112.5q-223 0 -389 83v99q84 -44 188.5 -69t196.5 -25q221 0 332 89.5t111 252.5q0 145 -113.5 223t-333.5 78h-158v96h160q182 0 288.5 86.5t106.5 234.5 q0 122 -86.5 195.5t-226.5 73.5q-109 0 -199 -30.5t-202 -104.5l-49 67q85 71 205 112.5t243 41.5q202 0 312 -95.5t110 -269.5z" /> +<glyph unicode="4" 
d="M1141 373h-252v-373h-94v373h-752v67l725 1030h121v-1011h252v-86zM795 459v418q0 302 14 507h-8q-20 -37 -123 -188l-516 -737h633z" /> +<glyph unicode="5" d="M537 879q234 0 368.5 -113t134.5 -311q0 -225 -140 -350t-386 -125q-109 0 -207 21.5t-164 61.5v103q108 -55 192 -76.5t179 -21.5q192 0 308 101.5t116 274.5q0 163 -113 256t-307 93q-130 0 -272 -39l-60 39l58 669h704v-96h-610l-45 -516q156 29 244 29z" /> +<glyph unicode="6" d="M131 623q0 285 77.5 479.5t220 288.5t343.5 94q94 0 172 -23v-88q-73 27 -176 27q-247 0 -384.5 -178t-154.5 -518h13q76 98 174 148t207 50q205 0 320.5 -117t115.5 -323q0 -224 -121.5 -353.5t-327.5 -129.5q-222 0 -350.5 169.5t-128.5 473.5zM610 68q164 0 255 103 t91 294q0 168 -90 262t-245 94q-102 0 -189.5 -45t-139.5 -119.5t-52 -152.5q0 -111 49.5 -213.5t134 -162.5t186.5 -60z" /> +<glyph unicode="7" d="M334 0l602 1366h-827v96h946v-73l-604 -1389h-117z" /> +<glyph unicode="8" d="M582 1487q186 0 299.5 -95t113.5 -257q0 -112 -70.5 -198t-228.5 -159q192 -79 270 -173t78 -228q0 -181 -126.5 -289t-339.5 -108q-221 0 -339 101t-118 294q0 131 83 230t257 169q-161 76 -227 160.5t-66 202.5q0 105 53 184.5t148.5 122.5t212.5 43zM223 360 q0 -138 93.5 -214t261.5 -76q164 0 264 80.5t100 218.5q0 124 -78.5 201.5t-302.5 162.5q-184 -71 -261 -157t-77 -216zM580 1397q-141 0 -226.5 -69.5t-85.5 -190.5q0 -70 31.5 -123.5t91 -97t199.5 -101.5q163 63 234 139t71 183q0 120 -84.5 190t-230.5 70z" /> +<glyph unicode="9" d="M1036 842q0 -288 -75.5 -482t-220 -287t-349.5 -93q-104 0 -192 26v86q43 -14 103.5 -21.5t92.5 -7.5q247 0 387 178.5t156 520.5h-12q-73 -96 -174 -147.5t-211 -51.5q-203 0 -316.5 112t-113.5 318q0 220 124.5 356t323.5 136q144 0 252 -75.5t166.5 -221.5t58.5 -346z M559 1397q-158 0 -252 -106.5t-94 -291.5q0 -174 87 -264t249 -90q101 0 188.5 45t139 119.5t51.5 151.5q0 117 -46.5 219t-130 159.5t-192.5 57.5z" /> +<glyph unicode=":" horiz-adv-x="487" d="M162 78q0 98 80 98q82 0 82 -98t-82 -98q-80 0 -80 98zM162 971q0 98 80 98q82 0 82 -98q0 -53 -23.5 -76t-58.5 -23q-34 0 -57 23t-23 76z" /> +<glyph unicode=";" 
horiz-adv-x="487" d="M303 238l12 -21q-75 -265 -174 -481h-65q29 97 62 245.5t48 256.5h117zM162 971q0 98 80 98q82 0 82 -98q0 -53 -23.5 -76t-58.5 -23q-34 0 -57 23t-23 76z" /> +<glyph unicode="<" d="M1059 266l-948 416v61l948 474v-95l-823 -405l823 -355v-96z" /> +<glyph unicode="=" d="M111 885v82h948v-82h-948zM111 477v82h948v-82h-948z" /> +<glyph unicode=">" d="M111 362l823 355l-823 405v95l948 -474v-61l-948 -416v96z" /> +<glyph unicode="?" horiz-adv-x="862" d="M293 377v37q0 123 37.5 201t138.5 167l91 79q72 61 103 121t31 138q0 127 -83.5 202t-219.5 75q-79 0 -148 -17.5t-149 -56.5l-37 80q110 48 184.5 64t153.5 16q183 0 288 -98.5t105 -270.5q0 -68 -18 -119t-50.5 -94.5t-78.5 -84t-102 -87.5q-64 -54 -98.5 -98.5 t-50 -93.5t-15.5 -146v-14h-82zM260 78q0 98 80 98q82 0 82 -98t-82 -98q-80 0 -80 98z" /> +<glyph unicode="@" horiz-adv-x="1815" d="M1702 725q0 -228 -90.5 -366t-245.5 -138q-89 0 -144.5 54t-64.5 147h-4q-43 -100 -124 -150.5t-189 -50.5q-148 0 -229 96.5t-81 270.5q0 202 120.5 330.5t314.5 128.5q138 0 286 -41l-22 -464v-30q0 -104 35 -156.5t116 -52.5q103 0 168.5 116.5t65.5 303.5 q0 194 -79 340t-225.5 224.5t-334.5 78.5q-230 0 -405.5 -99.5t-270 -281.5t-94.5 -418q0 -322 167 -497.5t474 -175.5q93 0 188.5 18t231.5 70v-99q-203 -80 -414 -80q-349 0 -544 200.5t-195 557.5q0 256 108.5 460.5t307 317.5t448.5 113q215 0 380.5 -89t255 -254.5 t89.5 -383.5zM633 590q0 -143 55 -215t174 -72q255 0 273 346l16 291q-79 27 -193 27q-149 0 -237 -102.5t-88 -274.5z" /> +<glyph unicode="A" horiz-adv-x="1229" d="M911 516h-594l-204 -516h-113l588 1468h65l576 -1468h-115zM354 608h523l-199 527q-25 62 -60 172q-27 -96 -59 -174z" /> +<glyph unicode="B" horiz-adv-x="1284" d="M207 1462h401q271 0 398 -92t127 -278q0 -127 -77.5 -211.5t-226.5 -108.5v-6q175 -26 257.5 -110.5t82.5 -235.5q0 -202 -134 -311t-380 -109h-448v1462zM309 811h322q206 0 299.5 68.5t93.5 214.5t-105.5 212t-314.5 66h-295v-561zM309 721v-631h344q406 0 406 330 q0 301 -428 301h-322z" /> +<glyph unicode="C" horiz-adv-x="1272" d="M831 1391q-275 0 -433 -176t-158 -482q0 -313 
149 -486t426 -173q184 0 338 47v-90q-145 -51 -362 -51q-308 0 -485 199t-177 556q0 223 84.5 393t243 262.5t368.5 92.5q214 0 383 -80l-41 -92q-160 80 -336 80z" /> +<glyph unicode="D" horiz-adv-x="1446" d="M1317 745q0 -368 -193 -556.5t-567 -188.5h-350v1462h395q350 0 532.5 -183t182.5 -534zM1206 741q0 314 -159.5 472.5t-468.5 158.5h-269v-1282h242q655 0 655 651z" /> +<glyph unicode="E" horiz-adv-x="1130" d="M1006 0h-799v1462h799v-94h-697v-553h658v-94h-658v-627h697v-94z" /> +<glyph unicode="F" horiz-adv-x="1028" d="M309 0h-102v1462h801v-94h-699v-620h660v-95h-660v-653z" /> +<glyph unicode="G" horiz-adv-x="1481" d="M782 737h539v-667q-212 -90 -477 -90q-346 0 -530.5 195.5t-184.5 553.5q0 223 91.5 395.5t262 266.5t391.5 94q239 0 429 -88l-41 -92q-190 88 -394 88q-289 0 -458.5 -178.5t-169.5 -481.5q0 -330 161 -496.5t473 -166.5q202 0 343 57v514h-435v96z" /> +<glyph unicode="H" horiz-adv-x="1473" d="M1266 0h-103v719h-854v-719h-102v1462h102v-649h854v649h103v-1462z" /> +<glyph unicode="I" horiz-adv-x="516" d="M207 0v1462h102v-1462h-102z" /> +<glyph unicode="J" horiz-adv-x="506" d="M-33 -369q-92 0 -151 27v88q78 -20 149 -20q242 0 242 264v1472h102v-1462q0 -369 -342 -369z" /> +<glyph unicode="K" horiz-adv-x="1190" d="M1190 0h-125l-561 772l-195 -172v-600h-102v1462h102v-760l162 162l573 598h130l-599 -618z" /> +<glyph unicode="L" horiz-adv-x="1051" d="M207 0v1462h102v-1366h697v-96h-799z" /> +<glyph unicode="M" horiz-adv-x="1767" d="M850 0l-545 1350h-8q8 -124 8 -254v-1096h-98v1462h158l518 -1286h6l518 1286h154v-1462h-103v1108q0 116 12 240h-8l-547 -1348h-65z" /> +<glyph unicode="N" horiz-adv-x="1477" d="M1270 0h-103l-866 1298h-8q12 -232 12 -350v-948h-98v1462h102l865 -1296h6q-9 180 -9 342v954h99v-1462z" /> +<glyph unicode="O" horiz-adv-x="1565" d="M1436 733q0 -348 -174 -550.5t-480 -202.5q-305 0 -479 202.5t-174 552.5q0 349 175.5 549.5t479.5 200.5q306 0 479 -201.5t173 -550.5zM240 733q0 -314 140 -485.5t402 -171.5q264 0 403.5 170t139.5 487q0 316 -139.5 484.5t-401.5 168.5q-261 0 -402.5 -170 t-141.5 -483z" /> 
+<glyph unicode="P" horiz-adv-x="1198" d="M1087 1042q0 -212 -144 -325t-408 -113h-226v-604h-102v1462h358q522 0 522 -420zM309 692h201q247 0 357 81.5t110 264.5q0 169 -104 250.5t-322 81.5h-242v-678z" /> +<glyph unicode="Q" horiz-adv-x="1565" d="M1436 733q0 -294 -126 -486.5t-349 -246.5l333 -348h-166l-282 330l-33 -2h-31q-305 0 -479 202.5t-174 552.5q0 349 175.5 549.5t479.5 200.5q306 0 479 -201.5t173 -550.5zM240 733q0 -314 140 -485.5t402 -171.5q264 0 403.5 170t139.5 487q0 316 -139.5 484.5 t-401.5 168.5q-261 0 -402.5 -170t-141.5 -483z" /> +<glyph unicode="R" horiz-adv-x="1217" d="M309 637v-637h-102v1462h348q272 0 402 -100.5t130 -302.5q0 -147 -77.5 -248t-235.5 -145l397 -666h-122l-377 637h-363zM309 725h279q185 0 287 82.5t102 243.5q0 167 -100 243t-326 76h-242v-645z" /> +<glyph unicode="S" horiz-adv-x="1116" d="M1014 377q0 -183 -134.5 -290t-357.5 -107q-268 0 -411 59v102q158 -67 403 -67q180 0 285.5 82.5t105.5 216.5q0 83 -35 137.5t-114 99.5t-232 97q-224 77 -309.5 166.5t-85.5 238.5q0 164 128.5 267.5t330.5 103.5q206 0 387 -78l-37 -88q-182 76 -348 76 q-162 0 -258 -75t-96 -204q0 -81 29.5 -133t96.5 -93.5t230 -99.5q171 -59 257 -114.5t125.5 -126t39.5 -170.5z" /> +<glyph unicode="T" horiz-adv-x="1073" d="M588 0h-103v1366h-475v96h1053v-96h-475v-1366z" /> +<glyph unicode="U" horiz-adv-x="1473" d="M1282 1462v-946q0 -252 -146 -394t-407 -142q-254 0 -396.5 142.5t-142.5 397.5v942h103v-946q0 -211 117 -328.5t331 -117.5q209 0 324 115.5t115 320.5v956h102z" /> +<glyph unicode="V" horiz-adv-x="1182" d="M1071 1462h111l-547 -1462h-90l-545 1462h109l368 -995q84 -225 113 -338q20 75 79 233z" /> +<glyph unicode="W" horiz-adv-x="1827" d="M1372 0h-84l-321 1128q-40 139 -60 228q-16 -87 -45.5 -200t-322.5 -1156h-86l-402 1462h107l256 -942q15 -57 28 -105.5t23.5 -91t19 -82t15.5 -79.5q24 136 102 413l250 887h113l293 -1018q51 -176 73 -284q13 72 33.5 153t308.5 1149h103z" /> +<glyph unicode="X" horiz-adv-x="1102" d="M1102 0h-117l-432 682l-440 -682h-113l492 762l-447 700h115l395 -626l401 626h109l-453 -698z" /> +<glyph 
unicode="Y" horiz-adv-x="1081" d="M543 662l428 800h110l-487 -897v-565h-105v557l-489 905h117z" /> +<glyph unicode="Z" horiz-adv-x="1180" d="M1098 0h-1016v76l856 1290h-817v96h954v-76l-858 -1290h881v-96z" /> +<glyph unicode="[" horiz-adv-x="653" d="M602 -324h-428v1786h428v-94h-330v-1597h330v-95z" /> +<glyph unicode="\" horiz-adv-x="698" d="M127 1462l547 -1462h-103l-546 1462h102z" /> +<glyph unicode="]" horiz-adv-x="653" d="M51 -229h330v1597h-330v94h428v-1786h-428v95z" /> +<glyph unicode="^" d="M88 561l465 912h68l460 -912h-100l-395 791l-398 -791h-100z" /> +<glyph unicode="_" horiz-adv-x="842" d="M846 -266h-850v82h850v-82z" /> +<glyph unicode="`" horiz-adv-x="1182" d="M776 1241h-69q-96 79 -188.5 171.5t-125.5 139.5v17h142q26 -48 98.5 -142t142.5 -170v-16z" /> +<glyph unicode="a" horiz-adv-x="1085" d="M842 0l-25 172h-8q-82 -105 -168.5 -148.5t-204.5 -43.5q-160 0 -249 82t-89 227q0 159 132.5 247t383.5 93l207 6v72q0 155 -63 234t-203 79q-151 0 -313 -84l-37 86q179 84 354 84q179 0 267.5 -93t88.5 -290v-723h-73zM442 70q174 0 274.5 99.5t100.5 276.5v107 l-190 -8q-229 -11 -326.5 -71.5t-97.5 -188.5q0 -102 62.5 -158.5t176.5 -56.5z" /> +<glyph unicode="b" horiz-adv-x="1219" d="M641 1108q228 0 343.5 -143.5t115.5 -419.5q0 -271 -121.5 -418t-341.5 -147q-116 0 -209 48t-147 136h-9l-28 -164h-62v1556h99v-391q0 -88 -4 -162l-3 -85h7q62 98 149.5 144t210.5 46zM639 1018q-192 0 -275 -110t-83 -363v-17q0 -246 86.5 -353t269.5 -107 q178 0 268 124.5t90 354.5q0 471 -356 471z" /> +<glyph unicode="c" horiz-adv-x="973" d="M616 -20q-233 0 -365 147t-132 410q0 270 137 420.5t375 150.5q141 0 270 -49l-27 -88q-141 47 -245 47q-200 0 -303 -123.5t-103 -355.5q0 -220 103 -344.5t288 -124.5q148 0 275 53v-92q-104 -51 -273 -51z" /> +<glyph unicode="d" horiz-adv-x="1219" d="M580 1108q118 0 204 -43t154 -147h6q-6 126 -6 247v391h98v-1556h-65l-25 166h-8q-124 -186 -356 -186q-225 0 -344 140t-119 408q0 282 118 431t343 149zM580 1018q-178 0 -267.5 -125t-89.5 -363q0 -462 359 -462q184 0 270 107t86 353v17q0 252 -84.5 362.5 t-273.5 110.5z" 
/> +<glyph unicode="e" horiz-adv-x="1124" d="M621 -20q-237 0 -369.5 146t-132.5 409q0 260 128 416.5t345 156.5q192 0 303 -134t111 -364v-80h-783q2 -224 104.5 -342t293.5 -118q93 0 163.5 13t178.5 56v-90q-92 -40 -170 -54.5t-172 -14.5zM592 1020q-157 0 -252 -103.5t-111 -298.5h672q0 189 -82 295.5 t-227 106.5z" /> +<glyph unicode="f" horiz-adv-x="614" d="M586 1001h-256v-1001h-99v1001h-202v58l202 37v84q0 200 73.5 293.5t240.5 93.5q90 0 180 -27l-23 -86q-80 25 -159 25q-116 0 -164.5 -68.5t-48.5 -222.5v-101h256v-86z" /> +<glyph unicode="g" horiz-adv-x="1071" d="M1030 1087v-69l-225 -14q90 -112 90 -246q0 -157 -104.5 -254.5t-280.5 -97.5q-74 0 -104 6q-59 -31 -90 -73t-31 -89q0 -52 39.5 -76t132.5 -24h190q177 0 271 -71.5t94 -211.5q0 -172 -139.5 -265.5t-397.5 -93.5q-205 0 -317.5 79t-112.5 220q0 112 69.5 186 t188.5 101q-49 21 -78.5 59.5t-29.5 88.5q0 109 139 192q-95 39 -148 122.5t-53 191.5q0 163 103.5 261.5t279.5 98.5q107 0 166 -21h348zM150 -184q0 -224 333 -224q428 0 428 273q0 98 -67 142t-217 44h-178q-299 0 -299 -235zM233 748q0 -126 76.5 -195.5t204.5 -69.5 q136 0 208.5 69t72.5 200q0 139 -74.5 208.5t-208.5 69.5q-130 0 -204.5 -74.5t-74.5 -207.5z" /> +<glyph unicode="h" horiz-adv-x="1208" d="M940 0v705q0 164 -69 238.5t-214 74.5q-195 0 -285.5 -98.5t-90.5 -319.5v-600h-99v1556h99v-495l-5 -139h7q61 98 154 142t231 44q370 0 370 -397v-711h-98z" /> +<glyph unicode="i" horiz-adv-x="463" d="M281 0h-99v1087h99v-1087zM168 1389q0 96 63 96q31 0 48.5 -25t17.5 -71q0 -45 -17.5 -71t-48.5 -26q-63 0 -63 97z" /> +<glyph unicode="j" horiz-adv-x="463" d="M37 -492q-80 0 -135 25v86q69 -20 129 -20q151 0 151 176v1312h99v-1298q0 -135 -63.5 -208t-180.5 -73zM168 1389q0 96 63 96q31 0 48.5 -25t17.5 -71q0 -45 -17.5 -71t-48.5 -26q-63 0 -63 97z" /> +<glyph unicode="k" horiz-adv-x="991" d="M279 477l555 610h120l-428 -464l465 -623h-119l-413 549l-178 -162v-387h-99v1556h99v-780l-7 -299h5z" /> +<glyph unicode="l" horiz-adv-x="463" d="M281 0h-99v1556h99v-1556z" /> +<glyph unicode="m" horiz-adv-x="1808" d="M1540 0v713q0 159 -62 
232t-190 73q-167 0 -247 -92t-80 -289v-637h-101v743q0 275 -252 275q-171 0 -249 -99.5t-78 -318.5v-600h-99v1087h82l21 -149h6q45 81 128 125.5t183 44.5q257 0 330 -193h4q53 93 142.5 143t203.5 50q178 0 267 -95t89 -302v-711h-98z" /> +<glyph unicode="n" horiz-adv-x="1208" d="M940 0v705q0 164 -69 238.5t-214 74.5q-195 0 -285.5 -98.5t-90.5 -319.5v-600h-99v1087h84l19 -149h6q106 170 377 170q370 0 370 -397v-711h-98z" /> +<glyph unicode="o" horiz-adv-x="1200" d="M1081 545q0 -266 -129 -415.5t-356 -149.5q-143 0 -252 69t-167 198t-58 298q0 266 129 414.5t354 148.5q224 0 351.5 -150.5t127.5 -412.5zM223 545q0 -224 98.5 -349.5t278.5 -125.5t278.5 125.5t98.5 349.5q0 225 -99.5 349t-279.5 124t-277.5 -123.5t-97.5 -349.5z " /> +<glyph unicode="p" horiz-adv-x="1219" d="M647 -20q-251 0 -366 188h-7l3 -84q4 -74 4 -162v-414h-99v1579h84l19 -155h6q112 176 358 176q220 0 335.5 -144.5t115.5 -420.5q0 -268 -121.5 -415.5t-331.5 -147.5zM645 68q167 0 258.5 124t91.5 347q0 479 -346 479q-190 0 -279 -104.5t-89 -340.5v-32 q0 -255 85.5 -364t278.5 -109z" /> +<glyph unicode="q" horiz-adv-x="1219" d="M569 -20q-214 0 -332 142t-118 410q0 275 118 425.5t338 150.5q236 0 353 -174h6l18 153h84v-1579h-98v414q0 122 6 248h-6q-118 -190 -369 -190zM571 68q198 0 282.5 109t84.5 366v12q0 245 -85 354t-271 109q-176 0 -267.5 -124t-91.5 -364q0 -229 89.5 -345.5 t258.5 -116.5z" /> +<glyph unicode="r" horiz-adv-x="797" d="M610 1108q69 0 148 -14l-19 -95q-68 17 -141 17q-139 0 -228 -118t-89 -298v-600h-99v1087h84l10 -196h7q67 120 143 168.5t184 48.5z" /> +<glyph unicode="s" horiz-adv-x="954" d="M856 283q0 -146 -111 -224.5t-315 -78.5q-218 0 -346 67v107q164 -82 346 -82q161 0 244.5 53.5t83.5 142.5q0 82 -66.5 138t-218.5 110q-163 59 -229 101.5t-99.5 96t-33.5 130.5q0 122 102.5 193t286.5 71q176 0 334 -66l-37 -90q-160 66 -297 66q-133 0 -211 -44 t-78 -122q0 -85 60.5 -136t236.5 -114q147 -53 214 -95.5t100.5 -96.5t33.5 -127z" /> +<glyph unicode="t" horiz-adv-x="686" d="M469 68q94 0 164 16v-80q-72 -24 -166 -24q-144 0 -212.5 77t-68.5 242v702h-161v58l161 45l50 
246h51v-263h319v-86h-319v-688q0 -125 44 -185t138 -60z" /> +<glyph unicode="u" horiz-adv-x="1208" d="M268 1087v-704q0 -164 69 -238.5t214 -74.5q194 0 285.5 98t91.5 319v600h98v-1087h-84l-18 150h-6q-106 -170 -377 -170q-371 0 -371 397v710h98z" /> +<glyph unicode="v" horiz-adv-x="940" d="M420 0l-420 1087h102l281 -739q56 -142 84 -248h6q41 136 84 250l281 737h102l-420 -1087h-100z" /> +<glyph unicode="w" horiz-adv-x="1481" d="M1051 0l-238 727q-23 74 -59 217h-6l-21 -74l-45 -145l-242 -725h-98l-311 1087h106l174 -630q61 -234 80 -344h6q59 234 86 311l224 663h90l213 -661q72 -235 88 -311h6q8 65 80 348l166 624h100l-295 -1087h-104z" /> +<glyph unicode="x" horiz-adv-x="1020" d="M449 559l-379 528h114l324 -458l321 458h109l-373 -528l400 -559h-115l-342 485l-344 -485h-109z" /> +<glyph unicode="y" horiz-adv-x="940" d="M0 1087h102l230 -610q105 -281 133 -379h6q42 129 137 385l230 604h102l-487 -1263q-59 -154 -99 -208t-93.5 -81t-129.5 -27q-57 0 -127 21v86q58 -16 125 -16q51 0 90 24t70.5 74.5t73 160t53.5 142.5z" /> +<glyph unicode="z" horiz-adv-x="944" d="M858 0h-776v63l645 936h-598v88h727v-63l-649 -936h651v-88z" /> +<glyph unicode="{" horiz-adv-x="723" d="M389 -27q0 -102 59.5 -152.5t202.5 -53.5v-91q-195 0 -277.5 75t-82.5 231v337q0 205 -230 209v80q122 2 176 51t54 148v350q0 299 360 305v-90q-138 -5 -200 -58t-62 -157v-305q0 -130 -44 -194t-142 -85v-8q97 -20 141.5 -83.5t44.5 -186.5v-322z" /> +<glyph unicode="|" horiz-adv-x="1108" d="M508 1561h92v-2067h-92v2067z" /> +<glyph unicode="}" horiz-adv-x="723" d="M334 295q0 123 44.5 186.5t141.5 83.5v8q-97 20 -141.5 84t-44.5 195v305q0 103 -61.5 156.5t-200.5 58.5v90q174 0 267 -77.5t93 -227.5v-350q0 -100 54.5 -148.5t175.5 -50.5v-80q-230 -4 -230 -209v-337q0 -155 -82.5 -230.5t-277.5 -75.5v91q141 2 201.5 52.5 t60.5 153.5v322z" /> +<glyph unicode="~" d="M334 745q-49 0 -108 -30.5t-115 -89.5v94q108 110 233 110q61 0 115 -13.5t155 -57.5q126 -58 220 -58q56 0 109.5 30.5t115.5 94.5v-96q-48 -49 -104.5 -81t-129.5 -32q-116 0 -270 72q-124 57 -221 57z" /> +<glyph unicode="¡" 
horiz-adv-x="492" d="M215 711h61l29 -1086h-119zM166 1010q0 98 80 98q82 0 82 -98q0 -53 -23.5 -76t-58.5 -23q-34 0 -57 23t-23 76z" /> +<glyph unicode="¢" d="M602 190q-186 30 -288.5 175t-102.5 380q0 232 102.5 381.5t288.5 182.5v174h82v-166h14q131 0 275 -55l-31 -84q-134 51 -237 51q-187 0 -288.5 -122.5t-101.5 -358.5q0 -225 100.5 -349.5t280.5 -124.5q131 0 267 58v-92q-110 -56 -267 -56h-12v-204h-82v210z" /> +<glyph unicode="£" d="M412 676v-256q0 -116 -35 -196t-113 -128h809v-96h-995v84q110 21 171.5 110t61.5 224v258h-211v82h211v297q0 204 98 315t281 111q175 0 330 -68l-35 -86q-157 66 -295 66q-141 0 -209.5 -81t-68.5 -253v-301h411v-82h-411z" /> +<glyph unicode="¤" d="M991 723q0 -151 -90 -256l139 -141l-59 -60l-137 142q-110 -93 -260 -93q-153 0 -260 93l-138 -142l-59 60l139 141q-90 106 -90 256q0 147 90 258l-139 141l59 60l138 -142q103 93 260 93q155 0 260 -93l137 142l59 -60l-139 -141q90 -111 90 -258zM584 395 q134 0 228.5 95.5t94.5 232.5q0 136 -95 233t-228 97q-134 0 -229 -97t-95 -233t94.5 -232t229.5 -96z" /> +<glyph unicode="¥" d="M586 666l428 796h110l-432 -788h283v-82h-338v-205h338v-82h-338v-305h-105v305h-337v82h337v205h-337v82h278l-430 788h117z" /> +<glyph unicode="¦" horiz-adv-x="1108" d="M508 1561h92v-764h-92v764zM508 258h92v-764h-92v764z" /> +<glyph unicode="§" horiz-adv-x="1057" d="M145 813q0 83 50.5 152.5t138.5 107.5q-86 47 -125 102t-39 136q0 117 101.5 183.5t275.5 66.5q175 0 336 -64l-35 -80q-91 34 -158.5 47t-144.5 13q-134 0 -205.5 -44.5t-71.5 -119.5q0 -54 25.5 -88.5t85.5 -65.5t188 -74q192 -64 264 -132.5t72 -170.5 q0 -173 -186 -274q86 -42 129 -96t43 -136q0 -135 -113 -207.5t-311 -72.5q-92 0 -171 15t-165 52v95q182 -78 332 -78q162 0 247 49.5t85 140.5q0 55 -25 87.5t-88.5 65.5t-190.5 79q-200 73 -272 141.5t-72 169.5zM246 825q0 -65 31.5 -104t105.5 -75t250 -99 q82 41 126 98t44 121q0 62 -32 102t-108.5 77t-236.5 87q-81 -23 -130.5 -79t-49.5 -128z" /> +<glyph unicode="¨" horiz-adv-x="1182" d="M336 1389q0 46 15.5 66t47.5 20q64 0 64 -86t-64 -86q-63 0 -63 86zM717 1389q0 46 15.5 66t47.5 20q64 0 64 
-86t-64 -86q-63 0 -63 86z" /> +<glyph unicode="©" horiz-adv-x="1704" d="M897 1092q-142 0 -222.5 -94.5t-80.5 -264.5q0 -186 74.5 -275t220.5 -89q84 0 198 43v-88q-102 -45 -208 -45q-187 0 -288.5 115t-101.5 331q0 208 111 332.5t297 124.5q119 0 227 -52l-37 -83q-98 45 -190 45zM100 731q0 200 100 375t275 276t377 101q200 0 375 -100 t276 -275t101 -377q0 -197 -97 -370t-272 -277t-383 -104q-207 0 -382 103.5t-272.5 276.5t-97.5 371zM193 731q0 -178 88.5 -329.5t240.5 -240.5t330 -89t329.5 88.5t240.5 240.5t89 330q0 174 -85.5 325t-239 243t-334.5 92q-176 0 -328.5 -88.5t-241.5 -242.5t-89 -329z " /> +<glyph unicode="ª" horiz-adv-x="686" d="M512 813l-25 72q-84 -84 -202 -84q-95 0 -151 49t-56 139q0 100 80 151.5t241 59.5l95 4v43q0 77 -38 114.5t-106 37.5q-87 0 -196 -49l-33 73q117 56 231 56q228 0 228 -215v-451h-68zM168 993q0 -54 35 -85t96 -31q90 0 142.5 50t52.5 142v64l-88 -5 q-116 -6 -177 -36.5t-61 -98.5z" /> +<glyph unicode="«" horiz-adv-x="885" d="M82 543l309 393l62 -43l-254 -363l254 -362l-62 -43l-309 391v27zM442 543l310 393l61 -43l-254 -363l254 -362l-61 -43l-310 391v27z" /> +<glyph unicode="¬" d="M1038 764v-494h-82v412h-845v82h927z" /> +<glyph unicode="­" horiz-adv-x="659" d="M92 512zM92 512v82h475v-82h-475z" /> +<glyph unicode="®" horiz-adv-x="1704" d="M709 731h112q91 0 143 46.5t52 135.5q0 172 -197 172h-110v-354zM1120 918q0 -79 -38.5 -139.5t-110.5 -94.5l237 -393h-121l-210 360h-168v-360h-101v880h211q143 0 222 -62t79 -191zM100 731q0 200 100 375t275 276t377 101q200 0 375 -100t276 -275t101 -377 q0 -197 -97 -370t-272 -277t-383 -104q-207 0 -382 103.5t-272.5 276.5t-97.5 371zM193 731q0 -178 88.5 -329.5t240.5 -240.5t330 -89t329.5 88.5t240.5 240.5t89 330q0 174 -85.5 325t-239 243t-334.5 92q-176 0 -328.5 -88.5t-241.5 -242.5t-89 -329z" /> +<glyph unicode="¯" horiz-adv-x="1024" d="M1030 1556h-1036v82h1036v-82z" /> +<glyph unicode="°" horiz-adv-x="877" d="M139 1184q0 132 86.5 215.5t212.5 83.5t212.5 -83.5t86.5 -215.5t-86.5 -215.5t-212.5 -83.5q-130 0 -214.5 83t-84.5 216zM229 1184q0 -91 61 -154t148 -63q86 0 
147.5 62t61.5 155q0 92 -60 154.5t-149 62.5q-90 0 -149.5 -64t-59.5 -153z" /> +<glyph unicode="±" d="M111 1zM111 1v82h948v-82h-948zM625 764h434v-82h-434v-432h-82v432h-432v82h432v434h82v-434z" /> +<glyph unicode="²" horiz-adv-x="688" d="M629 586h-576v78l242 237q125 121 172 193t47 149q0 71 -46.5 112.5t-123.5 41.5q-108 0 -217 -82l-49 65q119 103 270 103q124 0 194 -63.5t70 -174.5q0 -47 -13 -89t-40 -85.5t-68.5 -90t-308.5 -306.5h447v-88z" /> +<glyph unicode="³" horiz-adv-x="688" d="M616 1260q0 -78 -44 -131.5t-117 -75.5q186 -45 186 -211q0 -130 -88.5 -201.5t-247.5 -71.5q-144 0 -264 60v88q136 -62 266 -62q115 0 174.5 49t59.5 136q0 83 -59.5 122t-178.5 39h-131v84h135q105 0 158 43.5t53 120.5q0 67 -47 107.5t-127 40.5q-128 0 -246 -78 l-47 70q130 94 293 94q127 0 199.5 -60t72.5 -163z" /> +<glyph unicode="´" horiz-adv-x="1182" d="M393 1257q73 79 144.5 171.5t97.5 140.5h141v-17q-36 -52 -122.5 -138t-190.5 -173h-70v16z" /> +<glyph unicode="µ" horiz-adv-x="1221" d="M281 1087v-704q0 -164 69 -238.5t213 -74.5q194 0 285.5 98t91.5 319v600h98v-1087h-84l-18 150h-6q-50 -77 -150 -123.5t-217 -46.5q-99 0 -167.5 27.5t-119.5 84.5q5 -92 5 -170v-414h-99v1579h99z" /> +<glyph unicode="¶" horiz-adv-x="1341" d="M1106 -260h-100v1722h-228v-1722h-100v819q-64 -18 -146 -18q-216 0 -317.5 125t-101.5 376q0 260 109 387t341 127h543v-1816z" /> +<glyph unicode="·" horiz-adv-x="487" d="M162 623zM162 721q0 98 80 98q82 0 82 -98t-82 -98q-80 0 -80 98z" /> +<glyph unicode="¸" horiz-adv-x="420" d="M393 -291q0 -100 -67.5 -150.5t-188.5 -50.5q-68 0 -94 11v88q30 -10 92 -10q78 0 119 28t41 80q0 94 -193 121l93 174h96l-66 -117q168 -37 168 -174z" /> +<glyph unicode="¹" horiz-adv-x="688" d="M350 1462h92v-876h-98v547q0 99 12 233q-26 -23 -233 -145l-47 77z" /> +<glyph unicode="º" horiz-adv-x="739" d="M670 1141q0 -161 -80 -250.5t-223 -89.5t-220 86t-77 254q0 162 78 250t223 88q142 0 220.5 -87t78.5 -251zM160 1141q0 -264 209 -264t209 264q0 131 -50 194.5t-159 63.5t-159 -63.5t-50 -194.5z" /> +<glyph unicode="»" horiz-adv-x="885" d="M803 518l-309 
-393l-62 43l254 362l-254 363l62 43l309 -391v-27zM442 518l-309 -393l-61 43l254 362l-254 363l61 43l309 -391v-27z" /> +<glyph unicode="¼" horiz-adv-x="1516" d="M59 0zM333 1462h92v-876h-98v547q0 99 12 233q-26 -23 -233 -145l-47 77zM1148 1462l-811 -1462h-94l811 1462h94zM1392 242h-129v-241h-90v241h-413v60l407 581h96v-563h129v-78zM1173 320v221q0 132 8 232q-6 -12 -21.5 -35.5t-295.5 -417.5h309z" /> +<glyph unicode="½" horiz-adv-x="1516" d="M11 0zM285 1462h92v-876h-98v547q0 99 12 233q-26 -23 -233 -145l-47 77zM1073 1462l-811 -1462h-94l811 1462h94zM1403 1h-576v78l242 237q125 121 172 193t47 149q0 71 -46.5 112.5t-123.5 41.5q-108 0 -217 -82l-49 65q119 103 270 103q124 0 194 -63.5t70 -174.5 q0 -47 -13 -89t-40 -85.5t-68.5 -90t-308.5 -306.5h447v-88z" /> +<glyph unicode="¾" horiz-adv-x="1516" d="M41 0zM616 1260q0 -78 -44 -131.5t-117 -75.5q186 -45 186 -211q0 -130 -88.5 -201.5t-247.5 -71.5q-144 0 -264 60v88q136 -62 266 -62q115 0 174.5 49t59.5 136q0 83 -59.5 122t-178.5 39h-131v84h135q105 0 158 43.5t53 120.5q0 67 -47 107.5t-127 40.5 q-128 0 -246 -78l-47 70q130 94 293 94q127 0 199.5 -60t72.5 -163zM1300 1462l-811 -1462h-94l811 1462h94zM1495 242h-129v-241h-90v241h-413v60l407 581h96v-563h129v-78zM1276 320v221q0 132 8 232q-6 -12 -21.5 -35.5t-295.5 -417.5h309z" /> +<glyph unicode="¿" horiz-adv-x="862" d="M569 711v-37q0 -125 -39.5 -204.5t-136.5 -164.5l-90 -79q-73 -61 -104 -120.5t-31 -138.5q0 -124 82 -200t221 -76q125 0 233 46l64 27l37 -79q-111 -48 -185.5 -64t-152.5 -16q-184 0 -288.5 99t-104.5 269q0 70 20 124t58.5 102t171.5 159q64 53 98.5 98.5t49.5 94 t15 145.5v15h82zM440 1010q0 98 80 98q82 0 82 -98q0 -53 -23.5 -76t-58.5 -23q-34 0 -57 23t-23 76z" /> +<glyph unicode="À" horiz-adv-x="1229" d="M0 0zM911 516h-594l-204 -516h-113l588 1468h65l576 -1468h-115zM354 608h523l-199 527q-25 62 -60 172q-27 -96 -59 -174zM720 1579h-69q-96 79 -188.5 171.5t-125.5 139.5v17h142q26 -48 98.5 -142t142.5 -170v-16z" /> +<glyph unicode="Á" horiz-adv-x="1229" d="M0 0zM911 516h-594l-204 -516h-113l588 1468h65l576 -1468h-115zM354 
608h523l-199 527q-25 62 -60 172q-27 -96 -59 -174zM504 1595q73 79 144.5 171.5t97.5 140.5h141v-17q-36 -52 -122.5 -138t-190.5 -173h-70v16z" /> +<glyph unicode="Â" horiz-adv-x="1229" d="M0 0zM911 516h-594l-204 -516h-113l588 1468h65l576 -1468h-115zM354 608h523l-199 527q-25 62 -60 172q-27 -96 -59 -174zM328 1595q62 67 131.5 156t110.5 156h98q68 -120 242 -312v-16h-70q-122 101 -221 207q-108 -114 -221 -207h-70v16z" /> +<glyph unicode="Ã" horiz-adv-x="1229" d="M0 0zM911 516h-594l-204 -516h-113l588 1468h65l576 -1468h-115zM354 608h523l-199 527q-25 62 -60 172q-27 -96 -59 -174zM784 1581q-36 0 -75 18.5t-101 71.5q-32 26 -62.5 46t-62.5 20q-45 0 -75 -34.5t-48 -121.5h-73q10 111 63 174.5t137 63.5q48 0 88 -25t82 -59 q34 -28 66 -50t61 -22q46 0 77 36.5t48 119.5h76q-16 -116 -69 -177t-132 -61z" /> +<glyph unicode="Ä" horiz-adv-x="1229" d="M0 0zM911 516h-594l-204 -516h-113l588 1468h65l576 -1468h-115zM354 608h523l-199 527q-25 62 -60 172q-27 -96 -59 -174zM367 1727q0 46 15.5 66t47.5 20q64 0 64 -86t-64 -86q-63 0 -63 86zM748 1727q0 46 15.5 66t47.5 20q64 0 64 -86t-64 -86q-63 0 -63 86z" /> +<glyph unicode="Å" horiz-adv-x="1229" d="M0 0zM911 516h-594l-204 -516h-113l588 1468h65l576 -1468h-115zM354 608h523l-199 527q-25 62 -60 172q-27 -96 -59 -174zM836 1610q0 -97 -60 -155t-157 -58t-157 58t-60 155q0 94 60 152.5t157 58.5t157 -59t60 -152zM482 1610q0 -66 37.5 -103.5t99.5 -37.5 t99.5 37.5t37.5 103.5q0 64 -39 101.5t-98 37.5q-62 0 -99.5 -38t-37.5 -101z" /> +<glyph unicode="Æ" horiz-adv-x="1653" d="M1528 0h-717v516h-475l-227 -516h-111l653 1462h877v-94h-615v-553h576v-94h-576v-627h615v-94zM377 608h434v760h-100z" /> +<glyph unicode="Ç" horiz-adv-x="1272" d="M129 0zM831 1391q-275 0 -433 -176t-158 -482q0 -313 149 -486t426 -173q184 0 338 47v-90q-145 -51 -362 -51q-308 0 -485 199t-177 556q0 223 84.5 393t243 262.5t368.5 92.5q214 0 383 -80l-41 -92q-160 80 -336 80zM911 -291q0 -100 -67.5 -150.5t-188.5 -50.5 q-68 0 -94 11v88q30 -10 92 -10q78 0 119 28t41 80q0 94 -193 121l93 174h96l-66 -117q168 -37 168 -174z" /> +<glyph 
unicode="È" horiz-adv-x="1130" d="M207 0zM1006 0h-799v1462h799v-94h-697v-553h658v-94h-658v-627h697v-94zM697 1579h-69q-96 79 -188.5 171.5t-125.5 139.5v17h142q26 -48 98.5 -142t142.5 -170v-16z" /> +<glyph unicode="É" horiz-adv-x="1130" d="M207 0zM1006 0h-799v1462h799v-94h-697v-553h658v-94h-658v-627h697v-94zM463 1595q73 79 144.5 171.5t97.5 140.5h141v-17q-36 -52 -122.5 -138t-190.5 -173h-70v16z" /> +<glyph unicode="Ê" horiz-adv-x="1130" d="M207 0zM1006 0h-799v1462h799v-94h-697v-553h658v-94h-658v-627h697v-94zM315 1595q62 67 131.5 156t110.5 156h98q68 -120 242 -312v-16h-70q-122 101 -221 207q-108 -114 -221 -207h-70v16z" /> +<glyph unicode="Ë" horiz-adv-x="1130" d="M207 0zM1006 0h-799v1462h799v-94h-697v-553h658v-94h-658v-627h697v-94zM354 1727q0 46 15.5 66t47.5 20q64 0 64 -86t-64 -86q-63 0 -63 86zM735 1727q0 46 15.5 66t47.5 20q64 0 64 -86t-64 -86q-63 0 -63 86z" /> +<glyph unicode="Ì" horiz-adv-x="516" d="M0 0zM207 0v1462h102v-1462h-102zM320 1579h-69q-96 79 -188.5 171.5t-125.5 139.5v17h142q26 -48 98.5 -142t142.5 -170v-16z" /> +<glyph unicode="Í" horiz-adv-x="516" d="M191 0zM207 0v1462h102v-1462h-102zM191 1595q73 79 144.5 171.5t97.5 140.5h141v-17q-36 -52 -122.5 -138t-190.5 -173h-70v16z" /> +<glyph unicode="Î" horiz-adv-x="516" d="M0 0zM207 0v1462h102v-1462h-102zM-32 1595q62 67 131.5 156t110.5 156h98q68 -120 242 -312v-16h-70q-122 101 -221 207q-108 -114 -221 -207h-70v16z" /> +<glyph unicode="Ï" horiz-adv-x="516" d="M5 0zM207 0v1462h102v-1462h-102zM5 1727q0 46 15.5 66t47.5 20q64 0 64 -86t-64 -86q-63 0 -63 86zM386 1727q0 46 15.5 66t47.5 20q64 0 64 -86t-64 -86q-63 0 -63 86z" /> +<glyph unicode="Ð" horiz-adv-x="1466" d="M1317 745q0 -368 -193 -556.5t-567 -188.5h-350v678h-160v94h160v690h395q350 0 532.5 -183t182.5 -534zM1206 741q0 314 -159.5 472.5t-468.5 158.5h-269v-600h406v-94h-406v-588h242q655 0 655 651z" /> +<glyph unicode="Ñ" horiz-adv-x="1477" d="M207 0zM1270 0h-103l-866 1298h-8q12 -232 12 -350v-948h-98v1462h102l865 -1296h6q-9 180 -9 342v954h99v-1462zM897 1581q-36 0 -75 18.5t-101 
71.5q-32 26 -62.5 46t-62.5 20q-45 0 -75 -34.5t-48 -121.5h-73q10 111 63 174.5t137 63.5q48 0 88 -25t82 -59 q34 -28 66 -50t61 -22q46 0 77 36.5t48 119.5h76q-16 -116 -69 -177t-132 -61z" /> +<glyph unicode="Ò" horiz-adv-x="1565" d="M129 0zM1436 733q0 -348 -174 -550.5t-480 -202.5q-305 0 -479 202.5t-174 552.5q0 349 175.5 549.5t479.5 200.5q306 0 479 -201.5t173 -550.5zM240 733q0 -314 140 -485.5t402 -171.5q264 0 403.5 170t139.5 487q0 316 -139.5 484.5t-401.5 168.5q-261 0 -402.5 -170 t-141.5 -483zM885 1579h-69q-96 79 -188.5 171.5t-125.5 139.5v17h142q26 -48 98.5 -142t142.5 -170v-16z" /> +<glyph unicode="Ó" horiz-adv-x="1565" d="M129 0zM1436 733q0 -348 -174 -550.5t-480 -202.5q-305 0 -479 202.5t-174 552.5q0 349 175.5 549.5t479.5 200.5q306 0 479 -201.5t173 -550.5zM240 733q0 -314 140 -485.5t402 -171.5q264 0 403.5 170t139.5 487q0 316 -139.5 484.5t-401.5 168.5q-261 0 -402.5 -170 t-141.5 -483zM686 1595q73 79 144.5 171.5t97.5 140.5h141v-17q-36 -52 -122.5 -138t-190.5 -173h-70v16z" /> +<glyph unicode="Ô" horiz-adv-x="1565" d="M129 0zM1436 733q0 -348 -174 -550.5t-480 -202.5q-305 0 -479 202.5t-174 552.5q0 349 175.5 549.5t479.5 200.5q306 0 479 -201.5t173 -550.5zM240 733q0 -314 140 -485.5t402 -171.5q264 0 403.5 170t139.5 487q0 316 -139.5 484.5t-401.5 168.5q-261 0 -402.5 -170 t-141.5 -483zM492 1595q62 67 131.5 156t110.5 156h98q68 -120 242 -312v-16h-70q-122 101 -221 207q-108 -114 -221 -207h-70v16z" /> +<glyph unicode="Õ" horiz-adv-x="1565" d="M129 0zM1436 733q0 -348 -174 -550.5t-480 -202.5q-305 0 -479 202.5t-174 552.5q0 349 175.5 549.5t479.5 200.5q306 0 479 -201.5t173 -550.5zM240 733q0 -314 140 -485.5t402 -171.5q264 0 403.5 170t139.5 487q0 316 -139.5 484.5t-401.5 168.5q-261 0 -402.5 -170 t-141.5 -483zM940 1581q-36 0 -75 18.5t-101 71.5q-32 26 -62.5 46t-62.5 20q-45 0 -75 -34.5t-48 -121.5h-73q10 111 63 174.5t137 63.5q48 0 88 -25t82 -59q34 -28 66 -50t61 -22q46 0 77 36.5t48 119.5h76q-16 -116 -69 -177t-132 -61z" /> +<glyph unicode="Ö" horiz-adv-x="1565" d="M129 0zM1436 733q0 -348 -174 -550.5t-480 
-202.5q-305 0 -479 202.5t-174 552.5q0 349 175.5 549.5t479.5 200.5q306 0 479 -201.5t173 -550.5zM240 733q0 -314 140 -485.5t402 -171.5q264 0 403.5 170t139.5 487q0 316 -139.5 484.5t-401.5 168.5q-261 0 -402.5 -170 t-141.5 -483zM529 1727q0 46 15.5 66t47.5 20q64 0 64 -86t-64 -86q-63 0 -63 86zM910 1727q0 46 15.5 66t47.5 20q64 0 64 -86t-64 -86q-63 0 -63 86z" /> +<glyph unicode="×" d="M584 780l409 408l58 -58l-408 -407l406 -408l-58 -57l-407 408l-406 -408l-57 57l405 408l-407 407l57 58z" /> +<glyph unicode="Ø" horiz-adv-x="1565" d="M1436 733q0 -348 -174 -550.5t-480 -202.5q-236 0 -395 120l-86 -120l-74 59l90 127q-188 200 -188 569q0 349 175.5 549.5t479.5 200.5q232 0 392 -121l108 152l72 -60l-111 -153q191 -207 191 -570zM1325 733q0 315 -139 486l-742 -1037q133 -106 338 -106 q264 0 403.5 170t139.5 487zM240 733q0 -312 139 -483l739 1034q-133 102 -334 102q-261 0 -402.5 -170t-141.5 -483z" /> +<glyph unicode="Ù" horiz-adv-x="1473" d="M190 0zM1282 1462v-946q0 -252 -146 -394t-407 -142q-254 0 -396.5 142.5t-142.5 397.5v942h103v-946q0 -211 117 -328.5t331 -117.5q209 0 324 115.5t115 320.5v956h102zM833 1579h-69q-96 79 -188.5 171.5t-125.5 139.5v17h142q26 -48 98.5 -142t142.5 -170v-16z" /> +<glyph unicode="Ú" horiz-adv-x="1473" d="M190 0zM1282 1462v-946q0 -252 -146 -394t-407 -142q-254 0 -396.5 142.5t-142.5 397.5v942h103v-946q0 -211 117 -328.5t331 -117.5q209 0 324 115.5t115 320.5v956h102zM633 1595q73 79 144.5 171.5t97.5 140.5h141v-17q-36 -52 -122.5 -138t-190.5 -173h-70v16z" /> +<glyph unicode="Û" horiz-adv-x="1473" d="M190 0zM1282 1462v-946q0 -252 -146 -394t-407 -142q-254 0 -396.5 142.5t-142.5 397.5v942h103v-946q0 -211 117 -328.5t331 -117.5q209 0 324 115.5t115 320.5v956h102zM444 1595q62 67 131.5 156t110.5 156h98q68 -120 242 -312v-16h-70q-122 101 -221 207 q-108 -114 -221 -207h-70v16z" /> +<glyph unicode="Ü" horiz-adv-x="1473" d="M190 0zM1282 1462v-946q0 -252 -146 -394t-407 -142q-254 0 -396.5 142.5t-142.5 397.5v942h103v-946q0 -211 117 -328.5t331 -117.5q209 0 324 115.5t115 320.5v956h102zM481 1727q0 46 
15.5 66t47.5 20q64 0 64 -86t-64 -86q-63 0 -63 86zM862 1727q0 46 15.5 66 t47.5 20q64 0 64 -86t-64 -86q-63 0 -63 86z" /> +<glyph unicode="Ý" horiz-adv-x="1081" d="M0 0zM543 662l428 800h110l-487 -897v-565h-105v557l-489 905h117zM434 1595q73 79 144.5 171.5t97.5 140.5h141v-17q-36 -52 -122.5 -138t-190.5 -173h-70v16z" /> +<glyph unicode="Þ" horiz-adv-x="1198" d="M1087 778q0 -212 -144 -325t-408 -113h-226v-340h-102v1462h102v-264h256q522 0 522 -420zM309 428h201q247 0 357 81.5t110 264.5q0 169 -104 250.5t-322 81.5h-242v-678z" /> +<glyph unicode="ß" horiz-adv-x="1194" d="M961 1284q0 -139 -139 -250q-81 -64 -110.5 -100.5t-29.5 -75.5q0 -44 14.5 -68t51.5 -57t102 -78q106 -75 151.5 -124.5t68 -103t22.5 -120.5q0 -156 -88 -241.5t-246 -85.5q-95 0 -174.5 18.5t-126.5 48.5v107q65 -38 148.5 -62t152.5 -24q114 0 174.5 54.5t60.5 160.5 q0 83 -39 144t-149 136q-127 87 -175 147t-48 146q0 60 32.5 110t106.5 108q74 57 106.5 105.5t32.5 106.5q0 93 -70 143t-202 50q-145 0 -226 -69t-81 -196v-1214h-99v1206q0 173 103.5 267t292.5 94q188 0 285.5 -72.5t97.5 -210.5z" /> +<glyph unicode="à" horiz-adv-x="1085" d="M98 0zM842 0l-25 172h-8q-82 -105 -168.5 -148.5t-204.5 -43.5q-160 0 -249 82t-89 227q0 159 132.5 247t383.5 93l207 6v72q0 155 -63 234t-203 79q-151 0 -313 -84l-37 86q179 84 354 84q179 0 267.5 -93t88.5 -290v-723h-73zM442 70q174 0 274.5 99.5t100.5 276.5v107 l-190 -8q-229 -11 -326.5 -71.5t-97.5 -188.5q0 -102 62.5 -158.5t176.5 -56.5zM638 1241h-69q-96 79 -188.5 171.5t-125.5 139.5v17h142q26 -48 98.5 -142t142.5 -170v-16z" /> +<glyph unicode="á" horiz-adv-x="1085" d="M98 0zM842 0l-25 172h-8q-82 -105 -168.5 -148.5t-204.5 -43.5q-160 0 -249 82t-89 227q0 159 132.5 247t383.5 93l207 6v72q0 155 -63 234t-203 79q-151 0 -313 -84l-37 86q179 84 354 84q179 0 267.5 -93t88.5 -290v-723h-73zM442 70q174 0 274.5 99.5t100.5 276.5v107 l-190 -8q-229 -11 -326.5 -71.5t-97.5 -188.5q0 -102 62.5 -158.5t176.5 -56.5zM422 1257q73 79 144.5 171.5t97.5 140.5h141v-17q-36 -52 -122.5 -138t-190.5 -173h-70v16z" /> +<glyph unicode="â" horiz-adv-x="1085" 
d="M98 0zM842 0l-25 172h-8q-82 -105 -168.5 -148.5t-204.5 -43.5q-160 0 -249 82t-89 227q0 159 132.5 247t383.5 93l207 6v72q0 155 -63 234t-203 79q-151 0 -313 -84l-37 86q179 84 354 84q179 0 267.5 -93t88.5 -290v-723h-73zM442 70q174 0 274.5 99.5t100.5 276.5v107 l-190 -8q-229 -11 -326.5 -71.5t-97.5 -188.5q0 -102 62.5 -158.5t176.5 -56.5zM251 1257q62 67 131.5 156t110.5 156h98q68 -120 242 -312v-16h-70q-122 101 -221 207q-108 -114 -221 -207h-70v16z" /> +<glyph unicode="ã" horiz-adv-x="1085" d="M98 0zM842 0l-25 172h-8q-82 -105 -168.5 -148.5t-204.5 -43.5q-160 0 -249 82t-89 227q0 159 132.5 247t383.5 93l207 6v72q0 155 -63 234t-203 79q-151 0 -313 -84l-37 86q179 84 354 84q179 0 267.5 -93t88.5 -290v-723h-73zM442 70q174 0 274.5 99.5t100.5 276.5v107 l-190 -8q-229 -11 -326.5 -71.5t-97.5 -188.5q0 -102 62.5 -158.5t176.5 -56.5zM697 1243q-36 0 -75 18.5t-101 71.5q-32 26 -62.5 46t-62.5 20q-45 0 -75 -34.5t-48 -121.5h-73q10 111 63 174.5t137 63.5q48 0 88 -25t82 -59q34 -28 66 -50t61 -22q46 0 77 36.5t48 119.5 h76q-16 -116 -69 -177t-132 -61z" /> +<glyph unicode="ä" horiz-adv-x="1085" d="M98 0zM842 0l-25 172h-8q-82 -105 -168.5 -148.5t-204.5 -43.5q-160 0 -249 82t-89 227q0 159 132.5 247t383.5 93l207 6v72q0 155 -63 234t-203 79q-151 0 -313 -84l-37 86q179 84 354 84q179 0 267.5 -93t88.5 -290v-723h-73zM442 70q174 0 274.5 99.5t100.5 276.5v107 l-190 -8q-229 -11 -326.5 -71.5t-97.5 -188.5q0 -102 62.5 -158.5t176.5 -56.5zM282 1389q0 46 15.5 66t47.5 20q64 0 64 -86t-64 -86q-63 0 -63 86zM663 1389q0 46 15.5 66t47.5 20q64 0 64 -86t-64 -86q-63 0 -63 86z" /> +<glyph unicode="å" horiz-adv-x="1085" d="M98 0zM842 0l-25 172h-8q-82 -105 -168.5 -148.5t-204.5 -43.5q-160 0 -249 82t-89 227q0 159 132.5 247t383.5 93l207 6v72q0 155 -63 234t-203 79q-151 0 -313 -84l-37 86q179 84 354 84q179 0 267.5 -93t88.5 -290v-723h-73zM442 70q174 0 274.5 99.5t100.5 276.5v107 l-190 -8q-229 -11 -326.5 -71.5t-97.5 -188.5q0 -102 62.5 -158.5t176.5 -56.5zM759 1456q0 -97 -60 -155t-157 -58t-157 58t-60 155q0 94 60 152.5t157 58.5t157 -59t60 -152zM405 1456q0 
-66 37.5 -103.5t99.5 -37.5t99.5 37.5t37.5 103.5q0 64 -39 101.5t-98 37.5 q-62 0 -99.5 -38t-37.5 -101z" /> +<glyph unicode="æ" horiz-adv-x="1731" d="M1243 -20q-295 0 -397 256q-68 -133 -168 -194.5t-252 -61.5q-156 0 -242 82.5t-86 226.5q0 154 125 243t377 97l201 6v72q0 155 -61.5 234t-198.5 79q-148 0 -305 -84l-37 86q173 84 346 84q261 0 325 -211q111 213 347 213q184 0 289.5 -134.5t105.5 -363.5v-80h-715 q0 -460 348 -460q85 0 150 12t174 57v-90q-92 -41 -165 -55t-161 -14zM434 70q169 0 266 99.5t97 276.5v107l-187 -8q-219 -11 -313 -71.5t-94 -188.5q0 -102 61 -158.5t170 -56.5zM1217 1020q-284 0 -314 -402h604q0 188 -77.5 295t-212.5 107z" /> +<glyph unicode="ç" horiz-adv-x="973" d="M119 0zM616 -20q-233 0 -365 147t-132 410q0 270 137 420.5t375 150.5q141 0 270 -49l-27 -88q-141 47 -245 47q-200 0 -303 -123.5t-103 -355.5q0 -220 103 -344.5t288 -124.5q148 0 275 53v-92q-104 -51 -273 -51zM723 -291q0 -100 -67.5 -150.5t-188.5 -50.5 q-68 0 -94 11v88q30 -10 92 -10q78 0 119 28t41 80q0 94 -193 121l93 174h96l-66 -117q168 -37 168 -174z" /> +<glyph unicode="è" horiz-adv-x="1124" d="M119 0zM621 -20q-237 0 -369.5 146t-132.5 409q0 260 128 416.5t345 156.5q192 0 303 -134t111 -364v-80h-783q2 -224 104.5 -342t293.5 -118q93 0 163.5 13t178.5 56v-90q-92 -40 -170 -54.5t-172 -14.5zM592 1020q-157 0 -252 -103.5t-111 -298.5h672q0 189 -82 295.5 t-227 106.5zM685 1241h-69q-96 79 -188.5 171.5t-125.5 139.5v17h142q26 -48 98.5 -142t142.5 -170v-16z" /> +<glyph unicode="é" horiz-adv-x="1124" d="M119 0zM621 -20q-237 0 -369.5 146t-132.5 409q0 260 128 416.5t345 156.5q192 0 303 -134t111 -364v-80h-783q2 -224 104.5 -342t293.5 -118q93 0 163.5 13t178.5 56v-90q-92 -40 -170 -54.5t-172 -14.5zM592 1020q-157 0 -252 -103.5t-111 -298.5h672q0 189 -82 295.5 t-227 106.5zM452 1257q73 79 144.5 171.5t97.5 140.5h141v-17q-36 -52 -122.5 -138t-190.5 -173h-70v16z" /> +<glyph unicode="ê" horiz-adv-x="1124" d="M119 0zM621 -20q-237 0 -369.5 146t-132.5 409q0 260 128 416.5t345 156.5q192 0 303 -134t111 -364v-80h-783q2 -224 104.5 -342t293.5 -118q93 0 163.5 
13t178.5 56v-90q-92 -40 -170 -54.5t-172 -14.5zM592 1020q-157 0 -252 -103.5t-111 -298.5h672q0 189 -82 295.5 t-227 106.5zM290 1257q62 67 131.5 156t110.5 156h98q68 -120 242 -312v-16h-70q-122 101 -221 207q-108 -114 -221 -207h-70v16z" /> +<glyph unicode="ë" horiz-adv-x="1124" d="M119 0zM621 -20q-237 0 -369.5 146t-132.5 409q0 260 128 416.5t345 156.5q192 0 303 -134t111 -364v-80h-783q2 -224 104.5 -342t293.5 -118q93 0 163.5 13t178.5 56v-90q-92 -40 -170 -54.5t-172 -14.5zM592 1020q-157 0 -252 -103.5t-111 -298.5h672q0 189 -82 295.5 t-227 106.5zM331 1389q0 46 15.5 66t47.5 20q64 0 64 -86t-64 -86q-63 0 -63 86zM712 1389q0 46 15.5 66t47.5 20q64 0 64 -86t-64 -86q-63 0 -63 86z" /> +<glyph unicode="ì" horiz-adv-x="463" d="M0 0zM281 0h-99v1087h99v-1087zM349 1241h-69q-96 79 -188.5 171.5t-125.5 139.5v17h142q26 -48 98.5 -142t142.5 -170v-16z" /> +<glyph unicode="í" horiz-adv-x="463" d="M107 0zM281 0h-99v1087h99v-1087zM107 1257q73 79 144.5 171.5t97.5 140.5h141v-17q-36 -52 -122.5 -138t-190.5 -173h-70v16z" /> +<glyph unicode="î" horiz-adv-x="463" d="M0 0zM281 0h-99v1087h99v-1087zM-58 1257q62 67 131.5 156t110.5 156h98q68 -120 242 -312v-16h-70q-122 101 -221 207q-108 -114 -221 -207h-70v16z" /> +<glyph unicode="ï" horiz-adv-x="463" d="M0 0zM281 0h-99v1087h99v-1087zM-21 1389q0 46 15.5 66t47.5 20q64 0 64 -86t-64 -86q-63 0 -63 86zM360 1389q0 46 15.5 66t47.5 20q64 0 64 -86t-64 -86q-63 0 -63 86z" /> +<glyph unicode="ð" horiz-adv-x="1174" d="M1055 559q0 -276 -124 -427.5t-349 -151.5q-214 0 -339.5 130t-125.5 361q0 228 126.5 357.5t342.5 129.5q108 0 187.5 -33t148.5 -96l4 2q-64 270 -269 459l-270 -157l-49 77l244 146q-86 62 -199 119l45 81q147 -69 248 -145l225 137l49 -84l-202 -121 q154 -151 230.5 -353t76.5 -431zM950 557q0 146 -97 228.5t-267 82.5q-185 0 -275 -100.5t-90 -304.5q0 -186 94.5 -289.5t268.5 -103.5q179 0 272.5 123t93.5 364z" /> +<glyph unicode="ñ" horiz-adv-x="1208" d="M182 0zM940 0v705q0 164 -69 238.5t-214 74.5q-195 0 -285.5 -98.5t-90.5 -319.5v-600h-99v1087h84l19 -149h6q106 170 377 170q370 0 370 
-397v-711h-98zM779 1243q-36 0 -75 18.5t-101 71.5q-32 26 -62.5 46t-62.5 20q-45 0 -75 -34.5t-48 -121.5h-73q10 111 63 174.5 t137 63.5q48 0 88 -25t82 -59q34 -28 66 -50t61 -22q46 0 77 36.5t48 119.5h76q-16 -116 -69 -177t-132 -61z" /> +<glyph unicode="ò" horiz-adv-x="1200" d="M119 0zM1081 545q0 -266 -129 -415.5t-356 -149.5q-143 0 -252 69t-167 198t-58 298q0 266 129 414.5t354 148.5q224 0 351.5 -150.5t127.5 -412.5zM223 545q0 -224 98.5 -349.5t278.5 -125.5t278.5 125.5t98.5 349.5q0 225 -99.5 349t-279.5 124t-277.5 -123.5 t-97.5 -349.5zM718 1241h-69q-96 79 -188.5 171.5t-125.5 139.5v17h142q26 -48 98.5 -142t142.5 -170v-16z" /> +<glyph unicode="ó" horiz-adv-x="1200" d="M119 0zM1081 545q0 -266 -129 -415.5t-356 -149.5q-143 0 -252 69t-167 198t-58 298q0 266 129 414.5t354 148.5q224 0 351.5 -150.5t127.5 -412.5zM223 545q0 -224 98.5 -349.5t278.5 -125.5t278.5 125.5t98.5 349.5q0 225 -99.5 349t-279.5 124t-277.5 -123.5 t-97.5 -349.5zM499 1257q73 79 144.5 171.5t97.5 140.5h141v-17q-36 -52 -122.5 -138t-190.5 -173h-70v16z" /> +<glyph unicode="ô" horiz-adv-x="1200" d="M119 0zM1081 545q0 -266 -129 -415.5t-356 -149.5q-143 0 -252 69t-167 198t-58 298q0 266 129 414.5t354 148.5q224 0 351.5 -150.5t127.5 -412.5zM223 545q0 -224 98.5 -349.5t278.5 -125.5t278.5 125.5t98.5 349.5q0 225 -99.5 349t-279.5 124t-277.5 -123.5 t-97.5 -349.5zM309 1257q62 67 131.5 156t110.5 156h98q68 -120 242 -312v-16h-70q-122 101 -221 207q-108 -114 -221 -207h-70v16z" /> +<glyph unicode="õ" horiz-adv-x="1200" d="M119 0zM1081 545q0 -266 -129 -415.5t-356 -149.5q-143 0 -252 69t-167 198t-58 298q0 266 129 414.5t354 148.5q224 0 351.5 -150.5t127.5 -412.5zM223 545q0 -224 98.5 -349.5t278.5 -125.5t278.5 125.5t98.5 349.5q0 225 -99.5 349t-279.5 124t-277.5 -123.5 t-97.5 -349.5zM761 1243q-36 0 -75 18.5t-101 71.5q-32 26 -62.5 46t-62.5 20q-45 0 -75 -34.5t-48 -121.5h-73q10 111 63 174.5t137 63.5q48 0 88 -25t82 -59q34 -28 66 -50t61 -22q46 0 77 36.5t48 119.5h76q-16 -116 -69 -177t-132 -61z" /> +<glyph unicode="ö" horiz-adv-x="1200" d="M119 0zM1081 545q0 -266 
-129 -415.5t-356 -149.5q-143 0 -252 69t-167 198t-58 298q0 266 129 414.5t354 148.5q224 0 351.5 -150.5t127.5 -412.5zM223 545q0 -224 98.5 -349.5t278.5 -125.5t278.5 125.5t98.5 349.5q0 225 -99.5 349t-279.5 124t-277.5 -123.5 t-97.5 -349.5zM346 1389q0 46 15.5 66t47.5 20q64 0 64 -86t-64 -86q-63 0 -63 86zM727 1389q0 46 15.5 66t47.5 20q64 0 64 -86t-64 -86q-63 0 -63 86z" /> +<glyph unicode="÷" d="M111 682v82h948v-82h-948zM504 1075q0 99 80 99q82 0 82 -99q0 -52 -23.5 -75t-58.5 -23q-34 0 -57 23t-23 75zM504 371q0 98 80 98q82 0 82 -98q0 -53 -23.5 -76t-58.5 -23q-34 0 -57 23t-23 76z" /> +<glyph unicode="ø" horiz-adv-x="1200" d="M1081 545q0 -266 -129 -415.5t-356 -149.5q-173 0 -291 98l-86 -113l-72 58l93 120q-121 153 -121 402q0 266 129 414.5t354 148.5q179 0 301 -104l96 124l74 -55l-104 -137q112 -147 112 -391zM223 545q0 -200 78 -322l543 705q-98 90 -246 90q-180 0 -277.5 -123.5 t-97.5 -349.5zM977 545q0 190 -72 309l-543 -702q94 -82 238 -82q180 0 278.5 125.5t98.5 349.5z" /> +<glyph unicode="ù" horiz-adv-x="1208" d="M170 0zM268 1087v-704q0 -164 69 -238.5t214 -74.5q194 0 285.5 98t91.5 319v600h98v-1087h-84l-18 150h-6q-106 -170 -377 -170q-371 0 -371 397v710h98zM687 1241h-69q-96 79 -188.5 171.5t-125.5 139.5v17h142q26 -48 98.5 -142t142.5 -170v-16z" /> +<glyph unicode="ú" horiz-adv-x="1208" d="M170 0zM268 1087v-704q0 -164 69 -238.5t214 -74.5q194 0 285.5 98t91.5 319v600h98v-1087h-84l-18 150h-6q-106 -170 -377 -170q-371 0 -371 397v710h98zM495 1257q73 79 144.5 171.5t97.5 140.5h141v-17q-36 -52 -122.5 -138t-190.5 -173h-70v16z" /> +<glyph unicode="û" horiz-adv-x="1208" d="M170 0zM268 1087v-704q0 -164 69 -238.5t214 -74.5q194 0 285.5 98t91.5 319v600h98v-1087h-84l-18 150h-6q-106 -170 -377 -170q-371 0 -371 397v710h98zM313 1257q62 67 131.5 156t110.5 156h98q68 -120 242 -312v-16h-70q-122 101 -221 207q-108 -114 -221 -207h-70 v16z" /> +<glyph unicode="ü" horiz-adv-x="1208" d="M170 0zM268 1087v-704q0 -164 69 -238.5t214 -74.5q194 0 285.5 98t91.5 319v600h98v-1087h-84l-18 150h-6q-106 -170 -377 -170q-371 0 -371 
397v710h98zM350 1389q0 46 15.5 66t47.5 20q64 0 64 -86t-64 -86q-63 0 -63 86zM731 1389q0 46 15.5 66t47.5 20q64 0 64 -86 t-64 -86q-63 0 -63 86z" /> +<glyph unicode="ý" horiz-adv-x="940" d="M0 0zM0 1087h102l230 -610q105 -281 133 -379h6q42 129 137 385l230 604h102l-487 -1263q-59 -154 -99 -208t-93.5 -81t-129.5 -27q-57 0 -127 21v86q58 -16 125 -16q51 0 90 24t70.5 74.5t73 160t53.5 142.5zM361 1257q73 79 144.5 171.5t97.5 140.5h141v-17 q-36 -52 -122.5 -138t-190.5 -173h-70v16z" /> +<glyph unicode="þ" horiz-adv-x="1219" d="M281 918q114 190 368 190q220 0 335.5 -144.5t115.5 -420.5q0 -268 -121.5 -415.5t-331.5 -147.5q-251 0 -366 188h-7l3 -84q4 -74 4 -162v-414h-99v2048h99v-391l-7 -247h7zM645 68q167 0 258.5 124t91.5 347q0 479 -348 479q-193 0 -279.5 -105t-86.5 -354v-18 q0 -255 85.5 -364t278.5 -109z" /> +<glyph unicode="ÿ" horiz-adv-x="940" d="M0 0zM0 1087h102l230 -610q105 -281 133 -379h6q42 129 137 385l230 604h102l-487 -1263q-59 -154 -99 -208t-93.5 -81t-129.5 -27q-57 0 -127 21v86q58 -16 125 -16q51 0 90 24t70.5 74.5t73 160t53.5 142.5zM214 1389q0 46 15.5 66t47.5 20q64 0 64 -86t-64 -86 q-63 0 -63 86zM595 1389q0 46 15.5 66t47.5 20q64 0 64 -86t-64 -86q-63 0 -63 86z" /> +<glyph unicode="ı" horiz-adv-x="463" d="M281 0h-99v1087h99v-1087z" /> +<glyph unicode="Œ" horiz-adv-x="1839" d="M1714 0h-756q-76 -16 -176 -16q-305 0 -479 200t-174 551q0 347 174.5 545.5t480.5 198.5q78 0 183 -17h747v-94h-655v-553h616v-94h-616v-627h655v-94zM782 80q109 0 174 18v1266q-62 16 -172 16q-262 0 -403 -167.5t-141 -479.5q0 -315 140.5 -484t401.5 -169z" /> +<glyph unicode="œ" horiz-adv-x="1942" d="M1438 -20q-156 0 -266.5 67.5t-165.5 198.5q-59 -128 -158 -197t-252 -69q-143 0 -252 69t-167 198t-58 298q0 266 129 414.5t354 148.5q151 0 251 -70t157 -209q110 279 399 279q192 0 303 -134t111 -364v-80h-762q2 -230 100.5 -345t276.5 -115q93 0 163.5 13t178.5 56 v-90q-92 -40 -170 -54.5t-172 -14.5zM223 545q0 -224 98.5 -349.5t278.5 -125.5q174 0 265 122.5t91 352.5q0 224 -93 348.5t-265 124.5q-180 0 -277.5 -123.5t-97.5 -349.5zM1409 1020q-155 0 
-242 -104t-102 -298h653q0 189 -82 295.5t-227 106.5z" /> +<glyph unicode="Ÿ" horiz-adv-x="1081" d="M0 0zM543 662l428 800h110l-487 -897v-565h-105v557l-489 905h117zM288 1727q0 46 15.5 66t47.5 20q64 0 64 -86t-64 -86q-63 0 -63 86zM669 1727q0 46 15.5 66t47.5 20q64 0 64 -86t-64 -86q-63 0 -63 86z" /> +<glyph unicode="ˆ" horiz-adv-x="1182" d="M299 1257q62 67 131.5 156t110.5 156h98q68 -120 242 -312v-16h-70q-122 101 -221 207q-108 -114 -221 -207h-70v16z" /> +<glyph unicode="˚" horiz-adv-x="1182" d="M805 1456q0 -97 -60 -155t-157 -58t-157 58t-60 155q0 94 60 152.5t157 58.5t157 -59t60 -152zM451 1456q0 -66 37.5 -103.5t99.5 -37.5t99.5 37.5t37.5 103.5q0 64 -39 101.5t-98 37.5q-62 0 -99.5 -38t-37.5 -101z" /> +<glyph unicode="˜" horiz-adv-x="1182" d="M780 1243q-36 0 -75 18.5t-101 71.5q-32 26 -62.5 46t-62.5 20q-45 0 -75 -34.5t-48 -121.5h-73q10 111 63 174.5t137 63.5q48 0 88 -25t82 -59q34 -28 66 -50t61 -22q46 0 77 36.5t48 119.5h76q-16 -116 -69 -177t-132 -61z" /> +<glyph unicode=" " horiz-adv-x="953" /> +<glyph unicode=" " horiz-adv-x="1907" /> +<glyph unicode=" " horiz-adv-x="953" /> +<glyph unicode=" " horiz-adv-x="1907" /> +<glyph unicode=" " horiz-adv-x="635" /> +<glyph unicode=" " horiz-adv-x="476" /> +<glyph unicode=" " horiz-adv-x="317" /> +<glyph unicode=" " horiz-adv-x="317" /> +<glyph unicode=" " horiz-adv-x="238" /> +<glyph unicode=" " horiz-adv-x="381" /> +<glyph unicode=" " horiz-adv-x="105" /> +<glyph unicode="‐" horiz-adv-x="659" d="M92 512v82h475v-82h-475z" /> +<glyph unicode="‑" horiz-adv-x="659" d="M92 512v82h475v-82h-475z" /> +<glyph unicode="‒" horiz-adv-x="659" d="M92 512v82h475v-82h-475z" /> +<glyph unicode="–" horiz-adv-x="1024" d="M82 512v82h860v-82h-860z" /> +<glyph unicode="—" horiz-adv-x="2048" d="M82 512v82h1884v-82h-1884z" /> +<glyph unicode="‘" horiz-adv-x="297" d="M41 961l-12 20q32 112 81.5 251t92.5 230h65q-30 -101 -64.5 -257t-45.5 -244h-117z" /> +<glyph unicode="’" horiz-adv-x="297" d="M256 1462l12 -20q-75 -265 -174 -481h-65q29 96 61 241.5t49 259.5h117z" /> 
+<glyph unicode="‚" horiz-adv-x="451" d="M68 0zM295 238l12 -20q-75 -265 -174 -481h-65q29 96 61 241.5t49 259.5h117z" /> +<glyph unicode="“" horiz-adv-x="614" d="M358 961l-12 20q34 120 83 255t91 226h66q-30 -98 -63 -248.5t-48 -252.5h-117zM41 961l-12 20q32 112 81.5 251t92.5 230h65q-30 -101 -64.5 -257t-45.5 -244h-117z" /> +<glyph unicode="”" horiz-adv-x="614" d="M256 1462l12 -20q-75 -265 -174 -481h-65q29 96 61 241.5t49 259.5h117zM573 1462l13 -20q-36 -128 -85 -261t-89 -220h-66q30 98 63 248.5t48 252.5h116z" /> +<glyph unicode="„" horiz-adv-x="768" d="M68 0zM295 238l12 -20q-75 -265 -174 -481h-65q29 96 61 241.5t49 259.5h117zM612 238l13 -20q-36 -128 -85 -261t-89 -220h-66q30 98 63 248.5t48 252.5h116z" /> +<glyph unicode="•" horiz-adv-x="770" d="M231 748q0 89 40.5 134.5t113.5 45.5t113.5 -47t40.5 -133q0 -85 -41 -133t-113 -48t-113 47t-41 134z" /> +<glyph unicode="…" horiz-adv-x="1466" d="M162 0zM162 78q0 98 80 98q82 0 82 -98t-82 -98q-80 0 -80 98zM651 78q0 98 80 98q82 0 82 -98t-82 -98q-80 0 -80 98zM1141 78q0 98 80 98q82 0 82 -98t-82 -98q-80 0 -80 98z" /> +<glyph unicode=" " horiz-adv-x="381" /> +<glyph unicode="‹" horiz-adv-x="524" d="M82 543l309 393l62 -43l-254 -363l254 -362l-62 -43l-309 391v27z" /> +<glyph unicode="›" horiz-adv-x="524" d="M442 518l-309 -393l-61 43l254 362l-254 363l61 43l309 -391v-27z" /> +<glyph unicode="⁄" horiz-adv-x="246" d="M573 1462l-811 -1462h-94l811 1462h94z" /> +<glyph unicode=" " horiz-adv-x="476" /> +<glyph unicode="⁴" horiz-adv-x="688" d="M657 827h-129v-241h-90v241h-413v60l407 581h96v-563h129v-78zM438 905v221q0 132 8 232q-6 -12 -21.5 -35.5t-295.5 -417.5h309z" /> +<glyph unicode="€" d="M803 1397q-174 0 -288 -125.5t-155 -364.5h502v-82h-510l-4 -104v-24q0 -65 4 -87h449v-82h-443q30 -217 147.5 -338.5t301.5 -121.5q148 0 287 65v-94q-81 -34 -150.5 -46.5t-140.5 -12.5q-228 0 -367.5 140t-181.5 408h-180v82h172q-4 38 -4 113l4 102h-172v82h184 q39 272 183 425t362 153q88 0 161 -17t148 -57l-39 -86q-132 72 -270 72z" /> +<glyph unicode="™" horiz-adv-x="1485" d="M313 
741h-86v643h-217v78h522v-78h-219v-643zM913 741l-221 609h-6l4 -201v-408h-82v721h125l221 -606l224 606h125v-721h-86v398l4 207h-7l-227 -605h-74z" /> +<glyph unicode="−" d="M111 682v82h948v-82h-948z" /> +<glyph unicode="" horiz-adv-x="1085" d="M0 1085h1085v-1085h-1085v1085z" /> +<glyph horiz-adv-x="1219" d="M0 0z" /> +<hkern u1=""" u2="Ÿ" k="-20" /> +<hkern u1=""" u2="œ" k="123" /> +<hkern u1=""" u2="ü" k="61" /> +<hkern u1=""" u2="û" k="61" /> +<hkern u1=""" u2="ú" k="61" /> +<hkern u1=""" u2="ù" k="61" /> +<hkern u1=""" u2="ø" k="123" /> +<hkern u1=""" u2="ö" k="123" /> +<hkern u1=""" u2="õ" k="123" /> +<hkern u1=""" u2="ô" k="123" /> +<hkern u1=""" u2="ó" k="123" /> +<hkern u1=""" u2="ò" k="123" /> +<hkern u1=""" u2="ë" k="123" /> +<hkern u1=""" u2="ê" k="123" /> +<hkern u1=""" u2="é" k="123" /> +<hkern u1=""" u2="è" k="123" /> +<hkern u1=""" u2="ç" k="123" /> +<hkern u1=""" u2="æ" k="82" /> +<hkern u1=""" u2="å" k="82" /> +<hkern u1=""" u2="ä" k="82" /> +<hkern u1=""" u2="ã" k="82" /> +<hkern u1=""" u2="â" k="82" /> +<hkern u1=""" u2="á" k="82" /> +<hkern u1=""" u2="à" k="123" /> +<hkern u1=""" u2="Ý" k="-20" /> +<hkern u1=""" u2="Å" k="143" /> +<hkern u1=""" u2="Ä" k="143" /> +<hkern u1=""" u2="Ã" k="143" /> +<hkern u1=""" u2="Â" k="143" /> +<hkern u1=""" u2="Á" k="143" /> +<hkern u1=""" u2="À" k="143" /> +<hkern u1=""" u2="u" k="61" /> +<hkern u1=""" u2="s" k="61" /> +<hkern u1=""" u2="r" k="61" /> +<hkern u1=""" u2="q" k="123" /> +<hkern u1=""" u2="p" k="61" /> +<hkern u1=""" u2="o" k="123" /> +<hkern u1=""" u2="n" k="61" /> +<hkern u1=""" u2="m" k="61" /> +<hkern u1=""" u2="g" k="61" /> +<hkern u1=""" u2="e" k="123" /> +<hkern u1=""" u2="d" k="123" /> +<hkern u1=""" u2="c" k="123" /> +<hkern u1=""" u2="a" k="82" /> +<hkern u1=""" u2="Y" k="-20" /> +<hkern u1=""" u2="W" k="-41" /> +<hkern u1=""" u2="V" k="-41" /> +<hkern u1=""" u2="T" k="-41" /> +<hkern u1=""" u2="A" k="143" /> +<hkern u1="'" u2="Ÿ" k="-20" /> +<hkern u1="'" u2="œ" k="123" /> +<hkern u1="'" 
u2="ü" k="61" /> +<hkern u1="'" u2="û" k="61" /> +<hkern u1="'" u2="ú" k="61" /> +<hkern u1="'" u2="ù" k="61" /> +<hkern u1="'" u2="ø" k="123" /> +<hkern u1="'" u2="ö" k="123" /> +<hkern u1="'" u2="õ" k="123" /> +<hkern u1="'" u2="ô" k="123" /> +<hkern u1="'" u2="ó" k="123" /> +<hkern u1="'" u2="ò" k="123" /> +<hkern u1="'" u2="ë" k="123" /> +<hkern u1="'" u2="ê" k="123" /> +<hkern u1="'" u2="é" k="123" /> +<hkern u1="'" u2="è" k="123" /> +<hkern u1="'" u2="ç" k="123" /> +<hkern u1="'" u2="æ" k="82" /> +<hkern u1="'" u2="å" k="82" /> +<hkern u1="'" u2="ä" k="82" /> +<hkern u1="'" u2="ã" k="82" /> +<hkern u1="'" u2="â" k="82" /> +<hkern u1="'" u2="á" k="82" /> +<hkern u1="'" u2="à" k="123" /> +<hkern u1="'" u2="Ý" k="-20" /> +<hkern u1="'" u2="Å" k="143" /> +<hkern u1="'" u2="Ä" k="143" /> +<hkern u1="'" u2="Ã" k="143" /> +<hkern u1="'" u2="Â" k="143" /> +<hkern u1="'" u2="Á" k="143" /> +<hkern u1="'" u2="À" k="143" /> +<hkern u1="'" u2="u" k="61" /> +<hkern u1="'" u2="s" k="61" /> +<hkern u1="'" u2="r" k="61" /> +<hkern u1="'" u2="q" k="123" /> +<hkern u1="'" u2="p" k="61" /> +<hkern u1="'" u2="o" k="123" /> +<hkern u1="'" u2="n" k="61" /> +<hkern u1="'" u2="m" k="61" /> +<hkern u1="'" u2="g" k="61" /> +<hkern u1="'" u2="e" k="123" /> +<hkern u1="'" u2="d" k="123" /> +<hkern u1="'" u2="c" k="123" /> +<hkern u1="'" u2="a" k="82" /> +<hkern u1="'" u2="Y" k="-20" /> +<hkern u1="'" u2="W" k="-41" /> +<hkern u1="'" u2="V" k="-41" /> +<hkern u1="'" u2="T" k="-41" /> +<hkern u1="'" u2="A" k="143" /> +<hkern u1="(" u2="J" k="-184" /> +<hkern u1="," u2="Ÿ" k="123" /> +<hkern u1="," u2="Œ" k="102" /> +<hkern u1="," u2="Ý" k="123" /> +<hkern u1="," u2="Ü" k="41" /> +<hkern u1="," u2="Û" k="41" /> +<hkern u1="," u2="Ú" k="41" /> +<hkern u1="," u2="Ù" k="41" /> +<hkern u1="," u2="Ø" k="102" /> +<hkern u1="," u2="Ö" k="102" /> +<hkern u1="," u2="Õ" k="102" /> +<hkern u1="," u2="Ô" k="102" /> +<hkern u1="," u2="Ó" k="102" /> +<hkern u1="," u2="Ò" k="102" /> +<hkern u1="," u2="Ç" 
k="102" /> +<hkern u1="," u2="Y" k="123" /> +<hkern u1="," u2="W" k="123" /> +<hkern u1="," u2="V" k="123" /> +<hkern u1="," u2="U" k="41" /> +<hkern u1="," u2="T" k="143" /> +<hkern u1="," u2="Q" k="102" /> +<hkern u1="," u2="O" k="102" /> +<hkern u1="," u2="G" k="102" /> +<hkern u1="," u2="C" k="102" /> +<hkern u1="-" u2="T" k="82" /> +<hkern u1="." u2="Ÿ" k="123" /> +<hkern u1="." u2="Œ" k="102" /> +<hkern u1="." u2="Ý" k="123" /> +<hkern u1="." u2="Ü" k="41" /> +<hkern u1="." u2="Û" k="41" /> +<hkern u1="." u2="Ú" k="41" /> +<hkern u1="." u2="Ù" k="41" /> +<hkern u1="." u2="Ø" k="102" /> +<hkern u1="." u2="Ö" k="102" /> +<hkern u1="." u2="Õ" k="102" /> +<hkern u1="." u2="Ô" k="102" /> +<hkern u1="." u2="Ó" k="102" /> +<hkern u1="." u2="Ò" k="102" /> +<hkern u1="." u2="Ç" k="102" /> +<hkern u1="." u2="Y" k="123" /> +<hkern u1="." u2="W" k="123" /> +<hkern u1="." u2="V" k="123" /> +<hkern u1="." u2="U" k="41" /> +<hkern u1="." u2="T" k="143" /> +<hkern u1="." u2="Q" k="102" /> +<hkern u1="." u2="O" k="102" /> +<hkern u1="." u2="G" k="102" /> +<hkern u1="." 
u2="C" k="102" /> +<hkern u1="A" u2="”" k="143" /> +<hkern u1="A" u2="’" k="143" /> +<hkern u1="A" u2="Ÿ" k="123" /> +<hkern u1="A" u2="Œ" k="41" /> +<hkern u1="A" u2="Ý" k="123" /> +<hkern u1="A" u2="Ø" k="41" /> +<hkern u1="A" u2="Ö" k="41" /> +<hkern u1="A" u2="Õ" k="41" /> +<hkern u1="A" u2="Ô" k="41" /> +<hkern u1="A" u2="Ó" k="41" /> +<hkern u1="A" u2="Ò" k="41" /> +<hkern u1="A" u2="Ç" k="41" /> +<hkern u1="A" u2="Y" k="123" /> +<hkern u1="A" u2="W" k="82" /> +<hkern u1="A" u2="V" k="82" /> +<hkern u1="A" u2="T" k="143" /> +<hkern u1="A" u2="Q" k="41" /> +<hkern u1="A" u2="O" k="41" /> +<hkern u1="A" u2="J" k="-266" /> +<hkern u1="A" u2="G" k="41" /> +<hkern u1="A" u2="C" k="41" /> +<hkern u1="A" u2="'" k="143" /> +<hkern u1="A" u2=""" k="143" /> +<hkern u1="B" u2="„" k="82" /> +<hkern u1="B" u2="‚" k="82" /> +<hkern u1="B" u2="Ÿ" k="20" /> +<hkern u1="B" u2="Ý" k="20" /> +<hkern u1="B" u2="Å" k="41" /> +<hkern u1="B" u2="Ä" k="41" /> +<hkern u1="B" u2="Ã" k="41" /> +<hkern u1="B" u2="Â" k="41" /> +<hkern u1="B" u2="Á" k="41" /> +<hkern u1="B" u2="À" k="41" /> +<hkern u1="B" u2="Z" k="20" /> +<hkern u1="B" u2="Y" k="20" /> +<hkern u1="B" u2="X" k="41" /> +<hkern u1="B" u2="W" k="20" /> +<hkern u1="B" u2="V" k="20" /> +<hkern u1="B" u2="T" k="61" /> +<hkern u1="B" u2="A" k="41" /> +<hkern u1="B" u2="." 
k="82" /> +<hkern u1="B" u2="," k="82" /> +<hkern u1="C" u2="Œ" k="41" /> +<hkern u1="C" u2="Ø" k="41" /> +<hkern u1="C" u2="Ö" k="41" /> +<hkern u1="C" u2="Õ" k="41" /> +<hkern u1="C" u2="Ô" k="41" /> +<hkern u1="C" u2="Ó" k="41" /> +<hkern u1="C" u2="Ò" k="41" /> +<hkern u1="C" u2="Ç" k="41" /> +<hkern u1="C" u2="Q" k="41" /> +<hkern u1="C" u2="O" k="41" /> +<hkern u1="C" u2="G" k="41" /> +<hkern u1="C" u2="C" k="41" /> +<hkern u1="D" u2="„" k="82" /> +<hkern u1="D" u2="‚" k="82" /> +<hkern u1="D" u2="Ÿ" k="20" /> +<hkern u1="D" u2="Ý" k="20" /> +<hkern u1="D" u2="Å" k="41" /> +<hkern u1="D" u2="Ä" k="41" /> +<hkern u1="D" u2="Ã" k="41" /> +<hkern u1="D" u2="Â" k="41" /> +<hkern u1="D" u2="Á" k="41" /> +<hkern u1="D" u2="À" k="41" /> +<hkern u1="D" u2="Z" k="20" /> +<hkern u1="D" u2="Y" k="20" /> +<hkern u1="D" u2="X" k="41" /> +<hkern u1="D" u2="W" k="20" /> +<hkern u1="D" u2="V" k="20" /> +<hkern u1="D" u2="T" k="61" /> +<hkern u1="D" u2="A" k="41" /> +<hkern u1="D" u2="." k="82" /> +<hkern u1="D" u2="," k="82" /> +<hkern u1="E" u2="J" k="-123" /> +<hkern u1="F" u2="„" k="123" /> +<hkern u1="F" u2="‚" k="123" /> +<hkern u1="F" u2="Å" k="41" /> +<hkern u1="F" u2="Ä" k="41" /> +<hkern u1="F" u2="Ã" k="41" /> +<hkern u1="F" u2="Â" k="41" /> +<hkern u1="F" u2="Á" k="41" /> +<hkern u1="F" u2="À" k="41" /> +<hkern u1="F" u2="A" k="41" /> +<hkern u1="F" u2="?" k="-41" /> +<hkern u1="F" u2="." 
k="123" /> +<hkern u1="F" u2="," k="123" /> +<hkern u1="K" u2="Œ" k="41" /> +<hkern u1="K" u2="Ø" k="41" /> +<hkern u1="K" u2="Ö" k="41" /> +<hkern u1="K" u2="Õ" k="41" /> +<hkern u1="K" u2="Ô" k="41" /> +<hkern u1="K" u2="Ó" k="41" /> +<hkern u1="K" u2="Ò" k="41" /> +<hkern u1="K" u2="Ç" k="41" /> +<hkern u1="K" u2="Q" k="41" /> +<hkern u1="K" u2="O" k="41" /> +<hkern u1="K" u2="G" k="41" /> +<hkern u1="K" u2="C" k="41" /> +<hkern u1="L" u2="”" k="164" /> +<hkern u1="L" u2="’" k="164" /> +<hkern u1="L" u2="Ÿ" k="61" /> +<hkern u1="L" u2="Œ" k="41" /> +<hkern u1="L" u2="Ý" k="61" /> +<hkern u1="L" u2="Ü" k="20" /> +<hkern u1="L" u2="Û" k="20" /> +<hkern u1="L" u2="Ú" k="20" /> +<hkern u1="L" u2="Ù" k="20" /> +<hkern u1="L" u2="Ø" k="41" /> +<hkern u1="L" u2="Ö" k="41" /> +<hkern u1="L" u2="Õ" k="41" /> +<hkern u1="L" u2="Ô" k="41" /> +<hkern u1="L" u2="Ó" k="41" /> +<hkern u1="L" u2="Ò" k="41" /> +<hkern u1="L" u2="Ç" k="41" /> +<hkern u1="L" u2="Y" k="61" /> +<hkern u1="L" u2="W" k="41" /> +<hkern u1="L" u2="V" k="41" /> +<hkern u1="L" u2="U" k="20" /> +<hkern u1="L" u2="T" k="41" /> +<hkern u1="L" u2="Q" k="41" /> +<hkern u1="L" u2="O" k="41" /> +<hkern u1="L" u2="G" k="41" /> +<hkern u1="L" u2="C" k="41" /> +<hkern u1="L" u2="'" k="164" /> +<hkern u1="L" u2=""" k="164" /> +<hkern u1="O" u2="„" k="82" /> +<hkern u1="O" u2="‚" k="82" /> +<hkern u1="O" u2="Ÿ" k="20" /> +<hkern u1="O" u2="Ý" k="20" /> +<hkern u1="O" u2="Å" k="41" /> +<hkern u1="O" u2="Ä" k="41" /> +<hkern u1="O" u2="Ã" k="41" /> +<hkern u1="O" u2="Â" k="41" /> +<hkern u1="O" u2="Á" k="41" /> +<hkern u1="O" u2="À" k="41" /> +<hkern u1="O" u2="Z" k="20" /> +<hkern u1="O" u2="Y" k="20" /> +<hkern u1="O" u2="X" k="41" /> +<hkern u1="O" u2="W" k="20" /> +<hkern u1="O" u2="V" k="20" /> +<hkern u1="O" u2="T" k="61" /> +<hkern u1="O" u2="A" k="41" /> +<hkern u1="O" u2="." 
k="82" /> +<hkern u1="O" u2="," k="82" /> +<hkern u1="P" u2="„" k="266" /> +<hkern u1="P" u2="‚" k="266" /> +<hkern u1="P" u2="Å" k="102" /> +<hkern u1="P" u2="Ä" k="102" /> +<hkern u1="P" u2="Ã" k="102" /> +<hkern u1="P" u2="Â" k="102" /> +<hkern u1="P" u2="Á" k="102" /> +<hkern u1="P" u2="À" k="102" /> +<hkern u1="P" u2="Z" k="20" /> +<hkern u1="P" u2="X" k="41" /> +<hkern u1="P" u2="A" k="102" /> +<hkern u1="P" u2="." k="266" /> +<hkern u1="P" u2="," k="266" /> +<hkern u1="Q" u2="„" k="82" /> +<hkern u1="Q" u2="‚" k="82" /> +<hkern u1="Q" u2="Ÿ" k="20" /> +<hkern u1="Q" u2="Ý" k="20" /> +<hkern u1="Q" u2="Å" k="41" /> +<hkern u1="Q" u2="Ä" k="41" /> +<hkern u1="Q" u2="Ã" k="41" /> +<hkern u1="Q" u2="Â" k="41" /> +<hkern u1="Q" u2="Á" k="41" /> +<hkern u1="Q" u2="À" k="41" /> +<hkern u1="Q" u2="Z" k="20" /> +<hkern u1="Q" u2="Y" k="20" /> +<hkern u1="Q" u2="X" k="41" /> +<hkern u1="Q" u2="W" k="20" /> +<hkern u1="Q" u2="V" k="20" /> +<hkern u1="Q" u2="T" k="61" /> +<hkern u1="Q" u2="A" k="41" /> +<hkern u1="Q" u2="." 
k="82" /> +<hkern u1="Q" u2="," k="82" /> +<hkern u1="T" u2="„" k="123" /> +<hkern u1="T" u2="‚" k="123" /> +<hkern u1="T" u2="—" k="82" /> +<hkern u1="T" u2="–" k="82" /> +<hkern u1="T" u2="œ" k="143" /> +<hkern u1="T" u2="Œ" k="41" /> +<hkern u1="T" u2="ý" k="41" /> +<hkern u1="T" u2="ü" k="102" /> +<hkern u1="T" u2="û" k="102" /> +<hkern u1="T" u2="ú" k="102" /> +<hkern u1="T" u2="ù" k="102" /> +<hkern u1="T" u2="ø" k="143" /> +<hkern u1="T" u2="ö" k="143" /> +<hkern u1="T" u2="õ" k="143" /> +<hkern u1="T" u2="ô" k="143" /> +<hkern u1="T" u2="ó" k="143" /> +<hkern u1="T" u2="ò" k="143" /> +<hkern u1="T" u2="ë" k="143" /> +<hkern u1="T" u2="ê" k="143" /> +<hkern u1="T" u2="é" k="143" /> +<hkern u1="T" u2="è" k="143" /> +<hkern u1="T" u2="ç" k="143" /> +<hkern u1="T" u2="æ" k="164" /> +<hkern u1="T" u2="å" k="164" /> +<hkern u1="T" u2="ä" k="164" /> +<hkern u1="T" u2="ã" k="164" /> +<hkern u1="T" u2="â" k="164" /> +<hkern u1="T" u2="á" k="164" /> +<hkern u1="T" u2="à" k="143" /> +<hkern u1="T" u2="Ø" k="41" /> +<hkern u1="T" u2="Ö" k="41" /> +<hkern u1="T" u2="Õ" k="41" /> +<hkern u1="T" u2="Ô" k="41" /> +<hkern u1="T" u2="Ó" k="41" /> +<hkern u1="T" u2="Ò" k="41" /> +<hkern u1="T" u2="Ç" k="41" /> +<hkern u1="T" u2="Å" k="143" /> +<hkern u1="T" u2="Ä" k="143" /> +<hkern u1="T" u2="Ã" k="143" /> +<hkern u1="T" u2="Â" k="143" /> +<hkern u1="T" u2="Á" k="143" /> +<hkern u1="T" u2="À" k="143" /> +<hkern u1="T" u2="z" k="82" /> +<hkern u1="T" u2="y" k="41" /> +<hkern u1="T" u2="x" k="41" /> +<hkern u1="T" u2="w" k="41" /> +<hkern u1="T" u2="v" k="41" /> +<hkern u1="T" u2="u" k="102" /> +<hkern u1="T" u2="s" k="123" /> +<hkern u1="T" u2="r" k="102" /> +<hkern u1="T" u2="q" k="143" /> +<hkern u1="T" u2="p" k="102" /> +<hkern u1="T" u2="o" k="143" /> +<hkern u1="T" u2="n" k="102" /> +<hkern u1="T" u2="m" k="102" /> +<hkern u1="T" u2="g" k="143" /> +<hkern u1="T" u2="e" k="143" /> +<hkern u1="T" u2="d" k="143" /> +<hkern u1="T" u2="c" k="143" /> +<hkern u1="T" u2="a" 
k="164" /> +<hkern u1="T" u2="T" k="-41" /> +<hkern u1="T" u2="Q" k="41" /> +<hkern u1="T" u2="O" k="41" /> +<hkern u1="T" u2="G" k="41" /> +<hkern u1="T" u2="C" k="41" /> +<hkern u1="T" u2="A" k="143" /> +<hkern u1="T" u2="?" k="-41" /> +<hkern u1="T" u2="." k="123" /> +<hkern u1="T" u2="-" k="82" /> +<hkern u1="T" u2="," k="123" /> +<hkern u1="U" u2="„" k="41" /> +<hkern u1="U" u2="‚" k="41" /> +<hkern u1="U" u2="Å" k="20" /> +<hkern u1="U" u2="Ä" k="20" /> +<hkern u1="U" u2="Ã" k="20" /> +<hkern u1="U" u2="Â" k="20" /> +<hkern u1="U" u2="Á" k="20" /> +<hkern u1="U" u2="À" k="20" /> +<hkern u1="U" u2="A" k="20" /> +<hkern u1="U" u2="." k="41" /> +<hkern u1="U" u2="," k="41" /> +<hkern u1="V" u2="„" k="102" /> +<hkern u1="V" u2="‚" k="102" /> +<hkern u1="V" u2="œ" k="41" /> +<hkern u1="V" u2="Œ" k="20" /> +<hkern u1="V" u2="ü" k="20" /> +<hkern u1="V" u2="û" k="20" /> +<hkern u1="V" u2="ú" k="20" /> +<hkern u1="V" u2="ù" k="20" /> +<hkern u1="V" u2="ø" k="41" /> +<hkern u1="V" u2="ö" k="41" /> +<hkern u1="V" u2="õ" k="41" /> +<hkern u1="V" u2="ô" k="41" /> +<hkern u1="V" u2="ó" k="41" /> +<hkern u1="V" u2="ò" k="41" /> +<hkern u1="V" u2="ë" k="41" /> +<hkern u1="V" u2="ê" k="41" /> +<hkern u1="V" u2="é" k="41" /> +<hkern u1="V" u2="è" k="41" /> +<hkern u1="V" u2="ç" k="41" /> +<hkern u1="V" u2="æ" k="41" /> +<hkern u1="V" u2="å" k="41" /> +<hkern u1="V" u2="ä" k="41" /> +<hkern u1="V" u2="ã" k="41" /> +<hkern u1="V" u2="â" k="41" /> +<hkern u1="V" u2="á" k="41" /> +<hkern u1="V" u2="à" k="41" /> +<hkern u1="V" u2="Ø" k="20" /> +<hkern u1="V" u2="Ö" k="20" /> +<hkern u1="V" u2="Õ" k="20" /> +<hkern u1="V" u2="Ô" k="20" /> +<hkern u1="V" u2="Ó" k="20" /> +<hkern u1="V" u2="Ò" k="20" /> +<hkern u1="V" u2="Ç" k="20" /> +<hkern u1="V" u2="Å" k="82" /> +<hkern u1="V" u2="Ä" k="82" /> +<hkern u1="V" u2="Ã" k="82" /> +<hkern u1="V" u2="Â" k="82" /> +<hkern u1="V" u2="Á" k="82" /> +<hkern u1="V" u2="À" k="82" /> +<hkern u1="V" u2="u" k="20" /> +<hkern u1="V" u2="s" k="20" 
/> +<hkern u1="V" u2="r" k="20" /> +<hkern u1="V" u2="q" k="41" /> +<hkern u1="V" u2="p" k="20" /> +<hkern u1="V" u2="o" k="41" /> +<hkern u1="V" u2="n" k="20" /> +<hkern u1="V" u2="m" k="20" /> +<hkern u1="V" u2="g" k="20" /> +<hkern u1="V" u2="e" k="41" /> +<hkern u1="V" u2="d" k="41" /> +<hkern u1="V" u2="c" k="41" /> +<hkern u1="V" u2="a" k="41" /> +<hkern u1="V" u2="Q" k="20" /> +<hkern u1="V" u2="O" k="20" /> +<hkern u1="V" u2="G" k="20" /> +<hkern u1="V" u2="C" k="20" /> +<hkern u1="V" u2="A" k="82" /> +<hkern u1="V" u2="?" k="-41" /> +<hkern u1="V" u2="." k="102" /> +<hkern u1="V" u2="," k="102" /> +<hkern u1="W" u2="„" k="102" /> +<hkern u1="W" u2="‚" k="102" /> +<hkern u1="W" u2="œ" k="41" /> +<hkern u1="W" u2="Œ" k="20" /> +<hkern u1="W" u2="ü" k="20" /> +<hkern u1="W" u2="û" k="20" /> +<hkern u1="W" u2="ú" k="20" /> +<hkern u1="W" u2="ù" k="20" /> +<hkern u1="W" u2="ø" k="41" /> +<hkern u1="W" u2="ö" k="41" /> +<hkern u1="W" u2="õ" k="41" /> +<hkern u1="W" u2="ô" k="41" /> +<hkern u1="W" u2="ó" k="41" /> +<hkern u1="W" u2="ò" k="41" /> +<hkern u1="W" u2="ë" k="41" /> +<hkern u1="W" u2="ê" k="41" /> +<hkern u1="W" u2="é" k="41" /> +<hkern u1="W" u2="è" k="41" /> +<hkern u1="W" u2="ç" k="41" /> +<hkern u1="W" u2="æ" k="41" /> +<hkern u1="W" u2="å" k="41" /> +<hkern u1="W" u2="ä" k="41" /> +<hkern u1="W" u2="ã" k="41" /> +<hkern u1="W" u2="â" k="41" /> +<hkern u1="W" u2="á" k="41" /> +<hkern u1="W" u2="à" k="41" /> +<hkern u1="W" u2="Ø" k="20" /> +<hkern u1="W" u2="Ö" k="20" /> +<hkern u1="W" u2="Õ" k="20" /> +<hkern u1="W" u2="Ô" k="20" /> +<hkern u1="W" u2="Ó" k="20" /> +<hkern u1="W" u2="Ò" k="20" /> +<hkern u1="W" u2="Ç" k="20" /> +<hkern u1="W" u2="Å" k="82" /> +<hkern u1="W" u2="Ä" k="82" /> +<hkern u1="W" u2="Ã" k="82" /> +<hkern u1="W" u2="Â" k="82" /> +<hkern u1="W" u2="Á" k="82" /> +<hkern u1="W" u2="À" k="82" /> +<hkern u1="W" u2="u" k="20" /> +<hkern u1="W" u2="s" k="20" /> +<hkern u1="W" u2="r" k="20" /> +<hkern u1="W" u2="q" k="41" /> +<hkern 
u1="W" u2="p" k="20" /> +<hkern u1="W" u2="o" k="41" /> +<hkern u1="W" u2="n" k="20" /> +<hkern u1="W" u2="m" k="20" /> +<hkern u1="W" u2="g" k="20" /> +<hkern u1="W" u2="e" k="41" /> +<hkern u1="W" u2="d" k="41" /> +<hkern u1="W" u2="c" k="41" /> +<hkern u1="W" u2="a" k="41" /> +<hkern u1="W" u2="Q" k="20" /> +<hkern u1="W" u2="O" k="20" /> +<hkern u1="W" u2="G" k="20" /> +<hkern u1="W" u2="C" k="20" /> +<hkern u1="W" u2="A" k="82" /> +<hkern u1="W" u2="?" k="-41" /> +<hkern u1="W" u2="." k="102" /> +<hkern u1="W" u2="," k="102" /> +<hkern u1="X" u2="Œ" k="41" /> +<hkern u1="X" u2="Ø" k="41" /> +<hkern u1="X" u2="Ö" k="41" /> +<hkern u1="X" u2="Õ" k="41" /> +<hkern u1="X" u2="Ô" k="41" /> +<hkern u1="X" u2="Ó" k="41" /> +<hkern u1="X" u2="Ò" k="41" /> +<hkern u1="X" u2="Ç" k="41" /> +<hkern u1="X" u2="Q" k="41" /> +<hkern u1="X" u2="O" k="41" /> +<hkern u1="X" u2="G" k="41" /> +<hkern u1="X" u2="C" k="41" /> +<hkern u1="Y" u2="„" k="123" /> +<hkern u1="Y" u2="‚" k="123" /> +<hkern u1="Y" u2="œ" k="102" /> +<hkern u1="Y" u2="Œ" k="41" /> +<hkern u1="Y" u2="ü" k="61" /> +<hkern u1="Y" u2="û" k="61" /> +<hkern u1="Y" u2="ú" k="61" /> +<hkern u1="Y" u2="ù" k="61" /> +<hkern u1="Y" u2="ø" k="102" /> +<hkern u1="Y" u2="ö" k="102" /> +<hkern u1="Y" u2="õ" k="102" /> +<hkern u1="Y" u2="ô" k="102" /> +<hkern u1="Y" u2="ó" k="102" /> +<hkern u1="Y" u2="ò" k="102" /> +<hkern u1="Y" u2="ë" k="102" /> +<hkern u1="Y" u2="ê" k="102" /> +<hkern u1="Y" u2="é" k="102" /> +<hkern u1="Y" u2="è" k="102" /> +<hkern u1="Y" u2="ç" k="102" /> +<hkern u1="Y" u2="æ" k="102" /> +<hkern u1="Y" u2="å" k="102" /> +<hkern u1="Y" u2="ä" k="102" /> +<hkern u1="Y" u2="ã" k="102" /> +<hkern u1="Y" u2="â" k="102" /> +<hkern u1="Y" u2="á" k="102" /> +<hkern u1="Y" u2="à" k="102" /> +<hkern u1="Y" u2="Ø" k="41" /> +<hkern u1="Y" u2="Ö" k="41" /> +<hkern u1="Y" u2="Õ" k="41" /> +<hkern u1="Y" u2="Ô" k="41" /> +<hkern u1="Y" u2="Ó" k="41" /> +<hkern u1="Y" u2="Ò" k="41" /> +<hkern u1="Y" u2="Ç" k="41" /> 
+<hkern u1="Y" u2="Å" k="123" /> +<hkern u1="Y" u2="Ä" k="123" /> +<hkern u1="Y" u2="Ã" k="123" /> +<hkern u1="Y" u2="Â" k="123" /> +<hkern u1="Y" u2="Á" k="123" /> +<hkern u1="Y" u2="À" k="123" /> +<hkern u1="Y" u2="z" k="41" /> +<hkern u1="Y" u2="u" k="61" /> +<hkern u1="Y" u2="s" k="82" /> +<hkern u1="Y" u2="r" k="61" /> +<hkern u1="Y" u2="q" k="102" /> +<hkern u1="Y" u2="p" k="61" /> +<hkern u1="Y" u2="o" k="102" /> +<hkern u1="Y" u2="n" k="61" /> +<hkern u1="Y" u2="m" k="61" /> +<hkern u1="Y" u2="g" k="41" /> +<hkern u1="Y" u2="e" k="102" /> +<hkern u1="Y" u2="d" k="102" /> +<hkern u1="Y" u2="c" k="102" /> +<hkern u1="Y" u2="a" k="102" /> +<hkern u1="Y" u2="Q" k="41" /> +<hkern u1="Y" u2="O" k="41" /> +<hkern u1="Y" u2="G" k="41" /> +<hkern u1="Y" u2="C" k="41" /> +<hkern u1="Y" u2="A" k="123" /> +<hkern u1="Y" u2="?" k="-41" /> +<hkern u1="Y" u2="." k="123" /> +<hkern u1="Y" u2="," k="123" /> +<hkern u1="Z" u2="Œ" k="20" /> +<hkern u1="Z" u2="Ø" k="20" /> +<hkern u1="Z" u2="Ö" k="20" /> +<hkern u1="Z" u2="Õ" k="20" /> +<hkern u1="Z" u2="Ô" k="20" /> +<hkern u1="Z" u2="Ó" k="20" /> +<hkern u1="Z" u2="Ò" k="20" /> +<hkern u1="Z" u2="Ç" k="20" /> +<hkern u1="Z" u2="Q" k="20" /> +<hkern u1="Z" u2="O" k="20" /> +<hkern u1="Z" u2="G" k="20" /> +<hkern u1="Z" u2="C" k="20" /> +<hkern u1="[" u2="J" k="-184" /> +<hkern u1="a" u2="”" k="20" /> +<hkern u1="a" u2="’" k="20" /> +<hkern u1="a" u2="'" k="20" /> +<hkern u1="a" u2=""" k="20" /> +<hkern u1="b" u2="”" k="20" /> +<hkern u1="b" u2="’" k="20" /> +<hkern u1="b" u2="ý" k="41" /> +<hkern u1="b" u2="z" k="20" /> +<hkern u1="b" u2="y" k="41" /> +<hkern u1="b" u2="x" k="41" /> +<hkern u1="b" u2="w" k="41" /> +<hkern u1="b" u2="v" k="41" /> +<hkern u1="b" u2="'" k="20" /> +<hkern u1="b" u2=""" k="20" /> +<hkern u1="c" u2="”" k="-41" /> +<hkern u1="c" u2="’" k="-41" /> +<hkern u1="c" u2="'" k="-41" /> +<hkern u1="c" u2=""" k="-41" /> +<hkern u1="e" u2="”" k="20" /> +<hkern u1="e" u2="’" k="20" /> +<hkern u1="e" u2="ý" 
k="41" /> +<hkern u1="e" u2="z" k="20" /> +<hkern u1="e" u2="y" k="41" /> +<hkern u1="e" u2="x" k="41" /> +<hkern u1="e" u2="w" k="41" /> +<hkern u1="e" u2="v" k="41" /> +<hkern u1="e" u2="'" k="20" /> +<hkern u1="e" u2=""" k="20" /> +<hkern u1="f" u2="”" k="-123" /> +<hkern u1="f" u2="’" k="-123" /> +<hkern u1="f" u2="'" k="-123" /> +<hkern u1="f" u2=""" k="-123" /> +<hkern u1="h" u2="”" k="20" /> +<hkern u1="h" u2="’" k="20" /> +<hkern u1="h" u2="'" k="20" /> +<hkern u1="h" u2=""" k="20" /> +<hkern u1="k" u2="œ" k="41" /> +<hkern u1="k" u2="ø" k="41" /> +<hkern u1="k" u2="ö" k="41" /> +<hkern u1="k" u2="õ" k="41" /> +<hkern u1="k" u2="ô" k="41" /> +<hkern u1="k" u2="ó" k="41" /> +<hkern u1="k" u2="ò" k="41" /> +<hkern u1="k" u2="ë" k="41" /> +<hkern u1="k" u2="ê" k="41" /> +<hkern u1="k" u2="é" k="41" /> +<hkern u1="k" u2="è" k="41" /> +<hkern u1="k" u2="ç" k="41" /> +<hkern u1="k" u2="à" k="41" /> +<hkern u1="k" u2="q" k="41" /> +<hkern u1="k" u2="o" k="41" /> +<hkern u1="k" u2="e" k="41" /> +<hkern u1="k" u2="d" k="41" /> +<hkern u1="k" u2="c" k="41" /> +<hkern u1="m" u2="”" k="20" /> +<hkern u1="m" u2="’" k="20" /> +<hkern u1="m" u2="'" k="20" /> +<hkern u1="m" u2=""" k="20" /> +<hkern u1="n" u2="”" k="20" /> +<hkern u1="n" u2="’" k="20" /> +<hkern u1="n" u2="'" k="20" /> +<hkern u1="n" u2=""" k="20" /> +<hkern u1="o" u2="”" k="20" /> +<hkern u1="o" u2="’" k="20" /> +<hkern u1="o" u2="ý" k="41" /> +<hkern u1="o" u2="z" k="20" /> +<hkern u1="o" u2="y" k="41" /> +<hkern u1="o" u2="x" k="41" /> +<hkern u1="o" u2="w" k="41" /> +<hkern u1="o" u2="v" k="41" /> +<hkern u1="o" u2="'" k="20" /> +<hkern u1="o" u2=""" k="20" /> +<hkern u1="p" u2="”" k="20" /> +<hkern u1="p" u2="’" k="20" /> +<hkern u1="p" u2="ý" k="41" /> +<hkern u1="p" u2="z" k="20" /> +<hkern u1="p" u2="y" k="41" /> +<hkern u1="p" u2="x" k="41" /> +<hkern u1="p" u2="w" k="41" /> +<hkern u1="p" u2="v" k="41" /> +<hkern u1="p" u2="'" k="20" /> +<hkern u1="p" u2=""" k="20" /> +<hkern u1="r" u2="”" k="-82" 
/> +<hkern u1="r" u2="’" k="-82" /> +<hkern u1="r" u2="œ" k="41" /> +<hkern u1="r" u2="ø" k="41" /> +<hkern u1="r" u2="ö" k="41" /> +<hkern u1="r" u2="õ" k="41" /> +<hkern u1="r" u2="ô" k="41" /> +<hkern u1="r" u2="ó" k="41" /> +<hkern u1="r" u2="ò" k="41" /> +<hkern u1="r" u2="ë" k="41" /> +<hkern u1="r" u2="ê" k="41" /> +<hkern u1="r" u2="é" k="41" /> +<hkern u1="r" u2="è" k="41" /> +<hkern u1="r" u2="ç" k="41" /> +<hkern u1="r" u2="æ" k="41" /> +<hkern u1="r" u2="å" k="41" /> +<hkern u1="r" u2="ä" k="41" /> +<hkern u1="r" u2="ã" k="41" /> +<hkern u1="r" u2="â" k="41" /> +<hkern u1="r" u2="á" k="41" /> +<hkern u1="r" u2="à" k="41" /> +<hkern u1="r" u2="q" k="41" /> +<hkern u1="r" u2="o" k="41" /> +<hkern u1="r" u2="g" k="20" /> +<hkern u1="r" u2="e" k="41" /> +<hkern u1="r" u2="d" k="41" /> +<hkern u1="r" u2="c" k="41" /> +<hkern u1="r" u2="a" k="41" /> +<hkern u1="r" u2="'" k="-82" /> +<hkern u1="r" u2=""" k="-82" /> +<hkern u1="t" u2="”" k="-41" /> +<hkern u1="t" u2="’" k="-41" /> +<hkern u1="t" u2="'" k="-41" /> +<hkern u1="t" u2=""" k="-41" /> +<hkern u1="v" u2="„" k="82" /> +<hkern u1="v" u2="”" k="-82" /> +<hkern u1="v" u2="‚" k="82" /> +<hkern u1="v" u2="’" k="-82" /> +<hkern u1="v" u2="?" k="-41" /> +<hkern u1="v" u2="." k="82" /> +<hkern u1="v" u2="," k="82" /> +<hkern u1="v" u2="'" k="-82" /> +<hkern u1="v" u2=""" k="-82" /> +<hkern u1="w" u2="„" k="82" /> +<hkern u1="w" u2="”" k="-82" /> +<hkern u1="w" u2="‚" k="82" /> +<hkern u1="w" u2="’" k="-82" /> +<hkern u1="w" u2="?" k="-41" /> +<hkern u1="w" u2="." 
k="82" /> +<hkern u1="w" u2="," k="82" /> +<hkern u1="w" u2="'" k="-82" /> +<hkern u1="w" u2=""" k="-82" /> +<hkern u1="x" u2="œ" k="41" /> +<hkern u1="x" u2="ø" k="41" /> +<hkern u1="x" u2="ö" k="41" /> +<hkern u1="x" u2="õ" k="41" /> +<hkern u1="x" u2="ô" k="41" /> +<hkern u1="x" u2="ó" k="41" /> +<hkern u1="x" u2="ò" k="41" /> +<hkern u1="x" u2="ë" k="41" /> +<hkern u1="x" u2="ê" k="41" /> +<hkern u1="x" u2="é" k="41" /> +<hkern u1="x" u2="è" k="41" /> +<hkern u1="x" u2="ç" k="41" /> +<hkern u1="x" u2="à" k="41" /> +<hkern u1="x" u2="q" k="41" /> +<hkern u1="x" u2="o" k="41" /> +<hkern u1="x" u2="e" k="41" /> +<hkern u1="x" u2="d" k="41" /> +<hkern u1="x" u2="c" k="41" /> +<hkern u1="y" u2="„" k="82" /> +<hkern u1="y" u2="”" k="-82" /> +<hkern u1="y" u2="‚" k="82" /> +<hkern u1="y" u2="’" k="-82" /> +<hkern u1="y" u2="?" k="-41" /> +<hkern u1="y" u2="." k="82" /> +<hkern u1="y" u2="," k="82" /> +<hkern u1="y" u2="'" k="-82" /> +<hkern u1="y" u2=""" k="-82" /> +<hkern u1="{" u2="J" k="-184" /> +<hkern u1="À" u2="”" k="143" /> +<hkern u1="À" u2="’" k="143" /> +<hkern u1="À" u2="Ÿ" k="123" /> +<hkern u1="À" u2="Œ" k="41" /> +<hkern u1="À" u2="Ý" k="123" /> +<hkern u1="À" u2="Ø" k="41" /> +<hkern u1="À" u2="Ö" k="41" /> +<hkern u1="À" u2="Õ" k="41" /> +<hkern u1="À" u2="Ô" k="41" /> +<hkern u1="À" u2="Ó" k="41" /> +<hkern u1="À" u2="Ò" k="41" /> +<hkern u1="À" u2="Ç" k="41" /> +<hkern u1="À" u2="Y" k="123" /> +<hkern u1="À" u2="W" k="82" /> +<hkern u1="À" u2="V" k="82" /> +<hkern u1="À" u2="T" k="143" /> +<hkern u1="À" u2="Q" k="41" /> +<hkern u1="À" u2="O" k="41" /> +<hkern u1="À" u2="J" k="-266" /> +<hkern u1="À" u2="G" k="41" /> +<hkern u1="À" u2="C" k="41" /> +<hkern u1="À" u2="'" k="143" /> +<hkern u1="À" u2=""" k="143" /> +<hkern u1="Á" u2="”" k="143" /> +<hkern u1="Á" u2="’" k="143" /> +<hkern u1="Á" u2="Ÿ" k="123" /> +<hkern u1="Á" u2="Œ" k="41" /> +<hkern u1="Á" u2="Ý" k="123" /> +<hkern u1="Á" u2="Ø" k="41" /> +<hkern u1="Á" u2="Ö" k="41" /> +<hkern u1="Á" 
u2="Õ" k="41" /> +<hkern u1="Á" u2="Ô" k="41" /> +<hkern u1="Á" u2="Ó" k="41" /> +<hkern u1="Á" u2="Ò" k="41" /> +<hkern u1="Á" u2="Ç" k="41" /> +<hkern u1="Á" u2="Y" k="123" /> +<hkern u1="Á" u2="W" k="82" /> +<hkern u1="Á" u2="V" k="82" /> +<hkern u1="Á" u2="T" k="143" /> +<hkern u1="Á" u2="Q" k="41" /> +<hkern u1="Á" u2="O" k="41" /> +<hkern u1="Á" u2="J" k="-266" /> +<hkern u1="Á" u2="G" k="41" /> +<hkern u1="Á" u2="C" k="41" /> +<hkern u1="Á" u2="'" k="143" /> +<hkern u1="Á" u2=""" k="143" /> +<hkern u1="Â" u2="”" k="143" /> +<hkern u1="Â" u2="’" k="143" /> +<hkern u1="Â" u2="Ÿ" k="123" /> +<hkern u1="Â" u2="Œ" k="41" /> +<hkern u1="Â" u2="Ý" k="123" /> +<hkern u1="Â" u2="Ø" k="41" /> +<hkern u1="Â" u2="Ö" k="41" /> +<hkern u1="Â" u2="Õ" k="41" /> +<hkern u1="Â" u2="Ô" k="41" /> +<hkern u1="Â" u2="Ó" k="41" /> +<hkern u1="Â" u2="Ò" k="41" /> +<hkern u1="Â" u2="Ç" k="41" /> +<hkern u1="Â" u2="Y" k="123" /> +<hkern u1="Â" u2="W" k="82" /> +<hkern u1="Â" u2="V" k="82" /> +<hkern u1="Â" u2="T" k="143" /> +<hkern u1="Â" u2="Q" k="41" /> +<hkern u1="Â" u2="O" k="41" /> +<hkern u1="Â" u2="J" k="-266" /> +<hkern u1="Â" u2="G" k="41" /> +<hkern u1="Â" u2="C" k="41" /> +<hkern u1="Â" u2="'" k="143" /> +<hkern u1="Â" u2=""" k="143" /> +<hkern u1="Ã" u2="”" k="143" /> +<hkern u1="Ã" u2="’" k="143" /> +<hkern u1="Ã" u2="Ÿ" k="123" /> +<hkern u1="Ã" u2="Œ" k="41" /> +<hkern u1="Ã" u2="Ý" k="123" /> +<hkern u1="Ã" u2="Ø" k="41" /> +<hkern u1="Ã" u2="Ö" k="41" /> +<hkern u1="Ã" u2="Õ" k="41" /> +<hkern u1="Ã" u2="Ô" k="41" /> +<hkern u1="Ã" u2="Ó" k="41" /> +<hkern u1="Ã" u2="Ò" k="41" /> +<hkern u1="Ã" u2="Ç" k="41" /> +<hkern u1="Ã" u2="Y" k="123" /> +<hkern u1="Ã" u2="W" k="82" /> +<hkern u1="Ã" u2="V" k="82" /> +<hkern u1="Ã" u2="T" k="143" /> +<hkern u1="Ã" u2="Q" k="41" /> +<hkern u1="Ã" u2="O" k="41" /> +<hkern u1="Ã" u2="J" k="-266" /> +<hkern u1="Ã" u2="G" k="41" /> +<hkern u1="Ã" u2="C" k="41" /> +<hkern u1="Ã" u2="'" k="143" /> +<hkern u1="Ã" u2=""" k="143" /> 
+<hkern u1="Ä" u2="”" k="143" /> +<hkern u1="Ä" u2="’" k="143" /> +<hkern u1="Ä" u2="Ÿ" k="123" /> +<hkern u1="Ä" u2="Œ" k="41" /> +<hkern u1="Ä" u2="Ý" k="123" /> +<hkern u1="Ä" u2="Ø" k="41" /> +<hkern u1="Ä" u2="Ö" k="41" /> +<hkern u1="Ä" u2="Õ" k="41" /> +<hkern u1="Ä" u2="Ô" k="41" /> +<hkern u1="Ä" u2="Ó" k="41" /> +<hkern u1="Ä" u2="Ò" k="41" /> +<hkern u1="Ä" u2="Ç" k="41" /> +<hkern u1="Ä" u2="Y" k="123" /> +<hkern u1="Ä" u2="W" k="82" /> +<hkern u1="Ä" u2="V" k="82" /> +<hkern u1="Ä" u2="T" k="143" /> +<hkern u1="Ä" u2="Q" k="41" /> +<hkern u1="Ä" u2="O" k="41" /> +<hkern u1="Ä" u2="J" k="-266" /> +<hkern u1="Ä" u2="G" k="41" /> +<hkern u1="Ä" u2="C" k="41" /> +<hkern u1="Ä" u2="'" k="143" /> +<hkern u1="Ä" u2=""" k="143" /> +<hkern u1="Å" u2="”" k="143" /> +<hkern u1="Å" u2="’" k="143" /> +<hkern u1="Å" u2="Ÿ" k="123" /> +<hkern u1="Å" u2="Œ" k="41" /> +<hkern u1="Å" u2="Ý" k="123" /> +<hkern u1="Å" u2="Ø" k="41" /> +<hkern u1="Å" u2="Ö" k="41" /> +<hkern u1="Å" u2="Õ" k="41" /> +<hkern u1="Å" u2="Ô" k="41" /> +<hkern u1="Å" u2="Ó" k="41" /> +<hkern u1="Å" u2="Ò" k="41" /> +<hkern u1="Å" u2="Ç" k="41" /> +<hkern u1="Å" u2="Y" k="123" /> +<hkern u1="Å" u2="W" k="82" /> +<hkern u1="Å" u2="V" k="82" /> +<hkern u1="Å" u2="T" k="143" /> +<hkern u1="Å" u2="Q" k="41" /> +<hkern u1="Å" u2="O" k="41" /> +<hkern u1="Å" u2="J" k="-266" /> +<hkern u1="Å" u2="G" k="41" /> +<hkern u1="Å" u2="C" k="41" /> +<hkern u1="Å" u2="'" k="143" /> +<hkern u1="Å" u2=""" k="143" /> +<hkern u1="Æ" u2="J" k="-123" /> +<hkern u1="Ç" u2="Œ" k="41" /> +<hkern u1="Ç" u2="Ø" k="41" /> +<hkern u1="Ç" u2="Ö" k="41" /> +<hkern u1="Ç" u2="Õ" k="41" /> +<hkern u1="Ç" u2="Ô" k="41" /> +<hkern u1="Ç" u2="Ó" k="41" /> +<hkern u1="Ç" u2="Ò" k="41" /> +<hkern u1="Ç" u2="Ç" k="41" /> +<hkern u1="Ç" u2="Q" k="41" /> +<hkern u1="Ç" u2="O" k="41" /> +<hkern u1="Ç" u2="G" k="41" /> +<hkern u1="Ç" u2="C" k="41" /> +<hkern u1="È" u2="J" k="-123" /> +<hkern u1="É" u2="J" k="-123" /> +<hkern u1="Ê" u2="J" 
k="-123" /> +<hkern u1="Ë" u2="J" k="-123" /> +<hkern u1="Ð" u2="„" k="82" /> +<hkern u1="Ð" u2="‚" k="82" /> +<hkern u1="Ð" u2="Ÿ" k="20" /> +<hkern u1="Ð" u2="Ý" k="20" /> +<hkern u1="Ð" u2="Å" k="41" /> +<hkern u1="Ð" u2="Ä" k="41" /> +<hkern u1="Ð" u2="Ã" k="41" /> +<hkern u1="Ð" u2="Â" k="41" /> +<hkern u1="Ð" u2="Á" k="41" /> +<hkern u1="Ð" u2="À" k="41" /> +<hkern u1="Ð" u2="Z" k="20" /> +<hkern u1="Ð" u2="Y" k="20" /> +<hkern u1="Ð" u2="X" k="41" /> +<hkern u1="Ð" u2="W" k="20" /> +<hkern u1="Ð" u2="V" k="20" /> +<hkern u1="Ð" u2="T" k="61" /> +<hkern u1="Ð" u2="A" k="41" /> +<hkern u1="Ð" u2="." k="82" /> +<hkern u1="Ð" u2="," k="82" /> +<hkern u1="Ò" u2="„" k="82" /> +<hkern u1="Ò" u2="‚" k="82" /> +<hkern u1="Ò" u2="Ÿ" k="20" /> +<hkern u1="Ò" u2="Ý" k="20" /> +<hkern u1="Ò" u2="Å" k="41" /> +<hkern u1="Ò" u2="Ä" k="41" /> +<hkern u1="Ò" u2="Ã" k="41" /> +<hkern u1="Ò" u2="Â" k="41" /> +<hkern u1="Ò" u2="Á" k="41" /> +<hkern u1="Ò" u2="À" k="41" /> +<hkern u1="Ò" u2="Z" k="20" /> +<hkern u1="Ò" u2="Y" k="20" /> +<hkern u1="Ò" u2="X" k="41" /> +<hkern u1="Ò" u2="W" k="20" /> +<hkern u1="Ò" u2="V" k="20" /> +<hkern u1="Ò" u2="T" k="61" /> +<hkern u1="Ò" u2="A" k="41" /> +<hkern u1="Ò" u2="." k="82" /> +<hkern u1="Ò" u2="," k="82" /> +<hkern u1="Ó" u2="„" k="82" /> +<hkern u1="Ó" u2="‚" k="82" /> +<hkern u1="Ó" u2="Ÿ" k="20" /> +<hkern u1="Ó" u2="Ý" k="20" /> +<hkern u1="Ó" u2="Å" k="41" /> +<hkern u1="Ó" u2="Ä" k="41" /> +<hkern u1="Ó" u2="Ã" k="41" /> +<hkern u1="Ó" u2="Â" k="41" /> +<hkern u1="Ó" u2="Á" k="41" /> +<hkern u1="Ó" u2="À" k="41" /> +<hkern u1="Ó" u2="Z" k="20" /> +<hkern u1="Ó" u2="Y" k="20" /> +<hkern u1="Ó" u2="X" k="41" /> +<hkern u1="Ó" u2="W" k="20" /> +<hkern u1="Ó" u2="V" k="20" /> +<hkern u1="Ó" u2="T" k="61" /> +<hkern u1="Ó" u2="A" k="41" /> +<hkern u1="Ó" u2="." 
k="82" /> +<hkern u1="Ó" u2="," k="82" /> +<hkern u1="Ô" u2="„" k="82" /> +<hkern u1="Ô" u2="‚" k="82" /> +<hkern u1="Ô" u2="Ÿ" k="20" /> +<hkern u1="Ô" u2="Ý" k="20" /> +<hkern u1="Ô" u2="Å" k="41" /> +<hkern u1="Ô" u2="Ä" k="41" /> +<hkern u1="Ô" u2="Ã" k="41" /> +<hkern u1="Ô" u2="Â" k="41" /> +<hkern u1="Ô" u2="Á" k="41" /> +<hkern u1="Ô" u2="À" k="41" /> +<hkern u1="Ô" u2="Z" k="20" /> +<hkern u1="Ô" u2="Y" k="20" /> +<hkern u1="Ô" u2="X" k="41" /> +<hkern u1="Ô" u2="W" k="20" /> +<hkern u1="Ô" u2="V" k="20" /> +<hkern u1="Ô" u2="T" k="61" /> +<hkern u1="Ô" u2="A" k="41" /> +<hkern u1="Ô" u2="." k="82" /> +<hkern u1="Ô" u2="," k="82" /> +<hkern u1="Õ" u2="„" k="82" /> +<hkern u1="Õ" u2="‚" k="82" /> +<hkern u1="Õ" u2="Ÿ" k="20" /> +<hkern u1="Õ" u2="Ý" k="20" /> +<hkern u1="Õ" u2="Å" k="41" /> +<hkern u1="Õ" u2="Ä" k="41" /> +<hkern u1="Õ" u2="Ã" k="41" /> +<hkern u1="Õ" u2="Â" k="41" /> +<hkern u1="Õ" u2="Á" k="41" /> +<hkern u1="Õ" u2="À" k="41" /> +<hkern u1="Õ" u2="Z" k="20" /> +<hkern u1="Õ" u2="Y" k="20" /> +<hkern u1="Õ" u2="X" k="41" /> +<hkern u1="Õ" u2="W" k="20" /> +<hkern u1="Õ" u2="V" k="20" /> +<hkern u1="Õ" u2="T" k="61" /> +<hkern u1="Õ" u2="A" k="41" /> +<hkern u1="Õ" u2="." k="82" /> +<hkern u1="Õ" u2="," k="82" /> +<hkern u1="Ö" u2="„" k="82" /> +<hkern u1="Ö" u2="‚" k="82" /> +<hkern u1="Ö" u2="Ÿ" k="20" /> +<hkern u1="Ö" u2="Ý" k="20" /> +<hkern u1="Ö" u2="Å" k="41" /> +<hkern u1="Ö" u2="Ä" k="41" /> +<hkern u1="Ö" u2="Ã" k="41" /> +<hkern u1="Ö" u2="Â" k="41" /> +<hkern u1="Ö" u2="Á" k="41" /> +<hkern u1="Ö" u2="À" k="41" /> +<hkern u1="Ö" u2="Z" k="20" /> +<hkern u1="Ö" u2="Y" k="20" /> +<hkern u1="Ö" u2="X" k="41" /> +<hkern u1="Ö" u2="W" k="20" /> +<hkern u1="Ö" u2="V" k="20" /> +<hkern u1="Ö" u2="T" k="61" /> +<hkern u1="Ö" u2="A" k="41" /> +<hkern u1="Ö" u2="." 
k="82" /> +<hkern u1="Ö" u2="," k="82" /> +<hkern u1="Ø" u2="„" k="82" /> +<hkern u1="Ø" u2="‚" k="82" /> +<hkern u1="Ø" u2="Ÿ" k="20" /> +<hkern u1="Ø" u2="Ý" k="20" /> +<hkern u1="Ø" u2="Å" k="41" /> +<hkern u1="Ø" u2="Ä" k="41" /> +<hkern u1="Ø" u2="Ã" k="41" /> +<hkern u1="Ø" u2="Â" k="41" /> +<hkern u1="Ø" u2="Á" k="41" /> +<hkern u1="Ø" u2="À" k="41" /> +<hkern u1="Ø" u2="Z" k="20" /> +<hkern u1="Ø" u2="Y" k="20" /> +<hkern u1="Ø" u2="X" k="41" /> +<hkern u1="Ø" u2="W" k="20" /> +<hkern u1="Ø" u2="V" k="20" /> +<hkern u1="Ø" u2="T" k="61" /> +<hkern u1="Ø" u2="A" k="41" /> +<hkern u1="Ø" u2="." k="82" /> +<hkern u1="Ø" u2="," k="82" /> +<hkern u1="Ù" u2="„" k="41" /> +<hkern u1="Ù" u2="‚" k="41" /> +<hkern u1="Ù" u2="Å" k="20" /> +<hkern u1="Ù" u2="Ä" k="20" /> +<hkern u1="Ù" u2="Ã" k="20" /> +<hkern u1="Ù" u2="Â" k="20" /> +<hkern u1="Ù" u2="Á" k="20" /> +<hkern u1="Ù" u2="À" k="20" /> +<hkern u1="Ù" u2="A" k="20" /> +<hkern u1="Ù" u2="." k="41" /> +<hkern u1="Ù" u2="," k="41" /> +<hkern u1="Ú" u2="„" k="41" /> +<hkern u1="Ú" u2="‚" k="41" /> +<hkern u1="Ú" u2="Å" k="20" /> +<hkern u1="Ú" u2="Ä" k="20" /> +<hkern u1="Ú" u2="Ã" k="20" /> +<hkern u1="Ú" u2="Â" k="20" /> +<hkern u1="Ú" u2="Á" k="20" /> +<hkern u1="Ú" u2="À" k="20" /> +<hkern u1="Ú" u2="A" k="20" /> +<hkern u1="Ú" u2="." k="41" /> +<hkern u1="Ú" u2="," k="41" /> +<hkern u1="Û" u2="„" k="41" /> +<hkern u1="Û" u2="‚" k="41" /> +<hkern u1="Û" u2="Å" k="20" /> +<hkern u1="Û" u2="Ä" k="20" /> +<hkern u1="Û" u2="Ã" k="20" /> +<hkern u1="Û" u2="Â" k="20" /> +<hkern u1="Û" u2="Á" k="20" /> +<hkern u1="Û" u2="À" k="20" /> +<hkern u1="Û" u2="A" k="20" /> +<hkern u1="Û" u2="." 
k="41" /> +<hkern u1="Û" u2="," k="41" /> +<hkern u1="Ü" u2="„" k="41" /> +<hkern u1="Ü" u2="‚" k="41" /> +<hkern u1="Ü" u2="Å" k="20" /> +<hkern u1="Ü" u2="Ä" k="20" /> +<hkern u1="Ü" u2="Ã" k="20" /> +<hkern u1="Ü" u2="Â" k="20" /> +<hkern u1="Ü" u2="Á" k="20" /> +<hkern u1="Ü" u2="À" k="20" /> +<hkern u1="Ü" u2="A" k="20" /> +<hkern u1="Ü" u2="." k="41" /> +<hkern u1="Ü" u2="," k="41" /> +<hkern u1="Ý" u2="„" k="123" /> +<hkern u1="Ý" u2="‚" k="123" /> +<hkern u1="Ý" u2="œ" k="102" /> +<hkern u1="Ý" u2="Œ" k="41" /> +<hkern u1="Ý" u2="ü" k="61" /> +<hkern u1="Ý" u2="û" k="61" /> +<hkern u1="Ý" u2="ú" k="61" /> +<hkern u1="Ý" u2="ù" k="61" /> +<hkern u1="Ý" u2="ø" k="102" /> +<hkern u1="Ý" u2="ö" k="102" /> +<hkern u1="Ý" u2="õ" k="102" /> +<hkern u1="Ý" u2="ô" k="102" /> +<hkern u1="Ý" u2="ó" k="102" /> +<hkern u1="Ý" u2="ò" k="102" /> +<hkern u1="Ý" u2="ë" k="102" /> +<hkern u1="Ý" u2="ê" k="102" /> +<hkern u1="Ý" u2="é" k="102" /> +<hkern u1="Ý" u2="è" k="102" /> +<hkern u1="Ý" u2="ç" k="102" /> +<hkern u1="Ý" u2="æ" k="102" /> +<hkern u1="Ý" u2="å" k="102" /> +<hkern u1="Ý" u2="ä" k="102" /> +<hkern u1="Ý" u2="ã" k="102" /> +<hkern u1="Ý" u2="â" k="102" /> +<hkern u1="Ý" u2="á" k="102" /> +<hkern u1="Ý" u2="à" k="102" /> +<hkern u1="Ý" u2="Ø" k="41" /> +<hkern u1="Ý" u2="Ö" k="41" /> +<hkern u1="Ý" u2="Õ" k="41" /> +<hkern u1="Ý" u2="Ô" k="41" /> +<hkern u1="Ý" u2="Ó" k="41" /> +<hkern u1="Ý" u2="Ò" k="41" /> +<hkern u1="Ý" u2="Ç" k="41" /> +<hkern u1="Ý" u2="Å" k="123" /> +<hkern u1="Ý" u2="Ä" k="123" /> +<hkern u1="Ý" u2="Ã" k="123" /> +<hkern u1="Ý" u2="Â" k="123" /> +<hkern u1="Ý" u2="Á" k="123" /> +<hkern u1="Ý" u2="À" k="123" /> +<hkern u1="Ý" u2="z" k="41" /> +<hkern u1="Ý" u2="u" k="61" /> +<hkern u1="Ý" u2="s" k="82" /> +<hkern u1="Ý" u2="r" k="61" /> +<hkern u1="Ý" u2="q" k="102" /> +<hkern u1="Ý" u2="p" k="61" /> +<hkern u1="Ý" u2="o" k="102" /> +<hkern u1="Ý" u2="n" k="61" /> +<hkern u1="Ý" u2="m" k="61" /> +<hkern u1="Ý" u2="g" k="41" /> +<hkern 
u1="Ý" u2="e" k="102" /> +<hkern u1="Ý" u2="d" k="102" /> +<hkern u1="Ý" u2="c" k="102" /> +<hkern u1="Ý" u2="a" k="102" /> +<hkern u1="Ý" u2="Q" k="41" /> +<hkern u1="Ý" u2="O" k="41" /> +<hkern u1="Ý" u2="G" k="41" /> +<hkern u1="Ý" u2="C" k="41" /> +<hkern u1="Ý" u2="A" k="123" /> +<hkern u1="Ý" u2="?" k="-41" /> +<hkern u1="Ý" u2="." k="123" /> +<hkern u1="Ý" u2="," k="123" /> +<hkern u1="Þ" u2="„" k="266" /> +<hkern u1="Þ" u2="‚" k="266" /> +<hkern u1="Þ" u2="Å" k="102" /> +<hkern u1="Þ" u2="Ä" k="102" /> +<hkern u1="Þ" u2="Ã" k="102" /> +<hkern u1="Þ" u2="Â" k="102" /> +<hkern u1="Þ" u2="Á" k="102" /> +<hkern u1="Þ" u2="À" k="102" /> +<hkern u1="Þ" u2="Z" k="20" /> +<hkern u1="Þ" u2="X" k="41" /> +<hkern u1="Þ" u2="A" k="102" /> +<hkern u1="Þ" u2="." k="266" /> +<hkern u1="Þ" u2="," k="266" /> +<hkern u1="à" u2="”" k="20" /> +<hkern u1="à" u2="’" k="20" /> +<hkern u1="à" u2="'" k="20" /> +<hkern u1="à" u2=""" k="20" /> +<hkern u1="á" u2="”" k="20" /> +<hkern u1="á" u2="’" k="20" /> +<hkern u1="á" u2="'" k="20" /> +<hkern u1="á" u2=""" k="20" /> +<hkern u1="â" u2="”" k="20" /> +<hkern u1="â" u2="’" k="20" /> +<hkern u1="â" u2="'" k="20" /> +<hkern u1="â" u2=""" k="20" /> +<hkern u1="ã" u2="”" k="20" /> +<hkern u1="ã" u2="’" k="20" /> +<hkern u1="ã" u2="'" k="20" /> +<hkern u1="ã" u2=""" k="20" /> +<hkern u1="ä" u2="”" k="20" /> +<hkern u1="ä" u2="’" k="20" /> +<hkern u1="ä" u2="'" k="20" /> +<hkern u1="ä" u2=""" k="20" /> +<hkern u1="å" u2="”" k="20" /> +<hkern u1="å" u2="’" k="20" /> +<hkern u1="å" u2="'" k="20" /> +<hkern u1="å" u2=""" k="20" /> +<hkern u1="è" u2="”" k="20" /> +<hkern u1="è" u2="’" k="20" /> +<hkern u1="è" u2="ý" k="41" /> +<hkern u1="è" u2="z" k="20" /> +<hkern u1="è" u2="y" k="41" /> +<hkern u1="è" u2="x" k="41" /> +<hkern u1="è" u2="w" k="41" /> +<hkern u1="è" u2="v" k="41" /> +<hkern u1="è" u2="'" k="20" /> +<hkern u1="è" u2=""" k="20" /> +<hkern u1="é" u2="”" k="20" /> +<hkern u1="é" u2="’" k="20" /> +<hkern u1="é" u2="ý" k="41" /> 
+<hkern u1="é" u2="z" k="20" /> +<hkern u1="é" u2="y" k="41" /> +<hkern u1="é" u2="x" k="41" /> +<hkern u1="é" u2="w" k="41" /> +<hkern u1="é" u2="v" k="41" /> +<hkern u1="é" u2="'" k="20" /> +<hkern u1="é" u2=""" k="20" /> +<hkern u1="ê" u2="”" k="20" /> +<hkern u1="ê" u2="’" k="20" /> +<hkern u1="ê" u2="ý" k="41" /> +<hkern u1="ê" u2="z" k="20" /> +<hkern u1="ê" u2="y" k="41" /> +<hkern u1="ê" u2="x" k="41" /> +<hkern u1="ê" u2="w" k="41" /> +<hkern u1="ê" u2="v" k="41" /> +<hkern u1="ê" u2="'" k="20" /> +<hkern u1="ê" u2=""" k="20" /> +<hkern u1="ë" u2="”" k="20" /> +<hkern u1="ë" u2="’" k="20" /> +<hkern u1="ë" u2="ý" k="41" /> +<hkern u1="ë" u2="z" k="20" /> +<hkern u1="ë" u2="y" k="41" /> +<hkern u1="ë" u2="x" k="41" /> +<hkern u1="ë" u2="w" k="41" /> +<hkern u1="ë" u2="v" k="41" /> +<hkern u1="ë" u2="'" k="20" /> +<hkern u1="ë" u2=""" k="20" /> +<hkern u1="ð" u2="”" k="20" /> +<hkern u1="ð" u2="’" k="20" /> +<hkern u1="ð" u2="ý" k="41" /> +<hkern u1="ð" u2="z" k="20" /> +<hkern u1="ð" u2="y" k="41" /> +<hkern u1="ð" u2="x" k="41" /> +<hkern u1="ð" u2="w" k="41" /> +<hkern u1="ð" u2="v" k="41" /> +<hkern u1="ð" u2="'" k="20" /> +<hkern u1="ð" u2=""" k="20" /> +<hkern u1="ò" u2="”" k="20" /> +<hkern u1="ò" u2="’" k="20" /> +<hkern u1="ò" u2="ý" k="41" /> +<hkern u1="ò" u2="z" k="20" /> +<hkern u1="ò" u2="y" k="41" /> +<hkern u1="ò" u2="x" k="41" /> +<hkern u1="ò" u2="w" k="41" /> +<hkern u1="ò" u2="v" k="41" /> +<hkern u1="ò" u2="'" k="20" /> +<hkern u1="ò" u2=""" k="20" /> +<hkern u1="ó" u2="”" k="20" /> +<hkern u1="ó" u2="’" k="20" /> +<hkern u1="ó" u2="ý" k="41" /> +<hkern u1="ó" u2="z" k="20" /> +<hkern u1="ó" u2="y" k="41" /> +<hkern u1="ó" u2="x" k="41" /> +<hkern u1="ó" u2="w" k="41" /> +<hkern u1="ó" u2="v" k="41" /> +<hkern u1="ó" u2="'" k="20" /> +<hkern u1="ó" u2=""" k="20" /> +<hkern u1="ô" u2="”" k="20" /> +<hkern u1="ô" u2="’" k="20" /> +<hkern u1="ô" u2="ý" k="41" /> +<hkern u1="ô" u2="z" k="20" /> +<hkern u1="ô" u2="y" k="41" /> +<hkern u1="ô" 
u2="x" k="41" /> +<hkern u1="ô" u2="w" k="41" /> +<hkern u1="ô" u2="v" k="41" /> +<hkern u1="ô" u2="'" k="20" /> +<hkern u1="ô" u2=""" k="20" /> +<hkern u1="ö" u2="”" k="41" /> +<hkern u1="ö" u2="’" k="41" /> +<hkern u1="ö" u2="'" k="41" /> +<hkern u1="ö" u2=""" k="41" /> +<hkern u1="ø" u2="”" k="20" /> +<hkern u1="ø" u2="’" k="20" /> +<hkern u1="ø" u2="ý" k="41" /> +<hkern u1="ø" u2="z" k="20" /> +<hkern u1="ø" u2="y" k="41" /> +<hkern u1="ø" u2="x" k="41" /> +<hkern u1="ø" u2="w" k="41" /> +<hkern u1="ø" u2="v" k="41" /> +<hkern u1="ø" u2="'" k="20" /> +<hkern u1="ø" u2=""" k="20" /> +<hkern u1="ý" u2="„" k="82" /> +<hkern u1="ý" u2="”" k="-82" /> +<hkern u1="ý" u2="‚" k="82" /> +<hkern u1="ý" u2="’" k="-82" /> +<hkern u1="ý" u2="?" k="-41" /> +<hkern u1="ý" u2="." k="82" /> +<hkern u1="ý" u2="," k="82" /> +<hkern u1="ý" u2="'" k="-82" /> +<hkern u1="ý" u2=""" k="-82" /> +<hkern u1="þ" u2="”" k="20" /> +<hkern u1="þ" u2="’" k="20" /> +<hkern u1="þ" u2="ý" k="41" /> +<hkern u1="þ" u2="z" k="20" /> +<hkern u1="þ" u2="y" k="41" /> +<hkern u1="þ" u2="x" k="41" /> +<hkern u1="þ" u2="w" k="41" /> +<hkern u1="þ" u2="v" k="41" /> +<hkern u1="þ" u2="'" k="20" /> +<hkern u1="þ" u2=""" k="20" /> +<hkern u1="ÿ" u2="„" k="82" /> +<hkern u1="ÿ" u2="”" k="-82" /> +<hkern u1="ÿ" u2="‚" k="82" /> +<hkern u1="ÿ" u2="’" k="-82" /> +<hkern u1="ÿ" u2="?" k="-41" /> +<hkern u1="ÿ" u2="." 
k="82" /> +<hkern u1="ÿ" u2="," k="82" /> +<hkern u1="ÿ" u2="'" k="-82" /> +<hkern u1="ÿ" u2=""" k="-82" /> +<hkern u1="Œ" u2="J" k="-123" /> +<hkern u1="Ÿ" u2="„" k="123" /> +<hkern u1="Ÿ" u2="‚" k="123" /> +<hkern u1="Ÿ" u2="œ" k="102" /> +<hkern u1="Ÿ" u2="Œ" k="41" /> +<hkern u1="Ÿ" u2="ü" k="61" /> +<hkern u1="Ÿ" u2="û" k="61" /> +<hkern u1="Ÿ" u2="ú" k="61" /> +<hkern u1="Ÿ" u2="ù" k="61" /> +<hkern u1="Ÿ" u2="ø" k="102" /> +<hkern u1="Ÿ" u2="ö" k="102" /> +<hkern u1="Ÿ" u2="õ" k="102" /> +<hkern u1="Ÿ" u2="ô" k="102" /> +<hkern u1="Ÿ" u2="ó" k="102" /> +<hkern u1="Ÿ" u2="ò" k="102" /> +<hkern u1="Ÿ" u2="ë" k="102" /> +<hkern u1="Ÿ" u2="ê" k="102" /> +<hkern u1="Ÿ" u2="é" k="102" /> +<hkern u1="Ÿ" u2="è" k="102" /> +<hkern u1="Ÿ" u2="ç" k="102" /> +<hkern u1="Ÿ" u2="æ" k="102" /> +<hkern u1="Ÿ" u2="å" k="102" /> +<hkern u1="Ÿ" u2="ä" k="102" /> +<hkern u1="Ÿ" u2="ã" k="102" /> +<hkern u1="Ÿ" u2="â" k="102" /> +<hkern u1="Ÿ" u2="á" k="102" /> +<hkern u1="Ÿ" u2="à" k="102" /> +<hkern u1="Ÿ" u2="Ø" k="41" /> +<hkern u1="Ÿ" u2="Ö" k="41" /> +<hkern u1="Ÿ" u2="Õ" k="41" /> +<hkern u1="Ÿ" u2="Ô" k="41" /> +<hkern u1="Ÿ" u2="Ó" k="41" /> +<hkern u1="Ÿ" u2="Ò" k="41" /> +<hkern u1="Ÿ" u2="Ç" k="41" /> +<hkern u1="Ÿ" u2="Å" k="123" /> +<hkern u1="Ÿ" u2="Ä" k="123" /> +<hkern u1="Ÿ" u2="Ã" k="123" /> +<hkern u1="Ÿ" u2="Â" k="123" /> +<hkern u1="Ÿ" u2="Á" k="123" /> +<hkern u1="Ÿ" u2="À" k="123" /> +<hkern u1="Ÿ" u2="z" k="41" /> +<hkern u1="Ÿ" u2="u" k="61" /> +<hkern u1="Ÿ" u2="s" k="82" /> +<hkern u1="Ÿ" u2="r" k="61" /> +<hkern u1="Ÿ" u2="q" k="102" /> +<hkern u1="Ÿ" u2="p" k="61" /> +<hkern u1="Ÿ" u2="o" k="102" /> +<hkern u1="Ÿ" u2="n" k="61" /> +<hkern u1="Ÿ" u2="m" k="61" /> +<hkern u1="Ÿ" u2="g" k="41" /> +<hkern u1="Ÿ" u2="e" k="102" /> +<hkern u1="Ÿ" u2="d" k="102" /> +<hkern u1="Ÿ" u2="c" k="102" /> +<hkern u1="Ÿ" u2="a" k="102" /> +<hkern u1="Ÿ" u2="Q" k="41" /> +<hkern u1="Ÿ" u2="O" k="41" /> +<hkern u1="Ÿ" u2="G" k="41" /> +<hkern u1="Ÿ" u2="C" k="41" /> 
+<hkern u1="Ÿ" u2="A" k="123" /> +<hkern u1="Ÿ" u2="?" k="-41" /> +<hkern u1="Ÿ" u2="." k="123" /> +<hkern u1="Ÿ" u2="," k="123" /> +<hkern u1="–" u2="T" k="82" /> +<hkern u1="—" u2="T" k="82" /> +<hkern u1="‘" u2="Ÿ" k="-20" /> +<hkern u1="‘" u2="œ" k="123" /> +<hkern u1="‘" u2="ü" k="61" /> +<hkern u1="‘" u2="û" k="61" /> +<hkern u1="‘" u2="ú" k="61" /> +<hkern u1="‘" u2="ù" k="61" /> +<hkern u1="‘" u2="ø" k="123" /> +<hkern u1="‘" u2="ö" k="123" /> +<hkern u1="‘" u2="õ" k="123" /> +<hkern u1="‘" u2="ô" k="123" /> +<hkern u1="‘" u2="ó" k="123" /> +<hkern u1="‘" u2="ò" k="123" /> +<hkern u1="‘" u2="ë" k="123" /> +<hkern u1="‘" u2="ê" k="123" /> +<hkern u1="‘" u2="é" k="123" /> +<hkern u1="‘" u2="è" k="123" /> +<hkern u1="‘" u2="ç" k="123" /> +<hkern u1="‘" u2="æ" k="82" /> +<hkern u1="‘" u2="å" k="82" /> +<hkern u1="‘" u2="ä" k="82" /> +<hkern u1="‘" u2="ã" k="82" /> +<hkern u1="‘" u2="â" k="82" /> +<hkern u1="‘" u2="á" k="82" /> +<hkern u1="‘" u2="à" k="123" /> +<hkern u1="‘" u2="Ý" k="-20" /> +<hkern u1="‘" u2="Å" k="143" /> +<hkern u1="‘" u2="Ä" k="143" /> +<hkern u1="‘" u2="Ã" k="143" /> +<hkern u1="‘" u2="Â" k="143" /> +<hkern u1="‘" u2="Á" k="143" /> +<hkern u1="‘" u2="À" k="143" /> +<hkern u1="‘" u2="u" k="61" /> +<hkern u1="‘" u2="s" k="61" /> +<hkern u1="‘" u2="r" k="61" /> +<hkern u1="‘" u2="q" k="123" /> +<hkern u1="‘" u2="p" k="61" /> +<hkern u1="‘" u2="o" k="123" /> +<hkern u1="‘" u2="n" k="61" /> +<hkern u1="‘" u2="m" k="61" /> +<hkern u1="‘" u2="g" k="61" /> +<hkern u1="‘" u2="e" k="123" /> +<hkern u1="‘" u2="d" k="123" /> +<hkern u1="‘" u2="c" k="123" /> +<hkern u1="‘" u2="a" k="82" /> +<hkern u1="‘" u2="Y" k="-20" /> +<hkern u1="‘" u2="W" k="-41" /> +<hkern u1="‘" u2="V" k="-41" /> +<hkern u1="‘" u2="T" k="-41" /> +<hkern u1="‘" u2="A" k="143" /> +<hkern u1="’" u2="Ÿ" k="-20" /> +<hkern u1="’" u2="œ" k="123" /> +<hkern u1="’" u2="ü" k="61" /> +<hkern u1="’" u2="û" k="61" /> +<hkern u1="’" u2="ú" k="61" /> +<hkern u1="’" u2="ù" k="61" /> +<hkern 
u1="’" u2="ø" k="123" /> +<hkern u1="’" u2="ö" k="123" /> +<hkern u1="’" u2="õ" k="123" /> +<hkern u1="’" u2="ô" k="123" /> +<hkern u1="’" u2="ó" k="123" /> +<hkern u1="’" u2="ò" k="123" /> +<hkern u1="’" u2="ë" k="123" /> +<hkern u1="’" u2="ê" k="123" /> +<hkern u1="’" u2="é" k="123" /> +<hkern u1="’" u2="è" k="123" /> +<hkern u1="’" u2="ç" k="123" /> +<hkern u1="’" u2="æ" k="82" /> +<hkern u1="’" u2="å" k="82" /> +<hkern u1="’" u2="ä" k="82" /> +<hkern u1="’" u2="ã" k="82" /> +<hkern u1="’" u2="â" k="82" /> +<hkern u1="’" u2="á" k="82" /> +<hkern u1="’" u2="à" k="123" /> +<hkern u1="’" u2="Ý" k="-20" /> +<hkern u1="’" u2="Å" k="143" /> +<hkern u1="’" u2="Ä" k="143" /> +<hkern u1="’" u2="Ã" k="143" /> +<hkern u1="’" u2="Â" k="143" /> +<hkern u1="’" u2="Á" k="143" /> +<hkern u1="’" u2="À" k="143" /> +<hkern u1="’" u2="u" k="61" /> +<hkern u1="’" u2="s" k="61" /> +<hkern u1="’" u2="r" k="61" /> +<hkern u1="’" u2="q" k="123" /> +<hkern u1="’" u2="p" k="61" /> +<hkern u1="’" u2="o" k="123" /> +<hkern u1="’" u2="n" k="61" /> +<hkern u1="’" u2="m" k="61" /> +<hkern u1="’" u2="g" k="61" /> +<hkern u1="’" u2="e" k="123" /> +<hkern u1="’" u2="d" k="123" /> +<hkern u1="’" u2="c" k="123" /> +<hkern u1="’" u2="a" k="82" /> +<hkern u1="’" u2="Y" k="-20" /> +<hkern u1="’" u2="W" k="-41" /> +<hkern u1="’" u2="V" k="-41" /> +<hkern u1="’" u2="T" k="-41" /> +<hkern u1="’" u2="A" k="143" /> +<hkern u1="‚" u2="Ÿ" k="123" /> +<hkern u1="‚" u2="Œ" k="102" /> +<hkern u1="‚" u2="Ý" k="123" /> +<hkern u1="‚" u2="Ü" k="41" /> +<hkern u1="‚" u2="Û" k="41" /> +<hkern u1="‚" u2="Ú" k="41" /> +<hkern u1="‚" u2="Ù" k="41" /> +<hkern u1="‚" u2="Ø" k="102" /> +<hkern u1="‚" u2="Ö" k="102" /> +<hkern u1="‚" u2="Õ" k="102" /> +<hkern u1="‚" u2="Ô" k="102" /> +<hkern u1="‚" u2="Ó" k="102" /> +<hkern u1="‚" u2="Ò" k="102" /> +<hkern u1="‚" u2="Ç" k="102" /> +<hkern u1="‚" u2="Y" k="123" /> +<hkern u1="‚" u2="W" k="123" /> +<hkern u1="‚" u2="V" k="123" /> +<hkern u1="‚" u2="U" k="41" /> +<hkern 
u1="‚" u2="T" k="143" /> +<hkern u1="‚" u2="Q" k="102" /> +<hkern u1="‚" u2="O" k="102" /> +<hkern u1="‚" u2="G" k="102" /> +<hkern u1="‚" u2="C" k="102" /> +<hkern u1="“" u2="Ÿ" k="-20" /> +<hkern u1="“" u2="œ" k="123" /> +<hkern u1="“" u2="ü" k="61" /> +<hkern u1="“" u2="û" k="61" /> +<hkern u1="“" u2="ú" k="61" /> +<hkern u1="“" u2="ù" k="61" /> +<hkern u1="“" u2="ø" k="123" /> +<hkern u1="“" u2="ö" k="123" /> +<hkern u1="“" u2="õ" k="123" /> +<hkern u1="“" u2="ô" k="123" /> +<hkern u1="“" u2="ó" k="123" /> +<hkern u1="“" u2="ò" k="123" /> +<hkern u1="“" u2="ë" k="123" /> +<hkern u1="“" u2="ê" k="123" /> +<hkern u1="“" u2="é" k="123" /> +<hkern u1="“" u2="è" k="123" /> +<hkern u1="“" u2="ç" k="123" /> +<hkern u1="“" u2="æ" k="82" /> +<hkern u1="“" u2="å" k="82" /> +<hkern u1="“" u2="ä" k="82" /> +<hkern u1="“" u2="ã" k="82" /> +<hkern u1="“" u2="â" k="82" /> +<hkern u1="“" u2="á" k="82" /> +<hkern u1="“" u2="à" k="123" /> +<hkern u1="“" u2="Ý" k="-20" /> +<hkern u1="“" u2="Å" k="143" /> +<hkern u1="“" u2="Ä" k="143" /> +<hkern u1="“" u2="Ã" k="143" /> +<hkern u1="“" u2="Â" k="143" /> +<hkern u1="“" u2="Á" k="143" /> +<hkern u1="“" u2="À" k="143" /> +<hkern u1="“" u2="u" k="61" /> +<hkern u1="“" u2="s" k="61" /> +<hkern u1="“" u2="r" k="61" /> +<hkern u1="“" u2="q" k="123" /> +<hkern u1="“" u2="p" k="61" /> +<hkern u1="“" u2="o" k="123" /> +<hkern u1="“" u2="n" k="61" /> +<hkern u1="“" u2="m" k="61" /> +<hkern u1="“" u2="g" k="61" /> +<hkern u1="“" u2="e" k="123" /> +<hkern u1="“" u2="d" k="123" /> +<hkern u1="“" u2="c" k="123" /> +<hkern u1="“" u2="a" k="82" /> +<hkern u1="“" u2="Y" k="-20" /> +<hkern u1="“" u2="W" k="-41" /> +<hkern u1="“" u2="V" k="-41" /> +<hkern u1="“" u2="T" k="-41" /> +<hkern u1="“" u2="A" k="143" /> +<hkern u1="„" u2="Ÿ" k="123" /> +<hkern u1="„" u2="Œ" k="102" /> +<hkern u1="„" u2="Ý" k="123" /> +<hkern u1="„" u2="Ü" k="41" /> +<hkern u1="„" u2="Û" k="41" /> +<hkern u1="„" u2="Ú" k="41" /> +<hkern u1="„" u2="Ù" k="41" /> +<hkern u1="„" 
u2="Ø" k="102" /> +<hkern u1="„" u2="Ö" k="102" /> +<hkern u1="„" u2="Õ" k="102" /> +<hkern u1="„" u2="Ô" k="102" /> +<hkern u1="„" u2="Ó" k="102" /> +<hkern u1="„" u2="Ò" k="102" /> +<hkern u1="„" u2="Ç" k="102" /> +<hkern u1="„" u2="Y" k="123" /> +<hkern u1="„" u2="W" k="123" /> +<hkern u1="„" u2="V" k="123" /> +<hkern u1="„" u2="U" k="41" /> +<hkern u1="„" u2="T" k="143" /> +<hkern u1="„" u2="Q" k="102" /> +<hkern u1="„" u2="O" k="102" /> +<hkern u1="„" u2="G" k="102" /> +<hkern u1="„" u2="C" k="102" /> +</font> +</defs></svg> \ No newline at end of file diff --git a/couchpotato/static/fonts/OpenSans-Light-webfont.ttf b/couchpotato/static/fonts/OpenSans-Light-webfont.ttf new file mode 100644 index 0000000000..63af664cde Binary files /dev/null and b/couchpotato/static/fonts/OpenSans-Light-webfont.ttf differ diff --git a/couchpotato/static/fonts/OpenSans-Light-webfont.woff b/couchpotato/static/fonts/OpenSans-Light-webfont.woff new file mode 100644 index 0000000000..e786074813 Binary files /dev/null and b/couchpotato/static/fonts/OpenSans-Light-webfont.woff differ diff --git a/couchpotato/static/fonts/OpenSans-Regular-webfont.eot b/couchpotato/static/fonts/OpenSans-Regular-webfont.eot new file mode 100755 index 0000000000..dd6fd2cb3a Binary files /dev/null and b/couchpotato/static/fonts/OpenSans-Regular-webfont.eot differ diff --git a/couchpotato/static/fonts/OpenSans-Regular-webfont.svg b/couchpotato/static/fonts/OpenSans-Regular-webfont.svg new file mode 100755 index 0000000000..01038bb1c7 --- /dev/null +++ b/couchpotato/static/fonts/OpenSans-Regular-webfont.svg @@ -0,0 +1,146 @@ +<?xml version="1.0" standalone="no"?> +<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd" > +<svg xmlns="http://www.w3.org/2000/svg"> +<metadata> +This is a custom SVG webfont generated by Font Squirrel. 
+Copyright : Digitized data copyright 20102011 Google Corporation +Foundry : Ascender Corporation +Foundry URL : httpwwwascendercorpcom +</metadata> +<defs> +<font id="OpenSansRegular" horiz-adv-x="1171" > +<font-face units-per-em="2048" ascent="1638" descent="-410" /> +<missing-glyph horiz-adv-x="532" /> +<glyph unicode=" " horiz-adv-x="532" /> +<glyph unicode=" " horiz-adv-x="532" /> +<glyph unicode=" " horiz-adv-x="532" /> +<glyph unicode="!" horiz-adv-x="547" d="M152 106q0 136 120 136q58 0 89.5 -35t31.5 -101q0 -64 -32 -99.5t-89 -35.5q-52 0 -86 31.5t-34 103.5zM170 1462h207l-51 -1059h-105z" /> +<glyph unicode=""" horiz-adv-x="821" d="M133 1462h186l-40 -528h-105zM502 1462h186l-41 -528h-104z" /> +<glyph unicode="#" horiz-adv-x="1323" d="M51 430v129h287l68 340h-277v127h299l82 436h139l-82 -436h305l84 436h134l-84 -436h264v-127h-289l-66 -340h283v-129h-307l-84 -430h-137l84 430h-303l-82 -430h-136l80 430h-262zM475 559h303l66 340h-303z" /> +<glyph unicode="$" d="M131 170v156q83 -37 191.5 -60.5t197.5 -23.5v440q-205 65 -287.5 151t-82.5 222q0 131 101.5 215t268.5 102v182h129v-180q184 -5 355 -74l-52 -131q-149 59 -303 70v-434q157 -50 235 -97.5t115 -109t37 -149.5q0 -136 -102 -224.5t-285 -111.5v-232h-129v223 q-112 0 -217 17.5t-172 48.5zM319 1057q0 -76 45 -122t156 -87v387q-99 -16 -150 -62.5t-51 -115.5zM649 252q217 30 217 184q0 72 -44.5 116.5t-172.5 88.5v-389z" /> +<glyph unicode="%" horiz-adv-x="1686" d="M104 1026q0 227 74.5 342t220.5 115q145 0 223 -119t78 -338q0 -228 -76.5 -344.5t-224.5 -116.5q-140 0 -217.5 119t-77.5 342zM242 1026q0 -170 37 -255t120 -85q164 0 164 340q0 338 -164 338q-83 0 -120 -84t-37 -254zM365 0l811 1462h147l-811 -1462h-147zM985 440 q0 227 74.5 342t220.5 115q142 0 221.5 -117.5t79.5 -339.5q0 -227 -76.5 -343.5t-224.5 -116.5q-142 0 -218.5 119t-76.5 341zM1122 440q0 -171 37 -255.5t121 -84.5t124 83.5t40 256.5q0 171 -40 253.5t-124 82.5t-121 -82.5t-37 -253.5z" /> +<glyph unicode="&" horiz-adv-x="1495" d="M113 379q0 130 69.5 230t249.5 202q-85 95 -115.5 144t-48.5 102t-18 
110q0 150 98 234t273 84q162 0 255 -83.5t93 -232.5q0 -107 -68 -197.5t-225 -183.5l407 -391q56 62 89.5 145.5t56.5 182.5h168q-68 -286 -205 -434l299 -291h-229l-185 178q-118 -106 -240 -152 t-272 -46q-215 0 -333.5 106t-118.5 293zM285 383q0 -117 77.5 -185.5t206.5 -68.5q241 0 400 154l-437 424q-111 -68 -157 -112.5t-68 -95.5t-22 -116zM414 1171q0 -69 36 -131.5t123 -150.5q129 75 179.5 138.5t50.5 146.5q0 77 -51.5 125.5t-137.5 48.5q-89 0 -144.5 -48 t-55.5 -129z" /> +<glyph unicode="'" horiz-adv-x="453" d="M133 1462h186l-40 -528h-105z" /> +<glyph unicode="(" horiz-adv-x="606" d="M82 561q0 265 77.5 496t223.5 405h162q-144 -193 -216.5 -424t-72.5 -475q0 -240 74 -469t213 -418h-160q-147 170 -224 397t-77 488z" /> +<glyph unicode=")" horiz-adv-x="606" d="M61 1462h162q147 -175 224 -406.5t77 -494.5t-77.5 -490t-223.5 -395h-160q139 188 213 417.5t74 469.5q0 244 -72.5 475t-216.5 424z" /> +<glyph unicode="*" horiz-adv-x="1130" d="M86 1090l29 182l391 -111l-43 395h194l-43 -395l398 111l26 -182l-381 -31l248 -326l-172 -94l-176 362l-160 -362l-176 94l242 326z" /> +<glyph unicode="+" d="M104 653v138h410v428h139v-428h412v-138h-412v-426h-139v426h-410z" /> +<glyph unicode="," horiz-adv-x="502" d="M63 -264q27 104 59.5 257t45.5 245h182l15 -23q-26 -100 -75 -232.5t-102 -246.5h-125z" /> +<glyph unicode="-" horiz-adv-x="659" d="M84 473v152h491v-152h-491z" /> +<glyph unicode="." 
horiz-adv-x="545" d="M152 106q0 67 30.5 101.5t87.5 34.5q58 0 90.5 -34.5t32.5 -101.5q0 -65 -33 -100t-90 -35q-51 0 -84.5 31.5t-33.5 103.5z" /> +<glyph unicode="/" horiz-adv-x="752" d="M20 0l545 1462h166l-545 -1462h-166z" /> +<glyph unicode="0" d="M102 733q0 382 119 567t363 185q238 0 361.5 -193t123.5 -559q0 -379 -119.5 -566t-365.5 -187q-236 0 -359 191.5t-123 561.5zM270 733q0 -319 75 -464.5t239 -145.5q166 0 240.5 147.5t74.5 462.5t-74.5 461.5t-240.5 146.5q-164 0 -239 -144.5t-75 -463.5z" /> +<glyph unicode="1" d="M188 1163l387 299h140v-1462h-162v1042q0 130 8 246q-21 -21 -47 -44t-238 -195z" /> +<glyph unicode="2" d="M100 0v143l385 387q176 178 232 254t84 148t28 155q0 117 -71 185.5t-197 68.5q-91 0 -172.5 -30t-181.5 -109l-88 113q202 168 440 168q206 0 323 -105.5t117 -283.5q0 -139 -78 -275t-292 -344l-320 -313v-8h752v-154h-961z" /> +<glyph unicode="3" d="M94 59v158q95 -47 202.5 -71.5t203.5 -24.5q379 0 379 297q0 266 -418 266h-144v143h146q171 0 271 75.5t100 209.5q0 107 -73.5 168t-199.5 61q-96 0 -181 -26t-194 -96l-84 112q90 71 207.5 111.5t247.5 40.5q213 0 331 -97.5t118 -267.5q0 -140 -78.5 -229 t-222.5 -119v-8q176 -22 261 -112t85 -236q0 -209 -145 -321.5t-412 -112.5q-116 0 -212.5 17.5t-187.5 61.5z" /> +<glyph unicode="4" d="M43 336v145l694 989h176v-983h217v-151h-217v-336h-159v336h-711zM209 487h545v486q0 143 10 323h-8q-48 -96 -90 -159z" /> +<glyph unicode="5" d="M133 59v160q70 -45 174 -70.5t205 -25.5q176 0 273.5 83t97.5 240q0 306 -375 306q-95 0 -254 -29l-86 55l55 684h727v-153h-585l-37 -439q115 23 229 23q231 0 363.5 -114.5t132.5 -313.5q0 -227 -144.5 -356t-398.5 -129q-247 0 -377 79z" /> +<glyph unicode="6" d="M117 625q0 431 167.5 644.5t495.5 213.5q113 0 178 -19v-143q-77 25 -176 25q-235 0 -359 -146.5t-136 -460.5h12q110 172 348 172q197 0 310.5 -119t113.5 -323q0 -228 -124.5 -358.5t-336.5 -130.5q-227 0 -360 170.5t-133 474.5zM287 506q0 -103 40 -192t113.5 -141 t167.5 -52q142 0 220.5 89.5t78.5 258.5q0 145 -73 228t-218 83q-90 0 -165 -37t-119.5 -102t-44.5 -135z" /> +<glyph unicode="7" d="M94 
1309v153h973v-133l-598 -1329h-184l606 1309h-797z" /> +<glyph unicode="8" d="M104 373q0 251 306 391q-138 78 -198 168.5t-60 202.5q0 159 117.5 253.5t314.5 94.5q200 0 317 -93t117 -257q0 -108 -67 -197t-214 -162q178 -85 253 -178.5t75 -216.5q0 -182 -127 -290.5t-348 -108.5q-234 0 -360 102.5t-126 290.5zM268 369q0 -120 83.5 -187 t234.5 -67q149 0 232 70t83 192q0 97 -78 172.5t-272 146.5q-149 -64 -216 -141.5t-67 -185.5zM315 1128q0 -92 59 -158t218 -132q143 60 202.5 129t59.5 161q0 101 -72.5 160.5t-199.5 59.5q-125 0 -196 -60t-71 -160z" /> +<glyph unicode="9" d="M106 991q0 228 127.5 360t335.5 132q149 0 260.5 -76.5t171.5 -223t60 -345.5q0 -858 -664 -858q-116 0 -184 20v143q80 -26 182 -26q240 0 362.5 148.5t133.5 455.5h-12q-55 -83 -146 -126.5t-205 -43.5q-194 0 -308 116t-114 324zM270 993q0 -144 72 -226.5t219 -82.5 q91 0 167.5 37t120.5 101t44 134q0 105 -41 194t-114.5 140t-168.5 51q-143 0 -221 -92t-78 -256z" /> +<glyph unicode=":" horiz-adv-x="545" d="M152 106q0 67 30.5 101.5t87.5 34.5q58 0 90.5 -34.5t32.5 -101.5q0 -65 -33 -100t-90 -35q-51 0 -84.5 31.5t-33.5 103.5zM152 989q0 135 118 135q123 0 123 -135q0 -65 -33 -100t-90 -35q-51 0 -84.5 31.5t-33.5 103.5z" /> +<glyph unicode=";" horiz-adv-x="545" d="M63 -264q27 104 59.5 257t45.5 245h182l15 -23q-26 -100 -75 -232.5t-102 -246.5h-125zM147 989q0 135 119 135q123 0 123 -135q0 -65 -33 -100t-90 -35q-58 0 -88.5 35t-30.5 100z" /> +<glyph unicode="<" d="M104 664v98l961 479v-149l-782 -371l782 -328v-151z" /> +<glyph unicode="=" d="M119 449v137h930v-137h-930zM119 858v137h930v-137h-930z" /> +<glyph unicode=">" d="M104 242v151l783 326l-783 373v149l961 -479v-98z" /> +<glyph unicode="?" 
horiz-adv-x="879" d="M27 1384q189 99 395 99q191 0 297 -94t106 -265q0 -73 -19.5 -128.5t-57.5 -105t-164 -159.5q-101 -86 -133.5 -143t-32.5 -152v-33h-129v54q0 117 36 192.5t134 159.5q136 115 171.5 173t35.5 140q0 102 -65.5 157.5t-188.5 55.5q-79 0 -154 -18.5t-172 -67.5zM240 106 q0 136 120 136q58 0 89.5 -35t31.5 -101q0 -64 -32 -99.5t-89 -35.5q-52 0 -86 31.5t-34 103.5z" /> +<glyph unicode="@" horiz-adv-x="1841" d="M121 571q0 260 107 463t305 314.5t454 111.5q215 0 382.5 -90.5t259 -257t91.5 -383.5q0 -142 -44 -260t-124 -183t-184 -65q-86 0 -145 52t-70 133h-8q-40 -87 -114.5 -136t-176.5 -49q-150 0 -234.5 102.5t-84.5 278.5q0 204 118 331.5t310 127.5q68 0 154 -12.5 t155 -34.5l-25 -470v-22q0 -178 133 -178q91 0 148 107.5t57 279.5q0 181 -74 317t-210.5 209.5t-313.5 73.5q-223 0 -388 -92.5t-252 -264t-87 -396.5q0 -305 161 -469t464 -164q210 0 436 86v-133q-192 -84 -436 -84q-363 0 -563.5 199.5t-200.5 557.5zM686 598 q0 -254 195 -254q207 0 225 313l14 261q-72 20 -157 20q-130 0 -203.5 -90t-73.5 -250z" /> +<glyph unicode="A" horiz-adv-x="1296" d="M0 0l578 1468h143l575 -1468h-176l-182 465h-586l-180 -465h-172zM412 618h473l-170 453q-33 86 -68 211q-22 -96 -63 -211z" /> +<glyph unicode="B" horiz-adv-x="1327" d="M201 0v1462h413q291 0 421 -87t130 -275q0 -130 -72.5 -214.5t-211.5 -109.5v-10q333 -57 333 -350q0 -196 -132.5 -306t-370.5 -110h-510zM371 145h305q177 0 266.5 68.5t89.5 214.5q0 136 -91.5 200t-278.5 64h-291v-547zM371 836h280q180 0 259 56.5t79 190.5 q0 123 -88 177.5t-280 54.5h-250v-479z" /> +<glyph unicode="C" horiz-adv-x="1292" d="M125 733q0 226 84.5 396t244 262t375.5 92q230 0 402 -84l-72 -146q-166 78 -332 78q-241 0 -380.5 -160.5t-139.5 -439.5q0 -287 134.5 -443.5t383.5 -156.5q153 0 349 55v-149q-152 -57 -375 -57q-323 0 -498.5 196t-175.5 557z" /> +<glyph unicode="D" horiz-adv-x="1493" d="M201 0v1462h448q341 0 530 -189t189 -528q0 -362 -196.5 -553.5t-565.5 -191.5h-405zM371 147h207q304 0 457 149.5t153 442.5q0 286 -143.5 431t-426.5 145h-247v-1168z" /> +<glyph unicode="E" horiz-adv-x="1139" d="M201 
0v1462h815v-151h-645v-471h606v-150h-606v-538h645v-152h-815z" /> +<glyph unicode="F" horiz-adv-x="1057" d="M201 0v1462h815v-151h-645v-535h606v-151h-606v-625h-170z" /> +<glyph unicode="G" horiz-adv-x="1491" d="M125 731q0 228 91.5 399.5t263.5 262t403 90.5q234 0 436 -86l-66 -150q-198 84 -381 84q-267 0 -417 -159t-150 -441q0 -296 144.5 -449t424.5 -153q152 0 297 35v450h-327v152h497v-711q-116 -37 -236 -56t-278 -19q-332 0 -517 197.5t-185 553.5z" /> +<glyph unicode="H" horiz-adv-x="1511" d="M201 0v1462h170v-622h770v622h170v-1462h-170v688h-770v-688h-170z" /> +<glyph unicode="I" horiz-adv-x="571" d="M201 0v1462h170v-1462h-170z" /> +<glyph unicode="J" horiz-adv-x="547" d="M-160 -213q71 -20 148 -20q99 0 150.5 60t51.5 173v1462h170v-1448q0 -190 -96 -294.5t-276 -104.5q-94 0 -148 27v145z" /> +<glyph unicode="K" horiz-adv-x="1257" d="M201 0v1462h170v-725l663 725h201l-588 -635l610 -827h-200l-533 709l-153 -136v-573h-170z" /> +<glyph unicode="L" horiz-adv-x="1063" d="M201 0v1462h170v-1308h645v-154h-815z" /> +<glyph unicode="M" horiz-adv-x="1849" d="M201 0v1462h256l463 -1206h8l467 1206h254v-1462h-170v942q0 162 14 352h-8l-500 -1294h-137l-496 1296h-8q14 -154 14 -366v-930h-157z" /> +<glyph unicode="N" horiz-adv-x="1544" d="M201 0v1462h192l797 -1222h8q-2 28 -9 174q-5 114 -5 177v32v839h159v-1462h-194l-799 1227h-8q16 -216 16 -396v-831h-157z" /> +<glyph unicode="O" horiz-adv-x="1595" d="M125 735q0 357 176 553.5t500 196.5q315 0 492 -200t177 -552q0 -351 -177.5 -552t-493.5 -201q-323 0 -498.5 197.5t-175.5 557.5zM305 733q0 -297 126.5 -450.5t367.5 -153.5q243 0 367 153t124 451q0 295 -123.5 447.5t-365.5 152.5q-243 0 -369.5 -153.5 t-126.5 -446.5z" /> +<glyph unicode="P" horiz-adv-x="1233" d="M201 0v1462h379q548 0 548 -426q0 -222 -151.5 -341.5t-433.5 -119.5h-172v-575h-170zM371 721h153q226 0 327 73t101 234q0 145 -95 216t-296 71h-190v-594z" /> +<glyph unicode="Q" horiz-adv-x="1595" d="M125 735q0 357 176 553.5t500 196.5q315 0 492 -200t177 -552q0 -281 -113 -467t-319 -252l348 -362h-247l-285 330l-55 -2q-323 0 
-498.5 197.5t-175.5 557.5zM305 733q0 -297 126.5 -450.5t367.5 -153.5q243 0 367 153t124 451q0 295 -123.5 447.5t-365.5 152.5 q-243 0 -369.5 -153.5t-126.5 -446.5z" /> +<glyph unicode="R" horiz-adv-x="1266" d="M201 0v1462h401q269 0 397.5 -103t128.5 -310q0 -290 -294 -392l397 -657h-201l-354 608h-305v-608h-170zM371 754h233q180 0 264 71.5t84 214.5q0 145 -85.5 209t-274.5 64h-221v-559z" /> +<glyph unicode="S" horiz-adv-x="1124" d="M106 47v164q90 -38 196 -60t210 -22q170 0 256 64.5t86 179.5q0 76 -30.5 124.5t-102 89.5t-217.5 93q-204 73 -291.5 173t-87.5 261q0 169 127 269t336 100q218 0 401 -80l-53 -148q-181 76 -352 76q-135 0 -211 -58t-76 -161q0 -76 28 -124.5t94.5 -89t203.5 -89.5 q230 -82 316.5 -176t86.5 -244q0 -193 -140 -301t-380 -108q-260 0 -400 67z" /> +<glyph unicode="T" horiz-adv-x="1133" d="M18 1311v151h1096v-151h-463v-1311h-170v1311h-463z" /> +<glyph unicode="U" horiz-adv-x="1491" d="M186 520v942h170v-954q0 -183 100 -281t294 -98q185 0 285 98.5t100 282.5v952h170v-946q0 -250 -151 -393t-415 -143t-408.5 144t-144.5 396z" /> +<glyph unicode="V" horiz-adv-x="1219" d="M0 1462h180l336 -946q58 -163 92 -317q36 162 94 323l334 940h183l-527 -1462h-168z" /> +<glyph unicode="W" horiz-adv-x="1896" d="M27 1462h180l231 -903q48 -190 70 -344q27 183 80 358l262 889h180l275 -897q48 -155 81 -350q19 142 72 346l230 901h180l-391 -1462h-168l-295 979q-21 65 -47 164t-27 119q-22 -132 -70 -289l-286 -973h-168z" /> +<glyph unicode="X" horiz-adv-x="1182" d="M8 0l486 764l-453 698h188l363 -579l366 579h181l-453 -692l488 -770h-193l-393 643l-400 -643h-180z" /> +<glyph unicode="Y" horiz-adv-x="1147" d="M0 1462h186l387 -731l390 731h184l-488 -895v-567h-172v559z" /> +<glyph unicode="Z" horiz-adv-x="1169" d="M82 0v133l776 1176h-752v153h959v-133l-776 -1175h798v-154h-1005z" /> +<glyph unicode="[" horiz-adv-x="674" d="M166 -324v1786h457v-141h-289v-1503h289v-142h-457z" /> +<glyph unicode="\" horiz-adv-x="752" d="M23 1462h163l547 -1462h-166z" /> +<glyph unicode="]" horiz-adv-x="674" d="M51 
-182h289v1503h-289v141h457v-1786h-457v142z" /> +<glyph unicode="^" horiz-adv-x="1110" d="M49 551l434 922h99l477 -922h-152l-372 745l-334 -745h-152z" /> +<glyph unicode="_" horiz-adv-x="918" d="M-4 -184h926v-131h-926v131z" /> +<glyph unicode="`" horiz-adv-x="1182" d="M393 1548v21h203q32 -69 89 -159.5t101 -143.5v-25h-110q-65 52 -154 148t-129 159z" /> +<glyph unicode="a" horiz-adv-x="1139" d="M94 303q0 332 531 348l186 6v68q0 129 -55.5 190.5t-177.5 61.5q-137 0 -310 -84l-51 127q81 44 177.5 69t193.5 25q196 0 290.5 -87t94.5 -279v-748h-123l-33 156h-8q-82 -103 -163.5 -139.5t-203.5 -36.5q-163 0 -255.5 84t-92.5 239zM268 301q0 -90 54.5 -137 t152.5 -47q155 0 243.5 85t88.5 238v99l-166 -7q-198 -7 -285.5 -61.5t-87.5 -169.5z" /> +<glyph unicode="b" horiz-adv-x="1255" d="M176 0v1556h166v-378q0 -127 -8 -228h8q116 164 344 164q216 0 335.5 -147.5t119.5 -417.5t-120.5 -419.5t-334.5 -149.5q-107 0 -195.5 39.5t-148.5 121.5h-12l-35 -141h-119zM342 549q0 -231 77 -330.5t247 -99.5q153 0 228 111.5t75 320.5q0 214 -75 319t-232 105 q-170 0 -245 -97.5t-75 -328.5z" /> +<glyph unicode="c" horiz-adv-x="975" d="M115 541q0 275 132.5 425t377.5 150q79 0 158 -17t124 -40l-51 -141q-55 22 -120 36.5t-115 14.5q-334 0 -334 -426q0 -202 81.5 -310t241.5 -108q137 0 281 59v-147q-110 -57 -277 -57q-238 0 -368.5 146.5t-130.5 414.5z" /> +<glyph unicode="d" horiz-adv-x="1255" d="M115 545q0 271 120 421t334 150q223 0 342 -162h13l-7 79l-4 77v446h166v-1556h-135l-22 147h-9q-115 -167 -344 -167q-215 0 -334.5 147t-119.5 418zM287 543q0 -210 77 -317t226 -107q170 0 246.5 92.5t76.5 298.5v35q0 233 -77.5 332.5t-247.5 99.5 q-146 0 -223.5 -113.5t-77.5 -320.5z" /> +<glyph unicode="e" horiz-adv-x="1149" d="M115 539q0 265 130.5 421t350.5 156q206 0 326 -135.5t120 -357.5v-105h-755q5 -193 97.5 -293t260.5 -100q177 0 350 74v-148q-88 -38 -166.5 -54.5t-189.5 -16.5q-243 0 -383.5 148t-140.5 411zM291 653h573q0 157 -70 240.5t-200 83.5q-132 0 -210.5 -86t-92.5 -238z " /> +<glyph unicode="f" horiz-adv-x="694" d="M29 967v75l196 60v61q0 404 353 404q87 0 204 
-35l-43 -133q-96 31 -164 31q-94 0 -139 -62.5t-45 -200.5v-71h279v-129h-279v-967h-166v967h-196z" /> +<glyph unicode="g" horiz-adv-x="1122" d="M39 -186q0 100 64 173t180 99q-42 19 -70.5 59t-28.5 93q0 60 32 105t101 87q-85 35 -138.5 119t-53.5 192q0 180 108 277.5t306 97.5q86 0 155 -20h379v-105l-203 -24q28 -35 50 -91.5t22 -127.5q0 -161 -110 -257t-302 -96q-49 0 -92 8q-106 -56 -106 -141 q0 -45 37 -66.5t127 -21.5h194q178 0 273.5 -75t95.5 -218q0 -182 -146 -277.5t-426 -95.5q-215 0 -331.5 80t-116.5 226zM199 -184q0 -89 75 -135t215 -46q209 0 309.5 62.5t100.5 169.5q0 89 -55 123.5t-207 34.5h-199q-113 0 -176 -54t-63 -155zM289 745q0 -115 65 -174 t181 -59q243 0 243 236q0 247 -246 247q-117 0 -180 -63t-63 -187z" /> +<glyph unicode="h" horiz-adv-x="1257" d="M176 0v1556h166v-471q0 -85 -8 -141h10q49 79 139.5 124.5t206.5 45.5q201 0 301.5 -95.5t100.5 -303.5v-715h-166v709q0 134 -61 200t-191 66q-173 0 -252.5 -94t-79.5 -308v-573h-166z" /> +<glyph unicode="i" horiz-adv-x="518" d="M162 1393q0 57 28 83.5t70 26.5q40 0 69 -27t29 -83t-29 -83.5t-69 -27.5q-42 0 -70 27.5t-28 83.5zM176 0v1096h166v-1096h-166z" /> +<glyph unicode="j" horiz-adv-x="518" d="M-111 -332q69 -20 136 -20q78 0 114.5 42.5t36.5 129.5v1276h166v-1264q0 -324 -299 -324q-95 0 -154 25v135zM162 1393q0 57 28 83.5t70 26.5q40 0 69 -27t29 -83t-29 -83.5t-69 -27.5q-42 0 -70 27.5t-28 83.5z" /> +<glyph unicode="k" horiz-adv-x="1075" d="M176 0v1556h164v-825q0 -55 -8 -170h8q43 61 131 160l354 375h197l-444 -467l475 -629h-201l-387 518l-125 -108v-410h-164z" /> +<glyph unicode="l" horiz-adv-x="518" d="M176 0v1556h166v-1556h-166z" /> +<glyph unicode="m" horiz-adv-x="1905" d="M176 0v1096h135l27 -150h8q47 80 132.5 125t191.5 45q257 0 336 -186h8q49 86 142 136t212 50q186 0 278.5 -95.5t92.5 -305.5v-715h-166v713q0 131 -56 196.5t-174 65.5q-155 0 -229 -89t-74 -274v-612h-166v713q0 131 -56 196.5t-175 65.5q-156 0 -228.5 -93.5 t-72.5 -306.5v-575h-166z" /> +<glyph unicode="n" horiz-adv-x="1257" d="M176 0v1096h135l27 -150h8q51 81 143 125.5t205 44.5q198 0 298 -95.5t100 
-305.5v-715h-166v709q0 134 -61 200t-191 66q-172 0 -252 -93t-80 -307v-575h-166z" /> +<glyph unicode="o" horiz-adv-x="1237" d="M115 549q0 268 134 417.5t372 149.5q230 0 365.5 -153t135.5 -414q0 -268 -135 -418.5t-373 -150.5q-147 0 -261 69t-176 198t-62 302zM287 549q0 -210 84 -320t247 -110t247.5 109.5t84.5 320.5q0 209 -84.5 317.5t-249.5 108.5q-163 0 -246 -107t-83 -319z" /> +<glyph unicode="p" horiz-adv-x="1255" d="M176 -492v1588h135l23 -150h8q64 90 149 130t195 40q218 0 336.5 -149t118.5 -418q0 -270 -120.5 -419.5t-334.5 -149.5q-107 0 -195.5 39.5t-148.5 121.5h-12q12 -96 12 -182v-451h-166zM342 549q0 -231 77 -330.5t247 -99.5q142 0 222.5 115t80.5 317 q0 205 -80.5 314.5t-226.5 109.5q-168 0 -243 -93t-77 -296v-37z" /> +<glyph unicode="q" horiz-adv-x="1255" d="M115 545q0 269 120 420t334 151q225 0 346 -170h9l24 150h131v-1588h-166v469q0 100 11 170h-13q-115 -167 -346 -167q-212 0 -331 149t-119 416zM287 543q0 -207 76.5 -315.5t226.5 -108.5q166 0 242 89t81 300v37q0 230 -78 331t-247 101q-146 0 -223.5 -113.5 t-77.5 -320.5z" /> +<glyph unicode="r" horiz-adv-x="836" d="M176 0v1096h137l19 -203h8q61 107 147 165t189 58q73 0 131 -12l-23 -154q-68 15 -120 15q-133 0 -227.5 -108t-94.5 -269v-588h-166z" /> +<glyph unicode="s" horiz-adv-x="977" d="M106 827q0 134 109 211.5t299 77.5q177 0 346 -72l-59 -135q-165 68 -299 68q-118 0 -178 -37t-60 -102q0 -44 22.5 -75t72.5 -59t192 -81q195 -71 263.5 -143t68.5 -181q0 -153 -114 -236t-320 -83q-218 0 -340 69v154q79 -40 169.5 -63t174.5 -23q130 0 200 41.5 t70 126.5q0 64 -55.5 109.5t-216.5 107.5q-153 57 -217.5 99.5t-96 96.5t-31.5 129z" /> +<glyph unicode="t" horiz-adv-x="723" d="M31 967v80l157 69l70 234h96v-254h318v-129h-318v-645q0 -99 47 -152t129 -53q44 0 85 6.5t65 13.5v-127q-27 -13 -79.5 -21.5t-94.5 -8.5q-318 0 -318 335v652h-157z" /> +<glyph unicode="u" horiz-adv-x="1257" d="M164 379v717h168v-711q0 -134 61 -200t191 -66q172 0 251.5 94t79.5 307v576h166v-1096h-137l-24 147h-9q-51 -81 -141.5 -124t-206.5 -43q-200 0 -299.5 95t-99.5 304z" /> +<glyph unicode="v" 
horiz-adv-x="1026" d="M0 1096h178l236 -650q80 -228 94 -296h8q11 53 69.5 219.5t262.5 726.5h178l-416 -1096h-194z" /> +<glyph unicode="w" horiz-adv-x="1593" d="M23 1096h174q106 -413 161.5 -629t63.5 -291h8q11 57 35.5 147.5t42.5 143.5l201 629h180l196 -629q56 -172 76 -289h8q4 36 21.5 111t208.5 807h172l-303 -1096h-197l-201 643q-19 59 -71 268h-8q-40 -175 -70 -270l-207 -641h-192z" /> +<glyph unicode="x" horiz-adv-x="1073" d="M39 0l401 561l-381 535h189l289 -420l288 420h187l-381 -535l401 -561h-188l-307 444l-310 -444h-188z" /> +<glyph unicode="y" horiz-adv-x="1032" d="M2 1096h178l240 -625q79 -214 98 -309h8q13 51 54.5 174.5t271.5 759.5h178l-471 -1248q-70 -185 -163.5 -262.5t-229.5 -77.5q-76 0 -150 17v133q55 -12 123 -12q171 0 244 192l61 156z" /> +<glyph unicode="z" horiz-adv-x="958" d="M82 0v113l598 854h-561v129h743v-129l-590 -838h605v-129h-795z" /> +<glyph unicode="{" horiz-adv-x="776" d="M61 498v141q130 2 188 48t58 142v306q0 155 108 241t290 86v-139q-230 -6 -230 -199v-295q0 -215 -223 -254v-12q223 -39 223 -254v-297q0 -102 58.5 -148t171.5 -48v-140q-190 2 -294 87t-104 239v303q0 104 -63 148.5t-183 44.5z" /> +<glyph unicode="|" horiz-adv-x="1128" d="M494 -496v2052h141v-2052h-141z" /> +<glyph unicode="}" horiz-adv-x="776" d="M72 -184q111 2 169 48t58 148v297q0 114 55 174t168 80v12q-223 39 -223 254v295q0 193 -227 199v139q184 0 289.5 -87t105.5 -240v-306q0 -97 59 -142.5t189 -47.5v-141q-122 0 -185 -44.5t-63 -148.5v-303q0 -153 -102.5 -238.5t-292.5 -87.5v140z" /> +<glyph unicode="~" d="M104 592v151q100 109 244 109q68 0 124.5 -14t145.5 -52q66 -28 115 -41.5t96 -13.5q54 0 118 32t118 89v-150q-102 -110 -244 -110q-72 0 -135 16.5t-135 48.5q-75 32 -120 44t-93 12q-53 0 -116.5 -33.5t-117.5 -87.5z" /> +<glyph unicode="¢" d="M190 741q0 508 396 570v172h135v-164q75 -3 146 -19.5t120 -39.5l-49 -140q-133 51 -242 51q-172 0 -253 -105.5t-81 -322.5q0 -212 79.5 -313.5t246.5 -101.5q141 0 283 59v-147q-105 -54 -252 -60v-200h-133v206q-203 32 -299.5 168.5t-96.5 386.5z" /> +<glyph unicode="£" d="M63 0v141q205 47 205 
291v223h-198v127h198v316q0 178 112 280.5t302 102.5t360 -84l-61 -133q-154 77 -297 77q-123 0 -185.5 -62t-62.5 -202v-295h422v-127h-422v-221q0 -100 -32.5 -168t-106.5 -112h795v-154h-1029z" /> +<glyph unicode="¥" d="M31 1462h178l375 -727l379 727h174l-416 -770h262v-127h-317v-170h317v-127h-317v-268h-164v268h-316v127h316v170h-316v127h256z" /> +<glyph unicode="©" horiz-adv-x="1704" d="M100 731q0 200 100 375t275 276t377 101q200 0 375 -100t276 -275t101 -377q0 -197 -97 -370t-272 -277t-383 -104q-207 0 -382 103.5t-272.5 276.5t-97.5 371zM205 731q0 -173 87 -323.5t237.5 -237t322.5 -86.5q174 0 323 87t236.5 235.5t87.5 324.5q0 174 -87 323 t-235.5 236.5t-324.5 87.5q-174 0 -323 -87t-236.5 -235.5t-87.5 -324.5zM481 731q0 209 110.5 332t301.5 123q128 0 246 -60l-58 -118q-108 51 -188 51q-125 0 -192.5 -87t-67.5 -241q0 -168 63.5 -249t194.5 -81q86 0 211 45v-124q-48 -20 -98.5 -34t-120.5 -14 q-194 0 -298 120.5t-104 336.5z" /> +<glyph unicode="­" horiz-adv-x="659" d="M84 473v152h491v-152h-491z" /> +<glyph unicode="®" horiz-adv-x="1704" d="M100 731q0 200 100 375t275 276t377 101q200 0 375 -100t276 -275t101 -377q0 -197 -97 -370t-272 -277t-383 -104q-207 0 -382 103.5t-272.5 276.5t-97.5 371zM205 731q0 -173 87 -323.5t237.5 -237t322.5 -86.5q174 0 323 87t236.5 235.5t87.5 324.5q0 174 -87 323 t-235.5 236.5t-324.5 87.5q-174 0 -323 -87t-236.5 -235.5t-87.5 -324.5zM575 285v891h261q166 0 243.5 -65t77.5 -198q0 -80 -42.5 -141.5t-119.5 -91.5l238 -395h-168l-207 354h-135v-354h-148zM723 762h108q80 0 128.5 41.5t48.5 105.5q0 75 -43 107.5t-136 32.5h-106 v-287z" /> +<glyph unicode="´" horiz-adv-x="1182" d="M393 1241v25q48 62 103.5 150t87.5 153h202v-21q-44 -65 -131 -160t-151 -147h-111z" /> +<glyph unicode=" " horiz-adv-x="784" /> +<glyph unicode=" " horiz-adv-x="1569" /> +<glyph unicode=" " horiz-adv-x="784" /> +<glyph unicode=" " horiz-adv-x="1569" /> +<glyph unicode=" " horiz-adv-x="523" /> +<glyph unicode=" " horiz-adv-x="392" /> +<glyph unicode=" " horiz-adv-x="261" /> +<glyph unicode=" " horiz-adv-x="261" /> +<glyph 
unicode=" " horiz-adv-x="196" /> +<glyph unicode=" " horiz-adv-x="313" /> +<glyph unicode=" " horiz-adv-x="87" /> +<glyph unicode="‐" horiz-adv-x="659" d="M84 473v152h491v-152h-491z" /> +<glyph unicode="‑" horiz-adv-x="659" d="M84 473v152h491v-152h-491z" /> +<glyph unicode="‒" horiz-adv-x="659" d="M84 473v152h491v-152h-491z" /> +<glyph unicode="–" horiz-adv-x="1024" d="M82 473v152h860v-152h-860z" /> +<glyph unicode="—" horiz-adv-x="2048" d="M82 473v152h1884v-152h-1884z" /> +<glyph unicode="‘" horiz-adv-x="348" d="M25 983q22 90 71 224t105 255h123q-66 -254 -103 -501h-184z" /> +<glyph unicode="’" horiz-adv-x="348" d="M25 961q70 285 102 501h182l15 -22q-26 -100 -75 -232.5t-102 -246.5h-122z" /> +<glyph unicode="“" horiz-adv-x="717" d="M25 983q22 90 71 224t105 255h123q-66 -254 -103 -501h-184zM391 983q56 215 178 479h123q-30 -115 -59.5 -259.5t-42.5 -241.5h-184z" /> +<glyph unicode="”" horiz-adv-x="717" d="M25 961q70 285 102 501h182l15 -22q-26 -100 -75 -232.5t-102 -246.5h-122zM391 961q26 100 59 254t46 247h182l14 -22q-24 -91 -72 -224t-104 -255h-125z" /> +<glyph unicode="•" horiz-adv-x="770" d="M164 748q0 121 56.5 184t164.5 63q105 0 163 -62t58 -185q0 -119 -57.5 -183.5t-163.5 -64.5q-107 0 -164 65.5t-57 182.5z" /> +<glyph unicode="…" horiz-adv-x="1606" d="M152 106q0 67 30.5 101.5t87.5 34.5q58 0 90.5 -34.5t32.5 -101.5q0 -65 -33 -100t-90 -35q-51 0 -84.5 31.5t-33.5 103.5zM682 106q0 67 30.5 101.5t87.5 34.5q58 0 90.5 -34.5t32.5 -101.5q0 -65 -33 -100t-90 -35q-51 0 -84.5 31.5t-33.5 103.5zM1213 106 q0 67 30.5 101.5t87.5 34.5q58 0 90.5 -34.5t32.5 -101.5q0 -65 -33 -100t-90 -35q-51 0 -84.5 31.5t-33.5 103.5z" /> +<glyph unicode=" " horiz-adv-x="313" /> +<glyph unicode=" " horiz-adv-x="392" /> +<glyph unicode="€" horiz-adv-x="1208" d="M63 506v129h152l-2 42v44l2 80h-152v129h164q39 261 185 407t383 146q201 0 366 -97l-71 -139q-166 86 -295 86q-319 0 -398 -403h510v-129h-524l-2 -57v-64l2 -45h463v-129h-447q37 -180 138.5 -278.5t271.5 -98.5q156 0 309 66v-150q-146 -65 -317 -65 q-237 0 -381.5 
134.5t-190.5 391.5h-166z" /> +<glyph unicode="™" horiz-adv-x="1589" d="M37 1356v106h543v-106h-211v-615h-123v615h-209zM647 741v721h187l196 -559l203 559h180v-721h-127v420l6 137h-8l-211 -557h-104l-201 559h-8l6 -129v-430h-119z" /> +<glyph unicode="" horiz-adv-x="1095" d="M0 1095h1095v-1095h-1095v1095z" /> +</font> +</defs></svg> \ No newline at end of file diff --git a/couchpotato/static/fonts/OpenSans-Regular-webfont.ttf b/couchpotato/static/fonts/OpenSans-Regular-webfont.ttf new file mode 100755 index 0000000000..05951e7b36 Binary files /dev/null and b/couchpotato/static/fonts/OpenSans-Regular-webfont.ttf differ diff --git a/couchpotato/static/fonts/OpenSans-Regular-webfont.woff b/couchpotato/static/fonts/OpenSans-Regular-webfont.woff new file mode 100755 index 0000000000..274664b28e Binary files /dev/null and b/couchpotato/static/fonts/OpenSans-Regular-webfont.woff differ diff --git a/couchpotato/static/fonts/config.json b/couchpotato/static/fonts/config.json new file mode 100644 index 0000000000..309d296193 --- /dev/null +++ b/couchpotato/static/fonts/config.json @@ -0,0 +1,190 @@ +{ + "name": "icons", + "css_prefix_text": "icon-", + "css_use_suffix": false, + "hinting": true, + "units_per_em": 1000, + "ascent": 850, + "glyphs": [ + { + "uid": "48cc210e59ff4bc56b6c8fba6eb384b8", + "css": "emo-coffee", + "code": 59401, + "src": "fontelico" + }, + { + "uid": "078fec38562c3f83a1201a908040c141", + "css": "emo-sunglasses", + "code": 59402, + "src": "fontelico" + }, + { + "uid": "04688d76a33ce7a7950e40fae79c08ac", + "css": "emo-cry", + "code": 59400, + "src": "fontelico" + }, + { + "uid": "9dd9e835aebe1060ba7190ad2b2ed951", + "css": "search", + "code": 59394, + "src": "fontawesome" + }, + { + "uid": "474656633f79ea2f1dad59ff63f6bf07", + "css": "star", + "code": 59418, + "src": "fontawesome" + }, + { + "uid": "d17030afaecc1e1c22349b99f3c4992a", + "css": "star-empty", + "code": 59419, + "src": "fontawesome" + }, + { + "uid": "84cf1fcc3fec556e7eaeb19679ca2dc9", + "css": 
"star-half", + "code": 59420, + "src": "fontawesome" + }, + { + "uid": "872d9516df93eb6b776cc4d94bd97dac", + "css": "movie", + "code": 59416, + "src": "fontawesome" + }, + { + "uid": "b1887b423d2fd15c345e090320c91ca0", + "css": "thumbs", + "code": 59397, + "src": "fontawesome" + }, + { + "uid": "f805bb95d40c7ef2bc51b3d50d4f2e5c", + "css": "list", + "code": 59398, + "src": "fontawesome" + }, + { + "uid": "12f4ece88e46abd864e40b35e05b11cd", + "css": "ok", + "code": 59408, + "src": "fontawesome" + }, + { + "uid": "5211af474d3a9848f67f945e2ccaf143", + "css": "cancel", + "code": 59406, + "src": "fontawesome" + }, + { + "uid": "44e04715aecbca7f266a17d5a7863c68", + "css": "plus", + "code": 59411, + "src": "fontawesome" + }, + { + "uid": "3d4ea8a78dc34efe891f3a0f3d961274", + "css": "info", + "code": 59403, + "src": "fontawesome" + }, + { + "uid": "d7271d490b71df4311e32cdacae8b331", + "css": "home", + "code": 59415, + "src": "fontawesome" + }, + { + "uid": "c5fd349cbd3d23e4ade333789c29c729", + "css": "eye", + "code": 59412, + "src": "fontawesome" + }, + { + "uid": "9a76bc135eac17d2c8b8ad4a5774fc87", + "css": "download", + "code": 59404, + "src": "fontawesome" + }, + { + "uid": "f48ae54adfb27d8ada53d0fd9e34ee10", + "css": "delete", + "code": 59405, + "src": "fontawesome" + }, + { + "uid": "e99461abfef3923546da8d745372c995", + "css": "settings", + "code": 59393, + "src": "fontawesome" + }, + { + "uid": "bc71f4c6e53394d5ba46b063040014f1", + "css": "redo", + "code": 59407, + "src": "fontawesome" + }, + { + "uid": "a73c5deb486c8d66249811642e5d719a", + "css": "refresh", + "code": 59414, + "src": "fontawesome" + }, + { + "uid": "4109c474ff99cad28fd5a2c38af2ec6f", + "css": "filter", + "code": 59396, + "src": "fontawesome" + }, + { + "uid": "026007bd17bfc67f3fe013199676f620", + "css": "donate", + "code": 59421, + "src": "fontawesome" + }, + { + "uid": "94103e1b3f1e8cf514178ec5912b4469", + "css": "dropdown", + "code": 59409, + "src": "fontawesome" + }, + { + "uid": 
"2cfb3f2b46b34a1790aec0aa846297b6", + "css": "menu", + "code": 59417, + "src": "entypo" + }, + { + "uid": "c311c48d79488965b0fab7f9cd12b6b5", + "css": "left-arrow", + "code": 59392, + "src": "entypo" + }, + { + "uid": "cb13afd4722a849d48056540bb74c47e", + "css": "play", + "code": 59410, + "src": "entypo" + }, + { + "uid": "d10920db2e79c997c5e783279291970c", + "css": "dots", + "code": 59395, + "src": "entypo" + }, + { + "uid": "51fb22f9ff9d7f60c95ef31e4c59502d", + "css": "notifications", + "code": 59399, + "src": "mfglabs" + }, + { + "uid": "3ab229dd9bccaaaf6c71096da4b72c04", + "css": "error", + "code": 59413, + "src": "elusive" + } + ] +} \ No newline at end of file diff --git a/couchpotato/static/fonts/icons.eot b/couchpotato/static/fonts/icons.eot new file mode 100644 index 0000000000..a910d5ea33 Binary files /dev/null and b/couchpotato/static/fonts/icons.eot differ diff --git a/couchpotato/static/fonts/icons.svg b/couchpotato/static/fonts/icons.svg new file mode 100644 index 0000000000..2863239dec --- /dev/null +++ b/couchpotato/static/fonts/icons.svg @@ -0,0 +1,41 @@ +<?xml version="1.0" standalone="no"?> +<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd"> +<svg xmlns="http://www.w3.org/2000/svg"> +<metadata>Copyright (C) 2015 by original authors @ fontello.com</metadata> +<defs> +<font id="icons" horiz-adv-x="1000" > +<font-face font-family="icons" font-weight="400" font-stretch="normal" units-per-em="1000" ascent="850" descent="-150" /> +<missing-glyph horiz-adv-x="1000" /> +<glyph glyph-name="left-arrow" unicode="" d="m242 626q14 16 39 16t41-16q38-36 0-80l-186-196 186-194q38-44 0-80-16-16-40-16t-40 16l-226 236q-16 16-16 38 0 24 16 40 206 214 226 236z" horiz-adv-x="341" /> +<glyph glyph-name="settings" unicode="" d="m571 350q0 59-41 101t-101 42-101-42-42-101 42-101 101-42 101 42 41 101z m286 61v-124q0-7-4-13t-11-7l-104-16q-10-30-21-51 19-27 59-77 6-6 6-13t-5-13q-15-21-55-61t-53-39q-7 0-14 5l-77 
60q-25-13-51-21-9-76-16-104-4-16-20-16h-124q-8 0-14 5t-6 12l-16 103q-27 9-50 21l-79-60q-6-5-14-5-8 0-14 6-70 64-92 94-4 5-4 13 0 6 5 12 8 12 28 37t30 40q-15 28-23 55l-102 15q-7 1-11 7t-5 13v124q0 7 5 13t10 7l104 16q8 25 22 51-23 32-60 77-6 7-6 14 0 5 5 12 15 20 55 60t53 40q7 0 15-5l77-60q24 13 50 21 9 76 17 104 3 15 20 15h124q7 0 13-4t7-12l15-103q28-9 50-21l80 60q5 5 13 5 7 0 14-5 72-67 92-95 4-5 4-13 0-6-4-12-9-12-29-38t-30-39q14-28 23-55l102-15q7-1 12-7t4-13z" horiz-adv-x="857.1" /> +<glyph glyph-name="search" unicode="" d="m643 386q0 103-74 176t-176 74-177-74-73-176 73-177 177-73 176 73 74 177z m286-465q0-29-22-50t-50-21q-30 0-50 21l-191 191q-100-69-223-69-80 0-153 31t-125 84-84 125-31 153 31 152 84 126 125 84 153 31 152-31 126-84 84-126 31-152q0-123-69-223l191-191q21-21 21-51z" horiz-adv-x="928.6" /> +<glyph glyph-name="dots" unicode="" d="m110 460q46 0 78-32t32-78q0-44-32-77t-78-33-78 33-32 77q0 46 32 78t78 32z m350 0q46 0 78-32t32-78q0-44-33-77t-77-33-77 33-33 77q0 46 32 78t78 32z m350 0q46 0 78-32t32-78q0-44-32-77t-78-33-78 33-32 77q0 46 32 78t78 32z" horiz-adv-x="920" /> +<glyph glyph-name="filter" unicode="" d="m783 685q9-23-8-39l-275-275v-414q0-23-22-33-7-3-14-3-15 0-25 11l-143 143q-10 10-10 25v271l-275 275q-18 16-8 39 9 22 33 22h714q23 0 33-22z" horiz-adv-x="785.7" /> +<glyph glyph-name="thumbs" unicode="" d="m286 154v-108q0-22-16-37t-38-16h-178q-23 0-38 16t-16 37v108q0 22 16 38t38 15h178q22 0 38-15t16-38z m0 285v-107q0-22-16-38t-38-15h-178q-23 0-38 15t-16 38v107q0 23 16 38t38 16h178q22 0 38-16t16-38z m357-285v-108q0-22-16-37t-38-16h-178q-23 0-38 16t-16 37v108q0 22 16 38t38 15h178q23 0 38-15t16-38z m-357 571v-107q0-22-16-38t-38-16h-178q-23 0-38 16t-16 38v107q0 22 16 38t38 16h178q22 0 38-16t16-38z m357-286v-107q0-22-16-38t-38-15h-178q-23 0-38 15t-16 38v107q0 23 16 38t38 16h178q23 0 38-16t16-38z m357-285v-108q0-22-16-37t-38-16h-178q-22 0-38 16t-16 37v108q0 22 16 38t38 15h178q23 0 38-15t16-38z m-357 571v-107q0-22-16-38t-38-16h-178q-23 0-38 16t-16 
38v107q0 22 16 38t38 16h178q23 0 38-16t16-38z m357-286v-107q0-22-16-38t-38-15h-178q-22 0-38 15t-16 38v107q0 23 16 38t38 16h178q23 0 38-16t16-38z m0 286v-107q0-22-16-38t-38-16h-178q-22 0-38 16t-16 38v107q0 22 16 38t38 16h178q23 0 38-16t16-38z" horiz-adv-x="1000" /> +<glyph glyph-name="list" unicode="" d="m286 154v-108q0-22-16-37t-38-16h-178q-23 0-38 16t-16 37v108q0 22 16 38t38 15h178q22 0 38-15t16-38z m0 285v-107q0-22-16-38t-38-15h-178q-23 0-38 15t-16 38v107q0 23 16 38t38 16h178q22 0 38-16t16-38z m714-285v-108q0-22-16-37t-38-16h-535q-23 0-38 16t-16 37v108q0 22 16 38t38 15h535q23 0 38-15t16-38z m-714 571v-107q0-22-16-38t-38-16h-178q-23 0-38 16t-16 38v107q0 22 16 38t38 16h178q22 0 38-16t16-38z m714-286v-107q0-22-16-38t-38-15h-535q-23 0-38 15t-16 38v107q0 23 16 38t38 16h535q23 0 38-16t16-38z m0 286v-107q0-22-16-38t-38-16h-535q-23 0-38 16t-16 38v107q0 22 16 38t38 16h535q23 0 38-16t16-38z" horiz-adv-x="1000" /> +<glyph glyph-name="notifications" unicode="" d="m0 404q0 67 27 130t75 114 114 89 145 59 166 21 166-21 146-59 113-89 76-114 26-130q0-84-42-160t-112-131-168-88-205-33q-105 0-201 31l-121-85q-67-42-94-24t-12 100l25 125q-60 55-92 122t-32 143z" horiz-adv-x="1054.7" /> +<glyph glyph-name="emo-cry" unicode="" d="m278 787c-7 0-15-2-23-5l-128-62-80-38c-2-1-4-2-6-3-1-5-1-11-1-16l0 0 0-1 0 0c0-69 40-162 122-163l0 0 0 0 0 0c34 1 65 19 87 50 26 35 37 82 34 128l18 8c27 13 38 45 25 71-9 19-28 31-48 31z m212 0c-20 0-39-11-48-30-13-27-1-59 25-71l18-9c-3-46 9-93 34-128 22-31 53-49 87-49l0 0 1 0 0 0c82 0 121 93 122 162l0 0 0 1 0 0c0 5-1 11-1 16-2 1-5 2-7 3l-80 38-128 62c-7 3-15 5-23 5z m279-302c-7 0-15-5-21-16-42-72-96-259 27-259 123 1 73 182 16 261-6 8-14 13-22 14z m-512-138c-57 0-112-9-166-26-10-4-20-7-30-11-11-5-21-9-30-13l0 0-1-1 0 0c-26-12-37-44-25-70 13-26 44-38 70-25l0-1c9 5 17 8 24 11 9 3 17 6 25 9 43 14 88 21 133 21 51 0 101-9 148-27 19-7 37-15 55-24 91-49 163-130 200-232 10-27 40-41 68-31 27 10 42 40 32 68-46 126-137 227-250 288-22 12-45 22-69 31-57 21-120 33-184 33z" 
horiz-adv-x="851" /> +<glyph glyph-name="emo-coffee" unicode="" d="m1234 850c-17 0-34-8-44-23-54-80-24-144 4-204 13-29 26-56 5-72-23-17-28-51-10-74 17-23 51-28 74-10 89 67 60 131 27 201-15 33-32 70-12 99 16 25 10 57-14 74-9 6-20 9-30 9z m-1021-121c-81 0-157-39-203-105-18-25-12-59 14-77 24-17 59-11 76 14 45 63 132 76 193 32l0 0c3-2 7-6 11-9l1-1 1-1c3-3 7-7 10-10l1-2 0 0 0 0 1-1 1 0c20-23 55-25 78-4 22 20 24 55 4 77-46 55-116 87-188 87z m499 0c-80 0-156-39-202-105-18-25-12-59 13-77 25-17 59-11 77 14 44 63 131 76 192 32l0 0c4-2 8-6 12-9l0-1 1-1c4-3 7-7 11-10l1-2 0 0 0 0 1-1 0 0c20-23 55-25 78-4 23 20 25 55 4 77-46 55-116 87-188 87z m206-346c-24 0-44-19-44-43l1-4-1-275 0 0 0 0 0 0c0-58 24-111 62-149l0 0 0 0 0 0c38-38 91-62 148-62l0 0 226 0c1 0 3 0 5 0 13 1 27 2 39 5 15 4 29 8 42 14 55 26 97 74 114 133l31 0 0 0c37 0 70 15 94 39 23 23 38 56 38 92l0 0 0 0 0 119 0 0c0 36-15 69-39 93l0 0c-23 23-56 38-92 38l0 0 0 0-624 0z m604-100c11 0 21-5 28-12l1 0 0 0c7-8 12-18 12-29l0 0 0-110 0 0c0-11-5-21-12-29-8-7-18-12-29-12l0 0-62 0 0 0c-21 0-39-16-40-37-4-41-31-76-67-93-7-3-15-6-22-8-7-1-14-2-21-2l-3 0-209 0 0-1c-30 1-59 13-79 34l-1 0c-21 21-34 50-34 81l0 0 0 0 0 0 1 218 537 0 0 0z m-82-55l0-96 32 0c22 0 39 17 39 38l0 20c0 21-17 38-39 38l-32 0z m-1131-12c-30 0-55-25-55-55s25-55 55-55l297 0c30 0 54 25 54 55s-24 55-54 55l-297 0z" horiz-adv-x="1673" /> +<glyph glyph-name="emo-sunglasses" unicode="" d="m495 745c-154 0-309 0-463 0-18 0-32-15-32-33 3-153 129-306 278-309 84-2 164 36 218 95 55-59 135-97 219-95 149 3 272 156 275 309 0 18-14 33-32 33-155 0-309 0-463 0z m319-510c-18 0-35-9-45-25l0 0c0-1-1-2-1-3-7-12-15-23-24-34-10-12-19-23-29-32-54-51-126-80-203-80l0 0 0 0c-21 0-43 2-64 7-3 0-6 1-10 2-17 5-34 11-51 19-26 12-58 1-70-26-13-27-1-58 25-71 22-10 45-18 69-24 5-1 9-2 14-4 29-6 58-9 87-9l0 0 0 0c104 0 201 39 275 108 15 14 28 29 40 43 11 15 22 31 32 48 0 0 1 1 1 1 15 25 7 58-18 73-9 5-18 7-28 7z" horiz-adv-x="990" /> +<glyph glyph-name="info" unicode="" d="m357 
100v-71q0-15-10-25t-26-11h-285q-15 0-25 11t-11 25v71q0 15 11 25t25 11h35v214h-35q-15 0-25 11t-11 25v71q0 15 11 25t25 11h214q15 0 25-11t11-25v-321h35q15 0 26-11t10-25z m-71 643v-107q0-15-11-25t-25-11h-143q-14 0-25 11t-11 25v107q0 14 11 25t25 11h143q15 0 25-11t11-25z" horiz-adv-x="357.1" /> +<glyph glyph-name="download" unicode="" d="m714 100q0 15-10 25t-25 11-26-11-10-25 10-25 26-11 25 11 10 25z m143 0q0 15-10 25t-26 11-25-11-10-25 10-25 25-11 26 11 10 25z m72 125v-179q0-22-16-37t-38-16h-821q-23 0-38 16t-16 37v179q0 22 16 38t38 16h259l75-76q33-32 76-32t76 32l76 76h259q22 0 38-16t16-38z m-182 318q10-23-8-40l-250-250q-10-10-25-10t-25 10l-250 250q-17 17-8 40 10 21 33 21h143v250q0 15 11 25t25 11h143q14 0 25-11t10-25v-250h143q24 0 33-21z" horiz-adv-x="928.6" /> +<glyph glyph-name="delete" unicode="" d="m286 439v-321q0-8-5-13t-13-5h-36q-8 0-13 5t-5 13v321q0 8 5 13t13 5h36q8 0 13-5t5-13z m143 0v-321q0-8-5-13t-13-5h-36q-8 0-13 5t-5 13v321q0 8 5 13t13 5h36q8 0 13-5t5-13z m142 0v-321q0-8-5-13t-12-5h-36q-8 0-13 5t-5 13v321q0 8 5 13t13 5h36q7 0 12-5t5-13z m72-404v529h-500v-529q0-12 4-22t8-15 6-5h464q2 0 6 5t8 15 4 22z m-375 601h250l-27 65q-4 5-9 6h-177q-6-1-10-6z m518-18v-36q0-8-5-13t-13-5h-54v-529q0-46-26-80t-63-34h-464q-37 0-63 33t-27 79v531h-53q-8 0-13 5t-5 13v36q0 8 5 13t13 5h172l39 93q9 21 31 35t44 15h178q22 0 44-15t30-35l39-93h173q8 0 13-5t5-13z" horiz-adv-x="785.7" /> +<glyph glyph-name="cancel" unicode="" d="m724 112q0-22-15-38l-76-76q-16-15-38-15t-38 15l-164 165-164-165q-16-15-38-15t-38 15l-76 76q-16 16-16 38t16 38l164 164-164 164q-16 16-16 38t16 38l76 76q16 16 38 16t38-16l164-164 164 164q16 16 38 16t38-16l76-76q15-15 15-38t-15-38l-164-164 164-164q15-15 15-38z" horiz-adv-x="785.7" /> +<glyph glyph-name="redo" unicode="" d="m857 707v-250q0-14-10-25t-26-11h-250q-23 0-32 23-10 22 7 38l77 77q-82 77-194 77-58 0-111-23t-91-61-62-91-22-111 22-111 62-91 91-61 111-23q66 0 125 29t100 82q4 6 13 7 8 0 14-5l76-77q5-4 6-11t-5-13q-60-74-147-114t-182-41q-87 0-167 34t-136 92-92 
137-34 166 34 166 92 137 136 92 167 34q82 0 158-31t137-88l72 72q16 18 39 8 22-9 22-33z" horiz-adv-x="857.1" /> +<glyph glyph-name="ok" unicode="" d="m932 534q0-22-15-38l-404-404-76-76q-16-15-38-15t-38 15l-76 76-202 202q-15 16-15 38t15 38l76 76q16 16 38 16t38-16l164-165 366 367q16 16 38 16t38-16l76-76q15-16 15-38z" horiz-adv-x="1000" /> +<glyph glyph-name="dropdown" unicode="" d="m571 243q0-15-10-25l-250-250q-11-11-25-11t-25 11l-250 250q-11 10-11 25t11 25 25 11h500q14 0 25-11t10-25z" horiz-adv-x="571.4" /> +<glyph glyph-name="play" unicode="" d="m486 376q14-10 14-26 0-14-14-24l-428-266q-24-16-41-6t-17 40l0 514q0 30 17 40t41-6z" horiz-adv-x="500" /> +<glyph glyph-name="plus" unicode="" d="m786 439v-107q0-22-16-38t-38-15h-232v-233q0-22-16-37t-38-16h-107q-22 0-38 16t-15 37v233h-232q-23 0-38 15t-16 38v107q0 23 16 38t38 16h232v232q0 22 15 38t38 16h107q23 0 38-16t16-38v-232h232q22 0 38-16t16-38z" horiz-adv-x="785.7" /> +<glyph glyph-name="eye" unicode="" d="m929 314q-85 132-213 197 34-58 34-125 0-104-73-177t-177-73-177 73-73 177q0 67 34 125-128-65-213-197 75-114 187-182t242-68 242 68 187 182z m-402 215q0 11-8 19t-19 7q-70 0-120-50t-50-119q0-12 8-19t19-8 19 8 8 19q0 48 34 82t82 34q11 0 19 8t8 19z m473-215q0-19-11-38-78-129-210-206t-279-77-279 77-210 206q-11 19-11 38t11 39q78 128 210 205t279 78 279-78 210-205q11-20 11-39z" horiz-adv-x="1000" /> +<glyph glyph-name="error" unicode="" d="m0 350q0 207 147 354t353 146 354-146 146-354-146-354-354-146-353 146-147 354z m137 0q0-150 106-257t257-106 257 106 106 257-106 257-257 106-257-106-106-257z m97-98l0 196 532 0 0-196-532 0z" horiz-adv-x="1000" /> +<glyph glyph-name="refresh" unicode="" d="m843 261q0-3 0-4-36-150-150-243t-267-93q-81 0-157 31t-136 88l-72-72q-11-11-25-11t-25 11-11 25v250q0 14 11 25t25 11h250q14 0 25-11t10-25-10-25l-77-77q40-37 90-57t105-20q74 0 139 37t104 99q6 10 29 66 5 13 17 13h107q8 0 13-6t5-12z m14 446v-250q0-14-10-25t-26-11h-250q-14 0-25 11t-10 25 10 25l77 77q-82 77-194 77-75 
0-140-37t-104-99q-6-10-29-66-5-13-17-13h-111q-7 0-13 6t-5 12v4q36 150 151 243t268 93q81 0 158-31t137-88l72 72q11 11 25 11t26-11 10-25z" horiz-adv-x="857.1" /> +<glyph glyph-name="home" unicode="" d="m786 296v-267q0-15-11-26t-25-10h-214v214h-143v-214h-214q-15 0-25 10t-11 26v267q0 1 0 2t0 2l321 264 321-264q1-1 1-4z m124 39l-34-41q-5-5-12-6h-2q-7 0-12 3l-386 322-386-322q-7-4-13-4-7 2-12 7l-35 41q-4 5-3 13t6 12l401 334q18 15 42 15t43-15l136-114v109q0 8 5 13t13 5h107q8 0 13-5t5-13v-227l122-102q5-5 6-12t-4-13z" horiz-adv-x="928.6" /> +<glyph glyph-name="movie" unicode="" d="m214-43v72q0 14-10 25t-25 10h-72q-14 0-25-10t-11-25v-72q0-14 11-25t25-11h72q14 0 25 11t10 25z m0 214v72q0 14-10 25t-25 11h-72q-14 0-25-11t-11-25v-72q0-14 11-25t25-10h72q14 0 25 10t10 25z m0 215v71q0 15-10 25t-25 11h-72q-14 0-25-11t-11-25v-71q0-15 11-25t25-11h72q14 0 25 11t10 25z m572-429v286q0 14-11 25t-25 11h-429q-14 0-25-11t-10-25v-286q0-14 10-25t25-11h429q15 0 25 11t11 25z m-572 643v71q0 15-10 26t-25 10h-72q-14 0-25-10t-11-26v-71q0-15 11-25t25-11h72q14 0 25 11t10 25z m786-643v72q0 14-11 25t-25 10h-71q-15 0-25-10t-11-25v-72q0-14 11-25t25-11h71q15 0 25 11t11 25z m-214 429v285q0 15-11 26t-25 10h-429q-14 0-25-10t-10-26v-285q0-15 10-25t25-11h429q15 0 25 11t11 25z m214-215v72q0 14-11 25t-25 11h-71q-15 0-25-11t-11-25v-72q0-14 11-25t25-10h71q15 0 25 10t11 25z m0 215v71q0 15-11 25t-25 11h-71q-15 0-25-11t-11-25v-71q0-15 11-25t25-11h71q15 0 25 11t11 25z m0 214v71q0 15-11 26t-25 10h-71q-15 0-25-10t-11-26v-71q0-15 11-25t25-11h71q15 0 25 11t11 25z m71 89v-750q0-37-26-63t-63-26h-893q-36 0-63 26t-26 63v750q0 37 26 63t63 27h893q37 0 63-27t26-63z" horiz-adv-x="1071.4" /> +<glyph glyph-name="menu" unicode="" d="m650 400q22 0 36-15t14-35-15-35-35-15l-600 0q-20 0-35 15t-15 35 14 35 36 15l600 0z m-600 100q-20 0-35 15t-15 35 14 35 36 15l600 0q22 0 36-15t14-35-15-35-35-15l-600 0z m600-300q22 0 36-15t14-35-15-35-35-15l-600 0q-20 0-35 15t-15 35 14 35 36 15l600 0z" horiz-adv-x="700" /> +<glyph glyph-name="star" 
unicode="" d="m929 489q0-12-15-27l-203-197 48-279q1-4 1-12 0-11-6-19t-17-9q-10 0-22 7l-251 132-250-132q-13-7-23-7-11 0-17 9t-6 19q0 4 1 12l48 279-203 197q-14 15-14 27 0 21 31 26l280 40 126 254q11 23 27 23t28-23l125-254 280-40q32-5 32-26z" horiz-adv-x="928.6" /> +<glyph glyph-name="star-empty" unicode="" d="m634 290l171 165-235 35-106 213-105-213-236-35 171-165-41-235 211 111 211-111z m295 199q0-12-15-27l-203-197 48-279q1-4 1-12 0-28-23-28-10 0-22 7l-251 132-250-132q-13-7-23-7-11 0-17 9t-6 19q0 4 1 12l48 279-203 197q-14 15-14 27 0 21 31 26l280 40 126 254q11 23 27 23t28-23l125-254 280-40q32-5 32-26z" horiz-adv-x="928.6" /> +<glyph glyph-name="star-half" unicode="" d="m662 316l143 139-198 29-37 6-17 34-89 179v-537l33-17 178-94-34 198-7 37z m252 146l-202-197 47-279q3-19-3-29t-19-11q-9 0-22 7l-251 132-250-132q-13-7-23-7-12 0-19 11t-3 29l48 279-203 197q-18 18-13 33t30 20l280 40 126 254q11 23 27 23 16 0 28-23l125-254 280-40q25-4 30-20t-13-33z" horiz-adv-x="928.6" /> +<glyph glyph-name="donate" unicode="" d="m546 189q0-86-56-147t-144-77v-97q0-8-5-13t-13-5h-75q-7 0-13 5t-5 13v97q-37 5-71 18t-57 25-41 26-26 21-10 10q-9 12-1 23l58 76q3 5 12 6 9 1 14-5l1-1q63-55 135-70 21-4 42-4 45 0 79 24t35 68q0 16-9 30t-18 23-33 21-37 18-45 18q-21 9-34 14t-34 15-35 17-32 20-29 24-25 27-20 32-11 37-5 44q0 77 55 135t142 75v100q0 7 5 13t13 5h75q8 0 13-5t5-13v-98q32-4 62-13t48-19 36-21 21-16 9-8q9-10 3-21l-46-81q-4-9-12-9-8-2-16 4-1 1-8 6t-21 15-33 18-42 15-47 6q-53 0-87-24t-33-62q0-14 4-27t17-23 22-18 31-18 34-15 39-15q30-11 45-18t43-19 42-24 34-28 30-35 18-43 7-52z" horiz-adv-x="571.4" /> +</font> +</defs> +</svg> \ No newline at end of file diff --git a/couchpotato/static/fonts/icons.ttf b/couchpotato/static/fonts/icons.ttf new file mode 100644 index 0000000000..18c5f3a5fa Binary files /dev/null and b/couchpotato/static/fonts/icons.ttf differ diff --git a/couchpotato/static/fonts/icons.woff b/couchpotato/static/fonts/icons.woff new file mode 100644 index 0000000000..5f1194978d Binary 
files /dev/null and b/couchpotato/static/fonts/icons.woff differ diff --git a/couchpotato/static/images/couch.png b/couchpotato/static/images/couch.png index 3bc445e77a..5b7efc706c 100644 Binary files a/couchpotato/static/images/couch.png and b/couchpotato/static/images/couch.png differ diff --git a/couchpotato/static/images/emptylist.png b/couchpotato/static/images/emptylist.png deleted file mode 100644 index 08db653736..0000000000 Binary files a/couchpotato/static/images/emptylist.png and /dev/null differ diff --git a/couchpotato/static/images/favicon.ico b/couchpotato/static/images/favicon.ico deleted file mode 100644 index f93f9fbe77..0000000000 Binary files a/couchpotato/static/images/favicon.ico and /dev/null differ diff --git a/couchpotato/static/images/gear.png b/couchpotato/static/images/gear.png deleted file mode 100644 index f1d63badeb..0000000000 Binary files a/couchpotato/static/images/gear.png and /dev/null differ diff --git a/couchpotato/static/images/homescreen.png b/couchpotato/static/images/homescreen.png deleted file mode 100644 index 491be66d00..0000000000 Binary files a/couchpotato/static/images/homescreen.png and /dev/null differ diff --git a/couchpotato/static/images/icon.attention.png b/couchpotato/static/images/icon.attention.png deleted file mode 100644 index ff10b97643..0000000000 Binary files a/couchpotato/static/images/icon.attention.png and /dev/null differ diff --git a/couchpotato/static/images/icon.check.png b/couchpotato/static/images/icon.check.png deleted file mode 100644 index e99e575f59..0000000000 Binary files a/couchpotato/static/images/icon.check.png and /dev/null differ diff --git a/couchpotato/static/images/icon.delete.png b/couchpotato/static/images/icon.delete.png deleted file mode 100644 index 5fbfe36d0d..0000000000 Binary files a/couchpotato/static/images/icon.delete.png and /dev/null differ diff --git a/couchpotato/static/images/icon.download.png b/couchpotato/static/images/icon.download.png deleted file mode 100644 
index e64e999707..0000000000 Binary files a/couchpotato/static/images/icon.download.png and /dev/null differ diff --git a/couchpotato/static/images/icon.edit.png b/couchpotato/static/images/icon.edit.png deleted file mode 100644 index 9d7aac683f..0000000000 Binary files a/couchpotato/static/images/icon.edit.png and /dev/null differ diff --git a/couchpotato/static/images/icon.files.png b/couchpotato/static/images/icon.files.png deleted file mode 100644 index 951fea6d7b..0000000000 Binary files a/couchpotato/static/images/icon.files.png and /dev/null differ diff --git a/couchpotato/static/images/icon.folder.gif b/couchpotato/static/images/icon.folder.gif deleted file mode 100644 index 9fbb12fb11..0000000000 Binary files a/couchpotato/static/images/icon.folder.gif and /dev/null differ diff --git a/couchpotato/static/images/icon.imdb.png b/couchpotato/static/images/icon.imdb.png deleted file mode 100644 index a9903c49f3..0000000000 Binary files a/couchpotato/static/images/icon.imdb.png and /dev/null differ diff --git a/couchpotato/static/images/icon.info.png b/couchpotato/static/images/icon.info.png deleted file mode 100644 index f61dc86814..0000000000 Binary files a/couchpotato/static/images/icon.info.png and /dev/null differ diff --git a/couchpotato/static/images/icon.rating.png b/couchpotato/static/images/icon.rating.png deleted file mode 100644 index f5e07adf32..0000000000 Binary files a/couchpotato/static/images/icon.rating.png and /dev/null differ diff --git a/couchpotato/static/images/icon.refresh.png b/couchpotato/static/images/icon.refresh.png deleted file mode 100644 index 906887ed24..0000000000 Binary files a/couchpotato/static/images/icon.refresh.png and /dev/null differ diff --git a/couchpotato/static/images/icon.spinner.gif b/couchpotato/static/images/icon.spinner.gif deleted file mode 100644 index c84d177f20..0000000000 Binary files a/couchpotato/static/images/icon.spinner.gif and /dev/null differ diff --git a/couchpotato/static/images/icon.trailer.png 
b/couchpotato/static/images/icon.trailer.png deleted file mode 100644 index 8bdd71715d..0000000000 Binary files a/couchpotato/static/images/icon.trailer.png and /dev/null differ diff --git a/couchpotato/static/images/icon.undo.png b/couchpotato/static/images/icon.undo.png deleted file mode 100644 index 71c7ec0af1..0000000000 Binary files a/couchpotato/static/images/icon.undo.png and /dev/null differ diff --git a/couchpotato/static/images/icons/android.png b/couchpotato/static/images/icons/android.png new file mode 100644 index 0000000000..6aeb8f0b2a Binary files /dev/null and b/couchpotato/static/images/icons/android.png differ diff --git a/couchpotato/static/images/icons/dark/android.png b/couchpotato/static/images/icons/dark/android.png new file mode 100644 index 0000000000..9efc63c10f Binary files /dev/null and b/couchpotato/static/images/icons/dark/android.png differ diff --git a/couchpotato/static/images/icons/dark/favicon.ico b/couchpotato/static/images/icons/dark/favicon.ico new file mode 100644 index 0000000000..7dc73e7090 Binary files /dev/null and b/couchpotato/static/images/icons/dark/favicon.ico differ diff --git a/couchpotato/static/images/icons/dark/ios.png b/couchpotato/static/images/icons/dark/ios.png new file mode 100644 index 0000000000..f0acf841c0 Binary files /dev/null and b/couchpotato/static/images/icons/dark/ios.png differ diff --git a/couchpotato/static/images/icons/dark/safari.svg b/couchpotato/static/images/icons/dark/safari.svg new file mode 100644 index 0000000000..89b50920b2 --- /dev/null +++ b/couchpotato/static/images/icons/dark/safari.svg @@ -0,0 +1 @@ +<svg xmlns="http://www.w3.org/2000/svg" width="100" height="100" viewBox="0 0 16 16"><g><path d="m5.3009374 1.8040626c-3.8007467 0-5.06281241 4.9177307-5.06281241 7.5434374 0 2.6112 1.03020071 3.568437 3.17718751 3.568437 1.3201067 0 3.3655217-0.754232 4.134375-2.698125l-0.3337501-0.145c-0.6673065 0.899413-1.6972299 1.450625-2.8287499 1.450625-1.3926401 
0-1.7409374-0.928511-1.7409376-2.2631245 0-2.9593602 1.4217667-6.7746875 3.0175002-6.7746875 0.6527998 0 0.8849998 0.4207867 0.885 0.885 0 0.5512532-0.3193402 1.1606075-0.6675002 1.3346875C6.1568766 5.0244592 6.4760182 5.0825 6.708125 5.0825c0.2872515 0 0.5038558-0.086278 0.6621875-0.235625-0.080472 0.2391332-0.1240626 0.4831773-0.1240626 0.72875 0 1.04448 0.5512275 1.4071875 1.5521875 1.4071875-0.014507-0.13056-0.2031249-0.2467224-0.2031249-0.9865626 0-2.2050133 1.4074195-3.5249998 3.7574995-3.525 1.276588 0 1.798751 0.8993192 1.798751 1.9728127 0 1.5522132-1.073578 3.4526966-2.538751 3.5687499l1.03-4.8453125-2.146875 0.2903125-1.9731245 9.2262505h2.0890625l0.885-4.1343755h0.0725c2.6112 0 4.1925-2.0454657 4.1925-3.8878125 0-1.4941867-1.044656-2.8578124-3.42375-2.8578124-1.828958 0-3.7977012 1.0501832-4.653125 2.395 0.02578-0.1395922 0.03875-0.2906221 0.03875-0.4512501 0-1.4071467-1.0591859-1.9437499-2.4228126-1.9437499z"/></g></svg> \ No newline at end of file diff --git a/couchpotato/static/images/icons/favicon.ico b/couchpotato/static/images/icons/favicon.ico new file mode 100644 index 0000000000..0342c6e5c9 Binary files /dev/null and b/couchpotato/static/images/icons/favicon.ico differ diff --git a/couchpotato/static/images/icons/ios.png b/couchpotato/static/images/icons/ios.png new file mode 100644 index 0000000000..dd694b38bb Binary files /dev/null and b/couchpotato/static/images/icons/ios.png differ diff --git a/couchpotato/static/images/icons/safari.svg b/couchpotato/static/images/icons/safari.svg new file mode 100644 index 0000000000..89b50920b2 --- /dev/null +++ b/couchpotato/static/images/icons/safari.svg @@ -0,0 +1 @@ +<svg xmlns="http://www.w3.org/2000/svg" width="100" height="100" viewBox="0 0 16 16"><g><path d="m5.3009374 1.8040626c-3.8007467 0-5.06281241 4.9177307-5.06281241 7.5434374 0 2.6112 1.03020071 3.568437 3.17718751 3.568437 1.3201067 0 3.3655217-0.754232 4.134375-2.698125l-0.3337501-0.145c-0.6673065 0.899413-1.6972299 1.450625-2.8287499 
1.450625-1.3926401 0-1.7409374-0.928511-1.7409376-2.2631245 0-2.9593602 1.4217667-6.7746875 3.0175002-6.7746875 0.6527998 0 0.8849998 0.4207867 0.885 0.885 0 0.5512532-0.3193402 1.1606075-0.6675002 1.3346875C6.1568766 5.0244592 6.4760182 5.0825 6.708125 5.0825c0.2872515 0 0.5038558-0.086278 0.6621875-0.235625-0.080472 0.2391332-0.1240626 0.4831773-0.1240626 0.72875 0 1.04448 0.5512275 1.4071875 1.5521875 1.4071875-0.014507-0.13056-0.2031249-0.2467224-0.2031249-0.9865626 0-2.2050133 1.4074195-3.5249998 3.7574995-3.525 1.276588 0 1.798751 0.8993192 1.798751 1.9728127 0 1.5522132-1.073578 3.4526966-2.538751 3.5687499l1.03-4.8453125-2.146875 0.2903125-1.9731245 9.2262505h2.0890625l0.885-4.1343755h0.0725c2.6112 0 4.1925-2.0454657 4.1925-3.8878125 0-1.4941867-1.044656-2.8578124-3.42375-2.8578124-1.828958 0-3.7977012 1.0501832-4.653125 2.395 0.02578-0.1395922 0.03875-0.2906221 0.03875-0.4512501 0-1.4071467-1.0591859-1.9437499-2.4228126-1.9437499z"/></g></svg> \ No newline at end of file diff --git a/couchpotato/static/images/icons/windows.png b/couchpotato/static/images/icons/windows.png new file mode 100644 index 0000000000..84bb3b9c6a Binary files /dev/null and b/couchpotato/static/images/icons/windows.png differ diff --git a/couchpotato/static/images/imdb_watchlist.png b/couchpotato/static/images/imdb_watchlist.png index a0250b3e56..8a84654dde 100644 Binary files a/couchpotato/static/images/imdb_watchlist.png and b/couchpotato/static/images/imdb_watchlist.png differ diff --git a/couchpotato/static/images/notify.couch.large.png b/couchpotato/static/images/notify.couch.large.png new file mode 100644 index 0000000000..e918c0493a Binary files /dev/null and b/couchpotato/static/images/notify.couch.large.png differ diff --git a/couchpotato/static/images/notify.couch.medium.png b/couchpotato/static/images/notify.couch.medium.png new file mode 100644 index 0000000000..939aca406d Binary files /dev/null and b/couchpotato/static/images/notify.couch.medium.png differ diff --git 
a/couchpotato/static/images/xbmc-notify.png b/couchpotato/static/images/notify.couch.small.png similarity index 100% rename from couchpotato/static/images/xbmc-notify.png rename to couchpotato/static/images/notify.couch.small.png diff --git a/couchpotato/static/images/right.arrow.png b/couchpotato/static/images/right.arrow.png deleted file mode 100644 index 399db7608d..0000000000 Binary files a/couchpotato/static/images/right.arrow.png and /dev/null differ diff --git a/couchpotato/static/images/sprite.png b/couchpotato/static/images/sprite.png deleted file mode 100644 index 5ba4d00e5d..0000000000 Binary files a/couchpotato/static/images/sprite.png and /dev/null differ diff --git a/couchpotato/static/images/toTop.gif b/couchpotato/static/images/toTop.gif deleted file mode 100644 index 110534a2f2..0000000000 Binary files a/couchpotato/static/images/toTop.gif and /dev/null differ diff --git a/couchpotato/static/scripts/api.js b/couchpotato/static/scripts/api.js index 5e507bc17d..df3d0ff39a 100644 --- a/couchpotato/static/scripts/api.js +++ b/couchpotato/static/scripts/api.js @@ -1,29 +1,29 @@ var ApiClass = new Class({ setup: function(options){ - var self = this + var self = this; self.options = options; }, request: function(type, options){ - var self = this; + var self = this, + r_type = self.options.is_remote ? 'JSONP' : 'JSON'; - var r_type = self.options.is_remote ? 'JSONP' : 'JSON'; return new Request[r_type](Object.merge({ 'callbackKey': 'callback_func', 'method': 'get', - 'url': self.createUrl(type, {'t': randomString()}), - }, options)).send() + 'url': self.createUrl(type, {'t': randomString()}) + }, options)).send(); }, createUrl: function(action, params){ - return this.options.url + (action || 'default') + '/' + (params ? '?'+Object.toQueryString(params) : '') + return this.options.url + (action || 'default') + '/' + (params ? 
'?'+Object.toQueryString(params) : ''); }, getOption: function(name){ - return this.options[name] + return this.options[name]; } }); -window.Api = new ApiClass() \ No newline at end of file +window.Api = new ApiClass(); diff --git a/couchpotato/static/scripts/block.js b/couchpotato/static/scripts/block.js index 82193ca55d..6f18bae3a3 100644 --- a/couchpotato/static/scripts/block.js +++ b/couchpotato/static/scripts/block.js @@ -1,6 +1,6 @@ var BlockBase = new Class({ - Implements: [Options, Events], + Implements: [Options], options: {}, @@ -19,7 +19,7 @@ var BlockBase = new Class({ }, getParent: function(){ - return this.page + return this.page; }, hide: function(){ @@ -31,9 +31,7 @@ var BlockBase = new Class({ }, toElement: function(){ - return this.el + return this.el; } }); - -var Block = BlockBase \ No newline at end of file diff --git a/couchpotato/static/scripts/block/footer.js b/couchpotato/static/scripts/block/footer.js index acec158569..619922fd8b 100644 --- a/couchpotato/static/scripts/block/footer.js +++ b/couchpotato/static/scripts/block/footer.js @@ -1,4 +1,4 @@ -Block.Footer = new Class({ +var BlockFooter = new Class({ Extends: BlockBase, @@ -8,4 +8,4 @@ Block.Footer = new Class({ self.el = new Element('div.footer'); } -}); \ No newline at end of file +}); diff --git a/couchpotato/static/scripts/block/header.js b/couchpotato/static/scripts/block/header.js new file mode 100644 index 0000000000..5d802e67df --- /dev/null +++ b/couchpotato/static/scripts/block/header.js @@ -0,0 +1,52 @@ +var BlockHeader = new Class({ + + Extends: BlockNavigation, + + create: function(){ + var self = this, + animation_options = { + type: dynamics.spring + }, + couch, potato; + + self.parent(); + + self.el.adopt( + self.logo = new Element('a.logo', { + 'href': App.createUrl(''), + 'events': { + 'mouseenter': function(){ + dynamics.animate(couch, { + opacity: 0, + translateX: -50 + }, animation_options); + + dynamics.animate(potato, { + opacity: 1, + translateX: 0 + }, 
animation_options); + }, + 'mouseleave': function(){ + dynamics.animate(couch, { + opacity: 1, + translateX: 0 + }, animation_options); + + dynamics.animate(potato, { + opacity: 0, + translateX: 50 + }, animation_options); + } + } + }).adopt( + couch = new Element('span[text=Couch]'), + potato = new Element('span[text=Potato]') + ), + self.nav + ); + + + + } + +}); diff --git a/couchpotato/static/scripts/block/menu.js b/couchpotato/static/scripts/block/menu.js index 4dc143d440..b800e5a1be 100644 --- a/couchpotato/static/scripts/block/menu.js +++ b/couchpotato/static/scripts/block/menu.js @@ -1,45 +1,133 @@ -Block.Menu = new Class({ +var BlockMenu = new Class({ Extends: BlockBase, + Implements: [Options, Events], options: { 'class': 'menu' }, + lis: null, + create: function(){ var self = this; + self.lis = []; + + self.shown = false; self.el = new Element('div', { 'class': 'more_menu '+self.options['class'] }).adopt( self.wrapper = new Element('div.wrapper').adopt( self.more_option_ul = new Element('ul') ), - new Element('a.button.onlay', { + self.button = new Element('a' + (self.options.button_class ? '.' + self.options.button_class : ''), { + 'text': self.options.button_text || '', 'events': { 'click': function(){ - self.el.toggleClass('show') - self.fireEvent(self.el.hasClass('show') ? 
'open' : 'close') - if(self.el.hasClass('show')) - this.addEvent('outerClick', function(){ - self.el.removeClass('show') - this.removeEvents('outerClick'); - }) - else - this.removeEvents('outerClick'); + if(!self.shown){ + dynamics.css(self.wrapper, { + opacity: 0, + scale: 0.1, + display: 'block' + }); + + dynamics.animate(self.wrapper, { + opacity: 1, + scale: 1 + }, { + type: dynamics.spring, + frequency: 200, + friction: 270, + duration: 800 + }); + + if(self.lis === null) + self.lis = self.more_option_ul.getElements('> li').slice(0, 10); + + self.lis.each(function(li, nr){ + dynamics.css(li, { + opacity: 0, + translateY: 20 + }); + + // Animate to final properties + dynamics.animate(li, { + opacity: 1, + translateY: 0 + }, { + type: dynamics.spring, + frequency: 300, + friction: 435, + duration: 1000, + delay: 100 + nr * 40 + }); + }); + self.shown = true; + } + else { + self.hide(); + } + + self.fireEvent(self.shown ? 'open' : 'close'); + + if(self.shown){ + self.el.addEvent('outerClick', self.removeOuterClick.bind(self)); + this.addEvent('outerClick', function(e) { + if (e.target.get('tag') != 'input') + self.removeOuterClick(); + }); + } + else { + self.removeOuterClick(); + } } } }) - ) + ); }, - addLink: function(tab, position){ + hide: function(){ + var self = this; + + dynamics.animate(self.wrapper, { + opacity: 0, + scale: 0.1 + }, { + type: dynamics.easeInOut, + duration: 300, + friction: 100, + complete: function(){ + dynamics.css(self.wrapper, { + display: 'none' + }); + } + }); + + self.shown = false; + + }, + + removeOuterClick: function(){ var self = this; - var el = new Element('li').adopt(tab).inject(self.more_option_ul, position || 'bottom'); - return el; + + self.hide(); + self.el.removeClass('show'); + self.el.removeEvents('outerClick'); + + self.button.removeEvents('outerClick'); + }, + + addLink: function(tab, position){ + var self = this, + li = new Element('li').adopt(tab).inject(self.more_option_ul, position || 'bottom'); + + self.lis 
= null; + + return li; } -}); \ No newline at end of file +}); diff --git a/couchpotato/static/scripts/block/navigation.js b/couchpotato/static/scripts/block/navigation.js index 85f20c49ad..1201085499 100644 --- a/couchpotato/static/scripts/block/navigation.js +++ b/couchpotato/static/scripts/block/navigation.js @@ -1,43 +1,22 @@ -Block.Navigation = new Class({ +var BlockNavigation = new Class({ Extends: BlockBase, create: function(){ var self = this; - self.el = new Element('div.navigation').adopt( - self.nav = new Element('ul'), - self.backtotop = new Element('a.backtotop', { - 'text': 'back to top', - 'events': { - 'click': function(){ - window.scroll(0,0) - } - }, - 'tween': { - 'duration': 100 - } - }) - ) - - new ScrollSpy({ - min: 400, - onLeave: function(){ - self.backtotop.fade('out') - }, - onEnter: function(){ - self.backtotop.fade('in') - } - }) + self.el = new Element('div.navigation').grab( + self.nav = new Element('ul') + ); }, addTab: function(name, tab){ - var self = this + var self = this; - return new Element('li.tab_'+(name || 'unknown')).adopt( + return new Element('li.tab_'+(name || 'unknown')).grab( new Element('a', tab) - ).inject(self.nav) + ).inject(self.nav); }, @@ -49,4 +28,4 @@ Block.Navigation = new Class({ } -}); \ No newline at end of file +}); diff --git a/couchpotato/static/scripts/combined.base.min.js b/couchpotato/static/scripts/combined.base.min.js new file mode 100644 index 0000000000..8093536255 --- /dev/null +++ b/couchpotato/static/scripts/combined.base.min.js @@ -0,0 +1,2128 @@ +var Uniform = new Class({ + Implements: [ Options ], + options: { + focusedClass: "focused", + holderClass: "ctrlHolder" + }, + initialize: function(options) { + this.setOptions(options); + var focused = this.options.focusedClass; + var holder = "." 
+ this.options.holderClass; + $(document.body).addEvents({ + "focus:relay(input, select, textarea)": function() { + var parent = this.getParent(holder); + if (parent) parent.addClass(focused); + }, + "blur:relay(input, select, textarea)": function() { + var parent = this.getParent(holder); + if (parent) parent.removeClass(focused); + } + }); + } +}); + +var Question = new Class({ + initialize: function(question, hint, answers) { + var self = this; + self.question = question; + self.hint = hint; + self.answers = answers; + self.createQuestion(); + self.answers.each(function(answer) { + self.createAnswer(answer); + }); + }, + createQuestion: function() { + var self = this, h3, hint; + self.container = new Element("div.mask.question").grab(self.inner = new Element("div.inner").adopt(h3 = new Element("h3", { + html: this.question + }), hint = this.hint ? new Element("div.hint", { + html: this.hint + }) : null)).inject(document.body); + requestTimeout(function() { + self.container.addClass("show"); + self.inner.getElements("> *").each(function(el, nr) { + dynamics.css(el, { + opacity: 0, + translateY: 50 + }); + dynamics.animate(el, { + opacity: 1, + translateY: 0 + }, { + type: dynamics.spring, + frequency: 200, + friction: 300, + duration: 800, + delay: 400 + nr * 100 + }); + }); + }, 10); + }, + createAnswer: function(options) { + var self = this; + var answer = new Element("a", Object.merge(options, { + class: "answer button " + (options["class"] || "") + (options.cancel ? 
" cancel" : "") + })).inject(this.inner); + if (options.cancel) { + answer.addEvent("click", self.close.bind(self)); + } else if (options.request) { + answer.addEvent("click", function(e) { + e.stop(); + new Request(Object.merge(options, { + url: options.href, + onComplete: function() { + (options.onComplete || function() {})(); + self.close(); + } + })).send(); + }); + } + }, + close: function() { + var self = this; + var ended = function() { + self.container.dispose(); + self.container.removeEventListener("transitionend", ended); + }; + self.container.addEventListener("transitionend", ended, false); + self.inner.getElements("> *").reverse().each(function(el, nr) { + dynamics.css(el, { + opacity: 1, + translateY: 0 + }); + dynamics.animate(el, { + opacity: 0, + translateY: 50 + }, { + type: dynamics.spring, + frequency: 200, + friction: 300, + duration: 800, + anticipationSize: 175, + anticipationStrength: 400, + delay: nr * 100 + }); + }); + dynamics.setTimeout(function() { + self.container.removeClass("show"); + }, 200); + }, + toElement: function() { + return this.container; + } +}); + +var ScrollSpy = new Class({ + Implements: [ Options, Events ], + options: { + container: window, + max: 0, + min: 0, + mode: "vertical" + }, + initialize: function(options) { + this.setOptions(options); + this.container = document.id(this.options.container); + this.enters = this.leaves = 0; + this.inside = false; + var self = this; + this.listener = function(e) { + var position = self.container.getScroll(), xy = position[self.options.mode == "vertical" ? "y" : "x"], min = typeOf(self.options.min) == "function" ? self.options.min() : self.options.min, max = typeOf(self.options.max) == "function" ? 
self.options.max() : self.options.max; + if (xy >= min && (max === 0 || xy <= max)) { + if (!self.inside) { + self.inside = true; + self.enters++; + self.fireEvent("enter", [ position, self.enters, e ]); + } + self.fireEvent("tick", [ position, self.inside, self.enters, self.leaves, e ]); + } else if (self.inside) { + self.inside = false; + self.leaves++; + self.fireEvent("leave", [ position, self.leaves, e ]); + } + self.fireEvent("scroll", [ position, self.inside, self.enters, self.leaves, e ]); + }; + this.addListener(); + }, + start: function() { + this.container.addEvent("scroll", this.listener); + }, + stop: function() { + this.container.removeEvent("scroll", this.listener); + }, + addListener: function() { + this.start(); + } +}); + +var CouchPotato = new Class({ + Implements: [ Events, Options ], + defaults: { + page: "home", + action: "index", + params: {} + }, + pages: [], + block: [], + initialize: function() { + var self = this; + self.global_events = {}; + }, + setup: function(options) { + var self = this; + self.setOptions(options); + self.c = $(document.body); + self.createLayout(); + self.createPages(); + if (window.location.hash) History.handleInitialState(); else self.openPage(window.location.pathname); + History.addEvent("change", self.openPage.bind(self)); + self.c.addEvent("click:relay(.header a, .navigation a, .movie_details a, .list_list .movie)", self.ripple.bind(self)); + self.c.addEvent("click:relay(a[href^=/]:not([target]))", self.pushState.bind(self)); + self.c.addEvent("click:relay(a[href^=http])", self.openDerefered.bind(self)); + self.touch_device = "ontouchstart" in window || navigator.msMaxTouchPoints; + if (self.touch_device) { + self.c.addClass("touch_enabled"); + FastClick.attach(document.body); + } + window.addEvent("resize", self.resize.bind(self)); + self.resize(); + }, + checkCache: function() { + window.addEventListener("load", function() { + window.applicationCache.addEventListener("updateready", function(e) { + if 
(window.applicationCache.status == window.applicationCache.UPDATEREADY) { + window.applicationCache.swapCache(); + window.location.reload(); + } + }, false); + }, false); + }, + resize: function() { + var self = this; + self.mobile_screen = Math.max(document.documentElement.clientWidth, window.innerWidth || 0) <= 480; + self.c[self.mobile_screen ? "addClass" : "removeClass"]("mobile"); + }, + ripple: function(e, el) { + var self = this, button = el.getCoordinates(), x = e.page.x - button.left, y = e.page.y - button.top, ripple = new Element("div.ripple", { + styles: { + left: x, + top: y + } + }); + ripple.inject(el); + requestTimeout(function() { + ripple.addClass("animate"); + }, 0); + requestTimeout(function() { + ripple.dispose(); + }, 2100); + }, + getOption: function(name) { + try { + return this.options[name]; + } catch (e) { + return null; + } + }, + pushState: function(e, el) { + var self = this; + if (!e.meta && App.isMac() || !e.control && !App.isMac()) { + e.preventDefault(); + var url = el.get("href"); + if (e.event && e.event.button === 1) window.open(url); else if (History.getPath() != url) History.push(url); + } + self.fireEvent("history.push"); + }, + isMac: function() { + return Browser.platform == "mac"; + }, + createLayout: function() { + var self = this; + self.hide_update = !!App.options && App.options.webui_feature && App.options.webui_feature.hide_menuitem_update; + self.block.header = new BlockBase(); + self.c.adopt($(self.block.header).addClass("header").adopt(self.block.navigation = new BlockHeader(self, {}), self.block.search = new BlockSearch(self, {}), self.support = new Element("a.donate.icon-donate", { + href: "https://couchpota.to/support/", + target: "_blank" + }).grab(new Element("span", { + text: "Donate" + })), self.block.more = new BlockMenu(self, { + button_class: "icon-settings" + })), new Element("div.corner_background"), self.content = new Element("div.content").adopt(self.pages_container = new Element("div.pages"), 
self.block.footer = new BlockFooter(self, {}))); + var setting_links = [ new Element("a", { + text: "About CouchPotato", + href: App.createUrl("settings/about") + }), new Element("a", { + text: "Settings", + href: App.createUrl("settings/general") + }), new Element("a", { + text: "Logs", + href: App.createUrl("log") + }), new Element("a", { + text: "Restart", + events: { + click: self.restartQA.bind(self) + } + }), new Element("a", { + text: "Shutdown", + events: { + click: self.shutdownQA.bind(self) + } + }) ]; + if (!self.hide_update) { + setting_links.splice(1, 0, new Element("a", { + text: "Check for Updates", + events: { + click: self.checkForUpdate.bind(self, null) + } + })); + } + setting_links.each(function(a) { + self.block.more.addLink(a); + }); + self.addEvent("setting.save.core.dark_theme", function(enabled) { + document.html[enabled ? "addClass" : "removeClass"]("dark"); + }); + }, + createPages: function() { + var self = this; + var pages = []; + Object.each(Page, function(page_class, class_name) { + var pg = new Page[class_name](self, { + level: 1 + }); + self.pages[class_name] = pg; + pages.include({ + order: pg.order, + name: class_name, + class: pg + }); + }); + pages.stableSort(self.sortPageByOrder).each(function(page) { + page["class"].load(); + self.fireEvent("load" + page.name); + $(page["class"]).inject(self.getPageContainer()); + }); + self.fireEvent("load"); + }, + sortPageByOrder: function(a, b) { + return (a.order || 100) - (b.order || 100); + }, + openPage: function(url) { + var self = this, route = new Route(self.defaults); + route.parse(rep(History.getPath())); + var page_name = route.getPage().capitalize(), action = route.getAction(), params = route.getParams(), current_url = route.getCurrentUrl(), page; + if (current_url == self.current_url) return; + if (self.current_page) self.current_page.hide(); + try { + page = self.pages[page_name] || self.pages.Home; + page.open(action, params, current_url); + page.show(); + } catch (e) { + 
console.error("Can't open page:" + url, e); + } + self.current_page = page; + self.current_url = current_url; + }, + getBlock: function(block_name) { + return this.block[block_name]; + }, + getPage: function(name) { + return this.pages[name]; + }, + getPageContainer: function() { + return this.pages_container; + }, + shutdown: function() { + var self = this; + self.blockPage("You have shutdown. This is what is supposed to happen ;)"); + Api.request("app.shutdown", { + onComplete: self.blockPage.bind(self) + }); + self.checkAvailable(1e3); + }, + shutdownQA: function() { + var self = this; + var q = new Question("Are you sure you want to shutdown CouchPotato?", "", [ { + text: "Shutdown", + class: "shutdown red", + events: { + click: function(e) { + e.preventDefault(); + self.shutdown(); + requestTimeout(q.close.bind(q), 100); + } + } + }, { + text: "No, nevah!", + cancel: true + } ]); + }, + restart: function(message, title) { + var self = this; + self.blockPage(message || "Restarting... please wait. If this takes too long, something must have gone wrong.", title); + Api.request("app.restart"); + self.checkAvailable(1e3); + }, + restartQA: function(e, message, title) { + var self = this; + var q = new Question("Are you sure you want to restart CouchPotato?", "", [ { + text: "Restart", + class: "restart orange", + events: { + click: function(e) { + e.preventDefault(); + self.restart(message, title); + requestTimeout(q.close.bind(q), 100); + } + } + }, { + text: "No, nevah!", + cancel: true + } ]); + }, + checkForUpdate: function(onComplete) { + var self = this; + Updater.check(onComplete); + self.blockPage("Please wait. 
If this takes too long, something must have gone wrong.", "Checking for updates"); + self.checkAvailable(3e3); + }, + checkAvailable: function(delay, onAvailable) { + var self = this; + requestTimeout(function() { + var onFailure = function() { + requestTimeout(function() { + self.checkAvailable(delay, onAvailable); + }, 1e3); + self.fireEvent("unload"); + }; + var request = Api.request("app.available", { + timeout: 2e3, + onTimeout: function() { + request.cancel(); + onFailure(); + }, + onFailure: onFailure, + onSuccess: function() { + if (onAvailable) onAvailable(); + self.unBlockPage(); + self.fireEvent("reload"); + } + }); + }, delay || 0); + }, + blockPage: function(message, title) { + var self = this; + self.unBlockPage(); + self.mask = new Element("div.mask.with_message").adopt(new Element("div.message").adopt(new Element("h1", { + text: title || "Unavailable" + }), new Element("div", { + text: message || "Something must have crashed.. check the logs ;)" + }))).inject(document.body); + createSpinner(self.mask); + requestTimeout(function() { + self.mask.addClass("show"); + }, 10); + }, + unBlockPage: function() { + var self = this; + if (self.mask) self.mask.get("tween").start("opacity", 0).chain(function() { + this.element.destroy(); + }); + }, + createUrl: function(action, params) { + return this.options.base_url + (action ? action + "/" : "") + (params ? "?" 
+ Object.toQueryString(params) : ""); + }, + openDerefered: function(e, el) { + var self = this; + e.stop(); + var url = el.get("href"); + if (self.getOption("dereferer")) { + url = self.getOption("dereferer") + el.get("href"); + } + if (el.get("target") == "_blank" || e.meta && self.isMac() || e.control && !self.isMac()) window.open(url); else window.location = url; + }, + createUserscriptButtons: function() { + var host_url = window.location.protocol + "//" + window.location.host; + return new Element("div.group_userscript").adopt(new Element("div").adopt(new Element("a.userscript.button", { + text: "Install extension", + href: "https://couchpota.to/extension/", + target: "_blank" + }), new Element("span.or[text=or]"), new Element("span.bookmarklet").adopt(new Element("a.button", { + text: "+CouchPotato", + href: "javascript:void((function(){var e=document.createElement('script');e.setAttribute('type','text/javascript');e.setAttribute('charset','UTF-8');e.setAttribute('src','" + host_url + Api.createUrl("userscript.bookmark") + "?host=" + encodeURI(host_url + Api.createUrl("userscript.get") + randomString() + "/") + "&r='+Math.random()*99999999);document.body.appendChild(e)})());", + target: "", + events: { + click: function(e) { + e.stop(); + alert("Drag it to your bookmark ;)"); + } + } + }), new Element("span", { + text: "Б┤╫ Drag this to your bookmarks" + }))), new Element("img", { + src: "https://couchpota.to/media/images/userscript.gif" + })); + }, + on: function(name, handle) { + var self = this; + if (!self.global_events[name]) self.global_events[name] = []; + self.global_events[name].push(handle); + }, + trigger: function(name, args, on_complete) { + var self = this; + if (!self.global_events[name]) { + return; + } + if (!on_complete && typeOf(args) == "function") { + on_complete = args; + args = []; + } + self.global_events[name].each(function(handle) { + requestTimeout(function() { + var results = handle.apply(handle, args || []); + if (on_complete) 
on_complete(results); + }, 0); + }); + }, + off: function(name, handle) { + var self = this; + if (!self.global_events[name]) return; + if (handle) { + self.global_events[name] = self.global_events[name].erase(handle); + } else { + self.global_events[name] = []; + } + } +}); + +window.App = new CouchPotato(); + +var Route = new Class({ + defaults: null, + page: "", + action: "index", + params: {}, + initialize: function(defaults) { + var self = this; + self.defaults = defaults || {}; + }, + parse: function(path) { + var self = this; + if (path == "/" && location.hash) { + path = rep(location.hash.replace("#", "/")); + } + self.current = path.replace(/^\/+|\/+$/g, ""); + var url = self.current.split("/"); + self.page = url.length > 0 ? url.shift() : self.defaults.page; + self.action = url.length > 0 ? url.join("/") : self.defaults.action; + self.params = Object.merge({}, self.defaults.params); + if (url.length > 1) { + var key; + url.each(function(el, nr) { + if (nr % 2 === 0) key = el; else if (key) { + self.params[key] = el; + key = null; + } + }); + } else if (url.length == 1) { + self.params[url] = true; + } + return self; + }, + getPage: function() { + return this.page; + }, + getAction: function() { + return this.action; + }, + getParams: function() { + return this.params; + }, + getCurrentUrl: function() { + return this.current; + }, + get: function(param) { + return this.params[param]; + } +}); + +var p = function() { + if (typeof console !== "undefined" && console !== null) console.log(arguments); +}; + +(function() { + var events; + var check = function(e) { + var target = $(e.target); + var parents = target.getParents(); + events.each(function(item) { + var element = item.element; + if (element != target && !parents.contains(element)) item.fn.call(element, e); + }); + }; + Element.Events.outerClick = { + onAdd: function(fn) { + if (!events) { + document.addEvent("click", check); + events = []; + } + events.push({ + element: this, + fn: fn + }); + }, + 
onRemove: function(fn) { + events = events.filter(function(item) { + return item.element != this || item.fn != fn; + }, this); + if (!events.length) { + document.removeEvent("click", check); + events = null; + } + } + }; +})(); + +function randomString(length, extra) { + var chars = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXTZabcdefghiklmnopqrstuvwxyz" + (extra ? "-._!@#$%^&*()+=" : ""), string_length = length || 8, random_string = ""; + for (var i = 0; i < string_length; i++) { + var rnum = Math.floor(Math.random() * chars.length); + random_string += chars.charAt(rnum); + } + return random_string; +} + +(function() { + var keyPaths = []; + var saveKeyPath = function(path) { + keyPaths.push({ + sign: path[0] === "+" || path[0] === "-" ? parseInt(path.shift() + 1) : 1, + path: path + }); + }; + var valueOf = function(object, path) { + var ptr = object; + path.each(function(key) { + ptr = ptr[key]; + }); + return ptr; + }; + var comparer = function(a, b) { + for (var i = 0, l = keyPaths.length; i < l; i++) { + var aVal = valueOf(a, keyPaths[i].path), bVal = valueOf(b, keyPaths[i].path); + if (aVal > bVal) return keyPaths[i].sign; + if (aVal < bVal) return -keyPaths[i].sign; + } + return 0; + }; + Array.implement({ + sortBy: function() { + keyPaths.empty(); + Array.each(arguments, function(argument) { + switch (typeOf(argument)) { + case "array": + saveKeyPath(argument); + break; + + case "string": + saveKeyPath(argument.match(/[+-]|[^.]+/g)); + break; + } + }); + return this.stableSort(comparer); + } + }); +})(); + +var createSpinner = function(container) { + var spinner = new Element("div.spinner"); + container.grab(spinner); + return spinner; +}; + +var rep = function(pa) { + return pa.replace(Api.getOption("url"), "/").replace(App.getOption("base_url"), "/"); +}; + +var ApiClass = new Class({ + setup: function(options) { + var self = this; + self.options = options; + }, + request: function(type, options) { + var self = this, r_type = self.options.is_remote ? 
"JSONP" : "JSON"; + return new Request[r_type](Object.merge({ + callbackKey: "callback_func", + method: "get", + url: self.createUrl(type, { + t: randomString() + }) + }, options)).send(); + }, + createUrl: function(action, params) { + return this.options.url + (action || "default") + "/" + (params ? "?" + Object.toQueryString(params) : ""); + }, + getOption: function(name) { + return this.options[name]; + } +}); + +window.Api = new ApiClass(); + +var PageBase = new Class({ + Implements: [ Options, Events ], + disable_pointer_onscroll: true, + order: 1, + has_tab: true, + name: "", + icon: null, + parent_page: null, + sub_pages: null, + initialize: function(parent_page, options) { + var self = this; + self.parent_page = parent_page; + self.setOptions(options); + self.el = new Element("div", { + class: "page " + self.getPageClass() + (" level_" + (options.level || 0)) + }).grab(self.content = new Element("div.scroll_content")); + if (self.options.disable_pointer_onscroll) { + App.addEvent("load", function() { + requestTimeout(function() { + if (!App.mobile_screen && !App.getOption("dev")) { + self.content.addEvent("scroll", self.preventHover.bind(self)); + } + }, 100); + }); + } + }, + load: function() { + var self = this; + if (self.has_tab) { + var nav; + if (self.parent_page && self.parent_page.navigation) { + nav = self.parent_page.navigation; + } else { + nav = App.getBlock("navigation"); + } + self.tab = nav.addTab(self.name, { + href: App.createUrl(self.getPageUrl()), + title: self.title, + html: "<span>" + self.name.capitalize() + "</span>", + class: self.icon ? 
"icon-" + self.icon : null + }); + } + if (self.sub_pages) { + self.loadSubPages(); + } + }, + loadSubPages: function() { + var self = this; + var sub_pages = self.sub_pages; + self.sub_pages = []; + sub_pages.each(function(class_name) { + var pg = new (window[self.name.capitalize() + class_name])(self, { + level: 2 + }); + self.sub_pages[class_name] = pg; + self.sub_pages.include({ + order: pg.order, + name: class_name, + class: pg + }); + }); + self.sub_pages.stableSort(self.sortPageByOrder).each(function(page) { + page["class"].load(); + self.fireEvent("load" + page.name); + $(page["class"]).inject(App.getPageContainer()); + }); + }, + sortPageByOrder: function(a, b) { + return (a.order || 100) - (b.order || 100); + }, + open: function(action, params) { + var self = this; + try { + var elements; + if (!self[action + "Action"]) { + elements = self.defaultAction(action, params); + } else { + elements = self[action + "Action"](params); + } + if (elements !== undefined) { + self.content.empty(); + self.content.adopt(elements); + } + App.getBlock("navigation").activate(self.name); + self.fireEvent("opened"); + } catch (e) { + self.errorAction(e); + self.fireEvent("error"); + } + }, + openUrl: function(url) { + if (History.getPath() != url) History.push(url); + }, + getPageUrl: function() { + var self = this; + return (self.parent_page && self.parent_page.getPageUrl ? self.parent_page.getPageUrl() + "/" : "") + self.name; + }, + getPageClass: function() { + var self = this; + return (self.parent_page && self.parent_page.getPageClass ? 
self.parent_page.getPageClass() + "_" : "") + self.name; + }, + errorAction: function(e) { + p("Error, action not found", e); + }, + getName: function() { + return this.name; + }, + show: function() { + this.el.addClass("active"); + }, + hide: function() { + var self = this; + self.el.removeClass("active"); + if (self.sub_pages) { + self.sub_pages.each(function(sub_page) { + sub_page["class"].hide(); + }); + } + }, + preventHover: function() { + var self = this; + if (self.hover_timer) clearRequestTimeout(self.hover_timer); + self.el.addClass("disable_hover"); + self.hover_timer = requestTimeout(function() { + self.el.removeClass("disable_hover"); + }, 200); + }, + toElement: function() { + return this.el; + } +}); + +var Page = {}; + +var BlockBase = new Class({ + Implements: [ Options ], + options: {}, + initialize: function(parent, options) { + var self = this; + self.setOptions(options); + self.page = parent; + self.create(); + }, + create: function() { + this.el = new Element("div.block"); + }, + getParent: function() { + return this.page; + }, + hide: function() { + this.el.hide(); + }, + show: function() { + this.el.show(); + }, + toElement: function() { + return this.el; + } +}); + +var BlockNavigation = new Class({ + Extends: BlockBase, + create: function() { + var self = this; + self.el = new Element("div.navigation").grab(self.nav = new Element("ul")); + }, + addTab: function(name, tab) { + var self = this; + return new Element("li.tab_" + (name || "unknown")).grab(new Element("a", tab)).inject(self.nav); + }, + activate: function(name) { + var self = this; + self.nav.getElements(".active").removeClass("active"); + self.nav.getElements(".tab_" + name).addClass("active"); + } +}); + +var BlockHeader = new Class({ + Extends: BlockNavigation, + create: function() { + var self = this, animation_options = { + type: dynamics.spring + }, couch, potato; + self.parent(); + self.el.adopt(self.logo = new Element("a.logo", { + href: App.createUrl(""), + events: { + 
mouseenter: function() { + dynamics.animate(couch, { + opacity: 0, + translateX: -50 + }, animation_options); + dynamics.animate(potato, { + opacity: 1, + translateX: 0 + }, animation_options); + }, + mouseleave: function() { + dynamics.animate(couch, { + opacity: 1, + translateX: 0 + }, animation_options); + dynamics.animate(potato, { + opacity: 0, + translateX: 50 + }, animation_options); + } + } + }).adopt(couch = new Element("span[text=Couch]"), potato = new Element("span[text=Potato]")), self.nav); + } +}); + +var BlockFooter = new Class({ + Extends: BlockBase, + create: function() { + var self = this; + self.el = new Element("div.footer"); + } +}); + +var BlockMenu = new Class({ + Extends: BlockBase, + Implements: [ Options, Events ], + options: { + class: "menu" + }, + lis: null, + create: function() { + var self = this; + self.lis = []; + self.shown = false; + self.el = new Element("div", { + class: "more_menu " + self.options["class"] + }).adopt(self.wrapper = new Element("div.wrapper").adopt(self.more_option_ul = new Element("ul")), self.button = new Element("a" + (self.options.button_class ? "." + self.options.button_class : ""), { + text: self.options.button_text || "", + events: { + click: function() { + if (!self.shown) { + dynamics.css(self.wrapper, { + opacity: 0, + scale: .1, + display: "block" + }); + dynamics.animate(self.wrapper, { + opacity: 1, + scale: 1 + }, { + type: dynamics.spring, + frequency: 200, + friction: 270, + duration: 800 + }); + if (self.lis === null) self.lis = self.more_option_ul.getElements("> li").slice(0, 10); + self.lis.each(function(li, nr) { + dynamics.css(li, { + opacity: 0, + translateY: 20 + }); + dynamics.animate(li, { + opacity: 1, + translateY: 0 + }, { + type: dynamics.spring, + frequency: 300, + friction: 435, + duration: 1e3, + delay: 100 + nr * 40 + }); + }); + self.shown = true; + } else { + self.hide(); + } + self.fireEvent(self.shown ? 
"open" : "close"); + if (self.shown) { + self.el.addEvent("outerClick", self.removeOuterClick.bind(self)); + this.addEvent("outerClick", function(e) { + if (e.target.get("tag") != "input") self.removeOuterClick(); + }); + } else { + self.removeOuterClick(); + } + } + } + })); + }, + hide: function() { + var self = this; + dynamics.animate(self.wrapper, { + opacity: 0, + scale: .1 + }, { + type: dynamics.easeInOut, + duration: 300, + friction: 100, + complete: function() { + dynamics.css(self.wrapper, { + display: "none" + }); + } + }); + self.shown = false; + }, + removeOuterClick: function() { + var self = this; + self.hide(); + self.el.removeClass("show"); + self.el.removeEvents("outerClick"); + self.button.removeEvents("outerClick"); + }, + addLink: function(tab, position) { + var self = this, li = new Element("li").adopt(tab).inject(self.more_option_ul, position || "bottom"); + self.lis = null; + return li; + } +}); + +Page.Home = new Class({ + Extends: PageBase, + name: "home", + title: "Manage new stuff for things and such", + icon: "home", + indexAction: function() { + var self = this; + if (self.soon_list) { + self.available_list.update(); + if (self.late_list) self.late_list.update(); + return; + } + self.chain = new Chain(); + self.chain.chain(self.createAvailable.bind(self), self.createBigsearch.bind(self), self.createSoon.bind(self), self.createSuggestions.bind(self), self.createCharts.bind(self), self.createLate.bind(self)); + self.chain.callChain(); + }, + createBigsearch: function() { + var self = this; + new Element(".big_search").grab(new BlockSearch(self, { + animate: false + })).inject(self.content); + self.chain.callChain(); + }, + createAvailable: function() { + var self = this; + self.available_list = new MovieList({ + navigation: false, + identifier: "snatched", + load_more: false, + view: "list", + actions: [ MA.MarkAsDone, MA.IMDB, MA.Release, MA.Trailer, MA.Refresh, MA.Readd, MA.Delete, MA.Category, MA.Profile ], + title: "Snatched & 
Available", + description: "These movies have been snatched or have finished downloading", + on_empty_element: new Element("div").adopt(new Element("h2", { + text: "Snatched & Available" + }), new Element("span.no_movies", { + html: 'No snatched movies or anything!? Damn.. <a href="#">Maybe add a movie.</a>', + events: { + click: function(e) { + e.preventDefault(); + $(document.body).getElement(".big_search input").focus(); + } + } + })), + filter: { + release_status: "snatched,missing,available,downloaded,done,seeding", + with_tags: "recent" + }, + limit: null, + onLoaded: function() { + self.chain.callChain(); + }, + onMovieAdded: function(notification) { + var after_search = function(data) { + if (notification.data._id != data.data._id) return; + self.available_list.update(); + App.off("movie.searcher.ended", after_search); + }; + App.on("movie.searcher.ended", after_search); + } + }); + $(self.available_list).inject(self.content); + }, + createSoon: function() { + var self = this; + self.soon_list = new MovieList({ + navigation: false, + identifier: "soon", + limit: 12, + title: "Available soon", + description: "Should be available soon as they will be released on DVD/Blu-ray in the coming weeks.", + filter: { + random: true + }, + actions: [ MA.IMDB, MA.Release, MA.Trailer, MA.Refresh, MA.Delete, MA.Category, MA.Profile ], + load_more: false, + view: "thumb", + force_view: true, + api_call: "dashboard.soon", + onLoaded: function() { + self.chain.callChain(); + } + }); + $(self.soon_list).inject(self.content); + }, + createSuggestions: function() { + var self = this; + self.suggestions_list = new MovieList({ + navigation: false, + identifier: "suggest", + limit: 12, + title: "Suggestions", + description: "Based on your current wanted and managed items", + actions: [ MA.Add, MA.SuggestIgnore, MA.SuggestSeen, MA.IMDB, MA.Trailer ], + load_more: false, + view: "thumb", + force_view: true, + api_call: "suggestion.view", + onLoaded: function() { + 
self.chain.callChain(); + } + }); + $(self.suggestions_list).inject(self.content); + }, + createCharts: function() { + var self = this; + self.charts_list = new Charts({ + onCreated: function() { + self.chain.callChain(); + } + }); + $(self.charts_list).inject(self.content); + }, + createLate: function() { + var self = this; + self.late_list = new MovieList({ + navigation: false, + identifier: "late", + limit: 50, + title: "Still not available", + description: 'Try another quality profile or maybe add more providers in <a href="' + App.createUrl("settings/searcher/providers/") + '">Settings</a>.', + filter: { + late: true + }, + loader: false, + load_more: false, + view: "list", + actions: [ MA.IMDB, MA.Trailer, MA.Refresh, MA.Delete, MA.Category, MA.Profile ], + api_call: "dashboard.soon", + onLoaded: function() { + self.chain.callChain(); + } + }); + $(self.late_list).inject(self.content); + } +}); + +Page.Settings = new Class({ + Extends: PageBase, + order: 50, + name: "settings", + title: "Change settings.", + wizard_only: false, + tabs: {}, + lists: {}, + current: "about", + has_tab: false, + open: function(action, params) { + var self = this; + self.action = action == "index" ? self.default_action : action; + self.params = params; + if (!self.data) self.getData(self.create.bind(self)); else { + self.openTab(action); + } + App.getBlock("navigation").activate(self.name); + }, + openTab: function(action) { + var self = this; + action = (action == "index" ? "about" : action) || self.action; + if (self.current) self.toggleTab(self.current, true); + var tab = self.toggleTab(action); + self.current = tab == self.tabs.general ? "general" : action; + }, + toggleTab: function(tab_name, hide) { + var self = this; + var a = hide ? 
"removeClass" : "addClass"; + var c = "active"; + tab_name = tab_name.split("/")[0]; + var t = self.tabs[tab_name] || self.tabs[self.action] || self.tabs.general; + var subtab = null; + Object.each(self.params, function(param, subtab_name) { + subtab = param; + }); + self.content.getElements("li." + c + " , .tab_content." + c).each(function(active) { + active.removeClass(c); + }); + if (t.subtabs[subtab]) { + t.tab[a](c); + t.subtabs[subtab].tab[a](c); + t.subtabs[subtab].content[a](c); + if (!hide) t.subtabs[subtab].content.fireEvent("activate"); + } else { + t.tab[a](c); + t.content[a](c); + if (!hide) t.content.fireEvent("activate"); + } + return t; + }, + getData: function(onComplete) { + var self = this; + if (onComplete) Api.request("settings", { + useSpinner: true, + spinnerOptions: { + target: self.content + }, + onComplete: function(json) { + self.data = json; + onComplete(json); + } + }); + return self.data; + }, + getValue: function(section, name) { + var self = this; + try { + return self.data.values[section][name]; + } catch (e) { + return ""; + } + }, + showAdvanced: function() { + var self = this; + var c = self.advanced_toggle.checked ? 
"addClass" : "removeClass"; + self.el[c]("show_advanced"); + Cookie.write("advanced_toggle_checked", +self.advanced_toggle.checked, { + duration: 365 + }); + }, + sortByOrder: function(a, b) { + return (a.order || 100) - (b.order || 100); + }, + create: function(json) { + var self = this; + self.navigation = new Element("div.navigation").adopt(new Element("h2[text=Settings]"), new Element("div.advanced_toggle").adopt(new Element("span", { + text: "Show advanced" + }), new Element("label.switch").adopt(self.advanced_toggle = new Element("input[type=checkbox]", { + checked: +Cookie.read("advanced_toggle_checked"), + events: { + change: self.showAdvanced.bind(self) + } + }), new Element("div.toggle")))); + self.tabs_container = new Element("ul.tabs"); + self.containers = new Element("form.uniForm.containers", { + events: { + "click:relay(.enabler.disabled h2)": function(e, el) { + el.getPrevious().getElements(".check").fireEvent("click"); + } + } + }); + self.showAdvanced(); + var options = []; + Object.each(json.options, function(section, section_name) { + section.section_name = section_name; + options.include(section); + }); + options.stableSort(self.sortByOrder).each(function(section) { + var section_name = section.section_name; + section.groups.stableSort(self.sortByOrder).each(function(group) { + if (group.hidden) return; + if (self.wizard_only && !group.wizard) return; + if (!self.tabs[group.tab] || !self.tabs[group.tab].groups) self.createTab(group.tab, {}); + var content_container = self.tabs[group.tab].content; + if (group.subtab) { + if (!self.tabs[group.tab].subtabs[group.subtab]) self.createSubTab(group.subtab, group, self.tabs[group.tab], group.tab); + content_container = self.tabs[group.tab].subtabs[group.subtab].content; + } + if (group.list && !self.lists[group.list]) { + self.lists[group.list] = self.createList(content_container); + } + if (!self.tabs[group.tab].groups[group.name]) self.tabs[group.tab].groups[group.name] = 
self.createGroup(group).inject(group.list ? self.lists[group.list] : content_container).addClass("section_" + section_name); + if (group.type && group.type == "list") { + if (!self.lists[group.name]) self.lists[group.name] = self.createList(content_container); else self.lists[group.name].inject(self.tabs[group.tab].groups[group.name]); + } + group.options.stableSort(self.sortByOrder).each(function(option) { + if (option.hidden) return; + var class_name = (option.type || "string").capitalize(); + var input = new Option[class_name](section_name, option.name, self.getValue(section_name, option.name), option); + input.inject(self.tabs[group.tab].groups[group.name]); + input.fireEvent("injected"); + }); + }); + }); + requestTimeout(function() { + self.el.grab(self.navigation); + self.content.adopt(self.tabs_container, self.containers); + self.fireEvent("create"); + self.openTab(); + }, 0); + }, + createTab: function(tab_name, tab) { + var self = this; + if (self.tabs[tab_name] && self.tabs[tab_name].tab) return self.tabs[tab_name].tab; + var label = tab.label || (tab.name || tab_name).capitalize(); + var tab_el = new Element("li.t_" + tab_name).adopt(new Element("a", { + href: App.createUrl(self.name + "/" + tab_name), + text: label + }).adopt()).inject(self.tabs_container); + if (!self.tabs[tab_name]) self.tabs[tab_name] = { + label: label + }; + self.tabs[tab_name] = Object.merge(self.tabs[tab_name], { + tab: tab_el, + subtabs: {}, + content: new Element("div.tab_content.tab_" + tab_name).inject(self.containers), + groups: {} + }); + return self.tabs[tab_name]; + }, + createSubTab: function(tab_name, tab, parent_tab, parent_tab_name) { + var self = this; + if (parent_tab.subtabs[tab_name]) return parent_tab.subtabs[tab_name]; + if (!parent_tab.subtabs_el) parent_tab.subtabs_el = new Element("ul.subtabs").inject(parent_tab.tab); + var label = tab.subtab_label || tab_name.replace("_", " ").capitalize(); + var tab_el = new Element("li.t_" + tab_name).adopt(new 
Element("a", { + href: App.createUrl(self.name + "/" + parent_tab_name + "/" + tab_name), + text: label + }).adopt()).inject(parent_tab.subtabs_el); + if (!parent_tab.subtabs[tab_name]) parent_tab.subtabs[tab_name] = { + label: label + }; + parent_tab.subtabs[tab_name] = Object.merge(parent_tab.subtabs[tab_name], { + tab: tab_el, + content: new Element("div.tab_content.tab_" + tab_name).inject(self.containers), + groups: {} + }); + return parent_tab.subtabs[tab_name]; + }, + createGroup: function(group) { + var hint; + if (typeOf(group.description) == "array") { + hint = new Element("span.hint.more_hint", { + html: group.description[0] + }); + createTooltip(group.description[1]).inject(hint); + } else { + hint = new Element("span.hint", { + html: group.description || "" + }); + } + var icon; + if (group.icon) { + icon = new Element("span.icon").grab(new Element("img", { + src: "data:image/png;base64," + group.icon + })); + } + var label = new Element("span.group_label", { + text: group.label || group.name.capitalize() + }); + return new Element("fieldset", { + class: (group.advanced ? 
"inlineLabels advanced" : "inlineLabels") + " group_" + (group.name || "") + " subtab_" + (group.subtab || "") + }).grab(new Element("h2").adopt(icon, label, hint)); + }, + createList: function(content_container) { + return new Element("div.option_list").inject(content_container); + } +}); + +var OptionBase = new Class({ + Implements: [ Options, Events ], + klass: "", + focused_class: "focused", + save_on_change: true, + read_only: false, + initialize: function(section, name, value, options) { + var self = this; + self.setOptions(options); + self.section = section; + self.name = name; + self.value = self.previous_value = value; + self.read_only = !(options && !options.readonly); + self.createBase(); + self.create(); + self.createHint(); + self.setAdvanced(); + self.input.addEvents({ + change: self.changed.bind(self), + keyup: self.changed.bind(self) + }); + self.addEvent("injected", self.afterInject.bind(self)); + }, + createBase: function() { + var self = this; + self.el = new Element("div.ctrlHolder." + self.section + "_" + self.name + (self.klass ? "." + self.klass : "") + (self.read_only ? ".read_only" : "")); + }, + create: function() {}, + createLabel: function() { + var self = this; + return new Element("label", { + text: (self.options.label || self.options.name.replace("_", " ")).capitalize() + }); + }, + setAdvanced: function() { + this.el.addClass(this.options.advanced ? 
"advanced" : ""); + }, + createHint: function() { + var self = this; + if (self.options.description) { + if (typeOf(self.options.description) == "array") { + var hint = new Element("p.formHint.more_hint", { + html: self.options.description[0] + }).inject(self.el); + createTooltip(self.options.description[1]).inject(hint); + } else { + new Element("p.formHint", { + html: self.options.description || "" + }).inject(self.el); + } + } + }, + afterInject: function() {}, + changed: function() { + var self = this; + if (self.getValue() != self.previous_value) { + if (self.save_on_change) { + if (self.changed_timer) clearRequestTimeout(self.changed_timer); + self.changed_timer = requestTimeout(self.save.bind(self), 300); + } + self.fireEvent("change"); + } + }, + save: function() { + var self = this, value = self.getValue(), ro = self.read_only; + if (ro) { + console.warn("Unable to save readonly-option " + self.section + "." + self.name); + return; + } + App.fireEvent("setting.save." + self.section + "." + self.name, value); + Api.request("settings.save", { + data: { + section: self.section, + name: self.name, + value: value + }, + useSpinner: true, + spinnerOptions: { + target: self.el + }, + onComplete: self.saveCompleted.bind(self) + }); + }, + saveCompleted: function(json) { + var self = this; + var sc = json.success ? 
"save_success" : "save_failed"; + self.previous_value = self.getValue(); + self.el.addClass(sc); + requestTimeout(function() { + self.el.removeClass(sc); + }, 3e3); + }, + setName: function(name) { + this.name = name; + }, + postName: function() { + var self = this; + return self.section + "[" + self.name + "]"; + }, + getValue: function() { + var self = this; + return self.input.get("value"); + }, + getSettingValue: function() { + return this.value; + }, + inject: function(el, position) { + this.el.inject(el, position); + return this.el; + }, + toElement: function() { + return this.el; + } +}); + +var Option = {}; + +Option.String = new Class({ + Extends: OptionBase, + type: "string", + create: function() { + var self = this; + if (self.read_only) { + self.input = new Element("span", { + text: self.getSettingValue() + }); + } else { + self.input = new Element("input", { + type: "text", + name: self.postName(), + value: self.getSettingValue(), + placeholder: self.getPlaceholder() + }); + } + self.el.adopt(self.createLabel(), self.input); + }, + getPlaceholder: function() { + return this.options.placeholder; + } +}); + +Option.Dropdown = new Class({ + Extends: OptionBase, + create: function() { + var self = this; + self.el.adopt(self.createLabel(), new Element("div.select_wrapper.icon-dropdown").grab(self.input = new Element("select", { + name: self.postName(), + readonly: self.read_only, + disabled: self.read_only + }))); + Object.each(self.options.values, function(value) { + new Element("option", { + text: value[0], + value: value[1] + }).inject(self.input); + }); + self.input.set("value", self.getSettingValue()); + } +}); + +Option.Checkbox = new Class({ + Extends: OptionBase, + type: "checkbox", + create: function() { + var self = this; + var randomId = "r-" + randomString(); + self.el.adopt(self.createLabel().set("for", randomId), self.input = new Element("input", { + name: self.postName(), + type: "checkbox", + checked: self.getSettingValue(), + id: randomId, 
+ readonly: self.read_only, + disabled: self.read_only + })); + }, + getValue: function() { + var self = this; + return +self.input.checked; + } +}); + +Option.Password = new Class({ + Extends: Option.String, + type: "password", + create: function() { + var self = this; + self.el.adopt(self.createLabel(), self.input = new Element("input", { + type: "text", + name: self.postName(), + value: self.getSettingValue() ? "********" : "", + placeholder: self.getPlaceholder(), + readonly: self.read_only, + disabled: self.read_only + })); + self.input.addEvent("focus", function() { + self.input.set("value", ""); + self.input.set("type", "password"); + }); + } +}); + +Option.Bool = new Class({ + Extends: Option.Checkbox +}); + +Option.Enabler = new Class({ + Extends: Option.Bool, + create: function() { + var self = this; + self.el.adopt(new Element("label.switch").adopt(self.input = new Element("input", { + type: "checkbox", + checked: self.getSettingValue(), + id: "r-" + randomString(), + readonly: self.read_only, + disabled: self.read_only + }), new Element("div.toggle"))); + }, + changed: function() { + this.parent(); + this.checkState(); + }, + checkState: function() { + var self = this, enabled = self.getValue(); + self.parentFieldset[enabled ? 
"removeClass" : "addClass"]("disabled"); + }, + afterInject: function() { + var self = this; + self.parentFieldset = self.el.getParent("fieldset").addClass("enabler"); + self.parentList = self.parentFieldset.getParent(".option_list"); + self.el.inject(self.parentFieldset, "top"); + self.checkState(); + } +}); + +Option.Int = new Class({ + Extends: Option.String +}); + +Option.Float = new Class({ + Extends: Option.Int +}); + +Option.Directory = new Class({ + Extends: OptionBase, + type: "span", + browser: null, + save_on_change: false, + use_cache: false, + current_dir: "", + create: function() { + var self = this; + if (self.read_only) { + self.el.adopt(self.createLabel(), self.input = new Element("input", { + type: "text", + name: self.postName(), + value: self.getSettingValue(), + readonly: true, + disabled: true + })); + } else { + self.el.adopt(self.createLabel(), self.directory_inlay = new Element("span.directory", { + events: { + click: self.showBrowser.bind(self) + } + }).adopt(self.input = new Element("input", { + value: self.getSettingValue(), + readonly: self.read_only, + disabled: self.read_only, + events: { + change: self.filterDirectory.bind(self), + keydown: function(e) { + if (e.key == "enter" || e.key == "tab") e.stop(); + }, + keyup: self.filterDirectory.bind(self), + paste: self.filterDirectory.bind(self) + } + }))); + } + self.cached = {}; + }, + filterDirectory: function(e) { + var self = this, value = self.getValue(), path_sep = Api.getOption("path_sep"), active_selector = "li:not(.blur):not(.empty)", first; + if (e.key == "enter" || e.key == "tab") { + e.stop(); + first = self.dir_list.getElement(active_selector); + if (first) { + self.selectDirectory(first.get("data-value")); + } + } else { + if (value.substr(-1) == path_sep) { + if (self.current_dir != value) self.selectDirectory(value); + } else { + var pd = self.getParentDir(value); + if (self.current_dir != pd) self.getDirs(pd); + var folder_filter = value.split(path_sep).getLast(); + 
self.dir_list.getElements("li").each(function(li) { + var valid = li.get("text").substr(0, folder_filter.length).toLowerCase() != folder_filter.toLowerCase(); + li[valid ? "addClass" : "removeClass"]("blur"); + }); + first = self.dir_list.getElement(active_selector); + if (first) { + if (!self.dir_list_scroll) self.dir_list_scroll = new Fx.Scroll(self.dir_list, { + transition: "quint:in:out" + }); + self.dir_list_scroll.toElement(first); + } + } + } + }, + selectDirectory: function(dir) { + var self = this; + self.input.set("value", dir); + self.getDirs(); + }, + previousDirectory: function() { + var self = this; + self.selectDirectory(self.getParentDir()); + }, + caretAtEnd: function() { + var self = this; + self.input.focus(); + if (typeof self.input.selectionStart == "number") { + self.input.selectionStart = self.input.selectionEnd = self.input.get("value").length; + } else if (typeof el.createTextRange != "undefined") { + self.input.focus(); + var range = self.input.createTextRange(); + range.collapse(false); + range.select(); + } + }, + showBrowser: function() { + var self = this; + if (!self.browser || self.browser && !self.browser.isVisible()) self.caretAtEnd(); + if (!self.browser) { + self.browser = new Element("div.directory_list").adopt(self.pointer = new Element("div.pointer"), new Element("div.wrapper").adopt(new Element("div.actions").adopt(self.back_button = new Element("a.back", { + html: "", + events: { + click: self.previousDirectory.bind(self) + } + }), new Element("label", { + text: "Hidden folders" + }).adopt(self.show_hidden = new Element("input[type=checkbox]", { + events: { + change: function() { + self.getDirs(); + } + } + }))), self.dir_list = new Element("ul", { + events: { + "click:relay(li:not(.empty))": function(e, el) { + e.preventDefault(); + self.selectDirectory(el.get("data-value")); + }, + mousewheel: function(e) { + e.stopPropagation(); + } + } + }), new Element("div.actions").adopt(new Element("a.clear.button", { + text: 
"Clear", + events: { + click: function(e) { + self.input.set("value", ""); + self.hideBrowser(e, true); + } + } + }), new Element("a.cancel", { + text: "Cancel", + events: { + click: self.hideBrowser.bind(self) + } + }), new Element("span", { + text: "or" + }), self.save_button = new Element("a.button.save", { + text: "Save", + events: { + click: function(e) { + self.hideBrowser(e, true); + } + } + })))).inject(self.directory_inlay, "before"); + } + self.initial_directory = self.input.get("value"); + self.getDirs(); + self.browser.show(); + self.el.addEvent("outerClick", self.hideBrowser.bind(self)); + }, + hideBrowser: function(e, save) { + var self = this; + e.preventDefault(); + if (save) self.save(); else self.input.set("value", self.initial_directory); + self.browser.hide(); + self.el.removeEvents("outerClick"); + }, + fillBrowser: function(json) { + var self = this, v = self.getValue(); + self.data = json; + var previous_dir = json.parent; + if (v === "") self.input.set("value", json.home); + if (previous_dir.length >= 1 && !json.is_root) { + var prev_dirname = self.getCurrentDirname(previous_dir); + if (previous_dir == json.home) prev_dirname = "Home Folder"; else if (previous_dir == "/" && json.platform == "nt") prev_dirname = "Computer"; + self.back_button.set("data-value", previous_dir); + self.back_button.set("html", "« " + prev_dirname); + self.back_button.show(); + } else { + self.back_button.hide(); + } + if (self.use_cache) if (!json) json = self.cached[v]; else self.cached[v] = json; + self.dir_list.empty(); + if (json.dirs.length > 0) json.dirs.each(function(dir) { + new Element("li", { + "data-value": dir, + text: self.getCurrentDirname(dir) + }).inject(self.dir_list); + }); else new Element("li.empty", { + text: "Selected folder is empty" + }).inject(self.dir_list); + self.dir_list.setStyle("webkitTransform", "scale(1)"); + self.caretAtEnd(); + }, + getDirs: function(dir) { + var self = this, c = dir || self.getValue(); + if (self.cached[c] && 
self.use_cache) { + self.fillBrowser(); + } else { + Api.request("directory.list", { + data: { + path: c, + show_hidden: +self.show_hidden.checked + }, + onComplete: function(json) { + self.current_dir = c; + self.fillBrowser(json); + } + }); + } + }, + getParentDir: function(dir) { + var self = this; + if (!dir && self.data && self.data.parent) return self.data.parent; + var v = dir || self.getValue(); + var sep = Api.getOption("path_sep"); + var dirs = v.split(sep); + if (dirs.pop() === "") dirs.pop(); + return dirs.join(sep) + sep; + }, + getCurrentDirname: function(dir) { + var dir_split = dir.split(Api.getOption("path_sep")); + return dir_split[dir_split.length - 2] || Api.getOption("path_sep"); + }, + getValue: function() { + var self = this; + return self.input.get("value"); + } +}); + +Option.Directories = new Class({ + Extends: Option.String, + directories: [], + afterInject: function() { + var self = this; + self.el.setStyle("display", "none"); + self.directories = []; + self.getSettingValue().each(function(value) { + self.addDirectory(value); + }); + self.addDirectory(); + }, + addDirectory: function(value) { + var self = this; + var has_empty = false; + self.directories.each(function(dir) { + if (!dir.getValue()) has_empty = true; + }); + if (has_empty) return; + var dir = new Option.Directory(self.section, self.name, value || "", self.options); + var parent = self.el.getParent("fieldset"); + var dirs = parent.getElements(".multi_directory"); + if (dirs.length === 0) $(dir).inject(parent); else $(dir).inject(dirs.getLast(), "after"); + dir.save = self.saveItems.bind(self); + $(dir).getElement("label").set("text", "Movie Folder"); + $(dir).getElement(".formHint").destroy(); + $(dir).addClass("multi_directory"); + if (!value) $(dir).addClass("is_empty"); + new Element("a.icon-delete.delete", { + events: { + click: self.delItem.bind(self, dir) + } + }).inject(dir); + self.directories.include(dir); + }, + delItem: function(dir) { + var self = this; + 
self.directories.erase(dir); + $(dir).destroy(); + self.saveItems(); + self.addDirectory(); + }, + saveItems: function() { + var self = this; + var dirs = []; + self.directories.each(function(dir) { + if (dir.getValue()) { + $(dir).removeClass("is_empty"); + dirs.include(dir.getValue()); + } else $(dir).addClass("is_empty"); + }); + self.input.set("value", JSON.encode(dirs)); + self.input.fireEvent("change"); + self.addDirectory(); + } +}); + +Option.Choice = new Class({ + Extends: Option.String, + klass: "choice", + afterInject: function() { + var self = this; + var wrapper = new Element("div.select_wrapper.icon-dropdown").grab(self.select = new Element("select.select", { + events: { + change: self.addSelection.bind(self) + } + }).grab(new Element("option[text=Add option]"))); + var o = self.options.options; + Object.each(o.choices, function(label, choice) { + new Element("option", { + text: label, + value: o.pre + choice + o.post + }).inject(self.select); + }); + wrapper.inject(self.input, "after"); + }, + addSelection: function() { + var self = this; + self.input.set("value", self.input.get("value") + self.select.get("value")); + self.input.fireEvent("change"); + } +}); + +Option.Combined = new Class({ + Extends: Option.String, + afterInject: function() { + var self = this; + self.fieldset = self.input.getParent("fieldset"); + self.combined_list = new Element("div.combined_table").inject(self.fieldset.getElement("h2"), "after"); + self.values = {}; + self.inputs = {}; + self.items = []; + self.labels = {}; + self.descriptions = {}; + self.options.combine.each(function(name) { + self.inputs[name] = self.fieldset.getElement("input[name=" + self.section + "[" + name + "]]"); + var values = self.inputs[name].get("value").split(","); + values.each(function(value, nr) { + if (!self.values[nr]) self.values[nr] = {}; + self.values[nr][name] = value.trim(); + }); + self.inputs[name].getParent(".ctrlHolder").setStyle("display", "none"); + 
self.inputs[name].addEvent("change", self.addEmpty.bind(self)); + }); + var head = new Element("div.head").inject(self.combined_list); + Object.each(self.inputs, function(input, name) { + var _in = input.getNext(); + self.labels[name] = input.getPrevious().get("text"); + self.descriptions[name] = _in ? _in.get("text") : ""; + new Element("abbr", { + class: name, + text: self.labels[name], + title: self.descriptions[name] + }).inject(head); + }); + Object.each(self.values, function(item) { + self.createItem(item); + }); + self.addEmpty(); + }, + add_empty_timeout: 0, + addEmpty: function() { + var self = this; + if (self.add_empty_timeout) clearRequestTimeout(self.add_empty_timeout); + var has_empty = 0; + self.items.each(function(ctrl_holder) { + var empty_count = 0; + self.options.combine.each(function(name) { + var input = ctrl_holder.getElement("input." + name); + if (input.get("value") === "" || input.get("type") == "checkbox") empty_count++; + }); + has_empty += empty_count == self.options.combine.length ? 1 : 0; + ctrl_holder[empty_count == self.options.combine.length ? "addClass" : "removeClass"]("is_empty"); + }); + if (has_empty > 0) return; + self.add_empty_timeout = requestTimeout(function() { + self.createItem({ + use: true + }); + }, 10); + }, + createItem: function(values) { + var self = this; + var item = new Element("div.ctrlHolder").inject(self.combined_list), value_count = 0, value_empty = 0; + self.options.combine.each(function(name) { + var value = values[name] || ""; + if (name.indexOf("use") != -1) { + var checkbox = new Element("input[type=checkbox]." + name, { + checked: +value, + events: { + click: self.saveCombined.bind(self), + change: self.saveCombined.bind(self) + } + }).inject(item); + } else { + value_count++; + new Element("input[type=text]." 
+ name, { + value: value, + placeholder: self.labels[name] || name, + events: { + keyup: self.saveCombined.bind(self), + change: self.saveCombined.bind(self) + } + }).inject(item); + if (!value) value_empty++; + } + }); + item[value_empty == value_count ? "addClass" : "removeClass"]("is_empty"); + new Element("a.icon-cancel.delete", { + events: { + click: self.deleteCombinedItem.bind(self) + } + }).inject(item); + self.items.include(item); + }, + saveCombined: function() { + var self = this, temp = {}; + self.items.each(function(item, nr) { + self.options.combine.each(function(name) { + var input = item.getElement("input." + name); + if (item.hasClass("is_empty")) return; + if (!temp[name]) temp[name] = []; + temp[name][nr] = input.get("type") == "checkbox" ? +input.get("checked") : input.get("value").trim(); + }); + }); + self.options.combine.each(function(name) { + self.inputs[name].set("value", (temp[name] || []).join(",")); + self.inputs[name].fireEvent("change"); + }); + self.addEmpty(); + }, + deleteCombinedItem: function(e) { + var self = this; + e.preventDefault(); + var item = e.target.getParent(); + self.items.erase(item); + item.destroy(); + self.saveCombined(); + } +}); + +var createTooltip = function(description) { + var tip = new Element("div.tooltip", { + events: { + mouseenter: function() { + tip.addClass("shown"); + }, + mouseleave: function() { + tip.removeClass("shown"); + } + } + }).adopt(new Element("a.icon-info.info"), new Element("div.tip", { + html: description + })); + return tip; +}; + +var AboutSettingTab = new Class({ + tab: "", + content: "", + initialize: function() { + var self = this; + App.addEvent("loadSettings", self.addSettings.bind(self)); + }, + addSettings: function() { + var self = this; + self.settings = App.getPage("Settings"); + self.settings.addEvent("create", function() { + var tab = self.settings.createTab("about", { + label: "About", + name: "about" + }); + self.tab = tab.tab; + self.content = tab.content; + 
self.createAbout(); + }); + self.settings.default_action = "about"; + self.hide_about_dirs = !!App.options && App.options.webui_feature && App.options.webui_feature.hide_about_dirs; + self.hide_about_update = !!App.options && App.options.webui_feature && App.options.webui_feature.hide_about_update; + }, + createAbout: function() { + var self = this; + var millennium = new Date(2008, 7, 16), today = new Date(), one_day = 1e3 * 60 * 60 * 24; + var about_block; + self.settings.createGroup({ + label: "About This CouchPotato", + name: "variables" + }).inject(self.content).adopt((about_block = new Element("dl.info")).adopt(new Element("dt[text=Version]"), self.version_text = new Element("dd.version", { + text: "Getting version..." + }), new Element("dt[text=Updater]"), self.updater_type = new Element("dd.updater"), new Element("dt[text=ID]"), new Element("dd", { + text: App.getOption("pid") + }))); + if (!self.hide_about_update) { + self.version_text.addEvents({ + click: App.checkForUpdate.bind(App, function(json) { + self.fillVersion(json.info); + }), + mouseenter: function() { + this.set("text", "Check for updates"); + }, + mouseleave: function() { + self.fillVersion(Updater.getInfo()); + } + }); + } else { + self.version_text.setProperty("style", "cursor: auto"); + } + if (!self.hide_about_dirs) { + about_block.adopt(new Element("dt[text=Directories]"), new Element("dd", { + text: App.getOption("app_dir") + }), new Element("dd", { + text: App.getOption("data_dir") + }), new Element("dt[text=Startup Args]"), new Element("dd", { + html: App.getOption("args") + }), new Element("dd", { + html: App.getOption("options") + })); + } + if (!self.fillVersion(Updater.getInfo())) Updater.addEvent("loaded", self.fillVersion.bind(self)); + self.settings.createGroup({ + name: "Help Support CouchPotato" + }).inject(self.content).adopt(new Element("div.usenet").adopt(new Element("div.text").adopt(new Element("span", { + text: "Help support CouchPotato and save some money for yourself 
by signing up for an account at" + }), new Element("a", { + href: "https://usenetserver.com/partners/?a_aid=couchpotato&a_bid=3f357c6f", + target: "_blank", + text: "UsenetServer" + }), new Element("span[text=or]"), new Element("a", { + href: "https://www.newshosting.com/partners/?a_aid=couchpotato&a_bid=a0b022df", + target: "_blank", + text: "Newshosting" + }), new Element("span", { + text: ". For as low as $7.95 per month, youБ─≥ll get:" + })), new Element("ul").adopt(new Element("li.icon-ok", { + text: Math.ceil((today.getTime() - millennium.getTime()) / one_day) + " days retention" + }), new Element("li.icon-ok[text=No speed or download limits]"), new Element("li.icon-ok[text=Free SSL Encrypted connections]"))), new Element("div.donate", { + html: 'Or support me via: <iframe src="https://couchpota.to/donate.html" scrolling="no"></iframe>' + })); + }, + fillVersion: function(json) { + if (!json) return; + var self = this; + var date = new Date(json.version.date * 1e3); + self.version_text.set("text", json.version.hash + (json.version.date ? " (" + date.toLocaleString() + ")" : "")); + self.updater_type.set("text", json.version.type != json.branch ? 
json.version.type + ", " + json.branch : json.branch); + } +}); + +window.addEvent("domready", function() { + new AboutSettingTab(); +}); + +window.addEvent("domready", function() { + var b = $(document.body), login_page = b.hasClass("login"); + if (login_page) { + var form = b.getElement("form"), els = b.getElements("h1, .username, .password, .remember_me, .button"); + els.each(function(el, nr) { + dynamics.css(el, { + opacity: 0, + translateY: 50 + }); + dynamics.animate(el, { + opacity: 1, + translateY: 0 + }, { + type: dynamics.spring, + frequency: 200, + friction: 300, + duration: 800, + anticipationSize: 175, + anticipationStrength: 400, + delay: nr * 100 + }); + }); + } +}); \ No newline at end of file diff --git a/couchpotato/static/scripts/combined.plugins.min.js b/couchpotato/static/scripts/combined.plugins.min.js new file mode 100644 index 0000000000..db05942061 --- /dev/null +++ b/couchpotato/static/scripts/combined.plugins.min.js @@ -0,0 +1,3823 @@ +var DownloadersBase = new Class({ + Implements: [ Events ], + initialize: function() { + var self = this; + App.addEvent("loadSettings", self.addTestButtons.bind(self)); + }, + addTestButtons: function() { + var self = this; + var setting_page = App.getPage("Settings"); + setting_page.addEvent("create", function() { + Object.each(setting_page.tabs.downloaders.groups, self.addTestButton.bind(self)); + }); + }, + addTestButton: function(fieldset, plugin_name) { + var self = this, button_name = self.testButtonName(fieldset); + if (button_name.contains("Downloaders")) return; + new Element(".ctrlHolder.test_button").grab(new Element("a.button", { + text: button_name, + events: { + click: function() { + var button = fieldset.getElement(".test_button .button"); + button.set("text", "Connecting..."); + Api.request("download." 
+ plugin_name + ".test", { + onComplete: function(json) { + button.set("text", button_name); + var message; + if (json.success) { + message = new Element("span.success", { + text: "Connection successful" + }).inject(button, "after"); + } else { + var msg_text = "Connection failed. Check logs for details."; + if (json.hasOwnProperty("msg")) msg_text = json.msg; + message = new Element("span.failed", { + text: msg_text + }).inject(button, "after"); + } + requestTimeout(function() { + message.destroy(); + }, 3e3); + } + }); + } + } + })).inject(fieldset); + }, + testButtonName: function(fieldset) { + var name = fieldset.getElement("h2 .group_label").get("text"); + return "Test " + name; + } +}); + +var Downloaders = new DownloadersBase(); + +var UpdaterBase = new Class({ + Implements: [ Events ], + initialize: function() { + var self = this; + App.addEvent("load", self.info.bind(self, 2e3)); + App.addEvent("unload", function() { + if (self.timer) clearRequestTimeout(self.timer); + }); + }, + check: function(onComplete) { + var self = this; + Api.request("updater.check", { + onComplete: function(json) { + if (onComplete) onComplete(json); + if (json.update_available) self.doUpdate(); else { + App.unBlockPage(); + App.trigger("message", [ "No updates available" ]); + } + } + }); + }, + info: function(timeout) { + var self = this; + if (self.timer) clearRequestTimeout(self.timer); + self.timer = requestTimeout(function() { + Api.request("updater.info", { + onComplete: function(json) { + self.json = json; + self.fireEvent("loaded", [ json ]); + if (json.update_version) { + self.createMessage(json); + } else { + if (self.message) self.message.destroy(); + } + } + }); + }, timeout || 0); + }, + getInfo: function() { + return this.json; + }, + createMessage: function(data) { + var self = this; + if (self.message) return; + var changelog = "https://github.com/" + data.repo_name + "/compare/" + data.version.hash + "..." 
+ data.branch; + if (data.update_version.changelog) changelog = data.update_version.changelog + "#" + data.version.hash + "..." + data.update_version.hash; + self.message = new Element("div.message.update").adopt(new Element("span", { + text: "A new version is available" + }), new Element("a", { + href: changelog, + text: "see what has changed", + target: "_blank" + }), new Element("span[text=or]"), new Element("a", { + text: "just update, gogogo!", + events: { + click: self.doUpdate.bind(self) + } + })).inject(App.getBlock("footer")); + }, + doUpdate: function() { + var self = this; + App.blockPage("Please wait while CouchPotato is being updated with more awesome stuff.", "Updating"); + Api.request("updater.update", { + onComplete: function(json) { + if (json.success) self.updating(); else App.unBlockPage(); + } + }); + }, + updating: function() { + requestTimeout(function() { + App.checkAvailable(1e3, function() { + window.location.reload(); + }); + }, 500); + if (self.message) self.message.destroy(); + } +}); + +var Updater = new UpdaterBase(); + +var PutIODownloader = new Class({ + initialize: function() { + var self = this; + App.addEvent("loadSettings", self.addRegisterButton.bind(self)); + }, + addRegisterButton: function() { + var self = this; + var setting_page = App.getPage("Settings"); + setting_page.addEvent("create", function() { + var fieldset = setting_page.tabs.downloaders.groups.putio, l = window.location; + var putio_set = 0; + fieldset.getElements("input[type=text]").each(function(el) { + putio_set += +(el.get("value") !== ""); + }); + new Element(".ctrlHolder").adopt(putio_set > 0 ? 
[ self.unregister = new Element("a.button.red", { + text: 'Unregister "' + fieldset.getElement("input[name*=oauth_token]").get("value") + '"', + events: { + click: function() { + fieldset.getElements("input[name*=oauth_token]").set("value", "").fireEvent("change"); + self.unregister.destroy(); + self.unregister_or.destroy(); + } + } + }), self.unregister_or = new Element("span[text=or]") ] : null, new Element("a.button", { + text: putio_set > 0 ? "Register a different account" : "Register your put.io account", + events: { + click: function() { + Api.request("downloader.putio.auth_url", { + data: { + host: l.protocol + "//" + l.hostname + (l.port ? ":" + l.port : "") + }, + onComplete: function(json) { + window.location = json.url; + } + }); + } + } + })).inject(fieldset.getElement(".test_button"), "before"); + }); + } +}); + +window.addEvent("domready", function() { + new PutIODownloader(); +}); + +var BlockSearch = new Class({ + Extends: BlockBase, + options: { + animate: true + }, + cache: {}, + create: function() { + var self = this; + var focus_timer = 0; + self.el = new Element("div.search_form").adopt(new Element("a.icon-search", { + events: { + click: self.clear.bind(self) + } + }), self.wrapper = new Element("div.wrapper").adopt(self.result_container = new Element("div.results_container", { + events: { + mousewheel: function(e) { + e.stopPropagation(); + } + } + }).grab(self.results = new Element("div.results")), new Element("div.input").grab(self.input = new Element("input", { + placeholder: "Search & add a new media", + events: { + input: self.keyup.bind(self), + paste: self.keyup.bind(self), + change: self.keyup.bind(self), + keyup: self.keyup.bind(self), + focus: function() { + if (focus_timer) clearRequestTimeout(focus_timer); + if (this.get("value")) self.hideResults(false); + }, + blur: function() { + focus_timer = requestTimeout(function() { + self.el.removeClass("focused"); + self.last_q = null; + }, 100); + } + } + })))); + self.mask = new 
Element("div.mask").inject(self.result_container); + }, + clear: function(e) { + var self = this; + e.preventDefault(); + if (self.last_q === "") { + self.input.blur(); + self.last_q = null; + } else { + self.last_q = ""; + self.input.set("value", ""); + self.el.addClass("focused"); + self.input.focus(); + self.media = {}; + self.results.empty(); + self.el.removeClass("filled"); + if (self.options.animate) { + dynamics.css(self.wrapper, { + opacity: 0, + scale: .1 + }); + dynamics.animate(self.wrapper, { + opacity: 1, + scale: 1 + }, { + type: dynamics.spring, + frequency: 200, + friction: 270, + duration: 800 + }); + } + } + }, + hideResults: function(bool) { + var self = this; + if (self.hidden == bool) return; + self.el[bool ? "removeClass" : "addClass"]("shown"); + if (bool) { + History.removeEvent("change", self.hideResults.bind(self, !bool)); + self.el.removeEvent("outerClick", self.hideResults.bind(self, !bool)); + } else { + History.addEvent("change", self.hideResults.bind(self, !bool)); + self.el.addEvent("outerClick", self.hideResults.bind(self, !bool)); + } + self.hidden = bool; + }, + keyup: function() { + var self = this; + self.el[self.q() ? 
"addClass" : "removeClass"]("filled"); + if (self.q() != self.last_q) { + if (self.api_request && self.api_request.isRunning()) self.api_request.cancel(); + if (self.autocomplete_timer) clearRequestTimeout(self.autocomplete_timer); + self.autocomplete_timer = requestTimeout(self.autocomplete.bind(self), 300); + } + }, + autocomplete: function() { + var self = this; + if (!self.q()) { + self.hideResults(true); + return; + } + self.list(); + }, + list: function() { + var self = this, q = self.q(), cache = self.cache[q]; + self.hideResults(false); + if (!cache) { + requestTimeout(function() { + self.mask.addClass("show"); + }, 10); + if (!self.spinner) self.spinner = createSpinner(self.mask); + self.api_request = Api.request("search", { + data: { + q: q + }, + onComplete: self.fill.bind(self, q) + }); + } else self.fill(q, cache); + self.last_q = q; + }, + fill: function(q, json) { + var self = this; + self.cache[q] = json; + self.media = {}; + self.results.empty(); + Object.each(json, function(media) { + if (typeOf(media) == "array") { + Object.each(media, function(me) { + var m = new (window["BlockSearch" + me.type.capitalize() + "Item"])(me); + $(m).inject(self.results); + self.media[m.imdb || "r-" + Math.floor(Math.random() * 1e4)] = m; + if (q == m.imdb) m.showOptions(); + }); + } + }); + self.mask.removeClass("show"); + }, + loading: function(bool) { + this.el[bool ? 
"addClass" : "removeClass"]("loading"); + }, + q: function() { + return this.input.get("value").trim(); + } +}); + +var MovieDetails = new Class({ + Extends: BlockBase, + sections: null, + buttons: null, + initialize: function(parent, options) { + var self = this; + self.sections = {}; + var category = parent.get("category"); + self.el = new Element("div", { + class: "page active movie_details level_" + (options.level || 0) + }).adopt(self.overlay = new Element("div.overlay", { + events: { + click: self.close.bind(self) + } + }).grab(new Element("a.close.icon-left-arrow")), self.content = new Element("div.scroll_content").grab(new Element("div.head").adopt(new Element("h1").grab(self.title_dropdown = new BlockMenu(self, { + class: "title", + button_text: parent.getTitle() + (parent.get("year") ? " (" + parent.get("year") + ")" : ""), + button_class: "icon-dropdown" + })), self.buttons = new Element("div.buttons")))); + var eta_date = parent.getETA("%b %Y"); + self.addSection("description", new Element("div").adopt(new Element("div", { + text: parent.get("plot") + }), new Element("div.meta", { + html: (eta_date ? "<span>ETA:" + eta_date + "</span>" : "") + "<span>" + (parent.get("genres") || []).join(", ") + "</span>" + }))); + var titles = parent.get("info").titles; + $(self.title_dropdown).addEvents({ + "click:relay(li a)": function(e, el) { + e.stopPropagation(); + Api.request("movie.edit", { + data: { + id: parent.get("_id"), + default_title: el.get("text") + } + }); + $(self.title_dropdown).getElements(".icon-ok").removeClass("icon-ok"); + el.addClass("icon-ok"); + self.title_dropdown.button.set("text", el.get("text") + (parent.get("year") ? " (" + parent.get("year") + ")" : "")); + } + }); + titles.each(function(t) { + self.title_dropdown.addLink(new Element("a", { + text: t, + class: parent.get("title") == t ? 
"icon-ok" : "" + })); + }); + }, + addSection: function(name, section_el) { + var self = this; + name = name.toLowerCase(); + self.content.grab(self.sections[name] = new Element("div", { + class: "section section_" + name + }).grab(section_el)); + }, + addButton: function(button) { + var self = this; + self.buttons.grab(button); + }, + open: function() { + var self = this; + self.el.addClass("show"); + document.onkeyup = self.keyup.bind(self); + self.outer_click = function() { + self.close(); + }; + App.addEvent("history.push", self.outer_click); + }, + keyup: function(e) { + if (e.keyCode == 27) { + this.close(); + } + }, + close: function() { + var self = this; + var ended = function() { + self.el.dispose(); + self.overlay.removeEventListener("transitionend", ended); + document.onkeyup = null; + }; + self.overlay.addEventListener("transitionend", ended, false); + self.el.removeClass("show"); + App.removeEvent("history.push", self.outer_click); + } +}); + +var MovieList = new Class({ + Implements: [ Events, Options ], + options: { + api_call: "media.list", + navigation: true, + limit: 50, + load_more: true, + loader: true, + menu: [], + add_new: false, + force_view: false + }, + available_views: [ "thumb", "list" ], + movies: [], + movies_added: {}, + total_movies: 0, + letters: {}, + filter: null, + initialize: function(options) { + var self = this; + self.setOptions(options); + self.offset = 0; + self.filter = self.options.filter || { + starts_with: null, + search: null + }; + self.el = new Element("div.movies").adopt(self.title = self.options.title ? new Element("h2", { + text: self.options.title, + styles: { + display: "none" + } + }) : null, self.description = self.options.description ? 
new Element("div.description", { + html: self.options.description, + styles: { + display: "none" + } + }) : null, self.movie_list = new Element("div", { + events: { + "click:relay(.movie)": function(e, el) { + el.retrieve("klass").onClick(e); + }, + "mouseenter:relay(.movie)": function(e, el) { + e.stopPropagation(); + el.retrieve("klass").onMouseenter(e); + }, + "change:relay(.movie input)": function(e, el) { + e.stopPropagation(); + el = el.getParent(".movie"); + var klass = el.retrieve("klass"); + klass.fireEvent("select"); + klass.select(klass.select_checkbox.get("checked")); + } + } + }), self.load_more = self.options.load_more ? new Element("a.load_more", { + events: { + click: self.loadMore.bind(self) + } + }) : null); + self.changeView(self.getSavedView() || self.options.view || "thumb"); + if (self.options.navigation) self.createNavigation(); + if (self.options.api_call) self.getMovies(); + App.on("movie.added", self.movieAdded.bind(self)); + App.on("movie.deleted", self.movieDeleted.bind(self)); + }, + movieDeleted: function(notification) { + var self = this; + if (self.movies_added[notification.data._id]) { + self.movies.each(function(movie) { + if (movie.get("_id") == notification.data._id) { + movie.destroy(); + delete self.movies_added[notification.data._id]; + self.setCounter(self.counter_count - 1); + self.total_movies--; + } + }); + } + self.checkIfEmpty(); + }, + movieAdded: function(notification) { + var self = this; + self.fireEvent("movieAdded", notification); + if (self.options.add_new && !self.movies_added[notification.data._id] && notification.data.status == self.options.status) { + window.scroll(0, 0); + self.createMovie(notification.data, "top"); + self.setCounter(self.counter_count + 1); + self.checkIfEmpty(); + } + }, + create: function() { + var self = this; + if (self.options.load_more) { + self.scrollspy = new ScrollSpy({ + container: self.el.getParent(), + min: function() { + return self.load_more.getCoordinates().top; + }, + 
onEnter: self.loadMore.bind(self) + }); + } + self.created = true; + }, + addMovies: function(movies, total) { + var self = this; + if (!self.created) self.create(); + if (movies.length < self.options.limit && self.scrollspy) { + self.load_more.hide(); + self.scrollspy.stop(); + } + self.createMovie(movies, "bottom"); + self.total_movies += total; + self.setCounter(total); + self.calculateSelected(); + }, + setCounter: function(count) { + var self = this; + if (!self.navigation_counter) return; + self.counter_count = count; + self.navigation_counter.set("text", count === 1 ? "1 movie" : (count || 0) + " movies"); + if (self.empty_message) { + self.empty_message.destroy(); + self.empty_message = null; + } + if (self.total_movies && count === 0 && !self.empty_message) { + var message = (self.filter.search ? 'for "' + self.filter.search + '"' : "") + (self.filter.starts_with ? " in <strong>" + self.filter.starts_with + "</strong>" : ""); + self.empty_message = new Element(".message", { + html: "No movies found " + message + ".<br/>" + }).grab(new Element("a", { + text: "Reset filter", + events: { + click: function() { + self.filter = { + starts_with: null, + search: null + }; + self.navigation_search_input.set("value", ""); + self.reset(); + self.activateLetter(); + self.getMovies(true); + self.last_search_value = ""; + } + } + })).inject(self.movie_list); + } + }, + createMovie: function(movie, inject_at, nr) { + var self = this, movies = Array.isArray(movie) ? 
movie : [ movie ], movie_els = []; + inject_at = inject_at || "bottom"; + movies.each(function(movie, nr) { + var m = new Movie(self, { + actions: self.options.actions, + view: self.current_view, + onSelect: self.calculateSelected.bind(self) + }, movie); + var el = $(m); + if (inject_at === "bottom") { + movie_els.push(el); + } else { + el.inject(self.movie_list, inject_at); + } + self.movies.include(m); + self.movies_added[movie._id] = true; + }); + if (movie_els.length > 0) { + $(self.movie_list).adopt(movie_els); + } + }, + createNavigation: function() { + var self = this; + var chars = "#ABCDEFGHIJKLMNOPQRSTUVWXYZ"; + self.el.addClass("with_navigation"); + self.navigation = new Element("div.alph_nav").adopt(self.mass_edit_form = new Element("div.mass_edit_form").adopt(new Element("span.select").adopt(self.mass_edit_select = new Element("input[type=checkbox]", { + events: { + change: self.massEditToggleAll.bind(self) + } + }), self.mass_edit_selected = new Element("span.count", { + text: 0 + }), self.mass_edit_selected_label = new Element("span", { + text: "selected" + })), new Element("div.quality").adopt(self.mass_edit_quality = new Element("select"), new Element("a.button.orange", { + text: "Change quality", + events: { + click: self.changeQualitySelected.bind(self) + } + })), new Element("div.delete").adopt(new Element("span[text=or]"), new Element("a.button.red", { + text: "Delete", + events: { + click: self.deleteSelected.bind(self) + } + })), new Element("div.refresh").adopt(new Element("span[text=or]"), new Element("a.button.green", { + text: "Refresh", + events: { + click: self.refreshSelected.bind(self) + } + }))), new Element("div.menus").adopt(self.navigation_counter = new Element("span.counter[title=Total]"), self.filter_menu = new BlockMenu(self, { + class: "filter", + button_class: "icon-filter" + }), self.navigation_actions = new Element("div.actions", { + events: { + click: function(e, el) { + e.preventDefault(); + var new_view = 
self.current_view == "list" ? "thumb" : "list"; + var a = "active"; + self.navigation_actions.getElements("." + a).removeClass(a); + self.changeView(new_view); + self.navigation_actions.getElement("[data-view=" + new_view + "]").addClass(a); + } + } + }), self.navigation_menu = new BlockMenu(self, { + class: "extra", + button_class: "icon-dots" + }))); + Quality.getActiveProfiles().each(function(profile) { + new Element("option", { + value: profile.get("_id"), + text: profile.get("label") + }).inject(self.mass_edit_quality); + }); + self.filter_menu.addLink(self.navigation_search_input = new Element("input", { + title: "Search through " + self.options.identifier, + placeholder: "Search through " + self.options.identifier, + events: { + keyup: self.search.bind(self), + change: self.search.bind(self) + } + })).addClass("search icon-search"); + var available_chars; + self.filter_menu.addEvent("open", function() { + self.navigation_search_input.focus(); + if (!available_chars && (self.navigation.isDisplayed() || self.navigation.isVisible())) Api.request("media.available_chars", { + data: Object.merge({ + status: self.options.status + }, self.filter), + onSuccess: function(json) { + available_chars = json.chars; + available_chars.each(function(c) { + self.letters[c.capitalize()].addClass("available"); + }); + } + }); + }); + self.filter_menu.addLink(self.navigation_alpha = new Element("ul.numbers", { + events: { + "click:relay(li.available)": function(e, el) { + self.activateLetter(el.get("data-letter")); + self.getMovies(true); + } + } + })); + [ "thumb", "list" ].each(function(view) { + var current = self.current_view == view; + new Element("a", { + class: "icon-" + view + (current ? " active " : ""), + "data-view": view + }).inject(self.navigation_actions, current ? 
"top" : "bottom"); + }); + self.letters.all = new Element("li.letter_all.available.active", { + text: "ALL" + }).inject(self.navigation_alpha); + chars.split("").each(function(c) { + self.letters[c] = new Element("li", { + text: c, + class: "letter_" + c, + "data-letter": c + }).inject(self.navigation_alpha); + }); + if (self.options.menu.length > 0) self.options.menu.each(function(menu_item) { + self.navigation_menu.addLink(menu_item); + }); else self.navigation_menu.hide(); + }, + calculateSelected: function() { + var self = this; + var selected = 0, movies = self.movies.length; + self.movies.each(function(movie) { + selected += movie.isSelected() ? 1 : 0; + }); + var indeterminate = selected > 0 && selected < movies, checked = selected == movies && selected > 0; + document.body[selected > 0 ? "addClass" : "removeClass"]("mass_editing"); + if (self.mass_edit_select) { + self.mass_edit_select.set("checked", checked); + self.mass_edit_select.indeterminate = indeterminate; + self.mass_edit_selected.set("text", selected); + } + }, + deleteSelected: function() { + var self = this, ids = self.getSelectedMovies(), help_msg = self.identifier == "wanted" ? "If you do, you won't be able to watch them, as they won't get downloaded!" : "Your files will be safe, this will only delete the references in CouchPotato"; + var qObj = new Question("Are you sure you want to delete " + ids.length + " movie" + (ids.length != 1 ? "s" : "") + "?", help_msg, [ { + text: "Yes, delete " + (ids.length != 1 ? 
"them" : "it"), + class: "delete", + events: { + click: function(e) { + e.preventDefault(); + this.set("text", "Deleting.."); + Api.request("media.delete", { + method: "post", + data: { + id: ids.join(","), + delete_from: self.options.identifier + }, + onSuccess: function() { + qObj.close(); + var erase_movies = []; + self.movies.each(function(movie) { + if (movie.isSelected()) { + $(movie).destroy(); + erase_movies.include(movie); + } + }); + erase_movies.each(function(movie) { + self.movies.erase(movie); + movie.destroy(); + self.setCounter(self.counter_count - 1); + self.total_movies--; + }); + self.calculateSelected(); + } + }); + } + } + }, { + text: "Cancel", + cancel: true + } ]); + }, + changeQualitySelected: function() { + var self = this; + var ids = self.getSelectedMovies(); + Api.request("movie.edit", { + method: "post", + data: { + id: ids.join(","), + profile_id: self.mass_edit_quality.get("value") + }, + onSuccess: self.search.bind(self) + }); + }, + refreshSelected: function() { + var self = this; + var ids = self.getSelectedMovies(); + Api.request("media.refresh", { + method: "post", + data: { + id: ids.join(",") + } + }); + }, + getSelectedMovies: function() { + var self = this; + var ids = []; + self.movies.each(function(movie) { + if (movie.isSelected()) ids.include(movie.get("_id")); + }); + return ids; + }, + massEditToggleAll: function() { + var self = this; + var select = self.mass_edit_select.get("checked"); + self.movies.each(function(movie) { + movie.select(select); + }); + self.calculateSelected(); + }, + reset: function() { + var self = this; + self.movies = []; + if (self.mass_edit_select) self.calculateSelected(); + if (self.navigation_alpha) self.navigation_alpha.getElements(".active").removeClass("active"); + self.offset = 0; + if (self.scrollspy) { + self.scrollspy.start(); + } + }, + activateLetter: function(letter) { + var self = this; + self.reset(); + self.letters[letter || "all"].addClass("active"); + self.filter.starts_with = 
letter; + }, + changeView: function(new_view) { + var self = this; + if (self.available_views.indexOf(new_view) == -1) new_view = "thumb"; + self.el.removeClass(self.current_view + "_list").addClass(new_view + "_list"); + self.current_view = new_view; + Cookie.write(self.options.identifier + "_view", new_view, { + duration: 1e3 + }); + }, + getSavedView: function() { + var self = this; + return self.options.force_view ? self.options.view : Cookie.read(self.options.identifier + "_view"); + }, + search: function() { + var self = this; + if (self.search_timer) clearRequestTimeout(self.search_timer); + self.search_timer = requestTimeout(function() { + var search_value = self.navigation_search_input.get("value"); + if (search_value == self.last_search_value) return; + self.reset(); + self.activateLetter(); + self.filter.search = search_value; + self.getMovies(true); + self.last_search_value = search_value; + }, 250); + }, + update: function() { + var self = this; + self.reset(); + self.getMovies(true); + }, + getMovies: function(reset) { + var self = this; + if (self.scrollspy) { + self.scrollspy.stop(); + self.load_more.set("text", "loading..."); + } + var loader_timeout; + if (self.movies.length === 0 && self.options.loader) { + self.loader_first = new Element("div.mask.loading.with_message").grab(new Element("div.message", { + text: self.options.title ? "Loading '" + self.options.title + "'" : "Loading..." + })).inject(self.el, "top"); + createSpinner(self.loader_first); + var lfc = self.loader_first; + loader_timeout = requestTimeout(function() { + lfc.addClass("show"); + }, 10); + self.el.setStyle("min-height", 220); + } + Api.request(self.options.api_call, { + data: Object.merge({ + type: self.options.type || "movie", + status: self.options.status, + limit_offset: self.options.limit ? 
self.options.limit + "," + self.offset : null + }, self.filter), + onSuccess: function(json) { + if (reset) self.movie_list.empty(); + if (loader_timeout) clearRequestTimeout(loader_timeout); + if (self.loader_first) { + var lf = self.loader_first; + self.loader_first = null; + lf.removeClass("show"); + requestTimeout(function() { + lf.destroy(); + }, 1e3); + self.el.setStyle("min-height", null); + } + self.store(json.movies); + self.addMovies(json.movies, json.total || json.movies.length); + if (self.scrollspy) { + self.load_more.set("text", "load more movies"); + self.scrollspy.start(); + } + self.checkIfEmpty(); + self.fireEvent("loaded"); + } + }); + }, + loadMore: function() { + var self = this; + if (self.offset >= self.options.limit) self.getMovies(); + }, + store: function(movies) { + var self = this; + self.offset += movies.length; + }, + checkIfEmpty: function() { + var self = this; + var is_empty = self.movies.length === 0 && (self.total_movies === 0 || self.total_movies === undefined); + if (self.title) self.title[is_empty ? "hide" : "show"](); + if (self.description) self.description.setStyle("display", [ is_empty ? "none" : "" ]); + if (is_empty && self.options.on_empty_element) { + var ee = typeOf(self.options.on_empty_element) == "function" ? 
self.options.on_empty_element() : self.options.on_empty_element; + ee.inject(self.loader_first || self.title || self.movie_list, "after"); + if (self.navigation) self.navigation.hide(); + self.empty_element = ee; + } else if (self.empty_element) { + self.empty_element.destroy(); + if (self.navigation) self.navigation.show(); + } + }, + toElement: function() { + return this.el; + } +}); + +var MoviesManage = new Class({ + Extends: PageBase, + order: 20, + name: "manage", + title: "Do stuff to your existing movies!", + indexAction: function() { + var self = this; + if (!self.list) { + self.refresh_button = new Element("a", { + title: "Rescan your library for new movies", + text: "Full library refresh", + events: { + click: self.refresh.bind(self, true) + } + }); + self.refresh_quick = new Element("a", { + title: "Just scan for recently changed", + text: "Quick library scan", + events: { + click: self.refresh.bind(self, false) + } + }); + self.list = new MovieList({ + identifier: "manage", + filter: { + status: "done", + release_status: "done", + status_or: 1 + }, + actions: [ MA.IMDB, MA.Files, MA.Trailer, MA.Readd, MA.Delete ], + menu: [ self.refresh_button, self.refresh_quick ], + on_empty_element: new Element("div.empty_manage").adopt(new Element("div", { + text: "Seems like you don't have anything in your library yet. 
Add your existing movie folders in " + }).grab(new Element("a", { + text: "Settings > Manage", + href: App.createUrl("settings/manage") + })), new Element("div.after_manage", { + text: "When you've done that, hit this button → " + }).grab(new Element("a.button.green", { + text: "Hit me, but not too hard", + events: { + click: self.refresh.bind(self, true) + } + }))) + }); + $(self.list).inject(self.content); + self.startProgressInterval(); + } + }, + refresh: function(full) { + var self = this; + if (!self.update_in_progress) { + Api.request("manage.update", { + data: { + full: +full + } + }); + self.startProgressInterval(); + } + }, + startProgressInterval: function() { + var self = this; + self.progress_interval = requestInterval(function() { + if (self.progress_request && self.progress_request.running) return; + self.update_in_progress = true; + self.progress_request = Api.request("manage.progress", { + onComplete: function(json) { + if (!json || !json.progress) { + clearRequestInterval(self.progress_interval); + self.update_in_progress = false; + if (self.progress_container) { + self.progress_container.destroy(); + self.list.update(); + } + } else { + var progress = json.progress; + if (!self.list.navigation) return; + if (!self.progress_container) self.progress_container = new Element("div.progress").inject(self.list, "top"); + self.progress_container.empty(); + var sorted_table = self.parseProgress(json.progress); + sorted_table.each(function(folder) { + var folder_progress = progress[folder]; + new Element("div").adopt(new Element("span.folder", { + text: folder + (folder_progress.eta > 0 ? ", " + new Date().increment("second", folder_progress.eta).timeDiffInWords().replace("from now", "to go") : "") + }), new Element("span.percentage", { + text: folder_progress.total ? 
Math.round((folder_progress.total - folder_progress.to_go) / folder_progress.total * 100) + "%" : "0%" + })).inject(self.progress_container); + }); + } + } + }); + }, 1e3); + }, + parseProgress: function(progress_object) { + var folder, temp_array = []; + for (folder in progress_object) { + if (progress_object.hasOwnProperty(folder)) { + temp_array.push(folder); + } + } + return temp_array.stableSort(); + } +}); + +var MovieAction = new Class({ + Implements: [ Options ], + class_name: "action", + label: "UNKNOWN", + icon: null, + button: null, + details: null, + detail_button: null, + initialize: function(movie, options) { + var self = this; + self.setOptions(options); + self.movie = movie; + self.create(); + if (self.button) { + var wrapper = new Element("div", { + class: self.class_name + }); + self.button.inject(wrapper); + self.button = wrapper; + } + }, + create: function() {}, + getButton: function() { + return this.button || null; + }, + getDetails: function() { + return this.details || null; + }, + getDetailButton: function() { + return this.detail_button || null; + }, + getLabel: function() { + return this.label; + }, + disable: function() { + if (this.el) this.el.addClass("disable"); + }, + enable: function() { + if (this.el) this.el.removeClass("disable"); + }, + getTitle: function() { + var self = this; + try { + return self.movie.getTitle(true); + } catch (e) { + try { + return self.movie.original_title ? 
self.movie.original_title : self.movie.titles[0]; + } catch (e2) { + return "Unknown"; + } + } + }, + get: function(key) { + var self = this; + try { + return self.movie.get(key); + } catch (e) { + return self.movie[key]; + } + }, + createMask: function() { + var self = this; + self.mask = new Element("div.mask", { + styles: { + "z-index": "1" + } + }).inject(self.movie, "top").fade("hide"); + }, + toElement: function() { + return this.el || null; + } +}); + +var MA = {}; + +MA.IMDB = new Class({ + Extends: MovieAction, + id: null, + create: function() { + var self = this; + self.id = self.movie.getIdentifier ? self.movie.getIdentifier() : self.get("imdb"); + self.button = self.createButton(); + self.detail_button = self.createButton(); + if (!self.id) self.disable(); + }, + createButton: function() { + var self = this; + return new Element("a.imdb", { + text: "IMDB", + title: "Go to the IMDB page of " + self.getTitle(), + href: "http://www.imdb.com/title/" + self.id + "/", + target: "_blank" + }); + } +}); + +MA.Release = new Class({ + Extends: MovieAction, + label: "Releases", + create: function() { + var self = this; + App.on("movie.searcher.ended", function(notification) { + if (self.movie.data._id != notification.data._id) return; + self.releases = null; + if (self.options_container) { + if (self.options_container.isDisplayed()) { + self.options_container.destroy(); + self.getDetails(); + } else { + self.options_container.destroy(); + self.options_container = null; + } + } + }); + }, + getDetails: function(refresh) { + var self = this; + if (!self.movie.data.releases || self.movie.data.releases.length === 0) return; + if (!self.options_container || refresh) { + self.options_container = new Element("div.options").grab(self.release_container = new Element("div.releases.table")); + new Element("div.item.head").adopt(new Element("span.name", { + text: "Release name" + }), new Element("span.status", { + text: "Status" + }), new Element("span.quality", { + text: 
"Quality" + }), new Element("span.size", { + text: "Size" + }), new Element("span.age", { + text: "Age" + }), new Element("span.score", { + text: "Score" + }), new Element("span.provider", { + text: "Provider" + }), new Element("span.actions")).inject(self.release_container); + if (self.movie.data.releases) self.movie.data.releases.each(function(release) { + var quality = Quality.getQuality(release.quality) || {}, info = release.info || {}, provider = self.get(release, "provider") + (info.provider_extra ? self.get(release, "provider_extra") : ""); + var release_name = self.get(release, "name"); + if (release.files && release.files.length > 0) { + try { + var movie_file = release.files.filter(function(file) { + var type = File.Type.get(file.type_id); + return type && type.identifier == "movie"; + }).pick(); + release_name = movie_file.path.split(Api.getOption("path_sep")).getLast(); + } catch (e) {} + } + var size = info.size ? Math.floor(self.get(release, "size")) : 0; + size = size ? size < 1e3 ? size + "MB" : Math.round(size * 10 / 1024) / 10 + "GB" : "n/a"; + release.el = new Element("div", { + class: "item " + release.status, + id: "release_" + release._id + }).adopt(new Element("span.name", { + text: release_name, + title: release_name + }), new Element("span.status", { + text: release.status, + class: "status " + release.status + }), new Element("span.quality", { + text: quality.label + (release.is_3d ? " 3D" : "") || "n/a" + }), new Element("span.size", { + text: size + }), new Element("span.age", { + text: self.get(release, "age") + }), new Element("span.score", { + text: self.get(release, "score") + }), new Element("span.provider", { + text: provider, + title: provider + }), new Element("span.actions").adopt(info.detail_url ? 
new Element("a.icon-info", { + href: info.detail_url, + target: "_blank" + }) : new Element("a"), new Element("a.icon-download", { + events: { + click: function(e) { + e.stopPropagation(); + if (!this.hasClass("completed")) self.download(release); + } + } + }), new Element("a", { + class: release.status == "ignored" ? "icon-redo" : "icon-cancel", + events: { + click: function(e) { + e.stopPropagation(); + self.ignore(release); + this.toggleClass("icon-redo"); + this.toggleClass("icon-cancel"); + } + } + }))).inject(self.release_container); + if (release.status == "ignored" || release.status == "failed" || release.status == "snatched") { + if (!self.last_release || self.last_release && self.last_release.status != "snatched" && release.status == "snatched") self.last_release = release; + } else if (!self.next_release && release.status == "available") { + self.next_release = release; + } + var update_handle = function(notification) { + if (notification.data._id != release._id) return; + var q = self.movie.quality.getElement(".q_" + release.quality), new_status = notification.data.status; + release.el.set("class", "item " + new_status); + var status_el = release.el.getElement(".status"); + status_el.set("class", "status " + new_status); + status_el.set("text", new_status); + if (!q && (new_status == "snatched" || new_status == "seeding" || new_status == "done")) q = self.addQuality(release.quality_id); + if (q && !q.hasClass(new_status)) { + q.removeClass(release.status).addClass(new_status); + q.set("title", q.get("title").replace(release.status, new_status)); + } + }; + App.on("release.update_status", update_handle); + }); + if (self.last_release) self.release_container.getElements("#release_" + self.last_release._id).addClass("last_release"); + if (self.next_release) self.release_container.getElements("#release_" + self.next_release._id).addClass("next_release"); + if (self.next_release || self.last_release && [ "ignored", "failed" 
].indexOf(self.last_release.status) === false) { + self.trynext_container = new Element("div.buttons.try_container").inject(self.release_container, "top"); + var nr = self.next_release, lr = self.last_release; + self.trynext_container.adopt(new Element("span.or", { + text: "If anything went wrong, download " + }), lr ? new Element("a.orange", { + text: "the same release again", + events: { + click: function() { + self.download(lr); + } + } + }) : null, nr && lr ? new Element("span.or", { + text: ", " + }) : null, nr ? [ new Element("a.green", { + text: lr ? "another release" : "the best release", + events: { + click: function() { + self.download(nr); + } + } + }), new Element("span.or", { + text: " or pick one below" + }) ] : null); + } + self.last_release = null; + self.next_release = null; + } + return self.options_container; + }, + get: function(release, type) { + return release.info && release.info[type] !== undefined ? release.info[type] : "n/a"; + }, + download: function(release) { + var self = this; + var release_el = self.release_container.getElement("#release_" + release._id), icon = release_el.getElement(".icon-download"); + if (icon) icon.addClass("icon spinner").removeClass("download"); + Api.request("release.manual_download", { + data: { + id: release._id + }, + onComplete: function(json) { + if (icon) icon.removeClass("icon spinner"); + if (json.success) { + if (icon) icon.addClass("completed"); + release_el.getElement(".status").set("text", "snatched"); + } else if (icon) icon.addClass("attention").set("title", "Something went wrong when downloading, please check logs."); + } + }); + }, + ignore: function(release) { + Api.request("release.ignore", { + data: { + id: release._id + } + }); + } +}); + +MA.Trailer = new Class({ + Extends: MovieAction, + id: null, + label: "Trailer", + getDetails: function() { + var self = this, data_url = 'https://www.googleapis.com/youtube/v3/search?q="{title}" {year} 
trailer&maxResults=1&type=video&videoDefinition=high&videoEmbeddable=true&part=snippet&key=AIzaSyAT3li1KjfLidaL6Vt8T92MRU7n4VOrjYk'; + if (!self.player_container) { + self.id = "trailer-" + randomString(); + self.container = new Element("div.trailer_container").adopt(self.player_container = new Element("div.icon-play[id=" + self.id + "]", { + events: { + click: self.watch.bind(self) + } + }).adopt(new Element('span[text="watch"]'), new Element('span[text="trailer"]')), self.background = new Element("div.background")); + requestTimeout(function() { + var url = data_url.substitute({ + title: encodeURI(self.getTitle()), + year: self.get("year") + }); + new Request.JSONP({ + url: url, + onComplete: function(json) { + if (json.items.length > 0) { + self.video_id = json.items[0].id.videoId; + self.background.setStyle("background-image", "url(" + json.items[0].snippet.thumbnails.high.url + ")"); + self.background.addClass("visible"); + } else { + self.container.getParent(".section").addClass("no_trailer"); + } + } + }).send(); + }, 1e3); + } + return self.container; + }, + watch: function() { + var self = this; + new Element("iframe", { + src: "https://www.youtube-nocookie.com/embed/" + self.video_id + "?rel=0&showinfo=0&autoplay=1&showsearch=0&iv_load_policy=3&vq=hd720", + allowfullscreen: "true" + }).inject(self.container); + } +}); + +MA.Category = new Class({ + Extends: MovieAction, + create: function() { + var self = this; + var category = self.movie.get("category"); + self.detail_button = new BlockMenu(self, { + class: "category", + button_text: category ? 
category.label : "No category", + button_class: "icon-dropdown" + }); + var categories = CategoryList.getAll(); + if (categories.length > 0) { + $(self.detail_button).addEvents({ + "click:relay(li a)": function(e, el) { + e.stopPropagation(); + Api.request("movie.edit", { + data: { + id: self.movie.get("_id"), + category_id: el.get("data-id") + } + }); + $(self.detail_button).getElements(".icon-ok").removeClass("icon-ok"); + el.addClass("icon-ok"); + self.detail_button.button.set("text", el.get("text")); + } + }); + self.detail_button.addLink(new Element("a[text=No category]", { + class: !category ? "icon-ok" : "", + "data-id": "" + })); + categories.each(function(c) { + self.detail_button.addLink(new Element("a", { + text: c.get("label"), + class: category && category._id == c.get("_id") ? "icon-ok" : "", + "data-id": c.get("_id") + })); + }); + } else { + $(self.detail_button).hide(); + } + } +}); + +MA.Profile = new Class({ + Extends: MovieAction, + create: function() { + var self = this; + var profile = self.movie.profile; + self.detail_button = new BlockMenu(self, { + class: "profile", + button_text: profile ? profile.get("label") : "No profile", + button_class: "icon-dropdown" + }); + var profiles = Quality.getActiveProfiles(); + if (profiles.length > 0) { + $(self.detail_button).addEvents({ + "click:relay(li a)": function(e, el) { + e.stopPropagation(); + Api.request("movie.edit", { + data: { + id: self.movie.get("_id"), + profile_id: el.get("data-id") + } + }); + $(self.detail_button).getElements(".icon-ok").removeClass("icon-ok"); + el.addClass("icon-ok"); + self.detail_button.button.set("text", el.get("text")); + } + }); + profiles.each(function(pr) { + self.detail_button.addLink(new Element("a", { + text: pr.get("label"), + class: profile && profile.get("_id") == pr.get("_id") ? 
"icon-ok" : "", + "data-id": pr.get("_id") + })); + }); + } else { + $(self.detail_button).hide(); + } + } +}); + +MA.Refresh = new Class({ + Extends: MovieAction, + icon: "refresh", + create: function() { + var self = this; + self.button = self.createButton(); + self.detail_button = self.createButton(); + }, + createButton: function() { + var self = this; + return new Element("a.refresh", { + text: "Refresh", + title: "Refresh the movie info and do a forced search", + events: { + click: self.doRefresh.bind(self) + } + }); + }, + doRefresh: function(e) { + var self = this; + e.stop(); + Api.request("media.refresh", { + data: { + id: self.movie.get("_id") + } + }); + } +}); + +var SuggestBase = new Class({ + Extends: MovieAction, + getIMDB: function() { + return this.movie.data.info.imdb; + }, + refresh: function(json) { + var self = this; + if (json && json.movie) { + self.movie.list.addMovies([ json.movie ], 1); + var last_added = self.movie.list.movies[self.movie.list.movies.length - 1]; + $(last_added).inject(self.movie, "before"); + } + self.movie.destroy(); + } +}); + +MA.Add = new Class({ + Extends: SuggestBase, + label: "Add", + icon: "plus", + create: function() { + var self = this; + self.button = new Element("a.add", { + text: "Add", + title: "Re-add the movie and mark all previous snatched/downloaded as ignored", + events: { + click: function() { + self.movie.openDetails(); + } + } + }); + }, + getDetails: function() { + var self = this; + var m = new BlockSearchMovieItem(self.movie.data.info, { + onAdded: self.movie.data.status == "suggested" ? 
function() { + Api.request("suggestion.ignore", { + data: { + imdb: self.movie.data.info.imdb, + remove_only: true + }, + onComplete: self.refresh.bind(self) + }); + } : function() { + self.movie.destroy(); + } + }); + m.showOptions(); + return m; + } +}); + +MA.SuggestSeen = new Class({ + Extends: SuggestBase, + icon: "eye", + create: function() { + var self = this; + self.button = self.createButton(); + self.detail_button = self.createButton(); + }, + createButton: function() { + var self = this; + return new Element("a.seen", { + text: "Already seen", + title: "Already seen it!", + events: { + click: self.markAsSeen.bind(self) + } + }); + }, + markAsSeen: function(e) { + var self = this; + e.stopPropagation(); + Api.request("suggestion.ignore", { + data: { + imdb: self.getIMDB(), + mark_seen: 1 + }, + onComplete: function(json) { + self.refresh(json); + if (self.movie.details) { + self.movie.details.close(); + } + } + }); + } +}); + +MA.SuggestIgnore = new Class({ + Extends: SuggestBase, + icon: "error", + create: function() { + var self = this; + self.button = self.createButton(); + self.detail_button = self.createButton(); + }, + createButton: function() { + var self = this; + return new Element("a.ignore", { + text: "Ignore", + title: "Don't suggest this movie anymore", + events: { + click: self.markAsIgnored.bind(self) + } + }); + }, + markAsIgnored: function(e) { + var self = this; + e.stopPropagation(); + Api.request("suggestion.ignore", { + data: { + imdb: self.getIMDB() + }, + onComplete: function(json) { + self.refresh(json); + if (self.movie.details) { + self.movie.details.close(); + } + } + }); + } +}); + +MA.ChartIgnore = new Class({ + Extends: SuggestBase, + icon: "error", + create: function() { + var self = this; + self.button = self.createButton(); + self.detail_button = self.createButton(); + }, + createButton: function() { + var self = this; + return new Element("a.ignore", { + text: "Hide", + title: "Don't show this movie in charts", + events: 
{ + click: self.markAsHidden.bind(self) + } + }); + }, + markAsHidden: function(e) { + var self = this; + e.stopPropagation(); + Api.request("charts.ignore", { + data: { + imdb: self.getIMDB() + }, + onComplete: function(json) { + if (self.movie.details) { + self.movie.details.close(); + } + self.movie.destroy(); + } + }); + } +}); + +MA.Readd = new Class({ + Extends: MovieAction, + create: function() { + var self = this, movie_done = self.movie.data.status == "done", snatched; + if (self.movie.data.releases && !movie_done) snatched = self.movie.data.releases.filter(function(release) { + return release.status && (release.status == "snatched" || release.status == "seeding" || release.status == "downloaded" || release.status == "done"); + }).length; + if (movie_done || snatched && snatched > 0) self.el = new Element("a.readd", { + title: "Re-add the movie and mark all previous snatched/downloaded as ignored", + events: { + click: self.doReadd.bind(self) + } + }); + }, + doReadd: function(e) { + var self = this; + e.stopPropagation(); + Api.request("movie.add", { + data: { + identifier: self.movie.getIdentifier(), + ignore_previous: 1 + } + }); + } +}); + +MA.Delete = new Class({ + Extends: MovieAction, + Implements: [ Chain ], + create: function() { + var self = this; + self.button = self.createButton(); + self.detail_button = self.createButton(); + }, + createButton: function() { + var self = this; + return new Element("a.delete", { + text: "Delete", + title: "Remove the movie from this CP list", + events: { + click: self.showConfirm.bind(self) + } + }); + }, + showConfirm: function(e) { + var self = this; + e.stopPropagation(); + self.question = new Question("Are you sure you want to delete <strong>" + self.getTitle() + "</strong>?", "", [ { + text: "Yes, delete " + self.getTitle(), + class: "delete", + events: { + click: function(e) { + e.target.set("text", "Deleting..."); + self.del(); + } + } + }, { + text: "Cancel", + cancel: true + } ]); + }, + del: function() 
{ + var self = this; + var movie = $(self.movie); + Api.request("media.delete", { + data: { + id: self.movie.get("_id"), + delete_from: self.movie.list.options.identifier + }, + onComplete: function() { + if (self.question) self.question.close(); + dynamics.animate(movie, { + opacity: 0, + scale: 0 + }, { + type: dynamics.bezier, + points: [ { + x: 0, + y: 0, + cp: [ { + x: .876, + y: 0 + } ] + }, { + x: 1, + y: 1, + cp: [ { + x: .145, + y: 1 + } ] + } ], + duration: 400, + complete: function() { + self.movie.destroy(); + } + }); + } + }); + } +}); + +MA.Files = new Class({ + Extends: MovieAction, + label: "Files", + getDetails: function() { + var self = this; + if (!self.movie.data.releases || self.movie.data.releases.length === 0) return; + if (!self.files_container) { + self.files_container = new Element("div.files.table"); + new Element("div.item.head").adopt(new Element("span.name", { + text: "File" + }), new Element("span.type", { + text: "Type" + })).inject(self.files_container); + if (self.movie.data.releases) Array.each(self.movie.data.releases, function(release) { + var rel = new Element("div.release").inject(self.files_container); + Object.each(release.files, function(files, type) { + Array.each(files, function(file) { + new Element("div.file.item").adopt(new Element("span.name", { + text: file + }), new Element("span.type", { + text: type + })).inject(rel); + }); + }); + }); + } + return self.files_container; + } +}); + +MA.MarkAsDone = new Class({ + Extends: MovieAction, + create: function() { + var self = this; + self.button = self.createButton(); + self.detail_button = self.createButton(); + }, + createButton: function() { + var self = this; + if (!self.movie.data.releases || self.movie.data.releases.length === 0) return; + return new Element("a.mark_as_done", { + text: "Mark as done", + title: "Remove from available list and move to managed movies", + events: { + click: self.markMovieDone.bind(self) + } + }); + }, + markMovieDone: function() { + var 
self = this; + Api.request("media.delete", { + data: { + id: self.movie.get("_id"), + delete_from: "wanted" + }, + onComplete: function() { + self.movie.destroy(); + } + }); + } +}); + +var Movie = new Class({ + Extends: BlockBase, + Implements: [ Options, Events ], + actions: null, + details: null, + initialize: function(list, options, data) { + var self = this; + self.actions = []; + self.data = data; + self.list = list; + self.buttons = []; + self.el = new Element("a.movie").grab(self.inner = new Element("div.inner")); + self.el.store("klass", self); + self.profile = Quality.getProfile(data.profile_id) || {}; + self.category = CategoryList.getCategory(data.category_id) || {}; + self.parent(self, options); + self.addEvents(); + }, + openDetails: function() { + var self = this; + if (!self.details) { + self.details = new MovieDetails(self, { + level: 3 + }); + self.actions.each(function(action, nr) { + var details = action.getDetails(); + if (details) { + self.details.addSection(action.getLabel(), details); + } else { + var button = action.getDetailButton(); + if (button) { + self.details.addButton(button); + } + } + }); + } + App.getPageContainer().grab(self.details); + requestTimeout(self.details.open.bind(self.details), 20); + }, + addEvents: function() { + var self = this; + self.global_events = {}; + self.global_events["movie.update"] = function(notification) { + if (self.data._id != notification.data._id) return; + self.busy(false); + requestTimeout(function() { + self.update(notification); + }, 2e3); + }; + App.on("movie.update", self.global_events["movie.update"]); + [ "media.busy", "movie.searcher.started" ].each(function(listener) { + self.global_events[listener] = function(notification) { + if (notification.data && (self.data._id == notification.data._id || typeOf(notification.data._id) == "array" && notification.data._id.indexOf(self.data._id) > -1)) self.busy(true); + }; + App.on(listener, self.global_events[listener]); + }); + 
self.global_events["movie.searcher.ended"] = function(notification) { + if (notification.data && self.data._id == notification.data._id) self.busy(false); + }; + App.on("movie.searcher.ended", self.global_events["movie.searcher.ended"]); + self.global_events["release.update_status"] = function(notification) { + var data = notification.data; + if (data && self.data._id == data.media_id) { + if (!self.data.releases) self.data.releases = []; + var updated = false; + self.data.releases.each(function(release) { + if (release._id == data._id) { + release.status = data.status; + updated = true; + } + }); + if (updated) self.updateReleases(); + } + }; + App.on("release.update_status", self.global_events["release.update_status"]); + }, + destroy: function() { + var self = this; + self.el.destroy(); + delete self.list.movies_added[self.get("id")]; + self.list.movies.erase(self); + self.list.checkIfEmpty(); + if (self.details) self.details.close(); + Object.each(self.global_events, function(handle, listener) { + App.off(listener, handle); + }); + }, + busy: function(set_busy, timeout) { + var self = this; + if (!set_busy) { + requestTimeout(function() { + if (self.spinner) { + self.mask.fade("out"); + requestTimeout(function() { + if (self.mask) self.mask.destroy(); + if (self.spinner) self.spinner.destroy(); + self.spinner = null; + self.mask = null; + }, timeout || 400); + } + }, timeout || 1e3); + } else if (!self.spinner) { + self.createMask(); + self.spinner = createSpinner(self.mask); + self.mask.fade("in"); + } + }, + createMask: function() { + var self = this; + self.mask = new Element("div.mask", { + styles: { + "z-index": 4 + } + }).inject(self.el, "top").fade("hide"); + }, + update: function(notification) { + var self = this; + self.actions = []; + self.data = notification.data; + self.inner.empty(); + self.profile = Quality.getProfile(self.data.profile_id) || {}; + self.category = CategoryList.getCategory(self.data.category_id) || {}; + self.create(); + 
self.select(self.select_checkbox.get("checked")); + self.busy(false); + }, + create: function() { + var self = this; + self.el.addClass("status_" + self.get("status")); + var eta_date = self.getETA(); + var rating, stars; + if ([ "suggested", "chart" ].indexOf(self.data.status) > -1 && self.data.info && self.data.info.rating && self.data.info.rating.imdb) { + rating = Array.prototype.slice.call(self.data.info.rating.imdb); + stars = []; + var half_rating = rating[0] / 2; + for (var i = 1; i <= 5; i++) { + if (half_rating >= 1) stars.push(new Element("span.icon-star")); else if (half_rating > 0) stars.push(new Element("span.icon-star-half")); else stars.push(new Element("span.icon-star-empty")); + half_rating -= 1; + } + } + var thumbnail = new Element("div.poster"); + if (self.data.files && self.data.files.image_poster && self.data.files.image_poster.length > 0) { + thumbnail = new Element("div", { + class: "type_image poster", + styles: { + "background-image": "url(" + Api.createUrl("file.cache") + self.data.files.image_poster[0].split(Api.getOption("path_sep")).pop() + ")" + } + }); + } else if (self.data.info && self.data.info.images && self.data.info.images.poster && self.data.info.images.poster.length > 0) { + thumbnail = new Element("div", { + class: "type_image poster", + styles: { + "background-image": "url(" + self.data.info.images.poster[0] + ")" + } + }); + } + self.inner.adopt(self.select_checkbox = new Element("input[type=checkbox]"), new Element("div.poster_container").adopt(thumbnail, self.actions_el = new Element("div.actions")), new Element("div.info").adopt(new Element("div.title").adopt(new Element("span", { + text: self.getTitle() || "n/a" + }), new Element("div.year", { + text: self.data.info.year || "n/a" + })), eta_date ? new Element("div.eta", { + text: eta_date, + title: "ETA" + }) : null, self.quality = new Element("div.quality"), rating ? 
new Element("div.rating[title=" + rating[0] + "]").adopt(stars, new Element("span.votes[text=(" + rating.join(" / ") + ")][title=Votes]")) : null)); + if (!thumbnail) self.el.addClass("no_thumbnail"); + if (self.profile.data) self.profile.getTypes().each(function(type) { + var q = self.addQuality(type.get("quality"), type.get("3d")); + if ((type.finish === true || type.get("finish")) && !q.hasClass("finish")) { + q.addClass("finish"); + q.set("title", q.get("title") + " Will finish searching for this movie if this quality is found."); + } + }); + self.updateReleases(); + }, + onClick: function(e) { + var self = this; + if (e.target.getParents(".actions").length === 0 && e.target != self.select_checkbox) { + e.stopPropagation(); + self.addActions(); + self.openDetails(); + } + }, + addActions: function() { + var self = this; + if (self.actions.length <= 0) { + self.options.actions.each(function(a) { + var action = new a(self), button = action.getButton(); + if (button) { + self.actions_el.grab(button); + self.buttons.push(button); + } + self.actions.push(action); + }); + } + }, + onMouseenter: function() { + var self = this; + if (App.mobile_screen) return; + self.addActions(); + if (self.list.current_view == "thumb") { + self.el.addClass("hover_start"); + requestTimeout(function() { + self.el.removeClass("hover_start"); + }, 300); + dynamics.css(self.inner, { + scale: 1 + }); + dynamics.animate(self.inner, { + scale: .9 + }, { + type: dynamics.bounce + }); + self.buttons.each(function(el, nr) { + dynamics.css(el, { + opacity: 0, + translateY: 50 + }); + dynamics.animate(el, { + opacity: 1, + translateY: 0 + }, { + type: dynamics.spring, + frequency: 200, + friction: 300, + duration: 800, + delay: 100 + nr * 40 + }); + }); + } + }, + updateReleases: function() { + var self = this; + if (!self.data.releases || self.data.releases.length === 0) return; + self.data.releases.each(function(release) { + var q = self.quality.getElement(".q_" + release.quality + 
(release.is_3d ? ".is_3d" : ":not(.is_3d)")), status = release.status; + if (!q && (status == "snatched" || status == "seeding" || status == "done")) q = self.addQuality(release.quality, release.is_3d || false); + if (q && !q.hasClass(status)) { + q.addClass(status); + q.set("title", (q.get("title") ? q.get("title") : "") + " status: " + status); + } + }); + }, + addQuality: function(quality, is_3d) { + var self = this; + var q = Quality.getQuality(quality); + return new Element("span", { + text: q.label + (is_3d ? " 3D" : ""), + class: "q_" + q.identifier + (is_3d ? " is_3d" : ""), + title: "" + }).inject(self.quality); + }, + getTitle: function(prefixed) { + var self = this; + if (self.data.title) return prefixed ? self.data.title : self.getUnprefixedTitle(self.data.title); else if (self.data.info && self.data.info.titles && self.data.info.titles.length > 0) return prefixed ? self.data.info.titles[0] : self.getUnprefixedTitle(self.data.info.titles[0]); + return "Unknown movie"; + }, + getUnprefixedTitle: function(t) { + if (t.substr(0, 4).toLowerCase() == "the ") t = t.substr(4) + ", The"; else if (t.substr(0, 3).toLowerCase() == "an ") t = t.substr(3) + ", An"; else if (t.substr(0, 2).toLowerCase() == "a ") t = t.substr(2) + ", A"; + return t; + }, + getIdentifier: function() { + var self = this; + try { + return self.get("identifiers").imdb; + } catch (e) {} + return self.get("imdb"); + }, + getETA: function(format) { + var self = this, d = new Date(), now = Math.round(+d / 1e3), eta = null, eta_date = ""; + if (self.data.info.release_date) [ self.data.info.release_date.dvd, self.data.info.release_date.theater ].each(function(timestamp) { + if (timestamp > 0 && (eta === null || Math.abs(timestamp - now) < Math.abs(eta - now))) eta = timestamp; + }); + if (eta) { + eta_date = new Date(eta * 1e3); + if (+eta_date / 1e3 < now) { + eta_date = null; + } else { + eta_date = format ? 
eta_date.format(format) : eta_date.format("%b") + (d.getFullYear() != eta_date.getFullYear() ? " " + eta_date.getFullYear() : ""); + } + } + return now + 8035200 > eta ? eta_date : ""; + }, + get: function(attr) { + return this.data[attr] || this.data.info[attr]; + }, + select: function(select) { + var self = this; + self.select_checkbox.set("checked", select); + self.el[self.select_checkbox.get("checked") ? "addClass" : "removeClass"]("checked"); + }, + isSelected: function() { + return this.select_checkbox.get("checked"); + }, + toElement: function() { + return this.el; + } +}); + +Page.Movies = new Class({ + Extends: PageBase, + name: "movies", + icon: "movie", + sub_pages: [ "Wanted", "Manage" ], + default_page: "Wanted", + current_page: null, + initialize: function(parent, options) { + var self = this; + self.parent(parent, options); + self.navigation = new BlockNavigation(); + $(self.navigation).inject(self.el); + }, + defaultAction: function(action, params) { + var self = this; + if (self.current_page) { + self.current_page.hide(); + if (self.current_page.list && self.current_page.list.navigation) self.current_page.list.navigation.dispose(); + } + var route = new Route(); + route.parse(action); + var page_name = route.getPage() != "index" ? 
route.getPage().capitalize() : self.default_page; + var page = self.sub_pages.filter(function(page) { + return page.name == page_name; + }).pick()["class"]; + page.open(route.getAction() || "index", params); + page.show(); + if (page.list && page.list.navigation) page.list.navigation.inject(self.navigation); + self.current_page = page; + self.navigation.activate(page_name.toLowerCase()); + } +}); + +var BlockSearchMovieItem = new Class({ + Implements: [ Options, Events ], + initialize: function(info, options) { + var self = this; + self.setOptions(options); + self.info = info; + self.alternative_titles = []; + self.create(); + }, + create: function() { + var self = this, info = self.info; + var in_library; + if (info.in_library) { + in_library = []; + (info.in_library.releases || []).each(function(release) { + in_library.include(release.quality); + }); + } + self.el = new Element("div.media_result", { + id: info.imdb, + events: { + click: self.showOptions.bind(self) + } + }).adopt(self.thumbnail = info.images && info.images.poster.length > 0 ? new Element("img.thumbnail", { + src: info.images.poster[0], + height: null, + width: null + }) : null, self.options_el = new Element("div.options"), self.data_container = new Element("div.data").grab(self.info_container = new Element("div.info").grab(new Element("h2", { + class: info.in_wanted && info.in_wanted.profile_id || in_library ? "in_library_wanted" : "", + title: self.getTitle() + }).adopt(self.title = new Element("span.title", { + text: self.getTitle() + }), self.year = info.year ? new Element("span.year", { + text: info.year + }) : null, info.in_wanted && info.in_wanted.profile_id ? new Element("span.in_wanted", { + text: "Already in wanted list: " + Quality.getProfile(info.in_wanted.profile_id).get("label") + }) : in_library ? 
new Element("span.in_library", { + text: "Already in library: " + in_library.join(", ") + }) : null)))); + if (info.titles) info.titles.each(function(title) { + self.alternativeTitle({ + title: title + }); + }); + }, + alternativeTitle: function(alternative) { + var self = this; + self.alternative_titles.include(alternative); + }, + getTitle: function() { + var self = this; + try { + return self.info.original_title ? self.info.original_title : self.info.titles[0]; + } catch (e) { + return "Unknown"; + } + }, + get: function(key) { + return this.info[key]; + }, + showOptions: function() { + var self = this; + self.createOptions(); + self.data_container.addClass("open"); + self.el.addEvent("outerClick", self.closeOptions.bind(self)); + }, + closeOptions: function() { + var self = this; + self.data_container.removeClass("open"); + self.el.removeEvents("outerClick"); + }, + add: function(e) { + var self = this; + if (e) e.preventDefault(); + self.loadingMask(); + Api.request("movie.add", { + data: { + identifier: self.info.imdb, + title: self.title_select.get("value"), + profile_id: self.profile_select.get("value"), + category_id: self.category_select.get("value") + }, + onComplete: function(json) { + self.options_el.empty(); + self.options_el.grab(new Element("div.message", { + text: json.success ? "Movie successfully added." : "Movie didn't add properly. Check logs" + })); + self.mask.fade("out"); + self.fireEvent("added"); + }, + onFailure: function() { + self.options_el.empty(); + self.options_el.grab(new Element("div.message", { + text: "Something went wrong, check the logs for more info." 
+ })); + self.mask.fade("out"); + } + }); + }, + createOptions: function() { + var self = this, info = self.info; + if (!self.options_el.hasClass("set")) { + self.options_el.grab(new Element("div").adopt(new Element("div.title").grab(self.title_select = new Element("select", { + name: "title" + })), new Element("div.profile").grab(self.profile_select = new Element("select", { + name: "profile" + })), self.category_select_container = new Element("div.category").grab(self.category_select = new Element("select", { + name: "category" + }).grab(new Element("option", { + value: -1, + text: "None" + }))), new Element("div.add").grab(self.add_button = new Element("a.button", { + text: "Add", + events: { + click: self.add.bind(self) + } + })))); + Array.each(self.alternative_titles, function(alt) { + new Element("option", { + text: alt.title + }).inject(self.title_select); + }); + var categories = CategoryList.getAll(); + if (categories.length === 0) self.category_select_container.hide(); else { + self.category_select_container.show(); + categories.each(function(category) { + new Element("option", { + value: category.data._id, + text: category.data.label + }).inject(self.category_select); + }); + } + var profiles = Quality.getActiveProfiles(); + if (profiles.length == 1) self.profile_select.hide(); + profiles.each(function(profile) { + new Element("option", { + value: profile.get("_id"), + text: profile.get("label") + }).inject(self.profile_select); + }); + self.options_el.addClass("set"); + if (categories.length === 0 && self.title_select.getElements("option").length == 1 && profiles.length == 1 && !(self.info.in_wanted && self.info.in_wanted.profile_id || in_library)) self.add(); + } + }, + loadingMask: function() { + var self = this; + self.mask = new Element("div.mask").inject(self.el).fade("hide"); + createSpinner(self.mask); + self.mask.fade("in"); + }, + toElement: function() { + return this.el; + } +}); + +var MoviesWanted = new Class({ + Extends: PageBase, + order: 
10, + name: "wanted", + title: "Gimme gimme gimme!", + folder_browser: null, + indexAction: function() { + var self = this; + if (!self.list) { + self.manual_search = new Element("a", { + title: "Force a search for the full wanted list", + text: "Search all wanted", + events: { + click: self.doFullSearch.bind(self, true) + } + }); + self.scan_folder = new Element("a", { + title: "Scan a folder and rename all movies in it", + text: "Manual folder scan", + events: { + click: self.scanFolder.bind(self) + } + }); + self.list = new MovieList({ + identifier: "wanted", + status: "active", + actions: [ MA.MarkAsDone, MA.IMDB, MA.Release, MA.Trailer, MA.Refresh, MA.Readd, MA.Delete, MA.Category, MA.Profile ], + add_new: true, + menu: [ self.manual_search, self.scan_folder ], + on_empty_element: function() { + return new Element("div.empty_wanted").adopt(new Element("div.no_movies", { + text: "Seems like you don't have any movies yet.. Maybe add some via search or the extension." + }), App.createUserscriptButtons()); + } + }); + $(self.list).inject(self.content); + requestTimeout(self.startProgressInterval.bind(self), 4e3); + } + }, + doFullSearch: function() { + var self = this; + if (!self.search_in_progress) { + Api.request("movie.searcher.full_search"); + self.startProgressInterval(); + } + }, + startProgressInterval: function() { + var self = this; + var start_text = self.manual_search.get("text"); + self.progress_interval = requestInterval(function() { + if (self.search_progress && self.search_progress.running) return; + self.search_progress = Api.request("movie.searcher.progress", { + onComplete: function(json) { + self.search_in_progress = true; + if (!json.movie) { + clearRequestInterval(self.progress_interval); + self.search_in_progress = false; + self.manual_search.set("text", start_text); + } else { + var progress = json.movie; + self.manual_search.set("text", "Searching.. 
(" + Math.round((progress.total - progress.to_go) / progress.total * 100) + "%)"); + } + } + }); + }, 1e3); + }, + scanFolder: function(e) { + e.stop(); + var self = this; + var options = { + name: "Scan_folder" + }; + if (!self.folder_browser) { + self.folder_browser = new Option.Directory("Scan", "folder", "", options); + self.folder_browser.save = function() { + var folder = self.folder_browser.getValue(); + Api.request("renamer.scan", { + data: { + base_folder: folder + } + }); + }; + self.folder_browser.inject(self.content, "top"); + self.folder_browser.fireEvent("injected"); + self.folder_browser.directory_inlay.hide(); + self.folder_browser.el.removeChild(self.folder_browser.el.firstChild); + self.folder_browser.showBrowser(); + self.folder_browser.browser.getElements(".clear.button").hide(); + self.folder_browser.save_button.text = "Select"; + self.folder_browser.browser.setStyles({ + "z-index": 1e3, + right: 20, + top: 0, + margin: 0 + }); + self.folder_browser.pointer.setStyles({ + right: 20 + }); + } else { + self.folder_browser.showBrowser(); + } + self.list.navigation_menu.hide(); + } +}); + +var Charts = new Class({ + Implements: [ Options, Events ], + shown_once: false, + initialize: function(options) { + var self = this; + self.setOptions(options); + self.create(); + }, + create: function() { + var self = this; + self.el = new Element("div.charts").grab(self.el_refresh_container = new Element("div.refresh").grab(self.el_refreshing_text = new Element("span.refreshing", { + text: "Refreshing charts..." 
+ }))); + self.show(); + requestTimeout(function() { + self.fireEvent("created"); + }, 0); + }, + fill: function(json) { + var self = this; + self.el_refreshing_text.hide(); + if (json && json.count > 0) { + json.charts.sort(function(a, b) { + return a.order - b.order; + }); + Object.each(json.charts, function(chart) { + var chart_list = new MovieList({ + navigation: false, + identifier: chart.name.toLowerCase().replace(/[^a-z0-9]+/g, "_"), + title: chart.name, + description: '<a href="' + chart.url + '" target="_blank">See source</a>', + actions: [ MA.Add, MA.ChartIgnore, MA.IMDB, MA.Trailer ], + load_more: false, + view: "thumb", + force_view: true, + api_call: null + }); + chart_list.store(chart.list); + chart_list.addMovies(chart.list, chart.list.length); + chart_list.checkIfEmpty(); + chart_list.fireEvent("loaded"); + $(chart_list).inject(self.el); + }); + } + self.fireEvent("loaded"); + }, + show: function() { + var self = this; + self.el.show(); + if (!self.shown_once) { + requestTimeout(function() { + self.api_request = Api.request("charts.view", { + onComplete: self.fill.bind(self) + }); + }, 100); + self.shown_once = true; + } + }, + toElement: function() { + return this.el; + } +}); + +var TraktAutomation = new Class({ + initialize: function() { + var self = this; + App.addEvent("loadSettings", self.addRegisterButton.bind(self)); + }, + addRegisterButton: function() { + var self = this, setting_page = App.getPage("Settings"); + setting_page.addEvent("create", function() { + var fieldset = setting_page.tabs.automation.groups.trakt_automation, l = window.location; + var trakt_set = 0; + fieldset.getElements("input[type=text]").each(function(el) { + trakt_set += +(el.get("value") !== ""); + }); + new Element(".ctrlHolder").adopt(trakt_set > 0 ? 
[ self.unregister = new Element("a.button.red", { + text: "Unregister", + events: { + click: function() { + fieldset.getElements("input[name*=oauth_token]").set("value", "").fireEvent("change"); + self.unregister.destroy(); + self.unregister_or.destroy(); + } + } + }), self.unregister_or = new Element("span[text=or]") ] : null, new Element("a.button", { + text: trakt_set > 0 ? "Register a different account" : "Register your trakt.tv account", + events: { + click: function() { + Api.request("automation.trakt.auth_url", { + data: { + host: l.protocol + "//" + l.hostname + (l.port ? ":" + l.port : "") + }, + onComplete: function(json) { + window.location = json.url; + } + }); + } + } + })).inject(fieldset); + }); + } +}); + +new TraktAutomation(); + +var NotificationBase = new Class({ + Extends: BlockBase, + Implements: [ Options, Events ], + stopped: false, + initialize: function(options) { + var self = this; + self.setOptions(options); + App.addEvent("unload", self.stopPoll.bind(self)); + App.addEvent("reload", self.startInterval.bind(self, [ true ])); + App.on("notification", self.notify.bind(self)); + App.on("message", self.showMessage.bind(self)); + App.addEvent("loadSettings", self.addTestButtons.bind(self)); + self.notifications = []; + App.addEvent("load", function() { + App.block.notification = new BlockMenu(self, { + button_class: "icon-notifications", + class: "notification_menu", + onOpen: self.markAsRead.bind(self) + }); + $(App.block.notification).inject(App.getBlock("search"), "after"); + self.badge = new Element("div.badge").inject(App.block.notification, "top").hide(); + requestTimeout(function() { + self.startInterval(); + }, $(window).getSize().x <= 480 ? 2e3 : 100); + }); + }, + notify: function(result) { + var self = this; + var added = new Date(); + added.setTime(result.added * 1e3); + result.el = App.getBlock("notification").addLink(new Element("span." + (result.read ? 
"read" : "")).adopt(new Element("span.message", { + html: result.message + }), new Element("span.added", { + text: added.timeDiffInWords(), + title: added + })), "top"); + self.notifications.include(result); + if ((result.important !== undefined || result.sticky !== undefined) && !result.read) { + var sticky = true; + App.trigger("message", [ result.message, sticky, result ]); + } else if (!result.read) { + self.setBadge(self.notifications.filter(function(n) { + return !n.read; + }).length); + } + }, + setBadge: function(value) { + var self = this; + self.badge.set("text", value); + self.badge[value ? "show" : "hide"](); + }, + markAsRead: function(force_ids) { + var self = this, ids = force_ids; + if (!force_ids) { + var rn = self.notifications.filter(function(n) { + return !n.read && n.important === undefined; + }); + ids = []; + rn.each(function(n) { + ids.include(n._id); + }); + } + if (ids.length > 0) Api.request("notification.markread", { + data: { + ids: ids.join(",") + }, + onSuccess: function() { + self.setBadge(""); + } + }); + }, + startInterval: function(force) { + var self = this; + if (self.stopped && !force) { + self.stopped = false; + return; + } + self.request = Api.request("notification.listener", { + data: { + init: true + }, + onSuccess: function(json) { + self.processData(json, true); + } + }).send(); + requestInterval(function() { + if (self.request && self.request.isRunning()) { + self.request.cancel(); + self.startPoll(); + } + }, 12e4); + }, + startPoll: function() { + var self = this; + if (self.stopped) return; + if (self.request && self.request.isRunning()) self.request.cancel(); + self.request = Api.request("nonblock/notification.listener", { + onSuccess: function(json) { + self.processData(json, false); + }, + data: { + last_id: self.last_id + }, + onFailure: function() { + requestTimeout(self.startPoll.bind(self), 2e3); + } + }).send(); + }, + stopPoll: function() { + if (this.request) this.request.cancel(); + this.stopped = true; + 
}, + processData: function(json, init) { + var self = this; + if (json && json.result) { + Array.each(json.result, function(result) { + App.trigger(result._t || result.type, [ result ]); + if (result.message && result.read === undefined && !init) self.showMessage(result.message); + }); + if (json.result.length > 0) self.last_id = json.result.getLast().message_id; + } + requestTimeout(self.startPoll.bind(self), 1500); + }, + showMessage: function(message, sticky, data) { + var self = this; + if (!self.message_container) self.message_container = new Element("div.messages").inject(document.body); + var new_message = new Element("div", { + class: "message" + (sticky ? " sticky" : ""), + html: '<div class="inner">' + message + "</div>" + }).inject(self.message_container, "top"); + requestTimeout(function() { + new_message.addClass("show"); + }, 10); + var hide_message = function() { + new_message.addClass("hide"); + requestTimeout(function() { + new_message.destroy(); + }, 1e3); + }; + if (sticky) new_message.grab(new Element("a.icon-cancel", { + events: { + click: function() { + self.markAsRead([ data._id ]); + hide_message(); + } + } + })); else requestTimeout(hide_message, 4e3); + }, + addTestButtons: function() { + var self = this; + var setting_page = App.getPage("Settings"); + setting_page.addEvent("create", function() { + Object.each(setting_page.tabs.notifications.groups, self.addTestButton.bind(self)); + }); + }, + addTestButton: function(fieldset, plugin_name) { + var self = this, button_name = self.testButtonName(fieldset); + if (button_name.contains("Notifications")) return; + new Element(".ctrlHolder.test_button").grab(new Element("a.button", { + text: button_name, + events: { + click: function() { + var button = fieldset.getElement(".test_button .button"); + button.set("text", "Sending notification"); + Api.request("notify." 
+ plugin_name + ".test", { + onComplete: function(json) { + button.set("text", button_name); + var message; + if (json.success) { + message = new Element("span.success", { + text: "Notification successful" + }).inject(button, "after"); + } else { + message = new Element("span.failed", { + text: "Notification failed. Check logs for details." + }).inject(button, "after"); + } + requestTimeout(function() { + message.destroy(); + }, 3e3); + } + }); + } + } + })).inject(fieldset); + }, + testButtonName: function(fieldset) { + var name = fieldset.getElement("h2 .group_label").get("text"); + return "Test " + name; + } +}); + +window.Notification = new NotificationBase(); + +var TwitterNotification = new Class({ + initialize: function() { + var self = this; + App.addEvent("loadSettings", self.addRegisterButton.bind(self)); + }, + addRegisterButton: function() { + var self = this; + var setting_page = App.getPage("Settings"); + setting_page.addEvent("create", function() { + var fieldset = setting_page.tabs.notifications.groups.twitter, l = window.location; + var twitter_set = 0; + fieldset.getElements("input[type=text]").each(function(el) { + twitter_set += +(el.get("value") !== ""); + }); + new Element(".ctrlHolder").adopt(twitter_set > 0 ? [ self.unregister = new Element("a.button.red", { + text: 'Unregister "' + fieldset.getElement("input[name*=screen_name]").get("value") + '"', + events: { + click: function() { + fieldset.getElements("input[type=text]").set("value", "").fireEvent("change"); + self.unregister.destroy(); + self.unregister_or.destroy(); + } + } + }), self.unregister_or = new Element("span[text=or]") ] : null, new Element("a.button", { + text: twitter_set > 0 ? "Register a different account" : "Register your Twitter account", + events: { + click: function() { + Api.request("notify.twitter.auth_url", { + data: { + host: l.protocol + "//" + l.hostname + (l.port ? 
":" + l.port : "") + }, + onComplete: function(json) { + window.location = json.url; + } + }); + } + } + })).inject(fieldset.getElement(".test_button"), "before"); + }); + } +}); + +window.addEvent("domready", function() { + new TwitterNotification(); +}); + +var CategoryListBase = new Class({ + initialize: function() { + var self = this; + App.addEvent("loadSettings", self.addSettings.bind(self)); + }, + setup: function(categories) { + var self = this; + self.categories = []; + Array.each(categories, self.createCategory.bind(self)); + }, + addSettings: function() { + var self = this; + self.settings = App.getPage("Settings"); + self.settings.addEvent("create", function() { + var tab = self.settings.createSubTab("category", { + label: "Categories", + name: "category", + subtab_label: "Category & filtering" + }, self.settings.tabs.searcher, "searcher"); + self.tab = tab.tab; + self.content = tab.content; + self.createList(); + self.createOrdering(); + }); + self.settings.addEvent("create", function() { + var renamer_group = self.settings.tabs.renamer.groups.renamer; + self.categories.each(function(category) { + var input = new Option.Directory("section_name", "option.name", category.get("destination"), { + name: category.get("label") + }); + input.inject(renamer_group.getElement(".renamer_to")); + input.fireEvent("injected"); + input.save = function() { + category.data.destination = input.getValue(); + category.save(); + }; + }); + }); + }, + createList: function() { + var self = this; + var count = self.categories.length; + self.settings.createGroup({ + label: "Categories", + description: "Create categories, each one extending global filters. (Needs refresh '" + (App.isMac() ? "CMD+R" : "F5") + "' after editing)" + }).inject(self.content).adopt(self.category_container = new Element("div.container"), new Element("a.add_new_category", { + text: count > 0 ? 
"Create another category" : "Click here to create a category.", + events: { + click: function() { + var category = self.createCategory(); + $(category).inject(self.category_container); + } + } + })); + Array.each(self.categories, function(category) { + $(category).inject(self.category_container); + }); + }, + getCategory: function(id) { + return this.categories.filter(function(category) { + return category.data._id == id; + }).pick(); + }, + getAll: function() { + return this.categories; + }, + createCategory: function(data) { + var self = this; + data = data || { + id: randomString() + }; + var category = new Category(data); + self.categories.include(category); + return category; + }, + createOrdering: function() { + var self = this; + var category_list; + self.settings.createGroup({ + label: "Category ordering" + }).adopt(new Element(".ctrlHolder#category_ordering").adopt(new Element("label[text=Order]"), category_list = new Element("ul"), new Element("p.formHint", { + html: "Change the order the categories are in the dropdown list." 
+ }))).inject(self.content); + Array.each(self.categories, function(category) { + new Element("li", { + "data-id": category.data._id + }).adopt(new Element("span.category_label", { + text: category.data.label + }), new Element("span.handle.icon-handle")).inject(category_list); + }); + self.category_sortable = new Sortables(category_list, { + revert: true, + handle: "", + opacity: .5, + onComplete: self.saveOrdering.bind(self) + }); + }, + saveOrdering: function() { + var self = this; + var ids = []; + self.category_sortable.list.getElements("li").each(function(el) { + ids.include(el.get("data-id")); + }); + Api.request("category.save_order", { + data: { + ids: ids + } + }); + } +}); + +window.CategoryList = new CategoryListBase(); + +var Category = new Class({ + data: {}, + initialize: function(data) { + var self = this; + self.data = data; + self.create(); + self.el.addEvents({ + "change:relay(select)": self.save.bind(self, 0), + "keyup:relay(input[type=text])": self.save.bind(self, [ 300 ]), + "change:relay(input[type=checkbox])": self.save.bind(self, 0) + }); + }, + create: function() { + var self = this; + var data = self.data; + self.el = new Element("div.category").adopt(self.delete_button = new Element("span.delete.icon-delete", { + events: { + click: self.del.bind(self) + } + }), new Element(".category_label.ctrlHolder").adopt(new Element("label", { + text: "Name" + }), new Element("input", { + type: "text", + value: data.label, + placeholder: "Example: Kids, Horror or His" + }), new Element("p.formHint", { + text: "See global filters for explanation." 
+ })), new Element(".category_preferred.ctrlHolder").adopt(new Element("label", { + text: "Preferred" + }), new Element("input", { + type: "text", + value: data.preferred, + placeholder: "Blu-ray, DTS" + })), new Element(".category_required.ctrlHolder").adopt(new Element("label", { + text: "Required" + }), new Element("input", { + type: "text", + value: data.required, + placeholder: "Example: DTS, AC3 & English" + })), new Element(".category_ignored.ctrlHolder").adopt(new Element("label", { + text: "Ignored" + }), new Element("input", { + type: "text", + value: data.ignored, + placeholder: "Example: dubbed, swesub, french" + })), new Element(".category_dubbed_version.ctrlHolder").adopt(new Element("label", { + text: "Dubbed version" + }), new Element("input", { + type: "checkbox", + checked: data.dubbed_version + }))); + self.makeSortable(); + }, + save: function(delay) { + var self = this; + if (self.save_timer) clearRequestTimeout(self.save_timer); + self.save_timer = requestTimeout(function() { + Api.request("category.save", { + data: self.getData(), + useSpinner: true, + spinnerOptions: { + target: self.el + }, + onComplete: function(json) { + if (json.success) { + self.data = json.category; + } + } + }); + }, delay || 0); + }, + getData: function() { + var self = this; + return { + id: self.data._id, + label: self.el.getElement(".category_label input").get("value"), + required: self.el.getElement(".category_required input").get("value"), + preferred: self.el.getElement(".category_preferred input").get("value"), + ignored: self.el.getElement(".category_ignored input").get("value"), + dubbed_version: self.el.getElement(".category_dubbed_version input").get("checked"), + destination: self.data.destination + }; + }, + del: function() { + var self = this; + if (self.data.label === undefined) { + self.el.destroy(); + return; + } + var label = self.el.getElement(".category_label input").get("value"); + var qObj = new Question('Are you sure you want to delete 
<strong>"' + label + '"</strong>?', "", [ { + text: 'Delete "' + label + '"', + class: "delete", + events: { + click: function(e) { + e.preventDefault(); + Api.request("category.delete", { + data: { + id: self.data._id + }, + useSpinner: true, + spinnerOptions: { + target: self.el + }, + onComplete: function(json) { + if (json.success) { + qObj.close(); + self.el.destroy(); + } else { + alert(json.message); + } + } + }); + } + } + }, { + text: "Cancel", + cancel: true + } ]); + }, + makeSortable: function() { + var self = this; + self.sortable = new Sortables(self.category_container, { + revert: true, + handle: ".handle", + opacity: .5, + onComplete: self.save.bind(self, 300) + }); + }, + get: function(attr) { + return this.data[attr]; + }, + toElement: function() { + return this.el; + } +}); + +Page.Log = new Class({ + Extends: PageBase, + disable_pointer_onscroll: false, + order: 60, + name: "log", + title: "Show recent logs.", + has_tab: false, + navigation: null, + log_items: [], + report_text: "### Steps to reproduce:\n" + "1. ..\n" + "2. 
..\n" + "\n" + "### Information:\n" + "Movie(s) I have this with: ...\n" + "Quality of the movie being searched: ...\n" + "Providers I use: ...\n" + "Version of CouchPotato: {version}\n" + "Running on: ...\n" + "\n" + "### Logs:\n" + "```\n{issue}```", + indexAction: function() { + var self = this; + self.getLogs(0); + }, + getLogs: function(nr) { + var self = this; + if (self.log) self.log.destroy(); + self.log = new Element("div.container.loading", { + text: "loading...", + events: { + "mouseup:relay(.time)": function(e) { + requestTimeout(function() { + self.showSelectionButton(e); + }, 100); + } + } + }).inject(self.content); + if (self.navigation) { + var nav = self.navigation.getElement(".nav"); + nav.getElements(".active").removeClass("active"); + self.navigation.getElements("li")[nr + 1].addClass("active"); + } + if (self.request && self.request.running) self.request.cancel(); + self.request = Api.request("logging.get", { + data: { + nr: nr + }, + onComplete: function(json) { + self.log.set("text", ""); + self.log_items = self.createLogElements(json.log); + self.log.adopt(self.log_items); + self.log.removeClass("loading"); + self.scrollToBottom(); + if (!self.navigation) { + self.navigation = new Element("div.navigation").adopt(new Element("h2[text=Logs]"), new Element("div.hint", { + text: "Select multiple lines & report an issue" + })); + var nav = new Element("ul.nav", { + events: { + "click:relay(li.select)": function(e, el) { + self.getLogs(parseInt(el.get("text")) - 1); + } + } + }).inject(self.navigation); + new Element("li.filter").grab(new Element("select", { + events: { + change: function() { + var type_filter = this.getSelected()[0].get("value"); + self.content.set("data-filter", type_filter); + self.scrollToBottom(); + } + } + }).adopt(new Element("option", { + value: "ALL", + text: "Show all logs" + }), new Element("option", { + value: "INFO", + text: "Show only INFO" + }), new Element("option", { + value: "DEBUG", + text: "Show only DEBUG" + 
}), new Element("option", { + value: "ERROR", + text: "Show only ERROR" + }))).inject(nav); + for (var i = 0; i <= json.total; i++) { + new Element("li", { + text: i + 1, + class: "select " + (nr == i ? "active" : "") + }).inject(nav); + } + new Element("li.clear", { + text: "clear", + events: { + click: function() { + Api.request("logging.clear", { + onComplete: function() { + self.getLogs(0); + } + }); + } + } + }).inject(nav); + self.navigation.inject(self.content, "top"); + } + } + }); + }, + createLogElements: function(logs) { + var elements = []; + logs.each(function(log) { + elements.include(new Element("div", { + class: "time " + log.type.toLowerCase() + }).adopt(new Element("span.time_type", { + text: log.time + " " + log.type + }), new Element("span.message", { + text: log.message + }))); + }); + return elements; + }, + scrollToBottom: function() { + new Fx.Scroll(this.content, { + duration: 0 + }).toBottom(); + }, + showSelectionButton: function(e) { + var self = this, selection = self.getSelected(), start_node = selection.anchorNode, parent_start = start_node.parentNode.getParent(".time"), end_node = selection.focusNode.parentNode.getParent(".time"), text = ""; + var remove_button = function() { + self.log.getElements(".highlight").removeClass("highlight"); + if (self.do_report) self.do_report.destroy(); + document.body.removeEvent("click", remove_button); + }; + remove_button(); + if (parent_start) start_node = parent_start; + var index = { + start: self.log_items.indexOf(start_node), + end: self.log_items.indexOf(end_node) + }; + if (index.start > index.end) { + index = { + start: index.end, + end: index.start + }; + } + var nodes = self.log_items.slice(index.start, index.end + 1); + nodes.each(function(node, nr) { + node.addClass("highlight"); + node.getElements("span").each(function(span) { + text += self.spaceFill(span.get("text") + " ", 6); + }); + text += "\n"; + }); + self.do_report = new Element("a.do_report.button", { + text: "Report issue", + 
styles: { + top: e.page.y, + left: e.page.x + }, + events: { + click: function(e) { + e.stop(); + self.showReport(text); + } + } + }).inject(document.body); + requestTimeout(function() { + document.body.addEvent("click", remove_button); + }, 0); + }, + showReport: function(text) { + var self = this, version = Updater.getInfo(), body = self.report_text.replace("{issue}", text).replace("{version}", version ? version.version.repr : "..."), textarea; + var overlay = new Element("div.mask.report_popup", { + method: "post", + events: { + click: function(e) { + overlay.destroy(); + } + } + }).grab(new Element("div.bug", { + events: { + click: function(e) { + e.stopPropagation(); + } + } + }).adopt(new Element("h1", { + text: "Report a bug" + }), new Element("span").adopt(new Element("span", { + text: "Read " + }), new Element("a.button", { + target: "_blank", + text: "the contributing guide", + href: "https://github.com/CouchPotato/CouchPotatoServer/wiki/Developer-branch" + }), new Element("span", { + html: " before posting, then copy the text below and <strong>FILL IN</strong> the dots." + })), textarea = new Element("textarea", { + text: body + }), new Element("a.button", { + target: "_blank", + text: "Create a new issue on GitHub with the text above", + href: "https://github.com/CouchPotato/CouchPotatoServer/issues/new", + events: { + click: function(e) { + e.stop(); + var body = textarea.get("value"), bdy = "?body=" + (body.length < 2e3 ? 
encodeURIComponent(body) : "Paste the text here"), win = window.open(e.target.get("href") + bdy, "_blank"); + win.focus(); + } + } + }))); + overlay.inject(document.body); + }, + getSelected: function() { + if (window.getSelection) return window.getSelection(); else if (document.getSelection) return document.getSelection(); else { + var selection = document.selection && document.selection.createRange(); + if (selection.text) return selection.text; + } + return false; + }, + spaceFill: function(number, width) { + if (number.toString().length >= width) return number; + return (new Array(width).join(" ") + number.toString()).substr(-width); + } +}); + +var Profile = new Class({ + data: {}, + types: [], + initialize: function(data) { + var self = this; + self.data = data; + self.types = []; + self.create(); + self.el.addEvents({ + "change:relay(select, input[type=checkbox])": self.save.bind(self, 0), + "keyup:relay(input[type=text])": self.save.bind(self, [ 300 ]) + }); + }, + create: function() { + var self = this; + var data = self.data; + self.el = new Element("div.profile").adopt(self.delete_button = new Element("span.delete.icon-delete", { + events: { + click: self.del.bind(self) + } + }), new Element(".quality_label.ctrlHolder").adopt(new Element("label", { + text: "Name" + }), new Element("input", { + type: "text", + value: data.label, + placeholder: "Profile name" + })), new Element("div.qualities.ctrlHolder").adopt(new Element("label", { + text: "Search for" + }), self.type_container = new Element("ol.types"), new Element("div.formHint", { + html: "Search these qualities (2 minimum), from top to bottom. Use the checkbox, to stop searching after it found this quality." + })), new Element("div.wait_for.ctrlHolder").adopt(new Element("span", { + text: "Wait" + }), new Element("input.wait_for_input.xsmall", { + type: "text", + value: data.wait_for && data.wait_for.length > 0 ? 
data.wait_for[0] : 0 + }), new Element("span", { + text: "day(s) for a better quality " + }), new Element("span.advanced", { + text: "and keep searching" + }), new Element("input.xsmall.stop_after_input.advanced", { + type: "text", + value: data.stop_after && data.stop_after.length > 0 ? data.stop_after[0] : 0 + }), new Element("span.advanced", { + text: "day(s) for a better (checked) quality." + }), new Element("span.advanced", { + html: "<br/>Releases need a minimum score of" + }), new Element("input.advanced.xsmall.minimum_score_input", { + size: 4, + type: "text", + value: data.minimum_score || 1 + }))); + self.makeSortable(); + if (data.qualities) { + data.types = []; + data.qualities.each(function(quality, nr) { + data.types.include({ + quality: quality, + finish: data.finish[nr] || false, + "3d": data["3d"] ? data["3d"][nr] || false : false + }); + }); + } + if (data.types) data.types.each(self.addType.bind(self)); else self.delete_button.hide(); + self.addType(); + }, + save: function(delay) { + var self = this; + if (self.save_timer) clearRequestTimeout(self.save_timer); + self.save_timer = requestTimeout(function() { + self.addType(); + var data = self.getData(); + if (data.types.length < 2) return; else self.delete_button.show(); + Api.request("profile.save", { + data: self.getData(), + useSpinner: true, + spinnerOptions: { + target: self.el + }, + onComplete: function(json) { + if (json.success) { + self.data = json.profile; + self.type_container.getElement("li:first-child input.finish[type=checkbox]").set("checked", true).getParent().addClass("checked"); + } + } + }); + }, delay); + }, + getData: function() { + var self = this; + var data = { + id: self.data._id, + label: self.el.getElement(".quality_label input").get("value"), + wait_for: self.el.getElement(".wait_for_input").get("value"), + stop_after: self.el.getElement(".stop_after_input").get("value"), + minimum_score: self.el.getElement(".minimum_score_input").get("value"), + types: [] + }; + 
Array.each(self.type_container.getElements(".type"), function(type) { + if (!type.hasClass("deleted") && type.getElement("select").get("value") != -1 && type.getElement("select").get("value") != "") data.types.include({ + quality: type.getElement("select").get("value"), + finish: +type.getElement("input.finish[type=checkbox]").checked, + "3d": +type.getElement("input.3d[type=checkbox]").checked + }); + }); + return data; + }, + addType: function(data) { + var self = this; + var has_empty = false; + self.types.each(function(type) { + if ($(type).hasClass("is_empty")) has_empty = true; + }); + if (has_empty) return; + var t = new Profile.Type(data, { + onChange: self.save.bind(self, 0) + }); + $(t).inject(self.type_container); + self.sortable.addItems($(t)); + self.types.include(t); + }, + getTypes: function() { + var self = this; + return self.types.filter(function(type) { + return type.get("quality"); + }); + }, + del: function() { + var self = this; + var label = self.el.getElement(".quality_label input").get("value"); + var qObj = new Question('Are you sure you want to delete <strong>"' + label + '"</strong>?', "Items using this profile, will be set to the default quality.", [ { + text: 'Delete "' + label + '"', + class: "delete", + events: { + click: function(e) { + e.preventDefault(); + Api.request("profile.delete", { + data: { + id: self.data._id + }, + useSpinner: true, + spinnerOptions: { + target: self.el + }, + onComplete: function(json) { + if (json.success) { + qObj.close(); + self.el.destroy(); + } else { + alert(json.message); + } + } + }); + } + } + }, { + text: "Cancel", + cancel: true + } ]); + }, + makeSortable: function() { + var self = this; + self.sortable = new Sortables(self.type_container, { + revert: true, + handle: ".handle", + opacity: .5, + onComplete: self.save.bind(self, 300) + }); + }, + get: function(attr) { + return this.data[attr]; + }, + isCore: function() { + return this.data.core; + }, + toElement: function() { + return this.el; 
+ } +}); + +Profile.Type = new Class({ + Implements: [ Events, Options ], + deleted: false, + initialize: function(data, options) { + var self = this; + self.setOptions(options); + self.data = data || {}; + self.create(); + self.addEvent("change", function() { + var has_quality = !(self.qualities.get("value") == "-1" || self.qualities.get("value") == ""); + self.el[!has_quality ? "addClass" : "removeClass"]("is_empty"); + self.el[has_quality && Quality.getQuality(self.qualities.get("value")).allow_3d ? "addClass" : "removeClass"]("allow_3d"); + self.deleted = !has_quality; + }); + }, + create: function() { + var self = this; + var data = self.data; + self.el = new Element("li.type").adopt(new Element("span.quality_type.select_wrapper.icon-dropdown").grab(self.fillQualities()), self.finish_container = new Element("label.finish").adopt(self.finish = new Element("input.finish[type=checkbox]", { + checked: data.finish !== undefined ? data.finish : 1, + events: { + change: function() { + if (self.el == self.el.getParent().getElement(":first-child")) { + alert("Top quality always finishes the search"); + return; + } + self.fireEvent("change"); + } + } + }), new Element("span.check_label[text=finish]")), self["3d_container"] = new Element("label.threed").adopt(self["3d"] = new Element("input.3d[type=checkbox]", { + checked: data["3d"] !== undefined ? data["3d"] : 0, + events: { + change: function() { + self.fireEvent("change"); + } + } + }), new Element("span.check_label[text=3D]")), new Element("span.delete.icon-cancel", { + events: { + click: self.del.bind(self) + } + }), new Element("span.handle.icon-handle")); + self.el[self.data.quality ? 
"removeClass" : "addClass"]("is_empty"); + if (self.data.quality && Quality.getQuality(self.data.quality).allow_3d) self.el.addClass("allow_3d"); + }, + fillQualities: function() { + var self = this; + self.qualities = new Element("select", { + events: { + change: self.fireEvent.bind(self, "change") + } + }).grab(new Element("option", { + text: "+ Add another quality", + value: -1 + })); + Object.each(Quality.qualities, function(q) { + new Element("option", { + text: q.label, + value: q.identifier, + "data-allow_3d": q.allow_3d + }).inject(self.qualities); + }); + self.qualities.set("value", self.data.quality || -1); + return self.qualities; + }, + getData: function() { + var self = this; + return { + quality: self.qualities.get("value"), + finish: +self.finish.checked, + "3d": +self["3d"].checked + }; + }, + get: function(key) { + return this.data[key]; + }, + del: function() { + var self = this; + self.el.addClass("deleted"); + self.el.hide(); + self.deleted = true; + self.fireEvent("change"); + }, + toElement: function() { + return this.el; + } +}); + +var QualityBase = new Class({ + tab: "", + content: "", + setup: function(data) { + var self = this; + self.qualities = data.qualities; + self.profiles_list = null; + self.profiles = []; + Array.each(data.profiles, self.createProfilesClass.bind(self)); + App.addEvent("loadSettings", self.addSettings.bind(self)); + }, + getProfile: function(id) { + return this.profiles.filter(function(profile) { + return profile.data._id == id; + }).pick(); + }, + getActiveProfiles: function() { + return Array.filter(this.profiles, function(profile) { + return !profile.data.hide; + }); + }, + getQuality: function(identifier) { + try { + return this.qualities.filter(function(q) { + return q.identifier == identifier; + }).pick() || {}; + } catch (e) {} + return {}; + }, + addSettings: function() { + var self = this; + self.settings = App.getPage("Settings"); + self.settings.addEvent("create", function() { + var tab = 
self.settings.createSubTab("profile", { + label: "Quality", + name: "profile", + subtab_label: "Qualities" + }, self.settings.tabs.searcher, "searcher"); + self.tab = tab.tab; + self.content = tab.content; + self.createProfiles(); + self.createProfileOrdering(); + self.createSizes(); + }); + }, + createProfiles: function() { + var self = this; + var non_core_profiles = Array.filter(self.profiles, function(profile) { + return !profile.isCore(); + }); + var count = non_core_profiles.length; + self.settings.createGroup({ + label: "Quality Profiles", + description: "Create your own profiles with multiple qualities." + }).inject(self.content).adopt(self.profile_container = new Element("div.container"), new Element("a.add_new_profile", { + text: count > 0 ? "Create another quality profile" : "Click here to create a quality profile.", + events: { + click: function() { + var profile = self.createProfilesClass(); + $(profile).inject(self.profile_container); + } + } + })); + Array.each(non_core_profiles, function(profile) { + $(profile).inject(self.profile_container); + }); + }, + createProfilesClass: function(data) { + var self = this; + data = data || { + id: randomString() + }; + var profile = new Profile(data); + self.profiles.include(profile); + return profile; + }, + createProfileOrdering: function() { + var self = this; + self.settings.createGroup({ + label: "Profile Defaults", + description: "(Needs refresh '" + (App.isMac() ? "CMD+R" : "F5") + "' after editing)" + }).grab(new Element(".ctrlHolder#profile_ordering").adopt(new Element("label[text=Order]"), self.profiles_list = new Element("ul"), new Element("p.formHint", { + html: "Change the order the profiles are in the dropdown list. Uncheck to hide it completely.<br />First one will be default." 
+ }))).inject(self.content); + Array.each(self.profiles, function(profile) { + var check; + new Element("li", { + "data-id": profile.data._id + }).adopt(check = new Element("input[type=checkbox]", { + checked: !profile.data.hide, + events: { + change: self.saveProfileOrdering.bind(self) + } + }), new Element("span.profile_label", { + text: profile.data.label + }), new Element("span.handle.icon-handle")).inject(self.profiles_list); + }); + var sorted_changed = false; + self.profile_sortable = new Sortables(self.profiles_list, { + revert: true, + handle: ".handle", + opacity: .5, + onSort: function() { + sorted_changed = true; + }, + onComplete: function() { + if (sorted_changed) { + self.saveProfileOrdering(); + sorted_changed = false; + } + } + }); + }, + saveProfileOrdering: function() { + var self = this, ids = [], hidden = []; + self.profiles_list.getElements("li").each(function(el, nr) { + ids.include(el.get("data-id")); + hidden[nr] = +!el.getElement("input[type=checkbox]").get("checked"); + }); + Api.request("profile.save_order", { + data: { + ids: ids, + hidden: hidden + } + }); + }, + createSizes: function() { + var self = this; + var group = self.settings.createGroup({ + label: "Sizes", + description: "Edit the minimal and maximum sizes (in MB) for each quality.", + advanced: true, + name: "sizes" + }).inject(self.content); + new Element("div.item.head.ctrlHolder").adopt(new Element("span.label", { + text: "Quality" + }), new Element("span.min", { + text: "Min" + }), new Element("span.max", { + text: "Max" + })).inject(group); + Array.each(self.qualities, function(quality) { + new Element("div.ctrlHolder.item").adopt(new Element("span.label", { + text: quality.label + }), new Element("input.min[type=text]", { + value: quality.size_min, + events: { + keyup: function(e) { + self.changeSize(quality.identifier, "size_min", e.target.get("value")); + } + } + }), new Element("input.max[type=text]", { + value: quality.size_max, + events: { + keyup: function(e) { + 
self.changeSize(quality.identifier, "size_max", e.target.get("value")); + } + } + })).inject(group); + }); + }, + size_timer: {}, + changeSize: function(identifier, type, value) { + var self = this; + if (self.size_timer[identifier + type]) clearRequestTimeout(self.size_timer[identifier + type]); + self.size_timer[identifier + type] = requestTimeout(function() { + Api.request("quality.size.save", { + data: { + identifier: identifier, + value_type: type, + value: value + } + }); + }, 300); + } +}); + +window.Quality = new QualityBase(); + +Page.Userscript = new Class({ + Extends: PageBase, + order: 80, + name: "userscript", + has_tab: false, + options: { + onOpened: function() { + App.fireEvent("unload"); + App.getBlock("header").hide(); + } + }, + indexAction: function() { + var self = this; + self.content.grab(self.frame = new Element("div.frame.loading", { + text: "Loading..." + })); + var url = window.location.href.split("url=")[1]; + Api.request("userscript.add_via_url", { + data: { + url: url + }, + onComplete: function(json) { + self.frame.empty(); + self.frame.removeClass("loading"); + if (json.error) self.frame.set("html", json.error); else { + var item = new BlockSearchMovieItem(json.movie); + self.frame.adopt(item); + item.showOptions(); + } + } + }); + } +}); + +var UserscriptSettingTab = new Class({ + tab: "", + content: "", + initialize: function() { + var self = this; + App.addEvent("loadSettings", self.addSettings.bind(self)); + }, + addSettings: function() { + var self = this; + self.settings = App.getPage("Settings"); + self.settings.addEvent("create", function() { + var host_url = window.location.protocol + "//" + window.location.host; + self.settings.createGroup({ + name: "userscript", + label: "Install the browser extension or bookmarklet", + description: "Easily add movies via imdb.com, appletrailers and more" + }).inject(self.settings.tabs.automation.content, "top").adopt(new Element("div").adopt(new Element("a.userscript.button", { + text: 
"Install extension", + href: "https://couchpota.to/extension/", + target: "_blank" + }), new Element("span.or[text=or]"), new Element("span.bookmarklet").adopt(new Element("a.button.green", { + text: "+CouchPotato", + href: "javascript:void((function(){var e=document.createElement('script');e.setAttribute('type','text/javascript');e.setAttribute('charset','UTF-8');e.setAttribute('src','" + host_url + Api.createUrl("userscript.bookmark") + "?host=" + encodeURI(host_url + Api.createUrl("userscript.get") + randomString() + "/") + "&r='+Math.random()*99999999);document.body.appendChild(e)})());", + target: "", + events: { + click: function(e) { + e.stop(); + alert("Drag it to your bookmark ;)"); + } + } + }), new Element("span", { + text: "Б┤╫ Drag this to your bookmarks" + }))), new Element("img", { + src: "https://couchpota.to/media/images/userscript.gif" + })); + }); + } +}); + +window.addEvent("domready", function() { + new UserscriptSettingTab(); +}); + +window.addEvent("load", function() { + var your_version = $(document.body).get("data-userscript_version"), latest_version = App.getOption("userscript_version") || "", key = "cp_version_check", checked_already = Cookie.read(key); + if (your_version && your_version < latest_version && checked_already < latest_version) { + if (confirm("Update to the latest Userscript?\nYour version: " + your_version + ", new version: " + latest_version)) { + document.location = Api.createUrl("userscript.get") + randomString() + "/couchpotato.user.js"; + } + Cookie.write(key, latest_version, { + duration: 100 + }); + } +}); + +Page.Wizard = new Class({ + Extends: Page.Settings, + order: 70, + name: "wizard", + current: "welcome", + has_tab: false, + wizard_only: true, + headers: { + welcome: { + title: "Welcome to the new CouchPotato", + description: "To get started, fill in each of the following settings as much as you can.", + content: new Element("div", { + styles: { + margin: "0 0 0 30px" + } + }) + }, + general: { + title: 
"General", + description: "If you want to access CP from outside your local network, you better secure it a bit with a username & password." + }, + downloaders: { + title: "What download apps are you using?", + description: "CP needs an external download app to work with. Choose one below. For more downloaders check settings after you have filled in the wizard. If your download app isn't in the list, use the default Blackhole." + }, + searcher: { + label: "Providers", + title: "Are you registered at any of these sites?", + description: "CP uses these sites to search for movies. A few free are enabled by default, but it's always better to have more." + }, + renamer: { + title: "Move & rename the movies after downloading?", + description: "The coolest part of CP is that it can move and organize your downloaded movies automagically. Check settings and you can even download trailers, subtitles and other data when it has finished downloading. It's awesome!" + }, + automation: { + title: "Easily add movies to your wanted list!", + description: "You can easily add movies from your favorite movie site, like IMDB, Rotten Tomatoes, Apple Trailers and more. Just install the extension or drag the bookmarklet to your bookmarks." + "<br />Once installed, just click the bookmarklet on a movie page and watch the magic happen ;)", + content: function() { + return App.createUserscriptButtons(); + } + }, + finish: { + title: "Finishing Up", + description: "Are you done? Did you fill in everything as much as possible?" + "<br />Be sure to check the settings to see what more CP can do!<br /><br />" + '<div class="wizard_support">After you\'ve used CP for a while, and you like it (which of course you will), consider supporting CP. Maybe even by writing some code. 
<br />Or by getting a subscription at <a href="https://usenetserver.com/partners/?a_aid=couchpotato&a_bid=3f357c6f" target="_blank">Usenet Server</a> or <a href="https://www.newshosting.com/partners/?a_aid=couchpotato&a_bid=a0b022df" target="_blank">Newshosting</a>.</div>', + content: new Element("div").grab(new Element("a.button.green", { + styles: { + "margin-top": 20 + }, + text: "I'm ready to start the awesomeness!", + events: { + click: function(e) { + e.preventDefault(); + Api.request("settings.save", { + data: { + section: "core", + name: "show_wizard", + value: 0 + }, + useSpinner: true, + spinnerOptions: { + target: self.el + }, + onComplete: function() { + window.location = App.createUrl("wanted"); + } + }); + } + } + })) + } + }, + groups: [ "welcome", "general", "downloaders", "searcher", "renamer", "automation", "finish" ], + open: function(action, params) { + var self = this; + if (!self.initialized) { + App.fireEvent("unload"); + App.getBlock("header").hide(); + self.parent(action, params); + self.el.addClass("settings"); + self.addEvent("create", function() { + self.orderGroups(); + }); + self.initialized = true; + self.scroll = new Fx.Scroll(document.body, { + transition: "quint:in:out" + }); + } else requestTimeout(function() { + var sc = self.el.getElement(".wgroup_" + action); + self.scroll.start(0, sc.getCoordinates().top - 80); + }, 1); + }, + orderGroups: function() { + var self = this; + var form = self.el.getElement(".uniForm"); + var tabs = self.el.getElement(".tabs").hide(); + self.groups.each(function(group) { + var group_container; + if (self.headers[group]) { + group_container = new Element(".wgroup_" + group); + if (self.headers[group].include) { + self.headers[group].include.each(function(inc) { + group_container.addClass("wgroup_" + inc); + }); + } + var content = self.headers[group].content; + group_container.adopt(new Element("h1", { + text: self.headers[group].title + }), self.headers[group].description ? 
new Element("span.description", { + html: self.headers[group].description + }) : null, content ? typeOf(content) == "function" ? content() : content : null).inject(form); + } + var tab_navigation = tabs.getElement(".t_" + group); + if (!tab_navigation && self.headers[group] && self.headers[group].include) { + tab_navigation = []; + self.headers[group].include.each(function(inc) { + tab_navigation.include(tabs.getElement(".t_" + inc)); + }); + } + if (tab_navigation && group_container) { + tabs.adopt(tab_navigation); + if (self.headers[group] && self.headers[group].include) { + self.headers[group].include.each(function(inc) { + self.el.getElement(".tab_" + inc).inject(group_container); + }); + new Element("li.t_" + group).grab(new Element("a", { + href: App.createUrl("wizard/" + group), + text: (self.headers[group].label || group).capitalize() + })).inject(tabs); + } else self.el.getElement(".tab_" + group).inject(group_container); + if (tab_navigation.getElement && self.headers[group]) { + var a = tab_navigation.getElement("a"); + a.set("text", (self.headers[group].label || group).capitalize()); + var url_split = a.get("href").split("wizard")[1].split("/"); + if (url_split.length > 3) a.set("href", a.get("href").replace(url_split[url_split.length - 3] + "/", "")); + } + } else { + new Element("li.t_" + group).grab(new Element("a", { + href: App.createUrl("wizard/" + group), + text: (self.headers[group].label || group).capitalize() + })).inject(tabs); + } + if (self.headers[group] && self.headers[group].event) self.headers[group].event.call(); + }); + self.el.getElement(".advanced_toggle").destroy(); + self.el.getElement(".section_nzb").hide(); + } +}); diff --git a/couchpotato/static/scripts/combined.vendor.min.js b/couchpotato/static/scripts/combined.vendor.min.js new file mode 100644 index 0000000000..cbc329cda0 --- /dev/null +++ b/couchpotato/static/scripts/combined.vendor.min.js @@ -0,0 +1,9070 @@ +(function() { + this.MooTools = { + version: "1.5.1", + build: 
"0542c135fdeb7feed7d9917e01447a408f22c876" + }; + var typeOf = this.typeOf = function(item) { + if (item == null) return "null"; + if (item.$family != null) return item.$family(); + if (item.nodeName) { + if (item.nodeType == 1) return "element"; + if (item.nodeType == 3) return /\S/.test(item.nodeValue) ? "textnode" : "whitespace"; + } else if (typeof item.length == "number") { + if ("callee" in item) return "arguments"; + if ("item" in item) return "collection"; + } + return typeof item; + }; + var instanceOf = this.instanceOf = function(item, object) { + if (item == null) return false; + var constructor = item.$constructor || item.constructor; + while (constructor) { + if (constructor === object) return true; + constructor = constructor.parent; + } + if (!item.hasOwnProperty) return false; + return item instanceof object; + }; + var Function = this.Function; + var enumerables = true; + for (var i in { + toString: 1 + }) enumerables = null; + if (enumerables) enumerables = [ "hasOwnProperty", "valueOf", "isPrototypeOf", "propertyIsEnumerable", "toLocaleString", "toString", "constructor" ]; + Function.prototype.overloadSetter = function(usePlural) { + var self = this; + return function(a, b) { + if (a == null) return this; + if (usePlural || typeof a != "string") { + for (var k in a) self.call(this, k, a[k]); + if (enumerables) for (var i = enumerables.length; i--; ) { + k = enumerables[i]; + if (a.hasOwnProperty(k)) self.call(this, k, a[k]); + } + } else { + self.call(this, a, b); + } + return this; + }; + }; + Function.prototype.overloadGetter = function(usePlural) { + var self = this; + return function(a) { + var args, result; + if (typeof a != "string") args = a; else if (arguments.length > 1) args = arguments; else if (usePlural) args = [ a ]; + if (args) { + result = {}; + for (var i = 0; i < args.length; i++) result[args[i]] = self.call(this, args[i]); + } else { + result = self.call(this, a); + } + return result; + }; + }; + Function.prototype.extend = 
function(key, value) { + this[key] = value; + }.overloadSetter(); + Function.prototype.implement = function(key, value) { + this.prototype[key] = value; + }.overloadSetter(); + var slice = Array.prototype.slice; + Function.from = function(item) { + return typeOf(item) == "function" ? item : function() { + return item; + }; + }; + Array.from = function(item) { + if (item == null) return []; + return Type.isEnumerable(item) && typeof item != "string" ? typeOf(item) == "array" ? item : slice.call(item) : [ item ]; + }; + Number.from = function(item) { + var number = parseFloat(item); + return isFinite(number) ? number : null; + }; + String.from = function(item) { + return item + ""; + }; + Function.implement({ + hide: function() { + this.$hidden = true; + return this; + }, + protect: function() { + this.$protected = true; + return this; + } + }); + var Type = this.Type = function(name, object) { + if (name) { + var lower = name.toLowerCase(); + var typeCheck = function(item) { + return typeOf(item) == lower; + }; + Type["is" + name] = typeCheck; + if (object != null) { + object.prototype.$family = function() { + return lower; + }.hide(); + } + } + if (object == null) return null; + object.extend(this); + object.$constructor = Type; + object.prototype.$constructor = object; + return object; + }; + var toString = Object.prototype.toString; + Type.isEnumerable = function(item) { + return item != null && typeof item.length == "number" && toString.call(item) != "[object Function]"; + }; + var hooks = {}; + var hooksOf = function(object) { + var type = typeOf(object.prototype); + return hooks[type] || (hooks[type] = []); + }; + var implement = function(name, method) { + if (method && method.$hidden) return; + var hooks = hooksOf(this); + for (var i = 0; i < hooks.length; i++) { + var hook = hooks[i]; + if (typeOf(hook) == "type") implement.call(hook, name, method); else hook.call(this, name, method); + } + var previous = this.prototype[name]; + if (previous == null || 
!previous.$protected) this.prototype[name] = method; + if (this[name] == null && typeOf(method) == "function") extend.call(this, name, function(item) { + return method.apply(item, slice.call(arguments, 1)); + }); + }; + var extend = function(name, method) { + if (method && method.$hidden) return; + var previous = this[name]; + if (previous == null || !previous.$protected) this[name] = method; + }; + Type.implement({ + implement: implement.overloadSetter(), + extend: extend.overloadSetter(), + alias: function(name, existing) { + implement.call(this, name, this.prototype[existing]); + }.overloadSetter(), + mirror: function(hook) { + hooksOf(this).push(hook); + return this; + } + }); + new Type("Type", Type); + var force = function(name, object, methods) { + var isType = object != Object, prototype = object.prototype; + if (isType) object = new Type(name, object); + for (var i = 0, l = methods.length; i < l; i++) { + var key = methods[i], generic = object[key], proto = prototype[key]; + if (generic) generic.protect(); + if (isType && proto) object.implement(key, proto.protect()); + } + if (isType) { + var methodsEnumerable = prototype.propertyIsEnumerable(methods[0]); + object.forEachMethod = function(fn) { + if (!methodsEnumerable) for (var i = 0, l = methods.length; i < l; i++) { + fn.call(prototype, prototype[methods[i]], methods[i]); + } + for (var key in prototype) fn.call(prototype, prototype[key], key); + }; + } + return force; + }; + force("String", String, [ "charAt", "charCodeAt", "concat", "contains", "indexOf", "lastIndexOf", "match", "quote", "replace", "search", "slice", "split", "substr", "substring", "trim", "toLowerCase", "toUpperCase" ])("Array", Array, [ "pop", "push", "reverse", "shift", "sort", "splice", "unshift", "concat", "join", "slice", "indexOf", "lastIndexOf", "filter", "forEach", "every", "map", "some", "reduce", "reduceRight" ])("Number", Number, [ "toExponential", "toFixed", "toLocaleString", "toPrecision" ])("Function", Function, [ 
"apply", "call", "bind" ])("RegExp", RegExp, [ "exec", "test" ])("Object", Object, [ "create", "defineProperty", "defineProperties", "keys", "getPrototypeOf", "getOwnPropertyDescriptor", "getOwnPropertyNames", "preventExtensions", "isExtensible", "seal", "isSealed", "freeze", "isFrozen" ])("Date", Date, [ "now" ]); + Object.extend = extend.overloadSetter(); + Date.extend("now", function() { + return +new Date(); + }); + new Type("Boolean", Boolean); + Number.prototype.$family = function() { + return isFinite(this) ? "number" : "null"; + }.hide(); + Number.extend("random", function(min, max) { + return Math.floor(Math.random() * (max - min + 1) + min); + }); + var hasOwnProperty = Object.prototype.hasOwnProperty; + Object.extend("forEach", function(object, fn, bind) { + for (var key in object) { + if (hasOwnProperty.call(object, key)) fn.call(bind, object[key], key, object); + } + }); + Object.each = Object.forEach; + Array.implement({ + forEach: function(fn, bind) { + for (var i = 0, l = this.length; i < l; i++) { + if (i in this) fn.call(bind, this[i], i, this); + } + }, + each: function(fn, bind) { + Array.forEach(this, fn, bind); + return this; + } + }); + var cloneOf = function(item) { + switch (typeOf(item)) { + case "array": + return item.clone(); + + case "object": + return Object.clone(item); + + default: + return item; + } + }; + Array.implement("clone", function() { + var i = this.length, clone = new Array(i); + while (i--) clone[i] = cloneOf(this[i]); + return clone; + }); + var mergeOne = function(source, key, current) { + switch (typeOf(current)) { + case "object": + if (typeOf(source[key]) == "object") Object.merge(source[key], current); else source[key] = Object.clone(current); + break; + + case "array": + source[key] = current.clone(); + break; + + default: + source[key] = current; + } + return source; + }; + Object.extend({ + merge: function(source, k, v) { + if (typeOf(k) == "string") return mergeOne(source, k, v); + for (var i = 1, l = 
arguments.length; i < l; i++) { + var object = arguments[i]; + for (var key in object) mergeOne(source, key, object[key]); + } + return source; + }, + clone: function(object) { + var clone = {}; + for (var key in object) clone[key] = cloneOf(object[key]); + return clone; + }, + append: function(original) { + for (var i = 1, l = arguments.length; i < l; i++) { + var extended = arguments[i] || {}; + for (var key in extended) original[key] = extended[key]; + } + return original; + } + }); + [ "Object", "WhiteSpace", "TextNode", "Collection", "Arguments" ].each(function(name) { + new Type(name); + }); + var UID = Date.now(); + String.extend("uniqueID", function() { + return (UID++).toString(36); + }); +})(); + +Array.implement({ + every: function(fn, bind) { + for (var i = 0, l = this.length >>> 0; i < l; i++) { + if (i in this && !fn.call(bind, this[i], i, this)) return false; + } + return true; + }, + filter: function(fn, bind) { + var results = []; + for (var value, i = 0, l = this.length >>> 0; i < l; i++) if (i in this) { + value = this[i]; + if (fn.call(bind, value, i, this)) results.push(value); + } + return results; + }, + indexOf: function(item, from) { + var length = this.length >>> 0; + for (var i = from < 0 ? 
Math.max(0, length + from) : from || 0; i < length; i++) { + if (this[i] === item) return i; + } + return -1; + }, + map: function(fn, bind) { + var length = this.length >>> 0, results = Array(length); + for (var i = 0; i < length; i++) { + if (i in this) results[i] = fn.call(bind, this[i], i, this); + } + return results; + }, + some: function(fn, bind) { + for (var i = 0, l = this.length >>> 0; i < l; i++) { + if (i in this && fn.call(bind, this[i], i, this)) return true; + } + return false; + }, + clean: function() { + return this.filter(function(item) { + return item != null; + }); + }, + invoke: function(methodName) { + var args = Array.slice(arguments, 1); + return this.map(function(item) { + return item[methodName].apply(item, args); + }); + }, + associate: function(keys) { + var obj = {}, length = Math.min(this.length, keys.length); + for (var i = 0; i < length; i++) obj[keys[i]] = this[i]; + return obj; + }, + link: function(object) { + var result = {}; + for (var i = 0, l = this.length; i < l; i++) { + for (var key in object) { + if (object[key](this[i])) { + result[key] = this[i]; + delete object[key]; + break; + } + } + } + return result; + }, + contains: function(item, from) { + return this.indexOf(item, from) != -1; + }, + append: function(array) { + this.push.apply(this, array); + return this; + }, + getLast: function() { + return this.length ? this[this.length - 1] : null; + }, + getRandom: function() { + return this.length ? 
this[Number.random(0, this.length - 1)] : null; + }, + include: function(item) { + if (!this.contains(item)) this.push(item); + return this; + }, + combine: function(array) { + for (var i = 0, l = array.length; i < l; i++) this.include(array[i]); + return this; + }, + erase: function(item) { + for (var i = this.length; i--; ) { + if (this[i] === item) this.splice(i, 1); + } + return this; + }, + empty: function() { + this.length = 0; + return this; + }, + flatten: function() { + var array = []; + for (var i = 0, l = this.length; i < l; i++) { + var type = typeOf(this[i]); + if (type == "null") continue; + array = array.concat(type == "array" || type == "collection" || type == "arguments" || instanceOf(this[i], Array) ? Array.flatten(this[i]) : this[i]); + } + return array; + }, + pick: function() { + for (var i = 0, l = this.length; i < l; i++) { + if (this[i] != null) return this[i]; + } + return null; + }, + hexToRgb: function(array) { + if (this.length != 3) return null; + var rgb = this.map(function(value) { + if (value.length == 1) value += value; + return parseInt(value, 16); + }); + return array ? rgb : "rgb(" + rgb + ")"; + }, + rgbToHex: function(array) { + if (this.length < 3) return null; + if (this.length == 4 && this[3] == 0 && !array) return "transparent"; + var hex = []; + for (var i = 0; i < 3; i++) { + var bit = (this[i] - 0).toString(16); + hex.push(bit.length == 1 ? "0" + bit : bit); + } + return array ? hex : "#" + hex.join(""); + } +}); + +String.implement({ + contains: function(string, index) { + return (index ? String(this).slice(index) : String(this)).indexOf(string) > -1; + }, + test: function(regex, params) { + return (typeOf(regex) == "regexp" ? 
regex : new RegExp("" + regex, params)).test(this); + }, + trim: function() { + return String(this).replace(/^\s+|\s+$/g, ""); + }, + clean: function() { + return String(this).replace(/\s+/g, " ").trim(); + }, + camelCase: function() { + return String(this).replace(/-\D/g, function(match) { + return match.charAt(1).toUpperCase(); + }); + }, + hyphenate: function() { + return String(this).replace(/[A-Z]/g, function(match) { + return "-" + match.charAt(0).toLowerCase(); + }); + }, + capitalize: function() { + return String(this).replace(/\b[a-z]/g, function(match) { + return match.toUpperCase(); + }); + }, + escapeRegExp: function() { + return String(this).replace(/([-.*+?^${}()|[\]\/\\])/g, "\\$1"); + }, + toInt: function(base) { + return parseInt(this, base || 10); + }, + toFloat: function() { + return parseFloat(this); + }, + hexToRgb: function(array) { + var hex = String(this).match(/^#?(\w{1,2})(\w{1,2})(\w{1,2})$/); + return hex ? hex.slice(1).hexToRgb(array) : null; + }, + rgbToHex: function(array) { + var rgb = String(this).match(/\d{1,3}/g); + return rgb ? rgb.rgbToHex(array) : null; + }, + substitute: function(object, regexp) { + return String(this).replace(regexp || /\\?\{([^{}]+)\}/g, function(match, name) { + if (match.charAt(0) == "\\") return match.slice(1); + return object[name] != null ? object[name] : ""; + }); + } +}); + +Function.extend({ + attempt: function() { + for (var i = 0, l = arguments.length; i < l; i++) { + try { + return arguments[i](); + } catch (e) {} + } + return null; + } +}); + +Function.implement({ + attempt: function(args, bind) { + try { + return this.apply(bind, Array.from(args)); + } catch (e) {} + return null; + }, + bind: function(that) { + var self = this, args = arguments.length > 1 ? 
Array.slice(arguments, 1) : null, F = function() {}; + var bound = function() { + var context = that, length = arguments.length; + if (this instanceof bound) { + F.prototype = self.prototype; + context = new F(); + } + var result = !args && !length ? self.call(context) : self.apply(context, args && length ? args.concat(Array.slice(arguments)) : args || arguments); + return context == that ? result : context; + }; + return bound; + }, + pass: function(args, bind) { + var self = this; + if (args != null) args = Array.from(args); + return function() { + return self.apply(bind, args || arguments); + }; + }, + delay: function(delay, bind, args) { + return setTimeout(this.pass(args == null ? [] : args, bind), delay); + }, + periodical: function(periodical, bind, args) { + return setInterval(this.pass(args == null ? [] : args, bind), periodical); + } +}); + +Number.implement({ + limit: function(min, max) { + return Math.min(max, Math.max(min, this)); + }, + round: function(precision) { + precision = Math.pow(10, precision || 0).toFixed(precision < 0 ? 
-precision : 0); + return Math.round(this * precision) / precision; + }, + times: function(fn, bind) { + for (var i = 0; i < this; i++) fn.call(bind, i, this); + }, + toFloat: function() { + return parseFloat(this); + }, + toInt: function(base) { + return parseInt(this, base || 10); + } +}); + +Number.alias("each", "times"); + +(function(math) { + var methods = {}; + math.each(function(name) { + if (!Number[name]) methods[name] = function() { + return Math[name].apply(null, [ this ].concat(Array.from(arguments))); + }; + }); + Number.implement(methods); +})([ "abs", "acos", "asin", "atan", "atan2", "ceil", "cos", "exp", "floor", "log", "max", "min", "pow", "sin", "sqrt", "tan" ]); + +(function() { + var Class = this.Class = new Type("Class", function(params) { + if (instanceOf(params, Function)) params = { + initialize: params + }; + var newClass = function() { + reset(this); + if (newClass.$prototyping) return this; + this.$caller = null; + var value = this.initialize ? this.initialize.apply(this, arguments) : this; + this.$caller = this.caller = null; + return value; + }.extend(this).implement(params); + newClass.$constructor = Class; + newClass.prototype.$constructor = newClass; + newClass.prototype.parent = parent; + return newClass; + }); + var parent = function() { + if (!this.$caller) throw new Error('The method "parent" cannot be called.'); + var name = this.$caller.$name, parent = this.$caller.$owner.parent, previous = parent ? 
parent.prototype[name] : null; + if (!previous) throw new Error('The method "' + name + '" has no parent.'); + return previous.apply(this, arguments); + }; + var reset = function(object) { + for (var key in object) { + var value = object[key]; + switch (typeOf(value)) { + case "object": + var F = function() {}; + F.prototype = value; + object[key] = reset(new F()); + break; + + case "array": + object[key] = value.clone(); + break; + } + } + return object; + }; + var wrap = function(self, key, method) { + if (method.$origin) method = method.$origin; + var wrapper = function() { + if (method.$protected && this.$caller == null) throw new Error('The method "' + key + '" cannot be called.'); + var caller = this.caller, current = this.$caller; + this.caller = current; + this.$caller = wrapper; + var result = method.apply(this, arguments); + this.$caller = current; + this.caller = caller; + return result; + }.extend({ + $owner: self, + $origin: method, + $name: key + }); + return wrapper; + }; + var implement = function(key, value, retain) { + if (Class.Mutators.hasOwnProperty(key)) { + value = Class.Mutators[key].call(this, value); + if (value == null) return this; + } + if (typeOf(value) == "function") { + if (value.$hidden) return this; + this.prototype[key] = retain ? 
value : wrap(this, key, value); + } else { + Object.merge(this.prototype, key, value); + } + return this; + }; + var getInstance = function(klass) { + klass.$prototyping = true; + var proto = new klass(); + delete klass.$prototyping; + return proto; + }; + Class.implement("implement", implement.overloadSetter()); + Class.Mutators = { + Extends: function(parent) { + this.parent = parent; + this.prototype = getInstance(parent); + }, + Implements: function(items) { + Array.from(items).each(function(item) { + var instance = new item(); + for (var key in instance) implement.call(this, key, instance[key], true); + }, this); + } + }; +})(); + +(function() { + this.Chain = new Class({ + $chain: [], + chain: function() { + this.$chain.append(Array.flatten(arguments)); + return this; + }, + callChain: function() { + return this.$chain.length ? this.$chain.shift().apply(this, arguments) : false; + }, + clearChain: function() { + this.$chain.empty(); + return this; + } + }); + var removeOn = function(string) { + return string.replace(/^on([A-Z])/, function(full, first) { + return first.toLowerCase(); + }); + }; + this.Events = new Class({ + $events: {}, + addEvent: function(type, fn, internal) { + type = removeOn(type); + this.$events[type] = (this.$events[type] || []).include(fn); + if (internal) fn.internal = true; + return this; + }, + addEvents: function(events) { + for (var type in events) this.addEvent(type, events[type]); + return this; + }, + fireEvent: function(type, args, delay) { + type = removeOn(type); + var events = this.$events[type]; + if (!events) return this; + args = Array.from(args); + events.each(function(fn) { + if (delay) fn.delay(delay, this, args); else fn.apply(this, args); + }, this); + return this; + }, + removeEvent: function(type, fn) { + type = removeOn(type); + var events = this.$events[type]; + if (events && !fn.internal) { + var index = events.indexOf(fn); + if (index != -1) delete events[index]; + } + return this; + }, + removeEvents: 
function(events) { + var type; + if (typeOf(events) == "object") { + for (type in events) this.removeEvent(type, events[type]); + return this; + } + if (events) events = removeOn(events); + for (type in this.$events) { + if (events && events != type) continue; + var fns = this.$events[type]; + for (var i = fns.length; i--; ) if (i in fns) { + this.removeEvent(type, fns[i]); + } + } + return this; + } + }); + this.Options = new Class({ + setOptions: function() { + var options = this.options = Object.merge.apply(null, [ {}, this.options ].append(arguments)); + if (this.addEvent) for (var option in options) { + if (typeOf(options[option]) != "function" || !/^on[A-Z]/.test(option)) continue; + this.addEvent(option, options[option]); + delete options[option]; + } + return this; + } + }); +})(); + +(function() { + var document = this.document; + var window = document.window = this; + var parse = function(ua, platform) { + ua = ua.toLowerCase(); + platform = platform ? platform.toLowerCase() : ""; + var UA = ua.match(/(opera|ie|firefox|chrome|trident|crios|version)[\s\/:]([\w\d\.]+)?.*?(safari|(?:rv[\s\/:]|version[\s\/:])([\w\d\.]+)|$)/) || [ null, "unknown", 0 ]; + if (UA[1] == "trident") { + UA[1] = "ie"; + if (UA[4]) UA[2] = UA[4]; + } else if (UA[1] == "crios") { + UA[1] = "chrome"; + } + platform = ua.match(/ip(?:ad|od|hone)/) ? "ios" : (ua.match(/(?:webos|android)/) || platform.match(/mac|win|linux/) || [ "other" ])[0]; + if (platform == "win") platform = "windows"; + return { + extend: Function.prototype.extend, + name: UA[1] == "version" ? UA[3] : UA[1], + version: parseFloat(UA[1] == "opera" && UA[4] ? 
UA[4] : UA[2]), + platform: platform + }; + }; + var Browser = this.Browser = parse(navigator.userAgent, navigator.platform); + if (Browser.name == "ie") { + Browser.version = document.documentMode; + } + Browser.extend({ + Features: { + xpath: !!document.evaluate, + air: !!window.runtime, + query: !!document.querySelector, + json: !!window.JSON + }, + parseUA: parse + }); + Browser.Request = function() { + var XMLHTTP = function() { + return new XMLHttpRequest(); + }; + var MSXML2 = function() { + return new ActiveXObject("MSXML2.XMLHTTP"); + }; + var MSXML = function() { + return new ActiveXObject("Microsoft.XMLHTTP"); + }; + return Function.attempt(function() { + XMLHTTP(); + return XMLHTTP; + }, function() { + MSXML2(); + return MSXML2; + }, function() { + MSXML(); + return MSXML; + }); + }(); + Browser.Features.xhr = !!Browser.Request; + Browser.exec = function(text) { + if (!text) return text; + if (window.execScript) { + window.execScript(text); + } else { + var script = document.createElement("script"); + script.setAttribute("type", "text/javascript"); + script.text = text; + document.head.appendChild(script); + document.head.removeChild(script); + } + return text; + }; + String.implement("stripScripts", function(exec) { + var scripts = ""; + var text = this.replace(/<script[^>]*>([\s\S]*?)<\/script>/gi, function(all, code) { + scripts += code + "\n"; + return ""; + }); + if (exec === true) Browser.exec(scripts); else if (typeOf(exec) == "function") exec(scripts, text); + return text; + }); + Browser.extend({ + Document: this.Document, + Window: this.Window, + Element: this.Element, + Event: this.Event + }); + this.Window = this.$constructor = new Type("Window", function() {}); + this.$family = Function.from("window").hide(); + Window.mirror(function(name, method) { + window[name] = method; + }); + this.Document = document.$constructor = new Type("Document", function() {}); + document.$family = Function.from("document").hide(); + 
Document.mirror(function(name, method) { + document[name] = method; + }); + document.html = document.documentElement; + if (!document.head) document.head = document.getElementsByTagName("head")[0]; + if (document.execCommand) try { + document.execCommand("BackgroundImageCache", false, true); + } catch (e) {} + if (this.attachEvent && !this.addEventListener) { + var unloadEvent = function() { + this.detachEvent("onunload", unloadEvent); + document.head = document.html = document.window = null; + window = this.Window = document = null; + }; + this.attachEvent("onunload", unloadEvent); + } + var arrayFrom = Array.from; + try { + arrayFrom(document.html.childNodes); + } catch (e) { + Array.from = function(item) { + if (typeof item != "string" && Type.isEnumerable(item) && typeOf(item) != "array") { + var i = item.length, array = new Array(i); + while (i--) array[i] = item[i]; + return array; + } + return arrayFrom(item); + }; + var prototype = Array.prototype, slice = prototype.slice; + [ "pop", "push", "reverse", "shift", "sort", "splice", "unshift", "concat", "join", "slice" ].each(function(name) { + var method = prototype[name]; + Array[name] = function(item) { + return method.apply(Array.from(item), slice.call(arguments, 1)); + }; + }); + } +})(); + +(function() { + var hasOwnProperty = Object.prototype.hasOwnProperty; + Object.extend({ + subset: function(object, keys) { + var results = {}; + for (var i = 0, l = keys.length; i < l; i++) { + var k = keys[i]; + if (k in object) results[k] = object[k]; + } + return results; + }, + map: function(object, fn, bind) { + var results = {}; + for (var key in object) { + if (hasOwnProperty.call(object, key)) results[key] = fn.call(bind, object[key], key, object); + } + return results; + }, + filter: function(object, fn, bind) { + var results = {}; + for (var key in object) { + var value = object[key]; + if (hasOwnProperty.call(object, key) && fn.call(bind, value, key, object)) results[key] = value; + } + return results; + }, 
+ every: function(object, fn, bind) { + for (var key in object) { + if (hasOwnProperty.call(object, key) && !fn.call(bind, object[key], key)) return false; + } + return true; + }, + some: function(object, fn, bind) { + for (var key in object) { + if (hasOwnProperty.call(object, key) && fn.call(bind, object[key], key)) return true; + } + return false; + }, + keys: function(object) { + var keys = []; + for (var key in object) { + if (hasOwnProperty.call(object, key)) keys.push(key); + } + return keys; + }, + values: function(object) { + var values = []; + for (var key in object) { + if (hasOwnProperty.call(object, key)) values.push(object[key]); + } + return values; + }, + getLength: function(object) { + return Object.keys(object).length; + }, + keyOf: function(object, value) { + for (var key in object) { + if (hasOwnProperty.call(object, key) && object[key] === value) return key; + } + return null; + }, + contains: function(object, value) { + return Object.keyOf(object, value) != null; + }, + toQueryString: function(object, base) { + var queryString = []; + Object.each(object, function(value, key) { + if (base) key = base + "[" + key + "]"; + var result; + switch (typeOf(value)) { + case "object": + result = Object.toQueryString(value, key); + break; + + case "array": + var qs = {}; + value.each(function(val, i) { + qs[i] = val; + }); + result = Object.toQueryString(qs, key); + break; + + default: + result = key + "=" + encodeURIComponent(value); + } + if (value != null) queryString.push(result); + }); + return queryString.join("&"); + } + }); +})(); + +(function() { + var parsed, separatorIndex, combinatorIndex, reversed, cache = {}, reverseCache = {}, reUnescape = /\\/g; + var parse = function(expression, isReversed) { + if (expression == null) return null; + if (expression.Slick === true) return expression; + expression = ("" + expression).replace(/^\s+|\s+$/g, ""); + reversed = !!isReversed; + var currentCache = reversed ? 
reverseCache : cache; + if (currentCache[expression]) return currentCache[expression]; + parsed = { + Slick: true, + expressions: [], + raw: expression, + reverse: function() { + return parse(this.raw, true); + } + }; + separatorIndex = -1; + while (expression != (expression = expression.replace(regexp, parser))) ; + parsed.length = parsed.expressions.length; + return currentCache[parsed.raw] = reversed ? reverse(parsed) : parsed; + }; + var reverseCombinator = function(combinator) { + if (combinator === "!") return " "; else if (combinator === " ") return "!"; else if (/^!/.test(combinator)) return combinator.replace(/^!/, ""); else return "!" + combinator; + }; + var reverse = function(expression) { + var expressions = expression.expressions; + for (var i = 0; i < expressions.length; i++) { + var exp = expressions[i]; + var last = { + parts: [], + tag: "*", + combinator: reverseCombinator(exp[0].combinator) + }; + for (var j = 0; j < exp.length; j++) { + var cexp = exp[j]; + if (!cexp.reverseCombinator) cexp.reverseCombinator = " "; + cexp.combinator = cexp.reverseCombinator; + delete cexp.reverseCombinator; + } + exp.reverse().push(last); + } + return expression; + }; + var escapeRegExp = function(string) { + return string.replace(/[-[\]{}()*+?.\\^$|,#\s]/g, function(match) { + return "\\" + match; + }); + }; + var regexp = new RegExp("^(?:\\s*(,)\\s*|\\s*(<combinator>+)\\s*|(\\s+)|(<unicode>+|\\*)|\\#(<unicode>+)|\\.(<unicode>+)|\\[\\s*(<unicode1>+)(?:\\s*([*^$!~|]?=)(?:\\s*(?:([\"']?)(.*?)\\9)))?\\s*\\](?!\\])|(:+)(<unicode>+)(?:\\((?:(?:([\"'])([^\\13]*)\\13)|((?:\\([^)]+\\)|[^()]*)+))\\))?)".replace(/<combinator>/, "[" + escapeRegExp(">+~`!@$%^&={}\\;</") + "]").replace(/<unicode>/g, "(?:[\\w\\u00a1-\\uFFFF-]|\\\\[^\\s0-9a-f])").replace(/<unicode1>/g, "(?:[:\\w\\u00a1-\\uFFFF-]|\\\\[^\\s0-9a-f])")); + function parser(rawMatch, separator, combinator, combinatorChildren, tagName, id, className, attributeKey, attributeOperator, attributeQuote, attributeValue, 
pseudoMarker, pseudoClass, pseudoQuote, pseudoClassQuotedValue, pseudoClassValue) { + if (separator || separatorIndex === -1) { + parsed.expressions[++separatorIndex] = []; + combinatorIndex = -1; + if (separator) return ""; + } + if (combinator || combinatorChildren || combinatorIndex === -1) { + combinator = combinator || " "; + var currentSeparator = parsed.expressions[separatorIndex]; + if (reversed && currentSeparator[combinatorIndex]) currentSeparator[combinatorIndex].reverseCombinator = reverseCombinator(combinator); + currentSeparator[++combinatorIndex] = { + combinator: combinator, + tag: "*" + }; + } + var currentParsed = parsed.expressions[separatorIndex][combinatorIndex]; + if (tagName) { + currentParsed.tag = tagName.replace(reUnescape, ""); + } else if (id) { + currentParsed.id = id.replace(reUnescape, ""); + } else if (className) { + className = className.replace(reUnescape, ""); + if (!currentParsed.classList) currentParsed.classList = []; + if (!currentParsed.classes) currentParsed.classes = []; + currentParsed.classList.push(className); + currentParsed.classes.push({ + value: className, + regexp: new RegExp("(^|\\s)" + escapeRegExp(className) + "(\\s|$)") + }); + } else if (pseudoClass) { + pseudoClassValue = pseudoClassValue || pseudoClassQuotedValue; + pseudoClassValue = pseudoClassValue ? pseudoClassValue.replace(reUnescape, "") : null; + if (!currentParsed.pseudos) currentParsed.pseudos = []; + currentParsed.pseudos.push({ + key: pseudoClass.replace(reUnescape, ""), + value: pseudoClassValue, + type: pseudoMarker.length == 1 ? 
"class" : "element" + }); + } else if (attributeKey) { + attributeKey = attributeKey.replace(reUnescape, ""); + attributeValue = (attributeValue || "").replace(reUnescape, ""); + var test, regexp; + switch (attributeOperator) { + case "^=": + regexp = new RegExp("^" + escapeRegExp(attributeValue)); + break; + + case "$=": + regexp = new RegExp(escapeRegExp(attributeValue) + "$"); + break; + + case "~=": + regexp = new RegExp("(^|\\s)" + escapeRegExp(attributeValue) + "(\\s|$)"); + break; + + case "|=": + regexp = new RegExp("^" + escapeRegExp(attributeValue) + "(-|$)"); + break; + + case "=": + test = function(value) { + return attributeValue == value; + }; + break; + + case "*=": + test = function(value) { + return value && value.indexOf(attributeValue) > -1; + }; + break; + + case "!=": + test = function(value) { + return attributeValue != value; + }; + break; + + default: + test = function(value) { + return !!value; + }; + } + if (attributeValue == "" && /^[*$^]=$/.test(attributeOperator)) test = function() { + return false; + }; + if (!test) test = function(value) { + return value && regexp.test(value); + }; + if (!currentParsed.attributes) currentParsed.attributes = []; + currentParsed.attributes.push({ + key: attributeKey, + operator: attributeOperator, + value: attributeValue, + test: test + }); + } + return ""; + } + var Slick = this.Slick || {}; + Slick.parse = function(expression) { + return parse(expression); + }; + Slick.escapeRegExp = escapeRegExp; + if (!this.Slick) this.Slick = Slick; +}).apply(typeof exports != "undefined" ? 
exports : this); + +(function() { + var local = {}, featuresCache = {}, toString = Object.prototype.toString; + local.isNativeCode = function(fn) { + return /\{\s*\[native code\]\s*\}/.test("" + fn); + }; + local.isXML = function(document) { + return !!document.xmlVersion || !!document.xml || toString.call(document) == "[object XMLDocument]" || document.nodeType == 9 && document.documentElement.nodeName != "HTML"; + }; + local.setDocument = function(document) { + var nodeType = document.nodeType; + if (nodeType == 9) ; else if (nodeType) document = document.ownerDocument; else if (document.navigator) document = document.document; else return; + if (this.document === document) return; + this.document = document; + var root = document.documentElement, rootUid = this.getUIDXML(root), features = featuresCache[rootUid], feature; + if (features) { + for (feature in features) { + this[feature] = features[feature]; + } + return; + } + features = featuresCache[rootUid] = {}; + features.root = root; + features.isXMLDocument = this.isXML(document); + features.brokenStarGEBTN = features.starSelectsClosedQSA = features.idGetsName = features.brokenMixedCaseQSA = features.brokenGEBCN = features.brokenCheckedQSA = features.brokenEmptyAttributeQSA = features.isHTMLDocument = features.nativeMatchesSelector = false; + var starSelectsClosed, starSelectsComments, brokenSecondClassNameGEBCN, cachedGetElementsByClassName, brokenFormAttributeGetter; + var selected, id = "slick_uniqueid"; + var testNode = document.createElement("div"); + var testRoot = document.body || document.getElementsByTagName("body")[0] || root; + testRoot.appendChild(testNode); + try { + testNode.innerHTML = '<a id="' + id + '"></a>'; + features.isHTMLDocument = !!document.getElementById(id); + } catch (e) {} + if (features.isHTMLDocument) { + testNode.style.display = "none"; + testNode.appendChild(document.createComment("")); + starSelectsComments = testNode.getElementsByTagName("*").length > 1; + try { + 
testNode.innerHTML = "foo</foo>"; + selected = testNode.getElementsByTagName("*"); + starSelectsClosed = selected && !!selected.length && selected[0].nodeName.charAt(0) == "/"; + } catch (e) {} + features.brokenStarGEBTN = starSelectsComments || starSelectsClosed; + try { + testNode.innerHTML = '<a name="' + id + '"></a><b id="' + id + '"></b>'; + features.idGetsName = document.getElementById(id) === testNode.firstChild; + } catch (e) {} + if (testNode.getElementsByClassName) { + try { + testNode.innerHTML = '<a class="f"></a><a class="b"></a>'; + testNode.getElementsByClassName("b").length; + testNode.firstChild.className = "b"; + cachedGetElementsByClassName = testNode.getElementsByClassName("b").length != 2; + } catch (e) {} + try { + testNode.innerHTML = '<a class="a"></a><a class="f b a"></a>'; + brokenSecondClassNameGEBCN = testNode.getElementsByClassName("a").length != 2; + } catch (e) {} + features.brokenGEBCN = cachedGetElementsByClassName || brokenSecondClassNameGEBCN; + } + if (testNode.querySelectorAll) { + try { + testNode.innerHTML = "foo</foo>"; + selected = testNode.querySelectorAll("*"); + features.starSelectsClosedQSA = selected && !!selected.length && selected[0].nodeName.charAt(0) == "/"; + } catch (e) {} + try { + testNode.innerHTML = '<a class="MiX"></a>'; + features.brokenMixedCaseQSA = !testNode.querySelectorAll(".MiX").length; + } catch (e) {} + try { + testNode.innerHTML = '<select><option selected="selected">a</option></select>'; + features.brokenCheckedQSA = testNode.querySelectorAll(":checked").length == 0; + } catch (e) {} + try { + testNode.innerHTML = '<a class=""></a>'; + features.brokenEmptyAttributeQSA = testNode.querySelectorAll('[class*=""]').length != 0; + } catch (e) {} + } + try { + testNode.innerHTML = '<form action="s"><input id="action"/></form>'; + brokenFormAttributeGetter = testNode.firstChild.getAttribute("action") != "s"; + } catch (e) {} + features.nativeMatchesSelector = root.matches || root.mozMatchesSelector || 
root.webkitMatchesSelector; + if (features.nativeMatchesSelector) try { + features.nativeMatchesSelector.call(root, ":slick"); + features.nativeMatchesSelector = null; + } catch (e) {} + } + try { + root.slick_expando = 1; + delete root.slick_expando; + features.getUID = this.getUIDHTML; + } catch (e) { + features.getUID = this.getUIDXML; + } + testRoot.removeChild(testNode); + testNode = selected = testRoot = null; + features.getAttribute = features.isHTMLDocument && brokenFormAttributeGetter ? function(node, name) { + var method = this.attributeGetters[name]; + if (method) return method.call(node); + var attributeNode = node.getAttributeNode(name); + return attributeNode ? attributeNode.nodeValue : null; + } : function(node, name) { + var method = this.attributeGetters[name]; + return method ? method.call(node) : node.getAttribute(name); + }; + features.hasAttribute = root && this.isNativeCode(root.hasAttribute) ? function(node, attribute) { + return node.hasAttribute(attribute); + } : function(node, attribute) { + node = node.getAttributeNode(attribute); + return !!(node && (node.specified || node.nodeValue)); + }; + var nativeRootContains = root && this.isNativeCode(root.contains), nativeDocumentContains = document && this.isNativeCode(document.contains); + features.contains = nativeRootContains && nativeDocumentContains ? function(context, node) { + return context.contains(node); + } : nativeRootContains && !nativeDocumentContains ? function(context, node) { + return context === node || (context === document ? document.documentElement : context).contains(node); + } : root && root.compareDocumentPosition ? function(context, node) { + return context === node || !!(context.compareDocumentPosition(node) & 16); + } : function(context, node) { + if (node) do { + if (node === context) return true; + } while (node = node.parentNode); + return false; + }; + features.documentSorter = root.compareDocumentPosition ? 
function(a, b) { + if (!a.compareDocumentPosition || !b.compareDocumentPosition) return 0; + return a.compareDocumentPosition(b) & 4 ? -1 : a === b ? 0 : 1; + } : "sourceIndex" in root ? function(a, b) { + if (!a.sourceIndex || !b.sourceIndex) return 0; + return a.sourceIndex - b.sourceIndex; + } : document.createRange ? function(a, b) { + if (!a.ownerDocument || !b.ownerDocument) return 0; + var aRange = a.ownerDocument.createRange(), bRange = b.ownerDocument.createRange(); + aRange.setStart(a, 0); + aRange.setEnd(a, 0); + bRange.setStart(b, 0); + bRange.setEnd(b, 0); + return aRange.compareBoundaryPoints(Range.START_TO_END, bRange); + } : null; + root = null; + for (feature in features) { + this[feature] = features[feature]; + } + }; + var reSimpleSelector = /^([#.]?)((?:[\w-]+|\*))$/, reEmptyAttribute = /\[.+[*$^]=(?:""|'')?\]/, qsaFailExpCache = {}; + local.search = function(context, expression, append, first) { + var found = this.found = first ? null : append || []; + if (!context) return found; else if (context.navigator) context = context.document; else if (!context.nodeType) return found; + var parsed, i, uniques = this.uniques = {}, hasOthers = !!(append && append.length), contextIsDocument = context.nodeType == 9; + if (this.document !== (contextIsDocument ? 
context : context.ownerDocument)) this.setDocument(context); + if (hasOthers) for (i = found.length; i--; ) uniques[this.getUID(found[i])] = true; + if (typeof expression == "string") { + var simpleSelector = expression.match(reSimpleSelector); + simpleSelectors: if (simpleSelector) { + var symbol = simpleSelector[1], name = simpleSelector[2], node, nodes; + if (!symbol) { + if (name == "*" && this.brokenStarGEBTN) break simpleSelectors; + nodes = context.getElementsByTagName(name); + if (first) return nodes[0] || null; + for (i = 0; node = nodes[i++]; ) { + if (!(hasOthers && uniques[this.getUID(node)])) found.push(node); + } + } else if (symbol == "#") { + if (!this.isHTMLDocument || !contextIsDocument) break simpleSelectors; + node = context.getElementById(name); + if (!node) return found; + if (this.idGetsName && node.getAttributeNode("id").nodeValue != name) break simpleSelectors; + if (first) return node || null; + if (!(hasOthers && uniques[this.getUID(node)])) found.push(node); + } else if (symbol == ".") { + if (!this.isHTMLDocument || (!context.getElementsByClassName || this.brokenGEBCN) && context.querySelectorAll) break simpleSelectors; + if (context.getElementsByClassName && !this.brokenGEBCN) { + nodes = context.getElementsByClassName(name); + if (first) return nodes[0] || null; + for (i = 0; node = nodes[i++]; ) { + if (!(hasOthers && uniques[this.getUID(node)])) found.push(node); + } + } else { + var matchClass = new RegExp("(^|\\s)" + Slick.escapeRegExp(name) + "(\\s|$)"); + nodes = context.getElementsByTagName("*"); + for (i = 0; node = nodes[i++]; ) { + className = node.className; + if (!(className && matchClass.test(className))) continue; + if (first) return node; + if (!(hasOthers && uniques[this.getUID(node)])) found.push(node); + } + } + } + if (hasOthers) this.sort(found); + return first ? 
null : found; + } + querySelector: if (context.querySelectorAll) { + if (!this.isHTMLDocument || qsaFailExpCache[expression] || this.brokenMixedCaseQSA || this.brokenCheckedQSA && expression.indexOf(":checked") > -1 || this.brokenEmptyAttributeQSA && reEmptyAttribute.test(expression) || !contextIsDocument && expression.indexOf(",") > -1 || Slick.disableQSA) break querySelector; + var _expression = expression, _context = context; + if (!contextIsDocument) { + var currentId = _context.getAttribute("id"), slickid = "slickid__"; + _context.setAttribute("id", slickid); + _expression = "#" + slickid + " " + _expression; + context = _context.parentNode; + } + try { + if (first) return context.querySelector(_expression) || null; else nodes = context.querySelectorAll(_expression); + } catch (e) { + qsaFailExpCache[expression] = 1; + break querySelector; + } finally { + if (!contextIsDocument) { + if (currentId) _context.setAttribute("id", currentId); else _context.removeAttribute("id"); + context = _context; + } + } + if (this.starSelectsClosedQSA) for (i = 0; node = nodes[i++]; ) { + if (node.nodeName > "@" && !(hasOthers && uniques[this.getUID(node)])) found.push(node); + } else for (i = 0; node = nodes[i++]; ) { + if (!(hasOthers && uniques[this.getUID(node)])) found.push(node); + } + if (hasOthers) this.sort(found); + return found; + } + parsed = this.Slick.parse(expression); + if (!parsed.length) return found; + } else if (expression == null) { + return found; + } else if (expression.Slick) { + parsed = expression; + } else if (this.contains(context.documentElement || context, expression)) { + found ? found.push(expression) : found = expression; + return found; + } else { + return found; + } + this.posNTH = {}; + this.posNTHLast = {}; + this.posNTHType = {}; + this.posNTHTypeLast = {}; + this.push = !hasOthers && (first || parsed.length == 1 && parsed.expressions[0].length == 1) ? 
this.pushArray : this.pushUID; + if (found == null) found = []; + var j, m, n; + var combinator, tag, id, classList, classes, attributes, pseudos; + var currentItems, currentExpression, currentBit, lastBit, expressions = parsed.expressions; + search: for (i = 0; currentExpression = expressions[i]; i++) for (j = 0; currentBit = currentExpression[j]; j++) { + combinator = "combinator:" + currentBit.combinator; + if (!this[combinator]) continue search; + tag = this.isXMLDocument ? currentBit.tag : currentBit.tag.toUpperCase(); + id = currentBit.id; + classList = currentBit.classList; + classes = currentBit.classes; + attributes = currentBit.attributes; + pseudos = currentBit.pseudos; + lastBit = j === currentExpression.length - 1; + this.bitUniques = {}; + if (lastBit) { + this.uniques = uniques; + this.found = found; + } else { + this.uniques = {}; + this.found = []; + } + if (j === 0) { + this[combinator](context, tag, id, classes, attributes, pseudos, classList); + if (first && lastBit && found.length) break search; + } else { + if (first && lastBit) for (m = 0, n = currentItems.length; m < n; m++) { + this[combinator](currentItems[m], tag, id, classes, attributes, pseudos, classList); + if (found.length) break search; + } else for (m = 0, n = currentItems.length; m < n; m++) this[combinator](currentItems[m], tag, id, classes, attributes, pseudos, classList); + } + currentItems = this.found; + } + if (hasOthers || parsed.expressions.length > 1) this.sort(found); + return first ? 
found[0] || null : found; + }; + local.uidx = 1; + local.uidk = "slick-uniqueid"; + local.getUIDXML = function(node) { + var uid = node.getAttribute(this.uidk); + if (!uid) { + uid = this.uidx++; + node.setAttribute(this.uidk, uid); + } + return uid; + }; + local.getUIDHTML = function(node) { + return node.uniqueNumber || (node.uniqueNumber = this.uidx++); + }; + local.sort = function(results) { + if (!this.documentSorter) return results; + results.sort(this.documentSorter); + return results; + }; + local.cacheNTH = {}; + local.matchNTH = /^([+-]?\d*)?([a-z]+)?([+-]\d+)?$/; + local.parseNTHArgument = function(argument) { + var parsed = argument.match(this.matchNTH); + if (!parsed) return false; + var special = parsed[2] || false; + var a = parsed[1] || 1; + if (a == "-") a = -1; + var b = +parsed[3] || 0; + parsed = special == "n" ? { + a: a, + b: b + } : special == "odd" ? { + a: 2, + b: 1 + } : special == "even" ? { + a: 2, + b: 0 + } : { + a: 0, + b: a + }; + return this.cacheNTH[argument] = parsed; + }; + local.createNTHPseudo = function(child, sibling, positions, ofType) { + return function(node, argument) { + var uid = this.getUID(node); + if (!this[positions][uid]) { + var parent = node.parentNode; + if (!parent) return false; + var el = parent[child], count = 1; + if (ofType) { + var nodeName = node.nodeName; + do { + if (el.nodeName != nodeName) continue; + this[positions][this.getUID(el)] = count++; + } while (el = el[sibling]); + } else { + do { + if (el.nodeType != 1) continue; + this[positions][this.getUID(el)] = count++; + } while (el = el[sibling]); + } + } + argument = argument || "n"; + var parsed = this.cacheNTH[argument] || this.parseNTHArgument(argument); + if (!parsed) return false; + var a = parsed.a, b = parsed.b, pos = this[positions][uid]; + if (a == 0) return b == pos; + if (a > 0) { + if (pos < b) return false; + } else { + if (b < pos) return false; + } + return (pos - b) % a == 0; + }; + }; + local.pushArray = function(node, tag, id, 
classes, attributes, pseudos) { + if (this.matchSelector(node, tag, id, classes, attributes, pseudos)) this.found.push(node); + }; + local.pushUID = function(node, tag, id, classes, attributes, pseudos) { + var uid = this.getUID(node); + if (!this.uniques[uid] && this.matchSelector(node, tag, id, classes, attributes, pseudos)) { + this.uniques[uid] = true; + this.found.push(node); + } + }; + local.matchNode = function(node, selector) { + if (this.isHTMLDocument && this.nativeMatchesSelector) { + try { + return this.nativeMatchesSelector.call(node, selector.replace(/\[([^=]+)=\s*([^'"\]]+?)\s*\]/g, '[$1="$2"]')); + } catch (matchError) {} + } + var parsed = this.Slick.parse(selector); + if (!parsed) return true; + var expressions = parsed.expressions, simpleExpCounter = 0, i, currentExpression; + for (i = 0; currentExpression = expressions[i]; i++) { + if (currentExpression.length == 1) { + var exp = currentExpression[0]; + if (this.matchSelector(node, this.isXMLDocument ? exp.tag : exp.tag.toUpperCase(), exp.id, exp.classes, exp.attributes, exp.pseudos)) return true; + simpleExpCounter++; + } + } + if (simpleExpCounter == parsed.length) return false; + var nodes = this.search(this.document, parsed), item; + for (i = 0; item = nodes[i++]; ) { + if (item === node) return true; + } + return false; + }; + local.matchPseudo = function(node, name, argument) { + var pseudoName = "pseudo:" + name; + if (this[pseudoName]) return this[pseudoName](node, argument); + var attribute = this.getAttribute(node, name); + return argument ? argument == attribute : !!attribute; + }; + local.matchSelector = function(node, tag, id, classes, attributes, pseudos) { + if (tag) { + var nodeName = this.isXMLDocument ? 
node.nodeName : node.nodeName.toUpperCase(); + if (tag == "*") { + if (nodeName < "@") return false; + } else { + if (nodeName != tag) return false; + } + } + if (id && node.getAttribute("id") != id) return false; + var i, part, cls; + if (classes) for (i = classes.length; i--; ) { + cls = this.getAttribute(node, "class"); + if (!(cls && classes[i].regexp.test(cls))) return false; + } + if (attributes) for (i = attributes.length; i--; ) { + part = attributes[i]; + if (part.operator ? !part.test(this.getAttribute(node, part.key)) : !this.hasAttribute(node, part.key)) return false; + } + if (pseudos) for (i = pseudos.length; i--; ) { + part = pseudos[i]; + if (!this.matchPseudo(node, part.key, part.value)) return false; + } + return true; + }; + var combinators = { + " ": function(node, tag, id, classes, attributes, pseudos, classList) { + var i, item, children; + if (this.isHTMLDocument) { + getById: if (id) { + item = this.document.getElementById(id); + if (!item && node.all || this.idGetsName && item && item.getAttributeNode("id").nodeValue != id) { + children = node.all[id]; + if (!children) return; + if (!children[0]) children = [ children ]; + for (i = 0; item = children[i++]; ) { + var idNode = item.getAttributeNode("id"); + if (idNode && idNode.nodeValue == id) { + this.push(item, tag, null, classes, attributes, pseudos); + break; + } + } + return; + } + if (!item) { + if (this.contains(this.root, node)) return; else break getById; + } else if (this.document !== node && !this.contains(node, item)) return; + this.push(item, tag, null, classes, attributes, pseudos); + return; + } + getByClass: if (classes && node.getElementsByClassName && !this.brokenGEBCN) { + children = node.getElementsByClassName(classList.join(" ")); + if (!(children && children.length)) break getByClass; + for (i = 0; item = children[i++]; ) this.push(item, tag, id, null, attributes, pseudos); + return; + } + } + getByTag: { + children = node.getElementsByTagName(tag); + if (!(children && 
children.length)) break getByTag; + if (!this.brokenStarGEBTN) tag = null; + for (i = 0; item = children[i++]; ) this.push(item, tag, id, classes, attributes, pseudos); + } + }, + ">": function(node, tag, id, classes, attributes, pseudos) { + if (node = node.firstChild) do { + if (node.nodeType == 1) this.push(node, tag, id, classes, attributes, pseudos); + } while (node = node.nextSibling); + }, + "+": function(node, tag, id, classes, attributes, pseudos) { + while (node = node.nextSibling) if (node.nodeType == 1) { + this.push(node, tag, id, classes, attributes, pseudos); + break; + } + }, + "^": function(node, tag, id, classes, attributes, pseudos) { + node = node.firstChild; + if (node) { + if (node.nodeType == 1) this.push(node, tag, id, classes, attributes, pseudos); else this["combinator:+"](node, tag, id, classes, attributes, pseudos); + } + }, + "~": function(node, tag, id, classes, attributes, pseudos) { + while (node = node.nextSibling) { + if (node.nodeType != 1) continue; + var uid = this.getUID(node); + if (this.bitUniques[uid]) break; + this.bitUniques[uid] = true; + this.push(node, tag, id, classes, attributes, pseudos); + } + }, + "++": function(node, tag, id, classes, attributes, pseudos) { + this["combinator:+"](node, tag, id, classes, attributes, pseudos); + this["combinator:!+"](node, tag, id, classes, attributes, pseudos); + }, + "~~": function(node, tag, id, classes, attributes, pseudos) { + this["combinator:~"](node, tag, id, classes, attributes, pseudos); + this["combinator:!~"](node, tag, id, classes, attributes, pseudos); + }, + "!": function(node, tag, id, classes, attributes, pseudos) { + while (node = node.parentNode) if (node !== this.document) this.push(node, tag, id, classes, attributes, pseudos); + }, + "!>": function(node, tag, id, classes, attributes, pseudos) { + node = node.parentNode; + if (node !== this.document) this.push(node, tag, id, classes, attributes, pseudos); + }, + "!+": function(node, tag, id, classes, attributes, 
pseudos) { + while (node = node.previousSibling) if (node.nodeType == 1) { + this.push(node, tag, id, classes, attributes, pseudos); + break; + } + }, + "!^": function(node, tag, id, classes, attributes, pseudos) { + node = node.lastChild; + if (node) { + if (node.nodeType == 1) this.push(node, tag, id, classes, attributes, pseudos); else this["combinator:!+"](node, tag, id, classes, attributes, pseudos); + } + }, + "!~": function(node, tag, id, classes, attributes, pseudos) { + while (node = node.previousSibling) { + if (node.nodeType != 1) continue; + var uid = this.getUID(node); + if (this.bitUniques[uid]) break; + this.bitUniques[uid] = true; + this.push(node, tag, id, classes, attributes, pseudos); + } + } + }; + for (var c in combinators) local["combinator:" + c] = combinators[c]; + var pseudos = { + empty: function(node) { + var child = node.firstChild; + return !(child && child.nodeType == 1) && !(node.innerText || node.textContent || "").length; + }, + not: function(node, expression) { + return !this.matchNode(node, expression); + }, + contains: function(node, text) { + return (node.innerText || node.textContent || "").indexOf(text) > -1; + }, + "first-child": function(node) { + while (node = node.previousSibling) if (node.nodeType == 1) return false; + return true; + }, + "last-child": function(node) { + while (node = node.nextSibling) if (node.nodeType == 1) return false; + return true; + }, + "only-child": function(node) { + var prev = node; + while (prev = prev.previousSibling) if (prev.nodeType == 1) return false; + var next = node; + while (next = next.nextSibling) if (next.nodeType == 1) return false; + return true; + }, + "nth-child": local.createNTHPseudo("firstChild", "nextSibling", "posNTH"), + "nth-last-child": local.createNTHPseudo("lastChild", "previousSibling", "posNTHLast"), + "nth-of-type": local.createNTHPseudo("firstChild", "nextSibling", "posNTHType", true), + "nth-last-of-type": local.createNTHPseudo("lastChild", "previousSibling", 
"posNTHTypeLast", true), + index: function(node, index) { + return this["pseudo:nth-child"](node, "" + (index + 1)); + }, + even: function(node) { + return this["pseudo:nth-child"](node, "2n"); + }, + odd: function(node) { + return this["pseudo:nth-child"](node, "2n+1"); + }, + "first-of-type": function(node) { + var nodeName = node.nodeName; + while (node = node.previousSibling) if (node.nodeName == nodeName) return false; + return true; + }, + "last-of-type": function(node) { + var nodeName = node.nodeName; + while (node = node.nextSibling) if (node.nodeName == nodeName) return false; + return true; + }, + "only-of-type": function(node) { + var prev = node, nodeName = node.nodeName; + while (prev = prev.previousSibling) if (prev.nodeName == nodeName) return false; + var next = node; + while (next = next.nextSibling) if (next.nodeName == nodeName) return false; + return true; + }, + enabled: function(node) { + return !node.disabled; + }, + disabled: function(node) { + return node.disabled; + }, + checked: function(node) { + return node.checked || node.selected; + }, + focus: function(node) { + return this.isHTMLDocument && this.document.activeElement === node && (node.href || node.type || this.hasAttribute(node, "tabindex")); + }, + root: function(node) { + return node === this.root; + }, + selected: function(node) { + return node.selected; + } + }; + for (var p in pseudos) local["pseudo:" + p] = pseudos[p]; + var attributeGetters = local.attributeGetters = { + for: function() { + return "htmlFor" in this ? this.htmlFor : this.getAttribute("for"); + }, + href: function() { + return "href" in this ? this.getAttribute("href", 2) : this.getAttribute("href"); + }, + style: function() { + return this.style ? this.style.cssText : this.getAttribute("style"); + }, + tabindex: function() { + var attributeNode = this.getAttributeNode("tabindex"); + return attributeNode && attributeNode.specified ? 
attributeNode.nodeValue : null; + }, + type: function() { + return this.getAttribute("type"); + }, + maxlength: function() { + var attributeNode = this.getAttributeNode("maxLength"); + return attributeNode && attributeNode.specified ? attributeNode.nodeValue : null; + } + }; + attributeGetters.MAXLENGTH = attributeGetters.maxLength = attributeGetters.maxlength; + var Slick = local.Slick = this.Slick || {}; + Slick.version = "1.1.7"; + Slick.search = function(context, expression, append) { + return local.search(context, expression, append); + }; + Slick.find = function(context, expression) { + return local.search(context, expression, null, true); + }; + Slick.contains = function(container, node) { + local.setDocument(container); + return local.contains(container, node); + }; + Slick.getAttribute = function(node, name) { + local.setDocument(node); + return local.getAttribute(node, name); + }; + Slick.hasAttribute = function(node, name) { + local.setDocument(node); + return local.hasAttribute(node, name); + }; + Slick.match = function(node, selector) { + if (!(node && selector)) return false; + if (!selector || selector === node) return true; + local.setDocument(node); + return local.matchNode(node, selector); + }; + Slick.defineAttributeGetter = function(name, fn) { + local.attributeGetters[name] = fn; + return this; + }; + Slick.lookupAttributeGetter = function(name) { + return local.attributeGetters[name]; + }; + Slick.definePseudo = function(name, fn) { + local["pseudo:" + name] = function(node, argument) { + return fn.call(node, argument); + }; + return this; + }; + Slick.lookupPseudo = function(name) { + var pseudo = local["pseudo:" + name]; + if (pseudo) return function(argument) { + return pseudo.call(this, argument); + }; + return null; + }; + Slick.override = function(regexp, fn) { + local.override(regexp, fn); + return this; + }; + Slick.isXML = local.isXML; + Slick.uidOf = function(node) { + return local.getUIDHTML(node); + }; + if (!this.Slick) this.Slick 
= Slick; +}).apply(typeof exports != "undefined" ? exports : this); + +var Element = this.Element = function(tag, props) { + var konstructor = Element.Constructors[tag]; + if (konstructor) return konstructor(props); + if (typeof tag != "string") return document.id(tag).set(props); + if (!props) props = {}; + if (!/^[\w-]+$/.test(tag)) { + var parsed = Slick.parse(tag).expressions[0][0]; + tag = parsed.tag == "*" ? "div" : parsed.tag; + if (parsed.id && props.id == null) props.id = parsed.id; + var attributes = parsed.attributes; + if (attributes) for (var attr, i = 0, l = attributes.length; i < l; i++) { + attr = attributes[i]; + if (props[attr.key] != null) continue; + if (attr.value != null && attr.operator == "=") props[attr.key] = attr.value; else if (!attr.value && !attr.operator) props[attr.key] = true; + } + if (parsed.classList && props["class"] == null) props["class"] = parsed.classList.join(" "); + } + return document.newElement(tag, props); +}; + +if (Browser.Element) { + Element.prototype = Browser.Element.prototype; + Element.prototype._fireEvent = function(fireEvent) { + return function(type, event) { + return fireEvent.call(this, type, event); + }; + }(Element.prototype.fireEvent); +} + +new Type("Element", Element).mirror(function(name) { + if (Array.prototype[name]) return; + var obj = {}; + obj[name] = function() { + var results = [], args = arguments, elements = true; + for (var i = 0, l = this.length; i < l; i++) { + var element = this[i], result = results[i] = element[name].apply(element, args); + elements = elements && typeOf(result) == "element"; + } + return elements ? 
new Elements(results) : results; + }; + Elements.implement(obj); +}); + +if (!Browser.Element) { + Element.parent = Object; + Element.Prototype = { + $constructor: Element, + $family: Function.from("element").hide() + }; + Element.mirror(function(name, method) { + Element.Prototype[name] = method; + }); +} + +Element.Constructors = {}; + +var IFrame = new Type("IFrame", function() { + var params = Array.link(arguments, { + properties: Type.isObject, + iframe: function(obj) { + return obj != null; + } + }); + var props = params.properties || {}, iframe; + if (params.iframe) iframe = document.id(params.iframe); + var onload = props.onload || function() {}; + delete props.onload; + props.id = props.name = [ props.id, props.name, iframe ? iframe.id || iframe.name : "IFrame_" + String.uniqueID() ].pick(); + iframe = new Element(iframe || "iframe", props); + var onLoad = function() { + onload.call(iframe.contentWindow); + }; + if (window.frames[props.id]) onLoad(); else iframe.addListener("load", onLoad); + return iframe; +}); + +var Elements = this.Elements = function(nodes) { + if (nodes && nodes.length) { + var uniques = {}, node; + for (var i = 0; node = nodes[i++]; ) { + var uid = Slick.uidOf(node); + if (!uniques[uid]) { + uniques[uid] = true; + this.push(node); + } + } + } +}; + +Elements.prototype = { + length: 0 +}; + +Elements.parent = Array; + +new Type("Elements", Elements).implement({ + filter: function(filter, bind) { + if (!filter) return this; + return new Elements(Array.filter(this, typeOf(filter) == "string" ? 
function(item) { + return item.match(filter); + } : filter, bind)); + }.protect(), + push: function() { + var length = this.length; + for (var i = 0, l = arguments.length; i < l; i++) { + var item = document.id(arguments[i]); + if (item) this[length++] = item; + } + return this.length = length; + }.protect(), + unshift: function() { + var items = []; + for (var i = 0, l = arguments.length; i < l; i++) { + var item = document.id(arguments[i]); + if (item) items.push(item); + } + return Array.prototype.unshift.apply(this, items); + }.protect(), + concat: function() { + var newElements = new Elements(this); + for (var i = 0, l = arguments.length; i < l; i++) { + var item = arguments[i]; + if (Type.isEnumerable(item)) newElements.append(item); else newElements.push(item); + } + return newElements; + }.protect(), + append: function(collection) { + for (var i = 0, l = collection.length; i < l; i++) this.push(collection[i]); + return this; + }.protect(), + empty: function() { + while (this.length) delete this[--this.length]; + return this; + }.protect() +}); + +(function() { + var splice = Array.prototype.splice, object = { + "0": 0, + "1": 1, + length: 2 + }; + splice.call(object, 1, 1); + if (object[1] == 1) Elements.implement("splice", function() { + var length = this.length; + var result = splice.apply(this, arguments); + while (length >= this.length) delete this[length--]; + return result; + }.protect()); + Array.forEachMethod(function(method, name) { + Elements.implement(name, method); + }); + Array.mirror(Elements); + var createElementAcceptsHTML; + try { + createElementAcceptsHTML = document.createElement("<input name=x>").name == "x"; + } catch (e) {} + var escapeQuotes = function(html) { + return ("" + html).replace(/&/g, "&").replace(/"/g, """); + }; + var canChangeStyleHTML = function() { + var div = document.createElement("style"), flag = false; + try { + div.innerHTML = "#justTesing{margin: 0px;}"; + flag = !!div.innerHTML; + } catch (e) {} + return flag; + 
}(); + Document.implement({ + newElement: function(tag, props) { + if (props) { + if (props.checked != null) props.defaultChecked = props.checked; + if ((props.type == "checkbox" || props.type == "radio") && props.value == null) props.value = "on"; + if (!canChangeStyleHTML && tag == "style") { + var styleElement = document.createElement("style"); + styleElement.setAttribute("type", "text/css"); + if (props.type) delete props.type; + return this.id(styleElement).set(props); + } + if (createElementAcceptsHTML) { + tag = "<" + tag; + if (props.name) tag += ' name="' + escapeQuotes(props.name) + '"'; + if (props.type) tag += ' type="' + escapeQuotes(props.type) + '"'; + tag += ">"; + delete props.name; + delete props.type; + } + } + return this.id(this.createElement(tag)).set(props); + } + }); +})(); + +(function() { + Slick.uidOf(window); + Slick.uidOf(document); + Document.implement({ + newTextNode: function(text) { + return this.createTextNode(text); + }, + getDocument: function() { + return this; + }, + getWindow: function() { + return this.window; + }, + id: function() { + var types = { + string: function(id, nocash, doc) { + id = Slick.find(doc, "#" + id.replace(/(\W)/g, "\\$1")); + return id ? types.element(id, nocash) : null; + }, + element: function(el, nocash) { + Slick.uidOf(el); + if (!nocash && !el.$family && !/^(?:object|embed)$/i.test(el.tagName)) { + var fireEvent = el.fireEvent; + el._fireEvent = function(type, event) { + return fireEvent(type, event); + }; + Object.append(el, Element.Prototype); + } + return el; + }, + object: function(obj, nocash, doc) { + if (obj.toElement) return types.element(obj.toElement(doc), nocash); + return null; + } + }; + types.textnode = types.whitespace = types.window = types.document = function(zero) { + return zero; + }; + return function(el, nocash, doc) { + if (el && el.$family && el.uniqueNumber) return el; + var type = typeOf(el); + return types[type] ? 
types[type](el, nocash, doc || document) : null; + }; + }() + }); + if (window.$ == null) Window.implement("$", function(el, nc) { + return document.id(el, nc, this.document); + }); + Window.implement({ + getDocument: function() { + return this.document; + }, + getWindow: function() { + return this; + } + }); + [ Document, Element ].invoke("implement", { + getElements: function(expression) { + return Slick.search(this, expression, new Elements()); + }, + getElement: function(expression) { + return document.id(Slick.find(this, expression)); + } + }); + var contains = { + contains: function(element) { + return Slick.contains(this, element); + } + }; + if (!document.contains) Document.implement(contains); + if (!document.createElement("div").contains) Element.implement(contains); + var injectCombinator = function(expression, combinator) { + if (!expression) return combinator; + expression = Object.clone(Slick.parse(expression)); + var expressions = expression.expressions; + for (var i = expressions.length; i--; ) expressions[i][0].combinator = combinator; + return expression; + }; + Object.forEach({ + getNext: "~", + getPrevious: "!~", + getParent: "!" + }, function(combinator, method) { + Element.implement(method, function(expression) { + return this.getElement(injectCombinator(expression, combinator)); + }); + }); + Object.forEach({ + getAllNext: "~", + getAllPrevious: "!~", + getSiblings: "~~", + getChildren: ">", + getParents: "!" 
+ }, function(combinator, method) { + Element.implement(method, function(expression) { + return this.getElements(injectCombinator(expression, combinator)); + }); + }); + Element.implement({ + getFirst: function(expression) { + return document.id(Slick.search(this, injectCombinator(expression, ">"))[0]); + }, + getLast: function(expression) { + return document.id(Slick.search(this, injectCombinator(expression, ">")).getLast()); + }, + getWindow: function() { + return this.ownerDocument.window; + }, + getDocument: function() { + return this.ownerDocument; + }, + getElementById: function(id) { + return document.id(Slick.find(this, "#" + ("" + id).replace(/(\W)/g, "\\$1"))); + }, + match: function(expression) { + return !expression || Slick.match(this, expression); + } + }); + if (window.$$ == null) Window.implement("$$", function(selector) { + if (arguments.length == 1) { + if (typeof selector == "string") return Slick.search(this.document, selector, new Elements()); else if (Type.isEnumerable(selector)) return new Elements(selector); + } + return new Elements(arguments); + }); + var inserters = { + before: function(context, element) { + var parent = element.parentNode; + if (parent) parent.insertBefore(context, element); + }, + after: function(context, element) { + var parent = element.parentNode; + if (parent) parent.insertBefore(context, element.nextSibling); + }, + bottom: function(context, element) { + element.appendChild(context); + }, + top: function(context, element) { + element.insertBefore(context, element.firstChild); + } + }; + inserters.inside = inserters.bottom; + var propertyGetters = {}, propertySetters = {}; + var properties = {}; + Array.forEach([ "type", "value", "defaultValue", "accessKey", "cellPadding", "cellSpacing", "colSpan", "frameBorder", "rowSpan", "tabIndex", "useMap" ], function(property) { + properties[property.toLowerCase()] = property; + }); + properties.html = "innerHTML"; + properties.text = document.createElement("div").textContent 
== null ? "innerText" : "textContent"; + Object.forEach(properties, function(real, key) { + propertySetters[key] = function(node, value) { + node[real] = value; + }; + propertyGetters[key] = function(node) { + return node[real]; + }; + }); + propertySetters.text = function(setter) { + return function(node, value) { + if (node.get("tag") == "style") node.set("html", value); else node[properties.text] = value; + }; + }(propertySetters.text); + propertyGetters.text = function(getter) { + return function(node) { + return node.get("tag") == "style" ? node.innerHTML : getter(node); + }; + }(propertyGetters.text); + var bools = [ "compact", "nowrap", "ismap", "declare", "noshade", "checked", "disabled", "readOnly", "multiple", "selected", "noresize", "defer", "defaultChecked", "autofocus", "controls", "autoplay", "loop" ]; + var booleans = {}; + Array.forEach(bools, function(bool) { + var lower = bool.toLowerCase(); + booleans[lower] = bool; + propertySetters[lower] = function(node, value) { + node[bool] = !!value; + }; + propertyGetters[lower] = function(node) { + return !!node[bool]; + }; + }); + Object.append(propertySetters, { + class: function(node, value) { + "className" in node ? node.className = value || "" : node.setAttribute("class", value); + }, + for: function(node, value) { + "htmlFor" in node ? node.htmlFor = value : node.setAttribute("for", value); + }, + style: function(node, value) { + node.style ? node.style.cssText = value : node.setAttribute("style", value); + }, + value: function(node, value) { + node.value = value != null ? value : ""; + } + }); + propertyGetters["class"] = function(node) { + return "className" in node ? 
node.className || null : node.getAttribute("class"); + }; + var el = document.createElement("button"); + try { + el.type = "button"; + } catch (e) {} + if (el.type != "button") propertySetters.type = function(node, value) { + node.setAttribute("type", value); + }; + el = null; + var canChangeStyleHTML = function() { + var div = document.createElement("style"), flag = false; + try { + div.innerHTML = "#justTesing{margin: 0px;}"; + flag = !!div.innerHTML; + } catch (e) {} + return flag; + }(); + var input = document.createElement("input"), volatileInputValue, html5InputSupport; + input.value = "t"; + input.type = "submit"; + volatileInputValue = input.value != "t"; + try { + input.type = "email"; + html5InputSupport = input.type == "email"; + } catch (e) {} + input = null; + if (volatileInputValue || !html5InputSupport) propertySetters.type = function(node, type) { + try { + var value = node.value; + node.type = type; + node.value = value; + } catch (e) {} + }; + var pollutesGetAttribute = function(div) { + div.random = "attribute"; + return div.getAttribute("random") == "attribute"; + }(document.createElement("div")); + var hasCloneBug = function(test) { + test.innerHTML = '<object><param name="should_fix" value="the unknown" /></object>'; + return test.cloneNode(true).firstChild.childNodes.length != 1; + }(document.createElement("div")); + var hasClassList = !!document.createElement("div").classList; + var classes = function(className) { + var classNames = (className || "").clean().split(" "), uniques = {}; + return classNames.filter(function(className) { + if (className !== "" && !uniques[className]) return uniques[className] = className; + }); + }; + var addToClassList = function(name) { + this.classList.add(name); + }; + var removeFromClassList = function(name) { + this.classList.remove(name); + }; + Element.implement({ + setProperty: function(name, value) { + var setter = propertySetters[name.toLowerCase()]; + if (setter) { + setter(this, value); + } else { + 
var attributeWhiteList; + if (pollutesGetAttribute) attributeWhiteList = this.retrieve("$attributeWhiteList", {}); + if (value == null) { + this.removeAttribute(name); + if (pollutesGetAttribute) delete attributeWhiteList[name]; + } else { + this.setAttribute(name, "" + value); + if (pollutesGetAttribute) attributeWhiteList[name] = true; + } + } + return this; + }, + setProperties: function(attributes) { + for (var attribute in attributes) this.setProperty(attribute, attributes[attribute]); + return this; + }, + getProperty: function(name) { + var getter = propertyGetters[name.toLowerCase()]; + if (getter) return getter(this); + if (pollutesGetAttribute) { + var attr = this.getAttributeNode(name), attributeWhiteList = this.retrieve("$attributeWhiteList", {}); + if (!attr) return null; + if (attr.expando && !attributeWhiteList[name]) { + var outer = this.outerHTML; + if (outer.substr(0, outer.search(/\/?['"]?>(?![^<]*<['"])/)).indexOf(name) < 0) return null; + attributeWhiteList[name] = true; + } + } + var result = Slick.getAttribute(this, name); + return !result && !Slick.hasAttribute(this, name) ? null : result; + }, + getProperties: function() { + var args = Array.from(arguments); + return args.map(this.getProperty, this).associate(args); + }, + removeProperty: function(name) { + return this.setProperty(name, null); + }, + removeProperties: function() { + Array.each(arguments, this.removeProperty, this); + return this; + }, + set: function(prop, value) { + var property = Element.Properties[prop]; + property && property.set ? property.set.call(this, value) : this.setProperty(prop, value); + }.overloadSetter(), + get: function(prop) { + var property = Element.Properties[prop]; + return property && property.get ? property.get.apply(this) : this.getProperty(prop); + }.overloadGetter(), + erase: function(prop) { + var property = Element.Properties[prop]; + property && property.erase ? 
property.erase.apply(this) : this.removeProperty(prop); + return this; + }, + hasClass: hasClassList ? function(className) { + return this.classList.contains(className); + } : function(className) { + return classes(this.className).contains(className); + }, + addClass: hasClassList ? function(className) { + classes(className).forEach(addToClassList, this); + return this; + } : function(className) { + this.className = classes(className + " " + this.className).join(" "); + return this; + }, + removeClass: hasClassList ? function(className) { + classes(className).forEach(removeFromClassList, this); + return this; + } : function(className) { + var classNames = classes(this.className); + classes(className).forEach(classNames.erase, classNames); + this.className = classNames.join(" "); + return this; + }, + toggleClass: function(className, force) { + if (force == null) force = !this.hasClass(className); + return force ? this.addClass(className) : this.removeClass(className); + }, + adopt: function() { + var parent = this, fragment, elements = Array.flatten(arguments), length = elements.length; + if (length > 1) parent = fragment = document.createDocumentFragment(); + for (var i = 0; i < length; i++) { + var element = document.id(elements[i], true); + if (element) parent.appendChild(element); + } + if (fragment) this.appendChild(fragment); + return this; + }, + appendText: function(text, where) { + return this.grab(this.getDocument().newTextNode(text), where); + }, + grab: function(el, where) { + inserters[where || "bottom"](document.id(el, true), this); + return this; + }, + inject: function(el, where) { + inserters[where || "bottom"](this, document.id(el, true)); + return this; + }, + replaces: function(el) { + el = document.id(el, true); + el.parentNode.replaceChild(this, el); + return this; + }, + wraps: function(el, where) { + el = document.id(el, true); + return this.replaces(el).grab(el, where); + }, + getSelected: function() { + this.selectedIndex; + return new 
Elements(Array.from(this.options).filter(function(option) { + return option.selected; + })); + }, + toQueryString: function() { + var queryString = []; + this.getElements("input, select, textarea").each(function(el) { + var type = el.type; + if (!el.name || el.disabled || type == "submit" || type == "reset" || type == "file" || type == "image") return; + var value = el.get("tag") == "select" ? el.getSelected().map(function(opt) { + return document.id(opt).get("value"); + }) : (type == "radio" || type == "checkbox") && !el.checked ? null : el.get("value"); + Array.from(value).each(function(val) { + if (typeof val != "undefined") queryString.push(encodeURIComponent(el.name) + "=" + encodeURIComponent(val)); + }); + }); + return queryString.join("&"); + } + }); + var appendInserters = { + before: "beforeBegin", + after: "afterEnd", + bottom: "beforeEnd", + top: "afterBegin", + inside: "beforeEnd" + }; + Element.implement("appendHTML", "insertAdjacentHTML" in document.createElement("div") ? 
function(html, where) { + this.insertAdjacentHTML(appendInserters[where || "bottom"], html); + return this; + } : function(html, where) { + var temp = new Element("div", { + html: html + }), children = temp.childNodes, fragment = temp.firstChild; + if (!fragment) return this; + if (children.length > 1) { + fragment = document.createDocumentFragment(); + for (var i = 0, l = children.length; i < l; i++) { + fragment.appendChild(children[i]); + } + } + inserters[where || "bottom"](fragment, this); + return this; + }); + var collected = {}, storage = {}; + var get = function(uid) { + return storage[uid] || (storage[uid] = {}); + }; + var clean = function(item) { + var uid = item.uniqueNumber; + if (item.removeEvents) item.removeEvents(); + if (item.clearAttributes) item.clearAttributes(); + if (uid != null) { + delete collected[uid]; + delete storage[uid]; + } + return item; + }; + var formProps = { + input: "checked", + option: "selected", + textarea: "value" + }; + Element.implement({ + destroy: function() { + var children = clean(this).getElementsByTagName("*"); + Array.each(children, clean); + Element.dispose(this); + return null; + }, + empty: function() { + Array.from(this.childNodes).each(Element.dispose); + return this; + }, + dispose: function() { + return this.parentNode ? 
this.parentNode.removeChild(this) : this; + }, + clone: function(contents, keepid) { + contents = contents !== false; + var clone = this.cloneNode(contents), ce = [ clone ], te = [ this ], i; + if (contents) { + ce.append(Array.from(clone.getElementsByTagName("*"))); + te.append(Array.from(this.getElementsByTagName("*"))); + } + for (i = ce.length; i--; ) { + var node = ce[i], element = te[i]; + if (!keepid) node.removeAttribute("id"); + if (node.clearAttributes) { + node.clearAttributes(); + node.mergeAttributes(element); + node.removeAttribute("uniqueNumber"); + if (node.options) { + var no = node.options, eo = element.options; + for (var j = no.length; j--; ) no[j].selected = eo[j].selected; + } + } + var prop = formProps[element.tagName.toLowerCase()]; + if (prop && element[prop]) node[prop] = element[prop]; + } + if (hasCloneBug) { + var co = clone.getElementsByTagName("object"), to = this.getElementsByTagName("object"); + for (i = co.length; i--; ) co[i].outerHTML = to[i].outerHTML; + } + return document.id(clone); + } + }); + [ Element, Window, Document ].invoke("implement", { + addListener: function(type, fn) { + if (window.attachEvent && !window.addEventListener) { + collected[Slick.uidOf(this)] = this; + } + if (this.addEventListener) this.addEventListener(type, fn, !!arguments[2]); else this.attachEvent("on" + type, fn); + return this; + }, + removeListener: function(type, fn) { + if (this.removeEventListener) this.removeEventListener(type, fn, !!arguments[2]); else this.detachEvent("on" + type, fn); + return this; + }, + retrieve: function(property, dflt) { + var storage = get(Slick.uidOf(this)), prop = storage[property]; + if (dflt != null && prop == null) prop = storage[property] = dflt; + return prop != null ? 
prop : null; + }, + store: function(property, value) { + var storage = get(Slick.uidOf(this)); + storage[property] = value; + return this; + }, + eliminate: function(property) { + var storage = get(Slick.uidOf(this)); + delete storage[property]; + return this; + } + }); + if (window.attachEvent && !window.addEventListener) { + var gc = function() { + Object.each(collected, clean); + if (window.CollectGarbage) CollectGarbage(); + window.removeListener("unload", gc); + }; + window.addListener("unload", gc); + } + Element.Properties = {}; + Element.Properties.style = { + set: function(style) { + this.style.cssText = style; + }, + get: function() { + return this.style.cssText; + }, + erase: function() { + this.style.cssText = ""; + } + }; + Element.Properties.tag = { + get: function() { + return this.tagName.toLowerCase(); + } + }; + Element.Properties.html = { + set: function(html) { + if (html == null) html = ""; else if (typeOf(html) == "array") html = html.join(""); + if (this.styleSheet && !canChangeStyleHTML) this.styleSheet.cssText = html; else this.innerHTML = html; + }, + erase: function() { + this.set("html", ""); + } + }; + var supportsHTML5Elements = true, supportsTableInnerHTML = true, supportsTRInnerHTML = true; + var div = document.createElement("div"); + div.innerHTML = "<nav></nav>"; + supportsHTML5Elements = div.childNodes.length == 1; + if (!supportsHTML5Elements) { + var tags = "abbr article aside audio canvas datalist details figcaption figure footer header hgroup mark meter nav output progress section summary time video".split(" "), fragment = document.createDocumentFragment(), l = tags.length; + while (l--) fragment.createElement(tags[l]); + } + div = null; + supportsTableInnerHTML = Function.attempt(function() { + var table = document.createElement("table"); + table.innerHTML = "<tr><td></td></tr>"; + return true; + }); + var tr = document.createElement("tr"), html = "<td></td>"; + tr.innerHTML = html; + supportsTRInnerHTML = tr.innerHTML == 
html; + tr = null; + if (!supportsTableInnerHTML || !supportsTRInnerHTML || !supportsHTML5Elements) { + Element.Properties.html.set = function(set) { + var translations = { + table: [ 1, "<table>", "</table>" ], + select: [ 1, "<select>", "</select>" ], + tbody: [ 2, "<table><tbody>", "</tbody></table>" ], + tr: [ 3, "<table><tbody><tr>", "</tr></tbody></table>" ] + }; + translations.thead = translations.tfoot = translations.tbody; + return function(html) { + if (this.styleSheet) return set.call(this, html); + var wrap = translations[this.get("tag")]; + if (!wrap && !supportsHTML5Elements) wrap = [ 0, "", "" ]; + if (!wrap) return set.call(this, html); + var level = wrap[0], wrapper = document.createElement("div"), target = wrapper; + if (!supportsHTML5Elements) fragment.appendChild(wrapper); + wrapper.innerHTML = [ wrap[1], html, wrap[2] ].flatten().join(""); + while (level--) target = target.firstChild; + this.empty().adopt(target.childNodes); + if (!supportsHTML5Elements) fragment.removeChild(wrapper); + wrapper = null; + }; + }(Element.Properties.html.set); + } + var testForm = document.createElement("form"); + testForm.innerHTML = "<select><option>s</option></select>"; + if (testForm.firstChild.value != "s") Element.Properties.value = { + set: function(value) { + var tag = this.get("tag"); + if (tag != "select") return this.setProperty("value", value); + var options = this.getElements("option"); + value = String(value); + for (var i = 0; i < options.length; i++) { + var option = options[i], attr = option.getAttributeNode("value"), optionValue = attr && attr.specified ? 
option.value : option.get("text"); + if (optionValue === value) return option.selected = true; + } + }, + get: function() { + var option = this, tag = option.get("tag"); + if (tag != "select" && tag != "option") return this.getProperty("value"); + if (tag == "select" && !(option = option.getSelected()[0])) return ""; + var attr = option.getAttributeNode("value"); + return attr && attr.specified ? option.value : option.get("text"); + } + }; + testForm = null; + if (document.createElement("div").getAttributeNode("id")) Element.Properties.id = { + set: function(id) { + this.id = this.getAttributeNode("id").value = id; + }, + get: function() { + return this.id || null; + }, + erase: function() { + this.id = this.getAttributeNode("id").value = ""; + } + }; +})(); + +(function() { + var _keys = {}; + var normalizeWheelSpeed = function(event) { + var normalized; + if (event.wheelDelta) { + normalized = event.wheelDelta % 120 == 0 ? event.wheelDelta / 120 : event.wheelDelta / 12; + } else { + var rawAmount = event.deltaY || event.detail || 0; + normalized = -(rawAmount % 3 == 0 ? 
rawAmount / 3 : rawAmount * 10); + } + return normalized; + }; + var DOMEvent = this.DOMEvent = new Type("DOMEvent", function(event, win) { + if (!win) win = window; + event = event || win.event; + if (event.$extended) return event; + this.event = event; + this.$extended = true; + this.shift = event.shiftKey; + this.control = event.ctrlKey; + this.alt = event.altKey; + this.meta = event.metaKey; + var type = this.type = event.type; + var target = event.target || event.srcElement; + while (target && target.nodeType == 3) target = target.parentNode; + this.target = document.id(target); + if (type.indexOf("key") == 0) { + var code = this.code = event.which || event.keyCode; + this.key = _keys[code]; + if (type == "keydown" || type == "keyup") { + if (code > 111 && code < 124) this.key = "f" + (code - 111); else if (code > 95 && code < 106) this.key = code - 96; + } + if (this.key == null) this.key = String.fromCharCode(code).toLowerCase(); + } else if (type == "click" || type == "dblclick" || type == "contextmenu" || type == "wheel" || type == "DOMMouseScroll" || type.indexOf("mouse") == 0) { + var doc = win.document; + doc = !doc.compatMode || doc.compatMode == "CSS1Compat" ? doc.html : doc.body; + this.page = { + x: event.pageX != null ? event.pageX : event.clientX + doc.scrollLeft, + y: event.pageY != null ? event.pageY : event.clientY + doc.scrollTop + }; + this.client = { + x: event.pageX != null ? event.pageX - win.pageXOffset : event.clientX, + y: event.pageY != null ? event.pageY - win.pageYOffset : event.clientY + }; + if (type == "DOMMouseScroll" || type == "wheel" || type == "mousewheel") this.wheel = normalizeWheelSpeed(event); + this.rightClick = event.which == 3 || event.button == 2; + if (type == "mouseover" || type == "mouseout") { + var related = event.relatedTarget || event[(type == "mouseover" ? 
"from" : "to") + "Element"]; + while (related && related.nodeType == 3) related = related.parentNode; + this.relatedTarget = document.id(related); + } + } else if (type.indexOf("touch") == 0 || type.indexOf("gesture") == 0) { + this.rotation = event.rotation; + this.scale = event.scale; + this.targetTouches = event.targetTouches; + this.changedTouches = event.changedTouches; + var touches = this.touches = event.touches; + if (touches && touches[0]) { + var touch = touches[0]; + this.page = { + x: touch.pageX, + y: touch.pageY + }; + this.client = { + x: touch.clientX, + y: touch.clientY + }; + } + } + if (!this.client) this.client = {}; + if (!this.page) this.page = {}; + }); + DOMEvent.implement({ + stop: function() { + return this.preventDefault().stopPropagation(); + }, + stopPropagation: function() { + if (this.event.stopPropagation) this.event.stopPropagation(); else this.event.cancelBubble = true; + return this; + }, + preventDefault: function() { + if (this.event.preventDefault) this.event.preventDefault(); else this.event.returnValue = false; + return this; + } + }); + DOMEvent.defineKey = function(code, key) { + _keys[code] = key; + return this; + }; + DOMEvent.defineKeys = DOMEvent.defineKey.overloadSetter(true); + DOMEvent.defineKeys({ + "38": "up", + "40": "down", + "37": "left", + "39": "right", + "27": "esc", + "32": "space", + "8": "backspace", + "9": "tab", + "46": "delete", + "13": "enter" + }); +})(); + +(function() { + Element.Properties.events = { + set: function(events) { + this.addEvents(events); + } + }; + [ Element, Window, Document ].invoke("implement", { + addEvent: function(type, fn) { + var events = this.retrieve("events", {}); + if (!events[type]) events[type] = { + keys: [], + values: [] + }; + if (events[type].keys.contains(fn)) return this; + events[type].keys.push(fn); + var realType = type, custom = Element.Events[type], condition = fn, self = this; + if (custom) { + if (custom.onAdd) custom.onAdd.call(this, fn, type); + if 
(custom.condition) { + condition = function(event) { + if (custom.condition.call(this, event, type)) return fn.call(this, event); + return true; + }; + } + if (custom.base) realType = Function.from(custom.base).call(this, type); + } + var defn = function() { + return fn.call(self); + }; + var nativeEvent = Element.NativeEvents[realType]; + if (nativeEvent) { + if (nativeEvent == 2) { + defn = function(event) { + event = new DOMEvent(event, self.getWindow()); + if (condition.call(self, event) === false) event.stop(); + }; + } + this.addListener(realType, defn, arguments[2]); + } + events[type].values.push(defn); + return this; + }, + removeEvent: function(type, fn) { + var events = this.retrieve("events"); + if (!events || !events[type]) return this; + var list = events[type]; + var index = list.keys.indexOf(fn); + if (index == -1) return this; + var value = list.values[index]; + delete list.keys[index]; + delete list.values[index]; + var custom = Element.Events[type]; + if (custom) { + if (custom.onRemove) custom.onRemove.call(this, fn, type); + if (custom.base) type = Function.from(custom.base).call(this, type); + } + return Element.NativeEvents[type] ? 
this.removeListener(type, value, arguments[2]) : this; + }, + addEvents: function(events) { + for (var event in events) this.addEvent(event, events[event]); + return this; + }, + removeEvents: function(events) { + var type; + if (typeOf(events) == "object") { + for (type in events) this.removeEvent(type, events[type]); + return this; + } + var attached = this.retrieve("events"); + if (!attached) return this; + if (!events) { + for (type in attached) this.removeEvents(type); + this.eliminate("events"); + } else if (attached[events]) { + attached[events].keys.each(function(fn) { + this.removeEvent(events, fn); + }, this); + delete attached[events]; + } + return this; + }, + fireEvent: function(type, args, delay) { + var events = this.retrieve("events"); + if (!events || !events[type]) return this; + args = Array.from(args); + events[type].keys.each(function(fn) { + if (delay) fn.delay(delay, this, args); else fn.apply(this, args); + }, this); + return this; + }, + cloneEvents: function(from, type) { + from = document.id(from); + var events = from.retrieve("events"); + if (!events) return this; + if (!type) { + for (var eventType in events) this.cloneEvents(from, eventType); + } else if (events[type]) { + events[type].keys.each(function(fn) { + this.addEvent(type, fn); + }, this); + } + return this; + } + }); + Element.NativeEvents = { + click: 2, + dblclick: 2, + mouseup: 2, + mousedown: 2, + contextmenu: 2, + wheel: 2, + mousewheel: 2, + DOMMouseScroll: 2, + mouseover: 2, + mouseout: 2, + mousemove: 2, + selectstart: 2, + selectend: 2, + keydown: 2, + keypress: 2, + keyup: 2, + orientationchange: 2, + touchstart: 2, + touchmove: 2, + touchend: 2, + touchcancel: 2, + gesturestart: 2, + gesturechange: 2, + gestureend: 2, + focus: 2, + blur: 2, + change: 2, + reset: 2, + select: 2, + submit: 2, + paste: 2, + input: 2, + load: 2, + unload: 1, + beforeunload: 2, + resize: 1, + move: 1, + DOMContentLoaded: 1, + readystatechange: 1, + hashchange: 1, + popstate: 2, + error: 
1, + abort: 1, + scroll: 1, + message: 2 + }; + Element.Events = { + mousewheel: { + base: "onwheel" in document ? "wheel" : "onmousewheel" in document ? "mousewheel" : "DOMMouseScroll" + } + }; + var check = function(event) { + var related = event.relatedTarget; + if (related == null) return true; + if (!related) return false; + return related != this && related.prefix != "xul" && typeOf(this) != "document" && !this.contains(related); + }; + if ("onmouseenter" in document.documentElement) { + Element.NativeEvents.mouseenter = Element.NativeEvents.mouseleave = 2; + Element.MouseenterCheck = check; + } else { + Element.Events.mouseenter = { + base: "mouseover", + condition: check + }; + Element.Events.mouseleave = { + base: "mouseout", + condition: check + }; + } + if (!window.addEventListener) { + Element.NativeEvents.propertychange = 2; + Element.Events.change = { + base: function() { + var type = this.type; + return this.get("tag") == "input" && (type == "radio" || type == "checkbox") ? "propertychange" : "change"; + }, + condition: function(event) { + return event.type != "propertychange" || event.event.propertyName == "checked"; + } + }; + } +})(); + +(function() { + var eventListenerSupport = !!window.addEventListener; + Element.NativeEvents.focusin = Element.NativeEvents.focusout = 2; + var bubbleUp = function(self, match, fn, event, target) { + while (target && target != self) { + if (match(target, event)) return fn.call(target, event, target); + target = document.id(target.parentNode); + } + }; + var map = { + mouseenter: { + base: "mouseover", + condition: Element.MouseenterCheck + }, + mouseleave: { + base: "mouseout", + condition: Element.MouseenterCheck + }, + focus: { + base: "focus" + (eventListenerSupport ? "" : "in"), + capture: true + }, + blur: { + base: eventListenerSupport ? 
"blur" : "focusout", + capture: true + } + }; + var _key = "$delegation:"; + var formObserver = function(type) { + return { + base: "focusin", + remove: function(self, uid) { + var list = self.retrieve(_key + type + "listeners", {})[uid]; + if (list && list.forms) for (var i = list.forms.length; i--; ) { + if (list.forms[i].removeEvent) list.forms[i].removeEvent(type, list.fns[i]); + } + }, + listen: function(self, match, fn, event, target, uid) { + var form = target.get("tag") == "form" ? target : event.target.getParent("form"); + if (!form) return; + var listeners = self.retrieve(_key + type + "listeners", {}), listener = listeners[uid] || { + forms: [], + fns: [] + }, forms = listener.forms, fns = listener.fns; + if (forms.indexOf(form) != -1) return; + forms.push(form); + var _fn = function(event) { + bubbleUp(self, match, fn, event, target); + }; + form.addEvent(type, _fn); + fns.push(_fn); + listeners[uid] = listener; + self.store(_key + type + "listeners", listeners); + } + }; + }; + var inputObserver = function(type) { + return { + base: "focusin", + listen: function(self, match, fn, event, target) { + var events = { + blur: function() { + this.removeEvents(events); + } + }; + events[type] = function(event) { + bubbleUp(self, match, fn, event, target); + }; + event.target.addEvents(events); + } + }; + }; + if (!eventListenerSupport) Object.append(map, { + submit: formObserver("submit"), + reset: formObserver("reset"), + change: inputObserver("change"), + select: inputObserver("select") + }); + var proto = Element.prototype, addEvent = proto.addEvent, removeEvent = proto.removeEvent; + var relay = function(old, method) { + return function(type, fn, useCapture) { + if (type.indexOf(":relay") == -1) return old.call(this, type, fn, useCapture); + var parsed = Slick.parse(type).expressions[0][0]; + if (parsed.pseudos[0].key != "relay") return old.call(this, type, fn, useCapture); + var newType = parsed.tag; + parsed.pseudos.slice(1).each(function(pseudo) { + 
newType += ":" + pseudo.key + (pseudo.value ? "(" + pseudo.value + ")" : ""); + }); + old.call(this, type, fn); + return method.call(this, newType, parsed.pseudos[0].value, fn); + }; + }; + var delegation = { + addEvent: function(type, match, fn) { + var storage = this.retrieve("$delegates", {}), stored = storage[type]; + if (stored) for (var _uid in stored) { + if (stored[_uid].fn == fn && stored[_uid].match == match) return this; + } + var _type = type, _match = match, _fn = fn, _map = map[type] || {}; + type = _map.base || _type; + match = function(target) { + return Slick.match(target, _match); + }; + var elementEvent = Element.Events[_type]; + if (_map.condition || elementEvent && elementEvent.condition) { + var __match = match, condition = _map.condition || elementEvent.condition; + match = function(target, event) { + return __match(target, event) && condition.call(target, event, type); + }; + } + var self = this, uid = String.uniqueID(); + var delegator = _map.listen ? function(event, target) { + if (!target && event && event.target) target = event.target; + if (target) _map.listen(self, match, fn, event, target, uid); + } : function(event, target) { + if (!target && event && event.target) target = event.target; + if (target) bubbleUp(self, match, fn, event, target); + }; + if (!stored) stored = {}; + stored[uid] = { + match: _match, + fn: _fn, + delegator: delegator + }; + storage[_type] = stored; + return addEvent.call(this, type, delegator, _map.capture); + }, + removeEvent: function(type, match, fn, _uid) { + var storage = this.retrieve("$delegates", {}), stored = storage[type]; + if (!stored) return this; + if (_uid) { + var _type = type, delegator = stored[_uid].delegator, _map = map[type] || {}; + type = _map.base || _type; + if (_map.remove) _map.remove(this, _uid); + delete stored[_uid]; + storage[_type] = stored; + return removeEvent.call(this, type, delegator, _map.capture); + } + var __uid, s; + if (fn) for (__uid in stored) { + s = 
stored[__uid]; + if (s.match == match && s.fn == fn) return delegation.removeEvent.call(this, type, match, fn, __uid); + } else for (__uid in stored) { + s = stored[__uid]; + if (s.match == match) delegation.removeEvent.call(this, type, match, s.fn, __uid); + } + return this; + } + }; + [ Element, Window, Document ].invoke("implement", { + addEvent: relay(addEvent, delegation.addEvent), + removeEvent: relay(removeEvent, delegation.removeEvent) + }); +})(); + +(function() { + var html = document.html, el; + el = document.createElement("div"); + el.style.color = "red"; + el.style.color = null; + var doesNotRemoveStyles = el.style.color == "red"; + var border = "1px solid #123abc"; + el.style.border = border; + var returnsBordersInWrongOrder = el.style.border != border; + el = null; + var hasGetComputedStyle = !!window.getComputedStyle, supportBorderRadius = document.createElement("div").style.borderRadius != null; + Element.Properties.styles = { + set: function(styles) { + this.setStyles(styles); + } + }; + var hasOpacity = html.style.opacity != null, hasFilter = html.style.filter != null, reAlpha = /alpha\(opacity=([\d.]+)\)/i; + var setVisibility = function(element, opacity) { + element.store("$opacity", opacity); + element.style.visibility = opacity > 0 || opacity == null ? "visible" : "hidden"; + }; + var setFilter = function(element, regexp, value) { + var style = element.style, filter = style.filter || element.getComputedStyle("filter") || ""; + style.filter = (regexp.test(filter) ? filter.replace(regexp, value) : filter + " " + value).trim(); + if (!style.filter) style.removeAttribute("filter"); + }; + var setOpacity = hasOpacity ? function(element, opacity) { + element.style.opacity = opacity; + } : hasFilter ? 
function(element, opacity) { + if (!element.currentStyle || !element.currentStyle.hasLayout) element.style.zoom = 1; + if (opacity == null || opacity == 1) { + setFilter(element, reAlpha, ""); + if (opacity == 1 && getOpacity(element) != 1) setFilter(element, reAlpha, "alpha(opacity=100)"); + } else { + setFilter(element, reAlpha, "alpha(opacity=" + (opacity * 100).limit(0, 100).round() + ")"); + } + } : setVisibility; + var getOpacity = hasOpacity ? function(element) { + var opacity = element.style.opacity || element.getComputedStyle("opacity"); + return opacity == "" ? 1 : opacity.toFloat(); + } : hasFilter ? function(element) { + var filter = element.style.filter || element.getComputedStyle("filter"), opacity; + if (filter) opacity = filter.match(reAlpha); + return opacity == null || filter == null ? 1 : opacity[1] / 100; + } : function(element) { + var opacity = element.retrieve("$opacity"); + if (opacity == null) opacity = element.style.visibility == "hidden" ? 0 : 1; + return opacity; + }; + var floatName = html.style.cssFloat == null ? "styleFloat" : "cssFloat", namedPositions = { + left: "0%", + top: "0%", + center: "50%", + right: "100%", + bottom: "100%" + }, hasBackgroundPositionXY = html.style.backgroundPositionX != null; + var removeStyle = function(style, property) { + if (property == "backgroundPosition") { + style.removeAttribute(property + "X"); + property += "Y"; + } + style.removeAttribute(property); + }; + Element.implement({ + getComputedStyle: function(property) { + if (!hasGetComputedStyle && this.currentStyle) return this.currentStyle[property.camelCase()]; + var defaultView = Element.getDocument(this).defaultView, computed = defaultView ? defaultView.getComputedStyle(this, null) : null; + return computed ? computed.getPropertyValue(property == floatName ? 
"float" : property.hyphenate()) : ""; + }, + setStyle: function(property, value) { + if (property == "opacity") { + if (value != null) value = parseFloat(value); + setOpacity(this, value); + return this; + } + property = (property == "float" ? floatName : property).camelCase(); + if (typeOf(value) != "string") { + var map = (Element.Styles[property] || "@").split(" "); + value = Array.from(value).map(function(val, i) { + if (!map[i]) return ""; + return typeOf(val) == "number" ? map[i].replace("@", Math.round(val)) : val; + }).join(" "); + } else if (value == String(Number(value))) { + value = Math.round(value); + } + this.style[property] = value; + if ((value == "" || value == null) && doesNotRemoveStyles && this.style.removeAttribute) { + removeStyle(this.style, property); + } + return this; + }, + getStyle: function(property) { + if (property == "opacity") return getOpacity(this); + property = (property == "float" ? floatName : property).camelCase(); + if (supportBorderRadius && property.indexOf("borderRadius") != -1) { + return [ "borderTopLeftRadius", "borderTopRightRadius", "borderBottomRightRadius", "borderBottomLeftRadius" ].map(function(corner) { + return this.style[corner] || "0px"; + }, this).join(" "); + } + var result = this.style[property]; + if (!result || property == "zIndex") { + if (Element.ShortStyles.hasOwnProperty(property)) { + result = []; + for (var s in Element.ShortStyles[property]) result.push(this.getStyle(s)); + return result.join(" "); + } + result = this.getComputedStyle(property); + } + if (hasBackgroundPositionXY && /^backgroundPosition[XY]?$/.test(property)) { + return result.replace(/(top|right|bottom|left)/g, function(position) { + return namedPositions[position]; + }) || "0px"; + } + if (!result && property == "backgroundPosition") return "0px 0px"; + if (result) { + result = String(result); + var color = result.match(/rgba?\([\d\s,]+\)/); + if (color) result = result.replace(color[0], color[0].rgbToHex()); + } + if 
(!hasGetComputedStyle && !this.style[property]) { + if (/^(height|width)$/.test(property) && !/px$/.test(result)) { + var values = property == "width" ? [ "left", "right" ] : [ "top", "bottom" ], size = 0; + values.each(function(value) { + size += this.getStyle("border-" + value + "-width").toInt() + this.getStyle("padding-" + value).toInt(); + }, this); + return this["offset" + property.capitalize()] - size + "px"; + } + if (/^border(.+)Width|margin|padding/.test(property) && isNaN(parseFloat(result))) { + return "0px"; + } + } + if (returnsBordersInWrongOrder && /^border(Top|Right|Bottom|Left)?$/.test(property) && /^#/.test(result)) { + return result.replace(/^(.+)\s(.+)\s(.+)$/, "$2 $3 $1"); + } + return result; + }, + setStyles: function(styles) { + for (var style in styles) this.setStyle(style, styles[style]); + return this; + }, + getStyles: function() { + var result = {}; + Array.flatten(arguments).each(function(key) { + result[key] = this.getStyle(key); + }, this); + return result; + } + }); + Element.Styles = { + left: "@px", + top: "@px", + bottom: "@px", + right: "@px", + width: "@px", + height: "@px", + maxWidth: "@px", + maxHeight: "@px", + minWidth: "@px", + minHeight: "@px", + backgroundColor: "rgb(@, @, @)", + backgroundSize: "@px", + backgroundPosition: "@px @px", + color: "rgb(@, @, @)", + fontSize: "@px", + letterSpacing: "@px", + lineHeight: "@px", + clip: "rect(@px @px @px @px)", + margin: "@px @px @px @px", + padding: "@px @px @px @px", + border: "@px @ rgb(@, @, @) @px @ rgb(@, @, @) @px @ rgb(@, @, @)", + borderWidth: "@px @px @px @px", + borderStyle: "@ @ @ @", + borderColor: "rgb(@, @, @) rgb(@, @, @) rgb(@, @, @) rgb(@, @, @)", + zIndex: "@", + zoom: "@", + fontWeight: "@", + textIndent: "@px", + opacity: "@", + borderRadius: "@px @px @px @px" + }; + Element.ShortStyles = { + margin: {}, + padding: {}, + border: {}, + borderWidth: {}, + borderStyle: {}, + borderColor: {} + }; + [ "Top", "Right", "Bottom", "Left" ].each(function(direction) 
{ + var Short = Element.ShortStyles; + var All = Element.Styles; + [ "margin", "padding" ].each(function(style) { + var sd = style + direction; + Short[style][sd] = All[sd] = "@px"; + }); + var bd = "border" + direction; + Short.border[bd] = All[bd] = "@px @ rgb(@, @, @)"; + var bdw = bd + "Width", bds = bd + "Style", bdc = bd + "Color"; + Short[bd] = {}; + Short.borderWidth[bdw] = Short[bd][bdw] = All[bdw] = "@px"; + Short.borderStyle[bds] = Short[bd][bds] = All[bds] = "@"; + Short.borderColor[bdc] = Short[bd][bdc] = All[bdc] = "rgb(@, @, @)"; + }); + if (hasBackgroundPositionXY) Element.ShortStyles.backgroundPosition = { + backgroundPositionX: "@", + backgroundPositionY: "@" + }; +})(); + +(function() { + var element = document.createElement("div"), child = document.createElement("div"); + element.style.height = "0"; + element.appendChild(child); + var brokenOffsetParent = child.offsetParent === element; + element = child = null; + var heightComponents = [ "height", "paddingTop", "paddingBottom", "borderTopWidth", "borderBottomWidth" ], widthComponents = [ "width", "paddingLeft", "paddingRight", "borderLeftWidth", "borderRightWidth" ]; + var svgCalculateSize = function(el) { + var gCS = window.getComputedStyle(el), bounds = { + x: 0, + y: 0 + }; + heightComponents.each(function(css) { + bounds.y += parseFloat(gCS[css]); + }); + widthComponents.each(function(css) { + bounds.x += parseFloat(gCS[css]); + }); + return bounds; + }; + var isOffset = function(el) { + return styleString(el, "position") != "static" || isBody(el); + }; + var isOffsetStatic = function(el) { + return isOffset(el) || /^(?:table|td|th)$/i.test(el.tagName); + }; + Element.implement({ + scrollTo: function(x, y) { + if (isBody(this)) { + this.getWindow().scrollTo(x, y); + } else { + this.scrollLeft = x; + this.scrollTop = y; + } + return this; + }, + getSize: function() { + if (isBody(this)) return this.getWindow().getSize(); + if (!window.getComputedStyle) return { + x: this.offsetWidth, + y: 
this.offsetHeight + }; + if (this.get("tag") == "svg") return svgCalculateSize(this); + var bounds = this.getBoundingClientRect(); + return { + x: bounds.width, + y: bounds.height + }; + }, + getScrollSize: function() { + if (isBody(this)) return this.getWindow().getScrollSize(); + return { + x: this.scrollWidth, + y: this.scrollHeight + }; + }, + getScroll: function() { + if (isBody(this)) return this.getWindow().getScroll(); + return { + x: this.scrollLeft, + y: this.scrollTop + }; + }, + getScrolls: function() { + var element = this.parentNode, position = { + x: 0, + y: 0 + }; + while (element && !isBody(element)) { + position.x += element.scrollLeft; + position.y += element.scrollTop; + element = element.parentNode; + } + return position; + }, + getOffsetParent: brokenOffsetParent ? function() { + var element = this; + if (isBody(element) || styleString(element, "position") == "fixed") return null; + var isOffsetCheck = styleString(element, "position") == "static" ? isOffsetStatic : isOffset; + while (element = element.parentNode) { + if (isOffsetCheck(element)) return element; + } + return null; + } : function() { + var element = this; + if (isBody(element) || styleString(element, "position") == "fixed") return null; + try { + return element.offsetParent; + } catch (e) {} + return null; + }, + getOffsets: function() { + var hasGetBoundingClientRect = this.getBoundingClientRect; + if (hasGetBoundingClientRect) { + var bound = this.getBoundingClientRect(), html = document.id(this.getDocument().documentElement), htmlScroll = html.getScroll(), elemScrolls = this.getScrolls(), isFixed = styleString(this, "position") == "fixed"; + return { + x: bound.left.toInt() + elemScrolls.x + (isFixed ? 0 : htmlScroll.x) - html.clientLeft, + y: bound.top.toInt() + elemScrolls.y + (isFixed ? 
0 : htmlScroll.y) - html.clientTop + }; + } + var element = this, position = { + x: 0, + y: 0 + }; + if (isBody(this)) return position; + while (element && !isBody(element)) { + position.x += element.offsetLeft; + position.y += element.offsetTop; + element = element.offsetParent; + } + return position; + }, + getPosition: function(relative) { + var offset = this.getOffsets(), scroll = this.getScrolls(); + var position = { + x: offset.x - scroll.x, + y: offset.y - scroll.y + }; + if (relative && (relative = document.id(relative))) { + var relativePosition = relative.getPosition(); + return { + x: position.x - relativePosition.x - leftBorder(relative), + y: position.y - relativePosition.y - topBorder(relative) + }; + } + return position; + }, + getCoordinates: function(element) { + if (isBody(this)) return this.getWindow().getCoordinates(); + var position = this.getPosition(element), size = this.getSize(); + var obj = { + left: position.x, + top: position.y, + width: size.x, + height: size.y + }; + obj.right = obj.left + obj.width; + obj.bottom = obj.top + obj.height; + return obj; + }, + computePosition: function(obj) { + return { + left: obj.x - styleNumber(this, "margin-left"), + top: obj.y - styleNumber(this, "margin-top") + }; + }, + setPosition: function(obj) { + return this.setStyles(this.computePosition(obj)); + } + }); + [ Document, Window ].invoke("implement", { + getSize: function() { + var doc = getCompatElement(this); + return { + x: doc.clientWidth, + y: doc.clientHeight + }; + }, + getScroll: function() { + var win = this.getWindow(), doc = getCompatElement(this); + return { + x: win.pageXOffset || doc.scrollLeft, + y: win.pageYOffset || doc.scrollTop + }; + }, + getScrollSize: function() { + var doc = getCompatElement(this), min = this.getSize(), body = this.getDocument().body; + return { + x: Math.max(doc.scrollWidth, body.scrollWidth, min.x), + y: Math.max(doc.scrollHeight, body.scrollHeight, min.y) + }; + }, + getPosition: function() { + return { + 
x: 0, + y: 0 + }; + }, + getCoordinates: function() { + var size = this.getSize(); + return { + top: 0, + left: 0, + bottom: size.y, + right: size.x, + height: size.y, + width: size.x + }; + } + }); + var styleString = Element.getComputedStyle; + function styleNumber(element, style) { + return styleString(element, style).toInt() || 0; + } + function borderBox(element) { + return styleString(element, "-moz-box-sizing") == "border-box"; + } + function topBorder(element) { + return styleNumber(element, "border-top-width"); + } + function leftBorder(element) { + return styleNumber(element, "border-left-width"); + } + function isBody(element) { + return /^(?:body|html)$/i.test(element.tagName); + } + function getCompatElement(element) { + var doc = element.getDocument(); + return !doc.compatMode || doc.compatMode == "CSS1Compat" ? doc.html : doc.body; + } +})(); + +Element.alias({ + position: "setPosition" +}); + +[ Window, Document, Element ].invoke("implement", { + getHeight: function() { + return this.getSize().y; + }, + getWidth: function() { + return this.getSize().x; + }, + getScrollTop: function() { + return this.getScroll().y; + }, + getScrollLeft: function() { + return this.getScroll().x; + }, + getScrollHeight: function() { + return this.getScrollSize().y; + }, + getScrollWidth: function() { + return this.getScrollSize().x; + }, + getTop: function() { + return this.getPosition().y; + }, + getLeft: function() { + return this.getPosition().x; + } +}); + +(function() { + var Fx = this.Fx = new Class({ + Implements: [ Chain, Events, Options ], + options: { + fps: 60, + unit: false, + duration: 500, + frames: null, + frameSkip: true, + link: "ignore" + }, + initialize: function(options) { + this.subject = this.subject || this; + this.setOptions(options); + }, + getTransition: function() { + return function(p) { + return -(Math.cos(Math.PI * p) - 1) / 2; + }; + }, + step: function(now) { + if (this.options.frameSkip) { + var diff = this.time != null ? 
now - this.time : 0, frames = diff / this.frameInterval; + this.time = now; + this.frame += frames; + } else { + this.frame++; + } + if (this.frame < this.frames) { + var delta = this.transition(this.frame / this.frames); + this.set(this.compute(this.from, this.to, delta)); + } else { + this.frame = this.frames; + this.set(this.compute(this.from, this.to, 1)); + this.stop(); + } + }, + set: function(now) { + return now; + }, + compute: function(from, to, delta) { + return Fx.compute(from, to, delta); + }, + check: function() { + if (!this.isRunning()) return true; + switch (this.options.link) { + case "cancel": + this.cancel(); + return true; + + case "chain": + this.chain(this.caller.pass(arguments, this)); + return false; + } + return false; + }, + start: function(from, to) { + if (!this.check(from, to)) return this; + this.from = from; + this.to = to; + this.frame = this.options.frameSkip ? 0 : -1; + this.time = null; + this.transition = this.getTransition(); + var frames = this.options.frames, fps = this.options.fps, duration = this.options.duration; + this.duration = Fx.Durations[duration] || duration.toInt(); + this.frameInterval = 1e3 / fps; + this.frames = frames || Math.round(this.duration / this.frameInterval); + this.fireEvent("start", this.subject); + pushInstance.call(this, fps); + return this; + }, + stop: function() { + if (this.isRunning()) { + this.time = null; + pullInstance.call(this, this.options.fps); + if (this.frames == this.frame) { + this.fireEvent("complete", this.subject); + if (!this.callChain()) this.fireEvent("chainComplete", this.subject); + } else { + this.fireEvent("stop", this.subject); + } + } + return this; + }, + cancel: function() { + if (this.isRunning()) { + this.time = null; + pullInstance.call(this, this.options.fps); + this.frame = this.frames; + this.fireEvent("cancel", this.subject).clearChain(); + } + return this; + }, + pause: function() { + if (this.isRunning()) { + this.time = null; + pullInstance.call(this, 
this.options.fps); + } + return this; + }, + resume: function() { + if (this.isPaused()) pushInstance.call(this, this.options.fps); + return this; + }, + isRunning: function() { + var list = instances[this.options.fps]; + return list && list.contains(this); + }, + isPaused: function() { + return this.frame < this.frames && !this.isRunning(); + } + }); + Fx.compute = function(from, to, delta) { + return (to - from) * delta + from; + }; + Fx.Durations = { + short: 250, + normal: 500, + long: 1e3 + }; + var instances = {}, timers = {}; + var loop = function() { + var now = Date.now(); + for (var i = this.length; i--; ) { + var instance = this[i]; + if (instance) instance.step(now); + } + }; + var pushInstance = function(fps) { + var list = instances[fps] || (instances[fps] = []); + list.push(this); + if (!timers[fps]) timers[fps] = loop.periodical(Math.round(1e3 / fps), list); + }; + var pullInstance = function(fps) { + var list = instances[fps]; + if (list) { + list.erase(this); + if (!list.length && timers[fps]) { + delete instances[fps]; + timers[fps] = clearInterval(timers[fps]); + } + } + }; +})(); + +Fx.CSS = new Class({ + Extends: Fx, + prepare: function(element, property, values) { + values = Array.from(values); + var from = values[0], to = values[1]; + if (to == null) { + to = from; + from = element.getStyle(property); + var unit = this.options.unit; + if (unit && from && typeof from == "string" && from.slice(-unit.length) != unit && parseFloat(from) != 0) { + element.setStyle(property, to + unit); + var value = element.getComputedStyle(property); + if (!/px$/.test(value)) { + value = element.style[("pixel-" + property).camelCase()]; + if (value == null) { + var left = element.style.left; + element.style.left = to + unit; + value = element.style.pixelLeft; + element.style.left = left; + } + } + from = (to || 1) / (parseFloat(value) || 1) * (parseFloat(from) || 0); + element.setStyle(property, from + unit); + } + } + return { + from: this.parse(from), + to: 
this.parse(to) + }; + }, + parse: function(value) { + value = Function.from(value)(); + value = typeof value == "string" ? value.split(" ") : Array.from(value); + return value.map(function(val) { + val = String(val); + var found = false; + Object.each(Fx.CSS.Parsers, function(parser, key) { + if (found) return; + var parsed = parser.parse(val); + if (parsed || parsed === 0) found = { + value: parsed, + parser: parser + }; + }); + found = found || { + value: val, + parser: Fx.CSS.Parsers.String + }; + return found; + }); + }, + compute: function(from, to, delta) { + var computed = []; + Math.min(from.length, to.length).times(function(i) { + computed.push({ + value: from[i].parser.compute(from[i].value, to[i].value, delta), + parser: from[i].parser + }); + }); + computed.$family = Function.from("fx:css:value"); + return computed; + }, + serve: function(value, unit) { + if (typeOf(value) != "fx:css:value") value = this.parse(value); + var returned = []; + value.each(function(bit) { + returned = returned.concat(bit.parser.serve(bit.value, unit)); + }); + return returned; + }, + render: function(element, property, value, unit) { + element.setStyle(property, this.serve(value, unit)); + }, + search: function(selector) { + if (Fx.CSS.Cache[selector]) return Fx.CSS.Cache[selector]; + var to = {}, selectorTest = new RegExp("^" + selector.escapeRegExp() + "$"); + var searchStyles = function(rules) { + Array.each(rules, function(rule, i) { + if (rule.media) { + searchStyles(rule.rules || rule.cssRules); + return; + } + if (!rule.style) return; + var selectorText = rule.selectorText ? rule.selectorText.replace(/^\w+/, function(m) { + return m.toLowerCase(); + }) : null; + if (!selectorText || !selectorTest.test(selectorText)) return; + Object.each(Element.Styles, function(value, style) { + if (!rule.style[style] || Element.ShortStyles[style]) return; + value = String(rule.style[style]); + to[style] = /^rgb/.test(value) ? 
value.rgbToHex() : value; + }); + }); + }; + Array.each(document.styleSheets, function(sheet, j) { + var href = sheet.href; + if (href && href.indexOf("://") > -1 && href.indexOf(document.domain) == -1) return; + var rules = sheet.rules || sheet.cssRules; + searchStyles(rules); + }); + return Fx.CSS.Cache[selector] = to; + } +}); + +Fx.CSS.Cache = {}; + +Fx.CSS.Parsers = { + Color: { + parse: function(value) { + if (value.match(/^#[0-9a-f]{3,6}$/i)) return value.hexToRgb(true); + return (value = value.match(/(\d+),\s*(\d+),\s*(\d+)/)) ? [ value[1], value[2], value[3] ] : false; + }, + compute: function(from, to, delta) { + return from.map(function(value, i) { + return Math.round(Fx.compute(from[i], to[i], delta)); + }); + }, + serve: function(value) { + return value.map(Number); + } + }, + Number: { + parse: parseFloat, + compute: Fx.compute, + serve: function(value, unit) { + return unit ? value + unit : value; + } + }, + String: { + parse: Function.from(false), + compute: function(zero, one) { + return one; + }, + serve: function(zero) { + return zero; + } + } +}; + +Fx.Morph = new Class({ + Extends: Fx.CSS, + initialize: function(element, options) { + this.element = this.subject = document.id(element); + this.parent(options); + }, + set: function(now) { + if (typeof now == "string") now = this.search(now); + for (var p in now) this.render(this.element, p, now[p], this.options.unit); + return this; + }, + compute: function(from, to, delta) { + var now = {}; + for (var p in from) now[p] = this.parent(from[p], to[p], delta); + return now; + }, + start: function(properties) { + if (!this.check(properties)) return this; + if (typeof properties == "string") properties = this.search(properties); + var from = {}, to = {}; + for (var p in properties) { + var parsed = this.prepare(this.element, p, properties[p]); + from[p] = parsed.from; + to[p] = parsed.to; + } + return this.parent(from, to); + } +}); + +Element.Properties.morph = { + set: function(options) { + 
this.get("morph").cancel().setOptions(options); + return this; + }, + get: function() { + var morph = this.retrieve("morph"); + if (!morph) { + morph = new Fx.Morph(this, { + link: "cancel" + }); + this.store("morph", morph); + } + return morph; + } +}; + +Element.implement({ + morph: function(props) { + this.get("morph").start(props); + return this; + } +}); + +Fx.implement({ + getTransition: function() { + var trans = this.options.transition || Fx.Transitions.Sine.easeInOut; + if (typeof trans == "string") { + var data = trans.split(":"); + trans = Fx.Transitions; + trans = trans[data[0]] || trans[data[0].capitalize()]; + if (data[1]) trans = trans["ease" + data[1].capitalize() + (data[2] ? data[2].capitalize() : "")]; + } + return trans; + } +}); + +Fx.Transition = function(transition, params) { + params = Array.from(params); + var easeIn = function(pos) { + return transition(pos, params); + }; + return Object.append(easeIn, { + easeIn: easeIn, + easeOut: function(pos) { + return 1 - transition(1 - pos, params); + }, + easeInOut: function(pos) { + return (pos <= .5 ? 
transition(2 * pos, params) : 2 - transition(2 * (1 - pos), params)) / 2; + } + }); +}; + +Fx.Transitions = { + linear: function(zero) { + return zero; + } +}; + +Fx.Transitions.extend = function(transitions) { + for (var transition in transitions) Fx.Transitions[transition] = new Fx.Transition(transitions[transition]); +}; + +Fx.Transitions.extend({ + Pow: function(p, x) { + return Math.pow(p, x && x[0] || 6); + }, + Expo: function(p) { + return Math.pow(2, 8 * (p - 1)); + }, + Circ: function(p) { + return 1 - Math.sin(Math.acos(p)); + }, + Sine: function(p) { + return 1 - Math.cos(p * Math.PI / 2); + }, + Back: function(p, x) { + x = x && x[0] || 1.618; + return Math.pow(p, 2) * ((x + 1) * p - x); + }, + Bounce: function(p) { + var value; + for (var a = 0, b = 1; 1; a += b, b /= 2) { + if (p >= (7 - 4 * a) / 11) { + value = b * b - Math.pow((11 - 6 * a - 11 * p) / 4, 2); + break; + } + } + return value; + }, + Elastic: function(p, x) { + return Math.pow(2, 10 * --p) * Math.cos(20 * p * Math.PI * (x && x[0] || 1) / 3); + } +}); + +[ "Quad", "Cubic", "Quart", "Quint" ].each(function(transition, i) { + Fx.Transitions[transition] = new Fx.Transition(function(p) { + return Math.pow(p, i + 2); + }); +}); + +Fx.Tween = new Class({ + Extends: Fx.CSS, + initialize: function(element, options) { + this.element = this.subject = document.id(element); + this.parent(options); + }, + set: function(property, now) { + if (arguments.length == 1) { + now = property; + property = this.property || this.options.property; + } + this.render(this.element, property, now, this.options.unit); + return this; + }, + start: function(property, from, to) { + if (!this.check(property, from, to)) return this; + var args = Array.flatten(arguments); + this.property = this.options.property || args.shift(); + var parsed = this.prepare(this.element, this.property, args); + return this.parent(parsed.from, parsed.to); + } +}); + +Element.Properties.tween = { + set: function(options) { + 
this.get("tween").cancel().setOptions(options); + return this; + }, + get: function() { + var tween = this.retrieve("tween"); + if (!tween) { + tween = new Fx.Tween(this, { + link: "cancel" + }); + this.store("tween", tween); + } + return tween; + } +}; + +Element.implement({ + tween: function(property, from, to) { + this.get("tween").start(property, from, to); + return this; + }, + fade: function(how) { + var fade = this.get("tween"), method, args = [ "opacity" ].append(arguments), toggle; + if (args[1] == null) args[1] = "toggle"; + switch (args[1]) { + case "in": + method = "start"; + args[1] = 1; + break; + + case "out": + method = "start"; + args[1] = 0; + break; + + case "show": + method = "set"; + args[1] = 1; + break; + + case "hide": + method = "set"; + args[1] = 0; + break; + + case "toggle": + var flag = this.retrieve("fade:flag", this.getStyle("opacity") == 1); + method = "start"; + args[1] = flag ? 0 : 1; + this.store("fade:flag", !flag); + toggle = true; + break; + + default: + method = "start"; + } + if (!toggle) this.eliminate("fade:flag"); + fade[method].apply(fade, args); + var to = args[args.length - 1]; + if (method == "set" || to != 0) this.setStyle("visibility", to == 0 ? "hidden" : "visible"); else fade.chain(function() { + this.element.setStyle("visibility", "hidden"); + this.callChain(); + }); + return this; + }, + highlight: function(start, end) { + if (!end) { + end = this.retrieve("highlight:original", this.getStyle("background-color")); + end = end == "transparent" ? 
"#fff" : end; + } + var tween = this.get("tween"); + tween.start("background-color", start || "#ffff88", end).chain(function() { + this.setStyle("background-color", this.retrieve("highlight:original")); + tween.callChain(); + }.bind(this)); + return this; + } +}); + +(function() { + var empty = function() {}, progressSupport = "onprogress" in new Browser.Request(); + var Request = this.Request = new Class({ + Implements: [ Chain, Events, Options ], + options: { + url: "", + data: "", + headers: { + "X-Requested-With": "XMLHttpRequest", + Accept: "text/javascript, text/html, application/xml, text/xml, */*" + }, + async: true, + format: false, + method: "post", + link: "ignore", + isSuccess: null, + emulation: true, + urlEncoded: true, + encoding: "utf-8", + evalScripts: false, + evalResponse: false, + timeout: 0, + noCache: false + }, + initialize: function(options) { + this.xhr = new Browser.Request(); + this.setOptions(options); + this.headers = this.options.headers; + }, + onStateChange: function() { + var xhr = this.xhr; + if (xhr.readyState != 4 || !this.running) return; + this.running = false; + this.status = 0; + Function.attempt(function() { + var status = xhr.status; + this.status = status == 1223 ? 
204 : status; + }.bind(this)); + xhr.onreadystatechange = empty; + if (progressSupport) xhr.onprogress = xhr.onloadstart = empty; + if (this.timer) { + clearTimeout(this.timer); + delete this.timer; + } + this.response = { + text: this.xhr.responseText || "", + xml: this.xhr.responseXML + }; + if (this.options.isSuccess.call(this, this.status)) this.success(this.response.text, this.response.xml); else this.failure(); + }, + isSuccess: function() { + var status = this.status; + return status >= 200 && status < 300; + }, + isRunning: function() { + return !!this.running; + }, + processScripts: function(text) { + if (this.options.evalResponse || /(ecma|java)script/.test(this.getHeader("Content-type"))) return Browser.exec(text); + return text.stripScripts(this.options.evalScripts); + }, + success: function(text, xml) { + this.onSuccess(this.processScripts(text), xml); + }, + onSuccess: function() { + this.fireEvent("complete", arguments).fireEvent("success", arguments).callChain(); + }, + failure: function() { + this.onFailure(); + }, + onFailure: function() { + this.fireEvent("complete").fireEvent("failure", this.xhr); + }, + loadstart: function(event) { + this.fireEvent("loadstart", [ event, this.xhr ]); + }, + progress: function(event) { + this.fireEvent("progress", [ event, this.xhr ]); + }, + timeout: function() { + this.fireEvent("timeout", this.xhr); + }, + setHeader: function(name, value) { + this.headers[name] = value; + return this; + }, + getHeader: function(name) { + return Function.attempt(function() { + return this.xhr.getResponseHeader(name); + }.bind(this)); + }, + check: function() { + if (!this.running) return true; + switch (this.options.link) { + case "cancel": + this.cancel(); + return true; + + case "chain": + this.chain(this.caller.pass(arguments, this)); + return false; + } + return false; + }, + send: function(options) { + if (!this.check(options)) return this; + this.options.isSuccess = this.options.isSuccess || this.isSuccess; + this.running 
= true; + var type = typeOf(options); + if (type == "string" || type == "element") options = { + data: options + }; + var old = this.options; + options = Object.append({ + data: old.data, + url: old.url, + method: old.method + }, options); + var data = options.data, url = String(options.url), method = options.method.toLowerCase(); + switch (typeOf(data)) { + case "element": + data = document.id(data).toQueryString(); + break; + + case "object": + case "hash": + data = Object.toQueryString(data); + } + if (this.options.format) { + var format = "format=" + this.options.format; + data = data ? format + "&" + data : format; + } + if (this.options.emulation && ![ "get", "post" ].contains(method)) { + var _method = "_method=" + method; + data = data ? _method + "&" + data : _method; + method = "post"; + } + if (this.options.urlEncoded && [ "post", "put" ].contains(method)) { + var encoding = this.options.encoding ? "; charset=" + this.options.encoding : ""; + this.headers["Content-type"] = "application/x-www-form-urlencoded" + encoding; + } + if (!url) url = document.location.pathname; + var trimPosition = url.lastIndexOf("/"); + if (trimPosition > -1 && (trimPosition = url.indexOf("#")) > -1) url = url.substr(0, trimPosition); + if (this.options.noCache) url += (url.indexOf("?") > -1 ? "&" : "?") + String.uniqueID(); + if (data && (method == "get" || method == "delete")) { + url += (url.indexOf("?") > -1 ? 
"&" : "?") + data; + data = null; + } + var xhr = this.xhr; + if (progressSupport) { + xhr.onloadstart = this.loadstart.bind(this); + xhr.onprogress = this.progress.bind(this); + } + xhr.open(method.toUpperCase(), url, this.options.async, this.options.user, this.options.password); + if (this.options.withCredentials && "withCredentials" in xhr) xhr.withCredentials = true; + xhr.onreadystatechange = this.onStateChange.bind(this); + Object.each(this.headers, function(value, key) { + try { + xhr.setRequestHeader(key, value); + } catch (e) { + this.fireEvent("exception", [ key, value ]); + } + }, this); + this.fireEvent("request"); + xhr.send(data); + if (!this.options.async) this.onStateChange(); else if (this.options.timeout) this.timer = this.timeout.delay(this.options.timeout, this); + return this; + }, + cancel: function() { + if (!this.running) return this; + this.running = false; + var xhr = this.xhr; + xhr.abort(); + if (this.timer) { + clearTimeout(this.timer); + delete this.timer; + } + xhr.onreadystatechange = empty; + if (progressSupport) xhr.onprogress = xhr.onloadstart = empty; + this.xhr = new Browser.Request(); + this.fireEvent("cancel"); + return this; + } + }); + var methods = {}; + [ "get", "post", "put", "delete", "patch", "head", "GET", "POST", "PUT", "DELETE", "PATCH", "HEAD" ].each(function(method) { + methods[method] = function(data) { + var object = { + method: method + }; + if (data != null) object.data = data; + return this.send(object); + }; + }); + Request.implement(methods); + Element.Properties.send = { + set: function(options) { + var send = this.get("send").cancel(); + send.setOptions(options); + return this; + }, + get: function() { + var send = this.retrieve("send"); + if (!send) { + send = new Request({ + data: this, + link: "cancel", + method: this.get("method") || "post", + url: this.get("action") + }); + this.store("send", send); + } + return send; + } + }; + Element.implement({ + send: function(url) { + var sender = 
this.get("send"); + sender.send({ + data: this, + url: url || sender.options.url + }); + return this; + } + }); +})(); + +if (typeof JSON == "undefined") this.JSON = {}; + +(function() { + var special = { + "\b": "\\b", + " ": "\\t", + "\n": "\\n", + "\f": "\\f", + "\r": "\\r", + '"': '\\"', + "\\": "\\\\" + }; + var escape = function(chr) { + return special[chr] || "\\u" + ("0000" + chr.charCodeAt(0).toString(16)).slice(-4); + }; + JSON.validate = function(string) { + string = string.replace(/\\(?:["\\\/bfnrt]|u[0-9a-fA-F]{4})/g, "@").replace(/"[^"\\\n\r]*"|true|false|null|-?\d+(?:\.\d*)?(?:[eE][+\-]?\d+)?/g, "]").replace(/(?:^|:|,)(?:\s*\[)+/g, ""); + return /^[\],:{}\s]*$/.test(string); + }; + JSON.encode = JSON.stringify ? function(obj) { + return JSON.stringify(obj); + } : function(obj) { + if (obj && obj.toJSON) obj = obj.toJSON(); + switch (typeOf(obj)) { + case "string": + return '"' + obj.replace(/[\x00-\x1f\\"]/g, escape) + '"'; + + case "array": + return "[" + obj.map(JSON.encode).clean() + "]"; + + case "object": + case "hash": + var string = []; + Object.each(obj, function(value, key) { + var json = JSON.encode(value); + if (json) string.push(JSON.encode(key) + ":" + json); + }); + return "{" + string + "}"; + + case "number": + case "boolean": + return "" + obj; + + case "null": + return "null"; + } + return null; + }; + JSON.secure = true; + JSON.decode = function(string, secure) { + if (!string || typeOf(string) != "string") return null; + if (secure == null) secure = JSON.secure; + if (secure) { + if (JSON.parse) return JSON.parse(string); + if (!JSON.validate(string)) throw new Error("JSON could not decode the input; security is enabled and the value is not secure."); + } + return eval("(" + string + ")"); + }; +})(); + +Request.JSON = new Class({ + Extends: Request, + options: { + secure: true + }, + initialize: function(options) { + this.parent(options); + Object.append(this.headers, { + Accept: "application/json", + "X-Request": "JSON" + }); + 
}, + success: function(text) { + var json; + try { + json = this.response.json = JSON.decode(text, this.options.secure); + } catch (error) { + this.fireEvent("error", [ text, error ]); + return; + } + if (json == null) this.onFailure(); else this.onSuccess(json, text); + } +}); + +var Cookie = new Class({ + Implements: Options, + options: { + path: "/", + domain: false, + duration: false, + secure: false, + document: document, + encode: true + }, + initialize: function(key, options) { + this.key = key; + this.setOptions(options); + }, + write: function(value) { + if (this.options.encode) value = encodeURIComponent(value); + if (this.options.domain) value += "; domain=" + this.options.domain; + if (this.options.path) value += "; path=" + this.options.path; + if (this.options.duration) { + var date = new Date(); + date.setTime(date.getTime() + this.options.duration * 24 * 60 * 60 * 1e3); + value += "; expires=" + date.toGMTString(); + } + if (this.options.secure) value += "; secure"; + this.options.document.cookie = this.key + "=" + value; + return this; + }, + read: function() { + var value = this.options.document.cookie.match("(?:^|;)\\s*" + this.key.escapeRegExp() + "=([^;]*)"); + return value ? 
decodeURIComponent(value[1]) : null; + }, + dispose: function() { + new Cookie(this.key, Object.merge({}, this.options, { + duration: -1 + })).write(""); + return this; + } +}); + +Cookie.write = function(key, value, options) { + return new Cookie(key, options).write(value); +}; + +Cookie.read = function(key) { + return new Cookie(key).read(); +}; + +Cookie.dispose = function(key, options) { + return new Cookie(key, options).dispose(); +}; + +(function(window, document) { + var ready, loaded, checks = [], shouldPoll, timer, testElement = document.createElement("div"); + var domready = function() { + clearTimeout(timer); + if (!ready) { + Browser.loaded = ready = true; + document.removeListener("DOMContentLoaded", domready).removeListener("readystatechange", check); + document.fireEvent("domready"); + window.fireEvent("domready"); + } + document = window = testElement = null; + }; + var check = function() { + for (var i = checks.length; i--; ) if (checks[i]()) { + domready(); + return true; + } + return false; + }; + var poll = function() { + clearTimeout(timer); + if (!check()) timer = setTimeout(poll, 10); + }; + document.addListener("DOMContentLoaded", domready); + var doScrollWorks = function() { + try { + testElement.doScroll(); + return true; + } catch (e) {} + return false; + }; + if (testElement.doScroll && !doScrollWorks()) { + checks.push(doScrollWorks); + shouldPoll = true; + } + if (document.readyState) checks.push(function() { + var state = document.readyState; + return state == "loaded" || state == "complete"; + }); + if ("onreadystatechange" in document) document.addListener("readystatechange", check); else shouldPoll = true; + if (shouldPoll) poll(); + Element.Events.domready = { + onAdd: function(fn) { + if (ready) fn.call(this); + } + }; + Element.Events.load = { + base: "load", + onAdd: function(fn) { + if (loaded && this == window) fn.call(this); + }, + condition: function() { + if (this == window) { + domready(); + delete Element.Events.load; + 
} + return true; + } + }; + window.addEvent("load", function() { + loaded = true; + }); +})(window, document); + +MooTools.More = { + version: "1.5.1", + build: "2dd695ba957196ae4b0275a690765d6636a61ccd" +}; + +(function() { + Events.Pseudos = function(pseudos, addEvent, removeEvent) { + var storeKey = "_monitorEvents:"; + var storageOf = function(object) { + return { + store: object.store ? function(key, value) { + object.store(storeKey + key, value); + } : function(key, value) { + (object._monitorEvents || (object._monitorEvents = {}))[key] = value; + }, + retrieve: object.retrieve ? function(key, dflt) { + return object.retrieve(storeKey + key, dflt); + } : function(key, dflt) { + if (!object._monitorEvents) return dflt; + return object._monitorEvents[key] || dflt; + } + }; + }; + var splitType = function(type) { + if (type.indexOf(":") == -1 || !pseudos) return null; + var parsed = Slick.parse(type).expressions[0][0], parsedPseudos = parsed.pseudos, l = parsedPseudos.length, splits = []; + while (l--) { + var pseudo = parsedPseudos[l].key, listener = pseudos[pseudo]; + if (listener != null) splits.push({ + event: parsed.tag, + value: parsedPseudos[l].value, + pseudo: pseudo, + original: type, + listener: listener + }); + } + return splits.length ? 
splits : null; + }; + return { + addEvent: function(type, fn, internal) { + var split = splitType(type); + if (!split) return addEvent.call(this, type, fn, internal); + var storage = storageOf(this), events = storage.retrieve(type, []), eventType = split[0].event, args = Array.slice(arguments, 2), stack = fn, self = this; + split.each(function(item) { + var listener = item.listener, stackFn = stack; + if (listener == false) eventType += ":" + item.pseudo + "(" + item.value + ")"; else stack = function() { + listener.call(self, item, stackFn, arguments, stack); + }; + }); + events.include({ + type: eventType, + event: fn, + monitor: stack + }); + storage.store(type, events); + if (type != eventType) addEvent.apply(this, [ type, fn ].concat(args)); + return addEvent.apply(this, [ eventType, stack ].concat(args)); + }, + removeEvent: function(type, fn) { + var split = splitType(type); + if (!split) return removeEvent.call(this, type, fn); + var storage = storageOf(this), events = storage.retrieve(type); + if (!events) return this; + var args = Array.slice(arguments, 2); + removeEvent.apply(this, [ type, fn ].concat(args)); + events.each(function(monitor, i) { + if (!fn || monitor.event == fn) removeEvent.apply(this, [ monitor.type, monitor.monitor ].concat(args)); + delete events[i]; + }, this); + storage.store(type, events); + return this; + } + }; + }; + var pseudos = { + once: function(split, fn, args, monitor) { + fn.apply(this, args); + this.removeEvent(split.event, monitor).removeEvent(split.original, fn); + }, + throttle: function(split, fn, args) { + if (!fn._throttled) { + fn.apply(this, args); + fn._throttled = setTimeout(function() { + fn._throttled = false; + }, split.value || 250); + } + }, + pause: function(split, fn, args) { + clearTimeout(fn._pause); + fn._pause = fn.delay(split.value || 250, this, args); + } + }; + Events.definePseudo = function(key, listener) { + pseudos[key] = listener; + return this; + }; + Events.lookupPseudo = function(key) { + 
return pseudos[key]; + }; + var proto = Events.prototype; + Events.implement(Events.Pseudos(pseudos, proto.addEvent, proto.removeEvent)); + [ "Request", "Fx" ].each(function(klass) { + if (this[klass]) this[klass].implement(Events.prototype); + }); +})(); + +var Drag = new Class({ + Implements: [ Events, Options ], + options: { + snap: 6, + unit: "px", + grid: false, + style: true, + limit: false, + handle: false, + invert: false, + preventDefault: false, + stopPropagation: false, + compensateScroll: false, + modifiers: { + x: "left", + y: "top" + } + }, + initialize: function() { + var params = Array.link(arguments, { + options: Type.isObject, + element: function(obj) { + return obj != null; + } + }); + this.element = document.id(params.element); + this.document = this.element.getDocument(); + this.setOptions(params.options || {}); + var htype = typeOf(this.options.handle); + this.handles = (htype == "array" || htype == "collection" ? $$(this.options.handle) : document.id(this.options.handle)) || this.element; + this.mouse = { + now: {}, + pos: {} + }; + this.value = { + start: {}, + now: {} + }; + this.offsetParent = function(el) { + var offsetParent = el.getOffsetParent(); + var isBody = !offsetParent || /^(?:body|html)$/i.test(offsetParent.tagName); + return isBody ? window : document.id(offsetParent); + }(this.element); + this.selection = "selectstart" in document ? 
"selectstart" : "mousedown"; + this.compensateScroll = { + start: {}, + diff: {}, + last: {} + }; + if ("ondragstart" in document && !("FileReader" in window) && !Drag.ondragstartFixed) { + document.ondragstart = Function.from(false); + Drag.ondragstartFixed = true; + } + this.bound = { + start: this.start.bind(this), + check: this.check.bind(this), + drag: this.drag.bind(this), + stop: this.stop.bind(this), + cancel: this.cancel.bind(this), + eventStop: Function.from(false), + scrollListener: this.scrollListener.bind(this) + }; + this.attach(); + }, + attach: function() { + this.handles.addEvent("mousedown", this.bound.start); + if (this.options.compensateScroll) this.offsetParent.addEvent("scroll", this.bound.scrollListener); + return this; + }, + detach: function() { + this.handles.removeEvent("mousedown", this.bound.start); + if (this.options.compensateScroll) this.offsetParent.removeEvent("scroll", this.bound.scrollListener); + return this; + }, + scrollListener: function() { + if (!this.mouse.start) return; + var newScrollValue = this.offsetParent.getScroll(); + if (this.element.getStyle("position") == "absolute") { + var scrollDiff = this.sumValues(newScrollValue, this.compensateScroll.last, -1); + this.mouse.now = this.sumValues(this.mouse.now, scrollDiff, 1); + } else { + this.compensateScroll.diff = this.sumValues(newScrollValue, this.compensateScroll.start, -1); + } + if (this.offsetParent != window) this.compensateScroll.diff = this.sumValues(this.compensateScroll.start, newScrollValue, -1); + this.compensateScroll.last = newScrollValue; + this.render(this.options); + }, + sumValues: function(alpha, beta, op) { + var sum = {}, options = this.options; + for (z in options.modifiers) { + if (!options.modifiers[z]) continue; + sum[z] = alpha[z] + beta[z] * op; + } + return sum; + }, + start: function(event) { + var options = this.options; + if (event.rightClick) return; + if (options.preventDefault) event.preventDefault(); + if (options.stopPropagation) 
event.stopPropagation(); + this.compensateScroll.start = this.compensateScroll.last = this.offsetParent.getScroll(); + this.compensateScroll.diff = { + x: 0, + y: 0 + }; + this.mouse.start = event.page; + this.fireEvent("beforeStart", this.element); + var limit = options.limit; + this.limit = { + x: [], + y: [] + }; + var z, coordinates, offsetParent = this.offsetParent == window ? null : this.offsetParent; + for (z in options.modifiers) { + if (!options.modifiers[z]) continue; + var style = this.element.getStyle(options.modifiers[z]); + if (style && !style.match(/px$/)) { + if (!coordinates) coordinates = this.element.getCoordinates(offsetParent); + style = coordinates[options.modifiers[z]]; + } + if (options.style) this.value.now[z] = (style || 0).toInt(); else this.value.now[z] = this.element[options.modifiers[z]]; + if (options.invert) this.value.now[z] *= -1; + this.mouse.pos[z] = event.page[z] - this.value.now[z]; + if (limit && limit[z]) { + var i = 2; + while (i--) { + var limitZI = limit[z][i]; + if (limitZI || limitZI === 0) this.limit[z][i] = typeof limitZI == "function" ? 
limitZI() : limitZI; + } + } + } + if (typeOf(this.options.grid) == "number") this.options.grid = { + x: this.options.grid, + y: this.options.grid + }; + var events = { + mousemove: this.bound.check, + mouseup: this.bound.cancel + }; + events[this.selection] = this.bound.eventStop; + this.document.addEvents(events); + }, + check: function(event) { + if (this.options.preventDefault) event.preventDefault(); + var distance = Math.round(Math.sqrt(Math.pow(event.page.x - this.mouse.start.x, 2) + Math.pow(event.page.y - this.mouse.start.y, 2))); + if (distance > this.options.snap) { + this.cancel(); + this.document.addEvents({ + mousemove: this.bound.drag, + mouseup: this.bound.stop + }); + this.fireEvent("start", [ this.element, event ]).fireEvent("snap", this.element); + } + }, + drag: function(event) { + var options = this.options; + if (options.preventDefault) event.preventDefault(); + this.mouse.now = this.sumValues(event.page, this.compensateScroll.diff, -1); + this.render(options); + this.fireEvent("drag", [ this.element, event ]); + }, + render: function(options) { + for (var z in options.modifiers) { + if (!options.modifiers[z]) continue; + this.value.now[z] = this.mouse.now[z] - this.mouse.pos[z]; + if (options.invert) this.value.now[z] *= -1; + if (options.limit && this.limit[z]) { + if ((this.limit[z][1] || this.limit[z][1] === 0) && this.value.now[z] > this.limit[z][1]) { + this.value.now[z] = this.limit[z][1]; + } else if ((this.limit[z][0] || this.limit[z][0] === 0) && this.value.now[z] < this.limit[z][0]) { + this.value.now[z] = this.limit[z][0]; + } + } + if (options.grid[z]) this.value.now[z] -= (this.value.now[z] - (this.limit[z][0] || 0)) % options.grid[z]; + if (options.style) this.element.setStyle(options.modifiers[z], this.value.now[z] + options.unit); else this.element[options.modifiers[z]] = this.value.now[z]; + } + }, + cancel: function(event) { + this.document.removeEvents({ + mousemove: this.bound.check, + mouseup: this.bound.cancel + }); + if 
(event) { + this.document.removeEvent(this.selection, this.bound.eventStop); + this.fireEvent("cancel", this.element); + } + }, + stop: function(event) { + var events = { + mousemove: this.bound.drag, + mouseup: this.bound.stop + }; + events[this.selection] = this.bound.eventStop; + this.document.removeEvents(events); + this.mouse.start = null; + if (event) this.fireEvent("complete", [ this.element, event ]); + } +}); + +Element.implement({ + makeResizable: function(options) { + var drag = new Drag(this, Object.merge({ + modifiers: { + x: "width", + y: "height" + } + }, options)); + this.store("resizer", drag); + return drag.addEvent("drag", function() { + this.fireEvent("resize", drag); + }.bind(this)); + } +}); + +Drag.Move = new Class({ + Extends: Drag, + options: { + droppables: [], + container: false, + precalculate: false, + includeMargins: true, + checkDroppables: true + }, + initialize: function(element, options) { + this.parent(element, options); + element = this.element; + this.droppables = $$(this.options.droppables); + this.setContainer(this.options.container); + if (this.options.style) { + if (this.options.modifiers.x == "left" && this.options.modifiers.y == "top") { + var parent = element.getOffsetParent(), styles = element.getStyles("left", "top"); + if (parent && (styles.left == "auto" || styles.top == "auto")) { + element.setPosition(element.getPosition(parent)); + } + } + if (element.getStyle("position") == "static") element.setStyle("position", "absolute"); + } + this.addEvent("start", this.checkDroppables, true); + this.overed = null; + }, + setContainer: function(container) { + this.container = document.id(container); + if (this.container && typeOf(this.container) != "element") { + this.container = document.id(this.container.getDocument().body); + } + }, + start: function(event) { + if (this.container) this.options.limit = this.calculateLimit(); + if (this.options.precalculate) { + this.positions = this.droppables.map(function(el) { + return 
el.getCoordinates(); + }); + } + this.parent(event); + }, + calculateLimit: function() { + var element = this.element, container = this.container, offsetParent = document.id(element.getOffsetParent()) || document.body, containerCoordinates = container.getCoordinates(offsetParent), elementMargin = {}, elementBorder = {}, containerMargin = {}, containerBorder = {}, offsetParentPadding = {}, offsetScroll = offsetParent.getScroll(); + [ "top", "right", "bottom", "left" ].each(function(pad) { + elementMargin[pad] = element.getStyle("margin-" + pad).toInt(); + elementBorder[pad] = element.getStyle("border-" + pad).toInt(); + containerMargin[pad] = container.getStyle("margin-" + pad).toInt(); + containerBorder[pad] = container.getStyle("border-" + pad).toInt(); + offsetParentPadding[pad] = offsetParent.getStyle("padding-" + pad).toInt(); + }, this); + var width = element.offsetWidth + elementMargin.left + elementMargin.right, height = element.offsetHeight + elementMargin.top + elementMargin.bottom, left = 0 + offsetScroll.x, top = 0 + offsetScroll.y, right = containerCoordinates.right - containerBorder.right - width + offsetScroll.x, bottom = containerCoordinates.bottom - containerBorder.bottom - height + offsetScroll.y; + if (this.options.includeMargins) { + left += elementMargin.left; + top += elementMargin.top; + } else { + right += elementMargin.right; + bottom += elementMargin.bottom; + } + if (element.getStyle("position") == "relative") { + var coords = element.getCoordinates(offsetParent); + coords.left -= element.getStyle("left").toInt(); + coords.top -= element.getStyle("top").toInt(); + left -= coords.left; + top -= coords.top; + if (container.getStyle("position") != "relative") { + left += containerBorder.left; + top += containerBorder.top; + } + right += elementMargin.left - coords.left; + bottom += elementMargin.top - coords.top; + if (container != offsetParent) { + left += containerMargin.left + offsetParentPadding.left; + if (!offsetParentPadding.left && 
left < 0) left = 0; + top += offsetParent == document.body ? 0 : containerMargin.top + offsetParentPadding.top; + if (!offsetParentPadding.top && top < 0) top = 0; + } + } else { + left -= elementMargin.left; + top -= elementMargin.top; + if (container != offsetParent) { + left += containerCoordinates.left + containerBorder.left; + top += containerCoordinates.top + containerBorder.top; + } + } + return { + x: [ left, right ], + y: [ top, bottom ] + }; + }, + getDroppableCoordinates: function(element) { + var position = element.getCoordinates(); + if (element.getStyle("position") == "fixed") { + var scroll = window.getScroll(); + position.left += scroll.x; + position.right += scroll.x; + position.top += scroll.y; + position.bottom += scroll.y; + } + return position; + }, + checkDroppables: function() { + var overed = this.droppables.filter(function(el, i) { + el = this.positions ? this.positions[i] : this.getDroppableCoordinates(el); + var now = this.mouse.now; + return now.x > el.left && now.x < el.right && now.y < el.bottom && now.y > el.top; + }, this).getLast(); + if (this.overed != overed) { + if (this.overed) this.fireEvent("leave", [ this.element, this.overed ]); + if (overed) this.fireEvent("enter", [ this.element, overed ]); + this.overed = overed; + } + }, + drag: function(event) { + this.parent(event); + if (this.options.checkDroppables && this.droppables.length) this.checkDroppables(); + }, + stop: function(event) { + this.checkDroppables(); + this.fireEvent("drop", [ this.element, this.overed, event ]); + this.overed = null; + return this.parent(event); + } +}); + +Element.implement({ + makeDraggable: function(options) { + var drag = new Drag.Move(this, options); + this.store("dragger", drag); + return drag; + } +}); + +var Sortables = new Class({ + Implements: [ Events, Options ], + options: { + opacity: 1, + clone: false, + revert: false, + handle: false, + dragOptions: {}, + unDraggableTags: [ "button", "input", "a", "textarea", "select", "option" ] 
+ }, + initialize: function(lists, options) { + this.setOptions(options); + this.elements = []; + this.lists = []; + this.idle = true; + this.addLists($$(document.id(lists) || lists)); + if (!this.options.clone) this.options.revert = false; + if (this.options.revert) this.effect = new Fx.Morph(null, Object.merge({ + duration: 250, + link: "cancel" + }, this.options.revert)); + }, + attach: function() { + this.addLists(this.lists); + return this; + }, + detach: function() { + this.lists = this.removeLists(this.lists); + return this; + }, + addItems: function() { + Array.flatten(arguments).each(function(element) { + this.elements.push(element); + var start = element.retrieve("sortables:start", function(event) { + this.start.call(this, event, element); + }.bind(this)); + (this.options.handle ? element.getElement(this.options.handle) || element : element).addEvent("mousedown", start); + }, this); + return this; + }, + addLists: function() { + Array.flatten(arguments).each(function(list) { + this.lists.include(list); + this.addItems(list.getChildren()); + }, this); + return this; + }, + removeItems: function() { + return $$(Array.flatten(arguments).map(function(element) { + this.elements.erase(element); + var start = element.retrieve("sortables:start"); + (this.options.handle ? 
element.getElement(this.options.handle) || element : element).removeEvent("mousedown", start); + return element; + }, this)); + }, + removeLists: function() { + return $$(Array.flatten(arguments).map(function(list) { + this.lists.erase(list); + this.removeItems(list.getChildren()); + return list; + }, this)); + }, + getDroppableCoordinates: function(element) { + var offsetParent = element.getOffsetParent(); + var position = element.getPosition(offsetParent); + var scroll = { + w: window.getScroll(), + offsetParent: offsetParent.getScroll() + }; + position.x += scroll.offsetParent.x; + position.y += scroll.offsetParent.y; + if (offsetParent.getStyle("position") == "fixed") { + position.x -= scroll.w.x; + position.y -= scroll.w.y; + } + return position; + }, + getClone: function(event, element) { + if (!this.options.clone) return new Element(element.tagName).inject(document.body); + if (typeOf(this.options.clone) == "function") return this.options.clone.call(this, event, element, this.list); + var clone = element.clone(true).setStyles({ + margin: 0, + position: "absolute", + visibility: "hidden", + width: element.getStyle("width") + }).addEvent("mousedown", function(event) { + element.fireEvent("mousedown", event); + }); + if (clone.get("html").test("radio")) { + clone.getElements("input[type=radio]").each(function(input, i) { + input.set("name", "clone_" + i); + if (input.get("checked")) element.getElements("input[type=radio]")[i].set("checked", true); + }); + } + return clone.inject(this.list).setPosition(this.getDroppableCoordinates(this.element)); + }, + getDroppables: function() { + var droppables = this.list.getChildren().erase(this.clone).erase(this.element); + if (!this.options.constrain) droppables.append(this.lists).erase(this.list); + return droppables; + }, + insert: function(dragging, element) { + var where = "inside"; + if (this.lists.contains(element)) { + this.list = element; + this.drag.droppables = this.getDroppables(); + } else { + where = 
this.element.getAllPrevious().contains(element) ? "before" : "after"; + } + this.element.inject(element, where); + this.fireEvent("sort", [ this.element, this.clone ]); + }, + start: function(event, element) { + if (!this.idle || event.rightClick || !this.options.handle && this.options.unDraggableTags.contains(event.target.get("tag"))) return; + this.idle = false; + this.element = element; + this.opacity = element.getStyle("opacity"); + this.list = element.getParent(); + this.clone = this.getClone(event, element); + this.drag = new Drag.Move(this.clone, Object.merge({ + droppables: this.getDroppables() + }, this.options.dragOptions)).addEvents({ + onSnap: function() { + event.stop(); + this.clone.setStyle("visibility", "visible"); + this.element.setStyle("opacity", this.options.opacity || 0); + this.fireEvent("start", [ this.element, this.clone ]); + }.bind(this), + onEnter: this.insert.bind(this), + onCancel: this.end.bind(this), + onComplete: this.end.bind(this) + }); + this.clone.inject(this.element, "before"); + this.drag.start(event); + }, + end: function() { + this.drag.detach(); + this.element.setStyle("opacity", this.opacity); + var self = this; + if (this.effect) { + var dim = this.element.getStyles("width", "height"), clone = this.clone, pos = clone.computePosition(this.getDroppableCoordinates(clone)); + var destroy = function() { + this.removeEvent("cancel", destroy); + clone.destroy(); + self.reset(); + }; + this.effect.element = clone; + this.effect.start({ + top: pos.top, + left: pos.left, + width: dim.width, + height: dim.height, + opacity: .25 + }).addEvent("cancel", destroy).chain(destroy); + } else { + this.clone.destroy(); + self.reset(); + } + }, + reset: function() { + this.idle = true; + this.fireEvent("complete", this.element); + }, + serialize: function() { + var params = Array.link(arguments, { + modifier: Type.isFunction, + index: function(obj) { + return obj != null; + } + }); + var serial = this.lists.map(function(list) { + return 
list.getChildren().map(params.modifier || function(element) { + return element.get("id"); + }, this); + }, this); + var index = params.index; + if (this.lists.length == 1) index = 0; + return (index || index === 0) && index >= 0 && index < this.lists.length ? serial[index] : serial; + } +}); + +(function() { + var special = { + a: /[ц═ц║ц╒цёц╓ц╔д┐д┘]/g, + A: /[ц─ц│ц┌ц┐ц└ц┘д┌д└]/g, + c: /[д┤д█ц╖]/g, + C: /[д├д▄ц┤]/g, + d: /[д▐д▒]/g, + D: /[д▌ц░]/g, + e: /[ц╗ц╘ц╙ц╚д⌡д≥]/g, + E: /[ц┬ц┴ц┼ц▀д д≤]/g, + g: /[д÷]/g, + G: /[д·]/g, + i: /[ц╛ц╜ц╝ц╞]/g, + I: /[ц▄ц█ц▌ц▐]/g, + l: /[д╨д╬е┌]/g, + L: /[д╧д╫е│]/g, + n: /[ц╠е┬е└]/g, + N: /[ц▒е┤е┐]/g, + o: /[ц╡цЁц╢ц╣ц╤ц╦е▒]/g, + O: /[ц▓ц⌠ц■ц∙ц√ц≤]/g, + r: /[е≥е∙]/g, + R: /[е≤е■]/g, + s: /[е║е║е÷]/g, + S: /[е═е·е ]/g, + t: /[е╔её]/g, + T: /[е╓е╒]/g, + u: /[ц╧ц╨ц╩е╞ц╪б╣]/g, + U: /[ц≥ц ц⌡е╝ц°]/g, + y: /[ц©ц╫]/g, + Y: /[е╦ц²]/g, + z: /[е╬е╨е╪]/g, + Z: /[е╫е╧е╩]/g, + th: /[ц╬]/g, + TH: /[ц·]/g, + dh: /[ц╟]/g, + DH: /[ц░]/g, + ss: /[ц÷]/g, + oe: /[е⌠]/g, + OE: /[е▓]/g, + ae: /[ц╕]/g, + AE: /[ц├]/g + }, tidy = { + " ": /[\xa0\u2002\u2003\u2009]/g, + "*": /[\xb7]/g, + "'": /[\u2018\u2019]/g, + '"': /[\u201c\u201d]/g, + "...": /[\u2026]/g, + "-": /[\u2013]/g, + "»": /[\uFFFD]/g + }, conversions = { + ms: 1, + s: 1e3, + m: 6e4, + h: 36e5 + }, findUnits = /(\d*.?\d+)([msh]+)/; + var walk = function(string, replacements) { + var result = string, key; + for (key in replacements) result = result.replace(replacements[key], key); + return result; + }; + var getRegexForTag = function(tag, contents) { + tag = tag || ""; + var regstr = contents ? "<" + tag + "(?!\\w)[^>]*>([\\s\\S]*?)</" + tag + "(?!\\w)>" : "</?" + tag + "([^>]+)?>", reg = new RegExp(regstr, "gi"); + return reg; + }; + String.implement({ + standardize: function() { + return walk(this, special); + }, + repeat: function(times) { + return new Array(times + 1).join(this); + }, + pad: function(length, str, direction) { + if (this.length >= length) return this; + var pad = (str == null ? 
" " : "" + str).repeat(length - this.length).substr(0, length - this.length); + if (!direction || direction == "right") return this + pad; + if (direction == "left") return pad + this; + return pad.substr(0, (pad.length / 2).floor()) + this + pad.substr(0, (pad.length / 2).ceil()); + }, + getTags: function(tag, contents) { + return this.match(getRegexForTag(tag, contents)) || []; + }, + stripTags: function(tag, contents) { + return this.replace(getRegexForTag(tag, contents), ""); + }, + tidy: function() { + return walk(this, tidy); + }, + truncate: function(max, trail, atChar) { + var string = this; + if (trail == null && arguments.length == 1) trail = "Б─╕"; + if (string.length > max) { + string = string.substring(0, max); + if (atChar) { + var index = string.lastIndexOf(atChar); + if (index != -1) string = string.substr(0, index); + } + if (trail) string += trail; + } + return string; + }, + ms: function() { + var units = findUnits.exec(this); + if (units == null) return Number(this); + return Number(units[1]) * conversions[units[2]]; + } + }); +})(); + +Element.implement({ + tidy: function() { + this.set("value", this.get("value").tidy()); + }, + getTextInRange: function(start, end) { + return this.get("value").substring(start, end); + }, + getSelectedText: function() { + if (this.setSelectionRange) return this.getTextInRange(this.getSelectionStart(), this.getSelectionEnd()); + return document.selection.createRange().text; + }, + getSelectedRange: function() { + if (this.selectionStart != null) { + return { + start: this.selectionStart, + end: this.selectionEnd + }; + } + var pos = { + start: 0, + end: 0 + }; + var range = this.getDocument().selection.createRange(); + if (!range || range.parentElement() != this) return pos; + var duplicate = range.duplicate(); + if (this.type == "text") { + pos.start = 0 - duplicate.moveStart("character", -1e5); + pos.end = pos.start + range.text.length; + } else { + var value = this.get("value"); + var offset = value.length; + 
duplicate.moveToElementText(this); + duplicate.setEndPoint("StartToEnd", range); + if (duplicate.text.length) offset -= value.match(/[\n\r]*$/)[0].length; + pos.end = offset - duplicate.text.length; + duplicate.setEndPoint("StartToStart", range); + pos.start = offset - duplicate.text.length; + } + return pos; + }, + getSelectionStart: function() { + return this.getSelectedRange().start; + }, + getSelectionEnd: function() { + return this.getSelectedRange().end; + }, + setCaretPosition: function(pos) { + if (pos == "end") pos = this.get("value").length; + this.selectRange(pos, pos); + return this; + }, + getCaretPosition: function() { + return this.getSelectedRange().start; + }, + selectRange: function(start, end) { + if (this.setSelectionRange) { + this.focus(); + this.setSelectionRange(start, end); + } else { + var value = this.get("value"); + var diff = value.substr(start, end - start).replace(/\r/g, "").length; + start = value.substr(0, start).replace(/\r/g, "").length; + var range = this.createTextRange(); + range.collapse(true); + range.moveEnd("character", start + diff); + range.moveStart("character", start); + range.select(); + } + return this; + }, + insertAtCursor: function(value, select) { + var pos = this.getSelectedRange(); + var text = this.get("value"); + this.set("value", text.substring(0, pos.start) + value + text.substring(pos.end, text.length)); + if (select !== false) this.selectRange(pos.start, pos.start + value.length); else this.setCaretPosition(pos.start + value.length); + return this; + }, + insertAroundCursor: function(options, select) { + options = Object.append({ + before: "", + defaultMiddle: "", + after: "" + }, options); + var value = this.getSelectedText() || options.defaultMiddle; + var pos = this.getSelectedRange(); + var text = this.get("value"); + if (pos.start == pos.end) { + this.set("value", text.substring(0, pos.start) + options.before + value + options.after + text.substring(pos.end, text.length)); + this.selectRange(pos.start 
+ options.before.length, pos.end + options.before.length + value.length); + } else { + var current = text.substring(pos.start, pos.end); + this.set("value", text.substring(0, pos.start) + options.before + current + options.after + text.substring(pos.end, text.length)); + var selStart = pos.start + options.before.length; + if (select !== false) this.selectRange(selStart, selStart + current.length); else this.setCaretPosition(selStart + text.length); + } + return this; + } +}); + +(function() { + var getStylesList = function(styles, planes) { + var list = []; + Object.each(planes, function(directions) { + Object.each(directions, function(edge) { + styles.each(function(style) { + list.push(style + "-" + edge + (style == "border" ? "-width" : "")); + }); + }); + }); + return list; + }; + var calculateEdgeSize = function(edge, styles) { + var total = 0; + Object.each(styles, function(value, style) { + if (style.test(edge)) total = total + value.toInt(); + }); + return total; + }; + var isVisible = function(el) { + return !!(!el || el.offsetHeight || el.offsetWidth); + }; + Element.implement({ + measure: function(fn) { + if (isVisible(this)) return fn.call(this); + var parent = this.getParent(), toMeasure = []; + while (!isVisible(parent) && parent != document.body) { + toMeasure.push(parent.expose()); + parent = parent.getParent(); + } + var restore = this.expose(), result = fn.call(this); + restore(); + toMeasure.each(function(restore) { + restore(); + }); + return result; + }, + expose: function() { + if (this.getStyle("display") != "none") return function() {}; + var before = this.style.cssText; + this.setStyles({ + display: "block", + position: "absolute", + visibility: "hidden" + }); + return function() { + this.style.cssText = before; + }.bind(this); + }, + getDimensions: function(options) { + options = Object.merge({ + computeSize: false + }, options); + var dim = { + x: 0, + y: 0 + }; + var getSize = function(el, options) { + return options.computeSize ? 
el.getComputedSize(options) : el.getSize(); + }; + var parent = this.getParent("body"); + if (parent && this.getStyle("display") == "none") { + dim = this.measure(function() { + return getSize(this, options); + }); + } else if (parent) { + try { + dim = getSize(this, options); + } catch (e) {} + } + return Object.append(dim, dim.x || dim.x === 0 ? { + width: dim.x, + height: dim.y + } : { + x: dim.width, + y: dim.height + }); + }, + getComputedSize: function(options) { + options = Object.merge({ + styles: [ "padding", "border" ], + planes: { + height: [ "top", "bottom" ], + width: [ "left", "right" ] + }, + mode: "both" + }, options); + var styles = {}, size = { + width: 0, + height: 0 + }, dimensions; + if (options.mode == "vertical") { + delete size.width; + delete options.planes.width; + } else if (options.mode == "horizontal") { + delete size.height; + delete options.planes.height; + } + getStylesList(options.styles, options.planes).each(function(style) { + styles[style] = this.getStyle(style).toInt(); + }, this); + Object.each(options.planes, function(edges, plane) { + var capitalized = plane.capitalize(), style = this.getStyle(plane); + if (style == "auto" && !dimensions) dimensions = this.getDimensions(); + style = styles[plane] = style == "auto" ? 
dimensions[plane] : style.toInt(); + size["total" + capitalized] = style; + edges.each(function(edge) { + var edgesize = calculateEdgeSize(edge, styles); + size["computed" + edge.capitalize()] = edgesize; + size["total" + capitalized] += edgesize; + }); + }, this); + return Object.append(size, styles); + } + }); +})(); + +(function(original) { + var local = Element.Position = { + options: { + relativeTo: document.body, + position: { + x: "center", + y: "center" + }, + offset: { + x: 0, + y: 0 + } + }, + getOptions: function(element, options) { + options = Object.merge({}, local.options, options); + local.setPositionOption(options); + local.setEdgeOption(options); + local.setOffsetOption(element, options); + local.setDimensionsOption(element, options); + return options; + }, + setPositionOption: function(options) { + options.position = local.getCoordinateFromValue(options.position); + }, + setEdgeOption: function(options) { + var edgeOption = local.getCoordinateFromValue(options.edge); + options.edge = edgeOption ? edgeOption : options.position.x == "center" && options.position.y == "center" ? 
{ + x: "center", + y: "center" + } : { + x: "left", + y: "top" + }; + }, + setOffsetOption: function(element, options) { + var parentOffset = { + x: 0, + y: 0 + }; + var parentScroll = { + x: 0, + y: 0 + }; + var offsetParent = element.measure(function() { + return document.id(this.getOffsetParent()); + }); + if (!offsetParent || offsetParent == element.getDocument().body) return; + parentScroll = offsetParent.getScroll(); + parentOffset = offsetParent.measure(function() { + var position = this.getPosition(); + if (this.getStyle("position") == "fixed") { + var scroll = window.getScroll(); + position.x += scroll.x; + position.y += scroll.y; + } + return position; + }); + options.offset = { + parentPositioned: offsetParent != document.id(options.relativeTo), + x: options.offset.x - parentOffset.x + parentScroll.x, + y: options.offset.y - parentOffset.y + parentScroll.y + }; + }, + setDimensionsOption: function(element, options) { + options.dimensions = element.getDimensions({ + computeSize: true, + styles: [ "padding", "border", "margin" ] + }); + }, + getPosition: function(element, options) { + var position = {}; + options = local.getOptions(element, options); + var relativeTo = document.id(options.relativeTo) || document.body; + local.setPositionCoordinates(options, position, relativeTo); + if (options.edge) local.toEdge(position, options); + var offset = options.offset; + position.left = (position.x >= 0 || offset.parentPositioned || options.allowNegative ? position.x : 0).toInt(); + position.top = (position.y >= 0 || offset.parentPositioned || options.allowNegative ? 
position.y : 0).toInt(); + local.toMinMax(position, options); + if (options.relFixedPosition || relativeTo.getStyle("position") == "fixed") local.toRelFixedPosition(relativeTo, position); + if (options.ignoreScroll) local.toIgnoreScroll(relativeTo, position); + if (options.ignoreMargins) local.toIgnoreMargins(position, options); + position.left = Math.ceil(position.left); + position.top = Math.ceil(position.top); + delete position.x; + delete position.y; + return position; + }, + setPositionCoordinates: function(options, position, relativeTo) { + var offsetY = options.offset.y, offsetX = options.offset.x, calc = relativeTo == document.body ? window.getScroll() : relativeTo.getPosition(), top = calc.y, left = calc.x, winSize = window.getSize(); + switch (options.position.x) { + case "left": + position.x = left + offsetX; + break; + + case "right": + position.x = left + offsetX + relativeTo.offsetWidth; + break; + + default: + position.x = left + (relativeTo == document.body ? winSize.x : relativeTo.offsetWidth) / 2 + offsetX; + break; + } + switch (options.position.y) { + case "top": + position.y = top + offsetY; + break; + + case "bottom": + position.y = top + offsetY + relativeTo.offsetHeight; + break; + + default: + position.y = top + (relativeTo == document.body ? winSize.y : relativeTo.offsetHeight) / 2 + offsetY; + break; + } + }, + toMinMax: function(position, options) { + var xy = { + left: "x", + top: "y" + }, value; + [ "minimum", "maximum" ].each(function(minmax) { + [ "left", "top" ].each(function(lr) { + value = options[minmax] ? options[minmax][xy[lr]] : null; + if (value != null && (minmax == "minimum" ? 
position[lr] < value : position[lr] > value)) position[lr] = value; + }); + }); + }, + toRelFixedPosition: function(relativeTo, position) { + var winScroll = window.getScroll(); + position.top += winScroll.y; + position.left += winScroll.x; + }, + toIgnoreScroll: function(relativeTo, position) { + var relScroll = relativeTo.getScroll(); + position.top -= relScroll.y; + position.left -= relScroll.x; + }, + toIgnoreMargins: function(position, options) { + position.left += options.edge.x == "right" ? options.dimensions["margin-right"] : options.edge.x != "center" ? -options.dimensions["margin-left"] : -options.dimensions["margin-left"] + (options.dimensions["margin-right"] + options.dimensions["margin-left"]) / 2; + position.top += options.edge.y == "bottom" ? options.dimensions["margin-bottom"] : options.edge.y != "center" ? -options.dimensions["margin-top"] : -options.dimensions["margin-top"] + (options.dimensions["margin-bottom"] + options.dimensions["margin-top"]) / 2; + }, + toEdge: function(position, options) { + var edgeOffset = {}, dimensions = options.dimensions, edge = options.edge; + switch (edge.x) { + case "left": + edgeOffset.x = 0; + break; + + case "right": + edgeOffset.x = -dimensions.x - dimensions.computedRight - dimensions.computedLeft; + break; + + default: + edgeOffset.x = -Math.round(dimensions.totalWidth / 2); + break; + } + switch (edge.y) { + case "top": + edgeOffset.y = 0; + break; + + case "bottom": + edgeOffset.y = -dimensions.y - dimensions.computedTop - dimensions.computedBottom; + break; + + default: + edgeOffset.y = -Math.round(dimensions.totalHeight / 2); + break; + } + position.x += edgeOffset.x; + position.y += edgeOffset.y; + }, + getCoordinateFromValue: function(option) { + if (typeOf(option) != "string") return option; + option = option.toLowerCase(); + return { + x: option.test("left") ? "left" : option.test("right") ? "right" : "center", + y: option.test(/upper|top/) ? "top" : option.test("bottom") ? 
"bottom" : "center" + }; + } + }; + Element.implement({ + position: function(options) { + if (options && (options.x != null || options.y != null)) { + return original ? original.apply(this, arguments) : this; + } + var position = this.setStyle("position", "absolute").calculatePosition(options); + return options && options.returnPos ? position : this.setStyles(position); + }, + calculatePosition: function(options) { + return local.getPosition(this, options); + } + }); +})(Element.prototype.position); + +Element.implement({ + isDisplayed: function() { + return this.getStyle("display") != "none"; + }, + isVisible: function() { + var w = this.offsetWidth, h = this.offsetHeight; + return w == 0 && h == 0 ? false : w > 0 && h > 0 ? true : this.style.display != "none"; + }, + toggle: function() { + return this[this.isDisplayed() ? "hide" : "show"](); + }, + hide: function() { + var d; + try { + d = this.getStyle("display"); + } catch (e) {} + if (d == "none") return this; + return this.store("element:_originalDisplay", d || "").setStyle("display", "none"); + }, + show: function(display) { + if (!display && this.isDisplayed()) return this; + display = display || this.retrieve("element:_originalDisplay") || "block"; + return this.setStyle("display", display == "none" ? 
"block" : display); + }, + swapClass: function(remove, add) { + return this.removeClass(remove).addClass(add); + } +}); + +Document.implement({ + clearSelection: function() { + if (window.getSelection) { + var selection = window.getSelection(); + if (selection && selection.removeAllRanges) selection.removeAllRanges(); + } else if (document.selection && document.selection.empty) { + try { + document.selection.empty(); + } catch (e) {} + } + } +}); + +(function() { + var defined = function(value) { + return value != null; + }; + var hasOwnProperty = Object.prototype.hasOwnProperty; + Object.extend({ + getFromPath: function(source, parts) { + if (typeof parts == "string") parts = parts.split("."); + for (var i = 0, l = parts.length; i < l; i++) { + if (hasOwnProperty.call(source, parts[i])) source = source[parts[i]]; else return null; + } + return source; + }, + cleanValues: function(object, method) { + method = method || defined; + for (var key in object) if (!method(object[key])) { + delete object[key]; + } + return object; + }, + erase: function(object, key) { + if (hasOwnProperty.call(object, key)) delete object[key]; + return object; + }, + run: function(object) { + var args = Array.slice(arguments, 1); + for (var key in object) if (object[key].apply) { + object[key].apply(object, args); + } + return object; + } + }); +})(); + +(function() { + var current = null, locales = {}, inherits = {}; + var getSet = function(set) { + if (instanceOf(set, Locale.Set)) return set; else return locales[set]; + }; + var Locale = this.Locale = { + define: function(locale, set, key, value) { + var name; + if (instanceOf(locale, Locale.Set)) { + name = locale.name; + if (name) locales[name] = locale; + } else { + name = locale; + if (!locales[name]) locales[name] = new Locale.Set(name); + locale = locales[name]; + } + if (set) locale.define(set, key, value); + if (!current) current = locale; + return locale; + }, + use: function(locale) { + locale = getSet(locale); + if (locale) { 
+ current = locale; + this.fireEvent("change", locale); + } + return this; + }, + getCurrent: function() { + return current; + }, + get: function(key, args) { + return current ? current.get(key, args) : ""; + }, + inherit: function(locale, inherits, set) { + locale = getSet(locale); + if (locale) locale.inherit(inherits, set); + return this; + }, + list: function() { + return Object.keys(locales); + } + }; + Object.append(Locale, new Events()); + Locale.Set = new Class({ + sets: {}, + inherits: { + locales: [], + sets: {} + }, + initialize: function(name) { + this.name = name || ""; + }, + define: function(set, key, value) { + var defineData = this.sets[set]; + if (!defineData) defineData = {}; + if (key) { + if (typeOf(key) == "object") defineData = Object.merge(defineData, key); else defineData[key] = value; + } + this.sets[set] = defineData; + return this; + }, + get: function(key, args, _base) { + var value = Object.getFromPath(this.sets, key); + if (value != null) { + var type = typeOf(value); + if (type == "function") value = value.apply(null, Array.from(args)); else if (type == "object") value = Object.clone(value); + return value; + } + var index = key.indexOf("."), set = index < 0 ? key : key.substr(0, index), names = (this.inherits.sets[set] || []).combine(this.inherits.locales).include("en-US"); + if (!_base) _base = []; + for (var i = 0, l = names.length; i < l; i++) { + if (_base.contains(names[i])) continue; + _base.include(names[i]); + var locale = locales[names[i]]; + if (!locale) continue; + value = locale.get(key, args, _base); + if (value != null) return value; + } + return ""; + }, + inherit: function(names, set) { + names = Array.from(names); + if (set && !this.inherits.sets[set]) this.inherits.sets[set] = []; + var l = names.length; + while (l--) (set ? 
this.inherits.sets[set] : this.inherits.locales).unshift(names[l]); + return this; + } + }); +})(); + +Locale.define("en-US", "Date", { + months: [ "January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December" ], + months_abbr: [ "Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec" ], + days: [ "Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday" ], + days_abbr: [ "Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat" ], + dateOrder: [ "month", "date", "year" ], + shortDate: "%m/%d/%Y", + shortTime: "%I:%M%p", + AM: "AM", + PM: "PM", + firstDayOfWeek: 0, + ordinal: function(dayOfMonth) { + return dayOfMonth > 3 && dayOfMonth < 21 ? "th" : [ "th", "st", "nd", "rd", "th" ][Math.min(dayOfMonth % 10, 4)]; + }, + lessThanMinuteAgo: "less than a minute ago", + minuteAgo: "about a minute ago", + minutesAgo: "{delta} minutes ago", + hourAgo: "about an hour ago", + hoursAgo: "about {delta} hours ago", + dayAgo: "1 day ago", + daysAgo: "{delta} days ago", + weekAgo: "1 week ago", + weeksAgo: "{delta} weeks ago", + monthAgo: "1 month ago", + monthsAgo: "{delta} months ago", + yearAgo: "1 year ago", + yearsAgo: "{delta} years ago", + lessThanMinuteUntil: "less than a minute from now", + minuteUntil: "about a minute from now", + minutesUntil: "{delta} minutes from now", + hourUntil: "about an hour from now", + hoursUntil: "about {delta} hours from now", + dayUntil: "1 day from now", + daysUntil: "{delta} days from now", + weekUntil: "1 week from now", + weeksUntil: "{delta} weeks from now", + monthUntil: "1 month from now", + monthsUntil: "{delta} months from now", + yearUntil: "1 year from now", + yearsUntil: "{delta} years from now" +}); + +(function() { + var Date = this.Date; + var DateMethods = Date.Methods = { + ms: "Milliseconds", + year: "FullYear", + min: "Minutes", + mo: "Month", + sec: "Seconds", + hr: "Hours" + }; + [ "Date", "Day", "FullYear", "Hours", 
"Milliseconds", "Minutes", "Month", "Seconds", "Time", "TimezoneOffset", "Week", "Timezone", "GMTOffset", "DayOfYear", "LastMonth", "LastDayOfMonth", "UTCDate", "UTCDay", "UTCFullYear", "AMPM", "Ordinal", "UTCHours", "UTCMilliseconds", "UTCMinutes", "UTCMonth", "UTCSeconds", "UTCMilliseconds" ].each(function(method) { + Date.Methods[method.toLowerCase()] = method; + }); + var pad = function(n, digits, string) { + if (digits == 1) return n; + return n < Math.pow(10, digits - 1) ? (string || "0") + pad(n, digits - 1, string) : n; + }; + Date.implement({ + set: function(prop, value) { + prop = prop.toLowerCase(); + var method = DateMethods[prop] && "set" + DateMethods[prop]; + if (method && this[method]) this[method](value); + return this; + }.overloadSetter(), + get: function(prop) { + prop = prop.toLowerCase(); + var method = DateMethods[prop] && "get" + DateMethods[prop]; + if (method && this[method]) return this[method](); + return null; + }.overloadGetter(), + clone: function() { + return new Date(this.get("time")); + }, + increment: function(interval, times) { + interval = interval || "day"; + times = times != null ? times : 1; + switch (interval) { + case "year": + return this.increment("month", times * 12); + + case "month": + var d = this.get("date"); + this.set("date", 1).set("mo", this.get("mo") + times); + return this.set("date", d.min(this.get("lastdayofmonth"))); + + case "week": + return this.increment("day", times * 7); + + case "day": + return this.set("date", this.get("date") + times); + } + if (!Date.units[interval]) throw new Error(interval + " is not a supported interval"); + return this.set("time", this.get("time") + times * Date.units[interval]()); + }, + decrement: function(interval, times) { + return this.increment(interval, -1 * (times != null ? 
times : 1)); + }, + isLeapYear: function() { + return Date.isLeapYear(this.get("year")); + }, + clearTime: function() { + return this.set({ + hr: 0, + min: 0, + sec: 0, + ms: 0 + }); + }, + diff: function(date, resolution) { + if (typeOf(date) == "string") date = Date.parse(date); + return ((date - this) / Date.units[resolution || "day"](3, 3)).round(); + }, + getLastDayOfMonth: function() { + return Date.daysInMonth(this.get("mo"), this.get("year")); + }, + getDayOfYear: function() { + return (Date.UTC(this.get("year"), this.get("mo"), this.get("date") + 1) - Date.UTC(this.get("year"), 0, 1)) / Date.units.day(); + }, + setDay: function(day, firstDayOfWeek) { + if (firstDayOfWeek == null) { + firstDayOfWeek = Date.getMsg("firstDayOfWeek"); + if (firstDayOfWeek === "") firstDayOfWeek = 1; + } + day = (7 + Date.parseDay(day, true) - firstDayOfWeek) % 7; + var currentDay = (7 + this.get("day") - firstDayOfWeek) % 7; + return this.increment("day", day - currentDay); + }, + getWeek: function(firstDayOfWeek) { + if (firstDayOfWeek == null) { + firstDayOfWeek = Date.getMsg("firstDayOfWeek"); + if (firstDayOfWeek === "") firstDayOfWeek = 1; + } + var date = this, dayOfWeek = (7 + date.get("day") - firstDayOfWeek) % 7, dividend = 0, firstDayOfYear; + if (firstDayOfWeek == 1) { + var month = date.get("month"), startOfWeek = date.get("date") - dayOfWeek; + if (month == 11 && startOfWeek > 28) return 1; + if (month == 0 && startOfWeek < -2) { + date = new Date(date).decrement("day", dayOfWeek); + dayOfWeek = 0; + } + firstDayOfYear = new Date(date.get("year"), 0, 1).get("day") || 7; + if (firstDayOfYear > 4) dividend = -7; + } else { + firstDayOfYear = new Date(date.get("year"), 0, 1).get("day"); + } + dividend += date.get("dayofyear"); + dividend += 6 - dayOfWeek; + dividend += (7 + firstDayOfYear - firstDayOfWeek) % 7; + return dividend / 7; + }, + getOrdinal: function(day) { + return Date.getMsg("ordinal", day || this.get("date")); + }, + getTimezone: function() { + return 
this.toString().replace(/^.*? ([A-Z]{3}).[0-9]{4}.*$/, "$1").replace(/^.*?\(([A-Z])[a-z]+ ([A-Z])[a-z]+ ([A-Z])[a-z]+\)$/, "$1$2$3"); + }, + getGMTOffset: function() { + var off = this.get("timezoneOffset"); + return (off > 0 ? "-" : "+") + pad((off.abs() / 60).floor(), 2) + pad(off % 60, 2); + }, + setAMPM: function(ampm) { + ampm = ampm.toUpperCase(); + var hr = this.get("hr"); + if (hr > 11 && ampm == "AM") return this.decrement("hour", 12); else if (hr < 12 && ampm == "PM") return this.increment("hour", 12); + return this; + }, + getAMPM: function() { + return this.get("hr") < 12 ? "AM" : "PM"; + }, + parse: function(str) { + this.set("time", Date.parse(str)); + return this; + }, + isValid: function(date) { + if (!date) date = this; + return typeOf(date) == "date" && !isNaN(date.valueOf()); + }, + format: function(format) { + if (!this.isValid()) return "invalid date"; + if (!format) format = "%x %X"; + if (typeof format == "string") format = formats[format.toLowerCase()] || format; + if (typeof format == "function") return format(this); + var d = this; + return format.replace(/%([a-z%])/gi, function($0, $1) { + switch ($1) { + case "a": + return Date.getMsg("days_abbr")[d.get("day")]; + + case "A": + return Date.getMsg("days")[d.get("day")]; + + case "b": + return Date.getMsg("months_abbr")[d.get("month")]; + + case "B": + return Date.getMsg("months")[d.get("month")]; + + case "c": + return d.format("%a %b %d %H:%M:%S %Y"); + + case "d": + return pad(d.get("date"), 2); + + case "e": + return pad(d.get("date"), 2, " "); + + case "H": + return pad(d.get("hr"), 2); + + case "I": + return pad(d.get("hr") % 12 || 12, 2); + + case "j": + return pad(d.get("dayofyear"), 3); + + case "k": + return pad(d.get("hr"), 2, " "); + + case "l": + return pad(d.get("hr") % 12 || 12, 2, " "); + + case "L": + return pad(d.get("ms"), 3); + + case "m": + return pad(d.get("mo") + 1, 2); + + case "M": + return pad(d.get("min"), 2); + + case "o": + return d.get("ordinal"); + + case 
"p": + return Date.getMsg(d.get("ampm")); + + case "s": + return Math.round(d / 1e3); + + case "S": + return pad(d.get("seconds"), 2); + + case "T": + return d.format("%H:%M:%S"); + + case "U": + return pad(d.get("week"), 2); + + case "w": + return d.get("day"); + + case "x": + return d.format(Date.getMsg("shortDate")); + + case "X": + return d.format(Date.getMsg("shortTime")); + + case "y": + return d.get("year").toString().substr(2); + + case "Y": + return d.get("year"); + + case "z": + return d.get("GMTOffset"); + + case "Z": + return d.get("Timezone"); + } + return $1; + }); + }, + toISOString: function() { + return this.format("iso8601"); + } + }).alias({ + toJSON: "toISOString", + compare: "diff", + strftime: "format" + }); + var rfcDayAbbr = [ "Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat" ], rfcMonthAbbr = [ "Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec" ]; + var formats = { + db: "%Y-%m-%d %H:%M:%S", + compact: "%Y%m%dT%H%M%S", + short: "%d %b %H:%M", + long: "%B %d, %Y %H:%M", + rfc822: function(date) { + return rfcDayAbbr[date.get("day")] + date.format(", %d ") + rfcMonthAbbr[date.get("month")] + date.format(" %Y %H:%M:%S %Z"); + }, + rfc2822: function(date) { + return rfcDayAbbr[date.get("day")] + date.format(", %d ") + rfcMonthAbbr[date.get("month")] + date.format(" %Y %H:%M:%S %z"); + }, + iso8601: function(date) { + return date.getUTCFullYear() + "-" + pad(date.getUTCMonth() + 1, 2) + "-" + pad(date.getUTCDate(), 2) + "T" + pad(date.getUTCHours(), 2) + ":" + pad(date.getUTCMinutes(), 2) + ":" + pad(date.getUTCSeconds(), 2) + "." 
+ pad(date.getUTCMilliseconds(), 3) + "Z"; + } + }; + var parsePatterns = [], nativeParse = Date.parse; + var parseWord = function(type, word, num) { + var ret = -1, translated = Date.getMsg(type + "s"); + switch (typeOf(word)) { + case "object": + ret = translated[word.get(type)]; + break; + + case "number": + ret = translated[word]; + if (!ret) throw new Error("Invalid " + type + " index: " + word); + break; + + case "string": + var match = translated.filter(function(name) { + return this.test(name); + }, new RegExp("^" + word, "i")); + if (!match.length) throw new Error("Invalid " + type + " string"); + if (match.length > 1) throw new Error("Ambiguous " + type); + ret = match[0]; + } + return num ? translated.indexOf(ret) : ret; + }; + var startCentury = 1900, startYear = 70; + Date.extend({ + getMsg: function(key, args) { + return Locale.get("Date." + key, args); + }, + units: { + ms: Function.from(1), + second: Function.from(1e3), + minute: Function.from(6e4), + hour: Function.from(36e5), + day: Function.from(864e5), + week: Function.from(6084e5), + month: function(month, year) { + var d = new Date(); + return Date.daysInMonth(month != null ? month : d.get("mo"), year != null ? year : d.get("year")) * 864e5; + }, + year: function(year) { + year = year || new Date().get("year"); + return Date.isLeapYear(year) ? 316224e5 : 31536e6; + } + }, + daysInMonth: function(month, year) { + return [ 31, Date.isLeapYear(year) ? 29 : 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 ][month]; + }, + isLeapYear: function(year) { + return year % 4 === 0 && year % 100 !== 0 || year % 400 === 0; + }, + parse: function(from) { + var t = typeOf(from); + if (t == "number") return new Date(from); + if (t != "string") return from; + from = from.clean(); + if (!from.length) return null; + var parsed; + parsePatterns.some(function(pattern) { + var bits = pattern.re.exec(from); + return bits ? 
parsed = pattern.handler(bits) : false; + }); + if (!(parsed && parsed.isValid())) { + parsed = new Date(nativeParse(from)); + if (!(parsed && parsed.isValid())) parsed = new Date(from.toInt()); + } + return parsed; + }, + parseDay: function(day, num) { + return parseWord("day", day, num); + }, + parseMonth: function(month, num) { + return parseWord("month", month, num); + }, + parseUTC: function(value) { + var localDate = new Date(value); + var utcSeconds = Date.UTC(localDate.get("year"), localDate.get("mo"), localDate.get("date"), localDate.get("hr"), localDate.get("min"), localDate.get("sec"), localDate.get("ms")); + return new Date(utcSeconds); + }, + orderIndex: function(unit) { + return Date.getMsg("dateOrder").indexOf(unit) + 1; + }, + defineFormat: function(name, format) { + formats[name] = format; + return this; + }, + defineParser: function(pattern) { + parsePatterns.push(pattern.re && pattern.handler ? pattern : build(pattern)); + return this; + }, + defineParsers: function() { + Array.flatten(arguments).each(Date.defineParser); + return this; + }, + define2DigitYearStart: function(year) { + startYear = year % 100; + startCentury = year - startYear; + return this; + } + }).extend({ + defineFormats: Date.defineFormat.overloadSetter() + }); + var regexOf = function(type) { + return new RegExp("(?:" + Date.getMsg(type).map(function(name) { + return name.substr(0, 3); + }).join("|") + ")[a-z]*"); + }; + var replacers = function(key) { + switch (key) { + case "T": + return "%H:%M:%S"; + + case "x": + return (Date.orderIndex("month") == 1 ? "%m[-./]%d" : "%d[-./]%m") + "([-./]%y)?"; + + case "X": + return "%H([.:]%M)?([.:]%S([.:]%s)?)? ?%p? 
?%z?"; + } + return null; + }; + var keys = { + d: /[0-2]?[0-9]|3[01]/, + H: /[01]?[0-9]|2[0-3]/, + I: /0?[1-9]|1[0-2]/, + M: /[0-5]?\d/, + s: /\d+/, + o: /[a-z]*/, + p: /[ap]\.?m\.?/, + y: /\d{2}|\d{4}/, + Y: /\d{4}/, + z: /Z|[+-]\d{2}(?::?\d{2})?/ + }; + keys.m = keys.I; + keys.S = keys.M; + var currentLanguage; + var recompile = function(language) { + currentLanguage = language; + keys.a = keys.A = regexOf("days"); + keys.b = keys.B = regexOf("months"); + parsePatterns.each(function(pattern, i) { + if (pattern.format) parsePatterns[i] = build(pattern.format); + }); + }; + var build = function(format) { + if (!currentLanguage) return { + format: format + }; + var parsed = []; + var re = (format.source || format).replace(/%([a-z])/gi, function($0, $1) { + return replacers($1) || $0; + }).replace(/\((?!\?)/g, "(?:").replace(/ (?!\?|\*)/g, ",? ").replace(/%([a-z%])/gi, function($0, $1) { + var p = keys[$1]; + if (!p) return $1; + parsed.push($1); + return "(" + p.source + ")"; + }).replace(/\[a-z\]/gi, "[a-z\\u00c0-\\uffff;&]"); + return { + format: format, + re: new RegExp("^" + re + "$", "i"), + handler: function(bits) { + bits = bits.slice(1).associate(parsed); + var date = new Date().clearTime(), year = bits.y || bits.Y; + if (year != null) handle.call(date, "y", year); + if ("d" in bits) handle.call(date, "d", 1); + if ("m" in bits || bits.b || bits.B) handle.call(date, "m", 1); + for (var key in bits) handle.call(date, key, bits[key]); + return date; + } + }; + }; + var handle = function(key, value) { + if (!value) return this; + switch (key) { + case "a": + case "A": + return this.set("day", Date.parseDay(value, true)); + + case "b": + case "B": + return this.set("mo", Date.parseMonth(value, true)); + + case "d": + return this.set("date", value); + + case "H": + case "I": + return this.set("hr", value); + + case "m": + return this.set("mo", value - 1); + + case "M": + return this.set("min", value); + + case "p": + return this.set("ampm", value.replace(/\./g, 
"")); + + case "S": + return this.set("sec", value); + + case "s": + return this.set("ms", ("0." + value) * 1e3); + + case "w": + return this.set("day", value); + + case "Y": + return this.set("year", value); + + case "y": + value = +value; + if (value < 100) value += startCentury + (value < startYear ? 100 : 0); + return this.set("year", value); + + case "z": + if (value == "Z") value = "+00"; + var offset = value.match(/([+-])(\d{2}):?(\d{2})?/); + offset = (offset[1] + "1") * (offset[2] * 60 + (+offset[3] || 0)) + this.getTimezoneOffset(); + return this.set("time", this - offset * 6e4); + } + return this; + }; + Date.defineParsers("%Y([-./]%m([-./]%d((T| )%X)?)?)?", "%Y%m%d(T%H(%M%S?)?)?", "%x( %X)?", "%d%o( %b( %Y)?)?( %X)?", "%b( %d%o)?( %Y)?( %X)?", "%Y %b( %d%o( %X)?)?", "%o %b %d %X %z %Y", "%T", "%H:%M( ?%p)?"); + Locale.addEvent("change", function(language) { + if (Locale.get("Date")) recompile(language); + }).fireEvent("change", Locale.getCurrent()); +})(); + +(function() { + Fx.Scroll = new Class({ + Extends: Fx, + options: { + offset: { + x: 0, + y: 0 + }, + wheelStops: true + }, + initialize: function(element, options) { + this.element = this.subject = document.id(element); + this.parent(options); + if (typeOf(this.element) != "element") this.element = document.id(this.element.getDocument().body); + if (this.options.wheelStops) { + var stopper = this.element, cancel = this.cancel.pass(false, this); + this.addEvent("start", function() { + stopper.addEvent("mousewheel", cancel); + }, true); + this.addEvent("complete", function() { + stopper.removeEvent("mousewheel", cancel); + }, true); + } + }, + set: function() { + var now = Array.flatten(arguments); + this.element.scrollTo(now[0], now[1]); + return this; + }, + compute: function(from, to, delta) { + return [ 0, 1 ].map(function(i) { + return Fx.compute(from[i], to[i], delta); + }); + }, + start: function(x, y) { + if (!this.check(x, y)) return this; + var scroll = this.element.getScroll(); + return 
this.parent([ scroll.x, scroll.y ], [ x, y ]); + }, + calculateScroll: function(x, y) { + var element = this.element, scrollSize = element.getScrollSize(), scroll = element.getScroll(), size = element.getSize(), offset = this.options.offset, values = { + x: x, + y: y + }; + for (var z in values) { + if (!values[z] && values[z] !== 0) values[z] = scroll[z]; + if (typeOf(values[z]) != "number") values[z] = scrollSize[z] - size[z]; + values[z] += offset[z]; + } + return [ values.x, values.y ]; + }, + toTop: function() { + return this.start.apply(this, this.calculateScroll(false, 0)); + }, + toLeft: function() { + return this.start.apply(this, this.calculateScroll(0, false)); + }, + toRight: function() { + return this.start.apply(this, this.calculateScroll("right", false)); + }, + toBottom: function() { + return this.start.apply(this, this.calculateScroll(false, "bottom")); + }, + toElement: function(el, axes) { + axes = axes ? Array.from(axes) : [ "x", "y" ]; + var scroll = isBody(this.element) ? { + x: 0, + y: 0 + } : this.element.getScroll(); + var position = Object.map(document.id(el).getPosition(this.element), function(value, axis) { + return axes.contains(axis) ? value + scroll[axis] : false; + }); + return this.start.apply(this, this.calculateScroll(position.x, position.y)); + }, + toElementEdge: function(el, axes, offset) { + axes = axes ? 
Array.from(axes) : [ "x", "y" ]; + el = document.id(el); + var to = {}, position = el.getPosition(this.element), size = el.getSize(), scroll = this.element.getScroll(), containerSize = this.element.getSize(), edge = { + x: position.x + size.x, + y: position.y + size.y + }; + [ "x", "y" ].each(function(axis) { + if (axes.contains(axis)) { + if (edge[axis] > scroll[axis] + containerSize[axis]) to[axis] = edge[axis] - containerSize[axis]; + if (position[axis] < scroll[axis]) to[axis] = position[axis]; + } + if (to[axis] == null) to[axis] = scroll[axis]; + if (offset && offset[axis]) to[axis] = to[axis] + offset[axis]; + }, this); + if (to.x != scroll.x || to.y != scroll.y) this.start(to.x, to.y); + return this; + }, + toElementCenter: function(el, axes, offset) { + axes = axes ? Array.from(axes) : [ "x", "y" ]; + el = document.id(el); + var to = {}, position = el.getPosition(this.element), size = el.getSize(), scroll = this.element.getScroll(), containerSize = this.element.getSize(); + [ "x", "y" ].each(function(axis) { + if (axes.contains(axis)) { + to[axis] = position[axis] - (containerSize[axis] - size[axis]) / 2; + } + if (to[axis] == null) to[axis] = scroll[axis]; + if (offset && offset[axis]) to[axis] = to[axis] + offset[axis]; + }, this); + if (to.x != scroll.x || to.y != scroll.y) this.start(to.x, to.y); + return this; + } + }); + function isBody(element) { + return /^(?:body|html)$/i.test(element.tagName); + } +})(); + +Fx.Slide = new Class({ + Extends: Fx, + options: { + mode: "vertical", + wrapper: false, + hideOverflow: true, + resetHeight: false + }, + initialize: function(element, options) { + element = this.element = this.subject = document.id(element); + this.parent(options); + options = this.options; + var wrapper = element.retrieve("wrapper"), styles = element.getStyles("margin", "position", "overflow"); + if (options.hideOverflow) styles = Object.append(styles, { + overflow: "hidden" + }); + if (options.wrapper) wrapper = 
document.id(options.wrapper).setStyles(styles); + if (!wrapper) wrapper = new Element("div", { + styles: styles + }).wraps(element); + element.store("wrapper", wrapper).setStyle("margin", 0); + if (element.getStyle("overflow") == "visible") element.setStyle("overflow", "hidden"); + this.now = []; + this.open = true; + this.wrapper = wrapper; + this.addEvent("complete", function() { + this.open = wrapper["offset" + this.layout.capitalize()] != 0; + if (this.open && this.options.resetHeight) wrapper.setStyle("height", ""); + }, true); + }, + vertical: function() { + this.margin = "margin-top"; + this.layout = "height"; + this.offset = this.element.offsetHeight; + }, + horizontal: function() { + this.margin = "margin-left"; + this.layout = "width"; + this.offset = this.element.offsetWidth; + }, + set: function(now) { + this.element.setStyle(this.margin, now[0]); + this.wrapper.setStyle(this.layout, now[1]); + return this; + }, + compute: function(from, to, delta) { + return [ 0, 1 ].map(function(i) { + return Fx.compute(from[i], to[i], delta); + }); + }, + start: function(how, mode) { + if (!this.check(how, mode)) return this; + this[mode || this.options.mode](); + var margin = this.element.getStyle(this.margin).toInt(), layout = this.wrapper.getStyle(this.layout).toInt(), caseIn = [ [ margin, layout ], [ 0, this.offset ] ], caseOut = [ [ margin, layout ], [ -this.offset, 0 ] ], start; + switch (how) { + case "in": + start = caseIn; + break; + + case "out": + start = caseOut; + break; + + case "toggle": + start = layout == 0 ? 
caseIn : caseOut; + } + return this.parent(start[0], start[1]); + }, + slideIn: function(mode) { + return this.start("in", mode); + }, + slideOut: function(mode) { + return this.start("out", mode); + }, + hide: function(mode) { + this[mode || this.options.mode](); + this.open = false; + return this.set([ -this.offset, 0 ]); + }, + show: function(mode) { + this[mode || this.options.mode](); + this.open = true; + return this.set([ 0, this.offset ]); + }, + toggle: function(mode) { + return this.start("toggle", mode); + } +}); + +Element.Properties.slide = { + set: function(options) { + this.get("slide").cancel().setOptions(options); + return this; + }, + get: function() { + var slide = this.retrieve("slide"); + if (!slide) { + slide = new Fx.Slide(this, { + link: "cancel" + }); + this.store("slide", slide); + } + return slide; + } +}; + +Element.implement({ + slide: function(how, mode) { + how = how || "toggle"; + var slide = this.get("slide"), toggle; + switch (how) { + case "hide": + slide.hide(mode); + break; + + case "show": + slide.show(mode); + break; + + case "toggle": + var flag = this.retrieve("slide:flag", slide.open); + slide[flag ? 
"slideOut" : "slideIn"](mode); + this.store("slide:flag", !flag); + toggle = true; + break; + + default: + slide.start(how, mode); + } + if (!toggle) this.eliminate("slide:flag"); + return this; + } +}); + +Request.JSONP = new Class({ + Implements: [ Chain, Events, Options ], + options: { + onRequest: function(src) { + if (this.options.log && window.console && console.log) { + console.log("JSONP retrieving script with url:" + src); + } + }, + onError: function(src) { + if (this.options.log && window.console && console.warn) { + console.warn("JSONP " + src + " will fail in Internet Explorer, which enforces a 2083 bytes length limit on URIs"); + } + }, + url: "", + callbackKey: "callback", + injectScript: document.head, + data: "", + link: "ignore", + timeout: 0, + log: false + }, + initialize: function(options) { + this.setOptions(options); + }, + send: function(options) { + if (!Request.prototype.check.call(this, options)) return this; + this.running = true; + var type = typeOf(options); + if (type == "string" || type == "element") options = { + data: options + }; + options = Object.merge(this.options, options || {}); + var data = options.data; + switch (typeOf(data)) { + case "element": + data = document.id(data).toQueryString(); + break; + + case "object": + case "hash": + data = Object.toQueryString(data); + } + var index = this.index = Request.JSONP.counter++; + var src = options.url + (options.url.test("\\?") ? "&" : "?") + options.callbackKey + "=Request.JSONP.request_map.request_" + index + (data ? 
"&" + data : ""); + if (src.length > 2083) this.fireEvent("error", src); + Request.JSONP.request_map["request_" + index] = function() { + this.success(arguments, index); + }.bind(this); + var script = this.getScript(src).inject(options.injectScript); + this.fireEvent("request", [ src, script ]); + if (options.timeout) this.timeout.delay(options.timeout, this); + return this; + }, + getScript: function(src) { + if (!this.script) this.script = new Element("script", { + type: "text/javascript", + async: true, + src: src + }); + return this.script; + }, + success: function(args, index) { + if (!this.running) return; + this.clear().fireEvent("complete", args).fireEvent("success", args).callChain(); + }, + cancel: function() { + if (this.running) this.clear().fireEvent("cancel"); + return this; + }, + isRunning: function() { + return !!this.running; + }, + clear: function() { + this.running = false; + if (this.script) { + this.script.destroy(); + this.script = null; + } + return this; + }, + timeout: function() { + if (this.running) { + this.running = false; + this.fireEvent("timeout", [ this.script.get("src"), this.script ]).fireEvent("failure").cancel(); + } + return this; + } +}); + +Request.JSONP.counter = 0; + +Request.JSONP.request_map = {}; + +Request.implement({ + options: { + initialDelay: 5e3, + delay: 5e3, + limit: 6e4 + }, + startTimer: function(data) { + var fn = function() { + if (!this.running) this.send({ + data: data + }); + }; + this.lastDelay = this.options.initialDelay; + this.timer = fn.delay(this.lastDelay, this); + this.completeCheck = function(response) { + clearTimeout(this.timer); + this.lastDelay = response ? 
this.options.delay : (this.lastDelay + this.options.delay).min(this.options.limit); + this.timer = fn.delay(this.lastDelay, this); + }; + return this.addEvent("complete", this.completeCheck); + }, + stopTimer: function() { + clearTimeout(this.timer); + return this.removeEvent("complete", this.completeCheck); + } +}); + +Date.implement({ + timeDiffInWords: function(to) { + return Date.distanceOfTimeInWords(this, to || new Date()); + }, + timeDiff: function(to, separator) { + if (to == null) to = new Date(); + var delta = ((to - this) / 1e3).floor().abs(); + var vals = [], durations = [ 60, 60, 24, 365, 0 ], names = [ "s", "m", "h", "d", "y" ], value, duration; + for (var item = 0; item < durations.length; item++) { + if (item && !delta) break; + value = delta; + if (duration = durations[item]) { + value = delta % duration; + delta = (delta / duration).floor(); + } + vals.unshift(value + (names[item] || "")); + } + return vals.join(separator || ":"); + } +}).extend({ + distanceOfTimeInWords: function(from, to) { + return Date.getTimePhrase(((to - from) / 1e3).toInt()); + }, + getTimePhrase: function(delta) { + var suffix = delta < 0 ? 
"Until" : "Ago"; + if (delta < 0) delta *= -1; + var units = { + minute: 60, + hour: 60, + day: 24, + week: 7, + month: 52 / 12, + year: 12, + eon: Infinity + }; + var msg = "lessThanMinute"; + for (var unit in units) { + var interval = units[unit]; + if (delta < 1.5 * interval) { + if (delta > .75 * interval) msg = unit; + break; + } + delta /= interval; + msg = unit + "s"; + } + delta = delta.round(); + return Date.getMsg(msg + suffix, delta).substitute({ + delta: delta + }); + } +}).defineParsers({ + re: /^(?:tod|tom|yes)/i, + handler: function(bits) { + var d = new Date().clearTime(); + switch (bits[0]) { + case "tom": + return d.increment(); + + case "yes": + return d.decrement(); + + default: + return d; + } + } +}, { + re: /^(next|last) ([a-z]+)$/i, + handler: function(bits) { + var d = new Date().clearTime(); + var day = d.getDay(); + var newDay = Date.parseDay(bits[2], true); + var addDays = newDay - day; + if (newDay <= day) addDays += 7; + if (bits[1] == "last") addDays -= 7; + return d.set("date", d.getDate() + addDays); + } +}).alias("timeAgoInWords", "timeDiffInWords"); + +(function() { + var defaultSortFunction = function(a, b) { + return a > b ? 1 : a < b ? -1 : 0; + }; + Array.implement({ + stableSort: function(compare) { + return Browser.chrome || Browser.firefox2 || Browser.opera9 ? this.mergeSort(compare) : this.sort(compare); + }, + mergeSort: function(compare, token) { + compare = compare || defaultSortFunction; + if (this.length > 1) { + var right = this.splice(Math.floor(this.length / 2)).mergeSort(compare); + var left = this.splice(0).mergeSort(compare); + while (left.length > 0 || right.length > 0) { + this.push(right.length === 0 ? left.shift() : left.length === 0 ? right.shift() : compare(left[0], right[0]) > 0 ? right.shift() : left.shift()); + } + } + return this; + } + }); +})(); + +Class.Binds = new Class({ + $bound: {}, + bound: function(name) { + return this.$bound[name] ? 
this.$bound[name] : this.$bound[name] = this[name].bind(this); + } +}); + +(function() { + var events = Element.NativeEvents, location = window.location, base = location.pathname, history = window.history, hasPushState = "pushState" in history, event = hasPushState ? "popstate" : "hashchange"; + this.History = new new Class({ + Implements: [ Class.Binds, Events ], + initialize: hasPushState ? function() { + events[event] = 2; + window.addEvent(event, this.bound("pop")); + } : function() { + events[event] = 1; + window.addEvent(event, this.bound("pop")); + this.hash = location.hash; + var hashchange = "onhashchange" in window; + if (!(hashchange && (document.documentMode === undefined || document.documentMode > 7))) this.timer = this.check.periodical(200, this); + }, + push: hasPushState ? function(url, title, state) { + if (base && base != url) base = null; + history.pushState(state || null, title || null, url); + this.onChange(url, state); + } : function(url) { + location.hash = url; + }, + replace: hasPushState ? function(url, title, state) { + history.replaceState(state || null, title || null, url); + } : function(url) { + this.hash = "#" + url; + this.push(url); + }, + pop: hasPushState ? function(event) { + var url = location.pathname; + if (url == base) { + base = null; + return; + } + this.onChange(url, event.event.state); + } : function() { + var hash = location.hash; + if (this.hash == hash) return; + this.hash = hash; + this.onChange(hash.substr(1)); + }, + onChange: function(url, state) { + this.fireEvent("change", [ url, state || {} ]); + }, + back: function() { + history.back(); + }, + forward: function() { + history.forward(); + }, + getPath: function() { + return hasPushState ? 
location.pathname : location.hash.substr(1); + }, + hasPushState: function() { + return hasPushState; + }, + check: function() { + if (this.hash != location.hash) this.pop(); + } + })(); +})(); + +History.handleInitialState = function(base) { + if (!base) base = ""; + var location = window.location, pathname = location.pathname.substr(base.length), hash = location.hash, hasPushState = History.hasPushState(); + if (!hasPushState && pathname.length > 1) { + window.location = (base || "/") + "#" + pathname; + return true; + } + if (!hash || hash.length <= 1) return false; + if (hasPushState) { + (function() { + History.push(hash.substr(1)); + }).delay(1); + return false; + } + if (!pathname || pathname == "/") return false; + window.location = (base || "/") + hash; + return true; +}; + +(function() { + var Color, DecomposedMatrix, DecomposedMatrix2D, InterpolableArray, InterpolableColor, InterpolableObject, InterpolableWithUnit, Matrix, Matrix2D, Set, Vector, addTimeout, animationTick, animations, animationsTimeouts, applyDefaults, applyFrame, applyProperties, baseSVG, cacheFn, cancelTimeout, clone, createInterpolable, defaultValueForKey, degProperties, dynamics, getCurrentProperties, interpolate, isDocumentVisible, isSVGElement, lastTime, leftDelayForTimeout, makeArrayFn, observeVisibilityChange, parseProperties, prefixFor, propertyWithPrefix, pxProperties, rAF, roundf, runLoopPaused, runLoopRunning, runLoopTick, setRealTimeout, slow, slowRatio, startAnimation, startRunLoop, svgProperties, timeBeforeVisibilityChange, timeoutLastId, timeouts, toDashed, transformProperties, transformValueForProperty, unitForProperty, __bind = function(fn, me) { + return function() { + return fn.apply(me, arguments); + }; + }; + isDocumentVisible = function() { + return document.visibilityState === "visible" || dynamics.tests != null; + }; + observeVisibilityChange = function() { + var fns; + fns = []; + if (typeof document !== "undefined" && document !== null) { + 
document.addEventListener("visibilitychange", function() { + var fn, _i, _len, _results; + _results = []; + for (_i = 0, _len = fns.length; _i < _len; _i++) { + fn = fns[_i]; + _results.push(fn(isDocumentVisible())); + } + return _results; + }); + } + return function(fn) { + return fns.push(fn); + }; + }(); + clone = function(o) { + var k, newO, v; + newO = {}; + for (k in o) { + v = o[k]; + newO[k] = v; + } + return newO; + }; + cacheFn = function(func) { + var data; + data = {}; + return function() { + var k, key, result, _i, _len; + key = ""; + for (_i = 0, _len = arguments.length; _i < _len; _i++) { + k = arguments[_i]; + key += k.toString() + ","; + } + result = data[key]; + if (!result) { + data[key] = result = func.apply(this, arguments); + } + return result; + }; + }; + makeArrayFn = function(fn) { + return function(el) { + var args, i, res; + if (el instanceof Array || el instanceof NodeList || el instanceof HTMLCollection) { + res = function() { + var _i, _ref, _results; + _results = []; + for (i = _i = 0, _ref = el.length; 0 <= _ref ? _i < _ref : _i > _ref; i = 0 <= _ref ? ++_i : --_i) { + args = Array.prototype.slice.call(arguments, 1); + args.splice(0, 0, el[i]); + _results.push(fn.apply(this, args)); + } + return _results; + }.apply(this, arguments); + return res; + } + return fn.apply(this, arguments); + }; + }; + applyDefaults = function(options, defaults) { + var k, v, _results; + _results = []; + for (k in defaults) { + v = defaults[k]; + _results.push(options[k] != null ? 
options[k] : options[k] = v); + } + return _results; + }; + applyFrame = function(el, properties) { + var k, v, _results; + if (el.style != null) { + return applyProperties(el, properties); + } else { + _results = []; + for (k in properties) { + v = properties[k]; + _results.push(el[k] = v.format()); + } + return _results; + } + }; + applyProperties = function(el, properties) { + var isSVG, k, matrix, transforms, v; + properties = parseProperties(properties); + transforms = []; + isSVG = isSVGElement(el); + for (k in properties) { + v = properties[k]; + if (transformProperties.contains(k)) { + transforms.push([ k, v ]); + } else { + if (v.format != null) { + v = v.format(); + } else { + v = "" + v + unitForProperty(k, v); + } + if (isSVG && svgProperties.contains(k)) { + el.setAttribute(k, v); + } else { + el.style[propertyWithPrefix(k)] = v; + } + } + } + if (transforms.length > 0) { + if (isSVG) { + matrix = new Matrix2D(); + matrix.applyProperties(transforms); + return el.setAttribute("transform", matrix.decompose().format()); + } else { + v = transforms.map(function(transform) { + return transformValueForProperty(transform[0], transform[1]); + }).join(" "); + return el.style[propertyWithPrefix("transform")] = v; + } + } + }; + isSVGElement = function(el) { + var _ref, _ref1; + if (typeof SVGElement !== "undefined" && SVGElement !== null && (typeof SVGSVGElement !== "undefined" && SVGSVGElement !== null)) { + return el instanceof SVGElement && !(el instanceof SVGSVGElement); + } else { + return (_ref = (_ref1 = dynamics.tests) != null ? typeof _ref1.isSVG === "function" ? _ref1.isSVG(el) : void 0 : void 0) != null ? 
_ref : false; + } + }; + roundf = function(v, decimal) { + var d; + d = Math.pow(10, decimal); + return Math.round(v * d) / d; + }; + Set = function() { + function Set(array) { + var v, _i, _len; + this.obj = {}; + for (_i = 0, _len = array.length; _i < _len; _i++) { + v = array[_i]; + this.obj[v] = 1; + } + } + Set.prototype.contains = function(v) { + return this.obj[v] === 1; + }; + return Set; + }(); + toDashed = function(str) { + return str.replace(/([A-Z])/g, function($1) { + return "-" + $1.toLowerCase(); + }); + }; + pxProperties = new Set("marginTop,marginLeft,marginBottom,marginRight,paddingTop,paddingLeft,paddingBottom,paddingRight,top,left,bottom,right,translateX,translateY,translateZ,perspectiveX,perspectiveY,perspectiveZ,width,height,maxWidth,maxHeight,minWidth,minHeight,borderRadius".split(",")); + degProperties = new Set("rotate,rotateX,rotateY,rotateZ,skew,skewX,skewY,skewZ".split(",")); + transformProperties = new Set("translate,translateX,translateY,translateZ,scale,scaleX,scaleY,scaleZ,rotate,rotateX,rotateY,rotateZ,rotateC,rotateCX,rotateCY,skew,skewX,skewY,skewZ,perspective".split(",")); + svgProperties = new Set("accent-height,ascent,azimuth,baseFrequency,baseline-shift,bias,cx,cy,d,diffuseConstant,divisor,dx,dy,elevation,filterRes,fx,fy,gradientTransform,height,k1,k2,k3,k4,kernelMatrix,kernelUnitLength,letter-spacing,limitingConeAngle,markerHeight,markerWidth,numOctaves,order,overline-position,overline-thickness,pathLength,points,pointsAtX,pointsAtY,pointsAtZ,r,radius,rx,ry,seed,specularConstant,specularExponent,stdDeviation,stop-color,stop-opacity,strikethrough-position,strikethrough-thickness,surfaceScale,target,targetX,targetY,transform,underline-position,underline-thickness,viewBox,width,x,x1,x2,y,y1,y2,z".split(",")); + unitForProperty = function(k, v) { + if (typeof v !== "number") { + return ""; + } + if (pxProperties.contains(k)) { + return "px"; + } else if (degProperties.contains(k)) { + return "deg"; + } + return ""; + }; + 
transformValueForProperty = function(k, v) { + var match, unit; + match = ("" + v).match(/^([0-9.-]*)([^0-9]*)$/); + if (match != null) { + v = match[1]; + unit = match[2]; + } else { + v = parseFloat(v); + } + v = roundf(parseFloat(v), 10); + if (unit == null || unit === "") { + unit = unitForProperty(k, v); + } + return "" + k + "(" + v + unit + ")"; + }; + parseProperties = function(properties) { + var axis, match, parsed, property, value, _i, _len, _ref; + parsed = {}; + for (property in properties) { + value = properties[property]; + if (transformProperties.contains(property)) { + match = property.match(/(translate|rotateC|rotate|skew|scale|perspective)(X|Y|Z|)/); + if (match && match[2].length > 0) { + parsed[property] = value; + } else { + _ref = [ "X", "Y", "Z" ]; + for (_i = 0, _len = _ref.length; _i < _len; _i++) { + axis = _ref[_i]; + parsed[match[1] + axis] = value; + } + } + } else { + parsed[property] = value; + } + } + return parsed; + }; + defaultValueForKey = function(key) { + var v; + v = key === "opacity" ? 1 : 0; + return "" + v + unitForProperty(key, v); + }; + getCurrentProperties = function(el, keys) { + var isSVG, key, matrix, properties, style, v, _i, _j, _len, _len1, _ref; + properties = {}; + isSVG = isSVGElement(el); + if (el.style != null) { + style = window.getComputedStyle(el, null); + for (_i = 0, _len = keys.length; _i < _len; _i++) { + key = keys[_i]; + if (transformProperties.contains(key)) { + if (properties["transform"] == null) { + if (isSVG) { + matrix = new Matrix2D((_ref = el.transform.baseVal.consolidate()) != null ? 
_ref.matrix : void 0); + } else { + matrix = Matrix.fromTransform(style[propertyWithPrefix("transform")]); + } + properties["transform"] = matrix.decompose(); + } + } else { + v = style[key]; + if (v == null && svgProperties.contains(key)) { + v = el.getAttribute(key); + } + if (v === "" || v == null) { + v = defaultValueForKey(key); + } + properties[key] = createInterpolable(v); + } + } + } else { + for (_j = 0, _len1 = keys.length; _j < _len1; _j++) { + key = keys[_j]; + properties[key] = createInterpolable(el[key]); + } + } + return properties; + }; + createInterpolable = function(value) { + var interpolable, klass, klasses, _i, _len; + klasses = [ InterpolableColor, InterpolableArray, InterpolableObject, InterpolableWithUnit ]; + for (_i = 0, _len = klasses.length; _i < _len; _i++) { + klass = klasses[_i]; + interpolable = klass.create(value); + if (interpolable != null) { + return interpolable; + } + } + return null; + }; + InterpolableObject = function() { + function InterpolableObject(obj) { + this.format = __bind(this.format, this); + this.interpolate = __bind(this.interpolate, this); + this.obj = obj; + } + InterpolableObject.prototype.interpolate = function(endInterpolable, t) { + var end, k, newObj, start, v; + start = this.obj; + end = endInterpolable.obj; + newObj = {}; + for (k in start) { + v = start[k]; + if (v.interpolate != null) { + newObj[k] = v.interpolate(end[k], t); + } else { + newObj[k] = v; + } + } + return new InterpolableObject(newObj); + }; + InterpolableObject.prototype.format = function() { + return this.obj; + }; + InterpolableObject.create = function(value) { + var k, obj, v; + if (value instanceof Object) { + obj = {}; + for (k in value) { + v = value[k]; + obj[k] = createInterpolable(v); + } + return new InterpolableObject(obj); + } + return null; + }; + return InterpolableObject; + }(); + InterpolableWithUnit = function() { + function InterpolableWithUnit(value, prefix, suffix) { + this.prefix = prefix; + this.suffix = suffix; + 
this.format = __bind(this.format, this); + this.interpolate = __bind(this.interpolate, this); + this.value = parseFloat(value); + } + InterpolableWithUnit.prototype.interpolate = function(endInterpolable, t) { + var end, start; + start = this.value; + end = endInterpolable.value; + return new InterpolableWithUnit((end - start) * t + start, endInterpolable.prefix || this.prefix, endInterpolable.suffix || this.suffix); + }; + InterpolableWithUnit.prototype.format = function() { + if (this.prefix == null && this.suffix == null) { + return roundf(this.value, 5); + } + return this.prefix + roundf(this.value, 5) + this.suffix; + }; + InterpolableWithUnit.create = function(value) { + var match; + if (typeof value !== "string") { + return new InterpolableWithUnit(value); + } + match = ("" + value).match("([^0-9.+-]*)([0-9.+-]+)([^0-9.+-]*)"); + if (match != null) { + return new InterpolableWithUnit(match[2], match[1], match[3]); + } + return null; + }; + return InterpolableWithUnit; + }(); + InterpolableArray = function() { + function InterpolableArray(values, sep) { + this.values = values; + this.sep = sep; + this.format = __bind(this.format, this); + this.interpolate = __bind(this.interpolate, this); + } + InterpolableArray.prototype.interpolate = function(endInterpolable, t) { + var end, i, newValues, start, _i, _ref; + start = this.values; + end = endInterpolable.values; + newValues = []; + for (i = _i = 0, _ref = Math.min(start.length, end.length); 0 <= _ref ? _i < _ref : _i > _ref; i = 0 <= _ref ? 
++_i : --_i) { + if (start[i].interpolate != null) { + newValues.push(start[i].interpolate(end[i], t)); + } else { + newValues.push(start[i]); + } + } + return new InterpolableArray(newValues, this.sep); + }; + InterpolableArray.prototype.format = function() { + var values; + values = this.values.map(function(val) { + if (val.format != null) { + return val.format(); + } else { + return val; + } + }); + if (this.sep != null) { + return values.join(this.sep); + } else { + return values; + } + }; + InterpolableArray.createFromArray = function(arr, sep) { + var values; + values = arr.map(function(val) { + return createInterpolable(val) || val; + }); + values = values.filter(function(val) { + return val != null; + }); + return new InterpolableArray(values, sep); + }; + InterpolableArray.create = function(value) { + var arr, sep, seps, _i, _len; + if (value instanceof Array) { + return InterpolableArray.createFromArray(value, null); + } + if (typeof value !== "string") { + return; + } + seps = [ " ", ",", "|", ";", "/", ":" ]; + for (_i = 0, _len = seps.length; _i < _len; _i++) { + sep = seps[_i]; + arr = value.split(sep); + if (arr.length > 1) { + return InterpolableArray.createFromArray(arr, sep); + } + } + return null; + }; + return InterpolableArray; + }(); + Color = function() { + function Color(rgb, format) { + this.rgb = rgb != null ? 
rgb : {}; + this.format = format; + this.toRgba = __bind(this.toRgba, this); + this.toRgb = __bind(this.toRgb, this); + this.toHex = __bind(this.toHex, this); + } + Color.fromHex = function(hex) { + var hex3, result; + hex3 = hex.match(/^#([a-f\d]{1})([a-f\d]{1})([a-f\d]{1})$/i); + if (hex3 != null) { + hex = "#" + hex3[1] + hex3[1] + hex3[2] + hex3[2] + hex3[3] + hex3[3]; + } + result = hex.match(/^#([a-f\d]{2})([a-f\d]{2})([a-f\d]{2})$/i); + if (result != null) { + return new Color({ + r: parseInt(result[1], 16), + g: parseInt(result[2], 16), + b: parseInt(result[3], 16), + a: 1 + }, "hex"); + } + return null; + }; + Color.fromRgb = function(rgb) { + var match, _ref; + match = rgb.match(/^rgba?\(([0-9.]*), ?([0-9.]*), ?([0-9.]*)(?:, ?([0-9.]*))?\)$/); + if (match != null) { + return new Color({ + r: parseFloat(match[1]), + g: parseFloat(match[2]), + b: parseFloat(match[3]), + a: parseFloat((_ref = match[4]) != null ? _ref : 1) + }, match[4] != null ? "rgba" : "rgb"); + } + return null; + }; + Color.componentToHex = function(c) { + var hex; + hex = c.toString(16); + if (hex.length === 1) { + return "0" + hex; + } else { + return hex; + } + }; + Color.prototype.toHex = function() { + return "#" + Color.componentToHex(this.rgb.r) + Color.componentToHex(this.rgb.g) + Color.componentToHex(this.rgb.b); + }; + Color.prototype.toRgb = function() { + return "rgb(" + this.rgb.r + ", " + this.rgb.g + ", " + this.rgb.b + ")"; + }; + Color.prototype.toRgba = function() { + return "rgba(" + this.rgb.r + ", " + this.rgb.g + ", " + this.rgb.b + ", " + this.rgb.a + ")"; + }; + return Color; + }(); + InterpolableColor = function() { + function InterpolableColor(color) { + this.color = color; + this.format = __bind(this.format, this); + this.interpolate = __bind(this.interpolate, this); + } + InterpolableColor.prototype.interpolate = function(endInterpolable, t) { + var end, k, rgb, start, v, _i, _len, _ref; + start = this.color; + end = endInterpolable.color; + rgb = {}; + _ref = 
[ "r", "g", "b" ]; + for (_i = 0, _len = _ref.length; _i < _len; _i++) { + k = _ref[_i]; + v = Math.round((end.rgb[k] - start.rgb[k]) * t + start.rgb[k]); + rgb[k] = Math.min(255, Math.max(0, v)); + } + k = "a"; + v = roundf((end.rgb[k] - start.rgb[k]) * t + start.rgb[k], 5); + rgb[k] = Math.min(1, Math.max(0, v)); + return new InterpolableColor(new Color(rgb, end.format)); + }; + InterpolableColor.prototype.format = function() { + if (this.color.format === "hex") { + return this.color.toHex(); + } else if (this.color.format === "rgb") { + return this.color.toRgb(); + } else if (this.color.format === "rgba") { + return this.color.toRgba(); + } + }; + InterpolableColor.create = function(value) { + var color; + if (typeof value !== "string") { + return; + } + color = Color.fromHex(value) || Color.fromRgb(value); + if (color != null) { + return new InterpolableColor(color); + } + return null; + }; + return InterpolableColor; + }(); + DecomposedMatrix2D = function() { + function DecomposedMatrix2D(props) { + this.props = props; + this.applyRotateCenter = __bind(this.applyRotateCenter, this); + this.format = __bind(this.format, this); + this.interpolate = __bind(this.interpolate, this); + } + DecomposedMatrix2D.prototype.interpolate = function(endMatrix, t) { + var i, k, newProps, _i, _j, _k, _l, _len, _len1, _ref, _ref1, _ref2; + newProps = {}; + _ref = [ "translate", "scale", "rotate" ]; + for (_i = 0, _len = _ref.length; _i < _len; _i++) { + k = _ref[_i]; + newProps[k] = []; + for (i = _j = 0, _ref1 = this.props[k].length; 0 <= _ref1 ? _j < _ref1 : _j > _ref1; i = 0 <= _ref1 ? 
++_j : --_j) { + newProps[k][i] = (endMatrix.props[k][i] - this.props[k][i]) * t + this.props[k][i]; + } + } + for (i = _k = 1; _k <= 2; i = ++_k) { + newProps["rotate"][i] = endMatrix.props["rotate"][i]; + } + _ref2 = [ "skew" ]; + for (_l = 0, _len1 = _ref2.length; _l < _len1; _l++) { + k = _ref2[_l]; + newProps[k] = (endMatrix.props[k] - this.props[k]) * t + this.props[k]; + } + return new DecomposedMatrix2D(newProps); + }; + DecomposedMatrix2D.prototype.format = function() { + return "translate(" + this.props.translate.join(",") + ") rotate(" + this.props.rotate.join(",") + ") skewX(" + this.props.skew + ") scale(" + this.props.scale.join(",") + ")"; + }; + DecomposedMatrix2D.prototype.applyRotateCenter = function(rotateC) { + var i, m, m2d, negativeTranslate, _i, _results; + m = baseSVG.createSVGMatrix(); + m = m.translate(rotateC[0], rotateC[1]); + m = m.rotate(this.props.rotate[0]); + m = m.translate(-rotateC[0], -rotateC[1]); + m2d = new Matrix2D(m); + negativeTranslate = m2d.decompose().props.translate; + _results = []; + for (i = _i = 0; _i <= 1; i = ++_i) { + _results.push(this.props.translate[i] -= negativeTranslate[i]); + } + return _results; + }; + return DecomposedMatrix2D; + }(); + baseSVG = typeof document !== "undefined" && document !== null ? 
document.createElementNS("http://www.w3.org/2000/svg", "svg") : void 0; + Matrix2D = function() { + function Matrix2D(m) { + this.m = m; + this.applyProperties = __bind(this.applyProperties, this); + this.decompose = __bind(this.decompose, this); + if (!this.m) { + this.m = baseSVG.createSVGMatrix(); + } + } + Matrix2D.prototype.decompose = function() { + var kx, ky, kz, r0, r1; + r0 = new Vector([ this.m.a, this.m.b ]); + r1 = new Vector([ this.m.c, this.m.d ]); + kx = r0.length(); + kz = r0.dot(r1); + r0 = r0.normalize(); + ky = r1.combine(r0, 1, -kz).length(); + return new DecomposedMatrix2D({ + translate: [ this.m.e, this.m.f ], + rotate: [ Math.atan2(this.m.b, this.m.a) * 180 / Math.PI, this.rotateCX, this.rotateCY ], + scale: [ kx, ky ], + skew: kz / ky * 180 / Math.PI + }); + }; + Matrix2D.prototype.applyProperties = function(properties) { + var hash, k, props, v, _i, _len, _ref, _ref1; + hash = {}; + for (_i = 0, _len = properties.length; _i < _len; _i++) { + props = properties[_i]; + hash[props[0]] = props[1]; + } + for (k in hash) { + v = hash[k]; + if (k === "translateX") { + this.m = this.m.translate(v, 0); + } else if (k === "translateY") { + this.m = this.m.translate(0, v); + } else if (k === "scaleX") { + this.m = this.m.scale(v, 1); + } else if (k === "scaleY") { + this.m = this.m.scale(1, v); + } else if (k === "rotateZ") { + this.m = this.m.rotate(v); + } else if (k === "skewX") { + this.m = this.m.skewX(v); + } else if (k === "skewY") { + this.m = this.m.skewY(v); + } + } + this.rotateCX = (_ref = hash.rotateCX) != null ? _ref : 0; + return this.rotateCY = (_ref1 = hash.rotateCY) != null ? 
_ref1 : 0; + }; + return Matrix2D; + }(); + Vector = function() { + function Vector(els) { + this.els = els; + this.combine = __bind(this.combine, this); + this.normalize = __bind(this.normalize, this); + this.length = __bind(this.length, this); + this.cross = __bind(this.cross, this); + this.dot = __bind(this.dot, this); + this.e = __bind(this.e, this); + } + Vector.prototype.e = function(i) { + if (i < 1 || i > this.els.length) { + return null; + } else { + return this.els[i - 1]; + } + }; + Vector.prototype.dot = function(vector) { + var V, n, product; + V = vector.els || vector; + product = 0; + n = this.els.length; + if (n !== V.length) { + return null; + } + n += 1; + while (--n) { + product += this.els[n - 1] * V[n - 1]; + } + return product; + }; + Vector.prototype.cross = function(vector) { + var A, B; + B = vector.els || vector; + if (this.els.length !== 3 || B.length !== 3) { + return null; + } + A = this.els; + return new Vector([ A[1] * B[2] - A[2] * B[1], A[2] * B[0] - A[0] * B[2], A[0] * B[1] - A[1] * B[0] ]); + }; + Vector.prototype.length = function() { + var a, e, _i, _len, _ref; + a = 0; + _ref = this.els; + for (_i = 0, _len = _ref.length; _i < _len; _i++) { + e = _ref[_i]; + a += Math.pow(e, 2); + } + return Math.sqrt(a); + }; + Vector.prototype.normalize = function() { + var e, i, length, newElements, _ref; + length = this.length(); + newElements = []; + _ref = this.els; + for (i in _ref) { + e = _ref[i]; + newElements[i] = e / length; + } + return new Vector(newElements); + }; + Vector.prototype.combine = function(b, ascl, bscl) { + var i, result, _i, _ref; + result = []; + for (i = _i = 0, _ref = this.els.length; 0 <= _ref ? _i < _ref : _i > _ref; i = 0 <= _ref ? 
++_i : --_i) { + result[i] = ascl * this.els[i] + bscl * b.els[i]; + } + return new Vector(result); + }; + return Vector; + }(); + DecomposedMatrix = function() { + function DecomposedMatrix() { + this.toMatrix = __bind(this.toMatrix, this); + this.format = __bind(this.format, this); + this.interpolate = __bind(this.interpolate, this); + } + DecomposedMatrix.prototype.interpolate = function(decomposedB, t, only) { + var angle, decomposed, decomposedA, i, invscale, invth, k, qa, qb, scale, th, _i, _j, _k, _l, _len, _ref, _ref1; + if (only == null) { + only = null; + } + decomposedA = this; + decomposed = new DecomposedMatrix(); + _ref = [ "translate", "scale", "skew", "perspective" ]; + for (_i = 0, _len = _ref.length; _i < _len; _i++) { + k = _ref[_i]; + decomposed[k] = []; + for (i = _j = 0, _ref1 = decomposedA[k].length - 1; 0 <= _ref1 ? _j <= _ref1 : _j >= _ref1; i = 0 <= _ref1 ? ++_j : --_j) { + if (only == null || only.indexOf(k) > -1 || only.indexOf("" + k + [ "x", "y", "z" ][i]) > -1) { + decomposed[k][i] = (decomposedB[k][i] - decomposedA[k][i]) * t + decomposedA[k][i]; + } else { + decomposed[k][i] = decomposedA[k][i]; + } + } + } + if (only == null || only.indexOf("rotate") !== -1) { + qa = decomposedA.quaternion; + qb = decomposedB.quaternion; + angle = qa[0] * qb[0] + qa[1] * qb[1] + qa[2] * qb[2] + qa[3] * qb[3]; + if (angle < 0) { + for (i = _k = 0; _k <= 3; i = ++_k) { + qa[i] = -qa[i]; + } + angle = -angle; + } + if (angle + 1 > .05) { + if (1 - angle >= .05) { + th = Math.acos(angle); + invth = 1 / Math.sin(th); + scale = Math.sin(th * (1 - t)) * invth; + invscale = Math.sin(th * t) * invth; + } else { + scale = 1 - t; + invscale = t; + } + } else { + qb[0] = -qa[1]; + qb[1] = qa[0]; + qb[2] = -qa[3]; + qb[3] = qa[2]; + scale = Math.sin(piDouble * (.5 - t)); + invscale = Math.sin(piDouble * t); + } + decomposed.quaternion = []; + for (i = _l = 0; _l <= 3; i = ++_l) { + decomposed.quaternion[i] = qa[i] * scale + qb[i] * invscale; + } + } else { + 
decomposed.quaternion = decomposedA.quaternion; + } + return decomposed; + }; + DecomposedMatrix.prototype.format = function() { + return this.toMatrix().toString(); + }; + DecomposedMatrix.prototype.toMatrix = function() { + var decomposedMatrix, i, j, match, matrix, quaternion, skew, temp, w, x, y, z, _i, _j, _k, _l; + decomposedMatrix = this; + matrix = Matrix.I(4); + for (i = _i = 0; _i <= 3; i = ++_i) { + matrix.els[i][3] = decomposedMatrix.perspective[i]; + } + quaternion = decomposedMatrix.quaternion; + x = quaternion[0]; + y = quaternion[1]; + z = quaternion[2]; + w = quaternion[3]; + skew = decomposedMatrix.skew; + match = [ [ 1, 0 ], [ 2, 0 ], [ 2, 1 ] ]; + for (i = _j = 2; _j >= 0; i = --_j) { + if (skew[i]) { + temp = Matrix.I(4); + temp.els[match[i][0]][match[i][1]] = skew[i]; + matrix = matrix.multiply(temp); + } + } + matrix = matrix.multiply(new Matrix([ [ 1 - 2 * (y * y + z * z), 2 * (x * y - z * w), 2 * (x * z + y * w), 0 ], [ 2 * (x * y + z * w), 1 - 2 * (x * x + z * z), 2 * (y * z - x * w), 0 ], [ 2 * (x * z - y * w), 2 * (y * z + x * w), 1 - 2 * (x * x + y * y), 0 ], [ 0, 0, 0, 1 ] ])); + for (i = _k = 0; _k <= 2; i = ++_k) { + for (j = _l = 0; _l <= 2; j = ++_l) { + matrix.els[i][j] *= decomposedMatrix.scale[i]; + } + matrix.els[3][i] = decomposedMatrix.translate[i]; + } + return matrix; + }; + return DecomposedMatrix; + }(); + Matrix = function() { + function Matrix(els) { + this.els = els; + this.toString = __bind(this.toString, this); + this.decompose = __bind(this.decompose, this); + this.inverse = __bind(this.inverse, this); + this.augment = __bind(this.augment, this); + this.toRightTriangular = __bind(this.toRightTriangular, this); + this.transpose = __bind(this.transpose, this); + this.multiply = __bind(this.multiply, this); + this.dup = __bind(this.dup, this); + this.e = __bind(this.e, this); + } + Matrix.prototype.e = function(i, j) { + if (i < 1 || i > this.els.length || j < 1 || j > this.els[0].length) { + return null; + } + return 
this.els[i - 1][j - 1]; + }; + Matrix.prototype.dup = function() { + return new Matrix(this.els); + }; + Matrix.prototype.multiply = function(matrix) { + var M, c, cols, elements, i, j, ki, kj, nc, ni, nj, returnVector, sum; + returnVector = matrix.modulus ? true : false; + M = matrix.els || matrix; + if (typeof M[0][0] === "undefined") { + M = new Matrix(M).els; + } + ni = this.els.length; + ki = ni; + kj = M[0].length; + cols = this.els[0].length; + elements = []; + ni += 1; + while (--ni) { + i = ki - ni; + elements[i] = []; + nj = kj; + nj += 1; + while (--nj) { + j = kj - nj; + sum = 0; + nc = cols; + nc += 1; + while (--nc) { + c = cols - nc; + sum += this.els[i][c] * M[c][j]; + } + elements[i][j] = sum; + } + } + M = new Matrix(elements); + if (returnVector) { + return M.col(1); + } else { + return M; + } + }; + Matrix.prototype.transpose = function() { + var cols, elements, i, j, ni, nj, rows; + rows = this.els.length; + cols = this.els[0].length; + elements = []; + ni = cols; + ni += 1; + while (--ni) { + i = cols - ni; + elements[i] = []; + nj = rows; + nj += 1; + while (--nj) { + j = rows - nj; + elements[i][j] = this.els[j][i]; + } + } + return new Matrix(elements); + }; + Matrix.prototype.toRightTriangular = function() { + var M, els, i, j, k, kp, multiplier, n, np, p, _i, _j, _ref, _ref1; + M = this.dup(); + n = this.els.length; + k = n; + kp = this.els[0].length; + while (--n) { + i = k - n; + if (M.els[i][i] === 0) { + for (j = _i = _ref = i + 1; _ref <= k ? _i < k : _i > k; j = _ref <= k ? ++_i : --_i) { + if (M.els[j][i] !== 0) { + els = []; + np = kp; + np += 1; + while (--np) { + p = kp - np; + els.push(M.els[i][p] + M.els[j][p]); + } + M.els[i] = els; + break; + } + } + } + if (M.els[i][i] !== 0) { + for (j = _j = _ref1 = i + 1; _ref1 <= k ? _j < k : _j > k; j = _ref1 <= k ? ++_j : --_j) { + multiplier = M.els[j][i] / M.els[i][i]; + els = []; + np = kp; + np += 1; + while (--np) { + p = kp - np; + els.push(p <= i ? 
0 : M.els[j][p] - M.els[i][p] * multiplier); + } + M.els[j] = els; + } + } + } + return M; + }; + Matrix.prototype.augment = function(matrix) { + var M, T, cols, i, j, ki, kj, ni, nj; + M = matrix.els || matrix; + if (typeof M[0][0] === "undefined") { + M = new Matrix(M).els; + } + T = this.dup(); + cols = T.els[0].length; + ni = T.els.length; + ki = ni; + kj = M[0].length; + if (ni !== M.length) { + return null; + } + ni += 1; + while (--ni) { + i = ki - ni; + nj = kj; + nj += 1; + while (--nj) { + j = kj - nj; + T.els[i][cols + j] = M[i][j]; + } + } + return T; + }; + Matrix.prototype.inverse = function() { + var M, divisor, els, i, inverse_elements, j, ki, kp, new_element, ni, np, p, _i; + ni = this.els.length; + ki = ni; + M = this.augment(Matrix.I(ni)).toRightTriangular(); + kp = M.els[0].length; + inverse_elements = []; + ni += 1; + while (--ni) { + i = ni - 1; + els = []; + np = kp; + inverse_elements[i] = []; + divisor = M.els[i][i]; + np += 1; + while (--np) { + p = kp - np; + new_element = M.els[i][p] / divisor; + els.push(new_element); + if (p >= ki) { + inverse_elements[i].push(new_element); + } + } + M.els[i] = els; + for (j = _i = 0; 0 <= i ? _i < i : _i > i; j = 0 <= i ? ++_i : --_i) { + els = []; + np = kp; + np += 1; + while (--np) { + p = kp - np; + els.push(M.els[j][p] - M.els[i][p] * M.els[j][i]); + } + M.els[j] = els; + } + } + return new Matrix(inverse_elements); + }; + Matrix.I = function(n) { + var els, i, j, k, nj; + els = []; + k = n; + n += 1; + while (--n) { + i = k - n; + els[i] = []; + nj = k; + nj += 1; + while (--nj) { + j = k - nj; + els[i][j] = i === j ? 
1 : 0; + } + } + return new Matrix(els); + }; + Matrix.prototype.decompose = function() { + var els, i, inversePerspectiveMatrix, j, k, matrix, pdum3, perspective, perspectiveMatrix, quaternion, result, rightHandSide, rotate, row, rowElement, s, scale, skew, t, translate, transposedInversePerspectiveMatrix, type, typeKey, v, w, x, y, z, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r; + matrix = this; + translate = []; + scale = []; + skew = []; + quaternion = []; + perspective = []; + els = []; + for (i = _i = 0; _i <= 3; i = ++_i) { + els[i] = []; + for (j = _j = 0; _j <= 3; j = ++_j) { + els[i][j] = matrix.els[i][j]; + } + } + if (els[3][3] === 0) { + return false; + } + for (i = _k = 0; _k <= 3; i = ++_k) { + for (j = _l = 0; _l <= 3; j = ++_l) { + els[i][j] /= els[3][3]; + } + } + perspectiveMatrix = matrix.dup(); + for (i = _m = 0; _m <= 2; i = ++_m) { + perspectiveMatrix.els[i][3] = 0; + } + perspectiveMatrix.els[3][3] = 1; + if (els[0][3] !== 0 || els[1][3] !== 0 || els[2][3] !== 0) { + rightHandSide = new Vector(els.slice(0, 4)[3]); + inversePerspectiveMatrix = perspectiveMatrix.inverse(); + transposedInversePerspectiveMatrix = inversePerspectiveMatrix.transpose(); + perspective = transposedInversePerspectiveMatrix.multiply(rightHandSide).els; + for (i = _n = 0; _n <= 2; i = ++_n) { + els[i][3] = 0; + } + els[3][3] = 1; + } else { + perspective = [ 0, 0, 0, 1 ]; + } + for (i = _o = 0; _o <= 2; i = ++_o) { + translate[i] = els[3][i]; + els[3][i] = 0; + } + row = []; + for (i = _p = 0; _p <= 2; i = ++_p) { + row[i] = new Vector(els[i].slice(0, 3)); + } + scale[0] = row[0].length(); + row[0] = row[0].normalize(); + skew[0] = row[0].dot(row[1]); + row[1] = row[1].combine(row[0], 1, -skew[0]); + scale[1] = row[1].length(); + row[1] = row[1].normalize(); + skew[0] /= scale[1]; + skew[1] = row[0].dot(row[2]); + row[2] = row[2].combine(row[0], 1, -skew[1]); + skew[2] = row[1].dot(row[2]); + row[2] = row[2].combine(row[1], 1, -skew[2]); + scale[2] = row[2].length(); + 
row[2] = row[2].normalize(); + skew[1] /= scale[2]; + skew[2] /= scale[2]; + pdum3 = row[1].cross(row[2]); + if (row[0].dot(pdum3) < 0) { + for (i = _q = 0; _q <= 2; i = ++_q) { + scale[i] *= -1; + for (j = _r = 0; _r <= 2; j = ++_r) { + row[i].els[j] *= -1; + } + } + } + rowElement = function(index, elementIndex) { + return row[index].els[elementIndex]; + }; + rotate = []; + rotate[1] = Math.asin(-rowElement(0, 2)); + if (Math.cos(rotate[1]) !== 0) { + rotate[0] = Math.atan2(rowElement(1, 2), rowElement(2, 2)); + rotate[2] = Math.atan2(rowElement(0, 1), rowElement(0, 0)); + } else { + rotate[0] = Math.atan2(-rowElement(2, 0), rowElement(1, 1)); + rotate[1] = 0; + } + t = rowElement(0, 0) + rowElement(1, 1) + rowElement(2, 2) + 1; + if (t > 1e-4) { + s = .5 / Math.sqrt(t); + w = .25 / s; + x = (rowElement(2, 1) - rowElement(1, 2)) * s; + y = (rowElement(0, 2) - rowElement(2, 0)) * s; + z = (rowElement(1, 0) - rowElement(0, 1)) * s; + } else if (rowElement(0, 0) > rowElement(1, 1) && rowElement(0, 0) > rowElement(2, 2)) { + s = Math.sqrt(1 + rowElement(0, 0) - rowElement(1, 1) - rowElement(2, 2)) * 2; + x = .25 * s; + y = (rowElement(0, 1) + rowElement(1, 0)) / s; + z = (rowElement(0, 2) + rowElement(2, 0)) / s; + w = (rowElement(2, 1) - rowElement(1, 2)) / s; + } else if (rowElement(1, 1) > rowElement(2, 2)) { + s = Math.sqrt(1 + rowElement(1, 1) - rowElement(0, 0) - rowElement(2, 2)) * 2; + x = (rowElement(0, 1) + rowElement(1, 0)) / s; + y = .25 * s; + z = (rowElement(1, 2) + rowElement(2, 1)) / s; + w = (rowElement(0, 2) - rowElement(2, 0)) / s; + } else { + s = Math.sqrt(1 + rowElement(2, 2) - rowElement(0, 0) - rowElement(1, 1)) * 2; + x = (rowElement(0, 2) + rowElement(2, 0)) / s; + y = (rowElement(1, 2) + rowElement(2, 1)) / s; + z = .25 * s; + w = (rowElement(1, 0) - rowElement(0, 1)) / s; + } + quaternion = [ x, y, z, w ]; + result = new DecomposedMatrix(); + result.translate = translate; + result.scale = scale; + result.skew = skew; + result.quaternion = 
quaternion; + result.perspective = perspective; + result.rotate = rotate; + for (typeKey in result) { + type = result[typeKey]; + for (k in type) { + v = type[k]; + if (isNaN(v)) { + type[k] = 0; + } + } + } + return result; + }; + Matrix.prototype.toString = function() { + var i, j, str, _i, _j; + str = "matrix3d("; + for (i = _i = 0; _i <= 3; i = ++_i) { + for (j = _j = 0; _j <= 3; j = ++_j) { + str += roundf(this.els[i][j], 10); + if (!(i === 3 && j === 3)) { + str += ","; + } + } + } + str += ")"; + return str; + }; + Matrix.matrixForTransform = cacheFn(function(transform) { + var matrixEl, result, style, _ref, _ref1, _ref2; + matrixEl = document.createElement("div"); + matrixEl.style.position = "absolute"; + matrixEl.style.visibility = "hidden"; + matrixEl.style[propertyWithPrefix("transform")] = transform; + document.body.appendChild(matrixEl); + style = window.getComputedStyle(matrixEl, null); + result = (_ref = (_ref1 = style.transform) != null ? _ref1 : style[propertyWithPrefix("transform")]) != null ? _ref : (_ref2 = dynamics.tests) != null ? _ref2.matrixForTransform(transform) : void 0; + document.body.removeChild(matrixEl); + return result; + }); + Matrix.fromTransform = function(transform) { + var digits, elements, i, match, matrixElements, _i; + match = transform != null ? 
transform.match(/matrix3?d?\(([-0-9,e \.]*)\)/) : void 0; + if (match) { + digits = match[1].split(","); + digits = digits.map(parseFloat); + if (digits.length === 6) { + elements = [ digits[0], digits[1], 0, 0, digits[2], digits[3], 0, 0, 0, 0, 1, 0, digits[4], digits[5], 0, 1 ]; + } else { + elements = digits; + } + } else { + elements = [ 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1 ]; + } + matrixElements = []; + for (i = _i = 0; _i <= 3; i = ++_i) { + matrixElements.push(elements.slice(i * 4, i * 4 + 4)); + } + return new Matrix(matrixElements); + }; + return Matrix; + }(); + prefixFor = cacheFn(function(property) { + var k, prefix, prop, propArray, propertyName, _i, _j, _len, _len1, _ref; + if (document.body.style[property] !== void 0) { + return ""; + } + propArray = property.split("-"); + propertyName = ""; + for (_i = 0, _len = propArray.length; _i < _len; _i++) { + prop = propArray[_i]; + propertyName += prop.substring(0, 1).toUpperCase() + prop.substring(1); + } + _ref = [ "Webkit", "Moz", "ms" ]; + for (_j = 0, _len1 = _ref.length; _j < _len1; _j++) { + prefix = _ref[_j]; + k = prefix + propertyName; + if (document.body.style[k] !== void 0) { + return prefix; + } + } + return ""; + }); + propertyWithPrefix = cacheFn(function(property) { + var prefix; + prefix = prefixFor(property); + if (prefix === "Moz") { + return "" + prefix + (property.substring(0, 1).toUpperCase() + property.substring(1)); + } + if (prefix !== "") { + return "-" + prefix.toLowerCase() + "-" + toDashed(property); + } + return toDashed(property); + }); + rAF = typeof window !== "undefined" && window !== null ? 
window.requestAnimationFrame : void 0; + animations = []; + animationsTimeouts = []; + slow = false; + slowRatio = 1; + if (typeof window !== "undefined" && window !== null) { + window.addEventListener("keyup", function(e) { + if (e.keyCode === 68 && e.shiftKey && e.ctrlKey) { + return dynamics.toggleSlow(); + } + }); + } + if (rAF == null) { + lastTime = 0; + rAF = function(callback) { + var currTime, id, timeToCall; + currTime = Date.now(); + timeToCall = Math.max(0, 16 - (currTime - lastTime)); + id = window.setTimeout(function() { + return callback(currTime + timeToCall); + }, timeToCall); + lastTime = currTime + timeToCall; + return id; + }; + } + runLoopRunning = false; + runLoopPaused = false; + startRunLoop = function() { + if (!runLoopRunning) { + runLoopRunning = true; + return rAF(runLoopTick); + } + }; + runLoopTick = function(t) { + var animation, toRemoveAnimations, _i, _len; + if (runLoopPaused) { + rAF(runLoopTick); + return; + } + toRemoveAnimations = []; + for (_i = 0, _len = animations.length; _i < _len; _i++) { + animation = animations[_i]; + if (!animationTick(t, animation)) { + toRemoveAnimations.push(animation); + } + } + animations = animations.filter(function(animation) { + return toRemoveAnimations.indexOf(animation) === -1; + }); + if (animations.length === 0) { + return runLoopRunning = false; + } else { + return rAF(runLoopTick); + } + }; + animationTick = function(t, animation) { + var key, properties, property, tt, y, _base, _base1, _ref; + if (animation.tStart == null) { + animation.tStart = t; + } + tt = (t - animation.tStart) / animation.options.duration; + y = animation.curve(tt); + properties = {}; + if (tt >= 1) { + if (animation.curve.initialForce) { + properties = animation.properties.start; + } else { + properties = animation.properties.end; + } + } else { + _ref = animation.properties.start; + for (key in _ref) { + property = _ref[key]; + properties[key] = interpolate(property, animation.properties.end[key], y); + } + } + 
applyFrame(animation.el, properties); + if (typeof (_base = animation.options).change === "function") { + _base.change(animation.el); + } + if (tt >= 1) { + if (typeof (_base1 = animation.options).complete === "function") { + _base1.complete(animation.el); + } + } + return tt < 1; + }; + interpolate = function(start, end, y) { + if (start != null && start.interpolate != null) { + return start.interpolate(end, y); + } + return null; + }; + startAnimation = function(el, properties, options, timeoutId) { + var endProperties, isSVG, k, matrix, startProperties, transforms, v, _base; + if (timeoutId != null) { + animationsTimeouts = animationsTimeouts.filter(function(timeout) { + return timeout.id !== timeoutId; + }); + } + dynamics.stop(el, { + timeout: false + }); + if (!options.animated) { + dynamics.css(el, properties); + if (typeof options.complete === "function") { + options.complete(this); + } + return; + } + properties = parseProperties(properties); + startProperties = getCurrentProperties(el, Object.keys(properties)); + endProperties = {}; + transforms = []; + for (k in properties) { + v = properties[k]; + if (transformProperties.contains(k)) { + transforms.push([ k, v ]); + } else { + endProperties[k] = createInterpolable(v); + if (endProperties[k] instanceof InterpolableWithUnit && el.style != null) { + endProperties[k].prefix = ""; + if ((_base = endProperties[k]).suffix == null) { + _base.suffix = unitForProperty(k, 0); + } + } + } + } + if (transforms.length > 0) { + isSVG = isSVGElement(el); + if (isSVG) { + matrix = new Matrix2D(); + matrix.applyProperties(transforms); + } else { + v = transforms.map(function(transform) { + return transformValueForProperty(transform[0], transform[1]); + }).join(" "); + matrix = Matrix.fromTransform(Matrix.matrixForTransform(v)); + } + endProperties["transform"] = matrix.decompose(); + if (isSVG) { + startProperties.transform.applyRotateCenter([ endProperties.transform.props.rotate[1], 
endProperties.transform.props.rotate[2] ]); + } + } + animations.push({ + el: el, + properties: { + start: startProperties, + end: endProperties + }, + options: options, + curve: options.type.call(options.type, options) + }); + return startRunLoop(); + }; + timeouts = []; + timeoutLastId = 0; + setRealTimeout = function(timeout) { + if (!isDocumentVisible()) { + return; + } + return timeout.realTimeoutId = setTimeout(function() { + timeout.fn(); + return cancelTimeout(timeout.id); + }, timeout.delay); + }; + addTimeout = function(fn, delay) { + var timeout; + timeoutLastId += 1; + timeout = { + id: timeoutLastId, + tStart: Date.now(), + fn: fn, + delay: delay, + originalDelay: delay + }; + setRealTimeout(timeout); + timeouts.push(timeout); + return timeoutLastId; + }; + cancelTimeout = function(id) { + return timeouts = timeouts.filter(function(timeout) { + if (timeout.id === id) { + clearTimeout(timeout.realTimeoutId); + } + return timeout.id !== id; + }); + }; + leftDelayForTimeout = function(time, timeout) { + var consumedDelay; + if (time != null) { + consumedDelay = time - timeout.tStart; + return timeout.originalDelay - consumedDelay; + } else { + return timeout.originalDelay; + } + }; + if (typeof window !== "undefined" && window !== null) { + window.addEventListener("unload", function() {}); + } + timeBeforeVisibilityChange = null; + observeVisibilityChange(function(visible) { + var animation, difference, timeout, _i, _j, _k, _len, _len1, _len2, _results; + runLoopPaused = !visible; + if (!visible) { + timeBeforeVisibilityChange = Date.now(); + _results = []; + for (_i = 0, _len = timeouts.length; _i < _len; _i++) { + timeout = timeouts[_i]; + _results.push(clearTimeout(timeout.realTimeoutId)); + } + return _results; + } else { + if (runLoopRunning) { + difference = Date.now() - timeBeforeVisibilityChange; + for (_j = 0, _len1 = animations.length; _j < _len1; _j++) { + animation = animations[_j]; + if (animation.tStart != null) { + animation.tStart += 
difference; + } + } + } + for (_k = 0, _len2 = timeouts.length; _k < _len2; _k++) { + timeout = timeouts[_k]; + timeout.delay = leftDelayForTimeout(timeBeforeVisibilityChange, timeout); + setRealTimeout(timeout); + } + return timeBeforeVisibilityChange = null; + } + }); + dynamics = {}; + dynamics.linear = function() { + return function(t) { + return t; + }; + }; + dynamics.spring = function(options) { + var A1, A2, decal, frequency, friction, s; + if (options == null) { + options = {}; + } + applyDefaults(options, arguments.callee.defaults); + frequency = Math.max(1, options.frequency / 20); + friction = Math.pow(20, options.friction / 100); + s = options.anticipationSize / 1e3; + decal = Math.max(0, s); + A1 = function(t) { + var M, a, b, x0, x1; + M = .8; + x0 = s / (1 - s); + x1 = 0; + b = (x0 - M * x1) / (x0 - x1); + a = (M - b) / x0; + return a * t * options.anticipationStrength / 100 + b; + }; + A2 = function(t) { + return Math.pow(friction / 10, -t) * (1 - t); + }; + return function(t) { + var A, At, a, angle, b, frictionT, y0, yS; + frictionT = t / (1 - s) - s / (1 - s); + if (t < s) { + yS = s / (1 - s) - s / (1 - s); + y0 = 0 / (1 - s) - s / (1 - s); + b = Math.acos(1 / A1(yS)); + a = (Math.acos(1 / A1(y0)) - b) / (frequency * -s); + A = A1; + } else { + A = A2; + b = 0; + a = 1; + } + At = A(frictionT); + angle = frequency * (t - s) * a + b; + return 1 - At * Math.cos(angle); + }; + }; + dynamics.bounce = function(options) { + var A, fn, frequency, friction; + if (options == null) { + options = {}; + } + applyDefaults(options, arguments.callee.defaults); + frequency = Math.max(1, options.frequency / 20); + friction = Math.pow(20, options.friction / 100); + A = function(t) { + return Math.pow(friction / 10, -t) * (1 - t); + }; + fn = function(t) { + var At, a, angle, b; + b = -3.14 / 2; + a = 1; + At = A(t); + angle = frequency * t * a + b; + return At * Math.cos(angle); + }; + fn.initialForce = true; + return fn; + }; + dynamics.gravity = 
function(options) { + var L, bounciness, curves, elasticity, fn, getPointInCurve, gravity; + if (options == null) { + options = {}; + } + applyDefaults(options, arguments.callee.defaults); + bounciness = Math.min(options.bounciness / 1250, .8); + elasticity = options.elasticity / 1e3; + gravity = 100; + curves = []; + L = function() { + var b, curve; + b = Math.sqrt(2 / gravity); + curve = { + a: -b, + b: b, + H: 1 + }; + if (options.initialForce) { + curve.a = 0; + curve.b = curve.b * 2; + } + while (curve.H > .001) { + L = curve.b - curve.a; + curve = { + a: curve.b, + b: curve.b + L * bounciness, + H: curve.H * bounciness * bounciness + }; + } + return curve.b; + }(); + getPointInCurve = function(a, b, H, t) { + var c, t2; + L = b - a; + t2 = 2 / L * t - 1 - a * 2 / L; + c = t2 * t2 * H - H + 1; + if (options.initialForce) { + c = 1 - c; + } + return c; + }; + (function() { + var L2, b, curve, _results; + b = Math.sqrt(2 / (gravity * L * L)); + curve = { + a: -b, + b: b, + H: 1 + }; + if (options.initialForce) { + curve.a = 0; + curve.b = curve.b * 2; + } + curves.push(curve); + L2 = L; + _results = []; + while (curve.b < 1 && curve.H > .001) { + L2 = curve.b - curve.a; + curve = { + a: curve.b, + b: curve.b + L2 * bounciness, + H: curve.H * elasticity + }; + _results.push(curves.push(curve)); + } + return _results; + })(); + fn = function(t) { + var curve, i, v; + i = 0; + curve = curves[i]; + while (!(t >= curve.a && t <= curve.b)) { + i += 1; + curve = curves[i]; + if (!curve) { + break; + } + } + if (!curve) { + v = options.initialForce ? 
0 : 1; + } else { + v = getPointInCurve(curve.a, curve.b, curve.H, t); + } + return v; + }; + fn.initialForce = options.initialForce; + return fn; + }; + dynamics.forceWithGravity = function(options) { + if (options == null) { + options = {}; + } + applyDefaults(options, arguments.callee.defaults); + options.initialForce = true; + return dynamics.gravity(options); + }; + dynamics.bezier = function() { + var Bezier, Bezier_, yForX; + Bezier_ = function(t, p0, p1, p2, p3) { + return Math.pow(1 - t, 3) * p0 + 3 * Math.pow(1 - t, 2) * t * p1 + 3 * (1 - t) * Math.pow(t, 2) * p2 + Math.pow(t, 3) * p3; + }; + Bezier = function(t, p0, p1, p2, p3) { + return { + x: Bezier_(t, p0.x, p1.x, p2.x, p3.x), + y: Bezier_(t, p0.y, p1.y, p2.y, p3.y) + }; + }; + yForX = function(xTarget, Bs, returnsToSelf) { + var B, aB, i, lower, percent, upper, x, xTolerance, _i, _len; + B = null; + for (_i = 0, _len = Bs.length; _i < _len; _i++) { + aB = Bs[_i]; + if (xTarget >= aB(0).x && xTarget <= aB(1).x) { + B = aB; + } + if (B !== null) { + break; + } + } + if (!B) { + if (returnsToSelf) { + return 0; + } else { + return 1; + } + } + xTolerance = 1e-4; + lower = 0; + upper = 1; + percent = (upper + lower) / 2; + x = B(percent).x; + i = 0; + while (Math.abs(xTarget - x) > xTolerance && i < 100) { + if (xTarget > x) { + lower = percent; + } else { + upper = percent; + } + percent = (upper + lower) / 2; + x = B(percent).x; + i += 1; + } + return B(percent).y; + }; + return function(options) { + var Bs, points, returnsToSelf; + if (options == null) { + options = {}; + } + points = options.points; + returnsToSelf = false; + Bs = function() { + var i, k, _fn; + Bs = []; + _fn = function(pointA, pointB) { + var B2; + B2 = function(t) { + return Bezier(t, pointA, pointA.cp[pointA.cp.length - 1], pointB.cp[0], pointB); + }; + return Bs.push(B2); + }; + for (i in points) { + k = parseInt(i); + if (k >= points.length - 1) { + break; + } + _fn(points[k], points[k + 1]); + } + return Bs; + }(); + return 
function(t) { + if (t === 0) { + return 0; + } else if (t === 1) { + return 1; + } else { + return yForX(t, Bs, returnsToSelf); + } + }; + }; + }(); + dynamics.easeInOut = function(options) { + var friction, _ref; + if (options == null) { + options = {}; + } + friction = (_ref = options.friction) != null ? _ref : arguments.callee.defaults.friction; + return dynamics.bezier({ + points: [ { + x: 0, + y: 0, + cp: [ { + x: .92 - friction / 1e3, + y: 0 + } ] + }, { + x: 1, + y: 1, + cp: [ { + x: .08 + friction / 1e3, + y: 1 + } ] + } ] + }); + }; + dynamics.easeIn = function(options) { + var friction, _ref; + if (options == null) { + options = {}; + } + friction = (_ref = options.friction) != null ? _ref : arguments.callee.defaults.friction; + return dynamics.bezier({ + points: [ { + x: 0, + y: 0, + cp: [ { + x: .92 - friction / 1e3, + y: 0 + } ] + }, { + x: 1, + y: 1, + cp: [ { + x: 1, + y: 1 + } ] + } ] + }); + }; + dynamics.easeOut = function(options) { + var friction, _ref; + if (options == null) { + options = {}; + } + friction = (_ref = options.friction) != null ? 
_ref : arguments.callee.defaults.friction; + return dynamics.bezier({ + points: [ { + x: 0, + y: 0, + cp: [ { + x: 0, + y: 0 + } ] + }, { + x: 1, + y: 1, + cp: [ { + x: .08 + friction / 1e3, + y: 1 + } ] + } ] + }); + }; + dynamics.spring.defaults = { + frequency: 300, + friction: 200, + anticipationSize: 0, + anticipationStrength: 0 + }; + dynamics.bounce.defaults = { + frequency: 300, + friction: 200 + }; + dynamics.forceWithGravity.defaults = dynamics.gravity.defaults = { + bounciness: 400, + elasticity: 200 + }; + dynamics.easeInOut.defaults = dynamics.easeIn.defaults = dynamics.easeOut.defaults = { + friction: 500 + }; + dynamics.css = makeArrayFn(function(el, properties) { + return applyProperties(el, properties, true); + }); + dynamics.animate = makeArrayFn(function(el, properties, options) { + var id; + if (options == null) { + options = {}; + } + options = clone(options); + applyDefaults(options, { + type: dynamics.easeInOut, + duration: 1e3, + delay: 0, + animated: true + }); + options.duration = Math.max(0, options.duration * slowRatio); + options.delay = Math.max(0, options.delay); + if (options.delay === 0) { + return startAnimation(el, properties, options); + } else { + id = dynamics.setTimeout(function() { + return startAnimation(el, properties, options, id); + }, options.delay); + return animationsTimeouts.push({ + id: id, + el: el + }); + } + }); + dynamics.stop = makeArrayFn(function(el, options) { + if (options == null) { + options = {}; + } + if (options.timeout == null) { + options.timeout = true; + } + if (options.timeout) { + animationsTimeouts = animationsTimeouts.filter(function(timeout) { + if (timeout.el === el && (options.filter == null || options.filter(timeout))) { + dynamics.clearTimeout(timeout.id); + return true; + } + return false; + }); + } + return animations = animations.filter(function(animation) { + return animation.el !== el; + }); + }); + dynamics.setTimeout = function(fn, delay) { + return addTimeout(fn, delay * slowRatio); 
+ }; + dynamics.clearTimeout = function(id) { + return cancelTimeout(id); + }; + dynamics.toggleSlow = function() { + slow = !slow; + if (slow) { + slowRatio = 3; + } else { + slowRatio = 1; + } + return typeof console !== "undefined" && console !== null ? typeof console.log === "function" ? console.log("dynamics.js: slow animations " + (slow ? "enabled" : "disabled")) : void 0 : void 0; + }; + if (typeof module === "object" && typeof module.exports === "object") { + module.exports = dynamics; + } else if (typeof define === "function") { + define("dynamics", function() { + return dynamics; + }); + } else { + window.dynamics = dynamics; + } +}).call(this); + +(function() { + "use strict"; + function FastClick(layer, options) { + var oldOnClick; + options = options || {}; + this.trackingClick = false; + this.trackingClickStart = 0; + this.targetElement = null; + this.touchStartX = 0; + this.touchStartY = 0; + this.lastTouchIdentifier = 0; + this.touchBoundary = options.touchBoundary || 10; + this.layer = layer; + this.tapDelay = options.tapDelay || 200; + this.tapTimeout = options.tapTimeout || 700; + if (FastClick.notNeeded(layer)) { + return; + } + function bind(method, context) { + return function() { + return method.apply(context, arguments); + }; + } + var methods = [ "onMouse", "onClick", "onTouchStart", "onTouchMove", "onTouchEnd", "onTouchCancel" ]; + var context = this; + for (var i = 0, l = methods.length; i < l; i++) { + context[methods[i]] = bind(context[methods[i]], context); + } + if (deviceIsAndroid) { + layer.addEventListener("mouseover", this.onMouse, true); + layer.addEventListener("mousedown", this.onMouse, true); + layer.addEventListener("mouseup", this.onMouse, true); + } + layer.addEventListener("click", this.onClick, true); + layer.addEventListener("touchstart", this.onTouchStart, false); + layer.addEventListener("touchmove", this.onTouchMove, false); + layer.addEventListener("touchend", this.onTouchEnd, false); + 
layer.addEventListener("touchcancel", this.onTouchCancel, false); + if (!Event.prototype.stopImmediatePropagation) { + layer.removeEventListener = function(type, callback, capture) { + var rmv = Node.prototype.removeEventListener; + if (type === "click") { + rmv.call(layer, type, callback.hijacked || callback, capture); + } else { + rmv.call(layer, type, callback, capture); + } + }; + layer.addEventListener = function(type, callback, capture) { + var adv = Node.prototype.addEventListener; + if (type === "click") { + adv.call(layer, type, callback.hijacked || (callback.hijacked = function(event) { + if (!event.propagationStopped) { + callback(event); + } + }), capture); + } else { + adv.call(layer, type, callback, capture); + } + }; + } + if (typeof layer.onclick === "function") { + oldOnClick = layer.onclick; + layer.addEventListener("click", function(event) { + oldOnClick(event); + }, false); + layer.onclick = null; + } + } + var deviceIsWindowsPhone = navigator.userAgent.indexOf("Windows Phone") >= 0; + var deviceIsAndroid = navigator.userAgent.indexOf("Android") > 0 && !deviceIsWindowsPhone; + var deviceIsIOS = /iP(ad|hone|od)/.test(navigator.userAgent) && !deviceIsWindowsPhone; + var deviceIsIOS4 = deviceIsIOS && /OS 4_\d(_\d)?/.test(navigator.userAgent); + var deviceIsIOSWithBadTarget = deviceIsIOS && /OS [6-7]_\d/.test(navigator.userAgent); + var deviceIsBlackBerry10 = navigator.userAgent.indexOf("BB10") > 0; + FastClick.prototype.needsClick = function(target) { + switch (target.nodeName.toLowerCase()) { + case "button": + case "select": + case "textarea": + if (target.disabled) { + return true; + } + break; + + case "input": + if (deviceIsIOS && target.type === "file" || target.disabled) { + return true; + } + break; + + case "label": + case "iframe": + case "video": + return true; + } + return /\bneedsclick\b/.test(target.className); + }; + FastClick.prototype.needsFocus = function(target) { + switch (target.nodeName.toLowerCase()) { + case "textarea": + 
return true; + + case "select": + return !deviceIsAndroid; + + case "input": + switch (target.type) { + case "button": + case "checkbox": + case "file": + case "image": + case "radio": + case "submit": + return false; + } + return !target.disabled && !target.readOnly; + + default: + return /\bneedsfocus\b/.test(target.className); + } + }; + FastClick.prototype.sendClick = function(targetElement, event) { + var clickEvent, touch; + if (document.activeElement && document.activeElement !== targetElement) { + document.activeElement.blur(); + } + touch = event.changedTouches[0]; + clickEvent = document.createEvent("MouseEvents"); + clickEvent.initMouseEvent(this.determineEventType(targetElement), true, true, window, 1, touch.screenX, touch.screenY, touch.clientX, touch.clientY, false, false, false, false, 0, null); + clickEvent.forwardedTouchEvent = true; + targetElement.dispatchEvent(clickEvent); + }; + FastClick.prototype.determineEventType = function(targetElement) { + if (deviceIsAndroid && targetElement.tagName.toLowerCase() === "select") { + return "mousedown"; + } + return "click"; + }; + FastClick.prototype.focus = function(targetElement) { + var length; + if (deviceIsIOS && targetElement.setSelectionRange && targetElement.type.indexOf("date") !== 0 && targetElement.type !== "time" && targetElement.type !== "month") { + length = targetElement.value.length; + targetElement.setSelectionRange(length, length); + } else { + targetElement.focus(); + } + }; + FastClick.prototype.updateScrollParent = function(targetElement) { + var scrollParent, parentElement; + scrollParent = targetElement.fastClickScrollParent; + if (!scrollParent || !scrollParent.contains(targetElement)) { + parentElement = targetElement; + do { + if (parentElement.scrollHeight > parentElement.offsetHeight) { + scrollParent = parentElement; + targetElement.fastClickScrollParent = parentElement; + break; + } + parentElement = parentElement.parentElement; + } while (parentElement); + } + if 
(scrollParent) { + scrollParent.fastClickLastScrollTop = scrollParent.scrollTop; + } + }; + FastClick.prototype.getTargetElementFromEventTarget = function(eventTarget) { + if (eventTarget.nodeType === Node.TEXT_NODE) { + return eventTarget.parentNode; + } + return eventTarget; + }; + FastClick.prototype.onTouchStart = function(event) { + var targetElement, touch, selection; + if (event.targetTouches.length > 1) { + return true; + } + targetElement = this.getTargetElementFromEventTarget(event.target); + touch = event.targetTouches[0]; + if (deviceIsIOS) { + selection = window.getSelection(); + if (selection.rangeCount && !selection.isCollapsed) { + return true; + } + if (!deviceIsIOS4) { + if (touch.identifier && touch.identifier === this.lastTouchIdentifier) { + event.preventDefault(); + return false; + } + this.lastTouchIdentifier = touch.identifier; + this.updateScrollParent(targetElement); + } + } + this.trackingClick = true; + this.trackingClickStart = event.timeStamp; + this.targetElement = targetElement; + this.touchStartX = touch.pageX; + this.touchStartY = touch.pageY; + if (event.timeStamp - this.lastClickTime < this.tapDelay) { + event.preventDefault(); + } + return true; + }; + FastClick.prototype.touchHasMoved = function(event) { + var touch = event.changedTouches[0], boundary = this.touchBoundary; + if (Math.abs(touch.pageX - this.touchStartX) > boundary || Math.abs(touch.pageY - this.touchStartY) > boundary) { + return true; + } + return false; + }; + FastClick.prototype.onTouchMove = function(event) { + if (!this.trackingClick) { + return true; + } + if (this.targetElement !== this.getTargetElementFromEventTarget(event.target) || this.touchHasMoved(event)) { + this.trackingClick = false; + this.targetElement = null; + } + return true; + }; + FastClick.prototype.findControl = function(labelElement) { + if (labelElement.control !== undefined) { + return labelElement.control; + } + if (labelElement.htmlFor) { + return 
document.getElementById(labelElement.htmlFor); + } + return labelElement.querySelector("button, input:not([type=hidden]), keygen, meter, output, progress, select, textarea"); + }; + FastClick.prototype.onTouchEnd = function(event) { + var forElement, trackingClickStart, targetTagName, scrollParent, touch, targetElement = this.targetElement; + if (!this.trackingClick) { + return true; + } + if (event.timeStamp - this.lastClickTime < this.tapDelay) { + this.cancelNextClick = true; + return true; + } + if (event.timeStamp - this.trackingClickStart > this.tapTimeout) { + return true; + } + this.cancelNextClick = false; + this.lastClickTime = event.timeStamp; + trackingClickStart = this.trackingClickStart; + this.trackingClick = false; + this.trackingClickStart = 0; + if (deviceIsIOSWithBadTarget) { + touch = event.changedTouches[0]; + targetElement = document.elementFromPoint(touch.pageX - window.pageXOffset, touch.pageY - window.pageYOffset) || targetElement; + targetElement.fastClickScrollParent = this.targetElement.fastClickScrollParent; + } + targetTagName = targetElement.tagName.toLowerCase(); + if (targetTagName === "label") { + forElement = this.findControl(targetElement); + if (forElement) { + this.focus(targetElement); + if (deviceIsAndroid) { + return false; + } + targetElement = forElement; + } + } else if (this.needsFocus(targetElement)) { + if (event.timeStamp - trackingClickStart > 100 || deviceIsIOS && window.top !== window && targetTagName === "input") { + this.targetElement = null; + return false; + } + this.focus(targetElement); + this.sendClick(targetElement, event); + if (!deviceIsIOS || targetTagName !== "select") { + this.targetElement = null; + event.preventDefault(); + } + return false; + } + if (deviceIsIOS && !deviceIsIOS4) { + scrollParent = targetElement.fastClickScrollParent; + if (scrollParent && scrollParent.fastClickLastScrollTop !== scrollParent.scrollTop) { + return true; + } + } + if (!this.needsClick(targetElement)) { + 
event.preventDefault(); + this.sendClick(targetElement, event); + } + return false; + }; + FastClick.prototype.onTouchCancel = function() { + this.trackingClick = false; + this.targetElement = null; + }; + FastClick.prototype.onMouse = function(event) { + if (!this.targetElement) { + return true; + } + if (event.forwardedTouchEvent) { + return true; + } + if (!event.cancelable) { + return true; + } + if (!this.needsClick(this.targetElement) || this.cancelNextClick) { + if (event.stopImmediatePropagation) { + event.stopImmediatePropagation(); + } else { + event.propagationStopped = true; + } + event.stopPropagation(); + event.preventDefault(); + return false; + } + return true; + }; + FastClick.prototype.onClick = function(event) { + var permitted; + if (this.trackingClick) { + this.targetElement = null; + this.trackingClick = false; + return true; + } + if (event.target.type === "submit" && event.detail === 0) { + return true; + } + permitted = this.onMouse(event); + if (!permitted) { + this.targetElement = null; + } + return permitted; + }; + FastClick.prototype.destroy = function() { + var layer = this.layer; + if (deviceIsAndroid) { + layer.removeEventListener("mouseover", this.onMouse, true); + layer.removeEventListener("mousedown", this.onMouse, true); + layer.removeEventListener("mouseup", this.onMouse, true); + } + layer.removeEventListener("click", this.onClick, true); + layer.removeEventListener("touchstart", this.onTouchStart, false); + layer.removeEventListener("touchmove", this.onTouchMove, false); + layer.removeEventListener("touchend", this.onTouchEnd, false); + layer.removeEventListener("touchcancel", this.onTouchCancel, false); + }; + FastClick.notNeeded = function(layer) { + var metaViewport; + var chromeVersion; + var blackberryVersion; + var firefoxVersion; + if (typeof window.ontouchstart === "undefined") { + return true; + } + chromeVersion = +(/Chrome\/([0-9]+)/.exec(navigator.userAgent) || [ , 0 ])[1]; + if (chromeVersion) { + if 
(deviceIsAndroid) { + metaViewport = document.querySelector("meta[name=viewport]"); + if (metaViewport) { + if (metaViewport.content.indexOf("user-scalable=no") !== -1) { + return true; + } + if (chromeVersion > 31 && document.documentElement.scrollWidth <= window.outerWidth) { + return true; + } + } + } else { + return true; + } + } + if (deviceIsBlackBerry10) { + blackberryVersion = navigator.userAgent.match(/Version\/([0-9]*)\.([0-9]*)/); + if (blackberryVersion[1] >= 10 && blackberryVersion[2] >= 3) { + metaViewport = document.querySelector("meta[name=viewport]"); + if (metaViewport) { + if (metaViewport.content.indexOf("user-scalable=no") !== -1) { + return true; + } + if (document.documentElement.scrollWidth <= window.outerWidth) { + return true; + } + } + } + } + if (layer.style.msTouchAction === "none" || layer.style.touchAction === "manipulation") { + return true; + } + firefoxVersion = +(/Firefox\/([0-9]+)/.exec(navigator.userAgent) || [ , 0 ])[1]; + if (firefoxVersion >= 27) { + metaViewport = document.querySelector("meta[name=viewport]"); + if (metaViewport && (metaViewport.content.indexOf("user-scalable=no") !== -1 || document.documentElement.scrollWidth <= window.outerWidth)) { + return true; + } + } + if (layer.style.touchAction === "none" || layer.style.touchAction === "manipulation") { + return true; + } + return false; + }; + FastClick.attach = function(layer, options) { + return new FastClick(layer, options); + }; + if (typeof define === "function" && typeof define.amd === "object" && define.amd) { + define(function() { + return FastClick; + }); + } else if (typeof module !== "undefined" && module.exports) { + module.exports = FastClick.attach; + module.exports.FastClick = FastClick; + } else { + window.FastClick = FastClick; + } +})(); + +window.requestAnimFrame = function() { + return window.requestAnimationFrame || window.webkitRequestAnimationFrame || window.mozRequestAnimationFrame || window.oRequestAnimationFrame || 
window.msRequestAnimationFrame || function(callback, element) { + window.setTimeout(callback, 1e3 / 60); + }; +}(); + +window.requestInterval = function(fn, delay) { + if (!window.requestAnimationFrame && !window.webkitRequestAnimationFrame && !(window.mozRequestAnimationFrame && window.mozCancelRequestAnimationFrame) && !window.oRequestAnimationFrame && !window.msRequestAnimationFrame) return window.setInterval(fn, delay); + var start = new Date().getTime(), handle = new Object(); + function loop() { + var current = new Date().getTime(), delta = current - start; + if (delta >= delay) { + fn.call(); + start = new Date().getTime(); + } + handle.value = requestAnimFrame(loop); + } + handle.value = requestAnimFrame(loop); + return handle; +}; + +window.clearRequestInterval = function(handle) { + window.cancelAnimationFrame ? window.cancelAnimationFrame(handle.value) : window.webkitCancelAnimationFrame ? window.webkitCancelAnimationFrame(handle.value) : window.webkitCancelRequestAnimationFrame ? window.webkitCancelRequestAnimationFrame(handle.value) : window.mozCancelRequestAnimationFrame ? window.mozCancelRequestAnimationFrame(handle.value) : window.oCancelRequestAnimationFrame ? window.oCancelRequestAnimationFrame(handle.value) : window.msCancelRequestAnimationFrame ? window.msCancelRequestAnimationFrame(handle.value) : clearInterval(handle); +}; + +window.requestTimeout = function(fn, delay) { + if (!window.requestAnimationFrame && !window.webkitRequestAnimationFrame && !(window.mozRequestAnimationFrame && window.mozCancelRequestAnimationFrame) && !window.oRequestAnimationFrame && !window.msRequestAnimationFrame) return window.setTimeout(fn, delay); + var start = new Date().getTime(), handle = new Object(); + function loop() { + var current = new Date().getTime(), delta = current - start; + delta >= delay ? 
fn.call() : handle.value = requestAnimFrame(loop); + } + handle.value = requestAnimFrame(loop); + return handle; +}; + +window.clearRequestTimeout = function(handle) { + window.cancelAnimationFrame ? window.cancelAnimationFrame(handle.value) : window.webkitCancelAnimationFrame ? window.webkitCancelAnimationFrame(handle.value) : window.webkitCancelRequestAnimationFrame ? window.webkitCancelRequestAnimationFrame(handle.value) : window.mozCancelRequestAnimationFrame ? window.mozCancelRequestAnimationFrame(handle.value) : window.oCancelRequestAnimationFrame ? window.oCancelRequestAnimationFrame(handle.value) : window.msCancelRequestAnimationFrame ? window.msCancelRequestAnimationFrame(handle.value) : clearTimeout(handle); +}; \ No newline at end of file diff --git a/couchpotato/static/scripts/couchpotato.js b/couchpotato/static/scripts/couchpotato.js index d2e8fa0017..9f6bbfda73 100644 --- a/couchpotato/static/scripts/couchpotato.js +++ b/couchpotato/static/scripts/couchpotato.js @@ -1,4 +1,4 @@ -var CouchPotato = new Class({ +О╩©var CouchPotato = new Class({ Implements: [Events, Options], @@ -11,13 +11,17 @@ var CouchPotato = new Class({ pages: [], block: [], + initialize: function(){ + var self = this; + + self.global_events = {}; + }, + setup: function(options) { var self = this; self.setOptions(options); - self.c = $(document.body) - - self.route = new Route(self.defaults); + self.c = $(document.body); self.createLayout(); self.createPages(); @@ -28,8 +32,59 @@ var CouchPotato = new Class({ self.openPage(window.location.pathname); History.addEvent('change', self.openPage.bind(self)); + self.c.addEvent('click:relay(.header a, .navigation a, .movie_details a, .list_list .movie)', self.ripple.bind(self)); self.c.addEvent('click:relay(a[href^=/]:not([target]))', self.pushState.bind(self)); self.c.addEvent('click:relay(a[href^=http])', self.openDerefered.bind(self)); + + // Check if device is touchenabled + self.touch_device = 'ontouchstart' in window || 
navigator.msMaxTouchPoints; + if(self.touch_device){ + self.c.addClass('touch_enabled'); + FastClick.attach(document.body); + } + + window.addEvent('resize', self.resize.bind(self)); + self.resize(); + + //self.checkCache(); + + }, + + checkCache: function(){ + window.addEventListener('load', function() { + window.applicationCache.addEventListener('updateready', function(e) { + if (window.applicationCache.status == window.applicationCache.UPDATEREADY) { + window.applicationCache.swapCache(); + window.location.reload(); + } + }, false); + + }, false); + }, + + resize: function(){ + var self = this; + + self.mobile_screen = Math.max(document.documentElement.clientWidth, window.innerWidth || 0) <= 480; + self.c[self.mobile_screen ? 'addClass' : 'removeClass']('mobile'); + }, + + ripple: function(e, el){ + var self = this, + button = el.getCoordinates(), + x = e.page.x - button.left, + y = e.page.y - button.top, + ripple = new Element('div.ripple', { + 'styles': { + 'left': x, + 'top': y + } + }); + + ripple.inject(el); + + requestTimeout(function(){ ripple.addClass('animate'); }, 0); + requestTimeout(function(){ ripple.dispose(); }, 2100); }, getOption: function(name){ @@ -37,110 +92,165 @@ var CouchPotato = new Class({ return this.options[name]; } catch(e){ - return null + return null; } }, - pushState: function(e){ + pushState: function(e, el){ var self = this; - if((!e.meta && Browser.Platform.mac) || (!e.control && !Browser.Platform.mac)){ + + if((!e.meta && App.isMac()) || (!e.control && !App.isMac())){ (e).preventDefault(); - var url = e.target.get('href'); - if(History.getPath() != url) + var url = el.get('href'); + + // Middle click + if(e.event && e.event.button === 1) + window.open(url); + else if(History.getPath() != url) History.push(url); + } + + self.fireEvent('history.push'); + }, + + isMac: function(){ + return Browser.platform == 'mac'; }, createLayout: function(){ var self = this; - self.block.header = new Block(); + // TODO : sorry, it's a crutch... 
Need to move self.hide_update initialization to appropriate place.. + // WebUI Feature: + self.hide_update = !! App.options && App.options.webui_feature && App.options.webui_feature.hide_menuitem_update; + + self.block.header = new BlockBase(); self.c.adopt( $(self.block.header).addClass('header').adopt( - new Element('div').adopt( - self.block.navigation = new Block.Navigation(self, {}), - self.block.search = new Block.Search(self, {}), - self.block.more = new Block.Menu(self, {}) - ) + self.block.navigation = new BlockHeader(self, {}), + self.block.search = new BlockSearch(self, {}), + self.support = new Element('a.donate.icon-donate', { + 'href': 'https://couchpota.to/support/', + 'target': '_blank' + }).grab( + new Element('span', { + 'text': 'Donate' + }) + ), + self.block.more = new BlockMenu(self, {'button_class': 'icon-settings'}) ), - self.content = new Element('div.content'), - self.block.footer = new Block.Footer(self, {}) + new Element('div.corner_background'), + self.content = new Element('div.content').adopt( + self.pages_container = new Element('div.pages'), + self.block.footer = new BlockFooter(self, {}) + ) ); - [new Element('a.orange', { - 'text': 'Restart', - 'events': { - 'click': self.restartQA.bind(self) - } - }), - new Element('a.red', { - 'text': 'Shutdown', - 'events': { - 'click': self.shutdownQA.bind(self) - } - }), - new Element('a', { - 'text': 'Update to latest', - 'events': { - 'click': self.checkForUpdate.bind(self, null) - } - }), - new Element('a', { - 'text': 'Run install wizard', - 'href': App.createUrl('wizard') - })].each(function(a){ - self.block.more.addLink(a) - }) - - - new ScrollSpy({ - min: 10, - onLeave: function(){ - $(self.block.header).removeClass('with_shadow') - }, - onEnter: function(){ - $(self.block.header).addClass('with_shadow') - } - }) + var setting_links = [ + new Element('a', { + 'text': 'About CouchPotato', + 'href': App.createUrl('settings/about') + }), + new Element('a', { + 'text': 'Settings', + 'href': 
App.createUrl('settings/general') + }), + new Element('a', { + 'text': 'Logs', + 'href': App.createUrl('log') + }), + new Element('a', { + 'text': 'Restart', + 'events': { + 'click': self.restartQA.bind(self) + } + }), + new Element('a', { + 'text': 'Shutdown', + 'events': { + 'click': self.shutdownQA.bind(self) + } + }) + ]; + + if (!self.hide_update){ + setting_links.splice(1, 0, new Element('a', { + 'text': 'Check for Updates', + 'events': { + 'click': self.checkForUpdate.bind(self, null) + } + })); + } + + setting_links.each(function(a){ + self.block.more.addLink(a); + }); + + // Set theme + self.addEvent('setting.save.core.dark_theme', function(enabled){ + document.html[enabled ? 'addClass' : 'removeClass']('dark'); + }); + }, createPages: function(){ var self = this; + var pages = []; Object.each(Page, function(page_class, class_name){ - pg = new Page[class_name](self, {}); + var pg = new Page[class_name](self, { + 'level': 1 + }); self.pages[class_name] = pg; - $(pg).inject(self.content); + pages.include({ + 'order': pg.order, + 'name': class_name, + 'class': pg + }); + }); + + pages.stableSort(self.sortPageByOrder).each(function(page){ + page['class'].load(); + self.fireEvent('load'+page.name); + $(page['class']).inject(self.getPageContainer()); }); self.fireEvent('load'); }, + sortPageByOrder: function(a, b){ + return (a.order || 100) - (b.order || 100); + }, + openPage: function(url) { - var self = this; + var self = this, + route = new Route(self.defaults); + + route.parse(rep(History.getPath())); - self.route.parse(); - var page_name = self.route.getPage().capitalize(); - var action = self.route.getAction(); - var params = self.route.getParams(); + var page_name = route.getPage().capitalize(), + action = route.getAction(), + params = route.getParams(), + current_url = route.getCurrentUrl(), + page; - var current_url = self.route.getCurrentUrl(); if(current_url == self.current_url) return; if(self.current_page) - self.current_page.hide() + 
self.current_page.hide(); try { - var page = self.pages[page_name] || self.pages.Home; + page = self.pages[page_name] || self.pages.Home; page.open(action, params, current_url); page.show(); } catch(e){ - console.error("Can't open page:" + url, e) + console.error("Can't open page:" + url, e); } self.current_page = page; @@ -149,24 +259,28 @@ var CouchPotato = new Class({ }, getBlock: function(block_name){ - return this.block[block_name] + return this.block[block_name]; }, getPage: function(name){ - return this.pages[name] + return this.pages[name]; + }, + + getPageContainer: function(){ + return this.pages_container; }, shutdown: function(){ var self = this; - self.blockPage('You have shutdown. This is what suppose to happen ;)'); + self.blockPage('You have shutdown. This is what is supposed to happen ;)'); Api.request('app.shutdown', { 'onComplete': self.blockPage.bind(self) }); self.checkAvailable(1000); }, - shutdownQA: function(e){ + shutdownQA: function(){ var self = this; var q = new Question('Are you sure you want to shutdown CouchPotato?', '', [{ @@ -176,7 +290,7 @@ var CouchPotato = new Class({ 'click': function(e){ (e).preventDefault(); self.shutdown(); - q.close.delay(100, q); + requestTimeout(q.close.bind(q), 100); } } }, { @@ -203,7 +317,7 @@ var CouchPotato = new Class({ 'click': function(e){ (e).preventDefault(); self.restart(message, title); - q.close.delay(100, q); + requestTimeout(q.close.bind(q), 100); } } }, { @@ -215,7 +329,7 @@ var CouchPotato = new Class({ checkForUpdate: function(onComplete){ var self = this; - Updater.check(onComplete) + Updater.check(onComplete); self.blockPage('Please wait. 
If this takes too long, something must have gone wrong.', 'Checking for updates'); self.checkAvailable(3000); @@ -224,22 +338,31 @@ var CouchPotato = new Class({ checkAvailable: function(delay, onAvailable){ var self = this; - (function(){ + requestTimeout(function(){ - Api.request('app.available', { - 'onFailure': function(){ - self.checkAvailable.delay(1000, self, [delay, onAvailable]); - self.fireEvent('unload'); + var onFailure = function(){ + requestTimeout(function(){ + self.checkAvailable(delay, onAvailable); + }, 1000); + self.fireEvent('unload'); + }; + + var request = Api.request('app.available', { + 'timeout': 2000, + 'onTimeout': function(){ + request.cancel(); + onFailure(); }, + 'onFailure': onFailure, 'onSuccess': function(){ if(onAvailable) - onAvailable() + onAvailable(); self.unBlockPage(); self.fireEvent('reload'); } }); - }).delay(delay || 0) + }, delay || 0); }, blockPage: function(message, title){ @@ -247,37 +370,42 @@ var CouchPotato = new Class({ self.unBlockPage(); - var body = $(document.body); - self.mask = new Element('div.mask').adopt( - new Element('div').adopt( + self.mask = new Element('div.mask.with_message').adopt( + new Element('div.message').adopt( new Element('h1', {'text': title || 'Unavailable'}), new Element('div', {'text': message || 'Something must have crashed.. check the logs ;)'}) ) - ).fade('hide').inject(document.body).fade('in'); + ).inject(document.body); - createSpinner(self.mask, { - 'top': -50 - }); + createSpinner(self.mask); + + requestTimeout(function(){ + self.mask.addClass('show'); + }, 10); }, unBlockPage: function(){ var self = this; if(self.mask) self.mask.get('tween').start('opacity', 0).chain(function(){ - this.element.destroy() + this.element.destroy(); }); }, createUrl: function(action, params){ - return this.options.base_url + (action ? action+'/' : '') + (params ? '?'+Object.toQueryString(params) : '') + return this.options.base_url + (action ? action+'/' : '') + (params ? 
'?'+Object.toQueryString(params) : ''); }, openDerefered: function(e, el){ + var self = this; (e).stop(); - var url = 'http://www.dereferer.org/?' + el.get('href'); + var url = el.get('href'); + if(self.getOption('dereferer')){ + url = self.getOption('dereferer') + el.get('href'); + } - if(el.get('target') == '_blank' || (e.meta && Browser.Platform.mac) || (e.control && !Browser.Platform.mac)) + if(el.get('target') == '_blank' || (e.meta && self.isMac()) || (e.control && !self.isMac())) window.open(url); else window.location = url; @@ -285,43 +413,94 @@ var CouchPotato = new Class({ createUserscriptButtons: function(){ - var userscript = false; - try { - if(Components.interfaces.gmIGreasemonkeyService) - userscript = true - } - catch(e){ - userscript = Browser.chrome === true; - } - var host_url = window.location.protocol + '//' + window.location.host; return new Element('div.group_userscript').adopt( - (userscript ? [new Element('a.userscript.button', { - 'text': 'Install userscript', - 'href': Api.createUrl('userscript.get')+randomString()+'/couchpotato.user.js', - 'target': '_self' - }), new Element('span.or[text=or]')] : null), - new Element('span.bookmarklet').adopt( - new Element('a.button.orange', { - 'text': '+CouchPotato', - 'href': "javascript:void((function(){var e=document.createElement('script');e.setAttribute('type','text/javascript');e.setAttribute('charset','UTF-8');e.setAttribute('src','" + - host_url + Api.createUrl('userscript.bookmark') + - "?host="+ encodeURI(host_url + Api.createUrl('userscript.get')+randomString()+'/') + - "&r='+Math.random()*99999999);document.body.appendChild(e)})());", - 'target': '', - 'events': { - 'click': function(e){ - (e).stop() - alert('Drag it to your bookmark ;)') - } - } + new Element('div').adopt( + new Element('a.userscript.button', { + 'text': 'Install extension', + 'href': 'https://couchpota.to/extension/', + 'target': '_blank' }), - new Element('span', { - 'text': 'Б┤╫ Drag this to your bookmarks' - }) - ) + 
new Element('span.or[text=or]'), + new Element('span.bookmarklet').adopt( + new Element('a.button', { + 'text': '+CouchPotato', + /* jshint ignore:start */ + 'href': "javascript:void((function(){var e=document.createElement('script');e.setAttribute('type','text/javascript');e.setAttribute('charset','UTF-8');e.setAttribute('src','" + + host_url + Api.createUrl('userscript.bookmark') + + "?host="+ encodeURI(host_url + Api.createUrl('userscript.get')+randomString()+'/') + + "&r='+Math.random()*99999999);document.body.appendChild(e)})());", + /* jshint ignore:end */ + 'target': '', + 'events': { + 'click': function(e){ + (e).stop(); + alert('Drag it to your bookmark ;)'); + } + } + }), + new Element('span', { + 'text': 'Б┤╫ Drag this to your bookmarks' + }) + ) + ), + new Element('img', { + 'src': 'https://couchpota.to/media/images/userscript.gif' + }) ); + }, + + /* + * Global events + */ + on: function(name, handle){ + var self = this; + + if(!self.global_events[name]) + self.global_events[name] = []; + + self.global_events[name].push(handle); + + }, + + trigger: function(name, args, on_complete){ + var self = this; + + if(!self.global_events[name]){ return; } + + if(!on_complete && typeOf(args) == 'function'){ + on_complete = args; + args = []; + } + + // Create parallel callback + self.global_events[name].each(function(handle){ + + requestTimeout(function(){ + var results = handle.apply(handle, args || []); + + if(on_complete) + on_complete(results); + }, 0); + }); + + }, + + off: function(name, handle){ + var self = this; + + if(!self.global_events[name]) return; + + // Remove single + if(handle){ + self.global_events[name] = self.global_events[name].erase(handle); + } + // Reset full event + else { + self.global_events[name] = []; + } + } }); @@ -329,77 +508,72 @@ window.App = new CouchPotato(); var Route = new Class({ - defaults: {}, + defaults: null, page: '', action: 'index', params: {}, initialize: function(defaults){ - var self = this - self.defaults = 
defaults + var self = this; + self.defaults = defaults || {}; }, - parse: function(){ + parse: function(path){ var self = this; - var rep = function(pa){ - return pa.replace(Api.getOption('url'), '/').replace(App.getOption('base_url'), '/') - } - - var path = rep(History.getPath()) if(path == '/' && location.hash){ - path = rep(location.hash.replace('#', '/')) + path = rep(location.hash.replace('#', '/')); } - self.current = path.replace(/^\/+|\/+$/g, '') - var url = self.current.split('/') + self.current = path.replace(/^\/+|\/+$/g, ''); + var url = self.current.split('/'); - self.page = (url.length > 0) ? url.shift() : self.defaults.page - self.action = (url.length > 0) ? url.shift() : self.defaults.action + self.page = (url.length > 0) ? url.shift() : self.defaults.page; + self.action = (url.length > 0) ? url.join('/') : self.defaults.action; self.params = Object.merge({}, self.defaults.params); if(url.length > 1){ - var key + var key; url.each(function(el, nr){ - if(nr%2 == 0) - key = el + if(nr%2 === 0) + key = el; else if(key) { - self.params[key] = el - key = null + self.params[key] = el; + key = null; } - }) + }); } else if(url.length == 1){ self.params[url] = true; } - return self + return self; }, getPage: function(){ - return this.page + return this.page; }, getAction: function(){ - return this.action + return this.action; }, getParams: function(){ - return this.params + return this.params; }, getCurrentUrl: function(){ - return this.current + return this.current; }, get: function(param){ - return this.params[param] + return this.params[param]; } }); var p = function(){ - if(typeof(console) !== 'undefined' && console != null) - console.log(arguments) + if(typeof(console) !== 'undefined' && console !== null) + console.log(arguments); }; @@ -442,14 +616,14 @@ var p = function(){ function randomString(length, extra) { - var chars = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXTZabcdefghiklmnopqrstuvwxyz" + (extra ? 
'-._!@#$%^&*()+=' : ''); - var stringLength = length || 8; - var randomString = ''; - for (var i = 0; i < stringLength; i++) { + var chars = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXTZabcdefghiklmnopqrstuvwxyz" + (extra ? '-._!@#$%^&*()+=' : ''), + string_length = length || 8, + random_string = ''; + for (var i = 0; i < string_length; i++) { var rnum = Math.floor(Math.random() * chars.length); - randomString += chars.charAt(rnum); + random_string += chars.charAt(rnum); } - return randomString; + return random_string; } (function(){ @@ -465,14 +639,14 @@ function randomString(length, extra) { var valueOf = function(object, path) { var ptr = object; - path.each(function(key) { ptr = ptr[key] }); + path.each(function(key) { ptr = ptr[key]; }); return ptr; }; var comparer = function(a, b) { for (var i = 0, l = keyPaths.length; i < l; i++) { - aVal = valueOf(a, keyPaths[i].path); - bVal = valueOf(b, keyPaths[i].path); + var aVal = valueOf(a, keyPaths[i].path), + bVal = valueOf(b, keyPaths[i].path); if (aVal > bVal) return keyPaths[i].sign; if (aVal < bVal) return -keyPaths[i].sign; } @@ -489,28 +663,18 @@ function randomString(length, extra) { case "string": saveKeyPath(argument.match(/[+-]|[^.]+/g)); break; } }); - return this.sort(comparer); + return this.stableSort(comparer); } }); })(); -var createSpinner = function(target, options){ - var opts = Object.merge({ - lines: 12, - length: 5, - width: 4, - radius: 9, - color: '#fff', - speed: 1.9, - trail: 53, - shadow: false, - hwaccel: true, - className: 'spinner', - zIndex: 2e9, - top: 'auto', - left: 'auto' - }, options); - - return new Spinner(opts).spin(target); -} \ No newline at end of file +var createSpinner = function(container){ + var spinner = new Element('div.spinner'); + container.grab(spinner); + return spinner; +}; + +var rep = function (pa) { + return pa.replace(Api.getOption('url'), '/').replace(App.getOption('base_url'), '/'); +}; diff --git a/couchpotato/static/scripts/library/form_replacement/form_check.js 
b/couchpotato/static/scripts/library/form_replacement/form_check.js deleted file mode 100644 index 4c240f6aee..0000000000 --- a/couchpotato/static/scripts/library/form_replacement/form_check.js +++ /dev/null @@ -1,126 +0,0 @@ -/* ---- -name: Form.Check -description: Class to represent a checkbox -authors: Bryan J Swift (@bryanjswift) -license: MIT-style license. -requires: [Core/Class.Extras, Core/Element, Core/Element.Event] -provides: Form.Check -... -*/ -if (typeof window.Form === 'undefined') { window.Form = {}; } - -Form.Check = new Class({ - Implements: [Events, Options], - options: { - checked: false, - disabled: false - }, - bound: {}, - checked: false, - config: { - checkedClass: 'checked', - disabledClass: 'disabled', - elementClass: 'check', - highlightedClass: 'highlighted', - storage: 'Form.Check::data' - }, - disabled: false, - element: null, - input: null, - label: null, - value: null, - initialize: function(input, options) { - this.setOptions(options); - this.bound = { - disable: this.disable.bind(this), - enable: this.enable.bind(this), - highlight: this.highlight.bind(this), - removeHighlight: this.removeHighlight.bind(this), - keyToggle: this.keyToggle.bind(this), - toggle: this.toggle.bind(this) - }; - var bound = this.bound; - input = this.input = $(input); - var id = input.get('id'); - this.label = document.getElement('label[for=' + id + ']'); - this.element = new Element('div', { - 'class': input.get('class') + ' ' + this.config.elementClass, - id: id ? 
id + 'Check' : '', - events: { - click: bound.toggle, - mouseenter: bound.highlight, - mouseleave: bound.removeHighlight - } - }); - this.input.addEvents({ - keypress: bound.keyToggle, - keydown: bound.keyToggle, - keyup: bound.keyToggle - }); - if (this.label) { this.label.addEvent('click', bound.toggle); } - this.element.wraps(input); - this.value = input.get('value'); - if (this.input.checked) { this.check(); } else { this.uncheck(); } - if (this.input.disabled) { this.disable(); } else { this.enable(); } - input.store(this.config.storage, this).addEvents({ - blur: bound.removeHighlight, - focus: bound.highlight - }); - this.fireEvent('create', this); - }, - check: function() { - this.element.addClass(this.config.checkedClass); - this.input.set('checked', 'checked').focus(); - this.checked = true; - this.fireEvent('check', this); - }, - disable: function() { - this.element.addClass(this.config.disabledClass); - this.input.set('disabled', 'disabled'); - this.disabled = true; - this.fireEvent('disable', this); - }, - enable: function() { - this.element.removeClass(this.config.disabledClass); - this.input.erase('disabled'); - this.disabled = false; - this.fireEvent('enable', this); - }, - highlight: function() { - this.element.addClass(this.config.highlightedClass); - this.fireEvent('highlight', this); - }, - removeHighlight: function() { - this.element.removeClass(this.config.highlightedClass); - this.fireEvent('removeHighlight', this); - }, - keyToggle: function(e) { - var evt = (e); - if (evt.key === 'space') { this.toggle(e); } - }, - toggle: function(e) { - var evt; - if (this.disabled) { return this; } - if (e) { - evt = (e).stopPropagation(); - if (evt.target.tagName.toLowerCase() !== 'a') { - evt.stop(); - } - } - if (this.checked) { - this.uncheck(); - } else { - this.check(); - } - this.fireEvent('change', this); - this.input.fireEvent('change', this); - return this; - }, - uncheck: function() { - this.element.removeClass(this.config.checkedClass); - 
this.input.erase('checked'); - this.checked = false; - this.fireEvent('uncheck', this); - } -}); diff --git a/couchpotato/static/scripts/library/form_replacement/form_checkgroup.js b/couchpotato/static/scripts/library/form_replacement/form_checkgroup.js deleted file mode 100644 index 002fd6ed37..0000000000 --- a/couchpotato/static/scripts/library/form_replacement/form_checkgroup.js +++ /dev/null @@ -1,51 +0,0 @@ -/* ---- -name: Form.CheckGroup -description: Class to represent a group of Form.Check wrapped checkboxes -authors: Bryan J Swift (@bryanjswift) -license: MIT-style license. -requires: [Core/Class.Extras, Core/Element, Core/Element.Event, Form-Replacement/Form.Check] -provides: Form.CheckGroup -... -*/ -if (typeof window.Form === 'undefined') { window.Form = {}; } - -Form.CheckGroup = new Class({ - Implements: [Events,Options], - options: { - checkOptions: {}, - initialValues: {} - }, - checks: [], - initialize: function(group,options) { - if (!Form.Check) { throw 'required Class Form.Check not found'; } - this.setOptions(options); - group = $(group); - if (!group) { return this; } - var checkboxes = group.getElements('input[type=checkbox]'); - checkboxes.each(this.addCheck,this); - }, - addCheck: function(checkbox) { - var initialValues = this.options.initialValues[checkbox.get('name')]; - var checkOptions = {}; - checkOptions.checked = initialValues ? 
initialValues.contains(checkbox.get('value')) : checkbox.get('checked'); - checkOptions.disabled = checkbox.get('disabled'); - checkbox.store('Form.CheckGroup::data',this); - var check = checkbox.retrieve('Form.Check::data') || new Form.Check(checkbox, Object.append(checkOptions,this.options.checkOptions)); - this.checks.push(check); - }, - checkAll: function() { - this.checks.each(function(check) { if (!check.checked) { check.toggle(); } }); - }, - disable: function() { - this.checks.each(function(check) { check.disable(); }); - this.fireEvent('disable',this); - }, - enable: function() { - this.checks.each(function(check) { check.enable(); }); - this.fireEvent('enable',this); - }, - uncheckAll: function() { - this.checks.each(function(check) { if (check.checked) { check.toggle(); } }); - } -}); diff --git a/couchpotato/static/scripts/library/form_replacement/form_dropdown.js b/couchpotato/static/scripts/library/form_replacement/form_dropdown.js deleted file mode 100644 index 86c2c3c8da..0000000000 --- a/couchpotato/static/scripts/library/form_replacement/form_dropdown.js +++ /dev/null @@ -1,325 +0,0 @@ -/* ---- -name: Form.Dropdown -description: Class to represent a select input -authors: Bryan J Swift (@bryanjswift) -license: MIT-style license. -requires: [Core/Class.Extras, Core/Element, Core/Element.Event, Form-Replacement/Form.SelectOption] -provides: Form.Dropdown -... 
-*/ -if (typeof window.Form === 'undefined') { window.Form = {}; } - -Form.Dropdown = new Class({ - Implements: [Events,Options], - options: { - excludedValues: [], - initialValue: null, - mouseLeaveDelay: 350, - selectOptions: {}, - typeDelay: 500 - }, - bound: {}, - dropdownOptions: [], - element: null, - events: {}, - highlighted: null, - input: null, - open: true, - selected: null, - selection: null, - typed: { lastKey: null, value: null, timer: null, pressed: null, shortlist: [], startkey: null }, - value: null, - initialize: function(select,options) { - this.setOptions(options); - select = $(select); - this.bound = { - collapse: this.collapse.bind(this), - expand: this.expand.bind(this), - focus: this.focus.bind(this), - highlightOption: this.highlightOption.bind(this), - keydown: this.keydown.bind(this), - keypress: this.keypress.bind(this), - mouseenterDropdown: this.mouseenterDropdown.bind(this), - mouseleaveDropdown: this.mouseleaveDropdown.bind(this), - mousemove: this.mousemove.bind(this), - removeHighlightOption: this.removeHighlightOption.bind(this), - select: this.select.bind(this), - toggle: this.toggle.bind(this) - }; - this.events = { mouseenter: this.bound.mouseenterDropdown, mouseleave: this.bound.mouseleaveDropdown }; - this.value = this.options.initialValue; - this.initializeCreateElements(select); - var optionElements = select.getElements('option'); - this.updateOptions(optionElements); - this.element.replaces(select); - document.addEvent('click', this.bound.collapse); - var eventName = Browser.ie || Browser.webkit ? 'keydown' : 'keypress'; - var target = Browser.ie ? $(document.body) : window; - target.addEvent('keydown',this.bound.keydown).addEvent(eventName,this.bound.keypress); - }, - initializeCreateElements: function(select) { - var id = select.get('id'); - var dropdown = new Element('div', { - 'class': (select.get('class') + ' select').trim(), - 'id': (id && id !== '') ? 
id + 'Dropdown' : '' - }); - var menu = new Element('div', {'class': 'menu'}); - var list = new Element('div', {'class': 'list'}); - var options = new Element('ul', {'class': 'options'}); - dropdown.adopt(menu.adopt(list.adopt(options))); - var dropdownSelection = new Element('div', { - 'class': 'selection', - events: {click: this.bound.toggle} - }); - var dropdownBackground = new Element('div', { 'class': 'dropdownBackground' }); - var selection = new Element('span', { 'class': 'selectionDisplay' }); - var input = new Element('input', { - type:'text', - id: id, - name: select.get('name'), - events: { - focus: this.bound.focus - } - }); - dropdownSelection.adopt(dropdownBackground, selection, input); - dropdown.adopt(dropdownSelection); - this.element = dropdown; - this.selection = selection; - this.input = input; - return options; - }, - collapse: function(e) { - this.open = false; - this.element.removeClass('active').removeClass('dropdown-active'); - if (this.selected) { this.selected.removeHighlight(); } - this.element.removeEvents(this.events); - this.fireEvent('collapse', [this, e]); - }, - deselect: function(option) { - option.deselect(); - }, - destroy: function() { - this.element = null; - this.selection = null; - this.input = null; - }, - disable: function() { - this.collapse(); - this.input.set('disabled', 'disabled').removeEvents({blur:this.bound.blur, focus:this.bound.focus}); - this.selection.getParent().removeEvent('click', this.bound.toggle); - this.fireEvent('disable', this); - }, - enable: function() { - this.input.erase('disabled').addEvents({blur:this.bound.blur, focus:this.bound.focus}); - this.selection.getParent().addEvent('click', this.bound.toggle); - this.fireEvent('enable', this); - }, - expand: function(e) { - clearTimeout(this.collapseInterval); - var evt = e ? 
(e).stop() : null; - this.open = true; - this.input.focus(); - this.element.addClass('active').addClass('dropdown-active'); - if (this.selected) { this.selected.highlight(); } - this.element.addEvents(this.events); - this.fireEvent('expand', [this, e]); - }, - focus: function(e) { - this.expand(); - }, - foundMatch: function(e) { - var typed = this.typed; - var shortlist = typed.shortlist; - var value = typed.value; - var i = 0; - var optionsLength = shortlist.length; - var excludedValues = this.options.excludedValues; - var found = false; - if (!optionsLength) { return; } - var option; - do { - option = shortlist[i]; - if (option.text.toLowerCase().indexOf(value) === 0 && !excludedValues.contains(option.value)) { - found = true; - option.highlight(e); - typed.pressed = i + 1; - i = optionsLength; - } - i = i + 1; - } while(i < optionsLength); - return found; - }, - highlightOption: function(option) { - if (this.highlighted) { this.highlighted.removeHighlight(); } - this.highlighted = option; - }, - isOpen: function() { - return this.open; - }, - keydown: function(e) { - if (!this.open) { return; } - this.dropdownOptions.each(function(option) { option.disable(); }); - document.addEvent('mousemove', this.bound.mousemove); - }, - keypress: function(e) { - if (!this.open) { return; } - (e).stop(); - - var code = e.code, key = e.key; - - var typed = this.typed; - var match, i, options, option, optionsLength, found, first, excludedValues, shortlist; - switch(code) { - case 38: // up - case 37: // left - if (typed.pressed > 0) { typed.pressed = typed.pressed - 1; } - if (!this.highlighted) { this.dropdownOptions.getLast().highlight(e); break; } - match = this.highlighted.element.getPrevious(); - match = match ? 
match.retrieve('Form.SelectOption::data') : this.dropdownOptions.getLast(); - match.highlight(e); - break; - case 40: // down - case 39: // right - if (typed.shortlist.length > 0) { typed.pressed = typed.pressed + 1; } - if (!this.highlighted) { this.dropdownOptions[0].highlight(e); break; } - match = this.highlighted.element.getNext(); - match = match ? match.retrieve('Form.SelectOption::data') : this.dropdownOptions[0]; - match.highlight(e); - break; - case 13: // enter - e.stop(); - case 9: // tab - skips the stop event but selects the item - this.highlighted.select(); - break; - case 27: // esc - e.stop(); - this.toggle(); - break; - case 32: // space - default: // anything else - if (!(code >= 48 && code <= 122 && (code <= 57 || (code >= 65 && code <= 90) || code >=97) || code === 32)) { - break; - } - if (evt.control || evt.alt || evt.meta) { return; } - // alphanumeric or space - key = code === 32 ? ' ' : key; - clearTimeout(typed.timer); - options = this.dropdownOptions; - optionsLength = options.length; - excludedValues = this.options.excludedValues; - if (typed.timer === null) { // timer is expired - typed.shortlist = []; - if (key === typed.lastKey || key === typed.startkey) { // get next - typed.pressed = typed.pressed + 1; - typed.value = key; - } else { // get first - typed = this.resetTyped(); - typed.value = key; - typed.startkey = key; - typed.pressed = 1; - } - typed.timer = this.resetTyped.delay(500, this); - } else { - if (key === typed.lastKey) { // check for match, if no match get next - typed.value = typed.value + key; - if (this.foundMatch(e)) { // got a match so break - typed.timer = this.resetTyped.delay(500, this); - break; - } else { // no match fall through - typed.shortlist = []; - typed.value = key; - typed.pressed = typed.pressed + 1; - typed.timer = null; - } - } else { // reset timer, get first match, set pressed to found position - typed.timer = this.resetTyped.delay(500, this); - typed.value = typed.value + key; - typed.startkey 
= typed.value.substring(0, 1); - typed.lastKey = key; - this.foundMatch(e); - break; - } - } - typed.lastKey = key; - shortlist = typed.shortlist; - i = 0; - found = 0; - do { - option = options[i]; - if (option.text.toLowerCase().indexOf(key) === 0 && !excludedValues.contains(option.value)) { - if (found === 0) { first = option; } - found = found + 1; - if (found === typed.pressed) { option.highlight(e); } - shortlist.push(option); - } - i = i + 1; - } while(i < optionsLength); - if (typed.pressed > found) { - first.highlight(e); - typed.pressed = 1; - } - break; - } - }, - mouseenterDropdown: function() { - clearTimeout(this.collapseInterval); - }, - mouseleaveDropdown: function() { - this.collapseInterval = this.options.mouseLeaveDelay ? this.collapse.delay(this.options.mouseLeaveDelay,this) : null; - }, - mousemove: function() { - this.dropdownOptions.each(function(option) { option.enable(); }); - document.removeEvent('mousemove', this.bound.mousemove); - }, - removeHighlightOption: function(option) { - this.highlighted = null; - }, - reset: function() { - if (this.options.initialValue) { - this.dropdownOptions.each(function(o) { - if (o.value === this.options.initialValue) { o.select(); } - }, this); - } else { - this.dropdownOptions[0].select(); - } - }, - resetTyped: function() { - var typed = this.typed; - typed.value = null; - typed.timer = null; - return typed; - }, - select: function(option, e) { - this.dropdownOptions.each(this.deselect); - this.selection.set('html', option.element.get('html')); - var oldValue = this.value; - this.value = option.value; - this.input.set('value', option.value); - this.selected = option; - this.fireEvent('select', [this, e]); - if (oldValue && oldValue !== this.value) { this.fireEvent('change', [this, e]); } - this.collapse(e); - }, - toggle: function(e) { - if (this.open) { this.collapse(e); } - else { this.expand(e); } - }, - updateOptions: function(optionElements) { - var optionsList = 
this.element.getElement('ul').empty(), - dropdownOptions = this.dropdownOptions.empty(), - selectOptions = this.options.selectOptions; - optionElements.each(function(opt) { - var option = new Form.SelectOption(opt, selectOptions); - option.addEvents({ - 'onHighlight':this.bound.highlightOption, - 'onRemoveHighlight':this.bound.removeHighlightOption, - 'onSelect':this.bound.select - }).owner = this; - if (option.value === this.options.initialValue || opt.get('selected')) { this.select(option); } - dropdownOptions.push(option); - optionsList.adopt(option.element); - }, this); - if (!this.selected && optionElements[0]) { optionElements[0].retrieve('Form.SelectOption::data').select(); } - } -}); diff --git a/couchpotato/static/scripts/library/form_replacement/form_radio.js b/couchpotato/static/scripts/library/form_replacement/form_radio.js deleted file mode 100644 index 245aa4d370..0000000000 --- a/couchpotato/static/scripts/library/form_replacement/form_radio.js +++ /dev/null @@ -1,34 +0,0 @@ -/* ---- -name: Form.Radio -description: Class to represent a radio button -authors: Bryan J Swift (@bryanjswift) -license: MIT-style license. -requires: [Core/Class.Extras, Core/Element, Core/Element.Event, Form-Replacement/Form.Check] -provides: Form.Radio -... -*/ -if (typeof window.Form === 'undefined') { window.Form = {}; } - -Form.Radio = new Class({ - Extends: Form.Check, - config: { - elementClass: 'radio', - storage: 'Form.Radio::data' - }, - initialize: function(input,options) { - this.parent(input,options); - }, - toggle: function(e) { - if (this.element.hasClass('checked') || this.disabled) { return; } - var evt; - if (e) { evt = (e).stop(); } - if (this.checked) { - this.uncheck(); - } else { - this.check(); - } - this.fireEvent(this.checked ? 
'onCheck' : 'onUncheck',this); - this.fireEvent('onChange',this); - } -}); \ No newline at end of file diff --git a/couchpotato/static/scripts/library/form_replacement/form_radiogroup.js b/couchpotato/static/scripts/library/form_replacement/form_radiogroup.js deleted file mode 100644 index f0000b5327..0000000000 --- a/couchpotato/static/scripts/library/form_replacement/form_radiogroup.js +++ /dev/null @@ -1,59 +0,0 @@ -/* ---- -name: Form.RadioGroup -description: Class to represent a group of Form.Radio buttons -authors: Bryan J Swift (@bryanjswift) -license: MIT-style license. -requires: [Core/Class.Extras, Core/Element, Core/Element.Event, Form-Replacement/Form.Radio] -provides: Form.RadioGroup -... -*/ -if (typeof window.Form === 'undefined') { window.Form = {}; } - -Form.RadioGroup = new Class({ - Implements: [Events,Options], - options: { - radioOptions: {}, - initialValues: {} - }, - bound: {}, - radios: [], - value: null, - initialize: function(group,options) { - if (!Form.Radio) { throw 'required Class Form.Radio not found'; } - this.setOptions(options); - this.bound = { select: this.select.bind(this) }; - group = $(group); - if (!group) { return this; } - var radios = group.getElements('input[type=radio]'); - radios.each(this.addCheck,this); - }, - addCheck: function(radio,i) { - var initialValues = this.options.initialValues[radio.get('name')]; - var radioOptions = {}; - radioOptions.checked = initialValues ? initialValues.contains(radio.get('value')) : radio.get('checked'); - radioOptions.disabled = radio.get('disabled'); - var check = (radio.retrieve('Form.Radio::data') - || new Form.Radio(radio,Object.append(radioOptions,this.options.radioOptions))); - check.addEvent('onCheck',this.bound.select); - if (check.checked) { i ? 
this.changed(check) : this.value = check.value; } - radio.store('Form.RadioGroup::data',this); - this.radios.push(check); - }, - changed: function(radio) { - this.value = radio.value; - this.fireEvent('onChange',this); - }, - disable: function() { - this.radios.each(function(radio) { radio.disable(); }); - }, - enable: function() { - this.radios.each(function(radio) { radio.enable(); }); - }, - select: function(checkedRadio) { - this.radios.each(function(radio) { - if (radio.checked && radio.value !== checkedRadio.value) { radio.uncheck(); } - }); - if (checkedRadio.value !== this.value) { this.changed(checkedRadio); } - } -}); diff --git a/couchpotato/static/scripts/library/form_replacement/form_selectoption.js b/couchpotato/static/scripts/library/form_replacement/form_selectoption.js deleted file mode 100644 index 2b81140367..0000000000 --- a/couchpotato/static/scripts/library/form_replacement/form_selectoption.js +++ /dev/null @@ -1,93 +0,0 @@ -/* ---- -name: Form.SelectOption -description: Class to represent an option for Form.Dropdown -authors: Bryan J Swift (@bryanjswift) -license: MIT-style license. -requires: [Core/Class.Extras, Core/Element, Core/Element.Event] -provides: Form.SelectOption -... 
-*/ -if (typeof window.Form === 'undefined') { window.Form = {}; } - -Form.SelectOption = new Class({ - Implements: [Events, Options], - options: { - optionTag: 'li', - selected: false - }, - config: { - highlightedClass: 'highlighted', - optionClass: 'option', - selectedClass: 'selected' - }, - element: null, - bound: {}, - option: null, - selected: false, - text: null, - value: null, - initialize: function(option, options) { - this.setOptions(options); - option = $(option); - this.option = option; - this.bound = { - deselect: this.deselect.bind(this), - highlight: this.highlight.bind(this), - removeHighlight: this.removeHighlight.bind(this), - select: this.select.bind(this) - }; - this.text = option.get('text'); - this.value = option.get('value'); - this.element = new Element(this.options.optionTag, { - 'class': (option.get('class') + ' ' + this.config.optionClass).trim(), - 'html': option.get('html'), - 'events': { - click: this.bound.select, - mouseenter: this.bound.highlight, - mouseleave: this.bound.removeHighlight - } - }); - this.element.store('Form.SelectOption::data', this); - option.store('Form.SelectOption::data', this); - }, - deselect: function(e) { - this.fireEvent('onDeselect', [this, e]); - this.element.removeClass(this.config.selectedClass).addEvent('click', this.bound.select); - this.selected = false; - }, - destroy: function() { - this.element = null; - this.bound = null; - this.option = null; - }, - disable: function() { - this.element.removeEvents({ - mouseenter: this.bound.highlight, - mouseleave: this.bound.removeHighlight - }); - this.fireEvent('onDisable', this); - }, - enable: function() { - this.element.addEvents({ - mouseenter: this.bound.highlight, - mouseleave: this.bound.removeHighlight - }); - this.fireEvent('onEnable', this); - }, - highlight: function(e) { - this.fireEvent('onHighlight', [this, e]); - this.element.addClass(this.config.highlightedClass); - return this; - }, - removeHighlight: function(e) { - 
this.fireEvent('onRemoveHighlight', [this, e]); - this.element.removeClass(this.config.highlightedClass); - return this; - }, - select: function(e) { - this.fireEvent('onSelect', [this, e]); - this.element.addClass(this.config.selectedClass).removeEvent('click', this.bound.select); - this.selected = true; - } -}); diff --git a/couchpotato/static/scripts/library/mootools_tween_css3.js b/couchpotato/static/scripts/library/mootools_tween_css3.js deleted file mode 100644 index 8f6460b4a2..0000000000 --- a/couchpotato/static/scripts/library/mootools_tween_css3.js +++ /dev/null @@ -1,141 +0,0 @@ -/* ---- -name: Fx.Tween.CSS3.Replacement -script: Fx.Tween.CSS3.Replacement.js -license: MIT-style license. -description: Same behavior like Fx.Tween but tries to use native CSS3 transition if possible (Overwrites Fx.Tween). -copyright: Copyright (c) 2010, Dipl.-Ing. (FH) Andrц╘ Fiedler <kontakt at visualdrugs dot net>, based on code by eskimoblood (mootools users group) -authors: [Andrц╘ Fiedler, eskimoblood] - -requires: [Core/Class.Extras, Core/Element.Event, Core/Element.Style, Core/Fx.Tween] - -provides: [Fx.Tween] -... -*/ - -Element.NativeEvents.transitionend = 2; -Element.NativeEvents.webkitTransitionEnd = 2; -Element.NativeEvents.oTransitionEnd = 2; -Element.NativeEvents.msTransitionEnd = 2; - -Element.Events.transitionend = { - base: Browser.safari || Browser.chrome ? 'webkitTransitionEnd' : (Browser.opera ? 'oTransitionEnd' : (Browser.ie && Browser.version > 8 ? 'msTransitionEnd' : 'transitionend')) -}; - -Event.implement({ - - getPropertyName: function(){ - return this.event.propertyName; - }, - - getElapsedTime: function(nativeTime){ - return nativeTime ? this.event.elapsedTime : (this.event.elapsedTime * 1000).toInt(); - } - -}); - -Element.implement({ - - supportStyle: function(style){ - var value = this.style[style]; - return !!(value || value === ''); - }, - - supportVendorStyle: function(style){ - var prefixedStyle = null; - return this.supportStyle(style) ? 
style : ['webkit', 'Moz', 'o', 'ms'].some(function(prefix){ - prefixedStyle = prefix + style.capitalize(); - return this.supportStyle(prefixedStyle); - }, this) ? prefixedStyle : null; - } - -}); - -Fx.TweenCSS2 = Fx.Tween; - -Fx.Tween = new Class({ - - Extends: Fx.TweenCSS2, - - transitionTimings: { - 'linear' : '0,0,1,1', - 'expo:in' : '0.71,0.01,0.83,0', - 'expo:out' : '0.14,1,0.32,0.99', - 'expo:in:out' : '0.85,0,0.15,1', - 'circ:in' : '0.34,0,0.96,0.23', - 'circ:out' : '0,0.5,0.37,0.98', - 'circ:in:out' : '0.88,0.1,0.12,0.9', - 'sine:in' : '0.22,0.04,0.36,0', - 'sine:out' : '0.04,0,0.5,1', - 'sine:in:out' : '0.37,0.01,0.63,1', - 'quad:in' : '0.14,0.01,0.49,0', - 'quad:out' : '0.01,0,0.43,1', - 'quad:in:out' : '0.47,0.04,0.53,0.96', - 'cubic:in' : '0.35,0,0.65,0', - 'cubic:out' : '0.09,0.25,0.24,1', - 'cubic:in:out' : '0.66,0,0.34,1', - 'quart:in' : '0.69,0,0.76,0.17', - 'quart:out' : '0.26,0.96,0.44,1', - 'quart:in:out' : '0.76,0,0.24,1', - 'quint:in' : '0.64,0,0.78,0', - 'quint:out' : '0.22,1,0.35,1', - 'quint:in:out' : '0.9,0,0.1,1' - }, - - initialize: function(element, options){ - options.transition = options.transition || 'sine:in:out'; - this.parent(element, options); - if (typeof this.options.transition != 'string') alert('Only short notated transitions (like \'sine:in\') are supported by Fx.Tween.CSS3'); - this.options.transition = this.options.transition.toLowerCase(); - this.transition = this.element.supportVendorStyle('transition'); - this.css3Supported = !!this.transition && !!this.transitionTimings[this.options.transition]; - }, - - check: function(){ - if (!this.boundComplete) return true; - return this.parent(); - }, - - start: function(property, from, to){ - if (this.css3Supported){ - if (!this.check(property, from, to)) return this; - var args = Array.flatten(arguments); - this.property = this.options.property || args.shift(); - var parsed = this.prepare(this.element, this.property, args); - this.from = parsed.from; - this.to = parsed.to; - 
this.boundComplete = function(event){ - if (event.getPropertyName() == this.property /* && event.getElapsedTime() == this.options.duration */ ){ - this.element.removeEvent('transitionend', this.boundComplete); - this.boundComplete = null; - this.fireEvent('complete', this); - } - }.bind(this); - this.element.addEvent('transitionend', this.boundComplete); - var trans = function(){ - this.element.setStyle(this.transition, this.property + ' ' + this.options.duration + 'ms cubic-bezier(' + this.transitionTimings[this.options.transition] + ')'); - this.element.setStyle(this.property, this.to[0].value + this.options.unit); - }.bind(this); - if (args[1]){ - this.element.setStyle(this.transition, 'none'); - this.element.setStyle(this.property, this.from[0].value + this.options.unit); - trans.delay(0.1); - } else - trans(); - this.fireEvent('start', this); - return this; - } - return this.parent(property, from, to); - }, - - cancel: function(){ - if (this.css3Supported){ - this.element.setStyle(this.transition, 'none'); - this.element.removeEvent('transitionend', this.boundComplete); - this.boundComplete = null; - } - this.parent(); - return this; - } - -}); diff --git a/couchpotato/static/scripts/library/prefix_free.js b/couchpotato/static/scripts/library/prefix_free.js deleted file mode 100644 index b6d9812a0e..0000000000 --- a/couchpotato/static/scripts/library/prefix_free.js +++ /dev/null @@ -1,487 +0,0 @@ -/** - * StyleFix 1.0.3 & PrefixFree 1.0.7 - * @author Lea Verou - * MIT license - */ - -(function(){ - -if(!window.addEventListener) { - return; -} - -var self = window.StyleFix = { - link: function(link) { - try { - // Ignore stylesheets with data-noprefix attribute as well as alternate stylesheets - if(link.rel !== 'stylesheet' || link.hasAttribute('data-noprefix')) { - return; - } - } - catch(e) { - return; - } - - var url = link.href || link.getAttribute('data-href'), - base = url.replace(/[^\/]+$/, ''), - base_scheme = (/^[a-z]{3,10}:/.exec(base) || [''])[0], - 
base_domain = (/^[a-z]{3,10}:\/\/[^\/]+/.exec(base) || [''])[0], - base_query = /^([^?]*)\??/.exec(url)[1], - parent = link.parentNode, - xhr = new XMLHttpRequest(), - process; - - xhr.onreadystatechange = function() { - if(xhr.readyState === 4) { - process(); - } - }; - - process = function() { - var css = xhr.responseText; - - if(css && link.parentNode && (!xhr.status || xhr.status < 400 || xhr.status > 600)) { - css = self.fix(css, true, link); - - // Convert relative URLs to absolute, if needed - if(base) { - css = css.replace(/url\(\s*?((?:"|')?)(.+?)\1\s*?\)/gi, function($0, quote, url) { - if(/^([a-z]{3,10}:|#)/i.test(url)) { // Absolute & or hash-relative - return $0; - } - else if(/^\/\//.test(url)) { // Scheme-relative - // May contain sequences like /../ and /./ but those DO work - return 'url("' + base_scheme + url + '")'; - } - else if(/^\//.test(url)) { // Domain-relative - return 'url("' + base_domain + url + '")'; - } - else if(/^\?/.test(url)) { // Query-relative - return 'url("' + base_query + url + '")'; - } - else { - // Path-relative - return 'url("' + base + url + '")'; - } - }); - - // behavior URLs shoudnБ─≥t be converted (Issue #19) - // base should be escaped before added to RegExp (Issue #81) - var escaped_base = base.replace(/([\\\^\$*+[\]?{}.=!:(|)])/g,"\\$1"); - css = css.replace(RegExp('\\b(behavior:\\s*?url\\(\'?"?)' + escaped_base, 'gi'), '$1'); - } - - var style = document.createElement('style'); - style.textContent = css; - style.media = link.media; - style.disabled = link.disabled; - style.setAttribute('data-href', link.getAttribute('href')); - - parent.insertBefore(style, link); - parent.removeChild(link); - - style.media = link.media; // Duplicate is intentional. 
See issue #31 - } - }; - - try { - xhr.open('GET', url); - xhr.send(null); - } catch (e) { - // Fallback to XDomainRequest if available - if (typeof XDomainRequest != "undefined") { - xhr = new XDomainRequest(); - xhr.onerror = xhr.onprogress = function() {}; - xhr.onload = process; - xhr.open("GET", url); - xhr.send(null); - } - } - - link.setAttribute('data-inprogress', ''); - }, - - styleElement: function(style) { - if (style.hasAttribute('data-noprefix')) { - return; - } - var disabled = style.disabled; - - style.textContent = self.fix(style.textContent, true, style); - - style.disabled = disabled; - }, - - styleAttribute: function(element) { - var css = element.getAttribute('style'); - - css = self.fix(css, false, element); - - element.setAttribute('style', css); - }, - - process: function() { - // Linked stylesheets - $('link[rel="stylesheet"]:not([data-inprogress])').forEach(StyleFix.link); - - // Inline stylesheets - $('style').forEach(StyleFix.styleElement); - - // Inline styles - $('[style]').forEach(StyleFix.styleAttribute); - }, - - register: function(fixer, index) { - (self.fixers = self.fixers || []) - .splice(index === undefined? 
self.fixers.length : index, 0, fixer); - }, - - fix: function(css, raw, element) { - for(var i=0; i<self.fixers.length; i++) { - css = self.fixers[i](css, raw, element) || css; - } - - return css; - }, - - camelCase: function(str) { - return str.replace(/-([a-z])/g, function($0, $1) { return $1.toUpperCase(); }).replace('-',''); - }, - - deCamelCase: function(str) { - return str.replace(/[A-Z]/g, function($0) { return '-' + $0.toLowerCase() }); - } -}; - -/************************************** - * Process styles - **************************************/ -(function(){ - setTimeout(function(){ - $('link[rel="stylesheet"]').forEach(StyleFix.link); - }, 10); - - document.addEventListener('DOMContentLoaded', StyleFix.process, false); -})(); - -function $(expr, con) { - return [].slice.call((con || document).querySelectorAll(expr)); -} - -})(); - -/** - * PrefixFree - */ -(function(root){ - -if(!window.StyleFix || !window.getComputedStyle) { - return; -} - -// Private helper -function fix(what, before, after, replacement, css) { - what = self[what]; - - if(what.length) { - var regex = RegExp(before + '(' + what.join('|') + ')' + after, 'gi'); - - css = css.replace(regex, replacement); - } - - return css; -} - -var self = window.PrefixFree = { - prefixCSS: function(css, raw, element) { - var prefix = self.prefix; - - // Gradient angles hotfix - if(self.functions.indexOf('linear-gradient') > -1) { - // Gradients are supported with a prefix, convert angles to legacy - css = css.replace(/(\s|:|,)(repeating-)?linear-gradient\(\s*(-?\d*\.?\d*)deg/ig, function ($0, delim, repeating, deg) { - return delim + (repeating || '') + 'linear-gradient(' + (90-deg) + 'deg'; - }); - } - - css = fix('functions', '(\\s|:|,)', '\\s*\\(', '$1' + prefix + '$2(', css); - css = fix('keywords', '(\\s|:)', '(\\s|;|\\}|$)', '$1' + prefix + '$2$3', css); - css = fix('properties', '(^|\\{|\\s|;)', '\\s*:', '$1' + prefix + '$2:', css); - - // Prefix properties *inside* values (issue #8) - if 
(self.properties.length) { - var regex = RegExp('\\b(' + self.properties.join('|') + ')(?!:)', 'gi'); - - css = fix('valueProperties', '\\b', ':(.+?);', function($0) { - return $0.replace(regex, prefix + "$1") - }, css); - } - - if(raw) { - css = fix('selectors', '', '\\b', self.prefixSelector, css); - css = fix('atrules', '@', '\\b', '@' + prefix + '$1', css); - } - - // Fix double prefixing - css = css.replace(RegExp('-' + prefix, 'g'), '-'); - - // Prefix wildcard - css = css.replace(/-\*-(?=[a-z]+)/gi, self.prefix); - - return css; - }, - - property: function(property) { - return (self.properties.indexOf(property)? self.prefix : '') + property; - }, - - value: function(value, property) { - value = fix('functions', '(^|\\s|,)', '\\s*\\(', '$1' + self.prefix + '$2(', value); - value = fix('keywords', '(^|\\s)', '(\\s|$)', '$1' + self.prefix + '$2$3', value); - - // TODO properties inside values - - return value; - }, - - // Warning: Prefixes no matter what, even if the selector is supported prefix-less - prefixSelector: function(selector) { - return selector.replace(/^:{1,2}/, function($0) { return $0 + self.prefix }) - }, - - // Warning: Prefixes no matter what, even if the property is supported prefix-less - prefixProperty: function(property, camelCase) { - var prefixed = self.prefix + property; - - return camelCase? StyleFix.camelCase(prefixed) : prefixed; - } -}; - -/************************************** - * Properties - **************************************/ -(function() { - var prefixes = {}, - properties = [], - shorthands = {}, - style = getComputedStyle(document.documentElement, null), - dummy = document.createElement('div').style; - - // Why are we doing this instead of iterating over properties in a .style object? Cause Webkit won't iterate over those. 
- var iterate = function(property) { - if(property.charAt(0) === '-') { - properties.push(property); - - var parts = property.split('-'), - prefix = parts[1]; - - // Count prefix uses - prefixes[prefix] = ++prefixes[prefix] || 1; - - // This helps determining shorthands - while(parts.length > 3) { - parts.pop(); - - var shorthand = parts.join('-'); - - if(supported(shorthand) && properties.indexOf(shorthand) === -1) { - properties.push(shorthand); - } - } - } - }, - supported = function(property) { - return StyleFix.camelCase(property) in dummy; - } - - // Some browsers have numerical indices for the properties, some don't - if(style.length > 0) { - for(var i=0; i<style.length; i++) { - iterate(style[i]) - } - } - else { - for(var property in style) { - iterate(StyleFix.deCamelCase(property)); - } - } - - // Find most frequently used prefix - var highest = {uses:0}; - for(var prefix in prefixes) { - var uses = prefixes[prefix]; - - if(highest.uses < uses) { - highest = {prefix: prefix, uses: uses}; - } - } - - self.prefix = '-' + highest.prefix + '-'; - self.Prefix = StyleFix.camelCase(self.prefix); - - self.properties = []; - - // Get properties ONLY supported with a prefix - for(var i=0; i<properties.length; i++) { - var property = properties[i]; - - if(property.indexOf(self.prefix) === 0) { // we might have multiple prefixes, like Opera - var unprefixed = property.slice(self.prefix.length); - - if(!supported(unprefixed)) { - self.properties.push(unprefixed); - } - } - } - - // IE fix - if(self.Prefix == 'Ms' - && !('transform' in dummy) - && !('MsTransform' in dummy) - && ('msTransform' in dummy)) { - self.properties.push('transform', 'transform-origin'); - } - - self.properties.sort(); -})(); - -/************************************** - * Values - **************************************/ -(function() { -// Values that might need prefixing -var functions = { - 'linear-gradient': { - property: 'backgroundImage', - params: 'red, teal' - }, - 'calc': { - property: 
'width', - params: '1px + 5%' - }, - 'element': { - property: 'backgroundImage', - params: '#foo' - }, - 'cross-fade': { - property: 'backgroundImage', - params: 'url(a.png), url(b.png), 50%' - } -}; - - -functions['repeating-linear-gradient'] = -functions['repeating-radial-gradient'] = -functions['radial-gradient'] = -functions['linear-gradient']; - -var keywords = { - 'initial': 'color', - 'zoom-in': 'cursor', - 'zoom-out': 'cursor', - 'box': 'display', - 'flexbox': 'display', - 'inline-flexbox': 'display', - 'flex': 'display', - 'inline-flex': 'display' -}; - -self.functions = []; -self.keywords = []; - -var style = document.createElement('div').style; - -function supported(value, property) { - style[property] = ''; - style[property] = value; - - return !!style[property]; -} - -for (var func in functions) { - var test = functions[func], - property = test.property, - value = func + '(' + test.params + ')'; - - if (!supported(value, property) - && supported(self.prefix + value, property)) { - // It's supported, but with a prefix - self.functions.push(func); - } -} - -for (var keyword in keywords) { - var property = keywords[keyword]; - - if (!supported(keyword, property) - && supported(self.prefix + keyword, property)) { - // It's supported, but with a prefix - self.keywords.push(keyword); - } -} - -})(); - -/************************************** - * Selectors and @-rules - **************************************/ -(function() { - -var -selectors = { - ':read-only': null, - ':read-write': null, - ':any-link': null, - '::selection': null -}, - -atrules = { - 'keyframes': 'name', - 'viewport': null, - 'document': 'regexp(".")' -}; - -self.selectors = []; -self.atrules = []; - -var style = root.appendChild(document.createElement('style')); - -function supported(selector) { - style.textContent = selector + '{}'; // Safari 4 has issues with style.innerHTML - - return !!style.sheet.cssRules.length; -} - -for(var selector in selectors) { - var test = selector + 
(selectors[selector]? '(' + selectors[selector] + ')' : ''); - - if(!supported(test) && supported(self.prefixSelector(test))) { - self.selectors.push(selector); - } -} - -for(var atrule in atrules) { - var test = atrule + ' ' + (atrules[atrule] || ''); - - if(!supported('@' + test) && supported('@' + self.prefix + test)) { - self.atrules.push(atrule); - } -} - -root.removeChild(style); - -})(); - -// Properties that accept properties as their value -self.valueProperties = [ - 'transition', - 'transition-property' -] - -// Add class for current prefix -root.className += ' ' + self.prefix; - -StyleFix.register(self.prefixCSS); - - -})(document.documentElement); \ No newline at end of file diff --git a/couchpotato/static/scripts/library/question.js b/couchpotato/static/scripts/library/question.js index cab634657c..bcc3c9297c 100644 --- a/couchpotato/static/scripts/library/question.js +++ b/couchpotato/static/scripts/library/question.js @@ -1,54 +1,67 @@ var Question = new Class( { initialize : function(question, hint, answers) { - var self = this + var self = this; - self.question = question - self.hint = hint - self.answers = answers + self.question = question; + self.hint = hint; + self.answers = answers; - self.createQuestion() + self.createQuestion(); self.answers.each(function(answer) { - self.createAnswer(answer) - }) - self.createMask() - - }, - - createMask : function() { - var self = this + self.createAnswer(answer); + }); - self.mask = new Element('div.mask').fade('hide').inject(document.body).fade('in'); }, createQuestion : function() { - - this.container = new Element('div', { - 'class' : 'question' - }).adopt( - new Element('h3', { - 'html': this.question - }), - new Element('div.hint', { - 'html': this.hint - }) - ).inject(document.body) - - this.container.position( { - 'position' : 'center' - }); + var self = this, + h3, hint; + + self.container = new Element('div.mask.question') + .grab(self.inner = new Element('div.inner').adopt( + h3 = new 
Element('h3', { + 'html': this.question + }), + hint = this.hint ? new Element('div.hint', { + 'html': this.hint + }) : null + ) + ).inject(document.body); + + requestTimeout(function(){ + self.container.addClass('show'); + + self.inner.getElements('> *').each(function(el, nr){ + dynamics.css(el, { + opacity: 0, + translateY: 50 + }); + + dynamics.animate(el, { + opacity: 1, + translateY: 0 + }, { + type: dynamics.spring, + frequency: 200, + friction: 300, + duration: 800, + delay: 400 + (nr * 100) + }); + }); + }, 10); }, createAnswer : function(options) { - var self = this + var self = this; var answer = new Element('a', Object.merge(options, { - 'class' : 'answer button '+(options['class'] || '')+(options['cancel'] ? ' cancel' : '') - })).inject(this.container) + 'class' : 'answer button '+(options['class'] || '')+(options.cancel ? ' cancel' : '') + })).inject(this.inner); if (options.cancel) { - answer.addEvent('click', self.close.bind(self)) + answer.addEvent('click', self.close.bind(self)); } else if (options.request) { answer.addEvent('click', function(e){ @@ -56,24 +69,52 @@ var Question = new Class( { new Request(Object.merge(options, { 'url': options.href, 'onComplete': function() { - (options.onComplete || function(){})() + (options.onComplete || function(){})(); self.close(); } - })).send(); + })).send(); }); } }, close : function() { var self = this; - self.mask.fade('out'); - (function(){self.mask.destroy()}).delay(1000); - - this.container.destroy(); + + var ended = function() { + self.container.dispose(); + self.container.removeEventListener('transitionend', ended); + }; + self.container.addEventListener('transitionend', ended, false); + + // Hide items + self.inner.getElements('> *').reverse().each(function(el, nr){ + dynamics.css(el, { + opacity: 1, + translateY: 0 + }); + + dynamics.animate(el, { + opacity: 0, + translateY: 50 + }, { + type: dynamics.spring, + frequency: 200, + friction: 300, + duration: 800, + anticipationSize: 175, + 
anticipationStrength: 400, + delay: nr * 100 + }); + }); + + // animate out + dynamics.setTimeout(function(){ + self.container.removeClass('show'); + }, 200); }, toElement : function() { - return this.container + return this.container; } -}) +}); diff --git a/couchpotato/static/scripts/library/scrollspy.js b/couchpotato/static/scripts/library/scrollspy.js index 2120eb5232..053aab6021 100644 --- a/couchpotato/static/scripts/library/scrollspy.js +++ b/couchpotato/static/scripts/library/scrollspy.js @@ -50,7 +50,7 @@ var ScrollSpy = new Class({ min = typeOf(self.options.min) == 'function' ? self.options.min() : self.options.min, max = typeOf(self.options.max) == 'function' ? self.options.max() : self.options.max; - if(xy >= min && (max == 0 || xy <= max)) { + if(xy >= min && (max === 0 || xy <= max)) { /* trigger enter event if necessary */ if(!self.inside) { /* record as inside */ @@ -90,4 +90,4 @@ var ScrollSpy = new Class({ addListener: function() { this.start(); } -}); \ No newline at end of file +}); diff --git a/couchpotato/static/scripts/library/spin.js b/couchpotato/static/scripts/library/spin.js deleted file mode 100644 index 6c2d0d59a9..0000000000 --- a/couchpotato/static/scripts/library/spin.js +++ /dev/null @@ -1,301 +0,0 @@ -//fgnass.github.com/spin.js#v1.2.4 -(function(window, document, undefined) { - -/** - * Copyright (c) 2011 Felix Gnass [fgnass at neteye dot de] - * Licensed under the MIT license - */ - - var prefixes = ['webkit', 'Moz', 'ms', 'O']; /* Vendor prefixes */ - var animations = {}; /* Animation rules keyed by their name */ - var useCssAnimations; - - /** - * Utility function to create elements. If no tag name is given, - * a DIV is created. Optionally properties can be passed. - */ - function createEl(tag, prop) { - var el = document.createElement(tag || 'div'); - var n; - - for(n in prop) { - el[n] = prop[n]; - } - return el; - } - - /** - * Appends children and returns the parent. 
- */ - function ins(parent /* child1, child2, ...*/) { - for (var i=1, n=arguments.length; i<n; i++) { - parent.appendChild(arguments[i]); - } - return parent; - } - - /** - * Insert a new stylesheet to hold the @keyframe or VML rules. - */ - var sheet = function() { - var el = createEl('style'); - ins(document.getElementsByTagName('head')[0], el); - return el.sheet || el.styleSheet; - }(); - - /** - * Creates an opacity keyframe animation rule and returns its name. - * Since most mobile Webkits have timing issues with animation-delay, - * we create separate rules for each line/segment. - */ - function addAnimation(alpha, trail, i, lines) { - var name = ['opacity', trail, ~~(alpha*100), i, lines].join('-'); - var start = 0.01 + i/lines*100; - var z = Math.max(1-(1-alpha)/trail*(100-start) , alpha); - var prefix = useCssAnimations.substring(0, useCssAnimations.indexOf('Animation')).toLowerCase(); - var pre = prefix && '-'+prefix+'-' || ''; - - if (!animations[name]) { - sheet.insertRule( - '@' + pre + 'keyframes ' + name + '{' + - '0%{opacity:'+z+'}' + - start + '%{opacity:'+ alpha + '}' + - (start+0.01) + '%{opacity:1}' + - (start+trail)%100 + '%{opacity:'+ alpha + '}' + - '100%{opacity:'+ z + '}' + - '}', 0); - animations[name] = 1; - } - return name; - } - - /** - * Tries various vendor prefixes and returns the first supported property. - **/ - function vendor(el, prop) { - var s = el.style; - var pp; - var i; - - if(s[prop] !== undefined) return prop; - prop = prop.charAt(0).toUpperCase() + prop.slice(1); - for(i=0; i<prefixes.length; i++) { - pp = prefixes[i]+prop; - if(s[pp] !== undefined) return pp; - } - } - - /** - * Sets multiple style properties at once. - */ - function css(el, prop) { - for (var n in prop) { - el.style[vendor(el, n)||n] = prop[n]; - } - return el; - } - - /** - * Fills in default values. 
- */ - function merge(obj) { - for (var i=1; i < arguments.length; i++) { - var def = arguments[i]; - for (var n in def) { - if (obj[n] === undefined) obj[n] = def[n]; - } - } - return obj; - } - - /** - * Returns the absolute page-offset of the given element. - */ - function pos(el) { - var o = {x:el.offsetLeft, y:el.offsetTop}; - while((el = el.offsetParent)) { - o.x+=el.offsetLeft; - o.y+=el.offsetTop; - } - return o; - } - - var defaults = { - lines: 12, // The number of lines to draw - length: 7, // The length of each line - width: 5, // The line thickness - radius: 10, // The radius of the inner circle - color: '#000', // #rgb or #rrggbb - speed: 1, // Rounds per second - trail: 100, // Afterglow percentage - opacity: 1/4, // Opacity of the lines - fps: 20, // Frames per second when using setTimeout() - zIndex: 2e9, // Use a high z-index by default - className: 'spinner', // CSS class to assign to the element - top: 'auto', // center vertically - left: 'auto' // center horizontally - }; - - /** The constructor */ - var Spinner = function Spinner(o) { - if (!this.spin) return new Spinner(o); - this.opts = merge(o || {}, Spinner.defaults, defaults); - }; - - Spinner.defaults = {}; - Spinner.prototype = { - spin: function(target) { - this.stop(); - var self = this; - var o = self.opts; - var el = self.el = css(createEl(0, {className: o.className}), {position: 'relative', zIndex: o.zIndex}); - var mid = o.radius+o.length+o.width; - var ep; // element position - var tp; // target position - - if (target) { - target.insertBefore(el, target.firstChild||null); - tp = pos(target); - ep = pos(el); - css(el, { - left: (o.left == 'auto' ? tp.x-ep.x + (target.offsetWidth >> 1) : o.left+mid) + 'px', - top: (o.top == 'auto' ? 
tp.y-ep.y + (target.offsetHeight >> 1) : o.top+mid) + 'px' - }); - } - - el.setAttribute('aria-role', 'progressbar'); - self.lines(el, self.opts); - - if (!useCssAnimations) { - // No CSS animation support, use setTimeout() instead - var i = 0; - var fps = o.fps; - var f = fps/o.speed; - var ostep = (1-o.opacity)/(f*o.trail / 100); - var astep = f/o.lines; - - !function anim() { - i++; - for (var s=o.lines; s; s--) { - var alpha = Math.max(1-(i+s*astep)%f * ostep, o.opacity); - self.opacity(el, o.lines-s, alpha, o); - } - self.timeout = self.el && setTimeout(anim, ~~(1000/fps)); - }(); - } - return self; - }, - stop: function() { - var el = this.el; - if (el) { - clearTimeout(this.timeout); - if (el.parentNode) el.parentNode.removeChild(el); - this.el = undefined; - } - return this; - }, - lines: function(el, o) { - var i = 0; - var seg; - - function fill(color, shadow) { - return css(createEl(), { - position: 'absolute', - width: (o.length+o.width) + 'px', - height: o.width + 'px', - background: color, - boxShadow: shadow, - transformOrigin: 'left', - transform: 'rotate(' + ~~(360/o.lines*i) + 'deg) translate(' + o.radius+'px' +',0)', - borderRadius: (o.width>>1) + 'px' - }); - } - for (; i < o.lines; i++) { - seg = css(createEl(), { - position: 'absolute', - top: 1+~(o.width/2) + 'px', - transform: o.hwaccel ? 
'translate3d(0,0,0)' : '', - opacity: o.opacity, - animation: useCssAnimations && addAnimation(o.opacity, o.trail, i, o.lines) + ' ' + 1/o.speed + 's linear infinite' - }); - if (o.shadow) ins(seg, css(fill('#000', '0 0 4px ' + '#000'), {top: 2+'px'})); - ins(el, ins(seg, fill(o.color, '0 0 1px rgba(0,0,0,.1)'))); - } - return el; - }, - opacity: function(el, i, val) { - if (i < el.childNodes.length) el.childNodes[i].style.opacity = val; - } - }; - - ///////////////////////////////////////////////////////////////////////// - // VML rendering for IE - ///////////////////////////////////////////////////////////////////////// - - /** - * Check and init VML support - */ - !function() { - var s = css(createEl('group'), {behavior: 'url(#default#VML)'}); - var i; - - if (!vendor(s, 'transform') && s.adj) { - - // VML support detected. Insert CSS rules ... - for (i=4; i--;) sheet.addRule(['group', 'roundrect', 'fill', 'stroke'][i], 'behavior:url(#default#VML)'); - - Spinner.prototype.lines = function(el, o) { - var r = o.length+o.width; - var s = 2*r; - - function grp() { - return css(createEl('group', {coordsize: s +' '+s, coordorigin: -r +' '+-r}), {width: s, height: s}); - } - - var margin = -(o.width+o.length)*2+'px'; - var g = css(grp(), {position: 'absolute', top: margin, left: margin}); - - var i; - - function seg(i, dx, filter) { - ins(g, - ins(css(grp(), {rotation: 360 / o.lines * i + 'deg', left: ~~dx}), - ins(css(createEl('roundrect', {arcsize: 1}), { - width: r, - height: o.width, - left: o.radius, - top: -o.width>>1, - filter: filter - }), - createEl('fill', {color: o.color, opacity: o.opacity}), - createEl('stroke', {opacity: 0}) // transparent stroke to fix color bleeding upon opacity change - ) - ) - ); - } - - if (o.shadow) { - for (i = 1; i <= o.lines; i++) { - seg(i, -2, 'progid:DXImageTransform.Microsoft.Blur(pixelradius=2,makeshadow=1,shadowopacity=.3)'); - } - } - for (i = 1; i <= o.lines; i++) seg(i); - return ins(el, g); - }; - 
Spinner.prototype.opacity = function(el, i, val, o) { - var c = el.firstChild; - o = o.shadow && o.lines || 0; - if (c && i+o < c.childNodes.length) { - c = c.childNodes[i+o]; c = c && c.firstChild; c = c && c.firstChild; - if (c) c.opacity = val; - } - }; - } - else { - useCssAnimations = vendor(s, 'animation'); - } - }(); - - window.Spinner = Spinner; - -})(window, document); diff --git a/couchpotato/static/scripts/page.js b/couchpotato/static/scripts/page.js index 1af800e86d..b954a22241 100644 --- a/couchpotato/static/scripts/page.js +++ b/couchpotato/static/scripts/page.js @@ -2,30 +2,98 @@ var PageBase = new Class({ Implements: [Options, Events], - options: { - - }, - + disable_pointer_onscroll: true, + order: 1, has_tab: true, name: '', + icon: null, - initialize: function(options) { + parent_page: null, + sub_pages: null, + + initialize: function(parent_page, options) { var self = this; - self.setOptions(options) + self.parent_page = parent_page; + self.setOptions(options); // Create main page container - self.el = new Element('div.page.'+self.name); + self.el = new Element('div', { + 'class': 'page ' + self.getPageClass() + (' level_' + (options.level || 0)) + }).grab( + self.content = new Element('div.scroll_content') + ); + + // Stop hover events while scrolling + if(self.options.disable_pointer_onscroll){ + App.addEvent('load', function(){ + requestTimeout(function(){ + if(!App.mobile_screen && !App.getOption('dev')){ + self.content.addEvent('scroll', self.preventHover.bind(self)); + } + }, 100); + }); + } + }, + + load: function(){ + var self = this; // Create tab for page if(self.has_tab){ - var nav = App.getBlock('navigation'); + var nav; + + if(self.parent_page && self.parent_page.navigation){ + nav = self.parent_page.navigation; + } + else { + nav = App.getBlock('navigation'); + } + self.tab = nav.addTab(self.name, { - 'href': App.createUrl(self.name), + 'href': App.createUrl(self.getPageUrl()), 'title': self.title, - 'text': self.name.capitalize() 
+ 'html': '<span>' + self.name.capitalize() + '</span>', + 'class': self.icon ? 'icon-' + self.icon : null }); } + + if(self.sub_pages){ + self.loadSubPages(); + } + + }, + + loadSubPages: function(){ + var self = this; + + var sub_pages = self.sub_pages; + + self.sub_pages = []; + sub_pages.each(function(class_name){ + var pg = new window[self.name.capitalize()+class_name](self, { + 'level': 2 + }); + self.sub_pages[class_name] = pg; + + self.sub_pages.include({ + 'order': pg.order, + 'name': class_name, + 'class': pg + }); + }); + + self.sub_pages.stableSort(self.sortPageByOrder).each(function(page){ + page['class'].load(); + self.fireEvent('load'+page.name); + + $(page['class']).inject(App.getPageContainer()); + }); + + }, + + sortPageByOrder: function(a, b){ + return (a.order || 100) - (b.order || 100); }, open: function(action, params){ @@ -33,10 +101,16 @@ var PageBase = new Class({ //p('Opening: ' +self.getName() + ', ' + action + ', ' + Object.toQueryString(params)); try { - var elements = self[action+'Action'](params); + var elements; + if(!self[action+'Action']){ + elements = self.defaultAction(action, params); + } + else { + elements = self[action+'Action'](params); + } if(elements !== undefined){ - self.el.empty(); - self.el.adopt(elements); + self.content.empty(); + self.content.adopt(elements); } App.getBlock('navigation').activate(self.name); @@ -53,12 +127,22 @@ var PageBase = new Class({ History.push(url); }, + getPageUrl: function(){ + var self = this; + return (self.parent_page && self.parent_page.getPageUrl ? self.parent_page.getPageUrl() + '/' : '') + self.name; + }, + + getPageClass: function(){ + var self = this; + return (self.parent_page && self.parent_page.getPageClass ? 
self.parent_page.getPageClass() + '_' : '') + self.name; + }, + errorAction: function(e){ p('Error, action not found', e); }, getName: function(){ - return this.name + return this.name; }, show: function(){ @@ -66,12 +150,31 @@ var PageBase = new Class({ }, hide: function(){ - this.el.removeClass('active'); + var self = this; + + self.el.removeClass('active'); + + if(self.sub_pages){ + self.sub_pages.each(function(sub_page){ + sub_page['class'].hide(); + }); + } + }, + + preventHover: function(){ + var self = this; + + if(self.hover_timer) clearRequestTimeout(self.hover_timer); + self.el.addClass('disable_hover'); + + self.hover_timer = requestTimeout(function(){ + self.el.removeClass('disable_hover'); + }, 200); }, toElement: function(){ - return this.el + return this.el; } }); -var Page = {} +var Page = {}; diff --git a/couchpotato/static/scripts/page/about.js b/couchpotato/static/scripts/page/about.js index ba451c841a..0e19218ae2 100644 --- a/couchpotato/static/scripts/page/about.js +++ b/couchpotato/static/scripts/page/about.js @@ -6,14 +6,14 @@ var AboutSettingTab = new Class({ initialize: function(){ var self = this; - App.addEvent('load', self.addSettings.bind(self)) + App.addEvent('loadSettings', self.addSettings.bind(self)); }, addSettings: function(){ var self = this; - self.settings = App.getPage('Settings') + self.settings = App.getPage('Settings'); self.settings.addEvent('create', function(){ var tab = self.settings.createTab('about', { 'label': 'About', @@ -28,7 +28,9 @@ var AboutSettingTab = new Class({ }); self.settings.default_action = 'about'; - + // WebUI Feature: + self.hide_about_dirs = !! App.options && App.options.webui_feature && App.options.webui_feature.hide_about_dirs; + self.hide_about_update = !! 
App.options && App.options.webui_feature && App.options.webui_feature.hide_about_update; }, createAbout: function(){ @@ -38,75 +40,89 @@ var AboutSettingTab = new Class({ today = new Date(), one_day = 1000*60*60*24; + + var about_block; self.settings.createGroup({ 'label': 'About This CouchPotato', 'name': 'variables' }).inject(self.content).adopt( - new Element('dl.info').adopt( + (about_block = new Element('dl.info')).adopt( new Element('dt[text=Version]'), self.version_text = new Element('dd.version', { - 'text': 'Getting version...', - 'events': { - 'click': App.checkForUpdate.bind(App, function(json){ - self.fillVersion(json.info) - }), - 'mouseenter': function(){ - this.set('text', 'Check for updates') - }, - 'mouseleave': function(){ - self.fillVersion(Updater.getInfo()) - } - } + 'text': 'Getting version...' }), + new Element('dt[text=Updater]'), self.updater_type = new Element('dd.updater'), new Element('dt[text=ID]'), - new Element('dd', {'text': App.getOption('pid')}), + new Element('dd', {'text': App.getOption('pid')}) + ) + ); + + if (!self.hide_about_update){ + self.version_text.addEvents({ + 'click': App.checkForUpdate.bind(App, function(json){ + self.fillVersion(json.info); + }), + 'mouseenter': function(){ + this.set('text', 'Check for updates'); + }, + 'mouseleave': function(){ + self.fillVersion(Updater.getInfo()); + } + }); + } else { + // override cursor style from CSS + self.version_text.setProperty('style', 'cursor: auto'); + } + + if (!self.hide_about_dirs){ + about_block.adopt( new Element('dt[text=Directories]'), new Element('dd', {'text': App.getOption('app_dir')}), new Element('dd', {'text': App.getOption('data_dir')}), new Element('dt[text=Startup Args]'), new Element('dd', {'html': App.getOption('args')}), new Element('dd', {'html': App.getOption('options')}) - ) - ); + ); + } if(!self.fillVersion(Updater.getInfo())) - Updater.addEvent('loaded', self.fillVersion.bind(self)) + Updater.addEvent('loaded', self.fillVersion.bind(self)); 
self.settings.createGroup({ 'name': 'Help Support CouchPotato' }).inject(self.content).adopt( new Element('div.usenet').adopt( - new Element('span', { - 'text': 'Help support CouchPotato and save some money for yourself by signing up for an account at' - }), - new Element('a', { - 'href': 'https://usenetserver.com/partners/?a_aid=couchpotato&a_bid=3f357c6f', - 'target': '_blank', - 'text': 'UsenetServer' - }), - new Element('span[text=or]'), - new Element('a', { - 'href': 'http://www.newshosting.com/partners/?a_aid=couchpotato&a_bid=a0b022df', - 'target': '_blank', - 'text': 'Newshosting' - }), - new Element('span', { - 'text': '. For as low as $7.95 per month, youБ─≥ll get:' - }), + new Element('div.text').adopt( + new Element('span', { + 'text': 'Help support CouchPotato and save some money for yourself by signing up for an account at' + }), + new Element('a', { + 'href': 'https://usenetserver.com/partners/?a_aid=couchpotato&a_bid=3f357c6f', + 'target': '_blank', + 'text': 'UsenetServer' + }), + new Element('span[text=or]'), + new Element('a', { + 'href': 'https://www.newshosting.com/partners/?a_aid=couchpotato&a_bid=a0b022df', + 'target': '_blank', + 'text': 'Newshosting' + }), + new Element('span', { + 'text': '. 
For as low as $7.95 per month, youБ─≥ll get:' + }) + ), new Element('ul').adopt( - new Element('li', { + new Element('li.icon-ok', { 'text': Math.ceil((today.getTime()-millennium.getTime())/(one_day))+" days retention" }), - new Element('li[text=No speed or download limits]'), - new Element('li[text=Free SSL Encrypted connections]') + new Element('li.icon-ok[text=No speed or download limits]'), + new Element('li.icon-ok[text=Free SSL Encrypted connections]') ) ), new Element('div.donate', { - 'html': - 'Or support me via:' + - '<iframe src="http://couchpota.to/donate.html" style="border:none; height: 200px;" scrolling="no"></iframe>' + 'html': 'Or support me via: <iframe src="https://couchpota.to/donate.html" scrolling="no"></iframe>' }) ); @@ -117,11 +133,11 @@ var AboutSettingTab = new Class({ var self = this; var date = new Date(json.version.date * 1000); self.version_text.set('text', json.version.hash + (json.version.date ? ' ('+date.toLocaleString()+')' : '')); - self.updater_type.set('text', json.version.type + ', ' + json.branch); + self.updater_type.set('text', (json.version.type != json.branch) ? 
(json.version.type + ', ' + json.branch) : json.branch); } }); window.addEvent('domready', function(){ new AboutSettingTab(); -}); \ No newline at end of file +}); diff --git a/couchpotato/static/scripts/page/home.js b/couchpotato/static/scripts/page/home.js index caf4d646ab..cddea44506 100644 --- a/couchpotato/static/scripts/page/home.js +++ b/couchpotato/static/scripts/page/home.js @@ -4,88 +4,187 @@ Page.Home = new Class({ name: 'home', title: 'Manage new stuff for things and such', + icon: 'home', - indexAction: function(param){ + indexAction: function () { var self = this; - if(self.soon_list) - return + if(self.soon_list){ + + // Reset lists + self.available_list.update(); + + if(self.late_list) + self.late_list.update(); + + return; + } + + self.chain = new Chain(); + self.chain.chain( + self.createAvailable.bind(self), + self.createBigsearch.bind(self), + self.createSoon.bind(self), + self.createSuggestions.bind(self), + self.createCharts.bind(self), + self.createLate.bind(self) + ); + + self.chain.callChain(); + + }, + + createBigsearch: function(){ + var self = this; + + new Element('.big_search').grab( + new BlockSearch(self, { + 'animate': false + }) + ).inject(self.content); + + self.chain.callChain(); + }, + + createAvailable: function(){ + var self = this; - // Snatched self.available_list = new MovieList({ 'navigation': false, 'identifier': 'snatched', 'load_more': false, 'view': 'list', - 'actions': MovieActions, + 'actions': [MA.MarkAsDone, MA.IMDB, MA.Release, MA.Trailer, MA.Refresh, MA.Readd, MA.Delete, MA.Category, MA.Profile], 'title': 'Snatched & Available', + 'description': 'These movies have been snatched or have finished downloading', + 'on_empty_element': new Element('div').adopt( + new Element('h2', {'text': 'Snatched & Available'}), + new Element('span.no_movies', { + 'html': 'No snatched movies or anything!? Damn.. 
<a href="#">Maybe add a movie.</a>', + 'events': { + 'click': function(e){ + (e).preventDefault(); + $(document.body).getElement('.big_search input').focus(); + } + } + }) + ), 'filter': { - 'release_status': 'snatched,available' + 'release_status': 'snatched,missing,available,downloaded,done,seeding', + 'with_tags': 'recent' + }, + 'limit': null, + 'onLoaded': function(){ + self.chain.callChain(); + }, + 'onMovieAdded': function(notification){ + + // Track movie added + var after_search = function(data){ + if(notification.data._id != data.data._id) return; + + // Force update after search + self.available_list.update(); + App.off('movie.searcher.ended', after_search); + }; + App.on('movie.searcher.ended', after_search); + } }); - // Downloaded - // self.downloaded_list = new MovieList({ - // 'navigation': false, - // 'identifier': 'downloaded', - // 'load_more': false, - // 'view': 'titles', - // 'filter': { - // 'release_status': 'done', - // 'order': 'release_order' - // } - // }); - // self.el.adopt( - // new Element('h2', { - // 'text': 'Just downloaded' - // }), - // $(self.downloaded_list) - // ); - - // Comming Soon + $(self.available_list).inject(self.content); + + }, + + createSoon: function(){ + var self = this; + + // Coming Soon self.soon_list = new MovieList({ 'navigation': false, 'identifier': 'soon', - 'limit': 24, - 'title': 'Soon', + 'limit': 12, + 'title': 'Available soon', + 'description': 'Should be available soon as they will be released on DVD/Blu-ray in the coming weeks.', 'filter': { 'random': true }, + 'actions': [MA.IMDB, MA.Release, MA.Trailer, MA.Refresh, MA.Delete, MA.Category, MA.Profile], 'load_more': false, - 'view': 'thumbs', - 'api_call': 'dashboard.soon' + 'view': 'thumb', + 'force_view': true, + 'api_call': 'dashboard.soon', + 'onLoaded': function(){ + self.chain.callChain(); + } }); - self.el.adopt( - $(self.available_list), - $(self.soon_list) - ); + $(self.soon_list).inject(self.content); + + }, + + createSuggestions: 
function(){ + var self = this; + + self.suggestions_list = new MovieList({ + 'navigation': false, + 'identifier': 'suggest', + 'limit': 12, + 'title': 'Suggestions', + 'description': 'Based on your current wanted and managed items', + 'actions': [MA.Add, MA.SuggestIgnore, MA.SuggestSeen, MA.IMDB, MA.Trailer], + 'load_more': false, + 'view': 'thumb', + 'force_view': true, + 'api_call': 'suggestion.view', + 'onLoaded': function(){ + self.chain.callChain(); + } + }); + + $(self.suggestions_list).inject(self.content); + + }, + + createCharts: function(){ + var self = this; + + // Charts + self.charts_list = new Charts({ + 'onCreated': function(){ + self.chain.callChain(); + } + }); + + $(self.charts_list).inject(self.content); + + }, + + createLate: function(){ + var self = this; + + // Still not available + self.late_list = new MovieList({ + 'navigation': false, + 'identifier': 'late', + 'limit': 50, + 'title': 'Still not available', + 'description': 'Try another quality profile or maybe add more providers in <a href="' + App.createUrl('settings/searcher/providers/') + '">Settings</a>.', + 'filter': { + 'late': true + }, + 'loader': false, + 'load_more': false, + 'view': 'list', + 'actions': [MA.IMDB, MA.Trailer, MA.Refresh, MA.Delete, MA.Category, MA.Profile], + 'api_call': 'dashboard.soon', + 'onLoaded': function(){ + self.chain.callChain(); + } + }); - // Suggest - // self.suggestion_list = new MovieList({ - // 'navigation': false, - // 'identifier': 'suggestions', - // 'limit': 6, - // 'load_more': false, - // 'view': 'thumbs', - // 'api_call': 'suggestion.suggest' - // }); - // self.el.adopt( - // new Element('h2', { - // 'text': 'You might like' - // }), - // $(self.suggestion_list) - // ); - - // Recent - // Snatched - // Renamed - // Added - - // Free space - - // Shortcuts + $(self.late_list).inject(self.content); } -}) \ No newline at end of file +}); diff --git a/couchpotato/static/scripts/page/login.js b/couchpotato/static/scripts/page/login.js new file 
mode 100644 index 0000000000..4e272c0415 --- /dev/null +++ b/couchpotato/static/scripts/page/login.js @@ -0,0 +1,31 @@ +window.addEvent('domready', function(){ + var b = $(document.body), + login_page = b.hasClass('login'); + + if(login_page){ + + var form = b.getElement('form'), + els = b.getElements('h1, .username, .password, .remember_me, .button'); + els.each(function(el, nr){ + + dynamics.css(el, { + opacity: 0, + translateY: 50 + }); + + dynamics.animate(el, { + opacity: 1, + translateY: 0 + }, { + type: dynamics.spring, + frequency: 200, + friction: 300, + duration: 800, + anticipationSize: 175, + anticipationStrength: 400, + delay: nr * 100 + }); + + }); + } +}); diff --git a/couchpotato/static/scripts/page/manage.js b/couchpotato/static/scripts/page/manage.js deleted file mode 100644 index ec293f860c..0000000000 --- a/couchpotato/static/scripts/page/manage.js +++ /dev/null @@ -1,122 +0,0 @@ -Page.Manage = new Class({ - - Extends: PageBase, - - name: 'manage', - title: 'Do stuff to your existing movies!', - - indexAction: function(param){ - var self = this; - - if(!self.list){ - self.refresh_button = new Element('a', { - 'title': 'Rescan your library for new movies', - 'text': 'Full library refresh', - 'events':{ - 'click': self.refresh.bind(self, true) - } - }); - - self.refresh_quick = new Element('a', { - 'title': 'Just scan for recently changed', - 'text': 'Quick library scan', - 'events':{ - 'click': self.refresh.bind(self, false) - } - }); - - self.list = new MovieList({ - 'identifier': 'manage', - 'filter': { - 'release_status': 'done' - }, - 'actions': MovieActions, - 'menu': [self.refresh_button, self.refresh_quick], - 'on_empty_element': new Element('div.empty_manage').adopt( - new Element('div', { - 'text': 'Seems like you don\'t have anything in your library yet.' 
- }), - new Element('div', { - 'text': 'Add your existing movie folders in ' - }).adopt( - new Element('a', { - 'text': 'Settings > Manage', - 'href': App.createUrl('settings/manage') - }) - ), - new Element('div.after_manage', { - 'text': 'When you\'ve done that, hit this button Б├▓ ' - }).adopt( - new Element('a.button.green', { - 'text': 'Hit me, but not too hard', - 'events':{ - 'click': self.refresh.bind(self, true) - } - }) - ) - ) - }); - $(self.list).inject(self.el); - - // Check if search is in progress - self.startProgressInterval(); - } - - }, - - refresh: function(full){ - var self = this; - - if(!self.update_in_progress){ - - Api.request('manage.update', { - 'data': { - 'full': +full - } - }) - - self.startProgressInterval(); - - } - - }, - - startProgressInterval: function(){ - var self = this; - - self.progress_interval = setInterval(function(){ - - Api.request('manage.progress', { - 'onComplete': function(json){ - self.update_in_progress = true; - - if(!json || !json.progress){ - clearInterval(self.progress_interval); - self.update_in_progress = false; - if(self.progress_container){ - self.progress_container.destroy(); - self.list.update(); - } - } - else { - if(!self.progress_container) - self.progress_container = new Element('div.progress').inject(self.list.navigation, 'after') - - self.progress_container.empty(); - - Object.each(json.progress, function(progress, folder){ - new Element('div').adopt( - new Element('span.folder', {'text': folder}), - new Element('span.percentage', {'text': progress.total ? 
(((progress.total-progress.to_go)/progress.total)*100).round() + '%' : '0%'}) - ).inject(self.progress_container) - }); - - } - } - }) - - }, 1000); - - } - -}); diff --git a/couchpotato/static/scripts/page/settings.js b/couchpotato/static/scripts/page/settings.js index ddb2abf18b..498484da61 100644 --- a/couchpotato/static/scripts/page/settings.js +++ b/couchpotato/static/scripts/page/settings.js @@ -2,6 +2,7 @@ Page.Settings = new Class({ Extends: PageBase, + order: 50, name: 'settings', title: 'Change settings.', wizard_only: false, @@ -11,20 +12,6 @@ Page.Settings = new Class({ current: 'about', has_tab: false, - initialize: function(options){ - var self = this; - self.parent(options); - - // Add to more menu - if(self.name == 'settings') - App.getBlock('more').addLink(new Element('a', { - 'href': App.createUrl(self.name), - 'text': self.name.capitalize(), - 'title': self.title - }), 'top') - - }, - open: function(action, params){ var self = this; self.action = action == 'index' ? self.default_action : action; @@ -41,7 +28,7 @@ Page.Settings = new Class({ openTab: function(action){ var self = this; - var action = (action == 'index' ? 'about' : action) || self.action; + action = (action == 'index' ? 'about' : action) || self.action; if(self.current) self.toggleTab(self.current, true); @@ -57,19 +44,20 @@ Page.Settings = new Class({ var a = hide ? 
'removeClass' : 'addClass'; var c = 'active'; + tab_name = tab_name.split('/')[0]; var t = self.tabs[tab_name] || self.tabs[self.action] || self.tabs.general; // Subtab - var subtab = null + var subtab = null; Object.each(self.params, function(param, subtab_name){ - subtab = subtab_name; - }) + subtab = param; + }); - self.el.getElements('li.'+c+' , .tab_content.'+c).each(function(active){ + self.content.getElements('li.'+c+' , .tab_content.'+c).each(function(active){ active.removeClass(c); }); - if (t.subtabs[subtab]){ + if(t.subtabs[subtab]){ t.tab[a](c); t.subtabs[subtab].tab[a](c); t.subtabs[subtab].content[a](c); @@ -85,7 +73,7 @@ Page.Settings = new Class({ t.content.fireEvent('activate'); } - return t + return t; }, getData: function(onComplete){ @@ -95,13 +83,13 @@ Page.Settings = new Class({ Api.request('settings', { 'useSpinner': true, 'spinnerOptions': { - 'target': self.el + 'target': self.content }, 'onComplete': function(json){ self.data = json; onComplete(json); } - }) + }); return self.data; }, @@ -112,7 +100,7 @@ Page.Settings = new Class({ return self.data.values[section][name]; } catch(e){ - return '' + return ''; } }, @@ -125,43 +113,54 @@ Page.Settings = new Class({ Cookie.write('advanced_toggle_checked', +self.advanced_toggle.checked, {'duration': 365}); }, + sortByOrder: function(a, b){ + return (a.order || 100) - (b.order || 100); + }, + create: function(json){ var self = this; - self.el.adopt( - self.tabs_container = new Element('ul.tabs'), - self.containers = new Element('form.uniForm.containers').adopt( - new Element('label.advanced_toggle').adopt( - new Element('span', { - 'text': 'Show advanced settings' - }), - self.advanced_toggle = new Element('input[type=checkbox].inlay', { + self.navigation = new Element('div.navigation').adopt( + new Element('h2[text=Settings]'), + new Element('div.advanced_toggle').adopt( + new Element('span', { + 'text': 'Show advanced' + }), + new Element('label.switch').adopt( + self.advanced_toggle = new 
Element('input[type=checkbox]', { 'checked': +Cookie.read('advanced_toggle_checked'), 'events': { 'change': self.showAdvanced.bind(self) } - }) + }), + new Element('div.toggle') ) ) ); - self.showAdvanced(); - new Form.Check(self.advanced_toggle); + self.tabs_container = new Element('ul.tabs'); + + self.containers = new Element('form.uniForm.containers', { + 'events': { + 'click:relay(.enabler.disabled h2)': function(e, el){ + el.getPrevious().getElements('.check').fireEvent('click'); + } + } + }); + self.showAdvanced(); // Add content to tabs var options = []; Object.each(json.options, function(section, section_name){ - section['section_name'] = section_name; + section.section_name = section_name; options.include(section); - }) + }); - options.sort(function(a, b){ - return (a.order || 100) - (b.order || 100) - }).each(function(section){ + options.stableSort(self.sortByOrder).each(function(section){ var section_name = section.section_name; // Add groups to content - section.groups.sortBy('order').each(function(group){ + section.groups.stableSort(self.sortByOrder).each(function(group){ if(group.hidden) return; if(self.wizard_only && !group.wizard) @@ -170,13 +169,13 @@ Page.Settings = new Class({ // Create tab if(!self.tabs[group.tab] || !self.tabs[group.tab].groups) self.createTab(group.tab, {}); - var content_container = self.tabs[group.tab].content + var content_container = self.tabs[group.tab].content; // Create subtab if(group.subtab){ - if (!self.tabs[group.tab].subtabs[group.subtab]) - self.createSubTab(group.subtab, {}, self.tabs[group.tab], group.tab); - var content_container = self.tabs[group.tab].subtabs[group.subtab].content + if(!self.tabs[group.tab].subtabs[group.subtab]) + self.createSubTab(group.subtab, group, self.tabs[group.tab], group.tab); + content_container = self.tabs[group.tab].subtabs[group.subtab].content; } if(group.list && !self.lists[group.list]){ @@ -184,12 +183,10 @@ Page.Settings = new Class({ } // Create the group - 
if(!self.tabs[group.tab].groups[group.name]){ - var group_el = self.createGroup(group) + if(!self.tabs[group.tab].groups[group.name]) + self.tabs[group.tab].groups[group.name] = self.createGroup(group) .inject(group.list ? self.lists[group.list] : content_container) .addClass('section_'+section_name); - self.tabs[group.tab].groups[group.name] = group_el; - } // Create list if needed if(group.type && group.type == 'list'){ @@ -200,9 +197,7 @@ Page.Settings = new Class({ } // Add options to group - group.options.sort(function(a, b){ - return (a.order || 100) - (b.order || 100) - }).each(function(option){ + group.options.stableSort(self.sortByOrder).each(function(option){ if(option.hidden) return; var class_name = (option.type || 'string').capitalize(); var input = new Option[class_name](section_name, option.name, self.getValue(section_name, option.name), option); @@ -213,8 +208,19 @@ Page.Settings = new Class({ }); }); - self.fireEvent('create'); - self.openTab(); + requestTimeout(function(){ + self.el.grab( + self.navigation + ); + + self.content.adopt( + self.tabs_container, + self.containers + ); + + self.fireEvent('create'); + self.openTab(); + }, 0); }, @@ -222,9 +228,9 @@ Page.Settings = new Class({ var self = this; if(self.tabs[tab_name] && self.tabs[tab_name].tab) - return self.tabs[tab_name].tab + return self.tabs[tab_name].tab; - var label = tab.label || (tab.name || tab_name).capitalize() + var label = tab.label || (tab.name || tab_name).capitalize(); var tab_el = new Element('li.t_'+tab_name).adopt( new Element('a', { 'href': App.createUrl(self.name+'/'+tab_name), @@ -235,16 +241,16 @@ Page.Settings = new Class({ if(!self.tabs[tab_name]) self.tabs[tab_name] = { 'label': label - } + }; self.tabs[tab_name] = Object.merge(self.tabs[tab_name], { 'tab': tab_el, 'subtabs': {}, - 'content': new Element('div.tab_content.tab_'+tab_name).inject(self.containers), + 'content': new Element('div.tab_content.tab_' + tab_name).inject(self.containers), 'groups': {} - }) + 
}); - return self.tabs[tab_name] + return self.tabs[tab_name]; }, @@ -252,12 +258,12 @@ Page.Settings = new Class({ var self = this; if(parent_tab.subtabs[tab_name]) - return parent_tab.subtabs[tab_name] + return parent_tab.subtabs[tab_name]; if(!parent_tab.subtabs_el) parent_tab.subtabs_el = new Element('ul.subtabs').inject(parent_tab.tab); - var label = tab.label || (tab.name || tab_name.replace('_', ' ')).capitalize() + var label = tab.subtab_label || tab_name.replace('_', ' ').capitalize(); var tab_el = new Element('li.t_'+tab_name).adopt( new Element('a', { 'href': App.createUrl(self.name+'/'+parent_tab_name+'/'+tab_name), @@ -268,7 +274,7 @@ Page.Settings = new Class({ if(!parent_tab.subtabs[tab_name]) parent_tab.subtabs[tab_name] = { 'label': label - } + }; parent_tab.subtabs[tab_name] = Object.merge(parent_tab.subtabs[tab_name], { 'tab': tab_el, @@ -276,34 +282,47 @@ Page.Settings = new Class({ 'groups': {} }); - return parent_tab.subtabs[tab_name] + return parent_tab.subtabs[tab_name]; }, createGroup: function(group){ - var self = this; + var hint; + + if((typeOf(group.description) == 'array')){ + hint = new Element('span.hint.more_hint', { + 'html': group.description[0] + }); + + createTooltip(group.description[1]).inject(hint); + } + else { + hint = new Element('span.hint', { + 'html': group.description || '' + }); + } + + var icon; + if(group.icon){ + icon = new Element('span.icon').grab(new Element('img', { + 'src': 'data:image/png;base64,' + group.icon + })); + } - var group_el = new Element('fieldset', { + var label = new Element('span.group_label', { + 'text': group.label || (group.name).capitalize() + }); + + return new Element('fieldset', { 'class': (group.advanced ? 
'inlineLabels advanced' : 'inlineLabels') + ' group_' + (group.name || '') + ' subtab_' + (group.subtab || '') - }).adopt( - new Element('h2', { - 'text': group.label || (group.name).capitalize() - }).adopt( - new Element('span.hint', { - 'html': group.description || '' - }) - ) - ) + }).grab( + new Element('h2').adopt(icon, label, hint) + ); - return group_el }, - + createList: function(content_container){ - return new Element('div.option_list').grab( - new Element('h3', { - 'text': 'Enable another' - }) - ).inject(content_container) + return new Element('div.option_list').inject(content_container); } }); @@ -312,17 +331,19 @@ var OptionBase = new Class({ Implements: [Options, Events], - klass: 'textInput', - focused_class : 'focused', + klass: '', + focused_class: 'focused', save_on_change: true, + read_only: false, initialize: function(section, name, value, options){ - var self = this - self.setOptions(options) + var self = this; + self.setOptions(options); self.section = section; self.name = name; self.value = self.previous_value = value; + self.read_only = !(options && !options.readonly); self.createBase(); self.create(); @@ -335,7 +356,7 @@ var OptionBase = new Class({ 'keyup': self.changed.bind(self) }); - self.addEvent('injected', self.afterInject.bind(self)) + self.addEvent('injected', self.afterInject.bind(self)); }, @@ -343,32 +364,49 @@ var OptionBase = new Class({ * Create the element */ createBase: function(){ - var self = this - self.el = new Element('div.ctrlHolder') + var self = this; + self.el = new Element('div.ctrlHolder.' + + self.section + '_' + self.name + + (self.klass ? '.' + self.klass : '') + + (self.read_only ? '.read_only' : '') + ); }, - create: function(){}, + create: function(){ + }, createLabel: function(){ var self = this; return new Element('label', { 'text': (self.options.label || self.options.name.replace('_', ' ')).capitalize() - }) + }); }, setAdvanced: function(){ - this.el.addClass(this.options.advanced ? 
'advanced': '') + this.el.addClass(this.options.advanced ? 'advanced' : ''); }, createHint: function(){ var self = this; - if(self.options.description) - new Element('p.formHint', { - 'html': self.options.description - }).inject(self.el); + if(self.options.description){ + + if((typeOf(self.options.description) == 'array')){ + var hint = new Element('p.formHint.more_hint', { + 'html': self.options.description[0] + }).inject(self.el); + + createTooltip(self.options.description[1]).inject(hint); + } + else { + new Element('p.formHint', { + 'html': self.options.description || '' + }).inject(self.el); + } + } }, - afterInject: function(){}, + afterInject: function(){ + }, // Element has changed, do something changed: function(){ @@ -376,22 +414,31 @@ var OptionBase = new Class({ if(self.getValue() != self.previous_value){ if(self.save_on_change){ - if(self.changed_timer) clearTimeout(self.changed_timer); - self.changed_timer = self.save.delay(300, self); + if(self.changed_timer) clearRequestTimeout(self.changed_timer); + self.changed_timer = requestTimeout(self.save.bind(self), 300); } - self.fireEvent('change') + self.fireEvent('change'); } }, save: function(){ - var self = this; + var self = this, + value = self.getValue(), + ro = self.read_only; + + if (ro) { + console.warn('Unable to save readonly-option ' + self.section + '.' 
+ self.name); + return; + } + + App.fireEvent('setting.save.'+self.section+'.'+self.name, value); Api.request('settings.save', { 'data': { 'section': self.section, 'name': self.name, - 'value': self.getValue() + 'value': value }, 'useSpinner': true, 'spinnerOptions': { @@ -410,9 +457,9 @@ var OptionBase = new Class({ self.previous_value = self.getValue(); self.el.addClass(sc); - (function(){ + requestTimeout(function(){ self.el.removeClass(sc); - }).delay(3000, self); + }, 3000); }, setName: function(name){ @@ -421,7 +468,7 @@ var OptionBase = new Class({ postName: function(){ var self = this; - return self.section +'['+self.name+']'; + return self.section + '[' + self.name + ']'; }, getValue: function(){ @@ -441,30 +488,39 @@ var OptionBase = new Class({ toElement: function(){ return this.el; } -}) +}); -var Option = {} +var Option = {}; Option.String = new Class({ Extends: OptionBase, type: 'string', create: function(){ - var self = this + var self = this; - self.el.adopt( - self.createLabel(), - self.input = new Element('input.inlay', { + if(self.read_only){ + self.input = new Element('span', { + 'text': self.getSettingValue() + }); + } + else { + self.input = new Element('input', { 'type': 'text', 'name': self.postName(), 'value': self.getSettingValue(), 'placeholder': self.getPlaceholder() - }) + }); + } + + self.el.adopt( + self.createLabel(), + self.input ); }, getPlaceholder: function(){ - return this.options.placeholder + return this.options.placeholder; } }); @@ -472,28 +528,27 @@ Option.Dropdown = new Class({ Extends: OptionBase, create: function(){ - var self = this + var self = this; self.el.adopt( self.createLabel(), - self.input = new Element('select', { - 'name': self.postName() - }) - ) + new Element('div.select_wrapper.icon-dropdown').grab( + self.input = new Element('select', { + 'name': self.postName(), + 'readonly' : self.read_only, + 'disabled' : self.read_only + }) + ) + ); Object.each(self.options.values, function(value){ new 
Element('option', { 'text': value[0], 'value': value[1] - }).inject(self.input) - }) + }).inject(self.input); + }); self.input.set('value', self.getSettingValue()); - - var dd = new Form.Dropdown(self.input, { - 'onChange': self.changed.bind(self) - }); - self.input = dd.input; } }); @@ -505,20 +560,20 @@ Option.Checkbox = new Class({ create: function(){ var self = this; - var randomId = 'r-'+randomString() + var randomId = 'r-' + randomString(); self.el.adopt( self.createLabel().set('for', randomId), - self.input = new Element('input.inlay', { + self.input = new Element('input', { 'name': self.postName(), 'type': 'checkbox', 'checked': self.getSettingValue(), - 'id': randomId + 'id': randomId, + 'readonly' : self.read_only, + 'disabled' : self.read_only }) ); - new Form.Check(self.input); - }, getValue: function(){ @@ -534,12 +589,22 @@ Option.Password = new Class({ create: function(){ var self = this; - self.parent() - self.input.set('type', 'password') + self.el.adopt( + self.createLabel(), + self.input = new Element('input', { + 'type': 'text', + 'name': self.postName(), + 'value': self.getSettingValue() ? 
'********' : '', + 'placeholder': self.getPlaceholder(), + 'readonly' : self.read_only, + 'disabled' : self.read_only + }) + ); self.input.addEvent('focus', function(){ - self.input.set('value', '') - }) + self.input.set('value', ''); + self.input.set('type', 'password'); + }); } }); @@ -555,14 +620,18 @@ Option.Enabler = new Class({ var self = this; self.el.adopt( - self.input = new Element('input.inlay', { - 'type': 'checkbox', - 'checked': self.getSettingValue(), - 'id': 'r-'+randomString() - }) + new Element('label.switch').adopt( + self.input = new Element('input', { + 'type': 'checkbox', + 'checked': self.getSettingValue(), + 'id': 'r-'+randomString(), + 'readonly' : self.read_only, + 'disabled' : self.read_only, + }), + new Element('div.toggle') + ) ); - new Form.Check(self.input); }, changed: function(){ @@ -576,18 +645,18 @@ Option.Enabler = new Class({ self.parentFieldset[ enabled ? 'removeClass' : 'addClass']('disabled'); - if(self.parentList) - self.parentFieldset.inject(self.parentList.getElement('h3'), enabled ? 'before' : 'after'); + //if(self.parentList) + // self.parentFieldset.inject(self.parentList.getElement('h3'), enabled ? 
'before' : 'after'); }, afterInject: function(){ var self = this; - self.parentFieldset = self.el.getParent('fieldset').addClass('enabler') + self.parentFieldset = self.el.getParent('fieldset').addClass('enabler'); self.parentList = self.parentFieldset.getParent('.option_list'); - self.el.inject(self.parentFieldset, 'top') - self.checkState() + self.el.inject(self.parentFieldset, 'top'); + self.checkState(); } }); @@ -608,112 +677,204 @@ Option.Directory = new Class({ browser: null, save_on_change: false, use_cache: false, + current_dir: '', create: function(){ var self = this; - - self.el.adopt( - self.createLabel(), - self.directory_inlay = new Element('span.directory.inlay', { - 'events': { - 'click': self.showBrowser.bind(self) - } - }).adopt( - self.input = new Element('span', { - 'text': self.getSettingValue() + if (self.read_only) { + // create disabled textbox: + self.el.adopt( + self.createLabel(), + self.input = new Element('input', { + 'type': 'text', + 'name': self.postName(), + 'value': self.getSettingValue(), + 'readonly' : true, + 'disabled' : true }) - ) - ); + ); + } else { + self.el.adopt( + self.createLabel(), + self.directory_inlay = new Element('span.directory', { + 'events': { + 'click': self.showBrowser.bind(self) + } + }).adopt( + self.input = new Element('input', { + 'value': self.getSettingValue(), + 'readonly' : self.read_only, + 'disabled' : self.read_only, + 'events': { + 'change': self.filterDirectory.bind(self), + 'keydown': function(e){ + if(e.key == 'enter' || e.key == 'tab') + (e).stop(); + }, + 'keyup': self.filterDirectory.bind(self), + 'paste': self.filterDirectory.bind(self) + } + }) + ) + ); + } self.cached = {}; }, + filterDirectory: function(e){ + var self = this, + value = self.getValue(), + path_sep = Api.getOption('path_sep'), + active_selector = 'li:not(.blur):not(.empty)', + first; + + if(e.key == 'enter' || e.key == 'tab'){ + (e).stop(); + + first = self.dir_list.getElement(active_selector); + if(first){ + 
self.selectDirectory(first.get('data-value')); + } + } + else { + + // New folder + if(value.substr(-1) == path_sep){ + if(self.current_dir != value) + self.selectDirectory(value); + } + else { + var pd = self.getParentDir(value); + if(self.current_dir != pd) + self.getDirs(pd); + + var folder_filter = value.split(path_sep).getLast(); + self.dir_list.getElements('li').each(function(li){ + var valid = li.get('text').substr(0, folder_filter.length).toLowerCase() != folder_filter.toLowerCase(); + li[valid ? 'addClass' : 'removeClass']('blur'); + }); + + first = self.dir_list.getElement(active_selector); + if(first){ + if(!self.dir_list_scroll) + self.dir_list_scroll = new Fx.Scroll(self.dir_list, { + 'transition': 'quint:in:out' + }); + + self.dir_list_scroll.toElement(first); + } + } + } + }, + selectDirectory: function(dir){ var self = this; - self.input.set('text', dir); + self.input.set('value', dir); - self.getDirs() + self.getDirs(); }, - previousDirectory: function(e){ + previousDirectory: function(){ var self = this; - self.selectDirectory(self.getParentDir()) + self.selectDirectory(self.getParentDir()); + }, + + caretAtEnd: function(){ + var self = this; + + self.input.focus(); + + if (typeof self.input.selectionStart == "number") { + self.input.selectionStart = self.input.selectionEnd = self.input.get('value').length; + } else if (typeof el.createTextRange != "undefined") { + self.input.focus(); + var range = self.input.createTextRange(); + range.collapse(false); + range.select(); + } }, showBrowser: function(){ var self = this; + // Move caret to back of the input + if(!self.browser || self.browser && !self.browser.isVisible()) + self.caretAtEnd(); + if(!self.browser){ self.browser = new Element('div.directory_list').adopt( - new Element('div.pointer'), - new Element('div.actions').adopt( - self.back_button = new Element('a.back', { - 'html': '', - 'events': { - 'click': self.previousDirectory.bind(self) - } - }), - new Element('label', { - 'text': 'Hidden 
folders' - }).adopt( - self.show_hidden = new Element('input[type=checkbox].inlay', { + self.pointer = new Element('div.pointer'), + new Element('div.wrapper').adopt( + new Element('div.actions').adopt( + self.back_button = new Element('a.back', { + 'html': '', 'events': { - 'change': self.getDirs.bind(self) + 'click': self.previousDirectory.bind(self) } - }) - ) - ), - self.dir_list = new Element('ul', { - 'events': { - 'click:relay(li:not(.empty))': function(e, el){ - (e).preventDefault(); - self.selectDirectory(el.get('data-value')) - }, - 'mousewheel': function(e){ - (e).stopPropagation(); - } - } - }), - new Element('div.actions').adopt( - new Element('a.clear.button', { - 'text': 'Clear', + }), + new Element('label', { + 'text': 'Hidden folders' + }).adopt( + self.show_hidden = new Element('input[type=checkbox]', { + 'events': { + 'change': function(){ + self.getDirs(); + } + } + }) + ) + ), + self.dir_list = new Element('ul', { 'events': { - 'click': function(e){ - self.input.set('text', ''); - self.hideBrowser(e, true); + 'click:relay(li:not(.empty))': function(e, el){ + (e).preventDefault(); + self.selectDirectory(el.get('data-value')); + }, + 'mousewheel': function(e){ + (e).stopPropagation(); } } }), - new Element('a.cancel', { - 'text': 'Cancel', - 'events': { - 'click': self.hideBrowser.bind(self) - } - }), - new Element('span', { - 'text': 'or' - }), - self.save_button = new Element('a.button.save', { - 'text': 'Save', - 'events': { - 'click': function(e){ - self.hideBrowser(e, true) + new Element('div.actions').adopt( + new Element('a.clear.button', { + 'text': 'Clear', + 'events': { + 'click': function(e){ + self.input.set('value', ''); + self.hideBrowser(e, true); + } } - } - }) + }), + new Element('a.cancel', { + 'text': 'Cancel', + 'events': { + 'click': self.hideBrowser.bind(self) + } + }), + new Element('span', { + 'text': 'or' + }), + self.save_button = new Element('a.button.save', { + 'text': 'Save', + 'events': { + 'click': function(e){ + 
self.hideBrowser(e, true); + } + } + }) + ) ) ).inject(self.directory_inlay, 'before'); - - new Form.Check(self.show_hidden); } - self.initial_directory = self.input.get('text'); + self.initial_directory = self.input.get('value'); - self.getDirs() - self.browser.show() - self.el.addEvent('outerClick', self.hideBrowser.bind(self)) + self.getDirs(); + self.browser.show(); + self.el.addEvent('outerClick', self.hideBrowser.bind(self)); }, hideBrowser: function(e, save){ @@ -721,40 +882,40 @@ Option.Directory = new Class({ (e).preventDefault(); if(save) - self.save() + self.save(); else - self.input.set('text', self.initial_directory); + self.input.set('value', self.initial_directory); - self.browser.hide() - self.el.removeEvents('outerClick') + self.browser.hide(); + self.el.removeEvents('outerClick'); }, fillBrowser: function(json){ - var self = this; + var self = this, + v = self.getValue(); self.data = json; - var v = self.getValue(); - var previous_dir = self.getParentDir(); + var previous_dir = json.parent; - if(v == '') - self.input.set('text', json.home); + if(v === '') + self.input.set('value', json.home); - if(previous_dir != v && previous_dir.length >= 1 && !json.is_root){ + if(previous_dir.length >= 1 && !json.is_root){ var prev_dirname = self.getCurrentDirname(previous_dir); if(previous_dir == json.home) - prev_dirname = 'Home'; - else if (previous_dir == '/' && json.platform == 'nt') + prev_dirname = 'Home Folder'; + else if(previous_dir == '/' && json.platform == 'nt') prev_dirname = 'Computer'; - self.back_button.set('data-value', previous_dir) - self.back_button.set('html', '« '+prev_dirname) - self.back_button.show() + self.back_button.set('data-value', previous_dir); + self.back_button.set('html', '« ' + prev_dirname); + self.back_button.show(); } else { - self.back_button.hide() + self.back_button.hide(); } if(self.use_cache) @@ -769,21 +930,25 @@ Option.Directory = new Class({ new Element('li', { 'data-value': dir, 'text': 
self.getCurrentDirname(dir) - }).inject(self.dir_list) + }).inject(self.dir_list); }); else new Element('li.empty', { 'text': 'Selected folder is empty' - }).inject(self.dir_list) - }, + }).inject(self.dir_list); - getDirs: function(){ - var self = this; + //fix for webkit type browsers to refresh the dom for the file browser + //http://stackoverflow.com/questions/3485365/how-can-i-force-webkit-to-redraw-repaint-to-propagate-style-changes + self.dir_list.setStyle('webkitTransform', 'scale(1)'); + self.caretAtEnd(); + }, - var c = self.getValue(); + getDirs: function(dir){ + var self = this, + c = dir || self.getValue(); if(self.cached[c] && self.use_cache){ - self.fillBrowser() + self.fillBrowser(); } else { Api.request('directory.list', { @@ -791,8 +956,11 @@ Option.Directory = new Class({ 'path': c, 'show_hidden': +self.show_hidden.checked }, - 'onComplete': self.fillBrowser.bind(self) - }) + 'onComplete': function(json){ + self.current_dir = c; + self.fillBrowser(json); + } + }); } }, @@ -805,23 +973,21 @@ Option.Directory = new Class({ var v = dir || self.getValue(); var sep = Api.getOption('path_sep'); var dirs = v.split(sep); - if(dirs.pop() == '') - dirs.pop(); + if(dirs.pop() === '') + dirs.pop(); - return dirs.join(sep) + sep + return dirs.join(sep) + sep; }, getCurrentDirname: function(dir){ - var self = this; - var dir_split = dir.split(Api.getOption('path_sep')); - return dir_split[dir_split.length-2] || Api.getOption('path_sep') + return dir_split[dir_split.length-2] || Api.getOption('path_sep'); }, getValue: function(){ var self = this; - return self.input.get('text'); + return self.input.get('value'); } }); @@ -832,7 +998,6 @@ Option.Directories = new Class({ Extends: Option.String, directories: [], - delimiter: '::', afterInject: function(){ var self = this; @@ -840,9 +1005,11 @@ Option.Directories = new Class({ self.el.setStyle('display', 'none'); self.directories = []; - self.getValue().split(self.delimiter).each(function(value){ + + 
self.getSettingValue().each(function(value){ self.addDirectory(value); }); + self.addDirectory(); }, @@ -861,12 +1028,12 @@ Option.Directories = new Class({ var parent = self.el.getParent('fieldset'); var dirs = parent.getElements('.multi_directory'); - if(dirs.length == 0) - $(dir).inject(parent) + if(dirs.length === 0) + $(dir).inject(parent); else $(dir).inject(dirs.getLast(), 'after'); - // Replace some properties + // TODO : Replace some properties dir.save = self.saveItems.bind(self); $(dir).getElement('label').set('text', 'Movie Folder'); $(dir).getElement('.formHint').destroy(); @@ -876,7 +1043,7 @@ Option.Directories = new Class({ $(dir).addClass('is_empty'); // Add remove button - new Element('a.icon.delete', { + new Element('a.icon-delete.delete', { 'events': { 'click': self.delItem.bind(self, dir) } @@ -899,7 +1066,7 @@ Option.Directories = new Class({ saveItems: function(){ var self = this; - var dirs = [] + var dirs = []; self.directories.each(function(dir){ if(dir.getValue()){ $(dir).removeClass('is_empty'); @@ -909,28 +1076,30 @@ Option.Directories = new Class({ $(dir).addClass('is_empty'); }); - self.input.set('value', dirs.join(self.delimiter)); + self.input.set('value', JSON.encode(dirs) ); self.input.fireEvent('change'); self.addDirectory(); } - - }); Option.Choice = new Class({ Extends: Option.String, + klass: 'choice', afterInject: function(){ var self = this; - self.tags = []; - self.replaceInput(); - - self.select = new Element('select').adopt( - new Element('option[text=Add option]') - ).inject(self.tag_input, 'after'); + var wrapper = new Element('div.select_wrapper.icon-dropdown').grab( + self.select = new Element('select.select', { + 'events': { + 'change': self.addSelection.bind(self) + } + }).grab( + new Element('option[text=Add option]') + ) + ); var o = self.options.options; Object.each(o.choices, function(label, choice){ @@ -940,333 +1109,14 @@ Option.Choice = new Class({ }).inject(self.select); }); - self.select = new 
Form.Dropdown(self.select, { - 'onChange': self.addSelection.bind(self) - }); - }, - - replaceInput: function(){ - var self = this; - self.initialized = self.initialized ? self.initialized+1 : 1; - - var value = self.getValue(); - var matches = value.match(/<([^>]*)>/g); - - self.tag_input = new Element('ul.inlay', { - 'events': { - 'click': function(e){ - if(e.target == self.tag_input){ - var input = self.tag_input.getElement('li:last-child input'); - input.fireEvent('focus'); - input.focus(); - input.setCaretPosition(input.get('value').length); - } - - self.el.addEvent('outerClick', function(){ - self.reset(); - self.el.removeEvents('outerClick'); - }) - } - } - }).inject(self.input, 'after'); - self.el.addClass('tag_input'); - - var mtches = [] - if(matches) - matches.each(function(match, mnr){ - var pos = value.indexOf(match), - msplit = [value.substr(0, pos), value.substr(pos, match.length), value.substr(pos+match.length)]; - - msplit.each(function(matchsplit, snr){ - if(msplit.length-1 == snr){ - value = matchsplit; - - if(matches.length-1 == mnr) - mtches.append([value]); - - return; - } - mtches.append([value == matchsplit ? 
match : matchsplit]); - }); - }); - - if(mtches.length == 0 && value != '') - mtches.include(value); - - mtches.each(self.addTag.bind(self)); - - self.addLastTag(); - - // Sortable - self.sortable = new Sortables(self.tag_input, { - 'revert': true, - 'handle': '', - 'opacity': 0.5, - 'onComplete': function(){ - self.setOrder(); - self.reset(); - } - }); - - // Calc width on show - var input_group = self.tag_input.getParent('.tab_content'); - input_group.addEvent('activate', self.setAllWidth.bind(self)); - }, - - addLastTag: function(){ - if(this.tag_input.getElement('li.choice:last-child') || !this.tag_input.getElement('li')) - this.addTag(''); - }, - - addTag: function(tag){ - var self = this; - tag = new Option.Choice.Tag(tag, { - 'onChange': self.setOrder.bind(self), - 'onBlur': function(){ - self.addLastTag(); - }, - 'onGoLeft': function(){ - self.goLeft(this) - }, - 'onGoRight': function(){ - self.goRight(this) - } - }); - $(tag).inject(self.tag_input); - - if(self.initialized > 1) - tag.setWidth(); - else - (function(){ tag.setWidth(); }).delay(10, self); - - self.tags.include(tag); - - return tag; - }, - - goLeft: function(from_tag){ - var self = this; - - from_tag.blur(); - - var prev_index = self.tags.indexOf(from_tag)-1; - if(prev_index >= 0) - self.tags[prev_index].selectFrom('right') - else - from_tag.focus(); - - }, - goRight: function(from_tag){ - var self = this; - - from_tag.blur(); - - var next_index = self.tags.indexOf(from_tag)+1; - if(next_index < self.tags.length) - self.tags[next_index].selectFrom('left') - else - from_tag.focus(); - }, + wrapper.inject(self.input, 'after'); - setOrder: function(){ - var self = this; - - var value = ''; - self.tag_input.getElements('li').each(function(el){ - value += el.getElement('span').get('text'); - }); - self.addLastTag(); - - self.input.set('value', value); - self.input.fireEvent('change'); - self.setAllWidth(); }, addSelection: function(){ var self = this; - - var tag = 
self.addTag(self.el.getElement('.selection input').get('value')); - self.sortable.addItems($(tag)); - self.setOrder(); - self.setAllWidth(); - }, - - reset: function(){ - var self = this; - - self.tag_input.destroy(); - self.sortable.detach(); - - self.replaceInput(); - self.setAllWidth(); - }, - - setAllWidth: function(){ - var self = this; - self.tags.each(function(tag){ - tag.setWidth.delay(10, tag); - }); - } - -}); - -Option.Choice.Tag = new Class({ - - Implements: [Options, Events], - - options: { - 'pre': '<', - 'post': '>' - }, - - initialize: function(tag, options){ - var self = this; - self.setOptions(options); - - self.tag = tag; - self.is_choice = tag.substr(0, 1) == self.options.pre && tag.substr(-1) == self.options.post; - - self.create(); - }, - - create: function(){ - var self = this; - - self.el = new Element('li', { - 'class': self.is_choice ? 'choice' : '', - 'styles': { - 'border': 0 - }, - 'events': { - 'mouseover': !self.is_choice ? self.fireEvent.bind(self, 'focus') : function(){} - } - }).adopt( - self.input = new Element(self.is_choice ? 'span' : 'input', { - 'text': self.tag, - 'value': self.tag, - 'styles': { - 'width': 0 - }, - 'events': { - 'keyup': self.is_choice ? null : function(e){ - var current_caret_pos = self.input.getCaretPosition(); - if(e.key == 'left' && current_caret_pos == self.last_caret_pos){ - self.fireEvent('goLeft'); - } - else if (e.key == 'right' && self.last_caret_pos === current_caret_pos){ - self.fireEvent('goRight'); - } - self.last_caret_pos = self.input.getCaretPosition(); - - self.setWidth(); - self.fireEvent('change'); - }, - 'focus': self.fireEvent.bind(self, 'focus'), - 'blur': self.fireEvent.bind(self, 'blur') - } - }), - self.span = !self.is_choice ? 
new Element('span', { - 'text': self.tag - }) : null, - self.del_button = new Element('a.delete', { - 'events': { - 'click': self.del.bind(self) - } - }) - ); - - self.addEvent('focus', self.setWidth.bind(self)); - - }, - - blur: function(){ - var self = this; - - self.input.blur(); - - self.selected = false; - self.el.removeClass('selected'); - self.input.removeEvents('outerClick'); - }, - - focus: function(){ - var self = this; - if(!self.is_choice){ - this.input.focus(); - } - else { - if(self.selected) return; - self.selected = true; - self.el.addClass('selected'); - self.input.addEvent('outerClick', self.blur.bind(self)); - - var temp_input = new Element('input', { - 'events': { - 'keydown': function(e){ - e.stop(); - - if(e.key == 'right'){ - self.fireEvent('goRight'); - this.destroy(); - } - else if (e.key == 'left'){ - self.fireEvent('goLeft'); - this.destroy(); - } - else if (e.key == 'backspace'){ - self.del(); - this.destroy(); - self.fireEvent('goLeft'); - } - } - }, - 'styles': { - 'height': 0, - 'width': 0, - 'position': 'absolute', - 'top': -200 - } - }); - self.el.adopt(temp_input) - temp_input.focus(); - } - }, - - selectFrom: function(direction){ - var self = this; - - if(!direction || self.is_choice){ - self.focus(); - } - else { - self.focus(); - var position = direction == 'left' ? 
0 : self.input.get('value').length; - self.input.setCaretPosition(position); - } - - }, - - setWidth: function(){ - var self = this; - - if(self.span && self.input){ - self.span.set('text', self.input.get('value')); - self.input.setStyle('width', self.span.getSize().x+2); - } - }, - - del: function(){ - var self = this; - self.el.destroy(); - self.fireEvent('change'); - }, - - getValue: function(){ - return this.span.get('text'); - }, - - toElement: function(){ - return this.el; + self.input.set('value', self.input.get('value') + self.select.get('value')); + self.input.fireEvent('change'); } }); @@ -1280,9 +1130,11 @@ Option.Combined = new Class({ self.fieldset = self.input.getParent('fieldset'); self.combined_list = new Element('div.combined_table').inject(self.fieldset.getElement('h2'), 'after'); - self.values = {} - self.inputs = {} - self.items = [] + self.values = {}; + self.inputs = {}; + self.items = []; + self.labels = {}; + self.descriptions = {}; self.options.combine.each(function(name){ @@ -1290,27 +1142,31 @@ Option.Combined = new Class({ var values = self.inputs[name].get('value').split(','); values.each(function(value, nr){ - if (!self.values[nr]) self.values[nr] = {}; + if(!self.values[nr]) self.values[nr] = {}; self.values[nr][name] = value.trim(); }); self.inputs[name].getParent('.ctrlHolder').setStyle('display', 'none'); - self.inputs[name].addEvent('change', self.addEmpty.bind(self)) + self.inputs[name].addEvent('change', self.addEmpty.bind(self)); }); - var head = new Element('div.head').inject(self.combined_list) + var head = new Element('div.head').inject(self.combined_list); Object.each(self.inputs, function(input, name){ + var _in = input.getNext(); + self.labels[name] = input.getPrevious().get('text'); + self.descriptions[name] = _in ? 
_in.get('text') : ''; + new Element('abbr', { 'class': name, - 'text': input.getPrevious().get('text'), - //'title': input.getNext().get('text') - }).inject(head) - }) + 'text': self.labels[name], + 'title': self.descriptions[name] + }).inject(head); + }); - Object.each(self.values, function(item, nr){ + Object.each(self.values, function(item){ self.createItem(item); }); @@ -1322,22 +1178,22 @@ Option.Combined = new Class({ addEmpty: function(){ var self = this; - if(self.add_empty_timeout) clearTimeout(self.add_empty_timeout); + if(self.add_empty_timeout) clearRequestTimeout(self.add_empty_timeout); var has_empty = 0; self.items.each(function(ctrl_holder){ var empty_count = 0; self.options.combine.each(function(name){ - var input = ctrl_holder.getElement('input.'+name) - if(input.get('value') == '' || input.get('type') == 'checkbox') - empty_count++ + var input = ctrl_holder.getElement('input.' + name); + if(input.get('value') === '' || input.get('type') == 'checkbox') + empty_count++; }); has_empty += (empty_count == self.options.combine.length) ? 1 : 0; ctrl_holder[(empty_count == self.options.combine.length) ? 
'addClass' : 'removeClass']('is_empty'); }); if(has_empty > 0) return; - self.add_empty_timeout = setTimeout(function(){ + self.add_empty_timeout = requestTimeout(function(){ self.createItem({'use': true}); }, 10); }, @@ -1350,24 +1206,22 @@ Option.Combined = new Class({ value_empty = 0; self.options.combine.each(function(name){ - var value = values[name] || '' + var value = values[name] || ''; if(name.indexOf('use') != -1){ - var checkbox = new Element('input[type=checkbox].inlay.'+name, { + var checkbox = new Element('input[type=checkbox].'+name, { 'checked': +value, 'events': { 'click': self.saveCombined.bind(self), 'change': self.saveCombined.bind(self) } }).inject(item); - - new Form.Check(checkbox); } else { value_count++; - new Element('input[type=text].inlay.'+name, { + new Element('input[type=text].'+name, { 'value': value, - 'placeholder': name, + 'placeholder': self.labels[name] || name, 'events': { 'keyup': self.saveCombined.bind(self), 'change': self.saveCombined.bind(self) @@ -1383,11 +1237,11 @@ Option.Combined = new Class({ item[value_empty == value_count ? 'addClass' : 'removeClass']('is_empty'); - new Element('a.icon.delete', { + new Element('a.icon-cancel.delete', { 'events': { 'click': self.deleteCombinedItem.bind(self) } - }).inject(item) + }).inject(item); self.items.include(item); @@ -1395,10 +1249,9 @@ Option.Combined = new Class({ }, saveCombined: function(){ - var self = this; - + var self = this, + temp = {}; - var temp = {} self.items.each(function(item, nr){ self.options.combine.each(function(name){ var input = item.getElement('input.'+name); @@ -1407,7 +1260,7 @@ Option.Combined = new Class({ if(!temp[name]) temp[name] = []; temp[name][nr] = input.get('type') == 'checkbox' ? 
+input.get('checked') : input.get('value').trim(); - }) + }); }); self.options.combine.each(function(name){ @@ -1415,7 +1268,7 @@ Option.Combined = new Class({ self.inputs[name].fireEvent('change'); }); - self.addEmpty() + self.addEmpty(); }, @@ -1431,4 +1284,25 @@ Option.Combined = new Class({ self.saveCombined(); } -}); \ No newline at end of file +}); + +var createTooltip = function(description){ + + var tip = new Element('div.tooltip', { + 'events': { + 'mouseenter': function(){ + tip.addClass('shown'); + }, + 'mouseleave': function(){ + tip.removeClass('shown'); + } + } + }).adopt( + new Element('a.icon-info.info'), + new Element('div.tip', { + 'html': description + }) + ); + + return tip; +}; diff --git a/couchpotato/static/scripts/page/wanted.js b/couchpotato/static/scripts/page/wanted.js deleted file mode 100644 index a9e0bc71f5..0000000000 --- a/couchpotato/static/scripts/page/wanted.js +++ /dev/null @@ -1,353 +0,0 @@ -Page.Wanted = new Class({ - - Extends: PageBase, - - name: 'wanted', - title: 'Gimmy gimmy gimmy!', - - indexAction: function(param){ - var self = this; - - if(!self.wanted){ - - self.manual_search = new Element('a', { - 'title': 'Force a search for the full wanted list', - 'text': 'Search all wanted', - 'events':{ - 'click': self.doFullSearch.bind(self, true) - } - }); - - // Wanted movies - self.wanted = new MovieList({ - 'identifier': 'wanted', - 'status': 'active', - 'actions': MovieActions, - 'add_new': true, - 'menu': [self.manual_search], - 'on_empty_element': App.createUserscriptButtons().addClass('empty_wanted') - }); - $(self.wanted).inject(self.el); - - // Check if search is in progress - self.startProgressInterval(); - } - - }, - - doFullSearch: function(full){ - var self = this; - - if(!self.search_in_progress){ - - Api.request('searcher.full_search'); - self.startProgressInterval(); - - } - - }, - - startProgressInterval: function(){ - var self = this; - - var start_text = self.manual_search.get('text'); - 
self.progress_interval = setInterval(function(){ - if(self.search_progress && self.search_progress.running) return; - self.search_progress = Api.request('searcher.progress', { - 'onComplete': function(json){ - self.search_in_progress = true; - if(!json.progress){ - clearInterval(self.progress_interval); - self.search_in_progress = false; - self.manual_search.set('text', start_text); - } - else { - var progress = json.progress; - self.manual_search.set('text', 'Searching.. (' + (((progress.total-progress.to_go)/progress.total)*100).round() + '%)'); - } - } - }); - }, 1000); - - } - -}); - -var MovieActions = {}; -window.addEvent('domready', function(){ - - MovieActions.Wanted = { - 'IMDB': IMDBAction - ,'Trailer': TrailerAction - ,'Releases': ReleaseAction - ,'Edit': new Class({ - - Extends: MovieAction, - - create: function(){ - var self = this; - - self.el = new Element('a.edit', { - 'title': 'Change movie information, like title and quality.', - 'events': { - 'click': self.editMovie.bind(self) - } - }); - - }, - - editMovie: function(e){ - var self = this; - (e).preventDefault(); - - if(!self.options_container){ - self.options_container = new Element('div.options').adopt( - new Element('div.form').adopt( - self.title_select = new Element('select', { - 'name': 'title' - }), - self.profile_select = new Element('select', { - 'name': 'profile' - }), - new Element('a.button.edit', { - 'text': 'Save & Search', - 'events': { - 'click': self.save.bind(self) - } - }) - ) - ).inject(self.movie, 'top'); - - Array.each(self.movie.data.library.titles, function(alt){ - new Element('option', { - 'text': alt.title - }).inject(self.title_select); - - if(alt['default']) - self.title_select.set('value', alt.title); - }); - - - Quality.getActiveProfiles().each(function(profile){ - - var profile_id = profile.id ? profile.id : profile.data.id; - - new Element('option', { - 'value': profile_id, - 'text': profile.label ? 
profile.label : profile.data.label - }).inject(self.profile_select); - - if(self.movie.profile && self.movie.profile.data && self.movie.profile.data.id == profile_id) - self.profile_select.set('value', profile_id); - }); - - } - - self.movie.slide('in', self.options_container); - }, - - save: function(e){ - (e).preventDefault(); - var self = this; - - Api.request('movie.edit', { - 'data': { - 'id': self.movie.get('id'), - 'default_title': self.title_select.get('value'), - 'profile_id': self.profile_select.get('value') - }, - 'useSpinner': true, - 'spinnerTarget': $(self.movie), - 'onComplete': function(){ - self.movie.quality.set('text', self.profile_select.getSelected()[0].get('text')); - self.movie.title.set('text', self.title_select.getSelected()[0].get('text')); - } - }); - - self.movie.slide('out'); - } - - }) - - ,'Refresh': new Class({ - - Extends: MovieAction, - - create: function(){ - var self = this; - - self.el = new Element('a.refresh', { - 'title': 'Refresh the movie info and do a forced search', - 'events': { - 'click': self.doRefresh.bind(self) - } - }); - - }, - - doRefresh: function(e){ - var self = this; - (e).preventDefault(); - - Api.request('movie.refresh', { - 'data': { - 'id': self.movie.get('id') - } - }); - } - - }) - - ,'Delete': new Class({ - - Extends: MovieAction, - - Implements: [Chain], - - create: function(){ - var self = this; - - self.el = new Element('a.delete', { - 'title': 'Remove the movie from this CP list', - 'events': { - 'click': self.showConfirm.bind(self) - } - }); - - }, - - showConfirm: function(e){ - var self = this; - (e).preventDefault(); - - if(!self.delete_container){ - self.delete_container = new Element('div.buttons.delete_container').adopt( - new Element('a.cancel', { - 'text': 'Cancel', - 'events': { - 'click': self.hideConfirm.bind(self) - } - }), - new Element('span.or', { - 'text': 'or' - }), - new Element('a.button.delete', { - 'text': 'Delete ' + self.movie.title.get('text'), - 'events': { - 'click': 
self.del.bind(self) - } - }) - ).inject(self.movie, 'top'); - } - - self.movie.slide('in', self.delete_container); - - }, - - hideConfirm: function(e){ - var self = this; - (e).preventDefault(); - - self.movie.slide('out'); - }, - - del: function(e){ - (e).preventDefault(); - var self = this; - - var movie = $(self.movie); - - self.chain( - function(){ - self.callChain(); - }, - function(){ - Api.request('movie.delete', { - 'data': { - 'id': self.movie.get('id'), - 'delete_from': self.movie.list.options.identifier - }, - 'onComplete': function(){ - movie.set('tween', { - 'duration': 300, - 'onComplete': function(){ - self.movie.destroy() - } - }); - movie.tween('height', 0); - } - }); - } - ); - - self.callChain(); - - } - - }) - }; - - MovieActions.Snatched = { - 'IMDB': IMDBAction - ,'Delete': MovieActions.Wanted.Delete - }; - - MovieActions.Done = { - 'IMDB': IMDBAction - ,'Edit': MovieActions.Wanted.Edit - ,'Trailer': TrailerAction - ,'Files': new Class({ - - Extends: MovieAction, - - create: function(){ - var self = this; - - self.el = new Element('a.directory', { - 'title': 'Available files', - 'events': { - 'click': self.showFiles.bind(self) - } - }); - - }, - - showFiles: function(e){ - var self = this; - (e).preventDefault(); - - if(!self.options_container){ - self.options_container = new Element('div.options').adopt( - self.files_container = new Element('div.files.table') - ).inject(self.movie, 'top'); - - // Header - new Element('div.item.head').adopt( - new Element('span.name', {'text': 'File'}), - new Element('span.type', {'text': 'Type'}), - new Element('span.is_available', {'text': 'Available'}) - ).inject(self.files_container) - - Array.each(self.movie.data.releases, function(release){ - - var rel = new Element('div.release').inject(self.files_container); - - Array.each(release.files, function(file){ - new Element('div.file.item').adopt( - new Element('span.name', {'text': file.path}), - new Element('span.type', {'text': 
File.Type.get(file.type_id).name}), - new Element('span.available', {'text': file.available}) - ).inject(rel) - }); - }); - - } - - self.movie.slide('in', self.options_container); - }, - - }) - ,'Delete': MovieActions.Wanted.Delete - }; - -}) \ No newline at end of file diff --git a/couchpotato/static/scripts/vendor/Array.stableSort.js b/couchpotato/static/scripts/vendor/Array.stableSort.js new file mode 100644 index 0000000000..062c756635 --- /dev/null +++ b/couchpotato/static/scripts/vendor/Array.stableSort.js @@ -0,0 +1,56 @@ +/* +--- + +script: Array.stableSort.js + +description: Add a stable sort algorithm for all browsers + +license: MIT-style license. + +authors: + - Yorick Sijsling + +requires: + core/1.3: '*' + +provides: + - [Array.stableSort, Array.mergeSort] + +... +*/ + +(function() { + + var defaultSortFunction = function(a, b) { + return a > b ? 1 : (a < b ? -1 : 0); + } + + Array.implement({ + + stableSort: function(compare) { + // I would love some real feature recognition. Problem is that an unstable algorithm sometimes/often gives the same result as an unstable algorithm. + return (Browser.chrome || Browser.firefox2 || Browser.opera9) ? this.mergeSort(compare) : this.sort(compare); + }, + + mergeSort: function(compare, token) { + compare = compare || defaultSortFunction; + if (this.length > 1) { + // Split and sort both parts + var right = this.splice(Math.floor(this.length / 2)).mergeSort(compare); + var left = this.splice(0).mergeSort(compare); // 'this' is now empty. + + // Merge parts together + while (left.length > 0 || right.length > 0) { + this.push( + right.length === 0 ? left.shift() + : left.length === 0 ? right.shift() + : compare(left[0], right[0]) > 0 ? 
right.shift() + : left.shift()); + } + } + return this; + } + + }); +})(); + diff --git a/couchpotato/static/scripts/vendor/dynamics.js b/couchpotato/static/scripts/vendor/dynamics.js new file mode 100644 index 0000000000..5a065fc218 --- /dev/null +++ b/couchpotato/static/scripts/vendor/dynamics.js @@ -0,0 +1,1989 @@ +// Generated by CoffeeScript 1.7.1 +(function() { + var Color, DecomposedMatrix, DecomposedMatrix2D, InterpolableArray, InterpolableColor, InterpolableObject, InterpolableWithUnit, Matrix, Matrix2D, Set, Vector, addTimeout, animationTick, animations, animationsTimeouts, applyDefaults, applyFrame, applyProperties, baseSVG, cacheFn, cancelTimeout, clone, createInterpolable, defaultValueForKey, degProperties, dynamics, getCurrentProperties, interpolate, isDocumentVisible, isSVGElement, lastTime, leftDelayForTimeout, makeArrayFn, observeVisibilityChange, parseProperties, prefixFor, propertyWithPrefix, pxProperties, rAF, roundf, runLoopPaused, runLoopRunning, runLoopTick, setRealTimeout, slow, slowRatio, startAnimation, startRunLoop, svgProperties, timeBeforeVisibilityChange, timeoutLastId, timeouts, toDashed, transformProperties, transformValueForProperty, unitForProperty, + __bind = function(fn, me){ return function(){ return fn.apply(me, arguments); }; }; + + isDocumentVisible = function() { + return document.visibilityState === "visible" || (dynamics.tests != null); + }; + + observeVisibilityChange = (function() { + var fns; + fns = []; + if (typeof document !== "undefined" && document !== null) { + document.addEventListener("visibilitychange", function() { + var fn, _i, _len, _results; + _results = []; + for (_i = 0, _len = fns.length; _i < _len; _i++) { + fn = fns[_i]; + _results.push(fn(isDocumentVisible())); + } + return _results; + }); + } + return function(fn) { + return fns.push(fn); + }; + })(); + + clone = function(o) { + var k, newO, v; + newO = {}; + for (k in o) { + v = o[k]; + newO[k] = v; + } + return newO; + }; + + cacheFn = 
function(func) { + var data; + data = {}; + return function() { + var k, key, result, _i, _len; + key = ""; + for (_i = 0, _len = arguments.length; _i < _len; _i++) { + k = arguments[_i]; + key += k.toString() + ","; + } + result = data[key]; + if (!result) { + data[key] = result = func.apply(this, arguments); + } + return result; + }; + }; + + makeArrayFn = function(fn) { + return function(el) { + var args, i, res; + if (el instanceof Array || el instanceof NodeList || el instanceof HTMLCollection) { + res = (function() { + var _i, _ref, _results; + _results = []; + for (i = _i = 0, _ref = el.length; 0 <= _ref ? _i < _ref : _i > _ref; i = 0 <= _ref ? ++_i : --_i) { + args = Array.prototype.slice.call(arguments, 1); + args.splice(0, 0, el[i]); + _results.push(fn.apply(this, args)); + } + return _results; + }).apply(this, arguments); + return res; + } + return fn.apply(this, arguments); + }; + }; + + applyDefaults = function(options, defaults) { + var k, v, _results; + _results = []; + for (k in defaults) { + v = defaults[k]; + _results.push(options[k] != null ? 
options[k] : options[k] = v); + } + return _results; + }; + + applyFrame = function(el, properties) { + var k, v, _results; + if ((el.style != null)) { + return applyProperties(el, properties); + } else { + _results = []; + for (k in properties) { + v = properties[k]; + _results.push(el[k] = v.format()); + } + return _results; + } + }; + + applyProperties = function(el, properties) { + var isSVG, k, matrix, transforms, v; + properties = parseProperties(properties); + transforms = []; + isSVG = isSVGElement(el); + for (k in properties) { + v = properties[k]; + if (transformProperties.contains(k)) { + transforms.push([k, v]); + } else { + if (v.format != null) { + v = v.format(); + } else { + v = "" + v + (unitForProperty(k, v)); + } + if (isSVG && svgProperties.contains(k)) { + el.setAttribute(k, v); + } else { + el.style[propertyWithPrefix(k)] = v; + } + } + } + if (transforms.length > 0) { + if (isSVG) { + matrix = new Matrix2D(); + matrix.applyProperties(transforms); + return el.setAttribute("transform", matrix.decompose().format()); + } else { + v = (transforms.map(function(transform) { + return transformValueForProperty(transform[0], transform[1]); + })).join(" "); + return el.style[propertyWithPrefix("transform")] = v; + } + } + }; + + isSVGElement = function(el) { + var _ref, _ref1; + if ((typeof SVGElement !== "undefined" && SVGElement !== null) && (typeof SVGSVGElement !== "undefined" && SVGSVGElement !== null)) { + return el instanceof SVGElement && !(el instanceof SVGSVGElement); + } else { + return (_ref = (_ref1 = dynamics.tests) != null ? typeof _ref1.isSVG === "function" ? _ref1.isSVG(el) : void 0 : void 0) != null ? 
_ref : false; + } + }; + + roundf = function(v, decimal) { + var d; + d = Math.pow(10, decimal); + return Math.round(v * d) / d; + }; + + Set = (function() { + function Set(array) { + var v, _i, _len; + this.obj = {}; + for (_i = 0, _len = array.length; _i < _len; _i++) { + v = array[_i]; + this.obj[v] = 1; + } + } + + Set.prototype.contains = function(v) { + return this.obj[v] === 1; + }; + + return Set; + + })(); + + toDashed = function(str) { + return str.replace(/([A-Z])/g, function($1) { + return "-" + $1.toLowerCase(); + }); + }; + + pxProperties = new Set('marginTop,marginLeft,marginBottom,marginRight,paddingTop,paddingLeft,paddingBottom,paddingRight,top,left,bottom,right,translateX,translateY,translateZ,perspectiveX,perspectiveY,perspectiveZ,width,height,maxWidth,maxHeight,minWidth,minHeight,borderRadius'.split(',')); + + degProperties = new Set('rotate,rotateX,rotateY,rotateZ,skew,skewX,skewY,skewZ'.split(',')); + + transformProperties = new Set('translate,translateX,translateY,translateZ,scale,scaleX,scaleY,scaleZ,rotate,rotateX,rotateY,rotateZ,rotateC,rotateCX,rotateCY,skew,skewX,skewY,skewZ,perspective'.split(',')); + + svgProperties = new Set('accent-height,ascent,azimuth,baseFrequency,baseline-shift,bias,cx,cy,d,diffuseConstant,divisor,dx,dy,elevation,filterRes,fx,fy,gradientTransform,height,k1,k2,k3,k4,kernelMatrix,kernelUnitLength,letter-spacing,limitingConeAngle,markerHeight,markerWidth,numOctaves,order,overline-position,overline-thickness,pathLength,points,pointsAtX,pointsAtY,pointsAtZ,r,radius,rx,ry,seed,specularConstant,specularExponent,stdDeviation,stop-color,stop-opacity,strikethrough-position,strikethrough-thickness,surfaceScale,target,targetX,targetY,transform,underline-position,underline-thickness,viewBox,width,x,x1,x2,y,y1,y2,z'.split(',')); + + unitForProperty = function(k, v) { + if (typeof v !== 'number') { + return ''; + } + if (pxProperties.contains(k)) { + return 'px'; + } else if (degProperties.contains(k)) { + return 'deg'; + } + 
return ''; + }; + + transformValueForProperty = function(k, v) { + var match, unit; + match = ("" + v).match(/^([0-9.-]*)([^0-9]*)$/); + if (match != null) { + v = match[1]; + unit = match[2]; + } else { + v = parseFloat(v); + } + v = roundf(parseFloat(v), 10); + if ((unit == null) || unit === "") { + unit = unitForProperty(k, v); + } + return "" + k + "(" + v + unit + ")"; + }; + + parseProperties = function(properties) { + var axis, match, parsed, property, value, _i, _len, _ref; + parsed = {}; + for (property in properties) { + value = properties[property]; + if (transformProperties.contains(property)) { + match = property.match(/(translate|rotateC|rotate|skew|scale|perspective)(X|Y|Z|)/); + if (match && match[2].length > 0) { + parsed[property] = value; + } else { + _ref = ['X', 'Y', 'Z']; + for (_i = 0, _len = _ref.length; _i < _len; _i++) { + axis = _ref[_i]; + parsed[match[1] + axis] = value; + } + } + } else { + parsed[property] = value; + } + } + return parsed; + }; + + defaultValueForKey = function(key) { + var v; + v = key === 'opacity' ? 1 : 0; + return "" + v + (unitForProperty(key, v)); + }; + + getCurrentProperties = function(el, keys) { + var isSVG, key, matrix, properties, style, v, _i, _j, _len, _len1, _ref; + properties = {}; + isSVG = isSVGElement(el); + if (el.style != null) { + style = window.getComputedStyle(el, null); + for (_i = 0, _len = keys.length; _i < _len; _i++) { + key = keys[_i]; + if (transformProperties.contains(key)) { + if (properties['transform'] == null) { + if (isSVG) { + matrix = new Matrix2D((_ref = el.transform.baseVal.consolidate()) != null ? 
_ref.matrix : void 0); + } else { + matrix = Matrix.fromTransform(style[propertyWithPrefix('transform')]); + } + properties['transform'] = matrix.decompose(); + } + } else { + v = style[key]; + if ((v == null) && svgProperties.contains(key)) { + v = el.getAttribute(key); + } + if (v === "" || (v == null)) { + v = defaultValueForKey(key); + } + properties[key] = createInterpolable(v); + } + } + } else { + for (_j = 0, _len1 = keys.length; _j < _len1; _j++) { + key = keys[_j]; + properties[key] = createInterpolable(el[key]); + } + } + return properties; + }; + + createInterpolable = function(value) { + var interpolable, klass, klasses, _i, _len; + klasses = [InterpolableColor, InterpolableArray, InterpolableObject, InterpolableWithUnit]; + for (_i = 0, _len = klasses.length; _i < _len; _i++) { + klass = klasses[_i]; + interpolable = klass.create(value); + if (interpolable != null) { + return interpolable; + } + } + return null; + }; + + InterpolableObject = (function() { + function InterpolableObject(obj) { + this.format = __bind(this.format, this); + this.interpolate = __bind(this.interpolate, this); + this.obj = obj; + } + + InterpolableObject.prototype.interpolate = function(endInterpolable, t) { + var end, k, newObj, start, v; + start = this.obj; + end = endInterpolable.obj; + newObj = {}; + for (k in start) { + v = start[k]; + if (v.interpolate != null) { + newObj[k] = v.interpolate(end[k], t); + } else { + newObj[k] = v; + } + } + return new InterpolableObject(newObj); + }; + + InterpolableObject.prototype.format = function() { + return this.obj; + }; + + InterpolableObject.create = function(value) { + var k, obj, v; + if (value instanceof Object) { + obj = {}; + for (k in value) { + v = value[k]; + obj[k] = createInterpolable(v); + } + return new InterpolableObject(obj); + } + return null; + }; + + return InterpolableObject; + + })(); + + InterpolableWithUnit = (function() { + function InterpolableWithUnit(value, prefix, suffix) { + this.prefix = prefix; + 
this.suffix = suffix; + this.format = __bind(this.format, this); + this.interpolate = __bind(this.interpolate, this); + this.value = parseFloat(value); + } + + InterpolableWithUnit.prototype.interpolate = function(endInterpolable, t) { + var end, start; + start = this.value; + end = endInterpolable.value; + return new InterpolableWithUnit((end - start) * t + start, endInterpolable.prefix || this.prefix, endInterpolable.suffix || this.suffix); + }; + + InterpolableWithUnit.prototype.format = function() { + if ((this.prefix == null) && (this.suffix == null)) { + return roundf(this.value, 5); + } + return this.prefix + roundf(this.value, 5) + this.suffix; + }; + + InterpolableWithUnit.create = function(value) { + var match; + if (typeof value !== "string") { + return new InterpolableWithUnit(value); + } + match = ("" + value).match("([^0-9.+-]*)([0-9.+-]+)([^0-9.+-]*)"); + if (match != null) { + return new InterpolableWithUnit(match[2], match[1], match[3]); + } + return null; + }; + + return InterpolableWithUnit; + + })(); + + InterpolableArray = (function() { + function InterpolableArray(values, sep) { + this.values = values; + this.sep = sep; + this.format = __bind(this.format, this); + this.interpolate = __bind(this.interpolate, this); + } + + InterpolableArray.prototype.interpolate = function(endInterpolable, t) { + var end, i, newValues, start, _i, _ref; + start = this.values; + end = endInterpolable.values; + newValues = []; + for (i = _i = 0, _ref = Math.min(start.length, end.length); 0 <= _ref ? _i < _ref : _i > _ref; i = 0 <= _ref ? 
++_i : --_i) { + if (start[i].interpolate != null) { + newValues.push(start[i].interpolate(end[i], t)); + } else { + newValues.push(start[i]); + } + } + return new InterpolableArray(newValues, this.sep); + }; + + InterpolableArray.prototype.format = function() { + var values; + values = this.values.map(function(val) { + if (val.format != null) { + return val.format(); + } else { + return val; + } + }); + if (this.sep != null) { + return values.join(this.sep); + } else { + return values; + } + }; + + InterpolableArray.createFromArray = function(arr, sep) { + var values; + values = arr.map(function(val) { + return createInterpolable(val) || val; + }); + values = values.filter(function(val) { + return val != null; + }); + return new InterpolableArray(values, sep); + }; + + InterpolableArray.create = function(value) { + var arr, sep, seps, _i, _len; + if (value instanceof Array) { + return InterpolableArray.createFromArray(value, null); + } + if (typeof value !== "string") { + return; + } + seps = [' ', ',', '|', ';', '/', ':']; + for (_i = 0, _len = seps.length; _i < _len; _i++) { + sep = seps[_i]; + arr = value.split(sep); + if (arr.length > 1) { + return InterpolableArray.createFromArray(arr, sep); + } + } + return null; + }; + + return InterpolableArray; + + })(); + + Color = (function() { + function Color(rgb, format) { + this.rgb = rgb != null ? 
rgb : {}; + this.format = format; + this.toRgba = __bind(this.toRgba, this); + this.toRgb = __bind(this.toRgb, this); + this.toHex = __bind(this.toHex, this); + } + + Color.fromHex = function(hex) { + var hex3, result; + hex3 = hex.match(/^#([a-f\d]{1})([a-f\d]{1})([a-f\d]{1})$/i); + if (hex3 != null) { + hex = "#" + hex3[1] + hex3[1] + hex3[2] + hex3[2] + hex3[3] + hex3[3]; + } + result = hex.match(/^#([a-f\d]{2})([a-f\d]{2})([a-f\d]{2})$/i); + if (result != null) { + return new Color({ + r: parseInt(result[1], 16), + g: parseInt(result[2], 16), + b: parseInt(result[3], 16), + a: 1 + }, "hex"); + } + return null; + }; + + Color.fromRgb = function(rgb) { + var match, _ref; + match = rgb.match(/^rgba?\(([0-9.]*), ?([0-9.]*), ?([0-9.]*)(?:, ?([0-9.]*))?\)$/); + if (match != null) { + return new Color({ + r: parseFloat(match[1]), + g: parseFloat(match[2]), + b: parseFloat(match[3]), + a: parseFloat((_ref = match[4]) != null ? _ref : 1) + }, match[4] != null ? "rgba" : "rgb"); + } + return null; + }; + + Color.componentToHex = function(c) { + var hex; + hex = c.toString(16); + if (hex.length === 1) { + return "0" + hex; + } else { + return hex; + } + }; + + Color.prototype.toHex = function() { + return "#" + Color.componentToHex(this.rgb.r) + Color.componentToHex(this.rgb.g) + Color.componentToHex(this.rgb.b); + }; + + Color.prototype.toRgb = function() { + return "rgb(" + this.rgb.r + ", " + this.rgb.g + ", " + this.rgb.b + ")"; + }; + + Color.prototype.toRgba = function() { + return "rgba(" + this.rgb.r + ", " + this.rgb.g + ", " + this.rgb.b + ", " + this.rgb.a + ")"; + }; + + return Color; + + })(); + + InterpolableColor = (function() { + function InterpolableColor(color) { + this.color = color; + this.format = __bind(this.format, this); + this.interpolate = __bind(this.interpolate, this); + } + + InterpolableColor.prototype.interpolate = function(endInterpolable, t) { + var end, k, rgb, start, v, _i, _len, _ref; + start = this.color; + end = endInterpolable.color; 
+ rgb = {}; + _ref = ['r', 'g', 'b']; + for (_i = 0, _len = _ref.length; _i < _len; _i++) { + k = _ref[_i]; + v = Math.round((end.rgb[k] - start.rgb[k]) * t + start.rgb[k]); + rgb[k] = Math.min(255, Math.max(0, v)); + } + k = "a"; + v = roundf((end.rgb[k] - start.rgb[k]) * t + start.rgb[k], 5); + rgb[k] = Math.min(1, Math.max(0, v)); + return new InterpolableColor(new Color(rgb, end.format)); + }; + + InterpolableColor.prototype.format = function() { + if (this.color.format === "hex") { + return this.color.toHex(); + } else if (this.color.format === "rgb") { + return this.color.toRgb(); + } else if (this.color.format === "rgba") { + return this.color.toRgba(); + } + }; + + InterpolableColor.create = function(value) { + var color; + if (typeof value !== "string") { + return; + } + color = Color.fromHex(value) || Color.fromRgb(value); + if (color != null) { + return new InterpolableColor(color); + } + return null; + }; + + return InterpolableColor; + + })(); + + DecomposedMatrix2D = (function() { + function DecomposedMatrix2D(props) { + this.props = props; + this.applyRotateCenter = __bind(this.applyRotateCenter, this); + this.format = __bind(this.format, this); + this.interpolate = __bind(this.interpolate, this); + } + + DecomposedMatrix2D.prototype.interpolate = function(endMatrix, t) { + var i, k, newProps, _i, _j, _k, _l, _len, _len1, _ref, _ref1, _ref2; + newProps = {}; + _ref = ['translate', 'scale', 'rotate']; + for (_i = 0, _len = _ref.length; _i < _len; _i++) { + k = _ref[_i]; + newProps[k] = []; + for (i = _j = 0, _ref1 = this.props[k].length; 0 <= _ref1 ? _j < _ref1 : _j > _ref1; i = 0 <= _ref1 ? 
++_j : --_j) { + newProps[k][i] = (endMatrix.props[k][i] - this.props[k][i]) * t + this.props[k][i]; + } + } + for (i = _k = 1; _k <= 2; i = ++_k) { + newProps['rotate'][i] = endMatrix.props['rotate'][i]; + } + _ref2 = ['skew']; + for (_l = 0, _len1 = _ref2.length; _l < _len1; _l++) { + k = _ref2[_l]; + newProps[k] = (endMatrix.props[k] - this.props[k]) * t + this.props[k]; + } + return new DecomposedMatrix2D(newProps); + }; + + DecomposedMatrix2D.prototype.format = function() { + return "translate(" + (this.props.translate.join(',')) + ") rotate(" + (this.props.rotate.join(',')) + ") skewX(" + this.props.skew + ") scale(" + (this.props.scale.join(',')) + ")"; + }; + + DecomposedMatrix2D.prototype.applyRotateCenter = function(rotateC) { + var i, m, m2d, negativeTranslate, _i, _results; + m = baseSVG.createSVGMatrix(); + m = m.translate(rotateC[0], rotateC[1]); + m = m.rotate(this.props.rotate[0]); + m = m.translate(-rotateC[0], -rotateC[1]); + m2d = new Matrix2D(m); + negativeTranslate = m2d.decompose().props.translate; + _results = []; + for (i = _i = 0; _i <= 1; i = ++_i) { + _results.push(this.props.translate[i] -= negativeTranslate[i]); + } + return _results; + }; + + return DecomposedMatrix2D; + + })(); + + baseSVG = typeof document !== "undefined" && document !== null ? 
document.createElementNS("http://www.w3.org/2000/svg", "svg") : void 0; + + Matrix2D = (function() { + function Matrix2D(m) { + this.m = m; + this.applyProperties = __bind(this.applyProperties, this); + this.decompose = __bind(this.decompose, this); + if (!this.m) { + this.m = baseSVG.createSVGMatrix(); + } + } + + Matrix2D.prototype.decompose = function() { + var kx, ky, kz, r0, r1; + r0 = new Vector([this.m.a, this.m.b]); + r1 = new Vector([this.m.c, this.m.d]); + kx = r0.length(); + kz = r0.dot(r1); + r0 = r0.normalize(); + ky = r1.combine(r0, 1, -kz).length(); + return new DecomposedMatrix2D({ + translate: [this.m.e, this.m.f], + rotate: [Math.atan2(this.m.b, this.m.a) * 180 / Math.PI, this.rotateCX, this.rotateCY], + scale: [kx, ky], + skew: kz / ky * 180 / Math.PI + }); + }; + + Matrix2D.prototype.applyProperties = function(properties) { + var hash, k, props, v, _i, _len, _ref, _ref1; + hash = {}; + for (_i = 0, _len = properties.length; _i < _len; _i++) { + props = properties[_i]; + hash[props[0]] = props[1]; + } + for (k in hash) { + v = hash[k]; + if (k === "translateX") { + this.m = this.m.translate(v, 0); + } else if (k === "translateY") { + this.m = this.m.translate(0, v); + } else if (k === "scaleX") { + this.m = this.m.scale(v, 1); + } else if (k === "scaleY") { + this.m = this.m.scale(1, v); + } else if (k === "rotateZ") { + this.m = this.m.rotate(v); + } else if (k === "skewX") { + this.m = this.m.skewX(v); + } else if (k === "skewY") { + this.m = this.m.skewY(v); + } + } + this.rotateCX = (_ref = hash.rotateCX) != null ? _ref : 0; + return this.rotateCY = (_ref1 = hash.rotateCY) != null ? 
_ref1 : 0; + }; + + return Matrix2D; + + })(); + + Vector = (function() { + function Vector(els) { + this.els = els; + this.combine = __bind(this.combine, this); + this.normalize = __bind(this.normalize, this); + this.length = __bind(this.length, this); + this.cross = __bind(this.cross, this); + this.dot = __bind(this.dot, this); + this.e = __bind(this.e, this); + } + + Vector.prototype.e = function(i) { + if (i < 1 || i > this.els.length) { + return null; + } else { + return this.els[i - 1]; + } + }; + + Vector.prototype.dot = function(vector) { + var V, n, product; + V = vector.els || vector; + product = 0; + n = this.els.length; + if (n !== V.length) { + return null; + } + n += 1; + while (--n) { + product += this.els[n - 1] * V[n - 1]; + } + return product; + }; + + Vector.prototype.cross = function(vector) { + var A, B; + B = vector.els || vector; + if (this.els.length !== 3 || B.length !== 3) { + return null; + } + A = this.els; + return new Vector([(A[1] * B[2]) - (A[2] * B[1]), (A[2] * B[0]) - (A[0] * B[2]), (A[0] * B[1]) - (A[1] * B[0])]); + }; + + Vector.prototype.length = function() { + var a, e, _i, _len, _ref; + a = 0; + _ref = this.els; + for (_i = 0, _len = _ref.length; _i < _len; _i++) { + e = _ref[_i]; + a += Math.pow(e, 2); + } + return Math.sqrt(a); + }; + + Vector.prototype.normalize = function() { + var e, i, length, newElements, _ref; + length = this.length(); + newElements = []; + _ref = this.els; + for (i in _ref) { + e = _ref[i]; + newElements[i] = e / length; + } + return new Vector(newElements); + }; + + Vector.prototype.combine = function(b, ascl, bscl) { + var i, result, _i, _ref; + result = []; + for (i = _i = 0, _ref = this.els.length; 0 <= _ref ? _i < _ref : _i > _ref; i = 0 <= _ref ? 
++_i : --_i) { + result[i] = (ascl * this.els[i]) + (bscl * b.els[i]); + } + return new Vector(result); + }; + + return Vector; + + })(); + + DecomposedMatrix = (function() { + function DecomposedMatrix() { + this.toMatrix = __bind(this.toMatrix, this); + this.format = __bind(this.format, this); + this.interpolate = __bind(this.interpolate, this); + } + + DecomposedMatrix.prototype.interpolate = function(decomposedB, t, only) { + var angle, decomposed, decomposedA, i, invscale, invth, k, qa, qb, scale, th, _i, _j, _k, _l, _len, _ref, _ref1; + if (only == null) { + only = null; + } + decomposedA = this; + decomposed = new DecomposedMatrix; + _ref = ['translate', 'scale', 'skew', 'perspective']; + for (_i = 0, _len = _ref.length; _i < _len; _i++) { + k = _ref[_i]; + decomposed[k] = []; + for (i = _j = 0, _ref1 = decomposedA[k].length - 1; 0 <= _ref1 ? _j <= _ref1 : _j >= _ref1; i = 0 <= _ref1 ? ++_j : --_j) { + if ((only == null) || only.indexOf(k) > -1 || only.indexOf("" + k + ['x', 'y', 'z'][i]) > -1) { + decomposed[k][i] = (decomposedB[k][i] - decomposedA[k][i]) * t + decomposedA[k][i]; + } else { + decomposed[k][i] = decomposedA[k][i]; + } + } + } + if ((only == null) || only.indexOf('rotate') !== -1) { + qa = decomposedA.quaternion; + qb = decomposedB.quaternion; + angle = qa[0] * qb[0] + qa[1] * qb[1] + qa[2] * qb[2] + qa[3] * qb[3]; + if (angle < 0.0) { + for (i = _k = 0; _k <= 3; i = ++_k) { + qa[i] = -qa[i]; + } + angle = -angle; + } + if (angle + 1.0 > .05) { + if (1.0 - angle >= .05) { + th = Math.acos(angle); + invth = 1.0 / Math.sin(th); + scale = Math.sin(th * (1.0 - t)) * invth; + invscale = Math.sin(th * t) * invth; + } else { + scale = 1.0 - t; + invscale = t; + } + } else { + qb[0] = -qa[1]; + qb[1] = qa[0]; + qb[2] = -qa[3]; + qb[3] = qa[2]; + scale = Math.sin(piDouble * (.5 - t)); + invscale = Math.sin(piDouble * t); + } + decomposed.quaternion = []; + for (i = _l = 0; _l <= 3; i = ++_l) { + decomposed.quaternion[i] = qa[i] * scale + qb[i] * 
invscale; + } + } else { + decomposed.quaternion = decomposedA.quaternion; + } + return decomposed; + }; + + DecomposedMatrix.prototype.format = function() { + return this.toMatrix().toString(); + }; + + DecomposedMatrix.prototype.toMatrix = function() { + var decomposedMatrix, i, j, match, matrix, quaternion, skew, temp, w, x, y, z, _i, _j, _k, _l; + decomposedMatrix = this; + matrix = Matrix.I(4); + for (i = _i = 0; _i <= 3; i = ++_i) { + matrix.els[i][3] = decomposedMatrix.perspective[i]; + } + quaternion = decomposedMatrix.quaternion; + x = quaternion[0]; + y = quaternion[1]; + z = quaternion[2]; + w = quaternion[3]; + skew = decomposedMatrix.skew; + match = [[1, 0], [2, 0], [2, 1]]; + for (i = _j = 2; _j >= 0; i = --_j) { + if (skew[i]) { + temp = Matrix.I(4); + temp.els[match[i][0]][match[i][1]] = skew[i]; + matrix = matrix.multiply(temp); + } + } + matrix = matrix.multiply(new Matrix([[1 - 2 * (y * y + z * z), 2 * (x * y - z * w), 2 * (x * z + y * w), 0], [2 * (x * y + z * w), 1 - 2 * (x * x + z * z), 2 * (y * z - x * w), 0], [2 * (x * z - y * w), 2 * (y * z + x * w), 1 - 2 * (x * x + y * y), 0], [0, 0, 0, 1]])); + for (i = _k = 0; _k <= 2; i = ++_k) { + for (j = _l = 0; _l <= 2; j = ++_l) { + matrix.els[i][j] *= decomposedMatrix.scale[i]; + } + matrix.els[3][i] = decomposedMatrix.translate[i]; + } + return matrix; + }; + + return DecomposedMatrix; + + })(); + + Matrix = (function() { + function Matrix(els) { + this.els = els; + this.toString = __bind(this.toString, this); + this.decompose = __bind(this.decompose, this); + this.inverse = __bind(this.inverse, this); + this.augment = __bind(this.augment, this); + this.toRightTriangular = __bind(this.toRightTriangular, this); + this.transpose = __bind(this.transpose, this); + this.multiply = __bind(this.multiply, this); + this.dup = __bind(this.dup, this); + this.e = __bind(this.e, this); + } + + Matrix.prototype.e = function(i, j) { + if (i < 1 || i > this.els.length || j < 1 || j > this.els[0].length) { + 
return null; + } + return this.els[i - 1][j - 1]; + }; + + Matrix.prototype.dup = function() { + return new Matrix(this.els); + }; + + Matrix.prototype.multiply = function(matrix) { + var M, c, cols, elements, i, j, ki, kj, nc, ni, nj, returnVector, sum; + returnVector = matrix.modulus ? true : false; + M = matrix.els || matrix; + if (typeof M[0][0] === 'undefined') { + M = new Matrix(M).els; + } + ni = this.els.length; + ki = ni; + kj = M[0].length; + cols = this.els[0].length; + elements = []; + ni += 1; + while (--ni) { + i = ki - ni; + elements[i] = []; + nj = kj; + nj += 1; + while (--nj) { + j = kj - nj; + sum = 0; + nc = cols; + nc += 1; + while (--nc) { + c = cols - nc; + sum += this.els[i][c] * M[c][j]; + } + elements[i][j] = sum; + } + } + M = new Matrix(elements); + if (returnVector) { + return M.col(1); + } else { + return M; + } + }; + + Matrix.prototype.transpose = function() { + var cols, elements, i, j, ni, nj, rows; + rows = this.els.length; + cols = this.els[0].length; + elements = []; + ni = cols; + ni += 1; + while (--ni) { + i = cols - ni; + elements[i] = []; + nj = rows; + nj += 1; + while (--nj) { + j = rows - nj; + elements[i][j] = this.els[j][i]; + } + } + return new Matrix(elements); + }; + + Matrix.prototype.toRightTriangular = function() { + var M, els, i, j, k, kp, multiplier, n, np, p, _i, _j, _ref, _ref1; + M = this.dup(); + n = this.els.length; + k = n; + kp = this.els[0].length; + while (--n) { + i = k - n; + if (M.els[i][i] === 0) { + for (j = _i = _ref = i + 1; _ref <= k ? _i < k : _i > k; j = _ref <= k ? ++_i : --_i) { + if (M.els[j][i] !== 0) { + els = []; + np = kp; + np += 1; + while (--np) { + p = kp - np; + els.push(M.els[i][p] + M.els[j][p]); + } + M.els[i] = els; + break; + } + } + } + if (M.els[i][i] !== 0) { + for (j = _j = _ref1 = i + 1; _ref1 <= k ? _j < k : _j > k; j = _ref1 <= k ? 
++_j : --_j) { + multiplier = M.els[j][i] / M.els[i][i]; + els = []; + np = kp; + np += 1; + while (--np) { + p = kp - np; + els.push(p <= i ? 0 : M.els[j][p] - M.els[i][p] * multiplier); + } + M.els[j] = els; + } + } + } + return M; + }; + + Matrix.prototype.augment = function(matrix) { + var M, T, cols, i, j, ki, kj, ni, nj; + M = matrix.els || matrix; + if (typeof M[0][0] === 'undefined') { + M = new Matrix(M).els; + } + T = this.dup(); + cols = T.els[0].length; + ni = T.els.length; + ki = ni; + kj = M[0].length; + if (ni !== M.length) { + return null; + } + ni += 1; + while (--ni) { + i = ki - ni; + nj = kj; + nj += 1; + while (--nj) { + j = kj - nj; + T.els[i][cols + j] = M[i][j]; + } + } + return T; + }; + + Matrix.prototype.inverse = function() { + var M, divisor, els, i, inverse_elements, j, ki, kp, new_element, ni, np, p, _i; + ni = this.els.length; + ki = ni; + M = this.augment(Matrix.I(ni)).toRightTriangular(); + kp = M.els[0].length; + inverse_elements = []; + ni += 1; + while (--ni) { + i = ni - 1; + els = []; + np = kp; + inverse_elements[i] = []; + divisor = M.els[i][i]; + np += 1; + while (--np) { + p = kp - np; + new_element = M.els[i][p] / divisor; + els.push(new_element); + if (p >= ki) { + inverse_elements[i].push(new_element); + } + } + M.els[i] = els; + for (j = _i = 0; 0 <= i ? _i < i : _i > i; j = 0 <= i ? ++_i : --_i) { + els = []; + np = kp; + np += 1; + while (--np) { + p = kp - np; + els.push(M.els[j][p] - M.els[i][p] * M.els[j][i]); + } + M.els[j] = els; + } + } + return new Matrix(inverse_elements); + }; + + Matrix.I = function(n) { + var els, i, j, k, nj; + els = []; + k = n; + n += 1; + while (--n) { + i = k - n; + els[i] = []; + nj = k; + nj += 1; + while (--nj) { + j = k - nj; + els[i][j] = i === j ? 
1 : 0; + } + } + return new Matrix(els); + }; + + Matrix.prototype.decompose = function() { + var els, i, inversePerspectiveMatrix, j, k, matrix, pdum3, perspective, perspectiveMatrix, quaternion, result, rightHandSide, rotate, row, rowElement, s, scale, skew, t, translate, transposedInversePerspectiveMatrix, type, typeKey, v, w, x, y, z, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r; + matrix = this; + translate = []; + scale = []; + skew = []; + quaternion = []; + perspective = []; + els = []; + for (i = _i = 0; _i <= 3; i = ++_i) { + els[i] = []; + for (j = _j = 0; _j <= 3; j = ++_j) { + els[i][j] = matrix.els[i][j]; + } + } + if (els[3][3] === 0) { + return false; + } + for (i = _k = 0; _k <= 3; i = ++_k) { + for (j = _l = 0; _l <= 3; j = ++_l) { + els[i][j] /= els[3][3]; + } + } + perspectiveMatrix = matrix.dup(); + for (i = _m = 0; _m <= 2; i = ++_m) { + perspectiveMatrix.els[i][3] = 0; + } + perspectiveMatrix.els[3][3] = 1; + if (els[0][3] !== 0 || els[1][3] !== 0 || els[2][3] !== 0) { + rightHandSide = new Vector(els.slice(0, 4)[3]); + inversePerspectiveMatrix = perspectiveMatrix.inverse(); + transposedInversePerspectiveMatrix = inversePerspectiveMatrix.transpose(); + perspective = transposedInversePerspectiveMatrix.multiply(rightHandSide).els; + for (i = _n = 0; _n <= 2; i = ++_n) { + els[i][3] = 0; + } + els[3][3] = 1; + } else { + perspective = [0, 0, 0, 1]; + } + for (i = _o = 0; _o <= 2; i = ++_o) { + translate[i] = els[3][i]; + els[3][i] = 0; + } + row = []; + for (i = _p = 0; _p <= 2; i = ++_p) { + row[i] = new Vector(els[i].slice(0, 3)); + } + scale[0] = row[0].length(); + row[0] = row[0].normalize(); + skew[0] = row[0].dot(row[1]); + row[1] = row[1].combine(row[0], 1.0, -skew[0]); + scale[1] = row[1].length(); + row[1] = row[1].normalize(); + skew[0] /= scale[1]; + skew[1] = row[0].dot(row[2]); + row[2] = row[2].combine(row[0], 1.0, -skew[1]); + skew[2] = row[1].dot(row[2]); + row[2] = row[2].combine(row[1], 1.0, -skew[2]); + scale[2] = row[2].length(); + 
row[2] = row[2].normalize(); + skew[1] /= scale[2]; + skew[2] /= scale[2]; + pdum3 = row[1].cross(row[2]); + if (row[0].dot(pdum3) < 0) { + for (i = _q = 0; _q <= 2; i = ++_q) { + scale[i] *= -1; + for (j = _r = 0; _r <= 2; j = ++_r) { + row[i].els[j] *= -1; + } + } + } + rowElement = function(index, elementIndex) { + return row[index].els[elementIndex]; + }; + rotate = []; + rotate[1] = Math.asin(-rowElement(0, 2)); + if (Math.cos(rotate[1]) !== 0) { + rotate[0] = Math.atan2(rowElement(1, 2), rowElement(2, 2)); + rotate[2] = Math.atan2(rowElement(0, 1), rowElement(0, 0)); + } else { + rotate[0] = Math.atan2(-rowElement(2, 0), rowElement(1, 1)); + rotate[1] = 0; + } + t = rowElement(0, 0) + rowElement(1, 1) + rowElement(2, 2) + 1.0; + if (t > 1e-4) { + s = 0.5 / Math.sqrt(t); + w = 0.25 / s; + x = (rowElement(2, 1) - rowElement(1, 2)) * s; + y = (rowElement(0, 2) - rowElement(2, 0)) * s; + z = (rowElement(1, 0) - rowElement(0, 1)) * s; + } else if ((rowElement(0, 0) > rowElement(1, 1)) && (rowElement(0, 0) > rowElement(2, 2))) { + s = Math.sqrt(1.0 + rowElement(0, 0) - rowElement(1, 1) - rowElement(2, 2)) * 2.0; + x = 0.25 * s; + y = (rowElement(0, 1) + rowElement(1, 0)) / s; + z = (rowElement(0, 2) + rowElement(2, 0)) / s; + w = (rowElement(2, 1) - rowElement(1, 2)) / s; + } else if (rowElement(1, 1) > rowElement(2, 2)) { + s = Math.sqrt(1.0 + rowElement(1, 1) - rowElement(0, 0) - rowElement(2, 2)) * 2.0; + x = (rowElement(0, 1) + rowElement(1, 0)) / s; + y = 0.25 * s; + z = (rowElement(1, 2) + rowElement(2, 1)) / s; + w = (rowElement(0, 2) - rowElement(2, 0)) / s; + } else { + s = Math.sqrt(1.0 + rowElement(2, 2) - rowElement(0, 0) - rowElement(1, 1)) * 2.0; + x = (rowElement(0, 2) + rowElement(2, 0)) / s; + y = (rowElement(1, 2) + rowElement(2, 1)) / s; + z = 0.25 * s; + w = (rowElement(1, 0) - rowElement(0, 1)) / s; + } + quaternion = [x, y, z, w]; + result = new DecomposedMatrix; + result.translate = translate; + result.scale = scale; + result.skew = skew; + 
result.quaternion = quaternion; + result.perspective = perspective; + result.rotate = rotate; + for (typeKey in result) { + type = result[typeKey]; + for (k in type) { + v = type[k]; + if (isNaN(v)) { + type[k] = 0; + } + } + } + return result; + }; + + Matrix.prototype.toString = function() { + var i, j, str, _i, _j; + str = 'matrix3d('; + for (i = _i = 0; _i <= 3; i = ++_i) { + for (j = _j = 0; _j <= 3; j = ++_j) { + str += roundf(this.els[i][j], 10); + if (!(i === 3 && j === 3)) { + str += ','; + } + } + } + str += ')'; + return str; + }; + + Matrix.matrixForTransform = cacheFn(function(transform) { + var matrixEl, result, style, _ref, _ref1, _ref2; + matrixEl = document.createElement('div'); + matrixEl.style.position = 'absolute'; + matrixEl.style.visibility = 'hidden'; + matrixEl.style[propertyWithPrefix("transform")] = transform; + document.body.appendChild(matrixEl); + style = window.getComputedStyle(matrixEl, null); + result = (_ref = (_ref1 = style.transform) != null ? _ref1 : style[propertyWithPrefix("transform")]) != null ? _ref : (_ref2 = dynamics.tests) != null ? _ref2.matrixForTransform(transform) : void 0; + document.body.removeChild(matrixEl); + return result; + }); + + Matrix.fromTransform = function(transform) { + var digits, elements, i, match, matrixElements, _i; + match = transform != null ? 
transform.match(/matrix3?d?\(([-0-9,e \.]*)\)/) : void 0; + if (match) { + digits = match[1].split(','); + digits = digits.map(parseFloat); + if (digits.length === 6) { + elements = [digits[0], digits[1], 0, 0, digits[2], digits[3], 0, 0, 0, 0, 1, 0, digits[4], digits[5], 0, 1]; + } else { + elements = digits; + } + } else { + elements = [1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1]; + } + matrixElements = []; + for (i = _i = 0; _i <= 3; i = ++_i) { + matrixElements.push(elements.slice(i * 4, i * 4 + 4)); + } + return new Matrix(matrixElements); + }; + + return Matrix; + + })(); + + prefixFor = cacheFn(function(property) { + var k, prefix, prop, propArray, propertyName, _i, _j, _len, _len1, _ref; + if (document.body.style[property] !== void 0) { + return ''; + } + propArray = property.split('-'); + propertyName = ""; + for (_i = 0, _len = propArray.length; _i < _len; _i++) { + prop = propArray[_i]; + propertyName += prop.substring(0, 1).toUpperCase() + prop.substring(1); + } + _ref = ["Webkit", "Moz", "ms"]; + for (_j = 0, _len1 = _ref.length; _j < _len1; _j++) { + prefix = _ref[_j]; + k = prefix + propertyName; + if (document.body.style[k] !== void 0) { + return prefix; + } + } + return ''; + }); + + propertyWithPrefix = cacheFn(function(property) { + var prefix; + prefix = prefixFor(property); + if (prefix === 'Moz') { + return "" + prefix + (property.substring(0, 1).toUpperCase() + property.substring(1)); + } + if (prefix !== '') { + return "-" + (prefix.toLowerCase()) + "-" + (toDashed(property)); + } + return toDashed(property); + }); + + rAF = typeof window !== "undefined" && window !== null ? 
window.requestAnimationFrame : void 0; + + animations = []; + + animationsTimeouts = []; + + slow = false; + + slowRatio = 1; + + if (typeof window !== "undefined" && window !== null) { + window.addEventListener('keyup', function(e) { + if (e.keyCode === 68 && e.shiftKey && e.ctrlKey) { + return dynamics.toggleSlow(); + } + }); + } + + if (rAF == null) { + lastTime = 0; + rAF = function(callback) { + var currTime, id, timeToCall; + currTime = Date.now(); + timeToCall = Math.max(0, 16 - (currTime - lastTime)); + id = window.setTimeout(function() { + return callback(currTime + timeToCall); + }, timeToCall); + lastTime = currTime + timeToCall; + return id; + }; + } + + runLoopRunning = false; + + runLoopPaused = false; + + startRunLoop = function() { + if (!runLoopRunning) { + runLoopRunning = true; + return rAF(runLoopTick); + } + }; + + runLoopTick = function(t) { + var animation, toRemoveAnimations, _i, _len; + if (runLoopPaused) { + rAF(runLoopTick); + return; + } + toRemoveAnimations = []; + for (_i = 0, _len = animations.length; _i < _len; _i++) { + animation = animations[_i]; + if (!animationTick(t, animation)) { + toRemoveAnimations.push(animation); + } + } + animations = animations.filter(function(animation) { + return toRemoveAnimations.indexOf(animation) === -1; + }); + if (animations.length === 0) { + return runLoopRunning = false; + } else { + return rAF(runLoopTick); + } + }; + + animationTick = function(t, animation) { + var key, properties, property, tt, y, _base, _base1, _ref; + if (animation.tStart == null) { + animation.tStart = t; + } + tt = (t - animation.tStart) / animation.options.duration; + y = animation.curve(tt); + properties = {}; + if (tt >= 1) { + if (animation.curve.initialForce) { + properties = animation.properties.start; + } else { + properties = animation.properties.end; + } + } else { + _ref = animation.properties.start; + for (key in _ref) { + property = _ref[key]; + properties[key] = interpolate(property, 
animation.properties.end[key], y); + } + } + applyFrame(animation.el, properties); + if (typeof (_base = animation.options).change === "function") { + _base.change(animation.el); + } + if (tt >= 1) { + if (typeof (_base1 = animation.options).complete === "function") { + _base1.complete(animation.el); + } + } + return tt < 1; + }; + + interpolate = function(start, end, y) { + if ((start != null) && (start.interpolate != null)) { + return start.interpolate(end, y); + } + return null; + }; + + startAnimation = function(el, properties, options, timeoutId) { + var endProperties, isSVG, k, matrix, startProperties, transforms, v, _base; + if (timeoutId != null) { + animationsTimeouts = animationsTimeouts.filter(function(timeout) { + return timeout.id !== timeoutId; + }); + } + dynamics.stop(el, { + timeout: false + }); + if (!options.animated) { + dynamics.css(el, properties); + if (typeof options.complete === "function") { + options.complete(this); + } + return; + } + properties = parseProperties(properties); + startProperties = getCurrentProperties(el, Object.keys(properties)); + endProperties = {}; + transforms = []; + for (k in properties) { + v = properties[k]; + if (transformProperties.contains(k)) { + transforms.push([k, v]); + } else { + endProperties[k] = createInterpolable(v); + if (endProperties[k] instanceof InterpolableWithUnit && (el.style != null)) { + endProperties[k].prefix = ''; + if ((_base = endProperties[k]).suffix == null) { + _base.suffix = unitForProperty(k, 0); + } + } + } + } + if (transforms.length > 0) { + isSVG = isSVGElement(el); + if (isSVG) { + matrix = new Matrix2D(); + matrix.applyProperties(transforms); + } else { + v = (transforms.map(function(transform) { + return transformValueForProperty(transform[0], transform[1]); + })).join(" "); + matrix = Matrix.fromTransform(Matrix.matrixForTransform(v)); + } + endProperties['transform'] = matrix.decompose(); + if (isSVG) { + 
startProperties.transform.applyRotateCenter([endProperties.transform.props.rotate[1], endProperties.transform.props.rotate[2]]); + } + } + animations.push({ + el: el, + properties: { + start: startProperties, + end: endProperties + }, + options: options, + curve: options.type.call(options.type, options) + }); + return startRunLoop(); + }; + + timeouts = []; + + timeoutLastId = 0; + + setRealTimeout = function(timeout) { + if (!isDocumentVisible()) { + return; + } + return timeout.realTimeoutId = setTimeout(function() { + timeout.fn(); + return cancelTimeout(timeout.id); + }, timeout.delay); + }; + + addTimeout = function(fn, delay) { + var timeout; + timeoutLastId += 1; + timeout = { + id: timeoutLastId, + tStart: Date.now(), + fn: fn, + delay: delay, + originalDelay: delay + }; + setRealTimeout(timeout); + timeouts.push(timeout); + return timeoutLastId; + }; + + cancelTimeout = function(id) { + return timeouts = timeouts.filter(function(timeout) { + if (timeout.id === id) { + clearTimeout(timeout.realTimeoutId); + } + return timeout.id !== id; + }); + }; + + leftDelayForTimeout = function(time, timeout) { + var consumedDelay; + if (time != null) { + consumedDelay = time - timeout.tStart; + return timeout.originalDelay - consumedDelay; + } else { + return timeout.originalDelay; + } + }; + + if (typeof window !== "undefined" && window !== null) { + window.addEventListener('unload', function() {}); + } + + timeBeforeVisibilityChange = null; + + observeVisibilityChange(function(visible) { + var animation, difference, timeout, _i, _j, _k, _len, _len1, _len2, _results; + runLoopPaused = !visible; + if (!visible) { + timeBeforeVisibilityChange = Date.now(); + _results = []; + for (_i = 0, _len = timeouts.length; _i < _len; _i++) { + timeout = timeouts[_i]; + _results.push(clearTimeout(timeout.realTimeoutId)); + } + return _results; + } else { + if (runLoopRunning) { + difference = Date.now() - timeBeforeVisibilityChange; + for (_j = 0, _len1 = animations.length; _j < 
_len1; _j++) { + animation = animations[_j]; + if (animation.tStart != null) { + animation.tStart += difference; + } + } + } + for (_k = 0, _len2 = timeouts.length; _k < _len2; _k++) { + timeout = timeouts[_k]; + timeout.delay = leftDelayForTimeout(timeBeforeVisibilityChange, timeout); + setRealTimeout(timeout); + } + return timeBeforeVisibilityChange = null; + } + }); + + dynamics = {}; + + dynamics.linear = function() { + return function(t) { + return t; + }; + }; + + dynamics.spring = function(options) { + var A1, A2, decal, frequency, friction, s; + if (options == null) { + options = {}; + } + applyDefaults(options, arguments.callee.defaults); + frequency = Math.max(1, options.frequency / 20); + friction = Math.pow(20, options.friction / 100); + s = options.anticipationSize / 1000; + decal = Math.max(0, s); + A1 = function(t) { + var M, a, b, x0, x1; + M = 0.8; + x0 = s / (1 - s); + x1 = 0; + b = (x0 - (M * x1)) / (x0 - x1); + a = (M - b) / x0; + return (a * t * options.anticipationStrength / 100) + b; + }; + A2 = function(t) { + return Math.pow(friction / 10, -t) * (1 - t); + }; + return function(t) { + var A, At, a, angle, b, frictionT, y0, yS; + frictionT = (t / (1 - s)) - (s / (1 - s)); + if (t < s) { + yS = (s / (1 - s)) - (s / (1 - s)); + y0 = (0 / (1 - s)) - (s / (1 - s)); + b = Math.acos(1 / A1(yS)); + a = (Math.acos(1 / A1(y0)) - b) / (frequency * (-s)); + A = A1; + } else { + A = A2; + b = 0; + a = 1; + } + At = A(frictionT); + angle = frequency * (t - s) * a + b; + return 1 - (At * Math.cos(angle)); + }; + }; + + dynamics.bounce = function(options) { + var A, fn, frequency, friction; + if (options == null) { + options = {}; + } + applyDefaults(options, arguments.callee.defaults); + frequency = Math.max(1, options.frequency / 20); + friction = Math.pow(20, options.friction / 100); + A = function(t) { + return Math.pow(friction / 10, -t) * (1 - t); + }; + fn = function(t) { + var At, a, angle, b; + b = -3.14 / 2; + a = 1; + At = A(t); + angle = 
frequency * t * a + b; + return At * Math.cos(angle); + }; + fn.initialForce = true; + return fn; + }; + + dynamics.gravity = function(options) { + var L, bounciness, curves, elasticity, fn, getPointInCurve, gravity; + if (options == null) { + options = {}; + } + applyDefaults(options, arguments.callee.defaults); + bounciness = Math.min(options.bounciness / 1250, 0.8); + elasticity = options.elasticity / 1000; + gravity = 100; + curves = []; + L = (function() { + var b, curve; + b = Math.sqrt(2 / gravity); + curve = { + a: -b, + b: b, + H: 1 + }; + if (options.initialForce) { + curve.a = 0; + curve.b = curve.b * 2; + } + while (curve.H > 0.001) { + L = curve.b - curve.a; + curve = { + a: curve.b, + b: curve.b + L * bounciness, + H: curve.H * bounciness * bounciness + }; + } + return curve.b; + })(); + getPointInCurve = function(a, b, H, t) { + var c, t2; + L = b - a; + t2 = (2 / L) * t - 1 - (a * 2 / L); + c = t2 * t2 * H - H + 1; + if (options.initialForce) { + c = 1 - c; + } + return c; + }; + (function() { + var L2, b, curve, _results; + b = Math.sqrt(2 / (gravity * L * L)); + curve = { + a: -b, + b: b, + H: 1 + }; + if (options.initialForce) { + curve.a = 0; + curve.b = curve.b * 2; + } + curves.push(curve); + L2 = L; + _results = []; + while (curve.b < 1 && curve.H > 0.001) { + L2 = curve.b - curve.a; + curve = { + a: curve.b, + b: curve.b + L2 * bounciness, + H: curve.H * elasticity + }; + _results.push(curves.push(curve)); + } + return _results; + })(); + fn = function(t) { + var curve, i, v; + i = 0; + curve = curves[i]; + while (!(t >= curve.a && t <= curve.b)) { + i += 1; + curve = curves[i]; + if (!curve) { + break; + } + } + if (!curve) { + v = options.initialForce ? 
0 : 1; + } else { + v = getPointInCurve(curve.a, curve.b, curve.H, t); + } + return v; + }; + fn.initialForce = options.initialForce; + return fn; + }; + + dynamics.forceWithGravity = function(options) { + if (options == null) { + options = {}; + } + applyDefaults(options, arguments.callee.defaults); + options.initialForce = true; + return dynamics.gravity(options); + }; + + dynamics.bezier = (function() { + var Bezier, Bezier_, yForX; + Bezier_ = function(t, p0, p1, p2, p3) { + return (Math.pow(1 - t, 3) * p0) + (3 * Math.pow(1 - t, 2) * t * p1) + (3 * (1 - t) * Math.pow(t, 2) * p2) + Math.pow(t, 3) * p3; + }; + Bezier = function(t, p0, p1, p2, p3) { + return { + x: Bezier_(t, p0.x, p1.x, p2.x, p3.x), + y: Bezier_(t, p0.y, p1.y, p2.y, p3.y) + }; + }; + yForX = function(xTarget, Bs, returnsToSelf) { + var B, aB, i, lower, percent, upper, x, xTolerance, _i, _len; + B = null; + for (_i = 0, _len = Bs.length; _i < _len; _i++) { + aB = Bs[_i]; + if (xTarget >= aB(0).x && xTarget <= aB(1).x) { + B = aB; + } + if (B !== null) { + break; + } + } + if (!B) { + if (returnsToSelf) { + return 0; + } else { + return 1; + } + } + xTolerance = 0.0001; + lower = 0; + upper = 1; + percent = (upper + lower) / 2; + x = B(percent).x; + i = 0; + while (Math.abs(xTarget - x) > xTolerance && i < 100) { + if (xTarget > x) { + lower = percent; + } else { + upper = percent; + } + percent = (upper + lower) / 2; + x = B(percent).x; + i += 1; + } + return B(percent).y; + }; + return function(options) { + var Bs, points, returnsToSelf; + if (options == null) { + options = {}; + } + points = options.points; + returnsToSelf = false; + Bs = (function() { + var i, k, _fn; + Bs = []; + _fn = function(pointA, pointB) { + var B2; + B2 = function(t) { + return Bezier(t, pointA, pointA.cp[pointA.cp.length - 1], pointB.cp[0], pointB); + }; + return Bs.push(B2); + }; + for (i in points) { + k = parseInt(i); + if (k >= points.length - 1) { + break; + } + _fn(points[k], points[k + 1]); + } + return Bs; + 
})(); + return function(t) { + if (t === 0) { + return 0; + } else if (t === 1) { + return 1; + } else { + return yForX(t, Bs, returnsToSelf); + } + }; + }; + })(); + + dynamics.easeInOut = function(options) { + var friction, _ref; + if (options == null) { + options = {}; + } + friction = (_ref = options.friction) != null ? _ref : arguments.callee.defaults.friction; + return dynamics.bezier({ + points: [ + { + x: 0, + y: 0, + cp: [ + { + x: 0.92 - (friction / 1000), + y: 0 + } + ] + }, { + x: 1, + y: 1, + cp: [ + { + x: 0.08 + (friction / 1000), + y: 1 + } + ] + } + ] + }); + }; + + dynamics.easeIn = function(options) { + var friction, _ref; + if (options == null) { + options = {}; + } + friction = (_ref = options.friction) != null ? _ref : arguments.callee.defaults.friction; + return dynamics.bezier({ + points: [ + { + x: 0, + y: 0, + cp: [ + { + x: 0.92 - (friction / 1000), + y: 0 + } + ] + }, { + x: 1, + y: 1, + cp: [ + { + x: 1, + y: 1 + } + ] + } + ] + }); + }; + + dynamics.easeOut = function(options) { + var friction, _ref; + if (options == null) { + options = {}; + } + friction = (_ref = options.friction) != null ? 
_ref : arguments.callee.defaults.friction; + return dynamics.bezier({ + points: [ + { + x: 0, + y: 0, + cp: [ + { + x: 0, + y: 0 + } + ] + }, { + x: 1, + y: 1, + cp: [ + { + x: 0.08 + (friction / 1000), + y: 1 + } + ] + } + ] + }); + }; + + dynamics.spring.defaults = { + frequency: 300, + friction: 200, + anticipationSize: 0, + anticipationStrength: 0 + }; + + dynamics.bounce.defaults = { + frequency: 300, + friction: 200 + }; + + dynamics.forceWithGravity.defaults = dynamics.gravity.defaults = { + bounciness: 400, + elasticity: 200 + }; + + dynamics.easeInOut.defaults = dynamics.easeIn.defaults = dynamics.easeOut.defaults = { + friction: 500 + }; + + dynamics.css = makeArrayFn(function(el, properties) { + return applyProperties(el, properties, true); + }); + + dynamics.animate = makeArrayFn(function(el, properties, options) { + var id; + if (options == null) { + options = {}; + } + options = clone(options); + applyDefaults(options, { + type: dynamics.easeInOut, + duration: 1000, + delay: 0, + animated: true + }); + options.duration = Math.max(0, options.duration * slowRatio); + options.delay = Math.max(0, options.delay); + if (options.delay === 0) { + return startAnimation(el, properties, options); + } else { + id = dynamics.setTimeout(function() { + return startAnimation(el, properties, options, id); + }, options.delay); + return animationsTimeouts.push({ + id: id, + el: el + }); + } + }); + + dynamics.stop = makeArrayFn(function(el, options) { + if (options == null) { + options = {}; + } + if (options.timeout == null) { + options.timeout = true; + } + if (options.timeout) { + animationsTimeouts = animationsTimeouts.filter(function(timeout) { + if (timeout.el === el && ((options.filter == null) || options.filter(timeout))) { + dynamics.clearTimeout(timeout.id); + return true; + } + return false; + }); + } + return animations = animations.filter(function(animation) { + return animation.el !== el; + }); + }); + + dynamics.setTimeout = function(fn, delay) { + return 
addTimeout(fn, delay * slowRatio); + }; + + dynamics.clearTimeout = function(id) { + return cancelTimeout(id); + }; + + dynamics.toggleSlow = function() { + slow = !slow; + if (slow) { + slowRatio = 3; + } else { + slowRatio = 1; + } + return typeof console !== "undefined" && console !== null ? typeof console.log === "function" ? console.log("dynamics.js: slow animations " + (slow ? "enabled" : "disabled")) : void 0 : void 0; + }; + + if (typeof module === "object" && typeof module.exports === "object") { + module.exports = dynamics; + } else if (typeof define === "function") { + define('dynamics', function() { + return dynamics; + }); + } else { + window.dynamics = dynamics; + } + +}).call(this); diff --git a/couchpotato/static/scripts/vendor/fastclick.js b/couchpotato/static/scripts/vendor/fastclick.js new file mode 100644 index 0000000000..3af4f9d6f1 --- /dev/null +++ b/couchpotato/static/scripts/vendor/fastclick.js @@ -0,0 +1,841 @@ +;(function () { + 'use strict'; + + /** + * @preserve FastClick: polyfill to remove click delays on browsers with touch UIs. + * + * @codingstandard ftlabs-jsv2 + * @copyright The Financial Times Limited [All Rights Reserved] + * @license MIT License (see LICENSE.txt) + */ + + /*jslint browser:true, node:true*/ + /*global define, Event, Node*/ + + + /** + * Instantiate fast-clicking listeners on the specified layer. + * + * @constructor + * @param {Element} layer The layer to listen on + * @param {Object} [options={}] The options to override the defaults + */ + function FastClick(layer, options) { + var oldOnClick; + + options = options || {}; + + /** + * Whether a click is currently being tracked. + * + * @type boolean + */ + this.trackingClick = false; + + + /** + * Timestamp for when click tracking started. + * + * @type number + */ + this.trackingClickStart = 0; + + + /** + * The element being tracked for a click. + * + * @type EventTarget + */ + this.targetElement = null; + + + /** + * X-coordinate of touch start event. 
+ * + * @type number + */ + this.touchStartX = 0; + + + /** + * Y-coordinate of touch start event. + * + * @type number + */ + this.touchStartY = 0; + + + /** + * ID of the last touch, retrieved from Touch.identifier. + * + * @type number + */ + this.lastTouchIdentifier = 0; + + + /** + * Touchmove boundary, beyond which a click will be cancelled. + * + * @type number + */ + this.touchBoundary = options.touchBoundary || 10; + + + /** + * The FastClick layer. + * + * @type Element + */ + this.layer = layer; + + /** + * The minimum time between tap(touchstart and touchend) events + * + * @type number + */ + this.tapDelay = options.tapDelay || 200; + + /** + * The maximum time for a tap + * + * @type number + */ + this.tapTimeout = options.tapTimeout || 700; + + if (FastClick.notNeeded(layer)) { + return; + } + + // Some old versions of Android don't have Function.prototype.bind + function bind(method, context) { + return function() { return method.apply(context, arguments); }; + } + + + var methods = ['onMouse', 'onClick', 'onTouchStart', 'onTouchMove', 'onTouchEnd', 'onTouchCancel']; + var context = this; + for (var i = 0, l = methods.length; i < l; i++) { + context[methods[i]] = bind(context[methods[i]], context); + } + + // Set up event handlers as required + if (deviceIsAndroid) { + layer.addEventListener('mouseover', this.onMouse, true); + layer.addEventListener('mousedown', this.onMouse, true); + layer.addEventListener('mouseup', this.onMouse, true); + } + + layer.addEventListener('click', this.onClick, true); + layer.addEventListener('touchstart', this.onTouchStart, false); + layer.addEventListener('touchmove', this.onTouchMove, false); + layer.addEventListener('touchend', this.onTouchEnd, false); + layer.addEventListener('touchcancel', this.onTouchCancel, false); + + // Hack is required for browsers that don't support Event#stopImmediatePropagation (e.g. 
Android 2) + // which is how FastClick normally stops click events bubbling to callbacks registered on the FastClick + // layer when they are cancelled. + if (!Event.prototype.stopImmediatePropagation) { + layer.removeEventListener = function(type, callback, capture) { + var rmv = Node.prototype.removeEventListener; + if (type === 'click') { + rmv.call(layer, type, callback.hijacked || callback, capture); + } else { + rmv.call(layer, type, callback, capture); + } + }; + + layer.addEventListener = function(type, callback, capture) { + var adv = Node.prototype.addEventListener; + if (type === 'click') { + adv.call(layer, type, callback.hijacked || (callback.hijacked = function(event) { + if (!event.propagationStopped) { + callback(event); + } + }), capture); + } else { + adv.call(layer, type, callback, capture); + } + }; + } + + // If a handler is already declared in the element's onclick attribute, it will be fired before + // FastClick's onClick handler. Fix this by pulling out the user-defined handler function and + // adding it as listener. + if (typeof layer.onclick === 'function') { + + // Android browser on at least 3.2 requires a new reference to the function in layer.onclick + // - the old one won't work if passed to addEventListener directly. + oldOnClick = layer.onclick; + layer.addEventListener('click', function(event) { + oldOnClick(event); + }, false); + layer.onclick = null; + } + } + + /** + * Windows Phone 8.1 fakes user agent string to look like Android and iPhone. + * + * @type boolean + */ + var deviceIsWindowsPhone = navigator.userAgent.indexOf("Windows Phone") >= 0; + + /** + * Android requires exceptions. + * + * @type boolean + */ + var deviceIsAndroid = navigator.userAgent.indexOf('Android') > 0 && !deviceIsWindowsPhone; + + + /** + * iOS requires exceptions. + * + * @type boolean + */ + var deviceIsIOS = /iP(ad|hone|od)/.test(navigator.userAgent) && !deviceIsWindowsPhone; + + + /** + * iOS 4 requires an exception for select elements. 
+ * + * @type boolean + */ + var deviceIsIOS4 = deviceIsIOS && (/OS 4_\d(_\d)?/).test(navigator.userAgent); + + + /** + * iOS 6.0-7.* requires the target element to be manually derived + * + * @type boolean + */ + var deviceIsIOSWithBadTarget = deviceIsIOS && (/OS [6-7]_\d/).test(navigator.userAgent); + + /** + * BlackBerry requires exceptions. + * + * @type boolean + */ + var deviceIsBlackBerry10 = navigator.userAgent.indexOf('BB10') > 0; + + /** + * Determine whether a given element requires a native click. + * + * @param {EventTarget|Element} target Target DOM element + * @returns {boolean} Returns true if the element needs a native click + */ + FastClick.prototype.needsClick = function(target) { + switch (target.nodeName.toLowerCase()) { + + // Don't send a synthetic click to disabled inputs (issue #62) + case 'button': + case 'select': + case 'textarea': + if (target.disabled) { + return true; + } + + break; + case 'input': + + // File inputs need real clicks on iOS 6 due to a browser bug (issue #68) + if ((deviceIsIOS && target.type === 'file') || target.disabled) { + return true; + } + + break; + case 'label': + case 'iframe': // iOS8 homescreen apps can prevent events bubbling into frames + case 'video': + return true; + } + + return (/\bneedsclick\b/).test(target.className); + }; + + + /** + * Determine whether a given element requires a call to focus to simulate click into element. + * + * @param {EventTarget|Element} target Target DOM element + * @returns {boolean} Returns true if the element requires a call to focus to simulate native click. 
+ */ + FastClick.prototype.needsFocus = function(target) { + switch (target.nodeName.toLowerCase()) { + case 'textarea': + return true; + case 'select': + return !deviceIsAndroid; + case 'input': + switch (target.type) { + case 'button': + case 'checkbox': + case 'file': + case 'image': + case 'radio': + case 'submit': + return false; + } + + // No point in attempting to focus disabled inputs + return !target.disabled && !target.readOnly; + default: + return (/\bneedsfocus\b/).test(target.className); + } + }; + + + /** + * Send a click event to the specified element. + * + * @param {EventTarget|Element} targetElement + * @param {Event} event + */ + FastClick.prototype.sendClick = function(targetElement, event) { + var clickEvent, touch; + + // On some Android devices activeElement needs to be blurred otherwise the synthetic click will have no effect (#24) + if (document.activeElement && document.activeElement !== targetElement) { + document.activeElement.blur(); + } + + touch = event.changedTouches[0]; + + // Synthesise a click event, with an extra attribute so it can be tracked + clickEvent = document.createEvent('MouseEvents'); + clickEvent.initMouseEvent(this.determineEventType(targetElement), true, true, window, 1, touch.screenX, touch.screenY, touch.clientX, touch.clientY, false, false, false, false, 0, null); + clickEvent.forwardedTouchEvent = true; + targetElement.dispatchEvent(clickEvent); + }; + + FastClick.prototype.determineEventType = function(targetElement) { + + //Issue #159: Android Chrome Select Box does not open with a synthetic click event + if (deviceIsAndroid && targetElement.tagName.toLowerCase() === 'select') { + return 'mousedown'; + } + + return 'click'; + }; + + + /** + * @param {EventTarget|Element} targetElement + */ + FastClick.prototype.focus = function(targetElement) { + var length; + + // Issue #160: on iOS 7, some input elements (e.g. date datetime month) throw a vague TypeError on setSelectionRange. 
These elements don't have an integer value for the selectionStart and selectionEnd properties, but unfortunately that can't be used for detection because accessing the properties also throws a TypeError. Just check the type instead. Filed as Apple bug #15122724. + if (deviceIsIOS && targetElement.setSelectionRange && targetElement.type.indexOf('date') !== 0 && targetElement.type !== 'time' && targetElement.type !== 'month') { + length = targetElement.value.length; + targetElement.setSelectionRange(length, length); + } else { + targetElement.focus(); + } + }; + + + /** + * Check whether the given target element is a child of a scrollable layer and if so, set a flag on it. + * + * @param {EventTarget|Element} targetElement + */ + FastClick.prototype.updateScrollParent = function(targetElement) { + var scrollParent, parentElement; + + scrollParent = targetElement.fastClickScrollParent; + + // Attempt to discover whether the target element is contained within a scrollable layer. Re-check if the + // target element was moved to another parent. + if (!scrollParent || !scrollParent.contains(targetElement)) { + parentElement = targetElement; + do { + if (parentElement.scrollHeight > parentElement.offsetHeight) { + scrollParent = parentElement; + targetElement.fastClickScrollParent = parentElement; + break; + } + + parentElement = parentElement.parentElement; + } while (parentElement); + } + + // Always update the scroll top tracker if possible. + if (scrollParent) { + scrollParent.fastClickLastScrollTop = scrollParent.scrollTop; + } + }; + + + /** + * @param {EventTarget} targetElement + * @returns {Element|EventTarget} + */ + FastClick.prototype.getTargetElementFromEventTarget = function(eventTarget) { + + // On some older browsers (notably Safari on iOS 4.1 - see issue #56) the event target may be a text node. 
+ if (eventTarget.nodeType === Node.TEXT_NODE) { + return eventTarget.parentNode; + } + + return eventTarget; + }; + + + /** + * On touch start, record the position and scroll offset. + * + * @param {Event} event + * @returns {boolean} + */ + FastClick.prototype.onTouchStart = function(event) { + var targetElement, touch, selection; + + // Ignore multiple touches, otherwise pinch-to-zoom is prevented if both fingers are on the FastClick element (issue #111). + if (event.targetTouches.length > 1) { + return true; + } + + targetElement = this.getTargetElementFromEventTarget(event.target); + touch = event.targetTouches[0]; + + if (deviceIsIOS) { + + // Only trusted events will deselect text on iOS (issue #49) + selection = window.getSelection(); + if (selection.rangeCount && !selection.isCollapsed) { + return true; + } + + if (!deviceIsIOS4) { + + // Weird things happen on iOS when an alert or confirm dialog is opened from a click event callback (issue #23): + // when the user next taps anywhere else on the page, new touchstart and touchend events are dispatched + // with the same identifier as the touch event that previously triggered the click that triggered the alert. + // Sadly, there is an issue on iOS 4 that causes some normal touch events to have the same identifier as an + // immediately preceeding touch event (issue #52), so this fix is unavailable on that platform. + // Issue 120: touch.identifier is 0 when Chrome dev tools 'Emulate touch events' is set with an iOS device UA string, + // which causes all touch events to be ignored. As this block only applies to iOS, and iOS identifiers are always long, + // random integers, it's safe to to continue if the identifier is 0 here. 
+ if (touch.identifier && touch.identifier === this.lastTouchIdentifier) { + event.preventDefault(); + return false; + } + + this.lastTouchIdentifier = touch.identifier; + + // If the target element is a child of a scrollable layer (using -webkit-overflow-scrolling: touch) and: + // 1) the user does a fling scroll on the scrollable layer + // 2) the user stops the fling scroll with another tap + // then the event.target of the last 'touchend' event will be the element that was under the user's finger + // when the fling scroll was started, causing FastClick to send a click event to that layer - unless a check + // is made to ensure that a parent layer was not scrolled before sending a synthetic click (issue #42). + this.updateScrollParent(targetElement); + } + } + + this.trackingClick = true; + this.trackingClickStart = event.timeStamp; + this.targetElement = targetElement; + + this.touchStartX = touch.pageX; + this.touchStartY = touch.pageY; + + // Prevent phantom clicks on fast double-tap (issue #36) + if ((event.timeStamp - this.lastClickTime) < this.tapDelay) { + event.preventDefault(); + } + + return true; + }; + + + /** + * Based on a touchmove event object, check whether the touch has moved past a boundary since it started. + * + * @param {Event} event + * @returns {boolean} + */ + FastClick.prototype.touchHasMoved = function(event) { + var touch = event.changedTouches[0], boundary = this.touchBoundary; + + if (Math.abs(touch.pageX - this.touchStartX) > boundary || Math.abs(touch.pageY - this.touchStartY) > boundary) { + return true; + } + + return false; + }; + + + /** + * Update the last position. 
+ * + * @param {Event} event + * @returns {boolean} + */ + FastClick.prototype.onTouchMove = function(event) { + if (!this.trackingClick) { + return true; + } + + // If the touch has moved, cancel the click tracking + if (this.targetElement !== this.getTargetElementFromEventTarget(event.target) || this.touchHasMoved(event)) { + this.trackingClick = false; + this.targetElement = null; + } + + return true; + }; + + + /** + * Attempt to find the labelled control for the given label element. + * + * @param {EventTarget|HTMLLabelElement} labelElement + * @returns {Element|null} + */ + FastClick.prototype.findControl = function(labelElement) { + + // Fast path for newer browsers supporting the HTML5 control attribute + if (labelElement.control !== undefined) { + return labelElement.control; + } + + // All browsers under test that support touch events also support the HTML5 htmlFor attribute + if (labelElement.htmlFor) { + return document.getElementById(labelElement.htmlFor); + } + + // If no for attribute exists, attempt to retrieve the first labellable descendant element + // the list of which is defined here: http://www.w3.org/TR/html5/forms.html#category-label + return labelElement.querySelector('button, input:not([type=hidden]), keygen, meter, output, progress, select, textarea'); + }; + + + /** + * On touch end, determine whether to send a click event at once. + * + * @param {Event} event + * @returns {boolean} + */ + FastClick.prototype.onTouchEnd = function(event) { + var forElement, trackingClickStart, targetTagName, scrollParent, touch, targetElement = this.targetElement; + + if (!this.trackingClick) { + return true; + } + + // Prevent phantom clicks on fast double-tap (issue #36) + if ((event.timeStamp - this.lastClickTime) < this.tapDelay) { + this.cancelNextClick = true; + return true; + } + + if ((event.timeStamp - this.trackingClickStart) > this.tapTimeout) { + return true; + } + + // Reset to prevent wrong click cancel on input (issue #156). 
+ this.cancelNextClick = false; + + this.lastClickTime = event.timeStamp; + + trackingClickStart = this.trackingClickStart; + this.trackingClick = false; + this.trackingClickStart = 0; + + // On some iOS devices, the targetElement supplied with the event is invalid if the layer + // is performing a transition or scroll, and has to be re-detected manually. Note that + // for this to function correctly, it must be called *after* the event target is checked! + // See issue #57; also filed as rdar://13048589 . + if (deviceIsIOSWithBadTarget) { + touch = event.changedTouches[0]; + + // In certain cases arguments of elementFromPoint can be negative, so prevent setting targetElement to null + targetElement = document.elementFromPoint(touch.pageX - window.pageXOffset, touch.pageY - window.pageYOffset) || targetElement; + targetElement.fastClickScrollParent = this.targetElement.fastClickScrollParent; + } + + targetTagName = targetElement.tagName.toLowerCase(); + if (targetTagName === 'label') { + forElement = this.findControl(targetElement); + if (forElement) { + this.focus(targetElement); + if (deviceIsAndroid) { + return false; + } + + targetElement = forElement; + } + } else if (this.needsFocus(targetElement)) { + + // Case 1: If the touch started a while ago (best guess is 100ms based on tests for issue #36) then focus will be triggered anyway. Return early and unset the target element reference so that the subsequent click will be allowed through. + // Case 2: Without this exception for input elements tapped when the document is contained in an iframe, then any inputted text won't be visible even though the value attribute is updated as the user types (issue #37). 
+ if ((event.timeStamp - trackingClickStart) > 100 || (deviceIsIOS && window.top !== window && targetTagName === 'input')) { + this.targetElement = null; + return false; + } + + this.focus(targetElement); + this.sendClick(targetElement, event); + + // Select elements need the event to go through on iOS 4, otherwise the selector menu won't open. + // Also this breaks opening selects when VoiceOver is active on iOS6, iOS7 (and possibly others) + if (!deviceIsIOS || targetTagName !== 'select') { + this.targetElement = null; + event.preventDefault(); + } + + return false; + } + + if (deviceIsIOS && !deviceIsIOS4) { + + // Don't send a synthetic click event if the target element is contained within a parent layer that was scrolled + // and this tap is being used to stop the scrolling (usually initiated by a fling - issue #42). + scrollParent = targetElement.fastClickScrollParent; + if (scrollParent && scrollParent.fastClickLastScrollTop !== scrollParent.scrollTop) { + return true; + } + } + + // Prevent the actual click from going though - unless the target node is marked as requiring + // real clicks or if it is in the whitelist in which case only non-programmatic clicks are permitted. + if (!this.needsClick(targetElement)) { + event.preventDefault(); + this.sendClick(targetElement, event); + } + + return false; + }; + + + /** + * On touch cancel, stop tracking the click. + * + * @returns {void} + */ + FastClick.prototype.onTouchCancel = function() { + this.trackingClick = false; + this.targetElement = null; + }; + + + /** + * Determine mouse events which should be permitted. 
+ * + * @param {Event} event + * @returns {boolean} + */ + FastClick.prototype.onMouse = function(event) { + + // If a target element was never set (because a touch event was never fired) allow the event + if (!this.targetElement) { + return true; + } + + if (event.forwardedTouchEvent) { + return true; + } + + // Programmatically generated events targeting a specific element should be permitted + if (!event.cancelable) { + return true; + } + + // Derive and check the target element to see whether the mouse event needs to be permitted; + // unless explicitly enabled, prevent non-touch click events from triggering actions, + // to prevent ghost/doubleclicks. + if (!this.needsClick(this.targetElement) || this.cancelNextClick) { + + // Prevent any user-added listeners declared on FastClick element from being fired. + if (event.stopImmediatePropagation) { + event.stopImmediatePropagation(); + } else { + + // Part of the hack for browsers that don't support Event#stopImmediatePropagation (e.g. Android 2) + event.propagationStopped = true; + } + + // Cancel the event + event.stopPropagation(); + event.preventDefault(); + + return false; + } + + // If the mouse event is permitted, return true for the action to go through. + return true; + }; + + + /** + * On actual clicks, determine whether this is a touch-generated click, a click action occurring + * naturally after a delay after a touch (which needs to be cancelled to avoid duplication), or + * an actual click which should be permitted. + * + * @param {Event} event + * @returns {boolean} + */ + FastClick.prototype.onClick = function(event) { + var permitted; + + // It's possible for another FastClick-like library delivered with third-party code to fire a click event before FastClick does (issue #44). In that case, set the click-tracking flag back to false and return early. This will cause onTouchEnd to return early. 
+ if (this.trackingClick) { + this.targetElement = null; + this.trackingClick = false; + return true; + } + + // Very odd behaviour on iOS (issue #18): if a submit element is present inside a form and the user hits enter in the iOS simulator or clicks the Go button on the pop-up OS keyboard the a kind of 'fake' click event will be triggered with the submit-type input element as the target. + if (event.target.type === 'submit' && event.detail === 0) { + return true; + } + + permitted = this.onMouse(event); + + // Only unset targetElement if the click is not permitted. This will ensure that the check for !targetElement in onMouse fails and the browser's click doesn't go through. + if (!permitted) { + this.targetElement = null; + } + + // If clicks are permitted, return true for the action to go through. + return permitted; + }; + + + /** + * Remove all FastClick's event listeners. + * + * @returns {void} + */ + FastClick.prototype.destroy = function() { + var layer = this.layer; + + if (deviceIsAndroid) { + layer.removeEventListener('mouseover', this.onMouse, true); + layer.removeEventListener('mousedown', this.onMouse, true); + layer.removeEventListener('mouseup', this.onMouse, true); + } + + layer.removeEventListener('click', this.onClick, true); + layer.removeEventListener('touchstart', this.onTouchStart, false); + layer.removeEventListener('touchmove', this.onTouchMove, false); + layer.removeEventListener('touchend', this.onTouchEnd, false); + layer.removeEventListener('touchcancel', this.onTouchCancel, false); + }; + + + /** + * Check whether FastClick is needed. 
+ * + * @param {Element} layer The layer to listen on + */ + FastClick.notNeeded = function(layer) { + var metaViewport; + var chromeVersion; + var blackberryVersion; + var firefoxVersion; + + // Devices that don't support touch don't need FastClick + if (typeof window.ontouchstart === 'undefined') { + return true; + } + + // Chrome version - zero for other browsers + chromeVersion = +(/Chrome\/([0-9]+)/.exec(navigator.userAgent) || [,0])[1]; + + if (chromeVersion) { + + if (deviceIsAndroid) { + metaViewport = document.querySelector('meta[name=viewport]'); + + if (metaViewport) { + // Chrome on Android with user-scalable="no" doesn't need FastClick (issue #89) + if (metaViewport.content.indexOf('user-scalable=no') !== -1) { + return true; + } + // Chrome 32 and above with width=device-width or less don't need FastClick + if (chromeVersion > 31 && document.documentElement.scrollWidth <= window.outerWidth) { + return true; + } + } + + // Chrome desktop doesn't need FastClick (issue #15) + } else { + return true; + } + } + + if (deviceIsBlackBerry10) { + blackberryVersion = navigator.userAgent.match(/Version\/([0-9]*)\.([0-9]*)/); + + // BlackBerry 10.3+ does not require Fastclick library. + // https://github.com/ftlabs/fastclick/issues/251 + if (blackberryVersion[1] >= 10 && blackberryVersion[2] >= 3) { + metaViewport = document.querySelector('meta[name=viewport]'); + + if (metaViewport) { + // user-scalable=no eliminates click delay. + if (metaViewport.content.indexOf('user-scalable=no') !== -1) { + return true; + } + // width=device-width (or less than device-width) eliminates click delay. 
+ if (document.documentElement.scrollWidth <= window.outerWidth) { + return true; + } + } + } + } + + // IE10 with -ms-touch-action: none or manipulation, which disables double-tap-to-zoom (issue #97) + if (layer.style.msTouchAction === 'none' || layer.style.touchAction === 'manipulation') { + return true; + } + + // Firefox version - zero for other browsers + firefoxVersion = +(/Firefox\/([0-9]+)/.exec(navigator.userAgent) || [,0])[1]; + + if (firefoxVersion >= 27) { + // Firefox 27+ does not have tap delay if the content is not zoomable - https://bugzilla.mozilla.org/show_bug.cgi?id=922896 + + metaViewport = document.querySelector('meta[name=viewport]'); + if (metaViewport && (metaViewport.content.indexOf('user-scalable=no') !== -1 || document.documentElement.scrollWidth <= window.outerWidth)) { + return true; + } + } + + // IE11: prefixed -ms-touch-action is no longer supported and it's recomended to use non-prefixed version + // http://msdn.microsoft.com/en-us/library/windows/apps/Hh767313.aspx + if (layer.style.touchAction === 'none' || layer.style.touchAction === 'manipulation') { + return true; + } + + return false; + }; + + + /** + * Factory method for creating a FastClick object + * + * @param {Element} layer The layer to listen on + * @param {Object} [options={}] The options to override the defaults + */ + FastClick.attach = function(layer, options) { + return new FastClick(layer, options); + }; + + + if (typeof define === 'function' && typeof define.amd === 'object' && define.amd) { + + // AMD. Register as an anonymous module. 
+ define(function() { + return FastClick; + }); + } else if (typeof module !== 'undefined' && module.exports) { + module.exports = FastClick.attach; + module.exports.FastClick = FastClick; + } else { + window.FastClick = FastClick; + } +}()); diff --git a/couchpotato/static/scripts/library/history.js b/couchpotato/static/scripts/vendor/history.js similarity index 100% rename from couchpotato/static/scripts/library/history.js rename to couchpotato/static/scripts/vendor/history.js diff --git a/couchpotato/static/scripts/library/mootools.js b/couchpotato/static/scripts/vendor/mootools.js similarity index 89% rename from couchpotato/static/scripts/library/mootools.js rename to couchpotato/static/scripts/vendor/mootools.js index 9917ad3255..1e816e02bc 100644 --- a/couchpotato/static/scripts/library/mootools.js +++ b/couchpotato/static/scripts/vendor/mootools.js @@ -1,16 +1,7 @@ +/* MooTools: the javascript framework. license: MIT-style license. copyright: Copyright (c) 2006-2014 [Valerio Proietti](http://mad4milk.net/).*/ /* ---- -MooTools: the javascript framework - -web build: - - http://mootools.net/core/f42fb6d73ea1a13146c5ad9502b442f0 - -packager build: - - packager build Core/Class Core/Class.Extras Core/Element Core/Element.Style Core/Element.Delegation Core/Element.Dimensions Core/Fx.Tween Core/Fx.Morph Core/Fx.Transitions Core/Request.JSON Core/Cookie Core/DOMReady - -... +Web Build: http://mootools.net/core/builder/90d9ff9fd02e4de69368c13d49be4022 */ - /* --- @@ -20,7 +11,7 @@ description: The heart of MooTools. license: MIT-style license. -copyright: Copyright (c) 2006-2012 [Valerio Proietti](http://mad4milk.net/). +copyright: Copyright (c) 2006-2014 [Valerio Proietti](http://mad4milk.net/). authors: The MooTools production team (http://mootools.net/developers/) @@ -32,12 +23,12 @@ provides: [Core, MooTools, Type, typeOf, instanceOf, Native] ... */ - +/*! MooTools: the javascript framework. license: MIT-style license. 
copyright: Copyright (c) 2006-2014 [Valerio Proietti](http://mad4milk.net/).*/ (function(){ this.MooTools = { - version: '1.4.5', - build: 'ab8ea8824dc3b24b6666867a2c4ed58ebb762cf0' + version: '1.5.1', + build: '0542c135fdeb7feed7d9917e01447a408f22c876' }; // typeOf, instanceOf @@ -50,7 +41,7 @@ var typeOf = this.typeOf = function(item){ if (item.nodeType == 1) return 'element'; if (item.nodeType == 3) return (/\S/).test(item.nodeValue) ? 'textnode' : 'whitespace'; } else if (typeof item.length == 'number'){ - if (item.callee) return 'arguments'; + if ('callee' in item) return 'arguments'; if ('item' in item) return 'collection'; } @@ -267,7 +258,7 @@ var force = function(name, object, methods){ if (!methodsEnumerable) for (var i = 0, l = methods.length; i < l; i++){ fn.call(prototype, prototype[methods[i]], methods[i]); } - for (var key in prototype) fn.call(prototype, prototype[key], key) + for (var key in prototype) fn.call(prototype, prototype[key], key); }; } @@ -275,7 +266,7 @@ var force = function(name, object, methods){ }; force('String', String, [ - 'charAt', 'charCodeAt', 'concat', 'indexOf', 'lastIndexOf', 'match', 'quote', 'replace', 'search', + 'charAt', 'charCodeAt', 'concat', 'contains', 'indexOf', 'lastIndexOf', 'match', 'quote', 'replace', 'search', 'slice', 'split', 'substr', 'substring', 'trim', 'toLowerCase', 'toUpperCase' ])('Array', Array, [ 'pop', 'push', 'reverse', 'shift', 'sort', 'splice', 'unshift', 'concat', 'join', 'slice', @@ -325,11 +316,13 @@ Object.each = Object.forEach; Array.implement({ + /*<!ES5>*/ forEach: function(fn, bind){ for (var i = 0, l = this.length; i < l; i++){ if (i in this) fn.call(bind, this[i], i, this); } }, + /*</!ES5>*/ each: function(fn, bind){ Array.forEach(this, fn, bind); @@ -411,7 +404,6 @@ String.extend('uniqueID', function(){ })(); - /* --- @@ -421,7 +413,7 @@ description: Contains Array Prototypes like each, contains, and erase. license: MIT-style license. 
-requires: Type +requires: [Type] provides: Array @@ -564,7 +556,7 @@ Array.implement({ if (this.length != 3) return null; var rgb = this.map(function(value){ if (value.length == 1) value += value; - return value.toInt(16); + return parseInt(value, 16); }); return (array) ? rgb : 'rgb(' + rgb + ')'; }, @@ -584,7 +576,6 @@ Array.implement({ - /* --- @@ -594,7 +585,7 @@ description: Contains String Prototypes like camelCase, capitalize, test, and to license: MIT-style license. -requires: Type +requires: [Type, Array] provides: String @@ -603,12 +594,14 @@ provides: String String.implement({ - test: function(regex, params){ - return ((typeOf(regex) == 'regexp') ? regex : new RegExp('' + regex, params)).test(this); + //<!ES6> + contains: function(string, index){ + return (index ? String(this).slice(index) : String(this)).indexOf(string) > -1; }, + //</!ES6> - contains: function(string, separator){ - return (separator) ? (separator + this + separator).indexOf(separator + string + separator) > -1 : String(this).indexOf(string) > -1; + test: function(regex, params){ + return ((typeOf(regex) == 'regexp') ? 
regex : new RegExp('' + regex, params)).test(this); }, trim: function(){ @@ -669,6 +662,7 @@ String.implement({ }); + /* --- @@ -749,7 +743,6 @@ Function.implement({ - /* --- @@ -803,7 +796,6 @@ Number.alias('each', 'times'); Number.implement(methods); })(['abs', 'acos', 'asin', 'atan', 'atan2', 'ceil', 'cos', 'exp', 'floor', 'log', 'max', 'min', 'pow', 'sin', 'sqrt', 'tan']); - /* --- @@ -920,7 +912,6 @@ Class.Mutators = { })(); - /* --- @@ -1000,7 +991,7 @@ this.Events = new Class({ type = removeOn(type); var events = this.$events[type]; if (events && !fn.internal){ - var index = events.indexOf(fn); + var index = events.indexOf(fn); if (index != -1) delete events[index]; } return this; @@ -1041,7 +1032,6 @@ this.Options = new Class({ })(); - /* --- @@ -1063,37 +1053,47 @@ provides: [Browser, Window, Document] var document = this.document; var window = document.window = this; -var ua = navigator.userAgent.toLowerCase(), - platform = navigator.platform.toLowerCase(), - UA = ua.match(/(opera|ie|firefox|chrome|version)[\s\/:]([\w\d\.]+)?.*?(safari|version[\s\/:]([\w\d\.]+)|$)/) || [null, 'unknown', 0], - mode = UA[1] == 'ie' && document.documentMode; +var parse = function(ua, platform){ + ua = ua.toLowerCase(); + platform = (platform ? platform.toLowerCase() : ''); -var Browser = this.Browser = { + var UA = ua.match(/(opera|ie|firefox|chrome|trident|crios|version)[\s\/:]([\w\d\.]+)?.*?(safari|(?:rv[\s\/:]|version[\s\/:])([\w\d\.]+)|$)/) || [null, 'unknown', 0]; + + if (UA[1] == 'trident'){ + UA[1] = 'ie'; + if (UA[4]) UA[2] = UA[4]; + } else if (UA[1] == 'crios'){ + UA[1] = 'chrome'; + } - extend: Function.prototype.extend, + platform = ua.match(/ip(?:ad|od|hone)/) ? 'ios' : (ua.match(/(?:webos|android)/) || platform.match(/mac|win|linux/) || ['other'])[0]; + if (platform == 'win') platform = 'windows'; - name: (UA[1] == 'version') ? UA[3] : UA[1], + return { + extend: Function.prototype.extend, + name: (UA[1] == 'version') ? 
UA[3] : UA[1], + version: parseFloat((UA[1] == 'opera' && UA[4]) ? UA[4] : UA[2]), + platform: platform + }; +}; - version: mode || parseFloat((UA[1] == 'opera' && UA[4]) ? UA[4] : UA[2]), +var Browser = this.Browser = parse(navigator.userAgent, navigator.platform); - Platform: { - name: ua.match(/ip(?:ad|od|hone)/) ? 'ios' : (ua.match(/(?:webos|android)/) || platform.match(/mac|win|linux/) || ['other'])[0] - }, +if (Browser.name == 'ie'){ + Browser.version = document.documentMode; +} +Browser.extend({ Features: { xpath: !!(document.evaluate), air: !!(window.runtime), query: !!(document.querySelector), json: !!(window.JSON) }, + parseUA: parse +}); - Plugins: {} - -}; -Browser[Browser.name] = true; -Browser[Browser.name + parseInt(Browser.version, 10)] = true; -Browser.Platform[Browser.Platform.name] = true; // Request @@ -1126,18 +1126,7 @@ Browser.Request = (function(){ Browser.Features.xhr = !!(Browser.Request); -// Flash detection - -var version = (Function.attempt(function(){ - return navigator.plugins['Shockwave Flash'].description; -}, function(){ - return new ActiveXObject('ShockwaveFlash.ShockwaveFlash').GetVariable('$version'); -}) || '0 r0').match(/\d+/g); -Browser.Plugins.Flash = { - version: Number(version[0] || '0.' + version[1]) || 0, - build: Number(version[2]) || 0 -}; // String scripts @@ -1203,6 +1192,7 @@ if (this.attachEvent && !this.addEventListener){ var unloadEvent = function(){ this.detachEvent('onunload', unloadEvent); document.head = document.html = document.window = null; + window = this.Window = document = null; }; this.attachEvent('onunload', unloadEvent); } @@ -1236,7 +1226,6 @@ try { })(); - /* --- @@ -1359,7 +1348,6 @@ Object.extend({ - /* --- name: Slick.Parser @@ -1591,7 +1579,6 @@ if (!this.Slick) this.Slick = Slick; }).apply(/*<CommonJS>*/(typeof exports != 'undefined') ? 
exports : /*</CommonJS>*/this); - /* --- name: Slick.Finder @@ -1756,7 +1743,7 @@ local.setDocument = function(document){ // native matchesSelector function - features.nativeMatchesSelector = root.matchesSelector || /*root.msMatchesSelector ||*/ root.mozMatchesSelector || root.webkitMatchesSelector; + features.nativeMatchesSelector = root.matches || /*root.msMatchesSelector ||*/ root.mozMatchesSelector || root.webkitMatchesSelector; if (features.nativeMatchesSelector) try { // if matchesSelector trows errors on incorrect sintaxes we can use it features.nativeMatchesSelector.call(root, ':slick'); @@ -1769,7 +1756,7 @@ local.setDocument = function(document){ root.slick_expando = 1; delete root.slick_expando; features.getUID = this.getUIDHTML; - } catch(e) { + } catch(e){ features.getUID = this.getUIDXML; } @@ -1790,9 +1777,9 @@ local.setDocument = function(document){ // hasAttribute - features.hasAttribute = (root && this.isNativeCode(root.hasAttribute)) ? function(node, attribute) { + features.hasAttribute = (root && this.isNativeCode(root.hasAttribute)) ? 
function(node, attribute){ return node.hasAttribute(attribute); - } : function(node, attribute) { + } : function(node, attribute){ node = node.getAttributeNode(attribute); return !!(node && (node.specified || node.nodeValue)); }; @@ -1874,7 +1861,7 @@ local.search = function(context, expression, append, first){ /*<simple-selectors-override>*/ var simpleSelector = expression.match(reSimpleSelector); - simpleSelectors: if (simpleSelector) { + simpleSelectors: if (simpleSelector){ var symbol = simpleSelector[1], name = simpleSelector[2], @@ -1927,7 +1914,7 @@ local.search = function(context, expression, append, first){ /*</simple-selectors-override>*/ /*<query-selector-override>*/ - querySelector: if (context.querySelectorAll) { + querySelector: if (context.querySelectorAll){ if (!this.isHTMLDocument || qsaFailExpCache[expression] @@ -1956,7 +1943,7 @@ local.search = function(context, expression, append, first){ try { if (first) return context.querySelector(_expression) || null; else nodes = context.querySelectorAll(_expression); - } catch(e) { + } catch(e){ qsaFailExpCache[expression] = 1; break querySelector; } finally { @@ -2155,14 +2142,14 @@ local.matchNode = function(node, selector){ if (this.isHTMLDocument && this.nativeMatchesSelector){ try { return this.nativeMatchesSelector.call(node, selector.replace(/\[([^=]+)=\s*([^'"\]]+?)\s*\]/g, '[$1="$2"]')); - } catch(matchError) {} + } catch(matchError){} } var parsed = this.Slick.parse(selector); if (!parsed) return true; // simple (single) selectors - var expressions = parsed.expressions, simpleExpCounter = 0, i; + var expressions = parsed.expressions, simpleExpCounter = 0, i, currentExpression; for (i = 0; (currentExpression = expressions[i]); i++){ if (currentExpression.length == 1){ var exp = currentExpression[0]; @@ -2578,7 +2565,6 @@ if (!this.Slick) this.Slick = Slick; }).apply(/*<CommonJS>*/(typeof exports != 'undefined') ? 
exports : /*</CommonJS>*/this); - /* --- @@ -2590,12 +2576,12 @@ license: MIT-style license. requires: [Window, Document, Array, String, Function, Object, Number, Slick.Parser, Slick.Finder] -provides: [Element, Elements, $, $$, Iframe, Selectors] +provides: [Element, Elements, $, $$, IFrame, Selectors] ... */ -var Element = function(tag, props){ +var Element = this.Element = function(tag, props){ var konstructor = Element.Constructors[tag]; if (konstructor) return konstructor(props); if (typeof tag != 'string') return document.id(tag).set(props); @@ -2779,7 +2765,7 @@ Array.mirror(Elements); /*<ltIE8>*/ var createElementAcceptsHTML; try { - createElementAcceptsHTML = (document.createElement('<input name=x>').name == 'x'); + createElementAcceptsHTML = (document.createElement('<input name=x>').name == 'x'); } catch (e){} var escapeQuotes = function(html){ @@ -2787,20 +2773,44 @@ var escapeQuotes = function(html){ }; /*</ltIE8>*/ +/*<ltIE9>*/ +// #2479 - IE8 Cannot set HTML of style element +var canChangeStyleHTML = (function(){ + var div = document.createElement('style'), + flag = false; + try { + div.innerHTML = '#justTesing{margin: 0px;}'; + flag = !!div.innerHTML; + } catch(e){} + return flag; +})(); +/*</ltIE9>*/ + Document.implement({ newElement: function(tag, props){ - if (props && props.checked != null) props.defaultChecked = props.checked; - /*<ltIE8>*/// Fix for readonly name and type properties in IE < 8 - if (createElementAcceptsHTML && props){ - tag = '<' + tag; - if (props.name) tag += ' name="' + escapeQuotes(props.name) + '"'; - if (props.type) tag += ' type="' + escapeQuotes(props.type) + '"'; - tag += '>'; - delete props.name; - delete props.type; - } - /*</ltIE8>*/ + if (props){ + if (props.checked != null) props.defaultChecked = props.checked; + if ((props.type == 'checkbox' || props.type == 'radio') && props.value == null) props.value = 'on'; + /*<ltIE9>*/ // IE needs the type to be set before changing content of style element + if 
(!canChangeStyleHTML && tag == 'style'){ + var styleElement = document.createElement('style'); + styleElement.setAttribute('type', 'text/css'); + if (props.type) delete props.type; + return this.id(styleElement).set(props); + } + /*</ltIE9>*/ + /*<ltIE8>*/// Fix for readonly name and type properties in IE < 8 + if (createElementAcceptsHTML){ + tag = '<' + tag; + if (props.name) tag += ' name="' + escapeQuotes(props.name) + '"'; + if (props.type) tag += ' type="' + escapeQuotes(props.type) + '"'; + tag += '>'; + delete props.name; + delete props.type; + } + /*</ltIE8>*/ + } return this.id(this.createElement(tag)).set(props); } @@ -3035,6 +3045,21 @@ Object.forEach(properties, function(real, key){ }; }); +/*<ltIE9>*/ +propertySetters.text = (function(setter){ + return function(node, value){ + if (node.get('tag') == 'style') node.set('html', value); + else node[properties.text] = value; + }; +})(propertySetters.text); + +propertyGetters.text = (function(getter){ + return function(node){ + return (node.get('tag') == 'style') ? 
node.innerHTML : getter(node); + }; +})(propertyGetters.text); +/*</ltIE9>*/ + // Booleans var bools = [ @@ -3093,15 +3118,42 @@ el = null; /* </webkit> */ /*<IE>*/ -var input = document.createElement('input'); + +/*<ltIE9>*/ +// #2479 - IE8 Cannot set HTML of style element +var canChangeStyleHTML = (function(){ + var div = document.createElement('style'), + flag = false; + try { + div.innerHTML = '#justTesing{margin: 0px;}'; + flag = !!div.innerHTML; + } catch(e){} + return flag; +})(); +/*</ltIE9>*/ + +var input = document.createElement('input'), volatileInputValue, html5InputSupport; + +// #2178 input.value = 't'; input.type = 'submit'; -if (input.value != 't') propertySetters.type = function(node, type){ - var value = node.value; - node.type = type; - node.value = value; -}; +volatileInputValue = input.value != 't'; + +// #2443 - IE throws "Invalid Argument" when trying to use html5 input types +try { + input.type = 'email'; + html5InputSupport = input.type == 'email'; +} catch(e){} + input = null; + +if (volatileInputValue || !html5InputSupport) propertySetters.type = function(node, type){ + try { + var value = node.value; + node.type = type; + node.value = value; + } catch (e){} +}; /*</IE>*/ /* getProperty, setProperty */ @@ -3112,7 +3164,28 @@ var pollutesGetAttribute = (function(div){ return (div.getAttribute('random') == 'attribute'); })(document.createElement('div')); -/* <ltIE9> */ +var hasCloneBug = (function(test){ + test.innerHTML = '<object><param name="should_fix" value="the unknown" /></object>'; + return test.cloneNode(true).firstChild.childNodes.length != 1; +})(document.createElement('div')); +/* </ltIE9> */ + +var hasClassList = !!document.createElement('div').classList; + +var classes = function(className){ + var classNames = (className || '').clean().split(" "), uniques = {}; + return classNames.filter(function(className){ + if (className !== "" && !uniques[className]) return uniques[className] = className; + }); +}; + +var addToClassList = 
function(name){ + this.classList.add(name); +}; + +var removeFromClassList = function(name){ + this.classList.remove(name); +}; Element.implement({ @@ -3122,7 +3195,8 @@ Element.implement({ setter(this, value); } else { /* <ltIE9> */ - if (pollutesGetAttribute) var attributeWhiteList = this.retrieve('$attributeWhiteList', {}); + var attributeWhiteList; + if (pollutesGetAttribute) attributeWhiteList = this.retrieve('$attributeWhiteList', {}); /* </ltIE9> */ if (value == null){ @@ -3194,17 +3268,27 @@ Element.implement({ return this; }, - hasClass: function(className){ - return this.className.clean().contains(className, ' '); + hasClass: hasClassList ? function(className){ + return this.classList.contains(className); + } : function(className){ + return classes(this.className).contains(className); }, - addClass: function(className){ - if (!this.hasClass(className)) this.className = (this.className + ' ' + className).clean(); + addClass: hasClassList ? function(className){ + classes(className).forEach(addToClassList, this); + return this; + } : function(className){ + this.className = classes(className + ' ' + this.className).join(' '); return this; }, - removeClass: function(className){ - this.className = this.className.replace(new RegExp('(^|\\s)' + className + '(?:\\s|$)'), '$1'); + removeClass: hasClassList ? function(className){ + classes(className).forEach(removeFromClassList, this); + return this; + } : function(className){ + var classNames = classes(this.className); + classes(className).forEach(classNames.erase, classNames); + this.className = classNames.join(' '); return this; }, @@ -3279,6 +3363,37 @@ Element.implement({ }); + +// appendHTML + +var appendInserters = { + before: 'beforeBegin', + after: 'afterEnd', + bottom: 'beforeEnd', + top: 'afterBegin', + inside: 'beforeEnd' +}; + +Element.implement('appendHTML', ('insertAdjacentHTML' in document.createElement('div')) ? 
function(html, where){ + this.insertAdjacentHTML(appendInserters[where || 'bottom'], html); + return this; +} : function(html, where){ + var temp = new Element('div', {html: html}), + children = temp.childNodes, + fragment = temp.firstChild; + + if (!fragment) return this; + if (children.length > 1){ + fragment = document.createDocumentFragment(); + for (var i = 0, l = children.length; i < l; i++){ + fragment.appendChild(children[i]); + } + } + + inserters[where || 'bottom'](fragment, this); + return this; +}); + var collected = {}, storage = {}; var get = function(uid){ @@ -3344,7 +3459,7 @@ Element.implement({ } /*<ltIE9>*/ - if (Browser.ie){ + if (hasCloneBug){ var co = clone.getElementsByTagName('object'), to = this.getElementsByTagName('object'); for (i = co.length; i--;) co[i].outerHTML = to[i].outerHTML; } @@ -3357,13 +3472,7 @@ Element.implement({ [Element, Window, Document].invoke('implement', { addListener: function(type, fn){ - if (type == 'unload'){ - var old = fn, self = this; - fn = function(){ - self.removeListener('unload', fn); - old(); - }; - } else { + if (window.attachEvent && !window.addEventListener){ collected[Slick.uidOf(this)] = this; } if (this.addEventListener) this.addEventListener(type, fn, !!arguments[2]); @@ -3398,10 +3507,14 @@ Element.implement({ }); /*<ltIE9>*/ -if (window.attachEvent && !window.addEventListener) window.addListener('unload', function(){ - Object.each(collected, clean); - if (window.CollectGarbage) CollectGarbage(); -}); +if (window.attachEvent && !window.addEventListener){ + var gc = function(){ + Object.each(collected, clean); + if (window.CollectGarbage) CollectGarbage(); + window.removeListener('unload', gc); + } + window.addListener('unload', gc); +} /*</ltIE9>*/ Element.Properties = {}; @@ -3437,20 +3550,24 @@ Element.Properties.html = { set: function(html){ if (html == null) html = ''; else if (typeOf(html) == 'array') html = html.join(''); - this.innerHTML = html; - }, + /*<ltIE9>*/ + if (this.styleSheet && 
!canChangeStyleHTML) this.styleSheet.cssText = html; + else /*</ltIE9>*/this.innerHTML = html; + }, erase: function(){ - this.innerHTML = ''; + this.set('html', ''); } }; +var supportsHTML5Elements = true, supportsTableInnerHTML = true, supportsTRInnerHTML = true; + /*<ltIE9>*/ // technique by jdbarlett - http://jdbartlett.com/innershiv/ var div = document.createElement('div'); div.innerHTML = '<nav></nav>'; -var supportsHTML5Elements = (div.childNodes.length == 1); +supportsHTML5Elements = (div.childNodes.length == 1); if (!supportsHTML5Elements){ var tags = 'abbr article aside audio canvas datalist details figcaption figure footer header hgroup mark meter nav output progress section summary time video'.split(' '), fragment = document.createDocumentFragment(), l = tags.length; @@ -3460,7 +3577,7 @@ div = null; /*</ltIE9>*/ /*<IE>*/ -var supportsTableInnerHTML = Function.attempt(function(){ +supportsTableInnerHTML = Function.attempt(function(){ var table = document.createElement('table'); table.innerHTML = '<tr><td></td></tr>'; return true; @@ -3469,7 +3586,7 @@ var supportsTableInnerHTML = Function.attempt(function(){ /*<ltFF4>*/ var tr = document.createElement('tr'), html = '<td></td>'; tr.innerHTML = html; -var supportsTRInnerHTML = (tr.innerHTML == html); +supportsTRInnerHTML = (tr.innerHTML == html); tr = null; /*</ltFF4>*/ @@ -3487,6 +3604,10 @@ if (!supportsTableInnerHTML || !supportsTRInnerHTML || !supportsHTML5Elements){ translations.thead = translations.tfoot = translations.tbody; return function(html){ + + /*<ltIE9>*/ + if (this.styleSheet) return set.call(this, html); + /*</ltIE9>*/ var wrap = translations[this.get('tag')]; if (!wrap && !supportsHTML5Elements) wrap = [0, '', '']; if (!wrap) return set.call(this, html); @@ -3514,11 +3635,12 @@ if (testForm.firstChild.value != 's') Element.Properties.value = { var tag = this.get('tag'); if (tag != 'select') return this.setProperty('value', value); var options = this.getElements('option'); + value = 
String(value); for (var i = 0; i < options.length; i++){ var option = options[i], attr = option.getAttributeNode('value'), optionValue = (attr && attr.specified) ? option.value : option.get('text'); - if (optionValue == value) return option.selected = true; + if (optionValue === value) return option.selected = true; } }, @@ -3553,300 +3675,120 @@ if (document.createElement('div').getAttributeNode('id')) Element.Properties.id })(); - /* --- -name: Element.Style +name: Event -description: Contains methods for interacting with the styles of Elements in a fashionable way. +description: Contains the Event Type, to make the event object cross-browser. license: MIT-style license. -requires: Element +requires: [Window, Document, Array, Function, String, Object] -provides: Element.Style +provides: Event ... */ (function(){ -var html = document.html; +var _keys = {}; +var normalizeWheelSpeed = function(event){ + var normalized; + if (event.wheelDelta){ + normalized = event.wheelDelta % 120 == 0 ? event.wheelDelta / 120 : event.wheelDelta / 12; + } else { + var rawAmount = event.deltaY || event.detail || 0; + normalized = -(rawAmount % 3 == 0 ? 
rawAmount / 3 : rawAmount * 10); + } + return normalized; +} -//<ltIE9> -// Check for oldIE, which does not remove styles when they're set to null -var el = document.createElement('div'); -el.style.color = 'red'; -el.style.color = null; -var doesNotRemoveStyles = el.style.color == 'red'; -el = null; -//</ltIE9> +var DOMEvent = this.DOMEvent = new Type('DOMEvent', function(event, win){ + if (!win) win = window; + event = event || win.event; + if (event.$extended) return event; + this.event = event; + this.$extended = true; + this.shift = event.shiftKey; + this.control = event.ctrlKey; + this.alt = event.altKey; + this.meta = event.metaKey; + var type = this.type = event.type; + var target = event.target || event.srcElement; + while (target && target.nodeType == 3) target = target.parentNode; + this.target = document.id(target); -Element.Properties.styles = {set: function(styles){ - this.setStyles(styles); -}}; + if (type.indexOf('key') == 0){ + var code = this.code = (event.which || event.keyCode); + this.key = _keys[code]; + if (type == 'keydown' || type == 'keyup'){ + if (code > 111 && code < 124) this.key = 'f' + (code - 111); + else if (code > 95 && code < 106) this.key = code - 96; + } + if (this.key == null) this.key = String.fromCharCode(code).toLowerCase(); + } else if (type == 'click' || type == 'dblclick' || type == 'contextmenu' || type == 'wheel' || type == 'DOMMouseScroll' || type.indexOf('mouse') == 0){ + var doc = win.document; + doc = (!doc.compatMode || doc.compatMode == 'CSS1Compat') ? doc.html : doc.body; + this.page = { + x: (event.pageX != null) ? event.pageX : event.clientX + doc.scrollLeft, + y: (event.pageY != null) ? event.pageY : event.clientY + doc.scrollTop + }; + this.client = { + x: (event.pageX != null) ? event.pageX - win.pageXOffset : event.clientX, + y: (event.pageY != null) ? 
event.pageY - win.pageYOffset : event.clientY + }; + if (type == 'DOMMouseScroll' || type == 'wheel' || type == 'mousewheel') this.wheel = normalizeWheelSpeed(event); + this.rightClick = (event.which == 3 || event.button == 2); + if (type == 'mouseover' || type == 'mouseout'){ + var related = event.relatedTarget || event[(type == 'mouseover' ? 'from' : 'to') + 'Element']; + while (related && related.nodeType == 3) related = related.parentNode; + this.relatedTarget = document.id(related); + } + } else if (type.indexOf('touch') == 0 || type.indexOf('gesture') == 0){ + this.rotation = event.rotation; + this.scale = event.scale; + this.targetTouches = event.targetTouches; + this.changedTouches = event.changedTouches; + var touches = this.touches = event.touches; + if (touches && touches[0]){ + var touch = touches[0]; + this.page = {x: touch.pageX, y: touch.pageY}; + this.client = {x: touch.clientX, y: touch.clientY}; + } + } -var hasOpacity = (html.style.opacity != null), - hasFilter = (html.style.filter != null), - reAlpha = /alpha\(opacity=([\d.]+)\)/i; + if (!this.client) this.client = {}; + if (!this.page) this.page = {}; +}); -var setVisibility = function(element, opacity){ - element.store('$opacity', opacity); - element.style.visibility = opacity > 0 || opacity == null ? 'visible' : 'hidden'; -}; +DOMEvent.implement({ -var setOpacity = (hasOpacity ? function(element, opacity){ - element.style.opacity = opacity; -} : (hasFilter ? function(element, opacity){ - var style = element.style; - if (!element.currentStyle || !element.currentStyle.hasLayout) style.zoom = 1; - if (opacity == null || opacity == 1) opacity = ''; - else opacity = 'alpha(opacity=' + (opacity * 100).limit(0, 100).round() + ')'; - var filter = style.filter || element.getComputedStyle('filter') || ''; - style.filter = reAlpha.test(filter) ? 
filter.replace(reAlpha, opacity) : filter + opacity; - if (!style.filter) style.removeAttribute('filter'); -} : setVisibility)); + stop: function(){ + return this.preventDefault().stopPropagation(); + }, -var getOpacity = (hasOpacity ? function(element){ - var opacity = element.style.opacity || element.getComputedStyle('opacity'); - return (opacity == '') ? 1 : opacity.toFloat(); -} : (hasFilter ? function(element){ - var filter = (element.style.filter || element.getComputedStyle('filter')), - opacity; - if (filter) opacity = filter.match(reAlpha); - return (opacity == null || filter == null) ? 1 : (opacity[1] / 100); -} : function(element){ - var opacity = element.retrieve('$opacity'); - if (opacity == null) opacity = (element.style.visibility == 'hidden' ? 0 : 1); - return opacity; -})); + stopPropagation: function(){ + if (this.event.stopPropagation) this.event.stopPropagation(); + else this.event.cancelBubble = true; + return this; + }, -var floatName = (html.style.cssFloat == null) ? 'styleFloat' : 'cssFloat'; + preventDefault: function(){ + if (this.event.preventDefault) this.event.preventDefault(); + else this.event.returnValue = false; + return this; + } -Element.implement({ +}); - getComputedStyle: function(property){ - if (this.currentStyle) return this.currentStyle[property.camelCase()]; - var defaultView = Element.getDocument(this).defaultView, - computed = defaultView ? defaultView.getComputedStyle(this, null) : null; - return (computed) ? computed.getPropertyValue((property == floatName) ? 'float' : property.hyphenate()) : null; - }, +DOMEvent.defineKey = function(code, key){ + _keys[code] = key; + return this; +}; - setStyle: function(property, value){ - if (property == 'opacity'){ - if (value != null) value = parseFloat(value); - setOpacity(this, value); - return this; - } - property = (property == 'float' ? 
floatName : property).camelCase(); - if (typeOf(value) != 'string'){ - var map = (Element.Styles[property] || '@').split(' '); - value = Array.from(value).map(function(val, i){ - if (!map[i]) return ''; - return (typeOf(val) == 'number') ? map[i].replace('@', Math.round(val)) : val; - }).join(' '); - } else if (value == String(Number(value))){ - value = Math.round(value); - } - this.style[property] = value; - //<ltIE9> - if ((value == '' || value == null) && doesNotRemoveStyles && this.style.removeAttribute){ - this.style.removeAttribute(property); - } - //</ltIE9> - return this; - }, - - getStyle: function(property){ - if (property == 'opacity') return getOpacity(this); - property = (property == 'float' ? floatName : property).camelCase(); - var result = this.style[property]; - if (!result || property == 'zIndex'){ - result = []; - for (var style in Element.ShortStyles){ - if (property != style) continue; - for (var s in Element.ShortStyles[style]) result.push(this.getStyle(s)); - return result.join(' '); - } - result = this.getComputedStyle(property); - } - if (result){ - result = String(result); - var color = result.match(/rgba?\([\d\s,]+\)/); - if (color) result = result.replace(color[0], color[0].rgbToHex()); - } - if (Browser.opera || Browser.ie){ - if ((/^(height|width)$/).test(property) && !(/px$/.test(result))){ - var values = (property == 'width') ? 
['left', 'right'] : ['top', 'bottom'], size = 0; - values.each(function(value){ - size += this.getStyle('border-' + value + '-width').toInt() + this.getStyle('padding-' + value).toInt(); - }, this); - return this['offset' + property.capitalize()] - size + 'px'; - } - if (Browser.ie && (/^border(.+)Width|margin|padding/).test(property) && isNaN(parseFloat(result))){ - return '0px'; - } - } - return result; - }, - - setStyles: function(styles){ - for (var style in styles) this.setStyle(style, styles[style]); - return this; - }, - - getStyles: function(){ - var result = {}; - Array.flatten(arguments).each(function(key){ - result[key] = this.getStyle(key); - }, this); - return result; - } - -}); - -Element.Styles = { - left: '@px', top: '@px', bottom: '@px', right: '@px', - width: '@px', height: '@px', maxWidth: '@px', maxHeight: '@px', minWidth: '@px', minHeight: '@px', - backgroundColor: 'rgb(@, @, @)', backgroundPosition: '@px @px', color: 'rgb(@, @, @)', - fontSize: '@px', letterSpacing: '@px', lineHeight: '@px', clip: 'rect(@px @px @px @px)', - margin: '@px @px @px @px', padding: '@px @px @px @px', border: '@px @ rgb(@, @, @) @px @ rgb(@, @, @) @px @ rgb(@, @, @)', - borderWidth: '@px @px @px @px', borderStyle: '@ @ @ @', borderColor: 'rgb(@, @, @) rgb(@, @, @) rgb(@, @, @) rgb(@, @, @)', - zIndex: '@', 'zoom': '@', fontWeight: '@', textIndent: '@px', opacity: '@' -}; - - - - - -Element.ShortStyles = {margin: {}, padding: {}, border: {}, borderWidth: {}, borderStyle: {}, borderColor: {}}; - -['Top', 'Right', 'Bottom', 'Left'].each(function(direction){ - var Short = Element.ShortStyles; - var All = Element.Styles; - ['margin', 'padding'].each(function(style){ - var sd = style + direction; - Short[style][sd] = All[sd] = '@px'; - }); - var bd = 'border' + direction; - Short.border[bd] = All[bd] = '@px @ rgb(@, @, @)'; - var bdw = bd + 'Width', bds = bd + 'Style', bdc = bd + 'Color'; - Short[bd] = {}; - Short.borderWidth[bdw] = Short[bd][bdw] = All[bdw] = '@px'; - 
Short.borderStyle[bds] = Short[bd][bds] = All[bds] = '@'; - Short.borderColor[bdc] = Short[bd][bdc] = All[bdc] = 'rgb(@, @, @)'; -}); - -})(); - - -/* ---- - -name: Event - -description: Contains the Event Type, to make the event object cross-browser. - -license: MIT-style license. - -requires: [Window, Document, Array, Function, String, Object] - -provides: Event - -... -*/ - -(function() { - -var _keys = {}; - -var DOMEvent = this.DOMEvent = new Type('DOMEvent', function(event, win){ - if (!win) win = window; - event = event || win.event; - if (event.$extended) return event; - this.event = event; - this.$extended = true; - this.shift = event.shiftKey; - this.control = event.ctrlKey; - this.alt = event.altKey; - this.meta = event.metaKey; - var type = this.type = event.type; - var target = event.target || event.srcElement; - while (target && target.nodeType == 3) target = target.parentNode; - this.target = document.id(target); - - if (type.indexOf('key') == 0){ - var code = this.code = (event.which || event.keyCode); - this.key = _keys[code]; - if (type == 'keydown'){ - if (code > 111 && code < 124) this.key = 'f' + (code - 111); - else if (code > 95 && code < 106) this.key = code - 96; - } - if (this.key == null) this.key = String.fromCharCode(code).toLowerCase(); - } else if (type == 'click' || type == 'dblclick' || type == 'contextmenu' || type == 'DOMMouseScroll' || type.indexOf('mouse') == 0){ - var doc = win.document; - doc = (!doc.compatMode || doc.compatMode == 'CSS1Compat') ? doc.html : doc.body; - this.page = { - x: (event.pageX != null) ? event.pageX : event.clientX + doc.scrollLeft, - y: (event.pageY != null) ? event.pageY : event.clientY + doc.scrollTop - }; - this.client = { - x: (event.pageX != null) ? event.pageX - win.pageXOffset : event.clientX, - y: (event.pageY != null) ? event.pageY - win.pageYOffset : event.clientY - }; - if (type == 'DOMMouseScroll' || type == 'mousewheel') - this.wheel = (event.wheelDelta) ? 
event.wheelDelta / 120 : -(event.detail || 0) / 3; - - this.rightClick = (event.which == 3 || event.button == 2); - if (type == 'mouseover' || type == 'mouseout'){ - var related = event.relatedTarget || event[(type == 'mouseover' ? 'from' : 'to') + 'Element']; - while (related && related.nodeType == 3) related = related.parentNode; - this.relatedTarget = document.id(related); - } - } else if (type.indexOf('touch') == 0 || type.indexOf('gesture') == 0){ - this.rotation = event.rotation; - this.scale = event.scale; - this.targetTouches = event.targetTouches; - this.changedTouches = event.changedTouches; - var touches = this.touches = event.touches; - if (touches && touches[0]){ - var touch = touches[0]; - this.page = {x: touch.pageX, y: touch.pageY}; - this.client = {x: touch.clientX, y: touch.clientY}; - } - } - - if (!this.client) this.client = {}; - if (!this.page) this.page = {}; -}); - -DOMEvent.implement({ - - stop: function(){ - return this.preventDefault().stopPropagation(); - }, - - stopPropagation: function(){ - if (this.event.stopPropagation) this.event.stopPropagation(); - else this.event.cancelBubble = true; - return this; - }, - - preventDefault: function(){ - if (this.event.preventDefault) this.event.preventDefault(); - else this.event.returnValue = false; - return this; - } - -}); - -DOMEvent.defineKey = function(code, key){ - _keys[code] = key; - return this; -}; - -DOMEvent.defineKeys = DOMEvent.defineKey.overloadSetter(true); +DOMEvent.defineKeys = DOMEvent.defineKey.overloadSetter(true); DOMEvent.defineKeys({ '38': 'up', '40': 'down', '37': 'left', '39': 'right', @@ -3860,7 +3802,6 @@ DOMEvent.defineKeys({ - /* --- @@ -3993,7 +3934,7 @@ Element.Properties.events = {set: function(events){ Element.NativeEvents = { click: 2, dblclick: 2, mouseup: 2, mousedown: 2, contextmenu: 2, //mouse buttons - mousewheel: 2, DOMMouseScroll: 2, //mouse wheel + wheel: 2, mousewheel: 2, DOMMouseScroll: 2, //mouse wheel mouseover: 2, mouseout: 2, mousemove: 2, 
selectstart: 2, selectend: 2, //mouse movement keydown: 2, keypress: 2, keyup: 2, //keyboard orientationchange: 2, // mobile @@ -4001,23 +3942,27 @@ Element.NativeEvents = { gesturestart: 2, gesturechange: 2, gestureend: 2, // gesture focus: 2, blur: 2, change: 2, reset: 2, select: 2, submit: 2, paste: 2, input: 2, //form elements load: 2, unload: 1, beforeunload: 2, resize: 1, move: 1, DOMContentLoaded: 1, readystatechange: 1, //window - error: 1, abort: 1, scroll: 1 //misc + hashchange: 1, popstate: 2, // history + error: 1, abort: 1, scroll: 1, message: 2 //misc }; -Element.Events = {mousewheel: { - base: (Browser.firefox) ? 'DOMMouseScroll' : 'mousewheel' -}}; +Element.Events = { + mousewheel: { + base: 'onwheel' in document ? 'wheel' : 'onmousewheel' in document ? 'mousewheel' : 'DOMMouseScroll' + } +}; + +var check = function(event){ + var related = event.relatedTarget; + if (related == null) return true; + if (!related) return false; + return (related != this && related.prefix != 'xul' && typeOf(this) != 'document' && !this.contains(related)); +}; if ('onmouseenter' in document.documentElement){ Element.NativeEvents.mouseenter = Element.NativeEvents.mouseleave = 2; + Element.MouseenterCheck = check; } else { - var check = function(event){ - var related = event.relatedTarget; - if (related == null) return true; - if (!related) return false; - return (related != this && related.prefix != 'xul' && typeOf(this) != 'document' && !this.contains(related)); - }; - Element.Events.mouseenter = { base: 'mouseover', condition: check @@ -4035,12 +3980,12 @@ if (!window.addEventListener){ Element.Events.change = { base: function(){ var type = this.type; - return (this.get('tag') == 'input' && (type == 'radio' || type == 'checkbox')) ? 'propertychange' : 'change' + return (this.get('tag') == 'input' && (type == 'radio' || type == 'checkbox')) ? 
'propertychange' : 'change'; }, condition: function(event){ - return this.type != 'radio' || (event.event.propertyName == 'checked' && this.checked); + return event.type != 'propertychange' || event.event.propertyName == 'checked'; } - } + }; } /*</ltIE9>*/ @@ -4048,7 +3993,6 @@ if (!window.addEventListener){ })(); - /* --- @@ -4080,10 +4024,12 @@ var bubbleUp = function(self, match, fn, event, target){ var map = { mouseenter: { - base: 'mouseover' + base: 'mouseover', + condition: Element.MouseenterCheck }, mouseleave: { - base: 'mouseout' + base: 'mouseout', + condition: Element.MouseenterCheck }, focus: { base: 'focus' + (eventListenerSupport ? '' : 'in'), @@ -4106,7 +4052,10 @@ var formObserver = function(type){ remove: function(self, uid){ var list = self.retrieve(_key + type + 'listeners', {})[uid]; if (list && list.forms) for (var i = list.forms.length; i--;){ - list.forms[i].removeEvent(type, list.fns[i]); + // the form may have been destroyed, so it won't have the + // removeEvent method anymore. In that case the event was + // removed as well. 
+ if (list.forms[i].removeEvent) list.forms[i].removeEvent(type, list.fns[i]); } }, @@ -4190,8 +4139,8 @@ var delegation = { }; var elementEvent = Element.Events[_type]; - if (elementEvent && elementEvent.condition){ - var __match = match, condition = elementEvent.condition; + if (_map.condition || elementEvent && elementEvent.condition){ + var __match = match, condition = _map.condition || elementEvent.condition; match = function(target, event){ return __match(target, event) && condition.call(target, event, type); }; @@ -4226,7 +4175,7 @@ var delegation = { if (_map.remove) _map.remove(this, _uid); delete stored[_uid]; storage[_type] = stored; - return removeEvent.call(this, type, delegator); + return removeEvent.call(this, type, delegator, _map.capture); } var __uid, s; @@ -4249,6 +4198,237 @@ var delegation = { })(); +/* +--- + +name: Element.Style + +description: Contains methods for interacting with the styles of Elements in a fashionable way. + +license: MIT-style license. + +requires: Element + +provides: Element.Style + +... 
+*/ + +(function(){ + +var html = document.html, el; + +//<ltIE9> +// Check for oldIE, which does not remove styles when they're set to null +el = document.createElement('div'); +el.style.color = 'red'; +el.style.color = null; +var doesNotRemoveStyles = el.style.color == 'red'; + +// check for oldIE, which returns border* shorthand styles in the wrong order (color-width-style instead of width-style-color) +var border = '1px solid #123abc'; +el.style.border = border; +var returnsBordersInWrongOrder = el.style.border != border; +el = null; +//</ltIE9> + +var hasGetComputedStyle = !!window.getComputedStyle, + supportBorderRadius = document.createElement('div').style.borderRadius != null; + +Element.Properties.styles = {set: function(styles){ + this.setStyles(styles); +}}; + +var hasOpacity = (html.style.opacity != null), + hasFilter = (html.style.filter != null), + reAlpha = /alpha\(opacity=([\d.]+)\)/i; + +var setVisibility = function(element, opacity){ + element.store('$opacity', opacity); + element.style.visibility = opacity > 0 || opacity == null ? 'visible' : 'hidden'; +}; + +//<ltIE9> +var setFilter = function(element, regexp, value){ + var style = element.style, + filter = style.filter || element.getComputedStyle('filter') || ''; + style.filter = (regexp.test(filter) ? filter.replace(regexp, value) : filter + ' ' + value).trim(); + if (!style.filter) style.removeAttribute('filter'); +}; +//</ltIE9> + +var setOpacity = (hasOpacity ? function(element, opacity){ + element.style.opacity = opacity; +} : (hasFilter ? function(element, opacity){ + if (!element.currentStyle || !element.currentStyle.hasLayout) element.style.zoom = 1; + if (opacity == null || opacity == 1){ + setFilter(element, reAlpha, ''); + if (opacity == 1 && getOpacity(element) != 1) setFilter(element, reAlpha, 'alpha(opacity=100)'); + } else { + setFilter(element, reAlpha, 'alpha(opacity=' + (opacity * 100).limit(0, 100).round() + ')'); + } +} : setVisibility)); + +var getOpacity = (hasOpacity ? 
function(element){ + var opacity = element.style.opacity || element.getComputedStyle('opacity'); + return (opacity == '') ? 1 : opacity.toFloat(); +} : (hasFilter ? function(element){ + var filter = (element.style.filter || element.getComputedStyle('filter')), + opacity; + if (filter) opacity = filter.match(reAlpha); + return (opacity == null || filter == null) ? 1 : (opacity[1] / 100); +} : function(element){ + var opacity = element.retrieve('$opacity'); + if (opacity == null) opacity = (element.style.visibility == 'hidden' ? 0 : 1); + return opacity; +})); + +var floatName = (html.style.cssFloat == null) ? 'styleFloat' : 'cssFloat', + namedPositions = {left: '0%', top: '0%', center: '50%', right: '100%', bottom: '100%'}, + hasBackgroundPositionXY = (html.style.backgroundPositionX != null); + +//<ltIE9> +var removeStyle = function(style, property){ + if (property == 'backgroundPosition'){ + style.removeAttribute(property + 'X'); + property += 'Y'; + } + style.removeAttribute(property); +}; +//</ltIE9> + +Element.implement({ + + getComputedStyle: function(property){ + if (!hasGetComputedStyle && this.currentStyle) return this.currentStyle[property.camelCase()]; + var defaultView = Element.getDocument(this).defaultView, + computed = defaultView ? defaultView.getComputedStyle(this, null) : null; + return (computed) ? computed.getPropertyValue((property == floatName) ? 'float' : property.hyphenate()) : ''; + }, + + setStyle: function(property, value){ + if (property == 'opacity'){ + if (value != null) value = parseFloat(value); + setOpacity(this, value); + return this; + } + property = (property == 'float' ? floatName : property).camelCase(); + if (typeOf(value) != 'string'){ + var map = (Element.Styles[property] || '@').split(' '); + value = Array.from(value).map(function(val, i){ + if (!map[i]) return ''; + return (typeOf(val) == 'number') ? 
map[i].replace('@', Math.round(val)) : val; + }).join(' '); + } else if (value == String(Number(value))){ + value = Math.round(value); + } + this.style[property] = value; + //<ltIE9> + if ((value == '' || value == null) && doesNotRemoveStyles && this.style.removeAttribute){ + removeStyle(this.style, property); + } + //</ltIE9> + return this; + }, + + getStyle: function(property){ + if (property == 'opacity') return getOpacity(this); + property = (property == 'float' ? floatName : property).camelCase(); + if (supportBorderRadius && property.indexOf('borderRadius') != -1){ + return ['borderTopLeftRadius', 'borderTopRightRadius', 'borderBottomRightRadius', 'borderBottomLeftRadius'].map(function(corner){ + return this.style[corner] || '0px'; + }, this).join(' '); + } + var result = this.style[property]; + if (!result || property == 'zIndex'){ + if (Element.ShortStyles.hasOwnProperty(property)){ + result = []; + for (var s in Element.ShortStyles[property]) result.push(this.getStyle(s)); + return result.join(' '); + } + result = this.getComputedStyle(property); + } + if (hasBackgroundPositionXY && /^backgroundPosition[XY]?$/.test(property)){ + return result.replace(/(top|right|bottom|left)/g, function(position){ + return namedPositions[position]; + }) || '0px'; + } + if (!result && property == 'backgroundPosition') return '0px 0px'; + if (result){ + result = String(result); + var color = result.match(/rgba?\([\d\s,]+\)/); + if (color) result = result.replace(color[0], color[0].rgbToHex()); + } + if (!hasGetComputedStyle && !this.style[property]){ + if ((/^(height|width)$/).test(property) && !(/px$/.test(result))){ + var values = (property == 'width') ? 
['left', 'right'] : ['top', 'bottom'], size = 0; + values.each(function(value){ + size += this.getStyle('border-' + value + '-width').toInt() + this.getStyle('padding-' + value).toInt(); + }, this); + return this['offset' + property.capitalize()] - size + 'px'; + } + if ((/^border(.+)Width|margin|padding/).test(property) && isNaN(parseFloat(result))){ + return '0px'; + } + } + //<ltIE9> + if (returnsBordersInWrongOrder && /^border(Top|Right|Bottom|Left)?$/.test(property) && /^#/.test(result)){ + return result.replace(/^(.+)\s(.+)\s(.+)$/, '$2 $3 $1'); + } + //</ltIE9> + + return result; + }, + + setStyles: function(styles){ + for (var style in styles) this.setStyle(style, styles[style]); + return this; + }, + + getStyles: function(){ + var result = {}; + Array.flatten(arguments).each(function(key){ + result[key] = this.getStyle(key); + }, this); + return result; + } + +}); + +Element.Styles = { + left: '@px', top: '@px', bottom: '@px', right: '@px', + width: '@px', height: '@px', maxWidth: '@px', maxHeight: '@px', minWidth: '@px', minHeight: '@px', + backgroundColor: 'rgb(@, @, @)', backgroundSize: '@px', backgroundPosition: '@px @px', color: 'rgb(@, @, @)', + fontSize: '@px', letterSpacing: '@px', lineHeight: '@px', clip: 'rect(@px @px @px @px)', + margin: '@px @px @px @px', padding: '@px @px @px @px', border: '@px @ rgb(@, @, @) @px @ rgb(@, @, @) @px @ rgb(@, @, @)', + borderWidth: '@px @px @px @px', borderStyle: '@ @ @ @', borderColor: 'rgb(@, @, @) rgb(@, @, @) rgb(@, @, @) rgb(@, @, @)', + zIndex: '@', 'zoom': '@', fontWeight: '@', textIndent: '@px', opacity: '@', borderRadius: '@px @px @px @px' +}; + + + + + +Element.ShortStyles = {margin: {}, padding: {}, border: {}, borderWidth: {}, borderStyle: {}, borderColor: {}}; + +['Top', 'Right', 'Bottom', 'Left'].each(function(direction){ + var Short = Element.ShortStyles; + var All = Element.Styles; + ['margin', 'padding'].each(function(style){ + var sd = style + direction; + Short[style][sd] = All[sd] = '@px'; + 
}); + var bd = 'border' + direction; + Short.border[bd] = All[bd] = '@px @ rgb(@, @, @)'; + var bdw = bd + 'Width', bds = bd + 'Style', bdc = bd + 'Color'; + Short[bd] = {}; + Short.borderWidth[bdw] = Short[bd][bdw] = All[bdw] = '@px'; + Short.borderStyle[bds] = Short[bd][bds] = All[bds] = '@'; + Short.borderColor[bdc] = Short[bd][bdc] = All[bdc] = 'rgb(@, @, @)'; +}); + +if (hasBackgroundPositionXY) Element.ShortStyles.backgroundPosition = {backgroundPositionX: '@', backgroundPositionY: '@'}; +})(); /* --- @@ -4279,6 +4459,23 @@ element.appendChild(child); var brokenOffsetParent = (child.offsetParent === element); element = child = null; +var heightComponents = ['height', 'paddingTop', 'paddingBottom', 'borderTopWidth', 'borderBottomWidth'], + widthComponents = ['width', 'paddingLeft', 'paddingRight', 'borderLeftWidth', 'borderRightWidth']; + +var svgCalculateSize = function(el){ + + var gCS = window.getComputedStyle(el), + bounds = {x: 0, y: 0}; + + heightComponents.each(function(css){ + bounds.y += parseFloat(gCS[css]); + }); + widthComponents.each(function(css){ + bounds.x += parseFloat(gCS[css]); + }); + return bounds; +}; + var isOffset = function(el){ return styleString(el, 'position') != 'static' || isBody(el); }; @@ -4301,7 +4498,18 @@ Element.implement({ getSize: function(){ if (isBody(this)) return this.getWindow().getSize(); - return {x: this.offsetWidth, y: this.offsetHeight}; + + //<ltIE9> + // This if clause is because IE8- cannot calculate getBoundingClientRect of elements with visibility hidden. + if (!window.getComputedStyle) return {x: this.offsetWidth, y: this.offsetHeight}; + //</ltIE9> + + // This svg section under, calling `svgCalculateSize()`, can be removed when FF fixed the svg size bug. 
+ // Bug info: https://bugzilla.mozilla.org/show_bug.cgi?id=530985 + if (this.get('tag') == 'svg') return svgCalculateSize(this); + + var bounds = this.getBoundingClientRect(); + return {x: bounds.width, y: bounds.height}; }, getScrollSize: function(){ @@ -4339,12 +4547,14 @@ Element.implement({ try { return element.offsetParent; - } catch(e) {} + } catch(e){} return null; }, getOffsets: function(){ - if (this.getBoundingClientRect && !Browser.Platform.ios){ + var hasGetBoundingClientRect = this.getBoundingClientRect; + + if (hasGetBoundingClientRect){ var bound = this.getBoundingClientRect(), html = document.id(this.getDocument().documentElement), htmlScroll = html.getScroll(), @@ -4353,7 +4563,7 @@ Element.implement({ return { x: bound.left.toInt() + elemScrolls.x + ((isFixed) ? 0 : htmlScroll.x) - html.clientLeft, - y: bound.top.toInt() + elemScrolls.y + ((isFixed) ? 0 : htmlScroll.y) - html.clientTop + y: bound.top.toInt() + elemScrolls.y + ((isFixed) ? 0 : htmlScroll.y) - html.clientTop }; } @@ -4362,29 +4572,11 @@ Element.implement({ while (element && !isBody(element)){ position.x += element.offsetLeft; - position.y += element.offsetTop; - - if (Browser.firefox){ - if (!borderBox(element)){ - position.x += leftBorder(element); - position.y += topBorder(element); - } - var parent = element.parentNode; - if (parent && styleString(parent, 'overflow') != 'visible'){ - position.x += leftBorder(parent); - position.y += topBorder(parent); - } - } else if (element != this && Browser.safari){ - position.x += leftBorder(element); - position.y += topBorder(element); - } + position.y += element.offsetTop; element = element.offsetParent; } - if (Browser.firefox && !borderBox(this)){ - position.x -= leftBorder(this); - position.y -= topBorder(this); - } + return position; }, @@ -4533,7 +4725,6 @@ Element.alias({position: 'setPosition'}); //compatability }); - /* --- @@ -4666,13 +4857,17 @@ var Fx = this.Fx = new Class({ }, resume: function(){ - if ((this.frame < 
this.frames) && !this.isRunning()) pushInstance.call(this, this.options.fps); + if (this.isPaused()) pushInstance.call(this, this.options.fps); return this; }, isRunning: function(){ var list = instances[this.options.fps]; return list && list.contains(this); + }, + + isPaused: function(){ + return (this.frame < this.frames) && !this.isRunning(); } }); @@ -4714,7 +4909,6 @@ var pullInstance = function(fps){ })(); - /* --- @@ -4745,7 +4939,7 @@ Fx.CSS = new Class({ from = element.getStyle(property); var unit = this.options.unit; // adapted from: https://github.com/ryanmorr/fx/blob/master/fx.js#L299 - if (unit && from.slice(-unit.length) != unit && parseFloat(from) != 0){ + if (unit && from && typeof from == 'string' && from.slice(-unit.length) != unit && parseFloat(from) != 0){ element.setStyle(property, to + unit); var value = element.getComputedStyle(property); // IE and Opera support pixelLeft or pixelWidth @@ -4817,11 +5011,13 @@ Fx.CSS = new Class({ search: function(selector){ if (Fx.CSS.Cache[selector]) return Fx.CSS.Cache[selector]; var to = {}, selectorTest = new RegExp('^' + selector.escapeRegExp() + '$'); - Array.each(document.styleSheets, function(sheet, j){ - var href = sheet.href; - if (href && href.contains('://') && !href.contains(document.domain)) return; - var rules = sheet.rules || sheet.cssRules; + + var searchStyles = function(rules){ Array.each(rules, function(rule, i){ + if (rule.media){ + searchStyles(rule.rules || rule.cssRules); + return; + } if (!rule.style) return; var selectorText = (rule.selectorText) ? rule.selectorText.replace(/^\w+/, function(m){ return m.toLowerCase(); @@ -4833,6 +5029,13 @@ Fx.CSS = new Class({ to[style] = ((/^rgb/).test(value)) ? 
value.rgbToHex() : value; }); }); + }; + + Array.each(document.styleSheets, function(sheet, j){ + var href = sheet.href; + if (href && href.indexOf('://') > -1 && href.indexOf(document.domain) == -1) return; + var rules = sheet.rules || sheet.cssRules; + searchStyles(rules); }); return Fx.CSS.Cache[selector] = to; } @@ -4880,120 +5083,6 @@ Fx.CSS.Parsers = { - -/* ---- - -name: Fx.Tween - -description: Formerly Fx.Style, effect to transition any CSS property for an element. - -license: MIT-style license. - -requires: Fx.CSS - -provides: [Fx.Tween, Element.fade, Element.highlight] - -... -*/ - -Fx.Tween = new Class({ - - Extends: Fx.CSS, - - initialize: function(element, options){ - this.element = this.subject = document.id(element); - this.parent(options); - }, - - set: function(property, now){ - if (arguments.length == 1){ - now = property; - property = this.property || this.options.property; - } - this.render(this.element, property, now, this.options.unit); - return this; - }, - - start: function(property, from, to){ - if (!this.check(property, from, to)) return this; - var args = Array.flatten(arguments); - this.property = this.options.property || args.shift(); - var parsed = this.prepare(this.element, this.property, args); - return this.parent(parsed.from, parsed.to); - } - -}); - -Element.Properties.tween = { - - set: function(options){ - this.get('tween').cancel().setOptions(options); - return this; - }, - - get: function(){ - var tween = this.retrieve('tween'); - if (!tween){ - tween = new Fx.Tween(this, {link: 'cancel'}); - this.store('tween', tween); - } - return tween; - } - -}; - -Element.implement({ - - tween: function(property, from, to){ - this.get('tween').start(property, from, to); - return this; - }, - - fade: function(how){ - var fade = this.get('tween'), method, args = ['opacity'].append(arguments), toggle; - if (args[1] == null) args[1] = 'toggle'; - switch (args[1]){ - case 'in': method = 'start'; args[1] = 1; break; - case 'out': method = 
'start'; args[1] = 0; break; - case 'show': method = 'set'; args[1] = 1; break; - case 'hide': method = 'set'; args[1] = 0; break; - case 'toggle': - var flag = this.retrieve('fade:flag', this.getStyle('opacity') == 1); - method = 'start'; - args[1] = flag ? 0 : 1; - this.store('fade:flag', !flag); - toggle = true; - break; - default: method = 'start'; - } - if (!toggle) this.eliminate('fade:flag'); - fade[method].apply(fade, args); - var to = args[args.length - 1]; - if (method == 'set' || to != 0) this.setStyle('visibility', to == 0 ? 'hidden' : 'visible'); - else fade.chain(function(){ - this.element.setStyle('visibility', 'hidden'); - this.callChain(); - }); - return this; - }, - - highlight: function(start, end){ - if (!end){ - end = this.retrieve('highlight:original', this.getStyle('background-color')); - end = (end == 'transparent') ? '#fff' : end; - } - var tween = this.get('tween'); - tween.start('background-color', start || '#ffff88', end).chain(function(){ - this.setStyle('background-color', this.retrieve('highlight:original')); - tween.callChain(); - }.bind(this)); - return this; - } - -}); - - /* --- @@ -5072,7 +5161,6 @@ Element.implement({ }); - /* --- @@ -5183,6 +5271,117 @@ Fx.Transitions.extend({ }); }); +/* +--- + +name: Fx.Tween + +description: Formerly Fx.Style, effect to transition any CSS property for an element. + +license: MIT-style license. + +requires: Fx.CSS + +provides: [Fx.Tween, Element.fade, Element.highlight] + +... 
+*/ + +Fx.Tween = new Class({ + + Extends: Fx.CSS, + + initialize: function(element, options){ + this.element = this.subject = document.id(element); + this.parent(options); + }, + + set: function(property, now){ + if (arguments.length == 1){ + now = property; + property = this.property || this.options.property; + } + this.render(this.element, property, now, this.options.unit); + return this; + }, + + start: function(property, from, to){ + if (!this.check(property, from, to)) return this; + var args = Array.flatten(arguments); + this.property = this.options.property || args.shift(); + var parsed = this.prepare(this.element, this.property, args); + return this.parent(parsed.from, parsed.to); + } + +}); + +Element.Properties.tween = { + + set: function(options){ + this.get('tween').cancel().setOptions(options); + return this; + }, + + get: function(){ + var tween = this.retrieve('tween'); + if (!tween){ + tween = new Fx.Tween(this, {link: 'cancel'}); + this.store('tween', tween); + } + return tween; + } + +}; + +Element.implement({ + + tween: function(property, from, to){ + this.get('tween').start(property, from, to); + return this; + }, + + fade: function(how){ + var fade = this.get('tween'), method, args = ['opacity'].append(arguments), toggle; + if (args[1] == null) args[1] = 'toggle'; + switch (args[1]){ + case 'in': method = 'start'; args[1] = 1; break; + case 'out': method = 'start'; args[1] = 0; break; + case 'show': method = 'set'; args[1] = 1; break; + case 'hide': method = 'set'; args[1] = 0; break; + case 'toggle': + var flag = this.retrieve('fade:flag', this.getStyle('opacity') == 1); + method = 'start'; + args[1] = flag ? 0 : 1; + this.store('fade:flag', !flag); + toggle = true; + break; + default: method = 'start'; + } + if (!toggle) this.eliminate('fade:flag'); + fade[method].apply(fade, args); + var to = args[args.length - 1]; + if (method == 'set' || to != 0) this.setStyle('visibility', to == 0 ? 
'hidden' : 'visible'); + else fade.chain(function(){ + this.element.setStyle('visibility', 'hidden'); + this.callChain(); + }); + return this; + }, + + highlight: function(start, end){ + if (!end){ + end = this.retrieve('highlight:original', this.getStyle('background-color')); + end = (end == 'transparent') ? '#fff' : end; + } + var tween = this.get('tween'); + tween.start('background-color', start || '#ffff88', end).chain(function(){ + this.setStyle('background-color', this.retrieve('highlight:original')); + tween.callChain(); + }.bind(this)); + return this; + } + +}); /* --- @@ -5220,7 +5419,8 @@ var Request = this.Request = new Class({ onException: function(headerName, value){}, onTimeout: function(){}, user: '', - password: '',*/ + password: '', + withCredentials: false,*/ url: '', data: '', headers: { @@ -5258,7 +5458,10 @@ var Request = this.Request = new Class({ }.bind(this)); xhr.onreadystatechange = empty; if (progressSupport) xhr.onprogress = xhr.onloadstart = empty; - clearTimeout(this.timer); + if (this.timer){ + clearTimeout(this.timer); + delete this.timer; + } this.response = {text: this.xhr.responseText || '', xml: this.xhr.responseXML}; if (this.options.isSuccess.call(this, this.status)) @@ -5369,10 +5572,10 @@ var Request = this.Request = new Class({ if (trimPosition > -1 && (trimPosition = url.indexOf('#')) > -1) url = url.substr(0, trimPosition); if (this.options.noCache) - url += (url.contains('?') ? '&' : '?') + String.uniqueID(); + url += (url.indexOf('?') > -1 ? '&' : '?') + String.uniqueID(); - if (data && method == 'get'){ - url += (url.contains('?') ? '&' : '?') + data; + if (data && (method == 'get' || method == 'delete')){ + url += (url.indexOf('?') > -1 ? 
'&' : '?') + data; data = null; } @@ -5383,7 +5586,7 @@ var Request = this.Request = new Class({ } xhr.open(method.toUpperCase(), url, this.options.async, this.options.user, this.options.password); - if (this.options.user && 'withCredentials' in xhr) xhr.withCredentials = true; + if ((this.options.withCredentials) && 'withCredentials' in xhr) xhr.withCredentials = true; xhr.onreadystatechange = this.onStateChange.bind(this); @@ -5407,7 +5610,10 @@ var Request = this.Request = new Class({ this.running = false; var xhr = this.xhr; xhr.abort(); - clearTimeout(this.timer); + if (this.timer){ + clearTimeout(this.timer); + delete this.timer; + } xhr.onreadystatechange = empty; if (progressSupport) xhr.onprogress = xhr.onloadstart = empty; this.xhr = new Browser.Request(); @@ -5418,7 +5624,7 @@ var Request = this.Request = new Class({ }); var methods = {}; -['get', 'post', 'put', 'delete', 'GET', 'POST', 'PUT', 'DELETE'].each(function(method){ +['get', 'post', 'put', 'delete', 'patch', 'head', 'GET', 'POST', 'PUT', 'DELETE', 'PATCH', 'HEAD'].each(function(method){ methods[method] = function(data){ var object = { method: method @@ -5463,7 +5669,6 @@ Element.implement({ })(); - /* --- @@ -5526,10 +5731,14 @@ JSON.encode = JSON.stringify ? 
function(obj){ return null; }; +JSON.secure = true; + + JSON.decode = function(string, secure){ if (!string || typeOf(string) != 'string') return null; - - if (secure || JSON.secure){ + + if (secure == null) secure = JSON.secure; + if (secure){ if (JSON.parse) return JSON.parse(string); if (!JSON.validate(string)) throw new Error('JSON could not decode the input; security is enabled and the value is not secure.'); } @@ -5539,7 +5748,6 @@ JSON.decode = function(string, secure){ })(); - /* --- @@ -5587,7 +5795,6 @@ Request.JSON = new Class({ }); - /* --- @@ -5663,7 +5870,6 @@ Cookie.dispose = function(key, options){ return new Cookie(key, options).dispose(); }; - /* --- @@ -5691,12 +5897,14 @@ var ready, var domready = function(){ clearTimeout(timer); - if (ready) return; - Browser.loaded = ready = true; - document.removeListener('DOMContentLoaded', domready).removeListener('readystatechange', check); - - document.fireEvent('domready'); - window.fireEvent('domready'); + if (!ready) { + Browser.loaded = ready = true; + document.removeListener('DOMContentLoaded', domready).removeListener('readystatechange', check); + document.fireEvent('domready'); + window.fireEvent('domready'); + } + // cleanup scope vars + document = window = testElement = null; }; var check = function(){ @@ -5769,4 +5977,3 @@ window.addEvent('load', function(){ }); })(window, document); - diff --git a/couchpotato/static/scripts/library/mootools_more.js b/couchpotato/static/scripts/vendor/mootools_more.js similarity index 94% rename from couchpotato/static/scripts/library/mootools_more.js rename to couchpotato/static/scripts/vendor/mootools_more.js index d2d703692f..a2b00975e7 100644 --- a/couchpotato/static/scripts/library/mootools_more.js +++ b/couchpotato/static/scripts/vendor/mootools_more.js @@ -1,6 +1,7 @@ -// MooTools: the javascript framework. 
-// Load this file's selection again by visiting: http://mootools.net/more/43db227db7a621ebb062ee621432ae3d -// Or build this file again with packager using: packager build More/Events.Pseudos More/Date More/Date.Extras More/Element.Forms More/Element.Position More/Element.Shortcuts More/Fx.Scroll More/Fx.Slide More/Sortables More/Request.JSONP More/Request.Periodical +/* MooTools: the javascript framework. license: MIT-style license. copyright: Copyright (c) 2006-2014 [Valerio Proietti](http://mad4milk.net/).*/ +/* +Web Build: http://mootools.net/more/builder/c5d56f178d3e33908b4f47169e617a6c +*/ /* --- @@ -31,11 +32,10 @@ provides: [MooTools.More] */ MooTools.More = { - 'version': '1.4.0.1', - 'build': 'a4244edf2aa97ac8a196fc96082dd35af1abab87' + version: '1.5.1', + build: '2dd695ba957196ae4b0275a690765d6636a61ccd' }; - /* --- @@ -48,7 +48,7 @@ license: MIT-style license authors: - Arian Stolwijk -requires: [Core/Class.Extras, Core/Slick.Parser, More/MooTools.More] +requires: [Core/Class.Extras, Core/Slick.Parser, MooTools.More] provides: [Events.Pseudos] @@ -194,993 +194,728 @@ Events.implement(Events.Pseudos(pseudos, proto.addEvent, proto.removeEvent)); })(); - /* --- -script: Object.Extras.js +script: Drag.js -name: Object.Extras +name: Drag -description: Extra Object generics, like getFromPath which allows a path notation to child elements. +description: The base Drag Class. Can be used to drag and resize Elements using mouse events. license: MIT-style license authors: - - Aaron Newton + - Valerio Proietti + - Tom Occhinno + - Jan Kassens requires: - - Core/Object - - /MooTools.More - -provides: [Object.Extras] + - Core/Events + - Core/Options + - Core/Element.Event + - Core/Element.Style + - Core/Element.Dimensions + - MooTools.More +provides: [Drag] ... 
+ */ -(function(){ +var Drag = new Class({ -var defined = function(value){ - return value != null; -}; + Implements: [Events, Options], -var hasOwnProperty = Object.prototype.hasOwnProperty; + options: {/* + onBeforeStart: function(thisElement){}, + onStart: function(thisElement, event){}, + onSnap: function(thisElement){}, + onDrag: function(thisElement, event){}, + onCancel: function(thisElement){}, + onComplete: function(thisElement, event){},*/ + snap: 6, + unit: 'px', + grid: false, + style: true, + limit: false, + handle: false, + invert: false, + preventDefault: false, + stopPropagation: false, + compensateScroll: false, + modifiers: {x: 'left', y: 'top'} + }, -Object.extend({ + initialize: function(){ + var params = Array.link(arguments, { + 'options': Type.isObject, + 'element': function(obj){ + return obj != null; + } + }); - getFromPath: function(source, parts){ - if (typeof parts == 'string') parts = parts.split('.'); - for (var i = 0, l = parts.length; i < l; i++){ - if (hasOwnProperty.call(source, parts[i])) source = source[parts[i]]; - else return null; - } - return source; - }, + this.element = document.id(params.element); + this.document = this.element.getDocument(); + this.setOptions(params.options || {}); + var htype = typeOf(this.options.handle); + this.handles = ((htype == 'array' || htype == 'collection') ? $$(this.options.handle) : document.id(this.options.handle)) || this.element; + this.mouse = {'now': {}, 'pos': {}}; + this.value = {'start': {}, 'now': {}}; + this.offsetParent = (function(el){ + var offsetParent = el.getOffsetParent(); + var isBody = !offsetParent || (/^(?:body|html)$/i).test(offsetParent.tagName); + return isBody ? window : document.id(offsetParent); + })(this.element); + this.selection = 'selectstart' in document ? 
'selectstart' : 'mousedown'; - cleanValues: function(object, method){ - method = method || defined; - for (var key in object) if (!method(object[key])){ - delete object[key]; + this.compensateScroll = {start: {}, diff: {}, last: {}}; + + if ('ondragstart' in document && !('FileReader' in window) && !Drag.ondragstartFixed){ + document.ondragstart = Function.from(false); + Drag.ondragstartFixed = true; } - return object; - }, - erase: function(object, key){ - if (hasOwnProperty.call(object, key)) delete object[key]; - return object; + this.bound = { + start: this.start.bind(this), + check: this.check.bind(this), + drag: this.drag.bind(this), + stop: this.stop.bind(this), + cancel: this.cancel.bind(this), + eventStop: Function.from(false), + scrollListener: this.scrollListener.bind(this) + }; + this.attach(); }, - run: function(object){ - var args = Array.slice(arguments, 1); - for (var key in object) if (object[key].apply){ - object[key].apply(object, args); - } - return object; - } - -}); + attach: function(){ + this.handles.addEvent('mousedown', this.bound.start); + if (this.options.compensateScroll) this.offsetParent.addEvent('scroll', this.bound.scrollListener); + return this; + }, -})(); + detach: function(){ + this.handles.removeEvent('mousedown', this.bound.start); + if (this.options.compensateScroll) this.offsetParent.removeEvent('scroll', this.bound.scrollListener); + return this; + }, + scrollListener: function(){ -/* ---- + if (!this.mouse.start) return; + var newScrollValue = this.offsetParent.getScroll(); -script: Locale.js + if (this.element.getStyle('position') == 'absolute'){ + var scrollDiff = this.sumValues(newScrollValue, this.compensateScroll.last, -1); + this.mouse.now = this.sumValues(this.mouse.now, scrollDiff, 1); + } else { + this.compensateScroll.diff = this.sumValues(newScrollValue, this.compensateScroll.start, -1); + } + if (this.offsetParent != window) this.compensateScroll.diff = this.sumValues(this.compensateScroll.start, 
newScrollValue, -1); + this.compensateScroll.last = newScrollValue; + this.render(this.options); + }, -name: Locale + sumValues: function(alpha, beta, op){ + var sum = {}, options = this.options; + for (z in options.modifiers){ + if (!options.modifiers[z]) continue; + sum[z] = alpha[z] + beta[z] * op; + } + return sum; + }, -description: Provides methods for localization. + start: function(event){ + var options = this.options; -license: MIT-style license + if (event.rightClick) return; -authors: - - Aaron Newton - - Arian Stolwijk + if (options.preventDefault) event.preventDefault(); + if (options.stopPropagation) event.stopPropagation(); + this.compensateScroll.start = this.compensateScroll.last = this.offsetParent.getScroll(); + this.compensateScroll.diff = {x: 0, y: 0}; + this.mouse.start = event.page; + this.fireEvent('beforeStart', this.element); -requires: - - Core/Events - - /Object.Extras - - /MooTools.More + var limit = options.limit; + this.limit = {x: [], y: []}; -provides: [Locale, Lang] + var z, coordinates, offsetParent = this.offsetParent == window ? null : this.offsetParent; + for (z in options.modifiers){ + if (!options.modifiers[z]) continue; -... -*/ + var style = this.element.getStyle(options.modifiers[z]); -(function(){ + // Some browsers (IE and Opera) don't always return pixels. 
+ if (style && !style.match(/px$/)){ + if (!coordinates) coordinates = this.element.getCoordinates(offsetParent); + style = coordinates[options.modifiers[z]]; + } -var current = null, - locales = {}, - inherits = {}; + if (options.style) this.value.now[z] = (style || 0).toInt(); + else this.value.now[z] = this.element[options.modifiers[z]]; -var getSet = function(set){ - if (instanceOf(set, Locale.Set)) return set; - else return locales[set]; -}; + if (options.invert) this.value.now[z] *= -1; -var Locale = this.Locale = { + this.mouse.pos[z] = event.page[z] - this.value.now[z]; - define: function(locale, set, key, value){ - var name; - if (instanceOf(locale, Locale.Set)){ - name = locale.name; - if (name) locales[name] = locale; - } else { - name = locale; - if (!locales[name]) locales[name] = new Locale.Set(name); - locale = locales[name]; + if (limit && limit[z]){ + var i = 2; + while (i--){ + var limitZI = limit[z][i]; + if (limitZI || limitZI === 0) this.limit[z][i] = (typeof limitZI == 'function') ? 
limitZI() : limitZI; + } + } } - if (set) locale.define(set, key, value); - - + if (typeOf(this.options.grid) == 'number') this.options.grid = { + x: this.options.grid, + y: this.options.grid + }; - if (!current) current = locale; + var events = { + mousemove: this.bound.check, + mouseup: this.bound.cancel + }; + events[this.selection] = this.bound.eventStop; + this.document.addEvents(events); + }, - return locale; + check: function(event){ + if (this.options.preventDefault) event.preventDefault(); + var distance = Math.round(Math.sqrt(Math.pow(event.page.x - this.mouse.start.x, 2) + Math.pow(event.page.y - this.mouse.start.y, 2))); + if (distance > this.options.snap){ + this.cancel(); + this.document.addEvents({ + mousemove: this.bound.drag, + mouseup: this.bound.stop + }); + this.fireEvent('start', [this.element, event]).fireEvent('snap', this.element); + } }, - use: function(locale){ - locale = getSet(locale); + drag: function(event){ + var options = this.options; + if (options.preventDefault) event.preventDefault(); + this.mouse.now = this.sumValues(event.page, this.compensateScroll.diff, -1); - if (locale){ - current = locale; + this.render(options); + this.fireEvent('drag', [this.element, event]); + }, - this.fireEvent('change', locale); + render: function(options){ + for (var z in options.modifiers){ + if (!options.modifiers[z]) continue; + this.value.now[z] = this.mouse.now[z] - this.mouse.pos[z]; - + if (options.invert) this.value.now[z] *= -1; + if (options.limit && this.limit[z]){ + if ((this.limit[z][1] || this.limit[z][1] === 0) && (this.value.now[z] > this.limit[z][1])){ + this.value.now[z] = this.limit[z][1]; + } else if ((this.limit[z][0] || this.limit[z][0] === 0) && (this.value.now[z] < this.limit[z][0])){ + this.value.now[z] = this.limit[z][0]; + } + } + if (options.grid[z]) this.value.now[z] -= ((this.value.now[z] - (this.limit[z][0]||0)) % options.grid[z]); + if (options.style) this.element.setStyle(options.modifiers[z], this.value.now[z] + 
options.unit); + else this.element[options.modifiers[z]] = this.value.now[z]; } - - return this; }, - getCurrent: function(){ - return current; + cancel: function(event){ + this.document.removeEvents({ + mousemove: this.bound.check, + mouseup: this.bound.cancel + }); + if (event){ + this.document.removeEvent(this.selection, this.bound.eventStop); + this.fireEvent('cancel', this.element); + } }, - get: function(key, args){ - return (current) ? current.get(key, args) : ''; - }, + stop: function(event){ + var events = { + mousemove: this.bound.drag, + mouseup: this.bound.stop + }; + events[this.selection] = this.bound.eventStop; + this.document.removeEvents(events); + this.mouse.start = null; + if (event) this.fireEvent('complete', [this.element, event]); + } - inherit: function(locale, inherits, set){ - locale = getSet(locale); +}); - if (locale) locale.inherit(inherits, set); - return this; - }, +Element.implement({ - list: function(){ - return Object.keys(locales); + makeResizable: function(options){ + var drag = new Drag(this, Object.merge({ + modifiers: { + x: 'width', + y: 'height' + } + }, options)); + + this.store('resizer', drag); + return drag.addEvent('drag', function(){ + this.fireEvent('resize', drag); + }.bind(this)); } -}; +}); -Object.append(Locale, new Events); +/* +--- -Locale.Set = new Class({ +script: Drag.Move.js - sets: {}, +name: Drag.Move - inherits: { - locales: [], - sets: {} - }, +description: A Drag extension that provides support for the constraining of draggables to containers and droppables. - initialize: function(name){ - this.name = name || ''; +license: MIT-style license + +authors: + - Valerio Proietti + - Tom Occhinno + - Jan Kassens + - Aaron Newton + - Scott Kyle + +requires: + - Core/Element.Dimensions + - Drag + +provides: [Drag.Move] + +... 
+*/ + +Drag.Move = new Class({ + + Extends: Drag, + + options: {/* + onEnter: function(thisElement, overed){}, + onLeave: function(thisElement, overed){}, + onDrop: function(thisElement, overed, event){},*/ + droppables: [], + container: false, + precalculate: false, + includeMargins: true, + checkDroppables: true }, - define: function(set, key, value){ - var defineData = this.sets[set]; - if (!defineData) defineData = {}; + initialize: function(element, options){ + this.parent(element, options); + element = this.element; - if (key){ - if (typeOf(key) == 'object') defineData = Object.merge(defineData, key); - else defineData[key] = value; + this.droppables = $$(this.options.droppables); + this.setContainer(this.options.container); + + if (this.options.style){ + if (this.options.modifiers.x == 'left' && this.options.modifiers.y == 'top'){ + var parent = element.getOffsetParent(), + styles = element.getStyles('left', 'top'); + if (parent && (styles.left == 'auto' || styles.top == 'auto')){ + element.setPosition(element.getPosition(parent)); + } + } + + if (element.getStyle('position') == 'static') element.setStyle('position', 'absolute'); } - this.sets[set] = defineData; - return this; + this.addEvent('start', this.checkDroppables, true); + this.overed = null; + }, + + setContainer: function(container) { + this.container = document.id(container); + if (this.container && typeOf(this.container) != 'element'){ + this.container = document.id(this.container.getDocument().body); + } }, - get: function(key, args, _base){ - var value = Object.getFromPath(this.sets, key); - if (value != null){ - var type = typeOf(value); - if (type == 'function') value = value.apply(null, Array.from(args)); - else if (type == 'object') value = Object.clone(value); - return value; + start: function(event){ + if (this.container) this.options.limit = this.calculateLimit(); + + if (this.options.precalculate){ + this.positions = this.droppables.map(function(el){ + return el.getCoordinates(); + }); 
} - // get value of inherited locales - var index = key.indexOf('.'), - set = index < 0 ? key : key.substr(0, index), - names = (this.inherits.sets[set] || []).combine(this.inherits.locales).include('en-US'); - if (!_base) _base = []; + this.parent(event); + }, - for (var i = 0, l = names.length; i < l; i++){ - if (_base.contains(names[i])) continue; - _base.include(names[i]); + calculateLimit: function(){ + var element = this.element, + container = this.container, - var locale = locales[names[i]]; - if (!locale) continue; + offsetParent = document.id(element.getOffsetParent()) || document.body, + containerCoordinates = container.getCoordinates(offsetParent), + elementMargin = {}, + elementBorder = {}, + containerMargin = {}, + containerBorder = {}, + offsetParentPadding = {}, + offsetScroll = offsetParent.getScroll(); - value = locale.get(key, args, _base); - if (value != null) return value; + ['top', 'right', 'bottom', 'left'].each(function(pad){ + elementMargin[pad] = element.getStyle('margin-' + pad).toInt(); + elementBorder[pad] = element.getStyle('border-' + pad).toInt(); + containerMargin[pad] = container.getStyle('margin-' + pad).toInt(); + containerBorder[pad] = container.getStyle('border-' + pad).toInt(); + offsetParentPadding[pad] = offsetParent.getStyle('padding-' + pad).toInt(); + }, this); + + var width = element.offsetWidth + elementMargin.left + elementMargin.right, + height = element.offsetHeight + elementMargin.top + elementMargin.bottom, + left = 0 + offsetScroll.x, + top = 0 + offsetScroll.y, + right = containerCoordinates.right - containerBorder.right - width + offsetScroll.x, + bottom = containerCoordinates.bottom - containerBorder.bottom - height + offsetScroll.y; + + if (this.options.includeMargins){ + left += elementMargin.left; + top += elementMargin.top; + } else { + right += elementMargin.right; + bottom += elementMargin.bottom; } - return ''; + if (element.getStyle('position') == 'relative'){ + var coords = 
element.getCoordinates(offsetParent); + coords.left -= element.getStyle('left').toInt(); + coords.top -= element.getStyle('top').toInt(); + + left -= coords.left; + top -= coords.top; + if (container.getStyle('position') != 'relative'){ + left += containerBorder.left; + top += containerBorder.top; + } + right += elementMargin.left - coords.left; + bottom += elementMargin.top - coords.top; + + if (container != offsetParent){ + left += containerMargin.left + offsetParentPadding.left; + if (!offsetParentPadding.left && left < 0) left = 0; + top += offsetParent == document.body ? 0 : containerMargin.top + offsetParentPadding.top; + if (!offsetParentPadding.top && top < 0) top = 0; + } + } else { + left -= elementMargin.left; + top -= elementMargin.top; + if (container != offsetParent){ + left += containerCoordinates.left + containerBorder.left; + top += containerCoordinates.top + containerBorder.top; + } + } + + return { + x: [left, right], + y: [top, bottom] + }; }, - inherit: function(names, set){ - names = Array.from(names); + getDroppableCoordinates: function(element){ + var position = element.getCoordinates(); + if (element.getStyle('position') == 'fixed'){ + var scroll = window.getScroll(); + position.left += scroll.x; + position.right += scroll.x; + position.top += scroll.y; + position.bottom += scroll.y; + } + return position; + }, - if (set && !this.inherits.sets[set]) this.inherits.sets[set] = []; + checkDroppables: function(){ + var overed = this.droppables.filter(function(el, i){ + el = this.positions ? this.positions[i] : this.getDroppableCoordinates(el); + var now = this.mouse.now; + return (now.x > el.left && now.x < el.right && now.y < el.bottom && now.y > el.top); + }, this).getLast(); - var l = names.length; - while (l--) (set ? 
this.inherits.sets[set] : this.inherits.locales).unshift(names[l]); + if (this.overed != overed){ + if (this.overed) this.fireEvent('leave', [this.element, this.overed]); + if (overed) this.fireEvent('enter', [this.element, overed]); + this.overed = overed; + } + }, - return this; + drag: function(event){ + this.parent(event); + if (this.options.checkDroppables && this.droppables.length) this.checkDroppables(); + }, + + stop: function(event){ + this.checkDroppables(); + this.fireEvent('drop', [this.element, this.overed, event]); + this.overed = null; + return this.parent(event); } }); +Element.implement({ + makeDraggable: function(options){ + var drag = new Drag.Move(this, options); + this.store('dragger', drag); + return drag; + } -})(); - +}); /* --- -name: Locale.en-US.Date +script: Sortables.js -description: Date messages for US English. +name: Sortables + +description: Class for creating a drag and drop sorting interface for lists of items. license: MIT-style license authors: - - Aaron Newton + - Tom Occhino requires: - - /Locale + - Core/Fx.Morph + - Drag.Move -provides: [Locale.en-US.Date] +provides: [Sortables] ... */ -Locale.define('en-US', 'Date', { - - months: ['January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December'], - months_abbr: ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'], - days: ['Sunday', 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday'], - days_abbr: ['Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat'], +var Sortables = new Class({ - // Culture's date order: MM/DD/YYYY - dateOrder: ['month', 'date', 'year'], - shortDate: '%m/%d/%Y', - shortTime: '%I:%M%p', - AM: 'AM', - PM: 'PM', - firstDayOfWeek: 0, + Implements: [Events, Options], - // Date.Extras - ordinal: function(dayOfMonth){ - // 1st, 2nd, 3rd, etc. - return (dayOfMonth > 3 && dayOfMonth < 21) ? 
'th' : ['th', 'st', 'nd', 'rd', 'th'][Math.min(dayOfMonth % 10, 4)]; + options: {/* + onSort: function(element, clone){}, + onStart: function(element, clone){}, + onComplete: function(element){},*/ + opacity: 1, + clone: false, + revert: false, + handle: false, + dragOptions: {}, + unDraggableTags: ['button', 'input', 'a', 'textarea', 'select', 'option'] }, - lessThanMinuteAgo: 'less than a minute ago', - minuteAgo: 'about a minute ago', - minutesAgo: '{delta} minutes ago', - hourAgo: 'about an hour ago', - hoursAgo: 'about {delta} hours ago', - dayAgo: '1 day ago', - daysAgo: '{delta} days ago', - weekAgo: '1 week ago', - weeksAgo: '{delta} weeks ago', - monthAgo: '1 month ago', - monthsAgo: '{delta} months ago', - yearAgo: '1 year ago', - yearsAgo: '{delta} years ago', - - lessThanMinuteUntil: 'less than a minute from now', - minuteUntil: 'about a minute from now', - minutesUntil: '{delta} minutes from now', - hourUntil: 'about an hour from now', - hoursUntil: 'about {delta} hours from now', - dayUntil: '1 day from now', - daysUntil: '{delta} days from now', - weekUntil: '1 week from now', - weeksUntil: '{delta} weeks from now', - monthUntil: '1 month from now', - monthsUntil: '{delta} months from now', - yearUntil: '1 year from now', - yearsUntil: '{delta} years from now' - -}); - - -/* ---- - -script: Date.js - -name: Date - -description: Extends the Date native object to include methods useful in managing dates. - -license: MIT-style license - -authors: - - Aaron Newton - - Nicholas Barthelemy - https://svn.nbarthelemy.com/date-js/ - - Harald Kirshner - mail [at] digitarald.de; http://digitarald.de - - Scott Kyle - scott [at] appden.com; http://appden.com - -requires: - - Core/Array - - Core/String - - Core/Number - - MooTools.More - - Locale - - Locale.en-US.Date - -provides: [Date] - -... 
-*/ - -(function(){ - -var Date = this.Date; - -var DateMethods = Date.Methods = { - ms: 'Milliseconds', - year: 'FullYear', - min: 'Minutes', - mo: 'Month', - sec: 'Seconds', - hr: 'Hours' -}; - -['Date', 'Day', 'FullYear', 'Hours', 'Milliseconds', 'Minutes', 'Month', 'Seconds', 'Time', 'TimezoneOffset', - 'Week', 'Timezone', 'GMTOffset', 'DayOfYear', 'LastMonth', 'LastDayOfMonth', 'UTCDate', 'UTCDay', 'UTCFullYear', - 'AMPM', 'Ordinal', 'UTCHours', 'UTCMilliseconds', 'UTCMinutes', 'UTCMonth', 'UTCSeconds', 'UTCMilliseconds'].each(function(method){ - Date.Methods[method.toLowerCase()] = method; -}); - -var pad = function(n, digits, string){ - if (digits == 1) return n; - return n < Math.pow(10, digits - 1) ? (string || '0') + pad(n, digits - 1, string) : n; -}; - -Date.implement({ + initialize: function(lists, options){ + this.setOptions(options); - set: function(prop, value){ - prop = prop.toLowerCase(); - var method = DateMethods[prop] && 'set' + DateMethods[prop]; - if (method && this[method]) this[method](value); - return this; - }.overloadSetter(), + this.elements = []; + this.lists = []; + this.idle = true; - get: function(prop){ - prop = prop.toLowerCase(); - var method = DateMethods[prop] && 'get' + DateMethods[prop]; - if (method && this[method]) return this[method](); - return null; - }.overloadGetter(), + this.addLists($$(document.id(lists) || lists)); - clone: function(){ - return new Date(this.get('time')); + if (!this.options.clone) this.options.revert = false; + if (this.options.revert) this.effect = new Fx.Morph(null, Object.merge({ + duration: 250, + link: 'cancel' + }, this.options.revert)); }, - increment: function(interval, times){ - interval = interval || 'day'; - times = times != null ? 
times : 1; - - switch (interval){ - case 'year': - return this.increment('month', times * 12); - case 'month': - var d = this.get('date'); - this.set('date', 1).set('mo', this.get('mo') + times); - return this.set('date', d.min(this.get('lastdayofmonth'))); - case 'week': - return this.increment('day', times * 7); - case 'day': - return this.set('date', this.get('date') + times); - } - - if (!Date.units[interval]) throw new Error(interval + ' is not a supported interval'); - - return this.set('time', this.get('time') + times * Date.units[interval]()); + attach: function(){ + this.addLists(this.lists); + return this; }, - decrement: function(interval, times){ - return this.increment(interval, -1 * (times != null ? times : 1)); + detach: function(){ + this.lists = this.removeLists(this.lists); + return this; }, - isLeapYear: function(){ - return Date.isLeapYear(this.get('year')); + addItems: function(){ + Array.flatten(arguments).each(function(element){ + this.elements.push(element); + var start = element.retrieve('sortables:start', function(event){ + this.start.call(this, event, element); + }.bind(this)); + (this.options.handle ? element.getElement(this.options.handle) || element : element).addEvent('mousedown', start); + }, this); + return this; }, - clearTime: function(){ - return this.set({hr: 0, min: 0, sec: 0, ms: 0}); + addLists: function(){ + Array.flatten(arguments).each(function(list){ + this.lists.include(list); + this.addItems(list.getChildren()); + }, this); + return this; }, - diff: function(date, resolution){ - if (typeOf(date) == 'string') date = Date.parse(date); + removeItems: function(){ + return $$(Array.flatten(arguments).map(function(element){ + this.elements.erase(element); + var start = element.retrieve('sortables:start'); + (this.options.handle ? 
element.getElement(this.options.handle) || element : element).removeEvent('mousedown', start); - return ((date - this) / Date.units[resolution || 'day'](3, 3)).round(); // non-leap year, 30-day month + return element; + }, this)); }, - getLastDayOfMonth: function(){ - return Date.daysInMonth(this.get('mo'), this.get('year')); - }, + removeLists: function(){ + return $$(Array.flatten(arguments).map(function(list){ + this.lists.erase(list); + this.removeItems(list.getChildren()); - getDayOfYear: function(){ - return (Date.UTC(this.get('year'), this.get('mo'), this.get('date') + 1) - - Date.UTC(this.get('year'), 0, 1)) / Date.units.day(); + return list; + }, this)); }, + + getDroppableCoordinates: function (element){ + var offsetParent = element.getOffsetParent(); + var position = element.getPosition(offsetParent); + var scroll = { + w: window.getScroll(), + offsetParent: offsetParent.getScroll() + }; + position.x += scroll.offsetParent.x; + position.y += scroll.offsetParent.y; - setDay: function(day, firstDayOfWeek){ - if (firstDayOfWeek == null){ - firstDayOfWeek = Date.getMsg('firstDayOfWeek'); - if (firstDayOfWeek === '') firstDayOfWeek = 1; + if (offsetParent.getStyle('position') == 'fixed'){ + position.x -= scroll.w.x; + position.y -= scroll.w.y; } - day = (7 + Date.parseDay(day, true) - firstDayOfWeek) % 7; - var currentDay = (7 + this.get('day') - firstDayOfWeek) % 7; - - return this.increment('day', day - currentDay); + return position; }, - getWeek: function(firstDayOfWeek){ - if (firstDayOfWeek == null){ - firstDayOfWeek = Date.getMsg('firstDayOfWeek'); - if (firstDayOfWeek === '') firstDayOfWeek = 1; + getClone: function(event, element){ + if (!this.options.clone) return new Element(element.tagName).inject(document.body); + if (typeOf(this.options.clone) == 'function') return this.options.clone.call(this, event, element, this.list); + var clone = element.clone(true).setStyles({ + margin: 0, + position: 'absolute', + visibility: 'hidden', + width: 
element.getStyle('width') + }).addEvent('mousedown', function(event){ + element.fireEvent('mousedown', event); + }); + //prevent the duplicated radio inputs from unchecking the real one + if (clone.get('html').test('radio')){ + clone.getElements('input[type=radio]').each(function(input, i){ + input.set('name', 'clone_' + i); + if (input.get('checked')) element.getElements('input[type=radio]')[i].set('checked', true); + }); } - var date = this, - dayOfWeek = (7 + date.get('day') - firstDayOfWeek) % 7, - dividend = 0, - firstDayOfYear; - - if (firstDayOfWeek == 1){ - // ISO-8601, week belongs to year that has the most days of the week (i.e. has the thursday of the week) - var month = date.get('month'), - startOfWeek = date.get('date') - dayOfWeek; - - if (month == 11 && startOfWeek > 28) return 1; // Week 1 of next year + return clone.inject(this.list).setPosition(this.getDroppableCoordinates(this.element)); + }, - if (month == 0 && startOfWeek < -2){ - // Use a date from last year to determine the week - date = new Date(date).decrement('day', dayOfWeek); - dayOfWeek = 0; - } + getDroppables: function(){ + var droppables = this.list.getChildren().erase(this.clone).erase(this.element); + if (!this.options.constrain) droppables.append(this.lists).erase(this.list); + return droppables; + }, - firstDayOfYear = new Date(date.get('year'), 0, 1).get('day') || 7; - if (firstDayOfYear > 4) dividend = -7; // First week of the year is not week 1 + insert: function(dragging, element){ + var where = 'inside'; + if (this.lists.contains(element)){ + this.list = element; + this.drag.droppables = this.getDroppables(); } else { - // In other cultures the first week of the year is always week 1 and the last week always 53 or 54. - // Days in the same week can have a different weeknumber if the week spreads across two years. - firstDayOfYear = new Date(date.get('year'), 0, 1).get('day'); + where = this.element.getAllPrevious().contains(element) ? 
'before' : 'after'; } + this.element.inject(element, where); + this.fireEvent('sort', [this.element, this.clone]); + }, - dividend += date.get('dayofyear'); - dividend += 6 - dayOfWeek; // Add days so we calculate the current date's week as a full week - dividend += (7 + firstDayOfYear - firstDayOfWeek) % 7; // Make up for first week of the year not being a full week - - return (dividend / 7); - }, - - getOrdinal: function(day){ - return Date.getMsg('ordinal', day || this.get('date')); - }, - - getTimezone: function(){ - return this.toString() - .replace(/^.*? ([A-Z]{3}).[0-9]{4}.*$/, '$1') - .replace(/^.*?\(([A-Z])[a-z]+ ([A-Z])[a-z]+ ([A-Z])[a-z]+\)$/, '$1$2$3'); - }, - - getGMTOffset: function(){ - var off = this.get('timezoneOffset'); - return ((off > 0) ? '-' : '+') + pad((off.abs() / 60).floor(), 2) + pad(off % 60, 2); - }, - - setAMPM: function(ampm){ - ampm = ampm.toUpperCase(); - var hr = this.get('hr'); - if (hr > 11 && ampm == 'AM') return this.decrement('hour', 12); - else if (hr < 12 && ampm == 'PM') return this.increment('hour', 12); - return this; - }, - - getAMPM: function(){ - return (this.get('hr') < 12) ? 
'AM' : 'PM'; - }, - - parse: function(str){ - this.set('time', Date.parse(str)); - return this; - }, - - isValid: function(date){ - if (!date) date = this; - return typeOf(date) == 'date' && !isNaN(date.valueOf()); - }, - - format: function(format){ - if (!this.isValid()) return 'invalid date'; - - if (!format) format = '%x %X'; - if (typeof format == 'string') format = formats[format.toLowerCase()] || format; - if (typeof format == 'function') return format(this); - - var d = this; - return format.replace(/%([a-z%])/gi, - function($0, $1){ - switch ($1){ - case 'a': return Date.getMsg('days_abbr')[d.get('day')]; - case 'A': return Date.getMsg('days')[d.get('day')]; - case 'b': return Date.getMsg('months_abbr')[d.get('month')]; - case 'B': return Date.getMsg('months')[d.get('month')]; - case 'c': return d.format('%a %b %d %H:%M:%S %Y'); - case 'd': return pad(d.get('date'), 2); - case 'e': return pad(d.get('date'), 2, ' '); - case 'H': return pad(d.get('hr'), 2); - case 'I': return pad((d.get('hr') % 12) || 12, 2); - case 'j': return pad(d.get('dayofyear'), 3); - case 'k': return pad(d.get('hr'), 2, ' '); - case 'l': return pad((d.get('hr') % 12) || 12, 2, ' '); - case 'L': return pad(d.get('ms'), 3); - case 'm': return pad((d.get('mo') + 1), 2); - case 'M': return pad(d.get('min'), 2); - case 'o': return d.get('ordinal'); - case 'p': return Date.getMsg(d.get('ampm')); - case 's': return Math.round(d / 1000); - case 'S': return pad(d.get('seconds'), 2); - case 'T': return d.format('%H:%M:%S'); - case 'U': return pad(d.get('week'), 2); - case 'w': return d.get('day'); - case 'x': return d.format(Date.getMsg('shortDate')); - case 'X': return d.format(Date.getMsg('shortTime')); - case 'y': return d.get('year').toString().substr(2); - case 'Y': return d.get('year'); - case 'z': return d.get('GMTOffset'); - case 'Z': return d.get('Timezone'); - } - return $1; - } - ); - }, - - toISOString: function(){ - return this.format('iso8601'); - } - -}).alias({ - toJSON: 
'toISOString', - compare: 'diff', - strftime: 'format' -}); - -// The day and month abbreviations are standardized, so we cannot use simply %a and %b because they will get localized -var rfcDayAbbr = ['Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat'], - rfcMonthAbbr = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']; - -var formats = { - db: '%Y-%m-%d %H:%M:%S', - compact: '%Y%m%dT%H%M%S', - 'short': '%d %b %H:%M', - 'long': '%B %d, %Y %H:%M', - rfc822: function(date){ - return rfcDayAbbr[date.get('day')] + date.format(', %d ') + rfcMonthAbbr[date.get('month')] + date.format(' %Y %H:%M:%S %Z'); - }, - rfc2822: function(date){ - return rfcDayAbbr[date.get('day')] + date.format(', %d ') + rfcMonthAbbr[date.get('month')] + date.format(' %Y %H:%M:%S %z'); - }, - iso8601: function(date){ - return ( - date.getUTCFullYear() + '-' + - pad(date.getUTCMonth() + 1, 2) + '-' + - pad(date.getUTCDate(), 2) + 'T' + - pad(date.getUTCHours(), 2) + ':' + - pad(date.getUTCMinutes(), 2) + ':' + - pad(date.getUTCSeconds(), 2) + '.' + - pad(date.getUTCMilliseconds(), 3) + 'Z' - ); - } -}; - -var parsePatterns = [], - nativeParse = Date.parse; - -var parseWord = function(type, word, num){ - var ret = -1, - translated = Date.getMsg(type + 's'); - switch (typeOf(word)){ - case 'object': - ret = translated[word.get(type)]; - break; - case 'number': - ret = translated[word]; - if (!ret) throw new Error('Invalid ' + type + ' index: ' + word); - break; - case 'string': - var match = translated.filter(function(name){ - return this.test(name); - }, new RegExp('^' + word, 'i')); - if (!match.length) throw new Error('Invalid ' + type + ' string'); - if (match.length > 1) throw new Error('Ambiguous ' + type); - ret = match[0]; - } - - return (num) ? translated.indexOf(ret) : ret; -}; - -var startCentury = 1900, - startYear = 70; - -Date.extend({ - - getMsg: function(key, args){ - return Locale.get('Date.' 
+ key, args); - }, - - units: { - ms: Function.from(1), - second: Function.from(1000), - minute: Function.from(60000), - hour: Function.from(3600000), - day: Function.from(86400000), - week: Function.from(608400000), - month: function(month, year){ - var d = new Date; - return Date.daysInMonth(month != null ? month : d.get('mo'), year != null ? year : d.get('year')) * 86400000; - }, - year: function(year){ - year = year || new Date().get('year'); - return Date.isLeapYear(year) ? 31622400000 : 31536000000; - } - }, - - daysInMonth: function(month, year){ - return [31, Date.isLeapYear(year) ? 29 : 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31][month]; - }, - - isLeapYear: function(year){ - return ((year % 4 === 0) && (year % 100 !== 0)) || (year % 400 === 0); - }, - - parse: function(from){ - var t = typeOf(from); - if (t == 'number') return new Date(from); - if (t != 'string') return from; - from = from.clean(); - if (!from.length) return null; - - var parsed; - parsePatterns.some(function(pattern){ - var bits = pattern.re.exec(from); - return (bits) ? 
(parsed = pattern.handler(bits)) : false; - }); - - if (!(parsed && parsed.isValid())){ - parsed = new Date(nativeParse(from)); - if (!(parsed && parsed.isValid())) parsed = new Date(from.toInt()); - } - return parsed; - }, - - parseDay: function(day, num){ - return parseWord('day', day, num); - }, - - parseMonth: function(month, num){ - return parseWord('month', month, num); - }, - - parseUTC: function(value){ - var localDate = new Date(value); - var utcSeconds = Date.UTC( - localDate.get('year'), - localDate.get('mo'), - localDate.get('date'), - localDate.get('hr'), - localDate.get('min'), - localDate.get('sec'), - localDate.get('ms') - ); - return new Date(utcSeconds); - }, - - orderIndex: function(unit){ - return Date.getMsg('dateOrder').indexOf(unit) + 1; - }, - - defineFormat: function(name, format){ - formats[name] = format; - return this; - }, - - - - defineParser: function(pattern){ - parsePatterns.push((pattern.re && pattern.handler) ? pattern : build(pattern)); - return this; - }, - - defineParsers: function(){ - Array.flatten(arguments).each(Date.defineParser); - return this; - }, - - define2DigitYearStart: function(year){ - startYear = year % 100; - startCentury = year - startYear; - return this; - } - -}).extend({ - defineFormats: Date.defineFormat.overloadSetter() -}); - -var regexOf = function(type){ - return new RegExp('(?:' + Date.getMsg(type).map(function(name){ - return name.substr(0, 3); - }).join('|') + ')[a-z]*'); -}; - -var replacers = function(key){ - switch (key){ - case 'T': - return '%H:%M:%S'; - case 'x': // iso8601 covers yyyy-mm-dd, so just check if month is first - return ((Date.orderIndex('month') == 1) ? '%m[-./]%d' : '%d[-./]%m') + '([-./]%y)?'; - case 'X': - return '%H([.:]%M)?([.:]%S([.:]%s)?)? ?%p? 
?%z?'; - } - return null; -}; - -var keys = { - d: /[0-2]?[0-9]|3[01]/, - H: /[01]?[0-9]|2[0-3]/, - I: /0?[1-9]|1[0-2]/, - M: /[0-5]?\d/, - s: /\d+/, - o: /[a-z]*/, - p: /[ap]\.?m\.?/, - y: /\d{2}|\d{4}/, - Y: /\d{4}/, - z: /Z|[+-]\d{2}(?::?\d{2})?/ -}; - -keys.m = keys.I; -keys.S = keys.M; - -var currentLanguage; - -var recompile = function(language){ - currentLanguage = language; - - keys.a = keys.A = regexOf('days'); - keys.b = keys.B = regexOf('months'); - - parsePatterns.each(function(pattern, i){ - if (pattern.format) parsePatterns[i] = build(pattern.format); - }); -}; - -var build = function(format){ - if (!currentLanguage) return {format: format}; - - var parsed = []; - var re = (format.source || format) // allow format to be regex - .replace(/%([a-z])/gi, - function($0, $1){ - return replacers($1) || $0; - } - ).replace(/\((?!\?)/g, '(?:') // make all groups non-capturing - .replace(/ (?!\?|\*)/g, ',? ') // be forgiving with spaces and commas - .replace(/%([a-z%])/gi, - function($0, $1){ - var p = keys[$1]; - if (!p) return $1; - parsed.push($1); - return '(' + p.source + ')'; - } - ).replace(/\[a-z\]/gi, '[a-z\\u00c0-\\uffff;\&]'); // handle unicode words - - return { - format: format, - re: new RegExp('^' + re + '$', 'i'), - handler: function(bits){ - bits = bits.slice(1).associate(parsed); - var date = new Date().clearTime(), - year = bits.y || bits.Y; - - if (year != null) handle.call(date, 'y', year); // need to start in the right year - if ('d' in bits) handle.call(date, 'd', 1); - if ('m' in bits || bits.b || bits.B) handle.call(date, 'm', 1); - - for (var key in bits) handle.call(date, key, bits[key]); - return date; - } - }; -}; - -var handle = function(key, value){ - if (!value) return this; - - switch (key){ - case 'a': case 'A': return this.set('day', Date.parseDay(value, true)); - case 'b': case 'B': return this.set('mo', Date.parseMonth(value, true)); - case 'd': return this.set('date', value); - case 'H': case 'I': return this.set('hr', 
value); - case 'm': return this.set('mo', value - 1); - case 'M': return this.set('min', value); - case 'p': return this.set('ampm', value.replace(/\./g, '')); - case 'S': return this.set('sec', value); - case 's': return this.set('ms', ('0.' + value) * 1000); - case 'w': return this.set('day', value); - case 'Y': return this.set('year', value); - case 'y': - value = +value; - if (value < 100) value += startCentury + (value < startYear ? 100 : 0); - return this.set('year', value); - case 'z': - if (value == 'Z') value = '+00'; - var offset = value.match(/([+-])(\d{2}):?(\d{2})?/); - offset = (offset[1] + '1') * (offset[2] * 60 + (+offset[3] || 0)) + this.getTimezoneOffset(); - return this.set('time', this - offset * 60000); - } - - return this; -}; - -Date.defineParsers( - '%Y([-./]%m([-./]%d((T| )%X)?)?)?', // "1999-12-31", "1999-12-31 11:59pm", "1999-12-31 23:59:59", ISO8601 - '%Y%m%d(T%H(%M%S?)?)?', // "19991231", "19991231T1159", compact - '%x( %X)?', // "12/31", "12.31.99", "12-31-1999", "12/31/2008 11:59 PM" - '%d%o( %b( %Y)?)?( %X)?', // "31st", "31st December", "31 Dec 1999", "31 Dec 1999 11:59pm" - '%b( %d%o)?( %Y)?( %X)?', // Same as above with month and day switched - '%Y %b( %d%o( %X)?)?', // Same as above with year coming first - '%o %b %d %X %z %Y', // "Thu Oct 22 08:11:23 +0000 2009" - '%T', // %H:%M:%S - '%H:%M( ?%p)?' // "11:05pm", "11:05 am" and "11:05" -); - -Locale.addEvent('change', function(language){ - if (Locale.get('Date')) recompile(language); -}).fireEvent('change', Locale.getCurrent()); - -})(); - - -/* ---- - -script: Date.Extras.js - -name: Date.Extras - -description: Extends the Date native object to include extra methods (on top of those in Date.js). 
- -license: MIT-style license - -authors: - - Aaron Newton - - Scott Kyle - -requires: - - /Date - -provides: [Date.Extras] + start: function(event, element){ + if ( + !this.idle || + event.rightClick || + (!this.options.handle && this.options.unDraggableTags.contains(event.target.get('tag'))) + ) return; -... -*/ + this.idle = false; + this.element = element; + this.opacity = element.getStyle('opacity'); + this.list = element.getParent(); + this.clone = this.getClone(event, element); -Date.implement({ + this.drag = new Drag.Move(this.clone, Object.merge({ + + droppables: this.getDroppables() + }, this.options.dragOptions)).addEvents({ + onSnap: function(){ + event.stop(); + this.clone.setStyle('visibility', 'visible'); + this.element.setStyle('opacity', this.options.opacity || 0); + this.fireEvent('start', [this.element, this.clone]); + }.bind(this), + onEnter: this.insert.bind(this), + onCancel: this.end.bind(this), + onComplete: this.end.bind(this) + }); - timeDiffInWords: function(to){ - return Date.distanceOfTimeInWords(this, to || new Date); + this.clone.inject(this.element, 'before'); + this.drag.start(event); }, - timeDiff: function(to, separator){ - if (to == null) to = new Date; - var delta = ((to - this) / 1000).floor().abs(); + end: function(){ + this.drag.detach(); + this.element.setStyle('opacity', this.opacity); + var self = this; + if (this.effect){ + var dim = this.element.getStyles('width', 'height'), + clone = this.clone, + pos = clone.computePosition(this.getDroppableCoordinates(clone)); - var vals = [], - durations = [60, 60, 24, 365, 0], - names = ['s', 'm', 'h', 'd', 'y'], - value, duration; + var destroy = function(){ + this.removeEvent('cancel', destroy); + clone.destroy(); + self.reset(); + }; - for (var item = 0; item < durations.length; item++){ - if (item && !delta) break; - value = delta; - if ((duration = durations[item])){ - value = (delta % duration); - delta = (delta / duration).floor(); - } - vals.unshift(value + (names[item] || 
'')); + this.effect.element = clone; + this.effect.start({ + top: pos.top, + left: pos.left, + width: dim.width, + height: dim.height, + opacity: 0.25 + }).addEvent('cancel', destroy).chain(destroy); + } else { + this.clone.destroy(); + self.reset(); } - - return vals.join(separator || ':'); - } - -}).extend({ - - distanceOfTimeInWords: function(from, to){ - return Date.getTimePhrase(((to - from) / 1000).toInt()); + }, - getTimePhrase: function(delta){ - var suffix = (delta < 0) ? 'Until' : 'Ago'; - if (delta < 0) delta *= -1; - - var units = { - minute: 60, - hour: 60, - day: 24, - week: 7, - month: 52 / 12, - year: 12, - eon: Infinity - }; - - var msg = 'lessThanMinute'; - - for (var unit in units){ - var interval = units[unit]; - if (delta < 1.5 * interval){ - if (delta > 0.75 * interval) msg = unit; - break; - } - delta /= interval; - msg = unit + 's'; - } - - delta = delta.round(); - return Date.getMsg(msg + suffix, delta).substitute({delta: delta}); - } - -}).defineParsers( + reset: function(){ + this.idle = true; + this.fireEvent('complete', this.element); + }, - { - // "today", "tomorrow", "yesterday" - re: /^(?:tod|tom|yes)/i, - handler: function(bits){ - var d = new Date().clearTime(); - switch (bits[0]){ - case 'tom': return d.increment(); - case 'yes': return d.decrement(); - default: return d; + serialize: function(){ + var params = Array.link(arguments, { + modifier: Type.isFunction, + index: function(obj){ + return obj != null; } - } - }, + }); + var serial = this.lists.map(function(list){ + return list.getChildren().map(params.modifier || function(element){ + return element.get('id'); + }, this); + }, this); - { - // "next Wednesday", "last Thursday" - re: /^(next|last) ([a-z]+)$/i, - handler: function(bits){ - var d = new Date().clearTime(); - var day = d.getDay(); - var newDay = Date.parseDay(bits[2], true); - var addDays = newDay - day; - if (newDay <= day) addDays += 7; - if (bits[1] == 'last') addDays -= 7; - return d.set('date', d.getDate() + 
addDays); - } + var index = params.index; + if (this.lists.length == 1) index = 0; + return (index || index === 0) && index >= 0 && index < this.lists.length ? serial[index] : serial; } -).alias('timeAgoInWords', 'timeDiffInWords'); - +}); /* --- @@ -1235,10 +970,8 @@ var special = { 'S': /[е═е·е ]/g, 't': /[е╔её]/g, 'T': /[е╓е╒]/g, - 'ue': /[ц╪]/g, - 'UE': /[ц°]/g, - 'u': /[ц╧ц╨ц╩е╞б╣]/g, - 'U': /[ц≥ц ц⌡е╝]/g, + 'u': /[ц╧ц╨ц╩е╞ц╪б╣]/g, + 'U': /[ц≥ц ц⌡е╝ц°]/g, 'y': /[ц©ц╫]/g, 'Y': /[е╦ц²]/g, 'z': /[е╬е╨е╪]/g, @@ -1263,7 +996,16 @@ tidy = { '-': /[\u2013]/g, // '--': /[\u2014]/g, '»': /[\uFFFD]/g -}; +}, + +conversions = { + ms: 1, + s: 1000, + m: 6e4, + h: 36e5 +}, + +findUnits = /(\d*.?\d+)([msh]+)/; var walk = function(string, replacements){ var result = string, key; @@ -1325,13 +1067,19 @@ String.implement({ if (trail) string += trail; } return string; + }, + + ms: function(){ + // "Borrowed" from https://gist.github.com/1503944 + var units = findUnits.exec(this); + if (units == null) return Number(this); + return Number(units[1]) * conversions[units[2]]; } }); })(); - /* --- @@ -1348,8 +1096,8 @@ authors: requires: - Core/Element - - /String.Extras - - /MooTools.More + - String.Extras + - MooTools.More provides: [Element.Forms] @@ -1473,7 +1221,6 @@ Element.implement({ }); - /* --- @@ -1493,7 +1240,7 @@ authors: requires: - Core/Element.Style - Core/Element.Dimensions - - /MooTools.More + - MooTools.More provides: [Element.Measure] @@ -1642,7 +1389,6 @@ Element.implement({ })(); - /* --- @@ -1710,13 +1456,15 @@ var local = Element.Position = { }, setOffsetOption: function(element, options){ - var parentOffset = {x: 0, y: 0}, - offsetParent = element.measure(function(){ - return document.id(this.getOffsetParent()); - }), - parentScroll = offsetParent.getScroll(); + var parentOffset = {x: 0, y: 0}; + var parentScroll = {x: 0, y: 0}; + var offsetParent = element.measure(function(){ + return document.id(this.getOffsetParent()); + }); if (!offsetParent || 
offsetParent == element.getDocument().body) return; + + parentScroll = offsetParent.getScroll(); parentOffset = offsetParent.measure(function(){ var position = this.getPosition(); if (this.getStyle('position') == 'fixed'){ @@ -1879,7 +1627,6 @@ Element.implement({ })(Element.prototype.position); - /* --- @@ -1896,7 +1643,7 @@ authors: requires: - Core/Element.Style - - /MooTools.More + - MooTools.More provides: [Element.Shortcuts] @@ -1957,1014 +1704,1212 @@ Document.implement({ }); +/* +--- + +script: Object.Extras.js + +name: Object.Extras + +description: Extra Object generics, like getFromPath which allows a path notation to child elements. + +license: MIT-style license + +authors: + - Aaron Newton + +requires: + - Core/Object + - MooTools.More + +provides: [Object.Extras] + +... +*/ + +(function(){ + +var defined = function(value){ + return value != null; +}; + +var hasOwnProperty = Object.prototype.hasOwnProperty; + +Object.extend({ + + getFromPath: function(source, parts){ + if (typeof parts == 'string') parts = parts.split('.'); + for (var i = 0, l = parts.length; i < l; i++){ + if (hasOwnProperty.call(source, parts[i])) source = source[parts[i]]; + else return null; + } + return source; + }, + + cleanValues: function(object, method){ + method = method || defined; + for (var key in object) if (!method(object[key])){ + delete object[key]; + } + return object; + }, + + erase: function(object, key){ + if (hasOwnProperty.call(object, key)) delete object[key]; + return object; + }, + + run: function(object){ + var args = Array.slice(arguments, 1); + for (var key in object) if (object[key].apply){ + object[key].apply(object, args); + } + return object; + } + +}); + +})(); /* --- -script: Fx.Scroll.js +script: Locale.js -name: Fx.Scroll +name: Locale -description: Effect to smoothly scroll any element, including the window. +description: Provides methods for localization. 
license: MIT-style license authors: - - Valerio Proietti + - Aaron Newton + - Arian Stolwijk requires: - - Core/Fx - - Core/Element.Event - - Core/Element.Dimensions - - /MooTools.More + - Core/Events + - Object.Extras + - MooTools.More -provides: [Fx.Scroll] +provides: [Locale, Lang] ... */ (function(){ -Fx.Scroll = new Class({ +var current = null, + locales = {}, + inherits = {}; - Extends: Fx, +var getSet = function(set){ + if (instanceOf(set, Locale.Set)) return set; + else return locales[set]; +}; - options: { - offset: {x: 0, y: 0}, - wheelStops: true +var Locale = this.Locale = { + + define: function(locale, set, key, value){ + var name; + if (instanceOf(locale, Locale.Set)){ + name = locale.name; + if (name) locales[name] = locale; + } else { + name = locale; + if (!locales[name]) locales[name] = new Locale.Set(name); + locale = locales[name]; + } + + if (set) locale.define(set, key, value); + + + + if (!current) current = locale; + + return locale; }, - initialize: function(element, options){ - this.element = this.subject = document.id(element); - this.parent(options); + use: function(locale){ + locale = getSet(locale); - if (typeOf(this.element) != 'element') this.element = document.id(this.element.getDocument().body); + if (locale){ + current = locale; - if (this.options.wheelStops){ - var stopper = this.element, - cancel = this.cancel.pass(false, this); - this.addEvent('start', function(){ - stopper.addEvent('mousewheel', cancel); - }, true); - this.addEvent('complete', function(){ - stopper.removeEvent('mousewheel', cancel); - }, true); + this.fireEvent('change', locale); + + } + + return this; }, - set: function(){ - var now = Array.flatten(arguments); - if (Browser.firefox) now = [Math.round(now[0]), Math.round(now[1])]; // not needed anymore in newer firefox versions - this.element.scrollTo(now[0], now[1]); + getCurrent: function(){ + return current; + }, + + get: function(key, args){ + return (current) ? 
current.get(key, args) : ''; + }, + + inherit: function(locale, inherits, set){ + locale = getSet(locale); + + if (locale) locale.inherit(inherits, set); return this; }, - compute: function(from, to, delta){ - return [0, 1].map(function(i){ - return Fx.compute(from[i], to[i], delta); - }); + list: function(){ + return Object.keys(locales); + } + +}; + +Object.append(Locale, new Events); + +Locale.Set = new Class({ + + sets: {}, + + inherits: { + locales: [], + sets: {} }, - start: function(x, y){ - if (!this.check(x, y)) return this; - var scroll = this.element.getScroll(); - return this.parent([scroll.x, scroll.y], [x, y]); + initialize: function(name){ + this.name = name || ''; }, - calculateScroll: function(x, y){ - var element = this.element, - scrollSize = element.getScrollSize(), - scroll = element.getScroll(), - size = element.getSize(), - offset = this.options.offset, - values = {x: x, y: y}; + define: function(set, key, value){ + var defineData = this.sets[set]; + if (!defineData) defineData = {}; + + if (key){ + if (typeOf(key) == 'object') defineData = Object.merge(defineData, key); + else defineData[key] = value; + } + this.sets[set] = defineData; + + return this; + }, + + get: function(key, args, _base){ + var value = Object.getFromPath(this.sets, key); + if (value != null){ + var type = typeOf(value); + if (type == 'function') value = value.apply(null, Array.from(args)); + else if (type == 'object') value = Object.clone(value); + return value; + } + + // get value of inherited locales + var index = key.indexOf('.'), + set = index < 0 ? 
key : key.substr(0, index), + names = (this.inherits.sets[set] || []).combine(this.inherits.locales).include('en-US'); + if (!_base) _base = []; + + for (var i = 0, l = names.length; i < l; i++){ + if (_base.contains(names[i])) continue; + _base.include(names[i]); + + var locale = locales[names[i]]; + if (!locale) continue; - for (var z in values){ - if (!values[z] && values[z] !== 0) values[z] = scroll[z]; - if (typeOf(values[z]) != 'number') values[z] = scrollSize[z] - size[z]; - values[z] += offset[z]; + value = locale.get(key, args, _base); + if (value != null) return value; } - return [values.x, values.y]; + return ''; }, - toTop: function(){ - return this.start.apply(this, this.calculateScroll(false, 0)); - }, + inherit: function(names, set){ + names = Array.from(names); - toLeft: function(){ - return this.start.apply(this, this.calculateScroll(0, false)); - }, + if (set && !this.inherits.sets[set]) this.inherits.sets[set] = []; - toRight: function(){ - return this.start.apply(this, this.calculateScroll('right', false)); - }, + var l = names.length; + while (l--) (set ? this.inherits.sets[set] : this.inherits.locales).unshift(names[l]); - toBottom: function(){ - return this.start.apply(this, this.calculateScroll(false, 'bottom')); - }, + return this; + } - toElement: function(el, axes){ - axes = axes ? Array.from(axes) : ['x', 'y']; - var scroll = isBody(this.element) ? {x: 0, y: 0} : this.element.getScroll(); - var position = Object.map(document.id(el).getPosition(this.element), function(value, axis){ - return axes.contains(axis) ? value + scroll[axis] : false; - }); - return this.start.apply(this, this.calculateScroll(position.x, position.y)); - }, +}); - toElementEdge: function(el, axes, offset){ - axes = axes ? 
Array.from(axes) : ['x', 'y']; - el = document.id(el); - var to = {}, - position = el.getPosition(this.element), - size = el.getSize(), - scroll = this.element.getScroll(), - containerSize = this.element.getSize(), - edge = { - x: position.x + size.x, - y: position.y + size.y - }; - ['x', 'y'].each(function(axis){ - if (axes.contains(axis)){ - if (edge[axis] > scroll[axis] + containerSize[axis]) to[axis] = edge[axis] - containerSize[axis]; - if (position[axis] < scroll[axis]) to[axis] = position[axis]; - } - if (to[axis] == null) to[axis] = scroll[axis]; - if (offset && offset[axis]) to[axis] = to[axis] + offset[axis]; - }, this); - if (to.x != scroll.x || to.y != scroll.y) this.start(to.x, to.y); - return this; - }, +})(); - toElementCenter: function(el, axes, offset){ - axes = axes ? Array.from(axes) : ['x', 'y']; - el = document.id(el); - var to = {}, - position = el.getPosition(this.element), - size = el.getSize(), - scroll = this.element.getScroll(), - containerSize = this.element.getSize(); +/* +--- - ['x', 'y'].each(function(axis){ - if (axes.contains(axis)){ - to[axis] = position[axis] - (containerSize[axis] - size[axis]) / 2; - } - if (to[axis] == null) to[axis] = scroll[axis]; - if (offset && offset[axis]) to[axis] = to[axis] + offset[axis]; - }, this); +name: Locale.en-US.Date - if (to.x != scroll.x || to.y != scroll.y) this.start(to.x, to.y); - return this; - } +description: Date messages for US English. -}); +license: MIT-style license +authors: + - Aaron Newton +requires: + - Locale -function isBody(element){ - return (/^(?:body|html)$/i).test(element.tagName); -} +provides: [Locale.en-US.Date] -})(); +... 
+*/ + +Locale.define('en-US', 'Date', { + + months: ['January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December'], + months_abbr: ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'], + days: ['Sunday', 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday'], + days_abbr: ['Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat'], + + // Culture's date order: MM/DD/YYYY + dateOrder: ['month', 'date', 'year'], + shortDate: '%m/%d/%Y', + shortTime: '%I:%M%p', + AM: 'AM', + PM: 'PM', + firstDayOfWeek: 0, + + // Date.Extras + ordinal: function(dayOfMonth){ + // 1st, 2nd, 3rd, etc. + return (dayOfMonth > 3 && dayOfMonth < 21) ? 'th' : ['th', 'st', 'nd', 'rd', 'th'][Math.min(dayOfMonth % 10, 4)]; + }, + + lessThanMinuteAgo: 'less than a minute ago', + minuteAgo: 'about a minute ago', + minutesAgo: '{delta} minutes ago', + hourAgo: 'about an hour ago', + hoursAgo: 'about {delta} hours ago', + dayAgo: '1 day ago', + daysAgo: '{delta} days ago', + weekAgo: '1 week ago', + weeksAgo: '{delta} weeks ago', + monthAgo: '1 month ago', + monthsAgo: '{delta} months ago', + yearAgo: '1 year ago', + yearsAgo: '{delta} years ago', + + lessThanMinuteUntil: 'less than a minute from now', + minuteUntil: 'about a minute from now', + minutesUntil: '{delta} minutes from now', + hourUntil: 'about an hour from now', + hoursUntil: 'about {delta} hours from now', + dayUntil: '1 day from now', + daysUntil: '{delta} days from now', + weekUntil: '1 week from now', + weeksUntil: '{delta} weeks from now', + monthUntil: '1 month from now', + monthsUntil: '{delta} months from now', + yearUntil: '1 year from now', + yearsUntil: '{delta} years from now' +}); /* --- -script: Fx.Slide.js +script: Date.js -name: Fx.Slide +name: Date -description: Effect to slide an element in and out of view. +description: Extends the Date native object to include methods useful in managing dates. 
license: MIT-style license authors: - - Valerio Proietti + - Aaron Newton + - Nicholas Barthelemy - https://svn.nbarthelemy.com/date-js/ + - Harald Kirshner - mail [at] digitarald.de; http://digitarald.de + - Scott Kyle - scott [at] appden.com; http://appden.com requires: - - Core/Fx - - Core/Element.Style - - /MooTools.More + - Core/Array + - Core/String + - Core/Number + - MooTools.More + - Locale + - Locale.en-US.Date -provides: [Fx.Slide] +provides: [Date] ... */ -Fx.Slide = new Class({ - - Extends: Fx, +(function(){ - options: { - mode: 'vertical', - wrapper: false, - hideOverflow: true, - resetHeight: false - }, +var Date = this.Date; - initialize: function(element, options){ - element = this.element = this.subject = document.id(element); - this.parent(options); - options = this.options; +var DateMethods = Date.Methods = { + ms: 'Milliseconds', + year: 'FullYear', + min: 'Minutes', + mo: 'Month', + sec: 'Seconds', + hr: 'Hours' +}; - var wrapper = element.retrieve('wrapper'), - styles = element.getStyles('margin', 'position', 'overflow'); +['Date', 'Day', 'FullYear', 'Hours', 'Milliseconds', 'Minutes', 'Month', 'Seconds', 'Time', 'TimezoneOffset', + 'Week', 'Timezone', 'GMTOffset', 'DayOfYear', 'LastMonth', 'LastDayOfMonth', 'UTCDate', 'UTCDay', 'UTCFullYear', + 'AMPM', 'Ordinal', 'UTCHours', 'UTCMilliseconds', 'UTCMinutes', 'UTCMonth', 'UTCSeconds', 'UTCMilliseconds'].each(function(method){ + Date.Methods[method.toLowerCase()] = method; +}); - if (options.hideOverflow) styles = Object.append(styles, {overflow: 'hidden'}); - if (options.wrapper) wrapper = document.id(options.wrapper).setStyles(styles); +var pad = function(n, digits, string){ + if (digits == 1) return n; + return n < Math.pow(10, digits - 1) ? 
(string || '0') + pad(n, digits - 1, string) : n; +}; - if (!wrapper) wrapper = new Element('div', { - styles: styles - }).wraps(element); +Date.implement({ - element.store('wrapper', wrapper).setStyle('margin', 0); - if (element.getStyle('overflow') == 'visible') element.setStyle('overflow', 'hidden'); + set: function(prop, value){ + prop = prop.toLowerCase(); + var method = DateMethods[prop] && 'set' + DateMethods[prop]; + if (method && this[method]) this[method](value); + return this; + }.overloadSetter(), - this.now = []; - this.open = true; - this.wrapper = wrapper; + get: function(prop){ + prop = prop.toLowerCase(); + var method = DateMethods[prop] && 'get' + DateMethods[prop]; + if (method && this[method]) return this[method](); + return null; + }.overloadGetter(), - this.addEvent('complete', function(){ - this.open = (wrapper['offset' + this.layout.capitalize()] != 0); - if (this.open && this.options.resetHeight) wrapper.setStyle('height', ''); - }, true); + clone: function(){ + return new Date(this.get('time')); }, - vertical: function(){ - this.margin = 'margin-top'; - this.layout = 'height'; - this.offset = this.element.offsetHeight; - }, + increment: function(interval, times){ + interval = interval || 'day'; + times = times != null ? 
times : 1; - horizontal: function(){ - this.margin = 'margin-left'; - this.layout = 'width'; - this.offset = this.element.offsetWidth; - }, + switch (interval){ + case 'year': + return this.increment('month', times * 12); + case 'month': + var d = this.get('date'); + this.set('date', 1).set('mo', this.get('mo') + times); + return this.set('date', d.min(this.get('lastdayofmonth'))); + case 'week': + return this.increment('day', times * 7); + case 'day': + return this.set('date', this.get('date') + times); + } + + if (!Date.units[interval]) throw new Error(interval + ' is not a supported interval'); - set: function(now){ - this.element.setStyle(this.margin, now[0]); - this.wrapper.setStyle(this.layout, now[1]); - return this; + return this.set('time', this.get('time') + times * Date.units[interval]()); }, - compute: function(from, to, delta){ - return [0, 1].map(function(i){ - return Fx.compute(from[i], to[i], delta); - }); + decrement: function(interval, times){ + return this.increment(interval, -1 * (times != null ? times : 1)); }, - start: function(how, mode){ - if (!this.check(how, mode)) return this; - this[mode || this.options.mode](); - - var margin = this.element.getStyle(this.margin).toInt(), - layout = this.wrapper.getStyle(this.layout).toInt(), - caseIn = [[margin, layout], [0, this.offset]], - caseOut = [[margin, layout], [-this.offset, 0]], - start; - - switch (how){ - case 'in': start = caseIn; break; - case 'out': start = caseOut; break; - case 'toggle': start = (layout == 0) ? 
caseIn : caseOut; - } - return this.parent(start[0], start[1]); + isLeapYear: function(){ + return Date.isLeapYear(this.get('year')); }, - slideIn: function(mode){ - return this.start('in', mode); + clearTime: function(){ + return this.set({hr: 0, min: 0, sec: 0, ms: 0}); }, - slideOut: function(mode){ - return this.start('out', mode); - }, + diff: function(date, resolution){ + if (typeOf(date) == 'string') date = Date.parse(date); - hide: function(mode){ - this[mode || this.options.mode](); - this.open = false; - return this.set([-this.offset, 0]); + return ((date - this) / Date.units[resolution || 'day'](3, 3)).round(); // non-leap year, 30-day month }, - show: function(mode){ - this[mode || this.options.mode](); - this.open = true; - return this.set([0, this.offset]); + getLastDayOfMonth: function(){ + return Date.daysInMonth(this.get('mo'), this.get('year')); }, - toggle: function(mode){ - return this.start('toggle', mode); - } + getDayOfYear: function(){ + return (Date.UTC(this.get('year'), this.get('mo'), this.get('date') + 1) + - Date.UTC(this.get('year'), 0, 1)) / Date.units.day(); + }, -}); + setDay: function(day, firstDayOfWeek){ + if (firstDayOfWeek == null){ + firstDayOfWeek = Date.getMsg('firstDayOfWeek'); + if (firstDayOfWeek === '') firstDayOfWeek = 1; + } -Element.Properties.slide = { + day = (7 + Date.parseDay(day, true) - firstDayOfWeek) % 7; + var currentDay = (7 + this.get('day') - firstDayOfWeek) % 7; - set: function(options){ - this.get('slide').cancel().setOptions(options); - return this; + return this.increment('day', day - currentDay); }, - get: function(){ - var slide = this.retrieve('slide'); - if (!slide){ - slide = new Fx.Slide(this, {link: 'cancel'}); - this.store('slide', slide); + getWeek: function(firstDayOfWeek){ + if (firstDayOfWeek == null){ + firstDayOfWeek = Date.getMsg('firstDayOfWeek'); + if (firstDayOfWeek === '') firstDayOfWeek = 1; } - return slide; - } -}; + var date = this, + dayOfWeek = (7 + date.get('day') - 
firstDayOfWeek) % 7, + dividend = 0, + firstDayOfYear; -Element.implement({ + if (firstDayOfWeek == 1){ + // ISO-8601, week belongs to year that has the most days of the week (i.e. has the thursday of the week) + var month = date.get('month'), + startOfWeek = date.get('date') - dayOfWeek; - slide: function(how, mode){ - how = how || 'toggle'; - var slide = this.get('slide'), toggle; - switch (how){ - case 'hide': slide.hide(mode); break; - case 'show': slide.show(mode); break; - case 'toggle': - var flag = this.retrieve('slide:flag', slide.open); - slide[flag ? 'slideOut' : 'slideIn'](mode); - this.store('slide:flag', !flag); - toggle = true; - break; - default: slide.start(how, mode); + if (month == 11 && startOfWeek > 28) return 1; // Week 1 of next year + + if (month == 0 && startOfWeek < -2){ + // Use a date from last year to determine the week + date = new Date(date).decrement('day', dayOfWeek); + dayOfWeek = 0; + } + + firstDayOfYear = new Date(date.get('year'), 0, 1).get('day') || 7; + if (firstDayOfYear > 4) dividend = -7; // First week of the year is not week 1 + } else { + // In other cultures the first week of the year is always week 1 and the last week always 53 or 54. + // Days in the same week can have a different weeknumber if the week spreads across two years. + firstDayOfYear = new Date(date.get('year'), 0, 1).get('day'); } - if (!toggle) this.eliminate('slide:flag'); - return this; - } -}); + dividend += date.get('dayofyear'); + dividend += 6 - dayOfWeek; // Add days so we calculate the current date's week as a full week + dividend += (7 + firstDayOfYear - firstDayOfWeek) % 7; // Make up for first week of the year not being a full week + return (dividend / 7); + }, -/* ---- + getOrdinal: function(day){ + return Date.getMsg('ordinal', day || this.get('date')); + }, -script: Drag.js + getTimezone: function(){ + return this.toString() + .replace(/^.*? 
([A-Z]{3}).[0-9]{4}.*$/, '$1') + .replace(/^.*?\(([A-Z])[a-z]+ ([A-Z])[a-z]+ ([A-Z])[a-z]+\)$/, '$1$2$3'); + }, -name: Drag + getGMTOffset: function(){ + var off = this.get('timezoneOffset'); + return ((off > 0) ? '-' : '+') + pad((off.abs() / 60).floor(), 2) + pad(off % 60, 2); + }, -description: The base Drag Class. Can be used to drag and resize Elements using mouse events. + setAMPM: function(ampm){ + ampm = ampm.toUpperCase(); + var hr = this.get('hr'); + if (hr > 11 && ampm == 'AM') return this.decrement('hour', 12); + else if (hr < 12 && ampm == 'PM') return this.increment('hour', 12); + return this; + }, -license: MIT-style license + getAMPM: function(){ + return (this.get('hr') < 12) ? 'AM' : 'PM'; + }, -authors: - - Valerio Proietti - - Tom Occhinno - - Jan Kassens + parse: function(str){ + this.set('time', Date.parse(str)); + return this; + }, -requires: - - Core/Events - - Core/Options - - Core/Element.Event - - Core/Element.Style - - Core/Element.Dimensions - - /MooTools.More + isValid: function(date){ + if (!date) date = this; + return typeOf(date) == 'date' && !isNaN(date.valueOf()); + }, -provides: [Drag] -... 
+ format: function(format){ + if (!this.isValid()) return 'invalid date'; -*/ + if (!format) format = '%x %X'; + if (typeof format == 'string') format = formats[format.toLowerCase()] || format; + if (typeof format == 'function') return format(this); -var Drag = new Class({ + var d = this; + return format.replace(/%([a-z%])/gi, + function($0, $1){ + switch ($1){ + case 'a': return Date.getMsg('days_abbr')[d.get('day')]; + case 'A': return Date.getMsg('days')[d.get('day')]; + case 'b': return Date.getMsg('months_abbr')[d.get('month')]; + case 'B': return Date.getMsg('months')[d.get('month')]; + case 'c': return d.format('%a %b %d %H:%M:%S %Y'); + case 'd': return pad(d.get('date'), 2); + case 'e': return pad(d.get('date'), 2, ' '); + case 'H': return pad(d.get('hr'), 2); + case 'I': return pad((d.get('hr') % 12) || 12, 2); + case 'j': return pad(d.get('dayofyear'), 3); + case 'k': return pad(d.get('hr'), 2, ' '); + case 'l': return pad((d.get('hr') % 12) || 12, 2, ' '); + case 'L': return pad(d.get('ms'), 3); + case 'm': return pad((d.get('mo') + 1), 2); + case 'M': return pad(d.get('min'), 2); + case 'o': return d.get('ordinal'); + case 'p': return Date.getMsg(d.get('ampm')); + case 's': return Math.round(d / 1000); + case 'S': return pad(d.get('seconds'), 2); + case 'T': return d.format('%H:%M:%S'); + case 'U': return pad(d.get('week'), 2); + case 'w': return d.get('day'); + case 'x': return d.format(Date.getMsg('shortDate')); + case 'X': return d.format(Date.getMsg('shortTime')); + case 'y': return d.get('year').toString().substr(2); + case 'Y': return d.get('year'); + case 'z': return d.get('GMTOffset'); + case 'Z': return d.get('Timezone'); + } + return $1; + } + ); + }, - Implements: [Events, Options], + toISOString: function(){ + return this.format('iso8601'); + } - options: {/* - onBeforeStart: function(thisElement){}, - onStart: function(thisElement, event){}, - onSnap: function(thisElement){}, - onDrag: function(thisElement, event){}, - onCancel: 
function(thisElement){}, - onComplete: function(thisElement, event){},*/ - snap: 6, - unit: 'px', - grid: false, - style: true, - limit: false, - handle: false, - invert: false, - preventDefault: false, - stopPropagation: false, - modifiers: {x: 'left', y: 'top'} +}).alias({ + toJSON: 'toISOString', + compare: 'diff', + strftime: 'format' +}); + +// The day and month abbreviations are standardized, so we cannot use simply %a and %b because they will get localized +var rfcDayAbbr = ['Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat'], + rfcMonthAbbr = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']; + +var formats = { + db: '%Y-%m-%d %H:%M:%S', + compact: '%Y%m%dT%H%M%S', + 'short': '%d %b %H:%M', + 'long': '%B %d, %Y %H:%M', + rfc822: function(date){ + return rfcDayAbbr[date.get('day')] + date.format(', %d ') + rfcMonthAbbr[date.get('month')] + date.format(' %Y %H:%M:%S %Z'); + }, + rfc2822: function(date){ + return rfcDayAbbr[date.get('day')] + date.format(', %d ') + rfcMonthAbbr[date.get('month')] + date.format(' %Y %H:%M:%S %z'); }, + iso8601: function(date){ + return ( + date.getUTCFullYear() + '-' + + pad(date.getUTCMonth() + 1, 2) + '-' + + pad(date.getUTCDate(), 2) + 'T' + + pad(date.getUTCHours(), 2) + ':' + + pad(date.getUTCMinutes(), 2) + ':' + + pad(date.getUTCSeconds(), 2) + '.' + + pad(date.getUTCMilliseconds(), 3) + 'Z' + ); + } +}; - initialize: function(){ - var params = Array.link(arguments, { - 'options': Type.isObject, - 'element': function(obj){ - return obj != null; - } - }); +var parsePatterns = [], + nativeParse = Date.parse; - this.element = document.id(params.element); - this.document = this.element.getDocument(); - this.setOptions(params.options || {}); - var htype = typeOf(this.options.handle); - this.handles = ((htype == 'array' || htype == 'collection') ? 
$$(this.options.handle) : document.id(this.options.handle)) || this.element; - this.mouse = {'now': {}, 'pos': {}}; - this.value = {'start': {}, 'now': {}}; +var parseWord = function(type, word, num){ + var ret = -1, + translated = Date.getMsg(type + 's'); + switch (typeOf(word)){ + case 'object': + ret = translated[word.get(type)]; + break; + case 'number': + ret = translated[word]; + if (!ret) throw new Error('Invalid ' + type + ' index: ' + word); + break; + case 'string': + var match = translated.filter(function(name){ + return this.test(name); + }, new RegExp('^' + word, 'i')); + if (!match.length) throw new Error('Invalid ' + type + ' string'); + if (match.length > 1) throw new Error('Ambiguous ' + type); + ret = match[0]; + } - this.selection = (Browser.ie) ? 'selectstart' : 'mousedown'; + return (num) ? translated.indexOf(ret) : ret; +}; +var startCentury = 1900, + startYear = 70; - if (Browser.ie && !Drag.ondragstartFixed){ - document.ondragstart = Function.from(false); - Drag.ondragstartFixed = true; - } +Date.extend({ - this.bound = { - start: this.start.bind(this), - check: this.check.bind(this), - drag: this.drag.bind(this), - stop: this.stop.bind(this), - cancel: this.cancel.bind(this), - eventStop: Function.from(false) - }; - this.attach(); + getMsg: function(key, args){ + return Locale.get('Date.' + key, args); }, - attach: function(){ - this.handles.addEvent('mousedown', this.bound.start); - return this; + units: { + ms: Function.from(1), + second: Function.from(1000), + minute: Function.from(60000), + hour: Function.from(3600000), + day: Function.from(86400000), + week: Function.from(608400000), + month: function(month, year){ + var d = new Date; + return Date.daysInMonth(month != null ? month : d.get('mo'), year != null ? year : d.get('year')) * 86400000; + }, + year: function(year){ + year = year || new Date().get('year'); + return Date.isLeapYear(year) ? 
31622400000 : 31536000000; + } }, - detach: function(){ - this.handles.removeEvent('mousedown', this.bound.start); - return this; + daysInMonth: function(month, year){ + return [31, Date.isLeapYear(year) ? 29 : 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31][month]; }, - start: function(event){ - var options = this.options; + isLeapYear: function(year){ + return ((year % 4 === 0) && (year % 100 !== 0)) || (year % 400 === 0); + }, - if (event.rightClick) return; + parse: function(from){ + var t = typeOf(from); + if (t == 'number') return new Date(from); + if (t != 'string') return from; + from = from.clean(); + if (!from.length) return null; - if (options.preventDefault) event.preventDefault(); - if (options.stopPropagation) event.stopPropagation(); - this.mouse.start = event.page; + var parsed; + parsePatterns.some(function(pattern){ + var bits = pattern.re.exec(from); + return (bits) ? (parsed = pattern.handler(bits)) : false; + }); - this.fireEvent('beforeStart', this.element); + if (!(parsed && parsed.isValid())){ + parsed = new Date(nativeParse(from)); + if (!(parsed && parsed.isValid())) parsed = new Date(from.toInt()); + } + return parsed; + }, - var limit = options.limit; - this.limit = {x: [], y: []}; + parseDay: function(day, num){ + return parseWord('day', day, num); + }, - var z, coordinates; - for (z in options.modifiers){ - if (!options.modifiers[z]) continue; + parseMonth: function(month, num){ + return parseWord('month', month, num); + }, - var style = this.element.getStyle(options.modifiers[z]); + parseUTC: function(value){ + var localDate = new Date(value); + var utcSeconds = Date.UTC( + localDate.get('year'), + localDate.get('mo'), + localDate.get('date'), + localDate.get('hr'), + localDate.get('min'), + localDate.get('sec'), + localDate.get('ms') + ); + return new Date(utcSeconds); + }, - // Some browsers (IE and Opera) don't always return pixels. 
- if (style && !style.match(/px$/)){ - if (!coordinates) coordinates = this.element.getCoordinates(this.element.getOffsetParent()); - style = coordinates[options.modifiers[z]]; - } + orderIndex: function(unit){ + return Date.getMsg('dateOrder').indexOf(unit) + 1; + }, - if (options.style) this.value.now[z] = (style || 0).toInt(); - else this.value.now[z] = this.element[options.modifiers[z]]; + defineFormat: function(name, format){ + formats[name] = format; + return this; + }, - if (options.invert) this.value.now[z] *= -1; + - this.mouse.pos[z] = event.page[z] - this.value.now[z]; + defineParser: function(pattern){ + parsePatterns.push((pattern.re && pattern.handler) ? pattern : build(pattern)); + return this; + }, - if (limit && limit[z]){ - var i = 2; - while (i--){ - var limitZI = limit[z][i]; - if (limitZI || limitZI === 0) this.limit[z][i] = (typeof limitZI == 'function') ? limitZI() : limitZI; - } - } - } + defineParsers: function(){ + Array.flatten(arguments).each(Date.defineParser); + return this; + }, - if (typeOf(this.options.grid) == 'number') this.options.grid = { - x: this.options.grid, - y: this.options.grid - }; + define2DigitYearStart: function(year){ + startYear = year % 100; + startCentury = year - startYear; + return this; + } - var events = { - mousemove: this.bound.check, - mouseup: this.bound.cancel - }; - events[this.selection] = this.bound.eventStop; - this.document.addEvents(events); - }, +}).extend({ + defineFormats: Date.defineFormat.overloadSetter() +}); - check: function(event){ - if (this.options.preventDefault) event.preventDefault(); - var distance = Math.round(Math.sqrt(Math.pow(event.page.x - this.mouse.start.x, 2) + Math.pow(event.page.y - this.mouse.start.y, 2))); - if (distance > this.options.snap){ - this.cancel(); - this.document.addEvents({ - mousemove: this.bound.drag, - mouseup: this.bound.stop - }); - this.fireEvent('start', [this.element, event]).fireEvent('snap', this.element); - } - }, +var regexOf = function(type){ + 
return new RegExp('(?:' + Date.getMsg(type).map(function(name){ + return name.substr(0, 3); + }).join('|') + ')[a-z]*'); +}; - drag: function(event){ - var options = this.options; +var replacers = function(key){ + switch (key){ + case 'T': + return '%H:%M:%S'; + case 'x': // iso8601 covers yyyy-mm-dd, so just check if month is first + return ((Date.orderIndex('month') == 1) ? '%m[-./]%d' : '%d[-./]%m') + '([-./]%y)?'; + case 'X': + return '%H([.:]%M)?([.:]%S([.:]%s)?)? ?%p? ?%z?'; + } + return null; +}; - if (options.preventDefault) event.preventDefault(); - this.mouse.now = event.page; +var keys = { + d: /[0-2]?[0-9]|3[01]/, + H: /[01]?[0-9]|2[0-3]/, + I: /0?[1-9]|1[0-2]/, + M: /[0-5]?\d/, + s: /\d+/, + o: /[a-z]*/, + p: /[ap]\.?m\.?/, + y: /\d{2}|\d{4}/, + Y: /\d{4}/, + z: /Z|[+-]\d{2}(?::?\d{2})?/ +}; - for (var z in options.modifiers){ - if (!options.modifiers[z]) continue; - this.value.now[z] = this.mouse.now[z] - this.mouse.pos[z]; +keys.m = keys.I; +keys.S = keys.M; - if (options.invert) this.value.now[z] *= -1; +var currentLanguage; - if (options.limit && this.limit[z]){ - if ((this.limit[z][1] || this.limit[z][1] === 0) && (this.value.now[z] > this.limit[z][1])){ - this.value.now[z] = this.limit[z][1]; - } else if ((this.limit[z][0] || this.limit[z][0] === 0) && (this.value.now[z] < this.limit[z][0])){ - this.value.now[z] = this.limit[z][0]; - } - } +var recompile = function(language){ + currentLanguage = language; - if (options.grid[z]) this.value.now[z] -= ((this.value.now[z] - (this.limit[z][0]||0)) % options.grid[z]); + keys.a = keys.A = regexOf('days'); + keys.b = keys.B = regexOf('months'); - if (options.style) this.element.setStyle(options.modifiers[z], this.value.now[z] + options.unit); - else this.element[options.modifiers[z]] = this.value.now[z]; - } + parsePatterns.each(function(pattern, i){ + if (pattern.format) parsePatterns[i] = build(pattern.format); + }); +}; - this.fireEvent('drag', [this.element, event]); - }, +var build = 
function(format){ + if (!currentLanguage) return {format: format}; - cancel: function(event){ - this.document.removeEvents({ - mousemove: this.bound.check, - mouseup: this.bound.cancel - }); - if (event){ - this.document.removeEvent(this.selection, this.bound.eventStop); - this.fireEvent('cancel', this.element); + var parsed = []; + var re = (format.source || format) // allow format to be regex + .replace(/%([a-z])/gi, + function($0, $1){ + return replacers($1) || $0; } - }, + ).replace(/\((?!\?)/g, '(?:') // make all groups non-capturing + .replace(/ (?!\?|\*)/g, ',? ') // be forgiving with spaces and commas + .replace(/%([a-z%])/gi, + function($0, $1){ + var p = keys[$1]; + if (!p) return $1; + parsed.push($1); + return '(' + p.source + ')'; + } + ).replace(/\[a-z\]/gi, '[a-z\\u00c0-\\uffff;\&]'); // handle unicode words - stop: function(event){ - var events = { - mousemove: this.bound.drag, - mouseup: this.bound.stop - }; - events[this.selection] = this.bound.eventStop; - this.document.removeEvents(events); - if (event) this.fireEvent('complete', [this.element, event]); - } + return { + format: format, + re: new RegExp('^' + re + '$', 'i'), + handler: function(bits){ + bits = bits.slice(1).associate(parsed); + var date = new Date().clearTime(), + year = bits.y || bits.Y; -}); + if (year != null) handle.call(date, 'y', year); // need to start in the right year + if ('d' in bits) handle.call(date, 'd', 1); + if ('m' in bits || bits.b || bits.B) handle.call(date, 'm', 1); -Element.implement({ + for (var key in bits) handle.call(date, key, bits[key]); + return date; + } + }; +}; - makeResizable: function(options){ - var drag = new Drag(this, Object.merge({ - modifiers: { - x: 'width', - y: 'height' - } - }, options)); +var handle = function(key, value){ + if (!value) return this; - this.store('resizer', drag); - return drag.addEvent('drag', function(){ - this.fireEvent('resize', drag); - }.bind(this)); + switch (key){ + case 'a': case 'A': return this.set('day', 
Date.parseDay(value, true)); + case 'b': case 'B': return this.set('mo', Date.parseMonth(value, true)); + case 'd': return this.set('date', value); + case 'H': case 'I': return this.set('hr', value); + case 'm': return this.set('mo', value - 1); + case 'M': return this.set('min', value); + case 'p': return this.set('ampm', value.replace(/\./g, '')); + case 'S': return this.set('sec', value); + case 's': return this.set('ms', ('0.' + value) * 1000); + case 'w': return this.set('day', value); + case 'Y': return this.set('year', value); + case 'y': + value = +value; + if (value < 100) value += startCentury + (value < startYear ? 100 : 0); + return this.set('year', value); + case 'z': + if (value == 'Z') value = '+00'; + var offset = value.match(/([+-])(\d{2}):?(\d{2})?/); + offset = (offset[1] + '1') * (offset[2] * 60 + (+offset[3] || 0)) + this.getTimezoneOffset(); + return this.set('time', this - offset * 60000); } -}); + return this; +}; + +Date.defineParsers( + '%Y([-./]%m([-./]%d((T| )%X)?)?)?', // "1999-12-31", "1999-12-31 11:59pm", "1999-12-31 23:59:59", ISO8601 + '%Y%m%d(T%H(%M%S?)?)?', // "19991231", "19991231T1159", compact + '%x( %X)?', // "12/31", "12.31.99", "12-31-1999", "12/31/2008 11:59 PM" + '%d%o( %b( %Y)?)?( %X)?', // "31st", "31st December", "31 Dec 1999", "31 Dec 1999 11:59pm" + '%b( %d%o)?( %Y)?( %X)?', // Same as above with month and day switched + '%Y %b( %d%o( %X)?)?', // Same as above with year coming first + '%o %b %d %X %z %Y', // "Thu Oct 22 08:11:23 +0000 2009" + '%T', // %H:%M:%S + '%H:%M( ?%p)?' // "11:05pm", "11:05 am" and "11:05" +); + +Locale.addEvent('change', function(language){ + if (Locale.get('Date')) recompile(language); +}).fireEvent('change', Locale.getCurrent()); +})(); /* --- -script: Drag.Move.js +script: Fx.Scroll.js -name: Drag.Move +name: Fx.Scroll -description: A Drag extension that provides support for the constraining of draggables to containers and droppables. 
+description: Effect to smoothly scroll any element, including the window. license: MIT-style license authors: - Valerio Proietti - - Tom Occhinno - - Jan Kassens - - Aaron Newton - - Scott Kyle requires: + - Core/Fx + - Core/Element.Event - Core/Element.Dimensions - - /Drag + - MooTools.More -provides: [Drag.Move] +provides: [Fx.Scroll] ... */ -Drag.Move = new Class({ +(function(){ - Extends: Drag, +Fx.Scroll = new Class({ - options: {/* - onEnter: function(thisElement, overed){}, - onLeave: function(thisElement, overed){}, - onDrop: function(thisElement, overed, event){},*/ - droppables: [], - container: false, - precalculate: false, - includeMargins: true, - checkDroppables: true + Extends: Fx, + + options: { + offset: {x: 0, y: 0}, + wheelStops: true }, initialize: function(element, options){ - this.parent(element, options); - element = this.element; - - this.droppables = $$(this.options.droppables); - this.container = document.id(this.options.container); - - if (this.container && typeOf(this.container) != 'element') - this.container = document.id(this.container.getDocument().body); + this.element = this.subject = document.id(element); + this.parent(options); - if (this.options.style){ - if (this.options.modifiers.x == 'left' && this.options.modifiers.y == 'top'){ - var parent = element.getOffsetParent(), - styles = element.getStyles('left', 'top'); - if (parent && (styles.left == 'auto' || styles.top == 'auto')){ - element.setPosition(element.getPosition(parent)); - } - } + if (typeOf(this.element) != 'element') this.element = document.id(this.element.getDocument().body); - if (element.getStyle('position') == 'static') element.setStyle('position', 'absolute'); + if (this.options.wheelStops){ + var stopper = this.element, + cancel = this.cancel.pass(false, this); + this.addEvent('start', function(){ + stopper.addEvent('mousewheel', cancel); + }, true); + this.addEvent('complete', function(){ + stopper.removeEvent('mousewheel', cancel); + }, true); } - - 
this.addEvent('start', this.checkDroppables, true); - this.overed = null; }, - start: function(event){ - if (this.container) this.options.limit = this.calculateLimit(); + set: function(){ + var now = Array.flatten(arguments); + this.element.scrollTo(now[0], now[1]); + return this; + }, - if (this.options.precalculate){ - this.positions = this.droppables.map(function(el){ - return el.getCoordinates(); - }); - } + compute: function(from, to, delta){ + return [0, 1].map(function(i){ + return Fx.compute(from[i], to[i], delta); + }); + }, - this.parent(event); + start: function(x, y){ + if (!this.check(x, y)) return this; + var scroll = this.element.getScroll(); + return this.parent([scroll.x, scroll.y], [x, y]); }, - calculateLimit: function(){ + calculateScroll: function(x, y){ var element = this.element, - container = this.container, - - offsetParent = document.id(element.getOffsetParent()) || document.body, - containerCoordinates = container.getCoordinates(offsetParent), - elementMargin = {}, - elementBorder = {}, - containerMargin = {}, - containerBorder = {}, - offsetParentPadding = {}; - - ['top', 'right', 'bottom', 'left'].each(function(pad){ - elementMargin[pad] = element.getStyle('margin-' + pad).toInt(); - elementBorder[pad] = element.getStyle('border-' + pad).toInt(); - containerMargin[pad] = container.getStyle('margin-' + pad).toInt(); - containerBorder[pad] = container.getStyle('border-' + pad).toInt(); - offsetParentPadding[pad] = offsetParent.getStyle('padding-' + pad).toInt(); - }, this); - - var width = element.offsetWidth + elementMargin.left + elementMargin.right, - height = element.offsetHeight + elementMargin.top + elementMargin.bottom, - left = 0, - top = 0, - right = containerCoordinates.right - containerBorder.right - width, - bottom = containerCoordinates.bottom - containerBorder.bottom - height; - - if (this.options.includeMargins){ - left += elementMargin.left; - top += elementMargin.top; - } else { - right += elementMargin.right; - bottom += 
elementMargin.bottom; - } - - if (element.getStyle('position') == 'relative'){ - var coords = element.getCoordinates(offsetParent); - coords.left -= element.getStyle('left').toInt(); - coords.top -= element.getStyle('top').toInt(); - - left -= coords.left; - top -= coords.top; - if (container.getStyle('position') != 'relative'){ - left += containerBorder.left; - top += containerBorder.top; - } - right += elementMargin.left - coords.left; - bottom += elementMargin.top - coords.top; - - if (container != offsetParent){ - left += containerMargin.left + offsetParentPadding.left; - top += ((Browser.ie6 || Browser.ie7) ? 0 : containerMargin.top) + offsetParentPadding.top; - } - } else { - left -= elementMargin.left; - top -= elementMargin.top; - if (container != offsetParent){ - left += containerCoordinates.left + containerBorder.left; - top += containerCoordinates.top + containerBorder.top; - } + scrollSize = element.getScrollSize(), + scroll = element.getScroll(), + size = element.getSize(), + offset = this.options.offset, + values = {x: x, y: y}; + + for (var z in values){ + if (!values[z] && values[z] !== 0) values[z] = scroll[z]; + if (typeOf(values[z]) != 'number') values[z] = scrollSize[z] - size[z]; + values[z] += offset[z]; } - return { - x: [left, right], - y: [top, bottom] - }; + return [values.x, values.y]; }, - getDroppableCoordinates: function(element){ - var position = element.getCoordinates(); - if (element.getStyle('position') == 'fixed'){ - var scroll = window.getScroll(); - position.left += scroll.x; - position.right += scroll.x; - position.top += scroll.y; - position.bottom += scroll.y; - } - return position; + toTop: function(){ + return this.start.apply(this, this.calculateScroll(false, 0)); }, - checkDroppables: function(){ - var overed = this.droppables.filter(function(el, i){ - el = this.positions ? 
this.positions[i] : this.getDroppableCoordinates(el); - var now = this.mouse.now; - return (now.x > el.left && now.x < el.right && now.y < el.bottom && now.y > el.top); - }, this).getLast(); + toLeft: function(){ + return this.start.apply(this, this.calculateScroll(0, false)); + }, - if (this.overed != overed){ - if (this.overed) this.fireEvent('leave', [this.element, this.overed]); - if (overed) this.fireEvent('enter', [this.element, overed]); - this.overed = overed; - } + toRight: function(){ + return this.start.apply(this, this.calculateScroll('right', false)); }, - drag: function(event){ - this.parent(event); - if (this.options.checkDroppables && this.droppables.length) this.checkDroppables(); + toBottom: function(){ + return this.start.apply(this, this.calculateScroll(false, 'bottom')); }, - stop: function(event){ - this.checkDroppables(); - this.fireEvent('drop', [this.element, this.overed, event]); - this.overed = null; - return this.parent(event); - } + toElement: function(el, axes){ + axes = axes ? Array.from(axes) : ['x', 'y']; + var scroll = isBody(this.element) ? {x: 0, y: 0} : this.element.getScroll(); + var position = Object.map(document.id(el).getPosition(this.element), function(value, axis){ + return axes.contains(axis) ? value + scroll[axis] : false; + }); + return this.start.apply(this, this.calculateScroll(position.x, position.y)); + }, -}); + toElementEdge: function(el, axes, offset){ + axes = axes ? 
Array.from(axes) : ['x', 'y']; + el = document.id(el); + var to = {}, + position = el.getPosition(this.element), + size = el.getSize(), + scroll = this.element.getScroll(), + containerSize = this.element.getSize(), + edge = { + x: position.x + size.x, + y: position.y + size.y + }; -Element.implement({ + ['x', 'y'].each(function(axis){ + if (axes.contains(axis)){ + if (edge[axis] > scroll[axis] + containerSize[axis]) to[axis] = edge[axis] - containerSize[axis]; + if (position[axis] < scroll[axis]) to[axis] = position[axis]; + } + if (to[axis] == null) to[axis] = scroll[axis]; + if (offset && offset[axis]) to[axis] = to[axis] + offset[axis]; + }, this); - makeDraggable: function(options){ - var drag = new Drag.Move(this, options); - this.store('dragger', drag); - return drag; + if (to.x != scroll.x || to.y != scroll.y) this.start(to.x, to.y); + return this; + }, + + toElementCenter: function(el, axes, offset){ + axes = axes ? Array.from(axes) : ['x', 'y']; + el = document.id(el); + var to = {}, + position = el.getPosition(this.element), + size = el.getSize(), + scroll = this.element.getScroll(), + containerSize = this.element.getSize(); + + ['x', 'y'].each(function(axis){ + if (axes.contains(axis)){ + to[axis] = position[axis] - (containerSize[axis] - size[axis]) / 2; + } + if (to[axis] == null) to[axis] = scroll[axis]; + if (offset && offset[axis]) to[axis] = to[axis] + offset[axis]; + }, this); + + if (to.x != scroll.x || to.y != scroll.y) this.start(to.x, to.y); + return this; } }); + +function isBody(element){ + return (/^(?:body|html)$/i).test(element.tagName); +} + +})(); + /* --- -script: Sortables.js +script: Fx.Slide.js -name: Sortables +name: Fx.Slide -description: Class for creating a drag and drop sorting interface for lists of items. +description: Effect to slide an element in and out of view. 
license: MIT-style license authors: - - Tom Occhino + - Valerio Proietti requires: - - Core/Fx.Morph - - /Drag.Move + - Core/Fx + - Core/Element.Style + - MooTools.More -provides: [Sortables] +provides: [Fx.Slide] ... */ -var Sortables = new Class({ +Fx.Slide = new Class({ - Implements: [Events, Options], + Extends: Fx, - options: {/* - onSort: function(element, clone){}, - onStart: function(element, clone){}, - onComplete: function(element){},*/ - opacity: 1, - clone: false, - revert: false, - handle: false, - dragOptions: {} + options: { + mode: 'vertical', + wrapper: false, + hideOverflow: true, + resetHeight: false }, - initialize: function(lists, options){ - this.setOptions(options); + initialize: function(element, options){ + element = this.element = this.subject = document.id(element); + this.parent(options); + options = this.options; - this.elements = []; - this.lists = []; - this.idle = true; + var wrapper = element.retrieve('wrapper'), + styles = element.getStyles('margin', 'position', 'overflow'); - this.addLists($$(document.id(lists) || lists)); + if (options.hideOverflow) styles = Object.append(styles, {overflow: 'hidden'}); + if (options.wrapper) wrapper = document.id(options.wrapper).setStyles(styles); - if (!this.options.clone) this.options.revert = false; - if (this.options.revert) this.effect = new Fx.Morph(null, Object.merge({ - duration: 250, - link: 'cancel' - }, this.options.revert)); - }, + if (!wrapper) wrapper = new Element('div', { + styles: styles + }).wraps(element); - attach: function(){ - this.addLists(this.lists); - return this; + element.store('wrapper', wrapper).setStyle('margin', 0); + if (element.getStyle('overflow') == 'visible') element.setStyle('overflow', 'hidden'); + + this.now = []; + this.open = true; + this.wrapper = wrapper; + + this.addEvent('complete', function(){ + this.open = (wrapper['offset' + this.layout.capitalize()] != 0); + if (this.open && this.options.resetHeight) wrapper.setStyle('height', ''); + }, true); }, 
- detach: function(){ - this.lists = this.removeLists(this.lists); - return this; + vertical: function(){ + this.margin = 'margin-top'; + this.layout = 'height'; + this.offset = this.element.offsetHeight; }, - addItems: function(){ - Array.flatten(arguments).each(function(element){ - this.elements.push(element); - var start = element.retrieve('sortables:start', function(event){ - this.start.call(this, event, element); - }.bind(this)); - (this.options.handle ? element.getElement(this.options.handle) || element : element).addEvent('mousedown', start); - }, this); - return this; + horizontal: function(){ + this.margin = 'margin-left'; + this.layout = 'width'; + this.offset = this.element.offsetWidth; }, - addLists: function(){ - Array.flatten(arguments).each(function(list){ - this.lists.include(list); - this.addItems(list.getChildren()); - }, this); + set: function(now){ + this.element.setStyle(this.margin, now[0]); + this.wrapper.setStyle(this.layout, now[1]); return this; }, - removeItems: function(){ - return $$(Array.flatten(arguments).map(function(element){ - this.elements.erase(element); - var start = element.retrieve('sortables:start'); - (this.options.handle ? 
element.getElement(this.options.handle) || element : element).removeEvent('mousedown', start); - - return element; - }, this)); + compute: function(from, to, delta){ + return [0, 1].map(function(i){ + return Fx.compute(from[i], to[i], delta); + }); }, - removeLists: function(){ - return $$(Array.flatten(arguments).map(function(list){ - this.lists.erase(list); - this.removeItems(list.getChildren()); - - return list; - }, this)); - }, + start: function(how, mode){ + if (!this.check(how, mode)) return this; + this[mode || this.options.mode](); - getClone: function(event, element){ - if (!this.options.clone) return new Element(element.tagName).inject(document.body); - if (typeOf(this.options.clone) == 'function') return this.options.clone.call(this, event, element, this.list); - var clone = element.clone(true).setStyles({ - margin: 0, - position: 'absolute', - visibility: 'hidden', - width: element.getStyle('width') - }).addEvent('mousedown', function(event){ - element.fireEvent('mousedown', event); - }); - //prevent the duplicated radio inputs from unchecking the real one - if (clone.get('html').test('radio')){ - clone.getElements('input[type=radio]').each(function(input, i){ - input.set('name', 'clone_' + i); - if (input.get('checked')) element.getElements('input[type=radio]')[i].set('checked', true); - }); - } + var margin = this.element.getStyle(this.margin).toInt(), + layout = this.wrapper.getStyle(this.layout).toInt(), + caseIn = [[margin, layout], [0, this.offset]], + caseOut = [[margin, layout], [-this.offset, 0]], + start; - return clone.inject(this.list).setPosition(element.getPosition(element.getOffsetParent())); + switch (how){ + case 'in': start = caseIn; break; + case 'out': start = caseOut; break; + case 'toggle': start = (layout == 0) ? 
caseIn : caseOut; + } + return this.parent(start[0], start[1]); }, - getDroppables: function(){ - var droppables = this.list.getChildren().erase(this.clone).erase(this.element); - if (!this.options.constrain) droppables.append(this.lists).erase(this.list); - return droppables; + slideIn: function(mode){ + return this.start('in', mode); }, - insert: function(dragging, element){ - var where = 'inside'; - if (this.lists.contains(element)){ - this.list = element; - this.drag.droppables = this.getDroppables(); - } else { - where = this.element.getAllPrevious().contains(element) ? 'before' : 'after'; - } - this.element.inject(element, where); - this.fireEvent('sort', [this.element, this.clone]); + slideOut: function(mode){ + return this.start('out', mode); }, - start: function(event, element){ - if ( - !this.idle || - event.rightClick || - ['button', 'input', 'a', 'textarea'].contains(event.target.get('tag')) - ) return; + hide: function(mode){ + this[mode || this.options.mode](); + this.open = false; + return this.set([-this.offset, 0]); + }, - this.idle = false; - this.element = element; - this.opacity = element.getStyle('opacity'); - this.list = element.getParent(); - this.clone = this.getClone(event, element); + show: function(mode){ + this[mode || this.options.mode](); + this.open = true; + return this.set([0, this.offset]); + }, - this.drag = new Drag.Move(this.clone, Object.merge({ - - droppables: this.getDroppables() - }, this.options.dragOptions)).addEvents({ - onSnap: function(){ - event.stop(); - this.clone.setStyle('visibility', 'visible'); - this.element.setStyle('opacity', this.options.opacity || 0); - this.fireEvent('start', [this.element, this.clone]); - }.bind(this), - onEnter: this.insert.bind(this), - onCancel: this.end.bind(this), - onComplete: this.end.bind(this) - }); + toggle: function(mode){ + return this.start('toggle', mode); + } - this.clone.inject(this.element, 'before'); - this.drag.start(event); - }, +}); - end: function(){ - 
this.drag.detach(); - this.element.setStyle('opacity', this.opacity); - if (this.effect){ - var dim = this.element.getStyles('width', 'height'), - clone = this.clone, - pos = clone.computePosition(this.element.getPosition(this.clone.getOffsetParent())); +Element.Properties.slide = { - var destroy = function(){ - this.removeEvent('cancel', destroy); - clone.destroy(); - }; + set: function(options){ + this.get('slide').cancel().setOptions(options); + return this; + }, - this.effect.element = clone; - this.effect.start({ - top: pos.top, - left: pos.left, - width: dim.width, - height: dim.height, - opacity: 0.25 - }).addEvent('cancel', destroy).chain(destroy); - } else { - this.clone.destroy(); + get: function(){ + var slide = this.retrieve('slide'); + if (!slide){ + slide = new Fx.Slide(this, {link: 'cancel'}); + this.store('slide', slide); } - this.reset(); - }, + return slide; + } - reset: function(){ - this.idle = true; - this.fireEvent('complete', this.element); - }, +}; - serialize: function(){ - var params = Array.link(arguments, { - modifier: Type.isFunction, - index: function(obj){ - return obj != null; - } - }); - var serial = this.lists.map(function(list){ - return list.getChildren().map(params.modifier || function(element){ - return element.get('id'); - }, this); - }, this); +Element.implement({ - var index = params.index; - if (this.lists.length == 1) index = 0; - return (index || index === 0) && index >= 0 && index < this.lists.length ? serial[index] : serial; + slide: function(how, mode){ + how = how || 'toggle'; + var slide = this.get('slide'), toggle; + switch (how){ + case 'hide': slide.hide(mode); break; + case 'show': slide.show(mode); break; + case 'toggle': + var flag = this.retrieve('slide:flag', slide.open); + slide[flag ? 
'slideOut' : 'slideIn'](mode); + this.store('slide:flag', !flag); + toggle = true; + break; + default: slide.start(how, mode); + } + if (!toggle) this.eliminate('slide:flag'); + return this; } }); - /* --- @@ -3108,7 +3053,6 @@ Request.JSONP = new Class({ Request.JSONP.counter = 0; Request.JSONP.request_map = {}; - /* --- @@ -3125,7 +3069,7 @@ authors: requires: - Core/Request - - /MooTools.More + - MooTools.More provides: [Request.Periodical] @@ -3161,3 +3105,120 @@ Request.implement({ }); +/* +--- + +script: Date.Extras.js + +name: Date.Extras + +description: Extends the Date native object to include extra methods (on top of those in Date.js). + +license: MIT-style license + +authors: + - Aaron Newton + - Scott Kyle + +requires: + - Date + +provides: [Date.Extras] + +... +*/ + +Date.implement({ + + timeDiffInWords: function(to){ + return Date.distanceOfTimeInWords(this, to || new Date); + }, + + timeDiff: function(to, separator){ + if (to == null) to = new Date; + var delta = ((to - this) / 1000).floor().abs(); + + var vals = [], + durations = [60, 60, 24, 365, 0], + names = ['s', 'm', 'h', 'd', 'y'], + value, duration; + + for (var item = 0; item < durations.length; item++){ + if (item && !delta) break; + value = delta; + if ((duration = durations[item])){ + value = (delta % duration); + delta = (delta / duration).floor(); + } + vals.unshift(value + (names[item] || '')); + } + + return vals.join(separator || ':'); + } + +}).extend({ + + distanceOfTimeInWords: function(from, to){ + return Date.getTimePhrase(((to - from) / 1000).toInt()); + }, + + getTimePhrase: function(delta){ + var suffix = (delta < 0) ? 
'Until' : 'Ago'; + if (delta < 0) delta *= -1; + + var units = { + minute: 60, + hour: 60, + day: 24, + week: 7, + month: 52 / 12, + year: 12, + eon: Infinity + }; + + var msg = 'lessThanMinute'; + + for (var unit in units){ + var interval = units[unit]; + if (delta < 1.5 * interval){ + if (delta > 0.75 * interval) msg = unit; + break; + } + delta /= interval; + msg = unit + 's'; + } + + delta = delta.round(); + return Date.getMsg(msg + suffix, delta).substitute({delta: delta}); + } + +}).defineParsers( + + { + // "today", "tomorrow", "yesterday" + re: /^(?:tod|tom|yes)/i, + handler: function(bits){ + var d = new Date().clearTime(); + switch (bits[0]){ + case 'tom': return d.increment(); + case 'yes': return d.decrement(); + default: return d; + } + } + }, + + { + // "next Wednesday", "last Thursday" + re: /^(next|last) ([a-z]+)$/i, + handler: function(bits){ + var d = new Date().clearTime(); + var day = d.getDay(); + var newDay = Date.parseDay(bits[2], true); + var addDays = newDay - day; + if (newDay <= day) addDays += 7; + if (bits[1] == 'last') addDays -= 7; + return d.set('date', d.getDate() + addDays); + } + } + +).alias('timeAgoInWords', 'timeDiffInWords'); diff --git a/couchpotato/static/scripts/vendor/requestAnimationFrame.js b/couchpotato/static/scripts/vendor/requestAnimationFrame.js new file mode 100644 index 0000000000..9737d47a16 --- /dev/null +++ b/couchpotato/static/scripts/vendor/requestAnimationFrame.js @@ -0,0 +1,100 @@ +// requestAnimationFrame() shim by Paul Irish +// http://paulirish.com/2011/requestanimationframe-for-smart-animating/ +window.requestAnimFrame = (function() { + return window.requestAnimationFrame || + window.webkitRequestAnimationFrame || + window.mozRequestAnimationFrame || + window.oRequestAnimationFrame || + window.msRequestAnimationFrame || + function(/* function */ callback, /* DOMElement */ element){ + window.setTimeout(callback, 1000 / 60); + }; +})(); + +/** + * Behaves the same as setInterval except uses 
requestAnimationFrame() where possible for better performance + * @param {function} fn The callback function + * @param {int} delay The delay in milliseconds + */ +window.requestInterval = function(fn, delay) { + if( !window.requestAnimationFrame && + !window.webkitRequestAnimationFrame && + !(window.mozRequestAnimationFrame && window.mozCancelRequestAnimationFrame) && // Firefox 5 ships without cancel support + !window.oRequestAnimationFrame && + !window.msRequestAnimationFrame) + return window.setInterval(fn, delay); + + var start = new Date().getTime(), + handle = new Object(); + + function loop() { + var current = new Date().getTime(), + delta = current - start; + + if(delta >= delay) { + fn.call(); + start = new Date().getTime(); + } + + handle.value = requestAnimFrame(loop); + }; + + handle.value = requestAnimFrame(loop); + return handle; +} + +/** + * Behaves the same as clearInterval except uses cancelRequestAnimationFrame() where possible for better performance + * @param {int|object} fn The callback function + */ +window.clearRequestInterval = function(handle) { + window.cancelAnimationFrame ? window.cancelAnimationFrame(handle.value) : + window.webkitCancelAnimationFrame ? window.webkitCancelAnimationFrame(handle.value) : + window.webkitCancelRequestAnimationFrame ? window.webkitCancelRequestAnimationFrame(handle.value) : /* Support for legacy API */ + window.mozCancelRequestAnimationFrame ? window.mozCancelRequestAnimationFrame(handle.value) : + window.oCancelRequestAnimationFrame ? window.oCancelRequestAnimationFrame(handle.value) : + window.msCancelRequestAnimationFrame ? 
window.msCancelRequestAnimationFrame(handle.value) : + clearInterval(handle); +}; + +/** + * Behaves the same as setTimeout except uses requestAnimationFrame() where possible for better performance + * @param {function} fn The callback function + * @param {int} delay The delay in milliseconds + */ + +window.requestTimeout = function(fn, delay) { + if( !window.requestAnimationFrame && + !window.webkitRequestAnimationFrame && + !(window.mozRequestAnimationFrame && window.mozCancelRequestAnimationFrame) && // Firefox 5 ships without cancel support + !window.oRequestAnimationFrame && + !window.msRequestAnimationFrame) + return window.setTimeout(fn, delay); + + var start = new Date().getTime(), + handle = new Object(); + + function loop(){ + var current = new Date().getTime(), + delta = current - start; + + delta >= delay ? fn.call() : handle.value = requestAnimFrame(loop); + }; + + handle.value = requestAnimFrame(loop); + return handle; +}; + +/** + * Behaves the same as clearTimeout except uses cancelRequestAnimationFrame() where possible for better performance + * @param {int|object} fn The callback function + */ +window.clearRequestTimeout = function(handle) { + window.cancelAnimationFrame ? window.cancelAnimationFrame(handle.value) : + window.webkitCancelAnimationFrame ? window.webkitCancelAnimationFrame(handle.value) : + window.webkitCancelRequestAnimationFrame ? window.webkitCancelRequestAnimationFrame(handle.value) : /* Support for legacy API */ + window.mozCancelRequestAnimationFrame ? window.mozCancelRequestAnimationFrame(handle.value) : + window.oCancelRequestAnimationFrame ? window.oCancelRequestAnimationFrame(handle.value) : + window.msCancelRequestAnimationFrame ? 
window.msCancelRequestAnimationFrame(handle.value) : + clearTimeout(handle); +}; diff --git a/couchpotato/static/style/_fonts.scss b/couchpotato/static/style/_fonts.scss new file mode 100644 index 0000000000..4f91e35e58 --- /dev/null +++ b/couchpotato/static/style/_fonts.scss @@ -0,0 +1,119 @@ +/* Fonts */ +@font-face { + font-family: 'icons'; + src: url('../fonts/icons.eot?3'); + src: url('../fonts/icons.eot?3#iefix') format('embedded-opentype'), + url('../fonts/icons.woff?3') format('woff'), + url('../fonts/icons.ttf?3') format('truetype'), + url('../fonts/icons.svg?3#icons') format('svg'); + font-weight: normal; + font-style: normal; +} + +[class^="icon-"]:before, [class*=" icon-"]:before { + font-family: "icons"; + font-style: normal; + font-weight: normal; + speak: none; + + -webkit-font-smoothing: antialiased; + -moz-osx-font-smoothing: grayscale; +} +.icon-left-arrow:before { content: '\e800'; } +.icon-settings:before { content: '\e801'; } +.icon-search:before { content: '\e802'; } +.icon-dots:before { content: '\e803'; } +.icon-filter:before { content: '\e804'; } +.icon-thumb:before { content: '\e805'; } +.icon-list:before { content: '\e806'; } +.icon-notifications:before { content: '\e807'; } +.icon-emo-cry:before { content: '\e808'; } +.icon-emo-coffee:before { content: '\e809'; } +.icon-emo-sunglasses:before { content: '\e80a'; } +.icon-info:before { content: '\e80b'; } +.icon-download:before { content: '\e80c'; } +.icon-delete:before { content: '\e80d'; } +.icon-cancel:before { content: '\e80e'; } +.icon-redo:before { content: '\e80f'; } +.icon-ok:before { content: '\e810'; } +.icon-dropdown:before { content: '\e811'; } +.icon-play:before { content: '\e812'; } +.icon-plus:before { content: '\e813'; } +.icon-eye:before { content: '\e814'; } +.icon-error:before { content: '\e815'; } +.icon-refresh:before { content: '\e816'; } +.icon-home:before { content: '\e817'; } +.icon-movie:before { content: '\e818'; } +.icon-menu:before, .icon-handle:before { 
content: '\e819'; } +.icon-star:before { content: '\e81a'; } +.icon-star-empty:before { content: '\e81b'; } +.icon-star-half:before { content: '\e81c'; } +.icon-donate:before { content: '\e81d'; } + +@font-face { + font-family: 'OpenSans'; + src: url('../fonts/OpenSans-Light-webfont.eot'); + src: url('../fonts/OpenSans-Light-webfont.eot?#iefix') format('embedded-opentype'), + url('../fonts/OpenSans-Light-webfont.woff') format('woff'), + url('../fonts/OpenSans-Light-webfont.ttf') format('truetype'), + url('../fonts/OpenSans-Light-webfont.svg#OpenSansRegular') format('svg'); + font-weight: 200; + font-style: normal; + +} + +@font-face { + font-family: 'OpenSans'; + src: url('../fonts/OpenSans-Regular-webfont.eot'); + src: url('../fonts/OpenSans-Regular-webfont.eot?#iefix') format('embedded-opentype'), + url('../fonts/OpenSans-Regular-webfont.woff') format('woff'), + url('../fonts/OpenSans-Regular-webfont.ttf') format('truetype'), + url('../fonts/OpenSans-Regular-webfont.svg#OpenSansRegular') format('svg'); + font-weight: normal; + font-style: normal; +} + +@font-face { + font-family: 'OpenSans'; + src: url('../fonts/OpenSans-Italic-webfont.eot'); + src: url('../fonts/OpenSans-Italic-webfont.eot?#iefix') format('embedded-opentype'), + url('../fonts/OpenSans-Italic-webfont.woff') format('woff'), + url('../fonts/OpenSans-Italic-webfont.ttf') format('truetype'), + url('../fonts/OpenSans-Italic-webfont.svg#OpenSansItalic') format('svg'); + font-weight: normal; + font-style: italic; +} + +@font-face { + font-family: 'OpenSans'; + src: url('../fonts/OpenSans-Bold-webfont.eot'); + src: url('../fonts/OpenSans-Bold-webfont.eot?#iefix') format('embedded-opentype'), + url('../fonts/OpenSans-Bold-webfont.woff') format('woff'), + url('../fonts/OpenSans-Bold-webfont.ttf') format('truetype'), + url('../fonts/OpenSans-Bold-webfont.svg#OpenSansBold') format('svg'); + font-weight: bold; + font-style: normal; +} + +@font-face { + font-family: 'OpenSans'; + src: 
url('../fonts/OpenSans-BoldItalic-webfont.eot'); + src: url('../fonts/OpenSans-BoldItalic-webfont.eot?#iefix') format('embedded-opentype'), + url('../fonts/OpenSans-BoldItalic-webfont.woff') format('woff'), + url('../fonts/OpenSans-BoldItalic-webfont.ttf') format('truetype'), + url('../fonts/OpenSans-BoldItalic-webfont.svg#OpenSansBoldItalic') format('svg'); + font-weight: bold; + font-style: italic; +} + +@font-face { + font-family: 'Lobster'; + src: url('../fonts/Lobster-webfont.eot'); + src: url('../fonts/Lobster-webfont.eot?#iefix') format('embedded-opentype'), + url('../fonts/Lobster-webfont.woff2') format('woff2'), + url('../fonts/Lobster-webfont.woff') format('woff'), + url('../fonts/Lobster-webfont.ttf') format('truetype'), + url('../fonts/Lobster-webfont.svg#lobster_14regular') format('svg'); + font-weight: normal; + font-style: normal; +} diff --git a/couchpotato/static/style/_mixins.scss b/couchpotato/static/style/_mixins.scss new file mode 100644 index 0000000000..7a00b334db --- /dev/null +++ b/couchpotato/static/style/_mixins.scss @@ -0,0 +1,101 @@ +$vars: 'primary', 'background', 'off', 'text', 'menu', 'menu_off'; +$theme_light: #ac0000, #FFF, darken(#FFF, 8), #000, #111, lighten(#111, 12); +$theme_dark: #f85c22, #2d2d2d, lighten(#2d2d2d, 3), #FFF, #111, lighten(#111, 12); + +$font_size: 14px; +$header_height: 80px; +$header_width: 132px; +$header_width_mobile: 44px; +$padding: 20px; +$border_radius: 3px; + +@mixin theme($property, $value) { + #{$property}: nth($theme_light, index($vars, $value)); + + .dark & { + #{$property}: nth($theme_dark, index($vars, $value)); + } +} + +@mixin theme-dark { + .dark & { + @content; + } +} + +@function get-theme($property){ + @return nth($theme_light, index($vars, $property)); +} + +@function get-theme-dark($property){ + @return nth($theme_dark, index($vars, $property)); +} + +$cubic: cubic-bezier(0.9,0,0.1,1); + +$mq-phone: 320px !default; +$mq-phablet: 485px !default; +$mq-tablet: 768px !default; +$mq-desktop: 
1024px !default; +$mq-desktop-plus: 1382px !default; + +@mixin media-phone { + @media (max-width : $mq-phone) { + @content; + } +} + +@mixin media-phablet { + @media (max-width : $mq-phablet) { + @content; + } +} + +@mixin media-tablet { + @media (max-width : $mq-tablet) { + @content; + } +} + +@mixin media-desktop { + @media (max-width : $mq-desktop) { + @content; + } +} + +@mixin media-desktop-plus { + @media (max-width : $mq-desktop-plus) { + @content; + } +} + + +@mixin media-phone-and-up { + @media (min-width : $mq-phone) { + @content; + } +} + +@mixin media-phablet-and-up { + @media (min-width : $mq-phablet) { + @content; + } +} + +@mixin media-tablet-and-up { + @media (min-width : $mq-tablet) { + @content; + } +} + +@mixin media-desktop-and-up { + @media (min-width : $mq-desktop) { + @content; + } +} + +@mixin media-desktop-plus-and-up { + @media (min-width : $mq-desktop-plus) { + @content; + } +} diff --git a/couchpotato/static/style/api.css b/couchpotato/static/style/api.css deleted file mode 100644 index c63540986b..0000000000 --- a/couchpotato/static/style/api.css +++ /dev/null @@ -1,96 +0,0 @@ -html { - font-size: 12px; - line-height: 1.5; - font-family: "Helvetica Neue", Helvetica, Arial, Geneva, sans-serif; - font-size: 14px; -} - -* { - margin: 0; - padding: 0; - -moz-box-sizing: border-box; - -webkit-box-sizing: border-box; - box-sizing: border-box; -} - -h1, h2, h3, h4, h5 { - clear: both; - font-size: 14px; -} - -h1 { - font-size: 25px; - padding: 20px 40px; -} - -h2 { - font-size: 20px; -} - -pre { - background: #eee; - font-family: monospace; - margin: 0; - padding: 10px; - width: 100%; - display: block; - font-size: 12px; -} - -.api, .missing { - overflow: hidden; - border-bottom: 1px solid #eee; - padding: 40px; -} - .api:hover { - color: #000; - } - - .api .description { - color: #333; - padding: 0 0 5px; - } - - .api .params { - background: #fafafa; - width: 100%; - } - .api h3 { - clear: both; - float: left; - width: 100px; - } - - .api 
.params { - float: left; - width: 700px; - } - - .api .params td, .api .params th { - padding: 3px 5px; - border-bottom: 1px solid #eee; - } - .api .params tr:last-child td, .api .params tr:last-child th { - border: 0; - } - - .api .params .param { - vertical-align: top; - } - - .api .params .param th { - text-align: left; - width: 100px; - } - - .api .param .type { - font-style: italic; - margin-right: 10px; - width: 100px; - color: #666; - } - - .api .return { - float: left; - width: 700px; - } \ No newline at end of file diff --git a/couchpotato/static/style/api.scss b/couchpotato/static/style/api.scss new file mode 100644 index 0000000000..b3941d82a7 --- /dev/null +++ b/couchpotato/static/style/api.scss @@ -0,0 +1,154 @@ +@import "_mixins"; + +.api_docs { + + h1 { + font-size: 25px; + padding: 20px 40px; + } + + pre { + background: #eee; + font-family: monospace; + margin: 0; + padding: 10px; + width: 100%; + display: block; + font-size: 12px; + } + + body { + display: block; + overflow: auto; + background-color: #FFF; + } + + .api, .missing { + overflow: hidden; + border-bottom: 1px solid #eee; + padding: 40px; + } + + .api { + &:hover { + color: #000; + } + + + .description { + color: #333; + padding: 0 0 5px; + } + + .params { + background: #fafafa; + width: 100%; + + h3 { + clear: both; + float: left; + width: 100px; + } + + td, th { + padding: 3px 5px; + border-bottom: 1px solid #eee; + } + + tr:last-child td, tr:last-child th { + border: 0; + } + + .param { + vertical-align: top; + + th { + text-align: left; + width: 100px; + } + + .type { + font-style: italic; + margin-right: 10px; + width: 100px; + color: #666; + } + } + + .return { + float: left; + width: 700px; + } + } + } + + + .database { + padding: 20px; + margin: 0; + + * { + margin: 0; + padding: 0; + } + + .nav { + li { + display: inline-block; + + a { + padding: 5px; + } + } + + } + } + .database table { + font-size: 11px; + + th { + text-align: left; + } + + tr:hover { + position: relative; + 
z-index: 20; + } + + td { + vertical-align: top; + position: relative; + } + + .id { + width: 100px; + } + + ._rev { + width: 60px; + } + + ._t { + width: 60px; + } + + .form { + width: 600px; + } + + form { + width: 600px; + } + + textarea { + font-size: 12px; + width: 100%; + height: 200px; + } + + input[type=submit] { + display: block; + } + } + +} diff --git a/couchpotato/static/style/combined.min.css b/couchpotato/static/style/combined.min.css new file mode 100644 index 0000000000..62cde0a950 --- /dev/null +++ b/couchpotato/static/style/combined.min.css @@ -0,0 +1,1008 @@ +.page.movie_details,.thumb_list .movie.hover_start .actions{pointer-events:none} +.movies>.description a:hover,.page.movie_details .releases .buttons a:hover{text-decoration:underline} +.update.message{background:#FFF;padding:20px;text-align:center;font-size:1.25em} +.dark .update.message{background:#2d2d2d} +@media (max-width:768px){.update.message{font-size:1em} +} +.update.message a{color:#ac0000;padding:5px} +.dark .update.message a{color:#f85c22} +.search_form{display:inline-block;z-index:11;width:44px;position:relative} +.search_form *{-webkit-transform:rotateZ(360deg);transform:rotateZ(360deg)} +.search_form .icon-search{position:absolute;z-index:2;top:50%;left:0;height:100%;text-align:center;color:#FFF;font-size:20px;-webkit-transform:translateY(-50%);transform:translateY(-50%)} +.dark .search_form .icon-search:hover,.search_form .icon-search:hover{background:#303030} +.search_form .wrapper{position:absolute;left:44px;bottom:0;background:#ac0000;border-radius:3px 0 0 3px;display:none;box-shadow:0 0 15px 2px rgba(0,0,0,.15)} +.dark .search_form .wrapper{background:#f85c22;box-shadow:0 5px 15px 2px rgba(0,0,0,.4)} +.search_form .wrapper:before{-webkit-transform:rotate(45deg);transform:rotate(45deg);content:'';display:block;position:absolute;height:10px;width:10px;background:#ac0000;left:-6px;bottom:16px;z-index:1} +.dark .search_form .wrapper:before{background:#f85c22} +.search_form 
.input{background:#FFF;border-radius:3px 0 0 3px;position:relative;left:4px;height:44px;overflow:hidden;width:100%} +.dark .search_form .input{background:#2d2d2d} +.search_form .input input{position:absolute;top:0;left:0;height:100%;width:100%;z-index:1} +.search_form .input input::-ms-clear{width:0;height:0} +.search_form .input input:focus{background:rgba(255,255,255,.2)} +.dark .search_form .input input:focus{background:rgba(0,0,0,.2)} +.search_form .input input:focus::-webkit-input-placeholder{opacity:.7} +.search_form .input input:focus::-moz-placeholder{opacity:.7} +.search_form .input input:focus:-ms-input-placeholder{opacity:.7} +.page.home .search_form.filled .icon-search:before,.search_form.filled.focused .icon-search:before{content:'\e80e'} +.search_form.filled .input input{background:rgba(255,255,255,.3)} +.dark .search_form.filled .input input{background:rgba(0,0,0,.3)} +.page.home .search_form,.search_form.focused,.search_form.shown{border-color:#04bce6} +.page.home .search_form .wrapper,.search_form.focused .wrapper,.search_form.shown .wrapper{display:block;width:380px;-webkit-transform-origin:0 90%;transform-origin:0 90%} +.page.home .search_form .input input,.search_form.focused .input input,.search_form.shown .input input{opacity:1} +.search_form .results_container{min-height:50px;text-align:left;position:relative;left:4px;display:none;background:#FFF;border-radius:3px 0 0;overflow:hidden} +.dark .search_form .results_container{background:#2d2d2d} +.search_form .results_container .results{max-height:280px;overflow-x:hidden} +.search_form .results_container .results .media_result{overflow:hidden;height:50px;position:relative} +.search_form .results_container .results .media_result .options{position:absolute;height:100%;top:0;left:30px;right:0;display:-webkit-flex;display:-ms-flexbox;display:flex;-webkit-align-items:center;-ms-flex-align:center;align-items:center;background:#ebebeb} +.dark .search_form .results_container .results .media_result 
.options{background:#353535} +.search_form .results_container .results .media_result .options>.in_library_wanted{margin-top:-7px} +.search_form .results_container .results .media_result .options>div{border:0;display:-webkit-flex;display:-ms-flexbox;display:flex;padding:10px;-webkit-align-items:stretch;-ms-flex-align:stretch;align-items:stretch;-webkit-justify-content:space-between;-ms-flex-pack:justify;justify-content:space-between} +@media (max-width:485px){.page.home .search_form .wrapper,.search_form.focused .wrapper,.search_form.shown .wrapper{width:260px} +.search_form .results_container .results .media_result{font-size:12px} +.search_form .results_container .results .media_result .options{left:0} +.search_form .results_container .results .media_result .options>div{padding:3px} +.search_form .results_container .results .media_result .options select{min-width:0;margin-right:2px} +} +.search_form .results_container .results .media_result .options select{display:block;height:100%;width:100%} +.search_form .results_container .results .media_result .options .title{margin-right:5px;width:210px} +@media (max-width:485px){.search_form .results_container .results .media_result .options .title{width:140px;margin-right:2px} +} +.search_form .results_container .results .media_result .options .category,.search_form .results_container .results .media_result .options .profile{margin:0 5px 0 0} +@media (max-width:485px){.search_form .results_container .results .media_result .options .category,.search_form .results_container .results .media_result .options .profile{margin-right:2px} +} +.search_form .results_container .results .media_result .options .add{width:42px;-webkit-flex:1 auto;-ms-flex:1 auto;flex:1 auto} +.search_form .results_container .results .media_result .options .add a{color:#FFF} +.search_form .results_container .results .media_result .options .button{display:block;background:#ac0000;text-align:center;margin:0} +.dark .search_form .results_container .results 
.media_result .options .button{background:#f85c22} +.search_form .results_container .results .media_result .options .message{font-size:20px;color:#fff} +.search_form .results_container .results .media_result .thumbnail{width:30px;min-height:100%;display:block;margin:0;vertical-align:top} +.search_form .results_container .results .media_result .data{position:absolute;height:100%;top:0;left:30px;right:0;cursor:pointer;border-top:1px solid rgba(255,255,255,.08);transition:all .4s cubic-bezier(.9,0,.1,1);will-change:transform;-webkit-transform:translateX(0) rotateZ(360deg);transform:translateX(0) rotateZ(360deg);background:#FFF} +.dark .search_form .results_container .results .media_result .data{background:#2d2d2d;border-color:rgba(255,255,255,.08)} +@media (max-width:485px){.search_form .results_container .results .media_result .thumbnail{display:none} +.search_form .results_container .results .media_result .data{left:0} +} +.search_form .results_container .results .media_result .data:hover{-webkit-transform:translateX(2%) rotateZ(360deg);transform:translateX(2%) rotateZ(360deg)} +.search_form .results_container .results .media_result .data.open{-webkit-transform:translateX(100%) rotateZ(360deg);transform:translateX(100%) rotateZ(360deg)} +.search_form .results_container .results .media_result .data .info{position:absolute;top:20%;left:15px;right:7px;vertical-align:middle} +.search_form .results_container .results .media_result .data .info h2{margin:0;font-weight:300;font-size:1.25em;padding:0;position:absolute;width:100%;display:-webkit-flex;display:-ms-flexbox;display:flex} +.search_form .results_container .results .media_result .data .info h2 .title{display:inline-block;margin:0;text-overflow:ellipsis;overflow:hidden;white-space:nowrap;-webkit-flex:1 auto;-ms-flex:1 auto;flex:1 auto} +.search_form .results_container .results .media_result .data .info h2 .year{opacity:.4;padding:0 5px;width:auto} +.search_form .results_container .results .media_result .data .info h2 
.in_library,.search_form .results_container .results .media_result .data .info h2 .in_wanted{position:absolute;top:15px;left:0;font-size:11px;color:#ac0000} +.dark .search_form .results_container .results .media_result .data .info h2 .in_library,.dark .search_form .results_container .results .media_result .data .info h2 .in_wanted{color:#f85c22} +.search_form .results_container .results .media_result .data .info h2.in_library_wanted .title{margin-top:-7px} +.search_form .results_container .results .media_result:hover .info h2 .year{display:inline-block} +.search_form .results_container .results .media_result:last-child .data{border-bottom:0} +.search_form.focused.filled .results_container,.search_form.shown.filled .results_container{display:block} +.search_form.focused.filled .input,.search_form.shown.filled .input{border-radius:0 0 0 3px} +.page.home .search_form{display:block;padding:20px;width:100%;max-width:500px;margin:20px auto 0;height:106px;position:relative} +@media (max-width:485px){.page.home .search_form{margin-top:10px;height:64px} +} +.page.home .search_form .icon-search{display:block;color:#000;right:20px;top:20px;width:66px;height:66px;line-height:66px;left:auto;-webkit-transform:none;transform:none;font-size:2em;opacity:.5} +.page.home .search_form .wrapper:before,.page.movies .scroll_content{display:none} +.dark .page.home .search_form .icon-search{color:#FFF} +.page.home .search_form .icon-search:hover{background:0 0} +@media (max-width:485px){.page.home .search_form .icon-search{width:44px;height:44px;line-height:44px;right:10px;top:10px;font-size:1.5em} +} +.page.home .search_form .wrapper{border-radius:0;box-shadow:none;bottom:auto;top:20px;left:20px;right:20px;position:absolute;width:auto} +@media (max-width:485px){.page.home .search_form .wrapper{right:10px;top:10px;left:10px} +} +.page.home .search_form .wrapper .input{border-radius:0;left:0;position:absolute;top:0;height:66px} +.page.home .search_form .wrapper .input 
input{box-shadow:0;font-size:2em;font-weight:400;padding-right:66px;background:#FFF} +.dark .page.home .search_form .wrapper .input input{background:#2d2d2d} +.page.home .search_form .wrapper .results_container{min-height:66px;position:absolute;top:66px;left:0;right:0;border:1px solid #ebebeb;border-top:0} +.dark .page.home .search_form .wrapper .results_container{border-color:#353535} +@media (max-width:485px){.page.home .search_form .wrapper .input{height:44px} +.page.home .search_form .wrapper .input input{padding-right:44px;font-size:1em} +.page.home .search_form .wrapper .results_container{top:44px;min-height:44px} +} +@media (min-width:480px){.page.home .search_form .wrapper .results_container .results{max-height:400px} +.page.home .search_form .wrapper .results_container .results .media_result{height:66px} +.page.home .search_form .wrapper .results_container .results .media_result .thumbnail{width:40px} +.page.home .search_form .wrapper .results_container .results .media_result .options{left:40px} +.page.home .search_form .wrapper .results_container .results .media_result .options .title{margin-right:5px;width:320px} +} +@media (min-width:480px) and (max-width:480px){.page.home .search_form .wrapper .results_container .results .media_result{height:44px} +.page.home .search_form .wrapper .results_container .results .media_result .options .title{width:140px;margin-right:2px} +} +@media (min-width:480px){.page.home .search_form .wrapper .results_container .results .media_result .data{left:40px} +} +@media (max-width:485px){.page.home .search_form .wrapper .results_container .results .media_result{height:44px} +.page.home .search_form .wrapper .results_container .results .media_result .options .title{width:140px;margin-right:2px} +} +.big_search{background:#ebebeb} +.dark .big_search{background:#353535} +.page.movies{bottom:auto;z-index:21;height:80px} +.page.movies_manage,.page.movies_wanted{top:80px;padding:0;will-change:top;transition:top .3s 
cubic-bezier(.9,0,.1,1)} +@media (max-width:485px){.page.movies{height:44px} +.page.movies_manage,.page.movies_wanted{top:44px} +} +.mass_editing .page.movies_manage,.mass_editing .page.movies_wanted{top:124px} +.page.movies_manage .load_more,.page.movies_wanted .load_more{text-align:center;padding:20px;font-size:2em;display:block} +.page.movies_manage .empty_manage,.page.movies_wanted .empty_manage{padding:20px} +.page.movies_manage .empty_manage .after_manage,.page.movies_wanted .empty_manage .after_manage{margin-top:20px} +.movie .ripple,.movie input[type=checkbox]{display:none} +.with_navigation .movie input[type=checkbox]{display:inline-block;position:absolute;will-change:opacity;transition:opacity .2s;opacity:0;z-index:2;cursor:pointer} +@media (max-width:485px){.with_navigation .movie input[type=checkbox]{display:none} +} +.with_navigation .movie input[type=checkbox]:hover{opacity:1!important} +.with_navigation .movie:hover input[type=checkbox]{opacity:.5} +.with_navigation .movie.checked input[type=checkbox]{opacity:1} +.movie .quality{font-weight:400} +.movie .quality span{display:inline-block;background:#ebebeb;border:1px solid transparent;color:rgba(0,0,0,.5);border-radius:1px;padding:1px 3px} +.dark .movie .quality span{color:rgba(255,255,255,.5)} +.movie .quality span.failed{background:#993619;color:#FFF} +.movie .quality span.available{color:#009902;border-color:#009902;background:#FFF} +.movie .quality span.snatched{background:#548399;color:#FFF} +.movie .quality span.done,.movie .quality span.downloaded{background:#009902;color:#FFF} +.dark .movie .quality span{background:0 0} +.dark .movie .quality span.available{border-color:transparent;background:0 0} +.dark .movie .quality span.snatched{background:#548399} +.dark .movie .quality span.done,.dark .movie .quality span.downloaded{background:#009902;color:#FFF} +.movie .rating .votes{opacity:.7;margin-left:4px} +.movie.status_suggested .quality{display:none} +.movies{position:relative} +.movies 
.no_movies{display:block;padding:20px} +@media (max-width:768px){.movies .no_movies{padding:10px} +.movies>.description{display:none} +} +.movies .no_movies a{color:#ac0000} +.dark .movies .no_movies a{color:#f85c22} +.movies .message{padding:20px 0;text-align:center} +.movies .message a{color:#ac0000} +.dark .movies .message a{color:#f85c22} +.movies.movies>h2{padding:0 20px;line-height:80px} +@media (max-width:485px){.movies.movies>h2{line-height:44px;padding:0 10px} +.movies .movie .actions{pointer-events:none} +} +.movies>.description{position:absolute;top:0;right:20px;width:auto;line-height:80px;opacity:.7} +.movies>.description a{color:#ac0000;display:inline} +.dark .movies>.description a{color:#f85c22} +.movies>.loading{background:#FFF} +.dark .movies>.loading{background:#2d2d2d} +.movies>.loading .message{color:#000} +.dark .movies>.loading .message{color:#FFF} +.movies>.loading .spinner{background-color:#FFF} +.dark .movies>.loading .spinner{background-color:#2d2d2d} +.movies .movie .actions{will-change:transform,opacity;-webkit-transform:rotateZ(360deg);transform:rotateZ(360deg)} +.movies .progress div{width:50%;padding:5px 10px;display:-webkit-flex;display:-ms-flexbox;display:flex} +.movies .progress div .folder{-webkit-flex:1 auto;-ms-flex:1 auto;flex:1 auto;overflow:hidden;text-overflow:ellipsis;white-space:nowrap;margin-right:10px} +.movies .progress div .percentage{font-weight:700} +.list_list{font-weight:300} +.list_list .movie{display:block;border-bottom:1px solid transparent;border-color:#ebebeb;position:relative;cursor:pointer} +.dark .list_list .movie{border-color:#353535} +.list_list .movie:last-child{border-bottom:none} +.list_list .movie:hover{background:#ebebeb} +.dark .list_list .movie:hover{background:#353535} +.list_list .movie input[type=checkbox]{left:20px;top:50%;-webkit-transform:translateY(-50%);transform:translateY(-50%)} +.list_list .movie .poster{display:none} +.list_list .movie .info{padding:10px 
20px;display:-webkit-flex;display:-ms-flexbox;display:flex;-webkit-flex-flow:row nowrap;-ms-flex-flow:row nowrap;flex-flow:row nowrap;-webkit-align-items:center;-ms-flex-align:center;align-items:center} +.list_list .movie .info .title{-webkit-flex:1 auto;-ms-flex:1 auto;flex:1 auto} +.list_list .movie .info .title span{transition:margin .2s cubic-bezier(.9,0,.1,1);overflow:hidden;text-overflow:ellipsis;white-space:nowrap} +@media (max-width:768px){.movies .progress div{width:100%} +.list_list .movie .info{display:block;padding:10px} +.list_list .movie .info .title{display:-webkit-flex;display:-ms-flexbox;display:flex;-webkit-flex-flow:row nowrap;-ms-flex-flow:row nowrap;flex-flow:row nowrap} +.list_list .movie .info .title span{width:100%} +} +.list_list .movie .info .title .year{display:inline-block;margin:0 10px;opacity:.5} +.list_list .movie .info .eta{font-size:.8em;opacity:.5;margin-right:4px} +@media (max-width:485px){.list_list .movie .info .eta{display:none} +} +.list_list .movie .info .quality{clear:both;overflow:hidden} +.list_list .movie .info .quality span{float:left;font-size:.7em;margin:2px 0 0 2px} +.list_list .movie .info .rating .vote{display:inline-block;min-width:60px;text-align:right} +.list_list .movie .actions{position:absolute;right:10px;top:0;bottom:0;display:none;z-index:10} +.list_list .movie .actions .action{display:inline-block} +.list_list .movie .actions a{height:100%;display:block;background:#FFF;color:#ac0000;padding:10px;width:auto;float:right;text:#000} +.list_list .movie .actions a .icon,.list_list .movie .actions a:before{display:none} +.dark .list_list .movie .actions a{background:#2d2d2d;color:#f85c22;text:#FFF} +.list_list .movie .actions a:hover{background:#ebebeb;color:#000} +.dark .list_list .movie .actions a:hover{background:#353535;color:#FFF} +.list_list .movie:hover .actions{display:block} +@media (max-width:768px){.list_list .movie .info .quality span{margin:2px 2px 0 0} +.list_list .movie:hover .actions{display:none} 
+} +.list_list.with_navigation .movie.checked .info .title span,.list_list.with_navigation .movie:hover .info .title span{margin-left:20px} +.thumb_list{padding:0 5px} +.thumb_list>div:last-child{padding:0 12px} +.thumb_list .movie{overflow:visible;display:inline-block;vertical-align:top;margin-bottom:20px;position:relative;cursor:pointer;width:150px;border:0 solid transparent;border-width:0 6.67px} +.thumb_list .movie .inner{will-change:transform;-webkit-transform:rotateZ(360deg);transform:rotateZ(360deg)} +@media (min-width:4275px) and (max-width:4500px){.thumb_list .movie{width:5%} +} +@media (min-width:4050px) and (max-width:4275px){.thumb_list .movie{width:5.26316%} +} +@media (min-width:3825px) and (max-width:4050px){.thumb_list .movie{width:5.55556%} +} +@media (min-width:3600px) and (max-width:3825px){.thumb_list .movie{width:5.88235%} +} +@media (min-width:3375px) and (max-width:3600px){.thumb_list .movie{width:6.25%} +} +@media (min-width:3150px) and (max-width:3375px){.thumb_list .movie{width:6.66667%} +} +@media (min-width:2925px) and (max-width:3150px){.thumb_list .movie{width:7.14286%} +} +@media (min-width:2700px) and (max-width:2925px){.thumb_list .movie{width:7.69231%} +} +@media (min-width:2475px) and (max-width:2700px){.thumb_list .movie{width:8.33333%} +} +@media (min-width:2250px) and (max-width:2475px){.thumb_list .movie{width:9.09091%} +} +@media (min-width:2025px) and (max-width:2250px){.thumb_list .movie{width:10%} +} +@media (min-width:1800px) and (max-width:2025px){.thumb_list .movie{width:11.11111%} +} +@media (min-width:1575px) and (max-width:1800px){.thumb_list .movie{width:12.5%} +} +@media (min-width:1350px) and (max-width:1575px){.thumb_list .movie{width:14.28571%} +} +@media (min-width:1125px) and (max-width:1350px){.thumb_list .movie{width:16.66667%} +} +@media (min-width:900px) and (max-width:1125px){.thumb_list .movie{width:20%} +} +@media (min-width:675px) and (max-width:900px){.thumb_list .movie{width:25%} +} +@media 
(min-width:450px) and (max-width:675px){.thumb_list .movie{width:33.33333%} +} +@media (min-width:225px) and (max-width:450px){.thumb_list .movie{width:50%} +} +@media (min-width:0px) and (max-width:225px){.thumb_list .movie{width:100%} +} +@media (max-width:768px){.list_list.with_navigation .movie.checked .info .title span,.list_list.with_navigation .movie:hover .info .title span{margin-left:0} +.thumb_list .movie{width:33.333%;border-width:0 5px} +} +@media (max-width:485px){.thumb_list>div:last-child{padding:0 3.33px} +.thumb_list .movie{width:50%;border-width:0 4px} +} +.thumb_list .movie input[type=checkbox]{top:10px;left:10px} +.thumb_list .movie .poster_container{border-radius:3px;position:relative;width:100%;padding-bottom:150%;overflow:hidden} +.thumb_list .movie .poster{position:absolute;background:center no-repeat #ebebeb;background-size:cover;overflow:hidden;height:100%;width:100%} +.dark .thumb_list .movie .poster{background-color:#353535} +.thumb_list .movie .info{clear:both;font-size:.9em} +.thumb_list .movie .info .title{display:-webkit-flex;display:-ms-flexbox;display:flex;padding:3px 0;font-weight:400} +.thumb_list .movie .info .title span{-webkit-flex:1 auto;-ms-flex:1 auto;flex:1 auto;white-space:nowrap;overflow:hidden;text-overflow:ellipsis} +.thumb_list .movie .info .title .year{display:inline-block;margin-left:5px;opacity:.5} +.thumb_list .movie .info .eta{opacity:.5;float:right;margin-left:4px} +.thumb_list .movie .info .quality{white-space:nowrap;overflow:hidden;font-size:.9em} +.thumb_list .movie .info .quality span{font-size:.8em;margin-right:2px} +.thumb_list .movie .actions{background-image:linear-gradient(25deg,rgba(172,0,0,.3) 0,#ac0000 80%);will-change:opacity,visibility;transition:all .4s;transition-property:opacity,visibility;opacity:0;visibility:hidden;position:absolute;top:0;right:0;bottom:0;left:0;text-align:right} +.dark .thumb_list .movie .actions{background-image:linear-gradient(25deg,rgba(248,92,34,.3) 0,#f85c22 80%)} 
+.thumb_list .movie .actions .action{position:relative;margin-right:10px;float:right;clear:both} +.thumb_list .movie .actions .action:first-child{margin-top:10px} +.thumb_list .movie .actions .action a{transition:all 150ms cubic-bezier(.9,0,.1,1);will-change:color,background;transition-property:color,background;display:block;width:auto;padding:6.67px;color:#FFF;border-radius:2px;font-weight:400} +.thumb_list .movie .actions .action a:hover{background:#FFF;color:#ac0000} +.dark .thumb_list .movie .actions .action a:hover{background:#2d2d2d;color:#FFF} +.thumb_list .movie:hover .actions{opacity:1;visibility:visible} +.thumb_list .movie .mask{bottom:44px;border-radius:3px;will-change:opacity;transition:opacity 30ms} +@media (max-width:485px){.thumb_list .movie:hover .actions{display:none} +.page.movie_details{left:0} +} +.page.movie_details .overlay{position:fixed;top:0;bottom:0;right:0;left:132px;background:rgba(0,0,0,.6);border-radius:3px 0 0 3px;opacity:0;will-change:opacity;-webkit-transform:rotateZ(360deg);transform:rotateZ(360deg);transition:opacity .3s ease .4s;z-index:1} +.page.movie_details .overlay .ripple{background:#FFF} +@media (max-width:485px){.page.movie_details .overlay{left:0;border-radius:0;transition:none} +} +.page.movie_details .overlay .close{display:inline-block;text-align:center;font-size:60px;line-height:80px;color:#FFF;width:100%;height:100%;opacity:0;will-change:opacity;transition:opacity .3s ease .2s} +.page.movie_details .overlay .close:before{display:block;width:44px} +.page.movie_details .scroll_content{position:fixed;z-index:2;top:0;bottom:0;right:0;left:176px;background:#FFF;border-radius:3px 0 0 3px;overflow-y:auto;will-change:transform;-webkit-transform:translateX(100%) rotateZ(360deg);transform:translateX(100%) rotateZ(360deg);transition:-webkit-transform 450ms cubic-bezier(.9,0,.1,1);transition:transform 450ms cubic-bezier(.9,0,.1,1)} +.dark .page.movie_details .scroll_content{background:#2d2d2d} +.page.movie_details 
.scroll_content>.head{display:-webkit-flex;display:-ms-flexbox;display:flex;-webkit-flex-flow:row wrap;-ms-flex-flow:row wrap;flex-flow:row wrap;padding:0 20px 0 10px;position:relative;z-index:2;will-change:transform,opacity;-webkit-transform:rotateZ(360deg);transform:rotateZ(360deg)} +.page.movie_details .scroll_content>.head h1{-webkit-flex:1 auto;-ms-flex:1 auto;flex:1 auto;margin:0;font-size:24px;font-weight:300;max-width:100%} +@media (max-width:485px){.page.movie_details .overlay .close{width:44px} +.page.movie_details .scroll_content{left:44px} +.page.movie_details .scroll_content>.head{padding:0;line-height:44px} +.page.movie_details .scroll_content>.head h1{min-width:100%;line-height:44px} +.page.movie_details .scroll_content>.head h1 .more_menu{width:100%} +.page.movie_details .scroll_content>.head h1 .more_menu .icon-dropdown:before{right:10px} +} +.page.movie_details .scroll_content>.head h1 .more_menu a{color:#000} +.dark .page.movie_details .scroll_content>.head h1 .more_menu a{color:#FFF} +.page.movie_details .scroll_content>.head h1 .more_menu .icon-dropdown{padding-right:30px} +.page.movie_details .scroll_content>.head .more_menu{display:inline-block;vertical-align:top;max-width:100%;margin-bottom:0} +.page.movie_details .scroll_content>.head .more_menu>a{float:left;line-height:80px;color:#ac0000} +.dark .page.movie_details .scroll_content>.head .more_menu>a{color:#f85c22} +.page.movie_details .scroll_content>.head .more_menu>a:hover{color:#000} +.dark .page.movie_details .scroll_content>.head .more_menu>a:hover{color:#FFF} +.page.movie_details .scroll_content>.head .more_menu .icon-dropdown{position:relative;padding:0 25px 0 10px} +.page.movie_details .scroll_content>.head .more_menu .icon-dropdown:before{position:absolute;right:10px;top:-2px;opacity:.2} +.page.movie_details .scroll_content>.head .more_menu .icon-dropdown:hover:before{opacity:1} +.page.movie_details .scroll_content>.head .more_menu 
.wrapper{top:70px;padding-top:4px;border-radius:3px 3px 0 0;font-size:14px} +@media (max-width:485px){.page.movie_details .scroll_content>.head .more_menu>a{line-height:44px} +.page.movie_details .scroll_content>.head .more_menu .wrapper{top:25px} +} +.page.movie_details .scroll_content>.head .more_menu .wrapper:before{top:0;left:auto;right:22px} +.page.movie_details .scroll_content>.head .more_menu .wrapper ul{border-radius:3px 3px 0 0;max-height:215px;overflow-y:auto} +.page.movie_details .scroll_content>.head .more_menu .wrapper a{padding-right:30px} +.page.movie_details .scroll_content>.head .more_menu .wrapper a:before{position:absolute;right:10px} +.page.movie_details .scroll_content>.head .more_menu .wrapper a.icon-ok,.page.movie_details .scroll_content>.head .more_menu .wrapper a:hover{color:#ac0000} +.dark .page.movie_details .scroll_content>.head .more_menu .wrapper a.icon-ok,.dark .page.movie_details .scroll_content>.head .more_menu .wrapper a:hover{color:#f85c22} +.page.movie_details .scroll_content>.head .more_menu.title>a{display:inline-block;text-overflow:ellipsis;overflow:hidden;white-space:nowrap;width:100%} +.page.movie_details .scroll_content>.head .more_menu.title .wrapper{-webkit-transform-origin:0 0;transform-origin:0 0;left:0;right:auto} +.page.movie_details .scroll_content>.head .more_menu.title .wrapper:before{left:22px;right:auto} +.page.movie_details .scroll_content>.head .buttons{display:-webkit-flex;display:-ms-flexbox;display:flex;-webkit-flex-wrap:wrap;-ms-flex-wrap:wrap;flex-wrap:wrap} +.page.movie_details .scroll_content>.head .buttons>a{display:inline-block;padding:0 10px;color:#ac0000;line-height:80px} +.dark .page.movie_details .scroll_content>.head .buttons>a{color:#f85c22} +.page.movie_details .scroll_content>.head .buttons>a:hover{background:#ebebeb;color:#000} +.dark .page.movie_details .scroll_content>.head .buttons>a:hover{background:#353535;color:#FFF} +.page.movie_details .scroll_content 
.section{padding:20px;border-top:1px solid rgba(0,0,0,.1);will-change:transform,opacity;-webkit-transform:rotateZ(360deg);transform:rotateZ(360deg)} +.dark .page.movie_details .scroll_content .section{border-color:rgba(255,255,255,.1)} +@media (max-width:485px){.page.movie_details .scroll_content>.head .more_menu.title .wrapper{top:30px;max-width:240px} +.page.movie_details .scroll_content>.head .buttons{margin:0} +.page.movie_details .scroll_content>.head .buttons>a{line-height:44px} +.page.movie_details .scroll_content .section{padding:10px} +} +.page.movie_details .files span,.page.movie_details .releases .item span{white-space:nowrap;padding:6.67px 0;overflow:hidden;text-overflow:ellipsis} +.page.movie_details.show{pointer-events:auto} +.page.movie_details.show .overlay{opacity:1;transition-delay:0s} +.page.movie_details.show .overlay .close{opacity:1;transition-delay:.3s} +.page.movie_details.show .scroll_content{transition-delay:50ms;-webkit-transform:translateX(0) rotateZ(360deg);transform:translateX(0) rotateZ(360deg)} +.page.movie_details .section_description .meta{text-align:right;font-style:italic;font-size:.9em} +.page.movie_details .section_description .meta span{display:inline-block;margin:10px 10px 0} +.page.movie_details .section_description .meta span:last-child{margin-right:0} +.page.movie_details .section_add{background:#ebebeb} +.dark .page.movie_details .section_add{background:#353535} +.page.movie_details .section_add .options>div{display:-webkit-flex;display:-ms-flexbox;display:flex;-webkit-align-items:center;-ms-flex-align:center;align-items:center} +.page.movie_details .section_add .options>div select{display:block;width:100%} +.page.movie_details .section_add .options>div .title{min-width:75px;width:2000px;margin:0 10px 0 0} +.page.movie_details .section_add .options>div .category,.page.movie_details .section_add .options>div .profile{width:200px;min-width:50px;margin:0 10px 0 0} +.page.movie_details .section_add .options>div 
.add{width:200px} +.page.movie_details .section_add .options>div .add .button{background:#FFF;-webkit-flex:1 auto;-ms-flex:1 auto;flex:1 auto;display:block;text-align:center;width:100%;margin:0} +.dark .page.movie_details .section_add .options>div .add .button{background:#2d2d2d} +.page.movie_details .section_add .options>div .add .button:hover{background:#ac0000} +.dark .page.movie_details .section_add .options>div .add .button:hover{background:#f85c22} +.page.movie_details .section_add .data,.page.movie_details .section_add .thumbnail{display:none} +.page.movie_details .files span{text-align:center} +.page.movie_details .files .name{text-align:left;-webkit-flex:1 1 auto;-ms-flex:1 1 auto;flex:1 1 auto} +.page.movie_details .files .type{min-width:80px} +.page.movie_details .releases .buttons{margin-bottom:10px} +.page.movie_details .releases .buttons a{display:inline;color:#ac0000} +.dark .page.movie_details .releases .buttons a{color:#f85c22} +@media (max-width:485px){.page.movie_details .releases .item{display:block} +} +.page.movie_details .releases .item:not(.head):hover{background:#ebebeb;text:#000} +.dark .page.movie_details .releases .item:not(.head):hover{background:#353535;text:#FFF} +.page.movie_details .releases .item span{text-align:center} +.page.movie_details .releases .item span:before{display:none;font-weight:700;opacity:.8;margin-right:3px;width:100%;font-size:.9em} +.page.movie_details .releases .item .name{-webkit-flex:1 auto;-ms-flex:1 auto;flex:1 auto;text-align:left} +.page.movie_details .releases .item.ignored span:not(.actions){opacity:.3} +.page.movie_details .releases .item.ignored .name{text-decoration:line-through} +.page.movie_details .releases .item .actions{padding:0} +@media (max-width:485px){.page.movie_details .releases .item span:before{display:inline-block} +.page.movie_details .releases .item 
span{vertical-align:top;white-space:normal;display:inline-block;width:50%;padding:0;min-width:0;max-width:none;text-align:left;margin-top:3px} +.page.movie_details .releases .item .name{width:100%;font-weight:700} +.page.movie_details .releases .item.head{display:none} +.page.movie_details .releases .item .actions{width:100%;text-align:center} +.page.movie_details .releases .item .actions a{text-align:center} +} +.page.movie_details .releases .item .actions a{display:inline-block;vertical-align:top;padding:6.67px;min-width:26px;color:#000} +.dark .page.movie_details .releases .item .actions a{color:#FFF} +.page.movie_details .releases .item .actions a:hover{color:#ac0000} +.dark .page.movie_details .releases .item .actions a:hover{color:#f85c22} +.page.movie_details .releases .item .actions a:after{margin-left:3px;font-size:.9em} +@media (max-width:485px){.page.movie_details .releases .item .actions a.icon-info:after{content:"more info"} +.page.movie_details .releases .item .actions a.icon-download:after{content:"download"} +.page.movie_details .releases .item .actions a.icon-cancel:after{content:"ignore"} +.page.movie_details .section_trailer.section_trailer{max-height:450px} +} +.page.movie_details .releases .status{min-width:70px;max-width:70px} +.page.movie_details .releases .status:before{content:"Status:"} +.page.movie_details .releases .quality{min-width:60px;max-width:60px} +.page.movie_details .releases .quality:before{content:"Quality:"} +.page.movie_details .releases .size{min-width:50px;max-width:50px} +.page.movie_details .releases .size:before{content:"Size:"} +.page.movie_details .releases .age{min-width:40px;max-width:40px} +.page.movie_details .releases .age:before{content:"Age:"} +.page.movie_details .releases .score{min-width:45px;max-width:45px} +.page.movie_details .releases .score:before{content:"Score:"} +.page.movie_details .releases .provider{min-width:110px;max-width:110px} +.page.movie_details .releases 
.provider:before{content:"Provider:"} +.page.movie_details .releases .actions{min-width:80px;max-width:80px} +.page.movie_details .section_trailer.section_trailer{padding:0;background:#111;max-height:450px;overflow:hidden} +.dark .page.movie_details .section_trailer.section_trailer{background:#111} +.page.movie_details .section_trailer.section_trailer.no_trailer{display:none} +.page.movie_details .section_trailer.section_trailer .trailer_container{max-height:450px;position:relative;overflow:hidden;max-width:800px;margin:0 auto;cursor:pointer} +.page.movie_details .section_trailer.section_trailer .trailer_container .background{opacity:0;background:center no-repeat;background-size:cover;position:relative;z-index:1;max-height:450px;padding-bottom:56.25%;will-change:opacity;transition:opacity 1s} +.page.movie_details .section_trailer.section_trailer .trailer_container .background.visible{opacity:.4} +.page.movie_details .section_trailer.section_trailer .trailer_container .icon-play{opacity:.9;position:absolute;z-index:2;text-align:center;width:100%;top:50%;-webkit-transform:translateY(-50%);transform:translateY(-50%);will-change:opacity;transition:all .3s;color:#FFF;font-size:110px} +@media (max-width:1024px){.page.movie_details .section_trailer.section_trailer .trailer_container .icon-play{font-size:55px} +} +@media (max-width:485px){.page.movie_details .section_trailer.section_trailer .trailer_container{margin-bottom:10px} +.page.movie_details .section_trailer.section_trailer .trailer_container .icon-play{font-size:31.43px} +} +.page.movie_details .section_trailer.section_trailer .trailer_container .icon-play span{transition:all .3s;opacity:.9;position:absolute;font-size:1em;top:50%;left:50%;margin-left:55px;-webkit-transform:translateY(-54%);transform:translateY(-54%);will-change:opacity} +@media (max-width:1024px){.page.movie_details .section_trailer.section_trailer .trailer_container .icon-play span{margin-left:27.5px} +} +@media 
(max-width:485px){.page.movie_details .section_trailer.section_trailer .trailer_container .icon-play span{margin-left:15.71px} +} +.page.movie_details .section_trailer.section_trailer .trailer_container .icon-play span:first-child{margin-left:-55px;-webkit-transform:translate(-100%,-54%);transform:translate(-100%,-54%)} +@media (max-width:1024px){.page.movie_details .section_trailer.section_trailer .trailer_container .icon-play span:first-child{margin-left:-27.5px} +} +@media (max-width:485px){.page.movie_details .section_trailer.section_trailer .trailer_container .icon-play span:first-child{margin-left:-15.71px} +} +.page.movie_details .section_trailer.section_trailer .trailer_container:hover{color:#ac0000} +.dark .page.movie_details .section_trailer.section_trailer .trailer_container:hover{color:#f85c22} +.page.movie_details .section_trailer.section_trailer .trailer_container:hover .icon-play,.page.movie_details .section_trailer.section_trailer .trailer_container:hover .icon-play span{opacity:1} +.page.movie_details .section_trailer.section_trailer .trailer_container iframe{position:absolute;width:100%;height:100%;border:0;top:0;left:0;max-height:450px;z-index:10} +.alph_nav{position:relative} +.alph_nav .mass_edit_form{display:-webkit-flex;display:-ms-flexbox;display:flex;background:#FFF;position:fixed;top:80px;right:0;left:132px;-webkit-flex-flow:row nowrap;-ms-flex-flow:row nowrap;flex-flow:row nowrap;-webkit-align-items:center;-ms-flex-align:center;align-items:center;will-change:max-height;transition:max-height .3s cubic-bezier(.9,0,.1,1);max-height:0;overflow:hidden} +.dark .alph_nav .mass_edit_form{background:#2d2d2d} +.mass_editing .alph_nav .mass_edit_form{max-height:44px} +.alph_nav .mass_edit_form>*{display:-webkit-flex;display:-ms-flexbox;display:flex;-webkit-align-items:center;-ms-flex-align:center;align-items:center} +.alph_nav .mass_edit_form .select{margin:0 10px 0 20px} +@media (max-width:485px){.alph_nav .mass_edit_form .select{margin:0 5px 0 
10px} +} +.alph_nav .mass_edit_form .select .count,.alph_nav .mass_edit_form .select input{margin-right:5px} +.alph_nav .menus .button{padding:0 10px;line-height:80px} +.alph_nav .menus .actions,.alph_nav .menus .counter,.alph_nav .menus .more_menu{float:left} +.alph_nav .menus .actions .wrapper,.alph_nav .menus .counter .wrapper,.alph_nav .menus .more_menu .wrapper{-webkit-transform-origin:92% 0;transform-origin:92% 0;right:-7px} +.alph_nav .menus .actions>a,.alph_nav .menus .counter>a,.alph_nav .menus .more_menu>a{display:inline-block;width:30px;line-height:80px;text-align:center;float:left} +.alph_nav .menus .actions>a:hover,.alph_nav .menus .counter>a:hover,.alph_nav .menus .more_menu>a:hover{background:#ebebeb} +.dark .alph_nav .menus .actions>a:hover,.dark .alph_nav .menus .counter>a:hover,.dark .alph_nav .menus .more_menu>a:hover{background:#353535} +@media (max-width:768px){.alph_nav .menus .actions>a,.alph_nav .menus .counter>a,.alph_nav .menus .more_menu>a{line-height:44px} +.alph_nav .menus .counter{display:none} +} +.alph_nav .menus .counter{line-height:80px;padding:0 10px} +.alph_nav .menus .actions a{display:inline-block} +.alph_nav .menus .actions .active{display:none} +.alph_nav .menus .filter .wrapper{width:320px} +.alph_nav .menus .filter .button{margin-top:-2px} +.alph_nav .menus .filter .search{position:relative} +.alph_nav .menus .filter .search:before{position:absolute;height:100%;line-height:38px;padding-left:10px;font-size:16px;opacity:.5} +.alph_nav .menus .filter .search input{width:100%;padding:10px 10px 10px 30px;background:#FFF;border:#ebebeb;border-bottom:1px solid transparent} +.dark .alph_nav .menus .filter .search input{background:#2d2d2d;border-color:#353535} +.alph_nav .menus .filter .numbers{padding:10px} +.alph_nav .menus .filter .numbers li{float:left;width:10%;height:30px;line-height:30px;text-align:center;opacity:.2;cursor:default;border:0} +.alph_nav .menus .filter .numbers li.active{background:#ebebeb} +.dark .alph_nav 
.menus .filter .numbers li.active{background:#353535} +.alph_nav .menus .filter .numbers li.available{opacity:1;cursor:pointer} +.alph_nav .menus .filter .numbers li.available:hover{background:#ebebeb} +.dark .alph_nav .menus .filter .numbers li.available:hover{background:#353535} +.alph_nav .menus .more_menu .wrapper{top:70px;padding-top:4px;border-radius:3px 3px 0 0;min-width:140px} +@media (max-width:485px){.alph_nav .menus .filter .wrapper{right:-70px;-webkit-transform-origin:75% 0;transform-origin:75% 0} +.alph_nav .menus .filter .wrapper:before{right:83px!important} +.alph_nav .menus .filter .search input{font-size:1.2em} +.alph_nav .menus .more_menu .wrapper{top:44px} +} +.alph_nav .menus .more_menu .wrapper:before{top:0;left:auto;right:22px} +.alph_nav .menus .more_menu .wrapper ul{border-radius:3px 3px 0 0} +.add_new_category{padding:20px;display:block;text-align:center;font-size:20px} +.category{margin-bottom:20px;position:relative} +.category>.delete{position:absolute;padding:6.67px 20px;right:0;cursor:pointer;opacity:.6;color:#fd5353;font-size:1.5em;z-index:2} +.category>.delete:hover{opacity:1} +.category .ctrlHolder:hover{background:0 0} +.page.log .nav li.active,.page.log .nav li:hover:not(.active):not(.filter){background:rgba(255,255,255,.1)} +.category .formHint{opacity:.1} +.category:hover .formHint{opacity:1} +#category_ordering ul{float:left;margin:0;width:275px;padding:0} +#category_ordering li{cursor:-webkit-grab;cursor:grab;border-bottom:1px solid transparent;border-color:#ebebeb;padding:5px;list-style:none} +.dark #category_ordering li{border-color:#353535} +#category_ordering li:last-child{border:0} +#category_ordering li .check{margin:2px 10px 0 0;vertical-align:top} +#category_ordering li>span{display:inline-block;height:20px;vertical-align:top;line-height:20px} +#category_ordering li .handle{width:20px;float:right} +.page.log .nav{text-align:right;padding:0;margin:0} +.page.log .nav li{display:inline-block;padding:5px 10px;margin:0} 
+.page.log .nav li.clear,.page.log .nav li.select{cursor:pointer} +.page.log .nav li.active{font-weight:700;cursor:default} +.page.log .hint{font-style:italic;opacity:.5;margin-top:3px} +@media (max-width:768px){.page.log .nav li.filter,.page.log .nav li:nth-child(10),.page.log .nav li:nth-child(11),.page.log .nav li:nth-child(12),.page.log .nav li:nth-child(7),.page.log .nav li:nth-child(8),.page.log .nav li:nth-child(9){display:none} +.page.log .nav li:last-child{display:inline-block} +.page.log .hint{display:none} +} +.page.log .container{padding:20px;overflow:hidden;line-height:150%;-webkit-transform:rotateZ(360deg);transform:rotateZ(360deg)} +@media (max-width:485px){.page.log .container{padding:20px 10px} +} +.page.log .container.loading{text-align:center;font-size:20px;padding:100px 50px} +.page.log .container select{vertical-align:top} +.page.log .container .time{font-size:.75em;border-top:1px solid transparent;border-color:#ebebeb;padding:0 3px;font-family:Lucida Console,Monaco,Nimbus Mono L,monospace,serif;display:block;cursor:pointer;position:relative} +.dark .page.log .container .time{border-color:#353535} +.page.log .container .time:hover{background:#ebebeb} +.dark .page.log .container .time:hover{background:#353535} +.page.log .container .time.highlight{background:#ebebeb} +.dark .page.log .container .time.highlight{background:#353535} +.page.log .container .time span{display:inline-block;padding:5px 0 3px} +.page.log .container .time .time_type{position:absolute;width:130px;top:0;left:0} +.page.log .container .time .message{display:block;margin:0 0 0 130px} +@media (max-width:768px){.page.log .container .time .time_type{position:static;width:auto} +.page.log .container .time .message{display:inline;margin:0 0 0 6.67px;padding:0} +} +.page.log .container .error{color:#FFA4A4} +.page.log .container .debug span{opacity:.6} +.page.log [data-filter=DEBUG] .error,.page.log [data-filter=DEBUG] .info,.page.log [data-filter=ERROR] .debug,.page.log 
[data-filter=ERROR] .info,.page.log [data-filter=INFO] .debug,.page.log [data-filter=INFO] .error{display:none} +.report_popup.report_popup{position:fixed;left:0;right:0;bottom:0;top:0;z-index:99999;font-size:14px;display:-webkit-flex;display:-ms-flexbox;display:flex;-webkit-justify-content:center;-ms-flex-pack:center;justify-content:center;-webkit-align-items:center;-ms-flex-align:center;align-items:center;opacity:1;color:#FFF;pointer-events:auto} +.disable_hover .scroll_content>*,.mask,.ripple{pointer-events:none} +.report_popup.report_popup .button{margin:10px 0;padding:10px;color:#FFF;background:#ac0000} +.dark .report_popup.report_popup .button{background:#f85c22} +.report_popup.report_popup .bug{width:80%;height:80%;max-height:800px;max-width:800px;display:-webkit-flex;display:-ms-flexbox;display:flex;-webkit-flex-flow:column nowrap;-ms-flex-flow:column nowrap;flex-flow:column nowrap} +.report_popup.report_popup .bug>span{margin:10px 0 20px} +.report_popup.report_popup .bug textarea{display:block;width:100%;background:#FFF;padding:20px;overflow:auto;color:#666;height:70%;font-size:12px} +.do_report.do_report{z-index:10000;position:absolute;padding:10px;background:#ac0000;color:#FFF!important} +.dark .do_report.do_report{background:#f85c22} +.add_new_profile{padding:20px;display:block;text-align:center;font-size:20px;border-bottom:1px solid transparent;border-color:#ebebeb} +.dark .add_new_profile{border-color:#353535} +.profile{margin-bottom:20px} +.profile .quality_label input{font-weight:700} +.profile>.delete{position:absolute;padding:6.67px 20px;right:0;cursor:pointer;opacity:.6;color:#fd5353;font-size:1.5em;z-index:2} +.profile>.delete:hover{opacity:1} +.profile .ctrlHolder .types{-webkit-flex:1 1 auto;-ms-flex:1 1 auto;flex:1 1 auto;min-width:360px} +.profile .ctrlHolder .types .type{display:-webkit-flex;display:-ms-flexbox;display:flex;flex-row:row nowrap;-webkit-align-items:center;-ms-flex-align:center;align-items:center;padding:2px 0} +.profile 
.ctrlHolder .types .type label{min-width:0;margin-left:10px} +.profile .ctrlHolder .types .type label span{font-size:.9em} +.profile .ctrlHolder .types .type input[type=checkbox]{margin-right:3px} +.profile .ctrlHolder .types .type .delete,.profile .ctrlHolder .types .type .handle{margin-left:5px;width:20px;font-size:20px;opacity:.1;text-align:center;cursor:pointer} +.profile .ctrlHolder .types .type .delete.handle,.profile .ctrlHolder .types .type .handle.handle{cursor:move;cursor:-webkit-grab;cursor:grab} +.profile .ctrlHolder .types .type .delete:hover,.profile .ctrlHolder .types .type .handle:hover{opacity:1} +.profile .ctrlHolder .types .type.is_empty .delete,.profile .ctrlHolder .types .type.is_empty .handle{display:none} +.profile .ctrlHolder.wait_for.wait_for{display:block} +.profile .ctrlHolder.wait_for.wait_for input{min-width:0;width:40px;text-align:center;margin:0 2px} +.profile .ctrlHolder.wait_for.wait_for .advanced{display:none;color:#ac0000} +.dark .profile .ctrlHolder.wait_for.wait_for .advanced{color:#f85c22} +.show_advanced .profile .ctrlHolder.wait_for.wait_for .advanced{display:inline} +#profile_ordering ul{list-style:none;margin:0;width:275px;padding:0} +#profile_ordering li{border-bottom:1px solid transparent;border-color:#ebebeb;padding:5px;display:-webkit-flex;display:-ms-flexbox;display:flex;-webkit-align-items:center;-ms-flex-align:center;align-items:center} +.dark #profile_ordering li{border-color:#353535} +#profile_ordering li:hover{background:#ebebeb} +.dark #profile_ordering li:hover{background:#353535} +#profile_ordering li:last-child{border:0} +#profile_ordering li input[type=checkbox]{margin:2px 10px 0 0;vertical-align:top} +#profile_ordering li>span{display:inline-block;height:20px;vertical-align:top;line-height:20px} +#profile_ordering li>span.profile_label{-webkit-flex:1 1 auto;-ms-flex:1 1 auto;flex:1 1 auto} +#profile_ordering li 
.handle{font-size:20px;width:20px;float:right;cursor:move;cursor:-webkit-grab;cursor:grab;opacity:.5;text-align:center} +#profile_ordering li .handle:hover{opacity:1} +.group_sizes .item .label{min-width:150px} +.group_sizes .item .max,.group_sizes .item .min{display:inline-block;width:70px!important;min-width:0!important;margin-right:10px;text-align:center} +.page.userscript{position:absolute;width:100%;top:0;bottom:0;left:0;right:0;padding:0} +.page.userscript .frame.loading{text-align:center;font-size:20px;padding:20px} +.page.userscript .media_result{height:140px;display:-webkit-flex;display:-ms-flexbox;display:flex} +.page.userscript .thumbnail{width:90px} +.page.userscript .options{left:90px;display:-webkit-flex;display:-ms-flexbox;display:flex;-webkit-align-items:center;-ms-flex-align:center;align-items:center;padding:10px} +.page.userscript .options>div{display:-webkit-flex;display:-ms-flexbox;display:flex;-webkit-flex-wrap:wrap;-ms-flex-wrap:wrap;flex-wrap:wrap} +.page.userscript .options>div div{-webkit-flex:1 auto;-ms-flex:1 auto;flex:1 auto;margin:0;padding:0 5px} +.page.userscript .options>div .title{min-width:100%;margin-bottom:20px} +.page.userscript .options>div .add{text-align:right} +.page.userscript .options>div .add a{display:block;text-align:center} +.page.userscript .options>div select{width:100%} +.page.userscript .message{font-size:1.5em} +.page.userscript .data,.page.userscript .year{display:none} +.group_userscript.group_userscript{display:block} +.empty_wanted .group_userscript.group_userscript{padding:20px 0} +.wgroup_automation .group_userscript.group_userscript{padding:10px 0;margin-left:0} +.group_userscript.group_userscript h2{margin:0 0 10px} +.group_userscript.group_userscript .userscript{margin-left:20px} +@media (max-width:768px){.group_userscript.group_userscript .userscript{margin-left:10px} +} +.wgroup_automation .group_userscript.group_userscript .userscript{margin-left:0} +.group_userscript.group_userscript .bookmarklet 
span{margin-left:10px;display:inline-block} +.group_userscript.group_userscript img{clear:both;margin:20px;width:100%;max-width:600px} +@media (max-width:768px){.group_userscript.group_userscript img{margin:10px} +} +.wgroup_automation .group_userscript.group_userscript img{margin-left:0} +.page.wizard{top:0!important} +.page.wizard .navigation.navigation{display:none} +.page.wizard .tab_content.tab_content{display:block} +.page.wizard .tab_content.tab_content fieldset .ctrlHolder,.page.wizard .tab_content.tab_content fieldset h2{padding:5px} +.page.wizard h1{padding:10px 0;display:block;font-size:30px;margin:80px 5px 0;font-weight:300} +.page.wizard .description{padding:10px 5px;font-size:1.45em;line-height:1.4em;display:block} +.page.wizard form.uniForm.containers{margin:0} +.page.wizard form>div{min-height:300px;max-width:1024px;padding:20px;margin:0 auto} +@media (max-width:485px){.page.wizard form>div{padding:10px} +} +.page.wizard .button.green{padding:20px;font-size:25px;margin:10px 0 80px;display:inline-block} +.page.wizard .tab_nzb_providers{margin:20px 0 0} +.api_docs h1{font-size:25px;padding:20px 40px} +.api_docs pre{background:#eee;font-family:monospace;margin:0;padding:10px;width:100%;display:block;font-size:12px} +.api_docs body{display:block;overflow:auto;background-color:#FFF} +.api_docs .api,.api_docs .missing{overflow:hidden;border-bottom:1px solid #eee;padding:40px} +.api_docs .api:hover{color:#000} +.api_docs .api .description{color:#333;padding:0 0 5px} +.api_docs .api .params{background:#fafafa;width:100%} +.api_docs .api .params h3{clear:both;float:left;width:100px} +.api_docs .api .params td,.api_docs .api .params th{padding:3px 5px;border-bottom:1px solid #eee} +.api_docs .api .params tr:last-child td,.api_docs .api .params tr:last-child th{border:0} +.api_docs .api .params .param{vertical-align:top} +.api_docs .api .params .param th{text-align:left;width:100px} +.api_docs .api .params .param 
.type{font-style:italic;margin-right:10px;width:100px;color:#666} +.api_docs .api .params .return{float:left;width:700px} +.api_docs .database{padding:20px;margin:0} +.api_docs .database *{margin:0;padding:0} +.api_docs .database .nav li{display:inline-block} +.api_docs .database .nav li a{padding:5px} +.api_docs .database table{font-size:11px} +.api_docs .database table th{text-align:left} +.api_docs .database table tr:hover{position:relative;z-index:20} +.api_docs .database table td{vertical-align:top;position:relative} +.api_docs .database table .id{width:100px} +.api_docs .database table ._rev,.api_docs .database table ._t{width:60px} +.api_docs .database table .form,.api_docs .database table form{width:600px} +.api_docs .database table textarea{font-size:12px;width:100%;height:200px} +.api_docs .database table input[type=submit]{display:block} +.page.login{background:#FFF;display:-webkit-flex;display:-ms-flexbox;display:flex;-webkit-justify-content:center;-ms-flex-pack:center;justify-content:center;-webkit-align-items:center;-ms-flex-align:center;align-items:center;font-size:1.25em} +.dark .page.login{background:#2d2d2d} +.page.login h1{padding:0 0 10px;font-size:60px;font-family:Lobster;font-weight:400;color:#ac0000;text-align:center} +.dark .page.login h1{color:#f85c22} +.page.login form{padding:0;width:300px} +.page.login .ctrlHolder{padding:0;margin:0 0 20px} +.page.login .ctrlHolder:hover{background:0 0} +.page.login input[type=password],.page.login input[type=text]{width:100%!important} +.page.login .remember_me{font-size:15px;float:left;width:150px} +.page.login .button{float:right;margin:0;transition:none} +@font-face{font-family:icons;src:url(../fonts/icons.eot?3);src:url(../fonts/icons.eot?3#iefix) format("embedded-opentype"),url(../fonts/icons.woff?3) format("woff"),url(../fonts/icons.ttf?3) format("truetype"),url(../fonts/icons.svg?3#icons) format("svg");font-weight:400;font-style:normal} +[class*=" 
icon-"]:before,[class^=icon-]:before{font-family:icons;font-style:normal;font-weight:400;speak:none;-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale} +.icon-left-arrow:before{content:'\e800'} +.icon-settings:before{content:'\e801'} +.icon-search:before{content:'\e802'} +.icon-dots:before{content:'\e803'} +.icon-filter:before{content:'\e804'} +.icon-thumb:before{content:'\e805'} +.icon-list:before{content:'\e806'} +.icon-notifications:before{content:'\e807'} +.icon-emo-cry:before{content:'\e808'} +.icon-emo-coffee:before{content:'\e809'} +.icon-emo-sunglasses:before{content:'\e80a'} +.icon-info:before{content:'\e80b'} +.icon-download:before{content:'\e80c'} +.icon-delete:before{content:'\e80d'} +.icon-cancel:before{content:'\e80e'} +.icon-redo:before{content:'\e80f'} +.icon-ok:before{content:'\e810'} +.icon-dropdown:before{content:'\e811'} +.icon-play:before{content:'\e812'} +.icon-plus:before{content:'\e813'} +.icon-eye:before{content:'\e814'} +.icon-error:before{content:'\e815'} +.icon-refresh:before{content:'\e816'} +.icon-home:before{content:'\e817'} +.icon-movie:before{content:'\e818'} +.icon-handle:before,.icon-menu:before{content:'\e819'} +.icon-star:before{content:'\e81a'} +.icon-star-empty:before{content:'\e81b'} +.icon-star-half:before{content:'\e81c'} +.icon-donate:before{content:'\e81d'} +@font-face{font-family:OpenSans;src:url(../fonts/OpenSans-Light-webfont.eot);src:url(../fonts/OpenSans-Light-webfont.eot?#iefix) format("embedded-opentype"),url(../fonts/OpenSans-Light-webfont.woff) format("woff"),url(../fonts/OpenSans-Light-webfont.ttf) format("truetype"),url(../fonts/OpenSans-Light-webfont.svg#OpenSansRegular) format("svg");font-weight:200;font-style:normal} +@font-face{font-family:OpenSans;src:url(../fonts/OpenSans-Regular-webfont.eot);src:url(../fonts/OpenSans-Regular-webfont.eot?#iefix) format("embedded-opentype"),url(../fonts/OpenSans-Regular-webfont.woff) format("woff"),url(../fonts/OpenSans-Regular-webfont.ttf) 
format("truetype"),url(../fonts/OpenSans-Regular-webfont.svg#OpenSansRegular) format("svg");font-weight:400;font-style:normal} +@font-face{font-family:OpenSans;src:url(../fonts/OpenSans-Italic-webfont.eot);src:url(../fonts/OpenSans-Italic-webfont.eot?#iefix) format("embedded-opentype"),url(../fonts/OpenSans-Italic-webfont.woff) format("woff"),url(../fonts/OpenSans-Italic-webfont.ttf) format("truetype"),url(../fonts/OpenSans-Italic-webfont.svg#OpenSansItalic) format("svg");font-weight:400;font-style:italic} +@font-face{font-family:OpenSans;src:url(../fonts/OpenSans-Bold-webfont.eot);src:url(../fonts/OpenSans-Bold-webfont.eot?#iefix) format("embedded-opentype"),url(../fonts/OpenSans-Bold-webfont.woff) format("woff"),url(../fonts/OpenSans-Bold-webfont.ttf) format("truetype"),url(../fonts/OpenSans-Bold-webfont.svg#OpenSansBold) format("svg");font-weight:700;font-style:normal} +@font-face{font-family:OpenSans;src:url(../fonts/OpenSans-BoldItalic-webfont.eot);src:url(../fonts/OpenSans-BoldItalic-webfont.eot?#iefix) format("embedded-opentype"),url(../fonts/OpenSans-BoldItalic-webfont.woff) format("woff"),url(../fonts/OpenSans-BoldItalic-webfont.ttf) format("truetype"),url(../fonts/OpenSans-BoldItalic-webfont.svg#OpenSansBoldItalic) format("svg");font-weight:700;font-style:italic} +@font-face{font-family:Lobster;src:url(../fonts/Lobster-webfont.eot);src:url(../fonts/Lobster-webfont.eot?#iefix) format("embedded-opentype"),url(../fonts/Lobster-webfont.woff2) format("woff2"),url(../fonts/Lobster-webfont.woff) format("woff"),url(../fonts/Lobster-webfont.ttf) format("truetype"),url(../fonts/Lobster-webfont.svg#lobster_14regular) format("svg");font-weight:400;font-style:normal} +*{margin:0;padding:0;box-sizing:border-box;text-rendering:optimizeSpeed;-webkit-backface-visibility:hidden;backface-visibility:hidden} +body,html{font-size:14px;line-height:1.5;font-family:OpenSans,"Helvetica 
Neue",Helvetica,Arial,Geneva,sans-serif;font-weight:300;height:100%;margin:0;padding:0;background:#111;color:#000;overflow:hidden} +.dark body,.dark html{background:#111;color:#FFF} +a{position:relative;overflow:hidden;text-decoration:none;cursor:pointer;-webkit-tap-highlight-color:transparent} +a:active,a:visited{color:inherit} +input,select,textarea{font-size:1em;font-weight:300;padding:6.67px;border-radius:0;border:1px solid #b8b8b8;background:#ebebeb;color:#000} +.dark input,.dark select,.dark textarea{border-color:#1b1b1b;background:#353535;color:#FFF} +input[type=text],textarea{-webkit-appearance:none} +.button{color:#ac0000;font-weight:300;padding:5px;cursor:pointer;border:1px solid #ac0000;border-radius:3px;margin:0 5px;transition:all 150ms} +.dark .button{color:#f85c22;border-color:#f85c22} +.button:hover{background:#ac0000;color:#FFF} +.dark .button:hover{background:#f85c22} +.ripple{position:absolute;height:10px;width:10px;border-radius:50%;background:#ac0000;will-change:transform,opacity;-webkit-transform:translate(-50%,-50%) scale(1) rotateZ(360deg);transform:translate(-50%,-50%) scale(1) rotateZ(360deg);opacity:.2;transition:all 1.5s ease;transition-property:opacity,-webkit-transform;transition-property:opacity,transform} +.dark .ripple{background:#f85c22} +.ripple.animate{-webkit-transform:translate(-50%,-50%) scale(100) rotateZ(360deg);transform:translate(-50%,-50%) scale(100) rotateZ(360deg);opacity:0} +.header{width:132px;position:relative;z-index:100;height:100%} +@media (max-width:485px){.header{width:44px;z-index:21} +} +.header a{color:#FFF;letter-spacing:1px} +.header .ripple{background:#FFF} +.header .navigation .logo{background:#ac0000;display:block;text-align:center;position:relative;overflow:hidden;font-family:Lobster,serif;color:#FFF;font-size:38px;line-height:80px;height:80px} +.dark .header .navigation .logo{background:#f85c22} +.dark .header .donate:hover,.dark .header .navigation ul li a:hover,.header .donate:hover,.header 
.navigation ul li a:hover{background:#303030} +.header .navigation .logo span{position:absolute;display:block;height:100%;width:100%;text-align:center;left:0} +.header .navigation .logo span:nth-child(even){-webkit-transform:translateX(100%);transform:translateX(100%)} +@media (max-width:485px){.header .navigation .logo{font-size:28px;line-height:44px;height:44px} +.header .navigation .logo:after{content:'CP'} +.header .navigation .logo span{display:none} +.header .navigation ul li{line-height:0} +} +.header .navigation ul{padding:0;margin:0} +.header .navigation ul li{display:block} +.header .navigation ul li a{padding:10px 20px;display:block;position:relative} +.header .navigation ul li a:before{position:absolute;width:100%;display:none;text-align:center;font-size:18px;text-indent:0} +@media (max-width:485px){.header .navigation ul li a{line-height:24px;height:44px;padding:10px 0;text-align:center} +.header .navigation ul li a span{display:none} +.header .navigation ul li a:before{display:block} +} +.header .navigation ul li a.icon-home:before{font-size:24px} +.header .donate{position:absolute;bottom:44px;left:0;right:0;padding:10px 20px;transition:background .2s} +.header .donate:before{display:none;font-size:20px;text-align:center} +@media (max-width:485px){.header .donate{bottom:132px;padding:10px 0} +.header .donate span{display:none} +.header .donate:before{display:block} +} +.header .menu,.header .notification_menu,.header .search_form{position:absolute;z-index:21;bottom:0;width:44px;height:44px} +.header .menu .wrapper,.header .notification_menu .wrapper,.header .search_form .wrapper{min-width:170px;-webkit-transform-origin:0 90%;transform-origin:0 90%} +.header .menu>a,.header .notification_menu>a,.header .search_form>a{display:inline-block;height:100%;width:100%;text-align:center;line-height:44px;font-size:20px} +.header .notification_menu{left:50%;-webkit-transform:translateX(-50%);transform:translateX(-50%)} +.header .notification_menu 
.button:before{font-size:20px;top:-2px} +.header .notification_menu .badge{position:absolute;color:#FFF;top:5px;right:0;background:#ac0000;border-radius:50%;width:18px;height:18px;line-height:16px;text-align:center;font-size:10px;font-weight:lighter;z-index:2;pointer-events:none} +.dark .header .notification_menu .badge{background:#f85c22} +.header .notification_menu .wrapper{width:320px} +@media (max-width:485px){.header .notification_menu{bottom:44px} +.header .notification_menu .wrapper{width:250px} +} +.header .notification_menu ul{min-height:60px;max-height:300px;overflow-y:auto!important} +.header .notification_menu ul:empty:after{content:'No notifications (yet)';width:100%;position:absolute;line-height:60px;font-size:15px;font-style:italic;opacity:.4;left:40px} +.header .notification_menu ul:empty:before{content:'\e808';font-family:icons;height:100%;line-height:60px;margin-left:20px;text-align:center;opacity:.4} +.header .notification_menu ul li{padding:20px;word-wrap:break-word} +.header .notification_menu ul li .added{font-weight:lighter;font-size:11px;display:block;text-align:right} +.header .menu{left:0} +.header .menu .button:before{font-size:20px;top:-2px} +.header .search_form{right:0} +@media (max-width:485px){.header .menu{bottom:88px;left:50%;-webkit-transform:translateX(-50%);transform:translateX(-50%)} +.header .search_form{right:auto;left:50%;-webkit-transform:translateX(-50%);transform:translateX(-50%)} +} +.header .more_menu .wrapper{bottom:0;left:44px;right:auto;padding-left:4px} +.dark .header .more_menu a:hover,.header .more_menu a:hover{background:#303030} +.corner_background{display:block;position:absolute;height:10px;width:10px;background:#ac0000;top:0;left:132px} +.dark .corner_background{background:#f85c22} +@media (max-width:485px){.corner_background{left:44px} +} +.content{position:absolute;height:100%;top:0;left:132px;right:0;display:-webkit-flex;display:-ms-flexbox;display:flex;-webkit-flex-flow:column nowrap;-ms-flex-flow:column 
nowrap;flex-flow:column nowrap;background:#FFF;border-radius:3px 0 0 3px} +@media (max-width:485px){.content{left:44px} +} +.dark .content{background:#2d2d2d} +.content h1,.content h2,.content h3{padding:0;margin:0} +.content .pages{-webkit-flex:1 auto;-ms-flex:1 auto;flex:1 auto;width:100%;position:relative} +.content .footer{width:100%} +.page{position:absolute;top:0;left:0;right:0;bottom:0;display:none} +.page.active{display:block} +.page>.scroll_content{position:relative;height:100%;overflow:hidden;overflow-y:auto;-webkit-overflow-scrolling:touch} +.page.home .scroll_content{padding:0 0 20px} +.page h1,.page h2,.page h3,.page h4{font-weight:300} +.page h2{font-size:24px;padding:20px} +.page .navigation{z-index:2;display:-webkit-flex;display:-ms-flexbox;display:flex;-webkit-align-items:center;-ms-flex-align:center;align-items:center;position:fixed;top:0;height:80px;left:152px;right:20px;background:#FFF;border-radius:3px 0 0} +.more_menu .button,.more_menu a,.page .navigation ul li{display:inline-block} +.more_menu,.more_menu .button:before{position:relative} +.dark .page .navigation{background:#2d2d2d} +@media (max-width:485px){.page h2{font-size:18px} +.page .navigation{height:44px;left:54px;right:10px} +} +.page .navigation ul{-webkit-flex:1 auto;-ms-flex:1 auto;flex:1 auto;list-style:none} +.page .navigation ul li a{display:inline-block;font-size:24px;line-height:80px;padding:0 20px;color:#000;opacity:.5;vertical-align:bottom} +.page .navigation ul .active a,.page .navigation ul li a:hover,.question.show{opacity:1} +.dark .page .navigation ul li a{color:#FFF} +.page .navigation>ul>li:first-child{margin-left:-20px} +@media (max-width:485px){.page .navigation ul li a{font-size:18px;line-height:44px;padding:0 10px} +.page .navigation>ul>li:first-child{margin-left:-10px} +} +.page .navigation h2{padding:20px 20px 20px 0} +.level_1{z-index:10} +.level_2{z-index:20} +.level_3{z-index:30} +.level_4{z-index:40} +.more_menu{line-height:1em} +.more_menu a{float:left} 
+.more_menu a:hover{background:#ebebeb} +.dark .more_menu a:hover{background:#353535} +.more_menu .button{font-size:24px;cursor:pointer} +.more_menu .wrapper{display:none;position:absolute;right:0;background:#ac0000;z-index:5000;box-shadow:0 0 15px 2px rgba(0,0,0,.15);border-radius:3px 0 0 3px;-webkit-transform-origin:80% 0;transform-origin:80% 0} +.dark .more_menu .wrapper{background:#f85c22;box-shadow:0 5px 15px 2px rgba(0,0,0,.4)} +.more_menu .wrapper:before{-webkit-transform:rotate(45deg) translateY(-60%);transform:rotate(45deg) translateY(-60%);content:'';display:block;position:absolute;background:#ac0000;height:10px;width:10px;left:-9px;bottom:11px;z-index:1;opacity:1;border-radius:3px} +.dark .more_menu .wrapper:before{background:#f85c22} +.more_menu .wrapper ul{background:#FFF;position:relative;z-index:2;overflow:hidden;border-radius:3px 0 0 3px} +.dark .more_menu .wrapper ul{background:#2d2d2d} +.more_menu .wrapper ul li{display:block;line-height:1em;border-top:1px solid transparent;border-color:#ebebeb} +.dark .more_menu .wrapper ul li{border-color:#353535} +.more_menu .wrapper ul li:first-child{border-top:0} +.more_menu .wrapper ul li a{display:block;color:#000;padding:5px 10px;font-size:1em;line-height:22px;width:100%} +.dark .more_menu .wrapper ul li a{color:#FFF} +.more_menu .wrapper ul li a:hover{background:#ebebeb} +.dark .more_menu .wrapper ul li a:hover{background:#353535} +.more_menu .wrapper ul li:first-child a{padding-top:10px} +.more_menu .wrapper ul li:last-child a{padding-bottom:10px} +.messages{position:fixed;right:0;bottom:0;width:320px;z-index:2000;overflow:hidden;font-size:14px;font-weight:700;padding:5px} +.messages .message{overflow:hidden;transition:all .6s cubic-bezier(.9,0,.1,1);width:100%;position:relative;max-height:0;font-size:1.1em;font-weight:400;-webkit-transform:scale(0);transform:scale(0);-webkit-transform-origin:100% 50%;transform-origin:100% 50%;background:#ac0000;margin-bottom:4px;border-radius:3px;box-shadow:0 0 15px 2px 
rgba(0,0,0,.15)} +.mask,.messages .close{position:absolute;top:0;right:0} +.dark .messages .message{background:#f85c22;box-shadow:0 5px 15px 2px rgba(0,0,0,.4)} +.messages .message .inner{padding:15px 30px 15px 20px;background:#FFF;margin-bottom:4px;border-radius:3px} +.dark .messages .message .inner{background:#2d2d2d} +.messages .message.sticky .inner{background-color:#ac0000} +.dark .messages .message.sticky .inner{background-color:#f85c22} +.messages .message.sticky .icon-cancel{position:absolute;display:block;height:30px;width:30px;line-height:30px;top:0;right:0;text-align:center} +.question,.table .item{display:-webkit-flex;display:-ms-flexbox} +.messages .message.show{max-height:130px;-webkit-transform:scale(1);transform:scale(1)} +.messages .message.hide{max-height:0;padding:0 20px;margin:0;-webkit-transform:scale(0);transform:scale(0)} +.messages .close{padding:10px 8px;color:#FFF} +.question{position:fixed;z-index:20000;color:#FFF;padding:20px;display:flex;-webkit-align-items:center;-ms-flex-align:center;align-items:center;-webkit-justify-content:center;-ms-flex-pack:center;justify-content:center} +.question .inner{width:100%;max-width:500px} +.question h3{display:block;margin-bottom:20px;font-size:1.4em;font-weight:lighter} +.question .hint{margin:-20px 0 20px} +.question a{border-color:#FFF;color:#FFF;transition:none} +.question a:hover{background:#ac0000;color:#FFF} +.dark .question a:hover{background:#f85c22} +.mask{background:rgba(0,0,0,.8);z-index:1000;text-align:center;bottom:0;left:0;opacity:0;transition:opacity .5s} +.mask .message,.mask .spinner{position:absolute;top:50%;left:50%} +.mask .message{color:#FFF;text-align:center;width:320px;margin:-49px 0 0 -160px;font-size:16px} +.mask .message h1{font-size:1.5em} +.mask .spinner{width:22px;height:22px;display:block;background:#fff;margin-top:-11px;margin-left:-11px;outline:transparent solid 1px;-webkit-animation:rotating 2.5s cubic-bezier(.9,0,.1,1) infinite normal;animation:rotating 2.5s 
cubic-bezier(.9,0,.1,1) infinite normal;-webkit-transform:scale(0);transform:scale(0)} +.mask.with_message .spinner{margin-top:-88px} +.mask.show{pointer-events:auto;opacity:1} +.mask.show .spinner{-webkit-transform:scale(1);transform:scale(1)} +.mask.hide{opacity:0} +.mask.hide .spinner{-webkit-transform:scale(0);transform:scale(0)} +@-webkit-keyframes rotating{0%{-webkit-transform:rotate(0) scale(1.6);transform:rotate(0) scale(1.6);border-radius:1px} +48%{-webkit-transform:rotate(360deg) scale(1);transform:rotate(360deg) scale(1);border-radius:50%} +100%{-webkit-transform:rotate(720deg) scale(1.6);transform:rotate(720deg) scale(1.6);border-radius:1px} +} +@keyframes rotating{0%{-webkit-transform:rotate(0) scale(1.6);transform:rotate(0) scale(1.6);border-radius:1px} +48%{-webkit-transform:rotate(360deg) scale(1);transform:rotate(360deg) scale(1);border-radius:50%} +100%{-webkit-transform:rotate(720deg) scale(1.6);transform:rotate(720deg) scale(1.6);border-radius:1px} +} +.table .head{font-weight:700} +.table .item{display:flex;border-bottom:1px solid rgba(0,0,0,.2)} +.dark .table .item{border-color:rgba(255,255,255,.2)} +.table .item:last-child{border-bottom:none} +.table .item span{padding:1px 2px} +.table .item span:first-child{padding-left:0} +.table .item span:last-child{padding-right:0} +.page.settings{padding-top:80px} +.page.settings.active .scroll_content{display:-webkit-flex;display:-ms-flexbox;display:flex} +@media (max-width:485px){.page.settings{padding-top:44px} +.page.settings.active .scroll_content{display:block} +} +.page.settings .navigation{display:-webkit-flex;display:-ms-flexbox;display:flex;-webkit-justify-content:space-between;-ms-flex-pack:justify;justify-content:space-between} +.page.settings .navigation .advanced_toggle{display:-webkit-flex;display:-ms-flexbox;display:flex;-webkit-align-items:center;-ms-flex-align:center;align-items:center} +.page.settings .navigation .advanced_toggle span{margin-right:10px} +.page.settings 
.tab_content{display:none} +.page.settings .tab_content.active{display:block} +.page.settings .tabs{margin:0 20px 20px;list-style:none;font-size:24px} +@media (max-width:485px){.page.settings .tabs{margin:0 10px 20px} +} +.page.settings .tabs ul{list-style:none;font-size:14px} +.page.settings .tabs li a{color:rgba(0,0,0,.5)} +.dark .page.settings .tabs li a{color:rgba(255,255,255,.5)} +.page.settings .tabs li.active a{color:#000} +.dark .page.settings .tabs li.active a{color:#FFF} +.page.settings form.containers{margin:0 20px 0 0;-webkit-flex:1;-ms-flex:1;flex:1} +@media (max-width:485px){.page.settings form.containers{margin:0 10px 0 0} +} +.page.settings fieldset h2 .group_label,.page.settings fieldset h2 .icon{margin-right:10px} +.page.settings fieldset{border:0;padding:10px 0;position:relative} +.page.settings fieldset h2{display:-webkit-flex;display:-ms-flexbox;display:flex;-webkit-flex-flow:row wrap;-ms-flex-flow:row wrap;flex-flow:row wrap;-webkit-align-items:baseline;-ms-flex-align:baseline;align-items:baseline;padding:0 0 0 20px} +.page.settings fieldset h2 .icon img{vertical-align:middle;position:relative;top:-1px} +.page.settings fieldset h2 .hint{-webkit-flex:1;-ms-flex:1;flex:1;font-size:1rem} +@media (max-width:485px){.page.settings fieldset h2{display:block;padding:0 0 0 10px} +.page.settings fieldset h2 .hint{margin:0;display:block} +} +.page.settings fieldset h2 .hint a{font-weight:400;color:#ac0000;text-decoration:underline;display:inline} +.dark .page.settings fieldset h2 .hint a{color:#f85c22} +.page.settings fieldset .more_hint{position:relative} +.page.settings fieldset .more_hint .tooltip{display:inline;padding:10px} +.page.settings fieldset .more_hint .tooltip .icon-info{vertical-align:middle;display:inline-block;text-align:center;border:1px solid #ac0000;border-radius:50%;width:18px;height:18px;line-height:16px;font-size:.8em;text-decoration:none} +.dark .page.settings fieldset .more_hint .tooltip .icon-info{border-color:#f85c22} 
+.page.settings fieldset .more_hint .tooltip .tip{bottom:100%;left:0;right:0;position:absolute;background:#ebebeb;z-index:20;display:none;padding:10px;margin-left:-10px} +.dark .page.settings fieldset .more_hint .tooltip .tip{background:#353535} +.page.settings fieldset .more_hint .tooltip:hover .tip{display:block} +.page.settings fieldset .ctrlHolder{padding:6.67px 20px;border-bottom:1px solid transparent;border-color:#ebebeb;display:-webkit-flex;display:-ms-flexbox;display:flex;-webkit-flex-flow:row nowrap;-ms-flex-flow:row nowrap;flex-flow:row nowrap;-webkit-align-items:center;-ms-flex-align:center;align-items:center} +.dark .page.settings fieldset .ctrlHolder{border-color:#353535} +.page.settings fieldset .ctrlHolder:last-child{border-bottom:0} +.page.settings fieldset .ctrlHolder:nth-child(2){margin-top:10px} +.page.settings fieldset .ctrlHolder label{display:inline-block;min-width:150px} +.page.settings fieldset .ctrlHolder input,.page.settings fieldset .ctrlHolder select,.page.settings fieldset .ctrlHolder textarea{min-width:200px} +.page.settings fieldset .ctrlHolder input[type=checkbox]{width:auto;min-width:0} +@media (max-width:485px){.page.settings fieldset .ctrlHolder{-webkit-flex-flow:row wrap;-ms-flex-flow:row wrap;flex-flow:row wrap;padding:6.67px 0 6.67px 10px} +.page.settings fieldset .ctrlHolder input,.page.settings fieldset .ctrlHolder label,.page.settings fieldset .ctrlHolder select,.page.settings fieldset .ctrlHolder textarea{-webkit-flex:1 1 auto;-ms-flex:1 1 auto;flex:1 1 auto} +.page.settings fieldset .ctrlHolder input[type=checkbox]{margin-right:20px;-webkit-flex:none;-ms-flex:none;flex:none} +.page.settings fieldset .ctrlHolder .select_wrapper{width:100%} +} +.page.settings fieldset .ctrlHolder .select_wrapper{position:relative;display:-webkit-flex;display:-ms-flexbox;display:flex;-webkit-align-items:center;-ms-flex-align:center;align-items:center} +.page.settings fieldset .ctrlHolder .select_wrapper 
select{cursor:pointer;-webkit-appearance:none;-moz-appearance:none;appearance:none;width:100%;min-width:200px;border-radius:0} +.page.settings fieldset .ctrlHolder .select_wrapper select::-ms-expand{display:none} +.page.settings fieldset .ctrlHolder .select_wrapper:before{vertical-align:top;pointer-events:none;position:absolute;top:0;line-height:2em;right:10px;height:100%} +.page.settings fieldset .ctrlHolder .formHint{-webkit-flex:1;-ms-flex:1;flex:1;opacity:.8;margin-left:20px} +.page.settings fieldset .ctrlHolder .formHint a{font-weight:400;color:#ac0000;text-decoration:underline} +.dark .page.settings fieldset .ctrlHolder .formHint a{color:#f85c22} +@media (max-width:485px){.page.settings fieldset .ctrlHolder .formHint{min-width:100%;margin-left:0} +} +.page.settings fieldset .ctrlHolder.test_button a{margin:0} +.page.settings fieldset .ctrlHolder.test_button .success{margin-left:10px} +.page.settings fieldset .ctrlHolder.read_only{opacity:.5} +.page.settings fieldset .ctrlHolder.read_only label{position:relative} +.page.settings fieldset .ctrlHolder.read_only label:after{left:0;bottom:-10px;position:absolute;content:'(read-only)';font-size:.7em} +.page.settings fieldset.disabled .ctrlHolder{display:none} +.page.settings fieldset.disabled>.ctrlHolder:first-child{display:-webkit-flex;display:-ms-flexbox;display:flex} +.page.settings fieldset.enabler{display:block} +.page.settings fieldset.enabler.disabled:hover{background:rgba(255,255,255,.1)} +.dark .page.settings fieldset.enabler.disabled:hover{background:rgba(0,0,0,.1)} +.page.settings fieldset.enabler>:first-child{position:absolute;right:0;border:0;padding:0;z-index:10} +.page.settings fieldset.enabler>:first-child~h2{margin-right:86px} +@media (max-width:485px){.page.settings fieldset.enabler>:first-child~h2{margin-right:0} +} +.page.settings fieldset.enabler>:nth-child(2){margin-top:0} +.page.settings fieldset.enabler>:nth-child(3){margin-top:10px} +.page.settings fieldset 
.ctrlHolder.advanced,.page.settings fieldset.advanced{display:none;color:#ac0000} +.dark .page.settings fieldset .ctrlHolder.advanced,.dark .page.settings fieldset.advanced{color:#f85c22} +.page.settings.show_advanced fieldset.advanced{display:block} +.page.settings.show_advanced fieldset:not(.disabled)>.ctrlHolder.advanced{display:-webkit-flex;display:-ms-flexbox;display:flex} +.page.settings .switch{display:inline-block;background:#ac0000;height:20px;width:50px;min-width:0!important;transition:all 250ms;cursor:pointer;border-radius:20px} +.dark .page.settings .switch{background:#f85c22} +.page.settings .switch input{display:none} +.page.settings .switch .toggle{background:#FFF;margin:1px;height:18px;width:18px;transition:-webkit-transform 250ms;transition:transform 250ms;-webkit-transform:translateX(30px);transform:translateX(30px);border-radius:20px} +.dark .page.settings .switch .toggle{background:#2d2d2d} +.page.settings fieldset.enabler.disabled .switch,.page.settings:not(.show_advanced) .advanced_toggle .switch{background:#ebebeb;border-color:#ebebeb} +.dark .page.settings fieldset.enabler.disabled .switch,.dark .page.settings:not(.show_advanced) .advanced_toggle .switch{background:#4e4e4e;border-color:#4e4e4e} +.page.settings fieldset.enabler.disabled .switch .toggle,.page.settings:not(.show_advanced) .advanced_toggle .switch .toggle{-webkit-transform:translateX(0);transform:translateX(0)} +.page.settings fieldset.enabler.disabled .switch:hover,.page.settings:not(.show_advanced) .advanced_toggle .switch:hover{background:#b8b8b8;border-color:#b8b8b8} +.dark .page.settings fieldset.enabler.disabled .switch:hover,.dark .page.settings:not(.show_advanced) .advanced_toggle .switch:hover{background:#020202;border-color:#020202} +.page.settings .option_list{background:#FFF;margin-top:10px} +.dark .page.settings .option_list{background:#2d2d2d} +.page.settings .option_list fieldset{position:relative} +.page.settings .option_list fieldset h2 
.group_label{min-width:100px} +.page.settings .option_list fieldset.disabled h2{padding:0 20px} +@media (max-width:485px){.page.settings .option_list fieldset.disabled h2{padding:0 10px} +} +.page.settings .option_list fieldset:after{position:absolute;content:'';display:block;width:100%;border-bottom:1px solid transparent;border-color:#ebebeb;bottom:0} +.dark .page.settings .option_list fieldset:after{border-color:#353535} +.page.settings .option_list fieldset:after:last-child{border-bottom:0} +.page.settings .option_list h2{font-size:1em;font-weight:400} +.page.settings .option_list h2 .hint{font-weight:300} +.page.settings .combined_table{margin-top:20px} +.page.settings .combined_table .head{margin:0 10px 0 46px;font-size:.8em} +.page.settings .combined_table .head abbr{display:inline-block;font-weight:700;border-bottom:1px dotted #fff;line-height:140%;cursor:help;margin-right:10px;text-align:center} +.page.settings .combined_table .head abbr:first-child{display:none} +.page.settings .combined_table input{min-width:0!important;display:inline-block;margin-right:10px} +.page.settings .combined_table .automation_ids,.page.settings .combined_table .automation_urls,.page.settings .combined_table .host{width:200px} +.page.settings .combined_table .api_key,.page.settings .combined_table .name,.page.settings .combined_table .pass_key{width:150px} +.page.settings .combined_table .extra_score,.page.settings .combined_table .seed_ratio,.page.settings .combined_table .seed_time{width:70px;text-align:center} +.page.settings .combined_table .custom_tag{width:120px;text-align:center} +.page.settings .combined_table .ctrlHolder{margin:0 0 0 20px;padding-left:0} +.page.settings .combined_table .ctrlHolder .delete{display:none;font-size:20px;width:22px;height:22px;line-height:20px;text-align:center;vertical-align:middle} +.page.settings .combined_table .ctrlHolder:hover .delete{display:inline-block} +.page.settings .disabled .combined_table,.page.settings 
.multi_directory.is_empty .delete{display:none} +.page.settings .combined_table .ctrlHolder.is_empty .delete,.page.settings .combined_table .ctrlHolder.is_empty input[type=checkbox]{visibility:hidden} +.page.settings .tab_about .usenet{padding:20px 20px 0;font-size:1.5em;line-height:1.3em} +@media (max-width:485px){.page.settings .tab_about .usenet{padding:10px!important;font-size:1em;line-height:1.5em} +} +.page.settings .tab_about .usenet a{color:#ac0000;padding:0 5px} +.dark .page.settings .tab_about .usenet a{color:#f85c22} +.page.settings .tab_about .usenet ul{list-style:none;float:left;width:50%;margin:10px 0;padding:0} +@media (max-width:485px){.page.settings .tab_about .usenet ul{float:none;width:auto;margin:0} +} +.page.settings .tab_about .usenet li{font-size:.8em} +.page.settings .tab_about .usenet li:before{margin-right:10px} +.page.settings .tab_about .donate{float:left;width:42%;text-align:center;font-size:17px;padding:0 0 0 4%;margin:20px 0 0;border-left:1px solid rgba(0,0,0,.2);height:150px} +@media (max-width:485px){.page.settings .tab_about .donate{padding:0;float:none;width:auto;margin:0;border:none} +} +.dark .page.settings .tab_about .donate{border-color:rgba(255,255,255,.2)} +.page.settings .tab_about .donate iframe{border:none;width:100%;height:100%} +.page.settings .tab_about .info{padding:20px;margin:0;overflow:hidden} +.page.settings .tab_about .info dt{clear:both;float:left;width:17%;font-weight:700} +@media (max-width:485px){.page.settings .tab_about .info{padding:10px} +.page.settings .tab_about .info dt{float:none;width:auto} +} +.page.settings .tab_about .info dd{float:right;width:80%;padding:0;margin:0;font-style:italic} +@media (max-width:485px){.page.settings .tab_about .info dd{float:none;width:auto;margin-bottom:10px} +.page.settings .directory{width:100%} +} +.page.settings .tab_about .info dd.version{cursor:pointer} +.page.settings .tab_about .group_actions>div{padding:20px;text-align:center} +.page.settings .tab_about 
.group_actions a{margin:0 10px;font-size:20px} +.page.settings .directory input{width:100%} +.page.settings .multi_directory .delete{color:#ac0000;padding:0 10px;opacity:.6;font-size:1.5em} +.dark .page.settings .multi_directory .delete{color:#f85c22} +.page.settings .multi_directory .delete:hover{opacity:1} +.page.settings .choice .select_wrapper{margin-left:20px;width:120px;min-width:120px} +@media (max-width:485px){.page.settings .choice .select_wrapper{margin:10px 0 0} +} +.page.settings .choice .select_wrapper select{min-width:0!important} +.page.settings .renamer_to.renamer_to{-webkit-flex-flow:row wrap;-ms-flex-flow:row wrap;flex-flow:row wrap} +.page.settings .renamer_to.renamer_to .ctrlHolder{width:100%} +.directory_list{z-index:2;position:absolute;width:450px;margin:28px 0 20px;background:#ac0000;box-shadow:0 0 15px 2px rgba(0,0,0,.15);border-radius:3px 3px 0 0} +.dark .directory_list{background:#f85c22;box-shadow:0 5px 15px 2px rgba(0,0,0,.4)} +.directory_list .pointer{border-right:6px solid transparent;border-left:6px solid transparent;border-bottom:6px solid transparent;border-bottom-color:#ac0000;display:block;position:absolute;width:0;margin:-6px 0 0 100px} +.dark .directory_list .pointer{border-bottom-color:#f85c22} +.directory_list .wrapper{background:#FFF;border-radius:3px 3px 0 0;margin-top:5px} +.dark .directory_list .wrapper{background:#2d2d2d} +.directory_list ul{width:92%;height:300px;overflow:auto;margin:0 20px} +.directory_list li{padding:4px 20px 4px 0;cursor:pointer;margin:0!important;border-top:1px solid rgba(255,255,255,.1);overflow:hidden;white-space:nowrap;text-overflow:ellipsis} +.directory_list li.blur{opacity:.3} +.directory_list li:last-child{border-bottom:1px solid rgba(255,255,255,.1)} +.directory_list li:hover{color:#ac0000} +.dark .directory_list li:hover{color:#f85c22} +.directory_list li.empty{background:0 
0;height:100px;text-align:center;font-style:italic;border:none;line-height:100px;cursor:default;color:#BBB;text-shadow:none;font-size:12px} +.directory_list .actions{clear:both;padding:20px;min-height:45px;position:relative;width:100%;text-align:right} +.directory_list .actions label{float:right;width:auto;padding:0} +.directory_list .actions label input{margin-left:10px} +.directory_list .actions .back{font-weight:700;width:160px;display:inline-block;padding:0;line-height:120%;vertical-align:top;position:absolute;text-align:left;left:20px} +.directory_list .actions:last-child{padding:20px} +.directory_list .actions:last-child>span{padding:0 5px;text-shadow:none} +.directory_list .actions:last-child>.clear{left:20px;position:absolute;top:50%;-webkit-transform:translateY(-50%);transform:translateY(-50%);margin:0} +.directory_list .actions:last-child>.cancel{opacity:.7} +.directory_list .actions:last-child>.save{margin-right:0} diff --git a/couchpotato/static/style/login.scss b/couchpotato/static/style/login.scss new file mode 100644 index 0000000000..5f077c1b76 --- /dev/null +++ b/couchpotato/static/style/login.scss @@ -0,0 +1,50 @@ +@import "_mixins"; + +/*** Login ***/ +.page.login { + @include theme(background, background); + display: flex; + justify-content: center; + align-items: center; + font-size: 1.25em; + + h1 { + padding: 0 0 10px; + font-size: 60px; + font-family: Lobster; + font-weight: normal; + @include theme(color, primary); + text-align: center; + } + + form { + padding: 0; + width: 300px; + } + + .ctrlHolder { + padding: 0; + margin: 0 0 20px; + + &:hover { + background: none; + } + } + + input[type=text], + input[type=password] { + width: 100% !important; + } + + .remember_me { + font-size: 15px; + float: left; + width: 150px; + } + + .button { + float: right; + margin: 0; + transition: none; + } +} diff --git a/couchpotato/static/style/main.css b/couchpotato/static/style/main.css deleted file mode 100644 index ff292226f2..0000000000 --- 
a/couchpotato/static/style/main.css +++ /dev/null @@ -1,622 +0,0 @@ -html { - color: #fff; - font-size: 12px; - line-height: 1.5; - font-family: "Helvetica Neue", Helvetica, Arial, Geneva, sans-serif; - height: 100%; - text-shadow: 0 1px 0 #000; -} - -body { - margin: 0; - padding: 0; - background: #4e5969; - overflow-y: scroll; - height: 100%; -} - body.noscroll { overflow: hidden; } - - #clean { - background: transparent !important; - } - -* { - -webkit-box-sizing: border-box; - -moz-box-sizing: border-box; - box-sizing: border-box; -} - -pre { - white-space: pre-wrap; - word-wrap: break-word; -} - -input, textarea { - font-size: 12px; - font-family: "Helvetica Neue", Helvetica, Arial, Geneva, sans-serif; -} - -input:-moz-placeholder, textarea:-moz-placeholder { - color: rgba(255, 255, 255, 0.6); -} - -::-webkit-input-placeholder, ::-webkit-textarea-placeholder { - color: rgba(255, 255, 255, 0.6); -} - -a img { - border:none; -} - -a { - text-decoration:none; - color: #ebfcbc; - outline: 0; - cursor: pointer; - font-weight: bold; -} -a:hover { color: #f3f3f3; } - -.page { - display: none; - width: 960px; - margin: 0 auto; - line-height: 24px; - padding: 0 0 20px; -} - .page.active { display: block; } - - .page .noticeMe { - background-color: lightgoldenrodyellow; - display: block; - padding: 20px 10px; - margin: 0 -10px 40px; - font-size: 19px; - text-align: center; - } - -.content { - clear:both; - padding: 80px 0 10px; -} - -h2 { - font-size: 30px; - padding: 0; - margin: 20px 0 0 0; -} - -.footer { - text-align:center; - padding: 50px 0 0 0; - color: #999; - font-size: 10px; - clear: both; -} - - .footer .check { - color: #333; - } - -#toTop { - background: black; - position: fixed; - bottom: 0; - right: 0; - padding: 10px 10px 10px 40px; - background: #f7f7f7 url('../images/toTop.gif') no-repeat 10px center; - border-radius: 5px 0 0 0; -} - -form { - padding:0; - margin:0; -} - -body > .spinner, .mask{ - background: rgba(0,0,0, 0.9); - z-index: 100; - 
text-align: center; -} - body > .mask { - position: fixed; - top: 0; - left: 0; - height: 100%; - width: 100%; - padding: 200px; - } - -.button { - background: #5082bc url("data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAAyCAYAAACd+7GKAAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAAClJREFUeNpi/v//vwMTAwPDfzjBgMpFI/7hFSOT9Y8qRuF3JLoHAQIMAHYtMmRA+CugAAAAAElFTkSuQmCC") repeat-x; - padding: 5px 10px 6px; - color: #fff; - text-decoration: none; - font-weight: bold; - line-height: 1; - border-radius: 5px; - box-shadow: 0 1px 3px rgba(0,0,0,0.5); - text-shadow: 0 -1px 1px rgba(0,0,0,0.25); - border-bottom: 1px solid rgba(0,0,0,0.25); - cursor: pointer; -} - .button.red { background-color: #ff0000; } - .button.green { background-color: #2aa300; } - .button.orange { background-color: #ffa200; } - .button.yellow { background-color: #ffe400; } - -/*** Icons ***/ -.icon { - display: inline-block; - background: center no-repeat; -} -.icon.delete { background-image: url('../images/icon.delete.png'); } -.icon.download { background-image: url('../images/icon.download.png'); } -.icon.edit { background-image: url('../images/icon.edit.png'); } -.icon.completed { background-image: url('../images/icon.check.png'); } -.icon.folder { background-image: url('../images/icon.folder.png'); } -.icon.imdb { background-image: url('../images/icon.imdb.png'); } -.icon.refresh { background-image: url('../images/icon.refresh.png'); } -.icon.rating { background-image: url('../images/icon.rating.png'); } -.icon.files { background-image: url('../images/icon.files.png'); } -.icon.info { background-image: url('../images/icon.info.png'); } -.icon.trailer { background-image: url('../images/icon.trailer.png'); } -.icon.spinner { background-image: url('../images/icon.spinner.gif'); } -.icon.attention { background-image: url('../images/icon.attention.png'); } - -/*** Navigation ***/ -.header { - background: #4e5969; - padding: 10px 0; - height: 80px; - position: fixed; - margin: 0; - width: 100%; - 
z-index: 5; - box-shadow: 0 20px 30px -30px rgba(0,0,0,0.05); - transition: box-shadow .4s cubic-bezier(0.9,0,0.1,1); -} - .header.with_shadow { - box-shadow: 0 20px 30px -30px rgba(0,0,0,0.3); - } - -.header > div { - width: 960px; - margin: 0 auto; - overflow: hidden; -} - .header .navigation { - display: inline-block; - vertical-align: middle; - width: 67.2%; - } - .header .navigation ul { - margin: 0; - padding: 0; - } - .header .navigation li { - color: #fff; - display: inline-block; - font-size:20px; - font-weight: bold; - margin: 0; - text-align: center; - } - - .header .navigation li a { - display: block; - padding: 15px; - position: relative; - } - .header .navigation li:first-child a { padding-left: 10px; } - .header .navigation li span { - display: block; - margin-top: 5px; - } - - .header .navigation li a:after { - content: ''; - display: inline-block; - height: 2px; - width: 76%; - left: 12%; - position: absolute; - top: 46px; - background-color: #46505e; - outline: none; - box-shadow: inset 0 1px 8px rgba(0,0,0,0.05), 0 1px 0px rgba(255,255,255,0.15); - transition: all .4s cubic-bezier(0.9,0,0.1,1); - } - - .header .navigation li:hover a:after { background-color: #047792; } - .header .navigation li.active a:after { background-color: #04bce6; } - - .header .navigation li.disabled { color: #e5e5e5; } - .header .navigation li a { color: #fff; } - - .header .navigation .backtotop { - opacity: 0; - display: block; - width: 80px; - left: 50%; - position: absolute; - text-align: center; - margin: -10px 0 0 -40px; - background: #4e5969; - padding: 5px 0; - border-radius: 0 0 5px 5px; - color: rgba(255,255,255,.4); - text-shadow: none; - font-weight: normal; - } - .header:hover .navigation .backtotop { color: #fff; } - - .header .more_menu { - margin-left: 12px; - } - .header .more_menu .wrapper { - width: 150px; - margin-left: -110px; - } - .header .more_menu .wrapper:before { - margin-left: -34px; - } - - .header .more_menu .red { color: red; } - .header 
.more_menu .orange { color: orange; } - - .badge { - position: absolute; - width: 14px; - height: 14px; - text-align: center; - line-height: 14px; - border-radius: 50%; - font-size: 8px; - margin: -5px 0 0 15px; - box-shadow: inset 0 1px 0 rgba(255,255,255,.6), 0 0 3px rgba(0,0,0,.7); - background-color: #1b79b8; - text-shadow: none; - background-image: -*-linear-gradient(0deg, rgba(255,255,255,.3) 0%, rgba(255,255,255,.1) 100%); - } - - .header .notification_menu .wrapper { - width: 300px; - margin-left: -260px; - text-align: left; - } - - .header .notification_menu .wrapper:before { - left: 296px; - } - - .header .notification_menu ul { - max-height: 300px; - overflow: auto; - } - - .header .notification_menu > a { - background-position: center -209px; - } - - .header .notification_menu li > span { - padding: 5px; - display: block; - border-bottom: 1px solid rgba(0,0,0,0.2); - word-wrap: break-word; - } - .header .notification_menu li > span { color: #777; } - .header .notification_menu li:last-child > span { border: 0; } - .header .notification_menu li .added { - display: block; - font-size: 10px; - color: #aaa; - text-align: ; - } - - .header .notification_menu li .more { - text-align: center; - } - - .header .message.update { - text-align: center; - position: relative; - top: -70px; - padding: 2px 0; - background: #ff6134; - font-size: 12px; - border-radius: 0 0 5px 5px; - box-shadow: 0 2px 1px rgba(0,0,0, 0.3); - } - - .header .message a { - padding: 0 10px; - } - -/*** Global Styles ***/ -.check { - display: inline-block; - vertical-align: middle; - height: 16px; - width: 16px; - cursor: pointer; - background: url('../images/sprite.png') no-repeat -200px; -} - .check.highlighted { background-color: #424c59; } - .check.checked { background-position: -2px 0; } - .check.indeterminate { background-position: -1px -119px; } - .check input { - display: none !important; - } - -.select { - cursor: pointer; - display: inline-block; - color: #fff; -} - - .select 
.selection { - display: inline-block; - padding: 0 30px 0 20px; - border-radius:30px; - - box-shadow: 0 1px 1px rgba(0,0,0,0.35), inset 0 1px 0px rgba(255,255,255,0.20); - background: url('../images/sprite.png') no-repeat 94% -53px, -*-linear-gradient( - 270deg, - #5b9bd1 0%, - #406db8 100% - ); - } - - .select .selection .selectionDisplay { - display: inline-block; - padding-right: 15px; - border-right: 1px solid rgba(0,0,0,0.2); - - box-shadow: 1px 0 0 rgba(255,255,255,0.15); - } - - .select .menu { - clear: both; - overflow: hidden; - font-weight: bold; - } - - .select .list { - display: none; - background: #282d34; - border: 1px solid #1f242b; - position: absolute; - margin: 28px 0 0 0; - box-shadow: 0 20px 20px -10px rgba(0,0,0,0.4); - border-radius:3px; - z-index: 3; - } - .select.active .list { - display: block; - } - .select .list ul { - display: block; - width: 100% !important; - } - .select .list li { - padding: 0 33px 0 20px; - margin: 0 !important; - display: block; - border-top: 1px solid rgba(255,255,255,0.1); - white-space: nowrap; - } - .select .list li.highlighted { - background: rgba(255,255,255,0.1); - border-color: transparent; - } - - .select input { display: none; } - -.inlay { - color: #fff; - border: 0; - border-radius:3px; - background-color: #282d34; - box-shadow: inset 0 1px 8px rgba(0,0,0,0.25), 0 1px 0px rgba(255,255,255,0.25); -} - - .inlay.light { - background-color: #47515f; - outline: none; - box-shadow: inset 0 1px 8px rgba(0,0,0,0.05), 0 1px 0px rgba(255,255,255,0.15); - } - - .inlay:focus { - background-color: #3a4350; - outline: none; - } - -.onlay, .inlay .selected, .inlay:not(.reversed) > li:hover, .inlay > li.active, .inlay.reversed > li { - border-radius:3px; - border: 1px solid #252930; - box-shadow: inset 0 1px 0px rgba(255,255,255,0.20), 0 0 3px rgba(0,0,0, 0.2); - background: rgb(55,62,74); - background-image: -*-linear-gradient( - 90deg, - rgb(55,62,74) 0%, - rgb(73,83,98) 100% - ); -} -.onlay:active, .inlay.reversed > 
li:active { - color: #fff; - border: 1px solid transparent; - background-color: #282d34; - box-shadow: inset 0 1px 8px rgba(0,0,0,0.25), 0 1px 0px rgba(255,255,255,0.25); -} - -.question { - display: block; - width: 600px; - padding: 20px; - background: #f5f5f5; - position:fixed; - z-index:101; - text-align: center; - background: #5c697b; - border-radius: 3px; - box-shadow: 0 0 50px rgba(0,0,0,0.55); -} - - .question h3 { - font-size: 25px; - padding: 0; - margin: 0 0 20px; - } - - .question .hint { - font-size: 14px; - color: #ccc; - text-shadow: none; - } - - .question .answer { - font-size: 17px; - display: inline-block; - padding: 10px; - margin: 5px 1%; - cursor: pointer; - width: auto; - } - .question .answer:hover { - background: #000; - } - - .question .answer.delete { - background-color: #a82f12; - } - .question .answer.cancel { - margin-top: 20px; - background-color: #4c5766; - } - - .more_menu { - display: inline-block; - vertical-align: middle; - } - - .more_menu > a { - display: block; - background: url('../images/sprite.png') no-repeat center -137px; - height: 25px; - width: 25px; - border: 1px solid rgba(0,0,0,0.3); - transition: all 0.3s ease-in-out; - } - .more_menu.show > a:not(:active), .more_menu > a:hover:not(:active) { - background-color: #406db8; - } - - .more_menu .wrapper { - display: none; - border: 1px solid #333; - background: rgba(255,255,255,0.98); - border-radius: 3px; - padding: 4px !important; - position: absolute; - z-index: 9; - margin: 32px 0 0 -145px; - width: 185px; - box-shadow: 0 10px 10px -5px rgba(0,0,0,0.4); - text-align: center; - color: #000; - text-shadow: none; - background-image: -*-linear-gradient( - 45deg, - rgb(200,200,200) 0%, - rgb(255,255,255) 100% - ); - } - - .more_menu .wrapper:before { - content: ' '; - height: 0; - position: relative; - width: 0; - border: 6px solid transparent; - border-bottom-color: #fff; - display: block; - top: -16px; - left: 146px; - } - .more_menu.show .wrapper { - display: block; - } 
- - .more_menu ul { - padding: 0; - margin: -12px 0 0 0; - list-style: none; - } - - .more_menu .wrapper li { - width: 100%; - height: auto; - } - - .more_menu .wrapper li a { - display: block; - border-bottom: 1px solid rgba(255,255,255,0.2); - box-shadow: none; - font-weight: normal; - font-size: 11px; - text-transform: uppercase; - letter-spacing: 1px; - padding: 3px 0; - color: #000; - } - - .more_menu .wrapper li:last-child a { - border: none; - } - .more_menu .wrapper li a:hover { - background: rgba(0,0,0,0.05); - } - -.messages { - position: fixed; - right: 0; - bottom: 0; - padding: 2px; - width: 240px; - z-index: 2; - overflow: hidden; - font-size: 14px; - font-weight: bold; -} - - .messages .message { - text-align: center; - border-radius: 2px; - margin: 2px 0 0 0; - height: 0; - overflow: hidden; - transition: all .6s cubic-bezier(0.9,0,0.1,1); - box-shadow: 0 1px 1px rgba(0,0,0,0.35), inset 0 1px 0px rgba(255,255,255,0.20); - background-image: -*-linear-gradient( - 270deg, - #5b9bd1 0%, - #406db8 100% - ); - width: 100%; - padding: 0 5px; - visibility: hidden; - max-height: 0; - } - .messages .message.show { - visibility: visible; - height: auto; - padding-top: 3px; - padding-bottom: 3px; - min-height: 1px; - max-height: 400px; - } - .messages .message.hide { - margin-left: 240px; - opacity: 0; - } \ No newline at end of file diff --git a/couchpotato/static/style/main.scss b/couchpotato/static/style/main.scss new file mode 100644 index 0000000000..82e93b5072 --- /dev/null +++ b/couchpotato/static/style/main.scss @@ -0,0 +1,890 @@ +@import "_fonts"; +@import "_mixins"; + +* { + margin: 0; + padding: 0; + box-sizing: border-box; + text-rendering: optimizeSpeed; + backface-visibility: hidden; +} + +body, html { + font-size: $font_size; + line-height: 1.5; + font-family: OpenSans, "Helvetica Neue", Helvetica, Arial, Geneva, sans-serif; + font-weight: 300; + height: 100%; + margin: 0; + padding: 0; + @include theme(background, menu); + @include theme(color, 
text); + overflow: hidden; +} + +a { + position: relative; + overflow: hidden; + text-decoration: none; + cursor: pointer; + -webkit-tap-highlight-color: rgba(0, 0, 0, 0); + + &:visited, &:active { + color: inherit; + } +} + +input, textarea, select { + font-size: 1em; + font-weight: 300; + padding: $padding/3; + border-radius: 0; + border: 1px solid darken(get-theme(off), 20); + + @include theme-dark { + border-color: darken(get-theme-dark(off), 10); + } + + @include theme(background, off); + @include theme(color, text); +} + +input[type=text], textarea { + -webkit-appearance: none; +} + +.button { + @include theme(color, primary); + font-weight: 300; + padding: $padding/4; + cursor: pointer; + border: 1px solid transparent; + @include theme(border-color, primary); + border-radius: $border_radius; + margin: 0 $padding/4; + transition: all 150ms; + + &:hover { + @include theme(background, primary); + color: #FFF; + } +} + +.ripple { + position: absolute; + height: 10px; + width: 10px; + border-radius: 50%; + @include theme(background, primary); + will-change: transform, opacity; + transform: translate(-50%, -50%) scale(1) rotateZ(360deg); + opacity: 0.2; + transition: all 1.5s ease; + transition-property: opacity, transform; + pointer-events: none; + + &.animate { + transform: translate(-50%, -50%) scale(100) rotateZ(360deg); + opacity: 0; + } +} + +.disable_hover .scroll_content > * { + pointer-events: none; +} + +/* Header */ +.header { + width: $header_width; + position: relative; + z-index: 100; + height: 100%; + + @include media-phablet { + width: $header_width_mobile; + z-index: 21; + } + + a { + color: #FFF; + letter-spacing: 1px; + } + + .ripple { + background: #FFF; + } + + .navigation { + + .logo { + @include theme(background, primary); + display: block; + text-align: center; + position: relative; + overflow: hidden; + + font-family: Lobster, serif; + color: #FFF; + font-size: 38px; + line-height: $header_height; + height: $header_height; + + span { + 
position: absolute; + display: block; + height: 100%; + width: 100%; + text-align: center; + left: 0; + + &:nth-child(even){ + transform: translateX(100%); + } + } + + @include media-phablet { + font-size: 28px; + line-height: $header_width_mobile; + height: $header_width_mobile; + + &:after { + content: 'CP'; + } + + span { display: none; } + } + } + + ul { + padding: 0; + margin: 0; + + li { + display: block; + + @include media-phablet { + line-height: 0; + } + } + + li a { + padding: $padding/2 $padding; + display: block; + position: relative; + + &:hover { + @include theme(background, menu_off); + } + + @include media-phablet { + line-height: $header_width_mobile - $padding; + height: $header_width_mobile; + padding: $padding/2 0; + text-align: center; + } + + span { + @include media-phablet { + display: none; + } + } + + &:before { + position: absolute; + width: 100%; + display: none; + text-align: center; + font-size: 18px; + text-indent: 0; + + @include media-phablet { + display: block; + } + } + + &.icon-home:before { + font-size: 24px; + } + } + } + + } + + .donate { + position: absolute; + bottom: 44px; + left: 0; + right: 0; + padding: $padding/2 $padding; + transition: background 200ms; + + &:hover { + @include theme(background, menu_off); + } + + &:before { + display: none; + font-size: 20px; + text-align: center; + } + + @include media-phablet { + bottom: 44px * 3; + padding: $padding/2 0; + + span { + display: none; + } + + &:before { + display: block; + } + } + } + + .menu, .search_form, .notification_menu { + position: absolute; + z-index: 21; + bottom: 0; + width: 44px; + height: 44px; + + .wrapper { + min-width: 170px; + transform-origin: 0 90%; + } + + > a { + display: inline-block; + height: 100%; + width: 100%; + text-align: center; + line-height: 44px; + font-size: 20px; + } + } + + .notification_menu { + left: 50%; + transform: translateX(-50%); + + @include media-phablet { + bottom: 44px; + } + + .button:before { + font-size: 20px; + top: 
-2px; + } + + .badge { + position: absolute; + color: #FFF; + top: 5px; + right: 0; + @include theme(background, primary); + border-radius: 50%; + width: 18px; + height: 18px; + line-height: 16px; + text-align: center; + font-size: 10px; + font-weight: lighter; + z-index: 2; + pointer-events: none; + } + + .wrapper { + width: 320px; + + @include media-phablet { + width: 250px; + } + } + + ul { + min-height: 60px; + max-height: 300px; + overflow-y: auto !important; + + &:empty:after { + content: 'No notifications (yet)'; + width: 100%; + position: absolute; + line-height: 60px; + font-size: 15px; + font-style: italic; + opacity: .4; + left: $padding * 2; + } + + &:empty:before { + content: '\e808'; + font-family: "icons"; + height: 100%; + line-height: 60px; + margin-left: $padding; + text-align: center; + opacity: .4; + } + + li { + padding: $padding; + word-wrap: break-word; + + .added { + font-weight: lighter; + font-size: 11px; + display: block; + text-align: right; + } + } + } + } + + .menu { + left: 0; + + @include media-phablet { + bottom: 88px; + left: 50%; + transform: translateX(-50%); + } + + .button:before { + font-size: 20px; + top: -2px; + } + } + + .search_form { + right: 0; + + @include media-phablet { + right: auto; + left: 50%; + transform: translateX(-50%); + } + } + + .more_menu { + + .wrapper { + bottom: 0; + left: 44px; + right: auto; + padding-left: 4px; + } + + a { + &:hover { + @include theme(background, menu_off); + } + } + + } +} + +.corner_background { + display: block; + position: absolute; + height: 10px; + width: 10px; + @include theme(background, primary); + top: 0; + left: $header_width; + + @include media-phablet { + left: $header_width_mobile; + } +} + +/* Content */ +.content { + position: absolute; + height: 100%; + top: 0; + left: $header_width; + right: 0; + + @include media-phablet { + left: $header_width_mobile; + } + + display: flex; + flex-flow: column nowrap; + + @include theme(background, background); + border-radius: 
$border_radius 0 0 $border_radius; + + h1, h2, h3 { + padding: 0; + margin: 0; + } + + .pages { + flex: 1 auto; + width: 100%; + position: relative; + } + + .footer { + width: 100%; + } +} + +/* Page */ +.page { + position: absolute; + top: 0; + left: 0; + right: 0; + bottom: 0; + display: none; + + &.active { + display: block; + } + + > .scroll_content { + position: relative; + height: 100%; + overflow: hidden; + overflow-y: auto; + -webkit-overflow-scrolling:touch; + } + + &.home .scroll_content { + padding: 0 0 $padding; + } + + h1, h2, h3, h4 { + font-weight: 300; + } + + h2 { + font-size: 24px; + padding: $padding; + + @include media-phablet { + font-size: 18px; + } + } + + .navigation { + z-index: 2; + display: flex; + align-items: center; + position: fixed; + top: 0; + height: $header_height; + left: $header_width + $padding; + right: $padding; + @include theme(background, background); + border-radius: $border_radius 0 0 0; + + @include media-phablet { + height: $header_width_mobile; + left: $header_width_mobile + $padding/2; + right: $padding/2; + } + + ul { + flex: 1 auto; + list-style: none; + + li { + display: inline-block; + + a { + display: inline-block; + font-size: 24px; + line-height: $header_height; + padding: 0 $padding; + @include theme(color, text); + opacity: .5; + vertical-align: bottom; + + &:hover { + opacity: 1; + } + + @include media-phablet { + font-size: 18px; + line-height: $header_width_mobile; + padding: 0 $padding/2; + } + } + } + + .active a { + opacity: 1; + } + } + + > ul > li:first-child { + margin-left: -$padding; + + @include media-phablet { + margin-left: -$padding/2; + } + } + + h2 { + padding: $padding $padding $padding 0; + } + + } + +} + +/* Page levels */ +@for $i from 1 through 4 { + .level_#{$i} { + z-index: #{$i * 10}; + } +} + + +/* Menu basis */ +.more_menu { + position: relative; + line-height: 1em; + + a { + display: inline-block; + float: left; + + &:hover { + @include theme(background, off); + } + } + + .button { 
+ font-size: 24px; + cursor: pointer; + display: inline-block; + + &:before { + position: relative; + } + } + + .wrapper { + display: none; + position: absolute; + right: 0; + @include theme(background, primary); + z-index: 5000; + box-shadow: 0 0 15px 2px rgba(0,0,0,.15); + border-radius: $border_radius 0 0 $border_radius; + transform-origin: 80% 0; + + @include theme-dark { + box-shadow: 0 5px 15px 2px rgba(0,0,0,.4); + } + + &:before { + transform: rotate(45deg) translateY(-60%); + content: ''; + display: block; + position: absolute; + @include theme(background, primary); + height: 10px; + width: 10px; + left: -9px; + bottom: 11px; + z-index: 1; + opacity: 1; + border-radius: $border_radius; + } + + ul { + @include theme(background, background); + position: relative; + z-index: 2; + overflow: hidden; + border-radius: $border_radius 0 0 $border_radius; + } + + ul li { + display: block; + line-height: 1em; + border-top: 1px solid transparent; + @include theme(border-color, off); + + &:first-child { + border-top: 0; + } + + a { + display: block; + @include theme(color, text); + padding: $padding/4 $padding/2; + font-size: 1em; + line-height: 22px; + width: 100%; + + &:hover { + @include theme(background, off); + } + } + + &:first-child a { + padding-top: $padding/2; + } + + &:last-child a { + padding-bottom: $padding/2; + } + } + } + + //&.show { + // + // .wrapper { + // display: block; + // + // &:before { + // opacity: 1; + // } + // } + //} +} + +/* Messages */ +.messages { + position: fixed; + right: 0; + bottom: 0; + width: 320px; + z-index: 2000; + overflow: hidden; + font-size: 14px; + font-weight: bold; + padding: 5px; + + .message { + overflow: hidden; + transition: all .6s cubic-bezier(0.9,0,0.1,1); + width: 100%; + position: relative; + max-height: 0; + font-size: 1.1em; + font-weight: normal; + transform: scale(0); + transform-origin: 100% 50%; + @include theme(background, primary); + margin-bottom: 4px; + border-radius: $border_radius; + box-shadow: 0 
0 15px 2px rgba(0,0,0,.15); + + @include theme-dark { + box-shadow: 0 5px 15px 2px rgba(0,0,0,.4); + } + + .inner { + padding: 15px 30px 15px 20px; + @include theme(background, background); + margin-bottom: 4px; + border-radius: $border_radius; + } + + &.sticky { + .inner { + @include theme(background-color, primary); + } + + .icon-cancel { + position: absolute; + display: block; + height: 30px; + width: 30px; + line-height: 30px; + top: 0; + right: 0; + text-align: center; + } + } + + &.show { + max-height: 130px; + transform: scale(1); + } + + &.hide { + max-height: 0; + padding: 0 20px; + margin: 0; + transform: scale(0); + } + } + + .close { + position: absolute; + padding: 10px 8px; + top: 0; + right: 0; + color: #FFF; + } +} + +/* Question */ +.question { + position: fixed; + z-index: 20000; + color: #FFF; + padding: $padding; + display: flex; + align-items: center; + justify-content: center; + + &.show { + opacity: 1; + } + + .inner { + width: 100%; + max-width: 500px; + } + + h3 { + display: block; + margin-bottom: $padding; + font-size: 1.4em; + font-weight: lighter; + } + + .hint { + margin: -$padding 0 $padding; + } + + a { + border-color: #FFF; + color: #FFF; + transition: none; + + &:hover { + @include theme(background, primary); + color: #FFF; + } + } +} + +/* Mask */ +.mask { + background: rgba(0,0,0,.8); + z-index: 1000; + text-align: center; + position: absolute; + top: 0; + right: 0; + bottom: 0; + left: 0; + opacity: 0; + transition: opacity 500ms; + pointer-events: none; + + $spinner_size: 22px; + + .message { + color: #FFF; + text-align: center; + position: absolute; + top: 50%; + left: 50%; + width: 320px; + margin: -($spinner_size*2 + 5px) 0 0 -160px; + font-size: 16px; + + h1 { + font-size: 1.5em; + } + } + + .spinner { + position: absolute; + top: 50%; + left: 50%; + width: $spinner_size; + height: $spinner_size; + display: block; + background: white; + margin-top: -($spinner_size/2); + margin-left: -($spinner_size/2); + outline: 1px solid 
transparent; + animation: rotating 2.5s $cubic infinite normal; + transform: scale(0); + } + + &.with_message { + .spinner { + margin-top: -($spinner_size*4); + } + } + + &.show { + pointer-events: auto; + opacity: 1; + + .spinner { + transform: scale(1); + } + } + + &.hide { + opacity: 0; + + .spinner { + transform: scale(0); + } + } +} + +@keyframes rotating { + 0% { + transform: rotate(0deg) scale(1.6); + border-radius: 1px; + } + 48% { + transform: rotate(360deg) scale(1); + border-radius: 50%; + } + 52% {} + 100% { + transform: rotate(720deg) scale(1.6); + border-radius: 1px; + } +} + +.table { + + .head { + font-weight: bold; + } + + .item { + display: flex; + border-bottom: 1px solid rgba(0,0,0,.2); + + @include theme-dark { + border-color: rgba(255,255,255,.2); + } + + &:last-child { + border-bottom: none; + } + + span { + padding: 1px 2px; + + &:first-child { + padding-left: 0; + } + + &:last-child { + padding-right: 0; + } + } + } +} diff --git a/couchpotato/static/style/settings.css b/couchpotato/static/style/settings.css deleted file mode 100644 index 3af7dba785..0000000000 --- a/couchpotato/static/style/settings.css +++ /dev/null @@ -1,651 +0,0 @@ -.page.settings:after { - content: "."; - display: block; - clear: both; - visibility: hidden; - line-height: 0; - height: 0; -} - - .page.settings .tabs { - float: left; - width: 20%; - font-size: 20px; - text-align: right; - list-style: none; - padding: 40px 0; - margin: 0; - min-height: 470px; - background-image: -*-linear-gradient( - 20deg, - rgba(0,0,0,0) 50%, - rgba(0,0,0,0.3) 100% - ); - } - .page.settings .tabs a { - display: block; - padding: 7px 15px; - font-weight: normal; - transition: all 0.3s ease-in-out; - color: rgba(255, 255, 255, 0.8); - text-shadow: none; - } - .page.settings .tabs a:hover, - .page.settings .tabs .active a { - background: rgb(78, 89, 105); - color: #fff; - } - .page.settings .tabs > li { - border-bottom: 1px solid rgb(78, 89, 105); - } - - .page.settings .tabs .subtabs { - 
list-style: none; - padding: 0; - margin: -5px 0 10px; - } - - .page.settings .tabs .subtabs a { - font-size: 13px; - padding: 0 15px; - font-weight: normal; - transition: all .3s ease-in-out; - color: rgba(255, 255, 255, 0.7); - } - - .page.settings .tabs .subtabs .active a { - color: #fff; - background: rgb(78, 89, 105); - } - - - .page.settings .containers { - width: 80%; - float: left; - padding: 20px 2%; - min-height: 300px; - } - - .page .advanced { - display: none; - color: #edc07f; - } - .page.show_advanced .advanced { display: block; } - - .page.settings .tab_content { - display: none; - } - .page.settings .tab_content.active { display: block; } - - .page fieldset { - padding: 10px 0; - } - .page fieldset h2 { - font-weight: normal; - font-size: 25px; - padding: 0 9px 10px 30px; - margin: 0; - border-bottom: 1px solid #333; - box-shadow: 0 1px 0px rgba(255,255,255, 0.15); - } - .page fieldset h2 .hint { - font-size: 12px; - margin-left: 10px; - } - .page fieldset h2 .hint a { - margin: 0 !important; - padding: 0; - } - - .page fieldset.disabled .ctrlHolder { - display: none; - } - .page fieldset > .ctrlHolder:first-child { - display: block; - padding: 0; - width: auto; - margin: 0; - position: relative; - margin-bottom: -23px; - border: none; - width: 20px; - } - - .page .ctrlHolder { - line-height: 25px; - padding: 10px 10px 10px 30px; - font-size: 14px; - border: 0; - } - .page .ctrlHolder.save_success:not(:first-child) { - background: url('../images/icon.check.png') no-repeat 7px center; - } - .page .ctrlHolder:last-child { border: none; } - .page .ctrlHolder:hover { background-color: rgba(255,255,255,0.05); } - .page .ctrlHolder.focused { background-color: rgba(255,255,255,0.2); } - .page .ctrlHolder.focused:first-child, .page .ctrlHolder:first-child{ background-color: transparent; } - - .page .ctrlHolder .formHint { - width: 47%; - margin: -18px 0; - padding: 0; - color: #fff !important; - display: inline-block; - vertical-align: middle; - 
padding-left: 2%; - } - - .page .check + .formHint { - float: none; - width: auto; - display: inline-block; - padding-left: 1% !important; - height: 24px; - vertical-align: middle; - } - - .page .ctrlHolder label { - font-weight: bold; - width: 20%; - margin: 0; - padding: 6px 0 0; - } - - .page .xsmall { width: 20px !important; text-align: center; } - - .page .enabler { - display: block; - } - - .page .option_list { - margin-bottom: 20px; - } - - .page .option_list .enabler { - padding: 0; - margin-left: 5px !important; - } - - .page .option_list .enabler:not(.disabled) { - margin: 0 0 0 30px; - } - - .page .option_list .enabler:not(.disabled) .ctrlHolder:first-child { - margin: 10px 0 -33px 0; - } - - .page .option_list h3 { - padding: 0; - margin: 10px 5px 0; - text-align: center; - font-weight: normal; - text-shadow: none; - text-transform: uppercase; - font-size: 12px; - background: rgba(255,255,255,0.03); - } - - .page .option_list .enabler.disabled { - display: inline-block; - margin: 3px 3px 3px 20px; - padding: 4px 0; - width: 173px; - vertical-align: top; - } - - .page .option_list .enabler.disabled h2 { - border: none; - box-shadow: none; - padding: 0 10px 0 25px; - font-size: 16px; - } - - .page .option_list .enabler:not(.disabled) h2 { - font-size: 16px; - font-weight: bold; - border: none; - border-top: 1px solid rgba(255,255,255, 0.15); - box-shadow: 0 -1px 0px #333; - margin: 0; - padding: 10px 0 5px 25px; - } - .page .option_list .enabler:not(.disabled):first-child h2 { - border: none; - box-shadow: none; - } - - .page .option_list .enabler.disabled h2 .hint { - display: none; - } - .page .option_list .enabler h2 .hint { - font-weight: normal; - } - - .page input[type=text], .page input[type=password] { - padding: 5px 3px; - margin: 0; - width: 30%; - border-radius: 3px; - } - .page .input.xsmall { width: 5% } - .page .input.small { width: 10% } - .page .input.medium { width: 15% } - .page .input.large { width: 25% } - .page .input.xlarge { width: 
30% } - - .page .advanced_toggle { - clear: both; - display: block; - text-align: right; - height: 20px; - margin: 0; - } - .page .advanced_toggle span { padding: 0 5px; } - .page.show_advanced .advanced_toggle { - color: #edc07f; - } - - .page .directory { - display: inline-block; - padding: 0 4% 0 4px; - font-size: 13px; - width: 30%; - background-image: url('../images/icon.folder.gif'); - background-repeat: no-repeat; - background-position: 97% center; - overflow: hidden; - vertical-align: top; - } - .page .directory > span { - height: 25px; - display: inline-block; - float: right; - text-align: right; - white-space: nowrap; - cursor: pointer; - } - - .page .directory_list { - z-index: 2; - position: absolute; - width: 450px; - margin: 28px 0 20px 18%; - background: #5c697b; - border-radius: 3px; - box-shadow: 0 0 50px rgba(0,0,0,0.55); - } - - .page .directory_list .pointer { - border-right: 6px solid transparent; - border-left: 6px solid transparent; - border-bottom: 6px solid #5c697b; - display: block; - position: absolute; - width: 0px; - margin: -6px 0 0 22%; - } - - .page .directory_list ul { - width: 92%; - height: 300px; - overflow: auto; - margin: 0 4%; - font-size: 16px; - } - - .page .directory_list li { - padding: 4px 10px; - cursor: pointer; - margin: 0 !important; - border-top: 1px solid rgba(255,255,255,0.1); - background: url('../images/right.arrow.png') no-repeat 98% center; - } - .page .directory_list li:last-child { - border-bottom: 1px solid rgba(255,255,255,0.1); - } - - .page .directory_list li:hover { - background-color: #515c68; - } - - .page .directory_list li.empty { - background: none; - height: 100px; - text-align: center; - font-style: italic; - border: none; - line-height: 100px; - cursor: default; - color: #BBB; - text-shadow: none; - font-size: 12px; - } - - .page .directory_list .actions { - clear: both; - padding: 4% 4% 2%; - min-height: 25px; - } - - .page .directory_list .actions label { - float: right; - width: auto; - 
padding: 0; - } - .page .directory_list .actions .inlay { - margin: -2px 0 0 7px; - } - - .page .directory_list .actions .back { - font-weight: bold; - width: 160px; - display: inline-block; - padding: 0; - line-height: 120%; - vertical-align: top; - } - - .page .directory_list .actions:last-child { - float: right; - padding: 4%; - } - - .page .directory_list .actions:last-child > span { - padding: 0 5px; - text-shadow: none; - } - - .page .directory_list .actions:last-child > .clear { - left: -90%; - position: relative; - background-color: #af3128; -} - - .page .directory_list .actions:last-child > .cancel { - font-weight: bold; - color: #ddd; - } - - .page .directory_list .actions:last-child > .save { - background: #9dc156; - } - - - .page .multi_directory.is_empty .delete { - visibility: hidden; - } - - .page .multi_directory .delete { - display: none; - } - .page .multi_directory:hover .delete { - display: inline-block; - width: 22px; - height: 24px; - vertical-align: top; - background-position: center; - margin-left: 5px; - } - - - .page .tag_input select { - width: 20%; - display: inline-block; - } - - .page .tag_input .selection { - border-radius: 0 10px 10px 0; - height: 26px; - } - - .page .tag_input > input { - display: none; - } - - .page .tag_input > ul { - list-style: none; - border-radius: 3px; - cursor: text; - width: 30%; - margin: 0 !important; - min-height: 27px; - line-height: 0; - display: inline-block; - } - .page .tag_input:hover > ul { - border-radius: 3px 0 0 3px; - } - .page .tag_input:hover .formHint { display: none; } - - .page .tag_input > ul > li { - display: inline-block; - min-height: 20px; - min-width: 2px; - font-size: 12px; - padding: 0; - margin: 4px 0 0 !important; - border-width: 0; - background: 0; - line-height: 20px; - } - .page .tag_input > ul > li:first-child { min-width: 4px; } - .page .tag_input li.choice { - cursor: -moz-grab; - cursor: -webkit-grab; - cursor: grab; - padding: 0; - border-radius: 2px; - } - .page 
.tag_input > ul:hover > li.choice { - background: -*-linear-gradient( - 270deg, - rgba(255,255,255,0.3) 0%, - rgba(255,255,255,0.1) 100% - ); - } - .page .tag_input > ul > li.choice:hover, - .page .tag_input > ul > li.choice.selected { - background: -*-linear-gradient( - 270deg, - #5b9bd1 0%, - #406db8 100% - ); - } - - .page .tag_input .select { - display: none; - } - .page .tag_input:hover .select { display: inline-block; } - - .page .tag_input li input { - background: 0; - border: 0; - color: #fff; - outline-width: 0; - padding: 0; - min-width: 2px; - } - .page .tag_input li:first-child input { - padding-left: 2px; - min-width: 0; - } - - .page .tag_input li:not(.choice) span { - white-space: pre; - position: absolute; - top: -9999px; - } - - .page .tag_input .delete { - display: none; - height: 10px; - width: 16px; - position: absolute; - margin: -9px 0 0 -16px; - border-radius: 30px 30px 0 0; - cursor: pointer; - background: url('../images/icon.delete.png') no-repeat center 2px, -*-linear-gradient( - 270deg, - #5b9bd1 0%, - #5b9bd1 100% - ); - background-size: 65%; - } - .page .tag_input .choice:hover .delete, - .page .tag_input .choice.selected .delete { display: inline-block; } - .page .tag_input .choice .delete:hover { - height: 14px; - margin-top: -13px; - } - - .page .combined_table .head { - margin: 0 0 0 60px; - } - .page .disabled .head { display: none; } - .page .combined_table .head abbr { - display: inline-block; - font-weight: bold; - border-bottom: 1px dotted #fff; - line-height: 140%; - cursor: help; - } - .page .combined_table .head abbr.use, .page .combined_table .head abbr.automation_urls_use { - display: none; - } - .page .combined_table .head abbr.host { - margin-right: 197px; - } - - .page .combined_table .ctrlHolder { - padding-top: 2px; - padding-bottom: 3px; - } - .page .combined_table .ctrlHolder.hide { display: none; } - - .page .combined_table .ctrlHolder > * { - margin: 0 10px 0 0; - } - - .page .combined_table .ctrlHolder .delete { 
- display: none; - width: 22px; - height: 22px; - vertical-align: middle; - background-position: left center; - } - .page .combined_table .ctrlHolder:hover .delete { - display: inline-block; - } - - .page .combined_table .ctrlHolder.is_empty .delete, .page.settings .combined_table .ctrlHolder.is_empty .check { - visibility: hidden; -} - - .page .tab_about .usenet { - padding: 20px 30px 0; - font-size: 17px; - } - - .page .tab_about .usenet a { - padding: 0 5px; - } - - .page .tab_about .usenet ul { - float: left; - width: 50%; - margin: 10px 0; - padding: 0; - } - - .page .tab_about .usenet li { - background: url('../images/icon.check.png') no-repeat left center; - padding: 0 0 0 25px; - } - - .page .tab_about .donate { - float: left; - width: 42%; - text-align: center; - font-size: 17px; - padding: 0 0 0 4%; - margin: 20px 0 0; - border-left: 1px solid #333; - box-shadow: -1px 0 0 rgba(255,255,255, 0.15); - } - .page .tab_about .donate form { - padding: 10px 0 0; - } - - .page .tab_about .info { - padding: 20px 30px; - margin: 0; - overflow: hidden; - } - - .page .tab_about .info dt { - clear: both; - float: left; - width: 17%; - font-weight: bold; - } - - .page .tab_about .info dd { - float: right; - width: 80%; - padding: 0; - margin: 0; - font-style: italic; - } - .page .tab_about .info dd.version { cursor: pointer; } - - .page .tab_about .group_actions > div { - padding: 30px; - text-align: center; - } - - .page .tab_about .group_actions a { - margin: 0 10px; - font-size: 20px; - } - -.group_userscript { - background: center bottom no-repeat; - min-height: 360px; - font-size: 20px; - font-weight: normal; -} - - .group_userscript h2 .hint { - display: block; - margin: 0 !important; - } - - .group_userscript .userscript { - float: left; - margin: 14px 0 0 25px; - height: 36px; - line-height: 25px; - } - - .group_userscript .or { - float: left; - margin: 20px -10px 0 10px; - } - - .group_userscript .bookmarklet { - display: block; - display: block; - float: left; 
- padding: 20px 15px 0 25px; - border-radius: 5px; - } - - .group_userscript .bookmarklet span { - margin-left: 10px; - display: inline-block; - } - -.active .group_imdb_automation:not(.disabled) { - background: url('../images/imdb_watchlist.png') no-repeat right 50px; - min-height: 210px; -} \ No newline at end of file diff --git a/couchpotato/static/style/settings.scss b/couchpotato/static/style/settings.scss new file mode 100644 index 0000000000..0724adad19 --- /dev/null +++ b/couchpotato/static/style/settings.scss @@ -0,0 +1,830 @@ +@import "_mixins"; + +.page.settings { + padding-top: $header_height; + + @include media-phablet { + padding-top: $header_width_mobile; + } + + &.active .scroll_content { + display: flex; + + @include media-phablet { + display: block; + } + } + + .navigation { + display: flex; + justify-content: space-between; + + .advanced_toggle { + display: flex; + align-items: center; + + span { + margin-right: $padding / 2; + } + } + } + + .tab_content { + display: none; + + &.active { + display: block; + } + } + + .tabs { + margin: 0 $padding $padding; + list-style: none; + font-size: 24px; + + @include media-phablet { + margin: 0 $padding/2 $padding; + } + + ul { + list-style: none; + font-size: $font_size; + } + + li { + a { + color: rgba(0,0,0,.5); + + @include theme-dark { + color: rgba(255,255,255,.5); + } + } + + &.active { + a { + @include theme(color, text); + } + } + } + + } + + form.containers { + margin: 0 $padding 0 0; + flex: 1; + + @include media-phablet { + margin: 0 $padding/2 0 0; + } + } + + fieldset { + border: 0; + padding: $padding/2 0; + position: relative; + + h2 { + display: flex; + flex-flow: row wrap; + align-items: baseline; + padding: 0 0 0 $padding; + + @include media-phablet { + display: block; + padding: 0 0 0 $padding/2; + } + + .icon { + margin-right: $padding/2; + + img { + vertical-align: middle; + position: relative; + top: -1px; + } + } + + .group_label { + margin-right: $padding/2; + } + + .hint { + flex: 
1; + font-size: 1rem; + + @include media-phablet { + margin: 0; + display: block; + } + + a { + font-weight: 400; + @include theme(color, primary); + text-decoration: underline; + display: inline; + } + + + } + } + + .more_hint { + position: relative; + + .tooltip { + display: inline; + padding: $padding/2; + + .icon-info { + vertical-align: middle; + display: inline-block; + text-align: center; + border: 1px solid transparent; + @include theme(border-color, primary); + border-radius: 50%; + width: 18px; + height: 18px; + line-height: 16px; + font-size: .8em; + text-decoration: none; + } + + .tip { + bottom: 100%; + left: 0; + right: 0; + position: absolute; + @include theme(background, off); + z-index: 20; + display: none; + padding: $padding/2; + margin-left: -$padding/2; + } + + &:hover { + .tip { + display: block; + } + } + } + } + + .ctrlHolder { + padding: $padding/3 $padding; + border-bottom: 1px solid transparent; + @include theme(border-color, off); + display: flex; + flex-flow: row nowrap; + align-items: center; + + @include media-phablet { + flex-flow: row wrap; + padding: $padding/3 0 $padding/3 $padding/2; + } + + &:last-child { + border-bottom: 0; + } + + &:nth-child(2) { + margin-top: $padding/2; + } + + label { + display: inline-block; + min-width: 150px; + + @include media-phablet { + flex: 1 1 auto; + } + } + + input, textarea, select { + min-width: 200px; + + @include media-phablet { + flex: 1 1 auto; + } + } + + input[type=checkbox] { + width: auto; + min-width: 0; + + @include media-phablet { + margin-right: $padding; + flex: none; + } + } + + .select_wrapper { + position: relative; + display: flex; + align-items: center; + + @include media-phablet { + width: 100%; + } + + select { + cursor: pointer; + appearance: none; + width: 100%; + min-width: 200px; + border-radius: 0; + + &::-ms-expand { + display: none; + } + } + + &:before { + vertical-align: top; + pointer-events: none; + position: absolute; + top: 0; + line-height: 2em; + right: 
$padding/2; + height: 100%; + } + } + + .formHint { + flex: 1; + opacity: .8; + margin-left: $padding; + + a { + font-weight: 400; + @include theme(color, primary); + text-decoration: underline; + } + + @include media-phablet { + min-width: 100%; + margin-left: 0; + } + } + + &.test_button { + a { + margin: 0; + } + + .success { + margin-left: $padding / 2; + } + } + + &.read_only { + opacity: .5; + + label { + position: relative; + } + + label:after { + left: 0; + bottom: -10px; + position: absolute; + content: '(read-only)'; + font-size: .7em; + } + } + } + + &.disabled { + + .ctrlHolder { + display: none; + } + + > .ctrlHolder:first-child { + display: flex; + } + } + + &.enabler { + display: block; + + &.disabled:hover { + background: rgba(255,255,255,.1); + + @include theme-dark { + background: rgba(0,0,0,.1); + } + } + + > :first-child { + position: absolute; + right: 0; + border: 0; + padding: 0; + z-index: 10; + + ~ h2 { + margin-right: 66px + $padding; + + @include media-phablet { + margin-right: 0; + } + } + } + + > :nth-child(2){ + margin-top: 0; + } + + > :nth-child(3){ + margin-top: $padding/2; + } + + } + } + + fieldset.advanced, + fieldset .ctrlHolder.advanced { + display: none; + @include theme(color, primary); + } + + &.show_advanced { + fieldset.advanced { + display: block; + } + fieldset:not(.disabled) > .ctrlHolder.advanced { + display: flex; + } + } + + .switch { + $switch_height: 20px; + display: inline-block; + @include theme(background, primary); + height: $switch_height; + width: $switch_height * 2.5; + min-width: 0 !important; + transition: all 250ms; + cursor: pointer; + border-radius: $switch_height; + + input { + display: none; + } + + .toggle { + @include theme(background, background); + margin: 1px; + height: $switch_height - 2px; + width: $switch_height - 2px; + transition: transform 250ms; + transform: translateX($switch_height*1.5); + border-radius: $switch_height; + } + + } + + fieldset.enabler.disabled .switch, + 
&:not(.show_advanced) .advanced_toggle .switch { + @include theme(background, off); + @include theme(border-color, off); + + @include theme-dark { + background: lighten(get-theme-dark(off), 10); + border-color: lighten(get-theme-dark(off), 10); + } + + .toggle { + transform: translateX(0); + } + + + &:hover { + background: darken(get-theme(off), 20); + border-color: darken(get-theme(off), 20); + + @include theme-dark { + background: darken(get-theme-dark(off), 20); + border-color: darken(get-theme-dark(off), 20); + } + } + } + + /** Options list **/ + .option_list { + @include theme(background, background); + margin-top: $padding/2; + + fieldset { + position: relative; + + h2 { + + .group_label { + min-width: 100px; + } + + } + + &.disabled h2 { + padding: 0 $padding; + + @include media-phablet { + padding: 0 $padding/2; + } + } + } + + fieldset:after { + position: absolute; + content: ''; + display: block; + width: 100%; + border-bottom: 1px solid transparent; + @include theme(border-color, off); + bottom: 0; + + &:last-child { + border-bottom: 0; + } + } + + h2 { + font-size: 1em; + font-weight: 400; + + .hint { + font-weight: 300; + } + } + } + + + .combined_table { + margin-top: $padding; + + .head { + margin: 0 ($padding/2) 0 ($padding+26px); + font-size: .8em; + + abbr { + display: inline-block; + font-weight: bold; + border-bottom: 1px dotted #fff; + line-height: 140%; + cursor: help; + margin-right: $padding/2; + text-align: center; + + &:first-child { + display: none; + } + } + } + + input { + min-width: 0 !important; + display: inline-block; + margin-right: $padding/2; + } + + .use { } + .host, .automation_ids, .automation_urls { width: 200px; } + .api_key, .pass_key { width: 150px; } + .name { width: 150px; } + .extra_score, .seed_ratio, .seed_time { width: 70px; text-align: center; } + .custom_tag { width: 120px; text-align: center; } + + .ctrlHolder { + margin: 0 0 0 $padding; + padding-left: 0; + + .delete { + display: none; + font-size: 20px; + 
width: 22px; + height: 22px; + line-height: 20px; + text-align: center; + vertical-align: middle; + } + + &:hover .delete { + display: inline-block; + } + + &.is_empty .delete, + &.is_empty input[type=checkbox] { + visibility: hidden; + } + } + + } + .disabled .combined_table { display: none; } + + + .tab_about { + .usenet { + padding: $padding $padding 0; + font-size: 1.5em; + line-height: 1.3em; + + @include media-phablet { + padding: $padding/2 !important; + font-size: 1em; + line-height: 1.5em; + } + + a { + @include theme(color, primary); + padding: 0 5px; + } + + ul { + list-style: none; + float: left; + width: 50%; + margin: 10px 0; + padding: 0; + + @include media-phablet { + float: none; + width: auto; + margin: 0; + } + } + + li { + font-size: .8em; + + &:before { + margin-right: 10px; + } + } + } + + .donate { + float: left; + width: 42%; + text-align: center; + font-size: 17px; + padding: 0 0 0 4%; + margin: 20px 0 0; + border-left: 1px solid rgba(0,0,0,.2); + height: 150px; + + @include media-phablet { + padding: 0; + float: none; + width: auto; + margin: 0; + border: none; + } + + @include theme-dark { + border-color: rgba(255,255,255,.2); + } + + iframe { + border: none; + width: 100%; + height: 100%; + } + } + + .info { + padding: $padding; + margin: 0; + overflow: hidden; + + @include media-phablet { + padding: $padding/2; + } + + dt { + clear: both; + float: left; + width: 17%; + font-weight: bold; + + @include media-phablet { + float: none; + width: auto; + } + } + + dd { + float: right; + width: 80%; + padding: 0; + margin: 0; + font-style: italic; + + @include media-phablet { + float: none; + width: auto; + margin-bottom: $padding/2; + } + + &.version { cursor: pointer; } + } + } + + .group_actions { + + > div { + padding: $padding; + text-align: center; + } + + a { + margin: 0 10px; + font-size: 20px; + } + } + } + + .directory { + + @include media-phablet { + width: 100%; + } + + input { + width: 100%; + } + } + + .multi_directory { + + 
.delete { + @include theme(color, primary); + padding: 0 $padding/2; + opacity: .6; + font-size: 1.5em; + + &:hover { + opacity: 1; + } + } + + &.is_empty .delete { + display: none; + } + } + + .choice { + .select_wrapper { + margin-left: $padding; + width: 120px; + min-width: 120px; + + @include media-phablet { + margin: $padding/2 0 0; + } + + select { + min-width: 0 !important; + } + + } + } + + .renamer_to.renamer_to { + flex-flow: row wrap; + + .ctrlHolder { + width: 100%; + } + } + +} + +.directory_list { + z-index: 2; + position: absolute; + width: 450px; + margin: 28px 0 20px 0; + @include theme(background, primary); + box-shadow: 0 0 15px 2px rgba(0,0,0,.15); + border-radius: $border_radius $border_radius 0 0; + + @include theme-dark { + box-shadow: 0 5px 15px 2px rgba(0,0,0,.4); + } + + .pointer { + border-right: 6px solid transparent; + border-left: 6px solid transparent; + border-bottom: 6px solid transparent; + @include theme(border-bottom-color, primary); + display: block; + position: absolute; + width: 0; + margin: -6px 0 0 100px; + } + + .wrapper { + @include theme(background, background); + border-radius: $border_radius $border_radius 0 0; + margin-top: 5px; + } + + ul { + width: 92%; + height: 300px; + overflow: auto; + margin: 0 $padding; + } + + li { + padding: 4px $padding 4px 0; + cursor: pointer; + margin: 0 !important; + border-top: 1px solid rgba(255,255,255,0.1); + overflow: hidden; + white-space: nowrap; + text-overflow: ellipsis; + + &.blur { + opacity: .3; + } + + &:last-child { + border-bottom: 1px solid rgba(255,255,255,0.1); + } + + &:hover { + @include theme(color, primary); + } + + &.empty { + background: none; + height: 100px; + text-align: center; + font-style: italic; + border: none; + line-height: 100px; + cursor: default; + color: #BBB; + text-shadow: none; + font-size: 12px; + } + } + + .actions { + clear: both; + padding: $padding; + min-height: 45px; + position: relative; + width: 100%; + text-align: right; + + label { + 
float: right; + width: auto; + padding: 0; + + input { + margin-left: $padding/2; + } + } + + .back { + font-weight: bold; + width: 160px; + display: inline-block; + padding: 0; + line-height: 120%; + vertical-align: top; + position: absolute; + text-align: left; + left: $padding; + } + + &:last-child { + padding: $padding; + + > span { + padding: 0 5px; + text-shadow: none; + } + + > .clear { + left: $padding; + position: absolute; + top: 50%; + transform: translateY(-50%); + margin: 0; + } + + > .cancel { + opacity: .7; + } + + > .save { + margin-right: 0; + } + } + } +} diff --git a/couchpotato/static/style/uniform.css b/couchpotato/static/style/uniform.css deleted file mode 100644 index 91bc83fcb9..0000000000 --- a/couchpotato/static/style/uniform.css +++ /dev/null @@ -1,154 +0,0 @@ -/* ------------------------------------------------------------------------------ - - Copyright (c) 2010, Dragan Babic - - Permission is hereby granted, free of charge, to any person - obtaining a copy of this software and associated documentation - files (the "Software"), to deal in the Software without - restriction, including without limitation the rights to use, - copy, modify, merge, publish, distribute, sublicense, and/or sell - copies of the Software, and to permit persons to whom the - Software is furnished to do so, subject to the following - conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES - OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT - HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, - WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - OTHER DEALINGS IN THE SOFTWARE. 
- - ------------------------------------------------------------------------------ */ -/* ############################# GENERALS ################################### */ -/* ------------------------------------------------------------------------------ */ - -.uniForm{ margin: 0; padding: 0; position: relative; z-index: 1; } /* reset stuff */ - - /* Some generals and more resets */ - .uniForm fieldset{ border: none; margin: 0; padding: 0; } - .uniForm fieldset legend{ margin: 0; padding: 0; } - - /* This are the main units that contain form elements */ - .uniForm .ctrlHolder, - .uniForm .buttonHolder{ margin: 0; padding: 0; clear: both; } - - /* Clear all floats */ - .uniForm:after, - .uniForm .buttonHolder:after, - .uniForm .ctrlHolder:after, - .uniForm .ctrlHolder .multiField:after, - .uniForm .inlineLabel:after{ content: "."; display: block; height: 0; line-height: 0; font-size: 0; clear: both; min-height: 0; visibility: hidden; } - - .uniForm label, - .uniForm button{ cursor: pointer; } - -/* ------------------------------------------------------------------------------ */ -/* ########################## DEFAULT LAYOUT ################################ */ -/* ------------------------------------------------------------------------------ */ -/* Styles for form controls where labels are above the input elements */ -/* ------------------------------------------------------------------------------ */ - - .uniForm label, - .uniForm .label{ display: block; float: none; margin: 0 0 .5em 0; padding: 0; line-height: 100%; width: auto; } - - /* Float the input elements */ - .uniForm .textInput, - .uniForm .fileUpload, - .uniForm .selectInput, - .uniForm select, - .uniForm textarea{ float: left; width: 53%; margin: 0; } - - /* Postition the hints */ - .uniForm .formHint{ float: right; width: 43%; margin: 0; clear: none; } - - /* Position the elements inside combo boxes (multiple inputs/selects/checkboxes/radio buttons per unit) */ - .uniForm ul{ float: left; width: 53%; 
margin: 0; padding: 0; } - .uniForm ul li{ margin: 0 0 .5em 0; list-style: none; } - .uniForm ul li label{ margin: 0; float: none; display: block; overflow: visible; } - /* Alternate layout */ - .uniForm ul.alternate li{ float: left; width: 30%; margin-right: 3%; } - .uniForm ul.alternate li label{ float: none; display: block; width: 98%; } - .uniForm ul .textInput, - .uniForm ul .selectInput, - .uniForm ul select, - .uniForm ul.alternate .textInput, - .uniForm ul.alternate .selectInput, - .uniForm ul.alternate select{ width: 98%; margin-top: .5em; display: block; float: none; } - - /* Required fields asterisk styling */ - .uniForm label em, - .uniForm .label em{ float: left; width: 1em; margin: 0 0 0 -1em; } - -/* ------------------------------------------------------------------------------ */ -/* ######################### ALTERNATE LAYOUT ############################### */ -/* ------------------------------------------------------------------------------ */ -/* Styles for form controls where labels are in line with the input elements */ -/* Set the class of the parent (preferably to a fieldset) to .inlineLabels */ -/* ------------------------------------------------------------------------------ */ - - .uniForm .inlineLabels label, - .uniForm .inlineLabels .label{ float: left; margin: .3em 2% 0 0; padding: 0; line-height: 1; position: relative; width: 32%; } - - /* Float the input elements */ - .uniForm .inlineLabels .textInput, - .uniForm .inlineLabels .fileUpload, - .uniForm .inlineLabels .selectInput, - .uniForm .inlineLabels select, - .uniForm .inlineLabels textarea{ float: left; width: 64%; } - - /* Postition the hints */ - .uniForm .inlineLabels .formHint{ clear: both; float: none; width: auto; margin-left: 34%; position: static; } - - /* Position the elements inside combo boxes (multiple inputs/selects/checkboxes/radio buttons per unit) */ - .uniForm .inlineLabels ul{ float: left; width: 66%; } - .uniForm .inlineLabels ul li{ margin: .5em 0; } - .uniForm 
.inlineLabels ul li label{ float: none; display: block; width: 100%; } - /* Alternate layout */ - .uniForm .inlineLabels ul.alternate li{ margin-right: 3%; margin-top: .25em; } - .uniForm .inlineLabels ul li label .textInput, - .uniForm .inlineLabels ul li label textarea, - .uniForm .inlineLabels ul li label select{ float: none; display: block; width: 98%; } - - /* Required fields asterisk styling */ - .uniForm .inlineLabels label em, - .uniForm .inlineLabels .label em{ display: block; float: none; margin: 0; position: absolute; right: 0; } - -/* ----------------------------------------------------------------------------- */ -/* ########################### Additional Stuff ################################ */ -/* ----------------------------------------------------------------------------- */ - - /* Generals */ - .uniForm legend{ color: inherit; } - - .uniForm .secondaryAction{ float: left; } - - /* .inlineLabel is used for inputs within labels - checkboxes and radio buttons */ - .uniForm .inlineLabel input, - .uniForm .inlineLabels .inlineLabel input, - .uniForm .blockLabels .inlineLabel input, - /* class .inlineLabel is depreciated */ - .uniForm label input{ float: none; display: inline; margin: 0; padding: 0; border: none; } - - .uniForm .buttonHolder .inlineLabel, - .uniForm .buttonHolder label{ float: left; margin: .5em 0 0 0; width: auto; max-width: 60%; text-align: left; } - - /* When you don't want to use a label */ - .uniForm .inlineLabels .noLabel ul{ margin-left: 34%; /* Match to width of label + gap to field */ } - - /* Classes for control of the widths of the fields */ - .uniForm .small { width: 30% !important; } - .uniForm .medium{ width: 45% !important; } - .uniForm .large { } /* Large is default and should match the value you set for .textInput, textarea or select */ - .uniForm .auto { width: auto !important; } - .uniForm .small, - .uniForm .medium, - .uniForm .auto{ margin-right: 4px; } - -/* Columns */ -.uniForm .col{ float: left; } -.uniForm 
.col{ width: 50%; } \ No newline at end of file diff --git a/couchpotato/static/style/uniform.generic.css b/couchpotato/static/style/uniform.generic.css deleted file mode 100644 index e70a9158b9..0000000000 --- a/couchpotato/static/style/uniform.generic.css +++ /dev/null @@ -1,143 +0,0 @@ -/* ------------------------------------------------------------------------------ - - UNI-FORM DEFAULT by DRAGAN BABIC (v2) | Wed, 31 Mar 10 - - ------------------------------------------------------------------------------ - - Copyright (c) 2010, Dragan Babic - - Permission is hereby granted, free of charge, to any person - obtaining a copy of this software and associated documentation - files (the "Software"), to deal in the Software without - restriction, including without limitation the rights to use, - copy, modify, merge, publish, distribute, sublicense, and/or sell - copies of the Software, and to permit persons to whom the - Software is furnished to do so, subject to the following - conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES - OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT - HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, - WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - OTHER DEALINGS IN THE SOFTWARE. 
- - ------------------------------------------------------------------------------ */ - -.uniForm{} - - .uniForm legend{ font-weight: bold; font-size: 100%; margin: 0; padding: 1.5em 0; } - - .uniForm .ctrlHolder{ padding: 1em; border-bottom: 1px solid #efefef; } - .uniForm .ctrlHolder.focused{ background: #fffcdf; } - - .uniForm .inlineLabels .noLabel{} - - .uniForm .buttonHolder{ background: #efefef; text-align: right; margin: 1.5em 0 0 0; padding: 1.5em; - /* CSS3 */ - border-radius: 4px; - } - .uniForm .buttonHolder .primaryAction{ padding: 10px 22px; line-height: 1; background: #254a86; border: 1px solid #163362; font-size: 12px; font-weight: bold; color: #fff; - /* CSS3 */ - border-radius: 4px; - box-shadow: 1px 1px 0 #fff; - text-shadow: -1px -1px 0 rgba(0,0,0,.25); - } - .uniForm .buttonHolder .primaryAction:active{ position: relative; top: 1px; } - .uniForm .secondaryAction { text-align: left; } - .uniForm button.secondaryAction { background: transparent; border: none; color: #777; margin: 1.25em 0 0 0; padding: 0; } - - .uniForm .inlineLabels label em, - .uniForm .inlineLabels .label em{ font-style: normal; font-weight: bold; } - .uniForm label small{ font-size: .75em; color: #777; } - - .uniForm .textInput, - .uniForm textarea { padding: 4px 2px; border: 1px solid #aaa; background: #fff; } - .uniForm textarea { height: 12em; } - .uniForm select {} - .uniForm .fileUpload {} - - .uniForm ul{} - .uniForm li{} - .uniForm ul li label{ font-size: .85em; } - - .uniForm .small {} - .uniForm .medium{} - .uniForm .large {} /* Large is default and should match the value you set for .textInput, textarea or select */ - .uniForm .auto {} - .uniForm .small, - .uniForm .medium, - .uniForm .auto{} - - /* Get rid of the 'glow' effect in WebKit, optional */ - .uniForm .ctrlHolder .textInput:focus, - .uniForm .ctrlHolder textarea:focus{ outline: none; } - - .uniForm .formHint { font-size: .85em; color: #777; } - .uniForm .inlineLabels .formHint { padding-top: .5em; } - 
.uniForm .ctrlHolder.focused .formHint{ color: #333; } - -/* ----------------------------------------------------------------------------- */ -/* ############################### Messages #################################### */ -/* ----------------------------------------------------------------------------- */ - - /* Error message at the top of the form */ - .uniForm #errorMsg{ background: #ffdfdf; border: 1px solid #f3afb5; margin: 0 0 1.5em 0; padding: 0 1.5em; - /* CSS3 */ - border-radius: 4px; - -webkit-border-radius: 4px; - -moz-border-radius: 4px; - -o-border-radius: 4px; - -khtml-border-radius: 4px; - } - .uniForm #errorMsg h3{} /* Feel free to use a heading level suitable to your page structure */ - .uniForm #errorMsg ol{ margin: 0 0 1.5em 0; padding: 0; } - .uniForm #errorMsg ol li{ margin: 0 0 3px 1.5em; padding: 7px; background: #f6bec1; position: relative; font-size: .85em; - /* CSS3 */ - border-radius: 4px; - -webkit-border-radius: 4px; - -moz-border-radius: 4px; - -o-border-radius: 4px; - -khtml-border-radius: 4px; - } - - .uniForm .ctrlHolder.error, - .uniForm .ctrlHolder.focused.error{ background: #ffdfdf; border: 1px solid #f3afb5; - /* CSS3 */ - border-radius: 4px; - -webkit-border-radius: 4px; - -moz-border-radius: 4px; - -o-border-radius: 4px; - -khtml-border-radius: 4px; - } - .uniForm .ctrlHolder.error input.error, - .uniForm .ctrlHolder.error select.error, - .uniForm .ctrlHolder.error textarea.error{ color: #af4c4c; margin: 0 0 6px 0; padding: 4px; } - - /* Success messages at the top of the form */ - .uniForm #okMsg{ background: #c8ffbf; border: 1px solid #a2ef95; margin: 0 0 1.5em 0; padding: 0 1.5em; text-align: center; - /* CSS3 */ - border-radius: 4px; - -webkit-border-radius: 4px; - -moz-border-radius: 4px; - -o-border-radius: 4px; - -khtml-border-radius: 4px; - } - .uniForm #OKMsg p{ margin: 0; } - -/* ----------------------------------------------------------------------------- */ -/* ############################### Columns 
##################################### */ -/* ----------------------------------------------------------------------------- */ - - .uniForm .col{} - .uniForm .col.first{} - .uniForm .col.last{} - .uniForm .col{ margin-bottom: 1.5em; } - /* Use .first and .last classes to control the layout/spacing of your columns */ - .uniForm .col.first{ width: 49%; float: left; clear: none; } - .uniForm .col.last { width: 49%; float: right; clear: none; margin-right: 0; } \ No newline at end of file diff --git a/couchpotato/templates/_desktop.html b/couchpotato/templates/_desktop.html deleted file mode 100644 index 1d61806632..0000000000 --- a/couchpotato/templates/_desktop.html +++ /dev/null @@ -1,78 +0,0 @@ -<!doctype html> -<html> - <head> - {% for url in fireEvent('clientscript.get_styles', as_html = True, location = 'front', single = True) %} - <link rel="stylesheet" href="{{ url_for('web.index') }}{{ url }}" type="text/css">{% endfor %} - {% for url in fireEvent('clientscript.get_scripts', as_html = True, location = 'front', single = True) %} - <script type="text/javascript" src="{{ url_for('web.index') }}{{ url }}"></script>{% endfor %} - - {% for url in fireEvent('clientscript.get_scripts', as_html = True, location = 'head', single = True) %} - <script type="text/javascript" src="{{ url_for('web.index') }}{{ url }}"></script>{% endfor %} - {% for url in fireEvent('clientscript.get_styles', as_html = True, location = 'head', single = True) %} - <link rel="stylesheet" href="{{ url_for('web.index') }}{{ url }}" type="text/css">{% endfor %} - - <link href="{{ url_for('web.static', filename='images/favicon.ico') }}" rel="icon" type="image/x-icon" /> - <link rel="apple-touch-icon" href="{{ url_for('web.static', filename='images/homescreen.png') }}" /> - - <script type="text/javascript" src="https://www.youtube.com/player_api" defer="defer"></script> - - <script type="text/javascript"> - window.addEvent('domready', function() { - new Uniform(); - - Api.setup({ - 'url': {{ 
url_for('api.index')|tojson|safe }}, - 'path_sep': {{ sep|tojson|safe }}, - 'is_remote': false - }); - - $(document.body).set('data-api', window.location.protocol + '//' + window.location.host + Api.createUrl().replace('/default/', '/')); - - // Catch errors - window.onerror = function(message, file, line){ - - p(message, file, line); - - Api.request('logging.log', { - 'data': { - 'type': 'error', - 'message': Browser.name + ' ' + Browser.version + ': \n' + message, - 'page': window.location.href.replace(window.location.host, 'HOST'), - 'file': file.replace(window.location.host, 'HOST'), - 'line': line - } - }); - - return true; - } - - Quality.setup({ - 'profiles': {{ fireEvent('profile.all', single = True)|tojson|safe }}, - 'qualities': {{ fireEvent('quality.all', single = True)|tojson|safe }} - }); - - Status.setup({{ fireEvent('status.all', single = True)|tojson|safe }}); - - File.Type.setup({{ fireEvent('file.types', single = True)|tojson|safe }}); - - App.setup({ - 'base_url': {{ url_for('web.index')|tojson|safe }}, - 'args': {{ env.get('args')|tojson|safe }}, - 'options': {{ ('%s' % env.get('options'))|tojson|safe }}, - 'app_dir': {{ env.get('app_dir')|tojson|safe }}, - 'data_dir': {{ env.get('data_dir')|tojson|safe }}, - 'pid': {{ env.getPid()|tojson|safe }}, - 'userscript_version': {{ fireEvent('userscript.get_version', single = True)|tojson|safe }} - }); - }) - - {% if env.setting('show_wizard') %} - if(!window.location.href.contains('wizard')) - window.location = '{{ url_for('web.index') }}wizard/' - {% endif %} - - </script> - <title>CouchPotato - - - \ No newline at end of file diff --git a/couchpotato/templates/api.html b/couchpotato/templates/api.html index 45d20f1547..0780935fd7 100644 --- a/couchpotato/templates/api.html +++ b/couchpotato/templates/api.html @@ -1,31 +1,32 @@ +{% autoescape None %} - + - + API documentation

    CouchPotato API Documentation

    - You can access the API via
    {{ fireEvent('app.api_url', single = True)|safe }}/
    - To see it in action, have a look at the webinterface with Firebug (on firefox) or the development tools included in Chrome. + You can access the API via
    {{ Env.get('api_base') }}
    + To see it in action, have a look at the webinterface with Firebug (on firefox) or the development tools included in Chrome. All the data that you see there are from the API.

    A normal API call: -
    {{ fireEvent('app.api_url', single = True)|safe }}/updater.info/
    +
    {{ Env.get('api_base') }}updater.info/

    You can also use the API over another domain using JSONP, the callback function should be in 'callback_func' -
    {{ fireEvent('app.api_url', single = True)|safe }}/updater.info/?callback_func=myfunction
    +
    {{ Env.get('api_base') }}updater.info/?callback_func=myfunction


    Get the API key: -
    {{ url_for('web.index') }}getkey/?p=md5(password)&u=md5(username)
    +
    {{ Env.get('web_base') }}getkey/?p=md5(password)&u=md5(username)
    Will return {"api_key": "XXXXXXXXXX", "success": true}. When username or password is empty you don't need to md5 it.
    - + {% for route in routes %} {% if api_docs.get(route) %}
    @@ -41,9 +42,9 @@

    Params

  • {{ api_docs[route]['params'][param].get('type', 'string') }} {{ api_docs[route]['params'][param]['desc'] }}
    - {% endif %} + {% end %} {% if api_docs[route].get('return') %}

    Return

    @@ -52,14 +53,14 @@

    Return

    {% if api_docs[route]['return'].get('example') %}

    Example

    -
    {{ api_docs[route]['return'].get('example', '')|safe }}
    +
    {{ api_docs[route]['return'].get('example', '') }}
    - {% endif %} + {% end %} - {% endif %} + {% end %} - {% endif %} - {% endfor %} + {% end %} + {% end %}

    Missing documentation

    @@ -67,4 +68,4 @@

    Missing documentation

    - \ No newline at end of file + diff --git a/couchpotato/templates/database.html b/couchpotato/templates/database.html new file mode 100644 index 0000000000..3643f9cd73 --- /dev/null +++ b/couchpotato/templates/database.html @@ -0,0 +1,144 @@ +{% autoescape None %} + + + + + + + + + + Document Manager + + + +

    Documents

    +
    + + + diff --git a/couchpotato/templates/index.html b/couchpotato/templates/index.html index 217d6bfc60..4020773b6f 100644 --- a/couchpotato/templates/index.html +++ b/couchpotato/templates/index.html @@ -1 +1,111 @@ -{% extends "_desktop.html" %} +{% autoescape None %} + + + + CouchPotato + + + + + + + {% set icon_path = Env.get('static_path') + 'images/icons/' %} + {% set themed_icon_path = icon_path + ('dark/' if Env.setting('dark_theme') else '') %} + + + + + + + + + + + + + + + + + + + + {% for url in fireEvent('clientscript.get_scripts', single = True) %} + {% end %} + + {% if Env.get('dev') %} + + {% end %} + + + + {% for url in fireEvent('clientscript.get_styles', single = True) %} + {% end %} + + + diff --git a/couchpotato/templates/login.html b/couchpotato/templates/login.html new file mode 100644 index 0000000000..63c7b59178 --- /dev/null +++ b/couchpotato/templates/login.html @@ -0,0 +1,36 @@ +{% autoescape None %} + + + + + + + + + + {% for url in fireEvent('clientscript.get_styles', single = True) %} + {% end %} + {% for url in fireEvent('clientscript.get_scripts', single = True) %}{% if 'combined.plugins' not in url %} + {% end %}{% end %} + + + + + {% if Env.get('dev') %} + + {% end %} + + CouchPotato + + +
    +

    CouchPotato

    +
    +
    +
    + + +
    +
    + + diff --git a/init/couchpotato.fedora.service b/init/couchpotato.fedora.service deleted file mode 100644 index 7df166bc23..0000000000 --- a/init/couchpotato.fedora.service +++ /dev/null @@ -1,12 +0,0 @@ -[Unit] -Description=CouchPotato application instance - -[Service] -ExecStart=/usr/lib/CouchPotatoServer/CouchPotato.py --daemon -GuessMainPID=no -Type=forking -User=couchpotato -Group=couchpotato - -[Install] -WantedBy=multi-user.target \ No newline at end of file diff --git a/init/couchpotato.fedora.service b/init/couchpotato.fedora.service new file mode 120000 index 0000000000..c8d29d3184 --- /dev/null +++ b/init/couchpotato.fedora.service @@ -0,0 +1 @@ +couchpotato.service \ No newline at end of file diff --git a/init/couchpotato.service b/init/couchpotato.service new file mode 100644 index 0000000000..c9a0c47f68 --- /dev/null +++ b/init/couchpotato.service @@ -0,0 +1,12 @@ +[Unit] +Description=CouchPotato application instance +After=network.target + +[Service] +ExecStart=/var/lib/CouchPotatoServer/CouchPotato.py +Type=simple +User=couchpotato +Group=couchpotato + +[Install] +WantedBy=multi-user.target diff --git a/init/fedora b/init/fedora index 4735247188..ec8a9ccdb1 100644 --- a/init/fedora +++ b/init/fedora @@ -1,3 +1,5 @@ +#!/bin/sh +# ### BEGIN INIT INFO # Provides: CouchPotato application instance # Required-Start: $all diff --git a/init/freebsd b/init/freebsd index d389933245..bf67b48a27 100644 --- a/init/freebsd +++ b/init/freebsd @@ -1,7 +1,7 @@ #!/bin/sh # # PROVIDE: couchpotato -# REQUIRE: DAEMON +# REQUIRE: LOGIN # KEYWORD: shutdown # Add the following lines to /etc/rc.conf to enable couchpotato: diff --git a/init/synology b/init/synology index 859582bd18..c437afad50 100644 --- a/init/synology +++ b/init/synology @@ -15,10 +15,13 @@ CFG_FILE="${INSTALL_DIR}/var/settings.conf" PID_FILE="${INSTALL_DIR}/var/couchpotato.pid" LOG_FILE="${INSTALL_DIR}/var/logs/CouchPotato.log" - -start_daemon() +start_daemon () { -su ${RUNAS} -c "PATH=${PATH} 
${PYTHON} ${COUCHPOTATO} --daemon --pid_file ${PID_FILE} --config ${CFG_FILE}" + if [ `/bin/get_key_value /etc.defaults/VERSION majorversion` -ge 6 ]; then + sudo -u ${RUNAS} PATH=${PATH} ${PYTHON} ${COUCHPOTATO} --daemon --pid_file ${PID_FILE} --config ${CFG_FILE} + else + su ${RUNAS} -c "PATH=${PATH} ${PYTHON} ${COUCHPOTATO} --daemon --pid_file ${PID_FILE} --config ${CFG_FILE}" + fi } stop_daemon() @@ -31,44 +34,45 @@ stop_daemon() daemon_status() { if [ -f ${PID_FILE} ] && [ -d /proc/`cat ${PID_FILE}` ]; then -return 0 + return 0 fi -return 1 + return 1 } wait_for_status() { counter=$2 while [ ${counter} -gt 0 ]; do -daemon_status + daemon_status [ $? -eq $1 ] && break -let counter=counter-1 + let counter=counter-1 sleep 1 done } + case $1 in start) if daemon_status; then -echo ${DNAME} is already running + echo ${DNAME} is already running else -echo Starting ${DNAME} ... + echo Starting ${DNAME} ... start_daemon fi ;; stop) if daemon_status; then -echo Stopping ${DNAME} ... + echo Stopping ${DNAME} ... stop_daemon else -echo ${DNAME} is not running + echo ${DNAME} is not running fi ;; status) if daemon_status; then -echo ${DNAME} is running + echo ${DNAME} is running exit 0 else -echo ${DNAME} is not running + echo ${DNAME} is not running exit 1 fi ;; @@ -78,4 +82,4 @@ echo ${DNAME} is not running *) exit 1 ;; -esac \ No newline at end of file +esac diff --git a/init/ubuntu b/init/ubuntu old mode 100644 new mode 100755 index 376c001f0c..a1b5986cce --- a/init/ubuntu +++ b/init/ubuntu @@ -1,73 +1,107 @@ -#! /bin/sh +#!/bin/sh +# +# DON'T EDIT THIS FILE DIRECTLY! +# +# Instead, create your own configuration by setting any of the 7 variables +# listed below in /etc/default/couchpotato. For example: adding CP_USER=noob +# to /etc/default/couchpotato makes the service run under the 'noob' account, +# overruling the default value of 'couchpotato'. 
+# +# Accepted variables with default values -if any- in parentheses: +# CP_USER # username to run couchpotato under (couchpotato) +# CP_HOME # directory of CouchPotato.py (/opt/couchpotato) +# CP_DATA # directory of couchpotato's db, cache and logs (/var/opt/couchpotato) +# CP_PIDFILE # full path of couchpotato.pid (/var/run/couchpotato/couchpotato.pid) +# PYTHON_BIN # full path of the python binary (/usr/bin/python) +# CP_OPTS # extra cli options for couchpotato, see 'CouchPotato.py --help' +# SSD_OPTS # extra options for start-stop-daemon, see 'man start-stop-daemon' ### BEGIN INIT INFO # Provides: couchpotato -# Required-Start: $local_fs $network $remote_fs -# Required-Stop: $local_fs $network $remote_fs -# Should-Start: $NetworkManager -# Should-Stop: $NetworkManager +# Required-Start: $network $remote_fs +# Required-Stop: $network $remote_fs +# Should-Start: $named deluged network-manager nzbget qbittorrent-nox sabnzbdplus transmission-daemon +# Should-Stop: $named deluged network-manager nzbget qbittorrent-nox sabnzbdplus transmission-daemon # Default-Start: 2 3 4 5 # Default-Stop: 0 1 6 -# Short-Description: starts instance of CouchPotato +# Short-Description: CouchPotato PVR for Usenet and torrents # Description: starts instance of CouchPotato using start-stop-daemon ### END INIT INFO -# Check for existance of defaults file -# and utilze if available -if [ -f /etc/default/couchpotato ]; then - . /etc/default/couchpotato -else - echo "/etc/default/couchpotato not found using default settings."; -fi +DESC=CouchPotato +CONFIG=/etc/default/couchpotato -# Script name -NAME=couchpotato +# don't accept config vars from the shell environment +unset CP_USER CP_HOME CP_DATA CP_PIDFILE PYTHON_BIN CP_OPTS SSD_OPTS -# App name -DESC=CouchPotato +# source lsb init functions +. /lib/lsb/init-functions -# Path to app root -CP_APP_PATH=${APP_PATH-/usr/local/sbin/CouchPotatoServer/} +# try loading the configuration file +[ -r "$CONFIG" ] && . 
"$CONFIG" \ + || log_action_msg "$DESC: $CONFIG unreadable, falling back to default settings" -# User to run CP as -CP_RUN_AS=${RUN_AS-root} +# assorted settings and their defaults +: "${CP_USER:=couchpotato}" +: "${CP_HOME:=/opt/couchpotato}" +: "${CP_DATA:=/var/opt/couchpotato}" +: "${CP_PIDFILE:=/var/run/couchpotato/couchpotato.pid}" +: "${PYTHON_BIN:=/usr/bin/python}" -# Path to python bin -CP_DAEMON=${DAEMON_PATH-/usr/bin/python} +# basic sanity checks +([ -x "$PYTHON_BIN" ] && [ -f "$CP_HOME/CouchPotato.py" ]) || { + log_failure_msg "$DESC: init script setup failed basic sanity checks, aborting!"; + # exit zero since this condition may also occur after a user + # uninstalled cp while leaving this script in place. + exit 0; +} -# Path to store PID file -CP_PID_FILE=${PID_FILE-/var/run/couchpotato.pid} +start_cp() { + # create directories with sensible ownership and permissions + # (but refuse to touch any pre-existing ones) + for D in "$(dirname "$CP_PIDFILE")" "$CP_DATA"; do + [ ! -d "$D" ] && { + install --directory --owner="$CP_USER" --group=root --mode=0750 "$D" || exit 1; + } + done -# Other startup args -CP_DAEMON_OPTS=" CouchPotato.py --daemon --pid_file=${CP_PID_FILE}" +# # for backwards compatibility create an empty pidfile so it +# # can be in any pre-existing directory, even those unwritable +# # for the $CP_USER. PEBCAK? +# [ ! -e "$CP_PIDFILE" ] && { +# touch "$CP_PIDFILE" && \ +# chmod 0600 "$CP_PIDFILE" && \ +# chown "$CP_USER" "$CP_PIDFILE" \ +# || exit 1; +# } -test -x $CP_DAEMON || exit 0 + log_daemon_msg "Starting $DESC" + start-stop-daemon --start --quiet --pidfile "$CP_PIDFILE" --chdir "$CP_HOME" --chuid "$CP_USER" --oknodo --exec "$PYTHON_BIN" $SSD_OPTS -- \ + CouchPotato.py --daemon --quiet --pid_file="$CP_PIDFILE" --data_dir="$CP_DATA" $CP_OPTS + log_end_msg $? || exit $? 
+} -set -e +stop_cp() { + log_daemon_msg "Stopping $DESC" + # for security reasons, require the process to be both: + # 1) listed in the pidfile and 2) running as $CP_USER + start-stop-daemon --stop --quiet --pidfile "$CP_PIDFILE" --user "$CP_USER" --retry 15 --oknodo + log_end_msg $? || exit $? +} case "$1" in - start) - echo "Starting $DESC" - rm -rf $CP_PID_FILE || return 1 - touch $CP_PID_FILE - chown $CP_RUN_AS $CP_PID_FILE - start-stop-daemon -d $CP_APP_PATH -c $CP_RUN_AS --start --background --pidfile $CP_PID_FILE --exec $CP_DAEMON -- $CP_DAEMON_OPTS - ;; - stop) - echo "Stopping $DESC" - start-stop-daemon --stop --pidfile $CP_PID_FILE --retry 15 - ;; - - restart|force-reload) - echo "Restarting $DESC" - start-stop-daemon --stop --pidfile $CP_PID_FILE --retry 15 - start-stop-daemon -d $CP_APP_PATH -c $CP_RUN_AS --start --background --pidfile $CP_PID_FILE --exec $CP_DAEMON -- $CP_DAEMON_OPTS - ;; - *) - N=/etc/init.d/$NAME - echo "Usage: $N {start|stop|restart|force-reload}" >&2 - exit 1 - ;; + start) + start_cp;; + stop) + stop_cp;; + restart|force-reload) + stop_cp && start_cp;; + status) + status_of_proc -p "$CP_PIDFILE" "$PYTHON_BIN" "$DESC" + exit $?;; + *) + echo "Usage: $0 {start|stop|restart|force-reload|status}" >&2 + exit 3;; esac exit 0 diff --git a/init/ubuntu.default b/init/ubuntu.default index 0d1e712859..d4b8fcbd11 100644 --- a/init/ubuntu.default +++ b/init/ubuntu.default @@ -1,5 +1,19 @@ -# COPY THIS FILE TO /etc/default/couchpotato -# OPTIONS: APP_PATH, RUN_AS, DAEMON_PATH, CP_PID_FILE +# COPY THIS FILE TO /etc/default/couchpotato +# Accepted variables with default values -if any- in parentheses: -APP_PATH= -RUN_AS=root \ No newline at end of file +# username to run couchpotato under (couchpotato) +CP_USER=couchpotato +# directory of CouchPotato.py (/opt/couchpotato) +CP_HOME= + +# directory of couchpotato's db, cache and logs (/var/opt/couchpotato) +CP_DATA= +# full path of couchpotato.pid (/var/run/couchpotato/couchpotato.pid) +CP_PIDFILE= 
+# full path of the python binary (/usr/bin/python) +PYTHON_BIN= + +# extra cli options for couchpotato, see 'CouchPotato.py --help' +CP_OPTS= +# extra options for start-stop-daemon, see 'man start-stop-daemon' +SSD_OPTS= diff --git a/libs/CodernityDB/__init__.py b/libs/CodernityDB/__init__.py new file mode 100644 index 0000000000..c059538f48 --- /dev/null +++ b/libs/CodernityDB/__init__.py @@ -0,0 +1,20 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Copyright 2011-2013 Codernity (http://codernity.com) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +__version__ = '0.5.0' +__license__ = "Apache 2.0" diff --git a/libs/CodernityDB/database.py b/libs/CodernityDB/database.py new file mode 100644 index 0000000000..7aa177a82f --- /dev/null +++ b/libs/CodernityDB/database.py @@ -0,0 +1,1214 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Copyright 2011-2013 Codernity (http://codernity.com) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import os +import io +from inspect import getsource + +# for custom indexes +from CodernityDB.storage import Storage, IU_Storage +from CodernityDB.hash_index import (IU_UniqueHashIndex, + IU_HashIndex, + HashIndex, + UniqueHashIndex) +# normal imports + +from CodernityDB.index import (ElemNotFound, + DocIdNotFound, + IndexException, + Index, + TryReindexException, + ReindexException, + IndexNotFoundException, + IndexConflict) + +from CodernityDB.misc import NONE + +from CodernityDB.env import cdb_environment + +from random import randrange + +import warnings + + +def header_for_indexes(index_name, index_class, db_custom="", ind_custom="", classes_code=""): + return """# %s +# %s + +# inserted automatically +import os +import marshal + +import struct +import shutil + +from hashlib import md5 + +# custom db code start +# db_custom +%s + +# custom index code start +# ind_custom +%s + +# source of classes in index.classes_code +# classes_code +%s + +# index code start + +""" % (index_name, index_class, db_custom, ind_custom, classes_code) + + +class DatabaseException(Exception): + pass + + +class PreconditionsException(DatabaseException): + pass + + +class RecordDeleted(DatabaseException): + pass + + +class RecordNotFound(DatabaseException): + pass + + +class RevConflict(DatabaseException): + pass + + +class DatabaseConflict(DatabaseException): + pass + + +class DatabasePathException(DatabaseException): + pass + + +class DatabaseIsNotOpened(PreconditionsException): + pass + + +class Database(object): + """ + A default single thread database object. + """ + + custom_header = "" # : use it for imports required by your database + + def __init__(self, path): + self.path = path + self.storage = None + self.indexes = [] + self.id_ind = None + self.indexes_names = {} + self.opened = False + + def create_new_rev(self, old_rev=None): + """ + Creates new revision number based on previous one. + Increments it + random bytes. On overflow starts from 0 again. 
+ """ + if old_rev: + try: + rev_num = int(old_rev[:4], 16) + except: + raise RevConflict() + rev_num += 1 + if rev_num > 65025: + # starting the counter from 0 again + rev_num = 0 + rnd = randrange(65536) + return "%04x%04x" % (rev_num, rnd) + else: + # new rev + rnd = randrange(256 ** 2) + return '0001%04x' % rnd + + def __not_opened(self): + if not self.opened: + raise DatabaseIsNotOpened("Database is not opened") + + def set_indexes(self, indexes=[]): + """ + Set indexes using ``indexes`` param + + :param indexes: indexes to set in db + :type indexes: iterable of :py:class:`CodernityDB.index.Index` objects. + + """ + for ind in indexes: + self.add_index(ind, create=False) + + def _add_single_index(self, p, i, index): + """ + Adds single index to a database. + It will use :py:meth:`inspect.getsource` to get class source. + Then it will build real index file, save it in ``_indexes`` directory. + """ + code = getsource(index.__class__) + if not code.startswith('c'): # fix for indented index codes + import textwrap + code = textwrap.dedent(code) + index._order = i + cls_code = getattr(index, 'classes_code', []) + classes_code = "" + for curr in cls_code: + classes_code += getsource(curr) + '\n\n' + with io.FileIO(os.path.join(p, "%.2d%s" % (i, index.name) + '.py'), 'w') as f: + f.write(header_for_indexes(index.name, + index.__class__.__name__, + getattr(self, 'custom_header', ''), + getattr(index, 'custom_header', ''), + classes_code)) + f.write(code) + return True + + def _read_index_single(self, p, ind, ind_kwargs={}): + """ + It will read single index from index file (ie. generated in :py:meth:`._add_single_index`). 
+ Then it will perform ``exec`` on that code + + If error will occur the index file will be saved with ``_broken`` suffix + + :param p: path + :param ind: index name (will be joined with *p*) + :returns: new index object + """ + with io.FileIO(os.path.join(p, ind), 'r') as f: + name = f.readline()[2:].strip() + _class = f.readline()[2:].strip() + code = f.read() + try: + obj = compile(code, '', f.__name__, repr(args[1:]) + res = f(*args, **kwargs) +# if db.opened: +# db.flush() +# print '<=', f.__name__, repr(args[1:]) + return res + return _inner + + def __new__(cls, classname, bases, attr): + new_attr = {} + for base in bases: + for b_attr in dir(base): + a = getattr(base, b_attr, None) + if isinstance(a, MethodType) and not b_attr.startswith('_'): + if b_attr == 'flush' or b_attr == 'flush_indexes': + pass + else: + # setattr(base, b_attr, SuperLock.wrapper(a)) + new_attr[b_attr] = SuperLock.wrapper(a) + for attr_name, attr_value in attr.iteritems(): + if isinstance(attr_value, FunctionType) and not attr_name.startswith('_'): + attr_value = SuperLock.wrapper(attr_value) + new_attr[attr_name] = attr_value + new_attr['super_lock'] = RLock() + return type.__new__(cls, classname, bases, new_attr) + + +class SuperThreadSafeDatabase(Database): + """ + Thread safe version that always allows single thread to use db. + It adds the same lock for all methods, so only one operation can be + performed in given time. 
Completely different implementation + than ThreadSafe version (without super word) + """ + + __metaclass__ = SuperLock + + def __init__(self, *args, **kwargs): + super(SuperThreadSafeDatabase, self).__init__(*args, **kwargs) + + def __patch_index_gens(self, name): + ind = self.indexes_names[name] + for c in ('all', 'get_many'): + m = getattr(ind, c) + if getattr(ind, c + "_orig", None): + return + m_fixed = th_safe_gen.wrapper(m, name, c, self.super_lock) + setattr(ind, c, m_fixed) + setattr(ind, c + '_orig', m) + + def open(self, *args, **kwargs): + res = super(SuperThreadSafeDatabase, self).open(*args, **kwargs) + for name in self.indexes_names.iterkeys(): + self.__patch_index_gens(name) + return res + + def create(self, *args, **kwargs): + res = super(SuperThreadSafeDatabase, self).create(*args, **kwargs) + for name in self.indexes_names.iterkeys(): + self.__patch_index_gens(name) + return res + + def add_index(self, *args, **kwargs): + res = super(SuperThreadSafeDatabase, self).add_index(*args, **kwargs) + self.__patch_index_gens(res) + return res + + def edit_index(self, *args, **kwargs): + res = super(SuperThreadSafeDatabase, self).edit_index(*args, **kwargs) + self.__patch_index_gens(res) + return res diff --git a/libs/CodernityDB/database_thread_safe.py b/libs/CodernityDB/database_thread_safe.py new file mode 100644 index 0000000000..5349e09b32 --- /dev/null +++ b/libs/CodernityDB/database_thread_safe.py @@ -0,0 +1,35 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Copyright 2011-2013 Codernity (http://codernity.com) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from threading import RLock + +from CodernityDB.env import cdb_environment + +cdb_environment['mode'] = "threads" +cdb_environment['rlock_obj'] = RLock + + +from database_safe_shared import SafeDatabase + + +class ThreadSafeDatabase(SafeDatabase): + """ + Thread safe version of CodernityDB that uses several lock objects, + on different methods / different indexes etc. It's completely different + implementation of locking than SuperThreadSafe one. + """ + pass diff --git a/libs/CodernityDB/debug_stuff.py b/libs/CodernityDB/debug_stuff.py new file mode 100644 index 0000000000..2dce69513f --- /dev/null +++ b/libs/CodernityDB/debug_stuff.py @@ -0,0 +1,211 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Copyright 2011-2013 Codernity (http://codernity.com) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from CodernityDB.tree_index import TreeBasedIndex +import struct +import os + +import inspect +from functools import wraps +import json + + +class DebugTreeBasedIndex(TreeBasedIndex): + + def __init__(self, *args, **kwargs): + super(DebugTreeBasedIndex, self).__init__(*args, **kwargs) + + def print_tree(self): + print '-----CURRENT TREE-----' + print self.root_flag + + if self.root_flag == 'l': + print '---ROOT---' + self._print_leaf_data(self.data_start) + return + else: + print '---ROOT---' + self._print_node_data(self.data_start) + nr_of_el, children_flag = self._read_node_nr_of_elements_and_children_flag( + self.data_start) + nodes = [] + for index in range(nr_of_el): + l_pointer, key, r_pointer = self._read_single_node_key( + self.data_start, index) + nodes.append(l_pointer) + nodes.append(r_pointer) + print 'ROOT NODES', nodes + while children_flag == 'n': + self._print_level(nodes, 'n') + new_nodes = [] + for node in nodes: + nr_of_el, children_flag = \ + self._read_node_nr_of_elements_and_children_flag(node) + for index in range(nr_of_el): + l_pointer, key, r_pointer = self._read_single_node_key( + node, index) + new_nodes.append(l_pointer) + new_nodes.append(r_pointer) + nodes = new_nodes + self._print_level(nodes, 'l') + + def _print_level(self, nodes, flag): + print '---NEXT LVL---' + if flag == 'n': + for node in nodes: + self._print_node_data(node) + elif flag == 'l': + for node in nodes: + self._print_leaf_data(node) + + def _print_leaf_data(self, leaf_start_position): + print 'printing data of leaf at', leaf_start_position + nr_of_elements = self._read_leaf_nr_of_elements(leaf_start_position) + self.buckets.seek(leaf_start_position) + data = self.buckets.read(self.leaf_heading_size + + nr_of_elements * self.single_leaf_record_size) + leaf = struct.unpack('<' + self.leaf_heading_format + + nr_of_elements * self.single_leaf_record_format, data) + print leaf + print + + def _print_node_data(self, node_start_position): + print 'printing data of node 
at', node_start_position + nr_of_elements = self._read_node_nr_of_elements_and_children_flag( + node_start_position)[0] + self.buckets.seek(node_start_position) + data = self.buckets.read(self.node_heading_size + self.pointer_size + + nr_of_elements * (self.key_size + self.pointer_size)) + node = struct.unpack('<' + self.node_heading_format + self.pointer_format + + nr_of_elements * ( + self.key_format + self.pointer_format), + data) + print node + print +# ------------------> + + +def database_step_by_step(db_obj, path=None): + + if not path: + # ugly for multiplatform support.... + p = db_obj.path + p1 = os.path.split(p) + p2 = os.path.split(p1[0]) + p3 = '_'.join([p2[1], 'operation_logger.log']) + path = os.path.join(os.path.split(p2[0])[0], p3) + f_obj = open(path, 'wb') + + __stack = [] # inspect.stack() is not working on pytest etc + + def remove_from_stack(name): + for i in range(len(__stack)): + if __stack[-i] == name: + __stack.pop(-i) + + def __dumper(f): + @wraps(f) + def __inner(*args, **kwargs): + funct_name = f.__name__ + if funct_name == 'count': + name = args[0].__name__ + meth_args = (name,) + args[1:] + elif funct_name in ('reindex_index', 'compact_index'): + name = args[0].name + meth_args = (name,) + args[1:] + else: + meth_args = args + kwargs_copy = kwargs.copy() + res = None + __stack.append(funct_name) + if funct_name == 'insert': + try: + res = f(*args, **kwargs) + except: + packed = json.dumps((funct_name, + meth_args, kwargs_copy, None)) + f_obj.write('%s\n' % packed) + f_obj.flush() + raise + else: + packed = json.dumps((funct_name, + meth_args, kwargs_copy, res)) + f_obj.write('%s\n' % packed) + f_obj.flush() + else: + if funct_name == 'get': + for curr in __stack: + if ('delete' in curr or 'update' in curr) and not curr.startswith('test'): + remove_from_stack(funct_name) + return f(*args, **kwargs) + packed = json.dumps((funct_name, meth_args, kwargs_copy)) + f_obj.write('%s\n' % packed) + f_obj.flush() + res = f(*args, **kwargs) + 
remove_from_stack(funct_name) + return res + return __inner + + for meth_name, meth_f in inspect.getmembers(db_obj, predicate=inspect.ismethod): + if not meth_name.startswith('_'): + setattr(db_obj, meth_name, __dumper(meth_f)) + + setattr(db_obj, 'operation_logger', f_obj) + + +def database_from_steps(db_obj, path): + # db_obj.insert=lambda data : insert_for_debug(db_obj, data) + with open(path, 'rb') as f_obj: + for current in f_obj: + line = json.loads(current[:-1]) + if line[0] == 'count': + obj = getattr(db_obj, line[1][0]) + line[1] = [obj] + line[1][1:] + name = line[0] + if name == 'insert': + try: + line[1][0].pop('_rev') + except: + pass + elif name in ('delete', 'update'): + el = db_obj.get('id', line[1][0]['_id']) + line[1][0]['_rev'] = el['_rev'] +# print 'FROM STEPS doing', line + meth = getattr(db_obj, line[0], None) + if not meth: + raise Exception("Method = `%s` not found" % line[0]) + + meth(*line[1], **line[2]) + + +# def insert_for_debug(self, data): +# +# _rev = data['_rev'] +# +# if not '_id' in data: +# _id = uuid4().hex +# else: +# _id = data['_id'] +# data['_id'] = _id +# try: +# _id = bytes(_id) +# except: +# raise DatabaseException("`_id` must be valid bytes object") +# self._insert_indexes(_id, _rev, data) +# ret = {'_id': _id, '_rev': _rev} +# data.update(ret) +# return ret diff --git a/libs/CodernityDB/env.py b/libs/CodernityDB/env.py new file mode 100644 index 0000000000..69ca8cddc3 --- /dev/null +++ b/libs/CodernityDB/env.py @@ -0,0 +1,25 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Copyright 2011-2013 Codernity (http://codernity.com) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +It's CodernityDB environment. +Handles internal informations.' +""" + +cdb_environment = { + 'mode': 'normal' +} diff --git a/libs/CodernityDB/hash_index.py b/libs/CodernityDB/hash_index.py new file mode 100644 index 0000000000..cd160fd0a5 --- /dev/null +++ b/libs/CodernityDB/hash_index.py @@ -0,0 +1,880 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Copyright 2011-2013 Codernity (http://codernity.com) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +from CodernityDB.index import (Index, + IndexException, + DocIdNotFound, + ElemNotFound, + TryReindexException, + IndexPreconditionsException) + +import os +import marshal +import io +import struct +import shutil + +from CodernityDB.storage import IU_Storage, DummyStorage + +from CodernityDB.env import cdb_environment + +if cdb_environment.get('rlock_obj'): + from CodernityDB import patch + patch.patch_cache_rr(cdb_environment['rlock_obj']) + +from CodernityDB.rr_cache import cache1lvl + + +from CodernityDB.misc import random_hex_32 + +try: + from CodernityDB import __version__ +except ImportError: + from __init__ import __version__ + + +class IU_HashIndex(Index): + """ + That class is for Internal Use only, if you want to use HashIndex just subclass the :py:class:`HashIndex` instead this one. + + That design is because main index logic should be always in database not in custom user indexes. + """ + + def __init__(self, db_path, name, entry_line_format='<32s{key}IIcI', hash_lim=0xfffff, storage_class=None, key_format='c'): + """ + The index is capable to solve conflicts by `Separate chaining` + :param db_path: database path + :type db_path: string + :param name: index name + :type name: ascii string + :param line_format: line format, `key_format` parameter value will replace `{key}` if present. 
+ :type line_format: string (32s{key}IIcI by default) {doc_id}{hash_key}{start}{size}{status}{next} + :param hash_lim: maximum hash functon results (remember about birthday problem) count from 0 + :type hash_lim: integer + :param storage_class: Storage class by default it will open standard :py:class:`CodernityDB.storage.Storage` (if string has to be accesible by globals()[storage_class]) + :type storage_class: class name which will be instance of CodernityDB.storage.Storage instance or None + :param key_format: a index key format + """ + if key_format and '{key}' in entry_line_format: + entry_line_format = entry_line_format.replace('{key}', key_format) + super(IU_HashIndex, self).__init__(db_path, name) + self.hash_lim = hash_lim + if not storage_class: + storage_class = IU_Storage + if storage_class and not isinstance(storage_class, basestring): + storage_class = storage_class.__name__ + self.storage_class = storage_class + self.storage = None + + self.bucket_line_format = "= self.data_start: + self.buckets.seek(pos_prev) + data = self.buckets.read(self.entry_line_size) + if data: + doc_id, l_key, start, size, status, _next = self.entry_struct.unpack(data) + self.buckets.seek(pos_prev) + self.buckets.write(self.entry_struct.pack(doc_id, + l_key, + start, + size, + status, + pos_next)) + self.flush() + if pos_next: + self.buckets.seek(pos_next) + data = self.buckets.read(self.entry_line_size) + if data: + doc_id, l_key, start, size, status, _next = self.entry_struct.unpack(data) + self.buckets.seek(pos_next) + self.buckets.write(self.entry_struct.pack(doc_id, + l_key, + start, + size, + status, + _next)) + self.flush() + return + + def delete(self, doc_id, key, start=0, size=0): + start_position = self._calculate_position(key) + self.buckets.seek(start_position) + curr_data = self.buckets.read(self.bucket_line_size) + if curr_data: + location = self.bucket_struct.unpack(curr_data)[0] + else: + # case happens when trying to delete element with new index key in data 
+ # after adding new index to database without reindex + raise TryReindexException() + found_at, _doc_id, _key, start, size, status, _next = self._locate_doc_id(doc_id, key, location) + self.buckets.seek(found_at) + self.buckets.write(self.entry_struct.pack(doc_id, + key, + start, + size, + 'd', + _next)) + self.flush() + # self._fix_link(_key, _prev, _next) + self._find_key.delete(key) + self._locate_doc_id.delete(doc_id) + return True + + def compact(self, hash_lim=None): + + if not hash_lim: + hash_lim = self.hash_lim + + compact_ind = self.__class__( + self.db_path, self.name + '_compact', hash_lim=hash_lim) + compact_ind.create_index() + + gen = self.all() + while True: + try: + doc_id, key, start, size, status = gen.next() + except StopIteration: + break + self.storage._f.seek(start) + value = self.storage._f.read(size) + start_ = compact_ind.storage._f.tell() + compact_ind.storage._f.write(value) + compact_ind.insert(doc_id, key, start_, size, status) + + compact_ind.close_index() + original_name = self.name + # os.unlink(os.path.join(self.db_path, self.name + "_buck")) + self.close_index() + shutil.move(os.path.join(compact_ind.db_path, compact_ind. + name + "_buck"), os.path.join(self.db_path, self.name + "_buck")) + shutil.move(os.path.join(compact_ind.db_path, compact_ind. + name + "_stor"), os.path.join(self.db_path, self.name + "_stor")) + # self.name = original_name + self.open_index() # reload... + self.name = original_name + self._save_params(dict(name=original_name)) + self._fix_params() + self._clear_cache() + return True + + def make_key(self, key): + return key + + def make_key_value(self, data): + return '1', data + + def _clear_cache(self): + self._find_key.clear() + self._locate_doc_id.clear() + + def close_index(self): + super(IU_HashIndex, self).close_index() + self._clear_cache() + + +class IU_UniqueHashIndex(IU_HashIndex): + """ + Index for *unique* keys! Designed to be a **id** index. 
+ + That class is for Internal Use only, if you want to use UniqueHashIndex just subclass the :py:class:`UniqueHashIndex` instead this one. + + That design is because main index logic should be always in database not in custom user indexes. + """ + + def __init__(self, db_path, name, entry_line_format="<32s8sIIcI", *args, **kwargs): + if 'key' in kwargs: + raise IndexPreconditionsException( + "UniqueHashIndex doesn't accept key parameter'") + super(IU_UniqueHashIndex, self).__init__(db_path, name, + entry_line_format, *args, **kwargs) + self.create_key = random_hex_32 # : set the function to create random key when no _id given + # self.entry_struct=struct.Struct(entry_line_format) + +# @lfu_cache(100) + def _find_key(self, key): + """ + Find the key position + + :param key: the key to find + """ + start_position = self._calculate_position(key) + self.buckets.seek(start_position) + curr_data = self.buckets.read(self.bucket_line_size) + if curr_data: + location = self.bucket_struct.unpack(curr_data)[0] + found_at, l_key, rev, start, size, status, _next = self._locate_key( + key, location) + return l_key, rev, start, size, status + else: + return None, None, 0, 0, 'u' + + def _find_key_many(self, *args, **kwargs): + raise NotImplementedError() + + def _find_place(self, start, key): + """ + Find a place to where put the key. It will iterate using `next` field in record, until + empty `next` found + + :param start: position to start from + """ + location = start + while True: + self.buckets.seek(location) + data = self.buckets.read(self.entry_line_size) + # todo, maybe partial read there... 
+ l_key, rev, start, size, status, _next = self.entry_struct.unpack( + data) + if l_key == key: + raise IndexException("The '%s' key already exists" % key) + if not _next or status == 'd': + return self.buckets.tell() - self.entry_line_size, l_key, rev, start, size, status, _next + else: + location = _next # go to next record + + # @lfu_cache(100) + def _locate_key(self, key, start): + """ + Locate position of the key, it will iterate using `next` field in record + until required key will be find. + + :param key: the key to locate + :param start: position to start from + """ + location = start + while True: + self.buckets.seek(location) + data = self.buckets.read(self.entry_line_size) + # todo, maybe partial read there... + try: + l_key, rev, start, size, status, _next = self.entry_struct.unpack(data) + except struct.error: + raise ElemNotFound("Location '%s' not found" % key) + if l_key == key: + break + else: + if not _next: + # not found + raise ElemNotFound("Location '%s' not found" % key) + else: + location = _next # go to next record + return self.buckets.tell() - self.entry_line_size, l_key, rev, start, size, status, _next + + def update(self, key, rev, u_start=0, u_size=0, u_status='o'): + start_position = self._calculate_position(key) + self.buckets.seek(start_position) + curr_data = self.buckets.read(self.bucket_line_size) + # test if it's unique or not really unique hash + + if curr_data: + location = self.bucket_struct.unpack(curr_data)[0] + else: + raise ElemNotFound("Location '%s' not found" % key) + found_at, _key, _rev, start, size, status, _next = self._locate_key( + key, location) + if u_start == 0: + u_start = start + if u_size == 0: + u_size = size + self.buckets.seek(found_at) + self.buckets.write(self.entry_struct.pack(key, + rev, + u_start, + u_size, + u_status, + _next)) + self.flush() + self._find_key.delete(key) + return True + + def insert(self, key, rev, start, size, status='o'): + start_position = self._calculate_position(key) + 
self.buckets.seek(start_position) + curr_data = self.buckets.read(self.bucket_line_size) + + # conflict occurs? + if curr_data: + location = self.bucket_struct.unpack(curr_data)[0] + else: + location = 0 + if location: + # last key with that hash + found_at, _key, _rev, _start, _size, _status, _next = self._find_place( + location, key) + self.buckets.seek(0, 2) + wrote_at = self.buckets.tell() + + # check if position is bigger than all hash entries... + if wrote_at < self.data_start: + self.buckets.seek(self.data_start) + wrote_at = self.buckets.tell() + + self.buckets.write(self.entry_struct.pack(key, + rev, + start, + size, + status, + _next)) + +# self.flush() + self.buckets.seek(found_at) + self.buckets.write(self.entry_struct.pack(_key, + _rev, + _start, + _size, + _status, + wrote_at)) + self.flush() + self._find_key.delete(_key) + # self._locate_key.delete(_key) + return True + # raise NotImplementedError + else: + self.buckets.seek(0, 2) + wrote_at = self.buckets.tell() + + # check if position is bigger than all hash entries... 
+ if wrote_at < self.data_start: + self.buckets.seek(self.data_start) + wrote_at = self.buckets.tell() + + self.buckets.write(self.entry_struct.pack(key, + rev, + start, + size, + status, + 0)) +# self.flush() + self.buckets.seek(start_position) + self.buckets.write(self.bucket_struct.pack(wrote_at)) + self.flush() + self._find_key.delete(key) + return True + + def all(self, limit=-1, offset=0): + self.buckets.seek(self.data_start) + while offset: + curr_data = self.buckets.read(self.entry_line_size) + if not curr_data: + break + try: + doc_id, rev, start, size, status, next = self.entry_struct.unpack(curr_data) + except IndexException: + break + else: + if status != 'd': + offset -= 1 + + while limit: + curr_data = self.buckets.read(self.entry_line_size) + if not curr_data: + break + try: + doc_id, rev, start, size, status, next = self.entry_struct.unpack(curr_data) + except IndexException: + break + else: + if status != 'd': + yield doc_id, rev, start, size, status + limit -= 1 + + def get_many(self, *args, **kwargs): + raise NotImplementedError() + + def delete(self, key, start=0, size=0): + self.update(key, '00000000', start, size, 'd') + + def make_key_value(self, data): + _id = data['_id'] + try: + _id = bytes(data['_id']) + except: + raise IndexPreconditionsException( + "_id must be valid string/bytes object") + if len(_id) != 32: + raise IndexPreconditionsException("Invalid _id lenght") + del data['_id'] + del data['_rev'] + return _id, data + + def destroy(self): + Index.destroy(self) + self._clear_cache() + + def _clear_cache(self): + self._find_key.clear() + + def insert_with_storage(self, _id, _rev, value): + if value: + start, size = self.storage.insert(value) + else: + start = 1 + size = 0 + return self.insert(_id, _rev, start, size) + + def update_with_storage(self, _id, _rev, value): + if value: + start, size = self.storage.insert(value) + else: + start = 1 + size = 0 + return self.update(_id, _rev, start, size) + + +class 
DummyHashIndex(IU_HashIndex): + def __init__(self, db_path, name, entry_line_format="<32s4sIIcI", *args, **kwargs): + super(DummyHashIndex, self).__init__(db_path, name, + entry_line_format, *args, **kwargs) + self.create_key = random_hex_32 # : set the function to create random key when no _id given + # self.entry_struct=struct.Struct(entry_line_format) + + def update(self, *args, **kwargs): + return True + + def insert(self, *args, **kwargs): + return True + + def all(self, *args, **kwargs): + raise StopIteration + + def get(self, *args, **kwargs): + raise ElemNotFound + + def get_many(self, *args, **kwargs): + raise StopIteration + + def delete(self, *args, **kwargs): + pass + + def make_key_value(self, data): + return '1', {'_': 1} + + def destroy(self): + pass + + def _clear_cache(self): + pass + + def _open_storage(self): + if not self.storage: + self.storage = DummyStorage() + self.storage.open() + + def _create_storage(self): + if not self.storage: + self.storage = DummyStorage() + self.storage.create() + + +class IU_MultiHashIndex(IU_HashIndex): + """ + Class that allows to index more than one key per database record. + + It operates very well on GET/INSERT. 
It's not optimized for + UPDATE operations (will always readd everything) + """ + + def __init__(self, *args, **kwargs): + super(IU_MultiHashIndex, self).__init__(*args, **kwargs) + + def insert(self, doc_id, key, start, size, status='o'): + if isinstance(key, (list, tuple)): + key = set(key) + elif not isinstance(key, set): + key = set([key]) + ins = super(IU_MultiHashIndex, self).insert + for curr_key in key: + ins(doc_id, curr_key, start, size, status) + return True + + def update(self, doc_id, key, u_start, u_size, u_status='o'): + if isinstance(key, (list, tuple)): + key = set(key) + elif not isinstance(key, set): + key = set([key]) + upd = super(IU_MultiHashIndex, self).update + for curr_key in key: + upd(doc_id, curr_key, u_start, u_size, u_status) + + def delete(self, doc_id, key, start=0, size=0): + if isinstance(key, (list, tuple)): + key = set(key) + elif not isinstance(key, set): + key = set([key]) + delete = super(IU_MultiHashIndex, self).delete + for curr_key in key: + delete(doc_id, curr_key, start, size) + + def get(self, key): + return super(IU_MultiHashIndex, self).get(key) + + def make_key_value(self, data): + raise NotImplementedError() + + +# classes for public use, done in this way because of +# generation static files with indexes (_index directory) + + +class HashIndex(IU_HashIndex): + """ + That class is designed to be used in custom indexes. + """ + pass + + +class UniqueHashIndex(IU_UniqueHashIndex): + """ + That class is designed to be used in custom indexes. It's designed to be **id** index. + """ + pass + + +class MultiHashIndex(IU_MultiHashIndex): + """ + That class is designed to be used in custom indexes. 
+ """ diff --git a/libs/CodernityDB/index.py b/libs/CodernityDB/index.py new file mode 100644 index 0000000000..48db2a4a63 --- /dev/null +++ b/libs/CodernityDB/index.py @@ -0,0 +1,195 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Copyright 2011-2013 Codernity (http://codernity.com) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import os +import marshal + +import struct +import shutil + +from CodernityDB.storage import IU_Storage, DummyStorage + +try: + from CodernityDB import __version__ +except ImportError: + from __init__ import __version__ + + +import io + + +class IndexException(Exception): + pass + + +class IndexNotFoundException(IndexException): + pass + + +class ReindexException(IndexException): + pass + + +class TryReindexException(ReindexException): + pass + + +class ElemNotFound(IndexException): + pass + + +class DocIdNotFound(ElemNotFound): + pass + + +class IndexConflict(IndexException): + pass + + +class IndexPreconditionsException(IndexException): + pass + + +class Index(object): + + __version__ = __version__ + + custom_header = "" # : use it for imports required by your index + + def __init__(self, + db_path, + name): + self.name = name + self._start_ind = 500 + self.db_path = db_path + + def open_index(self): + if not os.path.isfile(os.path.join(self.db_path, self.name + '_buck')): + raise IndexException("Doesn't exists") + self.buckets = io.open( + os.path.join(self.db_path, self.name + "_buck"), 'r+b', buffering=0) + 
self._fix_params() + self._open_storage() + + def _close(self): + self.buckets.close() + self.storage.close() + + def close_index(self): + self.flush() + self.fsync() + self._close() + + def create_index(self): + raise NotImplementedError() + + def _fix_params(self): + self.buckets.seek(0) + props = marshal.loads(self.buckets.read(self._start_ind)) + for k, v in props.iteritems(): + self.__dict__[k] = v + self.buckets.seek(0, 2) + + def _save_params(self, in_params={}): + self.buckets.seek(0) + props = marshal.loads(self.buckets.read(self._start_ind)) + props.update(in_params) + self.buckets.seek(0) + data = marshal.dumps(props) + if len(data) > self._start_ind: + raise IndexException("To big props") + self.buckets.write(data) + self.flush() + self.buckets.seek(0, 2) + self.__dict__.update(props) + + def _open_storage(self, *args, **kwargs): + pass + + def _create_storage(self, *args, **kwargs): + pass + + def _destroy_storage(self, *args, **kwargs): + self.storage.destroy() + + def _find_key(self, key): + raise NotImplementedError() + + def update(self, doc_id, key, start, size): + raise NotImplementedError() + + def insert(self, doc_id, key, start, size): + raise NotImplementedError() + + def get(self, key): + raise NotImplementedError() + + def get_many(self, key, start_from=None, limit=0): + raise NotImplementedError() + + def all(self, start_pos): + raise NotImplementedError() + + def delete(self, key, start, size): + raise NotImplementedError() + + def make_key_value(self, data): + raise NotImplementedError() + + def make_key(self, data): + raise NotImplementedError() + + def compact(self, *args, **kwargs): + raise NotImplementedError() + + def destroy(self, *args, **kwargs): + self._close() + bucket_file = os.path.join(self.db_path, self.name + '_buck') + os.unlink(bucket_file) + self._destroy_storage() + self._find_key.clear() + + def flush(self): + try: + self.buckets.flush() + self.storage.flush() + except: + pass + + def fsync(self): + try: + 
os.fsync(self.buckets.fileno()) + self.storage.fsync() + except: + pass + + def update_with_storage(self, doc_id, key, value): + if value: + start, size = self.storage.insert(value) + else: + start = 1 + size = 0 + return self.update(doc_id, key, start, size) + + def insert_with_storage(self, doc_id, key, value): + if value: + start, size = self.storage.insert(value) + else: + start = 1 + size = 0 + return self.insert(doc_id, key, start, size) diff --git a/libs/CodernityDB/indexcreator.py b/libs/CodernityDB/indexcreator.py new file mode 100644 index 0000000000..1e09a22b35 --- /dev/null +++ b/libs/CodernityDB/indexcreator.py @@ -0,0 +1,645 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Copyright 2011-2013 Codernity (http://codernity.com) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +import re +import tokenize +import token +import uuid + + +class IndexCreatorException(Exception): + def __init__(self, ex, line=None): + self.ex = ex + self.line = line + + def __str__(self): + if self.line: + return repr(self.ex + "(in line: %d)" % self.line) + return repr(self.ex) + + +class IndexCreatorFunctionException(IndexCreatorException): + pass + + +class IndexCreatorValueException(IndexCreatorException): + pass + + +class Parser(object): + def __init__(self): + pass + + def parse(self, data, name=None): + if not name: + self.name = "_" + uuid.uuid4().hex + else: + self.name = name + + self.ind = 0 + self.stage = 0 + self.logic = ['and', 'or', 'in'] + self.logic2 = ['&', '|'] + self.allowed_props = {'TreeBasedIndex': ['type', 'name', 'key_format', 'node_capacity', 'pointer_format', 'meta_format'], + 'HashIndex': ['type', 'name', 'key_format', 'hash_lim', 'entry_line_format'], + 'MultiHashIndex': ['type', 'name', 'key_format', 'hash_lim', 'entry_line_format'], + 'MultiTreeBasedIndex': ['type', 'name', 'key_format', 'node_capacity', 'pointer_format', 'meta_format'] + } + self.funcs = {'md5': (['md5'], ['.digest()']), + 'len': (['len'], []), + 'str': (['str'], []), + 'fix_r': (['self.fix_r'], []), + 'prefix': (['self.prefix'], []), + 'infix': (['self.infix'], []), + 'suffix': (['self.suffix'], []) + } + self.handle_int_imports = {'infix': "from itertools import izip\n"} + + self.funcs_with_body = {'fix_r': + (""" def fix_r(self,s,l): + e = len(s) + if e == l: + return s + elif e > l: + return s[:l] + else: + return s.rjust(l,'_')\n""", False), + 'prefix': + (""" def prefix(self,s,m,l,f): + t = len(s) + if m < 1: + m = 1 + o = set() + if t > l: + s = s[:l] + t = l + while m <= t: + o.add(s.rjust(f,'_')) + s = s[:-1] + t -= 1 + return o\n""", False), + 'suffix': + (""" def suffix(self,s,m,l,f): + t = len(s) + if m < 1: + m = 1 + o = set() + if t > l: + s = s[t-l:] + t = len(s) + while m <= t: + o.add(s.rjust(f,'_')) + s = s[1:] + t -= 1 + return o\n""", 
False), + 'infix': + (""" def infix(self,s,m,l,f): + t = len(s) + o = set() + for x in xrange(m - 1, l): + t = (s, ) + for y in xrange(0, x): + t += (s[y + 1:],) + o.update(set(''.join(x).rjust(f, '_').lower() for x in izip(*t))) + return o\n""", False)} + self.none = ['None', 'none', 'null'] + self.props_assign = ['=', ':'] + self.all_adj_num_comp = {token.NUMBER: ( + token.NUMBER, token.NAME, '-', '('), + token.NAME: (token.NUMBER, token.NAME, '-', '('), + ')': (token.NUMBER, token.NAME, '-', '(') + } + + self.all_adj_num_op = {token.NUMBER: (token.NUMBER, token.NAME, '('), + token.NAME: (token.NUMBER, token.NAME, '('), + ')': (token.NUMBER, token.NAME, '(') + } + self.allowed_adjacent = { + "<=": self.all_adj_num_comp, + ">=": self.all_adj_num_comp, + ">": self.all_adj_num_comp, + "<": self.all_adj_num_comp, + + "==": {token.NUMBER: (token.NUMBER, token.NAME, '('), + token.NAME: (token.NUMBER, token.NAME, token.STRING, '('), + token.STRING: (token.NAME, token.STRING, '('), + ')': (token.NUMBER, token.NAME, token.STRING, '('), + ']': (token.NUMBER, token.NAME, token.STRING, '(') + }, + + "+": {token.NUMBER: (token.NUMBER, token.NAME, '('), + token.NAME: (token.NUMBER, token.NAME, token.STRING, '('), + token.STRING: (token.NAME, token.STRING, '('), + ')': (token.NUMBER, token.NAME, token.STRING, '('), + ']': (token.NUMBER, token.NAME, token.STRING, '(') + }, + + "-": {token.NUMBER: (token.NUMBER, token.NAME, '('), + token.NAME: (token.NUMBER, token.NAME, '('), + ')': (token.NUMBER, token.NAME, '('), + '<': (token.NUMBER, token.NAME, '('), + '>': (token.NUMBER, token.NAME, '('), + '<=': (token.NUMBER, token.NAME, '('), + '>=': (token.NUMBER, token.NAME, '('), + '==': (token.NUMBER, token.NAME, '('), + ']': (token.NUMBER, token.NAME, '(') + }, + "*": self.all_adj_num_op, + "/": self.all_adj_num_op, + "%": self.all_adj_num_op, + ",": {token.NUMBER: (token.NUMBER, token.NAME, token.STRING, '{', '[', '('), + token.NAME: (token.NUMBER, token.NAME, token.STRING, '(', 
'{', '['), + token.STRING: (token.NAME, token.STRING, token.NUMBER, '(', '{', '['), + ')': (token.NUMBER, token.NAME, token.STRING, '(', '{', '['), + ']': (token.NUMBER, token.NAME, token.STRING, '(', '{', '['), + '}': (token.NUMBER, token.NAME, token.STRING, '(', '{', '[') + } + } + + def is_num(s): + m = re.search('[^0-9*()+\-\s/]+', s) + return not m + + def is_string(s): + m = re.search('\s*(?P[\'\"]+).*?(?P=a)\s*', s) + return m + data = re.split('make_key_value\:', data) + + if len(data) < 2: + raise IndexCreatorFunctionException( + "Couldn't find a definition of make_key_value function!\n") + + spl1 = re.split('make_key\:', data[0]) + spl2 = re.split('make_key\:', data[1]) + + self.funcs_rev = False + + if len(spl1) > 1: + data = [spl1[0]] + [data[1]] + [spl1[1]] + self.funcs_rev = True + elif len(spl2) > 1: + data = [data[0]] + spl2 + else: + data.append("key") + + if data[1] == re.search('\s*', data[1], re.S | re.M).group(0): + raise IndexCreatorFunctionException("Empty function body ", + len(re.split('\n', data[0])) + (len(re.split('\n', data[2])) if self.funcs_rev else 1) - 1) + if data[2] == re.search('\s*', data[2], re.S | re.M).group(0): + raise IndexCreatorFunctionException("Empty function body ", + len(re.split('\n', data[0])) + (1 if self.funcs_rev else len(re.split('\n', data[1]))) - 1) + if data[0] == re.search('\s*', data[0], re.S | re.M).group(0): + raise IndexCreatorValueException("You didn't set any properity or you set them not at the begining of the code\n") + + data = [re.split( + '\n', data[0]), re.split('\n', data[1]), re.split('\n', data[2])] + self.cnt_lines = (len(data[0]), len(data[1]), len(data[2])) + ind = 0 + self.predata = data + self.data = [[], [], []] + for i, v in enumerate(self.predata[0]): + for k, w in enumerate(self.predata[0][i]): + if self.predata[0][i][k] in self.props_assign: + if not is_num(self.predata[0][i][k + 1:]) and self.predata[0][i].strip()[:4] != 'type' and self.predata[0][i].strip()[:4] != 'name': + s = 
self.predata[0][i][k + 1:] + self.predata[0][i] = self.predata[0][i][:k + 1] + + m = re.search('\s+', s.strip()) + if not is_string(s) and not m: + s = "'" + s.strip() + "'" + self.predata[0][i] += s + break + + for n, i in enumerate(self.predata): + for k in i: + k = k.strip() + if k: + self.data[ind].append(k) + self.check_enclosures(k, n) + ind += 1 + + return self.parse_ex() + + def readline(self, stage): + def foo(): + if len(self.data[stage]) <= self.ind: + self.ind = 0 + return "" + else: + self.ind += 1 + return self.data[stage][self.ind - 1] + return foo + + def add(self, l, i): + def add_aux(*args): + # print args,self.ind + if len(l[i]) < self.ind: + l[i].append([]) + l[i][self.ind - 1].append(args) + return add_aux + + def parse_ex(self): + self.index_name = "" + self.index_type = "" + self.curLine = -1 + self.con = -1 + self.brackets = -1 + self.curFunc = None + self.colons = 0 + self.line_cons = ([], [], []) + self.pre_tokens = ([], [], []) + self.known_dicts_in_mkv = [] + self.prop_name = True + self.prop_assign = False + self.is_one_arg_enough = False + self.funcs_stack = [] + self.last_line = [-1, -1, -1] + self.props_set = [] + self.custom_header = set() + + self.tokens = [] + self.tokens_head = ['# %s\n' % self.name, 'class %s(' % self.name, '):\n', ' def __init__(self, *args, **kwargs): '] + + for i in xrange(3): + tokenize.tokenize(self.readline(i), self.add(self.pre_tokens, i)) + # tokenize treats some keyword not in the right way, thats why we + # have to change some of them + for nk, k in enumerate(self.pre_tokens[i]): + for na, a in enumerate(k): + if a[0] == token.NAME and a[1] in self.logic: + self.pre_tokens[i][nk][ + na] = (token.OP, a[1], a[2], a[3], a[4]) + + for i in self.pre_tokens[1]: + self.line_cons[1].append(self.check_colons(i, 1)) + self.check_adjacents(i, 1) + if self.check_for_2nd_arg(i) == -1 and not self.is_one_arg_enough: + raise IndexCreatorValueException("No 2nd value to return (did u forget about ',None'?", 
self.cnt_line_nr(i[0][4], 1)) + self.is_one_arg_enough = False + + for i in self.pre_tokens[2]: + self.line_cons[2].append(self.check_colons(i, 2)) + self.check_adjacents(i, 2) + + for i in self.pre_tokens[0]: + self.handle_prop_line(i) + + self.cur_brackets = 0 + self.tokens += ['\n super(%s, self).__init__(*args, **kwargs)\n def make_key_value(self, data): ' % self.name] + + for i in self.pre_tokens[1]: + for k in i: + self.handle_make_value(*k) + + self.curLine = -1 + self.con = -1 + self.cur_brackets = 0 + self.tokens += ['\n def make_key(self, key):'] + + for i in self.pre_tokens[2]: + for k in i: + self.handle_make_key(*k) + + if self.index_type == "": + raise IndexCreatorValueException("Missing index type definition\n") + if self.index_name == "": + raise IndexCreatorValueException("Missing index name\n") + + self.tokens_head[0] = "# " + self.index_name + "\n" + \ + self.tokens_head[0] + + for i in self.funcs_with_body: + if self.funcs_with_body[i][1]: + self.tokens_head.insert(4, self.funcs_with_body[i][0]) + + if None in self.custom_header: + self.custom_header.remove(None) + if self.custom_header: + s = ' custom_header = """' + for i in self.custom_header: + s += i + s += '"""\n' + self.tokens_head.insert(4, s) + + if self.index_type in self.allowed_props: + for i in self.props_set: + if i not in self.allowed_props[self.index_type]: + raise IndexCreatorValueException("Properity %s is not allowed for index type: %s" % (i, self.index_type)) + + # print "".join(self.tokens_head) + # print "----------" + # print (" ".join(self.tokens)) + return "".join(self.custom_header), "".join(self.tokens_head) + (" ".join(self.tokens)) + + # has to be run BEFORE tokenize + def check_enclosures(self, d, st): + encs = [] + contr = {'(': ')', '{': '}', '[': ']', "'": "'", '"': '"'} + ends = [')', '}', ']', "'", '"'] + for i in d: + if len(encs) > 0 and encs[-1] in ['"', "'"]: + if encs[-1] == i: + del encs[-1] + elif i in contr: + encs += [i] + elif i in ends: + if 
len(encs) < 1 or contr[encs[-1]] != i: + raise IndexCreatorValueException("Missing opening enclosure for \'%s\'" % i, self.cnt_line_nr(d, st)) + del encs[-1] + + if len(encs) > 0: + raise IndexCreatorValueException("Missing closing enclosure for \'%s\'" % encs[0], self.cnt_line_nr(d, st)) + + def check_adjacents(self, d, st): + def std_check(d, n): + if n == 0: + prev = -1 + else: + prev = d[n - 1][1] if d[n - 1][0] == token.OP else d[n - 1][0] + + cur = d[n][1] if d[n][0] == token.OP else d[n][0] + + # there always is an endmarker at the end, but this is a precaution + if n + 2 > len(d): + nex = -1 + else: + nex = d[n + 1][1] if d[n + 1][0] == token.OP else d[n + 1][0] + + if prev not in self.allowed_adjacent[cur]: + raise IndexCreatorValueException("Wrong left value of the %s" % cur, self.cnt_line_nr(line, st)) + + # there is an assumption that whole data always ends with 0 marker, the idea prolly needs a rewritting to allow more whitespaces + # between tokens, so it will be handled anyway + elif nex not in self.allowed_adjacent[cur][prev]: + raise IndexCreatorValueException("Wrong right value of the %s" % cur, self.cnt_line_nr(line, st)) + + for n, (t, i, _, _, line) in enumerate(d): + if t == token.NAME or t == token.STRING: + if n + 1 < len(d) and d[n + 1][0] in [token.NAME, token.STRING]: + raise IndexCreatorValueException("Did you forget about an operator in between?", self.cnt_line_nr(line, st)) + elif i in self.allowed_adjacent: + std_check(d, n) + + def check_colons(self, d, st): + cnt = 0 + br = 0 + + def check_ret_args_nr(a, s): + c_b_cnt = 0 + s_b_cnt = 0 + n_b_cnt = 0 + comas_cnt = 0 + for _, i, _, _, line in a: + + if c_b_cnt == n_b_cnt == s_b_cnt == 0: + if i == ',': + comas_cnt += 1 + if (s == 1 and comas_cnt > 1) or (s == 2 and comas_cnt > 0): + raise IndexCreatorFunctionException("Too much arguments to return", self.cnt_line_nr(line, st)) + if s == 0 and comas_cnt > 0: + raise IndexCreatorValueException("A coma here doesn't make any sense", 
self.cnt_line_nr(line, st)) + + elif i == ':': + if s == 0: + raise IndexCreatorValueException("A colon here doesn't make any sense", self.cnt_line_nr(line, st)) + raise IndexCreatorFunctionException("Two colons don't make any sense", self.cnt_line_nr(line, st)) + + if i == '{': + c_b_cnt += 1 + elif i == '}': + c_b_cnt -= 1 + elif i == '(': + n_b_cnt += 1 + elif i == ')': + n_b_cnt -= 1 + elif i == '[': + s_b_cnt += 1 + elif i == ']': + s_b_cnt -= 1 + + def check_if_empty(a): + for i in a: + if i not in [token.NEWLINE, token.INDENT, token.ENDMARKER]: + return False + return True + if st == 0: + check_ret_args_nr(d, st) + return + + for n, i in enumerate(d): + if i[1] == ':': + if br == 0: + if len(d) < n or check_if_empty(d[n + 1:]): + raise IndexCreatorValueException( + "Empty return value", self.cnt_line_nr(i[4], st)) + elif len(d) >= n: + check_ret_args_nr(d[n + 1:], st) + return cnt + else: + cnt += 1 + elif i[1] == '{': + br += 1 + elif i[1] == '}': + br -= 1 + check_ret_args_nr(d, st) + return -1 + + def check_for_2nd_arg(self, d): + c_b_cnt = 0 # curly brackets counter '{}' + s_b_cnt = 0 # square brackets counter '[]' + n_b_cnt = 0 # normal brackets counter '()' + + def check_2nd_arg(d, ind): + d = d[ind[0]:] + for t, i, (n, r), _, line in d: + if i == '{' or i is None: + return 0 + elif t == token.NAME: + self.known_dicts_in_mkv.append((i, (n, r))) + return 0 + elif t == token.STRING or t == token.NUMBER: + raise IndexCreatorValueException("Second return value of make_key_value function has to be a dictionary!", self.cnt_line_nr(line, 1)) + + for ind in enumerate(d): + t, i, _, _, _ = ind[1] + if s_b_cnt == n_b_cnt == c_b_cnt == 0: + if i == ',': + return check_2nd_arg(d, ind) + elif (t == token.NAME and i not in self.funcs) or i == '{': + self.is_one_arg_enough = True + + if i == '{': + c_b_cnt += 1 + self.is_one_arg_enough = True + elif i == '}': + c_b_cnt -= 1 + elif i == '(': + n_b_cnt += 1 + elif i == ')': + n_b_cnt -= 1 + elif i == '[': + s_b_cnt += 
1 + elif i == ']': + s_b_cnt -= 1 + return -1 + + def cnt_line_nr(self, l, stage): + nr = -1 + for n, i in enumerate(self.predata[stage]): + # print i,"|||",i.strip(),"|||",l + if l == i.strip(): + nr = n + if nr == -1: + return -1 + + if stage == 0: + return nr + 1 + elif stage == 1: + return nr + self.cnt_lines[0] + (self.cnt_lines[2] - 1 if self.funcs_rev else 0) + elif stage == 2: + return nr + self.cnt_lines[0] + (self.cnt_lines[1] - 1 if not self.funcs_rev else 0) + + return -1 + + def handle_prop_line(self, d): + d_len = len(d) + if d[d_len - 1][0] == token.ENDMARKER: + d_len -= 1 + + if d_len < 3: + raise IndexCreatorValueException("Can't handle properity assingment ", self.cnt_line_nr(d[0][4], 0)) + + if not d[1][1] in self.props_assign: + raise IndexCreatorValueException( + "Did you forget : or =?", self.cnt_line_nr(d[0][4], 0)) + + if d[0][0] == token.NAME or d[0][0] == token.STRING: + if d[0][1] in self.props_set: + raise IndexCreatorValueException("Properity %s is set more than once" % d[0][1], self.cnt_line_nr(d[0][4], 0)) + self.props_set += [d[0][1]] + if d[0][1] == "type" or d[0][1] == "name": + t, tk, _, _, line = d[2] + + if d_len > 3: + raise IndexCreatorValueException( + "Wrong value to assign", self.cnt_line_nr(line, 0)) + + if t == token.STRING: + m = re.search('\s*(?P[\'\"]+)(.*?)(?P=a)\s*', tk) + if m: + tk = m.groups()[1] + elif t != token.NAME: + raise IndexCreatorValueException( + "Wrong value to assign", self.cnt_line_nr(line, 0)) + + if d[0][1] == "type": + if d[2][1] == "TreeBasedIndex": + self.custom_header.add("from CodernityDB.tree_index import TreeBasedIndex\n") + elif d[2][1] == "MultiTreeBasedIndex": + self.custom_header.add("from CodernityDB.tree_index import MultiTreeBasedIndex\n") + elif d[2][1] == "MultiHashIndex": + self.custom_header.add("from CodernityDB.hash_index import MultiHashIndex\n") + self.tokens_head.insert(2, tk) + self.index_type = tk + else: + self.index_name = tk + return + else: + self.tokens += ['\n 
kwargs["' + d[0][1] + '"]'] + else: + raise IndexCreatorValueException("Can't handle properity assingment ", self.cnt_line_nr(d[0][4], 0)) + + self.tokens += ['='] + + self.check_adjacents(d[2:], 0) + self.check_colons(d[2:], 0) + + for i in d[2:]: + self.tokens += [i[1]] + + def generate_func(self, t, tk, pos_start, pos_end, line, hdata, stage): + if self.last_line[stage] != -1 and pos_start[0] > self.last_line[stage] and line != '': + raise IndexCreatorFunctionException("This line will never be executed!", self.cnt_line_nr(line, stage)) + if t == 0: + return + + if pos_start[1] == 0: + if self.line_cons[stage][pos_start[0] - 1] == -1: + self.tokens += ['\n return'] + self.last_line[stage] = pos_start[0] + else: + self.tokens += ['\n if'] + elif tk == ':' and self.line_cons[stage][pos_start[0] - 1] > -1: + if self.line_cons[stage][pos_start[0] - 1] == 0: + self.tokens += [':\n return'] + return + self.line_cons[stage][pos_start[0] - 1] -= 1 + + if tk in self.logic2: + # print tk + if line[pos_start[1] - 1] != tk and line[pos_start[1] + 1] != tk: + self.tokens += [tk] + if line[pos_start[1] - 1] != tk and line[pos_start[1] + 1] == tk: + if tk == '&': + self.tokens += ['and'] + else: + self.tokens += ['or'] + return + + if self.brackets != 0: + def search_through_known_dicts(a): + for i, (n, r) in self.known_dicts_in_mkv: + if i == tk and r > pos_start[1] and n == pos_start[0] and hdata == 'data': + return True + return False + + if t == token.NAME and len(self.funcs_stack) > 0 and self.funcs_stack[-1][0] == 'md5' and search_through_known_dicts(tk): + raise IndexCreatorValueException("Second value returned by make_key_value for sure isn't a dictionary ", self.cnt_line_nr(line, 1)) + + if tk == ')': + self.cur_brackets -= 1 + if len(self.funcs_stack) > 0 and self.cur_brackets == self.funcs_stack[-1][1]: + self.tokens += [tk] + self.tokens += self.funcs[self.funcs_stack[-1][0]][1] + del self.funcs_stack[-1] + return + if tk == '(': + self.cur_brackets += 1 + + if tk 
in self.none: + self.tokens += ['None'] + return + + if t == token.NAME and tk not in self.logic and tk != hdata: + if tk not in self.funcs: + self.tokens += [hdata + '["' + tk + '"]'] + else: + self.tokens += self.funcs[tk][0] + if tk in self.funcs_with_body: + self.funcs_with_body[tk] = ( + self.funcs_with_body[tk][0], True) + self.custom_header.add(self.handle_int_imports.get(tk)) + self.funcs_stack += [(tk, self.cur_brackets)] + else: + self.tokens += [tk] + + def handle_make_value(self, t, tk, pos_start, pos_end, line): + self.generate_func(t, tk, pos_start, pos_end, line, 'data', 1) + + def handle_make_key(self, t, tk, pos_start, pos_end, line): + self.generate_func(t, tk, pos_start, pos_end, line, 'key', 2) diff --git a/libs/CodernityDB/lfu_cache.py b/libs/CodernityDB/lfu_cache.py new file mode 100644 index 0000000000..e11ffc95b9 --- /dev/null +++ b/libs/CodernityDB/lfu_cache.py @@ -0,0 +1,150 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Copyright 2011-2013 Codernity (http://codernity.com) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +import functools +from heapq import nsmallest +from operator import itemgetter +from collections import defaultdict + +try: + from collections import Counter +except ImportError: + class Counter(dict): + 'Mapping where default values are zero' + def __missing__(self, key): + return 0 + + +def cache1lvl(maxsize=100): + """ + modified version of http://code.activestate.com/recipes/498245/ + """ + def decorating_function(user_function): + cache = {} + use_count = Counter() + + @functools.wraps(user_function) + def wrapper(key, *args, **kwargs): + try: + result = cache[key] + except KeyError: + if len(cache) == maxsize: + for k, _ in nsmallest(maxsize // 10 or 1, + use_count.iteritems(), + key=itemgetter(1)): + del cache[k], use_count[k] + cache[key] = user_function(key, *args, **kwargs) + result = cache[key] + # result = user_function(obj, key, *args, **kwargs) + finally: + use_count[key] += 1 + return result + + def clear(): + cache.clear() + use_count.clear() + + def delete(key): + try: + del cache[key] + del use_count[key] + except KeyError: + return False + else: + return True + + wrapper.clear = clear + wrapper.cache = cache + wrapper.delete = delete + return wrapper + return decorating_function + + +def twolvl_iterator(dict): + for k, v in dict.iteritems(): + for kk, vv in v.iteritems(): + yield k, kk, vv + + +def cache2lvl(maxsize=100): + """ + modified version of http://code.activestate.com/recipes/498245/ + """ + def decorating_function(user_function): + cache = {} + use_count = defaultdict(Counter) + + @functools.wraps(user_function) + def wrapper(*args, **kwargs): +# return user_function(*args, **kwargs) + try: + result = cache[args[0]][args[1]] + except KeyError: + if wrapper.cache_size == maxsize: + to_delete = maxsize // 10 or 1 + for k1, k2, v in nsmallest(to_delete, + twolvl_iterator(use_count), + key=itemgetter(2)): + del cache[k1][k2], use_count[k1][k2] + if not cache[k1]: + del cache[k1] + del use_count[k1] + wrapper.cache_size -= to_delete + 
result = user_function(*args, **kwargs) + try: + cache[args[0]][args[1]] = result + except KeyError: + cache[args[0]] = {args[1]: result} + wrapper.cache_size += 1 + finally: + use_count[args[0]][args[1]] += 1 + return result + + def clear(): + cache.clear() + use_count.clear() + + def delete(key, inner_key=None): + if inner_key is not None: + try: + del cache[key][inner_key] + del use_count[key][inner_key] + if not cache[key]: + del cache[key] + del use_count[key] + wrapper.cache_size -= 1 + except KeyError: + return False + else: + return True + else: + try: + wrapper.cache_size -= len(cache[key]) + del cache[key] + del use_count[key] + except KeyError: + return False + else: + return True + + wrapper.clear = clear + wrapper.cache = cache + wrapper.delete = delete + wrapper.cache_size = 0 + return wrapper + return decorating_function diff --git a/libs/CodernityDB/lfu_cache_with_lock.py b/libs/CodernityDB/lfu_cache_with_lock.py new file mode 100644 index 0000000000..39f43cc66a --- /dev/null +++ b/libs/CodernityDB/lfu_cache_with_lock.py @@ -0,0 +1,158 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Copyright 2011-2013 Codernity (http://codernity.com) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +import functools +from heapq import nsmallest +from operator import itemgetter +from collections import defaultdict + + +try: + from collections import Counter +except ImportError: + class Counter(dict): + 'Mapping where default values are zero' + def __missing__(self, key): + return 0 + + +def twolvl_iterator(dict): + for k, v in dict.iteritems(): + for kk, vv in v.iteritems(): + yield k, kk, vv + + +def create_cache1lvl(lock_obj): + def cache1lvl(maxsize=100): + """ + modified version of http://code.activestate.com/recipes/498245/ + """ + def decorating_function(user_function): + cache = {} + use_count = Counter() + lock = lock_obj() + + @functools.wraps(user_function) + def wrapper(key, *args, **kwargs): + try: + result = cache[key] + except KeyError: + with lock: + if len(cache) == maxsize: + for k, _ in nsmallest(maxsize // 10 or 1, + use_count.iteritems(), + key=itemgetter(1)): + del cache[k], use_count[k] + cache[key] = user_function(key, *args, **kwargs) + result = cache[key] + use_count[key] += 1 + else: + with lock: + use_count[key] += 1 + return result + + def clear(): + cache.clear() + use_count.clear() + + def delete(key): + try: + del cache[key] + del use_count[key] + return True + except KeyError: + return False + + wrapper.clear = clear + wrapper.cache = cache + wrapper.delete = delete + return wrapper + return decorating_function + return cache1lvl + + +def create_cache2lvl(lock_obj): + def cache2lvl(maxsize=100): + """ + modified version of http://code.activestate.com/recipes/498245/ + """ + def decorating_function(user_function): + cache = {} + use_count = defaultdict(Counter) + lock = lock_obj() + + @functools.wraps(user_function) + def wrapper(*args, **kwargs): + try: + result = cache[args[0]][args[1]] + except KeyError: + with lock: + if wrapper.cache_size == maxsize: + to_delete = maxsize / 10 or 1 + for k1, k2, v in nsmallest(to_delete, + twolvl_iterator( + use_count), + key=itemgetter(2)): + del cache[k1][k2], use_count[k1][k2] + if 
not cache[k1]: + del cache[k1] + del use_count[k1] + wrapper.cache_size -= to_delete + result = user_function(*args, **kwargs) + try: + cache[args[0]][args[1]] = result + except KeyError: + cache[args[0]] = {args[1]: result} + use_count[args[0]][args[1]] += 1 + wrapper.cache_size += 1 + else: + use_count[args[0]][args[1]] += 1 + return result + + def clear(): + cache.clear() + use_count.clear() + + def delete(key, *args): + if args: + try: + del cache[key][args[0]] + del use_count[key][args[0]] + if not cache[key]: + del cache[key] + del use_count[key] + wrapper.cache_size -= 1 + return True + except KeyError: + return False + else: + try: + wrapper.cache_size -= len(cache[key]) + del cache[key] + del use_count[key] + return True + except KeyError: + return False + + wrapper.clear = clear + wrapper.cache = cache + wrapper.delete = delete + wrapper.cache_size = 0 + return wrapper + return decorating_function + return cache2lvl diff --git a/libs/CodernityDB/migrate.py b/libs/CodernityDB/migrate.py new file mode 100644 index 0000000000..4d0b400557 --- /dev/null +++ b/libs/CodernityDB/migrate.py @@ -0,0 +1,45 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Copyright 2011-2013 Codernity (http://codernity.com) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from CodernityDB.database import Database +import shutil +import os + + +def migrate(source, destination): + """ + Very basic for now + """ + dbs = Database(source) + dbt = Database(destination) + dbs.open() + dbt.create() + dbt.close() + for curr in os.listdir(os.path.join(dbs.path, '_indexes')): + if curr != '00id.py': + shutil.copyfile(os.path.join(dbs.path, '_indexes', curr), + os.path.join(dbt.path, '_indexes', curr)) + dbt.open() + for c in dbs.all('id'): + del c['_rev'] + dbt.insert(c) + return True + + +if __name__ == '__main__': + import sys + migrate(sys.argv[1], sys.argv[2]) diff --git a/libs/CodernityDB/misc.py b/libs/CodernityDB/misc.py new file mode 100644 index 0000000000..54c94812ed --- /dev/null +++ b/libs/CodernityDB/misc.py @@ -0,0 +1,35 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Copyright 2011-2013 Codernity (http://codernity.com) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from random import getrandbits, randrange +import uuid + + +class NONE: + """ + It's inteded to be None but different, + for internal use only! 
+ """ + pass + + +def random_hex_32(): + return uuid.UUID(int=getrandbits(128), version=4).hex + + +def random_hex_4(*args, **kwargs): + return '%04x' % randrange(256 ** 2) diff --git a/libs/CodernityDB/patch.py b/libs/CodernityDB/patch.py new file mode 100644 index 0000000000..4c074f432e --- /dev/null +++ b/libs/CodernityDB/patch.py @@ -0,0 +1,99 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Copyright 2011-2013 Codernity (http://codernity.com) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from CodernityDB.misc import NONE + + +def __patch(obj, name, new): + n = NONE() + orig = getattr(obj, name, n) + if orig is not n: + if orig == new: + raise Exception("Shouldn't happen, new and orig are the same") + setattr(obj, name, new) + return + + +def patch_cache_lfu(lock_obj): + """ + Patnches cache mechanizm to be thread safe (gevent ones also) + + .. note:: + + It's internal CodernityDB mechanizm, it will be called when needed + + """ + import lfu_cache + import lfu_cache_with_lock + lfu_lock1lvl = lfu_cache_with_lock.create_cache1lvl(lock_obj) + lfu_lock2lvl = lfu_cache_with_lock.create_cache2lvl(lock_obj) + __patch(lfu_cache, 'cache1lvl', lfu_lock1lvl) + __patch(lfu_cache, 'cache2lvl', lfu_lock2lvl) + + +def patch_cache_rr(lock_obj): + """ + Patches cache mechanizm to be thread safe (gevent ones also) + + .. 
note:: + + It's internal CodernityDB mechanizm, it will be called when needed + + """ + import rr_cache + import rr_cache_with_lock + rr_lock1lvl = rr_cache_with_lock.create_cache1lvl(lock_obj) + rr_lock2lvl = rr_cache_with_lock.create_cache2lvl(lock_obj) + __patch(rr_cache, 'cache1lvl', rr_lock1lvl) + __patch(rr_cache, 'cache2lvl', rr_lock2lvl) + + +def patch_flush_fsync(db_obj): + """ + Will always execute index.fsync after index.flush. + + .. note:: + + It's for advanced users, use when you understand difference between `flush` and `fsync`, and when you definitely need that. + + It's important to call it **AFTER** database has all indexes etc (after db.create or db.open) + + Example usage:: + + ... + db = Database('/tmp/patch_demo') + db.create() + patch_flush_fsync(db) + ... + + """ + + def always_fsync(ind_obj): + def _inner(): + ind_obj.orig_flush() + ind_obj.fsync() + return _inner + + for index in db_obj.indexes: + setattr(index, 'orig_flush', index.flush) + setattr(index, 'flush', always_fsync(index)) + + setattr(db_obj, 'orig_flush', db_obj.flush) + setattr(db_obj, 'flush', always_fsync(db_obj)) + + return diff --git a/libs/CodernityDB/rr_cache.py b/libs/CodernityDB/rr_cache.py new file mode 100644 index 0000000000..5801b7cced --- /dev/null +++ b/libs/CodernityDB/rr_cache.py @@ -0,0 +1,112 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Copyright 2011-2013 Codernity (http://codernity.com) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import functools +from random import choice + + +def cache1lvl(maxsize=100): + def decorating_function(user_function): + cache1lvl = {} + + @functools.wraps(user_function) + def wrapper(key, *args, **kwargs): + try: + result = cache1lvl[key] + except KeyError: + if len(cache1lvl) == maxsize: + for i in xrange(maxsize // 10 or 1): + del cache1lvl[choice(cache1lvl.keys())] + cache1lvl[key] = user_function(key, *args, **kwargs) + result = cache1lvl[key] +# result = user_function(obj, key, *args, **kwargs) + return result + + def clear(): + cache1lvl.clear() + + def delete(key): + try: + del cache1lvl[key] + return True + except KeyError: + return False + + wrapper.clear = clear + wrapper.cache = cache1lvl + wrapper.delete = delete + return wrapper + return decorating_function + + +def cache2lvl(maxsize=100): + def decorating_function(user_function): + cache = {} + + @functools.wraps(user_function) + def wrapper(*args, **kwargs): +# return user_function(*args, **kwargs) + try: + result = cache[args[0]][args[1]] + except KeyError: +# print wrapper.cache_size + if wrapper.cache_size == maxsize: + to_delete = maxsize // 10 or 1 + for i in xrange(to_delete): + key1 = choice(cache.keys()) + key2 = choice(cache[key1].keys()) + del cache[key1][key2] + if not cache[key1]: + del cache[key1] + wrapper.cache_size -= to_delete +# print wrapper.cache_size + result = user_function(*args, **kwargs) + try: + cache[args[0]][args[1]] = result + except KeyError: + cache[args[0]] = {args[1]: result} + wrapper.cache_size += 1 + return result + + def clear(): + cache.clear() + wrapper.cache_size = 0 + + def delete(key, inner_key=None): + if inner_key: + try: + del cache[key][inner_key] + if not cache[key]: + del cache[key] + wrapper.cache_size -= 1 + return True + except KeyError: + return False + else: + try: + wrapper.cache_size -= len(cache[key]) + del cache[key] + return True + except KeyError: + return False + + wrapper.clear = clear + wrapper.cache = cache + wrapper.delete = delete 
+ wrapper.cache_size = 0 + return wrapper + return decorating_function diff --git a/libs/CodernityDB/rr_cache_with_lock.py b/libs/CodernityDB/rr_cache_with_lock.py new file mode 100644 index 0000000000..66298c5940 --- /dev/null +++ b/libs/CodernityDB/rr_cache_with_lock.py @@ -0,0 +1,117 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Copyright 2011-2013 Codernity (http://codernity.com) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import functools +from random import choice + + +def create_cache1lvl(lock_obj): + def cache1lvl(maxsize=100): + def decorating_function(user_function): + cache = {} + lock = lock_obj() + + @functools.wraps(user_function) + def wrapper(key, *args, **kwargs): + try: + result = cache[key] + except KeyError: + with lock: + if len(cache) == maxsize: + for i in xrange(maxsize // 10 or 1): + del cache[choice(cache.keys())] + cache[key] = user_function(key, *args, **kwargs) + result = cache[key] + return result + + def clear(): + cache.clear() + + def delete(key): + try: + del cache[key] + return True + except KeyError: + return False + + wrapper.clear = clear + wrapper.cache = cache + wrapper.delete = delete + return wrapper + return decorating_function + return cache1lvl + + +def create_cache2lvl(lock_obj): + def cache2lvl(maxsize=100): + def decorating_function(user_function): + cache = {} + lock = lock_obj() + + @functools.wraps(user_function) + def wrapper(*args, **kwargs): + try: + result = cache[args[0]][args[1]] + except 
KeyError: + with lock: + if wrapper.cache_size == maxsize: + to_delete = maxsize // 10 or 1 + for i in xrange(to_delete): + key1 = choice(cache.keys()) + key2 = choice(cache[key1].keys()) + del cache[key1][key2] + if not cache[key1]: + del cache[key1] + wrapper.cache_size -= to_delete + result = user_function(*args, **kwargs) + try: + cache[args[0]][args[1]] = result + except KeyError: + cache[args[0]] = {args[1]: result} + wrapper.cache_size += 1 + return result + + def clear(): + cache.clear() + wrapper.cache_size = 0 + + def delete(key, *args): + if args: + try: + del cache[key][args[0]] + if not cache[key]: + del cache[key] + wrapper.cache_size -= 1 + return True + except KeyError: + return False + else: + try: + wrapper.cache_size -= len(cache[key]) + del cache[key] + return True + except KeyError: + return False + + wrapper.clear = clear + wrapper.cache = cache + wrapper.delete = delete + wrapper.cache_size = 0 + return wrapper + return decorating_function + return cache2lvl diff --git a/libs/CodernityDB/sharded_hash.py b/libs/CodernityDB/sharded_hash.py new file mode 100644 index 0000000000..3cf76ac072 --- /dev/null +++ b/libs/CodernityDB/sharded_hash.py @@ -0,0 +1,146 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Copyright 2011-2013 Codernity (http://codernity.com) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +from CodernityDB.hash_index import UniqueHashIndex, HashIndex +from CodernityDB.sharded_index import ShardedIndex +from CodernityDB.index import IndexPreconditionsException + +from random import getrandbits +import uuid + + +class IU_ShardedUniqueHashIndex(ShardedIndex): + + custom_header = """import uuid +from random import getrandbits +from CodernityDB.sharded_index import ShardedIndex +""" + + def __init__(self, db_path, name, *args, **kwargs): + if kwargs.get('sh_nums', 0) > 255: + raise IndexPreconditionsException("Too many shards") + kwargs['ind_class'] = UniqueHashIndex + super(IU_ShardedUniqueHashIndex, self).__init__(db_path, + name, *args, **kwargs) + self.patchers.append(self.wrap_insert_id_index) + + @staticmethod + def wrap_insert_id_index(db_obj, ind_obj, clean=False): + def _insert_id_index(_rev, data): + """ + Performs insert on **id** index. + """ + _id, value = db_obj.id_ind.make_key_value(data) # may be improved + trg_shard = _id[:2] + storage = db_obj.id_ind.shards_r[trg_shard].storage + start, size = storage.insert(value) + db_obj.id_ind.insert(_id, _rev, start, size) + return _id + if not clean: + if hasattr(db_obj, '_insert_id_index_orig'): + raise IndexPreconditionsException( + "Already patched, something went wrong") + setattr(db_obj, "_insert_id_index_orig", db_obj._insert_id_index) + setattr(db_obj, "_insert_id_index", _insert_id_index) + else: + setattr(db_obj, "_insert_id_index", db_obj._insert_id_index_orig) + delattr(db_obj, "_insert_id_index_orig") + + def create_key(self): + h = uuid.UUID(int=getrandbits(128), version=4).hex + trg = self.last_used + 1 + if trg >= self.sh_nums: + trg = 0 + self.last_used = trg + h = '%02x%30s' % (trg, h[2:]) + return h + + def delete(self, key, *args, **kwargs): + trg_shard = key[:2] + op = self.shards_r[trg_shard] + return op.delete(key, *args, **kwargs) + + def update(self, key, *args, **kwargs): + trg_shard = key[:2] + self.last_used = int(trg_shard, 16) + op = self.shards_r[trg_shard] + 
return op.update(key, *args, **kwargs) + + def insert(self, key, *args, **kwargs): + trg_shard = key[:2] # in most cases it's in create_key BUT not always + self.last_used = int(key[:2], 16) + op = self.shards_r[trg_shard] + return op.insert(key, *args, **kwargs) + + def get(self, key, *args, **kwargs): + trg_shard = key[:2] + self.last_used = int(trg_shard, 16) + op = self.shards_r[trg_shard] + return op.get(key, *args, **kwargs) + + +class ShardedUniqueHashIndex(IU_ShardedUniqueHashIndex): + + # allow unique hash to be used directly + custom_header = 'from CodernityDB.sharded_hash import IU_ShardedUniqueHashIndex' + + pass + + +class IU_ShardedHashIndex(ShardedIndex): + + custom_header = """from CodernityDB.sharded_index import ShardedIndex""" + + def __init__(self, db_path, name, *args, **kwargs): + kwargs['ind_class'] = HashIndex + super(IU_ShardedHashIndex, self).__init__(db_path, name, * + args, **kwargs) + + def calculate_shard(self, key): + """ + Must be implemented. It has to return shard to be used by key + + :param key: key + :returns: target shard + :rtype: int + """ + raise NotImplementedError() + + def delete(self, doc_id, key, *args, **kwargs): + trg_shard = self.calculate_shard(key) + op = self.shards_r[trg_shard] + return op.delete(doc_id, key, *args, **kwargs) + + def insert(self, doc_id, key, *args, **kwargs): + trg_shard = self.calculate_shard(key) + op = self.shards_r[trg_shard] + return op.insert(doc_id, key, *args, **kwargs) + + def update(self, doc_id, key, *args, **kwargs): + trg_shard = self.calculate_shard(key) + op = self.shards_r[trg_shard] + return op.insert(doc_id, key, *args, **kwargs) + + def get(self, key, *args, **kwargs): + trg_shard = self.calculate_shard(key) + op = self.shards_r[trg_shard] + return op.get(key, *args, **kwargs) + + +class ShardedHashIndex(IU_ShardedHashIndex): + pass diff --git a/libs/CodernityDB/sharded_index.py b/libs/CodernityDB/sharded_index.py new file mode 100644 index 0000000000..2bdf9d7595 --- /dev/null 
+++ b/libs/CodernityDB/sharded_index.py @@ -0,0 +1,112 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Copyright 2011-2013 Codernity (http://codernity.com) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from CodernityDB.index import Index +# from CodernityDB.env import cdb_environment +# import warnings + + +class ShardedIndex(Index): + + def __init__(self, db_path, name, *args, **kwargs): + """ + There are 3 additional parameters. You have to hardcode them in your custom class. **NEVER** use directly + + :param int sh_nums: how many shards should be + :param class ind_class: Index class to use (HashIndex or your custom one) + :param bool use_make_keys: if True, `make_key`, and `make_key_value` will be overriden with those from first shard + + The rest parameters are passed straight to `ind_class` shards. 
+ + """ + super(ShardedIndex, self).__init__(db_path, name) + try: + self.sh_nums = kwargs.pop('sh_nums') + except KeyError: + self.sh_nums = 5 + try: + ind_class = kwargs.pop('ind_class') + except KeyError: + raise Exception("ind_class must be given") + else: + # if not isinstance(ind_class, basestring): + # ind_class = ind_class.__name__ + self.ind_class = ind_class + if 'use_make_keys' in kwargs: + self.use_make_keys = kwargs.pop('use_make_keys') + else: + self.use_make_keys = False + self._set_shard_datas(*args, **kwargs) + self.patchers = [] # database object patchers + + def _set_shard_datas(self, *args, **kwargs): + self.shards = {} + self.shards_r = {} +# ind_class = globals()[self.ind_class] + ind_class = self.ind_class + i = 0 + for sh_name in [self.name + str(x) for x in xrange(self.sh_nums)]: + # dict is better than list in that case + self.shards[i] = ind_class(self.db_path, sh_name, *args, **kwargs) + self.shards_r['%02x' % i] = self.shards[i] + self.shards_r[i] = self.shards[i] + i += 1 + + if not self.use_make_keys: + self.make_key = self.shards[0].make_key + self.make_key_value = self.shards[0].make_key_value + + self.last_used = 0 + + @property + def storage(self): + st = self.shards[self.last_used].storage + return st + + def __getattr__(self, name): + return getattr(self.shards[self.last_used], name) + + def open_index(self): + for curr in self.shards.itervalues(): + curr.open_index() + + def create_index(self): + for curr in self.shards.itervalues(): + curr.create_index() + + def destroy(self): + for curr in self.shards.itervalues(): + curr.destroy() + + def compact(self): + for curr in self.shards.itervalues(): + curr.compact() + + def reindex(self): + for curr in self.shards.itervalues(): + curr.reindex() + + def all(self, *args, **kwargs): + for curr in self.shards.itervalues(): + for now in curr.all(*args, **kwargs): + yield now + + def get_many(self, *args, **kwargs): + for curr in self.shards.itervalues(): + for now in 
curr.get_many(*args, **kwargs): + yield now diff --git a/libs/CodernityDB/storage.py b/libs/CodernityDB/storage.py new file mode 100644 index 0000000000..30be1f3be8 --- /dev/null +++ b/libs/CodernityDB/storage.py @@ -0,0 +1,153 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Copyright 2011-2013 Codernity (http://codernity.com) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import struct +import shutil +import marshal +import io + + +try: + from CodernityDB import __version__ +except ImportError: + from __init__ import __version__ + + +class StorageException(Exception): + pass + + +class DummyStorage(object): + """ + Storage mostly used to fake real storage + """ + + def create(self, *args, **kwargs): + pass + + def open(self, *args, **kwargs): + pass + + def close(self, *args, **kwargs): + pass + + def data_from(self, *args, **kwargs): + pass + + def data_to(self, *args, **kwargs): + pass + + def save(self, *args, **kwargs): + return 0, 0 + + def insert(self, *args, **kwargs): + return self.save(*args, **kwargs) + + def update(self, *args, **kwargs): + return 0, 0 + + def get(self, *args, **kwargs): + return None + + # def compact(self, *args, **kwargs): + # pass + + def fsync(self, *args, **kwargs): + pass + + def flush(self, *args, **kwargs): + pass + + +class IU_Storage(object): + + __version__ = __version__ + + def __init__(self, db_path, name='main'): + self.db_path = db_path + self.name = name + self._header_size = 100 + + def 
create(self): + if os.path.exists(os.path.join(self.db_path, self.name + "_stor")): + raise IOError("Storage already exists!") + with io.open(os.path.join(self.db_path, self.name + "_stor"), 'wb') as f: + f.write(struct.pack("10s90s", self.__version__, '|||||')) + f.close() + self._f = io.open(os.path.join( + self.db_path, self.name + "_stor"), 'r+b', buffering=0) + self.flush() + self._f.seek(0, 2) + + def open(self): + if not os.path.exists(os.path.join(self.db_path, self.name + "_stor")): + raise IOError("Storage doesn't exists!") + self._f = io.open(os.path.join( + self.db_path, self.name + "_stor"), 'r+b', buffering=0) + self.flush() + self._f.seek(0, 2) + + def destroy(self): + os.unlink(os.path.join(self.db_path, self.name + '_stor')) + + def close(self): + self._f.close() + # self.flush() + # self.fsync() + + def data_from(self, data): + return marshal.loads(data) + + def data_to(self, data): + return marshal.dumps(data) + + def save(self, data): + s_data = self.data_to(data) + self._f.seek(0, 2) + start = self._f.tell() + size = len(s_data) + self._f.write(s_data) + self.flush() + return start, size + + def insert(self, data): + return self.save(data) + + def update(self, data): + return self.save(data) + + def get(self, start, size, status='c'): + if status == 'd': + return None + else: + self._f.seek(start) + return self.data_from(self._f.read(size)) + + def flush(self): + self._f.flush() + + def fsync(self): + os.fsync(self._f.fileno()) + + +# classes for public use, done in this way because of +# generation static files with indexes (_index directory) + + +class Storage(IU_Storage): + pass diff --git a/libs/CodernityDB/tree_index.py b/libs/CodernityDB/tree_index.py new file mode 100644 index 0000000000..4257b442a2 --- /dev/null +++ b/libs/CodernityDB/tree_index.py @@ -0,0 +1,2048 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Copyright 2011-2013 Codernity (http://codernity.com) +# +# Licensed under the Apache License, Version 2.0 (the 
"License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from index import Index, IndexException, DocIdNotFound, ElemNotFound +import struct +import marshal +import os +import io +import shutil +from storage import IU_Storage +# from ipdb import set_trace + +from CodernityDB.env import cdb_environment +from CodernityDB.index import TryReindexException + +if cdb_environment.get('rlock_obj'): + from CodernityDB import patch + patch.patch_cache_rr(cdb_environment['rlock_obj']) + +from CodernityDB.rr_cache import cache1lvl, cache2lvl + +tree_buffer_size = io.DEFAULT_BUFFER_SIZE + +cdb_environment['tree_buffer_size'] = tree_buffer_size + + +MODE_FIRST = 0 +MODE_LAST = 1 + +MOVE_BUFFER_PREV = 0 +MOVE_BUFFER_NEXT = 1 + + +class NodeCapacityException(IndexException): + pass + + +class IU_TreeBasedIndex(Index): + + custom_header = 'from CodernityDB.tree_index import TreeBasedIndex' + + def __init__(self, db_path, name, key_format='32s', pointer_format='I', + meta_format='32sIIc', node_capacity=10, storage_class=None): + if node_capacity < 3: + raise NodeCapacityException + super(IU_TreeBasedIndex, self).__init__(db_path, name) + self.data_start = self._start_ind + 1 + self.node_capacity = node_capacity + self.flag_format = 'c' + self.elements_counter_format = 'h' + self.pointer_format = pointer_format + self.key_format = key_format + self.meta_format = meta_format + self._count_props() + if not storage_class: + storage_class = IU_Storage + if storage_class and not isinstance(storage_class, basestring): + storage_class = 
storage_class.__name__ + self.storage_class = storage_class + self.storage = None + cache = cache1lvl(100) + twolvl_cache = cache2lvl(150) + self._find_key = cache(self._find_key) + self._match_doc_id = cache(self._match_doc_id) +# self._read_single_leaf_record = +# twolvl_cache(self._read_single_leaf_record) + self._find_key_in_leaf = twolvl_cache(self._find_key_in_leaf) + self._read_single_node_key = twolvl_cache(self._read_single_node_key) + self._find_first_key_occurence_in_node = twolvl_cache( + self._find_first_key_occurence_in_node) + self._find_last_key_occurence_in_node = twolvl_cache( + self._find_last_key_occurence_in_node) + self._read_leaf_nr_of_elements = cache(self._read_leaf_nr_of_elements) + self._read_leaf_neighbours = cache(self._read_leaf_neighbours) + self._read_leaf_nr_of_elements_and_neighbours = cache( + self._read_leaf_nr_of_elements_and_neighbours) + self._read_node_nr_of_elements_and_children_flag = cache( + self._read_node_nr_of_elements_and_children_flag) + + def _count_props(self): + """ + Counts dynamic properties for tree, such as all complex formats + """ + self.single_leaf_record_format = self.key_format + self.meta_format + self.single_node_record_format = self.pointer_format + \ + self.key_format + self.pointer_format + self.node_format = self.elements_counter_format + self.flag_format\ + + self.pointer_format + (self.key_format + + self.pointer_format) * self.node_capacity + self.leaf_format = self.elements_counter_format + self.pointer_format * 2\ + + (self.single_leaf_record_format) * self.node_capacity + self.leaf_heading_format = self.elements_counter_format + \ + self.pointer_format * 2 + self.node_heading_format = self.elements_counter_format + \ + self.flag_format + self.key_size = struct.calcsize('<' + self.key_format) + self.meta_size = struct.calcsize('<' + self.meta_format) + self.single_leaf_record_size = struct.calcsize('<' + self. 
+ single_leaf_record_format) + self.single_node_record_size = struct.calcsize('<' + self. + single_node_record_format) + self.node_size = struct.calcsize('<' + self.node_format) + self.leaf_size = struct.calcsize('<' + self.leaf_format) + self.flag_size = struct.calcsize('<' + self.flag_format) + self.elements_counter_size = struct.calcsize('<' + self. + elements_counter_format) + self.pointer_size = struct.calcsize('<' + self.pointer_format) + self.leaf_heading_size = struct.calcsize( + '<' + self.leaf_heading_format) + self.node_heading_size = struct.calcsize( + '<' + self.node_heading_format) + + def create_index(self): + if os.path.isfile(os.path.join(self.db_path, self.name + '_buck')): + raise IndexException('Already exists') + with io.open(os.path.join(self.db_path, self.name + "_buck"), 'w+b') as f: + props = dict(name=self.name, + flag_format=self.flag_format, + pointer_format=self.pointer_format, + elements_counter_format=self.elements_counter_format, + node_capacity=self.node_capacity, + key_format=self.key_format, + meta_format=self.meta_format, + version=self.__version__, + storage_class=self.storage_class) + f.write(marshal.dumps(props)) + self.buckets = io.open(os.path.join(self.db_path, self.name + + "_buck"), 'r+b', buffering=0) + self._create_storage() + self.buckets.seek(self._start_ind) + self.buckets.write(struct.pack(' candidate_start: + move_buffer = MOVE_BUFFER_PREV + elif buffer_end < candidate_start + self.single_leaf_record_size: + move_buffer = MOVE_BUFFER_NEXT + else: + move_buffer = None + return self._calculate_key_position(leaf_start, (imin + imax) / 2, 'l'), (imin + imax) / 2, move_buffer + + def _choose_next_candidate_index_in_node(self, node_start, candidate_start, buffer_start, buffer_end, imin, imax): + if buffer_start > candidate_start: + move_buffer = MOVE_BUFFER_PREV + elif buffer_end < candidate_start + self.single_node_record_size: + (self.pointer_size + self.key_size) - 1 + move_buffer = MOVE_BUFFER_NEXT + else: + 
move_buffer = None + return self._calculate_key_position(node_start, (imin + imax) / 2, 'n'), (imin + imax) / 2, move_buffer + + def _find_key_in_leaf(self, leaf_start, key, nr_of_elements): + if nr_of_elements == 1: + return self._find_key_in_leaf_with_one_element(key, leaf_start)[-5:] + else: + return self._find_key_in_leaf_using_binary_search(key, leaf_start, nr_of_elements)[-5:] + + def _find_key_in_leaf_for_update(self, key, doc_id, leaf_start, nr_of_elements): + if nr_of_elements == 1: + return self._find_key_in_leaf_with_one_element(key, leaf_start, doc_id=doc_id) + else: + return self._find_key_in_leaf_using_binary_search(key, leaf_start, nr_of_elements, mode=MODE_FIRST, doc_id=doc_id) + + def _find_index_of_first_key_equal_or_smaller_key(self, key, leaf_start, nr_of_elements): + if nr_of_elements == 1: + return self._find_key_in_leaf_with_one_element(key, leaf_start, mode=MODE_FIRST, return_closest=True)[:2] + else: + return self._find_key_in_leaf_using_binary_search(key, leaf_start, nr_of_elements, mode=MODE_FIRST, return_closest=True)[:2] + + def _find_index_of_last_key_equal_or_smaller_key(self, key, leaf_start, nr_of_elements): + if nr_of_elements == 1: + return self._find_key_in_leaf_with_one_element(key, leaf_start, mode=MODE_LAST, return_closest=True)[:2] + else: + return self._find_key_in_leaf_using_binary_search(key, leaf_start, nr_of_elements, mode=MODE_LAST, return_closest=True)[:2] + + def _find_index_of_first_key_equal(self, key, leaf_start, nr_of_elements): + if nr_of_elements == 1: + return self._find_key_in_leaf_with_one_element(key, leaf_start, mode=MODE_FIRST)[:2] + else: + return self._find_key_in_leaf_using_binary_search(key, leaf_start, nr_of_elements, mode=MODE_FIRST)[:2] + + def _find_key_in_leaf_with_one_element(self, key, leaf_start, doc_id=None, mode=None, return_closest=False): + curr_key, curr_doc_id, curr_start, curr_size,\ + curr_status = self._read_single_leaf_record(leaf_start, 0) + if key != curr_key: + if return_closest 
and curr_status != 'd': + return leaf_start, 0 + else: + raise ElemNotFound + else: + if curr_status == 'd': + raise ElemNotFound + elif doc_id is not None and doc_id != curr_doc_id: +# should't happen, crashes earlier on id index + raise DocIdNotFound + else: + return leaf_start, 0, curr_doc_id, curr_key, curr_start, curr_size, curr_status + + def _find_key_in_leaf_using_binary_search(self, key, leaf_start, nr_of_elements, doc_id=None, mode=None, return_closest=False): + """ + Binary search implementation used in all get functions + """ + imin, imax = 0, nr_of_elements - 1 + buffer_start, buffer_end = self._set_buffer_limits() + candidate_start, candidate_index, move_buffer = self._choose_next_candidate_index_in_leaf(leaf_start, + self._calculate_key_position(leaf_start, + (imin + imax) / 2, + 'l'), + buffer_start, + buffer_end, + imin, imax) + while imax != imin and imax > imin: + curr_key, curr_doc_id, curr_start, curr_size, curr_status = self._read_single_leaf_record(leaf_start, + candidate_index) + candidate_start = self._calculate_key_position( + leaf_start, candidate_index, 'l') + if key < curr_key: + if move_buffer == MOVE_BUFFER_PREV: + buffer_start, buffer_end = self._prev_buffer( + buffer_start, buffer_end) + else: # if next chosen element is in current buffer, abort moving to other + move_buffer is None + imax = candidate_index - 1 + candidate_start, candidate_index, move_buffer = self._choose_next_candidate_index_in_leaf(leaf_start, + candidate_start, + buffer_start, + buffer_end, + imin, imax) + elif key == curr_key: + if mode == MODE_LAST: + if move_buffer == MOVE_BUFFER_NEXT: + buffer_start, buffer_end = self._next_buffer( + buffer_start, buffer_end) + else: + move_buffer is None + imin = candidate_index + 1 + candidate_start, candidate_index, move_buffer = self._choose_next_candidate_index_in_leaf(leaf_start, + candidate_start, + buffer_start, + buffer_end, + imin, imax) + else: + if curr_status == 'o': + break + else: + if move_buffer == 
MOVE_BUFFER_PREV: + buffer_start, buffer_end = self._prev_buffer( + buffer_start, buffer_end) + else: + move_buffer is None + imax = candidate_index + candidate_start, candidate_index, move_buffer = self._choose_next_candidate_index_in_leaf(leaf_start, + candidate_start, + buffer_start, + buffer_end, + imin, imax) + else: + if move_buffer == MOVE_BUFFER_NEXT: + buffer_start, buffer_end = self._next_buffer( + buffer_start, buffer_end) + else: + move_buffer is None + imin = candidate_index + 1 + candidate_start, candidate_index, move_buffer = self._choose_next_candidate_index_in_leaf(leaf_start, + candidate_start, + buffer_start, + buffer_end, + imin, imax) + + if imax > imin: + chosen_key_position = candidate_index + else: + chosen_key_position = imax + curr_key, curr_doc_id, curr_start, curr_size, curr_status = self._read_single_leaf_record(leaf_start, + chosen_key_position) + if key != curr_key: + if return_closest: # useful for find all bigger/smaller methods + return leaf_start, chosen_key_position + else: + raise ElemNotFound + if doc_id and doc_id == curr_doc_id and curr_status == 'o': + return leaf_start, chosen_key_position, curr_doc_id, curr_key, curr_start, curr_size, curr_status + else: + if mode == MODE_FIRST and imin < chosen_key_position: # check if there isn't any element with equal key before chosen one + matching_record_index = self._leaf_linear_key_search(key, + self._calculate_key_position(leaf_start, + imin, + 'l'), + imin, + chosen_key_position) + else: + matching_record_index = chosen_key_position + curr_key, curr_doc_id, curr_start, curr_size, curr_status = self._read_single_leaf_record(leaf_start, + matching_record_index) + if curr_status == 'd' and not return_closest: + leaf_start, nr_of_elements, matching_record_index = self._find_existing(key, + matching_record_index, + leaf_start, + nr_of_elements) + curr_key, curr_doc_id, curr_start, curr_size, curr_status = self._read_single_leaf_record(leaf_start, + matching_record_index) + if doc_id 
is not None and doc_id != curr_doc_id: + leaf_start, nr_of_elements, matching_record_index = self._match_doc_id(doc_id, + key, + matching_record_index, + leaf_start, + nr_of_elements) + curr_key, curr_doc_id, curr_start, curr_size, curr_status = self._read_single_leaf_record(leaf_start, + matching_record_index) + return leaf_start, matching_record_index, curr_doc_id, curr_key, curr_start, curr_size, curr_status + + def _find_place_in_leaf(self, key, leaf_start, nr_of_elements): + if nr_of_elements == 1: + return self._find_place_in_leaf_with_one_element(key, leaf_start) + else: + return self._find_place_in_leaf_using_binary_search(key, leaf_start, nr_of_elements) + + def _find_place_in_leaf_with_one_element(self, key, leaf_start): + curr_key, curr_doc_id, curr_start, curr_size,\ + curr_status = self._read_single_leaf_record(leaf_start, 0) + if curr_status == 'd': + return leaf_start, 0, 0, False, True # leaf start, index of new key position, nr of rec to rewrite, full_leaf flag, on_deleted flag + else: + if key < curr_key: + return leaf_start, 0, 1, False, False + else: + return leaf_start, 1, 0, False, False + + def _find_place_in_leaf_using_binary_search(self, key, leaf_start, nr_of_elements): + """ + Binary search implementation used in insert function + """ + imin, imax = 0, nr_of_elements - 1 + buffer_start, buffer_end = self._set_buffer_limits() + candidate_start, candidate_index, move_buffer = self._choose_next_candidate_index_in_leaf(leaf_start, + self._calculate_key_position(leaf_start, + (imin + imax) / 2, + 'l'), + buffer_start, + buffer_end, + imin, imax) + while imax != imin and imax > imin: + curr_key, curr_doc_id, curr_start, curr_size, curr_status = self._read_single_leaf_record(leaf_start, + candidate_index) + candidate_start = self._calculate_key_position( + leaf_start, candidate_index, 'l') + if key < curr_key: + if move_buffer == MOVE_BUFFER_PREV: + buffer_start, buffer_end = self._prev_buffer( + buffer_start, buffer_end) + else: # if next 
chosen element is in current buffer, abort moving to other + move_buffer is None + imax = candidate_index - 1 + candidate_start, candidate_index, move_buffer = self._choose_next_candidate_index_in_leaf(leaf_start, + candidate_start, + buffer_start, + buffer_end, + imin, imax) + else: + if move_buffer == MOVE_BUFFER_NEXT: + buffer_start, buffer_end = self._next_buffer( + buffer_start, buffer_end) + else: + move_buffer is None + imin = candidate_index + 1 + candidate_start, candidate_index, move_buffer = self._choose_next_candidate_index_in_leaf(leaf_start, + candidate_start, + buffer_start, + buffer_end, + imin, imax) + if imax < imin and imin < nr_of_elements: + chosen_key_position = imin + else: + chosen_key_position = imax + curr_key, curr_doc_id, curr_start, curr_size, curr_status = self._read_single_leaf_record(leaf_start, + chosen_key_position) + if curr_status == 'd': + return leaf_start, chosen_key_position, 0, False, True + elif key < curr_key: + if chosen_key_position > 0: + curr_key, curr_doc_id, curr_start, curr_size, curr_status = self._read_single_leaf_record(leaf_start, + chosen_key_position - 1) + if curr_start == 'd': + return leaf_start, chosen_key_position - 1, 0, False, True + else: + return leaf_start, chosen_key_position, nr_of_elements - chosen_key_position, (nr_of_elements == self.node_capacity), False + else: + return leaf_start, chosen_key_position, nr_of_elements - chosen_key_position, (nr_of_elements == self.node_capacity), False + else: + if chosen_key_position < nr_of_elements - 1: + curr_key, curr_doc_id, curr_start, curr_size, curr_status = self._read_single_leaf_record(leaf_start, + chosen_key_position + 1) + if curr_start == 'd': + return leaf_start, chosen_key_position + 1, 0, False, True + else: + return leaf_start, chosen_key_position + 1, nr_of_elements - chosen_key_position - 1, (nr_of_elements == self.node_capacity), False + else: + return leaf_start, chosen_key_position + 1, nr_of_elements - chosen_key_position - 1, 
(nr_of_elements == self.node_capacity), False + + def _set_buffer_limits(self): + pos = self.buckets.tell() + buffer_start = pos - (pos % tree_buffer_size) + return buffer_start, (buffer_start + tree_buffer_size) + + def _find_first_key_occurence_in_node(self, node_start, key, nr_of_elements): + if nr_of_elements == 1: + return self._find_key_in_node_with_one_element(key, node_start, mode=MODE_FIRST) + else: + return self._find_key_in_node_using_binary_search(key, node_start, nr_of_elements, mode=MODE_FIRST) + + def _find_last_key_occurence_in_node(self, node_start, key, nr_of_elements): + if nr_of_elements == 1: + return self._find_key_in_node_with_one_element(key, node_start, mode=MODE_LAST) + else: + return self._find_key_in_node_using_binary_search(key, node_start, nr_of_elements, mode=MODE_LAST) + + def _find_key_in_node_with_one_element(self, key, node_start, mode=None): + l_pointer, curr_key, r_pointer = self._read_single_node_key( + node_start, 0) + if key < curr_key: + return 0, l_pointer + elif key > curr_key: + return 0, r_pointer + else: + if mode == MODE_FIRST: + return 0, l_pointer + elif mode == MODE_LAST: + return 0, r_pointer + else: + raise Exception('Invalid mode declared: set first/last') + + def _find_key_in_node_using_binary_search(self, key, node_start, nr_of_elements, mode=None): + imin, imax = 0, nr_of_elements - 1 + buffer_start, buffer_end = self._set_buffer_limits() + candidate_start, candidate_index, move_buffer = self._choose_next_candidate_index_in_node(node_start, + self._calculate_key_position(node_start, + (imin + imax) / 2, + 'n'), + buffer_start, + buffer_end, + imin, imax) + while imax != imin and imax > imin: + l_pointer, curr_key, r_pointer = self._read_single_node_key( + node_start, candidate_index) + candidate_start = self._calculate_key_position( + node_start, candidate_index, 'n') + if key < curr_key: + if move_buffer == MOVE_BUFFER_PREV: + buffer_start, buffer_end = self._prev_buffer( + buffer_start, buffer_end) + else: # 
if next chosen element is in current buffer, abort moving to other + move_buffer is None + imax = candidate_index - 1 + candidate_start, candidate_index, move_buffer = self._choose_next_candidate_index_in_node(node_start, + candidate_start, + buffer_start, + buffer_end, + imin, imax) + elif key == curr_key: + if mode == MODE_LAST: + if move_buffer == MOVE_BUFFER_NEXT: + buffer_start, buffer_end = self._next_buffer( + buffer_start, buffer_end) + else: + move_buffer is None + imin = candidate_index + 1 + candidate_start, candidate_index, move_buffer = self._choose_next_candidate_index_in_node(node_start, + candidate_start, + buffer_start, + buffer_end, + imin, imax) + else: + break + else: + if move_buffer == MOVE_BUFFER_NEXT: + buffer_start, buffer_end = self._next_buffer( + buffer_start, buffer_end) + else: + move_buffer is None + imin = candidate_index + 1 + candidate_start, candidate_index, move_buffer = self._choose_next_candidate_index_in_node(node_start, + candidate_start, + buffer_start, + buffer_end, + imin, imax) + + if imax > imin: + chosen_key_position = candidate_index + elif imax < imin and imin < nr_of_elements: + chosen_key_position = imin + else: + chosen_key_position = imax + l_pointer, curr_key, r_pointer = self._read_single_node_key( + node_start, chosen_key_position) + if mode == MODE_FIRST and imin < chosen_key_position: # check if there is no elements with equal key before chosen one + matching_record_index = self._node_linear_key_search(key, + self._calculate_key_position(node_start, + imin, + 'n'), + imin, + chosen_key_position) + else: + matching_record_index = chosen_key_position + l_pointer, curr_key, r_pointer = self._read_single_node_key( + node_start, matching_record_index) + if key < curr_key: + return matching_record_index, l_pointer + elif key > curr_key: + return matching_record_index, r_pointer + else: + if mode == MODE_FIRST: + return matching_record_index, l_pointer + elif mode == MODE_LAST: + return matching_record_index, 
r_pointer + else: + raise Exception('Invalid mode declared: first/last') + + def _update_leaf_ready_data(self, leaf_start, start_index, new_nr_of_elements, records_to_rewrite): + self.buckets.seek(leaf_start) + self.buckets.write(struct.pack(' new_leaf_size - 1: + key_moved_to_parent_node = leaf_data[(old_leaf_size - 1) * 5] + elif nr_of_records_to_rewrite == new_leaf_size - 1: + key_moved_to_parent_node = new_data[0] + else: + key_moved_to_parent_node = leaf_data[old_leaf_size * 5] + data_to_write = self._prepare_new_root_data(key_moved_to_parent_node, + left_leaf_start_position, + right_leaf_start_position, + 'l') + if nr_of_records_to_rewrite > half_size: + # key goes to first half + # prepare left leaf data + left_leaf_data = struct.pack('<' + self.leaf_heading_format + self.single_leaf_record_format + * (self.node_capacity - nr_of_records_to_rewrite), + old_leaf_size, + 0, + right_leaf_start_position, + *leaf_data[:-nr_of_records_to_rewrite * 5]) + left_leaf_data += struct.pack( + '<' + self.single_leaf_record_format * ( + nr_of_records_to_rewrite - new_leaf_size + 1), + new_data[0], + new_data[1], + new_data[2], + new_data[3], + new_data[4], + *leaf_data[-nr_of_records_to_rewrite * 5:(old_leaf_size - 1) * 5]) + # prepare right leaf_data + right_leaf_data = struct.pack('<' + self.elements_counter_format + 2 * self.pointer_format + + self.single_leaf_record_format * + new_leaf_size, + new_leaf_size, + left_leaf_start_position, + 0, + *leaf_data[-new_leaf_size * 5:]) + else: + # key goes to second half + if nr_of_records_to_rewrite: + records_before = leaf_data[old_leaf_size * + 5:-nr_of_records_to_rewrite * 5] + records_after = leaf_data[-nr_of_records_to_rewrite * 5:] + else: + records_before = leaf_data[old_leaf_size * 5:] + records_after = [] + + left_leaf_data = struct.pack( + '<' + self.leaf_heading_format + + self.single_leaf_record_format * old_leaf_size, + old_leaf_size, + 0, + right_leaf_start_position, + *leaf_data[:old_leaf_size * 5]) + # prepare 
right leaf_data + right_leaf_data = struct.pack('<' + self.elements_counter_format + 2 * self.pointer_format + + self.single_leaf_record_format * (new_leaf_size - + nr_of_records_to_rewrite - 1), + new_leaf_size, + left_leaf_start_position, + 0, + *records_before) + right_leaf_data += struct.pack( + '<' + self.single_leaf_record_format * ( + nr_of_records_to_rewrite + 1), + new_data[0], + new_data[1], + new_data[2], + new_data[3], + new_data[4], + *records_after) + left_leaf_data += (self.node_capacity - + old_leaf_size) * self.single_leaf_record_size * '\x00' + right_leaf_data += blanks + data_to_write += left_leaf_data + data_to_write += right_leaf_data + self.buckets.seek(self._start_ind) + self.buckets.write(struct.pack(' half_size: # insert key into first half of leaf + self.buckets.seek(self._calculate_key_position(leaf_start, + self.node_capacity - nr_of_records_to_rewrite, + 'l')) + # read all records with key>new_key + data = self.buckets.read( + nr_of_records_to_rewrite * self.single_leaf_record_size) + records_to_rewrite = struct.unpack( + '<' + nr_of_records_to_rewrite * self.single_leaf_record_format, data) + # remove deleted records, if succeded abort spliting + if self._update_if_has_deleted(leaf_start, + records_to_rewrite, + self.node_capacity - + nr_of_records_to_rewrite, + [new_key, new_doc_id, new_start, new_size, new_status]): + return None + key_moved_to_parent_node = records_to_rewrite[ + -new_leaf_size * 5] + # write new leaf at end of file + self.buckets.seek(0, 2) # end of file + new_leaf_start = self.buckets.tell() + # prepare new leaf_data + new_leaf = struct.pack('<' + self.elements_counter_format + 2 * self.pointer_format + + self.single_leaf_record_format * + new_leaf_size, + new_leaf_size, + leaf_start, + next_l, + *records_to_rewrite[-new_leaf_size * 5:]) + new_leaf += blanks + # write new leaf + self.buckets.write(new_leaf) + # update old leaf heading + self._update_leaf_size_and_pointers(leaf_start, + old_leaf_size, + prev_l, + 
new_leaf_start) + # seek position of new key in first half + self.buckets.seek(self._calculate_key_position(leaf_start, + self.node_capacity - nr_of_records_to_rewrite, + 'l')) + # write new key and keys after + self.buckets.write( + struct.pack( + '<' + self.single_leaf_record_format * + (nr_of_records_to_rewrite - new_leaf_size + 1), + new_key, + new_doc_id, + new_start, + new_size, + 'o', + *records_to_rewrite[:-new_leaf_size * 5])) + + if next_l: # when next_l is 0 there is no next leaf to update, avoids writing data at 0 position of file + self._update_leaf_prev_pointer( + next_l, new_leaf_start) + +# self._read_single_leaf_record.delete(leaf_start) + self._find_key_in_leaf.delete(leaf_start) + + return new_leaf_start, key_moved_to_parent_node + else: # key goes into second half of leaf ' + # seek half of the leaf + self.buckets.seek(self._calculate_key_position( + leaf_start, old_leaf_size, 'l')) + data = self.buckets.read( + self.single_leaf_record_size * (new_leaf_size - 1)) + records_to_rewrite = struct.unpack('<' + (new_leaf_size - 1) * + self.single_leaf_record_format, data) + # remove deleted records, if succeded abort spliting + if self._update_if_has_deleted(leaf_start, + records_to_rewrite, + old_leaf_size, + [new_key, new_doc_id, new_start, new_size, new_status]): + return None + key_moved_to_parent_node = records_to_rewrite[ + -(new_leaf_size - 1) * 5] + if key_moved_to_parent_node > new_key: + key_moved_to_parent_node = new_key + self.buckets.seek(0, 2) # end of file + new_leaf_start = self.buckets.tell() + # prepare new leaf data + index_of_records_split = nr_of_records_to_rewrite * 5 + if index_of_records_split: + records_before = records_to_rewrite[ + :-index_of_records_split] + records_after = records_to_rewrite[ + -index_of_records_split:] + else: + records_before = records_to_rewrite + records_after = [] + new_leaf = struct.pack('<' + self.elements_counter_format + 2 * self.pointer_format + + self.single_leaf_record_format * (new_leaf_size - 
+ nr_of_records_to_rewrite - 1), + new_leaf_size, + leaf_start, + next_l, + *records_before) + new_leaf += struct.pack( + '<' + self.single_leaf_record_format * + (nr_of_records_to_rewrite + 1), + new_key, + new_doc_id, + new_start, + new_size, + 'o', + *records_after) + new_leaf += blanks + self.buckets.write(new_leaf) + self._update_leaf_size_and_pointers(leaf_start, + old_leaf_size, + prev_l, + new_leaf_start) + if next_l: # pren next_l is 0 there is no next leaf to update, avoids writing data at 0 position of file + self._update_leaf_prev_pointer( + next_l, new_leaf_start) + +# self._read_single_leaf_record.delete(leaf_start) + self._find_key_in_leaf.delete(leaf_start) + + return new_leaf_start, key_moved_to_parent_node + + def _update_if_has_deleted(self, leaf_start, records_to_rewrite, start_position, new_record_data): + """ + Checks if there are any deleted elements in data to rewrite and prevent from writing then back. + """ + curr_index = 0 + nr_of_elements = self.node_capacity + records_to_rewrite = list(records_to_rewrite) + for status in records_to_rewrite[4::5]: # remove deleted from list + if status != 'o': + del records_to_rewrite[curr_index * 5:curr_index * 5 + 5] + nr_of_elements -= 1 + else: + curr_index += 1 + # if were deleted dont have to split, just update leaf + if nr_of_elements < self.node_capacity: + data_split_index = 0 + for key in records_to_rewrite[0::5]: + if key > new_record_data[0]: + break + else: + data_split_index += 1 + records_to_rewrite = records_to_rewrite[:data_split_index * 5]\ + + new_record_data\ + + records_to_rewrite[data_split_index * 5:] + self._update_leaf_ready_data(leaf_start, + start_position, + nr_of_elements + 1, + records_to_rewrite), + return True + else: # did not found any deleted records in leaf + return False + + def _prepare_new_root_data(self, root_key, left_pointer, right_pointer, children_flag='n'): + new_root = struct.pack( + '<' + self.node_heading_format + self.single_node_record_format, + 1, + 
children_flag, + left_pointer, + root_key, + right_pointer) + new_root += (self.key_size + self.pointer_size) * (self. + node_capacity - 1) * '\x00' + return new_root + + def _create_new_root_from_node(self, node_start, children_flag, nr_of_keys_to_rewrite, new_node_size, old_node_size, new_key, new_pointer): + # reading second half of node + self.buckets.seek(self.data_start + self.node_heading_size) + # read all keys with key>new_key + data = self.buckets.read(self.pointer_size + self. + node_capacity * (self.key_size + self.pointer_size)) + old_node_data = struct.unpack('<' + self.pointer_format + self.node_capacity * + (self.key_format + self.pointer_format), data) + self.buckets.seek(0, 2) # end of file + new_node_start = self.buckets.tell() + if nr_of_keys_to_rewrite == new_node_size: + key_moved_to_root = new_key + # prepare new nodes data + left_node = struct.pack('<' + self.node_heading_format + self.pointer_format + + old_node_size * (self. + key_format + self.pointer_format), + old_node_size, + children_flag, + *old_node_data[:old_node_size * 2 + 1]) + + right_node = struct.pack('<' + self.node_heading_format + self.pointer_format + + new_node_size * (self. + key_format + self.pointer_format), + new_node_size, + children_flag, + new_pointer, + *old_node_data[old_node_size * 2 + 1:]) + elif nr_of_keys_to_rewrite > new_node_size: + key_moved_to_root = old_node_data[old_node_size * 2 - 1] + # prepare new nodes data + if nr_of_keys_to_rewrite == self.node_capacity: + keys_before = old_node_data[:1] + keys_after = old_node_data[1:old_node_size * 2 - 1] + else: + keys_before = old_node_data[:-nr_of_keys_to_rewrite * 2] + keys_after = old_node_data[-( + nr_of_keys_to_rewrite) * 2:old_node_size * 2 - 1] + left_node = struct.pack('<' + self.node_heading_format + self.pointer_format + + (self.node_capacity - nr_of_keys_to_rewrite) * (self. 
+ key_format + self.pointer_format), + old_node_size, + children_flag, + *keys_before) + left_node += struct.pack( + '<' + (self.key_format + self.pointer_format) * + (nr_of_keys_to_rewrite - new_node_size), + new_key, + new_pointer, + *keys_after) + + right_node = struct.pack('<' + self.node_heading_format + self.pointer_format + + new_node_size * (self. + key_format + self.pointer_format), + new_node_size, + children_flag, + *old_node_data[old_node_size * 2:]) + else: +# 'inserting key into second half of node and creating new root' + key_moved_to_root = old_node_data[old_node_size * 2 + 1] + # prepare new nodes data + left_node = struct.pack('<' + self.node_heading_format + self.pointer_format + + old_node_size * (self. + key_format + self.pointer_format), + old_node_size, + children_flag, + *old_node_data[:old_node_size * 2 + 1]) + if nr_of_keys_to_rewrite: + keys_before = old_node_data[(old_node_size + + 1) * 2:-nr_of_keys_to_rewrite * 2] + keys_after = old_node_data[-nr_of_keys_to_rewrite * 2:] + else: + keys_before = old_node_data[(old_node_size + 1) * 2:] + keys_after = [] + right_node = struct.pack('<' + self.node_heading_format + self.pointer_format + + (new_node_size - nr_of_keys_to_rewrite - 1) * (self. 
+ key_format + self.pointer_format), + new_node_size, + children_flag, + *keys_before) + right_node += struct.pack( + '<' + (nr_of_keys_to_rewrite + 1) * + (self.key_format + self.pointer_format), + new_key, + new_pointer, + *keys_after) + new_root = self._prepare_new_root_data(key_moved_to_root, + new_node_start, + new_node_start + self.node_size) + left_node += (self.node_capacity - old_node_size) * \ + (self.key_size + self.pointer_size) * '\x00' + # adding blanks after new node + right_node += (self.node_capacity - new_node_size) * \ + (self.key_size + self.pointer_size) * '\x00' + self.buckets.seek(0, 2) + self.buckets.write(left_node + right_node) + self.buckets.seek(self.data_start) + self.buckets.write(new_root) + + self._read_single_node_key.delete(node_start) + self._read_node_nr_of_elements_and_children_flag.delete(node_start) + return None + + def _split_node(self, node_start, nr_of_keys_to_rewrite, new_key, new_pointer, children_flag, create_new_root=False): + """ + Splits full node in two separate ones, first half of records stays on old position, + second half is written as new leaf at the end of file. 
+ """ + half_size = self.node_capacity / 2 + if self.node_capacity % 2 == 0: + old_node_size = new_node_size = half_size + else: + old_node_size = half_size + new_node_size = half_size + 1 + if create_new_root: + self._create_new_root_from_node(node_start, children_flag, nr_of_keys_to_rewrite, new_node_size, old_node_size, new_key, new_pointer) + else: + blanks = (self.node_capacity - new_node_size) * ( + self.key_size + self.pointer_size) * '\x00' + if nr_of_keys_to_rewrite == new_node_size: # insert key into first half of node + # reading second half of node + self.buckets.seek(self._calculate_key_position(node_start, + old_node_size, + 'n') + self.pointer_size) + # read all keys with key>new_key + data = self.buckets.read(nr_of_keys_to_rewrite * + (self.key_size + self.pointer_size)) + old_node_data = struct.unpack('<' + nr_of_keys_to_rewrite * + (self.key_format + self.pointer_format), data) + # write new node at end of file + self.buckets.seek(0, 2) + new_node_start = self.buckets.tell() + # prepare new node_data + new_node = struct.pack('<' + self.node_heading_format + self.pointer_format + + (self.key_format + + self.pointer_format) * new_node_size, + new_node_size, + children_flag, + new_pointer, + *old_node_data) + new_node += blanks + # write new node + self.buckets.write(new_node) + # update old node data + self._update_size( + node_start, old_node_size) + + self._read_single_node_key.delete(node_start) + self._read_node_nr_of_elements_and_children_flag.delete( + node_start) + + return new_node_start, new_key + elif nr_of_keys_to_rewrite > half_size: # insert key into first half of node + # seek for first key to rewrite + self.buckets.seek(self._calculate_key_position(node_start, self.node_capacity - nr_of_keys_to_rewrite, 'n') + + self.pointer_size) + # read all keys with key>new_key + data = self.buckets.read( + nr_of_keys_to_rewrite * (self.key_size + self.pointer_size)) + old_node_data = struct.unpack( + '<' + nr_of_keys_to_rewrite * (self.key_format 
+ self.pointer_format), data) + key_moved_to_parent_node = old_node_data[-( + new_node_size + 1) * 2] + self.buckets.seek(0, 2) + new_node_start = self.buckets.tell() + # prepare new node_data + new_node = struct.pack('<' + self.node_heading_format + + self.pointer_format + (self.key_format + + self.pointer_format) * new_node_size, + new_node_size, + children_flag, + old_node_data[-new_node_size * 2 - 1], + *old_node_data[-new_node_size * 2:]) + new_node += blanks + # write new node + self.buckets.write(new_node) + self._update_size( + node_start, old_node_size) + # seek position of new key in first half + self.buckets.seek(self._calculate_key_position(node_start, self.node_capacity - nr_of_keys_to_rewrite, 'n') + + self.pointer_size) + # write new key and keys after + self.buckets.write( + struct.pack( + '<' + (self.key_format + self.pointer_format) * + (nr_of_keys_to_rewrite - new_node_size), + new_key, + new_pointer, + *old_node_data[:-(new_node_size + 1) * 2])) + + self._read_single_node_key.delete(node_start) + self._read_node_nr_of_elements_and_children_flag.delete( + node_start) + + return new_node_start, key_moved_to_parent_node + else: # key goes into second half + # reading second half of node + self.buckets.seek(self._calculate_key_position(node_start, + old_node_size, + 'n') + + self.pointer_size) + data = self.buckets.read( + new_node_size * (self.key_size + self.pointer_size)) + old_node_data = struct.unpack('<' + new_node_size * + (self.key_format + self.pointer_format), data) + # find key which goes to parent node + key_moved_to_parent_node = old_node_data[0] + self.buckets.seek(0, 2) # end of file + new_node_start = self.buckets.tell() + index_of_records_split = nr_of_keys_to_rewrite * 2 + # prepare new node_data + first_leaf_pointer = old_node_data[1] + old_node_data = old_node_data[2:] + if index_of_records_split: + keys_before = old_node_data[:-index_of_records_split] + keys_after = old_node_data[-index_of_records_split:] + else: + keys_before = 
old_node_data + keys_after = [] + new_node = struct.pack('<' + self.node_heading_format + self.pointer_format + + (self.key_format + self.pointer_format) * + (new_node_size - + nr_of_keys_to_rewrite - 1), + new_node_size, + children_flag, + first_leaf_pointer, + *keys_before) + new_node += struct.pack('<' + (self.key_format + self.pointer_format) * + (nr_of_keys_to_rewrite + 1), + new_key, + new_pointer, + *keys_after) + new_node += blanks + # write new node + self.buckets.write(new_node) + self._update_size(node_start, old_node_size) + + self._read_single_node_key.delete(node_start) + self._read_node_nr_of_elements_and_children_flag.delete( + node_start) + + return new_node_start, key_moved_to_parent_node + + def insert_first_record_into_leaf(self, leaf_start, key, doc_id, start, size, status): + self.buckets.seek(leaf_start) + self.buckets.write(struct.pack('<' + self.elements_counter_format, + 1)) + self.buckets.seek(leaf_start + self.leaf_heading_size) + self.buckets.write(struct.pack('<' + self.single_leaf_record_format, + key, + doc_id, + start, + size, + status)) + +# self._read_single_leaf_record.delete(leaf_start) + self._find_key_in_leaf.delete(leaf_start) + self._read_leaf_nr_of_elements.delete(leaf_start) + self._read_leaf_nr_of_elements_and_neighbours.delete(leaf_start) + + def _insert_new_record_into_leaf(self, leaf_start, key, doc_id, start, size, status, nodes_stack, indexes): + nr_of_elements = self._read_leaf_nr_of_elements(leaf_start) + if nr_of_elements == 0: + self.insert_first_record_into_leaf( + leaf_start, key, doc_id, start, size, status) + return + leaf_start, new_record_position, nr_of_records_to_rewrite, full_leaf, on_deleted\ + = self._find_place_in_leaf(key, leaf_start, nr_of_elements) + if full_leaf: + try: # check if leaf has parent node + leaf_parent_pointer = nodes_stack.pop() + except IndexError: # leaf is a root + leaf_parent_pointer = 0 + split_data = self._split_leaf(leaf_start, + nr_of_records_to_rewrite, + key, + doc_id, + 
start, + size, + status, + create_new_root=(False if leaf_parent_pointer else True)) + if split_data is not None: # means that split created new root or replaced split with update_if_has_deleted + new_leaf_start_position, key_moved_to_parent_node = split_data + self._insert_new_key_into_node(leaf_parent_pointer, + key_moved_to_parent_node, + leaf_start, + new_leaf_start_position, + nodes_stack, + indexes) + else: # there is a place for record in leaf + self.buckets.seek(leaf_start) + self._update_leaf( + leaf_start, new_record_position, nr_of_elements, nr_of_records_to_rewrite, + on_deleted, key, doc_id, start, size, status) + + def _update_node(self, new_key_position, nr_of_keys_to_rewrite, new_key, new_pointer): + if nr_of_keys_to_rewrite == 0: + self.buckets.seek(new_key_position) + self.buckets.write( + struct.pack('<' + self.key_format + self.pointer_format, + new_key, + new_pointer)) + self.flush() + else: + self.buckets.seek(new_key_position) + data = self.buckets.read(nr_of_keys_to_rewrite * ( + self.key_size + self.pointer_size)) + keys_to_rewrite = struct.unpack( + '<' + nr_of_keys_to_rewrite * (self.key_format + self.pointer_format), data) + self.buckets.seek(new_key_position) + self.buckets.write( + struct.pack( + '<' + (nr_of_keys_to_rewrite + 1) * + (self.key_format + self.pointer_format), + new_key, + new_pointer, + *keys_to_rewrite)) + self.flush() + + def _insert_new_key_into_node(self, node_start, new_key, old_half_start, new_half_start, nodes_stack, indexes): + parent_key_index = indexes.pop() + nr_of_elements, children_flag = self._read_node_nr_of_elements_and_children_flag(node_start) + parent_prev_pointer = self._read_single_node_key( + node_start, parent_key_index)[0] + if parent_prev_pointer == old_half_start: # splited child was on the left side of his parent key, must write new key before it + new_key_position = self.pointer_size + self._calculate_key_position(node_start, parent_key_index, 'n') + nr_of_keys_to_rewrite = nr_of_elements - 
parent_key_index + else: # splited child was on the right side of his parent key, must write new key after it + new_key_position = self.pointer_size + self._calculate_key_position(node_start, parent_key_index + 1, 'n') + nr_of_keys_to_rewrite = nr_of_elements - (parent_key_index + 1) + if nr_of_elements == self.node_capacity: + try: # check if node has parent + node_parent_pointer = nodes_stack.pop() + except IndexError: # node is a root + node_parent_pointer = 0 + new_data = self._split_node(node_start, + nr_of_keys_to_rewrite, + new_key, + new_half_start, + children_flag, + create_new_root=(False if node_parent_pointer else True)) + if new_data: # if not new_data, new root has been created + new_node_start_position, key_moved_to_parent_node = new_data + self._insert_new_key_into_node(node_parent_pointer, + key_moved_to_parent_node, + node_start, + new_node_start_position, + nodes_stack, + indexes) + + self._find_first_key_occurence_in_node.delete(node_start) + self._find_last_key_occurence_in_node.delete(node_start) + else: # there is a empty slot for new key in node + self._update_size(node_start, nr_of_elements + 1) + self._update_node(new_key_position, + nr_of_keys_to_rewrite, + new_key, + new_half_start) + + self._find_first_key_occurence_in_node.delete(node_start) + self._find_last_key_occurence_in_node.delete(node_start) + self._read_single_node_key.delete(node_start) + self._read_node_nr_of_elements_and_children_flag.delete(node_start) + + def _find_leaf_to_insert(self, key): + """ + Traverses tree in search for leaf for insert, remembering parent nodes in path, + looks for last occurence of key if already in tree. 
+ """ + nodes_stack = [self.data_start] + if self.root_flag == 'l': + return nodes_stack, [] + else: + nr_of_elements, curr_child_flag = self._read_node_nr_of_elements_and_children_flag(self.data_start) + curr_index, curr_pointer = self._find_last_key_occurence_in_node( + self.data_start, key, nr_of_elements) + nodes_stack.append(curr_pointer) + indexes = [curr_index] + while(curr_child_flag == 'n'): + nr_of_elements, curr_child_flag = self._read_node_nr_of_elements_and_children_flag(curr_pointer) + curr_index, curr_pointer = self._find_last_key_occurence_in_node(curr_pointer, key, nr_of_elements) + nodes_stack.append(curr_pointer) + indexes.append(curr_index) + return nodes_stack, indexes + # nodes stack contains start addreses of nodes directly above leaf with key, indexes match keys adjacent nodes_stack values (as pointers) + # required when inserting new keys in upper tree levels + + def _find_leaf_with_last_key_occurence(self, key): + if self.root_flag == 'l': + return self.data_start + else: + nr_of_elements, curr_child_flag = self._read_node_nr_of_elements_and_children_flag(self.data_start) + curr_position = self._find_last_key_occurence_in_node( + self.data_start, key, nr_of_elements)[1] + while(curr_child_flag == 'n'): + nr_of_elements, curr_child_flag = self._read_node_nr_of_elements_and_children_flag(curr_position) + curr_position = self._find_last_key_occurence_in_node( + curr_position, key, nr_of_elements)[1] + return curr_position + + def _find_leaf_with_first_key_occurence(self, key): + if self.root_flag == 'l': + return self.data_start + else: + nr_of_elements, curr_child_flag = self._read_node_nr_of_elements_and_children_flag(self.data_start) + curr_position = self._find_first_key_occurence_in_node( + self.data_start, key, nr_of_elements)[1] + while(curr_child_flag == 'n'): + nr_of_elements, curr_child_flag = self._read_node_nr_of_elements_and_children_flag(curr_position) + curr_position = self._find_first_key_occurence_in_node( + curr_position, 
key, nr_of_elements)[1] + return curr_position + + def _find_key(self, key): + containing_leaf_start = self._find_leaf_with_first_key_occurence(key) + nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(containing_leaf_start) + try: + doc_id, l_key, start, size, status = self._find_key_in_leaf( + containing_leaf_start, key, nr_of_elements) + except ElemNotFound: + if next_leaf: + nr_of_elements = self._read_leaf_nr_of_elements(next_leaf) + else: + raise ElemNotFound + doc_id, l_key, start, size, status = self._find_key_in_leaf( + next_leaf, key, nr_of_elements) + return doc_id, l_key, start, size, status + + def _find_key_to_update(self, key, doc_id): + """ + Search tree for key that matches not only given key but also doc_id. + """ + containing_leaf_start = self._find_leaf_with_first_key_occurence(key) + nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(containing_leaf_start) + try: + leaf_start, record_index, doc_id, l_key, start, size, status = self._find_key_in_leaf_for_update(key, + doc_id, + containing_leaf_start, + nr_of_elements) + except ElemNotFound: + if next_leaf: + nr_of_elements = self._read_leaf_nr_of_elements(next_leaf) + else: + raise TryReindexException() + try: + leaf_start, record_index, doc_id, l_key, start, size, status = self._find_key_in_leaf_for_update(key, + doc_id, + next_leaf, + nr_of_elements) + except ElemNotFound: + raise TryReindexException() + return leaf_start, record_index, doc_id, l_key, start, size, status + + def update(self, doc_id, key, u_start=0, u_size=0, u_status='o'): + containing_leaf_start, element_index, old_doc_id, old_key, old_start, old_size, old_status = self._find_key_to_update(key, doc_id) + if u_start: + old_start = u_start + if u_size: + old_size = u_size + if u_status: + old_status = u_status + new_data = (old_doc_id, old_start, old_size, old_status) + self._update_element(containing_leaf_start, element_index, new_data) + + 
self._find_key.delete(key) + self._match_doc_id.delete(doc_id) + self._find_key_in_leaf.delete(containing_leaf_start, key) + return True + + def delete(self, doc_id, key, start=0, size=0): + containing_leaf_start, element_index = self._find_key_to_update( + key, doc_id)[:2] + self._delete_element(containing_leaf_start, element_index) + + self._find_key.delete(key) + self._match_doc_id.delete(doc_id) + self._find_key_in_leaf.delete(containing_leaf_start, key) + return True + + def _find_key_many(self, key, limit=1, offset=0): + leaf_with_key = self._find_leaf_with_first_key_occurence(key) + nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(leaf_with_key) + try: + leaf_with_key, key_index = self._find_index_of_first_key_equal( + key, leaf_with_key, nr_of_elements) + nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(leaf_with_key) + except ElemNotFound: + leaf_with_key = next_leaf + key_index = 0 + nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(leaf_with_key) + while offset: + if key_index < nr_of_elements: + curr_key, doc_id, start, size, status = self._read_single_leaf_record( + leaf_with_key, key_index) + if key == curr_key: + if status != 'd': + offset -= 1 + key_index += 1 + else: + return + else: + key_index = 0 + if next_leaf: + leaf_with_key = next_leaf + nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(next_leaf) + else: + return + while limit: + if key_index < nr_of_elements: + curr_key, doc_id, start, size, status = self._read_single_leaf_record( + leaf_with_key, key_index) + if key == curr_key: + if status != 'd': + yield doc_id, start, size, status + limit -= 1 + key_index += 1 + else: + return + else: + key_index = 0 + if next_leaf: + leaf_with_key = next_leaf + nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(next_leaf) + else: + return + + def _find_key_smaller(self, key, 
limit=1, offset=0): + leaf_with_key = self._find_leaf_with_first_key_occurence(key) + nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(leaf_with_key) + leaf_with_key, key_index = self._find_index_of_first_key_equal_or_smaller_key(key, leaf_with_key, nr_of_elements) + nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(leaf_with_key) + curr_key = self._read_single_leaf_record(leaf_with_key, key_index)[0] + if curr_key >= key: + key_index -= 1 + while offset: + if key_index >= 0: + key, doc_id, start, size, status = self._read_single_leaf_record( + leaf_with_key, key_index) + if status != 'd': + offset -= 1 + key_index -= 1 + else: + if prev_leaf: + leaf_with_key = prev_leaf + nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(prev_leaf) + key_index = nr_of_elements - 1 + else: + return + while limit: + if key_index >= 0: + key, doc_id, start, size, status = self._read_single_leaf_record( + leaf_with_key, key_index) + if status != 'd': + yield doc_id, key, start, size, status + limit -= 1 + key_index -= 1 + else: + if prev_leaf: + leaf_with_key = prev_leaf + nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(prev_leaf) + key_index = nr_of_elements - 1 + else: + return + + def _find_key_equal_and_smaller(self, key, limit=1, offset=0): + leaf_with_key = self._find_leaf_with_last_key_occurence(key) + nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(leaf_with_key) + try: + leaf_with_key, key_index = self._find_index_of_last_key_equal_or_smaller_key(key, leaf_with_key, nr_of_elements) + nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(leaf_with_key) + except ElemNotFound: + leaf_with_key = prev_leaf + key_index = self._read_leaf_nr_of_elements_and_neighbours( + leaf_with_key)[0] + curr_key = self._read_single_leaf_record(leaf_with_key, key_index)[0] + if curr_key > 
key: + key_index -= 1 + while offset: + if key_index >= 0: + key, doc_id, start, size, status = self._read_single_leaf_record( + leaf_with_key, key_index) + if status != 'd': + offset -= 1 + key_index -= 1 + else: + if prev_leaf: + leaf_with_key = prev_leaf + nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(prev_leaf) + key_index = nr_of_elements - 1 + else: + return + while limit: + if key_index >= 0: + key, doc_id, start, size, status = self._read_single_leaf_record( + leaf_with_key, key_index) + if status != 'd': + yield doc_id, key, start, size, status + limit -= 1 + key_index -= 1 + else: + if prev_leaf: + leaf_with_key = prev_leaf + nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(prev_leaf) + key_index = nr_of_elements - 1 + else: + return + + def _find_key_bigger(self, key, limit=1, offset=0): + leaf_with_key = self._find_leaf_with_last_key_occurence(key) + nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(leaf_with_key) + try: + leaf_with_key, key_index = self._find_index_of_last_key_equal_or_smaller_key(key, leaf_with_key, nr_of_elements) + nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(leaf_with_key) + except ElemNotFound: + key_index = 0 + curr_key = self._read_single_leaf_record(leaf_with_key, key_index)[0] + if curr_key <= key: + key_index += 1 + while offset: + if key_index < nr_of_elements: + curr_key, doc_id, start, size, status = self._read_single_leaf_record( + leaf_with_key, key_index) + if status != 'd': + offset -= 1 + key_index += 1 + else: + key_index = 0 + if next_leaf: + leaf_with_key = next_leaf + nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(next_leaf) + else: + return + while limit: + if key_index < nr_of_elements: + curr_key, doc_id, start, size, status = self._read_single_leaf_record( + leaf_with_key, key_index) + if status != 'd': + yield doc_id, 
curr_key, start, size, status + limit -= 1 + key_index += 1 + else: + key_index = 0 + if next_leaf: + leaf_with_key = next_leaf + nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(next_leaf) + else: + return + + def _find_key_equal_and_bigger(self, key, limit=1, offset=0): + leaf_with_key = self._find_leaf_with_first_key_occurence(key) + nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(leaf_with_key) + leaf_with_key, key_index = self._find_index_of_first_key_equal_or_smaller_key(key, leaf_with_key, nr_of_elements) + nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(leaf_with_key) + curr_key = self._read_single_leaf_record(leaf_with_key, key_index)[0] + if curr_key < key: + key_index += 1 + while offset: + if key_index < nr_of_elements: + curr_key, doc_id, start, size, status = self._read_single_leaf_record( + leaf_with_key, key_index) + if status != 'd': + offset -= 1 + key_index += 1 + else: + key_index = 0 + if next_leaf: + leaf_with_key = next_leaf + nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(next_leaf) + else: + return + while limit: + if key_index < nr_of_elements: + curr_key, doc_id, start, size, status = self._read_single_leaf_record( + leaf_with_key, key_index) + if status != 'd': + yield doc_id, curr_key, start, size, status + limit -= 1 + key_index += 1 + else: + key_index = 0 + if next_leaf: + leaf_with_key = next_leaf + nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(next_leaf) + else: + return + + def _find_key_between(self, start, end, limit, offset, inclusive_start, inclusive_end): + """ + Returns generator containing all keys withing given interval. 
+ """ + if inclusive_start: + leaf_with_key = self._find_leaf_with_first_key_occurence(start) + nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(leaf_with_key) + leaf_with_key, key_index = self._find_index_of_first_key_equal_or_smaller_key(start, leaf_with_key, nr_of_elements) + nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(leaf_with_key) + curr_key = self._read_single_leaf_record( + leaf_with_key, key_index)[0] + if curr_key < start: + key_index += 1 + else: + leaf_with_key = self._find_leaf_with_last_key_occurence(start) + nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(leaf_with_key) + leaf_with_key, key_index = self._find_index_of_last_key_equal_or_smaller_key(start, leaf_with_key, nr_of_elements) + curr_key, curr_doc_id, curr_start, curr_size, curr_status = self._read_single_leaf_record(leaf_with_key, key_index) + if curr_key <= start: + key_index += 1 + while offset: + if key_index < nr_of_elements: + curr_key, curr_doc_id, curr_start, curr_size, curr_status = self._read_single_leaf_record(leaf_with_key, key_index) + if curr_status != 'd': + offset -= 1 + key_index += 1 + else: + key_index = 0 + if next_leaf: + leaf_with_key = next_leaf + nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(next_leaf) + else: + return + while limit: + if key_index < nr_of_elements: + curr_key, curr_doc_id, curr_start, curr_size, curr_status = self._read_single_leaf_record(leaf_with_key, key_index) + if curr_key > end or (curr_key == end and not inclusive_end): + return + elif curr_status != 'd': + yield curr_doc_id, curr_key, curr_start, curr_size, curr_status + limit -= 1 + key_index += 1 + else: + key_index = 0 + if next_leaf: + leaf_with_key = next_leaf + nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(next_leaf) + else: + return + + def get(self, key): + return 
self._find_key(self.make_key(key)) + + def get_many(self, key, limit=1, offset=0): + return self._find_key_many(self.make_key(key), limit, offset) + + def get_between(self, start, end, limit=1, offset=0, inclusive_start=True, inclusive_end=True): + if start is None: + end = self.make_key(end) + if inclusive_end: + return self._find_key_equal_and_smaller(end, limit, offset) + else: + return self._find_key_smaller(end, limit, offset) + elif end is None: + start = self.make_key(start) + if inclusive_start: + return self._find_key_equal_and_bigger(start, limit, offset) + else: + return self._find_key_bigger(start, limit, offset) + else: + start = self.make_key(start) + end = self.make_key(end) + return self._find_key_between(start, end, limit, offset, inclusive_start, inclusive_end) + + def all(self, limit=-1, offset=0): + """ + Traverses linked list of all tree leaves and returns generator containing all elements stored in index. + """ + if self.root_flag == 'n': + leaf_start = self.data_start + self.node_size + else: + leaf_start = self.data_start + nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(leaf_start) + key_index = 0 + while offset: + if key_index < nr_of_elements: + curr_key, doc_id, start, size, status = self._read_single_leaf_record( + leaf_start, key_index) + if status != 'd': + offset -= 1 + key_index += 1 + else: + key_index = 0 + if next_leaf: + leaf_start = next_leaf + nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(next_leaf) + else: + return + while limit: + if key_index < nr_of_elements: + curr_key, doc_id, start, size, status = self._read_single_leaf_record( + leaf_start, key_index) + if status != 'd': + yield doc_id, curr_key, start, size, status + limit -= 1 + key_index += 1 + else: + key_index = 0 + if next_leaf: + leaf_start = next_leaf + nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(next_leaf) + else: + return + + def make_key(self, 
key): + raise NotImplementedError() + + def make_key_value(self, data): + raise NotImplementedError() + + def _open_storage(self): + s = globals()[self.storage_class] + if not self.storage: + self.storage = s(self.db_path, self.name) + self.storage.open() + + def _create_storage(self): + s = globals()[self.storage_class] + if not self.storage: + self.storage = s(self.db_path, self.name) + self.storage.create() + + def compact(self, node_capacity=0): + if not node_capacity: + node_capacity = self.node_capacity + + compact_ind = self.__class__( + self.db_path, self.name + '_compact', node_capacity=node_capacity) + compact_ind.create_index() + + gen = self.all() + while True: + try: + doc_id, key, start, size, status = gen.next() + except StopIteration: + break + self.storage._f.seek(start) + value = self.storage._f.read(size) + start_ = compact_ind.storage._f.tell() + compact_ind.storage._f.write(value) + compact_ind.insert(doc_id, key, start_, size, status) + + compact_ind.close_index() + original_name = self.name + # os.unlink(os.path.join(self.db_path, self.name + "_buck")) + self.close_index() + shutil.move(os.path.join(compact_ind.db_path, compact_ind. + name + "_buck"), os.path.join(self.db_path, self.name + "_buck")) + shutil.move(os.path.join(compact_ind.db_path, compact_ind. + name + "_stor"), os.path.join(self.db_path, self.name + "_stor")) + # self.name = original_name + self.open_index() # reload... 
+ self.name = original_name + self._save_params(dict(name=original_name)) + self._fix_params() + self._clear_cache() + return True + + def _fix_params(self): + super(IU_TreeBasedIndex, self)._fix_params() + self._count_props() + + def _clear_cache(self): + self._find_key.clear() + self._match_doc_id.clear() +# self._read_single_leaf_record.clear() + self._find_key_in_leaf.clear() + self._read_single_node_key.clear() + self._find_first_key_occurence_in_node.clear() + self._find_last_key_occurence_in_node.clear() + self._read_leaf_nr_of_elements.clear() + self._read_leaf_neighbours.clear() + self._read_leaf_nr_of_elements_and_neighbours.clear() + self._read_node_nr_of_elements_and_children_flag.clear() + + def close_index(self): + super(IU_TreeBasedIndex, self).close_index() + self._clear_cache() + + +class IU_MultiTreeBasedIndex(IU_TreeBasedIndex): + """ + Class that allows to index more than one key per database record. + + It operates very well on GET/INSERT. It's not optimized for + UPDATE operations (will always readd everything) + """ + + def __init__(self, *args, **kwargs): + super(IU_MultiTreeBasedIndex, self).__init__(*args, **kwargs) + + def insert(self, doc_id, key, start, size, status='o'): + if isinstance(key, (list, tuple)): + key = set(key) + elif not isinstance(key, set): + key = set([key]) + ins = super(IU_MultiTreeBasedIndex, self).insert + for curr_key in key: + ins(doc_id, curr_key, start, size, status) + return True + + def update(self, doc_id, key, u_start, u_size, u_status='o'): + if isinstance(key, (list, tuple)): + key = set(key) + elif not isinstance(key, set): + key = set([key]) + upd = super(IU_MultiTreeBasedIndex, self).update + for curr_key in key: + upd(doc_id, curr_key, u_start, u_size, u_status) + + def delete(self, doc_id, key, start=0, size=0): + if isinstance(key, (list, tuple)): + key = set(key) + elif not isinstance(key, set): + key = set([key]) + delete = super(IU_MultiTreeBasedIndex, self).delete + for curr_key in key: + 
delete(doc_id, curr_key, start, size) + + def get(self, key): + return super(IU_MultiTreeBasedIndex, self).get(key) + + def make_key_value(self, data): + raise NotImplementedError() + + +# classes for public use, done in this way because of +# generation static files with indexes (_index directory) + + +class TreeBasedIndex(IU_TreeBasedIndex): + pass + + +class MultiTreeBasedIndex(IU_MultiTreeBasedIndex): + """ + It allows to index more than one key for record. (ie. prefix/infix/suffix search mechanizms) + That class is designed to be used in custom indexes. + """ + pass diff --git a/libs/apscheduler/__init__.py b/libs/apscheduler/__init__.py index a55959fe10..71cc53dbb8 100644 --- a/libs/apscheduler/__init__.py +++ b/libs/apscheduler/__init__.py @@ -1,3 +1,3 @@ -version_info = (2, 0, 2) +version_info = (2, 1, 2) version = '.'.join(str(n) for n in version_info[:3]) -release = version + ''.join(str(n) for n in version_info[3:]) +release = '.'.join(str(n) for n in version_info) diff --git a/libs/apscheduler/job.py b/libs/apscheduler/job.py index 868e7234e8..cfc09a2f5b 100644 --- a/libs/apscheduler/job.py +++ b/libs/apscheduler/job.py @@ -16,22 +16,25 @@ class MaxInstancesReachedError(Exception): class Job(object): """ Encapsulates the actual Job along with its metadata. Job instances - are created by the scheduler when adding jobs, and it should not be - directly instantiated. - - :param trigger: trigger that determines the execution times - :param func: callable to call when the trigger is triggered - :param args: list of positional arguments to call func with - :param kwargs: dict of keyword arguments to call func with - :param name: name of the job (optional) - :param misfire_grace_time: seconds after the designated run time that + are created by the scheduler when adding jobs, and should not be + directly instantiated. These options can be set when adding jobs + to the scheduler (see :ref:`job_options`). 
+ + :var trigger: trigger that determines the execution times + :var func: callable to call when the trigger is triggered + :var args: list of positional arguments to call func with + :var kwargs: dict of keyword arguments to call func with + :var name: name of the job + :var misfire_grace_time: seconds after the designated run time that the job is still allowed to be run - :param coalesce: run once instead of many times if the scheduler determines + :var coalesce: run once instead of many times if the scheduler determines that the job should be run more than once in succession - :param max_runs: maximum number of times this job is allowed to be + :var max_runs: maximum number of times this job is allowed to be triggered - :param max_instances: maximum number of concurrently running + :var max_instances: maximum number of concurrently running instances allowed for this job + :var runs: number of times this job has been triggered + :var instances: number of concurrently running instances of this job """ id = None next_run_time = None @@ -130,5 +133,5 @@ def __repr__(self): return '' % (self.name, repr(self.trigger)) def __str__(self): - return '%s (trigger: %s, next run at: %s)' % (self.name, - str(self.trigger), str(self.next_run_time)) + return '%s (trigger: %s, next run at: %s)' % ( + self.name, str(self.trigger), str(self.next_run_time)) diff --git a/libs/apscheduler/jobstores/ram_store.py b/libs/apscheduler/jobstores/ram_store.py index 85091fe88f..60458fbab8 100644 --- a/libs/apscheduler/jobstores/ram_store.py +++ b/libs/apscheduler/jobstores/ram_store.py @@ -8,7 +8,7 @@ class RAMJobStore(JobStore): def __init__(self): self.jobs = [] - + def add_job(self, job): self.jobs.append(job) diff --git a/libs/apscheduler/jobstores/redis_store.py b/libs/apscheduler/jobstores/redis_store.py new file mode 100644 index 0000000000..5eabf4b1e5 --- /dev/null +++ b/libs/apscheduler/jobstores/redis_store.py @@ -0,0 +1,91 @@ +""" +Stores jobs in a Redis database. 
+""" +from uuid import uuid4 +from datetime import datetime +import logging + +from apscheduler.jobstores.base import JobStore +from apscheduler.job import Job + +try: + import cPickle as pickle +except ImportError: # pragma: nocover + import pickle + +try: + from redis import StrictRedis +except ImportError: # pragma: nocover + raise ImportError('RedisJobStore requires redis installed') + +try: + long = long +except NameError: + long = int + +logger = logging.getLogger(__name__) + + +class RedisJobStore(JobStore): + def __init__(self, db=0, key_prefix='jobs.', + pickle_protocol=pickle.HIGHEST_PROTOCOL, **connect_args): + self.jobs = [] + self.pickle_protocol = pickle_protocol + self.key_prefix = key_prefix + + if db is None: + raise ValueError('The "db" parameter must not be empty') + if not key_prefix: + raise ValueError('The "key_prefix" parameter must not be empty') + + self.redis = StrictRedis(db=db, **connect_args) + + def add_job(self, job): + job.id = str(uuid4()) + job_state = job.__getstate__() + job_dict = { + 'job_state': pickle.dumps(job_state, self.pickle_protocol), + 'runs': '0', + 'next_run_time': job_state.pop('next_run_time').isoformat()} + self.redis.hmset(self.key_prefix + job.id, job_dict) + self.jobs.append(job) + + def remove_job(self, job): + self.redis.delete(self.key_prefix + job.id) + self.jobs.remove(job) + + def load_jobs(self): + jobs = [] + keys = self.redis.keys(self.key_prefix + '*') + pipeline = self.redis.pipeline() + for key in keys: + pipeline.hgetall(key) + results = pipeline.execute() + + for job_dict in results: + job_state = {} + try: + job = Job.__new__(Job) + job_state = pickle.loads(job_dict['job_state'.encode()]) + job_state['runs'] = long(job_dict['runs'.encode()]) + dateval = job_dict['next_run_time'.encode()].decode() + job_state['next_run_time'] = datetime.strptime( + dateval, '%Y-%m-%dT%H:%M:%S') + job.__setstate__(job_state) + jobs.append(job) + except Exception: + job_name = job_state.get('name', '(unknown)') + 
logger.exception('Unable to restore job "%s"', job_name) + self.jobs = jobs + + def update_job(self, job): + attrs = { + 'next_run_time': job.next_run_time.isoformat(), + 'runs': job.runs} + self.redis.hmset(self.key_prefix + job.id, attrs) + + def close(self): + self.redis.connection_pool.disconnect() + + def __repr__(self): + return '<%s>' % self.__class__.__name__ diff --git a/libs/apscheduler/jobstores/shelve_store.py b/libs/apscheduler/jobstores/shelve_store.py index 87c95f8fe4..d1be58f911 100644 --- a/libs/apscheduler/jobstores/shelve_store.py +++ b/libs/apscheduler/jobstores/shelve_store.py @@ -21,7 +21,10 @@ def __init__(self, path, pickle_protocol=pickle.HIGHEST_PROTOCOL): self.jobs = [] self.path = path self.pickle_protocol = pickle_protocol - self.store = shelve.open(path, 'c', self.pickle_protocol) + self._open_store() + + def _open_store(self): + self.store = shelve.open(self.path, 'c', self.pickle_protocol) def _generate_id(self): id = None @@ -32,17 +35,23 @@ def _generate_id(self): def add_job(self, job): job.id = self._generate_id() - self.jobs.append(job) self.store[job.id] = job.__getstate__() + self.store.close() + self._open_store() + self.jobs.append(job) def update_job(self, job): job_dict = self.store[job.id] job_dict['next_run_time'] = job.next_run_time job_dict['runs'] = job.runs self.store[job.id] = job_dict + self.store.close() + self._open_store() def remove_job(self, job): del self.store[job.id] + self.store.close() + self._open_store() self.jobs.remove(job) def load_jobs(self): diff --git a/libs/apscheduler/jobstores/sqlalchemy_store.py b/libs/apscheduler/jobstores/sqlalchemy_store.py index 41ed4c7a93..5b64a35a68 100644 --- a/libs/apscheduler/jobstores/sqlalchemy_store.py +++ b/libs/apscheduler/jobstores/sqlalchemy_store.py @@ -4,6 +4,8 @@ import pickle import logging +import sqlalchemy + from apscheduler.jobstores.base import JobStore from apscheduler.job import Job @@ -28,17 +30,19 @@ def __init__(self, url=None, engine=None, 
tablename='apscheduler_jobs', else: raise ValueError('Need either "engine" or "url" defined') - self.jobs_t = Table(tablename, metadata or MetaData(), + if sqlalchemy.__version__ < '0.7': + pickle_coltype = PickleType(pickle_protocol, mutable=False) + else: + pickle_coltype = PickleType(pickle_protocol) + self.jobs_t = Table( + tablename, metadata or MetaData(), Column('id', Integer, Sequence(tablename + '_id_seq', optional=True), primary_key=True), - Column('trigger', PickleType(pickle_protocol, mutable=False), - nullable=False), + Column('trigger', pickle_coltype, nullable=False), Column('func_ref', String(1024), nullable=False), - Column('args', PickleType(pickle_protocol, mutable=False), - nullable=False), - Column('kwargs', PickleType(pickle_protocol, mutable=False), - nullable=False), + Column('args', pickle_coltype, nullable=False), + Column('kwargs', pickle_coltype, nullable=False), Column('name', Unicode(1024)), Column('misfire_grace_time', Integer, nullable=False), Column('coalesce', Boolean, nullable=False), diff --git a/libs/apscheduler/scheduler.py b/libs/apscheduler/scheduler.py index 50769e4d11..319037a9e6 100644 --- a/libs/apscheduler/scheduler.py +++ b/libs/apscheduler/scheduler.py @@ -35,7 +35,7 @@ class Scheduler(object): their execution. """ - _stopped = False + _stopped = True _thread = None def __init__(self, gconfig={}, **options): @@ -60,6 +60,7 @@ def configure(self, gconfig={}, **options): self.misfire_grace_time = int(config.pop('misfire_grace_time', 1)) self.coalesce = asbool(config.pop('coalesce', True)) self.daemonic = asbool(config.pop('daemonic', True)) + self.standalone = asbool(config.pop('standalone', False)) # Configure the thread pool if 'threadpool' in config: @@ -85,6 +86,12 @@ def configure(self, gconfig={}, **options): def start(self): """ Starts the scheduler in a new thread. + + In threaded mode (the default), this method will return immediately + after starting the scheduler thread. 
+ + In standalone mode, this method will block until there are no more + scheduled jobs. """ if self.running: raise SchedulerAlreadyRunningError @@ -99,11 +106,15 @@ def start(self): del self._pending_jobs[:] self._stopped = False - self._thread = Thread(target=self._main_loop, name='APScheduler') - self._thread.setDaemon(self.daemonic) - self._thread.start() + if self.standalone: + self._main_loop() + else: + self._thread = Thread(target=self._main_loop, name='APScheduler') + self._thread.setDaemon(self.daemonic) + self._thread.start() - def shutdown(self, wait=True, shutdown_threadpool=True): + def shutdown(self, wait=True, shutdown_threadpool=True, + close_jobstores=True): """ Shuts down the scheduler and terminates the thread. Does not interrupt any currently running jobs. @@ -111,6 +122,7 @@ def shutdown(self, wait=True, shutdown_threadpool=True): :param wait: ``True`` to wait until all currently executing jobs have finished (if ``shutdown_threadpool`` is also ``True``) :param shutdown_threadpool: ``True`` to shut down the thread pool + :param close_jobstores: ``True`` to close all job stores after shutdown """ if not self.running: return @@ -123,11 +135,19 @@ def shutdown(self, wait=True, shutdown_threadpool=True): self._threadpool.shutdown(wait) # Wait until the scheduler thread terminates - self._thread.join() + if self._thread: + self._thread.join() + + # Close all job stores + if close_jobstores: + for jobstore in itervalues(self._jobstores): + jobstore.close() @property def running(self): - return not self._stopped and self._thread and self._thread.isAlive() + thread_alive = self._thread and self._thread.isAlive() + standalone = getattr(self, 'standalone', False) + return not self._stopped and (standalone or thread_alive) def add_jobstore(self, jobstore, alias, quiet=False): """ @@ -156,21 +176,25 @@ def add_jobstore(self, jobstore, alias, quiet=False): if not quiet: self._wakeup.set() - def remove_jobstore(self, alias): + def remove_jobstore(self, 
alias, close=True): """ Removes the job store by the given alias from this scheduler. + :param close: ``True`` to close the job store after removing it :type alias: str """ self._jobstores_lock.acquire() try: - try: - del self._jobstores[alias] - except KeyError: + jobstore = self._jobstores.pop(alias) + if not jobstore: raise KeyError('No such job store: %s' % alias) finally: self._jobstores_lock.release() + # Close the job store if requested + if close: + jobstore.close() + # Notify listeners that a job store has been removed self._notify_listeners(JobStoreEvent(EVENT_JOBSTORE_REMOVED, alias)) @@ -245,8 +269,10 @@ def add_job(self, trigger, func, args, kwargs, jobstore='default', **options): """ Adds the given job to the job list and notifies the scheduler thread. + Any extra keyword arguments are passed along to the constructor of the + :class:`~apscheduler.job.Job` class (see :ref:`job_options`). - :param trigger: alias of the job store to store the job in + :param trigger: trigger that determines when ``func`` is called :param func: callable to run at the given time :param args: list of positional arguments to call func with :param kwargs: dict of keyword arguments to call func with @@ -276,6 +302,8 @@ def _remove_job(self, job, alias, jobstore): def add_date_job(self, func, date, args=None, kwargs=None, **options): """ Schedules a job to be completed on a specific date and time. + Any extra keyword arguments are passed along to the constructor of the + :class:`~apscheduler.job.Job` class (see :ref:`job_options`). :param func: callable to run at the given time :param date: the date/time to run the job at @@ -294,6 +322,8 @@ def add_interval_job(self, func, weeks=0, days=0, hours=0, minutes=0, **options): """ Schedules a job to be completed on specified intervals. + Any extra keyword arguments are passed along to the constructor of the + :class:`~apscheduler.job.Job` class (see :ref:`job_options`). 
:param func: callable to run :param weeks: number of weeks to wait @@ -322,6 +352,8 @@ def add_cron_job(self, func, year=None, month=None, day=None, week=None, """ Schedules a job to be completed on times that match the given expressions. + Any extra keyword arguments are passed along to the constructor of the + :class:`~apscheduler.job.Job` class (see :ref:`job_options`). :param func: callable to run :param year: year to run on @@ -352,6 +384,8 @@ def cron_schedule(self, **options): This decorator does not wrap its host function. Unscheduling decorated functions is possible by passing the ``job`` attribute of the scheduled function to :meth:`unschedule_job`. + Any extra keyword arguments are passed along to the constructor of the + :class:`~apscheduler.job.Job` class (see :ref:`job_options`). """ def inner(func): func.job = self.add_cron_job(func, **options) @@ -364,6 +398,8 @@ def interval_schedule(self, **options): This decorator does not wrap its host function. Unscheduling decorated functions is possible by passing the ``job`` attribute of the scheduled function to :meth:`unschedule_job`. + Any extra keyword arguments are passed along to the constructor of the + :class:`~apscheduler.job.Job` class (see :ref:`job_options`). 
""" def inner(func): func.job = self.add_interval_job(func, **options) @@ -517,7 +553,8 @@ def _process_jobs(self, now): job.runs += len(run_times) # Update the job, but don't keep finished jobs around - if job.compute_next_run_time(now + timedelta(microseconds=1)): + if job.compute_next_run_time( + now + timedelta(microseconds=1)): jobstore.update_job(job) else: self._remove_job(job, alias, jobstore) @@ -549,11 +586,22 @@ def _main_loop(self): wait_seconds = time_difference(next_wakeup_time, now) logger.debug('Next wakeup is due at %s (in %f seconds)', next_wakeup_time, wait_seconds) - self._wakeup.wait(wait_seconds) + try: + self._wakeup.wait(wait_seconds) + except IOError: # Catch errno 514 on some Linux kernels + pass + self._wakeup.clear() + elif self.standalone: + logger.debug('No jobs left; shutting down scheduler') + self.shutdown() + break else: logger.debug('No jobs; waiting until a job is added') - self._wakeup.wait() - self._wakeup.clear() + try: + self._wakeup.wait() + except IOError: # Catch errno 514 on some Linux kernels + pass + self._wakeup.clear() logger.info('Scheduler has been shut down') self._notify_listeners(SchedulerEvent(EVENT_SCHEDULER_SHUTDOWN)) diff --git a/libs/apscheduler/triggers/cron/__init__.py b/libs/apscheduler/triggers/cron/__init__.py index 763edb1e4b..9e69f720ff 100644 --- a/libs/apscheduler/triggers/cron/__init__.py +++ b/libs/apscheduler/triggers/cron/__init__.py @@ -21,8 +21,10 @@ def __init__(self, **values): if self.start_date: self.start_date = convert_to_datetime(self.start_date) - # Yank out all None valued fields + # Check field names and yank out all None valued fields for key, value in list(iteritems(values)): + if key not in self.FIELD_NAMES: + raise TypeError('Invalid field name: %s' % key) if value is None: del values[key] @@ -111,17 +113,17 @@ def get_next_fire_time(self, start_date): if next_value is None: # No valid value was found - next_date, fieldnum = self._increment_field_value(next_date, - fieldnum - 1) 
+ next_date, fieldnum = self._increment_field_value( + next_date, fieldnum - 1) elif next_value > curr_value: # A valid, but higher than the starting value, was found if field.REAL: - next_date = self._set_field_value(next_date, fieldnum, - next_value) + next_date = self._set_field_value( + next_date, fieldnum, next_value) fieldnum += 1 else: - next_date, fieldnum = self._increment_field_value(next_date, - fieldnum) + next_date, fieldnum = self._increment_field_value( + next_date, fieldnum) else: # A valid value was found, no changes necessary fieldnum += 1 diff --git a/libs/apscheduler/triggers/cron/expressions.py b/libs/apscheduler/triggers/cron/expressions.py index 018c7a30df..b5d2919547 100644 --- a/libs/apscheduler/triggers/cron/expressions.py +++ b/libs/apscheduler/triggers/cron/expressions.py @@ -8,7 +8,7 @@ from apscheduler.util import asint __all__ = ('AllExpression', 'RangeExpression', 'WeekdayRangeExpression', - 'WeekdayPositionExpression') + 'WeekdayPositionExpression', 'LastDayOfMonthExpression') WEEKDAYS = ['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun'] @@ -176,3 +176,19 @@ def __repr__(self): return "%s('%s', '%s')" % (self.__class__.__name__, self.options[self.option_num], WEEKDAYS[self.weekday]) + + +class LastDayOfMonthExpression(AllExpression): + value_re = re.compile(r'last', re.IGNORECASE) + + def __init__(self): + pass + + def get_next_value(self, date, field): + return monthrange(date.year, date.month)[1] + + def __str__(self): + return 'last' + + def __repr__(self): + return "%s()" % self.__class__.__name__ diff --git a/libs/apscheduler/triggers/cron/fields.py b/libs/apscheduler/triggers/cron/fields.py index ef970cc9dc..be5e5e33ef 100644 --- a/libs/apscheduler/triggers/cron/fields.py +++ b/libs/apscheduler/triggers/cron/fields.py @@ -85,7 +85,8 @@ def get_value(self, dateval): class DayOfMonthField(BaseField): - COMPILERS = BaseField.COMPILERS + [WeekdayPositionExpression] + COMPILERS = BaseField.COMPILERS + [WeekdayPositionExpression, + 
LastDayOfMonthExpression] def get_max(self, dateval): return monthrange(dateval.year, dateval.month)[1] diff --git a/libs/apscheduler/util.py b/libs/apscheduler/util.py index a49aaed8c1..dcede4c357 100644 --- a/libs/apscheduler/util.py +++ b/libs/apscheduler/util.py @@ -6,7 +6,6 @@ from time import mktime import re import sys -from types import MethodType __all__ = ('asint', 'asbool', 'convert_to_datetime', 'timedelta_seconds', 'time_difference', 'datetime_ceil', 'combine_opts', @@ -64,7 +63,7 @@ def convert_to_datetime(input): return input elif isinstance(input, date): return datetime.fromordinal(input.toordinal()) - elif isinstance(input, str): + elif isinstance(input, basestring): m = _DATE_REGEX.match(input) if not m: raise ValueError('Invalid date string') @@ -109,7 +108,7 @@ def datetime_ceil(dateval): """ if dateval.microsecond > 0: return dateval + timedelta(seconds=1, - microseconds= -dateval.microsecond) + microseconds=-dateval.microsecond) return dateval @@ -143,7 +142,8 @@ def get_callable_name(func): if f_self and hasattr(func, '__name__'): if isinstance(f_self, type): # class method - return '%s.%s' % (f_self.__name__, func.__name__) + clsname = getattr(f_self, '__qualname__', None) or f_self.__name__ + return '%s.%s' % (clsname, func.__name__) # bound method return '%s.%s' % (f_self.__class__.__name__, func.__name__) @@ -169,7 +169,7 @@ def obj_to_ref(obj): raise ValueError except Exception: raise ValueError('Cannot determine the reference to %s' % repr(obj)) - + return ref diff --git a/libs/axl/axel.py b/libs/axl/axel.py index 46940da79b..2abf12a903 100644 --- a/libs/axl/axel.py +++ b/libs/axl/axel.py @@ -1,6 +1,7 @@ # axel.py # # Copyright (C) 2010 Adrian Cristea adrian dot cristea at gmail dotcom +# Edits by Ruud Burger # # Based on an idea by Peter Thatcher, found on # http://www.valuedlessons.com/2008/04/events-in-python.html @@ -11,12 +12,14 @@ # Source: http://pypi.python.org/pypi/axel # Docs: http://packages.python.org/axel -from 
couchpotato.core.helpers.variable import natcmp -import Queue +from Queue import Empty, Queue import hashlib import sys import threading +from couchpotato.core.helpers.variable import natsortKey + + class Event(object): """ Event object inspired by C# events. Handlers can be registered and @@ -140,7 +143,7 @@ def unhandle(self, handler): def fire(self, *args, **kwargs): """ Stores all registered handlers in a queue for processing """ - self.queue = Queue.Queue() + self.queue = Queue() result = {} if self.handlers: @@ -158,7 +161,10 @@ def add_to(key, value): t.daemon = True t.start() - for handler in sorted(self.handlers.iterkeys(), cmp = natcmp): + handler_keys = self.handlers.keys() + handler_keys.sort(key = natsortKey) + + for handler in handler_keys: self.queue.put(handler) if self.asynchronous: @@ -229,16 +235,16 @@ def _execute(self, *args, **kwargs): self.error_handler(sys.exc_info()) finally: - if not self.asynchronous: - self.queue.task_done() - if order_lock: order_lock.release() + if not self.asynchronous: + self.queue.task_done() + if self.queue.empty(): - raise Queue.Empty + raise Empty - except Queue.Empty: + except Empty: break def _extract(self, queue_item): diff --git a/libs/backports/__init__.py b/libs/backports/__init__.py new file mode 100644 index 0000000000..612d32836b --- /dev/null +++ b/libs/backports/__init__.py @@ -0,0 +1,3 @@ +# This is a Python "namespace package" http://www.python.org/dev/peps/pep-0382/ +from pkgutil import extend_path +__path__ = extend_path(__path__, __name__) diff --git a/libs/backports/ssl_match_hostname/README.txt b/libs/backports/ssl_match_hostname/README.txt new file mode 100644 index 0000000000..f024fd7b84 --- /dev/null +++ b/libs/backports/ssl_match_hostname/README.txt @@ -0,0 +1,42 @@ + +The ssl.match_hostname() function from Python 3.2 +================================================= + +The Secure Sockets layer is only actually *secure* +if you check the hostname in the certificate returned +by the server 
to which you are connecting, +and verify that it matches to hostname +that you are trying to reach. + +But the matching logic, defined in `RFC2818`_, +can be a bit tricky to implement on your own. +So the ``ssl`` package in the Standard Library of Python 3.2 +now includes a ``match_hostname()`` function +for performing this check instead of requiring every application +to implement the check separately. + +This backport brings ``match_hostname()`` to users +of earlier versions of Python. +Simply make this distribution a dependency of your package, +and then use it like this:: + + from backports.ssl_match_hostname import match_hostname, CertificateError + ... + sslsock = ssl.wrap_socket(sock, ssl_version=ssl.PROTOCOL_SSLv3, + cert_reqs=ssl.CERT_REQUIRED, ca_certs=...) + try: + match_hostname(sslsock.getpeercert(), hostname) + except CertificateError, ce: + ... + +Note that the ``ssl`` module is only included in the Standard Library +for Python 2.6 and later; +users of Python 2.5 or earlier versions +will also need to install the ``ssl`` distribution +from the Python Package Index to use code like that shown above. + +Brandon Craig Rhodes is merely the packager of this distribution; +the actual code inside comes verbatim from Python 3.2. + +.. _RFC2818: http://tools.ietf.org/html/rfc2818.html + diff --git a/libs/backports/ssl_match_hostname/__init__.py b/libs/backports/ssl_match_hostname/__init__.py new file mode 100644 index 0000000000..57076497aa --- /dev/null +++ b/libs/backports/ssl_match_hostname/__init__.py @@ -0,0 +1,60 @@ +"""The match_hostname() function from Python 3.2, essential when using SSL.""" + +import re + +__version__ = '3.2a3' + +class CertificateError(ValueError): + pass + +def _dnsname_to_pat(dn): + pats = [] + for frag in dn.split(r'.'): + if frag == '*': + # When '*' is a fragment by itself, it matches a non-empty dotless + # fragment. + pats.append('[^.]+') + else: + # Otherwise, '*' matches any dotless fragment. 
+ frag = re.escape(frag) + pats.append(frag.replace(r'\*', '[^.]*')) + return re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE) + +def match_hostname(cert, hostname): + """Verify that *cert* (in decoded format as returned by + SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 rules + are mostly followed, but IP addresses are not accepted for *hostname*. + + CertificateError is raised on failure. On success, the function + returns nothing. + """ + if not cert: + raise ValueError("empty or no certificate") + dnsnames = [] + san = cert.get('subjectAltName', ()) + for key, value in san: + if key == 'DNS': + if _dnsname_to_pat(value).match(hostname): + return + dnsnames.append(value) + if not san: + # The subject is only checked when subjectAltName is empty + for sub in cert.get('subject', ()): + for key, value in sub: + # XXX according to RFC 2818, the most specific Common Name + # must be used. + if key == 'commonName': + if _dnsname_to_pat(value).match(hostname): + return + dnsnames.append(value) + if len(dnsnames) > 1: + raise CertificateError("hostname %r " + "doesn't match either of %s" + % (hostname, ', '.join(map(repr, dnsnames)))) + elif len(dnsnames) == 1: + raise CertificateError("hostname %r " + "doesn't match %r" + % (hostname, dnsnames[0])) + else: + raise CertificateError("no appropriate commonName or " + "subjectAltName fields were found") diff --git a/libs/bencode/LICENSE.txt b/libs/bencode/LICENSE.txt deleted file mode 100644 index 4b7a674703..0000000000 --- a/libs/bencode/LICENSE.txt +++ /dev/null @@ -1,143 +0,0 @@ -BitTorrent Open Source License - -Version 1.1 - -This BitTorrent Open Source License (the "License") applies to the BitTorrent client and related software products as well as any updates or maintenance releases of that software ("BitTorrent Products") that are distributed by BitTorrent, Inc. ("Licensor"). Any BitTorrent Product licensed pursuant to this License is a Licensed Product. 
Licensed Product, in its entirety, is protected by U.S. copyright law. This License identifies the terms under which you may use, copy, distribute or modify Licensed Product. - -Preamble - -This Preamble is intended to describe, in plain English, the nature and scope of this License. However, this Preamble is not a part of this license. The legal effect of this License is dependent only upon the terms of the License and not this Preamble. - -This License complies with the Open Source Definition and is derived from the Jabber Open Source License 1.0 (the "JOSL"), which has been approved by Open Source Initiative. Sections 4(c) and 4(f)(iii) from the JOSL have been deleted. - -This License provides that: - -1. You may use or give away the Licensed Product, alone or as a component of an aggregate software distribution containing programs from several different sources. No royalty or other fee is required. - -2. Both Source Code and executable versions of the Licensed Product, including Modifications made by previous Contributors, are available for your use. (The terms "Licensed Product," "Modifications," "Contributors" and "Source Code" are defined in the License.) - -3. You are allowed to make Modifications to the Licensed Product, and you can create Derivative Works from it. (The term "Derivative Works" is defined in the License.) - -4. By accepting the Licensed Product under the provisions of this License, you agree that any Modifications you make to the Licensed Product and then distribute are governed by the provisions of this License. In particular, you must make the Source Code of your Modifications available to others free of charge and without a royalty. - -5. You may sell, accept donations or otherwise receive compensation for executable versions of a Licensed Product, without paying a royalty or other fee to the Licensor or any Contributor, provided that such executable versions contain your or another Contributor?s material Modifications. 
For the avoidance of doubt, to the extent your executable version of a Licensed Product does not contain your or another Contributor?s material Modifications, you may not sell, accept donations or otherwise receive compensation for such executable. - -You may use the Licensed Product for any purpose, but the Licensor is not providing you any warranty whatsoever, nor is the Licensor accepting any liability in the event that the Licensed Product doesn't work properly or causes you any injury or damages. - -6. If you sublicense the Licensed Product or Derivative Works, you may charge fees for warranty or support, or for accepting indemnity or liability obligations to your customers. You cannot charge for, sell, accept donations or otherwise receive compensation for the Source Code. - -7. If you assert any patent claims against the Licensor relating to the Licensed Product, or if you breach any terms of the License, your rights to the Licensed Product under this License automatically terminate. - -You may use this License to distribute your own Derivative Works, in which case the provisions of this License will apply to your Derivative Works just as they do to the original Licensed Product. - -Alternatively, you may distribute your Derivative Works under any other OSI-approved Open Source license, or under a proprietary license of your choice. If you use any license other than this License, however, you must continue to fulfill the requirements of this License (including the provisions relating to publishing the Source Code) for those portions of your Derivative Works that consist of the Licensed Product, including the files containing Modifications. - -New versions of this License may be published from time to time in connection with new versions of a Licensed Product or otherwise. 
You may choose to continue to use the license terms in this version of the License for the Licensed Product that was originally licensed hereunder, however, the new versions of this License will at all times apply to new versions of the Licensed Product released by Licensor after the release of the new version of this License. Only the Licensor has the right to change the License terms as they apply to the Licensed Product. - -This License relies on precise definitions for certain terms. Those terms are defined when they are first used, and the definitions are repeated for your convenience in a Glossary at the end of the License. - -License Terms - -1. Grant of License From Licensor. Subject to the terms and conditions of this License, Licensor hereby grants you a world-wide, royalty-free, non-exclusive license, subject to third party intellectual property claims, to do the following: - -a. Use, reproduce, modify, display, perform, sublicense and distribute any Modifications created by a Contributor or portions thereof, in both Source Code or as an executable program, either on an unmodified basis or as part of Derivative Works. - -b. Under claims of patents now or hereafter owned or controlled by Contributor, to make, use, sell, offer for sale, have made, and/or otherwise dispose of Modifications or portions thereof, but solely to the extent that any such claim is necessary to enable you to make, use, sell, offer for sale, have made, and/or otherwise dispose of Modifications or portions thereof or Derivative Works thereof. - -2. Grant of License to Modifications From Contributor. "Modifications" means any additions to or deletions from the substance or structure of (i) a file containing a Licensed Product, or (ii) any new file that contains any part of a Licensed Product. Hereinafter in this License, the term "Licensed Product" shall include all previous Modifications that you receive from any Contributor. 
Subject to the terms and conditions of this License, By application of the provisions in Section 4(a) below, each person or entity who created or contributed to the creation of, and distributed, a Modification (a "Contributor") hereby grants you a world-wide, royalty-free, non-exclusive license, subject to third party intellectual property claims, to do the following: - -a. Use, reproduce, modify, display, perform, sublicense and distribute any Modifications created by such Contributor or portions thereof, in both Source Code or as an executable program, either on an unmodified basis or as part of Derivative Works. - -b. Under claims of patents now or hereafter owned or controlled by Contributor, to make, use, sell, offer for sale, have made, and/or otherwise dispose of Modifications or portions thereof, but solely to the extent that any such claim is necessary to enable you to make, use, sell, offer for sale, have made, and/or otherwise dispose of Modifications or portions thereof or Derivative Works thereof. - -3. Exclusions From License Grant. Nothing in this License shall be deemed to grant any rights to trademarks, copyrights, patents, trade secrets or any other intellectual property of Licensor or any Contributor except as expressly stated herein. No patent license is granted separate from the Licensed Product, for code that you delete from the Licensed Product, or for combinations of the Licensed Product with other software or hardware. No right is granted to the trademarks of Licensor or any Contributor even if such marks are included in the Licensed Product. Nothing in this License shall be interpreted to prohibit Licensor from licensing under different terms from this License any code that Licensor otherwise would have a right to license. 
As an express condition for your use of the Licensed Product, you hereby agree that you will not, without the prior written consent of Licensor, use any trademarks, copyrights, patents, trade secrets or any other intellectual property of Licensor or any Contributor except as expressly stated herein. For the avoidance of doubt and without limiting the foregoing, you hereby agree that you will not use or display any trademark of Licensor or any Contributor in any domain name, directory filepath, advertisement, link or other reference to you in any manner or in any media. - -4. Your Obligations Regarding Distribution. - -a. Application of This License to Your Modifications. As an express condition for your use of the Licensed Product, you hereby agree that any Modifications that you create or to which you contribute, and which you distribute, are governed by the terms of this License including, without limitation, Section 2. Any Modifications that you create or to which you contribute may be distributed only under the terms of this License or a future version of this License released under Section 7. You must include a copy of this License with every copy of the Modifications you distribute. You agree not to offer or impose any terms on any Source Code or executable version of the Licensed Product or Modifications that alter or restrict the applicable version of this License or the recipients' rights hereunder. However, you may include an additional document offering the additional rights described in Section 4(d). - -b. Availability of Source Code. You must make available, without charge, under the terms of this License, the Source Code of the Licensed Product and any Modifications that you distribute, either on the same media as you distribute any executable or other form of the Licensed Product, or via a mechanism generally accepted in the software development community for the electronic transfer of data (an "Electronic Distribution Mechanism"). 
The Source Code for any version of Licensed Product or Modifications that you distribute must remain available for as long as any executable or other form of the Licensed Product is distributed by you. You are responsible for ensuring that the Source Code version remains available even if the Electronic Distribution Mechanism is maintained by a third party. - -c. Intellectual Property Matters. - - i. Third Party Claims. If you have knowledge that a license to a third party's intellectual property right is required to exercise the rights granted by this License, you must include a text file with the Source Code distribution titled "LEGAL" that describes the claim and the party making the claim in sufficient detail that a recipient will know whom to contact. If you obtain such knowledge after you make any Modifications available as described in Section 4(b), you shall promptly modify the LEGAL file in all copies you make available thereafter and shall take other steps (such as notifying appropriate mailing lists or newsgroups) reasonably calculated to inform those who received the Licensed Product from you that new knowledge has been obtained. - - ii. Contributor APIs. If your Modifications include an application programming interface ("API") and you have knowledge of patent licenses that are reasonably necessary to implement that API, you must also include this information in the LEGAL file. - - iii. Representations. You represent that, except as disclosed pursuant to 4(c)(i) above, you believe that any Modifications you distribute are your original creations and that you have sufficient rights to grant the rights conveyed by this License. - -d. Required Notices. You must duplicate this License in any documentation you provide along with the Source Code of any Modifications you create or to which you contribute, and which you distribute, wherever you describe recipients' rights relating to Licensed Product. 
You must duplicate the notice contained in Exhibit A (the "Notice") in each file of the Source Code of any copy you distribute of the Licensed Product. If you created a Modification, you may add your name as a Contributor to the Notice. If it is not possible to put the Notice in a particular Source Code file due to its structure, then you must include such Notice in a location (such as a relevant directory file) where a user would be likely to look for such a notice. You may choose to offer, and charge a fee for, warranty, support, indemnity or liability obligations to one or more recipients of Licensed Product. However, you may do so only on your own behalf, and not on behalf of the Licensor or any Contributor. You must make it clear that any such warranty, support, indemnity or liability obligation is offered by you alone, and you hereby agree to indemnify the Licensor and every Contributor for any liability incurred by the Licensor or such Contributor as a result of warranty, support, indemnity or liability terms you offer. - -e. Distribution of Executable Versions. You may distribute Licensed Product as an executable program under a license of your choice that may contain terms different from this License provided (i) you have satisfied the requirements of Sections 4(a) through 4(d) for that distribution, (ii) you include a conspicuous notice in the executable version, related documentation and collateral materials stating that the Source Code version of the -Licensed Product is available under the terms of this License, including a description of how and where you have fulfilled the obligations of Section 4(b), and (iii) you make it clear that any terms that differ from this License are offered by you alone, not by Licensor or any Contributor. You hereby agree to indemnify the Licensor and every Contributor for any liability incurred by Licensor or such Contributor as a result of any terms you offer. - -f. Distribution of Derivative Works. 
You may create Derivative Works (e.g., combinations of some or all of the Licensed Product with other code) and distribute the Derivative Works as products under any other license you select, with the proviso that the requirements of this License are fulfilled for those portions of the Derivative Works that consist of the Licensed Product or any Modifications thereto. - -g. Compensation for Distribution of Executable Versions of Licensed Products, Modifications or Derivative Works. Notwithstanding any provision of this License to the contrary, by distributing, selling, licensing, sublicensing or otherwise making available any Licensed Product, or Modification or Derivative Work thereof, you and Licensor hereby acknowledge and agree that you may sell, license or sublicense for a fee, accept donations or otherwise receive compensation for executable versions of a Licensed Product, without paying a royalty or other fee to the Licensor or any other Contributor, provided that such executable versions (i) contain your or another Contributor?s material Modifications, or (ii) are otherwise material Derivative Works. For purposes of this License, an executable version of the Licensed Product will be deemed to contain a material Modification, or will otherwise be deemed a material Derivative Work, if (a) the Licensed Product is modified with your own or a third party?s software programs or other code, and/or the Licensed Product is combined with a number of your own or a third party?s software programs or code, respectively, and (b) such software programs or code add or contribute material value, functionality or features to the License Product. 
For the avoidance of doubt, to the extent your executable version of a Licensed Product does not contain your or another Contributor?s material Modifications or is otherwise not a material Derivative Work, in each case as contemplated herein, you may not sell, license or sublicense for a fee, accept donations or otherwise receive compensation for such executable. Additionally, without limitation of the foregoing and notwithstanding any provision of this License to the contrary, you cannot charge for, sell, license or sublicense for a fee, accept donations or otherwise receive compensation for the Source Code. - -5. Inability to Comply Due to Statute or Regulation. If it is impossible for you to comply with any of the terms of this License with respect to some or all of the Licensed Product due to statute, judicial order, or regulation, then you must (i) comply with the terms of this License to the maximum extent possible, (ii) cite the statute or regulation that prohibits you from adhering to the License, and (iii) describe the limitations and the code they affect. Such description must be included in the LEGAL file described in Section 4(d), and must be included with all distributions of the Source Code. Except to the extent prohibited by statute or regulation, such description must be sufficiently detailed for a recipient of ordinary skill at computer programming to be able to understand it. - -6. Application of This License. This License applies to code to which Licensor or Contributor has attached the Notice in Exhibit A, which is incorporated herein by this reference. - -7. Versions of This License. - -a. New Versions. Licensor may publish from time to time revised and/or new versions of the License. - -b. Effect of New Versions. 
Once Licensed Product has been published under a particular version of the License, you may always continue to use it under the terms of that version, provided that any such license be in full force and effect at the time, and has not been revoked or otherwise terminated. You may also choose to use such Licensed Product under the terms of any subsequent version (but not any prior version) of the License published by Licensor. No one other than Licensor has the right to modify the terms applicable to Licensed Product created under this License. - -c. Derivative Works of this License. If you create or use a modified version of this License, which you may do only in order to apply it to software that is not already a Licensed Product under this License, you must rename your license so that it is not confusingly similar to this License, and must make it clear that your license contains terms that differ from this License. In so naming your license, you may not use any trademark of Licensor or any Contributor. - -8. Disclaimer of Warranty. LICENSED PRODUCT IS PROVIDED UNDER THIS LICENSE ON AN AS IS BASIS, WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING, WITHOUT LIMITATION, WARRANTIES THAT THE LICENSED PRODUCT IS FREE OF DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR PURPOSE OR NON-INFRINGING. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE LICENSED PRODUCT IS WITH YOU. SHOULD LICENSED PRODUCT PROVE DEFECTIVE IN ANY RESPECT, YOU (AND NOT THE LICENSOR OR ANY OTHER CONTRIBUTOR) ASSUME THE COST OF ANY NECESSARY SERVICING, REPAIR OR CORRECTION. THIS -DISCLAIMER OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS LICENSE. NO USE OF LICENSED PRODUCT IS AUTHORIZED HEREUNDER EXCEPT UNDER THIS DISCLAIMER. - -9. Termination. - -a. Automatic Termination Upon Breach. 
This license and the rights granted hereunder will terminate automatically if you fail to comply with the terms herein and fail to cure such breach within ten (10) days of being notified of the breach by the Licensor. For purposes of this provision, proof of delivery via email to the address listed in the ?WHOIS? database of the registrar for any website through which you distribute or market any Licensed Product, or to any alternate email address which you designate in writing to the Licensor, shall constitute sufficient notification. All sublicenses to the Licensed Product that are properly granted shall survive any termination of this license so long as they continue to complye with the terms of this License. Provisions that, by their nature, must remain in effect beyond the termination of this License, shall survive. - -b. Termination Upon Assertion of Patent Infringement. If you initiate litigation by asserting a patent infringement claim (excluding declaratory judgment actions) against Licensor or a Contributor (Licensor or Contributor against whom you file such an action is referred to herein as Respondent) alleging that Licensed Product directly or indirectly infringes any patent, then any and all rights granted by such Respondent to you under Sections 1 or 2 of this License shall terminate prospectively upon sixty (60) days notice from Respondent (the "Notice Period") unless within that Notice Period you either agree in writing (i) to pay Respondent a mutually agreeable reasonably royalty for your past or future use of Licensed Product made by such Respondent, or (ii) withdraw your litigation claim with respect to Licensed Product against such Respondent. If within said Notice Period a reasonable royalty and payment arrangement are not mutually agreed upon in writing by the parties or the litigation claim is not withdrawn, the rights granted by Licensor to you under Sections 1 and 2 automatically terminate at the expiration of said Notice Period. - -c. 
Reasonable Value of This License. If you assert a patent infringement claim against Respondent alleging that Licensed Product directly or indirectly infringes any patent where such claim is resolved (such as by license or settlement) prior to the initiation of patent infringement litigation, then the reasonable value of the licenses granted by said Respondent under Sections 1 and 2 shall be taken into account in determining the amount or value of any payment or license. - -d. No Retroactive Effect of Termination. In the event of termination under Sections 9(a) or 9(b) above, all end user license agreements (excluding licenses to distributors and resellers) that have been validly granted by you or any distributor hereunder prior to termination shall survive termination. - -10. Limitation of Liability. UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL THE LICENSOR, ANY CONTRIBUTOR, OR ANY DISTRIBUTOR OF LICENSED PRODUCT, OR ANY SUPPLIER OF ANY OF SUCH PARTIES, BE LIABLE TO ANY PERSON FOR ANY INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES OF ANY CHARACTER INCLUDING, WITHOUT LIMITATION, DAMAGES FOR LOSS OF GOODWILL, WORK STOPPAGE, COMPUTER FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER COMMERCIAL DAMAGES OR LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN INFORMED OF THE POSSIBILITY OF SUCH DAMAGES. THIS LIMITATION OF LIABILITY SHALL NOT APPLY TO LIABILITY FOR DEATH OR PERSONAL INJURY RESULTING FROM SUCH PARTYS NEGLIGENCE TO THE EXTENT APPLICABLE LAW PROHIBITS SUCH LIMITATION. SOME JURISDICTIONS DO NOT ALLOW THE EXCLUSION OR LIMITATION OF INCIDENTAL OR CONSEQUENTIAL DAMAGES, SO THIS EXCLUSION AND LIMITATION MAY NOT APPLY TO YOU. - -11. Responsibility for Claims. As between Licensor and Contributors, each party is responsible for claims and damages arising, directly or indirectly, out of its utilization of rights under this License. 
You agree to work with Licensor and Contributors to distribute such responsibility on an equitable basis. Nothing herein is intended or shall be deemed to constitute any admission of liability. - -12. U.S. Government End Users. The Licensed Product is a commercial item, as that term is defined in 48 C.F.R. 2.101 (Oct. 1995), consisting of commercial computer software and commercial computer software documentation, as such terms are used in 48 C.F.R. 12.212 (Sept. 1995). Consistent with 48 C.F.R. 12.212 and 48 C.F.R. 227.7202-1 through 227.7202-4 (June 1995), all U.S. Government End Users acquire Licensed Product with only those rights set forth herein. - -13. Miscellaneous. This License represents the complete agreement concerning the subject matter hereof. If any provision of this License is held to be unenforceable, such provision shall be reformed only to the extent necessary to make it enforceable. This License shall be governed by California law provisions (except to the extent applicable law, if any, provides otherwise), excluding its conflict-of-law provisions. You expressly agree that in any litigation relating to this license the losing party shall be responsible for costs including, without limitation, court costs and reasonable attorneys fees and expenses. The application of the United Nations Convention on Contracts for the International Sale of Goods is expressly excluded. Any law or regulation that provides that the language of a contract shall be construed against the drafter shall not apply to this License. - -14. Definition of You in This License. You throughout this License, whether in upper or lower case, means an individual or a legal entity exercising rights under, and complying with all of the terms of, this License or a future version of this License issued under Section 7. For legal entities, you includes any entity that controls, is controlled by, is under common control with, or affiliated with, you. 
For purposes of this definition, control means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. You are responsible for advising any affiliated entity of the terms of this License, and that any rights or privileges derived from or obtained by way of this License are subject to the restrictions outlined herein. - -15. Glossary. All defined terms in this License that are used in more than one Section of this License are repeated here, in alphabetical order, for the convenience of the reader. The Section of this License in which each defined term is first used is shown in parentheses. - -Contributor: Each person or entity who created or contributed to the creation of, and distributed, a Modification. (See Section 2) - -Derivative Works: That term as used in this License is defined under U.S. copyright law. (See Section 1(b)) - -License: This BitTorrent Open Source License. (See first paragraph of License) - -Licensed Product: Any BitTorrent Product licensed pursuant to this License. The term "Licensed Product" includes all previous Modifications from any Contributor that you receive. (See first paragraph of License and Section 2) - -Licensor: BitTorrent, Inc. (See first paragraph of License) - -Modifications: Any additions to or deletions from the substance or structure of (i) a file containing Licensed Product, or (ii) any new file that contains any part of Licensed Product. (See Section 2) - -Notice: The notice contained in Exhibit A. 
(See Section 4(e)) - -Source Code: The preferred form for making modifications to the Licensed Product, including all modules contained therein, plus any associated interface definition files, scripts used to control compilation and installation of an executable program, or a list of differential comparisons against the Source Code of the Licensed Product. (See Section 1(a)) - -You: This term is defined in Section 14 of this License. - - -EXHIBIT A - -The Notice below must appear in each file of the Source Code of any copy you distribute of the Licensed Product or any hereto. Contributors to any Modifications may add their own copyright notices to identify their own contributions. - -License: - -The contents of this file are subject to the BitTorrent Open Source License Version 1.0 (the License). You may not copy or use this file, in either source code or executable form, except in compliance with the License. You may obtain a copy of the License at http://www.bittorrent.com/license/. - -Software distributed under the License is distributed on an AS IS basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License for the specific language governing rights and limitations under the License. - diff --git a/libs/bencode/__init__.py b/libs/bencode/__init__.py index 4424fc7e1b..7a2af1722d 100644 --- a/libs/bencode/__init__.py +++ b/libs/bencode/__init__.py @@ -1 +1,131 @@ -from bencode import * \ No newline at end of file +# The contents of this file are subject to the BitTorrent Open Source License +# Version 1.1 (the License). You may not copy or use this file, in either +# source code or executable form, except in compliance with the License. You +# may obtain a copy of the License at http://www.bittorrent.com/license/. +# +# Software distributed under the License is distributed on an AS IS basis, +# WITHOUT WARRANTY OF ANY KIND, either express or implied. 
See the License +# for the specific language governing rights and limitations under the +# License. + +# Written by Petru Paler + +from BTL import BTFailure + + +def decode_int(x, f): + f += 1 + newf = x.index('e', f) + n = int(x[f:newf]) + if x[f] == '-': + if x[f + 1] == '0': + raise ValueError + elif x[f] == '0' and newf != f+1: + raise ValueError + return (n, newf+1) + +def decode_string(x, f): + colon = x.index(':', f) + n = int(x[f:colon]) + if x[f] == '0' and colon != f+1: + raise ValueError + colon += 1 + return (x[colon:colon+n], colon+n) + +def decode_list(x, f): + r, f = [], f+1 + while x[f] != 'e': + v, f = decode_func[x[f]](x, f) + r.append(v) + return (r, f + 1) + +def decode_dict(x, f): + r, f = {}, f+1 + while x[f] != 'e': + k, f = decode_string(x, f) + r[k], f = decode_func[x[f]](x, f) + return (r, f + 1) + +decode_func = {} +decode_func['l'] = decode_list +decode_func['d'] = decode_dict +decode_func['i'] = decode_int +decode_func['0'] = decode_string +decode_func['1'] = decode_string +decode_func['2'] = decode_string +decode_func['3'] = decode_string +decode_func['4'] = decode_string +decode_func['5'] = decode_string +decode_func['6'] = decode_string +decode_func['7'] = decode_string +decode_func['8'] = decode_string +decode_func['9'] = decode_string + +def bdecode(x): + try: + r, l = decode_func[x[0]](x, 0) + except (IndexError, KeyError, ValueError): + raise BTFailure("not a valid bencoded string") + if l != len(x): + raise BTFailure("invalid bencoded value (data after valid prefix)") + return r + +from types import StringType, IntType, LongType, DictType, ListType, TupleType + + +class Bencached(object): + + __slots__ = ['bencoded'] + + def __init__(self, s): + self.bencoded = s + +def encode_bencached(x,r): + r.append(x.bencoded) + +def encode_int(x, r): + r.extend(('i', str(x), 'e')) + +def encode_bool(x, r): + if x: + encode_int(1, r) + else: + encode_int(0, r) + +def encode_string(x, r): + r.extend((str(len(x)), ':', x)) + +def 
encode_list(x, r): + r.append('l') + for i in x: + encode_func[type(i)](i, r) + r.append('e') + +def encode_dict(x,r): + r.append('d') + ilist = x.items() + ilist.sort() + for k, v in ilist: + r.extend((str(len(k)), ':', k)) + encode_func[type(v)](v, r) + r.append('e') + +encode_func = {} +encode_func[Bencached] = encode_bencached +encode_func[IntType] = encode_int +encode_func[LongType] = encode_int +encode_func[StringType] = encode_string +encode_func[ListType] = encode_list +encode_func[TupleType] = encode_list +encode_func[DictType] = encode_dict + +try: + from types import BooleanType + encode_func[BooleanType] = encode_bool +except ImportError: + pass + +def bencode(x): + r = [] + encode_func[type(x)](x, r) + return ''.join(r) diff --git a/libs/bencode/bencode.py b/libs/bencode/bencode.py deleted file mode 100644 index 7a2af1722d..0000000000 --- a/libs/bencode/bencode.py +++ /dev/null @@ -1,131 +0,0 @@ -# The contents of this file are subject to the BitTorrent Open Source License -# Version 1.1 (the License). You may not copy or use this file, in either -# source code or executable form, except in compliance with the License. You -# may obtain a copy of the License at http://www.bittorrent.com/license/. -# -# Software distributed under the License is distributed on an AS IS basis, -# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License -# for the specific language governing rights and limitations under the -# License. 
- -# Written by Petru Paler - -from BTL import BTFailure - - -def decode_int(x, f): - f += 1 - newf = x.index('e', f) - n = int(x[f:newf]) - if x[f] == '-': - if x[f + 1] == '0': - raise ValueError - elif x[f] == '0' and newf != f+1: - raise ValueError - return (n, newf+1) - -def decode_string(x, f): - colon = x.index(':', f) - n = int(x[f:colon]) - if x[f] == '0' and colon != f+1: - raise ValueError - colon += 1 - return (x[colon:colon+n], colon+n) - -def decode_list(x, f): - r, f = [], f+1 - while x[f] != 'e': - v, f = decode_func[x[f]](x, f) - r.append(v) - return (r, f + 1) - -def decode_dict(x, f): - r, f = {}, f+1 - while x[f] != 'e': - k, f = decode_string(x, f) - r[k], f = decode_func[x[f]](x, f) - return (r, f + 1) - -decode_func = {} -decode_func['l'] = decode_list -decode_func['d'] = decode_dict -decode_func['i'] = decode_int -decode_func['0'] = decode_string -decode_func['1'] = decode_string -decode_func['2'] = decode_string -decode_func['3'] = decode_string -decode_func['4'] = decode_string -decode_func['5'] = decode_string -decode_func['6'] = decode_string -decode_func['7'] = decode_string -decode_func['8'] = decode_string -decode_func['9'] = decode_string - -def bdecode(x): - try: - r, l = decode_func[x[0]](x, 0) - except (IndexError, KeyError, ValueError): - raise BTFailure("not a valid bencoded string") - if l != len(x): - raise BTFailure("invalid bencoded value (data after valid prefix)") - return r - -from types import StringType, IntType, LongType, DictType, ListType, TupleType - - -class Bencached(object): - - __slots__ = ['bencoded'] - - def __init__(self, s): - self.bencoded = s - -def encode_bencached(x,r): - r.append(x.bencoded) - -def encode_int(x, r): - r.extend(('i', str(x), 'e')) - -def encode_bool(x, r): - if x: - encode_int(1, r) - else: - encode_int(0, r) - -def encode_string(x, r): - r.extend((str(len(x)), ':', x)) - -def encode_list(x, r): - r.append('l') - for i in x: - encode_func[type(i)](i, r) - r.append('e') - -def 
encode_dict(x,r): - r.append('d') - ilist = x.items() - ilist.sort() - for k, v in ilist: - r.extend((str(len(k)), ':', k)) - encode_func[type(v)](v, r) - r.append('e') - -encode_func = {} -encode_func[Bencached] = encode_bencached -encode_func[IntType] = encode_int -encode_func[LongType] = encode_int -encode_func[StringType] = encode_string -encode_func[ListType] = encode_list -encode_func[TupleType] = encode_list -encode_func[DictType] = encode_dict - -try: - from types import BooleanType - encode_func[BooleanType] = encode_bool -except ImportError: - pass - -def bencode(x): - r = [] - encode_func[type(x)](x, r) - return ''.join(r) diff --git a/libs/bs4/__init__.py b/libs/bs4/__init__.py index af8c718dc0..7ba34269af 100644 --- a/libs/bs4/__init__.py +++ b/libs/bs4/__init__.py @@ -17,16 +17,17 @@ """ __author__ = "Leonard Richardson (leonardr@segfault.org)" -__version__ = "4.1.0" -__copyright__ = "Copyright (c) 2004-2012 Leonard Richardson" +__version__ = "4.3.2" +__copyright__ = "Copyright (c) 2004-2013 Leonard Richardson" __license__ = "MIT" __all__ = ['BeautifulSoup'] +import os import re import warnings -from .builder import builder_registry +from .builder import builder_registry, ParserRejectedMarkup from .dammit import UnicodeDammit from .element import ( CData, @@ -74,11 +75,7 @@ class BeautifulSoup(Tag): # want, look for one with these features. DEFAULT_BUILDER_FEATURES = ['html', 'fast'] - # Used when determining whether a text node is all whitespace and - # can be replaced with a single space. A text node that contains - # fancy Unicode spaces (usually non-breaking) should be left - # alone. 
- STRIP_ASCII_SPACES = {9: None, 10: None, 12: None, 13: None, 32: None, } + ASCII_SPACES = '\x20\x0a\x09\x0c\x0d' def __init__(self, markup="", features=None, builder=None, parse_only=None, from_encoding=None, **kwargs): @@ -149,7 +146,7 @@ def deprecated_argument(old_name, new_name): features = self.DEFAULT_BUILDER_FEATURES builder_class = builder_registry.lookup(*features) if builder_class is None: - raise ValueError( + raise FeatureNotFound( "Couldn't find a tree builder with the features you " "requested: %s. Do you need to install a parser library?" % ",".join(features)) @@ -160,18 +157,46 @@ def deprecated_argument(old_name, new_name): self.parse_only = parse_only - self.reset() - if hasattr(markup, 'read'): # It's a file-type object. markup = markup.read() - (self.markup, self.original_encoding, self.declared_html_encoding, - self.contains_replacement_characters) = ( - self.builder.prepare_markup(markup, from_encoding)) - - try: - self._feed() - except StopParsing: - pass + elif len(markup) <= 256: + # Print out warnings for a couple beginner problems + # involving passing non-markup to Beautiful Soup. + # Beautiful Soup will still parse the input as markup, + # just in case that's what the user really wants. + if (isinstance(markup, unicode) + and not os.path.supports_unicode_filenames): + possible_filename = markup.encode("utf8") + else: + possible_filename = markup + is_file = False + try: + is_file = os.path.exists(possible_filename) + except Exception, e: + # This is almost certainly a problem involving + # characters not valid in filenames on this + # system. Just let it go. + pass + if is_file: + warnings.warn( + '"%s" looks like a filename, not markup. You should probably open this file and pass the filehandle into Beautiful Soup.' % markup) + if markup[:5] == "http:" or markup[:6] == "https:": + # TODO: This is ugly but I couldn't get it to work in + # Python 3 otherwise. 
+ if ((isinstance(markup, bytes) and not b' ' in markup) + or (isinstance(markup, unicode) and not u' ' in markup)): + warnings.warn( + '"%s" looks like a URL. Beautiful Soup is not an HTTP client. You should probably use an HTTP client to get the document behind the URL, and feed that document to Beautiful Soup.' % markup) + + for (self.markup, self.original_encoding, self.declared_html_encoding, + self.contains_replacement_characters) in ( + self.builder.prepare_markup(markup, from_encoding)): + self.reset() + try: + self._feed() + break + except ParserRejectedMarkup: + pass # Clear out the markup and remove the builder's circular # reference to this object. @@ -192,29 +217,32 @@ def reset(self): Tag.__init__(self, self, self.builder, self.ROOT_TAG_NAME) self.hidden = 1 self.builder.reset() - self.currentData = [] + self.current_data = [] self.currentTag = None self.tagStack = [] + self.preserve_whitespace_tag_stack = [] self.pushTag(self) def new_tag(self, name, namespace=None, nsprefix=None, **attrs): """Create a new tag associated with this soup.""" return Tag(None, self.builder, name, namespace, nsprefix, attrs) - def new_string(self, s): + def new_string(self, s, subclass=NavigableString): """Create a new NavigableString associated with this soup.""" - navigable = NavigableString(s) + navigable = subclass(s) navigable.setup() return navigable def insert_before(self, successor): - raise ValueError("BeautifulSoup objects don't support insert_before().") + raise NotImplementedError("BeautifulSoup objects don't support insert_before().") def insert_after(self, successor): - raise ValueError("BeautifulSoup objects don't support insert_after().") + raise NotImplementedError("BeautifulSoup objects don't support insert_after().") def popTag(self): tag = self.tagStack.pop() + if self.preserve_whitespace_tag_stack and tag == self.preserve_whitespace_tag_stack[-1]: + self.preserve_whitespace_tag_stack.pop() #print "Pop", tag.name if self.tagStack: self.currentTag = 
self.tagStack[-1] @@ -226,32 +254,49 @@ def pushTag(self, tag): self.currentTag.contents.append(tag) self.tagStack.append(tag) self.currentTag = self.tagStack[-1] + if tag.name in self.builder.preserve_whitespace_tags: + self.preserve_whitespace_tag_stack.append(tag) def endData(self, containerClass=NavigableString): - if self.currentData: - currentData = u''.join(self.currentData) - if (currentData.translate(self.STRIP_ASCII_SPACES) == '' and - not set([tag.name for tag in self.tagStack]).intersection( - self.builder.preserve_whitespace_tags)): - if '\n' in currentData: - currentData = '\n' - else: - currentData = ' ' - self.currentData = [] + if self.current_data: + current_data = u''.join(self.current_data) + # If whitespace is not preserved, and this string contains + # nothing but ASCII spaces, replace it with a single space + # or newline. + if not self.preserve_whitespace_tag_stack: + strippable = True + for i in current_data: + if i not in self.ASCII_SPACES: + strippable = False + break + if strippable: + if '\n' in current_data: + current_data = '\n' + else: + current_data = ' ' + + # Reset the data collector. + self.current_data = [] + + # Should we add this string to the tree at all? 
if self.parse_only and len(self.tagStack) <= 1 and \ (not self.parse_only.text or \ - not self.parse_only.search(currentData)): + not self.parse_only.search(current_data)): return - o = containerClass(currentData) + + o = containerClass(current_data) self.object_was_parsed(o) - def object_was_parsed(self, o): + def object_was_parsed(self, o, parent=None, most_recent_element=None): """Add an object to the parse tree.""" - o.setup(self.currentTag, self.previous_element) - if self.previous_element: - self.previous_element.next_element = o - self.previous_element = o - self.currentTag.contents.append(o) + parent = parent or self.currentTag + most_recent_element = most_recent_element or self._most_recent_element + o.setup(parent, most_recent_element) + + if most_recent_element is not None: + most_recent_element.next_element = o + self._most_recent_element = o + parent.contents.append(o) def _popToTag(self, name, nsprefix=None, inclusivePop=True): """Pops the tag stack up to and including the most recent @@ -260,22 +305,21 @@ def _popToTag(self, name, nsprefix=None, inclusivePop=True): the given tag.""" #print "Popping to %s" % name if name == self.ROOT_TAG_NAME: + # The BeautifulSoup object itself can never be popped. return - numPops = 0 - mostRecentTag = None + most_recently_popped = None - for i in range(len(self.tagStack) - 1, 0, -1): - if (name == self.tagStack[i].name - and nsprefix == self.tagStack[i].nsprefix == nsprefix): - numPops = len(self.tagStack) - i + stack_size = len(self.tagStack) + for i in range(stack_size - 1, 0, -1): + t = self.tagStack[i] + if (name == t.name and nsprefix == t.prefix): + if inclusivePop: + most_recently_popped = self.popTag() break - if not inclusivePop: - numPops = numPops - 1 + most_recently_popped = self.popTag() - for i in range(0, numPops): - mostRecentTag = self.popTag() - return mostRecentTag + return most_recently_popped def handle_starttag(self, name, namespace, nsprefix, attrs): """Push a start tag on to the stack. 
@@ -295,12 +339,12 @@ def handle_starttag(self, name, namespace, nsprefix, attrs): return None tag = Tag(self, self.builder, name, namespace, nsprefix, attrs, - self.currentTag, self.previous_element) + self.currentTag, self._most_recent_element) if tag is None: return tag - if self.previous_element: - self.previous_element.next_element = tag - self.previous_element = tag + if self._most_recent_element: + self._most_recent_element.next_element = tag + self._most_recent_element = tag self.pushTag(tag) return tag @@ -310,7 +354,7 @@ def handle_endtag(self, name, nsprefix=None): self._popToTag(name, nsprefix) def handle_data(self, data): - self.currentData.append(data) + self.current_data.append(data) def decode(self, pretty_print=False, eventual_encoding=DEFAULT_OUTPUT_ENCODING, @@ -333,6 +377,10 @@ def decode(self, pretty_print=False, return prefix + super(BeautifulSoup, self).decode( indent_level, eventual_encoding, formatter) +# Alias to make it easier to type import: 'from bs4 import _soup' +_s = BeautifulSoup +_soup = BeautifulSoup + class BeautifulStoneSoup(BeautifulSoup): """Deprecated interface to an XML parser.""" @@ -347,6 +395,9 @@ def __init__(self, *args, **kwargs): class StopParsing(Exception): pass +class FeatureNotFound(ValueError): + pass + #By default, act as an HTML pretty-printer. if __name__ == '__main__': diff --git a/libs/bs4/builder/__init__.py b/libs/bs4/builder/__init__.py index 4c22b864a5..740f5f29cd 100644 --- a/libs/bs4/builder/__init__.py +++ b/libs/bs4/builder/__init__.py @@ -147,18 +147,29 @@ def _replace_cdata_list_attribute_values(self, tag_name, attrs): Modifies its input in place. 
""" + if not attrs: + return attrs if self.cdata_list_attributes: universal = self.cdata_list_attributes.get('*', []) tag_specific = self.cdata_list_attributes.get( - tag_name.lower(), []) - for cdata_list_attr in itertools.chain(universal, tag_specific): - if cdata_list_attr in dict(attrs): - # Basically, we have a "class" attribute whose - # value is a whitespace-separated list of CSS - # classes. Split it into a list. - value = attrs[cdata_list_attr] - values = whitespace_re.split(value) - attrs[cdata_list_attr] = values + tag_name.lower(), None) + for attr in attrs.keys(): + if attr in universal or (tag_specific and attr in tag_specific): + # We have a "class"-type attribute whose string + # value is a whitespace-separated list of + # values. Split it into a list. + value = attrs[attr] + if isinstance(value, basestring): + values = whitespace_re.split(value) + else: + # html5lib sometimes calls setAttributes twice + # for the same tag when rearranging the parse + # tree. On the second call the attribute value + # here is already a list. If this happens, + # leave the value alone rather than trying to + # split it again. + values = value + attrs[attr] = values return attrs class SAXTreeBuilder(TreeBuilder): @@ -287,6 +298,9 @@ def register_treebuilders_from(module): # Register the builder while we're at it. this_module.builder_registry.register(obj) +class ParserRejectedMarkup(Exception): + pass + # Builders are registered in reverse order of priority, so that custom # builder registrations will take precedence. In general, we want lxml # to take precedence over html5lib, because it's faster. And we only diff --git a/libs/bs4/builder/_html5lib.py b/libs/bs4/builder/_html5lib.py index 6001e38691..7de36ae75e 100644 --- a/libs/bs4/builder/_html5lib.py +++ b/libs/bs4/builder/_html5lib.py @@ -27,7 +27,7 @@ class HTML5TreeBuilder(HTMLTreeBuilder): def prepare_markup(self, markup, user_specified_encoding): # Store the user-specified encoding for use later on. 
self.user_specified_encoding = user_specified_encoding - return markup, None, None, False + yield (markup, None, None, False) # These methods are defined by Beautiful Soup. def feed(self, markup): @@ -123,17 +123,50 @@ def __init__(self, element, soup, namespace): self.namespace = namespace def appendChild(self, node): - if (node.element.__class__ == NavigableString and self.element.contents + string_child = child = None + if isinstance(node, basestring): + # Some other piece of code decided to pass in a string + # instead of creating a TextElement object to contain the + # string. + string_child = child = node + elif isinstance(node, Tag): + # Some other piece of code decided to pass in a Tag + # instead of creating an Element object to contain the + # Tag. + child = node + elif node.element.__class__ == NavigableString: + string_child = child = node.element + else: + child = node.element + + if not isinstance(child, basestring) and child.parent is not None: + node.element.extract() + + if (string_child and self.element.contents and self.element.contents[-1].__class__ == NavigableString): - # Concatenate new text onto old text node - # XXX This has O(n^2) performance, for input like + # We are appending a string onto another string. + # TODO This has O(n^2) performance, for input like # "aaa..." old_element = self.element.contents[-1] - new_element = self.soup.new_string(old_element + node.element) + new_element = self.soup.new_string(old_element + string_child) old_element.replace_with(new_element) + self.soup._most_recent_element = new_element else: - self.element.append(node.element) - node.parent = self + if isinstance(node, basestring): + # Create a brand new NavigableString from this string. + child = self.soup.new_string(node) + + # Tell Beautiful Soup to act as if it parsed this element + # immediately after the parent's last descendant. (Or + # immediately after the parent, if it has no children.) 
+ if self.element.contents: + most_recent_element = self.element._last_descendant(False) + else: + most_recent_element = self.element + + self.soup.object_was_parsed( + child, parent=self.element, + most_recent_element=most_recent_element) def getAttributes(self): return AttrList(self.element) @@ -162,11 +195,11 @@ def setAttributes(self, attributes): attributes = property(getAttributes, setAttributes) def insertText(self, data, insertBefore=None): - text = TextNode(self.soup.new_string(data), self.soup) if insertBefore: - self.insertBefore(text, insertBefore) + text = TextNode(self.soup.new_string(data), self.soup) + self.insertBefore(data, insertBefore) else: - self.appendChild(text) + self.appendChild(data) def insertBefore(self, node, refNode): index = self.element.index(refNode.element) @@ -183,16 +216,46 @@ def insertBefore(self, node, refNode): def removeChild(self, node): node.element.extract() - def reparentChildren(self, newParent): - while self.element.contents: - child = self.element.contents[0] - child.extract() - if isinstance(child, Tag): - newParent.appendChild( - Element(child, self.soup, namespaces["html"])) - else: - newParent.appendChild( - TextNode(child, self.soup)) + def reparentChildren(self, new_parent): + """Move all of this tag's children into another tag.""" + element = self.element + new_parent_element = new_parent.element + # Determine what this tag's next_element will be once all the children + # are removed. + final_next_element = element.next_sibling + + new_parents_last_descendant = new_parent_element._last_descendant(False, False) + if len(new_parent_element.contents) > 0: + # The new parent already contains children. We will be + # appending this tag's children to the end. + new_parents_last_child = new_parent_element.contents[-1] + new_parents_last_descendant_next_element = new_parents_last_descendant.next_element + else: + # The new parent contains no children. 
+ new_parents_last_child = None + new_parents_last_descendant_next_element = new_parent_element.next_element + + to_append = element.contents + append_after = new_parent.element.contents + if len(to_append) > 0: + # Set the first child's previous_element and previous_sibling + # to elements within the new parent + first_child = to_append[0] + first_child.previous_element = new_parents_last_descendant + first_child.previous_sibling = new_parents_last_child + + # Fix the last child's next_element and next_sibling + last_child = to_append[-1] + last_child.next_element = new_parents_last_descendant_next_element + last_child.next_sibling = None + + for child in to_append: + child.parent = new_parent_element + new_parent_element.contents.append(child) + + # Now that this element has no children, change its .next_element. + element.contents = [] + element.next_element = final_next_element def cloneNode(self): tag = self.soup.new_tag(self.element.name, self.namespace) diff --git a/libs/bs4/builder/_htmlparser.py b/libs/bs4/builder/_htmlparser.py index ede5cecb2f..ca8d8b892b 100644 --- a/libs/bs4/builder/_htmlparser.py +++ b/libs/bs4/builder/_htmlparser.py @@ -45,7 +45,15 @@ class BeautifulSoupHTMLParser(HTMLParser): def handle_starttag(self, name, attrs): # XXX namespace - self.soup.handle_starttag(name, None, None, dict(attrs)) + attr_dict = {} + for key, value in attrs: + # Change None attribute values to the empty string + # for consistency with the other tree builders. + if value is None: + value = '' + attr_dict[key] = value + attrvalue = '""' + self.soup.handle_starttag(name, None, None, attr_dict) def handle_endtag(self, name): self.soup.handle_endtag(name) @@ -58,6 +66,8 @@ def handle_charref(self, name): # it's fixed. 
if name.startswith('x'): real_name = int(name.lstrip('x'), 16) + elif name.startswith('X'): + real_name = int(name.lstrip('X'), 16) else: real_name = int(name) @@ -85,6 +95,9 @@ def handle_decl(self, data): self.soup.endData() if data.startswith("DOCTYPE "): data = data[len("DOCTYPE "):] + elif data == 'DOCTYPE': + # i.e. "" + data = '' self.soup.handle_data(data) self.soup.endData(Doctype) @@ -130,13 +143,14 @@ def prepare_markup(self, markup, user_specified_encoding=None, replaced with REPLACEMENT CHARACTER). """ if isinstance(markup, unicode): - return markup, None, None, False + yield (markup, None, None, False) + return try_encodings = [user_specified_encoding, document_declared_encoding] dammit = UnicodeDammit(markup, try_encodings, is_html=True) - return (dammit.markup, dammit.original_encoding, - dammit.declared_html_encoding, - dammit.contains_replacement_characters) + yield (dammit.markup, dammit.original_encoding, + dammit.declared_html_encoding, + dammit.contains_replacement_characters) def feed(self, markup): args, kwargs = self.parser_args diff --git a/libs/bs4/builder/_lxml.py b/libs/bs4/builder/_lxml.py index c78fdff67f..fa5d49875e 100644 --- a/libs/bs4/builder/_lxml.py +++ b/libs/bs4/builder/_lxml.py @@ -3,6 +3,7 @@ 'LXMLTreeBuilder', ] +from io import BytesIO from StringIO import StringIO import collections from lxml import etree @@ -12,9 +13,10 @@ HTML, HTMLTreeBuilder, PERMISSIVE, + ParserRejectedMarkup, TreeBuilder, XML) -from bs4.dammit import UnicodeDammit +from bs4.dammit import EncodingDetector LXML = 'lxml' @@ -28,24 +30,36 @@ class LXMLTreeBuilderForXML(TreeBuilder): CHUNK_SIZE = 512 - @property - def default_parser(self): + # This namespace mapping is specified in the XML Namespace + # standard. + DEFAULT_NSMAPS = {'http://www.w3.org/XML/1998/namespace' : "xml"} + + def default_parser(self, encoding): # This can either return a parser object or a class, which # will be instantiated with default arguments. 
- return etree.XMLParser(target=self, strip_cdata=False, recover=True) + if self._default_parser is not None: + return self._default_parser + return etree.XMLParser( + target=self, strip_cdata=False, recover=True, encoding=encoding) + + def parser_for(self, encoding): + # Use the default parser. + parser = self.default_parser(encoding) + + if isinstance(parser, collections.Callable): + # Instantiate the parser with default arguments + parser = parser(target=self, strip_cdata=False, encoding=encoding) + return parser def __init__(self, parser=None, empty_element_tags=None): + # TODO: Issue a warning if parser is present but not a + # callable, since that means there's no way to create new + # parsers for different encodings. + self._default_parser = parser if empty_element_tags is not None: self.empty_element_tags = set(empty_element_tags) - if parser is None: - # Use the default parser. - parser = self.default_parser - if isinstance(parser, collections.Callable): - # Instantiate the parser with default arguments - parser = parser(target=self, strip_cdata=False) - self.parser = parser self.soup = None - self.nsmaps = None + self.nsmaps = [self.DEFAULT_NSMAPS] def _getNsTag(self, tag): # Split the namespace URL out of a fully-qualified lxml tag @@ -58,50 +72,69 @@ def _getNsTag(self, tag): def prepare_markup(self, markup, user_specified_encoding=None, document_declared_encoding=None): """ - :return: A 3-tuple (markup, original encoding, encoding - declared within markup). + :yield: A series of 4-tuples. + (markup, encoding, declared encoding, + has undergone character replacement) + + Each 4-tuple represents a strategy for parsing the document. """ if isinstance(markup, unicode): - return markup, None, None, False + # We were given Unicode. Maybe lxml can parse Unicode on + # this system? + yield markup, None, document_declared_encoding, False + if isinstance(markup, unicode): + # No, apparently not. 
Convert the Unicode to UTF-8 and + # tell lxml to parse it as UTF-8. + yield (markup.encode("utf8"), "utf8", + document_declared_encoding, False) + + # Instead of using UnicodeDammit to convert the bytestring to + # Unicode using different encodings, use EncodingDetector to + # iterate over the encodings, and tell lxml to try to parse + # the document as each one in turn. + is_html = not self.is_xml try_encodings = [user_specified_encoding, document_declared_encoding] - dammit = UnicodeDammit(markup, try_encodings, is_html=True) - return (dammit.markup, dammit.original_encoding, - dammit.declared_html_encoding, - dammit.contains_replacement_characters) + detector = EncodingDetector(markup, try_encodings, is_html) + for encoding in detector.encodings: + yield (detector.markup, encoding, document_declared_encoding, False) def feed(self, markup): - if isinstance(markup, basestring): + if isinstance(markup, bytes): + markup = BytesIO(markup) + elif isinstance(markup, unicode): markup = StringIO(markup) + # Call feed() at least once, even if the markup is empty, # or the parser won't be initialized. data = markup.read(self.CHUNK_SIZE) - self.parser.feed(data) - while data != '': - # Now call feed() on the rest of the data, chunk by chunk. - data = markup.read(self.CHUNK_SIZE) - if data != '': - self.parser.feed(data) - self.parser.close() + try: + self.parser = self.parser_for(self.soup.original_encoding) + self.parser.feed(data) + while len(data) != 0: + # Now call feed() on the rest of the data, chunk by chunk. + data = markup.read(self.CHUNK_SIZE) + if len(data) != 0: + self.parser.feed(data) + self.parser.close() + except (UnicodeDecodeError, LookupError, etree.ParserError), e: + raise ParserRejectedMarkup(str(e)) def close(self): - self.nsmaps = None + self.nsmaps = [self.DEFAULT_NSMAPS] def start(self, name, attrs, nsmap={}): # Make sure attrs is a mutable dict--lxml may send an immutable dictproxy. 
attrs = dict(attrs) - nsprefix = None # Invert each namespace map as it comes in. - if len(nsmap) == 0 and self.nsmaps != None: - # There are no new namespaces for this tag, but namespaces - # are in play, so we need a separate tag stack to know - # when they end. + if len(self.nsmaps) > 1: + # There are no new namespaces for this tag, but + # non-default namespaces are in play, so we need a + # separate tag stack to know when they end. self.nsmaps.append(None) elif len(nsmap) > 0: # A new namespace mapping has come into play. - if self.nsmaps is None: - self.nsmaps = [] inverted_nsmap = dict((value, key) for key, value in nsmap.items()) self.nsmaps.append(inverted_nsmap) # Also treat the namespace mapping as a set of attributes on the @@ -111,14 +144,34 @@ def start(self, name, attrs, nsmap={}): attribute = NamespacedAttribute( "xmlns", prefix, "http://www.w3.org/2000/xmlns/") attrs[attribute] = namespace + + # Namespaces are in play. Find any attributes that came in + # from lxml with namespaces attached to their names, and + # turn then into NamespacedAttribute objects. 
+ new_attrs = {} + for attr, value in attrs.items(): + namespace, attr = self._getNsTag(attr) + if namespace is None: + new_attrs[attr] = value + else: + nsprefix = self._prefix_for_namespace(namespace) + attr = NamespacedAttribute(nsprefix, attr, namespace) + new_attrs[attr] = value + attrs = new_attrs + namespace, name = self._getNsTag(name) - if namespace is not None: - for inverted_nsmap in reversed(self.nsmaps): - if inverted_nsmap is not None and namespace in inverted_nsmap: - nsprefix = inverted_nsmap[namespace] - break + nsprefix = self._prefix_for_namespace(namespace) self.soup.handle_starttag(name, namespace, nsprefix, attrs) + def _prefix_for_namespace(self, namespace): + """Find the currently active prefix for the given namespace.""" + if namespace is None: + return None + for inverted_nsmap in reversed(self.nsmaps): + if inverted_nsmap is not None and namespace in inverted_nsmap: + return inverted_nsmap[namespace] + return None + def end(self, name): self.soup.endData() completed_tag = self.soup.tagStack[-1] @@ -130,14 +183,10 @@ def end(self, name): nsprefix = inverted_nsmap[namespace] break self.soup.handle_endtag(name, nsprefix) - if self.nsmaps != None: + if len(self.nsmaps) > 1: # This tag, or one of its parents, introduced a namespace # mapping, so pop it off the stack. self.nsmaps.pop() - if len(self.nsmaps) == 0: - # Namespaces are no longer in play, so don't bother keeping - # track of the namespace stack. 
- self.nsmaps = None def pi(self, target, data): pass @@ -166,13 +215,18 @@ class LXMLTreeBuilder(HTMLTreeBuilder, LXMLTreeBuilderForXML): features = [LXML, HTML, FAST, PERMISSIVE] is_xml = False - @property - def default_parser(self): + def default_parser(self, encoding): return etree.HTMLParser def feed(self, markup): - self.parser.feed(markup) - self.parser.close() + encoding = self.soup.original_encoding + try: + self.parser = self.parser_for(encoding) + self.parser.feed(markup) + self.parser.close() + except (UnicodeDecodeError, LookupError, etree.ParserError), e: + raise ParserRejectedMarkup(str(e)) + def test_fragment_to_document(self, fragment): """See `TreeBuilder`.""" diff --git a/libs/bs4/dammit.py b/libs/bs4/dammit.py index 58cad9baa1..59640b7ce3 100644 --- a/libs/bs4/dammit.py +++ b/libs/bs4/dammit.py @@ -1,27 +1,40 @@ # -*- coding: utf-8 -*- """Beautiful Soup bonus library: Unicode, Dammit -This class forces XML data into a standard format (usually to UTF-8 or -Unicode). It is heavily based on code from Mark Pilgrim's Universal -Feed Parser. It does not rewrite the XML or HTML to reflect a new -encoding; that's the tree builder's job. +This library converts a bytestream to Unicode through any means +necessary. It is heavily based on code from Mark Pilgrim's Universal +Feed Parser. It works best on XML and XML, but it does not rewrite the +XML or HTML to reflect a new encoding; that's the tree builder's job. """ import codecs from htmlentitydefs import codepoint2name import re -import warnings +import logging +import string -# Autodetects character encodings. Very useful. -# Download from http://chardet.feedparser.org/ -# or 'apt-get install python-chardet' -# or 'easy_install chardet' +# Import a library to autodetect character encodings. +chardet_type = None try: - import chardet - #import chardet.constants - #chardet.constants._debug = 1 + # First try the fast C implementation. 
+ # PyPI package: cchardet + import cchardet + def chardet_dammit(s): + return cchardet.detect(s)['encoding'] except ImportError: - chardet = None + try: + # Fall back to the pure Python implementation + # Debian package: python-chardet + # PyPI package: chardet + import chardet + def chardet_dammit(s): + return chardet.detect(s)['encoding'] + #import chardet.constants + #chardet.constants._debug = 1 + except ImportError: + # No chardet available. + def chardet_dammit(s): + return None # Available from http://cjkpython.i18n.org/. try: @@ -69,6 +82,8 @@ def _populate_class_variables(): "&(?!#\d+;|#x[0-9a-fA-F]+;|\w+;)" ")") + AMPERSAND_OR_BRACKET = re.compile("([<>&])") + @classmethod def _substitute_html_entity(cls, matchobj): entity = cls.CHARACTER_TO_HTML_ENTITY.get(matchobj.group(0)) @@ -122,6 +137,28 @@ def quoted_attribute_value(self, value): def substitute_xml(cls, value, make_quoted_attribute=False): """Substitute XML entities for special XML characters. + :param value: A string to be substituted. The less-than sign + will become <, the greater-than sign will become >, + and any ampersands will become &. If you want ampersands + that appear to be part of an entity definition to be left + alone, use substitute_xml_containing_entities() instead. + + :param make_quoted_attribute: If True, then the string will be + quoted, as befits an attribute value. + """ + # Escape angle brackets and ampersands. + value = cls.AMPERSAND_OR_BRACKET.sub( + cls._substitute_xml_entity, value) + + if make_quoted_attribute: + value = cls.quoted_attribute_value(value) + return value + + @classmethod + def substitute_xml_containing_entities( + cls, value, make_quoted_attribute=False): + """Substitute XML entities for special XML characters. + :param value: A string to be substituted. 
The less-than sign will become <, the greater-than sign will become >, and any ampersands that are not part of an entity defition will @@ -155,6 +192,125 @@ def substitute_html(cls, s): cls._substitute_html_entity, s) +class EncodingDetector: + """Suggests a number of possible encodings for a bytestring. + + Order of precedence: + + 1. Encodings you specifically tell EncodingDetector to try first + (the override_encodings argument to the constructor). + + 2. An encoding declared within the bytestring itself, either in an + XML declaration (if the bytestring is to be interpreted as an XML + document), or in a tag (if the bytestring is to be + interpreted as an HTML document.) + + 3. An encoding detected through textual analysis by chardet, + cchardet, or a similar external library. + + 4. UTF-8. + + 5. Windows-1252. + """ + def __init__(self, markup, override_encodings=None, is_html=False): + self.override_encodings = override_encodings or [] + self.chardet_encoding = None + self.is_html = is_html + self.declared_encoding = None + + # First order of business: strip a byte-order mark. + self.markup, self.sniffed_encoding = self.strip_byte_order_mark(markup) + + def _usable(self, encoding, tried): + if encoding is not None: + encoding = encoding.lower() + if encoding not in tried: + tried.add(encoding) + return True + return False + + @property + def encodings(self): + """Yield a number of encodings that might work for this markup.""" + tried = set() + for e in self.override_encodings: + if self._usable(e, tried): + yield e + + # Did the document originally start with a byte-order mark + # that indicated its encoding? + if self._usable(self.sniffed_encoding, tried): + yield self.sniffed_encoding + + # Look within the document for an XML or HTML encoding + # declaration. 
+ if self.declared_encoding is None: + self.declared_encoding = self.find_declared_encoding( + self.markup, self.is_html) + if self._usable(self.declared_encoding, tried): + yield self.declared_encoding + + # Use third-party character set detection to guess at the + # encoding. + if self.chardet_encoding is None: + self.chardet_encoding = chardet_dammit(self.markup) + if self._usable(self.chardet_encoding, tried): + yield self.chardet_encoding + + # As a last-ditch effort, try utf-8 and windows-1252. + for e in ('utf-8', 'windows-1252'): + if self._usable(e, tried): + yield e + + @classmethod + def strip_byte_order_mark(cls, data): + """If a byte-order mark is present, strip it and return the encoding it implies.""" + encoding = None + if (len(data) >= 4) and (data[:2] == b'\xfe\xff') \ + and (data[2:4] != '\x00\x00'): + encoding = 'utf-16be' + data = data[2:] + elif (len(data) >= 4) and (data[:2] == b'\xff\xfe') \ + and (data[2:4] != '\x00\x00'): + encoding = 'utf-16le' + data = data[2:] + elif data[:3] == b'\xef\xbb\xbf': + encoding = 'utf-8' + data = data[3:] + elif data[:4] == b'\x00\x00\xfe\xff': + encoding = 'utf-32be' + data = data[4:] + elif data[:4] == b'\xff\xfe\x00\x00': + encoding = 'utf-32le' + data = data[4:] + return data, encoding + + @classmethod + def find_declared_encoding(cls, markup, is_html=False, search_entire_document=False): + """Given a document, tries to find its declared encoding. + + An XML encoding is declared at the beginning of the document. + + An HTML encoding is declared in a tag, hopefully near the + beginning of the document. 
+ """ + if search_entire_document: + xml_endpos = html_endpos = len(markup) + else: + xml_endpos = 1024 + html_endpos = max(2048, int(len(markup) * 0.05)) + + declared_encoding = None + declared_encoding_match = xml_encoding_re.search(markup, endpos=xml_endpos) + if not declared_encoding_match and is_html: + declared_encoding_match = html_meta_re.search(markup, endpos=html_endpos) + if declared_encoding_match is not None: + declared_encoding = declared_encoding_match.groups()[0].decode( + 'ascii') + if declared_encoding: + return declared_encoding.lower() + return None + class UnicodeDammit: """A class for detecting the encoding of a *ML document and converting it to a Unicode string. If the source encoding is @@ -176,65 +332,48 @@ class UnicodeDammit: def __init__(self, markup, override_encodings=[], smart_quotes_to=None, is_html=False): - self.declared_html_encoding = None self.smart_quotes_to = smart_quotes_to self.tried_encodings = [] self.contains_replacement_characters = False + self.is_html = is_html - if markup == '' or isinstance(markup, unicode): + self.detector = EncodingDetector(markup, override_encodings, is_html) + + # Short-circuit if the data is in Unicode to begin with. + if isinstance(markup, unicode) or markup == '': self.markup = markup self.unicode_markup = unicode(markup) self.original_encoding = None return - new_markup, document_encoding, sniffed_encoding = \ - self._detectEncoding(markup, is_html) - self.markup = new_markup + # The encoding detector may have stripped a byte-order mark. + # Use the stripped markup from this point on. + self.markup = self.detector.markup u = None - if new_markup != markup: - # _detectEncoding modified the markup, then converted it to - # Unicode and then to UTF-8. So convert it from UTF-8. 
- u = self._convert_from("utf8") - self.original_encoding = sniffed_encoding + for encoding in self.detector.encodings: + markup = self.detector.markup + u = self._convert_from(encoding) + if u is not None: + break if not u: - for proposed_encoding in ( - override_encodings + [document_encoding, sniffed_encoding]): - if proposed_encoding is not None: - u = self._convert_from(proposed_encoding) - if u: - break + # None of the encodings worked. As an absolute last resort, + # try them again with character replacement. - # If no luck and we have auto-detection library, try that: - if not u and chardet and not isinstance(self.markup, unicode): - u = self._convert_from(chardet.detect(self.markup)['encoding']) - - # As a last resort, try utf-8 and windows-1252: - if not u: - for proposed_encoding in ("utf-8", "windows-1252"): - u = self._convert_from(proposed_encoding) - if u: - break - - # As an absolute last resort, try the encodings again with - # character replacement. - if not u: - for proposed_encoding in ( - override_encodings + [ - document_encoding, sniffed_encoding, "utf-8", "windows-1252"]): - if proposed_encoding != "ascii": - u = self._convert_from(proposed_encoding, "replace") + for encoding in self.detector.encodings: + if encoding != "ascii": + u = self._convert_from(encoding, "replace") if u is not None: - warnings.warn( - UnicodeWarning( + logging.warning( "Some characters could not be decoded, and were " - "replaced with REPLACEMENT CHARACTER.")) + "replaced with REPLACEMENT CHARACTER.") self.contains_replacement_characters = True break - # We could at this point force it to ASCII, but that would - # destroy so much data that I think giving up is better + # If none of that worked, we could at this point force it to + # ASCII, but that would destroy so much data that I think + # giving up is better. 
self.unicode_markup = u if not u: self.original_encoding = None @@ -262,11 +401,10 @@ def _convert_from(self, proposed, errors="strict"): return None self.tried_encodings.append((proposed, errors)) markup = self.markup - # Convert smart quotes to HTML if coming from an encoding # that might have them. if (self.smart_quotes_to is not None - and proposed.lower() in self.ENCODINGS_WITH_SMART_QUOTES): + and proposed in self.ENCODINGS_WITH_SMART_QUOTES): smart_quotes_re = b"([\x80-\x9f])" smart_quotes_compiled = re.compile(smart_quotes_re) markup = smart_quotes_compiled.sub(self._sub_ms_char, markup) @@ -287,99 +425,24 @@ def _convert_from(self, proposed, errors="strict"): def _to_unicode(self, data, encoding, errors="strict"): '''Given a string and its encoding, decodes the string into Unicode. %encoding is a string recognized by encodings.aliases''' + return unicode(data, encoding, errors) - # strip Byte Order Mark (if present) - if (len(data) >= 4) and (data[:2] == '\xfe\xff') \ - and (data[2:4] != '\x00\x00'): - encoding = 'utf-16be' - data = data[2:] - elif (len(data) >= 4) and (data[:2] == '\xff\xfe') \ - and (data[2:4] != '\x00\x00'): - encoding = 'utf-16le' - data = data[2:] - elif data[:3] == '\xef\xbb\xbf': - encoding = 'utf-8' - data = data[3:] - elif data[:4] == '\x00\x00\xfe\xff': - encoding = 'utf-32be' - data = data[4:] - elif data[:4] == '\xff\xfe\x00\x00': - encoding = 'utf-32le' - data = data[4:] - newdata = unicode(data, encoding, errors) - return newdata - - def _detectEncoding(self, xml_data, is_html=False): - """Given a document, tries to detect its XML encoding.""" - xml_encoding = sniffed_xml_encoding = None - try: - if xml_data[:4] == b'\x4c\x6f\xa7\x94': - # EBCDIC - xml_data = self._ebcdic_to_ascii(xml_data) - elif xml_data[:4] == b'\x00\x3c\x00\x3f': - # UTF-16BE - sniffed_xml_encoding = 'utf-16be' - xml_data = unicode(xml_data, 'utf-16be').encode('utf-8') - elif (len(xml_data) >= 4) and (xml_data[:2] == b'\xfe\xff') \ - and (xml_data[2:4] != 
b'\x00\x00'): - # UTF-16BE with BOM - sniffed_xml_encoding = 'utf-16be' - xml_data = unicode(xml_data[2:], 'utf-16be').encode('utf-8') - elif xml_data[:4] == b'\x3c\x00\x3f\x00': - # UTF-16LE - sniffed_xml_encoding = 'utf-16le' - xml_data = unicode(xml_data, 'utf-16le').encode('utf-8') - elif (len(xml_data) >= 4) and (xml_data[:2] == b'\xff\xfe') and \ - (xml_data[2:4] != b'\x00\x00'): - # UTF-16LE with BOM - sniffed_xml_encoding = 'utf-16le' - xml_data = unicode(xml_data[2:], 'utf-16le').encode('utf-8') - elif xml_data[:4] == b'\x00\x00\x00\x3c': - # UTF-32BE - sniffed_xml_encoding = 'utf-32be' - xml_data = unicode(xml_data, 'utf-32be').encode('utf-8') - elif xml_data[:4] == b'\x3c\x00\x00\x00': - # UTF-32LE - sniffed_xml_encoding = 'utf-32le' - xml_data = unicode(xml_data, 'utf-32le').encode('utf-8') - elif xml_data[:4] == b'\x00\x00\xfe\xff': - # UTF-32BE with BOM - sniffed_xml_encoding = 'utf-32be' - xml_data = unicode(xml_data[4:], 'utf-32be').encode('utf-8') - elif xml_data[:4] == b'\xff\xfe\x00\x00': - # UTF-32LE with BOM - sniffed_xml_encoding = 'utf-32le' - xml_data = unicode(xml_data[4:], 'utf-32le').encode('utf-8') - elif xml_data[:3] == b'\xef\xbb\xbf': - # UTF-8 with BOM - sniffed_xml_encoding = 'utf-8' - xml_data = unicode(xml_data[3:], 'utf-8').encode('utf-8') - else: - sniffed_xml_encoding = 'ascii' - pass - except: - xml_encoding_match = None - xml_encoding_match = xml_encoding_re.match(xml_data) - if not xml_encoding_match and is_html: - xml_encoding_match = html_meta_re.search(xml_data) - if xml_encoding_match is not None: - xml_encoding = xml_encoding_match.groups()[0].decode( - 'ascii').lower() - if is_html: - self.declared_html_encoding = xml_encoding - if sniffed_xml_encoding and \ - (xml_encoding in ('iso-10646-ucs-2', 'ucs-2', 'csunicode', - 'iso-10646-ucs-4', 'ucs-4', 'csucs4', - 'utf-16', 'utf-32', 'utf_16', 'utf_32', - 'utf16', 'u16')): - xml_encoding = sniffed_xml_encoding - return xml_data, xml_encoding, sniffed_xml_encoding + 
@property + def declared_html_encoding(self): + if not self.is_html: + return None + return self.detector.declared_encoding def find_codec(self, charset): - return self._codec(self.CHARSET_ALIASES.get(charset, charset)) \ - or (charset and self._codec(charset.replace("-", ""))) \ - or (charset and self._codec(charset.replace("-", "_"))) \ + value = (self._codec(self.CHARSET_ALIASES.get(charset, charset)) + or (charset and self._codec(charset.replace("-", ""))) + or (charset and self._codec(charset.replace("-", "_"))) + or (charset and charset.lower()) or charset + ) + if value: + return value.lower() + return None def _codec(self, charset): if not charset: @@ -392,32 +455,6 @@ def _codec(self, charset): pass return codec - EBCDIC_TO_ASCII_MAP = None - - def _ebcdic_to_ascii(self, s): - c = self.__class__ - if not c.EBCDIC_TO_ASCII_MAP: - emap = (0,1,2,3,156,9,134,127,151,141,142,11,12,13,14,15, - 16,17,18,19,157,133,8,135,24,25,146,143,28,29,30,31, - 128,129,130,131,132,10,23,27,136,137,138,139,140,5,6,7, - 144,145,22,147,148,149,150,4,152,153,154,155,20,21,158,26, - 32,160,161,162,163,164,165,166,167,168,91,46,60,40,43,33, - 38,169,170,171,172,173,174,175,176,177,93,36,42,41,59,94, - 45,47,178,179,180,181,182,183,184,185,124,44,37,95,62,63, - 186,187,188,189,190,191,192,193,194,96,58,35,64,39,61,34, - 195,97,98,99,100,101,102,103,104,105,196,197,198,199,200, - 201,202,106,107,108,109,110,111,112,113,114,203,204,205, - 206,207,208,209,126,115,116,117,118,119,120,121,122,210, - 211,212,213,214,215,216,217,218,219,220,221,222,223,224, - 225,226,227,228,229,230,231,123,65,66,67,68,69,70,71,72, - 73,232,233,234,235,236,237,125,74,75,76,77,78,79,80,81, - 82,238,239,240,241,242,243,92,159,83,84,85,86,87,88,89, - 90,244,245,246,247,248,249,48,49,50,51,52,53,54,55,56,57, - 250,251,252,253,254,255) - import string - c.EBCDIC_TO_ASCII_MAP = string.maketrans( - ''.join(map(chr, list(range(256)))), ''.join(map(chr, emap))) - return s.translate(c.EBCDIC_TO_ASCII_MAP) # A 
partial mapping of ISO-Latin-1 to HTML entities/XML numeric entities. MS_CHARS = {b'\x80': ('euro', '20AC'), diff --git a/libs/bs4/diagnose.py b/libs/bs4/diagnose.py new file mode 100644 index 0000000000..4d0b00afad --- /dev/null +++ b/libs/bs4/diagnose.py @@ -0,0 +1,204 @@ +"""Diagnostic functions, mainly for use when doing tech support.""" +import cProfile +from StringIO import StringIO +from HTMLParser import HTMLParser +import bs4 +from bs4 import BeautifulSoup, __version__ +from bs4.builder import builder_registry + +import os +import pstats +import random +import tempfile +import time +import traceback +import sys +import cProfile + +def diagnose(data): + """Diagnostic suite for isolating common problems.""" + print "Diagnostic running on Beautiful Soup %s" % __version__ + print "Python version %s" % sys.version + + basic_parsers = ["html.parser", "html5lib", "lxml"] + for name in basic_parsers: + for builder in builder_registry.builders: + if name in builder.features: + break + else: + basic_parsers.remove(name) + print ( + "I noticed that %s is not installed. Installing it may help." % + name) + + if 'lxml' in basic_parsers: + basic_parsers.append(["lxml", "xml"]) + from lxml import etree + print "Found lxml version %s" % ".".join(map(str,etree.LXML_VERSION)) + + if 'html5lib' in basic_parsers: + import html5lib + print "Found html5lib version %s" % html5lib.__version__ + + if hasattr(data, 'read'): + data = data.read() + elif os.path.exists(data): + print '"%s" looks like a filename. Reading data from the file.' % data + data = open(data).read() + elif data.startswith("http:") or data.startswith("https:"): + print '"%s" looks like a URL. Beautiful Soup is not an HTTP client.' % data + print "You need to use some other library to get the document behind the URL, and feed that document to Beautiful Soup." 
+ return + print + + for parser in basic_parsers: + print "Trying to parse your markup with %s" % parser + success = False + try: + soup = BeautifulSoup(data, parser) + success = True + except Exception, e: + print "%s could not parse the markup." % parser + traceback.print_exc() + if success: + print "Here's what %s did with the markup:" % parser + print soup.prettify() + + print "-" * 80 + +def lxml_trace(data, html=True, **kwargs): + """Print out the lxml events that occur during parsing. + + This lets you see how lxml parses a document when no Beautiful + Soup code is running. + """ + from lxml import etree + for event, element in etree.iterparse(StringIO(data), html=html, **kwargs): + print("%s, %4s, %s" % (event, element.tag, element.text)) + +class AnnouncingParser(HTMLParser): + """Announces HTMLParser parse events, without doing anything else.""" + + def _p(self, s): + print(s) + + def handle_starttag(self, name, attrs): + self._p("%s START" % name) + + def handle_endtag(self, name): + self._p("%s END" % name) + + def handle_data(self, data): + self._p("%s DATA" % data) + + def handle_charref(self, name): + self._p("%s CHARREF" % name) + + def handle_entityref(self, name): + self._p("%s ENTITYREF" % name) + + def handle_comment(self, data): + self._p("%s COMMENT" % data) + + def handle_decl(self, data): + self._p("%s DECL" % data) + + def unknown_decl(self, data): + self._p("%s UNKNOWN-DECL" % data) + + def handle_pi(self, data): + self._p("%s PI" % data) + +def htmlparser_trace(data): + """Print out the HTMLParser events that occur during parsing. + + This lets you see how HTMLParser parses a document when no + Beautiful Soup code is running. + """ + parser = AnnouncingParser() + parser.feed(data) + +_vowels = "aeiou" +_consonants = "bcdfghjklmnpqrstvwxyz" + +def rword(length=5): + "Generate a random word-like string." 
+ s = '' + for i in range(length): + if i % 2 == 0: + t = _consonants + else: + t = _vowels + s += random.choice(t) + return s + +def rsentence(length=4): + "Generate a random sentence-like string." + return " ".join(rword(random.randint(4,9)) for i in range(length)) + +def rdoc(num_elements=1000): + """Randomly generate an invalid HTML document.""" + tag_names = ['p', 'div', 'span', 'i', 'b', 'script', 'table'] + elements = [] + for i in range(num_elements): + choice = random.randint(0,3) + if choice == 0: + # New tag. + tag_name = random.choice(tag_names) + elements.append("<%s>" % tag_name) + elif choice == 1: + elements.append(rsentence(random.randint(1,4))) + elif choice == 2: + # Close a tag. + tag_name = random.choice(tag_names) + elements.append("" % tag_name) + return "" + "\n".join(elements) + "" + +def benchmark_parsers(num_elements=100000): + """Very basic head-to-head performance benchmark.""" + print "Comparative parser benchmark on Beautiful Soup %s" % __version__ + data = rdoc(num_elements) + print "Generated a large invalid HTML document (%d bytes)." % len(data) + + for parser in ["lxml", ["lxml", "html"], "html5lib", "html.parser"]: + success = False + try: + a = time.time() + soup = BeautifulSoup(data, parser) + b = time.time() + success = True + except Exception, e: + print "%s could not parse the markup." % parser + traceback.print_exc() + if success: + print "BS4+%s parsed the markup in %.2fs." % (parser, b-a) + + from lxml import etree + a = time.time() + etree.HTML(data) + b = time.time() + print "Raw lxml parsed the markup in %.2fs." % (b-a) + + import html5lib + parser = html5lib.HTMLParser() + a = time.time() + parser.parse(data) + b = time.time() + print "Raw html5lib parsed the markup in %.2fs." 
% (b-a) + +def profile(num_elements=100000, parser="lxml"): + + filehandle = tempfile.NamedTemporaryFile() + filename = filehandle.name + + data = rdoc(num_elements) + vars = dict(bs4=bs4, data=data, parser=parser) + cProfile.runctx('bs4.BeautifulSoup(data, parser)' , vars, vars, filename) + + stats = pstats.Stats(filename) + # stats.strip_dirs() + stats.sort_stats("cumulative") + stats.print_stats('_html5lib|bs4', 50) + +if __name__ == '__main__': + diagnose(sys.stdin.read()) diff --git a/libs/bs4/element.py b/libs/bs4/element.py index 91a4007844..da9afdf48e 100644 --- a/libs/bs4/element.py +++ b/libs/bs4/element.py @@ -26,6 +26,9 @@ class NamespacedAttribute(unicode): def __new__(cls, prefix, name, namespace=None): if name is None: obj = unicode.__new__(cls, prefix) + elif prefix is None: + # Not really namespaced. + obj = unicode.__new__(cls, name) else: obj = unicode.__new__(cls, prefix + ":" + name) obj.prefix = prefix @@ -78,6 +81,40 @@ def rewrite(match): return match.group(1) + encoding return self.CHARSET_RE.sub(rewrite, self.original_value) +class HTMLAwareEntitySubstitution(EntitySubstitution): + + """Entity substitution rules that are aware of some HTML quirks. + + Specifically, the contents of +""" + soup = BeautifulSoup(doc, "xml") + # lxml would have stripped this while parsing, but we can add + # it later. + soup.script.string = 'console.log("< < hey > > ");' + encoded = soup.encode() + self.assertTrue(b"< < hey > >" in encoded) + + def test_can_parse_unicode_document(self): + markup = u'Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!' 
+ soup = self.soup(markup) + self.assertEqual(u'Sacr\xe9 bleu!', soup.root.string) + + def test_popping_namespaced_tag(self): + markup = 'b2012-07-02T20:33:42Zcd' + soup = self.soup(markup) + self.assertEqual( + unicode(soup.rss), markup) def test_docstring_includes_correct_encoding(self): soup = self.soup("") @@ -472,6 +529,20 @@ def test_namespaces_are_preserved(self): self.assertEqual("http://example.com/", root['xmlns:a']) self.assertEqual("http://example.net/", root['xmlns:b']) + def test_closing_namespaced_tag(self): + markup = '

    20010504

    ' + soup = self.soup(markup) + self.assertEqual(unicode(soup.p), markup) + + def test_namespaced_attributes(self): + markup = '' + soup = self.soup(markup) + self.assertEqual(unicode(soup.foo), markup) + + def test_namespaced_attributes_xml_namespace(self): + markup = 'bar' + soup = self.soup(markup) + self.assertEqual(unicode(soup.foo), markup) class HTML5TreeBuilderSmokeTest(HTMLTreeBuilderSmokeTest): """Smoke test for a tree builder that supports HTML5.""" @@ -501,6 +572,12 @@ def test_mathml_tags_have_namespace(self): self.assertEqual(namespace, soup.math.namespace) self.assertEqual(namespace, soup.msqrt.namespace) + def test_xml_declaration_becomes_comment(self): + markup = '' + soup = self.soup(markup) + self.assertTrue(isinstance(soup.contents[0], Comment)) + self.assertEqual(soup.contents[0], '?xml version="1.0" encoding="utf-8"?') + self.assertEqual("html", soup.contents[0].next_element.name) def skipIf(condition, reason): def nothing(test, *args, **kwargs): diff --git a/libs/cache/__init__.py b/libs/cache/__init__.py new file mode 100644 index 0000000000..f105dfcd02 --- /dev/null +++ b/libs/cache/__init__.py @@ -0,0 +1,262 @@ +""" + copied from + werkzeug.contrib.cache + ~~~~~~~~~~~~~~~~~~~~~~ + + :copyright: (c) 2011 by the Werkzeug Team, see AUTHORS for more details. + :license: BSD, see LICENSE for more details. +""" +from cache.posixemulation import rename +from itertools import izip +from time import time +import os +import re +import tempfile +try: + from hashlib import md5 +except ImportError: + from md5 import new as md5 + +try: + import cPickle as pickle +except ImportError: + import pickle + + +def _items(mappingorseq): + """Wrapper for efficient iteration over mappings represented by dicts + or sequences:: + + >>> for k, v in _items((i, i*i) for i in xrange(5)): + ... assert k*k == v + + >>> for k, v in _items(dict((i, i*i) for i in xrange(5))): + ... 
assert k*k == v + + """ + return mappingorseq.iteritems() if hasattr(mappingorseq, 'iteritems') \ + else mappingorseq + + +class BaseCache(object): + """Baseclass for the cache systems. All the cache systems implement this + API or a superset of it. + + :param default_timeout: the default timeout that is used if no timeout is + specified on :meth:`set`. + """ + + def __init__(self, default_timeout = 300): + self.default_timeout = default_timeout + + def delete(self, key): + """Deletes `key` from the cache. If it does not exist in the cache + nothing happens. + + :param key: the key to delete. + """ + pass + + def get_many(self, *keys): + """Returns a list of values for the given keys. + For each key a item in the list is created. Example:: + + foo, bar = cache.get_many("foo", "bar") + + If a key can't be looked up `None` is returned for that key + instead. + + :param keys: The function accepts multiple keys as positional + arguments. + """ + return map(self.get, keys) + + def get_dict(self, *keys): + """Works like :meth:`get_many` but returns a dict:: + + d = cache.get_dict("foo", "bar") + foo = d["foo"] + bar = d["bar"] + + :param keys: The function accepts multiple keys as positional + arguments. + """ + return dict(izip(keys, self.get_many(*keys))) + + def set(self, key, value, timeout = None): + """Adds a new key/value to the cache (overwrites value, if key already + exists in the cache). + + :param key: the key to set + :param value: the value for the key + :param timeout: the cache timeout for the key (if not specified, + it uses the default timeout). + """ + pass + + def add(self, key, value, timeout = None): + """Works like :meth:`set` but does not overwrite the values of already + existing keys. + + :param key: the key to set + :param value: the value for the key + :param timeout: the cache timeout for the key or the default + timeout if not specified. 
+ """ + pass + + def set_many(self, mapping, timeout = None): + """Sets multiple keys and values from a mapping. + + :param mapping: a mapping with the keys/values to set. + :param timeout: the cache timeout for the key (if not specified, + it uses the default timeout). + """ + for key, value in _items(mapping): + self.set(key, value, timeout) + + def delete_many(self, *keys): + """Deletes multiple keys at once. + + :param keys: The function accepts multiple keys as positional + arguments. + """ + for key in keys: + self.delete(key) + + def clear(self): + """Clears the cache. Keep in mind that not all caches support + completely clearing the cache. + """ + pass + + def inc(self, key, delta = 1): + """Increments the value of a key by `delta`. If the key does + not yet exist it is initialized with `delta`. + + For supporting caches this is an atomic operation. + + :param key: the key to increment. + :param delta: the delta to add. + """ + self.set(key, (self.get(key) or 0) + delta) + + def dec(self, key, delta = 1): + """Decrements the value of a key by `delta`. If the key does + not yet exist it is initialized with `-delta`. + + For supporting caches this is an atomic operation. + + :param key: the key to increment. + :param delta: the delta to subtract. + """ + self.set(key, (self.get(key) or 0) - delta) + + +class FileSystemCache(BaseCache): + """A cache that stores the items on the file system. This cache depends + on being the only user of the `cache_dir`. Make absolutely sure that + nobody but this cache stores files there or otherwise the cache will + randomly delete files therein. + + :param cache_dir: the directory where cache files are stored. + :param threshold: the maximum number of items the cache stores before + it starts deleting some. + :param default_timeout: the default timeout that is used if no timeout is + specified on :meth:`~BaseCache.set`. 
+ :param mode: the file mode wanted for the cache files, default 0600 + """ + + #: used for temporary files by the FileSystemCache + _fs_transaction_suffix = '.__wz_cache' + + def __init__(self, cache_dir, threshold = 500, default_timeout = 300, mode = 0600): + BaseCache.__init__(self, default_timeout) + self._path = cache_dir + self._threshold = threshold + self._mode = mode + if not os.path.exists(self._path): + os.makedirs(self._path) + + def _list_dir(self): + """return a list of (fully qualified) cache filenames + """ + return [os.path.join(self._path, fn) for fn in os.listdir(self._path) + if not fn.endswith(self._fs_transaction_suffix)] + + def _prune(self): + entries = self._list_dir() + if len(entries) > self._threshold: + now = time() + for idx, fname in enumerate(entries): + remove = False + f = None + try: + try: + f = open(fname, 'rb') + expires = pickle.load(f) + remove = expires <= now or idx % 3 == 0 + finally: + if f is not None: + f.close() + except Exception: + pass + if remove: + try: + os.remove(fname) + except (IOError, OSError): + pass + + def clear(self): + for fname in self._list_dir(): + try: + os.remove(fname) + except (IOError, OSError): + pass + + def _get_filename(self, key): + hash = md5(key).hexdigest() + return os.path.join(self._path, hash) + + def get(self, key): + filename = self._get_filename(key) + try: + f = open(filename, 'rb') + try: + if pickle.load(f) >= time(): + return pickle.load(f) + finally: + f.close() + os.remove(filename) + except Exception: + return None + + def add(self, key, value, timeout = None): + filename = self._get_filename(key) + if not os.path.exists(filename): + self.set(key, value, timeout) + + def set(self, key, value, timeout = None): + if timeout is None: + timeout = self.default_timeout + filename = self._get_filename(key) + self._prune() + try: + fd, tmp = tempfile.mkstemp(suffix = self._fs_transaction_suffix, + dir = self._path) + f = os.fdopen(fd, 'wb') + try: + pickle.dump(int(time() + 
timeout), f, 1) + pickle.dump(value, f, pickle.HIGHEST_PROTOCOL) + finally: + f.close() + rename(tmp, filename) + os.chmod(filename, self._mode) + except (IOError, OSError): + pass + + def delete(self, key): + try: + os.remove(self._get_filename(key)) + except (IOError, OSError): + pass diff --git a/libs/werkzeug/posixemulation.py b/libs/cache/posixemulation.py similarity index 100% rename from libs/werkzeug/posixemulation.py rename to libs/cache/posixemulation.py diff --git a/libs/caper/__init__.py b/libs/caper/__init__.py new file mode 100644 index 0000000000..95fb6d73f8 --- /dev/null +++ b/libs/caper/__init__.py @@ -0,0 +1,195 @@ +# Copyright 2013 Dean Gardiner +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from logr import Logr +from caper.matcher import FragmentMatcher +from caper.objects import CaperFragment, CaperClosure +from caper.parsers.anime import AnimeParser +from caper.parsers.scene import SceneParser +from caper.parsers.usenet import UsenetParser + + +__version_info__ = ('0', '3', '1') +__version_branch__ = 'master' + +__version__ = "%s%s" % ( + '.'.join(__version_info__), + '-' + __version_branch__ if __version_branch__ else '' +) + + +CL_START_CHARS = ['(', '[', '<', '>'] +CL_END_CHARS = [')', ']', '<', '>'] +CL_END_STRINGS = [' - '] + +STRIP_START_CHARS = ''.join(CL_START_CHARS) +STRIP_END_CHARS = ''.join(CL_END_CHARS) +STRIP_CHARS = ''.join(['_', ' ', '.']) + +FRAGMENT_SEPARATORS = ['.', '-', '_', ' '] + + +CL_START = 0 +CL_END = 1 + + +class Caper(object): + def __init__(self, debug=False): + self.debug = debug + + self.parsers = { + 'anime': AnimeParser, + 'scene': SceneParser, + 'usenet': UsenetParser + } + + def _closure_split(self, name): + """ + :type name: str + + :rtype: list of CaperClosure + """ + + closures = [] + + def end_closure(closures, buf): + buf = buf.strip(STRIP_CHARS) + if len(buf) < 2: + return + + cur = CaperClosure(len(closures), buf) + cur.left = closures[len(closures) - 1] if len(closures) > 0 else None + + if cur.left: + cur.left.right = cur + + closures.append(cur) + + state = CL_START + buf = "" + for x, ch in enumerate(name): + # Check for start characters + if state == CL_START and ch in CL_START_CHARS: + end_closure(closures, buf) + + state = CL_END + buf = "" + + buf += ch + + if state == CL_END and ch in CL_END_CHARS: + # End character found, create the closure + end_closure(closures, buf) + + state = CL_START + buf = "" + elif state == CL_START and buf[-3:] in CL_END_STRINGS: + # End string found, create the closure + end_closure(closures, buf[:-3]) + + state = CL_START + buf = "" + + end_closure(closures, buf) + + return closures + + def _clean_closure(self, closure): + """ + :type closure: str + + :rtype: str + 
""" + + return closure.lstrip(STRIP_START_CHARS).rstrip(STRIP_END_CHARS) + + def _fragment_split(self, closures): + """ + :type closures: list of CaperClosure + + :rtype: list of CaperClosure + """ + + cur_position = 0 + cur = None + + def end_fragment(fragments, cur, cur_position): + cur.position = cur_position + + cur.left = fragments[len(fragments) - 1] if len(fragments) > 0 else None + if cur.left: + cur.left_sep = cur.left.right_sep + cur.left.right = cur + + cur.right_sep = ch + + fragments.append(cur) + + for closure in closures: + closure.fragments = [] + + separator_buffer = "" + + for x, ch in enumerate(self._clean_closure(closure.value)): + if not cur: + cur = CaperFragment(closure) + + if ch in FRAGMENT_SEPARATORS: + if cur.value: + separator_buffer = "" + + separator_buffer += ch + + if cur.value or not closure.fragments: + end_fragment(closure.fragments, cur, cur_position) + elif len(separator_buffer) > 1: + cur.value = separator_buffer.strip() + + if cur.value: + end_fragment(closure.fragments, cur, cur_position) + + separator_buffer = "" + + # Reset + cur = None + cur_position += 1 + else: + cur.value += ch + + # Finish parsing the last fragment + if cur and cur.value: + end_fragment(closure.fragments, cur, cur_position) + + # Reset + cur_position = 0 + cur = None + + return closures + + def parse(self, name, parser='scene'): + closures = self._closure_split(name) + closures = self._fragment_split(closures) + + # Print closures + for closure in closures: + Logr.debug("closure [%s]", closure.value) + + for fragment in closure.fragments: + Logr.debug("\tfragment [%s]", fragment.value) + + if parser not in self.parsers: + raise ValueError("Unknown parser") + + # TODO autodetect the parser type + return self.parsers[parser](self.debug).run(closures) diff --git a/libs/caper/constraint.py b/libs/caper/constraint.py new file mode 100644 index 0000000000..e092d33d7d --- /dev/null +++ b/libs/caper/constraint.py @@ -0,0 +1,134 @@ +# Copyright 2013 Dean 
Gardiner +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +class CaptureConstraint(object): + def __init__(self, capture_group, constraint_type, comparisons=None, target=None, **kwargs): + """Capture constraint object + + :type capture_group: CaptureGroup + """ + + self.capture_group = capture_group + + self.constraint_type = constraint_type + self.target = target + + self.comparisons = comparisons if comparisons else [] + self.kwargs = {} + + for orig_key, value in kwargs.items(): + key = orig_key.split('__') + if len(key) != 2: + self.kwargs[orig_key] = value + continue + name, method = key + + method = 'constraint_match_' + method + if not hasattr(self, method): + self.kwargs[orig_key] = value + continue + + self.comparisons.append((name, getattr(self, method), value)) + + def execute(self, parent_node, node, **kwargs): + func_name = 'constraint_%s' % self.constraint_type + + if hasattr(self, func_name): + return getattr(self, func_name)(parent_node, node, **kwargs) + + raise ValueError('Unknown constraint type "%s"' % self.constraint_type) + + # + # Node Matching + # + + def constraint_match(self, parent_node, node): + results = [] + total_weight = 0 + + for name, method, argument in self.comparisons: + weight, success = method(node, name, argument) + total_weight += weight + results.append(success) + + return total_weight / (float(len(results)) or 1), all(results) if len(results) > 0 else False + + def constraint_match_eq(self, node, name, expected): + if not 
hasattr(node, name): + return 1.0, False + + return 1.0, getattr(node, name) == expected + + def constraint_match_re(self, node, name, arg): + # Node match + if name == 'node': + group, minimum_weight = arg if type(arg) is tuple and len(arg) > 1 else (arg, 0) + + weight, match, num_fragments = self.capture_group.parser.matcher.fragment_match(node, group) + return weight, weight > minimum_weight + + # Regex match + if type(arg).__name__ == 'SRE_Pattern': + return 1.0, arg.match(getattr(node, name)) is not None + + # Value match + if hasattr(node, name): + match = self.capture_group.parser.matcher.value_match(getattr(node, name), arg, single=True) + return 1.0, match is not None + + raise ValueError("Unknown constraint match type '%s'" % name) + + # + # Result + # + + def constraint_result(self, parent_node, fragment): + ctag = self.kwargs.get('tag') + if not ctag: + return 0, False + + ckey = self.kwargs.get('key') + + for tag, result in parent_node.captured(): + if tag != ctag: + continue + + if not ckey or ckey in result.keys(): + return 1.0, True + + return 0.0, False + + # + # Failure + # + + def constraint_failure(self, parent_node, fragment, match): + if not match or not match.success: + return 1.0, True + + return 0, False + + # + # Success + # + + def constraint_success(self, parent_node, fragment, match): + if match and match.success: + return 1.0, True + + return 0, False + + def __repr__(self): + return "CaptureConstraint(comparisons=%s)" % repr(self.comparisons) diff --git a/libs/caper/group.py b/libs/caper/group.py new file mode 100644 index 0000000000..8f0399ef3f --- /dev/null +++ b/libs/caper/group.py @@ -0,0 +1,284 @@ +# Copyright 2013 Dean Gardiner +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from logr import Logr +from caper import CaperClosure, CaperFragment +from caper.helpers import clean_dict +from caper.result import CaperFragmentNode, CaperClosureNode +from caper.step import CaptureStep +from caper.constraint import CaptureConstraint + + +class CaptureGroup(object): + def __init__(self, parser, result): + """Capture group object + + :type parser: caper.parsers.base.Parser + :type result: caper.result.CaperResult + """ + + self.parser = parser + self.result = result + + #: @type: list of CaptureStep + self.steps = [] + + #: type: str + self.step_source = None + + #: @type: list of CaptureConstraint + self.pre_constraints = [] + + #: :type: list of CaptureConstraint + self.post_constraints = [] + + def capture_fragment(self, tag, regex=None, func=None, single=True, **kwargs): + Logr.debug('capture_fragment("%s", "%s", %s, %s)', tag, regex, func, single) + + if self.step_source != 'fragment': + if self.step_source is None: + self.step_source = 'fragment' + else: + raise ValueError("Unable to mix fragment and closure capturing in a group") + + self.steps.append(CaptureStep( + self, tag, + 'fragment', + regex=regex, + func=func, + single=single, + **kwargs + )) + + return self + + def capture_closure(self, tag, regex=None, func=None, single=True, **kwargs): + Logr.debug('capture_closure("%s", "%s", %s, %s)', tag, regex, func, single) + + if self.step_source != 'closure': + if self.step_source is None: + self.step_source = 'closure' + else: + raise ValueError("Unable to mix fragment and closure capturing in a group") + + 
self.steps.append(CaptureStep( + self, tag, + 'closure', + regex=regex, + func=func, + single=single, + **kwargs + )) + + return self + + def until_closure(self, **kwargs): + self.pre_constraints.append(CaptureConstraint(self, 'match', target='closure', **kwargs)) + + return self + + def until_fragment(self, **kwargs): + self.pre_constraints.append(CaptureConstraint(self, 'match', target='fragment', **kwargs)) + + return self + + def until_result(self, **kwargs): + self.pre_constraints.append(CaptureConstraint(self, 'result', **kwargs)) + + return self + + def until_failure(self, **kwargs): + self.post_constraints.append(CaptureConstraint(self, 'failure', **kwargs)) + + return self + + def until_success(self, **kwargs): + self.post_constraints.append(CaptureConstraint(self, 'success', **kwargs)) + + return self + + def parse_subject(self, parent_head, subject): + Logr.debug("parse_subject (%s) subject: %s", self.step_source, repr(subject)) + + if type(subject) is CaperClosure: + return self.parse_closure(parent_head, subject) + + if type(subject) is CaperFragment: + return self.parse_fragment(parent_head, subject) + + raise ValueError('Unknown subject (%s)', subject) + + def parse_fragment(self, parent_head, subject): + parent_node = parent_head[0] if type(parent_head) is list else parent_head + + nodes, match = self.match(parent_head, parent_node, subject) + + # Capturing broke on constraint, return now + if not match: + return nodes + + Logr.debug('created fragment node with subject.value: "%s"' % subject.value) + + result = [CaperFragmentNode( + parent_node.closure, + subject.take_right(match.num_fragments), + parent_head, + match + )] + + # Branch if the match was indefinite (weight below 1.0) + if match.result and match.weight < 1.0: + if match.num_fragments == 1: + result.append(CaperFragmentNode(parent_node.closure, [subject], parent_head)) + else: + nodes.append(CaperFragmentNode(parent_node.closure, [subject], parent_head)) + + nodes.append(result[0] if 
len(result) == 1 else result) + + return nodes + + def parse_closure(self, parent_head, subject): + parent_node = parent_head[0] if type(parent_head) is list else parent_head + + nodes, match = self.match(parent_head, parent_node, subject) + + # Capturing broke on constraint, return now + if not match: + return nodes + + Logr.debug('created closure node with subject.value: "%s"' % subject.value) + + result = [CaperClosureNode( + subject, + parent_head, + match + )] + + # Branch if the match was indefinite (weight below 1.0) + if match.result and match.weight < 1.0: + if match.num_fragments == 1: + result.append(CaperClosureNode(subject, parent_head)) + else: + nodes.append(CaperClosureNode(subject, parent_head)) + + nodes.append(result[0] if len(result) == 1 else result) + + return nodes + + def match(self, parent_head, parent_node, subject): + nodes = [] + + # Check pre constaints + broke, definite = self.check_constraints(self.pre_constraints, parent_head, subject) + + if broke: + nodes.append(parent_head) + + if definite: + return nodes, None + + # Try match subject against the steps available + match = None + + for step in self.steps: + if step.source == 'closure' and type(subject) is not CaperClosure: + pass + elif step.source == 'fragment' and type(subject) is CaperClosure: + Logr.debug('Closure encountered on fragment step, jumping into fragments') + return [CaperClosureNode(subject, parent_head, None)], None + + match = step.execute(subject) + + if match.success: + if type(match.result) is dict: + match.result = clean_dict(match.result) + + Logr.debug('Found match with weight %s, match: %s, num_fragments: %s' % ( + match.weight, match.result, match.num_fragments + )) + + step.matched = True + + break + + if all([step.single and step.matched for step in self.steps]): + Logr.debug('All steps completed, group finished') + parent_node.finished_groups.append(self) + return nodes, match + + # Check post constraints + broke, definite = 
self.check_constraints(self.post_constraints, parent_head, subject, match=match) + if broke: + return nodes, None + + return nodes, match + + def check_constraints(self, constraints, parent_head, subject, **kwargs): + parent_node = parent_head[0] if type(parent_head) is list else parent_head + + # Check constraints + for constraint in [c for c in constraints if c.target == subject.__key__ or not c.target]: + Logr.debug("Testing constraint %s against subject %s", repr(constraint), repr(subject)) + + weight, success = constraint.execute(parent_node, subject, **kwargs) + + if success: + Logr.debug('capturing broke on "%s" at %s', subject.value, constraint) + parent_node.finished_groups.append(self) + + return True, weight == 1.0 + + return False, None + + def execute(self): + heads_finished = None + + while heads_finished is None or not (len(heads_finished) == len(self.result.heads) and all(heads_finished)): + heads_finished = [] + + heads = self.result.heads + self.result.heads = [] + + for head in heads: + node = head[0] if type(head) is list else head + + if self in node.finished_groups: + Logr.debug("head finished for group") + self.result.heads.append(head) + heads_finished.append(True) + continue + + Logr.debug('') + + Logr.debug(node) + + next_subject = node.next() + + Logr.debug('----------[%s] (%s)----------' % (next_subject, repr(next_subject.value) if next_subject else None)) + + if next_subject: + for node_result in self.parse_subject(head, next_subject): + self.result.heads.append(node_result) + + Logr.debug('Heads: %s', self.result.heads) + + heads_finished.append(self in node.finished_groups or next_subject is None) + + if len(self.result.heads) == 0: + self.result.heads = heads + + Logr.debug("heads_finished: %s, self.result.heads: %s", heads_finished, self.result.heads) + + Logr.debug("group finished") diff --git a/libs/caper/helpers.py b/libs/caper/helpers.py new file mode 100644 index 0000000000..ded5d482d0 --- /dev/null +++ b/libs/caper/helpers.py 
# Copyright 2013 Dean Gardiner
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import sys


# Python major-version flags used to select py2/py3 built-ins below.
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3


def is_list_type(obj, element_type):
    """Return True if ``obj`` is a list whose elements are of ``element_type``.

    Only the first element is inspected -- a homogeneous list is assumed.

    :raises ValueError: when ``obj`` is an empty list (element type unknowable)
    """
    if not type(obj) is list:
        return False

    if len(obj) < 1:
        raise ValueError("Unable to determine list element type from empty list")

    return type(obj[0]) is element_type


def clean_dict(target, remove=None):
    """Recursively remove items matching the value ``remove`` from the dictionary.

    Nested dicts are cleaned in place but never removed themselves, even when
    they end up empty.

    :type target: dict
    :return: ``target`` (mutated in place), for chaining
    :raises ValueError: when ``target`` is not a dict
    """
    if type(target) is not dict:
        raise ValueError("Target is required to be a dict")

    # Collect first, pop after -- mutating while iterating is an error.
    remove_keys = []
    for key in target.keys():
        if type(target[key]) is not dict:
            if target[key] == remove:
                remove_keys.append(key)
        else:
            clean_dict(target[key], remove)

    for key in remove_keys:
        target.pop(key)

    return target


def update_dict(a, b):
    """Merge ``b`` into ``a`` in place, accumulating rather than overwriting.

    New keys are copied; nested dicts are merged recursively; when a key
    collides, existing lists are appended to and scalars are promoted to a
    two-element list.  Returns None (mutates ``a``).
    """
    for key, value in b.items():
        if key not in a:
            a[key] = value
        elif isinstance(a[key], dict) and isinstance(value, dict):
            update_dict(a[key], value)
        elif isinstance(a[key], list):
            a[key].append(value)
        else:
            a[key] = [a[key], value]


def xrange_six(start, stop=None, step=None):
    """Version-portable lazy range: ``xrange`` on py2, ``range`` on py3.

    Mirrors the built-in signature: ``xrange_six(stop)``,
    ``xrange_six(start, stop)`` or ``xrange_six(start, stop, step)``.

    BUGFIX: the original only honoured ``stop``/``step`` when BOTH were
    supplied, so ``xrange_six(1, 5)`` silently returned ``range(1)``.
    """
    if PY3:
        rng = range
    else:
        # Only evaluated on py2, where xrange exists.
        rng = xrange

    if stop is None:
        return rng(start)
    if step is None:
        return rng(start, stop)
    return rng(start, stop, step)


def delta_seconds(td):
    """Return the total seconds in timedelta ``td`` as a float.

    Manual formula kept (instead of ``td.total_seconds()``) for compatibility
    with pre-2.7 interpreters this code historically supported.
    """
    return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 1e6) / 1e6
# Copyright 2013 Dean Gardiner
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from caper.helpers import is_list_type, update_dict, delta_seconds
from datetime import datetime
from logr import Logr
import re


class FragmentMatcher(object):
    """Compiles parser pattern-group definitions and matches fragments against them.

    ``self.regex`` maps group name -> list of ``(weight, weight_patterns)``
    where each entry of ``weight_patterns`` is a tuple of compiled regexes,
    one per consecutive fragment the pattern should span.
    """

    def __init__(self, pattern_groups):
        self.regex = {}

        self.construct_patterns(pattern_groups)

    def construct_patterns(self, pattern_groups):
        """Normalize and compile ``pattern_groups``.

        Accepted shapes per group are normalized in two steps:
        1. a bare pattern list becomes a single weight group ``[(1.0, patterns)]``
        2. each pattern becomes a tuple of per-fragment specs; a spec is either
           a plain regex string or an OR-list pair ``(template, [alternatives])``
           expanded via ``template % '|'.join(alternatives)``.
        All regexes are compiled case-insensitively.
        """
        compile_start = datetime.now()
        compile_count = 0

        for group_name, patterns in pattern_groups:
            if group_name not in self.regex:
                self.regex[group_name] = []

            # Transform into weight groups (bare lists get weight 1.0).
            if type(patterns[0]) is str or type(patterns[0][0]) not in [int, float]:
                patterns = [(1.0, patterns)]

            # NOTE(review): inner loop deliberately re-binds ``patterns`` -- the
            # outer value is no longer needed at this point.
            for weight, patterns in patterns:
                weight_patterns = []

                for pattern in patterns:
                    # Transform into multi-fragment patterns (single string
                    # means "match one fragment").
                    if type(pattern) is str:
                        pattern = (pattern,)

                    # A 2-tuple of (template, list-of-str) is ONE OR-list spec,
                    # not two fragment specs -- wrap it.
                    if type(pattern) is tuple and len(pattern) == 2:
                        if type(pattern[0]) is str and is_list_type(pattern[1], str):
                            pattern = (pattern,)

                    result = []
                    for value in pattern:
                        if type(value) is tuple:
                            if len(value) == 2:
                                # Construct OR-list pattern
                                value = value[0] % '|'.join(value[1])
                            elif len(value) == 1:
                                value = value[0]

                        result.append(re.compile(value, re.IGNORECASE))
                        compile_count += 1

                    weight_patterns.append(tuple(result))

                self.regex[group_name].append((weight, weight_patterns))

        Logr.info("Compiled %s patterns in %ss", compile_count, delta_seconds(datetime.now() - compile_start))

    def find_group(self, name):
        """Return ``(group_name, weight_groups)`` for ``name``, or ``(None, None)``."""
        for group_name, weight_groups in self.regex.items():
            if group_name and group_name == name:
                return group_name, weight_groups

        return None, None

    def value_match(self, value, group_name=None, single=True):
        """Match a plain string against the FIRST pattern of each group.

        :param value: string to match
        :param group_name: restrict matching to this group when given
        :param single: stop after the first group that matches
        :return: ``{group: {named captures}}`` or None when nothing matched
        """
        result = None

        for group, weight_groups in self.regex.items():
            if group_name and group != group_name:
                continue

            # TODO handle multiple weights -- only the top weight group is used.
            weight, patterns = weight_groups[0]

            for pattern in patterns:
                # Only the first fragment-regex of each pattern is applied here.
                match = pattern[0].match(value)
                if not match:
                    continue

                if result is None:
                    result = {}
                if group not in result:
                    result[group] = {}

                result[group].update(match.groupdict())

                if single:
                    return result

        return result

    def fragment_match(self, fragment, group_name=None):
        """Follow a fragment chain to try find a match

        Multi-fragment patterns are walked along ``fragment.right`` links; all
        regexes of a pattern must match consecutive fragments for success.

        :type fragment: caper.objects.CaperFragment
        :type group_name: str or None

        :return: The weight of the match found between 0.0 and 1.0,
                 where 1.0 means perfect match and 0.0 means no match,
                 plus the merged capture dict and fragments consumed
        :rtype: (float, dict, int)
        """

        # NOTE(review): when ``group_name`` is unknown find_group returns
        # (None, None) and the loop below would raise TypeError -- callers are
        # expected to pass a valid group name.
        group_name, weight_groups = self.find_group(group_name)

        for weight, patterns in weight_groups:
            for pattern in patterns:
                cur_fragment = fragment
                success = True
                result = {}

                # Ignore empty patterns
                if len(pattern) < 1:
                    break

                for fragment_pattern in pattern:
                    if not cur_fragment:
                        success = False
                        break

                    match = fragment_pattern.match(cur_fragment.value)
                    if match:
                        update_dict(result, match.groupdict())
                    else:
                        success = False
                        break

                    cur_fragment = cur_fragment.right if cur_fragment else None

                if success:
                    Logr.debug("Found match with weight %s" % weight)
                    return float(weight), result, len(pattern)

        return 0.0, None, 1
# Copyright 2013 Dean Gardiner
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from caper.helpers import xrange_six


class CaperClosure(object):
    """A delimited section of the input name (e.g. a bracketed '[...]' span)."""

    # Key used by capture constraints (compared against ``constraint.target``).
    __key__ = 'closure'

    def __init__(self, index, value):
        #: :type: int -- position of this closure within the input
        self.index = index

        #: :type: str -- raw text of this closure
        self.value = value

        #: :type: CaperClosure -- neighbouring closures in the input
        self.left = None
        #: :type: CaperClosure
        self.right = None

        #: :type: list of CaperFragment -- tokens this closure was split into
        self.fragments = []

    def __str__(self):
        # BUGFIX: the original format string was destroyed by angle-bracket
        # stripping ('return "" % repr(self.result)' -- a TypeError referencing
        # a nonexistent attribute).  Reconstructed from this class's own state.
        return "<CaperClosure value: %s>" % repr(self.value)

    def __repr__(self):
        return self.__str__()


class CaperFragment(object):
    """A single token inside a closure.

    NOTE(review): this class and CaptureMatch below were swallowed by the same
    angle-bracket corruption; attributes reconstructed from their usage in
    matcher.py / step.py / the parsers (value, left/right chain, separators,
    owning closure).  Confirm against upstream caper.
    """

    __key__ = 'fragment'

    def __init__(self, closure=None):
        #: :type: CaperClosure -- closure this fragment belongs to
        self.closure = closure

        #: :type: str -- token text
        self.value = ""

        #: :type: CaperFragment -- left neighbour in the token chain
        self.left = None
        #: :type: str -- separator between this fragment and its left neighbour
        self.left_sep = None

        #: :type: CaperFragment -- right neighbour (walked by fragment_match)
        self.right = None
        #: :type: str
        self.right_sep = None

        #: :type: int -- position within the closure
        self.position = None

    def __str__(self):
        return "<CaperFragment value: %s>" % repr(self.value)

    def __repr__(self):
        return self.__str__()


class CaptureMatch(object):
    """Result of executing a CaptureStep against a fragment or closure."""

    def __init__(self, tag, step=None, success=False, weight=None, result=None, num_fragments=1):
        #: :type: str -- tag the capture was made under
        self.tag = tag
        #: :type: CaptureStep -- step that produced this match
        self.step = step

        #: :type: bool -- whether the step matched
        self.success = success
        #: :type: float -- match confidence, 0.0 .. 1.0
        self.weight = weight
        #: capture dict (regex named groups) or raw captured value
        self.result = result
        #: :type: int -- consecutive fragments consumed by the match
        self.num_fragments = num_fragments

    def __str__(self):
        # Reconstructed: the surviving corruption remnant was
        # '"" % repr(self.result)'.
        return "<CaptureMatch result: %s>" % repr(self.result)

    def __repr__(self):
        return self.__str__()
# Copyright 2013 Dean Gardiner
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import re

from caper import FragmentMatcher
from caper.parsers.base import Parser


# Matches a bracketed release-group tag, e.g. "[GroupName]" or "(GroupName)".
# NOTE(review): named groups below were reconstructed -- the originals were
# lost to extraction damage ('(?P.*?)' is invalid regex).  'group' is
# confirmed by capture_group() calling match.group('group'); the identifier /
# video / audio names are inferred from the alternatives -- confirm upstream.
REGEX_GROUP = re.compile(r'(\(|\[)(?P<group>.*?)(\)|\])', re.IGNORECASE)


PATTERN_GROUPS = [
    ('identifier', [
        r'S(?P<season>\d+)E(?P<episode>\d+)',
        r'(S(?P<season>\d+))|(E(?P<episode>\d+))',

        r'Ep(?P<episode>\d+)',
        # NOTE(review): '$...^' anchors look inverted but are preserved from
        # the original source -- confirm intent before changing.
        r'$(?P<absolute>\d+)^',

        (r'Episode', r'(?P<episode>\d+)'),
    ]),
    ('video', [
        (r'(?P<profile>%s)', [
            'Hi10P'
        ]),
        (r'.(?P<resolution>%s)', [
            '720p',
            '1080p',

            '960x720',
            '1920x1080'
        ]),
        (r'(?P<source>%s)', [
            'BD'
        ]),
    ]),
    ('audio', [
        (r'(?P<codec>%s)', [
            'FLAC'
        ]),
    ])
]


class AnimeParser(Parser):
    """Parser for anime-style release names ("[Group] Show - 01 [720p]")."""

    # Shared compiled matcher -- built once per process.
    matcher = None

    def __init__(self, debug=False):
        # BUGFIX: the original passed the raw PATTERN_GROUPS list straight to
        # Parser, but Parser stores its first argument as ``self.matcher`` and
        # later calls ``self.matcher.fragment_match(...)`` (see CaptureStep),
        # which a plain list does not provide.  Wrap the groups in a
        # FragmentMatcher, cached on the class exactly like SceneParser and
        # UsenetParser do.
        if not AnimeParser.matcher:
            AnimeParser.matcher = FragmentMatcher(PATTERN_GROUPS)

        super(AnimeParser, self).__init__(AnimeParser.matcher, debug)

    def capture_group(self, fragment):
        """Return the bracketed release-group name in ``fragment``, or None."""
        match = REGEX_GROUP.match(fragment.value)

        if not match:
            return None

        return match.group('group')

    def run(self, closures):
        """
        :type closures: list of CaperClosure
        """

        self.setup(closures)

        # Leading "[Group]" closure.
        self.capture_closure('group', func=self.capture_group)\
            .execute(once=True)

        # Show name runs until the identifier or video info starts.
        self.capture_fragment('show_name', single=False)\
            .until_fragment(value__re='identifier')\
            .until_fragment(value__re='video')\
            .execute()

        self.capture_fragment('identifier', regex='identifier') \
            .capture_fragment('video', regex='video', single=False) \
            .capture_fragment('audio', regex='audio', single=False) \
            .execute()

        self.result.build()
        return self.result
# Copyright 2013 Dean Gardiner
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from caper import FragmentMatcher
from caper.group import CaptureGroup
from caper.result import CaperResult, CaperClosureNode, CaperRootNode
from logr import Logr


class Parser(object):
    """Base class for caper parsers.

    Holds the shared fragment matcher plus per-run parsing state; concrete
    subclasses implement :meth:`run` using the ``capture_*`` builder methods.
    """

    def __init__(self, matcher, debug=False):
        self.debug = debug
        self.matcher = matcher

        # Per-run state -- populated by reset()/setup().
        self.closures = None
        #: :type: caper.result.CaperResult
        self.result = None

        self._match_cache = None
        self._fragment_pos = None
        self._closure_pos = None
        self._history = None

        self.reset()

    def reset(self):
        """Clear all per-run parsing state."""
        self.closures = None
        self.result = CaperResult()

        self._match_cache = {}
        self._fragment_pos = -1
        self._closure_pos = -1
        self._history = []

    def setup(self, closures):
        """Prepare a fresh run over ``closures``.

        :type closures: list of CaperClosure
        """
        self.reset()

        self.closures = closures
        # Parsing always starts from a root node wrapping the first closure.
        self.result.heads = [CaperRootNode(closures[0])]

    def run(self, closures):
        """Parse ``closures`` into a CaperResult -- subclass responsibility.

        :type closures: list of CaperClosure
        """
        raise NotImplementedError()

    #
    # Capture methods -- each call opens a new CaptureGroup builder chain.
    #

    def capture_fragment(self, tag, regex=None, func=None, single=True, **kwargs):
        group = CaptureGroup(self, self.result)
        return group.capture_fragment(tag, regex=regex, func=func, single=single, **kwargs)

    def capture_closure(self, tag, regex=None, func=None, single=True, **kwargs):
        group = CaptureGroup(self, self.result)
        return group.capture_closure(tag, regex=regex, func=func, single=single, **kwargs)
# Copyright 2013 Dean Gardiner
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from logr import Logr
from caper import FragmentMatcher
from caper.parsers.base import Parser
from caper.result import CaperFragmentNode


# Pattern table for scene-style release names.
# NOTE(review): the named groups were reconstructed -- the originals were lost
# to extraction damage ('(?P\d+)' is invalid regex and would raise re.error at
# compile time).  Names are inferred from the example comments; confirm
# against upstream caper.
PATTERN_GROUPS = [
    ('identifier', [
        (1.0, [
            # S01E01-E02
            (r'^S(?P<season>\d+)E(?P<episode_from>\d+)$', r'^E(?P<episode_to>\d+)$'),
            # 'S03 E01 to E08' or 'S03 E01 - E09'
            (r'^S(?P<season>\d+)$', r'^E(?P<episode_from>\d+)$', r'^(to|-)$', r'^E(?P<episode_to>\d+)$'),
            # 'E01 to E08' or 'E01 - E09'
            (r'^E(?P<episode_from>\d+)$', r'^(to|-)$', r'^E(?P<episode_to>\d+)$'),

            # S01-S03
            (r'^S(?P<season_from>\d+)$', r'^S(?P<season_to>\d+)$'),

            # S02E13
            r'^S(?P<season>\d+)E(?P<episode>\d+)$',
            # S01 E13
            (r'^(S(?P<season>\d+))$', r'^(E(?P<episode>\d+))$'),
            # S02
            # E13
            r'^((S(?P<season>\d+))|(E(?P<episode>\d+)))$',
            # 3x19
            r'^(?P<season>\d+)x(?P<episode>\d+)$',

            # 2013.09.15
            (r'^(?P<year>\d{4})$', r'^(?P<month>\d{2})$', r'^(?P<day>\d{2})$'),
            # 09.15.2013
            (r'^(?P<month>\d{2})$', r'^(?P<day>\d{2})$', r'^(?P<year>\d{4})$'),
            # TODO - US/UK Date Format Conflict? will only support US format for now..
            # 15.09.2013
            #(r'^(?P<day>\d{2})$', r'^(?P<month>\d{2})$', r'^(?P<year>\d{4})$'),
            # 130915
            r'^(?P<year>\d{2})(?P<month>\d{2})(?P<day>\d{2})$',

            # Season 3 Episode 14
            (r'^Se(ason)?$', r'^(?P<season>\d+)$', r'^Ep(isode)?$', r'^(?P<episode>\d+)$'),
            # Season 3
            (r'^Se(ason)?$', r'^(?P<season>\d+)$'),
            # Episode 14
            (r'^Ep(isode)?$', r'^(?P<episode>\d+)$'),

            # Part.3
            # Part.1.and.Part.3
            (r'^Part$', r'(?P<part>\d+)'),

            r'(?P<extra>Special)',
            r'(?P<country>NZ|AU|US|UK)'
        ]),
        (0.8, [
            # 100 - 1899, 2100 - 9999 (skips 1900 to 2099 - so we don't get years by mistake)
            # TODO - Update this pattern on 31 Dec 2099
            r'^(?P<season>([1-9])|(1[0-8])|(2[1-9])|([3-9][0-9]))(?P<episode>\d{2})$'
        ]),
        (0.5, [
            # 100 - 9999
            r'^(?P<season>([1-9])|([1-9][0-9]))(?P<episode>\d{2})$'
        ])
    ]),

    ('video', [
        r'(?P<aspect>FS|WS)',

        (r'(?P<resolution>%s)', [
            '480p',
            '720p',
            '1080p'
        ]),

        #
        # Source
        #

        (r'(?P<source>%s)', [
            'DVDRiP',
            # HDTV
            'HDTV',
            'PDTV',
            'DSR',
            # WEB
            'WEBRip',
            'WEBDL',
            # BluRay
            'BluRay',
            'B(D|R)Rip',
            # DVD
            'DVDR',
            'DVD9',
            'DVD5'
        ]),

        # For multi-fragment 'WEB-DL', 'WEB-Rip', etc... matches
        (r'(?P<source>WEB)', r'(?P<source>DL|Rip)'),

        #
        # Codec
        #

        (r'(?P<codec>%s)', [
            'x264',
            'XViD',
            'H264',
            'AVC'
        ]),

        # For multi-fragment 'H 264' tags
        (r'(?P<codec>H)', r'(?P<codec>264)'),
    ]),

    ('dvd', [
        r'D(ISC)?(?P<disc>\d+)',

        r'R(?P<region>[0-8])',

        (r'(?P<encoding>%s)', [
            'PAL',
            'NTSC'
        ]),
    ]),

    ('audio', [
        (r'(?P<codec>%s)', [
            'AC3',
            'TrueHD'
        ]),

        (r'(?P<language>%s)', [
            'GERMAN',
            'DUTCH',
            'FRENCH',
            'SWEDiSH',
            'DANiSH',
            'iTALiAN'
        ]),
    ]),

    ('scene', [
        r'(?P<proper>PROPER|REAL)',
    ])
]


class SceneParser(Parser):
    """Parser for scene-style release names ("Show.S01E01.720p.HDTV.x264-GRP")."""

    # Shared compiled matcher -- building it compiles every pattern above, so
    # it is done once per process and cached on the class.
    matcher = None

    def __init__(self, debug=False):
        if not SceneParser.matcher:
            SceneParser.matcher = FragmentMatcher(PATTERN_GROUPS)
            Logr.info("Fragment matcher for %s created", self.__class__.__name__)

        super(SceneParser, self).__init__(SceneParser.matcher, debug)

    def capture_group(self, fragment):
        """Capture the release group from the trailing '-GROUP' fragment.

        Only matches a fragment in the LAST closure that is preceded by '-'
        and has nothing to its right.
        """
        if fragment.closure.index + 1 != len(self.closures):
            return None

        if fragment.left_sep != '-' or fragment.right:
            return None

        return fragment.value

    def run(self, closures):
        """
        :type closures: list of CaperClosure
        """

        self.setup(closures)

        # Show name runs until any structured info starts.
        self.capture_fragment('show_name', single=False)\
            .until_fragment(node__re='identifier')\
            .until_fragment(node__re='video')\
            .until_fragment(node__re='dvd')\
            .until_fragment(node__re='audio')\
            .until_fragment(node__re='scene')\
            .execute()

        # Structured info, stopping at the '-GROUP' tail.
        self.capture_fragment('identifier', regex='identifier', single=False)\
            .capture_fragment('video', regex='video', single=False)\
            .capture_fragment('dvd', regex='dvd', single=False)\
            .capture_fragment('audio', regex='audio', single=False)\
            .capture_fragment('scene', regex='scene', single=False)\
            .until_fragment(left_sep__eq='-', right__eq=None)\
            .execute()

        self.capture_fragment('group', func=self.capture_group)\
            .execute()

        self.print_tree(self.result.heads)

        self.result.build()
        return self.result

    def print_tree(self, heads):
        """Debug helper: recursively log the capture tree rooted at ``heads``."""
        if not self.debug:
            return

        for head in heads:
            head = head if type(head) is list else [head]

            if type(head[0]) is CaperFragmentNode:
                for fragment in head[0].fragments:
                    Logr.debug(fragment.value)
            else:
                Logr.debug(head[0].closure.value)

            for node in head:
                # BUGFIX: the conditional previously applied to the ENTIRE
                # concatenation, so nodes without a match logged '' instead of
                # the node itself.  Scope it to the match details only.
                Logr.debug('\t' + str(node).ljust(55) + '\t' + (
                    (str(node.match.weight) + '\t' + str(node.match.result)) if node.match else ''
                ))

            if len(head) > 0 and head[0].parent:
                self.print_tree([head[0].parent])
# Copyright 2013 Dean Gardiner
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from logr import Logr
from caper import FragmentMatcher
from caper.parsers.base import Parser


# NOTE(review): named groups reconstructed after extraction damage.  'group'
# is confirmed by get_state() reading result.get('group') == 'TOWN'; the
# remaining names are inferred -- confirm against upstream caper.
PATTERN_GROUPS = [
    ('usenet', [
        r'\[(?P<group>#[\w\.@]+)\]',
        r'^\[(?P<code>\w+)\]$',
        r'\[(?P<full>FULL)\]',
        r'\[\s?(?P<group>TOWN)\s?\]',
        r'(.*?\s)?[_\W]*(?P<site>www\..*?\.[a-z0-9]+)[_\W]*(.*?\s)?',
        r'(.*?\s)?[_\W]*(?P<site>(www\.)?[-\w]+\.(com|org|info))[_\W]*(.*?\s)?'
    ]),

    ('part', [
        r'.?(?P<current>\d+)/(?P<total>\d+).?'
    ]),

    ('detail', [
        r'[\s-]*\w*?[\s-]*\"(?P<file_name>.*?)\"[\s-]*\w*?[\s-]*(?P<size>[\d,\.]*\s?MB)?[\s-]*(?P<extra>yEnc)?',
        r'(?P<size>[\d,\.]*\s?MB)[\s-]*(?P<extra>yEnc)',
        r'(?P<size>[\d,\.]*\s?MB)|(?P<extra>yEnc)'
    ])
]


class UsenetParser(Parser):
    """Parser for usenet posting subjects ('[group] "name" 12/34 5MB yEnc')."""

    # Shared compiled matcher -- built once per process.
    matcher = None

    def __init__(self, debug=False):
        if not UsenetParser.matcher:
            UsenetParser.matcher = FragmentMatcher(PATTERN_GROUPS)
            Logr.info("Fragment matcher for %s created", self.__class__.__name__)

        super(UsenetParser, self).__init__(UsenetParser.matcher, debug)

    def run(self, closures):
        """
        :type closures: list of CaperClosure
        """

        self.setup(closures)

        # Capture usenet or part info until we get a part or matching fails
        self.capture_closure('usenet', regex='usenet', single=False)\
            .capture_closure('part', regex='part', single=True) \
            .until_result(tag='part') \
            .until_failure()\
            .execute()

        is_town_release, has_part = self.get_state()

        if not is_town_release:
            self.capture_release_name()

        # If we already have the part (TOWN releases), ignore matching part again
        if not is_town_release and not has_part:
            self.capture_fragment('part', regex='part', single=True)\
                .until_closure(node__re='usenet')\
                .until_success()\
                .execute()

        # Capture any leftover details
        self.capture_closure('usenet', regex='usenet', single=False)\
            .capture_closure('detail', regex='detail', single=False)\
            .execute()

        self.result.build()
        return self.result

    def capture_release_name(self):
        """Capture the release name, bounded by usenet/detail/part closures."""
        self.capture_closure('detail', regex='detail', single=False)\
            .until_failure()\
            .execute()

        self.capture_fragment('release_name', single=False, include_separators=True) \
            .until_closure(node__re='usenet') \
            .until_closure(node__re='detail') \
            .until_closure(node__re='part') \
            .until_fragment(value__eq='-')\
            .execute()

        # Capture any detail after the release name
        self.capture_closure('detail', regex='detail', single=False)\
            .until_failure()\
            .execute()

    def get_state(self):
        """Return (is_town_release, has_part) from the first head's captures."""
        # TODO multiple-chains?
        is_town_release = False
        has_part = False

        for tag, result in self.result.heads[0].captured():
            if tag == 'usenet' and result.get('group') == 'TOWN':
                is_town_release = True

            if tag == 'part':
                has_part = True

        return is_town_release, has_part
# Copyright 2013 Dean Gardiner
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import copy
from logr import Logr


# Tags whose matches are grouped together when results are built.
GROUP_MATCHES = ['identifier']


class CaperNode(object):
    """A node in the parse tree: one captured (or skipped) subject plus its parent link."""

    def __init__(self, closure, parent=None, match=None):
        """
        :type closure: caper.objects.CaperClosure
        :type parent: CaperNode
        :type match: caper.objects.CaptureMatch
        """

        #: :type: caper.objects.CaperClosure
        self.closure = closure

        #: :type: CaperNode
        self.parent = parent

        #: :type: CaptureMatch
        self.match = match

        #: :type: list of CaptureGroup -- groups that have finished on this head
        self.finished_groups = []

    def next(self):
        """Return the next subject (closure or fragment) to parse -- subclass responsibility."""
        raise NotImplementedError()

    def captured(self):
        """Yield ``(tag, result)`` for this node and every ancestor that has a match."""
        cur = self

        if cur.match:
            yield cur.match.tag, cur.match.result

        while cur.parent:
            cur = cur.parent

            if cur.match:
                yield cur.match.tag, cur.match.result


class CaperRootNode(CaperNode):
    """Synthetic root -- parsing starts at its closure."""

    def __init__(self, closure):
        """
        :type closure: caper.objects.CaperClosure or list of caper.objects.CaperClosure
        """
        super(CaperRootNode, self).__init__(closure)

    def next(self):
        return self.closure


class CaperClosureNode(CaperNode):
    """Node for a whole closure."""

    def __init__(self, closure, parent=None, match=None):
        """
        :type closure: caper.objects.CaperClosure or list of caper.objects.CaperClosure
        """
        super(CaperClosureNode, self).__init__(closure, parent, match)

    def next(self):
        if not self.closure:
            return None

        if self.match:
            # Jump to next closure if we have a match
            return self.closure.right
        elif len(self.closure.fragments) > 0:
            # Otherwise parse the fragments
            return self.closure.fragments[0]

        return None

    def __str__(self):
        # BUGFIX: format string was destroyed by angle-bracket stripping
        # ('return "" % repr(self.match)' -- a TypeError).  Reconstructed.
        return "<CaperClosureNode match: %s>" % repr(self.match)

    def __repr__(self):
        return self.__str__()


class CaperFragmentNode(CaperNode):
    """Node for one or more consecutive fragments inside a closure."""

    def __init__(self, closure, fragments, parent=None, match=None):
        """
        :type closure: caper.objects.CaperClosure
        :type fragments: list of caper.objects.CaperFragment
        """
        super(CaperFragmentNode, self).__init__(closure, parent, match)

        #: :type: caper.objects.CaperFragment or list of caper.objects.CaperFragment
        self.fragments = fragments

    def next(self):
        # Continue with the fragment after the last one consumed, then fall
        # back to the next closure.
        if len(self.fragments) > 0 and self.fragments[-1] and self.fragments[-1].right:
            return self.fragments[-1].right

        if self.closure.right:
            return self.closure.right

        return None

    def __str__(self):
        # BUGFIX: reconstructed format string (see CaperClosureNode.__str__).
        return "<CaperFragmentNode match: %s>" % repr(self.match)

    def __repr__(self):
        return self.__str__()


class CaperResult(object):
    """Collects parse heads and builds weighted result chains from them."""

    def __init__(self):
        #: :type: list of CaperNode
        self.heads = []

        #: :type: list of CaperResultChain -- sorted by weight after build()
        self.chains = []

    def build(self):
        """Convert every head into chains, weight them and sort best-first."""
        max_matched = 0

        for head in self.heads:
            for chain in self.combine_chain(head):
                if chain.num_matched > max_matched:
                    max_matched = chain.num_matched

                self.chains.append(chain)

        for chain in self.chains:
            # Normalize the fragment count into an extra weight component;
            # the or-fallbacks guard against division by zero.
            chain.weights.append(chain.num_matched / float(max_matched or chain.num_matched or 1))
            chain.finish()

        self.chains.sort(key=lambda chain: chain.weight, reverse=True)

        for chain in self.chains:
            Logr.debug("chain weight: %.02f", chain.weight)
            Logr.debug("\tInfo: %s", chain.info)

            Logr.debug("\tWeights: %s", chain.weights)
            Logr.debug("\tNumber of Fragments Matched: %s", chain.num_matched)

    def combine_chain(self, subject, chain=None):
        """Walk from ``subject`` up to the root, forking the chain at list-heads."""
        nodes = subject if type(subject) is list else [subject]

        if chain is None:
            chain = CaperResultChain()

        result = []

        for x, node in enumerate(nodes):
            # Share the chain with the last sibling; earlier siblings fork a copy.
            node_chain = chain if x == len(nodes) - 1 else chain.copy()

            if not node.parent:
                result.append(node_chain)
                continue

            node_chain.update(node)
            result.extend(self.combine_chain(node.parent, node_chain))

        return result


class CaperResultChain(object):
    """One root-to-leaf path through the parse tree, with accumulated captures."""

    def __init__(self):
        #: :type: float -- final averaged weight, set by finish()
        self.weight = None
        # tag -> list of capture results, in input order
        self.info = {}
        # Total fragments matched along the chain.
        self.num_matched = 0

        self.weights = []

    def update(self, subject):
        """Fold ``subject``'s match into this chain.

        :type subject: CaperFragmentNode
        """
        if not subject.match or not subject.match.success:
            return

        # TODO this should support closure nodes
        if type(subject) is CaperFragmentNode:
            self.num_matched += len(subject.fragments) if subject.fragments is not None else 0

        self.weights.append(subject.match.weight)

        # (redundant inner 'if subject.match' check removed -- guarded above)
        if subject.match.tag not in self.info:
            self.info[subject.match.tag] = []

        self.info[subject.match.tag].insert(0, subject.match.result)

    def finish(self):
        """Average the collected weights into the chain's final weight."""
        self.weight = sum(self.weights) / len(self.weights)

    def copy(self):
        """Return an independent copy (deep for info, shallow for weights)."""
        chain = CaperResultChain()

        chain.weight = self.weight
        chain.info = copy.deepcopy(self.info)

        chain.num_matched = self.num_matched
        chain.weights = copy.copy(self.weights)

        return chain
from caper.objects import CaptureMatch
from logr import Logr


class CaptureStep(object):
    """A single capture operation belonging to a CaptureGroup.

    Exactly one of three modes applies, checked in order:

    * ``regex`` -- match via the parser's FragmentMatcher pattern group
    * ``func``  -- match via a user-supplied callable
    * raw       -- unconditionally capture the fragment's value
    """

    REPR_KEYS = ['regex', 'func', 'single']

    def __init__(self, capture_group, tag, source, regex=None, func=None, single=None, **kwargs):
        #: @type: CaptureGroup
        self.capture_group = capture_group

        #: @type: str -- tag captures are stored under
        self.tag = tag
        #: @type: str -- subject type ('fragment' or 'closure')
        self.source = source
        #: @type: str -- pattern group name for regex mode
        self.regex = regex
        #: @type: function -- callable for func mode
        self.func = func
        #: @type: bool -- capture a single subject only
        self.single = single

        self.kwargs = kwargs

        self.matched = False

    def execute(self, fragment):
        """Execute step on fragment

        :type fragment: CaperFragment
        :rtype : CaptureMatch
        """
        match = CaptureMatch(self.tag, self)

        if self.regex:
            self._run_regex(fragment, match)
        elif self.func:
            self._run_func(fragment, match)
        else:
            self._run_raw(fragment, match)

        return match

    def _run_regex(self, fragment, match):
        # Delegate to the parser's FragmentMatcher for this pattern group.
        weight, result, num_fragments = self.capture_group.parser.matcher.fragment_match(fragment, self.regex)
        Logr.debug('(execute) [regex] tag: "%s"', self.tag)

        if not result:
            return

        match.success = True
        match.weight = weight
        match.result = result
        match.num_fragments = num_fragments

    def _run_func(self, fragment, match):
        result = self.func(fragment)
        Logr.debug('(execute) [func] %s += "%s"', self.tag, match)

        if not result:
            return

        match.success = True
        match.weight = 1.0
        match.result = result

    def _run_raw(self, fragment, match):
        Logr.debug('(execute) [raw] %s += "%s"', self.tag, fragment.value)

        # Raw captures always succeed at full weight.
        match.success = True
        match.weight = 1.0

        if self.kwargs.get('include_separators', False):
            match.result = (fragment.left_sep, fragment.value, fragment.right_sep)
        else:
            match.result = fragment.value

    def __repr__(self):
        details = ', '.join(
            key + '=' + repr(getattr(self, key))
            for key in self.REPR_KEYS
            if hasattr(self, key) and getattr(self, key)
        )

        return "CaptureStep('%s'%s)" % (self.tag, ', ' + details if details else '')
--git a/libs/chardet/__init__.py b/libs/chardet/__init__.py index b1872fe8cb..82c2a48d29 100755 --- a/libs/chardet/__init__.py +++ b/libs/chardet/__init__.py @@ -3,22 +3,28 @@ # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. -# +# # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. -# +# # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA # 02110-1301 USA ######################### END LICENSE BLOCK ######################### -__version__ = "1.0.1" +__version__ = "2.3.0" +from sys import version_info + def detect(aBuf): - import universaldetector + if ((version_info < (3, 0) and isinstance(aBuf, unicode)) or + (version_info >= (3, 0) and not isinstance(aBuf, bytes))): + raise ValueError('Expected a bytes object, not a unicode object') + + from . import universaldetector u = universaldetector.UniversalDetector() u.reset() u.feed(aBuf) diff --git a/libs/chardet/big5freq.py b/libs/chardet/big5freq.py index c1b0f3cec0..65bffc04b0 100755 --- a/libs/chardet/big5freq.py +++ b/libs/chardet/big5freq.py @@ -1,11 +1,11 @@ ######################## BEGIN LICENSE BLOCK ######################## # The Original Code is Mozilla Communicator client code. -# +# # The Initial Developer of the Original Code is # Netscape Communications Corporation. # Portions created by the Initial Developer are Copyright (C) 1998 # the Initial Developer. All Rights Reserved. 
-# +# # Contributor(s): # Mark Pilgrim - port to Python # @@ -13,12 +13,12 @@ # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. -# +# # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. -# +# # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA @@ -26,18 +26,18 @@ ######################### END LICENSE BLOCK ######################### # Big5 frequency table -# by Taiwan's Mandarin Promotion Council +# by Taiwan's Mandarin Promotion Council # -# +# # 128 --> 0.42261 # 256 --> 0.57851 # 512 --> 0.74851 # 1024 --> 0.89384 # 2048 --> 0.97583 -# +# # Ideal Distribution Ratio = 0.74851/(1-0.74851) =2.98 # Random Distribution Ration = 512/(5401-512)=0.105 -# +# # Typical Distribution Ratio about 25% of Ideal one, still much higher than RDR BIG5_TYPICAL_DISTRIBUTION_RATIO = 0.75 @@ -45,7 +45,7 @@ #Char to FreqOrder table BIG5_TABLE_SIZE = 5376 -Big5CharToFreqOrder = ( \ +Big5CharToFreqOrder = ( 1,1801,1506, 255,1431, 198, 9, 82, 6,5008, 177, 202,3681,1256,2821, 110, # 16 3814, 33,3274, 261, 76, 44,2114, 16,2946,2187,1176, 659,3971, 26,3451,2653, # 32 1198,3972,3350,4202, 410,2215, 302, 590, 361,1964, 8, 204, 58,4510,5009,1932, # 48 @@ -921,3 +921,5 @@ 13936,13937,13938,13939,13940,13941,13942,13943,13944,13945,13946,13947,13948,13949,13950,13951, #13952 13952,13953,13954,13955,13956,13957,13958,13959,13960,13961,13962,13963,13964,13965,13966,13967, #13968 13968,13969,13970,13971,13972) #13973 + +# flake8: noqa diff --git a/libs/chardet/big5prober.py b/libs/chardet/big5prober.py index 
e6b52aadba..becce81e5e 100755 --- a/libs/chardet/big5prober.py +++ b/libs/chardet/big5prober.py @@ -1,11 +1,11 @@ ######################## BEGIN LICENSE BLOCK ######################## # The Original Code is Mozilla Communicator client code. -# +# # The Initial Developer of the Original Code is # Netscape Communications Corporation. # Portions created by the Initial Developer are Copyright (C) 1998 # the Initial Developer. All Rights Reserved. -# +# # Contributor(s): # Mark Pilgrim - port to Python # @@ -13,22 +13,23 @@ # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. -# +# # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. -# +# # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA # 02110-1301 USA ######################### END LICENSE BLOCK ######################### -from mbcharsetprober import MultiByteCharSetProber -from codingstatemachine import CodingStateMachine -from chardistribution import Big5DistributionAnalysis -from mbcssm import Big5SMModel +from .mbcharsetprober import MultiByteCharSetProber +from .codingstatemachine import CodingStateMachine +from .chardistribution import Big5DistributionAnalysis +from .mbcssm import Big5SMModel + class Big5Prober(MultiByteCharSetProber): def __init__(self): diff --git a/libs/chardet/chardetect.py b/libs/chardet/chardetect.py new file mode 100644 index 0000000000..ffe892f25d --- /dev/null +++ b/libs/chardet/chardetect.py @@ -0,0 +1,80 @@ +#!/usr/bin/env python +""" +Script which takes one or more file paths and reports on their detected 
+encodings + +Example:: + + % chardetect somefile someotherfile + somefile: windows-1252 with confidence 0.5 + someotherfile: ascii with confidence 1.0 + +If no paths are provided, it takes its input from stdin. + +""" + +from __future__ import absolute_import, print_function, unicode_literals + +import argparse +import sys +from io import open + +from chardet import __version__ +from chardet.universaldetector import UniversalDetector + + +def description_of(lines, name='stdin'): + """ + Return a string describing the probable encoding of a file or + list of strings. + + :param lines: The lines to get the encoding of. + :type lines: Iterable of bytes + :param name: Name of file or collection of lines + :type name: str + """ + u = UniversalDetector() + for line in lines: + u.feed(line) + u.close() + result = u.result + if result['encoding']: + return '{0}: {1} with confidence {2}'.format(name, result['encoding'], + result['confidence']) + else: + return '{0}: no result'.format(name) + + +def main(argv=None): + ''' + Handles command line arguments and gets things started. + + :param argv: List of arguments, as if specified on the command-line. + If None, ``sys.argv[1:]`` is used instead. + :type argv: list of str + ''' + # Get command line arguments + parser = argparse.ArgumentParser( + description="Takes one or more file paths and reports their detected \ + encodings", + formatter_class=argparse.ArgumentDefaultsHelpFormatter, + conflict_handler='resolve') + parser.add_argument('input', + help='File whose encoding we would like to determine.', + type=argparse.FileType('rb'), nargs='*', + default=[sys.stdin]) + parser.add_argument('--version', action='version', + version='%(prog)s {0}'.format(__version__)) + args = parser.parse_args(argv) + + for f in args.input: + if f.isatty(): + print("You are running chardetect interactively. Press " + + "CTRL-D twice at the start of a blank line to signal the " + + "end of your input. 
If you want help, run chardetect " + + "--help\n", file=sys.stderr) + print(description_of(f, f.name)) + + +if __name__ == '__main__': + main() diff --git a/libs/chardet/chardistribution.py b/libs/chardet/chardistribution.py index b893341845..4e64a00bef 100755 --- a/libs/chardet/chardistribution.py +++ b/libs/chardet/chardistribution.py @@ -1,11 +1,11 @@ ######################## BEGIN LICENSE BLOCK ######################## # The Original Code is Mozilla Communicator client code. -# +# # The Initial Developer of the Original Code is # Netscape Communications Corporation. # Portions created by the Initial Developer are Copyright (C) 1998 # the Initial Developer. All Rights Reserved. -# +# # Contributor(s): # Mark Pilgrim - port to Python # @@ -13,47 +13,63 @@ # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. -# +# # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. 
-# +# # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA # 02110-1301 USA ######################### END LICENSE BLOCK ######################### -import constants -from euctwfreq import EUCTWCharToFreqOrder, EUCTW_TABLE_SIZE, EUCTW_TYPICAL_DISTRIBUTION_RATIO -from euckrfreq import EUCKRCharToFreqOrder, EUCKR_TABLE_SIZE, EUCKR_TYPICAL_DISTRIBUTION_RATIO -from gb2312freq import GB2312CharToFreqOrder, GB2312_TABLE_SIZE, GB2312_TYPICAL_DISTRIBUTION_RATIO -from big5freq import Big5CharToFreqOrder, BIG5_TABLE_SIZE, BIG5_TYPICAL_DISTRIBUTION_RATIO -from jisfreq import JISCharToFreqOrder, JIS_TABLE_SIZE, JIS_TYPICAL_DISTRIBUTION_RATIO +from .euctwfreq import (EUCTWCharToFreqOrder, EUCTW_TABLE_SIZE, + EUCTW_TYPICAL_DISTRIBUTION_RATIO) +from .euckrfreq import (EUCKRCharToFreqOrder, EUCKR_TABLE_SIZE, + EUCKR_TYPICAL_DISTRIBUTION_RATIO) +from .gb2312freq import (GB2312CharToFreqOrder, GB2312_TABLE_SIZE, + GB2312_TYPICAL_DISTRIBUTION_RATIO) +from .big5freq import (Big5CharToFreqOrder, BIG5_TABLE_SIZE, + BIG5_TYPICAL_DISTRIBUTION_RATIO) +from .jisfreq import (JISCharToFreqOrder, JIS_TABLE_SIZE, + JIS_TYPICAL_DISTRIBUTION_RATIO) +from .compat import wrap_ord ENOUGH_DATA_THRESHOLD = 1024 SURE_YES = 0.99 SURE_NO = 0.01 +MINIMUM_DATA_THRESHOLD = 3 + class CharDistributionAnalysis: def __init__(self): - self._mCharToFreqOrder = None # Mapping table to get frequency order from char order (get from GetOrder()) - self._mTableSize = None # Size of above table - self._mTypicalDistributionRatio = None # This is a constant value which varies from language to language, used in calculating confidence. See http://www.mozilla.org/projects/intl/UniversalCharsetDetection.html for further detail. 
+ # Mapping table to get frequency order from char order (get from + # GetOrder()) + self._mCharToFreqOrder = None + self._mTableSize = None # Size of above table + # This is a constant value which varies from language to language, + # used in calculating confidence. See + # http://www.mozilla.org/projects/intl/UniversalCharsetDetection.html + # for further detail. + self._mTypicalDistributionRatio = None self.reset() - + def reset(self): """reset analyser, clear any state""" - self._mDone = constants.False # If this flag is set to constants.True, detection is done and conclusion has been made - self._mTotalChars = 0 # Total characters encountered - self._mFreqChars = 0 # The number of characters whose frequency order is less than 512 - - def feed(self, aStr, aCharLen): + # If this flag is set to True, detection is done and conclusion has + # been made + self._mDone = False + self._mTotalChars = 0 # Total characters encountered + # The number of characters whose frequency order is less than 512 + self._mFreqChars = 0 + + def feed(self, aBuf, aCharLen): """feed a character with known length""" if aCharLen == 2: # we only care about 2-bytes character in our distribution analysis - order = self.get_order(aStr) + order = self.get_order(aBuf) else: order = -1 if order >= 0: @@ -65,12 +81,14 @@ def feed(self, aStr, aCharLen): def get_confidence(self): """return confidence based on existing data""" - # if we didn't receive any character in our consideration range, return negative answer - if self._mTotalChars <= 0: + # if we didn't receive any character in our consideration range, + # return negative answer + if self._mTotalChars <= 0 or self._mFreqChars <= MINIMUM_DATA_THRESHOLD: return SURE_NO if self._mTotalChars != self._mFreqChars: - r = self._mFreqChars / ((self._mTotalChars - self._mFreqChars) * self._mTypicalDistributionRatio) + r = (self._mFreqChars / ((self._mTotalChars - self._mFreqChars) + * self._mTypicalDistributionRatio)) if r < SURE_YES: return r @@ -78,16 
+96,18 @@ def get_confidence(self): return SURE_YES def got_enough_data(self): - # It is not necessary to receive all data to draw conclusion. For charset detection, - # certain amount of data is enough + # It is not necessary to receive all data to draw conclusion. + # For charset detection, certain amount of data is enough return self._mTotalChars > ENOUGH_DATA_THRESHOLD - def get_order(self, aStr): - # We do not handle characters based on the original encoding string, but - # convert this encoding string to a number, here called order. - # This allows multiple encodings of a language to share one frequency table. + def get_order(self, aBuf): + # We do not handle characters based on the original encoding string, + # but convert this encoding string to a number, here called order. + # This allows multiple encodings of a language to share one frequency + # table. return -1 - + + class EUCTWDistributionAnalysis(CharDistributionAnalysis): def __init__(self): CharDistributionAnalysis.__init__(self) @@ -95,16 +115,18 @@ def __init__(self): self._mTableSize = EUCTW_TABLE_SIZE self._mTypicalDistributionRatio = EUCTW_TYPICAL_DISTRIBUTION_RATIO - def get_order(self, aStr): - # for euc-TW encoding, we are interested + def get_order(self, aBuf): + # for euc-TW encoding, we are interested # first byte range: 0xc4 -- 0xfe # second byte range: 0xa1 -- 0xfe # no validation needed here. 
State machine has done that - if aStr[0] >= '\xC4': - return 94 * (ord(aStr[0]) - 0xC4) + ord(aStr[1]) - 0xA1 + first_char = wrap_ord(aBuf[0]) + if first_char >= 0xC4: + return 94 * (first_char - 0xC4) + wrap_ord(aBuf[1]) - 0xA1 else: return -1 + class EUCKRDistributionAnalysis(CharDistributionAnalysis): def __init__(self): CharDistributionAnalysis.__init__(self) @@ -112,15 +134,17 @@ def __init__(self): self._mTableSize = EUCKR_TABLE_SIZE self._mTypicalDistributionRatio = EUCKR_TYPICAL_DISTRIBUTION_RATIO - def get_order(self, aStr): - # for euc-KR encoding, we are interested + def get_order(self, aBuf): + # for euc-KR encoding, we are interested # first byte range: 0xb0 -- 0xfe # second byte range: 0xa1 -- 0xfe # no validation needed here. State machine has done that - if aStr[0] >= '\xB0': - return 94 * (ord(aStr[0]) - 0xB0) + ord(aStr[1]) - 0xA1 + first_char = wrap_ord(aBuf[0]) + if first_char >= 0xB0: + return 94 * (first_char - 0xB0) + wrap_ord(aBuf[1]) - 0xA1 else: - return -1; + return -1 + class GB2312DistributionAnalysis(CharDistributionAnalysis): def __init__(self): @@ -129,15 +153,17 @@ def __init__(self): self._mTableSize = GB2312_TABLE_SIZE self._mTypicalDistributionRatio = GB2312_TYPICAL_DISTRIBUTION_RATIO - def get_order(self, aStr): - # for GB2312 encoding, we are interested + def get_order(self, aBuf): + # for GB2312 encoding, we are interested # first byte range: 0xb0 -- 0xfe # second byte range: 0xa1 -- 0xfe # no validation needed here. 
State machine has done that - if (aStr[0] >= '\xB0') and (aStr[1] >= '\xA1'): - return 94 * (ord(aStr[0]) - 0xB0) + ord(aStr[1]) - 0xA1 + first_char, second_char = wrap_ord(aBuf[0]), wrap_ord(aBuf[1]) + if (first_char >= 0xB0) and (second_char >= 0xA1): + return 94 * (first_char - 0xB0) + second_char - 0xA1 else: - return -1; + return -1 + class Big5DistributionAnalysis(CharDistributionAnalysis): def __init__(self): @@ -146,19 +172,21 @@ def __init__(self): self._mTableSize = BIG5_TABLE_SIZE self._mTypicalDistributionRatio = BIG5_TYPICAL_DISTRIBUTION_RATIO - def get_order(self, aStr): - # for big5 encoding, we are interested + def get_order(self, aBuf): + # for big5 encoding, we are interested # first byte range: 0xa4 -- 0xfe # second byte range: 0x40 -- 0x7e , 0xa1 -- 0xfe # no validation needed here. State machine has done that - if aStr[0] >= '\xA4': - if aStr[1] >= '\xA1': - return 157 * (ord(aStr[0]) - 0xA4) + ord(aStr[1]) - 0xA1 + 63 + first_char, second_char = wrap_ord(aBuf[0]), wrap_ord(aBuf[1]) + if first_char >= 0xA4: + if second_char >= 0xA1: + return 157 * (first_char - 0xA4) + second_char - 0xA1 + 63 else: - return 157 * (ord(aStr[0]) - 0xA4) + ord(aStr[1]) - 0x40 + return 157 * (first_char - 0xA4) + second_char - 0x40 else: return -1 + class SJISDistributionAnalysis(CharDistributionAnalysis): def __init__(self): CharDistributionAnalysis.__init__(self) @@ -166,22 +194,24 @@ def __init__(self): self._mTableSize = JIS_TABLE_SIZE self._mTypicalDistributionRatio = JIS_TYPICAL_DISTRIBUTION_RATIO - def get_order(self, aStr): - # for sjis encoding, we are interested + def get_order(self, aBuf): + # for sjis encoding, we are interested # first byte range: 0x81 -- 0x9f , 0xe0 -- 0xfe # second byte range: 0x40 -- 0x7e, 0x81 -- oxfe # no validation needed here. 
State machine has done that - if (aStr[0] >= '\x81') and (aStr[0] <= '\x9F'): - order = 188 * (ord(aStr[0]) - 0x81) - elif (aStr[0] >= '\xE0') and (aStr[0] <= '\xEF'): - order = 188 * (ord(aStr[0]) - 0xE0 + 31) + first_char, second_char = wrap_ord(aBuf[0]), wrap_ord(aBuf[1]) + if (first_char >= 0x81) and (first_char <= 0x9F): + order = 188 * (first_char - 0x81) + elif (first_char >= 0xE0) and (first_char <= 0xEF): + order = 188 * (first_char - 0xE0 + 31) else: - return -1; - order = order + ord(aStr[1]) - 0x40 - if aStr[1] > '\x7F': - order =- 1 + return -1 + order = order + second_char - 0x40 + if second_char > 0x7F: + order = -1 return order + class EUCJPDistributionAnalysis(CharDistributionAnalysis): def __init__(self): CharDistributionAnalysis.__init__(self) @@ -189,12 +219,13 @@ def __init__(self): self._mTableSize = JIS_TABLE_SIZE self._mTypicalDistributionRatio = JIS_TYPICAL_DISTRIBUTION_RATIO - def get_order(self, aStr): - # for euc-JP encoding, we are interested + def get_order(self, aBuf): + # for euc-JP encoding, we are interested # first byte range: 0xa0 -- 0xfe # second byte range: 0xa1 -- 0xfe # no validation needed here. State machine has done that - if aStr[0] >= '\xA0': - return 94 * (ord(aStr[0]) - 0xA1) + ord(aStr[1]) - 0xa1 + char = wrap_ord(aBuf[0]) + if char >= 0xA0: + return 94 * (char - 0xA1) + wrap_ord(aBuf[1]) - 0xa1 else: return -1 diff --git a/libs/chardet/charsetgroupprober.py b/libs/chardet/charsetgroupprober.py index 5188069499..85e7a1c67d 100755 --- a/libs/chardet/charsetgroupprober.py +++ b/libs/chardet/charsetgroupprober.py @@ -25,8 +25,10 @@ # 02110-1301 USA ######################### END LICENSE BLOCK ######################### -import constants, sys -from charsetprober import CharSetProber +from . 
import constants +import sys +from .charsetprober import CharSetProber + class CharSetGroupProber(CharSetProber): def __init__(self): @@ -34,35 +36,39 @@ def __init__(self): self._mActiveNum = 0 self._mProbers = [] self._mBestGuessProber = None - + def reset(self): CharSetProber.reset(self) self._mActiveNum = 0 for prober in self._mProbers: if prober: prober.reset() - prober.active = constants.True + prober.active = True self._mActiveNum += 1 self._mBestGuessProber = None def get_charset_name(self): if not self._mBestGuessProber: self.get_confidence() - if not self._mBestGuessProber: return None + if not self._mBestGuessProber: + return None # self._mBestGuessProber = self._mProbers[0] return self._mBestGuessProber.get_charset_name() def feed(self, aBuf): for prober in self._mProbers: - if not prober: continue - if not prober.active: continue + if not prober: + continue + if not prober.active: + continue st = prober.feed(aBuf) - if not st: continue + if not st: + continue if st == constants.eFoundIt: self._mBestGuessProber = prober return self.get_state() elif st == constants.eNotMe: - prober.active = constants.False + prober.active = False self._mActiveNum -= 1 if self._mActiveNum <= 0: self._mState = constants.eNotMe @@ -78,18 +84,22 @@ def get_confidence(self): bestConf = 0.0 self._mBestGuessProber = None for prober in self._mProbers: - if not prober: continue + if not prober: + continue if not prober.active: if constants._debug: - sys.stderr.write(prober.get_charset_name() + ' not active\n') + sys.stderr.write(prober.get_charset_name() + + ' not active\n') continue cf = prober.get_confidence() if constants._debug: - sys.stderr.write('%s confidence = %s\n' % (prober.get_charset_name(), cf)) + sys.stderr.write('%s confidence = %s\n' % + (prober.get_charset_name(), cf)) if bestConf < cf: bestConf = cf self._mBestGuessProber = prober - if not self._mBestGuessProber: return 0.0 + if not self._mBestGuessProber: + return 0.0 return bestConf # else: # 
self._mBestGuessProber = self._mProbers[0] diff --git a/libs/chardet/charsetprober.py b/libs/chardet/charsetprober.py index 3ac1683c79..97581712c1 100755 --- a/libs/chardet/charsetprober.py +++ b/libs/chardet/charsetprober.py @@ -1,11 +1,11 @@ ######################## BEGIN LICENSE BLOCK ######################## # The Original Code is Mozilla Universal charset detector code. -# +# # The Initial Developer of the Original Code is # Netscape Communications Corporation. # Portions created by the Initial Developer are Copyright (C) 2001 # the Initial Developer. All Rights Reserved. -# +# # Contributor(s): # Mark Pilgrim - port to Python # Shy Shalom - original C code @@ -14,27 +14,29 @@ # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. -# +# # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. -# +# # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA # 02110-1301 USA ######################### END LICENSE BLOCK ######################### -import constants, re +from . 
import constants +import re + class CharSetProber: def __init__(self): pass - + def reset(self): self._mState = constants.eDetecting - + def get_charset_name(self): return None @@ -48,13 +50,13 @@ def get_confidence(self): return 0.0 def filter_high_bit_only(self, aBuf): - aBuf = re.sub(r'([\x00-\x7F])+', ' ', aBuf) + aBuf = re.sub(b'([\x00-\x7F])+', b' ', aBuf) return aBuf - + def filter_without_english_letters(self, aBuf): - aBuf = re.sub(r'([A-Za-z])+', ' ', aBuf) + aBuf = re.sub(b'([A-Za-z])+', b' ', aBuf) return aBuf - + def filter_with_english_letters(self, aBuf): # TODO return aBuf diff --git a/libs/chardet/codingstatemachine.py b/libs/chardet/codingstatemachine.py index 452d3b0a06..8dd8c91798 100755 --- a/libs/chardet/codingstatemachine.py +++ b/libs/chardet/codingstatemachine.py @@ -13,19 +13,21 @@ # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. -# +# # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. 
-# +# # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA # 02110-1301 USA ######################### END LICENSE BLOCK ######################### -from constants import eStart, eError, eItsMe +from .constants import eStart +from .compat import wrap_ord + class CodingStateMachine: def __init__(self, sm): @@ -40,12 +42,15 @@ def reset(self): def next_state(self, c): # for each byte we get its class # if it is first byte, we also get byte length - byteCls = self._mModel['classTable'][ord(c)] + # PY3K: aBuf is a byte stream, so c is an int, not a byte + byteCls = self._mModel['classTable'][wrap_ord(c)] if self._mCurrentState == eStart: self._mCurrentBytePos = 0 self._mCurrentCharLen = self._mModel['charLenTable'][byteCls] # from byte's class and stateTable, we get its next state - self._mCurrentState = self._mModel['stateTable'][self._mCurrentState * self._mModel['classFactor'] + byteCls] + curr_state = (self._mCurrentState * self._mModel['classFactor'] + + byteCls) + self._mCurrentState = self._mModel['stateTable'][curr_state] self._mCurrentBytePos += 1 return self._mCurrentState diff --git a/libs/chardet/compat.py b/libs/chardet/compat.py new file mode 100644 index 0000000000..d9e30addf9 --- /dev/null +++ b/libs/chardet/compat.py @@ -0,0 +1,34 @@ +######################## BEGIN LICENSE BLOCK ######################## +# Contributor(s): +# Ian Cordasco - port to Python +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA +# 02110-1301 USA +######################### END LICENSE BLOCK ######################### + +import sys + + +if sys.version_info < (3, 0): + base_str = (str, unicode) +else: + base_str = (bytes, str) + + +def wrap_ord(a): + if sys.version_info < (3, 0) and isinstance(a, base_str): + return ord(a) + else: + return a diff --git a/libs/chardet/compat.py~HEAD b/libs/chardet/compat.py~HEAD new file mode 100644 index 0000000000..22f7463fae --- /dev/null +++ b/libs/chardet/compat.py~HEAD @@ -0,0 +1,52 @@ +######################## BEGIN LICENSE BLOCK ######################## +# Contributor(s): +# Ian Cordasco - port to Python +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. 
+# +# You should have received a copy of the GNU Lesser General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA +# 02110-1301 USA +######################### END LICENSE BLOCK ######################### + +<<<<<<< HEAD:libs/chardet/compat.py:libs/chardet/compat.py +import sys + + +if sys.version_info < (3, 0): + base_str = (str, unicode) +else: + base_str = (bytes, str) + + +def wrap_ord(a): + if sys.version_info < (3, 0) and isinstance(a, base_str): + return ord(a) + else: + return a +======= +__version__ = "2.2.1" +from sys import version_info + + +def detect(aBuf): + if ((version_info < (3, 0) and isinstance(aBuf, unicode)) or + (version_info >= (3, 0) and not isinstance(aBuf, bytes))): + raise ValueError('Expected a bytes object, not a unicode object') + + from . import universaldetector + u = universaldetector.UniversalDetector() + u.reset() + u.feed(aBuf) + u.close() + return u.result +>>>>>>> b839b971765cf032c05b2f3d2627afc41fed332c:libs/requests/packages/chardet/__init__.py:libs/requests/packages/chardet/__init__.py diff --git a/libs/chardet/compat.py~b839b971765cf032c05b2f3d2627afc41fed332c b/libs/chardet/compat.py~b839b971765cf032c05b2f3d2627afc41fed332c new file mode 100644 index 0000000000..d9e30addf9 --- /dev/null +++ b/libs/chardet/compat.py~b839b971765cf032c05b2f3d2627afc41fed332c @@ -0,0 +1,34 @@ +######################## BEGIN LICENSE BLOCK ######################## +# Contributor(s): +# Ian Cordasco - port to Python +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. 
+# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA +# 02110-1301 USA +######################### END LICENSE BLOCK ######################### + +import sys + + +if sys.version_info < (3, 0): + base_str = (str, unicode) +else: + base_str = (bytes, str) + + +def wrap_ord(a): + if sys.version_info < (3, 0) and isinstance(a, base_str): + return ord(a) + else: + return a diff --git a/libs/chardet/constants.py b/libs/chardet/constants.py index e94e226b0a..e4d148b3c5 100755 --- a/libs/chardet/constants.py +++ b/libs/chardet/constants.py @@ -37,11 +37,3 @@ eItsMe = 2 SHORTCUT_THRESHOLD = 0.95 - -import __builtin__ -if not hasattr(__builtin__, 'False'): - False = 0 - True = 1 -else: - False = __builtin__.False - True = __builtin__.True diff --git a/libs/chardet/cp949prober.py b/libs/chardet/cp949prober.py new file mode 100644 index 0000000000..ff4272f82a --- /dev/null +++ b/libs/chardet/cp949prober.py @@ -0,0 +1,44 @@ +######################## BEGIN LICENSE BLOCK ######################## +# The Original Code is mozilla.org code. +# +# The Initial Developer of the Original Code is +# Netscape Communications Corporation. +# Portions created by the Initial Developer are Copyright (C) 1998 +# the Initial Developer. All Rights Reserved. +# +# Contributor(s): +# Mark Pilgrim - port to Python +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. 
+# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA +# 02110-1301 USA +######################### END LICENSE BLOCK ######################### + +from .mbcharsetprober import MultiByteCharSetProber +from .codingstatemachine import CodingStateMachine +from .chardistribution import EUCKRDistributionAnalysis +from .mbcssm import CP949SMModel + + +class CP949Prober(MultiByteCharSetProber): + def __init__(self): + MultiByteCharSetProber.__init__(self) + self._mCodingSM = CodingStateMachine(CP949SMModel) + # NOTE: CP949 is a superset of EUC-KR, so the distribution should be + # not different. + self._mDistributionAnalyzer = EUCKRDistributionAnalysis() + self.reset() + + def get_charset_name(self): + return "CP949" diff --git a/libs/chardet/cp949prober.py~HEAD b/libs/chardet/cp949prober.py~HEAD new file mode 100644 index 0000000000..ff4272f82a --- /dev/null +++ b/libs/chardet/cp949prober.py~HEAD @@ -0,0 +1,44 @@ +######################## BEGIN LICENSE BLOCK ######################## +# The Original Code is mozilla.org code. +# +# The Initial Developer of the Original Code is +# Netscape Communications Corporation. +# Portions created by the Initial Developer are Copyright (C) 1998 +# the Initial Developer. All Rights Reserved. +# +# Contributor(s): +# Mark Pilgrim - port to Python +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. 
+# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA +# 02110-1301 USA +######################### END LICENSE BLOCK ######################### + +from .mbcharsetprober import MultiByteCharSetProber +from .codingstatemachine import CodingStateMachine +from .chardistribution import EUCKRDistributionAnalysis +from .mbcssm import CP949SMModel + + +class CP949Prober(MultiByteCharSetProber): + def __init__(self): + MultiByteCharSetProber.__init__(self) + self._mCodingSM = CodingStateMachine(CP949SMModel) + # NOTE: CP949 is a superset of EUC-KR, so the distribution should be + # not different. + self._mDistributionAnalyzer = EUCKRDistributionAnalysis() + self.reset() + + def get_charset_name(self): + return "CP949" diff --git a/libs/chardet/cp949prober.py~b839b971765cf032c05b2f3d2627afc41fed332c b/libs/chardet/cp949prober.py~b839b971765cf032c05b2f3d2627afc41fed332c new file mode 100644 index 0000000000..ff4272f82a --- /dev/null +++ b/libs/chardet/cp949prober.py~b839b971765cf032c05b2f3d2627afc41fed332c @@ -0,0 +1,44 @@ +######################## BEGIN LICENSE BLOCK ######################## +# The Original Code is mozilla.org code. +# +# The Initial Developer of the Original Code is +# Netscape Communications Corporation. +# Portions created by the Initial Developer are Copyright (C) 1998 +# the Initial Developer. All Rights Reserved. 
+# +# Contributor(s): +# Mark Pilgrim - port to Python +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA +# 02110-1301 USA +######################### END LICENSE BLOCK ######################### + +from .mbcharsetprober import MultiByteCharSetProber +from .codingstatemachine import CodingStateMachine +from .chardistribution import EUCKRDistributionAnalysis +from .mbcssm import CP949SMModel + + +class CP949Prober(MultiByteCharSetProber): + def __init__(self): + MultiByteCharSetProber.__init__(self) + self._mCodingSM = CodingStateMachine(CP949SMModel) + # NOTE: CP949 is a superset of EUC-KR, so the distribution should be + # not different. + self._mDistributionAnalyzer = EUCKRDistributionAnalysis() + self.reset() + + def get_charset_name(self): + return "CP949" diff --git a/libs/chardet/escprober.py b/libs/chardet/escprober.py index 572ed7be37..80a844ff34 100755 --- a/libs/chardet/escprober.py +++ b/libs/chardet/escprober.py @@ -13,39 +13,43 @@ # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. 
-# +# # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. -# +# # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA # 02110-1301 USA ######################### END LICENSE BLOCK ######################### -import constants, sys -from escsm import HZSMModel, ISO2022CNSMModel, ISO2022JPSMModel, ISO2022KRSMModel -from charsetprober import CharSetProber -from codingstatemachine import CodingStateMachine +from . import constants +from .escsm import (HZSMModel, ISO2022CNSMModel, ISO2022JPSMModel, + ISO2022KRSMModel) +from .charsetprober import CharSetProber +from .codingstatemachine import CodingStateMachine +from .compat import wrap_ord + class EscCharSetProber(CharSetProber): def __init__(self): CharSetProber.__init__(self) - self._mCodingSM = [ \ + self._mCodingSM = [ CodingStateMachine(HZSMModel), CodingStateMachine(ISO2022CNSMModel), CodingStateMachine(ISO2022JPSMModel), CodingStateMachine(ISO2022KRSMModel) - ] + ] self.reset() def reset(self): CharSetProber.reset(self) for codingSM in self._mCodingSM: - if not codingSM: continue - codingSM.active = constants.True + if not codingSM: + continue + codingSM.active = True codingSM.reset() self._mActiveSM = len(self._mCodingSM) self._mDetectedCharset = None @@ -61,19 +65,22 @@ def get_confidence(self): def feed(self, aBuf): for c in aBuf: + # PY3K: aBuf is a byte array, so c is an int, not a byte for codingSM in self._mCodingSM: - if not codingSM: continue - if not codingSM.active: continue - codingState = codingSM.next_state(c) + if not codingSM: + continue + if not codingSM.active: + continue + codingState = codingSM.next_state(wrap_ord(c)) if codingState == constants.eError: - 
codingSM.active = constants.False + codingSM.active = False self._mActiveSM -= 1 if self._mActiveSM <= 0: self._mState = constants.eNotMe return self.get_state() elif codingState == constants.eItsMe: self._mState = constants.eFoundIt - self._mDetectedCharset = codingSM.get_coding_state_machine() + self._mDetectedCharset = codingSM.get_coding_state_machine() # nopep8 return self.get_state() - + return self.get_state() diff --git a/libs/chardet/escsm.py b/libs/chardet/escsm.py index 9fa22952e1..bd302b4c61 100755 --- a/libs/chardet/escsm.py +++ b/libs/chardet/escsm.py @@ -13,62 +13,62 @@ # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. -# +# # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. 
-# +# # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA # 02110-1301 USA ######################### END LICENSE BLOCK ######################### -from constants import eStart, eError, eItsMe - -HZ_cls = ( \ -1,0,0,0,0,0,0,0, # 00 - 07 -0,0,0,0,0,0,0,0, # 08 - 0f -0,0,0,0,0,0,0,0, # 10 - 17 -0,0,0,1,0,0,0,0, # 18 - 1f -0,0,0,0,0,0,0,0, # 20 - 27 -0,0,0,0,0,0,0,0, # 28 - 2f -0,0,0,0,0,0,0,0, # 30 - 37 -0,0,0,0,0,0,0,0, # 38 - 3f -0,0,0,0,0,0,0,0, # 40 - 47 -0,0,0,0,0,0,0,0, # 48 - 4f -0,0,0,0,0,0,0,0, # 50 - 57 -0,0,0,0,0,0,0,0, # 58 - 5f -0,0,0,0,0,0,0,0, # 60 - 67 -0,0,0,0,0,0,0,0, # 68 - 6f -0,0,0,0,0,0,0,0, # 70 - 77 -0,0,0,4,0,5,2,0, # 78 - 7f -1,1,1,1,1,1,1,1, # 80 - 87 -1,1,1,1,1,1,1,1, # 88 - 8f -1,1,1,1,1,1,1,1, # 90 - 97 -1,1,1,1,1,1,1,1, # 98 - 9f -1,1,1,1,1,1,1,1, # a0 - a7 -1,1,1,1,1,1,1,1, # a8 - af -1,1,1,1,1,1,1,1, # b0 - b7 -1,1,1,1,1,1,1,1, # b8 - bf -1,1,1,1,1,1,1,1, # c0 - c7 -1,1,1,1,1,1,1,1, # c8 - cf -1,1,1,1,1,1,1,1, # d0 - d7 -1,1,1,1,1,1,1,1, # d8 - df -1,1,1,1,1,1,1,1, # e0 - e7 -1,1,1,1,1,1,1,1, # e8 - ef -1,1,1,1,1,1,1,1, # f0 - f7 -1,1,1,1,1,1,1,1, # f8 - ff +from .constants import eStart, eError, eItsMe + +HZ_cls = ( +1,0,0,0,0,0,0,0, # 00 - 07 +0,0,0,0,0,0,0,0, # 08 - 0f +0,0,0,0,0,0,0,0, # 10 - 17 +0,0,0,1,0,0,0,0, # 18 - 1f +0,0,0,0,0,0,0,0, # 20 - 27 +0,0,0,0,0,0,0,0, # 28 - 2f +0,0,0,0,0,0,0,0, # 30 - 37 +0,0,0,0,0,0,0,0, # 38 - 3f +0,0,0,0,0,0,0,0, # 40 - 47 +0,0,0,0,0,0,0,0, # 48 - 4f +0,0,0,0,0,0,0,0, # 50 - 57 +0,0,0,0,0,0,0,0, # 58 - 5f +0,0,0,0,0,0,0,0, # 60 - 67 +0,0,0,0,0,0,0,0, # 68 - 6f +0,0,0,0,0,0,0,0, # 70 - 77 +0,0,0,4,0,5,2,0, # 78 - 7f +1,1,1,1,1,1,1,1, # 80 - 87 +1,1,1,1,1,1,1,1, # 88 - 8f +1,1,1,1,1,1,1,1, # 90 - 97 +1,1,1,1,1,1,1,1, # 98 - 9f +1,1,1,1,1,1,1,1, # a0 - a7 +1,1,1,1,1,1,1,1, # a8 - af +1,1,1,1,1,1,1,1, # b0 - b7 +1,1,1,1,1,1,1,1, # b8 - bf +1,1,1,1,1,1,1,1, 
# c0 - c7 +1,1,1,1,1,1,1,1, # c8 - cf +1,1,1,1,1,1,1,1, # d0 - d7 +1,1,1,1,1,1,1,1, # d8 - df +1,1,1,1,1,1,1,1, # e0 - e7 +1,1,1,1,1,1,1,1, # e8 - ef +1,1,1,1,1,1,1,1, # f0 - f7 +1,1,1,1,1,1,1,1, # f8 - ff ) -HZ_st = ( \ -eStart,eError, 3,eStart,eStart,eStart,eError,eError,# 00-07 -eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,# 08-0f -eItsMe,eItsMe,eError,eError,eStart,eStart, 4,eError,# 10-17 - 5,eError, 6,eError, 5, 5, 4,eError,# 18-1f - 4,eError, 4, 4, 4,eError, 4,eError,# 20-27 - 4,eItsMe,eStart,eStart,eStart,eStart,eStart,eStart,# 28-2f +HZ_st = ( +eStart,eError, 3,eStart,eStart,eStart,eError,eError,# 00-07 +eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,# 08-0f +eItsMe,eItsMe,eError,eError,eStart,eStart, 4,eError,# 10-17 + 5,eError, 6,eError, 5, 5, 4,eError,# 18-1f + 4,eError, 4, 4, 4,eError, 4,eError,# 20-27 + 4,eItsMe,eStart,eStart,eStart,eStart,eStart,eStart,# 28-2f ) HZCharLenTable = (0, 0, 0, 0, 0, 0) @@ -79,50 +79,50 @@ 'charLenTable': HZCharLenTable, 'name': "HZ-GB-2312"} -ISO2022CN_cls = ( \ -2,0,0,0,0,0,0,0, # 00 - 07 -0,0,0,0,0,0,0,0, # 08 - 0f -0,0,0,0,0,0,0,0, # 10 - 17 -0,0,0,1,0,0,0,0, # 18 - 1f -0,0,0,0,0,0,0,0, # 20 - 27 -0,3,0,0,0,0,0,0, # 28 - 2f -0,0,0,0,0,0,0,0, # 30 - 37 -0,0,0,0,0,0,0,0, # 38 - 3f -0,0,0,4,0,0,0,0, # 40 - 47 -0,0,0,0,0,0,0,0, # 48 - 4f -0,0,0,0,0,0,0,0, # 50 - 57 -0,0,0,0,0,0,0,0, # 58 - 5f -0,0,0,0,0,0,0,0, # 60 - 67 -0,0,0,0,0,0,0,0, # 68 - 6f -0,0,0,0,0,0,0,0, # 70 - 77 -0,0,0,0,0,0,0,0, # 78 - 7f -2,2,2,2,2,2,2,2, # 80 - 87 -2,2,2,2,2,2,2,2, # 88 - 8f -2,2,2,2,2,2,2,2, # 90 - 97 -2,2,2,2,2,2,2,2, # 98 - 9f -2,2,2,2,2,2,2,2, # a0 - a7 -2,2,2,2,2,2,2,2, # a8 - af -2,2,2,2,2,2,2,2, # b0 - b7 -2,2,2,2,2,2,2,2, # b8 - bf -2,2,2,2,2,2,2,2, # c0 - c7 -2,2,2,2,2,2,2,2, # c8 - cf -2,2,2,2,2,2,2,2, # d0 - d7 -2,2,2,2,2,2,2,2, # d8 - df -2,2,2,2,2,2,2,2, # e0 - e7 -2,2,2,2,2,2,2,2, # e8 - ef -2,2,2,2,2,2,2,2, # f0 - f7 -2,2,2,2,2,2,2,2, # f8 - ff +ISO2022CN_cls = ( +2,0,0,0,0,0,0,0, # 00 - 07 
+0,0,0,0,0,0,0,0, # 08 - 0f +0,0,0,0,0,0,0,0, # 10 - 17 +0,0,0,1,0,0,0,0, # 18 - 1f +0,0,0,0,0,0,0,0, # 20 - 27 +0,3,0,0,0,0,0,0, # 28 - 2f +0,0,0,0,0,0,0,0, # 30 - 37 +0,0,0,0,0,0,0,0, # 38 - 3f +0,0,0,4,0,0,0,0, # 40 - 47 +0,0,0,0,0,0,0,0, # 48 - 4f +0,0,0,0,0,0,0,0, # 50 - 57 +0,0,0,0,0,0,0,0, # 58 - 5f +0,0,0,0,0,0,0,0, # 60 - 67 +0,0,0,0,0,0,0,0, # 68 - 6f +0,0,0,0,0,0,0,0, # 70 - 77 +0,0,0,0,0,0,0,0, # 78 - 7f +2,2,2,2,2,2,2,2, # 80 - 87 +2,2,2,2,2,2,2,2, # 88 - 8f +2,2,2,2,2,2,2,2, # 90 - 97 +2,2,2,2,2,2,2,2, # 98 - 9f +2,2,2,2,2,2,2,2, # a0 - a7 +2,2,2,2,2,2,2,2, # a8 - af +2,2,2,2,2,2,2,2, # b0 - b7 +2,2,2,2,2,2,2,2, # b8 - bf +2,2,2,2,2,2,2,2, # c0 - c7 +2,2,2,2,2,2,2,2, # c8 - cf +2,2,2,2,2,2,2,2, # d0 - d7 +2,2,2,2,2,2,2,2, # d8 - df +2,2,2,2,2,2,2,2, # e0 - e7 +2,2,2,2,2,2,2,2, # e8 - ef +2,2,2,2,2,2,2,2, # f0 - f7 +2,2,2,2,2,2,2,2, # f8 - ff ) -ISO2022CN_st = ( \ -eStart, 3,eError,eStart,eStart,eStart,eStart,eStart,# 00-07 -eStart,eError,eError,eError,eError,eError,eError,eError,# 08-0f -eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,# 10-17 -eItsMe,eItsMe,eItsMe,eError,eError,eError, 4,eError,# 18-1f -eError,eError,eError,eItsMe,eError,eError,eError,eError,# 20-27 - 5, 6,eError,eError,eError,eError,eError,eError,# 28-2f -eError,eError,eError,eItsMe,eError,eError,eError,eError,# 30-37 -eError,eError,eError,eError,eError,eItsMe,eError,eStart,# 38-3f +ISO2022CN_st = ( +eStart, 3,eError,eStart,eStart,eStart,eStart,eStart,# 00-07 +eStart,eError,eError,eError,eError,eError,eError,eError,# 08-0f +eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,# 10-17 +eItsMe,eItsMe,eItsMe,eError,eError,eError, 4,eError,# 18-1f +eError,eError,eError,eItsMe,eError,eError,eError,eError,# 20-27 + 5, 6,eError,eError,eError,eError,eError,eError,# 28-2f +eError,eError,eError,eItsMe,eError,eError,eError,eError,# 30-37 +eError,eError,eError,eError,eError,eItsMe,eError,eStart,# 38-3f ) ISO2022CNCharLenTable = (0, 0, 0, 0, 0, 0, 0, 0, 0) @@ -133,51 +133,51 @@ 
'charLenTable': ISO2022CNCharLenTable, 'name': "ISO-2022-CN"} -ISO2022JP_cls = ( \ -2,0,0,0,0,0,0,0, # 00 - 07 -0,0,0,0,0,0,2,2, # 08 - 0f -0,0,0,0,0,0,0,0, # 10 - 17 -0,0,0,1,0,0,0,0, # 18 - 1f -0,0,0,0,7,0,0,0, # 20 - 27 -3,0,0,0,0,0,0,0, # 28 - 2f -0,0,0,0,0,0,0,0, # 30 - 37 -0,0,0,0,0,0,0,0, # 38 - 3f -6,0,4,0,8,0,0,0, # 40 - 47 -0,9,5,0,0,0,0,0, # 48 - 4f -0,0,0,0,0,0,0,0, # 50 - 57 -0,0,0,0,0,0,0,0, # 58 - 5f -0,0,0,0,0,0,0,0, # 60 - 67 -0,0,0,0,0,0,0,0, # 68 - 6f -0,0,0,0,0,0,0,0, # 70 - 77 -0,0,0,0,0,0,0,0, # 78 - 7f -2,2,2,2,2,2,2,2, # 80 - 87 -2,2,2,2,2,2,2,2, # 88 - 8f -2,2,2,2,2,2,2,2, # 90 - 97 -2,2,2,2,2,2,2,2, # 98 - 9f -2,2,2,2,2,2,2,2, # a0 - a7 -2,2,2,2,2,2,2,2, # a8 - af -2,2,2,2,2,2,2,2, # b0 - b7 -2,2,2,2,2,2,2,2, # b8 - bf -2,2,2,2,2,2,2,2, # c0 - c7 -2,2,2,2,2,2,2,2, # c8 - cf -2,2,2,2,2,2,2,2, # d0 - d7 -2,2,2,2,2,2,2,2, # d8 - df -2,2,2,2,2,2,2,2, # e0 - e7 -2,2,2,2,2,2,2,2, # e8 - ef -2,2,2,2,2,2,2,2, # f0 - f7 -2,2,2,2,2,2,2,2, # f8 - ff +ISO2022JP_cls = ( +2,0,0,0,0,0,0,0, # 00 - 07 +0,0,0,0,0,0,2,2, # 08 - 0f +0,0,0,0,0,0,0,0, # 10 - 17 +0,0,0,1,0,0,0,0, # 18 - 1f +0,0,0,0,7,0,0,0, # 20 - 27 +3,0,0,0,0,0,0,0, # 28 - 2f +0,0,0,0,0,0,0,0, # 30 - 37 +0,0,0,0,0,0,0,0, # 38 - 3f +6,0,4,0,8,0,0,0, # 40 - 47 +0,9,5,0,0,0,0,0, # 48 - 4f +0,0,0,0,0,0,0,0, # 50 - 57 +0,0,0,0,0,0,0,0, # 58 - 5f +0,0,0,0,0,0,0,0, # 60 - 67 +0,0,0,0,0,0,0,0, # 68 - 6f +0,0,0,0,0,0,0,0, # 70 - 77 +0,0,0,0,0,0,0,0, # 78 - 7f +2,2,2,2,2,2,2,2, # 80 - 87 +2,2,2,2,2,2,2,2, # 88 - 8f +2,2,2,2,2,2,2,2, # 90 - 97 +2,2,2,2,2,2,2,2, # 98 - 9f +2,2,2,2,2,2,2,2, # a0 - a7 +2,2,2,2,2,2,2,2, # a8 - af +2,2,2,2,2,2,2,2, # b0 - b7 +2,2,2,2,2,2,2,2, # b8 - bf +2,2,2,2,2,2,2,2, # c0 - c7 +2,2,2,2,2,2,2,2, # c8 - cf +2,2,2,2,2,2,2,2, # d0 - d7 +2,2,2,2,2,2,2,2, # d8 - df +2,2,2,2,2,2,2,2, # e0 - e7 +2,2,2,2,2,2,2,2, # e8 - ef +2,2,2,2,2,2,2,2, # f0 - f7 +2,2,2,2,2,2,2,2, # f8 - ff ) -ISO2022JP_st = ( \ -eStart, 3,eError,eStart,eStart,eStart,eStart,eStart,# 00-07 
-eStart,eStart,eError,eError,eError,eError,eError,eError,# 08-0f -eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,# 10-17 -eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eError,eError,# 18-1f -eError, 5,eError,eError,eError, 4,eError,eError,# 20-27 -eError,eError,eError, 6,eItsMe,eError,eItsMe,eError,# 28-2f -eError,eError,eError,eError,eError,eError,eItsMe,eItsMe,# 30-37 -eError,eError,eError,eItsMe,eError,eError,eError,eError,# 38-3f -eError,eError,eError,eError,eItsMe,eError,eStart,eStart,# 40-47 +ISO2022JP_st = ( +eStart, 3,eError,eStart,eStart,eStart,eStart,eStart,# 00-07 +eStart,eStart,eError,eError,eError,eError,eError,eError,# 08-0f +eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,# 10-17 +eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eError,eError,# 18-1f +eError, 5,eError,eError,eError, 4,eError,eError,# 20-27 +eError,eError,eError, 6,eItsMe,eError,eItsMe,eError,# 28-2f +eError,eError,eError,eError,eError,eError,eItsMe,eItsMe,# 30-37 +eError,eError,eError,eItsMe,eError,eError,eError,eError,# 38-3f +eError,eError,eError,eError,eItsMe,eError,eStart,eStart,# 40-47 ) ISO2022JPCharLenTable = (0, 0, 0, 0, 0, 0, 0, 0, 0, 0) @@ -188,47 +188,47 @@ 'charLenTable': ISO2022JPCharLenTable, 'name': "ISO-2022-JP"} -ISO2022KR_cls = ( \ -2,0,0,0,0,0,0,0, # 00 - 07 -0,0,0,0,0,0,0,0, # 08 - 0f -0,0,0,0,0,0,0,0, # 10 - 17 -0,0,0,1,0,0,0,0, # 18 - 1f -0,0,0,0,3,0,0,0, # 20 - 27 -0,4,0,0,0,0,0,0, # 28 - 2f -0,0,0,0,0,0,0,0, # 30 - 37 -0,0,0,0,0,0,0,0, # 38 - 3f -0,0,0,5,0,0,0,0, # 40 - 47 -0,0,0,0,0,0,0,0, # 48 - 4f -0,0,0,0,0,0,0,0, # 50 - 57 -0,0,0,0,0,0,0,0, # 58 - 5f -0,0,0,0,0,0,0,0, # 60 - 67 -0,0,0,0,0,0,0,0, # 68 - 6f -0,0,0,0,0,0,0,0, # 70 - 77 -0,0,0,0,0,0,0,0, # 78 - 7f -2,2,2,2,2,2,2,2, # 80 - 87 -2,2,2,2,2,2,2,2, # 88 - 8f -2,2,2,2,2,2,2,2, # 90 - 97 -2,2,2,2,2,2,2,2, # 98 - 9f -2,2,2,2,2,2,2,2, # a0 - a7 -2,2,2,2,2,2,2,2, # a8 - af -2,2,2,2,2,2,2,2, # b0 - b7 -2,2,2,2,2,2,2,2, # b8 - bf -2,2,2,2,2,2,2,2, # c0 - c7 -2,2,2,2,2,2,2,2, # c8 - cf 
-2,2,2,2,2,2,2,2, # d0 - d7 -2,2,2,2,2,2,2,2, # d8 - df -2,2,2,2,2,2,2,2, # e0 - e7 -2,2,2,2,2,2,2,2, # e8 - ef -2,2,2,2,2,2,2,2, # f0 - f7 -2,2,2,2,2,2,2,2, # f8 - ff +ISO2022KR_cls = ( +2,0,0,0,0,0,0,0, # 00 - 07 +0,0,0,0,0,0,0,0, # 08 - 0f +0,0,0,0,0,0,0,0, # 10 - 17 +0,0,0,1,0,0,0,0, # 18 - 1f +0,0,0,0,3,0,0,0, # 20 - 27 +0,4,0,0,0,0,0,0, # 28 - 2f +0,0,0,0,0,0,0,0, # 30 - 37 +0,0,0,0,0,0,0,0, # 38 - 3f +0,0,0,5,0,0,0,0, # 40 - 47 +0,0,0,0,0,0,0,0, # 48 - 4f +0,0,0,0,0,0,0,0, # 50 - 57 +0,0,0,0,0,0,0,0, # 58 - 5f +0,0,0,0,0,0,0,0, # 60 - 67 +0,0,0,0,0,0,0,0, # 68 - 6f +0,0,0,0,0,0,0,0, # 70 - 77 +0,0,0,0,0,0,0,0, # 78 - 7f +2,2,2,2,2,2,2,2, # 80 - 87 +2,2,2,2,2,2,2,2, # 88 - 8f +2,2,2,2,2,2,2,2, # 90 - 97 +2,2,2,2,2,2,2,2, # 98 - 9f +2,2,2,2,2,2,2,2, # a0 - a7 +2,2,2,2,2,2,2,2, # a8 - af +2,2,2,2,2,2,2,2, # b0 - b7 +2,2,2,2,2,2,2,2, # b8 - bf +2,2,2,2,2,2,2,2, # c0 - c7 +2,2,2,2,2,2,2,2, # c8 - cf +2,2,2,2,2,2,2,2, # d0 - d7 +2,2,2,2,2,2,2,2, # d8 - df +2,2,2,2,2,2,2,2, # e0 - e7 +2,2,2,2,2,2,2,2, # e8 - ef +2,2,2,2,2,2,2,2, # f0 - f7 +2,2,2,2,2,2,2,2, # f8 - ff ) -ISO2022KR_st = ( \ -eStart, 3,eError,eStart,eStart,eStart,eError,eError,# 00-07 -eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,# 08-0f -eItsMe,eItsMe,eError,eError,eError, 4,eError,eError,# 10-17 -eError,eError,eError,eError, 5,eError,eError,eError,# 18-1f -eError,eError,eError,eItsMe,eStart,eStart,eStart,eStart,# 20-27 +ISO2022KR_st = ( +eStart, 3,eError,eStart,eStart,eStart,eError,eError,# 00-07 +eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,# 08-0f +eItsMe,eItsMe,eError,eError,eError, 4,eError,eError,# 10-17 +eError,eError,eError,eError, 5,eError,eError,eError,# 18-1f +eError,eError,eError,eItsMe,eStart,eStart,eStart,eStart,# 20-27 ) ISO2022KRCharLenTable = (0, 0, 0, 0, 0, 0) @@ -238,3 +238,5 @@ 'stateTable': ISO2022KR_st, 'charLenTable': ISO2022KRCharLenTable, 'name': "ISO-2022-KR"} + +# flake8: noqa diff --git a/libs/chardet/eucjpprober.py b/libs/chardet/eucjpprober.py index 
46a8b38b77..8e64fdcc26 100755 --- a/libs/chardet/eucjpprober.py +++ b/libs/chardet/eucjpprober.py @@ -13,25 +13,26 @@ # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. -# +# # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. -# +# # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA # 02110-1301 USA ######################### END LICENSE BLOCK ######################### -import constants, sys -from constants import eStart, eError, eItsMe -from mbcharsetprober import MultiByteCharSetProber -from codingstatemachine import CodingStateMachine -from chardistribution import EUCJPDistributionAnalysis -from jpcntx import EUCJPContextAnalysis -from mbcssm import EUCJPSMModel +import sys +from . 
import constants +from .mbcharsetprober import MultiByteCharSetProber +from .codingstatemachine import CodingStateMachine +from .chardistribution import EUCJPDistributionAnalysis +from .jpcntx import EUCJPContextAnalysis +from .mbcssm import EUCJPSMModel + class EUCJPProber(MultiByteCharSetProber): def __init__(self): @@ -44,37 +45,41 @@ def __init__(self): def reset(self): MultiByteCharSetProber.reset(self) self._mContextAnalyzer.reset() - + def get_charset_name(self): return "EUC-JP" def feed(self, aBuf): aLen = len(aBuf) for i in range(0, aLen): + # PY3K: aBuf is a byte array, so aBuf[i] is an int, not a byte codingState = self._mCodingSM.next_state(aBuf[i]) - if codingState == eError: + if codingState == constants.eError: if constants._debug: - sys.stderr.write(self.get_charset_name() + ' prober hit error at byte ' + str(i) + '\n') + sys.stderr.write(self.get_charset_name() + + ' prober hit error at byte ' + str(i) + + '\n') self._mState = constants.eNotMe break - elif codingState == eItsMe: + elif codingState == constants.eItsMe: self._mState = constants.eFoundIt break - elif codingState == eStart: + elif codingState == constants.eStart: charLen = self._mCodingSM.get_current_charlen() if i == 0: self._mLastChar[1] = aBuf[0] self._mContextAnalyzer.feed(self._mLastChar, charLen) self._mDistributionAnalyzer.feed(self._mLastChar, charLen) else: - self._mContextAnalyzer.feed(aBuf[i-1:i+1], charLen) - self._mDistributionAnalyzer.feed(aBuf[i-1:i+1], charLen) - + self._mContextAnalyzer.feed(aBuf[i - 1:i + 1], charLen) + self._mDistributionAnalyzer.feed(aBuf[i - 1:i + 1], + charLen) + self._mLastChar[0] = aBuf[aLen - 1] - + if self.get_state() == constants.eDetecting: - if self._mContextAnalyzer.got_enough_data() and \ - (self.get_confidence() > constants.SHORTCUT_THRESHOLD): + if (self._mContextAnalyzer.got_enough_data() and + (self.get_confidence() > constants.SHORTCUT_THRESHOLD)): self._mState = constants.eFoundIt return self.get_state() diff --git 
a/libs/chardet/euckrfreq.py b/libs/chardet/euckrfreq.py index 1463fa1d85..a179e4c21c 100755 --- a/libs/chardet/euckrfreq.py +++ b/libs/chardet/euckrfreq.py @@ -592,3 +592,5 @@ 8704,8705,8706,8707,8708,8709,8710,8711,8712,8713,8714,8715,8716,8717,8718,8719, 8720,8721,8722,8723,8724,8725,8726,8727,8728,8729,8730,8731,8732,8733,8734,8735, 8736,8737,8738,8739,8740,8741) + +# flake8: noqa diff --git a/libs/chardet/euckrprober.py b/libs/chardet/euckrprober.py index bd697ebf35..5982a46b60 100755 --- a/libs/chardet/euckrprober.py +++ b/libs/chardet/euckrprober.py @@ -13,22 +13,23 @@ # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. -# +# # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. 
-# +# # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA # 02110-1301 USA ######################### END LICENSE BLOCK ######################### -from mbcharsetprober import MultiByteCharSetProber -from codingstatemachine import CodingStateMachine -from chardistribution import EUCKRDistributionAnalysis -from mbcssm import EUCKRSMModel +from .mbcharsetprober import MultiByteCharSetProber +from .codingstatemachine import CodingStateMachine +from .chardistribution import EUCKRDistributionAnalysis +from .mbcssm import EUCKRSMModel + class EUCKRProber(MultiByteCharSetProber): def __init__(self): diff --git a/libs/chardet/euctwfreq.py b/libs/chardet/euctwfreq.py index c057209505..576e7504dc 100755 --- a/libs/chardet/euctwfreq.py +++ b/libs/chardet/euctwfreq.py @@ -13,12 +13,12 @@ # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. -# +# # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. 
-# +# # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA @@ -26,8 +26,8 @@ ######################### END LICENSE BLOCK ######################### # EUCTW frequency table -# Converted from big5 work -# by Taiwan's Mandarin Promotion Council +# Converted from big5 work +# by Taiwan's Mandarin Promotion Council # # 128 --> 0.42261 @@ -38,15 +38,15 @@ # # Idea Distribution Ratio = 0.74851/(1-0.74851) =2.98 # Random Distribution Ration = 512/(5401-512)=0.105 -# +# # Typical Distribution Ratio about 25% of Ideal one, still much higher than RDR EUCTW_TYPICAL_DISTRIBUTION_RATIO = 0.75 -# Char to FreqOrder table , +# Char to FreqOrder table , EUCTW_TABLE_SIZE = 8102 -EUCTWCharToFreqOrder = ( \ +EUCTWCharToFreqOrder = ( 1,1800,1506, 255,1431, 198, 9, 82, 6,7310, 177, 202,3615,1256,2808, 110, # 2742 3735, 33,3241, 261, 76, 44,2113, 16,2931,2184,1176, 659,3868, 26,3404,2643, # 2758 1198,3869,3313,4060, 410,2211, 302, 590, 361,1963, 8, 204, 58,4296,7311,1931, # 2774 @@ -424,3 +424,5 @@ 8694,8695,8696,8697,8698,8699,8700,8701,8702,8703,8704,8705,8706,8707,8708,8709, # 8710 8710,8711,8712,8713,8714,8715,8716,8717,8718,8719,8720,8721,8722,8723,8724,8725, # 8726 8726,8727,8728,8729,8730,8731,8732,8733,8734,8735,8736,8737,8738,8739,8740,8741) # 8742 + +# flake8: noqa diff --git a/libs/chardet/euctwprober.py b/libs/chardet/euctwprober.py index b073f134fd..fe652fe37a 100755 --- a/libs/chardet/euctwprober.py +++ b/libs/chardet/euctwprober.py @@ -25,10 +25,10 @@ # 02110-1301 USA ######################### END LICENSE BLOCK ######################### -from mbcharsetprober import MultiByteCharSetProber -from codingstatemachine import CodingStateMachine -from chardistribution import EUCTWDistributionAnalysis -from mbcssm import EUCTWSMModel +from .mbcharsetprober import MultiByteCharSetProber +from .codingstatemachine import CodingStateMachine +from 
.chardistribution import EUCTWDistributionAnalysis +from .mbcssm import EUCTWSMModel class EUCTWProber(MultiByteCharSetProber): def __init__(self): diff --git a/libs/chardet/gb2312freq.py b/libs/chardet/gb2312freq.py index 7a4d5a1b34..1238f510fc 100755 --- a/libs/chardet/gb2312freq.py +++ b/libs/chardet/gb2312freq.py @@ -13,12 +13,12 @@ # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. -# +# # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. -# +# # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA @@ -36,14 +36,14 @@ # # Ideal Distribution Ratio = 0.79135/(1-0.79135) = 3.79 # Random Distribution Ration = 512 / (3755 - 512) = 0.157 -# +# # Typical Distribution Ratio about 25% of Ideal one, still much higher that RDR GB2312_TYPICAL_DISTRIBUTION_RATIO = 0.9 GB2312_TABLE_SIZE = 3760 -GB2312CharToFreqOrder = ( \ +GB2312CharToFreqOrder = ( 1671, 749,1443,2364,3924,3807,2330,3921,1704,3463,2691,1511,1515, 572,3191,2205, 2361, 224,2558, 479,1711, 963,3162, 440,4060,1905,2966,2947,3580,2647,3961,3842, 2204, 869,4207, 970,2678,5626,2944,2956,1479,4048, 514,3595, 588,1346,2820,3409, @@ -469,3 +469,4 @@ 5867,5507,6273,4206,6274,4789,6098,6764,3619,3646,3833,3804,2394,3788,4936,3978, 4866,4899,6099,6100,5559,6478,6765,3599,5868,6101,5869,5870,6275,6766,4527,6767) +# flake8: noqa diff --git a/libs/chardet/gb2312prober.py b/libs/chardet/gb2312prober.py index 91eb3925a4..0325a2d861 100755 --- a/libs/chardet/gb2312prober.py +++ b/libs/chardet/gb2312prober.py @@ -25,10 +25,10 @@ # 02110-1301 USA 
######################### END LICENSE BLOCK ######################### -from mbcharsetprober import MultiByteCharSetProber -from codingstatemachine import CodingStateMachine -from chardistribution import GB2312DistributionAnalysis -from mbcssm import GB2312SMModel +from .mbcharsetprober import MultiByteCharSetProber +from .codingstatemachine import CodingStateMachine +from .chardistribution import GB2312DistributionAnalysis +from .mbcssm import GB2312SMModel class GB2312Prober(MultiByteCharSetProber): def __init__(self): diff --git a/libs/chardet/hebrewprober.py b/libs/chardet/hebrewprober.py index a2b1eaa992..ba225c5ef4 100755 --- a/libs/chardet/hebrewprober.py +++ b/libs/chardet/hebrewprober.py @@ -13,20 +13,21 @@ # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. -# +# # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. -# +# # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA # 02110-1301 USA ######################### END LICENSE BLOCK ######################### -from charsetprober import CharSetProber -import constants +from .charsetprober import CharSetProber +from .constants import eNotMe, eDetecting +from .compat import wrap_ord # This prober doesn't actually recognize a language or a charset. # It is a helper prober for the use of the Hebrew model probers @@ -35,40 +36,40 @@ # # Four main charsets exist in Hebrew: # "ISO-8859-8" - Visual Hebrew -# "windows-1255" - Logical Hebrew +# "windows-1255" - Logical Hebrew # "ISO-8859-8-I" - Logical Hebrew # "x-mac-hebrew" - ?? 
Logical Hebrew ?? # # Both "ISO" charsets use a completely identical set of code points, whereas -# "windows-1255" and "x-mac-hebrew" are two different proper supersets of +# "windows-1255" and "x-mac-hebrew" are two different proper supersets of # these code points. windows-1255 defines additional characters in the range -# 0x80-0x9F as some misc punctuation marks as well as some Hebrew-specific +# 0x80-0x9F as some misc punctuation marks as well as some Hebrew-specific # diacritics and additional 'Yiddish' ligature letters in the range 0xc0-0xd6. -# x-mac-hebrew defines similar additional code points but with a different +# x-mac-hebrew defines similar additional code points but with a different # mapping. # -# As far as an average Hebrew text with no diacritics is concerned, all four -# charsets are identical with respect to code points. Meaning that for the -# main Hebrew alphabet, all four map the same values to all 27 Hebrew letters +# As far as an average Hebrew text with no diacritics is concerned, all four +# charsets are identical with respect to code points. Meaning that for the +# main Hebrew alphabet, all four map the same values to all 27 Hebrew letters # (including final letters). # # The dominant difference between these charsets is their directionality. # "Visual" directionality means that the text is ordered as if the renderer is -# not aware of a BIDI rendering algorithm. The renderer sees the text and -# draws it from left to right. The text itself when ordered naturally is read +# not aware of a BIDI rendering algorithm. The renderer sees the text and +# draws it from left to right. The text itself when ordered naturally is read # backwards. 
A buffer of Visual Hebrew generally looks like so: # "[last word of first line spelled backwards] [whole line ordered backwards -# and spelled backwards] [first word of first line spelled backwards] +# and spelled backwards] [first word of first line spelled backwards] # [end of line] [last word of second line] ... etc' " # adding punctuation marks, numbers and English text to visual text is # naturally also "visual" and from left to right. -# +# # "Logical" directionality means the text is ordered "naturally" according to -# the order it is read. It is the responsibility of the renderer to display -# the text from right to left. A BIDI algorithm is used to place general +# the order it is read. It is the responsibility of the renderer to display +# the text from right to left. A BIDI algorithm is used to place general # punctuation marks, numbers and English text in the text. # -# Texts in x-mac-hebrew are almost impossible to find on the Internet. From +# Texts in x-mac-hebrew are almost impossible to find on the Internet. From # what little evidence I could find, it seems that its general directionality # is Logical. # @@ -76,17 +77,17 @@ # charsets: # Visual Hebrew - "ISO-8859-8" - backwards text - Words and sentences are # backwards while line order is natural. For charset recognition purposes -# the line order is unimportant (In fact, for this implementation, even +# the line order is unimportant (In fact, for this implementation, even # word order is unimportant). # Logical Hebrew - "windows-1255" - normal, naturally ordered text. # -# "ISO-8859-8-I" is a subset of windows-1255 and doesn't need to be +# "ISO-8859-8-I" is a subset of windows-1255 and doesn't need to be # specifically identified. # "x-mac-hebrew" is also identified as windows-1255. A text in x-mac-hebrew # that contain special punctuation marks or diacritics is displayed with # some unconverted characters showing as question marks. 
This problem might # be corrected using another model prober for x-mac-hebrew. Due to the fact -# that x-mac-hebrew texts are so rare, writing another model prober isn't +# that x-mac-hebrew texts are so rare, writing another model prober isn't # worth the effort and performance hit. # #### The Prober #### @@ -126,28 +127,31 @@ # charset identified, either "windows-1255" or "ISO-8859-8". # windows-1255 / ISO-8859-8 code points of interest -FINAL_KAF = '\xea' -NORMAL_KAF = '\xeb' -FINAL_MEM = '\xed' -NORMAL_MEM = '\xee' -FINAL_NUN = '\xef' -NORMAL_NUN = '\xf0' -FINAL_PE = '\xf3' -NORMAL_PE = '\xf4' -FINAL_TSADI = '\xf5' -NORMAL_TSADI = '\xf6' +FINAL_KAF = 0xea +NORMAL_KAF = 0xeb +FINAL_MEM = 0xed +NORMAL_MEM = 0xee +FINAL_NUN = 0xef +NORMAL_NUN = 0xf0 +FINAL_PE = 0xf3 +NORMAL_PE = 0xf4 +FINAL_TSADI = 0xf5 +NORMAL_TSADI = 0xf6 # Minimum Visual vs Logical final letter score difference. -# If the difference is below this, don't rely solely on the final letter score distance. +# If the difference is below this, don't rely solely on the final letter score +# distance. MIN_FINAL_CHAR_DISTANCE = 5 # Minimum Visual vs Logical model score difference. -# If the difference is below this, don't rely at all on the model score distance. +# If the difference is below this, don't rely at all on the model score +# distance. 
MIN_MODEL_DISTANCE = 0.01 VISUAL_HEBREW_NAME = "ISO-8859-8" LOGICAL_HEBREW_NAME = "windows-1255" + class HebrewProber(CharSetProber): def __init__(self): CharSetProber.__init__(self) @@ -159,84 +163,91 @@ def reset(self): self._mFinalCharLogicalScore = 0 self._mFinalCharVisualScore = 0 # The two last characters seen in the previous buffer, - # mPrev and mBeforePrev are initialized to space in order to simulate a word - # delimiter at the beginning of the data + # mPrev and mBeforePrev are initialized to space in order to simulate + # a word delimiter at the beginning of the data self._mPrev = ' ' self._mBeforePrev = ' ' # These probers are owned by the group prober. - + def set_model_probers(self, logicalProber, visualProber): self._mLogicalProber = logicalProber self._mVisualProber = visualProber def is_final(self, c): - return c in [FINAL_KAF, FINAL_MEM, FINAL_NUN, FINAL_PE, FINAL_TSADI] + return wrap_ord(c) in [FINAL_KAF, FINAL_MEM, FINAL_NUN, FINAL_PE, + FINAL_TSADI] def is_non_final(self, c): - # The normal Tsadi is not a good Non-Final letter due to words like - # 'lechotet' (to chat) containing an apostrophe after the tsadi. This - # apostrophe is converted to a space in FilterWithoutEnglishLetters causing - # the Non-Final tsadi to appear at an end of a word even though this is not - # the case in the original text. - # The letters Pe and Kaf rarely display a related behavior of not being a - # good Non-Final letter. Words like 'Pop', 'Winamp' and 'Mubarak' for - # example legally end with a Non-Final Pe or Kaf. However, the benefit of - # these letters as Non-Final letters outweighs the damage since these words - # are quite rare. - return c in [NORMAL_KAF, NORMAL_MEM, NORMAL_NUN, NORMAL_PE] - + # The normal Tsadi is not a good Non-Final letter due to words like + # 'lechotet' (to chat) containing an apostrophe after the tsadi. 
This + # apostrophe is converted to a space in FilterWithoutEnglishLetters + # causing the Non-Final tsadi to appear at an end of a word even + # though this is not the case in the original text. + # The letters Pe and Kaf rarely display a related behavior of not being + # a good Non-Final letter. Words like 'Pop', 'Winamp' and 'Mubarak' + # for example legally end with a Non-Final Pe or Kaf. However, the + # benefit of these letters as Non-Final letters outweighs the damage + # since these words are quite rare. + return wrap_ord(c) in [NORMAL_KAF, NORMAL_MEM, NORMAL_NUN, NORMAL_PE] + def feed(self, aBuf): # Final letter analysis for logical-visual decision. - # Look for evidence that the received buffer is either logical Hebrew or - # visual Hebrew. + # Look for evidence that the received buffer is either logical Hebrew + # or visual Hebrew. # The following cases are checked: - # 1) A word longer than 1 letter, ending with a final letter. This is an - # indication that the text is laid out "naturally" since the final letter - # really appears at the end. +1 for logical score. - # 2) A word longer than 1 letter, ending with a Non-Final letter. In normal - # Hebrew, words ending with Kaf, Mem, Nun, Pe or Tsadi, should not end with - # the Non-Final form of that letter. Exceptions to this rule are mentioned - # above in isNonFinal(). This is an indication that the text is laid out - # backwards. +1 for visual score - # 3) A word longer than 1 letter, starting with a final letter. Final letters - # should not appear at the beginning of a word. This is an indication that - # the text is laid out backwards. +1 for visual score. - # - # The visual score and logical score are accumulated throughout the text and - # are finally checked against each other in GetCharSetName(). - # No checking for final letters in the middle of words is done since that case - # is not an indication for either Logical or Visual text. 
- # - # We automatically filter out all 7-bit characters (replace them with spaces) - # so the word boundary detection works properly. [MAP] + # 1) A word longer than 1 letter, ending with a final letter. This is + # an indication that the text is laid out "naturally" since the + # final letter really appears at the end. +1 for logical score. + # 2) A word longer than 1 letter, ending with a Non-Final letter. In + # normal Hebrew, words ending with Kaf, Mem, Nun, Pe or Tsadi, + # should not end with the Non-Final form of that letter. Exceptions + # to this rule are mentioned above in isNonFinal(). This is an + # indication that the text is laid out backwards. +1 for visual + # score + # 3) A word longer than 1 letter, starting with a final letter. Final + # letters should not appear at the beginning of a word. This is an + # indication that the text is laid out backwards. +1 for visual + # score. + # + # The visual score and logical score are accumulated throughout the + # text and are finally checked against each other in GetCharSetName(). + # No checking for final letters in the middle of words is done since + # that case is not an indication for either Logical or Visual text. + # + # We automatically filter out all 7-bit characters (replace them with + # spaces) so the word boundary detection works properly. [MAP] - if self.get_state() == constants.eNotMe: + if self.get_state() == eNotMe: # Both model probers say it's not them. No reason to continue. 
- return constants.eNotMe + return eNotMe aBuf = self.filter_high_bit_only(aBuf) - + for cur in aBuf: if cur == ' ': # We stand on a space - a word just ended if self._mBeforePrev != ' ': - # next-to-last char was not a space so self._mPrev is not a 1 letter word + # next-to-last char was not a space so self._mPrev is not a + # 1 letter word if self.is_final(self._mPrev): # case (1) [-2:not space][-1:final letter][cur:space] self._mFinalCharLogicalScore += 1 elif self.is_non_final(self._mPrev): - # case (2) [-2:not space][-1:Non-Final letter][cur:space] + # case (2) [-2:not space][-1:Non-Final letter][ + # cur:space] self._mFinalCharVisualScore += 1 else: # Not standing on a space - if (self._mBeforePrev == ' ') and (self.is_final(self._mPrev)) and (cur != ' '): + if ((self._mBeforePrev == ' ') and + (self.is_final(self._mPrev)) and (cur != ' ')): # case (3) [-2:space][-1:final letter][cur:not space] self._mFinalCharVisualScore += 1 self._mBeforePrev = self._mPrev self._mPrev = cur - # Forever detecting, till the end or until both model probers return eNotMe (handled above) - return constants.eDetecting + # Forever detecting, till the end or until both model probers return + # eNotMe (handled above) + return eDetecting def get_charset_name(self): # Make the decision: is it Logical or Visual? @@ -248,22 +259,25 @@ def get_charset_name(self): return VISUAL_HEBREW_NAME # It's not dominant enough, try to rely on the model scores instead. - modelsub = self._mLogicalProber.get_confidence() - self._mVisualProber.get_confidence() + modelsub = (self._mLogicalProber.get_confidence() + - self._mVisualProber.get_confidence()) if modelsub > MIN_MODEL_DISTANCE: return LOGICAL_HEBREW_NAME if modelsub < -MIN_MODEL_DISTANCE: return VISUAL_HEBREW_NAME - # Still no good, back to final letter distance, maybe it'll save the day. + # Still no good, back to final letter distance, maybe it'll save the + # day. 
if finalsub < 0.0: return VISUAL_HEBREW_NAME - # (finalsub > 0 - Logical) or (don't know what to do) default to Logical. + # (finalsub > 0 - Logical) or (don't know what to do) default to + # Logical. return LOGICAL_HEBREW_NAME def get_state(self): # Remain active as long as any of the model probers are active. - if (self._mLogicalProber.get_state() == constants.eNotMe) and \ - (self._mVisualProber.get_state() == constants.eNotMe): - return constants.eNotMe - return constants.eDetecting + if (self._mLogicalProber.get_state() == eNotMe) and \ + (self._mVisualProber.get_state() == eNotMe): + return eNotMe + return eDetecting diff --git a/libs/chardet/jisfreq.py b/libs/chardet/jisfreq.py index 5fe4a5c3fe..064345b086 100755 --- a/libs/chardet/jisfreq.py +++ b/libs/chardet/jisfreq.py @@ -13,12 +13,12 @@ # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. -# +# # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. -# +# # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA @@ -28,7 +28,7 @@ # Sampling from about 20M text materials include literature and computer technology # # Japanese frequency table, applied to both S-JIS and EUC-JP -# They are sorted in order. +# They are sorted in order. 
# 128 --> 0.77094 # 256 --> 0.85710 @@ -38,15 +38,15 @@ # # Ideal Distribution Ratio = 0.92635 / (1-0.92635) = 12.58 # Random Distribution Ration = 512 / (2965+62+83+86-512) = 0.191 -# -# Typical Distribution Ratio, 25% of IDR +# +# Typical Distribution Ratio, 25% of IDR JIS_TYPICAL_DISTRIBUTION_RATIO = 3.0 -# Char to FreqOrder table , +# Char to FreqOrder table , JIS_TABLE_SIZE = 4368 -JISCharToFreqOrder = ( \ +JISCharToFreqOrder = ( 40, 1, 6, 182, 152, 180, 295,2127, 285, 381,3295,4304,3068,4606,3165,3510, # 16 3511,1822,2785,4607,1193,2226,5070,4608, 171,2996,1247, 18, 179,5071, 856,1661, # 32 1262,5072, 619, 127,3431,3512,3230,1899,1700, 232, 228,1294,1298, 284, 283,2041, # 48 @@ -565,3 +565,5 @@ 8224,8225,8226,8227,8228,8229,8230,8231,8232,8233,8234,8235,8236,8237,8238,8239, # 8240 8240,8241,8242,8243,8244,8245,8246,8247,8248,8249,8250,8251,8252,8253,8254,8255, # 8256 8256,8257,8258,8259,8260,8261,8262,8263,8264,8265,8266,8267,8268,8269,8270,8271) # 8272 + +# flake8: noqa diff --git a/libs/chardet/jpcntx.py b/libs/chardet/jpcntx.py index 93db4a9cba..59aeb6a878 100755 --- a/libs/chardet/jpcntx.py +++ b/libs/chardet/jpcntx.py @@ -13,19 +13,19 @@ # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. -# +# # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. 
-# +# # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA # 02110-1301 USA ######################### END LICENSE BLOCK ######################### -import constants +from .compat import wrap_ord NUM_OF_CATEGORY = 6 DONT_KNOW = -1 @@ -34,7 +34,7 @@ MINIMUM_DATA_THRESHOLD = 4 # This is hiragana 2-char sequence table, the number in each cell represents its frequency category -jp2CharContext = ( \ +jp2CharContext = ( (0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1), (2,4,0,4,0,3,0,4,0,3,4,4,4,2,4,3,3,4,3,2,3,3,4,2,3,3,3,2,4,1,4,3,3,1,5,4,3,4,3,4,3,5,3,0,3,5,4,2,0,3,1,0,3,3,0,3,3,0,1,1,0,4,3,0,3,3,0,4,0,2,0,3,5,5,5,5,4,0,4,1,0,3,4), (0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2), @@ -123,26 +123,33 @@ class JapaneseContextAnalysis: def __init__(self): self.reset() - + def reset(self): - self._mTotalRel = 0 # total sequence received - self._mRelSample = [0] * NUM_OF_CATEGORY # category counters, each interger counts sequence in its category - self._mNeedToSkipCharNum = 0 # if last byte in current buffer is not the last byte of a character, we need to know how many bytes to skip in next buffer - self._mLastCharOrder = -1 # The order of previous char - self._mDone = constants.False # If this flag is set to constants.True, detection is done and conclusion has been made + self._mTotalRel = 0 # total sequence received + # category counters, each interger counts sequence in its category + self._mRelSample = [0] * NUM_OF_CATEGORY + # if last byte in current buffer is not the last byte of a character, + # we need to know how many bytes to skip in next buffer + self._mNeedToSkipCharNum = 0 + 
self._mLastCharOrder = -1 # The order of previous char + # If this flag is set to True, detection is done and conclusion has + # been made + self._mDone = False def feed(self, aBuf, aLen): - if self._mDone: return - + if self._mDone: + return + # The buffer we got is byte oriented, and a character may span in more than one - # buffers. In case the last one or two byte in last buffer is not complete, we - # record how many byte needed to complete that character and skip these bytes here. - # We can choose to record those bytes as well and analyse the character once it - # is complete, but since a character will not make much difference, by simply skipping + # buffers. In case the last one or two byte in last buffer is not + # complete, we record how many byte needed to complete that character + # and skip these bytes here. We can choose to record those bytes as + # well and analyse the character once it is complete, but since a + # character will not make much difference, by simply skipping # this character will simply our logic and improve performance. i = self._mNeedToSkipCharNum while i < aLen: - order, charLen = self.get_order(aBuf[i:i+2]) + order, charLen = self.get_order(aBuf[i:i + 2]) i += charLen if i > aLen: self._mNeedToSkipCharNum = i - aLen @@ -151,14 +158,14 @@ def feed(self, aBuf, aLen): if (order != -1) and (self._mLastCharOrder != -1): self._mTotalRel += 1 if self._mTotalRel > MAX_REL_THRESHOLD: - self._mDone = constants.True + self._mDone = True break self._mRelSample[jp2CharContext[self._mLastCharOrder][order]] += 1 self._mLastCharOrder = order def got_enough_data(self): return self._mTotalRel > ENOUGH_REL_THRESHOLD - + def get_confidence(self): # This is just one way to calculate confidence. It works well for me. 
if self._mTotalRel > MINIMUM_DATA_THRESHOLD: @@ -166,45 +173,55 @@ def get_confidence(self): else: return DONT_KNOW - def get_order(self, aStr): + def get_order(self, aBuf): return -1, 1 - + class SJISContextAnalysis(JapaneseContextAnalysis): - def get_order(self, aStr): - if not aStr: return -1, 1 + def __init__(self): + self.charset_name = "SHIFT_JIS" + + def get_charset_name(self): + return self.charset_name + + def get_order(self, aBuf): + if not aBuf: + return -1, 1 # find out current char's byte length - if ((aStr[0] >= '\x81') and (aStr[0] <= '\x9F')) or \ - ((aStr[0] >= '\xE0') and (aStr[0] <= '\xFC')): + first_char = wrap_ord(aBuf[0]) + if ((0x81 <= first_char <= 0x9F) or (0xE0 <= first_char <= 0xFC)): charLen = 2 + if (first_char == 0x87) or (0xFA <= first_char <= 0xFC): + self.charset_name = "CP932" else: charLen = 1 # return its order if it is hiragana - if len(aStr) > 1: - if (aStr[0] == '\202') and \ - (aStr[1] >= '\x9F') and \ - (aStr[1] <= '\xF1'): - return ord(aStr[1]) - 0x9F, charLen + if len(aBuf) > 1: + second_char = wrap_ord(aBuf[1]) + if (first_char == 202) and (0x9F <= second_char <= 0xF1): + return second_char - 0x9F, charLen return -1, charLen class EUCJPContextAnalysis(JapaneseContextAnalysis): - def get_order(self, aStr): - if not aStr: return -1, 1 + def get_order(self, aBuf): + if not aBuf: + return -1, 1 # find out current char's byte length - if (aStr[0] == '\x8E') or \ - ((aStr[0] >= '\xA1') and (aStr[0] <= '\xFE')): + first_char = wrap_ord(aBuf[0]) + if (first_char == 0x8E) or (0xA1 <= first_char <= 0xFE): charLen = 2 - elif aStr[0] == '\x8F': + elif first_char == 0x8F: charLen = 3 else: charLen = 1 # return its order if it is hiragana - if len(aStr) > 1: - if (aStr[0] == '\xA4') and \ - (aStr[1] >= '\xA1') and \ - (aStr[1] <= '\xF3'): - return ord(aStr[1]) - 0xA1, charLen + if len(aBuf) > 1: + second_char = wrap_ord(aBuf[1]) + if (first_char == 0xA4) and (0xA1 <= second_char <= 0xF3): + return second_char - 0xA1, charLen return -1, 
charLen + +# flake8: noqa diff --git a/libs/chardet/langbulgarianmodel.py b/libs/chardet/langbulgarianmodel.py index bf5641e7bd..e5788fc64a 100755 --- a/libs/chardet/langbulgarianmodel.py +++ b/libs/chardet/langbulgarianmodel.py @@ -13,30 +13,28 @@ # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. -# +# # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. -# +# # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA # 02110-1301 USA ######################### END LICENSE BLOCK ######################### -import constants - # 255: Control characters that usually does not exist in any text # 254: Carriage/Return # 253: symbol (punctuation) that does not belong to word # 252: 0 - 9 # Character Mapping Table: -# this table is modified base on win1251BulgarianCharToOrderMap, so +# this table is modified base on win1251BulgarianCharToOrderMap, so # only number <64 is sure valid -Latin5_BulgarianCharToOrderMap = ( \ +Latin5_BulgarianCharToOrderMap = ( 255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10 253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20 @@ -55,7 +53,7 @@ 62,242,243,244, 58,245, 98,246,247,248,249,250,251, 91,252,253, # f0 ) -win1251BulgarianCharToOrderMap = ( \ +win1251BulgarianCharToOrderMap = ( 255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10 
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20 @@ -74,13 +72,13 @@ 7, 8, 5, 19, 29, 25, 22, 21, 27, 24, 17, 75, 52,253, 42, 16, # f0 ) -# Model Table: +# Model Table: # total sequences: 100% # first 512 sequences: 96.9392% # first 1024 sequences:3.0618% # rest sequences: 0.2992% -# negative sequences: 0.0020% -BulgarianLangModel = ( \ +# negative sequences: 0.0020% +BulgarianLangModel = ( 0,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,2,3,3,3,3,3, 3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,0,3,3,3,2,2,3,2,2,1,2,2, 3,1,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,0,3,3,3,3,3,3,3,3,3,3,0,3,0,1, @@ -211,18 +209,21 @@ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1, ) -Latin5BulgarianModel = { \ +Latin5BulgarianModel = { 'charToOrderMap': Latin5_BulgarianCharToOrderMap, 'precedenceMatrix': BulgarianLangModel, 'mTypicalPositiveRatio': 0.969392, - 'keepEnglishLetter': constants.False, + 'keepEnglishLetter': False, 'charsetName': "ISO-8859-5" } -Win1251BulgarianModel = { \ +Win1251BulgarianModel = { 'charToOrderMap': win1251BulgarianCharToOrderMap, 'precedenceMatrix': BulgarianLangModel, 'mTypicalPositiveRatio': 0.969392, - 'keepEnglishLetter': constants.False, + 'keepEnglishLetter': False, 'charsetName': "windows-1251" } + + +# flake8: noqa diff --git a/libs/chardet/langcyrillicmodel.py b/libs/chardet/langcyrillicmodel.py index e604cc73d5..a86f54bd54 100755 --- a/libs/chardet/langcyrillicmodel.py +++ b/libs/chardet/langcyrillicmodel.py @@ -13,23 +13,21 @@ # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. -# +# # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. 
-# +# # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA # 02110-1301 USA ######################### END LICENSE BLOCK ######################### -import constants - # KOI8-R language model # Character Mapping Table: -KOI8R_CharToOrderMap = ( \ +KOI8R_CharToOrderMap = ( 255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10 253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20 @@ -48,7 +46,7 @@ 35, 43, 45, 32, 40, 52, 56, 33, 61, 62, 51, 57, 47, 63, 50, 70, # f0 ) -win1251_CharToOrderMap = ( \ +win1251_CharToOrderMap = ( 255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10 253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20 @@ -67,7 +65,7 @@ 9, 7, 6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27, 16, ) -latin5_CharToOrderMap = ( \ +latin5_CharToOrderMap = ( 255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10 253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20 @@ -86,7 +84,7 @@ 239, 68,240,241,242,243,244,245,246,247,248,249,250,251,252,255, ) -macCyrillic_CharToOrderMap = ( \ +macCyrillic_CharToOrderMap = ( 255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10 253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20 @@ -105,7 +103,7 @@ 9, 7, 6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27,255, ) -IBM855_CharToOrderMap = ( \ +IBM855_CharToOrderMap = ( 255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10 
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20 @@ -124,7 +122,7 @@ 250, 18, 62, 20, 51, 25, 57, 30, 47, 29, 63, 22, 50,251,252,255, ) -IBM866_CharToOrderMap = ( \ +IBM866_CharToOrderMap = ( 255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10 253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20 @@ -143,13 +141,13 @@ 239, 68,240,241,242,243,244,245,246,247,248,249,250,251,252,255, ) -# Model Table: +# Model Table: # total sequences: 100% # first 512 sequences: 97.6601% # first 1024 sequences: 2.3389% # rest sequences: 0.1237% -# negative sequences: 0.0009% -RussianLangModel = ( \ +# negative sequences: 0.0009% +RussianLangModel = ( 0,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,1,1,3,3,3,3,1,3,3,3,2,3,2,3,3, 3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,0,3,2,2,2,2,2,0,0,2, 3,3,3,2,3,3,3,3,3,3,3,3,3,3,2,3,3,0,0,3,3,3,3,3,3,3,3,3,2,3,2,0, @@ -280,50 +278,52 @@ 0,0,0,0,0,1,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0, ) -Koi8rModel = { \ +Koi8rModel = { 'charToOrderMap': KOI8R_CharToOrderMap, 'precedenceMatrix': RussianLangModel, 'mTypicalPositiveRatio': 0.976601, - 'keepEnglishLetter': constants.False, + 'keepEnglishLetter': False, 'charsetName': "KOI8-R" } -Win1251CyrillicModel = { \ +Win1251CyrillicModel = { 'charToOrderMap': win1251_CharToOrderMap, 'precedenceMatrix': RussianLangModel, 'mTypicalPositiveRatio': 0.976601, - 'keepEnglishLetter': constants.False, + 'keepEnglishLetter': False, 'charsetName': "windows-1251" } -Latin5CyrillicModel = { \ +Latin5CyrillicModel = { 'charToOrderMap': latin5_CharToOrderMap, 'precedenceMatrix': RussianLangModel, 'mTypicalPositiveRatio': 0.976601, - 'keepEnglishLetter': constants.False, + 'keepEnglishLetter': False, 'charsetName': "ISO-8859-5" } -MacCyrillicModel = { \ +MacCyrillicModel = { 'charToOrderMap': macCyrillic_CharToOrderMap, 'precedenceMatrix': RussianLangModel, 'mTypicalPositiveRatio': 0.976601, - 
'keepEnglishLetter': constants.False, + 'keepEnglishLetter': False, 'charsetName': "MacCyrillic" }; -Ibm866Model = { \ +Ibm866Model = { 'charToOrderMap': IBM866_CharToOrderMap, 'precedenceMatrix': RussianLangModel, 'mTypicalPositiveRatio': 0.976601, - 'keepEnglishLetter': constants.False, + 'keepEnglishLetter': False, 'charsetName': "IBM866" } -Ibm855Model = { \ +Ibm855Model = { 'charToOrderMap': IBM855_CharToOrderMap, 'precedenceMatrix': RussianLangModel, 'mTypicalPositiveRatio': 0.976601, - 'keepEnglishLetter': constants.False, + 'keepEnglishLetter': False, 'charsetName': "IBM855" } + +# flake8: noqa diff --git a/libs/chardet/langgreekmodel.py b/libs/chardet/langgreekmodel.py index ec6d49e800..ddb5837655 100755 --- a/libs/chardet/langgreekmodel.py +++ b/libs/chardet/langgreekmodel.py @@ -13,27 +13,25 @@ # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. -# +# # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. 
-# +# # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA # 02110-1301 USA ######################### END LICENSE BLOCK ######################### -import constants - # 255: Control characters that usually does not exist in any text # 254: Carriage/Return # 253: symbol (punctuation) that does not belong to word # 252: 0 - 9 # Character Mapping Table: -Latin7_CharToOrderMap = ( \ +Latin7_CharToOrderMap = ( 255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10 253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20 @@ -52,7 +50,7 @@ 9, 8, 14, 7, 2, 12, 28, 23, 42, 24, 64, 75, 19, 26, 27,253, # f0 ) -win1253_CharToOrderMap = ( \ +win1253_CharToOrderMap = ( 255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10 253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20 @@ -71,13 +69,13 @@ 9, 8, 14, 7, 2, 12, 28, 23, 42, 24, 64, 75, 19, 26, 27,253, # f0 ) -# Model Table: +# Model Table: # total sequences: 100% # first 512 sequences: 98.2851% # first 1024 sequences:1.7001% # rest sequences: 0.0359% -# negative sequences: 0.0148% -GreekLangModel = ( \ +# negative sequences: 0.0148% +GreekLangModel = ( 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,3,2,2,3,3,3,3,3,3,3,3,1,3,3,3,0,2,2,3,3,0,3,0,3,2,0,3,3,3,0, @@ -208,18 +206,20 @@ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, ) -Latin7GreekModel = { \ +Latin7GreekModel = { 'charToOrderMap': Latin7_CharToOrderMap, 'precedenceMatrix': GreekLangModel, 'mTypicalPositiveRatio': 0.982851, - 'keepEnglishLetter': constants.False, + 'keepEnglishLetter': False, 'charsetName': "ISO-8859-7" } 
-Win1253GreekModel = { \ +Win1253GreekModel = { 'charToOrderMap': win1253_CharToOrderMap, 'precedenceMatrix': GreekLangModel, 'mTypicalPositiveRatio': 0.982851, - 'keepEnglishLetter': constants.False, + 'keepEnglishLetter': False, 'charsetName': "windows-1253" } + +# flake8: noqa diff --git a/libs/chardet/langhebrewmodel.py b/libs/chardet/langhebrewmodel.py index a8bcc65bf2..75f2bc7fe7 100755 --- a/libs/chardet/langhebrewmodel.py +++ b/libs/chardet/langhebrewmodel.py @@ -15,20 +15,18 @@ # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. -# +# # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. -# +# # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA # 02110-1301 USA ######################### END LICENSE BLOCK ######################### -import constants - # 255: Control characters that usually does not exist in any text # 254: Carriage/Return # 253: symbol (punctuation) that does not belong to word @@ -36,7 +34,7 @@ # Windows-1255 language model # Character Mapping Table: -win1255_CharToOrderMap = ( \ +win1255_CharToOrderMap = ( 255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10 253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20 @@ -55,13 +53,13 @@ 12, 19, 13, 26, 18, 27, 21, 17, 7, 10, 5,251,252,128, 96,253, ) -# Model Table: +# Model Table: # total sequences: 100% # first 512 sequences: 98.4004% # first 1024 sequences: 1.5981% # rest sequences: 0.087% -# negative sequences: 0.0015% 
-HebrewLangModel = ( \ +# negative sequences: 0.0015% +HebrewLangModel = ( 0,3,3,3,3,3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,2,3,2,1,2,0,1,0,0, 3,0,3,1,0,0,1,3,2,0,1,1,2,0,2,2,2,1,1,1,1,2,1,1,1,2,0,0,2,2,0,1, 3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,2, @@ -192,10 +190,12 @@ 0,0,0,0,0,0,0,0,0,0,1,2,1,0,0,0,0,0,1,1,1,1,1,0,1,0,0,0,1,1,0,0, ) -Win1255HebrewModel = { \ +Win1255HebrewModel = { 'charToOrderMap': win1255_CharToOrderMap, 'precedenceMatrix': HebrewLangModel, 'mTypicalPositiveRatio': 0.984004, - 'keepEnglishLetter': constants.False, + 'keepEnglishLetter': False, 'charsetName': "windows-1255" } + +# flake8: noqa diff --git a/libs/chardet/langhungarianmodel.py b/libs/chardet/langhungarianmodel.py index d635f03c29..49d2f0fe75 100755 --- a/libs/chardet/langhungarianmodel.py +++ b/libs/chardet/langhungarianmodel.py @@ -13,27 +13,25 @@ # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. -# +# # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. 
-# +# # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA # 02110-1301 USA ######################### END LICENSE BLOCK ######################### -import constants - # 255: Control characters that usually does not exist in any text # 254: Carriage/Return # 253: symbol (punctuation) that does not belong to word # 252: 0 - 9 # Character Mapping Table: -Latin2_HungarianCharToOrderMap = ( \ +Latin2_HungarianCharToOrderMap = ( 255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10 253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20 @@ -52,7 +50,7 @@ 245,246,247, 25, 73, 42, 24,248,249,250, 31, 56, 29,251,252,253, ) -win1250HungarianCharToOrderMap = ( \ +win1250HungarianCharToOrderMap = ( 255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10 253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20 @@ -71,13 +69,13 @@ 245,246,247, 25, 74, 42, 24,248,249,250, 31, 56, 29,251,252,253, ) -# Model Table: +# Model Table: # total sequences: 100% # first 512 sequences: 94.7368% # first 1024 sequences:5.2623% # rest sequences: 0.8894% -# negative sequences: 0.0009% -HungarianLangModel = ( \ +# negative sequences: 0.0009% +HungarianLangModel = ( 0,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,1,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3, 3,3,3,3,3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,2,2,3,3,1,1,2,2,2,2,2,1,2, 3,2,2,3,3,3,3,3,2,3,3,3,3,3,3,1,2,3,3,3,3,2,3,3,1,1,3,3,0,1,1,1, @@ -208,18 +206,20 @@ 0,1,1,1,1,1,1,0,1,1,0,1,0,1,0,0,1,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0, ) -Latin2HungarianModel = { \ +Latin2HungarianModel = { 'charToOrderMap': Latin2_HungarianCharToOrderMap, 'precedenceMatrix': HungarianLangModel, 'mTypicalPositiveRatio': 0.947368, - 'keepEnglishLetter': constants.True, + 
'keepEnglishLetter': True, 'charsetName': "ISO-8859-2" } -Win1250HungarianModel = { \ +Win1250HungarianModel = { 'charToOrderMap': win1250HungarianCharToOrderMap, 'precedenceMatrix': HungarianLangModel, 'mTypicalPositiveRatio': 0.947368, - 'keepEnglishLetter': constants.True, + 'keepEnglishLetter': True, 'charsetName': "windows-1250" } + +# flake8: noqa diff --git a/libs/chardet/langthaimodel.py b/libs/chardet/langthaimodel.py index 96ec054f24..0508b1b1ab 100755 --- a/libs/chardet/langthaimodel.py +++ b/libs/chardet/langthaimodel.py @@ -13,29 +13,27 @@ # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. -# +# # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. -# +# # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA # 02110-1301 USA ######################### END LICENSE BLOCK ######################### -import constants - # 255: Control characters that usually does not exist in any text # 254: Carriage/Return # 253: symbol (punctuation) that does not belong to word # 252: 0 - 9 -# The following result for thai was collected from a limited sample (1M). +# The following result for thai was collected from a limited sample (1M). 
# Character Mapping Table: -TIS620CharToOrderMap = ( \ +TIS620CharToOrderMap = ( 255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10 253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20 @@ -54,13 +52,13 @@ 68, 56, 59, 65, 69, 60, 70, 80, 71, 87,248,249,250,251,252,253, ) -# Model Table: +# Model Table: # total sequences: 100% # first 512 sequences: 92.6386% # first 1024 sequences:7.3177% # rest sequences: 1.0230% -# negative sequences: 0.0436% -ThaiLangModel = ( \ +# negative sequences: 0.0436% +ThaiLangModel = ( 0,1,3,3,3,3,0,0,3,3,0,3,3,0,3,3,3,3,3,3,3,3,0,0,3,3,3,0,3,3,3,3, 0,3,3,0,0,0,1,3,0,3,3,2,3,3,0,1,2,3,3,3,3,0,2,0,2,0,0,3,2,1,2,2, 3,0,3,3,2,3,0,0,3,3,0,3,3,0,3,3,3,3,3,3,3,3,3,0,3,2,3,0,2,2,2,3, @@ -191,10 +189,12 @@ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, ) -TIS620ThaiModel = { \ +TIS620ThaiModel = { 'charToOrderMap': TIS620CharToOrderMap, 'precedenceMatrix': ThaiLangModel, 'mTypicalPositiveRatio': 0.926386, - 'keepEnglishLetter': constants.False, + 'keepEnglishLetter': False, 'charsetName': "TIS-620" } + +# flake8: noqa diff --git a/libs/chardet/latin1prober.py b/libs/chardet/latin1prober.py index b46129ba82..eef3573543 100755 --- a/libs/chardet/latin1prober.py +++ b/libs/chardet/latin1prober.py @@ -14,85 +14,86 @@ # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. -# +# # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. 
-# +# # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA # 02110-1301 USA ######################### END LICENSE BLOCK ######################### -from charsetprober import CharSetProber -import constants -import operator +from .charsetprober import CharSetProber +from .constants import eNotMe +from .compat import wrap_ord FREQ_CAT_NUM = 4 -UDF = 0 # undefined -OTH = 1 # other -ASC = 2 # ascii capital letter -ASS = 3 # ascii small letter -ACV = 4 # accent capital vowel -ACO = 5 # accent capital other -ASV = 6 # accent small vowel -ASO = 7 # accent small other -CLASS_NUM = 8 # total classes +UDF = 0 # undefined +OTH = 1 # other +ASC = 2 # ascii capital letter +ASS = 3 # ascii small letter +ACV = 4 # accent capital vowel +ACO = 5 # accent capital other +ASV = 6 # accent small vowel +ASO = 7 # accent small other +CLASS_NUM = 8 # total classes -Latin1_CharToClass = ( \ - OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 00 - 07 - OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 08 - 0F - OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 10 - 17 - OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 18 - 1F - OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 20 - 27 - OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 28 - 2F - OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 30 - 37 - OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 38 - 3F - OTH, ASC, ASC, ASC, ASC, ASC, ASC, ASC, # 40 - 47 - ASC, ASC, ASC, ASC, ASC, ASC, ASC, ASC, # 48 - 4F - ASC, ASC, ASC, ASC, ASC, ASC, ASC, ASC, # 50 - 57 - ASC, ASC, ASC, OTH, OTH, OTH, OTH, OTH, # 58 - 5F - OTH, ASS, ASS, ASS, ASS, ASS, ASS, ASS, # 60 - 67 - ASS, ASS, ASS, ASS, ASS, ASS, ASS, ASS, # 68 - 6F - ASS, ASS, ASS, ASS, ASS, ASS, ASS, ASS, # 70 - 77 - ASS, ASS, ASS, OTH, OTH, OTH, OTH, OTH, # 78 - 7F - OTH, UDF, OTH, ASO, OTH, OTH, OTH, OTH, # 80 - 87 - OTH, OTH, ACO, OTH, ACO, UDF, ACO, UDF, # 88 - 8F - UDF, OTH, OTH, OTH, OTH, OTH, OTH, OTH, 
# 90 - 97 - OTH, OTH, ASO, OTH, ASO, UDF, ASO, ACO, # 98 - 9F - OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # A0 - A7 - OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # A8 - AF - OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # B0 - B7 - OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # B8 - BF - ACV, ACV, ACV, ACV, ACV, ACV, ACO, ACO, # C0 - C7 - ACV, ACV, ACV, ACV, ACV, ACV, ACV, ACV, # C8 - CF - ACO, ACO, ACV, ACV, ACV, ACV, ACV, OTH, # D0 - D7 - ACV, ACV, ACV, ACV, ACV, ACO, ACO, ACO, # D8 - DF - ASV, ASV, ASV, ASV, ASV, ASV, ASO, ASO, # E0 - E7 - ASV, ASV, ASV, ASV, ASV, ASV, ASV, ASV, # E8 - EF - ASO, ASO, ASV, ASV, ASV, ASV, ASV, OTH, # F0 - F7 - ASV, ASV, ASV, ASV, ASV, ASO, ASO, ASO, # F8 - FF +Latin1_CharToClass = ( + OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 00 - 07 + OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 08 - 0F + OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 10 - 17 + OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 18 - 1F + OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 20 - 27 + OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 28 - 2F + OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 30 - 37 + OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 38 - 3F + OTH, ASC, ASC, ASC, ASC, ASC, ASC, ASC, # 40 - 47 + ASC, ASC, ASC, ASC, ASC, ASC, ASC, ASC, # 48 - 4F + ASC, ASC, ASC, ASC, ASC, ASC, ASC, ASC, # 50 - 57 + ASC, ASC, ASC, OTH, OTH, OTH, OTH, OTH, # 58 - 5F + OTH, ASS, ASS, ASS, ASS, ASS, ASS, ASS, # 60 - 67 + ASS, ASS, ASS, ASS, ASS, ASS, ASS, ASS, # 68 - 6F + ASS, ASS, ASS, ASS, ASS, ASS, ASS, ASS, # 70 - 77 + ASS, ASS, ASS, OTH, OTH, OTH, OTH, OTH, # 78 - 7F + OTH, UDF, OTH, ASO, OTH, OTH, OTH, OTH, # 80 - 87 + OTH, OTH, ACO, OTH, ACO, UDF, ACO, UDF, # 88 - 8F + UDF, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 90 - 97 + OTH, OTH, ASO, OTH, ASO, UDF, ASO, ACO, # 98 - 9F + OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # A0 - A7 + OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # A8 - AF + OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # B0 - B7 + OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # B8 - BF + ACV, ACV, ACV, ACV, ACV, ACV, ACO, ACO, 
# C0 - C7 + ACV, ACV, ACV, ACV, ACV, ACV, ACV, ACV, # C8 - CF + ACO, ACO, ACV, ACV, ACV, ACV, ACV, OTH, # D0 - D7 + ACV, ACV, ACV, ACV, ACV, ACO, ACO, ACO, # D8 - DF + ASV, ASV, ASV, ASV, ASV, ASV, ASO, ASO, # E0 - E7 + ASV, ASV, ASV, ASV, ASV, ASV, ASV, ASV, # E8 - EF + ASO, ASO, ASV, ASV, ASV, ASV, ASV, OTH, # F0 - F7 + ASV, ASV, ASV, ASV, ASV, ASO, ASO, ASO, # F8 - FF ) -# 0 : illegal -# 1 : very unlikely -# 2 : normal +# 0 : illegal +# 1 : very unlikely +# 2 : normal # 3 : very likely -Latin1ClassModel = ( \ -# UDF OTH ASC ASS ACV ACO ASV ASO - 0, 0, 0, 0, 0, 0, 0, 0, # UDF - 0, 3, 3, 3, 3, 3, 3, 3, # OTH - 0, 3, 3, 3, 3, 3, 3, 3, # ASC - 0, 3, 3, 3, 1, 1, 3, 3, # ASS - 0, 3, 3, 3, 1, 2, 1, 2, # ACV - 0, 3, 3, 3, 3, 3, 3, 3, # ACO - 0, 3, 1, 3, 1, 1, 1, 3, # ASV - 0, 3, 1, 3, 1, 1, 3, 3, # ASO +Latin1ClassModel = ( + # UDF OTH ASC ASS ACV ACO ASV ASO + 0, 0, 0, 0, 0, 0, 0, 0, # UDF + 0, 3, 3, 3, 3, 3, 3, 3, # OTH + 0, 3, 3, 3, 3, 3, 3, 3, # ASC + 0, 3, 3, 3, 1, 1, 3, 3, # ASS + 0, 3, 3, 3, 1, 2, 1, 2, # ACV + 0, 3, 3, 3, 3, 3, 3, 3, # ACO + 0, 3, 1, 3, 1, 1, 1, 3, # ASV + 0, 3, 1, 3, 1, 1, 3, 3, # ASO ) + class Latin1Prober(CharSetProber): def __init__(self): CharSetProber.__init__(self) @@ -109,10 +110,11 @@ def get_charset_name(self): def feed(self, aBuf): aBuf = self.filter_with_english_letters(aBuf) for c in aBuf: - charClass = Latin1_CharToClass[ord(c)] - freq = Latin1ClassModel[(self._mLastCharClass * CLASS_NUM) + charClass] + charClass = Latin1_CharToClass[wrap_ord(c)] + freq = Latin1ClassModel[(self._mLastCharClass * CLASS_NUM) + + charClass] if freq == 0: - self._mState = constants.eNotMe + self._mState = eNotMe break self._mFreqCounter[freq] += 1 self._mLastCharClass = charClass @@ -120,17 +122,18 @@ def feed(self, aBuf): return self.get_state() def get_confidence(self): - if self.get_state() == constants.eNotMe: + if self.get_state() == eNotMe: return 0.01 - - total = reduce(operator.add, self._mFreqCounter) + + total = sum(self._mFreqCounter) if 
total < 0.01: confidence = 0.0 else: - confidence = (self._mFreqCounter[3] / total) - (self._mFreqCounter[1] * 20.0 / total) + confidence = ((self._mFreqCounter[3] - self._mFreqCounter[1] * 20.0) + / total) if confidence < 0.0: confidence = 0.0 - # lower the confidence of latin1 so that other more accurate detector - # can take priority. - confidence = confidence * 0.5 + # lower the confidence of latin1 so that other more accurate + # detector can take priority. + confidence = confidence * 0.73 return confidence diff --git a/libs/chardet/mbcharsetprober.py b/libs/chardet/mbcharsetprober.py index a8131445a0..bb42f2fb5e 100755 --- a/libs/chardet/mbcharsetprober.py +++ b/libs/chardet/mbcharsetprober.py @@ -15,28 +15,29 @@ # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. -# +# # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. -# +# # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA # 02110-1301 USA ######################### END LICENSE BLOCK ######################### -import constants, sys -from constants import eStart, eError, eItsMe -from charsetprober import CharSetProber +import sys +from . 
import constants +from .charsetprober import CharSetProber + class MultiByteCharSetProber(CharSetProber): def __init__(self): CharSetProber.__init__(self) self._mDistributionAnalyzer = None self._mCodingSM = None - self._mLastChar = ['\x00', '\x00'] + self._mLastChar = [0, 0] def reset(self): CharSetProber.reset(self) @@ -44,7 +45,7 @@ def reset(self): self._mCodingSM.reset() if self._mDistributionAnalyzer: self._mDistributionAnalyzer.reset() - self._mLastChar = ['\x00', '\x00'] + self._mLastChar = [0, 0] def get_charset_name(self): pass @@ -53,27 +54,30 @@ def feed(self, aBuf): aLen = len(aBuf) for i in range(0, aLen): codingState = self._mCodingSM.next_state(aBuf[i]) - if codingState == eError: + if codingState == constants.eError: if constants._debug: - sys.stderr.write(self.get_charset_name() + ' prober hit error at byte ' + str(i) + '\n') + sys.stderr.write(self.get_charset_name() + + ' prober hit error at byte ' + str(i) + + '\n') self._mState = constants.eNotMe break - elif codingState == eItsMe: + elif codingState == constants.eItsMe: self._mState = constants.eFoundIt break - elif codingState == eStart: + elif codingState == constants.eStart: charLen = self._mCodingSM.get_current_charlen() if i == 0: self._mLastChar[1] = aBuf[0] self._mDistributionAnalyzer.feed(self._mLastChar, charLen) else: - self._mDistributionAnalyzer.feed(aBuf[i-1:i+1], charLen) - + self._mDistributionAnalyzer.feed(aBuf[i - 1:i + 1], + charLen) + self._mLastChar[0] = aBuf[aLen - 1] - + if self.get_state() == constants.eDetecting: - if self._mDistributionAnalyzer.got_enough_data() and \ - (self.get_confidence() > constants.SHORTCUT_THRESHOLD): + if (self._mDistributionAnalyzer.got_enough_data() and + (self.get_confidence() > constants.SHORTCUT_THRESHOLD)): self._mState = constants.eFoundIt return self.get_state() diff --git a/libs/chardet/mbcsgroupprober.py b/libs/chardet/mbcsgroupprober.py index 941cc3e376..03c9dcf3eb 100755 --- a/libs/chardet/mbcsgroupprober.py +++ 
b/libs/chardet/mbcsgroupprober.py @@ -15,36 +15,40 @@ # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. -# +# # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. -# +# # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA # 02110-1301 USA ######################### END LICENSE BLOCK ######################### -from charsetgroupprober import CharSetGroupProber -from utf8prober import UTF8Prober -from sjisprober import SJISProber -from eucjpprober import EUCJPProber -from gb2312prober import GB2312Prober -from euckrprober import EUCKRProber -from big5prober import Big5Prober -from euctwprober import EUCTWProber +from .charsetgroupprober import CharSetGroupProber +from .utf8prober import UTF8Prober +from .sjisprober import SJISProber +from .eucjpprober import EUCJPProber +from .gb2312prober import GB2312Prober +from .euckrprober import EUCKRProber +from .cp949prober import CP949Prober +from .big5prober import Big5Prober +from .euctwprober import EUCTWProber + class MBCSGroupProber(CharSetGroupProber): def __init__(self): CharSetGroupProber.__init__(self) - self._mProbers = [ \ + self._mProbers = [ UTF8Prober(), SJISProber(), EUCJPProber(), GB2312Prober(), EUCKRProber(), + CP949Prober(), Big5Prober(), - EUCTWProber()] + EUCTWProber() + ] self.reset() diff --git a/libs/chardet/mbcssm.py b/libs/chardet/mbcssm.py index e46c1ffebc..efe678ca03 100755 --- a/libs/chardet/mbcssm.py +++ b/libs/chardet/mbcssm.py @@ -13,60 +13,62 @@ # modify it under the terms of the GNU Lesser General Public # License as 
published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. -# +# # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. -# +# # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA # 02110-1301 USA ######################### END LICENSE BLOCK ######################### -from constants import eStart, eError, eItsMe +from .constants import eStart, eError, eItsMe -# BIG5 +# BIG5 -BIG5_cls = ( \ +BIG5_cls = ( 1,1,1,1,1,1,1,1, # 00 - 07 #allow 0x00 as legal value - 1,1,1,1,1,1,0,0, # 08 - 0f - 1,1,1,1,1,1,1,1, # 10 - 17 - 1,1,1,0,1,1,1,1, # 18 - 1f - 1,1,1,1,1,1,1,1, # 20 - 27 - 1,1,1,1,1,1,1,1, # 28 - 2f - 1,1,1,1,1,1,1,1, # 30 - 37 - 1,1,1,1,1,1,1,1, # 38 - 3f - 2,2,2,2,2,2,2,2, # 40 - 47 - 2,2,2,2,2,2,2,2, # 48 - 4f - 2,2,2,2,2,2,2,2, # 50 - 57 - 2,2,2,2,2,2,2,2, # 58 - 5f - 2,2,2,2,2,2,2,2, # 60 - 67 - 2,2,2,2,2,2,2,2, # 68 - 6f - 2,2,2,2,2,2,2,2, # 70 - 77 - 2,2,2,2,2,2,2,1, # 78 - 7f - 4,4,4,4,4,4,4,4, # 80 - 87 - 4,4,4,4,4,4,4,4, # 88 - 8f - 4,4,4,4,4,4,4,4, # 90 - 97 - 4,4,4,4,4,4,4,4, # 98 - 9f - 4,3,3,3,3,3,3,3, # a0 - a7 - 3,3,3,3,3,3,3,3, # a8 - af - 3,3,3,3,3,3,3,3, # b0 - b7 - 3,3,3,3,3,3,3,3, # b8 - bf - 3,3,3,3,3,3,3,3, # c0 - c7 - 3,3,3,3,3,3,3,3, # c8 - cf - 3,3,3,3,3,3,3,3, # d0 - d7 - 3,3,3,3,3,3,3,3, # d8 - df - 3,3,3,3,3,3,3,3, # e0 - e7 - 3,3,3,3,3,3,3,3, # e8 - ef - 3,3,3,3,3,3,3,3, # f0 - f7 - 3,3,3,3,3,3,3,0) # f8 - ff - -BIG5_st = ( \ - eError,eStart,eStart, 3,eError,eError,eError,eError,#00-07 - eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eError,#08-0f - eError,eStart,eStart,eStart,eStart,eStart,eStart,eStart)#10-17 + 1,1,1,1,1,1,0,0, # 08 - 0f + 
1,1,1,1,1,1,1,1, # 10 - 17 + 1,1,1,0,1,1,1,1, # 18 - 1f + 1,1,1,1,1,1,1,1, # 20 - 27 + 1,1,1,1,1,1,1,1, # 28 - 2f + 1,1,1,1,1,1,1,1, # 30 - 37 + 1,1,1,1,1,1,1,1, # 38 - 3f + 2,2,2,2,2,2,2,2, # 40 - 47 + 2,2,2,2,2,2,2,2, # 48 - 4f + 2,2,2,2,2,2,2,2, # 50 - 57 + 2,2,2,2,2,2,2,2, # 58 - 5f + 2,2,2,2,2,2,2,2, # 60 - 67 + 2,2,2,2,2,2,2,2, # 68 - 6f + 2,2,2,2,2,2,2,2, # 70 - 77 + 2,2,2,2,2,2,2,1, # 78 - 7f + 4,4,4,4,4,4,4,4, # 80 - 87 + 4,4,4,4,4,4,4,4, # 88 - 8f + 4,4,4,4,4,4,4,4, # 90 - 97 + 4,4,4,4,4,4,4,4, # 98 - 9f + 4,3,3,3,3,3,3,3, # a0 - a7 + 3,3,3,3,3,3,3,3, # a8 - af + 3,3,3,3,3,3,3,3, # b0 - b7 + 3,3,3,3,3,3,3,3, # b8 - bf + 3,3,3,3,3,3,3,3, # c0 - c7 + 3,3,3,3,3,3,3,3, # c8 - cf + 3,3,3,3,3,3,3,3, # d0 - d7 + 3,3,3,3,3,3,3,3, # d8 - df + 3,3,3,3,3,3,3,3, # e0 - e7 + 3,3,3,3,3,3,3,3, # e8 - ef + 3,3,3,3,3,3,3,3, # f0 - f7 + 3,3,3,3,3,3,3,0 # f8 - ff +) + +BIG5_st = ( + eError,eStart,eStart, 3,eError,eError,eError,eError,#00-07 + eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eError,#08-0f + eError,eStart,eStart,eStart,eStart,eStart,eStart,eStart#10-17 +) Big5CharLenTable = (0, 1, 1, 2, 0) @@ -76,48 +78,90 @@ 'charLenTable': Big5CharLenTable, 'name': 'Big5'} +# CP949 + +CP949_cls = ( + 1,1,1,1,1,1,1,1, 1,1,1,1,1,1,0,0, # 00 - 0f + 1,1,1,1,1,1,1,1, 1,1,1,0,1,1,1,1, # 10 - 1f + 1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1, # 20 - 2f + 1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1, # 30 - 3f + 1,4,4,4,4,4,4,4, 4,4,4,4,4,4,4,4, # 40 - 4f + 4,4,5,5,5,5,5,5, 5,5,5,1,1,1,1,1, # 50 - 5f + 1,5,5,5,5,5,5,5, 5,5,5,5,5,5,5,5, # 60 - 6f + 5,5,5,5,5,5,5,5, 5,5,5,1,1,1,1,1, # 70 - 7f + 0,6,6,6,6,6,6,6, 6,6,6,6,6,6,6,6, # 80 - 8f + 6,6,6,6,6,6,6,6, 6,6,6,6,6,6,6,6, # 90 - 9f + 6,7,7,7,7,7,7,7, 7,7,7,7,7,8,8,8, # a0 - af + 7,7,7,7,7,7,7,7, 7,7,7,7,7,7,7,7, # b0 - bf + 7,7,7,7,7,7,9,2, 2,3,2,2,2,2,2,2, # c0 - cf + 2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,2, # d0 - df + 2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,2, # e0 - ef + 2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,0, # f0 - ff +) + +CP949_st = ( +#cls= 0 1 2 3 4 5 6 7 8 9 # 
previous state = + eError,eStart, 3,eError,eStart,eStart, 4, 5,eError, 6, # eStart + eError,eError,eError,eError,eError,eError,eError,eError,eError,eError, # eError + eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe, # eItsMe + eError,eError,eStart,eStart,eError,eError,eError,eStart,eStart,eStart, # 3 + eError,eError,eStart,eStart,eStart,eStart,eStart,eStart,eStart,eStart, # 4 + eError,eStart,eStart,eStart,eStart,eStart,eStart,eStart,eStart,eStart, # 5 + eError,eStart,eStart,eStart,eStart,eError,eError,eStart,eStart,eStart, # 6 +) + +CP949CharLenTable = (0, 1, 2, 0, 1, 1, 2, 2, 0, 2) + +CP949SMModel = {'classTable': CP949_cls, + 'classFactor': 10, + 'stateTable': CP949_st, + 'charLenTable': CP949CharLenTable, + 'name': 'CP949'} + # EUC-JP -EUCJP_cls = ( \ - 4,4,4,4,4,4,4,4, # 00 - 07 - 4,4,4,4,4,4,5,5, # 08 - 0f - 4,4,4,4,4,4,4,4, # 10 - 17 - 4,4,4,5,4,4,4,4, # 18 - 1f - 4,4,4,4,4,4,4,4, # 20 - 27 - 4,4,4,4,4,4,4,4, # 28 - 2f - 4,4,4,4,4,4,4,4, # 30 - 37 - 4,4,4,4,4,4,4,4, # 38 - 3f - 4,4,4,4,4,4,4,4, # 40 - 47 - 4,4,4,4,4,4,4,4, # 48 - 4f - 4,4,4,4,4,4,4,4, # 50 - 57 - 4,4,4,4,4,4,4,4, # 58 - 5f - 4,4,4,4,4,4,4,4, # 60 - 67 - 4,4,4,4,4,4,4,4, # 68 - 6f - 4,4,4,4,4,4,4,4, # 70 - 77 - 4,4,4,4,4,4,4,4, # 78 - 7f - 5,5,5,5,5,5,5,5, # 80 - 87 - 5,5,5,5,5,5,1,3, # 88 - 8f - 5,5,5,5,5,5,5,5, # 90 - 97 - 5,5,5,5,5,5,5,5, # 98 - 9f - 5,2,2,2,2,2,2,2, # a0 - a7 - 2,2,2,2,2,2,2,2, # a8 - af - 2,2,2,2,2,2,2,2, # b0 - b7 - 2,2,2,2,2,2,2,2, # b8 - bf - 2,2,2,2,2,2,2,2, # c0 - c7 - 2,2,2,2,2,2,2,2, # c8 - cf - 2,2,2,2,2,2,2,2, # d0 - d7 - 2,2,2,2,2,2,2,2, # d8 - df - 0,0,0,0,0,0,0,0, # e0 - e7 - 0,0,0,0,0,0,0,0, # e8 - ef - 0,0,0,0,0,0,0,0, # f0 - f7 - 0,0,0,0,0,0,0,5) # f8 - ff - -EUCJP_st = ( \ - 3, 4, 3, 5,eStart,eError,eError,eError,#00-07 - eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,#08-0f - eItsMe,eItsMe,eStart,eError,eStart,eError,eError,eError,#10-17 - eError,eError,eStart,eError,eError,eError, 3,eError,#18-1f - 
3,eError,eError,eError,eStart,eStart,eStart,eStart)#20-27 +EUCJP_cls = ( + 4,4,4,4,4,4,4,4, # 00 - 07 + 4,4,4,4,4,4,5,5, # 08 - 0f + 4,4,4,4,4,4,4,4, # 10 - 17 + 4,4,4,5,4,4,4,4, # 18 - 1f + 4,4,4,4,4,4,4,4, # 20 - 27 + 4,4,4,4,4,4,4,4, # 28 - 2f + 4,4,4,4,4,4,4,4, # 30 - 37 + 4,4,4,4,4,4,4,4, # 38 - 3f + 4,4,4,4,4,4,4,4, # 40 - 47 + 4,4,4,4,4,4,4,4, # 48 - 4f + 4,4,4,4,4,4,4,4, # 50 - 57 + 4,4,4,4,4,4,4,4, # 58 - 5f + 4,4,4,4,4,4,4,4, # 60 - 67 + 4,4,4,4,4,4,4,4, # 68 - 6f + 4,4,4,4,4,4,4,4, # 70 - 77 + 4,4,4,4,4,4,4,4, # 78 - 7f + 5,5,5,5,5,5,5,5, # 80 - 87 + 5,5,5,5,5,5,1,3, # 88 - 8f + 5,5,5,5,5,5,5,5, # 90 - 97 + 5,5,5,5,5,5,5,5, # 98 - 9f + 5,2,2,2,2,2,2,2, # a0 - a7 + 2,2,2,2,2,2,2,2, # a8 - af + 2,2,2,2,2,2,2,2, # b0 - b7 + 2,2,2,2,2,2,2,2, # b8 - bf + 2,2,2,2,2,2,2,2, # c0 - c7 + 2,2,2,2,2,2,2,2, # c8 - cf + 2,2,2,2,2,2,2,2, # d0 - d7 + 2,2,2,2,2,2,2,2, # d8 - df + 0,0,0,0,0,0,0,0, # e0 - e7 + 0,0,0,0,0,0,0,0, # e8 - ef + 0,0,0,0,0,0,0,0, # f0 - f7 + 0,0,0,0,0,0,0,5 # f8 - ff +) + +EUCJP_st = ( + 3, 4, 3, 5,eStart,eError,eError,eError,#00-07 + eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,#08-0f + eItsMe,eItsMe,eStart,eError,eStart,eError,eError,eError,#10-17 + eError,eError,eStart,eError,eError,eError, 3,eError,#18-1f + 3,eError,eError,eError,eStart,eStart,eStart,eStart#20-27 +) EUCJPCharLenTable = (2, 2, 2, 3, 1, 0) @@ -129,43 +173,45 @@ # EUC-KR -EUCKR_cls = ( \ - 1,1,1,1,1,1,1,1, # 00 - 07 - 1,1,1,1,1,1,0,0, # 08 - 0f - 1,1,1,1,1,1,1,1, # 10 - 17 - 1,1,1,0,1,1,1,1, # 18 - 1f - 1,1,1,1,1,1,1,1, # 20 - 27 - 1,1,1,1,1,1,1,1, # 28 - 2f - 1,1,1,1,1,1,1,1, # 30 - 37 - 1,1,1,1,1,1,1,1, # 38 - 3f - 1,1,1,1,1,1,1,1, # 40 - 47 - 1,1,1,1,1,1,1,1, # 48 - 4f - 1,1,1,1,1,1,1,1, # 50 - 57 - 1,1,1,1,1,1,1,1, # 58 - 5f - 1,1,1,1,1,1,1,1, # 60 - 67 - 1,1,1,1,1,1,1,1, # 68 - 6f - 1,1,1,1,1,1,1,1, # 70 - 77 - 1,1,1,1,1,1,1,1, # 78 - 7f - 0,0,0,0,0,0,0,0, # 80 - 87 - 0,0,0,0,0,0,0,0, # 88 - 8f - 0,0,0,0,0,0,0,0, # 90 - 97 - 0,0,0,0,0,0,0,0, # 98 - 9f - 
0,2,2,2,2,2,2,2, # a0 - a7 - 2,2,2,2,2,3,3,3, # a8 - af - 2,2,2,2,2,2,2,2, # b0 - b7 - 2,2,2,2,2,2,2,2, # b8 - bf - 2,2,2,2,2,2,2,2, # c0 - c7 - 2,3,2,2,2,2,2,2, # c8 - cf - 2,2,2,2,2,2,2,2, # d0 - d7 - 2,2,2,2,2,2,2,2, # d8 - df - 2,2,2,2,2,2,2,2, # e0 - e7 - 2,2,2,2,2,2,2,2, # e8 - ef - 2,2,2,2,2,2,2,2, # f0 - f7 - 2,2,2,2,2,2,2,0) # f8 - ff +EUCKR_cls = ( + 1,1,1,1,1,1,1,1, # 00 - 07 + 1,1,1,1,1,1,0,0, # 08 - 0f + 1,1,1,1,1,1,1,1, # 10 - 17 + 1,1,1,0,1,1,1,1, # 18 - 1f + 1,1,1,1,1,1,1,1, # 20 - 27 + 1,1,1,1,1,1,1,1, # 28 - 2f + 1,1,1,1,1,1,1,1, # 30 - 37 + 1,1,1,1,1,1,1,1, # 38 - 3f + 1,1,1,1,1,1,1,1, # 40 - 47 + 1,1,1,1,1,1,1,1, # 48 - 4f + 1,1,1,1,1,1,1,1, # 50 - 57 + 1,1,1,1,1,1,1,1, # 58 - 5f + 1,1,1,1,1,1,1,1, # 60 - 67 + 1,1,1,1,1,1,1,1, # 68 - 6f + 1,1,1,1,1,1,1,1, # 70 - 77 + 1,1,1,1,1,1,1,1, # 78 - 7f + 0,0,0,0,0,0,0,0, # 80 - 87 + 0,0,0,0,0,0,0,0, # 88 - 8f + 0,0,0,0,0,0,0,0, # 90 - 97 + 0,0,0,0,0,0,0,0, # 98 - 9f + 0,2,2,2,2,2,2,2, # a0 - a7 + 2,2,2,2,2,3,3,3, # a8 - af + 2,2,2,2,2,2,2,2, # b0 - b7 + 2,2,2,2,2,2,2,2, # b8 - bf + 2,2,2,2,2,2,2,2, # c0 - c7 + 2,3,2,2,2,2,2,2, # c8 - cf + 2,2,2,2,2,2,2,2, # d0 - d7 + 2,2,2,2,2,2,2,2, # d8 - df + 2,2,2,2,2,2,2,2, # e0 - e7 + 2,2,2,2,2,2,2,2, # e8 - ef + 2,2,2,2,2,2,2,2, # f0 - f7 + 2,2,2,2,2,2,2,0 # f8 - ff +) EUCKR_st = ( - eError,eStart, 3,eError,eError,eError,eError,eError,#00-07 - eItsMe,eItsMe,eItsMe,eItsMe,eError,eError,eStart,eStart)#08-0f + eError,eStart, 3,eError,eError,eError,eError,eError,#00-07 + eItsMe,eItsMe,eItsMe,eItsMe,eError,eError,eStart,eStart #08-0f +) EUCKRCharLenTable = (0, 1, 2, 0) @@ -177,47 +223,49 @@ # EUC-TW -EUCTW_cls = ( \ - 2,2,2,2,2,2,2,2, # 00 - 07 - 2,2,2,2,2,2,0,0, # 08 - 0f - 2,2,2,2,2,2,2,2, # 10 - 17 - 2,2,2,0,2,2,2,2, # 18 - 1f - 2,2,2,2,2,2,2,2, # 20 - 27 - 2,2,2,2,2,2,2,2, # 28 - 2f - 2,2,2,2,2,2,2,2, # 30 - 37 - 2,2,2,2,2,2,2,2, # 38 - 3f - 2,2,2,2,2,2,2,2, # 40 - 47 - 2,2,2,2,2,2,2,2, # 48 - 4f - 2,2,2,2,2,2,2,2, # 50 - 57 - 2,2,2,2,2,2,2,2, # 58 - 5f - 
2,2,2,2,2,2,2,2, # 60 - 67 - 2,2,2,2,2,2,2,2, # 68 - 6f - 2,2,2,2,2,2,2,2, # 70 - 77 - 2,2,2,2,2,2,2,2, # 78 - 7f - 0,0,0,0,0,0,0,0, # 80 - 87 - 0,0,0,0,0,0,6,0, # 88 - 8f - 0,0,0,0,0,0,0,0, # 90 - 97 - 0,0,0,0,0,0,0,0, # 98 - 9f - 0,3,4,4,4,4,4,4, # a0 - a7 - 5,5,1,1,1,1,1,1, # a8 - af - 1,1,1,1,1,1,1,1, # b0 - b7 - 1,1,1,1,1,1,1,1, # b8 - bf - 1,1,3,1,3,3,3,3, # c0 - c7 - 3,3,3,3,3,3,3,3, # c8 - cf - 3,3,3,3,3,3,3,3, # d0 - d7 - 3,3,3,3,3,3,3,3, # d8 - df - 3,3,3,3,3,3,3,3, # e0 - e7 - 3,3,3,3,3,3,3,3, # e8 - ef - 3,3,3,3,3,3,3,3, # f0 - f7 - 3,3,3,3,3,3,3,0) # f8 - ff - -EUCTW_st = ( \ - eError,eError,eStart, 3, 3, 3, 4,eError,#00-07 - eError,eError,eError,eError,eError,eError,eItsMe,eItsMe,#08-0f - eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eError,eStart,eError,#10-17 - eStart,eStart,eStart,eError,eError,eError,eError,eError,#18-1f - 5,eError,eError,eError,eStart,eError,eStart,eStart,#20-27 - eStart,eError,eStart,eStart,eStart,eStart,eStart,eStart)#28-2f +EUCTW_cls = ( + 2,2,2,2,2,2,2,2, # 00 - 07 + 2,2,2,2,2,2,0,0, # 08 - 0f + 2,2,2,2,2,2,2,2, # 10 - 17 + 2,2,2,0,2,2,2,2, # 18 - 1f + 2,2,2,2,2,2,2,2, # 20 - 27 + 2,2,2,2,2,2,2,2, # 28 - 2f + 2,2,2,2,2,2,2,2, # 30 - 37 + 2,2,2,2,2,2,2,2, # 38 - 3f + 2,2,2,2,2,2,2,2, # 40 - 47 + 2,2,2,2,2,2,2,2, # 48 - 4f + 2,2,2,2,2,2,2,2, # 50 - 57 + 2,2,2,2,2,2,2,2, # 58 - 5f + 2,2,2,2,2,2,2,2, # 60 - 67 + 2,2,2,2,2,2,2,2, # 68 - 6f + 2,2,2,2,2,2,2,2, # 70 - 77 + 2,2,2,2,2,2,2,2, # 78 - 7f + 0,0,0,0,0,0,0,0, # 80 - 87 + 0,0,0,0,0,0,6,0, # 88 - 8f + 0,0,0,0,0,0,0,0, # 90 - 97 + 0,0,0,0,0,0,0,0, # 98 - 9f + 0,3,4,4,4,4,4,4, # a0 - a7 + 5,5,1,1,1,1,1,1, # a8 - af + 1,1,1,1,1,1,1,1, # b0 - b7 + 1,1,1,1,1,1,1,1, # b8 - bf + 1,1,3,1,3,3,3,3, # c0 - c7 + 3,3,3,3,3,3,3,3, # c8 - cf + 3,3,3,3,3,3,3,3, # d0 - d7 + 3,3,3,3,3,3,3,3, # d8 - df + 3,3,3,3,3,3,3,3, # e0 - e7 + 3,3,3,3,3,3,3,3, # e8 - ef + 3,3,3,3,3,3,3,3, # f0 - f7 + 3,3,3,3,3,3,3,0 # f8 - ff +) + +EUCTW_st = ( + eError,eError,eStart, 3, 3, 3, 4,eError,#00-07 + 
eError,eError,eError,eError,eError,eError,eItsMe,eItsMe,#08-0f + eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eError,eStart,eError,#10-17 + eStart,eStart,eStart,eError,eError,eError,eError,eError,#18-1f + 5,eError,eError,eError,eStart,eError,eStart,eStart,#20-27 + eStart,eError,eStart,eStart,eStart,eStart,eStart,eStart #28-2f +) EUCTWCharLenTable = (0, 0, 1, 2, 2, 2, 3) @@ -229,53 +277,55 @@ # GB2312 -GB2312_cls = ( \ - 1,1,1,1,1,1,1,1, # 00 - 07 - 1,1,1,1,1,1,0,0, # 08 - 0f - 1,1,1,1,1,1,1,1, # 10 - 17 - 1,1,1,0,1,1,1,1, # 18 - 1f - 1,1,1,1,1,1,1,1, # 20 - 27 - 1,1,1,1,1,1,1,1, # 28 - 2f - 3,3,3,3,3,3,3,3, # 30 - 37 - 3,3,1,1,1,1,1,1, # 38 - 3f - 2,2,2,2,2,2,2,2, # 40 - 47 - 2,2,2,2,2,2,2,2, # 48 - 4f - 2,2,2,2,2,2,2,2, # 50 - 57 - 2,2,2,2,2,2,2,2, # 58 - 5f - 2,2,2,2,2,2,2,2, # 60 - 67 - 2,2,2,2,2,2,2,2, # 68 - 6f - 2,2,2,2,2,2,2,2, # 70 - 77 - 2,2,2,2,2,2,2,4, # 78 - 7f - 5,6,6,6,6,6,6,6, # 80 - 87 - 6,6,6,6,6,6,6,6, # 88 - 8f - 6,6,6,6,6,6,6,6, # 90 - 97 - 6,6,6,6,6,6,6,6, # 98 - 9f - 6,6,6,6,6,6,6,6, # a0 - a7 - 6,6,6,6,6,6,6,6, # a8 - af - 6,6,6,6,6,6,6,6, # b0 - b7 - 6,6,6,6,6,6,6,6, # b8 - bf - 6,6,6,6,6,6,6,6, # c0 - c7 - 6,6,6,6,6,6,6,6, # c8 - cf - 6,6,6,6,6,6,6,6, # d0 - d7 - 6,6,6,6,6,6,6,6, # d8 - df - 6,6,6,6,6,6,6,6, # e0 - e7 - 6,6,6,6,6,6,6,6, # e8 - ef - 6,6,6,6,6,6,6,6, # f0 - f7 - 6,6,6,6,6,6,6,0) # f8 - ff - -GB2312_st = ( \ - eError,eStart,eStart,eStart,eStart,eStart, 3,eError,#00-07 - eError,eError,eError,eError,eError,eError,eItsMe,eItsMe,#08-0f - eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eError,eError,eStart,#10-17 - 4,eError,eStart,eStart,eError,eError,eError,eError,#18-1f - eError,eError, 5,eError,eError,eError,eItsMe,eError,#20-27 - eError,eError,eStart,eStart,eStart,eStart,eStart,eStart)#28-2f - -# To be accurate, the length of class 6 can be either 2 or 4. -# But it is not necessary to discriminate between the two since -# it is used for frequency analysis only, and we are validing -# each code range there as well. 
So it is safe to set it to be -# 2 here. +GB2312_cls = ( + 1,1,1,1,1,1,1,1, # 00 - 07 + 1,1,1,1,1,1,0,0, # 08 - 0f + 1,1,1,1,1,1,1,1, # 10 - 17 + 1,1,1,0,1,1,1,1, # 18 - 1f + 1,1,1,1,1,1,1,1, # 20 - 27 + 1,1,1,1,1,1,1,1, # 28 - 2f + 3,3,3,3,3,3,3,3, # 30 - 37 + 3,3,1,1,1,1,1,1, # 38 - 3f + 2,2,2,2,2,2,2,2, # 40 - 47 + 2,2,2,2,2,2,2,2, # 48 - 4f + 2,2,2,2,2,2,2,2, # 50 - 57 + 2,2,2,2,2,2,2,2, # 58 - 5f + 2,2,2,2,2,2,2,2, # 60 - 67 + 2,2,2,2,2,2,2,2, # 68 - 6f + 2,2,2,2,2,2,2,2, # 70 - 77 + 2,2,2,2,2,2,2,4, # 78 - 7f + 5,6,6,6,6,6,6,6, # 80 - 87 + 6,6,6,6,6,6,6,6, # 88 - 8f + 6,6,6,6,6,6,6,6, # 90 - 97 + 6,6,6,6,6,6,6,6, # 98 - 9f + 6,6,6,6,6,6,6,6, # a0 - a7 + 6,6,6,6,6,6,6,6, # a8 - af + 6,6,6,6,6,6,6,6, # b0 - b7 + 6,6,6,6,6,6,6,6, # b8 - bf + 6,6,6,6,6,6,6,6, # c0 - c7 + 6,6,6,6,6,6,6,6, # c8 - cf + 6,6,6,6,6,6,6,6, # d0 - d7 + 6,6,6,6,6,6,6,6, # d8 - df + 6,6,6,6,6,6,6,6, # e0 - e7 + 6,6,6,6,6,6,6,6, # e8 - ef + 6,6,6,6,6,6,6,6, # f0 - f7 + 6,6,6,6,6,6,6,0 # f8 - ff +) + +GB2312_st = ( + eError,eStart,eStart,eStart,eStart,eStart, 3,eError,#00-07 + eError,eError,eError,eError,eError,eError,eItsMe,eItsMe,#08-0f + eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eError,eError,eStart,#10-17 + 4,eError,eStart,eStart,eError,eError,eError,eError,#18-1f + eError,eError, 5,eError,eError,eError,eItsMe,eError,#20-27 + eError,eError,eStart,eStart,eStart,eStart,eStart,eStart #28-2f +) + +# To be accurate, the length of class 6 can be either 2 or 4. +# But it is not necessary to discriminate between the two since +# it is used for frequency analysis only, and we are validing +# each code range there as well. So it is safe to set it to be +# 2 here. 
GB2312CharLenTable = (0, 1, 1, 1, 1, 1, 2) GB2312SMModel = {'classTable': GB2312_cls, @@ -286,46 +336,48 @@ # Shift_JIS -SJIS_cls = ( \ - 1,1,1,1,1,1,1,1, # 00 - 07 - 1,1,1,1,1,1,0,0, # 08 - 0f - 1,1,1,1,1,1,1,1, # 10 - 17 - 1,1,1,0,1,1,1,1, # 18 - 1f - 1,1,1,1,1,1,1,1, # 20 - 27 - 1,1,1,1,1,1,1,1, # 28 - 2f - 1,1,1,1,1,1,1,1, # 30 - 37 - 1,1,1,1,1,1,1,1, # 38 - 3f - 2,2,2,2,2,2,2,2, # 40 - 47 - 2,2,2,2,2,2,2,2, # 48 - 4f - 2,2,2,2,2,2,2,2, # 50 - 57 - 2,2,2,2,2,2,2,2, # 58 - 5f - 2,2,2,2,2,2,2,2, # 60 - 67 - 2,2,2,2,2,2,2,2, # 68 - 6f - 2,2,2,2,2,2,2,2, # 70 - 77 - 2,2,2,2,2,2,2,1, # 78 - 7f - 3,3,3,3,3,3,3,3, # 80 - 87 - 3,3,3,3,3,3,3,3, # 88 - 8f - 3,3,3,3,3,3,3,3, # 90 - 97 - 3,3,3,3,3,3,3,3, # 98 - 9f - #0xa0 is illegal in sjis encoding, but some pages does +SJIS_cls = ( + 1,1,1,1,1,1,1,1, # 00 - 07 + 1,1,1,1,1,1,0,0, # 08 - 0f + 1,1,1,1,1,1,1,1, # 10 - 17 + 1,1,1,0,1,1,1,1, # 18 - 1f + 1,1,1,1,1,1,1,1, # 20 - 27 + 1,1,1,1,1,1,1,1, # 28 - 2f + 1,1,1,1,1,1,1,1, # 30 - 37 + 1,1,1,1,1,1,1,1, # 38 - 3f + 2,2,2,2,2,2,2,2, # 40 - 47 + 2,2,2,2,2,2,2,2, # 48 - 4f + 2,2,2,2,2,2,2,2, # 50 - 57 + 2,2,2,2,2,2,2,2, # 58 - 5f + 2,2,2,2,2,2,2,2, # 60 - 67 + 2,2,2,2,2,2,2,2, # 68 - 6f + 2,2,2,2,2,2,2,2, # 70 - 77 + 2,2,2,2,2,2,2,1, # 78 - 7f + 3,3,3,3,3,2,2,3, # 80 - 87 + 3,3,3,3,3,3,3,3, # 88 - 8f + 3,3,3,3,3,3,3,3, # 90 - 97 + 3,3,3,3,3,3,3,3, # 98 - 9f + #0xa0 is illegal in sjis encoding, but some pages does #contain such byte. We need to be more error forgiven. 
- 2,2,2,2,2,2,2,2, # a0 - a7 - 2,2,2,2,2,2,2,2, # a8 - af - 2,2,2,2,2,2,2,2, # b0 - b7 - 2,2,2,2,2,2,2,2, # b8 - bf - 2,2,2,2,2,2,2,2, # c0 - c7 - 2,2,2,2,2,2,2,2, # c8 - cf - 2,2,2,2,2,2,2,2, # d0 - d7 - 2,2,2,2,2,2,2,2, # d8 - df - 3,3,3,3,3,3,3,3, # e0 - e7 - 3,3,3,3,3,4,4,4, # e8 - ef - 4,4,4,4,4,4,4,4, # f0 - f7 - 4,4,4,4,4,0,0,0) # f8 - ff - -SJIS_st = ( \ - eError,eStart,eStart, 3,eError,eError,eError,eError,#00-07 - eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,#08-0f - eItsMe,eItsMe,eError,eError,eStart,eStart,eStart,eStart)#10-17 + 2,2,2,2,2,2,2,2, # a0 - a7 + 2,2,2,2,2,2,2,2, # a8 - af + 2,2,2,2,2,2,2,2, # b0 - b7 + 2,2,2,2,2,2,2,2, # b8 - bf + 2,2,2,2,2,2,2,2, # c0 - c7 + 2,2,2,2,2,2,2,2, # c8 - cf + 2,2,2,2,2,2,2,2, # d0 - d7 + 2,2,2,2,2,2,2,2, # d8 - df + 3,3,3,3,3,3,3,3, # e0 - e7 + 3,3,3,3,3,4,4,4, # e8 - ef + 3,3,3,3,3,3,3,3, # f0 - f7 + 3,3,3,3,3,0,0,0) # f8 - ff + + +SJIS_st = ( + eError,eStart,eStart, 3,eError,eError,eError,eError,#00-07 + eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,#08-0f + eItsMe,eItsMe,eError,eError,eStart,eStart,eStart,eStart #10-17 +) SJISCharLenTable = (0, 1, 1, 2, 0, 0) @@ -337,48 +389,50 @@ # UCS2-BE -UCS2BE_cls = ( \ - 0,0,0,0,0,0,0,0, # 00 - 07 - 0,0,1,0,0,2,0,0, # 08 - 0f - 0,0,0,0,0,0,0,0, # 10 - 17 - 0,0,0,3,0,0,0,0, # 18 - 1f - 0,0,0,0,0,0,0,0, # 20 - 27 - 0,3,3,3,3,3,0,0, # 28 - 2f - 0,0,0,0,0,0,0,0, # 30 - 37 - 0,0,0,0,0,0,0,0, # 38 - 3f - 0,0,0,0,0,0,0,0, # 40 - 47 - 0,0,0,0,0,0,0,0, # 48 - 4f - 0,0,0,0,0,0,0,0, # 50 - 57 - 0,0,0,0,0,0,0,0, # 58 - 5f - 0,0,0,0,0,0,0,0, # 60 - 67 - 0,0,0,0,0,0,0,0, # 68 - 6f - 0,0,0,0,0,0,0,0, # 70 - 77 - 0,0,0,0,0,0,0,0, # 78 - 7f - 0,0,0,0,0,0,0,0, # 80 - 87 - 0,0,0,0,0,0,0,0, # 88 - 8f - 0,0,0,0,0,0,0,0, # 90 - 97 - 0,0,0,0,0,0,0,0, # 98 - 9f - 0,0,0,0,0,0,0,0, # a0 - a7 - 0,0,0,0,0,0,0,0, # a8 - af - 0,0,0,0,0,0,0,0, # b0 - b7 - 0,0,0,0,0,0,0,0, # b8 - bf - 0,0,0,0,0,0,0,0, # c0 - c7 - 0,0,0,0,0,0,0,0, # c8 - cf - 0,0,0,0,0,0,0,0, # d0 - d7 - 
0,0,0,0,0,0,0,0, # d8 - df - 0,0,0,0,0,0,0,0, # e0 - e7 - 0,0,0,0,0,0,0,0, # e8 - ef - 0,0,0,0,0,0,0,0, # f0 - f7 - 0,0,0,0,0,0,4,5) # f8 - ff - -UCS2BE_st = ( \ - 5, 7, 7,eError, 4, 3,eError,eError,#00-07 - eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,#08-0f - eItsMe,eItsMe, 6, 6, 6, 6,eError,eError,#10-17 - 6, 6, 6, 6, 6,eItsMe, 6, 6,#18-1f - 6, 6, 6, 6, 5, 7, 7,eError,#20-27 - 5, 8, 6, 6,eError, 6, 6, 6,#28-2f - 6, 6, 6, 6,eError,eError,eStart,eStart)#30-37 +UCS2BE_cls = ( + 0,0,0,0,0,0,0,0, # 00 - 07 + 0,0,1,0,0,2,0,0, # 08 - 0f + 0,0,0,0,0,0,0,0, # 10 - 17 + 0,0,0,3,0,0,0,0, # 18 - 1f + 0,0,0,0,0,0,0,0, # 20 - 27 + 0,3,3,3,3,3,0,0, # 28 - 2f + 0,0,0,0,0,0,0,0, # 30 - 37 + 0,0,0,0,0,0,0,0, # 38 - 3f + 0,0,0,0,0,0,0,0, # 40 - 47 + 0,0,0,0,0,0,0,0, # 48 - 4f + 0,0,0,0,0,0,0,0, # 50 - 57 + 0,0,0,0,0,0,0,0, # 58 - 5f + 0,0,0,0,0,0,0,0, # 60 - 67 + 0,0,0,0,0,0,0,0, # 68 - 6f + 0,0,0,0,0,0,0,0, # 70 - 77 + 0,0,0,0,0,0,0,0, # 78 - 7f + 0,0,0,0,0,0,0,0, # 80 - 87 + 0,0,0,0,0,0,0,0, # 88 - 8f + 0,0,0,0,0,0,0,0, # 90 - 97 + 0,0,0,0,0,0,0,0, # 98 - 9f + 0,0,0,0,0,0,0,0, # a0 - a7 + 0,0,0,0,0,0,0,0, # a8 - af + 0,0,0,0,0,0,0,0, # b0 - b7 + 0,0,0,0,0,0,0,0, # b8 - bf + 0,0,0,0,0,0,0,0, # c0 - c7 + 0,0,0,0,0,0,0,0, # c8 - cf + 0,0,0,0,0,0,0,0, # d0 - d7 + 0,0,0,0,0,0,0,0, # d8 - df + 0,0,0,0,0,0,0,0, # e0 - e7 + 0,0,0,0,0,0,0,0, # e8 - ef + 0,0,0,0,0,0,0,0, # f0 - f7 + 0,0,0,0,0,0,4,5 # f8 - ff +) + +UCS2BE_st = ( + 5, 7, 7,eError, 4, 3,eError,eError,#00-07 + eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,#08-0f + eItsMe,eItsMe, 6, 6, 6, 6,eError,eError,#10-17 + 6, 6, 6, 6, 6,eItsMe, 6, 6,#18-1f + 6, 6, 6, 6, 5, 7, 7,eError,#20-27 + 5, 8, 6, 6,eError, 6, 6, 6,#28-2f + 6, 6, 6, 6,eError,eError,eStart,eStart #30-37 +) UCS2BECharLenTable = (2, 2, 2, 0, 2, 2) @@ -390,48 +444,50 @@ # UCS2-LE -UCS2LE_cls = ( \ - 0,0,0,0,0,0,0,0, # 00 - 07 - 0,0,1,0,0,2,0,0, # 08 - 0f - 0,0,0,0,0,0,0,0, # 10 - 17 - 0,0,0,3,0,0,0,0, # 18 - 1f - 0,0,0,0,0,0,0,0, # 20 - 27 - 
0,3,3,3,3,3,0,0, # 28 - 2f - 0,0,0,0,0,0,0,0, # 30 - 37 - 0,0,0,0,0,0,0,0, # 38 - 3f - 0,0,0,0,0,0,0,0, # 40 - 47 - 0,0,0,0,0,0,0,0, # 48 - 4f - 0,0,0,0,0,0,0,0, # 50 - 57 - 0,0,0,0,0,0,0,0, # 58 - 5f - 0,0,0,0,0,0,0,0, # 60 - 67 - 0,0,0,0,0,0,0,0, # 68 - 6f - 0,0,0,0,0,0,0,0, # 70 - 77 - 0,0,0,0,0,0,0,0, # 78 - 7f - 0,0,0,0,0,0,0,0, # 80 - 87 - 0,0,0,0,0,0,0,0, # 88 - 8f - 0,0,0,0,0,0,0,0, # 90 - 97 - 0,0,0,0,0,0,0,0, # 98 - 9f - 0,0,0,0,0,0,0,0, # a0 - a7 - 0,0,0,0,0,0,0,0, # a8 - af - 0,0,0,0,0,0,0,0, # b0 - b7 - 0,0,0,0,0,0,0,0, # b8 - bf - 0,0,0,0,0,0,0,0, # c0 - c7 - 0,0,0,0,0,0,0,0, # c8 - cf - 0,0,0,0,0,0,0,0, # d0 - d7 - 0,0,0,0,0,0,0,0, # d8 - df - 0,0,0,0,0,0,0,0, # e0 - e7 - 0,0,0,0,0,0,0,0, # e8 - ef - 0,0,0,0,0,0,0,0, # f0 - f7 - 0,0,0,0,0,0,4,5) # f8 - ff - -UCS2LE_st = ( \ - 6, 6, 7, 6, 4, 3,eError,eError,#00-07 - eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,#08-0f - eItsMe,eItsMe, 5, 5, 5,eError,eItsMe,eError,#10-17 - 5, 5, 5,eError, 5,eError, 6, 6,#18-1f - 7, 6, 8, 8, 5, 5, 5,eError,#20-27 - 5, 5, 5,eError,eError,eError, 5, 5,#28-2f - 5, 5, 5,eError, 5,eError,eStart,eStart)#30-37 +UCS2LE_cls = ( + 0,0,0,0,0,0,0,0, # 00 - 07 + 0,0,1,0,0,2,0,0, # 08 - 0f + 0,0,0,0,0,0,0,0, # 10 - 17 + 0,0,0,3,0,0,0,0, # 18 - 1f + 0,0,0,0,0,0,0,0, # 20 - 27 + 0,3,3,3,3,3,0,0, # 28 - 2f + 0,0,0,0,0,0,0,0, # 30 - 37 + 0,0,0,0,0,0,0,0, # 38 - 3f + 0,0,0,0,0,0,0,0, # 40 - 47 + 0,0,0,0,0,0,0,0, # 48 - 4f + 0,0,0,0,0,0,0,0, # 50 - 57 + 0,0,0,0,0,0,0,0, # 58 - 5f + 0,0,0,0,0,0,0,0, # 60 - 67 + 0,0,0,0,0,0,0,0, # 68 - 6f + 0,0,0,0,0,0,0,0, # 70 - 77 + 0,0,0,0,0,0,0,0, # 78 - 7f + 0,0,0,0,0,0,0,0, # 80 - 87 + 0,0,0,0,0,0,0,0, # 88 - 8f + 0,0,0,0,0,0,0,0, # 90 - 97 + 0,0,0,0,0,0,0,0, # 98 - 9f + 0,0,0,0,0,0,0,0, # a0 - a7 + 0,0,0,0,0,0,0,0, # a8 - af + 0,0,0,0,0,0,0,0, # b0 - b7 + 0,0,0,0,0,0,0,0, # b8 - bf + 0,0,0,0,0,0,0,0, # c0 - c7 + 0,0,0,0,0,0,0,0, # c8 - cf + 0,0,0,0,0,0,0,0, # d0 - d7 + 0,0,0,0,0,0,0,0, # d8 - df + 0,0,0,0,0,0,0,0, # e0 - e7 + 
0,0,0,0,0,0,0,0, # e8 - ef + 0,0,0,0,0,0,0,0, # f0 - f7 + 0,0,0,0,0,0,4,5 # f8 - ff +) + +UCS2LE_st = ( + 6, 6, 7, 6, 4, 3,eError,eError,#00-07 + eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,#08-0f + eItsMe,eItsMe, 5, 5, 5,eError,eItsMe,eError,#10-17 + 5, 5, 5,eError, 5,eError, 6, 6,#18-1f + 7, 6, 8, 8, 5, 5, 5,eError,#20-27 + 5, 5, 5,eError,eError,eError, 5, 5,#28-2f + 5, 5, 5,eError, 5,eError,eStart,eStart #30-37 +) UCS2LECharLenTable = (2, 2, 2, 2, 2, 2) @@ -443,67 +499,69 @@ # UTF-8 -UTF8_cls = ( \ +UTF8_cls = ( 1,1,1,1,1,1,1,1, # 00 - 07 #allow 0x00 as a legal value - 1,1,1,1,1,1,0,0, # 08 - 0f - 1,1,1,1,1,1,1,1, # 10 - 17 - 1,1,1,0,1,1,1,1, # 18 - 1f - 1,1,1,1,1,1,1,1, # 20 - 27 - 1,1,1,1,1,1,1,1, # 28 - 2f - 1,1,1,1,1,1,1,1, # 30 - 37 - 1,1,1,1,1,1,1,1, # 38 - 3f - 1,1,1,1,1,1,1,1, # 40 - 47 - 1,1,1,1,1,1,1,1, # 48 - 4f - 1,1,1,1,1,1,1,1, # 50 - 57 - 1,1,1,1,1,1,1,1, # 58 - 5f - 1,1,1,1,1,1,1,1, # 60 - 67 - 1,1,1,1,1,1,1,1, # 68 - 6f - 1,1,1,1,1,1,1,1, # 70 - 77 - 1,1,1,1,1,1,1,1, # 78 - 7f - 2,2,2,2,3,3,3,3, # 80 - 87 - 4,4,4,4,4,4,4,4, # 88 - 8f - 4,4,4,4,4,4,4,4, # 90 - 97 - 4,4,4,4,4,4,4,4, # 98 - 9f - 5,5,5,5,5,5,5,5, # a0 - a7 - 5,5,5,5,5,5,5,5, # a8 - af - 5,5,5,5,5,5,5,5, # b0 - b7 - 5,5,5,5,5,5,5,5, # b8 - bf - 0,0,6,6,6,6,6,6, # c0 - c7 - 6,6,6,6,6,6,6,6, # c8 - cf - 6,6,6,6,6,6,6,6, # d0 - d7 - 6,6,6,6,6,6,6,6, # d8 - df - 7,8,8,8,8,8,8,8, # e0 - e7 - 8,8,8,8,8,9,8,8, # e8 - ef - 10,11,11,11,11,11,11,11, # f0 - f7 - 12,13,13,13,14,15,0,0) # f8 - ff - -UTF8_st = ( \ - eError,eStart,eError,eError,eError,eError, 12, 10,#00-07 - 9, 11, 8, 7, 6, 5, 4, 3,#08-0f - eError,eError,eError,eError,eError,eError,eError,eError,#10-17 - eError,eError,eError,eError,eError,eError,eError,eError,#18-1f - eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,#20-27 - eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,#28-2f - eError,eError, 5, 5, 5, 5,eError,eError,#30-37 - eError,eError,eError,eError,eError,eError,eError,eError,#38-3f - 
eError,eError,eError, 5, 5, 5,eError,eError,#40-47 - eError,eError,eError,eError,eError,eError,eError,eError,#48-4f - eError,eError, 7, 7, 7, 7,eError,eError,#50-57 - eError,eError,eError,eError,eError,eError,eError,eError,#58-5f - eError,eError,eError,eError, 7, 7,eError,eError,#60-67 - eError,eError,eError,eError,eError,eError,eError,eError,#68-6f - eError,eError, 9, 9, 9, 9,eError,eError,#70-77 - eError,eError,eError,eError,eError,eError,eError,eError,#78-7f - eError,eError,eError,eError,eError, 9,eError,eError,#80-87 - eError,eError,eError,eError,eError,eError,eError,eError,#88-8f - eError,eError, 12, 12, 12, 12,eError,eError,#90-97 - eError,eError,eError,eError,eError,eError,eError,eError,#98-9f - eError,eError,eError,eError,eError, 12,eError,eError,#a0-a7 - eError,eError,eError,eError,eError,eError,eError,eError,#a8-af - eError,eError, 12, 12, 12,eError,eError,eError,#b0-b7 - eError,eError,eError,eError,eError,eError,eError,eError,#b8-bf - eError,eError,eStart,eStart,eStart,eStart,eError,eError,#c0-c7 - eError,eError,eError,eError,eError,eError,eError,eError)#c8-cf + 1,1,1,1,1,1,0,0, # 08 - 0f + 1,1,1,1,1,1,1,1, # 10 - 17 + 1,1,1,0,1,1,1,1, # 18 - 1f + 1,1,1,1,1,1,1,1, # 20 - 27 + 1,1,1,1,1,1,1,1, # 28 - 2f + 1,1,1,1,1,1,1,1, # 30 - 37 + 1,1,1,1,1,1,1,1, # 38 - 3f + 1,1,1,1,1,1,1,1, # 40 - 47 + 1,1,1,1,1,1,1,1, # 48 - 4f + 1,1,1,1,1,1,1,1, # 50 - 57 + 1,1,1,1,1,1,1,1, # 58 - 5f + 1,1,1,1,1,1,1,1, # 60 - 67 + 1,1,1,1,1,1,1,1, # 68 - 6f + 1,1,1,1,1,1,1,1, # 70 - 77 + 1,1,1,1,1,1,1,1, # 78 - 7f + 2,2,2,2,3,3,3,3, # 80 - 87 + 4,4,4,4,4,4,4,4, # 88 - 8f + 4,4,4,4,4,4,4,4, # 90 - 97 + 4,4,4,4,4,4,4,4, # 98 - 9f + 5,5,5,5,5,5,5,5, # a0 - a7 + 5,5,5,5,5,5,5,5, # a8 - af + 5,5,5,5,5,5,5,5, # b0 - b7 + 5,5,5,5,5,5,5,5, # b8 - bf + 0,0,6,6,6,6,6,6, # c0 - c7 + 6,6,6,6,6,6,6,6, # c8 - cf + 6,6,6,6,6,6,6,6, # d0 - d7 + 6,6,6,6,6,6,6,6, # d8 - df + 7,8,8,8,8,8,8,8, # e0 - e7 + 8,8,8,8,8,9,8,8, # e8 - ef + 10,11,11,11,11,11,11,11, # f0 - f7 + 12,13,13,13,14,15,0,0 # f8 - ff 
+) + +UTF8_st = ( + eError,eStart,eError,eError,eError,eError, 12, 10,#00-07 + 9, 11, 8, 7, 6, 5, 4, 3,#08-0f + eError,eError,eError,eError,eError,eError,eError,eError,#10-17 + eError,eError,eError,eError,eError,eError,eError,eError,#18-1f + eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,#20-27 + eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,#28-2f + eError,eError, 5, 5, 5, 5,eError,eError,#30-37 + eError,eError,eError,eError,eError,eError,eError,eError,#38-3f + eError,eError,eError, 5, 5, 5,eError,eError,#40-47 + eError,eError,eError,eError,eError,eError,eError,eError,#48-4f + eError,eError, 7, 7, 7, 7,eError,eError,#50-57 + eError,eError,eError,eError,eError,eError,eError,eError,#58-5f + eError,eError,eError,eError, 7, 7,eError,eError,#60-67 + eError,eError,eError,eError,eError,eError,eError,eError,#68-6f + eError,eError, 9, 9, 9, 9,eError,eError,#70-77 + eError,eError,eError,eError,eError,eError,eError,eError,#78-7f + eError,eError,eError,eError,eError, 9,eError,eError,#80-87 + eError,eError,eError,eError,eError,eError,eError,eError,#88-8f + eError,eError, 12, 12, 12, 12,eError,eError,#90-97 + eError,eError,eError,eError,eError,eError,eError,eError,#98-9f + eError,eError,eError,eError,eError, 12,eError,eError,#a0-a7 + eError,eError,eError,eError,eError,eError,eError,eError,#a8-af + eError,eError, 12, 12, 12,eError,eError,eError,#b0-b7 + eError,eError,eError,eError,eError,eError,eError,eError,#b8-bf + eError,eError,eStart,eStart,eStart,eStart,eError,eError,#c0-c7 + eError,eError,eError,eError,eError,eError,eError,eError #c8-cf +) UTF8CharLenTable = (0, 1, 0, 0, 0, 0, 2, 3, 3, 3, 4, 4, 5, 5, 6, 6) diff --git a/libs/chardet/sbcharsetprober.py b/libs/chardet/sbcharsetprober.py index da07116321..37291bd27a 100755 --- a/libs/chardet/sbcharsetprober.py +++ b/libs/chardet/sbcharsetprober.py @@ -14,20 +14,22 @@ # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of 
the License, or (at your option) any later version. -# +# # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. -# +# # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA # 02110-1301 USA ######################### END LICENSE BLOCK ######################### -import constants, sys -from charsetprober import CharSetProber +import sys +from . import constants +from .charsetprober import CharSetProber +from .compat import wrap_ord SAMPLE_SIZE = 64 SB_ENOUGH_REL_THRESHOLD = 1024 @@ -37,22 +39,27 @@ NUMBER_OF_SEQ_CAT = 4 POSITIVE_CAT = NUMBER_OF_SEQ_CAT - 1 #NEGATIVE_CAT = 0 - + + class SingleByteCharSetProber(CharSetProber): - def __init__(self, model, reversed=constants.False, nameProber=None): + def __init__(self, model, reversed=False, nameProber=None): CharSetProber.__init__(self) self._mModel = model - self._mReversed = reversed # TRUE if we need to reverse every pair in the model lookup - self._mNameProber = nameProber # Optional auxiliary prober for name decision + # TRUE if we need to reverse every pair in the model lookup + self._mReversed = reversed + # Optional auxiliary prober for name decision + self._mNameProber = nameProber self.reset() def reset(self): CharSetProber.reset(self) - self._mLastOrder = 255 # char order of last character + # char order of last character + self._mLastOrder = 255 self._mSeqCounters = [0] * NUMBER_OF_SEQ_CAT self._mTotalSeqs = 0 self._mTotalChar = 0 - self._mFreqChar = 0 # characters that fall in our sampling range + # characters that fall in our sampling range + self._mFreqChar = 0 def get_charset_name(self): if self._mNameProber: @@ -67,7 +74,7 @@ def feed(self, aBuf): if not aLen: return 
self.get_state() for c in aBuf: - order = self._mModel['charToOrderMap'][ord(c)] + order = self._mModel['charToOrderMap'][wrap_ord(c)] if order < SYMBOL_CAT_ORDER: self._mTotalChar += 1 if order < SAMPLE_SIZE: @@ -75,9 +82,12 @@ def feed(self, aBuf): if self._mLastOrder < SAMPLE_SIZE: self._mTotalSeqs += 1 if not self._mReversed: - self._mSeqCounters[self._mModel['precedenceMatrix'][(self._mLastOrder * SAMPLE_SIZE) + order]] += 1 - else: # reverse the order of the letters in the lookup - self._mSeqCounters[self._mModel['precedenceMatrix'][(order * SAMPLE_SIZE) + self._mLastOrder]] += 1 + i = (self._mLastOrder * SAMPLE_SIZE) + order + model = self._mModel['precedenceMatrix'][i] + else: # reverse the order of the letters in the lookup + i = (order * SAMPLE_SIZE) + self._mLastOrder + model = self._mModel['precedenceMatrix'][i] + self._mSeqCounters[model] += 1 self._mLastOrder = order if self.get_state() == constants.eDetecting: @@ -85,11 +95,16 @@ def feed(self, aBuf): cf = self.get_confidence() if cf > POSITIVE_SHORTCUT_THRESHOLD: if constants._debug: - sys.stderr.write('%s confidence = %s, we have a winner\n' % (self._mModel['charsetName'], cf)) + sys.stderr.write('%s confidence = %s, we have a' + 'winner\n' % + (self._mModel['charsetName'], cf)) self._mState = constants.eFoundIt elif cf < NEGATIVE_SHORTCUT_THRESHOLD: if constants._debug: - sys.stderr.write('%s confidence = %s, below negative shortcut threshhold %s\n' % (self._mModel['charsetName'], cf, NEGATIVE_SHORTCUT_THRESHOLD)) + sys.stderr.write('%s confidence = %s, below negative' + 'shortcut threshhold %s\n' % + (self._mModel['charsetName'], cf, + NEGATIVE_SHORTCUT_THRESHOLD)) self._mState = constants.eNotMe return self.get_state() @@ -97,9 +112,8 @@ def feed(self, aBuf): def get_confidence(self): r = 0.01 if self._mTotalSeqs > 0: -# print self._mSeqCounters[POSITIVE_CAT], self._mTotalSeqs, self._mModel['mTypicalPositiveRatio'] - r = (1.0 * self._mSeqCounters[POSITIVE_CAT]) / self._mTotalSeqs / 
self._mModel['mTypicalPositiveRatio'] -# print r, self._mFreqChar, self._mTotalChar + r = ((1.0 * self._mSeqCounters[POSITIVE_CAT]) / self._mTotalSeqs + / self._mModel['mTypicalPositiveRatio']) r = r * self._mFreqChar / self._mTotalChar if r >= 1.0: r = 0.99 diff --git a/libs/chardet/sbcsgroupprober.py b/libs/chardet/sbcsgroupprober.py index d19160c86c..1b6196cd16 100755 --- a/libs/chardet/sbcsgroupprober.py +++ b/libs/chardet/sbcsgroupprober.py @@ -14,33 +14,35 @@ # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. -# +# # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. -# +# # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA # 02110-1301 USA ######################### END LICENSE BLOCK ######################### -import constants, sys -from charsetgroupprober import CharSetGroupProber -from sbcharsetprober import SingleByteCharSetProber -from langcyrillicmodel import Win1251CyrillicModel, Koi8rModel, Latin5CyrillicModel, MacCyrillicModel, Ibm866Model, Ibm855Model -from langgreekmodel import Latin7GreekModel, Win1253GreekModel -from langbulgarianmodel import Latin5BulgarianModel, Win1251BulgarianModel -from langhungarianmodel import Latin2HungarianModel, Win1250HungarianModel -from langthaimodel import TIS620ThaiModel -from langhebrewmodel import Win1255HebrewModel -from hebrewprober import HebrewProber +from .charsetgroupprober import CharSetGroupProber +from .sbcharsetprober import SingleByteCharSetProber +from .langcyrillicmodel import (Win1251CyrillicModel, Koi8rModel, + 
Latin5CyrillicModel, MacCyrillicModel, + Ibm866Model, Ibm855Model) +from .langgreekmodel import Latin7GreekModel, Win1253GreekModel +from .langbulgarianmodel import Latin5BulgarianModel, Win1251BulgarianModel +from .langhungarianmodel import Latin2HungarianModel, Win1250HungarianModel +from .langthaimodel import TIS620ThaiModel +from .langhebrewmodel import Win1255HebrewModel +from .hebrewprober import HebrewProber + class SBCSGroupProber(CharSetGroupProber): def __init__(self): CharSetGroupProber.__init__(self) - self._mProbers = [ \ + self._mProbers = [ SingleByteCharSetProber(Win1251CyrillicModel), SingleByteCharSetProber(Koi8rModel), SingleByteCharSetProber(Latin5CyrillicModel), @@ -54,11 +56,14 @@ def __init__(self): SingleByteCharSetProber(Latin2HungarianModel), SingleByteCharSetProber(Win1250HungarianModel), SingleByteCharSetProber(TIS620ThaiModel), - ] + ] hebrewProber = HebrewProber() - logicalHebrewProber = SingleByteCharSetProber(Win1255HebrewModel, constants.False, hebrewProber) - visualHebrewProber = SingleByteCharSetProber(Win1255HebrewModel, constants.True, hebrewProber) + logicalHebrewProber = SingleByteCharSetProber(Win1255HebrewModel, + False, hebrewProber) + visualHebrewProber = SingleByteCharSetProber(Win1255HebrewModel, True, + hebrewProber) hebrewProber.set_model_probers(logicalHebrewProber, visualHebrewProber) - self._mProbers.extend([hebrewProber, logicalHebrewProber, visualHebrewProber]) + self._mProbers.extend([hebrewProber, logicalHebrewProber, + visualHebrewProber]) self.reset() diff --git a/libs/chardet/sjisprober.py b/libs/chardet/sjisprober.py index fea2690c1a..cd0e9e7078 100755 --- a/libs/chardet/sjisprober.py +++ b/libs/chardet/sjisprober.py @@ -13,25 +13,26 @@ # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. 
-# +# # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. -# +# # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA # 02110-1301 USA ######################### END LICENSE BLOCK ######################### -from mbcharsetprober import MultiByteCharSetProber -from codingstatemachine import CodingStateMachine -from chardistribution import SJISDistributionAnalysis -from jpcntx import SJISContextAnalysis -from mbcssm import SJISSMModel -import constants, sys -from constants import eStart, eError, eItsMe +import sys +from .mbcharsetprober import MultiByteCharSetProber +from .codingstatemachine import CodingStateMachine +from .chardistribution import SJISDistributionAnalysis +from .jpcntx import SJISContextAnalysis +from .mbcssm import SJISSMModel +from . 
import constants + class SJISProber(MultiByteCharSetProber): def __init__(self): @@ -44,37 +45,42 @@ def __init__(self): def reset(self): MultiByteCharSetProber.reset(self) self._mContextAnalyzer.reset() - + def get_charset_name(self): - return "SHIFT_JIS" + return self._mContextAnalyzer.get_charset_name() def feed(self, aBuf): aLen = len(aBuf) for i in range(0, aLen): codingState = self._mCodingSM.next_state(aBuf[i]) - if codingState == eError: + if codingState == constants.eError: if constants._debug: - sys.stderr.write(self.get_charset_name() + ' prober hit error at byte ' + str(i) + '\n') + sys.stderr.write(self.get_charset_name() + + ' prober hit error at byte ' + str(i) + + '\n') self._mState = constants.eNotMe break - elif codingState == eItsMe: + elif codingState == constants.eItsMe: self._mState = constants.eFoundIt break - elif codingState == eStart: + elif codingState == constants.eStart: charLen = self._mCodingSM.get_current_charlen() if i == 0: self._mLastChar[1] = aBuf[0] - self._mContextAnalyzer.feed(self._mLastChar[2 - charLen :], charLen) + self._mContextAnalyzer.feed(self._mLastChar[2 - charLen:], + charLen) self._mDistributionAnalyzer.feed(self._mLastChar, charLen) else: - self._mContextAnalyzer.feed(aBuf[i + 1 - charLen : i + 3 - charLen], charLen) - self._mDistributionAnalyzer.feed(aBuf[i - 1 : i + 1], charLen) - + self._mContextAnalyzer.feed(aBuf[i + 1 - charLen:i + 3 + - charLen], charLen) + self._mDistributionAnalyzer.feed(aBuf[i - 1:i + 1], + charLen) + self._mLastChar[0] = aBuf[aLen - 1] - + if self.get_state() == constants.eDetecting: - if self._mContextAnalyzer.got_enough_data() and \ - (self.get_confidence() > constants.SHORTCUT_THRESHOLD): + if (self._mContextAnalyzer.got_enough_data() and + (self.get_confidence() > constants.SHORTCUT_THRESHOLD)): self._mState = constants.eFoundIt return self.get_state() diff --git a/libs/chardet/universaldetector.py b/libs/chardet/universaldetector.py index 809df2276f..476522b999 100755 --- 
a/libs/chardet/universaldetector.py +++ b/libs/chardet/universaldetector.py @@ -14,23 +14,25 @@ # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. -# +# # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. -# +# # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA # 02110-1301 USA ######################### END LICENSE BLOCK ######################### -import constants, sys -from latin1prober import Latin1Prober # windows-1252 -from mbcsgroupprober import MBCSGroupProber # multi-byte character sets -from sbcsgroupprober import SBCSGroupProber # single-byte character sets -from escprober import EscCharSetProber # ISO-2122, etc. +from . import constants +import sys +import codecs +from .latin1prober import Latin1Prober # windows-1252 +from .mbcsgroupprober import MBCSGroupProber # multi-byte character sets +from .sbcsgroupprober import SBCSGroupProber # single-byte character sets +from .escprober import EscCharSetProber # ISO-2122, etc. 
import re MINIMUM_THRESHOLD = 0.20 @@ -38,68 +40,78 @@ eEscAscii = 1 eHighbyte = 2 + class UniversalDetector: def __init__(self): - self._highBitDetector = re.compile(r'[\x80-\xFF]') - self._escDetector = re.compile(r'(\033|~{)') + self._highBitDetector = re.compile(b'[\x80-\xFF]') + self._escDetector = re.compile(b'(\033|~{)') self._mEscCharSetProber = None self._mCharSetProbers = [] self.reset() def reset(self): self.result = {'encoding': None, 'confidence': 0.0} - self.done = constants.False - self._mStart = constants.True - self._mGotData = constants.False + self.done = False + self._mStart = True + self._mGotData = False self._mInputState = ePureAscii - self._mLastChar = '' + self._mLastChar = b'' if self._mEscCharSetProber: self._mEscCharSetProber.reset() for prober in self._mCharSetProbers: prober.reset() def feed(self, aBuf): - if self.done: return + if self.done: + return aLen = len(aBuf) - if not aLen: return - + if not aLen: + return + if not self._mGotData: # If the data starts with BOM, we know it is UTF - if aBuf[:3] == '\xEF\xBB\xBF': + if aBuf[:3] == codecs.BOM_UTF8: # EF BB BF UTF-8 with BOM - self.result = {'encoding': "UTF-8", 'confidence': 1.0} - elif aBuf[:4] == '\xFF\xFE\x00\x00': + self.result = {'encoding': "UTF-8-SIG", 'confidence': 1.0} + elif aBuf[:4] == codecs.BOM_UTF32_LE: # FF FE 00 00 UTF-32, little-endian BOM self.result = {'encoding': "UTF-32LE", 'confidence': 1.0} - elif aBuf[:4] == '\x00\x00\xFE\xFF': + elif aBuf[:4] == codecs.BOM_UTF32_BE: # 00 00 FE FF UTF-32, big-endian BOM self.result = {'encoding': "UTF-32BE", 'confidence': 1.0} - elif aBuf[:4] == '\xFE\xFF\x00\x00': + elif aBuf[:4] == b'\xFE\xFF\x00\x00': # FE FF 00 00 UCS-4, unusual octet order BOM (3412) - self.result = {'encoding': "X-ISO-10646-UCS-4-3412", 'confidence': 1.0} - elif aBuf[:4] == '\x00\x00\xFF\xFE': + self.result = { + 'encoding': "X-ISO-10646-UCS-4-3412", + 'confidence': 1.0 + } + elif aBuf[:4] == b'\x00\x00\xFF\xFE': # 00 00 FF FE UCS-4, unusual octet 
order BOM (2143) - self.result = {'encoding': "X-ISO-10646-UCS-4-2143", 'confidence': 1.0} - elif aBuf[:2] == '\xFF\xFE': + self.result = { + 'encoding': "X-ISO-10646-UCS-4-2143", + 'confidence': 1.0 + } + elif aBuf[:2] == codecs.BOM_LE: # FF FE UTF-16, little endian BOM self.result = {'encoding': "UTF-16LE", 'confidence': 1.0} - elif aBuf[:2] == '\xFE\xFF': + elif aBuf[:2] == codecs.BOM_BE: # FE FF UTF-16, big endian BOM self.result = {'encoding': "UTF-16BE", 'confidence': 1.0} - self._mGotData = constants.True + self._mGotData = True if self.result['encoding'] and (self.result['confidence'] > 0.0): - self.done = constants.True + self.done = True return if self._mInputState == ePureAscii: if self._highBitDetector.search(aBuf): self._mInputState = eHighbyte - elif (self._mInputState == ePureAscii) and self._escDetector.search(self._mLastChar + aBuf): + elif ((self._mInputState == ePureAscii) and + self._escDetector.search(self._mLastChar + aBuf)): self._mInputState = eEscAscii - self._mLastChar = aBuf[-1] + self._mLastChar = aBuf[-1:] if self._mInputState == eEscAscii: if not self._mEscCharSetProber: @@ -107,25 +119,27 @@ def feed(self, aBuf): if self._mEscCharSetProber.feed(aBuf) == constants.eFoundIt: self.result = {'encoding': self._mEscCharSetProber.get_charset_name(), 'confidence': self._mEscCharSetProber.get_confidence()} - self.done = constants.True + self.done = True elif self._mInputState == eHighbyte: if not self._mCharSetProbers: - self._mCharSetProbers = [MBCSGroupProber(), SBCSGroupProber(), Latin1Prober()] + self._mCharSetProbers = [MBCSGroupProber(), SBCSGroupProber(), + Latin1Prober()] for prober in self._mCharSetProbers: if prober.feed(aBuf) == constants.eFoundIt: self.result = {'encoding': prober.get_charset_name(), 'confidence': prober.get_confidence()} - self.done = constants.True + self.done = True break def close(self): - if self.done: return + if self.done: + return if not self._mGotData: if constants._debug: sys.stderr.write('no data 
received!\n') return - self.done = constants.True - + self.done = True + if self._mInputState == ePureAscii: self.result = {'encoding': 'ascii', 'confidence': 1.0} return self.result @@ -135,7 +149,8 @@ def close(self): maxProberConfidence = 0.0 maxProber = None for prober in self._mCharSetProbers: - if not prober: continue + if not prober: + continue proberConfidence = prober.get_confidence() if proberConfidence > maxProberConfidence: maxProberConfidence = proberConfidence @@ -148,7 +163,8 @@ def close(self): if constants._debug: sys.stderr.write('no probers hit minimum threshhold\n') for prober in self._mCharSetProbers[0].mProbers: - if not prober: continue - sys.stderr.write('%s confidence = %s\n' % \ - (prober.get_charset_name(), \ + if not prober: + continue + sys.stderr.write('%s confidence = %s\n' % + (prober.get_charset_name(), prober.get_confidence())) diff --git a/libs/chardet/utf8prober.py b/libs/chardet/utf8prober.py index c1792bb377..1c0bb5d8fd 100755 --- a/libs/chardet/utf8prober.py +++ b/libs/chardet/utf8prober.py @@ -13,26 +13,26 @@ # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. -# +# # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. 
-# +# # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA # 02110-1301 USA ######################### END LICENSE BLOCK ######################### -import constants, sys -from constants import eStart, eError, eItsMe -from charsetprober import CharSetProber -from codingstatemachine import CodingStateMachine -from mbcssm import UTF8SMModel +from . import constants +from .charsetprober import CharSetProber +from .codingstatemachine import CodingStateMachine +from .mbcssm import UTF8SMModel ONE_CHAR_PROB = 0.5 + class UTF8Prober(CharSetProber): def __init__(self): CharSetProber.__init__(self) @@ -50,13 +50,13 @@ def get_charset_name(self): def feed(self, aBuf): for c in aBuf: codingState = self._mCodingSM.next_state(c) - if codingState == eError: + if codingState == constants.eError: self._mState = constants.eNotMe break - elif codingState == eItsMe: + elif codingState == constants.eItsMe: self._mState = constants.eFoundIt break - elif codingState == eStart: + elif codingState == constants.eStart: if self._mCodingSM.get_current_charlen() >= 2: self._mNumOfMBChar += 1 diff --git a/libs/decorator.py b/libs/decorator.py deleted file mode 100644 index ea7e990952..0000000000 --- a/libs/decorator.py +++ /dev/null @@ -1,210 +0,0 @@ -########################## LICENCE ############################### -## -## Copyright (c) 2005-2011, Michele Simionato -## All rights reserved. -## -## Redistributions of source code must retain the above copyright -## notice, this list of conditions and the following disclaimer. -## Redistributions in bytecode form must reproduce the above copyright -## notice, this list of conditions and the following disclaimer in -## the documentation and/or other materials provided with the -## distribution. 
- -## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -## HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, -## INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, -## BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS -## OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -## ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR -## TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE -## USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH -## DAMAGE. - -""" -Decorator module, see http://pypi.python.org/pypi/decorator -for the documentation. -""" - -__version__ = '3.3.2' - -__all__ = ["decorator", "FunctionMaker", "partial"] - -import sys, re, inspect - -try: - from functools import partial -except ImportError: # for Python version < 2.5 - class partial(object): - "A simple replacement of functools.partial" - def __init__(self, func, *args, **kw): - self.func = func - self.args = args - self.keywords = kw - def __call__(self, *otherargs, **otherkw): - kw = self.keywords.copy() - kw.update(otherkw) - return self.func(*(self.args + otherargs), **kw) - -if sys.version >= '3': - from inspect import getfullargspec -else: - class getfullargspec(object): - "A quick and dirty replacement for getfullargspec for Python 2.X" - def __init__(self, f): - self.args, self.varargs, self.varkw, self.defaults = \ - inspect.getargspec(f) - self.kwonlyargs = [] - self.kwonlydefaults = None - self.annotations = getattr(f, '__annotations__', {}) - def __iter__(self): - yield self.args - yield self.varargs - yield self.varkw - yield self.defaults - -DEF = re.compile('\s*def\s*([_\w][_\w\d]*)\s*\(') - -# basic functionality -class 
FunctionMaker(object): - """ - An object with the ability to create functions with a given signature. - It has attributes name, doc, module, signature, defaults, dict and - methods update and make. - """ - def __init__(self, func=None, name=None, signature=None, - defaults=None, doc=None, module=None, funcdict=None): - self.shortsignature = signature - if func: - # func can be a class or a callable, but not an instance method - self.name = func.__name__ - if self.name == '': # small hack for lambda functions - self.name = '_lambda_' - self.doc = func.__doc__ - self.module = func.__module__ - if inspect.isfunction(func): - argspec = getfullargspec(func) - for a in ('args', 'varargs', 'varkw', 'defaults', 'kwonlyargs', - 'kwonlydefaults', 'annotations'): - setattr(self, a, getattr(argspec, a)) - for i, arg in enumerate(self.args): - setattr(self, 'arg%d' % i, arg) - self.signature = inspect.formatargspec( - formatvalue=lambda val: "", *argspec)[1:-1] - allargs = list(self.args) - if self.varargs: - allargs.append('*' + self.varargs) - if self.varkw: - allargs.append('**' + self.varkw) - try: - self.shortsignature = ', '.join(allargs) - except TypeError: # exotic signature, valid only in Python 2.X - self.shortsignature = self.signature - self.dict = func.__dict__.copy() - # func=None happens when decorating a caller - if name: - self.name = name - if signature is not None: - self.signature = signature - if defaults: - self.defaults = defaults - if doc: - self.doc = doc - if module: - self.module = module - if funcdict: - self.dict = funcdict - # check existence required attributes - assert hasattr(self, 'name') - if not hasattr(self, 'signature'): - raise TypeError('You are decorating a non function: %s' % func) - - def update(self, func, **kw): - "Update the signature of func with the data in self" - func.__name__ = self.name - func.__doc__ = getattr(self, 'doc', None) - func.__dict__ = getattr(self, 'dict', {}) - func.func_defaults = getattr(self, 'defaults', ()) - 
func.__kwdefaults__ = getattr(self, 'kwonlydefaults', None) - callermodule = sys._getframe(3).f_globals.get('__name__', '?') - func.__module__ = getattr(self, 'module', callermodule) - func.__dict__.update(kw) - - def make(self, src_templ, evaldict=None, addsource=False, **attrs): - "Make a new function from a given template and update the signature" - src = src_templ % vars(self) # expand name and signature - evaldict = evaldict or {} - mo = DEF.match(src) - if mo is None: - raise SyntaxError('not a valid function template\n%s' % src) - name = mo.group(1) # extract the function name - names = set([name] + [arg.strip(' *') for arg in - self.shortsignature.split(',')]) - for n in names: - if n in ('_func_', '_call_'): - raise NameError('%s is overridden in\n%s' % (n, src)) - if not src.endswith('\n'): # add a newline just for safety - src += '\n' # this is needed in old versions of Python - try: - code = compile(src, '', 'single') - # print >> sys.stderr, 'Compiling %s' % src - exec code in evaldict - except: - print >> sys.stderr, 'Error in generated code:' - print >> sys.stderr, src - raise - func = evaldict[name] - if addsource: - attrs['__source__'] = src - self.update(func, **attrs) - return func - - @classmethod - def create(cls, obj, body, evaldict, defaults=None, - doc=None, module=None, addsource=True,**attrs): - """ - Create a function from the strings name, signature and body. - evaldict is the evaluation dictionary. If addsource is true an attribute - __source__ is added to the result. The attributes attrs are added, - if any. 
- """ - if isinstance(obj, str): # "name(signature)" - name, rest = obj.strip().split('(', 1) - signature = rest[:-1] #strip a right parens - func = None - else: # a function - name = None - signature = None - func = obj - self = cls(func, name, signature, defaults, doc, module) - ibody = '\n'.join(' ' + line for line in body.splitlines()) - return self.make('def %(name)s(%(signature)s):\n' + ibody, - evaldict, addsource, **attrs) - -def decorator(caller, func=None): - """ - decorator(caller) converts a caller function into a decorator; - decorator(caller, func) decorates a function using a caller. - """ - if func is not None: # returns a decorated function - evaldict = func.func_globals.copy() - evaldict['_call_'] = caller - evaldict['_func_'] = func - return FunctionMaker.create( - func, "return _call_(_func_, %(shortsignature)s)", - evaldict, undecorated=func, __wrapped__=func) - else: # returns a decorator - if isinstance(caller, partial): - return partial(decorator, caller) - # otherwise assume caller is a function - first = inspect.getargspec(caller)[0][0] # first arg - evaldict = caller.func_globals.copy() - evaldict['_call_'] = caller - evaldict['decorator'] = decorator - return FunctionMaker.create( - '%s(%s)' % (caller.__name__, first), - 'return decorator(_call_, %s)' % first, - evaldict, undecorated=caller, __wrapped__=caller, - doc=caller.__doc__, module=caller.__module__) diff --git a/libs/elixir/__init__.py b/libs/elixir/__init__.py deleted file mode 100644 index a242b538a1..0000000000 --- a/libs/elixir/__init__.py +++ /dev/null @@ -1,114 +0,0 @@ -''' -Elixir package - -A declarative layer on top of the `SQLAlchemy library -`_. It is a fairly thin wrapper, which provides -the ability to create simple Python classes that map directly to relational -database tables (this pattern is often referred to as the Active Record design -pattern), providing many of the benefits of traditional databases -without losing the convenience of Python objects. 
- -Elixir is intended to replace the ActiveMapper SQLAlchemy extension, and the -TurboEntity project but does not intend to replace SQLAlchemy's core features, -and instead focuses on providing a simpler syntax for defining model objects -when you do not need the full expressiveness of SQLAlchemy's manual mapper -definitions. -''' - -try: - set -except NameError: - from sets import Set as set - -import sqlalchemy -from sqlalchemy.types import * - -from elixir.options import using_options, using_table_options, \ - using_mapper_options, options_defaults, \ - using_options_defaults -from elixir.entity import Entity, EntityBase, EntityMeta, EntityDescriptor, \ - setup_entities, cleanup_entities -from elixir.fields import has_field, Field -from elixir.relationships import belongs_to, has_one, has_many, \ - has_and_belongs_to_many, \ - ManyToOne, OneToOne, OneToMany, ManyToMany -from elixir.properties import has_property, GenericProperty, ColumnProperty, \ - Synonym -from elixir.statements import Statement -from elixir.collection import EntityCollection, GlobalEntityCollection - - -__version__ = '0.8.0dev' - -__all__ = ['Entity', 'EntityBase', 'EntityMeta', 'EntityCollection', - 'entities', - 'Field', 'has_field', - 'has_property', 'GenericProperty', 'ColumnProperty', 'Synonym', - 'belongs_to', 'has_one', 'has_many', 'has_and_belongs_to_many', - 'ManyToOne', 'OneToOne', 'OneToMany', 'ManyToMany', - 'using_options', 'using_table_options', 'using_mapper_options', - 'options_defaults', 'using_options_defaults', - 'metadata', 'session', - 'create_all', 'drop_all', - 'setup_all', 'cleanup_all', - 'setup_entities', 'cleanup_entities'] + \ - sqlalchemy.types.__all__ - -__doc_all__ = ['create_all', 'drop_all', - 'setup_all', 'cleanup_all', - 'metadata', 'session'] - -# default session -session = sqlalchemy.orm.scoped_session(sqlalchemy.orm.sessionmaker()) - -# default metadata -metadata = sqlalchemy.MetaData() - -metadatas = set() - -# default entity collection -entities = 
GlobalEntityCollection() - - -def create_all(*args, **kwargs): - '''Create the necessary tables for all declared entities''' - for md in metadatas: - md.create_all(*args, **kwargs) - - -def drop_all(*args, **kwargs): - '''Drop tables for all declared entities''' - for md in metadatas: - md.drop_all(*args, **kwargs) - - -def setup_all(create_tables=False, *args, **kwargs): - '''Setup the table and mapper of all entities in the default entity - collection. - ''' - setup_entities(entities) - - # issue the "CREATE" SQL statements - if create_tables: - create_all(*args, **kwargs) - - -def cleanup_all(drop_tables=False, *args, **kwargs): - '''Clear all mappers, clear the session, and clear all metadatas. - Optionally drops the tables. - ''' - session.close() - - cleanup_entities(entities) - - sqlalchemy.orm.clear_mappers() - entities.clear() - - if drop_tables: - drop_all(*args, **kwargs) - - for md in metadatas: - md.clear() - metadatas.clear() - - diff --git a/libs/elixir/collection.py b/libs/elixir/collection.py deleted file mode 100644 index 78127e3e13..0000000000 --- a/libs/elixir/collection.py +++ /dev/null @@ -1,125 +0,0 @@ -''' -Default entity collection implementation -''' -import sys -import re - -class BaseCollection(list): - def __init__(self, entities=None): - list.__init__(self) - if entities is not None: - self.extend(entities) - - def extend(self, entities): - for e in entities: - self.append(e) - - def clear(self): - del self[:] - - def resolve_absolute(self, key, full_path, entity=None, root=None): - if root is None: - root = entity._descriptor.resolve_root - if root: - full_path = '%s.%s' % (root, full_path) - module_path, classname = full_path.rsplit('.', 1) - module = sys.modules[module_path] - res = getattr(module, classname, None) - if res is None: - if entity is not None: - raise Exception("Couldn't resolve target '%s' <%s> in '%s'!" - % (key, full_path, entity.__name__)) - else: - raise Exception("Couldn't resolve target '%s' <%s>!" 
- % (key, full_path)) - return res - - def __getattr__(self, key): - return self.resolve(key) - -# default entity collection -class GlobalEntityCollection(BaseCollection): - def __init__(self, entities=None): - # _entities is a dict of entities keyed on their name. - self._entities = {} - super(GlobalEntityCollection, self).__init__(entities) - - def append(self, entity): - ''' - Add an entity to the collection. - ''' - super(EntityCollection, self).append(entity) - - existing_entities = self._entities.setdefault(entity.__name__, []) - existing_entities.append(entity) - - def resolve(self, key, entity=None): - ''' - Resolve a key to an Entity. The optional `entity` argument is the - "source" entity when resolving relationship targets. - ''' - # Do we have a fully qualified entity name? - if '.' in key: - return self.resolve_absolute(key, key, entity) - else: - # Otherwise we look in the entities of this collection - res = self._entities.get(key, None) - if res is None: - if entity: - raise Exception("Couldn't resolve target '%s' in '%s'" - % (key, entity.__name__)) - else: - raise Exception("This collection does not contain any " - "entity corresponding to the key '%s'!" - % key) - elif len(res) > 1: - raise Exception("'%s' resolves to several entities, you should" - " use the full path (including the full module" - " name) to that entity." % key) - else: - return res[0] - - def clear(self): - self._entities = {} - super(GlobalEntityCollection, self).clear() - -# backward compatible name -EntityCollection = GlobalEntityCollection - -_leading_dots = re.compile('^([.]*).*$') - -class RelativeEntityCollection(BaseCollection): - # the entity=None does not make any sense with a relative entity collection - def resolve(self, key, entity): - ''' - Resolve a key to an Entity. The optional `entity` argument is the - "source" entity when resolving relationship targets. - ''' - full_path = key - - if '.' 
not in key or key.startswith('.'): - # relative target - - # any leading dot is stripped and with each dot removed, - # the entity_module is stripped of one more chunk (starting with - # the last one). - num_dots = _leading_dots.match(full_path).end(1) - full_path = full_path[num_dots:] - chunks = entity.__module__.split('.') - chunkstokeep = len(chunks) - num_dots - if chunkstokeep < 0: - raise Exception("Couldn't resolve relative target " - "'%s' relative to '%s'" % (key, entity.__module__)) - entity_module = '.'.join(chunks[:chunkstokeep]) - - if entity_module and entity_module is not '__main__': - full_path = '%s.%s' % (entity_module, full_path) - - root = '' - else: - root = None - return self.resolve_absolute(key, full_path, entity, root=root) - - def __getattr__(self, key): - raise NotImplementedError - diff --git a/libs/elixir/entity.py b/libs/elixir/entity.py deleted file mode 100644 index 87f5154cdf..0000000000 --- a/libs/elixir/entity.py +++ /dev/null @@ -1,1039 +0,0 @@ -''' -This module provides the ``Entity`` base class, as well as its metaclass -``EntityMeta``. 
-''' - -import sys -import types -import warnings - -from copy import deepcopy - -import sqlalchemy -from sqlalchemy import Table, Column, Integer, desc, ForeignKey, and_, \ - ForeignKeyConstraint -from sqlalchemy.orm import MapperExtension, mapper, object_session, \ - EXT_CONTINUE, polymorphic_union, ScopedSession, \ - ColumnProperty -from sqlalchemy.sql import ColumnCollection - -import elixir -from elixir.statements import process_mutators, MUTATORS -from elixir import options -from elixir.properties import Property - -DEBUG = False - -__doc_all__ = ['Entity', 'EntityMeta'] - - -def session_mapper_factory(scoped_session): - def session_mapper(cls, *args, **kwargs): - if kwargs.pop('save_on_init', True): - old_init = cls.__init__ - def __init__(self, *args, **kwargs): - old_init(self, *args, **kwargs) - scoped_session.add(self) - cls.__init__ = __init__ - cls.query = scoped_session.query_property() - return mapper(cls, *args, **kwargs) - return session_mapper - - -class EntityDescriptor(object): - ''' - EntityDescriptor describes fields and options needed for table creation. - ''' - - def __init__(self, entity): - self.entity = entity - self.parent = None - - bases = [] - for base in entity.__bases__: - if isinstance(base, EntityMeta): - if is_entity(base) and not is_abstract_entity(base): - if self.parent: - raise Exception( - '%s entity inherits from several entities, ' - 'and this is not supported.' - % self.entity.__name__) - else: - self.parent = base - bases.extend(base._descriptor.bases) - self.parent._descriptor.children.append(entity) - else: - bases.append(base) - self.bases = bases - if not is_entity(entity) or is_abstract_entity(entity): - return - - # entity.__module__ is not always reliable (eg in mod_python) - self.module = sys.modules.get(entity.__module__) - - self.builders = [] - - #XXX: use entity.__subclasses__ ? 
- self.children = [] - - # used for multi-table inheritance - self.join_condition = None - self.has_pk = False - self._pk_col_done = False - - # columns and constraints waiting for a table to exist - self._columns = ColumnCollection() - self.constraints = [] - - # properties (it is only useful for checking dupe properties at the - # moment, and when adding properties before the mapper is created, - # which shouldn't happen). - self.properties = {} - - # - self.relationships = [] - - # set default value for options - self.table_args = [] - - # base class(es) options_defaults - options_defaults = self.options_defaults() - - complete_defaults = options.options_defaults.copy() - complete_defaults.update({ - 'metadata': elixir.metadata, - 'session': elixir.session, - 'collection': elixir.entities - }) - - # set default value for other options - for key in options.valid_options: - value = options_defaults.get(key, complete_defaults[key]) - if isinstance(value, dict): - value = value.copy() - setattr(self, key, value) - - # override options with module-level defaults defined - for key in ('metadata', 'session', 'collection'): - attr = '__%s__' % key - if hasattr(self.module, attr): - setattr(self, key, getattr(self.module, attr)) - - def options_defaults(self): - base_defaults = {} - for base in self.bases: - base_defaults.update(base._descriptor.options_defaults()) - base_defaults.update(getattr(self.entity, 'options_defaults', {})) - return base_defaults - - def setup_options(self): - ''' - Setup any values that might depend on the "using_options" class - mutator. For example, the tablename or the metadata. 
- ''' - elixir.metadatas.add(self.metadata) - if self.collection is not None: - self.collection.append(self.entity) - - entity = self.entity - if self.parent: - if self.inheritance == 'single': - self.tablename = self.parent._descriptor.tablename - - if not self.tablename: - if self.shortnames: - self.tablename = entity.__name__.lower() - else: - modulename = entity.__module__.replace('.', '_') - tablename = "%s_%s" % (modulename, entity.__name__) - self.tablename = tablename.lower() - elif hasattr(self.tablename, '__call__'): - self.tablename = self.tablename(entity) - - if not self.identity: - if 'polymorphic_identity' in self.mapper_options: - self.identity = self.mapper_options['polymorphic_identity'] - else: - #TODO: include module name (We could have b.Account inherit - # from a.Account) - self.identity = entity.__name__.lower() - elif 'polymorphic_identity' in self.mapper_options: - raise Exception('You cannot use the "identity" option and the ' - 'polymorphic_identity mapper option at the same ' - 'time.') - elif hasattr(self.identity, '__call__'): - self.identity = self.identity(entity) - - if self.polymorphic: - if not isinstance(self.polymorphic, basestring): - self.polymorphic = options.DEFAULT_POLYMORPHIC_COL_NAME - - #--------------------- - # setup phase methods - - def setup_autoload_table(self): - self.setup_table(True) - - def create_pk_cols(self): - """ - Create primary_key columns. That is, call the 'create_pk_cols' - builders then add a primary key to the table if it hasn't already got - one and needs one. - - This method is "semi-recursive" in some cases: it calls the - create_keys method on ManyToOne relationships and those in turn call - create_pk_cols on their target. It shouldn't be possible to have an - infinite loop since a loop of primary_keys is not a valid situation. 
- """ - if self._pk_col_done: - return - - self.call_builders('create_pk_cols') - - if not self.autoload: - if self.parent: - if self.inheritance == 'multi': - # Add columns with foreign keys to the parent's primary - # key columns - parent_desc = self.parent._descriptor - tablename = parent_desc.table_fullname - join_clauses = [] - fk_columns = [] - for pk_col in parent_desc.primary_keys: - colname = options.MULTIINHERITANCECOL_NAMEFORMAT % \ - {'entity': self.parent.__name__.lower(), - 'key': pk_col.key} - - # It seems like SA ForeignKey is not happy being given - # a real column object when said column is not yet - # attached to a table - pk_col_name = "%s.%s" % (tablename, pk_col.key) - col = Column(colname, pk_col.type, primary_key=True) - fk_columns.append(col) - self.add_column(col) - join_clauses.append(col == pk_col) - self.join_condition = and_(*join_clauses) - self.add_constraint( - ForeignKeyConstraint(fk_columns, - parent_desc.primary_keys, ondelete='CASCADE')) - elif self.inheritance == 'concrete': - # Copy primary key columns from the parent. - for col in self.parent._descriptor.columns: - if col.primary_key: - self.add_column(col.copy()) - elif not self.has_pk and self.auto_primarykey: - if isinstance(self.auto_primarykey, basestring): - colname = self.auto_primarykey - else: - colname = options.DEFAULT_AUTO_PRIMARYKEY_NAME - - self.add_column( - Column(colname, options.DEFAULT_AUTO_PRIMARYKEY_TYPE, - primary_key=True)) - self._pk_col_done = True - - def setup_relkeys(self): - self.call_builders('create_non_pk_cols') - - def before_table(self): - self.call_builders('before_table') - - def setup_table(self, only_autoloaded=False): - ''' - Create a SQLAlchemy table-object with all columns that have been - defined up to this point. 
- ''' - if self.entity.table is not None: - return - - if self.autoload != only_autoloaded: - return - - kwargs = self.table_options - if self.autoload: - args = self.table_args - kwargs['autoload'] = True - else: - if self.parent: - if self.inheritance == 'single': - # we know the parent is setup before the child - self.entity.table = self.parent.table - - # re-add the entity columns to the parent entity so that - # they are added to the parent's table (whether the - # parent's table is already setup or not). - for col in self._columns: - self.parent._descriptor.add_column(col) - for constraint in self.constraints: - self.parent._descriptor.add_constraint(constraint) - return - elif self.inheritance == 'concrete': - #TODO: we should also copy columns from the parent table - # if the parent is a base (abstract?) entity (whatever the - # inheritance type -> elif will need to be changed) - - # Copy all non-primary key columns from parent table - # (primary key columns have already been copied earlier). 
- for col in self.parent._descriptor.columns: - if not col.primary_key: - self.add_column(col.copy()) - - for con in self.parent._descriptor.constraints: - self.add_constraint( - ForeignKeyConstraint( - [e.parent.key for e in con.elements], - [e.target_fullname for e in con.elements], - name=con.name, #TODO: modify it - onupdate=con.onupdate, ondelete=con.ondelete, - use_alter=con.use_alter)) - - if self.polymorphic and \ - self.inheritance in ('single', 'multi') and \ - self.children and not self.parent: - self.add_column(Column(self.polymorphic, - options.POLYMORPHIC_COL_TYPE)) - - if self.version_id_col: - if not isinstance(self.version_id_col, basestring): - self.version_id_col = options.DEFAULT_VERSION_ID_COL_NAME - self.add_column(Column(self.version_id_col, Integer)) - - args = list(self.columns) + self.constraints + self.table_args - self.entity.table = Table(self.tablename, self.metadata, - *args, **kwargs) - if DEBUG: - print self.entity.table.repr2() - - def setup_reltables(self): - self.call_builders('create_tables') - - def after_table(self): - self.call_builders('after_table') - - def setup_events(self): - def make_proxy_method(methods): - def proxy_method(self, mapper, connection, instance): - for func in methods: - ret = func(instance) - # I couldn't commit myself to force people to - # systematicaly return EXT_CONTINUE in all their event - # methods. - # But not doing that diverge to how SQLAlchemy works. - # I should try to convince Mike to do EXT_CONTINUE by - # default, and stop processing as the special case. 
-# if ret != EXT_CONTINUE: - if ret is not None and ret != EXT_CONTINUE: - return ret - return EXT_CONTINUE - return proxy_method - - # create a list of callbacks for each event - methods = {} - - all_methods = getmembers(self.entity, - lambda a: isinstance(a, types.MethodType)) - - for name, method in all_methods: - for event in getattr(method, '_elixir_events', []): - event_methods = methods.setdefault(event, []) - event_methods.append(method) - - if not methods: - return - - # transform that list into methods themselves - for event in methods: - methods[event] = make_proxy_method(methods[event]) - - # create a custom mapper extension class, tailored to our entity - ext = type('EventMapperExtension', (MapperExtension,), methods)() - - # then, make sure that the entity's mapper has our mapper extension - self.add_mapper_extension(ext) - - def before_mapper(self): - self.call_builders('before_mapper') - - def _get_children(self): - children = self.children[:] - for child in self.children: - children.extend(child._descriptor._get_children()) - return children - - def translate_order_by(self, order_by): - if isinstance(order_by, basestring): - order_by = [order_by] - - order = [] - for colname in order_by: - #FIXME: get_column uses self.columns[key] instead of property - # names. self.columns correspond to the columns of the table if - # the table was already created and to self._columns otherwise, - # which is a ColumnCollection indexed on columns.key - # See ticket #108. - col = self.get_column(colname.strip('-')) - if colname.startswith('-'): - col = desc(col) - order.append(col) - return order - - def setup_mapper(self): - ''' - Initializes and assign a mapper to the entity. - At this point the mapper will usually have no property as they are - added later. 
- ''' - if self.entity.mapper: - return - - # for now we don't support the "abstract" parent class in a concrete - # inheritance scenario as demonstrated in - # sqlalchemy/test/orm/inheritance/concrete.py - # this should be added along other - kwargs = {} - if self.order_by: - kwargs['order_by'] = self.translate_order_by(self.order_by) - - if self.version_id_col: - kwargs['version_id_col'] = self.get_column(self.version_id_col) - - if self.inheritance in ('single', 'concrete', 'multi'): - if self.parent and \ - (self.inheritance != 'concrete' or self.polymorphic): - # non-polymorphic concrete doesn't need this - kwargs['inherits'] = self.parent.mapper - - if self.inheritance == 'multi' and self.parent: - kwargs['inherit_condition'] = self.join_condition - - if self.polymorphic: - if self.children: - if self.inheritance == 'concrete': - keys = [(self.identity, self.entity.table)] - keys.extend([(child._descriptor.identity, child.table) - for child in self._get_children()]) - # Having the same alias name for an entity and one of - # its child (which is a parent itself) shouldn't cause - # any problem because the join shouldn't be used at - # the same time. But in reality, some versions of SA - # do misbehave on this. Since it doesn't hurt to have - # different names anyway, here they go. 
- pjoin = polymorphic_union( - dict(keys), self.polymorphic, - 'pjoin_%s' % self.identity) - - kwargs['with_polymorphic'] = ('*', pjoin) - kwargs['polymorphic_on'] = \ - getattr(pjoin.c, self.polymorphic) - elif not self.parent: - kwargs['polymorphic_on'] = \ - self.get_column(self.polymorphic) - - if self.children or self.parent: - kwargs['polymorphic_identity'] = self.identity - - if self.parent and self.inheritance == 'concrete': - kwargs['concrete'] = True - - if self.parent and self.inheritance == 'single': - args = [] - else: - args = [self.entity.table] - - # let user-defined kwargs override Elixir-generated ones, though that's - # not very usefull since most of them expect Column instances. - kwargs.update(self.mapper_options) - - #TODO: document this! - if 'primary_key' in kwargs: - cols = self.entity.table.c - kwargs['primary_key'] = [getattr(cols, colname) for - colname in kwargs['primary_key']] - - # do the mapping - if self.session is None: - self.entity.mapper = mapper(self.entity, *args, **kwargs) - elif isinstance(self.session, ScopedSession): - session_mapper = session_mapper_factory(self.session) - self.entity.mapper = session_mapper(self.entity, *args, **kwargs) - else: - raise Exception("Failed to map entity '%s' with its table or " - "selectable. You can only bind an Entity to a " - "ScopedSession object or None for manual session " - "management." - % self.entity.__name__) - - def after_mapper(self): - self.call_builders('after_mapper') - - def setup_properties(self): - self.call_builders('create_properties') - - def finalize(self): - self.call_builders('finalize') - self.entity._setup_done = True - - #---------------- - # helper methods - - def call_builders(self, what): - for builder in self.builders: - if hasattr(builder, what): - getattr(builder, what)() - - def add_column(self, col, check_duplicate=None): - '''when check_duplicate is None, the value of the allowcoloverride - option of the entity is used. 
- ''' - if check_duplicate is None: - check_duplicate = not self.allowcoloverride - - if col.key in self._columns: - if check_duplicate: - raise Exception("Column '%s' already exist in '%s' ! " % - (col.key, self.entity.__name__)) - else: - del self._columns[col.key] - # are indexed on col.key - self._columns.add(col) - - if col.primary_key: - self.has_pk = True - - table = self.entity.table - if table is not None: - if check_duplicate and col.key in table.columns.keys(): - raise Exception("Column '%s' already exist in table '%s' ! " % - (col.key, table.name)) - table.append_column(col) - if DEBUG: - print "table.append_column(%s)" % col - - def add_constraint(self, constraint): - self.constraints.append(constraint) - - table = self.entity.table - if table is not None: - table.append_constraint(constraint) - - def add_property(self, name, property, check_duplicate=True): - if check_duplicate and name in self.properties: - raise Exception("property '%s' already exist in '%s' ! " % - (name, self.entity.__name__)) - self.properties[name] = property - -#FIXME: something like this is needed to propagate the relationships from -# parent entities to their children in a concrete inheritance scenario. But -# this doesn't work because of the backref matching code. In most case -# (test_concrete.py) it doesn't even happen at all. 
-# if self.children and self.inheritance == 'concrete': -# for child in self.children: -# child._descriptor.add_property(name, property) - - mapper = self.entity.mapper - if mapper: - mapper.add_property(name, property) - if DEBUG: - print "mapper.add_property('%s', %s)" % (name, repr(property)) - - def add_mapper_extension(self, extension): - extensions = self.mapper_options.get('extension', []) - if not isinstance(extensions, list): - extensions = [extensions] - extensions.append(extension) - self.mapper_options['extension'] = extensions - - def get_column(self, key, check_missing=True): - #TODO: this needs to work whether the table is already setup or not - #TODO: support SA table/autoloaded entity - try: - return self.columns[key] - except KeyError: - if check_missing: - raise Exception("No column named '%s' found in the table of " - "the '%s' entity!" - % (key, self.entity.__name__)) - - def get_inverse_relation(self, rel, check_reverse=True): - ''' - Return the inverse relation of rel, if any, None otherwise. - ''' - - matching_rel = None - for other_rel in self.relationships: - if rel.is_inverse(other_rel): - if matching_rel is None: - matching_rel = other_rel - else: - raise Exception( - "Several relations match as inverse of the '%s' " - "relation in entity '%s'. You should specify " - "inverse relations manually by using the inverse " - "keyword." - % (rel.name, rel.entity.__name__)) - # When a matching inverse is found, we check that it has only - # one relation matching as its own inverse. We don't need the result - # of the method though. But we do need to be careful not to start an - # infinite recursive loop. 
- if matching_rel and check_reverse: - rel.entity._descriptor.get_inverse_relation(matching_rel, False) - - return matching_rel - - def find_relationship(self, name): - for rel in self.relationships: - if rel.name == name: - return rel - if self.parent: - return self.parent._descriptor.find_relationship(name) - else: - return None - - #------------------------ - # some useful properties - - @property - def table_fullname(self): - ''' - Complete name of the table for the related entity. - Includes the schema name if there is one specified. - ''' - schema = self.table_options.get('schema', None) - if schema is not None: - return "%s.%s" % (schema, self.tablename) - else: - return self.tablename - - @property - def columns(self): - if self.entity.table is not None: - return self.entity.table.columns - else: - #FIXME: depending on the type of inheritance, we should also - # return the parent entity's columns (for example for order_by - # using a column defined in the parent. - return self._columns - - @property - def primary_keys(self): - """ - Returns the list of primary key columns of the entity. - - This property isn't valid before the "create_pk_cols" phase. - """ - if self.autoload: - return [col for col in self.entity.table.primary_key.columns] - else: - if self.parent and self.inheritance == 'single': - return self.parent._descriptor.primary_keys - else: - return [col for col in self.columns if col.primary_key] - - @property - def table(self): - if self.entity.table is not None: - return self.entity.table - else: - return FakeTable(self) - - @property - def primary_key_properties(self): - """ - Returns the list of (mapper) properties corresponding to the primary - key columns of the table of the entity. - - This property caches its value, so it shouldn't be called before the - entity is fully set up. 
- """ - if not hasattr(self, '_pk_props'): - col_to_prop = {} - mapper = self.entity.mapper - for prop in mapper.iterate_properties: - if isinstance(prop, ColumnProperty): - for col in prop.columns: - #XXX: Why is this extra loop necessary? What is this - # "proxy_set" supposed to mean? - for col in col.proxy_set: - col_to_prop[col] = prop - pk_cols = [c for c in mapper.mapped_table.c if c.primary_key] - self._pk_props = [col_to_prop[c] for c in pk_cols] - return self._pk_props - -class FakePK(object): - def __init__(self, descriptor): - self.descriptor = descriptor - - @property - def columns(self): - return self.descriptor.primary_keys - -class FakeTable(object): - def __init__(self, descriptor): - self.descriptor = descriptor - self.primary_key = FakePK(descriptor) - - @property - def columns(self): - return self.descriptor.columns - - @property - def fullname(self): - ''' - Complete name of the table for the related entity. - Includes the schema name if there is one specified. - ''' - schema = self.descriptor.table_options.get('schema', None) - if schema is not None: - return "%s.%s" % (schema, self.descriptor.tablename) - else: - return self.descriptor.tablename - - -def is_entity(cls): - """ - Scan the bases classes of `cls` to see if any is an instance of - EntityMeta. If we don't find any, it means it is either an unrelated class - or an entity base class (like the 'Entity' class). 
- """ - for base in cls.__bases__: - if isinstance(base, EntityMeta): - return True - return False - - -# Note that we don't use inspect.getmembers because of -# http://bugs.python.org/issue1785 -# See also http://elixir.ematia.de/trac/changeset/262 -def getmembers(object, predicate=None): - base_props = [] - for key in dir(object): - try: - value = getattr(object, key) - except AttributeError: - continue - if not predicate or predicate(value): - base_props.append((key, value)) - return base_props - -def is_abstract_entity(dict_or_cls): - if not isinstance(dict_or_cls, dict): - dict_or_cls = dict_or_cls.__dict__ - for mutator, args, kwargs in dict_or_cls.get(MUTATORS, []): - if 'abstract' in kwargs: - return kwargs['abstract'] - - return False - -def instrument_class(cls): - """ - Instrument a class as an Entity. This is usually done automatically through - the EntityMeta metaclass. - """ - # Create the entity descriptor - desc = cls._descriptor = EntityDescriptor(cls) - - # Process mutators - # We *do* want mutators to be processed for base/abstract classes - # (so that statements like using_options_defaults work). - process_mutators(cls) - - # We do not want to do any more processing for base/abstract classes - # (Entity et al.). - if not is_entity(cls) or is_abstract_entity(cls): - return - - cls.table = None - cls.mapper = None - - # Copy the properties ('Property' instances) of the entity base class(es). - # We use getmembers (instead of __dict__) so that we also get the - # properties from the parents of the base class if any. - base_props = [] - for base in cls.__bases__: - if isinstance(base, EntityMeta) and \ - (not is_entity(base) or is_abstract_entity(base)): - base_props += [(name, deepcopy(attr)) for name, attr in - getmembers(base, lambda a: isinstance(a, Property))] - - # Process attributes (using the assignment syntax), looking for - # 'Property' instances and attaching them to this entity. 
- properties = [(name, attr) for name, attr in cls.__dict__.iteritems() - if isinstance(attr, Property)] - sorted_props = sorted(base_props + properties, - key=lambda i: i[1]._counter) - for name, prop in sorted_props: - prop.attach(cls, name) - - # setup misc options here (like tablename etc.) - desc.setup_options() - - -class EntityMeta(type): - """ - Entity meta class. - You should only use it directly if you want to define your own base class - for your entities (ie you don't want to use the provided 'Entity' class). - """ - - def __init__(cls, name, bases, dict_): - instrument_class(cls) - - def __setattr__(cls, key, value): - if isinstance(value, Property): - if hasattr(cls, '_setup_done'): - raise Exception('Cannot set attribute on a class after ' - 'setup_all') - else: - value.attach(cls, key) - else: - type.__setattr__(cls, key, value) - - -def setup_entities(entities): - '''Setup all entities in the list passed as argument''' - - for entity in entities: - # delete all Elixir properties so that it doesn't interfere with - # SQLAlchemy. At this point they should have be converted to - # builders. - for name, attr in entity.__dict__.items(): - if isinstance(attr, Property): - delattr(entity, name) - - for method_name in ( - 'setup_autoload_table', 'create_pk_cols', 'setup_relkeys', - 'before_table', 'setup_table', 'setup_reltables', 'after_table', - 'setup_events', - 'before_mapper', 'setup_mapper', 'after_mapper', - 'setup_properties', - 'finalize'): -# if DEBUG: -# print "=" * 40 -# print method_name -# print "=" * 40 - for entity in entities: -# print entity.__name__, "...", - if hasattr(entity, '_setup_done'): -# print "already done" - continue - method = getattr(entity._descriptor, method_name) - method() -# print "ok" - - -def cleanup_entities(entities): - """ - Try to revert back the list of entities passed as argument to the state - they had just before their setup phase. 
- - As of now, this function is *not* functional in that it doesn't revert to - the exact same state the entities were before setup. For example, the - properties do not work yet as those would need to be regenerated (since the - columns they are based on are regenerated too -- and as such the - corresponding joins are not correct) but this doesn't happen because of - the way relationship setup is designed to be called only once (especially - the backref stuff in create_properties). - """ - for entity in entities: - desc = entity._descriptor - - if hasattr(entity, '_setup_done'): - del entity._setup_done - - entity.table = None - entity.mapper = None - - desc._pk_col_done = False - desc.has_pk = False - desc._columns = ColumnCollection() - desc.constraints = [] - desc.properties = {} - -class EntityBase(object): - """ - This class holds all methods of the "Entity" base class, but does not act - as a base class itself (it does not use the EntityMeta metaclass), but - rather as a parent class for Entity. This is meant so that people who want - to provide their own base class but don't want to loose or copy-paste all - the methods of Entity can do so by inheriting from EntityBase: - - .. 
sourcecode:: python - - class MyBase(EntityBase): - __metaclass__ = EntityMeta - - def myCustomMethod(self): - # do something great - """ - - def __init__(self, **kwargs): - self.set(**kwargs) - - def set(self, **kwargs): - for key, value in kwargs.iteritems(): - setattr(self, key, value) - - @classmethod - def update_or_create(cls, data, surrogate=True): - pk_props = cls._descriptor.primary_key_properties - - # if all pk are present and not None - if not [1 for p in pk_props if data.get(p.key) is None]: - pk_tuple = tuple([data[prop.key] for prop in pk_props]) - record = cls.query.get(pk_tuple) - if record is None: - if surrogate: - raise Exception("Cannot create surrogate with pk") - else: - record = cls() - else: - if surrogate: - record = cls() - else: - raise Exception("Cannot create non surrogate without pk") - record.from_dict(data) - return record - - def from_dict(self, data): - """ - Update a mapped class with data from a JSON-style nested dict/list - structure. - """ - # surrogate can be guessed from autoincrement/sequence but I guess - # that's not 100% reliable, so we'll need an override - - mapper = sqlalchemy.orm.object_mapper(self) - - for key, value in data.iteritems(): - if isinstance(value, dict): - dbvalue = getattr(self, key) - rel_class = mapper.get_property(key).mapper.class_ - pk_props = rel_class._descriptor.primary_key_properties - - # If the data doesn't contain any pk, and the relationship - # already has a value, update that record. 
- if not [1 for p in pk_props if p.key in data] and \ - dbvalue is not None: - dbvalue.from_dict(value) - else: - record = rel_class.update_or_create(value) - setattr(self, key, record) - elif isinstance(value, list) and \ - value and isinstance(value[0], dict): - - rel_class = mapper.get_property(key).mapper.class_ - new_attr_value = [] - for row in value: - if not isinstance(row, dict): - raise Exception( - 'Cannot send mixed (dict/non dict) data ' - 'to list relationships in from_dict data.') - record = rel_class.update_or_create(row) - new_attr_value.append(record) - setattr(self, key, new_attr_value) - else: - setattr(self, key, value) - - def to_dict(self, deep={}, exclude=[]): - """Generate a JSON-style nested dict/list structure from an object.""" - col_prop_names = [p.key for p in self.mapper.iterate_properties \ - if isinstance(p, ColumnProperty)] - data = dict([(name, getattr(self, name)) - for name in col_prop_names if name not in exclude]) - for rname, rdeep in deep.iteritems(): - dbdata = getattr(self, rname) - #FIXME: use attribute names (ie coltoprop) instead of column names - fks = self.mapper.get_property(rname).remote_side - exclude = [c.name for c in fks] - if dbdata is None: - data[rname] = None - elif isinstance(dbdata, list): - data[rname] = [o.to_dict(rdeep, exclude) for o in dbdata] - else: - data[rname] = dbdata.to_dict(rdeep, exclude) - return data - - # session methods - def flush(self, *args, **kwargs): - return object_session(self).flush([self], *args, **kwargs) - - def delete(self, *args, **kwargs): - return object_session(self).delete(self, *args, **kwargs) - - def expire(self, *args, **kwargs): - return object_session(self).expire(self, *args, **kwargs) - - def refresh(self, *args, **kwargs): - return object_session(self).refresh(self, *args, **kwargs) - - def expunge(self, *args, **kwargs): - return object_session(self).expunge(self, *args, **kwargs) - - # This bunch of session methods, along with all the query methods below - # 
only make sense when using a global/scoped/contextual session. - @property - def _global_session(self): - return self._descriptor.session.registry() - - #FIXME: remove all deprecated methods, possibly all of these - def merge(self, *args, **kwargs): - return self._global_session.merge(self, *args, **kwargs) - - def save(self, *args, **kwargs): - return self._global_session.save(self, *args, **kwargs) - - def update(self, *args, **kwargs): - return self._global_session.update(self, *args, **kwargs) - - # only exist in SA < 0.5 - # IMO, the replacement (session.add) doesn't sound good enough to be added - # here. For example: "o = Order(); o.add()" is not very telling. It's - # better to leave it as "session.add(o)" - def save_or_update(self, *args, **kwargs): - return self._global_session.save_or_update(self, *args, **kwargs) - - # query methods - @classmethod - def get_by(cls, *args, **kwargs): - """ - Returns the first instance of this class matching the given criteria. - This is equivalent to: - session.query(MyClass).filter_by(...).first() - """ - return cls.query.filter_by(*args, **kwargs).first() - - @classmethod - def get(cls, *args, **kwargs): - """ - Return the instance of this class based on the given identifier, - or None if not found. This is equivalent to: - session.query(MyClass).get(...) - """ - return cls.query.get(*args, **kwargs) - - -class Entity(EntityBase): - ''' - The base class for all entities - - All Elixir model objects should inherit from this class. Statements can - appear within the body of the definition of an entity to define its - fields, relationships, and other options. - - Here is an example: - - .. sourcecode:: python - - class Person(Entity): - name = Field(Unicode(128)) - birthdate = Field(DateTime, default=datetime.now) - - Please note, that if you don't specify any primary keys, Elixir will - automatically create one called ``id``. - - For further information, please refer to the provided examples or - tutorial. 
- ''' - __metaclass__ = EntityMeta - - diff --git a/libs/elixir/events.py b/libs/elixir/events.py deleted file mode 100644 index 293a8a4aad..0000000000 --- a/libs/elixir/events.py +++ /dev/null @@ -1,27 +0,0 @@ -from sqlalchemy.orm import reconstructor - -__all__ = [ - 'before_insert', - 'after_insert', - 'before_update', - 'after_update', - 'before_delete', - 'after_delete', - 'reconstructor' -] - -def create_decorator(event_name): - def decorator(func): - if not hasattr(func, '_elixir_events'): - func._elixir_events = [] - func._elixir_events.append(event_name) - return func - return decorator - -before_insert = create_decorator('before_insert') -after_insert = create_decorator('after_insert') -before_update = create_decorator('before_update') -after_update = create_decorator('after_update') -before_delete = create_decorator('before_delete') -after_delete = create_decorator('after_delete') - diff --git a/libs/elixir/ext/__init__.py b/libs/elixir/ext/__init__.py deleted file mode 100644 index c8708f259f..0000000000 --- a/libs/elixir/ext/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -''' -Ext package - -Additional Elixir statements and functionality. -''' diff --git a/libs/elixir/ext/associable.py b/libs/elixir/ext/associable.py deleted file mode 100644 index b31c5a74d4..0000000000 --- a/libs/elixir/ext/associable.py +++ /dev/null @@ -1,234 +0,0 @@ -''' -Associable Elixir Statement Generator - -========== -Associable -========== - -About Polymorphic Associations ------------------------------- - -A frequent pattern in database schemas is the has_and_belongs_to_many, or a -many-to-many table. Quite often multiple tables will refer to a single one -creating quite a few many-to-many intermediate tables. - -Polymorphic associations lower the amount of many-to-many tables by setting up -a table that allows relations to any other table in the database, and relates -it to the associable table. 
In some implementations, this layout does not -enforce referential integrity with database foreign key constraints, this -implementation uses an additional many-to-many table with foreign key -constraints to avoid this problem. - -.. note: - SQLite does not support foreign key constraints, so referential integrity - can only be enforced using database backends with such support. - -Elixir Statement Generator for Polymorphic Associations -------------------------------------------------------- - -The ``associable`` function generates the intermediary tables for an Elixir -entity that should be associable with other Elixir entities and returns an -Elixir Statement for use with them. This automates the process of creating the -polymorphic association tables and ensuring their referential integrity. - -Matching select_XXX and select_by_XXX are also added to the associated entity -which allow queries to be run for the associated objects. - -Example usage: - -.. sourcecode:: python - - class Tag(Entity): - name = Field(Unicode) - - acts_as_taggable = associable(Tag) - - class Entry(Entity): - title = Field(Unicode) - acts_as_taggable('tags') - - class Article(Entity): - title = Field(Unicode) - acts_as_taggable('tags') - -Or if one of the entities being associated should only have a single member of -the associated table: - -.. 
sourcecode:: python - - class Address(Entity): - street = Field(String(130)) - city = Field(String(100)) - - is_addressable = associable(Address, 'addresses') - - class Person(Entity): - name = Field(Unicode) - orders = OneToMany('Order') - is_addressable() - - class Order(Entity): - order_num = Field(primary_key=True) - item_count = Field(Integer) - person = ManyToOne('Person') - is_addressable('address', uselist=False) - - home = Address(street='123 Elm St.', city='Spooksville') - user = Person(name='Jane Doe') - user.addresses.append(home) - - neworder = Order(item_count=4) - neworder.address = home - user.orders.append(neworder) - - # Queries using the added helpers - Person.select_by_addresses(city='Cupertino') - Person.select_addresses(and_(Address.c.street=='132 Elm St', - Address.c.city=='Smallville')) - -Statement Options ------------------ - -The generated Elixir Statement has several options available: - -+---------------+-------------------------------------------------------------+ -| Option Name | Description | -+===============+=============================================================+ -| ``name`` | Specify a custom name for the Entity attribute. This is | -| | used to declare the attribute used to access the associated | -| | table values. Otherwise, the name will use the plural_name | -| | provided to the associable call. | -+---------------+-------------------------------------------------------------+ -| ``uselist`` | Whether or not the associated table should be represented | -| | as a list, or a single property. It should be set to False | -| | when the entity should only have a single associated | -| | entity. Defaults to True. | -+---------------+-------------------------------------------------------------+ -| ``lazy`` | Determines eager loading of the associated entity objects. | -| | Defaults to False, to indicate that they should not be | -| | lazily loaded. 
| -+---------------+-------------------------------------------------------------+ -''' -from elixir.statements import Statement -import sqlalchemy as sa - -__doc_all__ = ['associable'] - - -def associable(assoc_entity, plural_name=None, lazy=True): - ''' - Generate an associable Elixir Statement - ''' - interface_name = assoc_entity._descriptor.tablename - able_name = interface_name + 'able' - - if plural_name: - attr_name = "%s_rel" % plural_name - else: - plural_name = interface_name - attr_name = "%s_rel" % interface_name - - class GenericAssoc(object): - - def __init__(self, tablename): - self.type = tablename - - #TODO: inherit from entity builder - class Associable(object): - """An associable Elixir Statement object""" - - def __init__(self, entity, name=None, uselist=True, lazy=True): - self.entity = entity - self.lazy = lazy - self.uselist = uselist - - if name is None: - self.name = plural_name - else: - self.name = name - - def after_table(self): - col = sa.Column('%s_assoc_id' % interface_name, sa.Integer, - sa.ForeignKey('%s.id' % able_name)) - self.entity._descriptor.add_column(col) - - if not hasattr(assoc_entity, '_assoc_table'): - metadata = assoc_entity._descriptor.metadata - association_table = sa.Table("%s" % able_name, metadata, - sa.Column('id', sa.Integer, primary_key=True), - sa.Column('type', sa.String(40), nullable=False), - ) - tablename = "%s_to_%s" % (able_name, interface_name) - association_to_table = sa.Table(tablename, metadata, - sa.Column('assoc_id', sa.Integer, - sa.ForeignKey(association_table.c.id, - ondelete="CASCADE"), - primary_key=True), - #FIXME: this assumes a single id col - sa.Column('%s_id' % interface_name, sa.Integer, - sa.ForeignKey(assoc_entity.table.c.id, - ondelete="RESTRICT"), - primary_key=True), - ) - - assoc_entity._assoc_table = association_table - assoc_entity._assoc_to_table = association_to_table - - def after_mapper(self): - if not hasattr(assoc_entity, '_assoc_mapper'): - assoc_entity._assoc_mapper = 
sa.orm.mapper( - GenericAssoc, assoc_entity._assoc_table, properties={ - 'targets': sa.orm.relation( - assoc_entity, - secondary=assoc_entity._assoc_to_table, - lazy=lazy, backref='associations', - order_by=assoc_entity.mapper.order_by) - }) - - entity = self.entity - entity.mapper.add_property( - attr_name, - sa.orm.relation(GenericAssoc, lazy=self.lazy, - backref='_backref_%s' % entity.table.name) - ) - - if self.uselist: - def get(self): - if getattr(self, attr_name) is None: - setattr(self, attr_name, - GenericAssoc(entity.table.name)) - return getattr(self, attr_name).targets - setattr(entity, self.name, property(get)) - else: - # scalar based property decorator - def get(self): - attr = getattr(self, attr_name) - if attr is not None: - return attr.targets[0] - else: - return None - def set(self, value): - if getattr(self, attr_name) is None: - setattr(self, attr_name, - GenericAssoc(entity.table.name)) - getattr(self, attr_name).targets = [value] - setattr(entity, self.name, property(get, set)) - - # self.name is both set via mapper synonym and the python - # property, but that's how synonym properties work. 
- # adding synonym property after "real" property otherwise it - # breaks when using SQLAlchemy > 0.4.1 - entity.mapper.add_property(self.name, sa.orm.synonym(attr_name)) - - # add helper methods - def select_by(cls, **kwargs): - return cls.query.join(attr_name, 'targets') \ - .filter_by(**kwargs).all() - setattr(entity, 'select_by_%s' % self.name, classmethod(select_by)) - - def select(cls, *args, **kwargs): - return cls.query.join(attr_name, 'targets') \ - .filter(*args, **kwargs).all() - setattr(entity, 'select_%s' % self.name, classmethod(select)) - - return Statement(Associable) diff --git a/libs/elixir/ext/encrypted.py b/libs/elixir/ext/encrypted.py deleted file mode 100644 index 410855d2e9..0000000000 --- a/libs/elixir/ext/encrypted.py +++ /dev/null @@ -1,124 +0,0 @@ -''' -An encryption plugin for Elixir utilizing the excellent PyCrypto library, which -can be downloaded here: http://www.amk.ca/python/code/crypto - -Values for columns that are specified to be encrypted will be transparently -encrypted and safely encoded for storage in a unicode column using the powerful -and secure Blowfish Cipher using a specified "secret" which can be passed into -the plugin at class declaration time. - -Example usage: - -.. sourcecode:: python - - from elixir import * - from elixir.ext.encrypted import acts_as_encrypted - - class Person(Entity): - name = Field(Unicode) - password = Field(Unicode) - ssn = Field(Unicode) - acts_as_encrypted(for_fields=['password', 'ssn'], - with_secret='secret') - -The above Person entity will automatically encrypt and decrypt the password and -ssn columns on save, update, and load. Different secrets can be specified on -an entity by entity basis, for added security. - -**Important note**: instance attributes are encrypted in-place. 
This means that -if one of the encrypted attributes of an instance is accessed after the -instance has been flushed to the database (and thus encrypted), the value for -that attribute will be crypted in the in-memory object in addition to the -database row. -''' - -from Crypto.Cipher import Blowfish -from elixir.statements import Statement -from sqlalchemy.orm import MapperExtension, EXT_CONTINUE, EXT_STOP - -try: - from sqlalchemy.orm import EXT_PASS - SA05orlater = False -except ImportError: - SA05orlater = True - -__all__ = ['acts_as_encrypted'] -__doc_all__ = [] - - -# -# encryption and decryption functions -# - -def encrypt_value(value, secret): - return Blowfish.new(secret, Blowfish.MODE_CFB) \ - .encrypt(value).encode('string_escape') - -def decrypt_value(value, secret): - return Blowfish.new(secret, Blowfish.MODE_CFB) \ - .decrypt(value.decode('string_escape')) - - -# -# acts_as_encrypted statement -# - -class ActsAsEncrypted(object): - - def __init__(self, entity, for_fields=[], with_secret='abcdef'): - - def perform_encryption(instance, encrypt=True): - encrypted = getattr(instance, '_elixir_encrypted', None) - if encrypted is encrypt: - # skipping encryption or decryption, as it is already done - return - else: - # marking instance as already encrypted/decrypted - instance._elixir_encrypted = encrypt - - if encrypt: - func = encrypt_value - else: - func = decrypt_value - - for column_name in for_fields: - current_value = getattr(instance, column_name) - if current_value: - setattr(instance, column_name, - func(current_value, with_secret)) - - def perform_decryption(instance): - perform_encryption(instance, encrypt=False) - - class EncryptedMapperExtension(MapperExtension): - - def before_insert(self, mapper, connection, instance): - perform_encryption(instance) - return EXT_CONTINUE - - def before_update(self, mapper, connection, instance): - perform_encryption(instance) - return EXT_CONTINUE - - if SA05orlater: - def reconstruct_instance(self, mapper, 
instance): - perform_decryption(instance) - # no special return value is required for - # reconstruct_instance, but you never know... - return EXT_CONTINUE - else: - def populate_instance(self, mapper, selectcontext, row, - instance, *args, **kwargs): - mapper.populate_instance(selectcontext, instance, row, - *args, **kwargs) - perform_decryption(instance) - # EXT_STOP because we already did populate the instance and - # the normal processing should not happen - return EXT_STOP - - # make sure that the entity's mapper has our mapper extension - entity._descriptor.add_mapper_extension(EncryptedMapperExtension()) - - -acts_as_encrypted = Statement(ActsAsEncrypted) - diff --git a/libs/elixir/ext/perform_ddl.py b/libs/elixir/ext/perform_ddl.py deleted file mode 100644 index bb8528df26..0000000000 --- a/libs/elixir/ext/perform_ddl.py +++ /dev/null @@ -1,106 +0,0 @@ -''' -DDL statements for Elixir. - -Entities having the perform_ddl statement, will automatically execute the -given DDL statement, at the given moment: ether before or after the table -creation in SQL. - -The 'when' argument can be either 'before-create' or 'after-create'. -The 'statement' argument can be one of: - -- a single string statement -- a list of string statements, in which case, each of them will be executed - in turn. -- a callable which should take no argument and return either a single string - or a list of strings. - -In each string statement, you may use the special '%(fullname)s' construct, -that will be replaced with the real table name including schema, if unknown -to you. Also, self explained '%(table)s' and '%(schema)s' may be used here. - -You would use this extension to handle non elixir sql statemts, like triggers -etc. - -.. 
sourcecode:: python - - class Movie(Entity): - title = Field(Unicode(30), primary_key=True) - year = Field(Integer) - - perform_ddl('after-create', - "insert into %(fullname)s values ('Alien', 1979)") - -preload_data is a more specific statement meant to preload data in your -entity table from a list of tuples (of fields values for each row). - -.. sourcecode:: python - - class Movie(Entity): - title = Field(Unicode(30), primary_key=True) - year = Field(Integer) - - preload_data(('title', 'year'), - [(u'Alien', 1979), (u'Star Wars', 1977)]) - preload_data(('year', 'title'), - [(1982, u'Blade Runner')]) - preload_data(data=[(u'Batman', 1966)]) -''' - -from elixir.statements import Statement -from elixir.properties import EntityBuilder -from sqlalchemy import DDL - -__all__ = ['perform_ddl', 'preload_data'] -__doc_all__ = [] - -# -# the perform_ddl statement -# -class PerformDDLEntityBuilder(EntityBuilder): - - def __init__(self, entity, when, statement, on=None, context=None): - self.entity = entity - self.when = when - self.statement = statement - self.on = on - self.context = context - - def after_table(self): - statement = self.statement - if hasattr(statement, '__call__'): - statement = statement() - if not isinstance(statement, list): - statement = [statement] - for s in statement: - ddl = DDL(s, self.on, self.context) - ddl.execute_at(self.when, self.entity.table) - -perform_ddl = Statement(PerformDDLEntityBuilder) - -# -# the preload_data statement -# -class PreloadDataEntityBuilder(EntityBuilder): - - def __init__(self, entity, columns=None, data=None): - self.entity = entity - self.columns = columns - self.data = data - - def after_table(self): - all_columns = [col.name for col in self.entity.table.columns] - def onload(event, schema_item, connection): - columns = self.columns - if columns is None: - columns = all_columns - data = self.data - if hasattr(data, '__call__'): - data = data() - insert = schema_item.insert() - connection.execute(insert, - 
[dict(zip(columns, values)) for values in data]) - - self.entity.table.append_ddl_listener('after-create', onload) - -preload_data = Statement(PreloadDataEntityBuilder) - diff --git a/libs/elixir/ext/versioned.py b/libs/elixir/ext/versioned.py deleted file mode 100644 index 75f406b06e..0000000000 --- a/libs/elixir/ext/versioned.py +++ /dev/null @@ -1,288 +0,0 @@ -''' -A versioning plugin for Elixir. - -Entities that are marked as versioned with the `acts_as_versioned` statement -will automatically have a history table created and a timestamp and version -column added to their tables. In addition, versioned entities are provided -with four new methods: revert, revert_to, compare_with and get_as_of, and one -new attribute: versions. Entities with compound primary keys are supported. - -The `versions` attribute will contain a list of previous versions of the -instance, in increasing version number order. - -The `get_as_of` method will retrieve a previous version of the instance "as of" -a specified datetime. If the current version is the most recent, it will be -returned. - -The `revert` method will rollback the current instance to its previous version, -if possible. Once reverted, the current instance will be expired from the -session, and you will need to fetch it again to retrieve the now reverted -instance. - -The `revert_to` method will rollback the current instance to the specified -version number, if possibe. Once reverted, the current instance will be expired -from the session, and you will need to fetch it again to retrieve the now -reverted instance. - -The `compare_with` method will compare the instance with a previous version. A -dictionary will be returned with each field difference as an element in the -dictionary where the key is the field name and the value is a tuple of the -format (current_value, version_value). Version instances also have a -`compare_with` method so that two versions can be compared. 
- -Also included in the module is a `after_revert` decorator that can be used to -decorate methods on the versioned entity that will be called following that -instance being reverted. - -The acts_as_versioned statement also accepts an optional `ignore` argument -that consists of a list of strings, specifying names of fields. Changes in -those fields will not result in a version increment. In addition, you can -pass in an optional `check_concurrent` argument, which will use SQLAlchemy's -built-in optimistic concurrency mechanisms. - -Note that relationships that are stored in mapping tables will not be included -as part of the versioning process, and will need to be handled manually. Only -values within the entity's main table will be versioned into the history table. -''' - -from datetime import datetime -import inspect - -from sqlalchemy import Table, Column, and_, desc -from sqlalchemy.orm import mapper, MapperExtension, EXT_CONTINUE, \ - object_session - -from elixir import Integer, DateTime -from elixir.statements import Statement -from elixir.properties import EntityBuilder -from elixir.entity import getmembers - -__all__ = ['acts_as_versioned', 'after_revert'] -__doc_all__ = [] - -# -# utility functions -# - -def get_entity_where(instance): - clauses = [] - for column in instance.table.primary_key.columns: - instance_value = getattr(instance, column.name) - clauses.append(column==instance_value) - return and_(*clauses) - - -def get_history_where(instance): - clauses = [] - history_columns = instance.__history_table__.primary_key.columns - for column in instance.table.primary_key.columns: - instance_value = getattr(instance, column.name) - history_column = getattr(history_columns, column.name) - clauses.append(history_column==instance_value) - return and_(*clauses) - - -# -# a mapper extension to track versions on insert, update, and delete -# - -class VersionedMapperExtension(MapperExtension): - def before_insert(self, mapper, connection, instance): - 
version_colname, timestamp_colname = \ - instance.__class__.__versioned_column_names__ - setattr(instance, version_colname, 1) - setattr(instance, timestamp_colname, datetime.now()) - return EXT_CONTINUE - - def before_update(self, mapper, connection, instance): - old_values = instance.table.select(get_entity_where(instance)) \ - .execute().fetchone() - - # SA might've flagged this for an update even though it didn't change. - # This occurs when a relation is updated, thus marking this instance - # for a save/update operation. We check here against the last version - # to ensure we really should save this version and update the version - # data. - ignored = instance.__class__.__ignored_fields__ - version_colname, timestamp_colname = \ - instance.__class__.__versioned_column_names__ - for key in instance.table.c.keys(): - if key in ignored: - continue - if getattr(instance, key) != old_values[key]: - # the instance was really updated, so we create a new version - dict_values = dict(old_values.items()) - connection.execute( - instance.__class__.__history_table__.insert(), dict_values) - old_version = getattr(instance, version_colname) - setattr(instance, version_colname, old_version + 1) - setattr(instance, timestamp_colname, datetime.now()) - break - - return EXT_CONTINUE - - def before_delete(self, mapper, connection, instance): - connection.execute(instance.__history_table__.delete( - get_history_where(instance) - )) - return EXT_CONTINUE - - -versioned_mapper_extension = VersionedMapperExtension() - - -# -# the acts_as_versioned statement -# - -class VersionedEntityBuilder(EntityBuilder): - - def __init__(self, entity, ignore=None, check_concurrent=False, - column_names=None): - self.entity = entity - self.add_mapper_extension(versioned_mapper_extension) - #TODO: we should rather check that the version_id_col isn't set - # externally - self.check_concurrent = check_concurrent - - # Changes in these fields will be ignored - if column_names is None: - column_names 
= ['version', 'timestamp'] - entity.__versioned_column_names__ = column_names - if ignore is None: - ignore = [] - ignore.extend(column_names) - entity.__ignored_fields__ = ignore - - def create_non_pk_cols(self): - # add a version column to the entity, along with a timestamp - version_colname, timestamp_colname = \ - self.entity.__versioned_column_names__ - #XXX: fail in case the columns already exist? - #col_names = [col.name for col in self.entity._descriptor.columns] - #if version_colname not in col_names: - self.add_table_column(Column(version_colname, Integer)) - #if timestamp_colname not in col_names: - self.add_table_column(Column(timestamp_colname, DateTime)) - - # add a concurrent_version column to the entity, if required - if self.check_concurrent: - self.entity._descriptor.version_id_col = 'concurrent_version' - - # we copy columns from the main entity table, so we need it to exist first - def after_table(self): - entity = self.entity - version_colname, timestamp_colname = \ - entity.__versioned_column_names__ - - # look for events - after_revert_events = [] - for name, func in getmembers(entity, inspect.ismethod): - if getattr(func, '_elixir_after_revert', False): - after_revert_events.append(func) - - # create a history table for the entity - skipped_columns = [version_colname] - if self.check_concurrent: - skipped_columns.append('concurrent_version') - - columns = [ - column.copy() for column in entity.table.c - if column.name not in skipped_columns - ] - columns.append(Column(version_colname, Integer, primary_key=True)) - table = Table(entity.table.name + '_history', entity.table.metadata, - *columns - ) - entity.__history_table__ = table - - # create an object that represents a version of this entity - class Version(object): - pass - - # map the version class to the history table for this entity - Version.__name__ = entity.__name__ + 'Version' - Version.__versioned_entity__ = entity - mapper(Version, entity.__history_table__) - - version_col = 
getattr(table.c, version_colname) - timestamp_col = getattr(table.c, timestamp_colname) - - # attach utility methods and properties to the entity - def get_versions(self): - v = object_session(self).query(Version) \ - .filter(get_history_where(self)) \ - .order_by(version_col) \ - .all() - # history contains all the previous records. - # Add the current one to the list to get all the versions - v.append(self) - return v - - def get_as_of(self, dt): - # if the passed in timestamp is older than our current version's - # time stamp, then the most recent version is our current version - if getattr(self, timestamp_colname) < dt: - return self - - # otherwise, we need to look to the history table to get our - # older version - sess = object_session(self) - query = sess.query(Version) \ - .filter(and_(get_history_where(self), - timestamp_col <= dt)) \ - .order_by(desc(timestamp_col)).limit(1) - return query.first() - - def revert_to(self, to_version): - if isinstance(to_version, Version): - to_version = getattr(to_version, version_colname) - - old_version = table.select(and_( - get_history_where(self), - version_col == to_version - )).execute().fetchone() - - entity.table.update(get_entity_where(self)).execute( - dict(old_version.items()) - ) - - table.delete(and_(get_history_where(self), - version_col >= to_version)).execute() - self.expire() - for event in after_revert_events: - event(self) - - def revert(self): - assert getattr(self, version_colname) > 1 - self.revert_to(getattr(self, version_colname) - 1) - - def compare_with(self, version): - differences = {} - for column in self.table.c: - if column.name in (version_colname, 'concurrent_version'): - continue - this = getattr(self, column.name) - that = getattr(version, column.name) - if this != that: - differences[column.name] = (this, that) - return differences - - entity.versions = property(get_versions) - entity.get_as_of = get_as_of - entity.revert_to = revert_to - entity.revert = revert - entity.compare_with = 
compare_with - Version.compare_with = compare_with - -acts_as_versioned = Statement(VersionedEntityBuilder) - - -def after_revert(func): - """ - Decorator for watching for revert events. - """ - func._elixir_after_revert = True - return func - - diff --git a/libs/elixir/fields.py b/libs/elixir/fields.py deleted file mode 100644 index 8659cdd8b7..0000000000 --- a/libs/elixir/fields.py +++ /dev/null @@ -1,191 +0,0 @@ -''' -This module provides support for defining the fields (columns) of your -entities. Elixir currently supports two syntaxes to do so: the default -`Attribute-based syntax`_ as well as the has_field_ DSL statement. - -Attribute-based syntax ----------------------- - -Here is a quick example of how to use the object-oriented syntax. - -.. sourcecode:: python - - class Person(Entity): - id = Field(Integer, primary_key=True) - name = Field(String(50), required=True) - ssn = Field(String(50), unique=True) - biography = Field(Text) - join_date = Field(DateTime, default=datetime.datetime.now) - photo = Field(Binary, deferred=True) - _email = Field(String(20), colname='email', synonym='email') - - def _set_email(self, email): - self._email = email - def _get_email(self): - return self._email - email = property(_get_email, _set_email) - - -The Field class takes one mandatory argument, which is its type. Please refer -to SQLAlchemy documentation for a list of `types supported by SQLAlchemy -`_. - -Following that first mandatory argument, fields can take any number of -optional keyword arguments. Please note that all the **arguments** that are -**not specifically processed by Elixir**, as mentioned in the documentation -below **are passed on to the SQLAlchemy ``Column`` object**. Please refer to -the `SQLAlchemy Column object's documentation -`_ for more details about other -supported keyword arguments. 
- -The following Elixir-specific arguments are supported: - -+-------------------+---------------------------------------------------------+ -| Argument Name | Description | -+===================+=========================================================+ -| ``required`` | Specify whether or not this field can be set to None | -| | (left without a value). Defaults to ``False``, unless | -| | the field is a primary key. | -+-------------------+---------------------------------------------------------+ -| ``colname`` | Specify a custom name for the column of this field. By | -| | default the column will have the same name as the | -| | attribute. | -+-------------------+---------------------------------------------------------+ -| ``deferred`` | Specify whether this particular column should be | -| | fetched by default (along with the other columns) when | -| | an instance of the entity is fetched from the database | -| | or rather only later on when this particular column is | -| | first referenced. This can be useful when one wants to | -| | avoid loading a large text or binary field into memory | -| | when its not needed. Individual columns can be lazy | -| | loaded by themselves (by using ``deferred=True``) | -| | or placed into groups that lazy-load together (by using | -| | ``deferred`` = `"group_name"`). | -+-------------------+---------------------------------------------------------+ -| ``synonym`` | Specify a synonym name for this field. The field will | -| | also be usable under that name in keyword-based Query | -| | functions such as filter_by. The Synonym class (see the | -| | `properties` module) provides a similar functionality | -| | with an (arguably) nicer syntax, but a limited scope. | -+-------------------+---------------------------------------------------------+ - -has_field ---------- - -The `has_field` statement allows you to define fields one at a time. - -The first argument is the name of the field, the second is its type. 
Following -these, any number of keyword arguments can be specified for additional -behavior. The following arguments are supported: - -+-------------------+---------------------------------------------------------+ -| Argument Name | Description | -+===================+=========================================================+ -| ``through`` | Specify a relation name to go through. This field will | -| | not exist as a column on the database but will be a | -| | property which automatically proxy values to the | -| | ``attribute`` attribute of the object pointed to by the | -| | relation. If the ``attribute`` argument is not present, | -| | the name of the current field will be used. In an | -| | has_field statement, you can only proxy through a | -| | belongs_to or an has_one relationship. | -+-------------------+---------------------------------------------------------+ -| ``attribute`` | Name of the "endpoint" attribute to proxy to. This | -| | should only be used in combination with the ``through`` | -| | argument. | -+-------------------+---------------------------------------------------------+ - - -Here is a quick example of how to use ``has_field``. - -.. sourcecode:: python - - class Person(Entity): - has_field('id', Integer, primary_key=True) - has_field('name', String(50)) -''' -from sqlalchemy import Column -from sqlalchemy.orm import deferred, synonym -from sqlalchemy.ext.associationproxy import association_proxy - -from elixir.statements import ClassMutator -from elixir.properties import Property - -__doc_all__ = ['Field'] - - -class Field(Property): - ''' - Represents the definition of a 'field' on an entity. - - This class represents a column on the table where the entity is stored. 
- ''' - - def __init__(self, type, *args, **kwargs): - super(Field, self).__init__() - - self.colname = kwargs.pop('colname', None) - self.synonym = kwargs.pop('synonym', None) - self.deferred = kwargs.pop('deferred', False) - if 'required' in kwargs: - kwargs['nullable'] = not kwargs.pop('required') - self.type = type - self.primary_key = kwargs.get('primary_key', False) - - self.column = None - self.property = None - - self.args = args - self.kwargs = kwargs - - def attach(self, entity, name): - # If no colname was defined (through the 'colname' kwarg), set - # it to the name of the attr. - if self.colname is None: - self.colname = name - super(Field, self).attach(entity, name) - - def create_pk_cols(self): - if self.primary_key: - self.create_col() - - def create_non_pk_cols(self): - if not self.primary_key: - self.create_col() - - def create_col(self): - self.column = Column(self.colname, self.type, - *self.args, **self.kwargs) - self.add_table_column(self.column) - - def create_properties(self): - if self.deferred: - group = None - if isinstance(self.deferred, basestring): - group = self.deferred - self.property = deferred(self.column, group=group) - elif self.name != self.colname: - # if the property name is different from the column name, we need - # to add an explicit property (otherwise nothing is needed as it's - # done automatically by SA) - self.property = self.column - - if self.property is not None: - self.add_mapper_property(self.name, self.property) - - if self.synonym: - self.add_mapper_property(self.synonym, synonym(self.name)) - - -def has_field_handler(entity, name, *args, **kwargs): - if 'through' in kwargs: - setattr(entity, name, - association_proxy(kwargs.pop('through'), - kwargs.pop('attribute', name), - **kwargs)) - return - field = Field(*args, **kwargs) - field.attach(entity, name) - -has_field = ClassMutator(has_field_handler) diff --git a/libs/elixir/options.py b/libs/elixir/options.py deleted file mode 100644 index 
27d7d19538..0000000000 --- a/libs/elixir/options.py +++ /dev/null @@ -1,274 +0,0 @@ -''' -This module provides support for defining several options on your Elixir -entities. There are three different kinds of options that can be set -up, and for this there are three different statements: using_options_, -using_table_options_ and using_mapper_options_. - -Alternatively, these options can be set on all Elixir entities by modifying -the `options_defaults` dictionary before defining any entity. - -`using_options` ---------------- -The 'using_options' DSL statement allows you to set up some additional -behaviors on your model objects, including table names, ordering, and -more. To specify an option, simply supply the option as a keyword -argument onto the statement, as follows: - -.. sourcecode:: python - - class Person(Entity): - name = Field(Unicode(64)) - - using_options(shortnames=True, order_by='name') - -The list of supported arguments are as follows: - -+---------------------+-------------------------------------------------------+ -| Option Name | Description | -+=====================+=======================================================+ -| ``inheritance`` | Specify the type of inheritance this entity must use. | -| | It can be one of ``single``, ``concrete`` or | -| | ``multi``. Defaults to ``single``. | -| | Note that polymorphic concrete inheritance is | -| | currently not implemented. See: | -| | http://www.sqlalchemy.org/docs/05/mappers.html | -| | #mapping-class-inheritance-hierarchies for an | -| | explanation of the different kinds of inheritances. | -+---------------------+-------------------------------------------------------+ -| ``abstract`` | Set 'abstract'=True to declare abstract entity. | -| | Abstract base classes are useful when you want to put | -| | some common information into a number of other | -| | entities. Abstract entity will not be used to create | -| | any database table. 
Instead, when it is used as a base| -| | class for other entity, its fields will be added to | -| | those of the child class. | -+---------------------+-------------------------------------------------------+ -| ``polymorphic`` | Whether the inheritance should be polymorphic or not. | -| | Defaults to ``True``. The column used to store the | -| | type of each row is named "row_type" by default. You | -| | can change this by passing the desired name for the | -| | column to this argument. | -+---------------------+-------------------------------------------------------+ -| ``identity`` | Specify a custom polymorphic identity. When using | -| | polymorphic inheritance, this value (usually a | -| | string) will represent this particular entity (class) | -| | . It will be used to differentiate it from other | -| | entities (classes) in your inheritance hierarchy when | -| | loading from the database instances of different | -| | entities in that hierarchy at the same time. | -| | This value will be stored by default in the | -| | "row_type" column of the entity's table (see above). | -| | You can either provide a | -| | plain string or a callable. The callable will be | -| | given the entity (ie class) as argument and must | -| | return a value (usually a string) representing the | -| | polymorphic identity of that entity. | -| | By default, this value is automatically generated: it | -| | is the name of the entity lower-cased. | -+---------------------+-------------------------------------------------------+ -| ``metadata`` | Specify a custom MetaData for this entity. | -| | By default, entities uses the global | -| | ``elixir.metadata``. | -| | This option can also be set for all entities of a | -| | module by setting the ``__metadata__`` attribute of | -| | that module. | -+---------------------+-------------------------------------------------------+ -| ``autoload`` | Automatically load column definitions from the | -| | existing database table. 
| -+---------------------+-------------------------------------------------------+ -| ``tablename`` | Specify a custom tablename. You can either provide a | -| | plain string or a callable. The callable will be | -| | given the entity (ie class) as argument and must | -| | return a string representing the name of the table | -| | for that entity. By default, the tablename is | -| | automatically generated: it is a concatenation of the | -| | full module-path to the entity and the entity (class) | -| | name itself. The result is lower-cased and separated | -| | by underscores ("_"), eg.: for an entity named | -| | "MyEntity" in the module "project1.model", the | -| | generated table name will be | -| | "project1_model_myentity". | -+---------------------+-------------------------------------------------------+ -| ``shortnames`` | Specify whether or not the automatically generated | -| | table names include the full module-path | -| | to the entity. If ``shortnames`` is ``True``, only | -| | the entity name is used. Defaults to ``False``. | -+---------------------+-------------------------------------------------------+ -| ``auto_primarykey`` | If given as string, it will represent the | -| | auto-primary-key's column name. If this option | -| | is True, it will allow auto-creation of a primary | -| | key if there's no primary key defined for the | -| | corresponding entity. If this option is False, | -| | it will disallow auto-creation of a primary key. | -| | Defaults to ``True``. | -+---------------------+-------------------------------------------------------+ -| ``version_id_col`` | If this option is True, it will create a version | -| | column automatically using the default name. If given | -| | as string, it will create the column using that name. | -| | This can be used to prevent concurrent modifications | -| | to the entity's table rows (i.e. it will raise an | -| | exception if it happens). Defaults to ``False``. 
| -+---------------------+-------------------------------------------------------+ -| ``order_by`` | How to order select results. Either a string or a | -| | list of strings, composed of the field name, | -| | optionally lead by a minus (for descending order). | -+---------------------+-------------------------------------------------------+ -| ``session`` | Specify a custom contextual session for this entity. | -| | By default, entities uses the global | -| | ``elixir.session``. | -| | This option takes a ``ScopedSession`` object or | -| | ``None``. In the later case your entity will be | -| | mapped using a non-contextual mapper which requires | -| | manual session management, as seen in pure SQLAlchemy.| -| | This option can also be set for all entities of a | -| | module by setting the ``__session__`` attribute of | -| | that module. | -+---------------------+-------------------------------------------------------+ -| ``allowcoloverride``| Specify whether it is allowed to override columns. | -| | By default, Elixir forbids you to add a column to an | -| | entity's table which already exist in that table. If | -| | you set this option to ``True`` it will skip that | -| | check. Use with care as it is easy to shoot oneself | -| | in the foot when overriding columns. | -+---------------------+-------------------------------------------------------+ - -For examples, please refer to the examples and unit tests. - -`using_table_options` ---------------------- -The 'using_table_options' DSL statement allows you to set up some -additional options on your entity table. It is meant only to handle the -options which are not supported directly by the 'using_options' statement. -By opposition to the 'using_options' statement, these options are passed -directly to the underlying SQLAlchemy Table object (both non-keyword arguments -and keyword arguments) without any processing. - -For further information, please refer to the `SQLAlchemy table's documentation -`_. 
- -You might also be interested in the section about `constraints -`_. - -`using_mapper_options` ----------------------- -The 'using_mapper_options' DSL statement allows you to set up some -additional options on your entity mapper. It is meant only to handle the -options which are not supported directly by the 'using_options' statement. -By opposition to the 'using_options' statement, these options are passed -directly to the underlying SQLAlchemy mapper (as keyword arguments) -without any processing. - -For further information, please refer to the `SQLAlchemy mapper -function's documentation -`_. - -`using_options_defaults` ------------------------- -The 'using_options_defaults' DSL statement allows you to set up some -default options on a custom base class. These will be used as the default value -for options of all its subclasses. Note that any option not set within the -using_options_defaults (nor specifically on a particular Entity) will use the -global defaults, so you don't have to provide a default value for all options, -but only those you want to change. Please also note that this statement does -not work on normal entities, and the normal using_options statement does not -work on base classes (because normal options do not and should not propagate to -the children classes). 
-''' - -from sqlalchemy import Integer, String - -from elixir.statements import ClassMutator - -__doc_all__ = ['options_defaults'] - -OLD_M2MCOL_NAMEFORMAT = "%(tablename)s_%(key)s%(numifself)s" -ALTERNATE_M2MCOL_NAMEFORMAT = "%(inversename)s_%(key)s" - -def default_m2m_column_formatter(data): - if data['selfref']: - return ALTERNATE_M2MCOL_NAMEFORMAT % data - else: - return OLD_M2MCOL_NAMEFORMAT % data - -NEW_M2MCOL_NAMEFORMAT = default_m2m_column_formatter - -# format constants -FKCOL_NAMEFORMAT = "%(relname)s_%(key)s" -M2MCOL_NAMEFORMAT = NEW_M2MCOL_NAMEFORMAT -CONSTRAINT_NAMEFORMAT = "%(tablename)s_%(colnames)s_fk" -MULTIINHERITANCECOL_NAMEFORMAT = "%(entity)s_%(key)s" - -# other global constants -DEFAULT_AUTO_PRIMARYKEY_NAME = "id" -DEFAULT_AUTO_PRIMARYKEY_TYPE = Integer -DEFAULT_VERSION_ID_COL_NAME = "row_version" -DEFAULT_POLYMORPHIC_COL_NAME = "row_type" -POLYMORPHIC_COL_SIZE = 40 -POLYMORPHIC_COL_TYPE = String(POLYMORPHIC_COL_SIZE) - -# debugging/migration help -MIGRATION_TO_07_AID = False - -# -options_defaults = dict( - abstract=False, - inheritance='single', - polymorphic=True, - identity=None, - autoload=False, - tablename=None, - shortnames=False, - auto_primarykey=True, - version_id_col=False, - allowcoloverride=False, - order_by=None, - resolve_root=None, - mapper_options={}, - table_options={} -) - -valid_options = options_defaults.keys() + [ - 'metadata', - 'session', - 'collection' -] - - -def using_options_defaults_handler(entity, **kwargs): - for kwarg in kwargs: - if kwarg not in valid_options: - raise Exception("'%s' is not a valid option for Elixir entities." - % kwarg) - - # We use __dict__ instead of hasattr to not check its presence within the - # parent, and thus update the parent dict instead of creating a local dict. 
- if not entity.__dict__.get('options_defaults'): - entity.options_defaults = {} - entity.options_defaults.update(kwargs) - - -def using_options_handler(entity, *args, **kwargs): - for kwarg in kwargs: - if kwarg in valid_options: - setattr(entity._descriptor, kwarg, kwargs[kwarg]) - else: - raise Exception("'%s' is not a valid option for Elixir entities." - % kwarg) - - -def using_table_options_handler(entity, *args, **kwargs): - entity._descriptor.table_args.extend(list(args)) - entity._descriptor.table_options.update(kwargs) - - -def using_mapper_options_handler(entity, *args, **kwargs): - entity._descriptor.mapper_options.update(kwargs) - - -using_options_defaults = ClassMutator(using_options_defaults_handler) -using_options = ClassMutator(using_options_handler) -using_table_options = ClassMutator(using_table_options_handler) -using_mapper_options = ClassMutator(using_mapper_options_handler) diff --git a/libs/elixir/properties.py b/libs/elixir/properties.py deleted file mode 100644 index 68ff8fabdc..0000000000 --- a/libs/elixir/properties.py +++ /dev/null @@ -1,244 +0,0 @@ -''' -This module provides support for defining properties on your entities. It both -provides, the `Property` class which acts as a building block for common -properties such as fields and relationships (for those, please consult the -corresponding modules), but also provides some more specialized properties, -such as `ColumnProperty` and `Synonym`. It also provides the GenericProperty -class which allows you to wrap any SQLAlchemy property, and its DSL-syntax -equivalent: has_property_. - -`has_property` --------------- -The ``has_property`` statement allows you to define properties which rely on -their entity's table (and columns) being defined before they can be declared -themselves. 
The `has_property` statement takes two arguments: first the name of -the property to be defined and second a function (often given as an anonymous -lambda) taking one argument and returning the desired SQLAlchemy property. That -function will be called whenever the entity table is completely defined, and -will be given the .c attribute of the entity as argument (as a way to access -the entity columns). - -Here is a quick example of how to use ``has_property``. - -.. sourcecode:: python - - class OrderLine(Entity): - has_field('quantity', Float) - has_field('unit_price', Float) - has_property('price', - lambda c: column_property( - (c.quantity * c.unit_price).label('price'))) -''' - -from elixir.statements import PropertyStatement -from sqlalchemy.orm import column_property, synonym - -__doc_all__ = ['EntityBuilder', 'Property', 'GenericProperty', - 'ColumnProperty'] - -class EntityBuilder(object): - ''' - Abstract base class for all entity builders. An Entity builder is a class - of objects which can be added to an Entity (usually by using special - properties or statements) to "build" that entity. Building an entity, - meaning to add columns to its "main" table, create other tables, add - properties to its mapper, ... To do so an EntityBuilder must override the - corresponding method(s). This is to ensure the different operations happen - in the correct order (for example, that the table is fully created before - the mapper that use it is defined). - ''' - def create_pk_cols(self): - pass - - def create_non_pk_cols(self): - pass - - def before_table(self): - pass - - def create_tables(self): - ''' - Subclasses may override this method to create tables. - ''' - - def after_table(self): - pass - - def create_properties(self): - ''' - Subclasses may override this method to add properties to the involved - entity. 
- ''' - - def before_mapper(self): - pass - - def after_mapper(self): - pass - - def finalize(self): - pass - - # helper methods - def add_table_column(self, column): - self.entity._descriptor.add_column(column) - - def add_mapper_property(self, name, prop): - self.entity._descriptor.add_property(name, prop) - - def add_mapper_extension(self, ext): - self.entity._descriptor.add_mapper_extension(ext) - - -class CounterMeta(type): - ''' - A simple meta class which adds a ``_counter`` attribute to the instances of - the classes it is used on. This counter is simply incremented for each new - instance. - ''' - counter = 0 - - def __call__(self, *args, **kwargs): - instance = type.__call__(self, *args, **kwargs) - instance._counter = CounterMeta.counter - CounterMeta.counter += 1 - return instance - - -class Property(EntityBuilder): - ''' - Abstract base class for all properties of an Entity. - ''' - __metaclass__ = CounterMeta - - def __init__(self, *args, **kwargs): - self.entity = None - self.name = None - - def attach(self, entity, name): - """Attach this property to its entity, using 'name' as name. - - Properties will be attached in the order they were declared. - """ - self.entity = entity - self.name = name - - # register this property as a builder - entity._descriptor.builders.append(self) - - def __repr__(self): - return "Property(%s, %s)" % (self.name, self.entity) - - -class GenericProperty(Property): - ''' - Generic catch-all class to wrap an SQLAlchemy property. - - .. sourcecode:: python - - class OrderLine(Entity): - quantity = Field(Float) - unit_price = Field(Numeric) - price = GenericProperty(lambda c: column_property( - (c.quantity * c.unit_price).label('price'))) - ''' - - def __init__(self, prop, *args, **kwargs): - super(GenericProperty, self).__init__(*args, **kwargs) - self.prop = prop - #XXX: move this to Property? 
- self.args = args - self.kwargs = kwargs - - def create_properties(self): - if hasattr(self.prop, '__call__'): - prop_value = self.prop(self.entity.table.c) - else: - prop_value = self.prop - prop_value = self.evaluate_property(prop_value) - self.add_mapper_property(self.name, prop_value) - - def evaluate_property(self, prop): - if self.args or self.kwargs: - raise Exception('superfluous arguments passed to GenericProperty') - return prop - - -class ColumnProperty(GenericProperty): - ''' - A specialized form of the GenericProperty to generate SQLAlchemy - ``column_property``'s. - - It takes a function (often given as an anonymous lambda) as its first - argument. Other arguments and keyword arguments are forwarded to the - column_property construct. That first-argument function must accept exactly - one argument and must return the desired (scalar-returning) SQLAlchemy - ClauseElement. - - The function will be called whenever the entity table is completely - defined, and will be given - the .c attribute of the table of the entity as argument (as a way to - access the entity columns). The ColumnProperty will first wrap your - ClauseElement in an - "empty" label (ie it will be labelled automatically during queries), - then wrap that in a column_property. - - .. sourcecode:: python - - class OrderLine(Entity): - quantity = Field(Float) - unit_price = Field(Numeric) - price = ColumnProperty(lambda c: c.quantity * c.unit_price, - deferred=True) - - Please look at the `corresponding SQLAlchemy - documentation `_ for details. - ''' - - def evaluate_property(self, prop): - return column_property(prop.label(None), *self.args, **self.kwargs) - - -class Synonym(GenericProperty): - ''' - This class represents a synonym property of another property (column, ...) - of an entity. As opposed to the `synonym` kwarg to the Field class (which - share the same goal), this class can be used to define a synonym of a - property defined in a parent class (of the current class). 
On the other - hand, it cannot define a synonym for the purpose of using a standard python - property in queries. See the Field class for details on that usage. - - .. sourcecode:: python - - class Person(Entity): - name = Field(String(30)) - primary_email = Field(String(100)) - email_address = Synonym('primary_email') - - class User(Person): - user_name = Synonym('name') - password = Field(String(20)) - ''' - - def evaluate_property(self, prop): - return synonym(prop, *self.args, **self.kwargs) - -#class Composite(GenericProperty): -# def __init__(self, prop): -# super(GenericProperty, self).__init__() -# self.prop = prop - -# def evaluate_property(self, prop): -# return composite(prop.label(self.name)) - -#start = Composite(Point, lambda c: (c.x1, c.y1)) - -#mapper(Vertex, vertices, properties={ -# 'start':composite(Point, vertices.c.x1, vertices.c.y1), -# 'end':composite(Point, vertices.c.x2, vertices.c.y2) -#}) - - -has_property = PropertyStatement(GenericProperty) - diff --git a/libs/elixir/relationships.py b/libs/elixir/relationships.py deleted file mode 100644 index 6c14dbb6b2..0000000000 --- a/libs/elixir/relationships.py +++ /dev/null @@ -1,1247 +0,0 @@ -''' -This module provides support for defining relationships between your Elixir -entities. Elixir currently supports two syntaxes to do so: the default -`Attribute-based syntax`_ which supports the following types of relationships: -ManyToOne_, OneToMany_, OneToOne_ and ManyToMany_, as well as a -`DSL-based syntax`_ which provides the following statements: belongs_to_, -has_many_, has_one_ and has_and_belongs_to_many_. - -====================== -Attribute-based syntax -====================== - -The first argument to all these "normal" relationship classes is the name of -the class (entity) you are relating to. - -Following that first mandatory argument, any number of additional keyword -arguments can be specified for advanced behavior. 
See each relationship type -for a list of their specific keyword arguments. At this point, we'll just note -that all the arguments that are not specifically processed by Elixir, as -mentioned in the documentation below are passed on to the SQLAlchemy -``relation`` function. So, please refer to the `SQLAlchemy relation function's -documentation `_ for further detail about which -keyword arguments are supported. - -You should keep in mind that the following -keyword arguments are automatically generated by Elixir and should not be used -unless you want to override the value provided by Elixir: ``uselist``, -``remote_side``, ``secondary``, ``primaryjoin`` and ``secondaryjoin``. - -Additionally, if you want a bidirectionnal relationship, you should define the -inverse relationship on the other entity explicitly (as opposed to how -SQLAlchemy's backrefs are defined). In non-ambiguous situations, Elixir will -match relationships together automatically. If there are several relationships -of the same type between two entities, Elixir is not able to determine which -relationship is the inverse of which, so you have to disambiguate the -situation by giving the name of the inverse relationship in the ``inverse`` -keyword argument. - -Here is a detailed explanation of each relation type: - -`ManyToOne` ------------ - -Describes the child's side of a parent-child relationship. For example, -a `Pet` object may belong to its owner, who is a `Person`. This could be -expressed like so: - -.. sourcecode:: python - - class Pet(Entity): - owner = ManyToOne('Person') - -Behind the scene, assuming the primary key of the `Person` entity is -an integer column named `id`, the ``ManyToOne`` relationship will -automatically add an integer column named `owner_id` to the entity, with a -foreign key referencing the `id` column of the `Person` entity. 
- -In addition to the keyword arguments inherited from SQLAlchemy's relation -function, ``ManyToOne`` relationships accept the following optional arguments -which will be directed to the created column: - -+----------------------+------------------------------------------------------+ -| Option Name | Description | -+======================+======================================================+ -| ``colname`` | Specify a custom name for the foreign key column(s). | -| | This argument accepts either a single string or a | -| | list of strings. The number of strings passed must | -| | match the number of primary key columns of the target| -| | entity. If this argument is not used, the name of the| -| | column(s) is generated with the pattern | -| | defined in options.FKCOL_NAMEFORMAT, which is, by | -| | default: "%(relname)s_%(key)s", where relname is the | -| | name of the ManyToOne relationship, and 'key' is the | -| | name (key) of the primary column in the target | -| | entity. That's with, in the above Pet/owner example, | -| | the name of the column would be: "owner_id". | -+----------------------+------------------------------------------------------+ -| ``required`` | Specify whether or not this field can be set to None | -| | (left without a value). Defaults to ``False``, | -| | unless the field is a primary key. | -+----------------------+------------------------------------------------------+ -| ``primary_key`` | Specify whether or not the column(s) created by this | -| | relationship should act as a primary_key. | -| | Defaults to ``False``. | -+----------------------+------------------------------------------------------+ -| ``column_kwargs`` | A dictionary holding any other keyword argument you | -| | might want to pass to the Column. | -+----------------------+------------------------------------------------------+ -| ``target_column`` | Name (or list of names) of the target column(s). 
| -| | If this argument is not specified, the target entity | -| | primary key column(s) are used. | -+----------------------+------------------------------------------------------+ - -The following optional arguments are also supported to customize the -ForeignKeyConstraint that is created: - -+----------------------+------------------------------------------------------+ -| Option Name | Description | -+======================+======================================================+ -| ``use_alter`` | If True, SQLAlchemy will add the constraint in a | -| | second SQL statement (as opposed to within the | -| | create table statement). This permits to define | -| | tables with a circular foreign key dependency | -| | between them. | -+----------------------+------------------------------------------------------+ -| ``ondelete`` | Value for the foreign key constraint ondelete clause.| -| | May be one of: ``cascade``, ``restrict``, | -| | ``set null``, or ``set default``. | -+----------------------+------------------------------------------------------+ -| ``onupdate`` | Value for the foreign key constraint onupdate clause.| -| | May be one of: ``cascade``, ``restrict``, | -| | ``set null``, or ``set default``. | -+----------------------+------------------------------------------------------+ -| ``constraint_kwargs``| A dictionary holding any other keyword argument you | -| | might want to pass to the Constraint. | -+----------------------+------------------------------------------------------+ - -In some cases, you may want to declare the foreign key column explicitly, -instead of letting it be generated automatically. There are several reasons to -that: it could be because you want to declare it with precise arguments and -using column_kwargs makes your code ugly, or because the name of -your column conflicts with the property name (in which case an error is -thrown). 
In those cases, you can use the ``field`` argument to specify an -already-declared field to be used for the foreign key column. - -For example, for the Pet example above, if you want the database column -(holding the foreign key) to be called 'owner', one should use the field -parameter to specify the field manually. - -.. sourcecode:: python - - class Pet(Entity): - owner_id = Field(Integer, colname='owner') - owner = ManyToOne('Person', field=owner_id) - -+----------------------+------------------------------------------------------+ -| Option Name | Description | -+======================+======================================================+ -| ``field`` | Specify the previously-declared field to be used for | -| | the foreign key column. Use of this parameter is | -| | mutually exclusive with the colname and column_kwargs| -| | arguments. | -+----------------------+------------------------------------------------------+ - - -Additionally, Elixir supports the belongs_to_ statement as an alternative, -DSL-based, syntax to define ManyToOne_ relationships. - - -`OneToMany` ------------ - -Describes the parent's side of a parent-child relationship when there can be -several children. For example, a `Person` object has many children, each of -them being a `Person`. This could be expressed like so: - -.. sourcecode:: python - - class Person(Entity): - parent = ManyToOne('Person') - children = OneToMany('Person') - -Note that a ``OneToMany`` relationship **cannot exist** without a -corresponding ``ManyToOne`` relationship in the other way. This is because the -``OneToMany`` relationship needs the foreign key created by the ``ManyToOne`` -relationship. 
- -In addition to keyword arguments inherited from SQLAlchemy, ``OneToMany`` -relationships accept the following optional (keyword) arguments: - -+--------------------+--------------------------------------------------------+ -| Option Name | Description | -+====================+========================================================+ -| ``order_by`` | Specify which field(s) should be used to sort the | -| | results given by accessing the relation field. | -| | Note that this sort order is only applied when loading | -| | objects from the database. Objects appended to the | -| | collection afterwards are not re-sorted in-memory on | -| | the fly. | -| | This argument accepts either a string or a list of | -| | strings, each corresponding to the name of a field in | -| | the target entity. These field names can optionally be | -| | prefixed by a minus (for descending order). | -+--------------------+--------------------------------------------------------+ -| ``filter`` | Specify a filter criterion (as a clause element) for | -| | this relationship. This criterion will be ``and_`` ed | -| | with the normal join criterion (primaryjoin) generated | -| | by Elixir for the relationship. For example: | -| | boston_addresses = | -| | OneToMany('Address', filter=Address.city == 'Boston') | -+--------------------+--------------------------------------------------------+ - -Additionally, Elixir supports an alternate, DSL-based, syntax to define -OneToMany_ relationships, with the has_many_ statement. - - -`OneToOne` ----------- - -Describes the parent's side of a parent-child relationship when there is only -one child. For example, a `Car` object has one gear stick, which is -represented as a `GearStick` object. This could be expressed like so: - -.. 
sourcecode:: python - - class Car(Entity): - gear_stick = OneToOne('GearStick', inverse='car') - - class GearStick(Entity): - car = ManyToOne('Car') - -Note that a ``OneToOne`` relationship **cannot exist** without a corresponding -``ManyToOne`` relationship in the other way. This is because the ``OneToOne`` -relationship needs the foreign_key created by the ``ManyToOne`` relationship. - -Additionally, Elixir supports an alternate, DSL-based, syntax to define -OneToOne_ relationships, with the has_one_ statement. - - -`ManyToMany` ------------- - -Describes a relationship in which one kind of entity can be related to several -objects of the other kind but the objects of that other kind can be related to -several objects of the first kind. For example, an `Article` can have several -tags, but the same `Tag` can be used on several articles. - -.. sourcecode:: python - - class Article(Entity): - tags = ManyToMany('Tag') - - class Tag(Entity): - articles = ManyToMany('Article') - -Behind the scene, the ``ManyToMany`` relationship will automatically create an -intermediate table to host its data. - -Note that you don't necessarily need to define the inverse relationship. In -our example, even though we want tags to be usable on several articles, we -might not be interested in which articles correspond to a particular tag. In -that case, we could have omitted the `Tag` side of the relationship. - -If your ``ManyToMany`` relationship is self-referencial, the entity -containing it is autoloaded (and you don't intend to specify both the -primaryjoin and secondaryjoin arguments manually), you must specify at least -one of either the ``remote_colname`` or ``local_colname`` argument. 
- -In addition to keyword arguments inherited from SQLAlchemy, ``ManyToMany`` -relationships accept the following optional (keyword) arguments: - -+--------------------+--------------------------------------------------------+ -| Option Name | Description | -+====================+========================================================+ -| ``tablename`` | Specify a custom name for the intermediary table. This | -| | can be used both when the tables needs to be created | -| | and when the table is autoloaded/reflected from the | -| | database. If this argument is not used, a name will be | -| | automatically generated by Elixir depending on the name| -| | of the tables of the two entities of the relationship, | -| | the name of the relationship, and, if present, the name| -| | of its inverse. Even though this argument is optional, | -| | it is wise to use it if you are not sure what are the | -| | exact consequence of using a generated table name. | -+--------------------+--------------------------------------------------------+ -| ``schema`` | Specify a custom schema for the intermediate table. | -| | This can be used both when the tables needs to | -| | be created and when the table is autoloaded/reflected | -| | from the database. | -+--------------------+--------------------------------------------------------+ -| ``remote_colname`` | A string or list of strings specifying the names of | -| | the column(s) in the intermediary table which | -| | reference the "remote"/target entity's table. | -+--------------------+--------------------------------------------------------+ -| ``local_colname`` | A string or list of strings specifying the names of | -| | the column(s) in the intermediary table which | -| | reference the "local"/current entity's table. | -+--------------------+--------------------------------------------------------+ -| ``table`` | Use a manually created table. 
If this argument is | -| | used, Elixir will not generate a table for this | -| | relationship, and use the one given instead. This | -| | argument only accepts SQLAlchemy's Table objects. | -+--------------------+--------------------------------------------------------+ -| ``order_by`` | Specify which field(s) should be used to sort the | -| | results given by accessing the relation field. | -| | Note that this sort order is only applied when loading | -| | objects from the database. Objects appended to the | -| | collection afterwards are not re-sorted in-memory on | -| | the fly. | -| | This argument accepts either a string or a list of | -| | strings, each corresponding to the name of a field in | -| | the target entity. These field names can optionally be | -| | prefixed by a minus (for descending order). | -+----------------------+------------------------------------------------------+ -| ``ondelete`` | Value for the foreign key constraint ondelete clause. | -| | May be one of: ``cascade``, ``restrict``, | -| | ``set null``, or ``set default``. | -+--------------------+--------------------------------------------------------+ -| ``onupdate`` | Value for the foreign key constraint onupdate clause. | -| | May be one of: ``cascade``, ``restrict``, | -| | ``set null``, or ``set default``. | -+--------------------+--------------------------------------------------------+ -| ``table_kwargs`` | A dictionary holding any other keyword argument you | -| | might want to pass to the underlying Table object. | -+--------------------+--------------------------------------------------------+ - - -================ -DSL-based syntax -================ - -The following DSL statements provide an alternative way to define relationships -between your entities. The first argument to all those statements is the name -of the relationship, the second is the 'kind' of object you are relating to -(it is usually given using the ``of_kind`` keyword). 
- -`belongs_to` ------------- - -The ``belongs_to`` statement is the DSL syntax equivalent to the ManyToOne_ -relationship. As such, it supports all the same arguments as ManyToOne_ -relationships. - -.. sourcecode:: python - - class Pet(Entity): - belongs_to('feeder', of_kind='Person') - belongs_to('owner', of_kind='Person', colname="owner_id") - - -`has_many` ----------- - -The ``has_many`` statement is the DSL syntax equivalent to the OneToMany_ -relationship. As such, it supports all the same arguments as OneToMany_ -relationships. - -.. sourcecode:: python - - class Person(Entity): - belongs_to('parent', of_kind='Person') - has_many('children', of_kind='Person') - -There is also an alternate form of the ``has_many`` relationship that takes -only two keyword arguments: ``through`` and ``via`` in order to encourage a -richer form of many-to-many relationship that is an alternative to the -``has_and_belongs_to_many`` statement. Here is an example: - -.. sourcecode:: python - - class Person(Entity): - has_field('name', Unicode) - has_many('assignments', of_kind='Assignment') - has_many('projects', through='assignments', via='project') - - class Assignment(Entity): - has_field('start_date', DateTime) - belongs_to('person', of_kind='Person') - belongs_to('project', of_kind='Project') - - class Project(Entity): - has_field('title', Unicode) - has_many('assignments', of_kind='Assignment') - -In the above example, a `Person` has many `projects` through the `Assignment` -relationship object, via a `project` attribute. - - -`has_one` ---------- - -The ``has_one`` statement is the DSL syntax equivalent to the OneToOne_ -relationship. As such, it supports all the same arguments as OneToOne_ -relationships. - -.. 
sourcecode:: python - - class Car(Entity): - has_one('gear_stick', of_kind='GearStick', inverse='car') - - class GearStick(Entity): - belongs_to('car', of_kind='Car') - - -`has_and_belongs_to_many` -------------------------- - -The ``has_and_belongs_to_many`` statement is the DSL syntax equivalent to the -ManyToMany_ relationship. As such, it supports all the same arguments as -ManyToMany_ relationships. - -.. sourcecode:: python - - class Article(Entity): - has_and_belongs_to_many('tags', of_kind='Tag') - - class Tag(Entity): - has_and_belongs_to_many('articles', of_kind='Article') - -''' - -import warnings - -from sqlalchemy import ForeignKeyConstraint, Column, Table, and_ -from sqlalchemy.orm import relation, backref, class_mapper -from sqlalchemy.ext.associationproxy import association_proxy - -import options -from elixir.statements import ClassMutator -from elixir.properties import Property -from elixir.entity import EntityMeta, DEBUG - -__doc_all__ = [] - - -class Relationship(Property): - ''' - Base class for relationships. - ''' - - def __init__(self, of_kind, inverse=None, *args, **kwargs): - super(Relationship, self).__init__() - - self.of_kind = of_kind - self.inverse_name = inverse - - self._target = None - - self.property = None # sqlalchemy property - self.backref = None # sqlalchemy backref - - #TODO: unused for now - self.args = args - self.kwargs = kwargs - - def attach(self, entity, name): - super(Relationship, self).attach(entity, name) - entity._descriptor.relationships.append(self) - - def create_pk_cols(self): - self.create_keys(True) - - def create_non_pk_cols(self): - self.create_keys(False) - - def create_keys(self, pk): - ''' - Subclasses (ie. concrete relationships) may override this method to - create foreign keys. 
- ''' - - def create_properties(self): - if self.property or self.backref: - return - - kwargs = self.get_prop_kwargs() - if 'order_by' in kwargs: - kwargs['order_by'] = \ - self.target._descriptor.translate_order_by(kwargs['order_by']) - - # transform callable arguments - for arg in ('primaryjoin', 'secondaryjoin', 'remote_side', - 'foreign_keys'): - kwarg = kwargs.get(arg, None) - if hasattr(kwarg, '__call__'): - kwargs[arg] = kwarg() - - # viewonly relationships need to create "standalone" relations (ie - # shouldn't be a backref of another relation). - if self.inverse and not kwargs.get('viewonly', False): - # check if the inverse was already processed (and thus has already - # defined a backref we can use) - if self.inverse.backref: - # let the user override the backref argument - if 'backref' not in kwargs: - kwargs['backref'] = self.inverse.backref - else: - # SQLAlchemy doesn't like when 'secondary' is both defined on - # the relation and the backref - kwargs.pop('secondary', None) - - # define backref for use by the inverse - self.backref = backref(self.name, **kwargs) - return - - self.property = relation(self.target, **kwargs) - self.add_mapper_property(self.name, self.property) - - @property - def target(self): - if not self._target: - if isinstance(self.of_kind, basestring): - collection = self.entity._descriptor.collection - self._target = collection.resolve(self.of_kind, self.entity) - else: - self._target = self.of_kind - return self._target - - @property - def inverse(self): - if not hasattr(self, '_inverse'): - if self.inverse_name: - desc = self.target._descriptor - inverse = desc.find_relationship(self.inverse_name) - if inverse is None: - raise Exception( - "Couldn't find a relationship named '%s' in " - "entity '%s' or its parent entities." 
- % (self.inverse_name, self.target.__name__)) - assert self.match_type_of(inverse), \ - "Relationships '%s' in entity '%s' and '%s' in entity " \ - "'%s' cannot be inverse of each other because their " \ - "types do not form a valid combination." % \ - (self.name, self.entity.__name__, - self.inverse_name, self.target.__name__) - else: - check_reverse = not self.kwargs.get('viewonly', False) - if isinstance(self.target, EntityMeta): - inverse = self.target._descriptor.get_inverse_relation( - self, check_reverse=check_reverse) - else: - inverse = None - self._inverse = inverse - if inverse and not self.kwargs.get('viewonly', False): - inverse._inverse = self - - return self._inverse - - def match_type_of(self, other): - return False - - def is_inverse(self, other): - # viewonly relationships are not symmetrical: a viewonly relationship - # should have exactly one inverse (a ManyToOne relationship), but that - # inverse shouldn't have the viewonly relationship as its inverse. - return not other.kwargs.get('viewonly', False) and \ - other is not self and \ - self.match_type_of(other) and \ - self.entity == other.target and \ - other.entity == self.target and \ - (self.inverse_name == other.name or not self.inverse_name) and \ - (other.inverse_name == self.name or not other.inverse_name) - - -class ManyToOne(Relationship): - ''' - - ''' - - def __init__(self, of_kind, - column_kwargs=None, - colname=None, required=None, primary_key=None, - field=None, - constraint_kwargs=None, - use_alter=None, ondelete=None, onupdate=None, - target_column=None, - *args, **kwargs): - - # 1) handle column-related args - - # check that the column arguments don't conflict - assert not (field and (column_kwargs or colname)), \ - "ManyToOne can accept the 'field' argument or column " \ - "arguments ('colname' or 'column_kwargs') but not both!" 
- - if colname and not isinstance(colname, list): - colname = [colname] - self.colname = colname or [] - - column_kwargs = column_kwargs or {} - # kwargs go by default to the relation(), so we need to manually - # extract those targeting the Column - if required is not None: - column_kwargs['nullable'] = not required - if primary_key is not None: - column_kwargs['primary_key'] = primary_key - # by default, created columns will have an index. - column_kwargs.setdefault('index', True) - self.column_kwargs = column_kwargs - - if field and not isinstance(field, list): - field = [field] - self.field = field or [] - - # 2) handle constraint kwargs - constraint_kwargs = constraint_kwargs or {} - if use_alter is not None: - constraint_kwargs['use_alter'] = use_alter - if ondelete is not None: - constraint_kwargs['ondelete'] = ondelete - if onupdate is not None: - constraint_kwargs['onupdate'] = onupdate - self.constraint_kwargs = constraint_kwargs - - # 3) misc arguments - if target_column and not isinstance(target_column, list): - target_column = [target_column] - self.target_column = target_column - - self.foreign_key = [] - self.primaryjoin_clauses = [] - - super(ManyToOne, self).__init__(of_kind, *args, **kwargs) - - def match_type_of(self, other): - return isinstance(other, (OneToMany, OneToOne)) - - @property - def target_table(self): - if isinstance(self.target, EntityMeta): - return self.target._descriptor.table - else: - return class_mapper(self.target).local_table - - def create_keys(self, pk): - ''' - Find all primary keys on the target and create foreign keys on the - source accordingly. 
- ''' - - if self.foreign_key: - return - - if self.column_kwargs.get('primary_key', False) != pk: - return - - source_desc = self.entity._descriptor - if isinstance(self.target, EntityMeta): - # make sure the target has all its pk set up - #FIXME: this is not enough when specifying target_column manually, - # on unique, non-pk col, see tests/test_m2o.py:test_non_pk_forward - self.target._descriptor.create_pk_cols() - - #XXX: another option, instead of the FakeTable, would be to create an - # EntityDescriptor for the SA class. - target_table = self.target_table - - if source_desc.autoload: - #TODO: allow target_column to be used as an alternative to - # specifying primaryjoin, to be consistent with non-autoloaded - # tables - if self.colname: - if 'primaryjoin' not in self.kwargs: - self.primaryjoin_clauses = \ - _get_join_clauses(self.entity.table, - self.colname, None, - target_table)[0] - if not self.primaryjoin_clauses: - colnames = ', '.join(self.colname) - raise Exception( - "Couldn't find a foreign key constraint in table " - "'%s' using the following columns: %s." - % (self.entity.table.name, colnames)) - else: - # in this case we let SA handle everything. - # XXX: we might want to try to build join clauses anyway so - # that we know whether there is an ambiguity or not, and - # suggest using colname if there is one - pass - if self.field: - raise NotImplementedError( - "'field' argument not allowed on autoloaded table " - "relationships.") - else: - fk_refcols = [] - fk_colnames = [] - - if self.target_column is None: - target_columns = target_table.primary_key.columns - else: - target_columns = [target_table.columns[col] - for col in self.target_column] - - if not target_columns: - raise Exception("No primary key found in target table ('%s') " - "for the '%s' relationship of the '%s' entity." 
- % (target_table.name, self.name, - self.entity.__name__)) - if self.colname and \ - len(self.colname) != len(target_columns): - raise Exception( - "The number of column names provided in the colname " - "keyword argument of the '%s' relationship of the " - "'%s' entity is not the same as the number of columns " - "of the primary key of '%s'." - % (self.name, self.entity.__name__, - self.target.__name__)) - - for key_num, target_col in enumerate(target_columns): - if self.field: - col = self.field[key_num].column - else: - if self.colname: - colname = self.colname[key_num] - else: - colname = options.FKCOL_NAMEFORMAT % \ - {'relname': self.name, - 'key': target_col.key} - - # We can't add the column to the table directly as the - # table might not be created yet. - col = Column(colname, target_col.type, - **self.column_kwargs) - source_desc.add_column(col) - - # If the column name was specified, and it is the same as - # this property's name, there is going to be a conflict. - # Don't allow this to happen. - if col.key == self.name: - raise ValueError( - "ManyToOne named '%s' in '%s' conficts " - " with the column of the same name. " - "You should probably define the foreign key " - "field manually and use the 'field' " - "argument on the ManyToOne relationship" - % (self.name, self.entity.__name__)) - - # Build the list of local columns which will be part of - # the foreign key - self.foreign_key.append(col) - - # Store the names of those columns - fk_colnames.append(col.key) - - # Build the list of column "paths" the foreign key will - # point to - fk_refcols.append("%s.%s" % \ - (target_table.fullname, target_col.key)) - - # Build up the primary join. This is needed when you have - # several ManyToOne relationships between two objects - self.primaryjoin_clauses.append(col == target_col) - - if 'name' not in self.constraint_kwargs: - # In some databases (at least MySQL) the constraint name needs - # to be unique for the whole database, instead of per table. 
- fk_name = options.CONSTRAINT_NAMEFORMAT % \ - {'tablename': source_desc.tablename, - 'colnames': '_'.join(fk_colnames)} - self.constraint_kwargs['name'] = fk_name - - source_desc.add_constraint( - ForeignKeyConstraint(fk_colnames, fk_refcols, - **self.constraint_kwargs)) - - def get_prop_kwargs(self): - kwargs = {'uselist': False} - - if self.entity.table is self.target_table: - # this is needed because otherwise SA has no way to know what is - # the direction of the relationship since both columns present in - # the primaryjoin belong to the same table. In other words, it is - # necessary to know if this particular relation - # is the many-to-one side, or the one-to-xxx side. The foreignkey - # doesn't help in this case. - kwargs['remote_side'] = \ - [col for col in self.target_table.primary_key.columns] - - if self.primaryjoin_clauses: - kwargs['primaryjoin'] = and_(*self.primaryjoin_clauses) - - kwargs.update(self.kwargs) - - return kwargs - - -class OneToOne(Relationship): - uselist = False - - def __init__(self, of_kind, filter=None, *args, **kwargs): - self.filter = filter - if filter is not None: - # We set viewonly to True by default for filtered relationships, - # unless manually overridden. - # This is not strictly necessary, as SQLAlchemy allows non viewonly - # relationships with a custom join/filter. The example at: - # SADOCS/05/mappers.html#advdatamapping_relation_customjoin - # is not viewonly. Those relationships can be used as if the extra - # filter wasn't present when inserting. This can lead to a - # confusing behavior (if you insert data which doesn't match the - # extra criterion it'll get inserted anyway but you won't see it - # when you query back the attribute after a round-trip to the - # database). 
- if 'viewonly' not in kwargs: - kwargs['viewonly'] = True - super(OneToOne, self).__init__(of_kind, *args, **kwargs) - - def match_type_of(self, other): - return isinstance(other, ManyToOne) - - def create_keys(self, pk): - # make sure an inverse relationship exists - if self.inverse is None: - raise Exception( - "Couldn't find any relationship in '%s' which " - "match as inverse of the '%s' relationship " - "defined in the '%s' entity. If you are using " - "inheritance you " - "might need to specify inverse relationships " - "manually by using the 'inverse' argument." - % (self.target, self.name, - self.entity)) - - def get_prop_kwargs(self): - kwargs = {'uselist': self.uselist} - - #TODO: for now, we don't break any test if we remove those 2 lines. - # So, we should either complete the selfref test to prove that they - # are indeed useful, or remove them. It might be they are indeed - # useless because the remote_side is already setup in the other way - # (ManyToOne). - if self.entity.table is self.target.table: - # When using a manual/autoloaded table, it will be assigned - # an empty list, which doesn't seem to upset SQLAlchemy - kwargs['remote_side'] = self.inverse.foreign_key - - # Contrary to ManyToMany relationships, we need to specify the join - # clauses even if this relationship is not self-referencial because - # there could be several ManyToOne from the target class to us. 
- joinclauses = self.inverse.primaryjoin_clauses - if self.filter: - # We need to make a copy of the joinclauses, to not add the filter - # on the backref - joinclauses = joinclauses[:] + [self.filter(self.target.table.c)] - if joinclauses: - kwargs['primaryjoin'] = and_(*joinclauses) - - kwargs.update(self.kwargs) - - return kwargs - - -class OneToMany(OneToOne): - uselist = True - - -class ManyToMany(Relationship): - uselist = True - - def __init__(self, of_kind, tablename=None, - local_colname=None, remote_colname=None, - ondelete=None, onupdate=None, - table=None, schema=None, - filter=None, - table_kwargs=None, - *args, **kwargs): - self.user_tablename = tablename - - if local_colname and not isinstance(local_colname, list): - local_colname = [local_colname] - self.local_colname = local_colname or [] - if remote_colname and not isinstance(remote_colname, list): - remote_colname = [remote_colname] - self.remote_colname = remote_colname or [] - - self.ondelete = ondelete - self.onupdate = onupdate - - self.table = table - self.schema = schema - - #TODO: this can probably be simplified/moved elsewhere since the - #argument disappeared - self.column_format = options.M2MCOL_NAMEFORMAT - if not hasattr(self.column_format, '__call__'): - # we need to store the format in a variable so that the - # closure of the lambda is correct - format = self.column_format - self.column_format = lambda data: format % data - if options.MIGRATION_TO_07_AID: - self.column_format = \ - migration_aid_m2m_column_formatter( - lambda data: options.OLD_M2MCOL_NAMEFORMAT % data, - self.column_format) - - self.filter = filter - if filter is not None: - # We set viewonly to True by default for filtered relationships, - # unless manually overridden. 
- if 'viewonly' not in kwargs: - kwargs['viewonly'] = True - - self.table_kwargs = table_kwargs or {} - - self.primaryjoin_clauses = [] - self.secondaryjoin_clauses = [] - - super(ManyToMany, self).__init__(of_kind, *args, **kwargs) - - def match_type_of(self, other): - return isinstance(other, ManyToMany) - - def create_tables(self): - if self.table is not None: - if 'primaryjoin' not in self.kwargs or \ - 'secondaryjoin' not in self.kwargs: - self._build_join_clauses() - assert self.inverse is None or self.inverse.table is None or \ - self.inverse.table is self.table - return - - if self.inverse: - inverse = self.inverse - if inverse.table is not None: - self.table = inverse.table - self.primaryjoin_clauses = inverse.secondaryjoin_clauses - self.secondaryjoin_clauses = inverse.primaryjoin_clauses - return - - assert not inverse.user_tablename or not self.user_tablename or \ - inverse.user_tablename == self.user_tablename - assert not inverse.remote_colname or not self.local_colname or \ - inverse.remote_colname == self.local_colname - assert not inverse.local_colname or not self.remote_colname or \ - inverse.local_colname == self.remote_colname - assert not inverse.schema or not self.schema or \ - inverse.schema == self.schema - assert not inverse.table_kwargs or not self.table_kwargs or \ - inverse.table_kwargs == self.table_kwargs - - self.user_tablename = inverse.user_tablename or self.user_tablename - self.local_colname = inverse.remote_colname or self.local_colname - self.remote_colname = inverse.local_colname or self.remote_colname - self.schema = inverse.schema or self.schema - self.local_colname = inverse.remote_colname or self.local_colname - - # compute table_kwargs - complete_kwargs = options.options_defaults['table_options'].copy() - complete_kwargs.update(self.table_kwargs) - - #needs: table_options['schema'], autoload, tablename, primary_keys, - #entity.__name__, table_fullname - e1_desc = self.entity._descriptor - e2_desc = self.target._descriptor 
- - e1_schema = e1_desc.table_options.get('schema', None) - e2_schema = e2_desc.table_options.get('schema', None) - schema = (self.schema is not None) and self.schema or e1_schema - - assert e1_schema == e2_schema or self.schema, \ - "Schema %r for entity %s differs from schema %r of entity %s." \ - " Consider using the schema-parameter. "\ - % (e1_schema, self.entity.__name__, - e2_schema, self.target.__name__) - - # First, we compute the name of the table. Note that some of the - # intermediary variables are reused later for the constraint - # names. - - # We use the name of the relation for the first entity - # (instead of the name of its primary key), so that we can - # have two many-to-many relations between the same objects - # without having a table name collision. - source_part = "%s_%s" % (e1_desc.tablename, self.name) - - # And we use only the name of the table of the second entity - # when there is no inverse, so that a many-to-many relation - # can be defined without an inverse. - if self.inverse: - target_part = "%s_%s" % (e2_desc.tablename, self.inverse.name) - else: - target_part = e2_desc.tablename - - if self.user_tablename: - tablename = self.user_tablename - else: - # We need to keep the table name consistent (independant of - # whether this relation or its inverse is setup first). - if self.inverse and source_part < target_part: - #XXX: use a different scheme for selfref (to not include the - # table name twice)? - tablename = "%s__%s" % (target_part, source_part) - else: - tablename = "%s__%s" % (source_part, target_part) - - if options.MIGRATION_TO_07_AID: - oldname = (self.inverse and - e1_desc.tablename < e2_desc.tablename) and \ - "%s__%s" % (target_part, source_part) or \ - "%s__%s" % (source_part, target_part) - if oldname != tablename: - warnings.warn( - "The generated table name for the '%s' relationship " - "on the '%s' entity changed from '%s' (the name " - "generated by Elixir 0.6.1 and earlier) to '%s'. 
" - "You should either rename the table in the database " - "to the new name or use the tablename argument on the " - "relationship to force the old name: tablename='%s'!" - % (self.name, self.entity.__name__, oldname, - tablename, oldname)) - - if e1_desc.autoload: - if not e2_desc.autoload: - raise Exception( - "Entity '%s' is autoloaded and its '%s' " - "ManyToMany relationship points to " - "the '%s' entity which is not autoloaded" - % (self.entity.__name__, self.name, - self.target.__name__)) - - self.table = Table(tablename, e1_desc.metadata, autoload=True, - **complete_kwargs) - if 'primaryjoin' not in self.kwargs or \ - 'secondaryjoin' not in self.kwargs: - self._build_join_clauses() - else: - # We pre-compute the names of the foreign key constraints - # pointing to the source (local) entity's table and to the - # target's table - - # In some databases (at least MySQL) the constraint names need - # to be unique for the whole database, instead of per table. - source_fk_name = "%s_fk" % source_part - if self.inverse: - target_fk_name = "%s_fk" % target_part - else: - target_fk_name = "%s_inverse_fk" % source_part - - columns = [] - constraints = [] - - for num, desc, fk_name, rel, inverse, colnames, join_clauses in ( - (0, e1_desc, source_fk_name, self, self.inverse, - self.local_colname, self.primaryjoin_clauses), - (1, e2_desc, target_fk_name, self.inverse, self, - self.remote_colname, self.secondaryjoin_clauses)): - - fk_colnames = [] - fk_refcols = [] - if colnames: - assert len(colnames) == len(desc.primary_keys) - else: - # The data generated here will be fed to the M2M column - # formatter to generate the name of the columns of the - # intermediate table for *one* side of the relationship, - # that is, from the intermediate table to the current - # entity, as stored in the "desc" variable. - data = {# A) relationships info - - # the name of the rel going *from* the entity - # we are currently generating a column pointing - # *to*. 
This is generally *not* what you want to - # use. eg in a "Post" and "Tag" example, with - # relationships named 'tags' and 'posts', when - # creating the columns from the intermediate - # table to the "Post" entity, 'relname' will - # contain 'tags'. - 'relname': rel and rel.name or 'inverse', - - # the name of the inverse relationship. In the - # above example, 'inversename' will contain - # 'posts'. - 'inversename': inverse and inverse.name - or 'inverse', - # is A == B? - 'selfref': e1_desc is e2_desc, - # provided for backward compatibility, DO NOT USE! - 'num': num, - # provided for backward compatibility, DO NOT USE! - 'numifself': e1_desc is e2_desc and str(num + 1) - or '', - # B) target information (from the perspective of - # the intermediate table) - 'target': desc.entity, - 'entity': desc.entity.__name__.lower(), - 'tablename': desc.tablename, - - # C) current (intermediate) table name - 'current_table': tablename - } - colnames = [] - for pk_col in desc.primary_keys: - data.update(key=pk_col.key) - colnames.append(self.column_format(data)) - - for pk_col, colname in zip(desc.primary_keys, colnames): - col = Column(colname, pk_col.type, primary_key=True) - columns.append(col) - - # Build the list of local columns which will be part - # of the foreign key. 
- fk_colnames.append(colname) - - # Build the list of column "paths" the foreign key will - # point to - target_path = "%s.%s" % (desc.table_fullname, pk_col.key) - fk_refcols.append(target_path) - - # Build join clauses (in case we have a self-ref) - if self.entity is self.target: - join_clauses.append(col == pk_col) - - onupdate = rel and rel.onupdate - ondelete = rel and rel.ondelete - - #FIXME: fk_name is misleading - constraints.append( - ForeignKeyConstraint(fk_colnames, fk_refcols, - name=fk_name, onupdate=onupdate, - ondelete=ondelete)) - - args = columns + constraints - - self.table = Table(tablename, e1_desc.metadata, - schema=schema, *args, **complete_kwargs) - if DEBUG: - print self.table.repr2() - - def _build_join_clauses(self): - # In the case we have a self-reference, we need to build join clauses - if self.entity is self.target: - if not self.local_colname and not self.remote_colname: - raise Exception( - "Self-referential ManyToMany " - "relationships in autoloaded entities need to have at " - "least one of either 'local_colname' or 'remote_colname' " - "argument specified. The '%s' relationship in the '%s' " - "entity doesn't have either." 
- % (self.name, self.entity.__name__)) - - self.primaryjoin_clauses, self.secondaryjoin_clauses = \ - _get_join_clauses(self.table, - self.local_colname, self.remote_colname, - self.entity.table) - - def get_prop_kwargs(self): - kwargs = {'secondary': self.table, - 'uselist': self.uselist} - - if self.filter: - # we need to make a copy of the joinclauses - secondaryjoin_clauses = self.secondaryjoin_clauses[:] + \ - [self.filter(self.target.table.c)] - else: - secondaryjoin_clauses = self.secondaryjoin_clauses - - if self.target is self.entity or self.filter: - kwargs['primaryjoin'] = and_(*self.primaryjoin_clauses) - kwargs['secondaryjoin'] = and_(*secondaryjoin_clauses) - - kwargs.update(self.kwargs) - - return kwargs - - def is_inverse(self, other): - return super(ManyToMany, self).is_inverse(other) and \ - (self.user_tablename == other.user_tablename or - (not self.user_tablename and not other.user_tablename)) - - -def migration_aid_m2m_column_formatter(oldformatter, newformatter): - def debug_formatter(data): - old_name = oldformatter(data) - new_name = newformatter(data) - if new_name != old_name: - complete_data = data.copy() - complete_data.update(old_name=old_name, - new_name=new_name, - targetname=data['target'].__name__) - # Specifying a stacklevel is useless in this case as the name - # generation is triggered by setup_all(), not by the declaration - # of the offending relationship. - warnings.warn("The '%(old_name)s' column in the " - "'%(current_table)s' table, used as the " - "intermediate table for the '%(relname)s' " - "relationship on the '%(targetname)s' entity " - "was renamed to '%(new_name)s'." 
- % complete_data) - return new_name - return debug_formatter - - -def _get_join_clauses(local_table, local_cols1, local_cols2, target_table): - primary_join, secondary_join = [], [] - cols1 = local_cols1[:] - cols1.sort() - cols1 = tuple(cols1) - - if local_cols2 is not None: - cols2 = local_cols2[:] - cols2.sort() - cols2 = tuple(cols2) - else: - cols2 = None - - # Build a map of fk constraints pointing to the correct table. - # The map is indexed on the local col names. - constraint_map = {} - for constraint in local_table.constraints: - if isinstance(constraint, ForeignKeyConstraint): - use_constraint = True - fk_colnames = [] - - # if all columns point to the correct table, we use the constraint - #TODO: check that it contains as many columns as the pk of the - #target entity, or even that it points to the actual pk columns - for fk in constraint.elements: - if fk.references(target_table): - # local column key - fk_colnames.append(fk.parent.key) - else: - use_constraint = False - if use_constraint: - fk_colnames.sort() - constraint_map[tuple(fk_colnames)] = constraint - - # Either the fk column names match explicitely with the columns given for - # one of the joins (primary or secondary), or we assume the current - # columns match because the columns for this join were not given and we - # know the other join is either not used (is None) or has an explicit - # match. - -#TODO: rewrite this. Even with the comment, I don't even understand it myself. 
- for cols, constraint in constraint_map.iteritems(): - if cols == cols1 or (cols != cols2 and - not cols1 and (cols2 in constraint_map or - cols2 is None)): - join = primary_join - elif cols == cols2 or (cols2 == () and cols1 in constraint_map): - join = secondary_join - else: - continue - for fk in constraint.elements: - join.append(fk.parent == fk.column) - return primary_join, secondary_join - - -def rel_mutator_handler(target): - def handler(entity, name, of_kind=None, through=None, via=None, - *args, **kwargs): - if through and via: - setattr(entity, name, - association_proxy(through, via, **kwargs)) - return - elif through or via: - raise Exception("'through' and 'via' relationship keyword " - "arguments should be used in combination.") - rel = target(of_kind, *args, **kwargs) - rel.attach(entity, name) - return handler - - -belongs_to = ClassMutator(rel_mutator_handler(ManyToOne)) -has_one = ClassMutator(rel_mutator_handler(OneToOne)) -has_many = ClassMutator(rel_mutator_handler(OneToMany)) -has_and_belongs_to_many = ClassMutator(rel_mutator_handler(ManyToMany)) diff --git a/libs/elixir/statements.py b/libs/elixir/statements.py deleted file mode 100644 index c21bf305e4..0000000000 --- a/libs/elixir/statements.py +++ /dev/null @@ -1,59 +0,0 @@ -import sys - -MUTATORS = '__elixir_mutators__' - -class ClassMutator(object): - ''' - DSL-style syntax - - A ``ClassMutator`` object represents a DSL term. - ''' - - def __init__(self, handler): - ''' - Create a new ClassMutator, using the `handler` callable to process it - when the time will come. - ''' - self.handler = handler - - # called when a mutator (eg. 
"has_field(...)") is parsed - def __call__(self, *args, **kwargs): - # self in this case is the "generic" mutator (eg "has_field") - - # jam this mutator into the class's mutator list - class_locals = sys._getframe(1).f_locals - mutators = class_locals.setdefault(MUTATORS, []) - mutators.append((self, args, kwargs)) - - def process(self, entity, *args, **kwargs): - ''' - Process one mutator. This version simply calls the handler callable, - but another mutator (sub)class could do more processing. - ''' - self.handler(entity, *args, **kwargs) - - -#TODO: move this to the super class (to be created here) of EntityMeta -def process_mutators(entity): - ''' - Apply all mutators of the given entity. That is, loop over all mutators - in the class's mutator list and process them. - ''' - # we don't use getattr here to not inherit from the parent mutators - # inadvertantly if the current entity hasn't defined any mutator. - mutators = entity.__dict__.get(MUTATORS, []) - for mutator, args, kwargs in mutators: - mutator.process(entity, *args, **kwargs) - -class Statement(ClassMutator): - - def process(self, entity, *args, **kwargs): - builder = self.handler(entity, *args, **kwargs) - entity._descriptor.builders.append(builder) - -class PropertyStatement(ClassMutator): - - def process(self, entity, name, *args, **kwargs): - prop = self.handler(*args, **kwargs) - prop.attach(entity, name) - diff --git a/libs/enzyme/fourcc.py b/libs/enzyme/fourcc.py index ac15b0b2b9..fc62188aa5 100644 --- a/libs/enzyme/fourcc.py +++ b/libs/enzyme/fourcc.py @@ -437,13 +437,14 @@ def resolve(code): 'H262': 'H.262', 'H263': 'H.263', 'H264': 'H.264 AVC', - 'H265': 'H.265', + 'H265': 'H.265 HEVC', 'H266': 'H.266', 'H267': 'H.267', 'H268': 'H.268', 'H269': 'H.269', 'HD10': 'BlueFish444 (lossless RGBA, YUV 10-bit)', 'HDX4': 'Jomigo HDX4', + 'HEVC': 'H.265 HEVC', 'HFYU': 'Huffman Lossless Codec', 'HMCR': 'Rendition Motion Compensation Format (HMCR)', 'HMRR': 'Rendition Motion Compensation Format 
(HMRR)', @@ -793,6 +794,7 @@ def resolve(code): 'WVP2': 'WVP2 codec', 'X263': 'Xirlink H.263', 'X264': 'XiWave GNU GPL x264 MPEG-4 Codec', + 'X265': 'H.265 HEVC', 'XLV0': 'NetXL Video Decoder', 'XMPG': 'Xing MPEG (I-Frame only)', 'XVID': 'XviD MPEG-4', diff --git a/libs/enzyme/mkv.py b/libs/enzyme/mkv.py index aba5325e29..69bd552c86 100644 --- a/libs/enzyme/mkv.py +++ b/libs/enzyme/mkv.py @@ -123,6 +123,7 @@ 'V_SNOW': 'SNOW', 'V_MPEG4/ISO/ASP': 'MP4V', 'V_MPEG4/ISO/AVC': 'AVC1', + 'V_MPEGH/ISO/HEVC': 'HEVC', 'A_AC3': 0x2000, 'A_MPEG/L3': 0x0055, 'A_MPEG/L2': 0x0050, diff --git a/libs/enzyme/mp4.py b/libs/enzyme/mp4.py index c53f30d3a6..a66d30ad72 100644 --- a/libs/enzyme/mp4.py +++ b/libs/enzyme/mp4.py @@ -284,6 +284,10 @@ def _readatom(self, file): while datasize: mdia = struct.unpack('>I4s', atomdata[pos:pos + 8]) + + if mdia[0] == 0: + break + if mdia[1] == 'mdhd': # Parse based on version of mdhd header. See # http://wiki.multimedia.cx/index.php?title=QuickTime_container#mdhd diff --git a/libs/flask/__init__.py b/libs/flask/__init__.py deleted file mode 100644 index b170ba5f93..0000000000 --- a/libs/flask/__init__.py +++ /dev/null @@ -1,44 +0,0 @@ -# -*- coding: utf-8 -*- -""" - flask - ~~~~~ - - A microframework based on Werkzeug. It's extensively documented - and follows best practice patterns. - - :copyright: (c) 2011 by Armin Ronacher. - :license: BSD, see LICENSE for more details. -""" - -__version__ = '0.9' - -# utilities we import from Werkzeug and Jinja2 that are unused -# in the module but are exported as public interface. 
-from werkzeug.exceptions import abort -from werkzeug.utils import redirect -from jinja2 import Markup, escape - -from .app import Flask, Request, Response -from .config import Config -from .helpers import url_for, jsonify, json_available, flash, \ - send_file, send_from_directory, get_flashed_messages, \ - get_template_attribute, make_response, safe_join, \ - stream_with_context -from .globals import current_app, g, request, session, _request_ctx_stack, \ - _app_ctx_stack -from .ctx import has_request_context, has_app_context, \ - after_this_request -from .module import Module -from .blueprints import Blueprint -from .templating import render_template, render_template_string - -# the signals -from .signals import signals_available, template_rendered, request_started, \ - request_finished, got_request_exception, request_tearing_down - -# only import json if it's available -if json_available: - from .helpers import json - -# backwards compat, goes away in 1.0 -from .sessions import SecureCookieSession as Session diff --git a/libs/flask/app.py b/libs/flask/app.py deleted file mode 100644 index d30d38097b..0000000000 --- a/libs/flask/app.py +++ /dev/null @@ -1,1701 +0,0 @@ -# -*- coding: utf-8 -*- -""" - flask.app - ~~~~~~~~~ - - This module implements the central WSGI application object. - - :copyright: (c) 2011 by Armin Ronacher. - :license: BSD, see LICENSE for more details. 
-""" - -from __future__ import with_statement - -import os -import sys -from threading import Lock -from datetime import timedelta -from itertools import chain -from functools import update_wrapper - -from werkzeug.datastructures import ImmutableDict -from werkzeug.routing import Map, Rule, RequestRedirect, BuildError -from werkzeug.exceptions import HTTPException, InternalServerError, \ - MethodNotAllowed, BadRequest - -from .helpers import _PackageBoundObject, url_for, get_flashed_messages, \ - locked_cached_property, _tojson_filter, _endpoint_from_view_func, \ - find_package -from .wrappers import Request, Response -from .config import ConfigAttribute, Config -from .ctx import RequestContext, AppContext, _RequestGlobals -from .globals import _request_ctx_stack, request -from .sessions import SecureCookieSessionInterface -from .module import blueprint_is_module -from .templating import DispatchingJinjaLoader, Environment, \ - _default_template_ctx_processor -from .signals import request_started, request_finished, got_request_exception, \ - request_tearing_down, appcontext_tearing_down - -# a lock used for logger initialization -_logger_lock = Lock() - - -def _make_timedelta(value): - if not isinstance(value, timedelta): - return timedelta(seconds=value) - return value - - -def setupmethod(f): - """Wraps a method so that it performs a check in debug mode if the - first request was already handled. - """ - def wrapper_func(self, *args, **kwargs): - if self.debug and self._got_first_request: - raise AssertionError('A setup function was called after the ' - 'first request was handled. 
This usually indicates a bug ' - 'in the application where a module was not imported ' - 'and decorators or other functionality was called too late.\n' - 'To fix this make sure to import all your view modules, ' - 'database models and everything related at a central place ' - 'before the application starts serving requests.') - return f(self, *args, **kwargs) - return update_wrapper(wrapper_func, f) - - -class Flask(_PackageBoundObject): - """The flask object implements a WSGI application and acts as the central - object. It is passed the name of the module or package of the - application. Once it is created it will act as a central registry for - the view functions, the URL rules, template configuration and much more. - - The name of the package is used to resolve resources from inside the - package or the folder the module is contained in depending on if the - package parameter resolves to an actual python package (a folder with - an `__init__.py` file inside) or a standard module (just a `.py` file). - - For more information about resource loading, see :func:`open_resource`. - - Usually you create a :class:`Flask` instance in your main module or - in the `__init__.py` file of your package like this:: - - from flask import Flask - app = Flask(__name__) - - .. admonition:: About the First Parameter - - The idea of the first parameter is to give Flask an idea what - belongs to your application. This name is used to find resources - on the file system, can be used by extensions to improve debugging - information and a lot more. - - So it's important what you provide there. If you are using a single - module, `__name__` is always the correct value. If you however are - using a package, it's usually recommended to hardcode the name of - your package there. 
- - For example if your application is defined in `yourapplication/app.py` - you should create it with one of the two versions below:: - - app = Flask('yourapplication') - app = Flask(__name__.split('.')[0]) - - Why is that? The application will work even with `__name__`, thanks - to how resources are looked up. However it will make debugging more - painful. Certain extensions can make assumptions based on the - import name of your application. For example the Flask-SQLAlchemy - extension will look for the code in your application that triggered - an SQL query in debug mode. If the import name is not properly set - up, that debugging information is lost. (For example it would only - pick up SQL queries in `yourapplication.app` and not - `yourapplication.views.frontend`) - - .. versionadded:: 0.7 - The `static_url_path`, `static_folder`, and `template_folder` - parameters were added. - - .. versionadded:: 0.8 - The `instance_path` and `instance_relative_config` parameters were - added. - - :param import_name: the name of the application package - :param static_url_path: can be used to specify a different path for the - static files on the web. Defaults to the name - of the `static_folder` folder. - :param static_folder: the folder with static files that should be served - at `static_url_path`. Defaults to the ``'static'`` - folder in the root path of the application. - :param template_folder: the folder that contains the templates that should - be used by the application. Defaults to - ``'templates'`` folder in the root path of the - application. - :param instance_path: An alternative instance path for the application. - By default the folder ``'instance'`` next to the - package or module is assumed to be the instance - path. - :param instance_relative_config: if set to `True` relative filenames - for loading the config are assumed to - be relative to the instance path instead - of the application root. - """ - - #: The class that is used for request objects. 
See :class:`~flask.Request` - #: for more information. - request_class = Request - - #: The class that is used for response objects. See - #: :class:`~flask.Response` for more information. - response_class = Response - - #: The class that is used for the :data:`~flask.g` instance. - #: - #: Example use cases for a custom class: - #: - #: 1. Store arbitrary attributes on flask.g. - #: 2. Add a property for lazy per-request database connectors. - #: 3. Return None instead of AttributeError on expected attributes. - #: 4. Raise exception if an unexpected attr is set, a "controlled" flask.g. - #: - #: .. versionadded:: 0.9 - request_globals_class = _RequestGlobals - - #: The debug flag. Set this to `True` to enable debugging of the - #: application. In debug mode the debugger will kick in when an unhandled - #: exception ocurrs and the integrated server will automatically reload - #: the application if changes in the code are detected. - #: - #: This attribute can also be configured from the config with the `DEBUG` - #: configuration key. Defaults to `False`. - debug = ConfigAttribute('DEBUG') - - #: The testing flag. Set this to `True` to enable the test mode of - #: Flask extensions (and in the future probably also Flask itself). - #: For example this might activate unittest helpers that have an - #: additional runtime cost which should not be enabled by default. - #: - #: If this is enabled and PROPAGATE_EXCEPTIONS is not changed from the - #: default it's implicitly enabled. - #: - #: This attribute can also be configured from the config with the - #: `TESTING` configuration key. Defaults to `False`. - testing = ConfigAttribute('TESTING') - - #: If a secret key is set, cryptographic components can use this to - #: sign cookies and other things. Set this to a complex random value - #: when you want to use the secure cookie for instance. - #: - #: This attribute can also be configured from the config with the - #: `SECRET_KEY` configuration key. Defaults to `None`. 
- secret_key = ConfigAttribute('SECRET_KEY') - - #: The secure cookie uses this for the name of the session cookie. - #: - #: This attribute can also be configured from the config with the - #: `SESSION_COOKIE_NAME` configuration key. Defaults to ``'session'`` - session_cookie_name = ConfigAttribute('SESSION_COOKIE_NAME') - - #: A :class:`~datetime.timedelta` which is used to set the expiration - #: date of a permanent session. The default is 31 days which makes a - #: permanent session survive for roughly one month. - #: - #: This attribute can also be configured from the config with the - #: `PERMANENT_SESSION_LIFETIME` configuration key. Defaults to - #: ``timedelta(days=31)`` - permanent_session_lifetime = ConfigAttribute('PERMANENT_SESSION_LIFETIME', - get_converter=_make_timedelta) - - #: Enable this if you want to use the X-Sendfile feature. Keep in - #: mind that the server has to support this. This only affects files - #: sent with the :func:`send_file` method. - #: - #: .. versionadded:: 0.2 - #: - #: This attribute can also be configured from the config with the - #: `USE_X_SENDFILE` configuration key. Defaults to `False`. - use_x_sendfile = ConfigAttribute('USE_X_SENDFILE') - - #: The name of the logger to use. By default the logger name is the - #: package name passed to the constructor. - #: - #: .. versionadded:: 0.4 - logger_name = ConfigAttribute('LOGGER_NAME') - - #: Enable the deprecated module support? This is active by default - #: in 0.7 but will be changed to False in 0.8. With Flask 1.0 modules - #: will be removed in favor of Blueprints - enable_modules = True - - #: The logging format used for the debug logger. This is only used when - #: the application is in debug mode, otherwise the attached logging - #: handler does the formatting. - #: - #: .. 
versionadded:: 0.3 - debug_log_format = ( - '-' * 80 + '\n' + - '%(levelname)s in %(module)s [%(pathname)s:%(lineno)d]:\n' + - '%(message)s\n' + - '-' * 80 - ) - - #: Options that are passed directly to the Jinja2 environment. - jinja_options = ImmutableDict( - extensions=['jinja2.ext.autoescape', 'jinja2.ext.with_'] - ) - - #: Default configuration parameters. - default_config = ImmutableDict({ - 'DEBUG': False, - 'TESTING': False, - 'PROPAGATE_EXCEPTIONS': None, - 'PRESERVE_CONTEXT_ON_EXCEPTION': None, - 'SECRET_KEY': None, - 'PERMANENT_SESSION_LIFETIME': timedelta(days=31), - 'USE_X_SENDFILE': False, - 'LOGGER_NAME': None, - 'SERVER_NAME': None, - 'APPLICATION_ROOT': None, - 'SESSION_COOKIE_NAME': 'session', - 'SESSION_COOKIE_DOMAIN': None, - 'SESSION_COOKIE_PATH': None, - 'SESSION_COOKIE_HTTPONLY': True, - 'SESSION_COOKIE_SECURE': False, - 'MAX_CONTENT_LENGTH': None, - 'SEND_FILE_MAX_AGE_DEFAULT': 12 * 60 * 60, # 12 hours - 'TRAP_BAD_REQUEST_ERRORS': False, - 'TRAP_HTTP_EXCEPTIONS': False, - 'PREFERRED_URL_SCHEME': 'http' - }) - - #: The rule object to use for URL rules created. This is used by - #: :meth:`add_url_rule`. Defaults to :class:`werkzeug.routing.Rule`. - #: - #: .. versionadded:: 0.7 - url_rule_class = Rule - - #: the test client that is used with when `test_client` is used. - #: - #: .. versionadded:: 0.7 - test_client_class = None - - #: the session interface to use. By default an instance of - #: :class:`~flask.sessions.SecureCookieSessionInterface` is used here. - #: - #: .. 
versionadded:: 0.8 - session_interface = SecureCookieSessionInterface() - - def __init__(self, import_name, static_path=None, static_url_path=None, - static_folder='static', template_folder='templates', - instance_path=None, instance_relative_config=False): - _PackageBoundObject.__init__(self, import_name, - template_folder=template_folder) - if static_path is not None: - from warnings import warn - warn(DeprecationWarning('static_path is now called ' - 'static_url_path'), stacklevel=2) - static_url_path = static_path - - if static_url_path is not None: - self.static_url_path = static_url_path - if static_folder is not None: - self.static_folder = static_folder - if instance_path is None: - instance_path = self.auto_find_instance_path() - elif not os.path.isabs(instance_path): - raise ValueError('If an instance path is provided it must be ' - 'absolute. A relative path was given instead.') - - #: Holds the path to the instance folder. - #: - #: .. versionadded:: 0.8 - self.instance_path = instance_path - - #: The configuration dictionary as :class:`Config`. This behaves - #: exactly like a regular dictionary but supports additional methods - #: to load a config from files. - self.config = self.make_config(instance_relative_config) - - # Prepare the deferred setup of the logger. - self._logger = None - self.logger_name = self.import_name - - #: A dictionary of all view functions registered. The keys will - #: be function names which are also used to generate URLs and - #: the values are the function objects themselves. - #: To register a view function, use the :meth:`route` decorator. - self.view_functions = {} - - # support for the now deprecated `error_handlers` attribute. The - # :attr:`error_handler_spec` shall be used now. - self._error_handlers = {} - - #: A dictionary of all registered error handlers. The key is `None` - #: for error handlers active on the application, otherwise the key is - #: the name of the blueprint. 
Each key points to another dictionary - #: where they key is the status code of the http exception. The - #: special key `None` points to a list of tuples where the first item - #: is the class for the instance check and the second the error handler - #: function. - #: - #: To register a error handler, use the :meth:`errorhandler` - #: decorator. - self.error_handler_spec = {None: self._error_handlers} - - #: A list of functions that are called when :meth:`url_for` raises a - #: :exc:`~werkzeug.routing.BuildError`. Each function registered here - #: is called with `error`, `endpoint` and `values`. If a function - #: returns `None` or raises a `BuildError` the next function is - #: tried. - #: - #: .. versionadded:: 0.9 - self.url_build_error_handlers = [] - - #: A dictionary with lists of functions that should be called at the - #: beginning of the request. The key of the dictionary is the name of - #: the blueprint this function is active for, `None` for all requests. - #: This can for example be used to open database connections or - #: getting hold of the currently logged in user. To register a - #: function here, use the :meth:`before_request` decorator. - self.before_request_funcs = {} - - #: A lists of functions that should be called at the beginning of the - #: first request to this instance. To register a function here, use - #: the :meth:`before_first_request` decorator. - #: - #: .. versionadded:: 0.8 - self.before_first_request_funcs = [] - - #: A dictionary with lists of functions that should be called after - #: each request. The key of the dictionary is the name of the blueprint - #: this function is active for, `None` for all requests. This can for - #: example be used to open database connections or getting hold of the - #: currently logged in user. To register a function here, use the - #: :meth:`after_request` decorator. 
- self.after_request_funcs = {} - - #: A dictionary with lists of functions that are called after - #: each request, even if an exception has occurred. The key of the - #: dictionary is the name of the blueprint this function is active for, - #: `None` for all requests. These functions are not allowed to modify - #: the request, and their return values are ignored. If an exception - #: occurred while processing the request, it gets passed to each - #: teardown_request function. To register a function here, use the - #: :meth:`teardown_request` decorator. - #: - #: .. versionadded:: 0.7 - self.teardown_request_funcs = {} - - #: A list of functions that are called when the application context - #: is destroyed. Since the application context is also torn down - #: if the request ends this is the place to store code that disconnects - #: from databases. - #: - #: .. versionadded:: 0.9 - self.teardown_appcontext_funcs = [] - - #: A dictionary with lists of functions that can be used as URL - #: value processor functions. Whenever a URL is built these functions - #: are called to modify the dictionary of values in place. The key - #: `None` here is used for application wide - #: callbacks, otherwise the key is the name of the blueprint. - #: Each of these functions has the chance to modify the dictionary - #: - #: .. versionadded:: 0.7 - self.url_value_preprocessors = {} - - #: A dictionary with lists of functions that can be used as URL value - #: preprocessors. The key `None` here is used for application wide - #: callbacks, otherwise the key is the name of the blueprint. - #: Each of these functions has the chance to modify the dictionary - #: of URL values before they are used as the keyword arguments of the - #: view function. For each function registered this one should also - #: provide a :meth:`url_defaults` function that adds the parameters - #: automatically again that were removed that way. - #: - #: .. 
versionadded:: 0.7 - self.url_default_functions = {} - - #: A dictionary with list of functions that are called without argument - #: to populate the template context. The key of the dictionary is the - #: name of the blueprint this function is active for, `None` for all - #: requests. Each returns a dictionary that the template context is - #: updated with. To register a function here, use the - #: :meth:`context_processor` decorator. - self.template_context_processors = { - None: [_default_template_ctx_processor] - } - - #: all the attached blueprints in a directory by name. Blueprints - #: can be attached multiple times so this dictionary does not tell - #: you how often they got attached. - #: - #: .. versionadded:: 0.7 - self.blueprints = {} - - #: a place where extensions can store application specific state. For - #: example this is where an extension could store database engines and - #: similar things. For backwards compatibility extensions should register - #: themselves like this:: - #: - #: if not hasattr(app, 'extensions'): - #: app.extensions = {} - #: app.extensions['extensionname'] = SomeObject() - #: - #: The key must match the name of the `flaskext` module. For example in - #: case of a "Flask-Foo" extension in `flaskext.foo`, the key would be - #: ``'foo'``. - #: - #: .. versionadded:: 0.7 - self.extensions = {} - - #: The :class:`~werkzeug.routing.Map` for this instance. You can use - #: this to change the routing converters after the class was created - #: but before any routes are connected. 
Example:: - #: - #: from werkzeug.routing import BaseConverter - #: - #: class ListConverter(BaseConverter): - #: def to_python(self, value): - #: return value.split(',') - #: def to_url(self, values): - #: return ','.join(BaseConverter.to_url(value) - #: for value in values) - #: - #: app = Flask(__name__) - #: app.url_map.converters['list'] = ListConverter - self.url_map = Map() - - # tracks internally if the application already handled at least one - # request. - self._got_first_request = False - self._before_request_lock = Lock() - - # register the static folder for the application. Do that even - # if the folder does not exist. First of all it might be created - # while the server is running (usually happens during development) - # but also because google appengine stores static files somewhere - # else when mapped with the .yml file. - if self.has_static_folder: - self.add_url_rule(self.static_url_path + '/', - endpoint='static', - view_func=self.send_static_file) - - def _get_error_handlers(self): - from warnings import warn - warn(DeprecationWarning('error_handlers is deprecated, use the ' - 'new error_handler_spec attribute instead.'), stacklevel=1) - return self._error_handlers - def _set_error_handlers(self, value): - self._error_handlers = value - self.error_handler_spec[None] = value - error_handlers = property(_get_error_handlers, _set_error_handlers) - del _get_error_handlers, _set_error_handlers - - @locked_cached_property - def name(self): - """The name of the application. This is usually the import name - with the difference that it's guessed from the run file if the - import name is main. This name is used as a display name when - Flask needs the name of the application. It can be set and overriden - to change the value. - - .. 
versionadded:: 0.8 - """ - if self.import_name == '__main__': - fn = getattr(sys.modules['__main__'], '__file__', None) - if fn is None: - return '__main__' - return os.path.splitext(os.path.basename(fn))[0] - return self.import_name - - @property - def propagate_exceptions(self): - """Returns the value of the `PROPAGATE_EXCEPTIONS` configuration - value in case it's set, otherwise a sensible default is returned. - - .. versionadded:: 0.7 - """ - rv = self.config['PROPAGATE_EXCEPTIONS'] - if rv is not None: - return rv - return self.testing or self.debug - - @property - def preserve_context_on_exception(self): - """Returns the value of the `PRESERVE_CONTEXT_ON_EXCEPTION` - configuration value in case it's set, otherwise a sensible default - is returned. - - .. versionadded:: 0.7 - """ - rv = self.config['PRESERVE_CONTEXT_ON_EXCEPTION'] - if rv is not None: - return rv - return self.debug - - @property - def logger(self): - """A :class:`logging.Logger` object for this application. The - default configuration is to log to stderr if the application is - in debug mode. This logger can be used to (surprise) log messages. - Here some examples:: - - app.logger.debug('A value for debugging') - app.logger.warning('A warning ocurred (%d apples)', 42) - app.logger.error('An error occoured') - - .. versionadded:: 0.3 - """ - if self._logger and self._logger.name == self.logger_name: - return self._logger - with _logger_lock: - if self._logger and self._logger.name == self.logger_name: - return self._logger - from flask.logging import create_logger - self._logger = rv = create_logger(self) - return rv - - @locked_cached_property - def jinja_env(self): - """The Jinja2 environment used to load templates.""" - rv = self.create_jinja_environment() - - # Hack to support the init_jinja_globals method which is supported - # until 1.0 but has an API deficiency. 
- if getattr(self.init_jinja_globals, 'im_func', None) is not \ - Flask.init_jinja_globals.im_func: - from warnings import warn - warn(DeprecationWarning('This flask class uses a customized ' - 'init_jinja_globals() method which is deprecated. ' - 'Move the code from that method into the ' - 'create_jinja_environment() method instead.')) - self.__dict__['jinja_env'] = rv - self.init_jinja_globals() - - return rv - - @property - def got_first_request(self): - """This attribute is set to `True` if the application started - handling the first request. - - .. versionadded:: 0.8 - """ - return self._got_first_request - - def make_config(self, instance_relative=False): - """Used to create the config attribute by the Flask constructor. - The `instance_relative` parameter is passed in from the constructor - of Flask (there named `instance_relative_config`) and indicates if - the config should be relative to the instance path or the root path - of the application. - - .. versionadded:: 0.8 - """ - root_path = self.root_path - if instance_relative: - root_path = self.instance_path - return Config(root_path, self.default_config) - - def auto_find_instance_path(self): - """Tries to locate the instance path if it was not provided to the - constructor of the application class. It will basically calculate - the path to a folder named ``instance`` next to your main file or - the package. - - .. versionadded:: 0.8 - """ - prefix, package_path = find_package(self.import_name) - if prefix is None: - return os.path.join(package_path, 'instance') - return os.path.join(prefix, 'var', self.name + '-instance') - - def open_instance_resource(self, resource, mode='rb'): - """Opens a resource from the application's instance folder - (:attr:`instance_path`). Otherwise works like - :meth:`open_resource`. Instance resources can also be opened for - writing. - - :param resource: the name of the resource. To access resources within - subfolders use forward slashes as separator. 
- """ - return open(os.path.join(self.instance_path, resource), mode) - - def create_jinja_environment(self): - """Creates the Jinja2 environment based on :attr:`jinja_options` - and :meth:`select_jinja_autoescape`. Since 0.7 this also adds - the Jinja2 globals and filters after initialization. Override - this function to customize the behavior. - - .. versionadded:: 0.5 - """ - options = dict(self.jinja_options) - if 'autoescape' not in options: - options['autoescape'] = self.select_jinja_autoescape - rv = Environment(self, **options) - rv.globals.update( - url_for=url_for, - get_flashed_messages=get_flashed_messages - ) - rv.filters['tojson'] = _tojson_filter - return rv - - def create_global_jinja_loader(self): - """Creates the loader for the Jinja2 environment. Can be used to - override just the loader and keeping the rest unchanged. It's - discouraged to override this function. Instead one should override - the :meth:`jinja_loader` function instead. - - The global loader dispatches between the loaders of the application - and the individual blueprints. - - .. versionadded:: 0.7 - """ - return DispatchingJinjaLoader(self) - - def init_jinja_globals(self): - """Deprecated. Used to initialize the Jinja2 globals. - - .. versionadded:: 0.5 - .. versionchanged:: 0.7 - This method is deprecated with 0.7. Override - :meth:`create_jinja_environment` instead. - """ - - def select_jinja_autoescape(self, filename): - """Returns `True` if autoescaping should be active for the given - template name. - - .. versionadded:: 0.5 - """ - if filename is None: - return False - return filename.endswith(('.html', '.htm', '.xml', '.xhtml')) - - def update_template_context(self, context): - """Update the template context with some commonly used variables. - This injects request, session, config and g into the template - context as well as everything template context processors want - to inject. 
Note that the as of Flask 0.6, the original values - in the context will not be overriden if a context processor - decides to return a value with the same key. - - :param context: the context as a dictionary that is updated in place - to add extra variables. - """ - funcs = self.template_context_processors[None] - bp = _request_ctx_stack.top.request.blueprint - if bp is not None and bp in self.template_context_processors: - funcs = chain(funcs, self.template_context_processors[bp]) - orig_ctx = context.copy() - for func in funcs: - context.update(func()) - # make sure the original values win. This makes it possible to - # easier add new variables in context processors without breaking - # existing views. - context.update(orig_ctx) - - def run(self, host=None, port=None, debug=None, **options): - """Runs the application on a local development server. If the - :attr:`debug` flag is set the server will automatically reload - for code changes and show a debugger in case an exception happened. - - If you want to run the application in debug mode, but disable the - code execution on the interactive debugger, you can pass - ``use_evalex=False`` as parameter. This will keep the debugger's - traceback screen active, but disable code execution. - - .. admonition:: Keep in Mind - - Flask will suppress any server error with a generic error page - unless it is in debug mode. As such to enable just the - interactive debugger without the code reloading, you have to - invoke :meth:`run` with ``debug=True`` and ``use_reloader=False``. - Setting ``use_debugger`` to `True` without being in debug mode - won't catch any exceptions because there won't be any to - catch. - - :param host: the hostname to listen on. Set this to ``'0.0.0.0'`` to - have the server available externally as well. Defaults to - ``'127.0.0.1'``. - :param port: the port of the webserver. Defaults to ``5000``. - :param debug: if given, enable or disable debug mode. - See :attr:`debug`. 
- :param options: the options to be forwarded to the underlying - Werkzeug server. See - :func:`werkzeug.serving.run_simple` for more - information. - """ - from werkzeug.serving import run_simple - if host is None: - host = '127.0.0.1' - if port is None: - port = 5000 - if debug is not None: - self.debug = bool(debug) - options.setdefault('use_reloader', self.debug) - options.setdefault('use_debugger', self.debug) - try: - run_simple(host, port, self, **options) - finally: - # reset the first request information if the development server - # resetted normally. This makes it possible to restart the server - # without reloader and that stuff from an interactive shell. - self._got_first_request = False - - def test_client(self, use_cookies=True): - """Creates a test client for this application. For information - about unit testing head over to :ref:`testing`. - - Note that if you are testing for assertions or exceptions in your - application code, you must set ``app.testing = True`` in order for the - exceptions to propagate to the test client. Otherwise, the exception - will be handled by the application (not visible to the test client) and - the only indication of an AssertionError or other exception will be a - 500 status code response to the test client. See the :attr:`testing` - attribute. For example:: - - app.testing = True - client = app.test_client() - - The test client can be used in a `with` block to defer the closing down - of the context until the end of the `with` block. This is useful if - you want to access the context locals for testing:: - - with app.test_client() as c: - rv = c.get('/?vodka=42') - assert request.args['vodka'] == '42' - - See :class:`~flask.testing.FlaskClient` for more information. - - .. versionchanged:: 0.4 - added support for `with` block usage for the client. - - .. 
versionadded:: 0.7 - The `use_cookies` parameter was added as well as the ability - to override the client to be used by setting the - :attr:`test_client_class` attribute. - """ - cls = self.test_client_class - if cls is None: - from flask.testing import FlaskClient as cls - return cls(self, self.response_class, use_cookies=use_cookies) - - def open_session(self, request): - """Creates or opens a new session. Default implementation stores all - session data in a signed cookie. This requires that the - :attr:`secret_key` is set. Instead of overriding this method - we recommend replacing the :class:`session_interface`. - - :param request: an instance of :attr:`request_class`. - """ - return self.session_interface.open_session(self, request) - - def save_session(self, session, response): - """Saves the session if it needs updates. For the default - implementation, check :meth:`open_session`. Instead of overriding this - method we recommend replacing the :class:`session_interface`. - - :param session: the session to be saved (a - :class:`~werkzeug.contrib.securecookie.SecureCookie` - object) - :param response: an instance of :attr:`response_class` - """ - return self.session_interface.save_session(self, session, response) - - def make_null_session(self): - """Creates a new instance of a missing session. Instead of overriding - this method we recommend replacing the :class:`session_interface`. - - .. versionadded:: 0.7 - """ - return self.session_interface.make_null_session(self) - - def register_module(self, module, **options): - """Registers a module with this application. The keyword argument - of this function are the same as the ones for the constructor of the - :class:`Module` class and will override the values of the module if - provided. - - .. versionchanged:: 0.7 - The module system was deprecated in favor for the blueprint - system. - """ - assert blueprint_is_module(module), 'register_module requires ' \ - 'actual module objects. 
Please upgrade to blueprints though.' - if not self.enable_modules: - raise RuntimeError('Module support was disabled but code ' - 'attempted to register a module named %r' % module) - else: - from warnings import warn - warn(DeprecationWarning('Modules are deprecated. Upgrade to ' - 'using blueprints. Have a look into the documentation for ' - 'more information. If this module was registered by a ' - 'Flask-Extension upgrade the extension or contact the author ' - 'of that extension instead. (Registered %r)' % module), - stacklevel=2) - - self.register_blueprint(module, **options) - - @setupmethod - def register_blueprint(self, blueprint, **options): - """Registers a blueprint on the application. - - .. versionadded:: 0.7 - """ - first_registration = False - if blueprint.name in self.blueprints: - assert self.blueprints[blueprint.name] is blueprint, \ - 'A blueprint\'s name collision ocurred between %r and ' \ - '%r. Both share the same name "%s". Blueprints that ' \ - 'are created on the fly need unique names.' % \ - (blueprint, self.blueprints[blueprint.name], blueprint.name) - else: - self.blueprints[blueprint.name] = blueprint - first_registration = True - blueprint.register(self, options, first_registration) - - @setupmethod - def add_url_rule(self, rule, endpoint=None, view_func=None, **options): - """Connects a URL rule. Works exactly like the :meth:`route` - decorator. If a view_func is provided it will be registered with the - endpoint. - - Basically this example:: - - @app.route('/') - def index(): - pass - - Is equivalent to the following:: - - def index(): - pass - app.add_url_rule('/', 'index', index) - - If the view_func is not provided you will need to connect the endpoint - to a view function like so:: - - app.view_functions['index'] = index - - Internally :meth:`route` invokes :meth:`add_url_rule` so if you want - to customize the behavior via subclassing you only need to change - this method. 
- - For more information refer to :ref:`url-route-registrations`. - - .. versionchanged:: 0.2 - `view_func` parameter added. - - .. versionchanged:: 0.6 - `OPTIONS` is added automatically as method. - - :param rule: the URL rule as string - :param endpoint: the endpoint for the registered URL rule. Flask - itself assumes the name of the view function as - endpoint - :param view_func: the function to call when serving a request to the - provided endpoint - :param options: the options to be forwarded to the underlying - :class:`~werkzeug.routing.Rule` object. A change - to Werkzeug is handling of method options. methods - is a list of methods this rule should be limited - to (`GET`, `POST` etc.). By default a rule - just listens for `GET` (and implicitly `HEAD`). - Starting with Flask 0.6, `OPTIONS` is implicitly - added and handled by the standard request handling. - """ - if endpoint is None: - endpoint = _endpoint_from_view_func(view_func) - options['endpoint'] = endpoint - methods = options.pop('methods', None) - - # if the methods are not given and the view_func object knows its - # methods we can use that instead. If neither exists, we go with - # a tuple of only `GET` as default. - if methods is None: - methods = getattr(view_func, 'methods', None) or ('GET',) - methods = set(methods) - - # Methods that should always be added - required_methods = set(getattr(view_func, 'required_methods', ())) - - # starting with Flask 0.8 the view_func object can disable and - # force-enable the automatic options handling. - provide_automatic_options = getattr(view_func, - 'provide_automatic_options', None) - - if provide_automatic_options is None: - if 'OPTIONS' not in methods: - provide_automatic_options = True - required_methods.add('OPTIONS') - else: - provide_automatic_options = False - - # Add the required methods now. - methods |= required_methods - - # due to a werkzeug bug we need to make sure that the defaults are - # None if they are an empty dictionary. 
This should not be necessary - # with Werkzeug 0.7 - options['defaults'] = options.get('defaults') or None - - rule = self.url_rule_class(rule, methods=methods, **options) - rule.provide_automatic_options = provide_automatic_options - self.url_map.add(rule) - if view_func is not None: - self.view_functions[endpoint] = view_func - - def route(self, rule, **options): - """A decorator that is used to register a view function for a - given URL rule. This does the same thing as :meth:`add_url_rule` - but is intended for decorator usage:: - - @app.route('/') - def index(): - return 'Hello World' - - For more information refer to :ref:`url-route-registrations`. - - :param rule: the URL rule as string - :param endpoint: the endpoint for the registered URL rule. Flask - itself assumes the name of the view function as - endpoint - :param view_func: the function to call when serving a request to the - provided endpoint - :param options: the options to be forwarded to the underlying - :class:`~werkzeug.routing.Rule` object. A change - to Werkzeug is handling of method options. methods - is a list of methods this rule should be limited - to (`GET`, `POST` etc.). By default a rule - just listens for `GET` (and implicitly `HEAD`). - Starting with Flask 0.6, `OPTIONS` is implicitly - added and handled by the standard request handling. - """ - def decorator(f): - endpoint = options.pop('endpoint', None) - self.add_url_rule(rule, endpoint, f, **options) - return f - return decorator - - @setupmethod - def endpoint(self, endpoint): - """A decorator to register a function as an endpoint. - Example:: - - @app.endpoint('example.endpoint') - def example(): - return "example" - - :param endpoint: the name of the endpoint - """ - def decorator(f): - self.view_functions[endpoint] = f - return f - return decorator - - @setupmethod - def errorhandler(self, code_or_exception): - """A decorator that is used to register a function give a given - error code. 
Example:: - - @app.errorhandler(404) - def page_not_found(error): - return 'This page does not exist', 404 - - You can also register handlers for arbitrary exceptions:: - - @app.errorhandler(DatabaseError) - def special_exception_handler(error): - return 'Database connection failed', 500 - - You can also register a function as error handler without using - the :meth:`errorhandler` decorator. The following example is - equivalent to the one above:: - - def page_not_found(error): - return 'This page does not exist', 404 - app.error_handler_spec[None][404] = page_not_found - - Setting error handlers via assignments to :attr:`error_handler_spec` - however is discouraged as it requires fidling with nested dictionaries - and the special case for arbitrary exception types. - - The first `None` refers to the active blueprint. If the error - handler should be application wide `None` shall be used. - - .. versionadded:: 0.7 - One can now additionally also register custom exception types - that do not necessarily have to be a subclass of the - :class:`~werkzeug.exceptions.HTTPException` class. - - :param code: the code as integer for the handler - """ - def decorator(f): - self._register_error_handler(None, code_or_exception, f) - return f - return decorator - - def register_error_handler(self, code_or_exception, f): - """Alternative error attach function to the :meth:`errorhandler` - decorator that is more straightforward to use for non decorator - usage. - - .. versionadded:: 0.7 - """ - self._register_error_handler(None, code_or_exception, f) - - @setupmethod - def _register_error_handler(self, key, code_or_exception, f): - if isinstance(code_or_exception, HTTPException): - code_or_exception = code_or_exception.code - if isinstance(code_or_exception, (int, long)): - assert code_or_exception != 500 or key is None, \ - 'It is currently not possible to register a 500 internal ' \ - 'server error on a per-blueprint level.' 
- self.error_handler_spec.setdefault(key, {})[code_or_exception] = f - else: - self.error_handler_spec.setdefault(key, {}).setdefault(None, []) \ - .append((code_or_exception, f)) - - @setupmethod - def template_filter(self, name=None): - """A decorator that is used to register custom template filter. - You can specify a name for the filter, otherwise the function - name will be used. Example:: - - @app.template_filter() - def reverse(s): - return s[::-1] - - :param name: the optional name of the filter, otherwise the - function name will be used. - """ - def decorator(f): - self.add_template_filter(f, name=name) - return f - return decorator - - @setupmethod - def add_template_filter(self, f, name=None): - """Register a custom template filter. Works exactly like the - :meth:`template_filter` decorator. - - :param name: the optional name of the filter, otherwise the - function name will be used. - """ - self.jinja_env.filters[name or f.__name__] = f - - @setupmethod - def before_request(self, f): - """Registers a function to run before each request.""" - self.before_request_funcs.setdefault(None, []).append(f) - return f - - @setupmethod - def before_first_request(self, f): - """Registers a function to be run before the first request to this - instance of the application. - - .. versionadded:: 0.8 - """ - self.before_first_request_funcs.append(f) - - @setupmethod - def after_request(self, f): - """Register a function to be run after each request. Your function - must take one parameter, a :attr:`response_class` object and return - a new response object or the same (see :meth:`process_response`). - - As of Flask 0.7 this function might not be executed at the end of the - request in case an unhandled exception ocurred. - """ - self.after_request_funcs.setdefault(None, []).append(f) - return f - - @setupmethod - def teardown_request(self, f): - """Register a function to be run at the end of each request, - regardless of whether there was an exception or not. 
These functions - are executed when the request context is popped, even if not an - actual request was performed. - - Example:: - - ctx = app.test_request_context() - ctx.push() - ... - ctx.pop() - - When ``ctx.pop()`` is executed in the above example, the teardown - functions are called just before the request context moves from the - stack of active contexts. This becomes relevant if you are using - such constructs in tests. - - Generally teardown functions must take every necesary step to avoid - that they will fail. If they do execute code that might fail they - will have to surround the execution of these code by try/except - statements and log ocurring errors. - - When a teardown function was called because of a exception it will - be passed an error object. - """ - self.teardown_request_funcs.setdefault(None, []).append(f) - return f - - @setupmethod - def teardown_appcontext(self, f): - """Registers a function to be called when the application context - ends. These functions are typically also called when the request - context is popped. - - Example:: - - ctx = app.app_context() - ctx.push() - ... - ctx.pop() - - When ``ctx.pop()`` is executed in the above example, the teardown - functions are called just before the app context moves from the - stack of active contexts. This becomes relevant if you are using - such constructs in tests. - - Since a request context typically also manages an application - context it would also be called when you pop a request context. - - When a teardown function was called because of an exception it will - be passed an error object. - - .. 
versionadded:: 0.9 - """ - self.teardown_appcontext_funcs.append(f) - return f - - @setupmethod - def context_processor(self, f): - """Registers a template context processor function.""" - self.template_context_processors[None].append(f) - return f - - @setupmethod - def url_value_preprocessor(self, f): - """Registers a function as URL value preprocessor for all view - functions of the application. It's called before the view functions - are called and can modify the url values provided. - """ - self.url_value_preprocessors.setdefault(None, []).append(f) - return f - - @setupmethod - def url_defaults(self, f): - """Callback function for URL defaults for all view functions of the - application. It's called with the endpoint and values and should - update the values passed in place. - """ - self.url_default_functions.setdefault(None, []).append(f) - return f - - def handle_http_exception(self, e): - """Handles an HTTP exception. By default this will invoke the - registered error handlers and fall back to returning the - exception as response. - - .. versionadded:: 0.3 - """ - handlers = self.error_handler_spec.get(request.blueprint) - if handlers and e.code in handlers: - handler = handlers[e.code] - else: - handler = self.error_handler_spec[None].get(e.code) - if handler is None: - return e - return handler(e) - - def trap_http_exception(self, e): - """Checks if an HTTP exception should be trapped or not. By default - this will return `False` for all exceptions except for a bad request - key error if ``TRAP_BAD_REQUEST_ERRORS`` is set to `True`. It - also returns `True` if ``TRAP_HTTP_EXCEPTIONS`` is set to `True`. - - This is called for all HTTP exceptions raised by a view function. - If it returns `True` for any exception the error handler for this - exception is not called and it shows up as regular exception in the - traceback. This is helpful for debugging implicitly raised HTTP - exceptions. - - .. 
versionadded:: 0.8 - """ - if self.config['TRAP_HTTP_EXCEPTIONS']: - return True - if self.config['TRAP_BAD_REQUEST_ERRORS']: - return isinstance(e, BadRequest) - return False - - def handle_user_exception(self, e): - """This method is called whenever an exception occurs that should be - handled. A special case are - :class:`~werkzeug.exception.HTTPException`\s which are forwarded by - this function to the :meth:`handle_http_exception` method. This - function will either return a response value or reraise the - exception with the same traceback. - - .. versionadded:: 0.7 - """ - exc_type, exc_value, tb = sys.exc_info() - assert exc_value is e - - # ensure not to trash sys.exc_info() at that point in case someone - # wants the traceback preserved in handle_http_exception. Of course - # we cannot prevent users from trashing it themselves in a custom - # trap_http_exception method so that's their fault then. - if isinstance(e, HTTPException) and not self.trap_http_exception(e): - return self.handle_http_exception(e) - - blueprint_handlers = () - handlers = self.error_handler_spec.get(request.blueprint) - if handlers is not None: - blueprint_handlers = handlers.get(None, ()) - app_handlers = self.error_handler_spec[None].get(None, ()) - for typecheck, handler in chain(blueprint_handlers, app_handlers): - if isinstance(e, typecheck): - return handler(e) - - raise exc_type, exc_value, tb - - def handle_exception(self, e): - """Default exception handling that kicks in when an exception - occours that is not caught. In debug mode the exception will - be re-raised immediately, otherwise it is logged and the handler - for a 500 internal server error is used. If no such handler - exists, a default 500 internal server error message is displayed. - - .. 
versionadded:: 0.3 - """ - exc_type, exc_value, tb = sys.exc_info() - - got_request_exception.send(self, exception=e) - handler = self.error_handler_spec[None].get(500) - - if self.propagate_exceptions: - # if we want to repropagate the exception, we can attempt to - # raise it with the whole traceback in case we can do that - # (the function was actually called from the except part) - # otherwise, we just raise the error again - if exc_value is e: - raise exc_type, exc_value, tb - else: - raise e - - self.log_exception((exc_type, exc_value, tb)) - if handler is None: - return InternalServerError() - return handler(e) - - def log_exception(self, exc_info): - """Logs an exception. This is called by :meth:`handle_exception` - if debugging is disabled and right before the handler is called. - The default implementation logs the exception as error on the - :attr:`logger`. - - .. versionadded:: 0.8 - """ - self.logger.error('Exception on %s [%s]' % ( - request.path, - request.method - ), exc_info=exc_info) - - def raise_routing_exception(self, request): - """Exceptions that are recording during routing are reraised with - this method. During debug we are not reraising redirect requests - for non ``GET``, ``HEAD``, or ``OPTIONS`` requests and we're raising - a different error instead to help debug situations. - - :internal: - """ - if not self.debug \ - or not isinstance(request.routing_exception, RequestRedirect) \ - or request.method in ('GET', 'HEAD', 'OPTIONS'): - raise request.routing_exception - - from .debughelpers import FormDataRoutingRedirect - raise FormDataRoutingRedirect(request) - - def dispatch_request(self): - """Does the request dispatching. Matches the URL and returns the - return value of the view or error handler. This does not have to - be a response object. In order to convert the return value to a - proper response object, call :func:`make_response`. - - .. 
versionchanged:: 0.7 - This no longer does the exception handling, this code was - moved to the new :meth:`full_dispatch_request`. - """ - req = _request_ctx_stack.top.request - if req.routing_exception is not None: - self.raise_routing_exception(req) - rule = req.url_rule - # if we provide automatic options for this URL and the - # request came with the OPTIONS method, reply automatically - if getattr(rule, 'provide_automatic_options', False) \ - and req.method == 'OPTIONS': - return self.make_default_options_response() - # otherwise dispatch to the handler for that endpoint - return self.view_functions[rule.endpoint](**req.view_args) - - def full_dispatch_request(self): - """Dispatches the request and on top of that performs request - pre and postprocessing as well as HTTP exception catching and - error handling. - - .. versionadded:: 0.7 - """ - self.try_trigger_before_first_request_functions() - try: - request_started.send(self) - rv = self.preprocess_request() - if rv is None: - rv = self.dispatch_request() - except Exception, e: - rv = self.handle_user_exception(e) - response = self.make_response(rv) - response = self.process_response(response) - request_finished.send(self, response=response) - return response - - def try_trigger_before_first_request_functions(self): - """Called before each request and will ensure that it triggers - the :attr:`before_first_request_funcs` and only exactly once per - application instance (which means process usually). - - :internal: - """ - if self._got_first_request: - return - with self._before_request_lock: - if self._got_first_request: - return - self._got_first_request = True - for func in self.before_first_request_funcs: - func() - - def make_default_options_response(self): - """This method is called to create the default `OPTIONS` response. - This can be changed through subclassing to change the default - behavior of `OPTIONS` responses. - - .. 
versionadded:: 0.7 - """ - adapter = _request_ctx_stack.top.url_adapter - if hasattr(adapter, 'allowed_methods'): - methods = adapter.allowed_methods() - else: - # fallback for Werkzeug < 0.7 - methods = [] - try: - adapter.match(method='--') - except MethodNotAllowed, e: - methods = e.valid_methods - except HTTPException, e: - pass - rv = self.response_class() - rv.allow.update(methods) - return rv - - def make_response(self, rv): - """Converts the return value from a view function to a real - response object that is an instance of :attr:`response_class`. - - The following types are allowed for `rv`: - - .. tabularcolumns:: |p{3.5cm}|p{9.5cm}| - - ======================= =========================================== - :attr:`response_class` the object is returned unchanged - :class:`str` a response object is created with the - string as body - :class:`unicode` a response object is created with the - string encoded to utf-8 as body - a WSGI function the function is called as WSGI application - and buffered as response object - :class:`tuple` A tuple in the form ``(response, status, - headers)`` where `response` is any of the - types defined here, `status` is a string - or an integer and `headers` is a list of - a dictionary with header values. - ======================= =========================================== - - :param rv: the return value from the view function - - .. versionchanged:: 0.9 - Previously a tuple was interpreted as the arguments for the - response object. - """ - status = headers = None - if isinstance(rv, tuple): - rv, status, headers = rv + (None,) * (3 - len(rv)) - - if rv is None: - raise ValueError('View function did not return a response') - - if not isinstance(rv, self.response_class): - # When we create a response object directly, we let the constructor - # set the headers and status. We do this because there can be - # some extra logic involved when creating these objects with - # specific values (like defualt content type selection). 
- if isinstance(rv, basestring): - rv = self.response_class(rv, headers=headers, status=status) - headers = status = None - else: - rv = self.response_class.force_type(rv, request.environ) - - if status is not None: - if isinstance(status, basestring): - rv.status = status - else: - rv.status_code = status - if headers: - rv.headers.extend(headers) - - return rv - - def create_url_adapter(self, request): - """Creates a URL adapter for the given request. The URL adapter - is created at a point where the request context is not yet set up - so the request is passed explicitly. - - .. versionadded:: 0.6 - - .. versionchanged:: 0.9 - This can now also be called without a request object when the - UR adapter is created for the application context. - """ - if request is not None: - return self.url_map.bind_to_environ(request.environ, - server_name=self.config['SERVER_NAME']) - # We need at the very least the server name to be set for this - # to work. - if self.config['SERVER_NAME'] is not None: - return self.url_map.bind( - self.config['SERVER_NAME'], - script_name=self.config['APPLICATION_ROOT'] or '/', - url_scheme=self.config['PREFERRED_URL_SCHEME']) - - def inject_url_defaults(self, endpoint, values): - """Injects the URL defaults for the given endpoint directly into - the values dictionary passed. This is used internally and - automatically called on URL building. - - .. versionadded:: 0.7 - """ - funcs = self.url_default_functions.get(None, ()) - if '.' in endpoint: - bp = endpoint.rsplit('.', 1)[0] - funcs = chain(funcs, self.url_default_functions.get(bp, ())) - for func in funcs: - func(endpoint, values) - - def handle_url_build_error(self, error, endpoint, values): - """Handle :class:`~werkzeug.routing.BuildError` on :meth:`url_for`. 
- """ - exc_type, exc_value, tb = sys.exc_info() - for handler in self.url_build_error_handlers: - try: - rv = handler(error, endpoint, values) - if rv is not None: - return rv - except BuildError, error: - pass - - # At this point we want to reraise the exception. If the error is - # still the same one we can reraise it with the original traceback, - # otherwise we raise it from here. - if error is exc_value: - raise exc_type, exc_value, tb - raise error - - def preprocess_request(self): - """Called before the actual request dispatching and will - call every as :meth:`before_request` decorated function. - If any of these function returns a value it's handled as - if it was the return value from the view and further - request handling is stopped. - - This also triggers the :meth:`url_value_processor` functions before - the actualy :meth:`before_request` functions are called. - """ - bp = _request_ctx_stack.top.request.blueprint - - funcs = self.url_value_preprocessors.get(None, ()) - if bp is not None and bp in self.url_value_preprocessors: - funcs = chain(funcs, self.url_value_preprocessors[bp]) - for func in funcs: - func(request.endpoint, request.view_args) - - funcs = self.before_request_funcs.get(None, ()) - if bp is not None and bp in self.before_request_funcs: - funcs = chain(funcs, self.before_request_funcs[bp]) - for func in funcs: - rv = func() - if rv is not None: - return rv - - def process_response(self, response): - """Can be overridden in order to modify the response object - before it's sent to the WSGI server. By default this will - call all the :meth:`after_request` decorated functions. - - .. versionchanged:: 0.5 - As of Flask 0.5 the functions registered for after request - execution are called in reverse order of registration. - - :param response: a :attr:`response_class` object. - :return: a new response object or the same, has to be an - instance of :attr:`response_class`. 
- """ - ctx = _request_ctx_stack.top - bp = ctx.request.blueprint - funcs = ctx._after_request_functions - if bp is not None and bp in self.after_request_funcs: - funcs = reversed(self.after_request_funcs[bp]) - if None in self.after_request_funcs: - funcs = chain(funcs, reversed(self.after_request_funcs[None])) - for handler in funcs: - response = handler(response) - if not self.session_interface.is_null_session(ctx.session): - self.save_session(ctx.session, response) - return response - - def do_teardown_request(self, exc=None): - """Called after the actual request dispatching and will - call every as :meth:`teardown_request` decorated function. This is - not actually called by the :class:`Flask` object itself but is always - triggered when the request context is popped. That way we have a - tighter control over certain resources under testing environments. - - .. versionchanged:: 0.9 - Added the `exc` argument. Previously this was always using the - current exception information. - """ - if exc is None: - exc = sys.exc_info()[1] - funcs = reversed(self.teardown_request_funcs.get(None, ())) - bp = _request_ctx_stack.top.request.blueprint - if bp is not None and bp in self.teardown_request_funcs: - funcs = chain(funcs, reversed(self.teardown_request_funcs[bp])) - for func in funcs: - rv = func(exc) - request_tearing_down.send(self, exc=exc) - - def do_teardown_appcontext(self, exc=None): - """Called when an application context is popped. This works pretty - much the same as :meth:`do_teardown_request` but for the application - context. - - .. versionadded:: 0.9 - """ - if exc is None: - exc = sys.exc_info()[1] - for func in reversed(self.teardown_appcontext_funcs): - func(exc) - appcontext_tearing_down.send(self, exc=exc) - - def app_context(self): - """Binds the application only. For as long as the application is bound - to the current context the :data:`flask.current_app` points to that - application. 
An application context is automatically created when a - request context is pushed if necessary. - - Example usage:: - - with app.app_context(): - ... - - .. versionadded:: 0.9 - """ - return AppContext(self) - - def request_context(self, environ): - """Creates a :class:`~flask.ctx.RequestContext` from the given - environment and binds it to the current context. This must be used in - combination with the `with` statement because the request is only bound - to the current context for the duration of the `with` block. - - Example usage:: - - with app.request_context(environ): - do_something_with(request) - - The object returned can also be used without the `with` statement - which is useful for working in the shell. The example above is - doing exactly the same as this code:: - - ctx = app.request_context(environ) - ctx.push() - try: - do_something_with(request) - finally: - ctx.pop() - - .. versionchanged:: 0.3 - Added support for non-with statement usage and `with` statement - is now passed the ctx object. - - :param environ: a WSGI environment - """ - return RequestContext(self, environ) - - def test_request_context(self, *args, **kwargs): - """Creates a WSGI environment from the given values (see - :func:`werkzeug.test.EnvironBuilder` for more information, this - function accepts the same arguments). - """ - from flask.testing import make_test_environ_builder - builder = make_test_environ_builder(self, *args, **kwargs) - try: - return self.request_context(builder.get_environ()) - finally: - builder.close() - - def wsgi_app(self, environ, start_response): - """The actual WSGI application. This is not implemented in - `__call__` so that middlewares can be applied without losing a - reference to the class. So instead of doing this:: - - app = MyMiddleware(app) - - It's a better idea to do this instead:: - - app.wsgi_app = MyMiddleware(app.wsgi_app) - - Then you still have the original application object around and - can continue to call methods on it. - - .. 
versionchanged:: 0.7 - The behavior of the before and after request callbacks was changed - under error conditions and a new callback was added that will - always execute at the end of the request, independent on if an - error ocurred or not. See :ref:`callbacks-and-errors`. - - :param environ: a WSGI environment - :param start_response: a callable accepting a status code, - a list of headers and an optional - exception context to start the response - """ - with self.request_context(environ): - try: - response = self.full_dispatch_request() - except Exception, e: - response = self.make_response(self.handle_exception(e)) - return response(environ, start_response) - - @property - def modules(self): - from warnings import warn - warn(DeprecationWarning('Flask.modules is deprecated, use ' - 'Flask.blueprints instead'), stacklevel=2) - return self.blueprints - - def __call__(self, environ, start_response): - """Shortcut for :attr:`wsgi_app`.""" - return self.wsgi_app(environ, start_response) diff --git a/libs/flask/blueprints.py b/libs/flask/blueprints.py deleted file mode 100644 index 9c55702891..0000000000 --- a/libs/flask/blueprints.py +++ /dev/null @@ -1,345 +0,0 @@ -# -*- coding: utf-8 -*- -""" - flask.blueprints - ~~~~~~~~~~~~~~~~ - - Blueprints are the recommended way to implement larger or more - pluggable applications in Flask 0.7 and later. - - :copyright: (c) 2011 by Armin Ronacher. - :license: BSD, see LICENSE for more details. -""" -from functools import update_wrapper - -from .helpers import _PackageBoundObject, _endpoint_from_view_func - - -class BlueprintSetupState(object): - """Temporary holder object for registering a blueprint with the - application. An instance of this class is created by the - :meth:`~flask.Blueprint.make_setup_state` method and later passed - to all register callback functions. 
- """ - - def __init__(self, blueprint, app, options, first_registration): - #: a reference to the current application - self.app = app - - #: a reference to the blueprint that created this setup state. - self.blueprint = blueprint - - #: a dictionary with all options that were passed to the - #: :meth:`~flask.Flask.register_blueprint` method. - self.options = options - - #: as blueprints can be registered multiple times with the - #: application and not everything wants to be registered - #: multiple times on it, this attribute can be used to figure - #: out if the blueprint was registered in the past already. - self.first_registration = first_registration - - subdomain = self.options.get('subdomain') - if subdomain is None: - subdomain = self.blueprint.subdomain - - #: The subdomain that the blueprint should be active for, `None` - #: otherwise. - self.subdomain = subdomain - - url_prefix = self.options.get('url_prefix') - if url_prefix is None: - url_prefix = self.blueprint.url_prefix - - #: The prefix that should be used for all URLs defined on the - #: blueprint. - self.url_prefix = url_prefix - - #: A dictionary with URL defaults that is added to each and every - #: URL that was defined with the blueprint. - self.url_defaults = dict(self.blueprint.url_values_defaults) - self.url_defaults.update(self.options.get('url_defaults', ())) - - def add_url_rule(self, rule, endpoint=None, view_func=None, **options): - """A helper method to register a rule (and optionally a view function) - to the application. The endpoint is automatically prefixed with the - blueprint's name. 
- """ - if self.url_prefix: - rule = self.url_prefix + rule - options.setdefault('subdomain', self.subdomain) - if endpoint is None: - endpoint = _endpoint_from_view_func(view_func) - defaults = self.url_defaults - if 'defaults' in options: - defaults = dict(defaults, **options.pop('defaults')) - self.app.add_url_rule(rule, '%s.%s' % (self.blueprint.name, endpoint), - view_func, defaults=defaults, **options) - - -class Blueprint(_PackageBoundObject): - """Represents a blueprint. A blueprint is an object that records - functions that will be called with the - :class:`~flask.blueprint.BlueprintSetupState` later to register functions - or other things on the main application. See :ref:`blueprints` for more - information. - - .. versionadded:: 0.7 - """ - - warn_on_modifications = False - _got_registered_once = False - - def __init__(self, name, import_name, static_folder=None, - static_url_path=None, template_folder=None, - url_prefix=None, subdomain=None, url_defaults=None): - _PackageBoundObject.__init__(self, import_name, template_folder) - self.name = name - self.url_prefix = url_prefix - self.subdomain = subdomain - self.static_folder = static_folder - self.static_url_path = static_url_path - self.deferred_functions = [] - self.view_functions = {} - if url_defaults is None: - url_defaults = {} - self.url_values_defaults = url_defaults - - def record(self, func): - """Registers a function that is called when the blueprint is - registered on the application. This function is called with the - state as argument as returned by the :meth:`make_setup_state` - method. - """ - if self._got_registered_once and self.warn_on_modifications: - from warnings import warn - warn(Warning('The blueprint was already registered once ' - 'but is getting modified now. 
These changes ' - 'will not show up.')) - self.deferred_functions.append(func) - - def record_once(self, func): - """Works like :meth:`record` but wraps the function in another - function that will ensure the function is only called once. If the - blueprint is registered a second time on the application, the - function passed is not called. - """ - def wrapper(state): - if state.first_registration: - func(state) - return self.record(update_wrapper(wrapper, func)) - - def make_setup_state(self, app, options, first_registration=False): - """Creates an instance of :meth:`~flask.blueprints.BlueprintSetupState` - object that is later passed to the register callback functions. - Subclasses can override this to return a subclass of the setup state. - """ - return BlueprintSetupState(self, app, options, first_registration) - - def register(self, app, options, first_registration=False): - """Called by :meth:`Flask.register_blueprint` to register a blueprint - on the application. This can be overridden to customize the register - behavior. Keyword arguments from - :func:`~flask.Flask.register_blueprint` are directly forwarded to this - method in the `options` dictionary. - """ - self._got_registered_once = True - state = self.make_setup_state(app, options, first_registration) - if self.has_static_folder: - state.add_url_rule(self.static_url_path + '/', - view_func=self.send_static_file, - endpoint='static') - - for deferred in self.deferred_functions: - deferred(state) - - def route(self, rule, **options): - """Like :meth:`Flask.route` but for a blueprint. The endpoint for the - :func:`url_for` function is prefixed with the name of the blueprint. - """ - def decorator(f): - endpoint = options.pop("endpoint", f.__name__) - self.add_url_rule(rule, endpoint, f, **options) - return f - return decorator - - def add_url_rule(self, rule, endpoint=None, view_func=None, **options): - """Like :meth:`Flask.add_url_rule` but for a blueprint. 
The endpoint for - the :func:`url_for` function is prefixed with the name of the blueprint. - """ - if endpoint: - assert '.' not in endpoint, "Blueprint endpoint's should not contain dot's" - self.record(lambda s: - s.add_url_rule(rule, endpoint, view_func, **options)) - - def endpoint(self, endpoint): - """Like :meth:`Flask.endpoint` but for a blueprint. This does not - prefix the endpoint with the blueprint name, this has to be done - explicitly by the user of this method. If the endpoint is prefixed - with a `.` it will be registered to the current blueprint, otherwise - it's an application independent endpoint. - """ - def decorator(f): - def register_endpoint(state): - state.app.view_functions[endpoint] = f - self.record_once(register_endpoint) - return f - return decorator - - def app_template_filter(self, name=None): - """Register a custom template filter, available application wide. Like - :meth:`Flask.template_filter` but for a blueprint. - - :param name: the optional name of the filter, otherwise the - function name will be used. - """ - def decorator(f): - self.add_app_template_filter(f, name=name) - return f - return decorator - - def add_app_template_filter(self, f, name=None): - """Register a custom template filter, available application wide. Like - :meth:`Flask.add_template_filter` but for a blueprint. Works exactly - like the :meth:`app_template_filter` decorator. - - :param name: the optional name of the filter, otherwise the - function name will be used. - """ - def register_template(state): - state.app.jinja_env.filters[name or f.__name__] = f - self.record_once(register_template) - - def before_request(self, f): - """Like :meth:`Flask.before_request` but for a blueprint. This function - is only executed before each request that is handled by a function of - that blueprint. 
- """ - self.record_once(lambda s: s.app.before_request_funcs - .setdefault(self.name, []).append(f)) - return f - - def before_app_request(self, f): - """Like :meth:`Flask.before_request`. Such a function is executed - before each request, even if outside of a blueprint. - """ - self.record_once(lambda s: s.app.before_request_funcs - .setdefault(None, []).append(f)) - return f - - def before_app_first_request(self, f): - """Like :meth:`Flask.before_first_request`. Such a function is - executed before the first request to the application. - """ - self.record_once(lambda s: s.app.before_first_request_funcs.append(f)) - return f - - def after_request(self, f): - """Like :meth:`Flask.after_request` but for a blueprint. This function - is only executed after each request that is handled by a function of - that blueprint. - """ - self.record_once(lambda s: s.app.after_request_funcs - .setdefault(self.name, []).append(f)) - return f - - def after_app_request(self, f): - """Like :meth:`Flask.after_request` but for a blueprint. Such a function - is executed after each request, even if outside of the blueprint. - """ - self.record_once(lambda s: s.app.after_request_funcs - .setdefault(None, []).append(f)) - return f - - def teardown_request(self, f): - """Like :meth:`Flask.teardown_request` but for a blueprint. This - function is only executed when tearing down requests handled by a - function of that blueprint. Teardown request functions are executed - when the request context is popped, even when no actual request was - performed. - """ - self.record_once(lambda s: s.app.teardown_request_funcs - .setdefault(self.name, []).append(f)) - return f - - def teardown_app_request(self, f): - """Like :meth:`Flask.teardown_request` but for a blueprint. Such a - function is executed when tearing down each request, even if outside of - the blueprint. 
- """ - self.record_once(lambda s: s.app.teardown_request_funcs - .setdefault(None, []).append(f)) - return f - - def context_processor(self, f): - """Like :meth:`Flask.context_processor` but for a blueprint. This - function is only executed for requests handled by a blueprint. - """ - self.record_once(lambda s: s.app.template_context_processors - .setdefault(self.name, []).append(f)) - return f - - def app_context_processor(self, f): - """Like :meth:`Flask.context_processor` but for a blueprint. Such a - function is executed each request, even if outside of the blueprint. - """ - self.record_once(lambda s: s.app.template_context_processors - .setdefault(None, []).append(f)) - return f - - def app_errorhandler(self, code): - """Like :meth:`Flask.errorhandler` but for a blueprint. This - handler is used for all requests, even if outside of the blueprint. - """ - def decorator(f): - self.record_once(lambda s: s.app.errorhandler(code)(f)) - return f - return decorator - - def url_value_preprocessor(self, f): - """Registers a function as URL value preprocessor for this - blueprint. It's called before the view functions are called and - can modify the url values provided. - """ - self.record_once(lambda s: s.app.url_value_preprocessors - .setdefault(self.name, []).append(f)) - return f - - def url_defaults(self, f): - """Callback function for URL defaults for this blueprint. It's called - with the endpoint and values and should update the values passed - in place. - """ - self.record_once(lambda s: s.app.url_default_functions - .setdefault(self.name, []).append(f)) - return f - - def app_url_value_preprocessor(self, f): - """Same as :meth:`url_value_preprocessor` but application wide. - """ - self.record_once(lambda s: s.app.url_value_preprocessors - .setdefault(None, []).append(f)) - return f - - def app_url_defaults(self, f): - """Same as :meth:`url_defaults` but application wide. 
- """ - self.record_once(lambda s: s.app.url_default_functions - .setdefault(None, []).append(f)) - return f - - def errorhandler(self, code_or_exception): - """Registers an error handler that becomes active for this blueprint - only. Please be aware that routing does not happen local to a - blueprint so an error handler for 404 usually is not handled by - a blueprint unless it is caused inside a view function. Another - special case is the 500 internal server error which is always looked - up from the application. - - Otherwise works as the :meth:`~flask.Flask.errorhandler` decorator - of the :class:`~flask.Flask` object. - """ - def decorator(f): - self.record_once(lambda s: s.app._register_error_handler( - self.name, code_or_exception, f)) - return f - return decorator diff --git a/libs/flask/config.py b/libs/flask/config.py deleted file mode 100644 index 759fd48822..0000000000 --- a/libs/flask/config.py +++ /dev/null @@ -1,168 +0,0 @@ -# -*- coding: utf-8 -*- -""" - flask.config - ~~~~~~~~~~~~ - - Implements the configuration related objects. - - :copyright: (c) 2011 by Armin Ronacher. - :license: BSD, see LICENSE for more details. -""" - -from __future__ import with_statement - -import imp -import os -import errno - -from werkzeug.utils import import_string - - -class ConfigAttribute(object): - """Makes an attribute forward to the config""" - - def __init__(self, name, get_converter=None): - self.__name__ = name - self.get_converter = get_converter - - def __get__(self, obj, type=None): - if obj is None: - return self - rv = obj.config[self.__name__] - if self.get_converter is not None: - rv = self.get_converter(rv) - return rv - - def __set__(self, obj, value): - obj.config[self.__name__] = value - - -class Config(dict): - """Works exactly like a dict but provides ways to fill it from files - or special dictionaries. There are two common patterns to populate the - config. 
- - Either you can fill the config from a config file:: - - app.config.from_pyfile('yourconfig.cfg') - - Or alternatively you can define the configuration options in the - module that calls :meth:`from_object` or provide an import path to - a module that should be loaded. It is also possible to tell it to - use the same module and with that provide the configuration values - just before the call:: - - DEBUG = True - SECRET_KEY = 'development key' - app.config.from_object(__name__) - - In both cases (loading from any Python file or loading from modules), - only uppercase keys are added to the config. This makes it possible to use - lowercase values in the config file for temporary values that are not added - to the config or to define the config keys in the same file that implements - the application. - - Probably the most interesting way to load configurations is from an - environment variable pointing to a file:: - - app.config.from_envvar('YOURAPPLICATION_SETTINGS') - - In this case before launching the application you have to set this - environment variable to the file you want to use. On Linux and OS X - use the export statement:: - - export YOURAPPLICATION_SETTINGS='/path/to/config/file' - - On windows use `set` instead. - - :param root_path: path to which files are read relative from. When the - config object is created by the application, this is - the application's :attr:`~flask.Flask.root_path`. - :param defaults: an optional dictionary of default values - """ - - def __init__(self, root_path, defaults=None): - dict.__init__(self, defaults or {}) - self.root_path = root_path - - def from_envvar(self, variable_name, silent=False): - """Loads a configuration from an environment variable pointing to - a configuration file. 
This is basically just a shortcut with nicer - error messages for this line of code:: - - app.config.from_pyfile(os.environ['YOURAPPLICATION_SETTINGS']) - - :param variable_name: name of the environment variable - :param silent: set to `True` if you want silent failure for missing - files. - :return: bool. `True` if able to load config, `False` otherwise. - """ - rv = os.environ.get(variable_name) - if not rv: - if silent: - return False - raise RuntimeError('The environment variable %r is not set ' - 'and as such configuration could not be ' - 'loaded. Set this variable and make it ' - 'point to a configuration file' % - variable_name) - return self.from_pyfile(rv, silent=silent) - - def from_pyfile(self, filename, silent=False): - """Updates the values in the config from a Python file. This function - behaves as if the file was imported as module with the - :meth:`from_object` function. - - :param filename: the filename of the config. This can either be an - absolute filename or a filename relative to the - root path. - :param silent: set to `True` if you want silent failure for missing - files. - - .. versionadded:: 0.7 - `silent` parameter. - """ - filename = os.path.join(self.root_path, filename) - d = imp.new_module('config') - d.__file__ = filename - try: - execfile(filename, d.__dict__) - except IOError, e: - if silent and e.errno in (errno.ENOENT, errno.EISDIR): - return False - e.strerror = 'Unable to load configuration file (%s)' % e.strerror - raise - self.from_object(d) - return True - - def from_object(self, obj): - """Updates the values from the given object. An object can be of one - of the following two types: - - - a string: in this case the object with that name will be imported - - an actual object reference: that object is used directly - - Objects are usually either modules or classes. - - Just the uppercase variables in that object are stored in the config. 
- Example usage:: - - app.config.from_object('yourapplication.default_config') - from yourapplication import default_config - app.config.from_object(default_config) - - You should not use this function to load the actual configuration but - rather configuration defaults. The actual config should be loaded - with :meth:`from_pyfile` and ideally from a location not within the - package because the package might be installed system wide. - - :param obj: an import name or object - """ - if isinstance(obj, basestring): - obj = import_string(obj) - for key in dir(obj): - if key.isupper(): - self[key] = getattr(obj, key) - - def __repr__(self): - return '<%s %s>' % (self.__class__.__name__, dict.__repr__(self)) diff --git a/libs/flask/ctx.py b/libs/flask/ctx.py deleted file mode 100644 index 3ea42a279b..0000000000 --- a/libs/flask/ctx.py +++ /dev/null @@ -1,295 +0,0 @@ -# -*- coding: utf-8 -*- -""" - flask.ctx - ~~~~~~~~~ - - Implements the objects required to keep the context. - - :copyright: (c) 2011 by Armin Ronacher. - :license: BSD, see LICENSE for more details. -""" - -import sys - -from werkzeug.exceptions import HTTPException - -from .globals import _request_ctx_stack, _app_ctx_stack -from .module import blueprint_is_module - - -class _RequestGlobals(object): - """A plain object.""" - pass - - -def after_this_request(f): - """Executes a function after this request. This is useful to modify - response objects. The function is passed the response object and has - to return the same or a new one. - - Example:: - - @app.route('/') - def index(): - @after_this_request - def add_header(response): - response.headers['X-Foo'] = 'Parachute' - return response - return 'Hello World!' - - This is more useful if a function other than the view function wants to - modify a response. For instance think of a decorator that wants to add - some headers without converting the return value into a response object. - - .. 
versionadded:: 0.9 - """ - _request_ctx_stack.top._after_request_functions.append(f) - return f - - -def has_request_context(): - """If you have code that wants to test if a request context is there or - not this function can be used. For instance, you may want to take advantage - of request information if the request object is available, but fail - silently if it is unavailable. - - :: - - class User(db.Model): - - def __init__(self, username, remote_addr=None): - self.username = username - if remote_addr is None and has_request_context(): - remote_addr = request.remote_addr - self.remote_addr = remote_addr - - Alternatively you can also just test any of the context bound objects - (such as :class:`request` or :class:`g` for truthness):: - - class User(db.Model): - - def __init__(self, username, remote_addr=None): - self.username = username - if remote_addr is None and request: - remote_addr = request.remote_addr - self.remote_addr = remote_addr - - .. versionadded:: 0.7 - """ - return _request_ctx_stack.top is not None - - -def has_app_context(): - """Works like :func:`has_request_context` but for the application - context. You can also just do a boolean check on the - :data:`current_app` object instead. - - .. versionadded:: 0.9 - """ - return _app_ctx_stack.top is not None - - -class AppContext(object): - """The application context binds an application object implicitly - to the current thread or greenlet, similar to how the - :class:`RequestContext` binds request information. The application - context is also implicitly created if a request context is created - but the application is not on top of the individual application - context. - """ - - def __init__(self, app): - self.app = app - self.url_adapter = app.create_url_adapter(None) - - # Like request context, app contexts can be pushed multiple times - # but there a basic "refcount" is enough to track them. 
- self._refcnt = 0 - - def push(self): - """Binds the app context to the current context.""" - self._refcnt += 1 - _app_ctx_stack.push(self) - - def pop(self, exc=None): - """Pops the app context.""" - self._refcnt -= 1 - if self._refcnt <= 0: - if exc is None: - exc = sys.exc_info()[1] - self.app.do_teardown_appcontext(exc) - rv = _app_ctx_stack.pop() - assert rv is self, 'Popped wrong app context. (%r instead of %r)' \ - % (rv, self) - - def __enter__(self): - self.push() - return self - - def __exit__(self, exc_type, exc_value, tb): - self.pop(exc_value) - - -class RequestContext(object): - """The request context contains all request relevant information. It is - created at the beginning of the request and pushed to the - `_request_ctx_stack` and removed at the end of it. It will create the - URL adapter and request object for the WSGI environment provided. - - Do not attempt to use this class directly, instead use - :meth:`~flask.Flask.test_request_context` and - :meth:`~flask.Flask.request_context` to create this object. - - When the request context is popped, it will evaluate all the - functions registered on the application for teardown execution - (:meth:`~flask.Flask.teardown_request`). - - The request context is automatically popped at the end of the request - for you. In debug mode the request context is kept around if - exceptions happen so that interactive debuggers have a chance to - introspect the data. With 0.4 this can also be forced for requests - that did not fail and outside of `DEBUG` mode. By setting - ``'flask._preserve_context'`` to `True` on the WSGI environment the - context will not pop itself at the end of the request. This is used by - the :meth:`~flask.Flask.test_client` for example to implement the - deferred cleanup functionality. - - You might find this helpful for unittests where you need the - information from the context local around for a little longer. 
Make - sure to properly :meth:`~werkzeug.LocalStack.pop` the stack yourself in - that situation, otherwise your unittests will leak memory. - """ - - def __init__(self, app, environ): - self.app = app - self.request = app.request_class(environ) - self.url_adapter = app.create_url_adapter(self.request) - self.g = app.request_globals_class() - self.flashes = None - self.session = None - - # Request contexts can be pushed multiple times and interleaved with - # other request contexts. Now only if the last level is popped we - # get rid of them. Additionally if an application context is missing - # one is created implicitly so for each level we add this information - self._implicit_app_ctx_stack = [] - - # indicator if the context was preserved. Next time another context - # is pushed the preserved context is popped. - self.preserved = False - - # Functions that should be executed after the request on the response - # object. These will be called before the regular "after_request" - # functions. - self._after_request_functions = [] - - self.match_request() - - # XXX: Support for deprecated functionality. This is going away with - # Flask 1.0 - blueprint = self.request.blueprint - if blueprint is not None: - # better safe than sorry, we don't want to break code that - # already worked - bp = app.blueprints.get(blueprint) - if bp is not None and blueprint_is_module(bp): - self.request._is_old_module = True - - def match_request(self): - """Can be overridden by a subclass to hook into the matching - of the request. - """ - try: - url_rule, self.request.view_args = \ - self.url_adapter.match(return_rule=True) - self.request.url_rule = url_rule - except HTTPException, e: - self.request.routing_exception = e - - def push(self): - """Binds the request context to the current context.""" - # If an exception ocurrs in debug mode or if context preservation is - # activated under exception situations exactly one context stays - # on the stack. 
The rationale is that you want to access that - # information under debug situations. However if someone forgets to - # pop that context again we want to make sure that on the next push - # it's invalidated otherwise we run at risk that something leaks - # memory. This is usually only a problem in testsuite since this - # functionality is not active in production environments. - top = _request_ctx_stack.top - if top is not None and top.preserved: - top.pop() - - # Before we push the request context we have to ensure that there - # is an application context. - app_ctx = _app_ctx_stack.top - if app_ctx is None or app_ctx.app != self.app: - app_ctx = self.app.app_context() - app_ctx.push() - self._implicit_app_ctx_stack.append(app_ctx) - else: - self._implicit_app_ctx_stack.append(None) - - _request_ctx_stack.push(self) - - # Open the session at the moment that the request context is - # available. This allows a custom open_session method to use the - # request context (e.g. flask-sqlalchemy). - self.session = self.app.open_session(self.request) - if self.session is None: - self.session = self.app.make_null_session() - - def pop(self, exc=None): - """Pops the request context and unbinds it by doing that. This will - also trigger the execution of functions registered by the - :meth:`~flask.Flask.teardown_request` decorator. - - .. versionchanged:: 0.9 - Added the `exc` argument. - """ - app_ctx = self._implicit_app_ctx_stack.pop() - - clear_request = False - if not self._implicit_app_ctx_stack: - self.preserved = False - if exc is None: - exc = sys.exc_info()[1] - self.app.do_teardown_request(exc) - clear_request = True - - rv = _request_ctx_stack.pop() - assert rv is self, 'Popped wrong request context. (%r instead of %r)' \ - % (rv, self) - - # get rid of circular dependencies at the end of the request - # so that we don't require the GC to be active. - if clear_request: - rv.request.environ['werkzeug.request'] = None - - # Get rid of the app as well if necessary. 
- if app_ctx is not None: - app_ctx.pop(exc) - - def __enter__(self): - self.push() - return self - - def __exit__(self, exc_type, exc_value, tb): - # do not pop the request stack if we are in debug mode and an - # exception happened. This will allow the debugger to still - # access the request object in the interactive shell. Furthermore - # the context can be force kept alive for the test client. - # See flask.testing for how this works. - if self.request.environ.get('flask._preserve_context') or \ - (tb is not None and self.app.preserve_context_on_exception): - self.preserved = True - else: - self.pop(exc_value) - - def __repr__(self): - return '<%s \'%s\' [%s] of %s>' % ( - self.__class__.__name__, - self.request.url, - self.request.method, - self.app.name - ) diff --git a/libs/flask/debughelpers.py b/libs/flask/debughelpers.py deleted file mode 100644 index edf8c111a3..0000000000 --- a/libs/flask/debughelpers.py +++ /dev/null @@ -1,79 +0,0 @@ -# -*- coding: utf-8 -*- -""" - flask.debughelpers - ~~~~~~~~~~~~~~~~~~ - - Various helpers to make the development experience better. - - :copyright: (c) 2011 by Armin Ronacher. - :license: BSD, see LICENSE for more details. -""" - - -class DebugFilesKeyError(KeyError, AssertionError): - """Raised from request.files during debugging. The idea is that it can - provide a better error message than just a generic KeyError/BadRequest. - """ - - def __init__(self, request, key): - form_matches = request.form.getlist(key) - buf = ['You tried to access the file "%s" in the request.files ' - 'dictionary but it does not exist. The mimetype for the request ' - 'is "%s" instead of "multipart/form-data" which means that no ' - 'file contents were transmitted. To fix this error you should ' - 'provide enctype="multipart/form-data" in your form.' % - (key, request.mimetype)] - if form_matches: - buf.append('\n\nThe browser instead transmitted some file names. 
' - 'This was submitted: %s' % ', '.join('"%s"' % x - for x in form_matches)) - self.msg = ''.join(buf).encode('utf-8') - - def __str__(self): - return self.msg - - -class FormDataRoutingRedirect(AssertionError): - """This exception is raised by Flask in debug mode if it detects a - redirect caused by the routing system when the request method is not - GET, HEAD or OPTIONS. Reasoning: form data will be dropped. - """ - - def __init__(self, request): - exc = request.routing_exception - buf = ['A request was sent to this URL (%s) but a redirect was ' - 'issued automatically by the routing system to "%s".' - % (request.url, exc.new_url)] - - # In case just a slash was appended we can be extra helpful - if request.base_url + '/' == exc.new_url.split('?')[0]: - buf.append(' The URL was defined with a trailing slash so ' - 'Flask will automatically redirect to the URL ' - 'with the trailing slash if it was accessed ' - 'without one.') - - buf.append(' Make sure to directly send your %s-request to this URL ' - 'since we can\'t make browsers or HTTP clients redirect ' - 'with form data reliably or without user interaction.' % - request.method) - buf.append('\n\nNote: this exception is only raised in debug mode') - AssertionError.__init__(self, ''.join(buf).encode('utf-8')) - - -def attach_enctype_error_multidict(request): - """Since Flask 0.8 we're monkeypatching the files object in case a - request is detected that does not use multipart form data but the files - object is accessed. 
- """ - oldcls = request.files.__class__ - class newcls(oldcls): - def __getitem__(self, key): - try: - return oldcls.__getitem__(self, key) - except KeyError, e: - if key not in request.form: - raise - raise DebugFilesKeyError(request, key) - newcls.__name__ = oldcls.__name__ - newcls.__module__ = oldcls.__module__ - request.files.__class__ = newcls diff --git a/libs/flask/exceptions.py b/libs/flask/exceptions.py deleted file mode 100644 index 9ccdedaba1..0000000000 --- a/libs/flask/exceptions.py +++ /dev/null @@ -1,49 +0,0 @@ -# -*- coding: utf-8 -*- -""" - flask.exceptions - ~~~~~~~~~~~~ - - Flask specific additions to :class:`~werkzeug.exceptions.HTTPException` - - :copyright: (c) 2011 by Armin Ronacher. - :license: BSD, see LICENSE for more details. -""" -from werkzeug.exceptions import HTTPException, BadRequest -from .helpers import json - - -class JSONHTTPException(HTTPException): - """A base class for HTTP exceptions with ``Content-Type: - application/json``. - - The ``description`` attribute of this class must set to a string (*not* an - HTML string) which describes the error. - - """ - - def get_body(self, environ): - """Overrides :meth:`werkzeug.exceptions.HTTPException.get_body` to - return the description of this error in JSON format instead of HTML. - - """ - return json.dumps(dict(description=self.get_description(environ))) - - def get_headers(self, environ): - """Returns a list of headers including ``Content-Type: - application/json``. - - """ - return [('Content-Type', 'application/json')] - - -class JSONBadRequest(JSONHTTPException, BadRequest): - """Represents an HTTP ``400 Bad Request`` error whose body contains an - error message in JSON format instead of HTML format (as in the superclass). - - """ - - #: The description of the error which occurred as a string. - description = ( - 'The browser (or proxy) sent a request that this server could not ' - 'understand.' 
- ) diff --git a/libs/flask/ext/__init__.py b/libs/flask/ext/__init__.py deleted file mode 100644 index f29958a19f..0000000000 --- a/libs/flask/ext/__init__.py +++ /dev/null @@ -1,29 +0,0 @@ -# -*- coding: utf-8 -*- -""" - flask.ext - ~~~~~~~~~ - - Redirect imports for extensions. This module basically makes it possible - for us to transition from flaskext.foo to flask_foo without having to - force all extensions to upgrade at the same time. - - When a user does ``from flask.ext.foo import bar`` it will attempt to - import ``from flask_foo import bar`` first and when that fails it will - try to import ``from flaskext.foo import bar``. - - We're switching from namespace packages because it was just too painful for - everybody involved. - - :copyright: (c) 2011 by Armin Ronacher. - :license: BSD, see LICENSE for more details. -""" - - -def setup(): - from ..exthook import ExtensionImporter - importer = ExtensionImporter(['flask_%s', 'flaskext.%s'], __name__) - importer.install() - - -setup() -del setup diff --git a/libs/flask/exthook.py b/libs/flask/exthook.py deleted file mode 100644 index bb1deb2928..0000000000 --- a/libs/flask/exthook.py +++ /dev/null @@ -1,119 +0,0 @@ -# -*- coding: utf-8 -*- -""" - flask.exthook - ~~~~~~~~~~~~~ - - Redirect imports for extensions. This module basically makes it possible - for us to transition from flaskext.foo to flask_foo without having to - force all extensions to upgrade at the same time. - - When a user does ``from flask.ext.foo import bar`` it will attempt to - import ``from flask_foo import bar`` first and when that fails it will - try to import ``from flaskext.foo import bar``. - - We're switching from namespace packages because it was just too painful for - everybody involved. - - This is used by `flask.ext`. - - :copyright: (c) 2011 by Armin Ronacher. - :license: BSD, see LICENSE for more details. 
-""" -import sys -import os - - -class ExtensionImporter(object): - """This importer redirects imports from this submodule to other locations. - This makes it possible to transition from the old flaskext.name to the - newer flask_name without people having a hard time. - """ - - def __init__(self, module_choices, wrapper_module): - self.module_choices = module_choices - self.wrapper_module = wrapper_module - self.prefix = wrapper_module + '.' - self.prefix_cutoff = wrapper_module.count('.') + 1 - - def __eq__(self, other): - return self.__class__.__module__ == other.__class__.__module__ and \ - self.__class__.__name__ == other.__class__.__name__ and \ - self.wrapper_module == other.wrapper_module and \ - self.module_choices == other.module_choices - - def __ne__(self, other): - return not self.__eq__(other) - - def install(self): - sys.meta_path[:] = [x for x in sys.meta_path if self != x] + [self] - - def find_module(self, fullname, path=None): - if fullname.startswith(self.prefix): - return self - - def load_module(self, fullname): - if fullname in sys.modules: - return sys.modules[fullname] - modname = fullname.split('.', self.prefix_cutoff)[self.prefix_cutoff] - for path in self.module_choices: - realname = path % modname - try: - __import__(realname) - except ImportError: - exc_type, exc_value, tb = sys.exc_info() - # since we only establish the entry in sys.modules at the - # very this seems to be redundant, but if recursive imports - # happen we will call into the move import a second time. - # On the second invocation we still don't have an entry for - # fullname in sys.modules, but we will end up with the same - # fake module name and that import will succeed since this - # one already has a temporary entry in the modules dict. - # Since this one "succeeded" temporarily that second - # invocation now will have created a fullname entry in - # sys.modules which we have to kill. 
- sys.modules.pop(fullname, None) - - # If it's an important traceback we reraise it, otherwise - # we swallow it and try the next choice. The skipped frame - # is the one from __import__ above which we don't care about - if self.is_important_traceback(realname, tb): - raise exc_type, exc_value, tb.tb_next - continue - module = sys.modules[fullname] = sys.modules[realname] - if '.' not in modname: - setattr(sys.modules[self.wrapper_module], modname, module) - return module - raise ImportError('No module named %s' % fullname) - - def is_important_traceback(self, important_module, tb): - """Walks a traceback's frames and checks if any of the frames - originated in the given important module. If that is the case then we - were able to import the module itself but apparently something went - wrong when the module was imported. (Eg: import of an import failed). - """ - while tb is not None: - if self.is_important_frame(important_module, tb): - return True - tb = tb.tb_next - return False - - def is_important_frame(self, important_module, tb): - """Checks a single frame if it's important.""" - g = tb.tb_frame.f_globals - if '__name__' not in g: - return False - - module_name = g['__name__'] - - # Python 2.7 Behavior. Modules are cleaned up late so the - # name shows up properly here. Success! - if module_name == important_module: - return True - - # Some python verisons will will clean up modules so early that the - # module name at that point is no longer set. Try guessing from - # the filename then. 
- filename = os.path.abspath(tb.tb_frame.f_code.co_filename) - test_string = os.path.sep + important_module.replace('.', os.path.sep) - return test_string + '.py' in filename or \ - test_string + os.path.sep + '__init__.py' in filename diff --git a/libs/flask/globals.py b/libs/flask/globals.py deleted file mode 100644 index f6d6248537..0000000000 --- a/libs/flask/globals.py +++ /dev/null @@ -1,36 +0,0 @@ -# -*- coding: utf-8 -*- -""" - flask.globals - ~~~~~~~~~~~~~ - - Defines all the global objects that are proxies to the current - active context. - - :copyright: (c) 2011 by Armin Ronacher. - :license: BSD, see LICENSE for more details. -""" - -from functools import partial -from werkzeug.local import LocalStack, LocalProxy - -def _lookup_object(name): - top = _request_ctx_stack.top - if top is None: - raise RuntimeError('working outside of request context') - return getattr(top, name) - - -def _find_app(): - top = _app_ctx_stack.top - if top is None: - raise RuntimeError('working outside of application context') - return top.app - - -# context locals -_request_ctx_stack = LocalStack() -_app_ctx_stack = LocalStack() -current_app = LocalProxy(_find_app) -request = LocalProxy(partial(_lookup_object, 'request')) -session = LocalProxy(partial(_lookup_object, 'session')) -g = LocalProxy(partial(_lookup_object, 'g')) diff --git a/libs/flask/helpers.py b/libs/flask/helpers.py deleted file mode 100644 index 501a2f811c..0000000000 --- a/libs/flask/helpers.py +++ /dev/null @@ -1,893 +0,0 @@ -# -*- coding: utf-8 -*- -""" - flask.helpers - ~~~~~~~~~~~~~ - - Implements various helpers. - - :copyright: (c) 2011 by Armin Ronacher. - :license: BSD, see LICENSE for more details. 
-""" - -from __future__ import with_statement - -import os -import sys -import pkgutil -import posixpath -import mimetypes -from time import time -from zlib import adler32 -from threading import RLock -from werkzeug.routing import BuildError -from werkzeug.urls import url_quote -from functools import update_wrapper - -# try to load the best simplejson implementation available. If JSON -# is not installed, we add a failing class. -json_available = True -json = None -try: - import simplejson as json -except ImportError: - try: - import json - except ImportError: - try: - # Google Appengine offers simplejson via django - from django.utils import simplejson as json - except ImportError: - json_available = False - - -from werkzeug.datastructures import Headers -from werkzeug.exceptions import NotFound - -# this was moved in 0.7 -try: - from werkzeug.wsgi import wrap_file -except ImportError: - from werkzeug.utils import wrap_file - -from jinja2 import FileSystemLoader - -from .globals import session, _request_ctx_stack, _app_ctx_stack, \ - current_app, request - - -def _assert_have_json(): - """Helper function that fails if JSON is unavailable.""" - if not json_available: - raise RuntimeError('simplejson not installed') - - -# figure out if simplejson escapes slashes. This behavior was changed -# from one version to another without reason. -if not json_available or '\\/' not in json.dumps('/'): - - def _tojson_filter(*args, **kwargs): - if __debug__: - _assert_have_json() - return json.dumps(*args, **kwargs).replace('/', '\\/') -else: - _tojson_filter = json.dumps - - -# sentinel -_missing = object() - - -# what separators does this operating system provide that are not a slash? -# this is used by the send_from_directory function to ensure that nobody is -# able to access files from outside the filesystem. 
-_os_alt_seps = list(sep for sep in [os.path.sep, os.path.altsep] - if sep not in (None, '/')) - - -def _endpoint_from_view_func(view_func): - """Internal helper that returns the default endpoint for a given - function. This always is the function name. - """ - assert view_func is not None, 'expected view func if endpoint ' \ - 'is not provided.' - return view_func.__name__ - - -def stream_with_context(generator_or_function): - """Request contexts disappear when the response is started on the server. - This is done for efficiency reasons and to make it less likely to encounter - memory leaks with badly written WSGI middlewares. The downside is that if - you are using streamed responses, the generator cannot access request bound - information any more. - - This function however can help you keep the context around for longer:: - - from flask import stream_with_context, request, Response - - @app.route('/stream') - def streamed_response(): - @stream_with_context - def generate(): - yield 'Hello ' - yield request.args['name'] - yield '!' - return Response(generate()) - - Alternatively it can also be used around a specific generator: - - from flask import stream_with_context, request, Response - - @app.route('/stream') - def streamed_response(): - def generate(): - yield 'Hello ' - yield request.args['name'] - yield '!' - return Response(stream_with_context(generate())) - - .. versionadded:: 0.9 - """ - try: - gen = iter(generator_or_function) - except TypeError: - def decorator(*args, **kwargs): - gen = generator_or_function() - return stream_with_context(gen) - return update_wrapper(decorator, generator_or_function) - - def generator(): - ctx = _request_ctx_stack.top - if ctx is None: - raise RuntimeError('Attempted to stream with context but ' - 'there was no context in the first place to keep around.') - with ctx: - # Dummy sentinel. Has to be inside the context block or we're - # not actually keeping the context around. 
- yield None - - # The try/finally is here so that if someone passes a WSGI level - # iterator in we're still running the cleanup logic. Generators - # don't need that because they are closed on their destruction - # automatically. - try: - for item in gen: - yield item - finally: - if hasattr(gen, 'close'): - gen.close() - - # The trick is to start the generator. Then the code execution runs until - # the first dummy None is yielded at which point the context was already - # pushed. This item is discarded. Then when the iteration continues the - # real generator is executed. - wrapped_g = generator() - wrapped_g.next() - return wrapped_g - - -def jsonify(*args, **kwargs): - """Creates a :class:`~flask.Response` with the JSON representation of - the given arguments with an `application/json` mimetype. The arguments - to this function are the same as to the :class:`dict` constructor. - - Example usage:: - - @app.route('/_get_current_user') - def get_current_user(): - return jsonify(username=g.user.username, - email=g.user.email, - id=g.user.id) - - This will send a JSON response like this to the browser:: - - { - "username": "admin", - "email": "admin@localhost", - "id": 42 - } - - This requires Python 2.6 or an installed version of simplejson. For - security reasons only objects are supported toplevel. For more - information about this, have a look at :ref:`json-security`. - - .. versionadded:: 0.2 - """ - if __debug__: - _assert_have_json() - return current_app.response_class(json.dumps(dict(*args, **kwargs), - indent=None if request.is_xhr else 2), mimetype='application/json') - - -def make_response(*args): - """Sometimes it is necessary to set additional headers in a view. Because - views do not have to return response objects but can return a value that - is converted into a response object by Flask itself, it becomes tricky to - add headers to it. 
This function can be called instead of using a return - and you will get a response object which you can use to attach headers. - - If view looked like this and you want to add a new header:: - - def index(): - return render_template('index.html', foo=42) - - You can now do something like this:: - - def index(): - response = make_response(render_template('index.html', foo=42)) - response.headers['X-Parachutes'] = 'parachutes are cool' - return response - - This function accepts the very same arguments you can return from a - view function. This for example creates a response with a 404 error - code:: - - response = make_response(render_template('not_found.html'), 404) - - The other use case of this function is to force the return value of a - view function into a response which is helpful with view - decorators:: - - response = make_response(view_function()) - response.headers['X-Parachutes'] = 'parachutes are cool' - - Internally this function does the following things: - - - if no arguments are passed, it creates a new response argument - - if one argument is passed, :meth:`flask.Flask.make_response` - is invoked with it. - - if more than one argument is passed, the arguments are passed - to the :meth:`flask.Flask.make_response` function as tuple. - - .. versionadded:: 0.6 - """ - if not args: - return current_app.response_class() - if len(args) == 1: - args = args[0] - return current_app.make_response(args) - - -def url_for(endpoint, **values): - """Generates a URL to the given endpoint with the method provided. - - Variable arguments that are unknown to the target endpoint are appended - to the generated URL as query arguments. If the value of a query argument - is `None`, the whole pair is skipped. In case blueprints are active - you can shortcut references to the same blueprint by prefixing the - local endpoint with a dot (``.``). 
- - This will reference the index function local to the current blueprint:: - - url_for('.index') - - For more information, head over to the :ref:`Quickstart `. - - To integrate applications, :class:`Flask` has a hook to intercept URL build - errors through :attr:`Flask.build_error_handler`. The `url_for` function - results in a :exc:`~werkzeug.routing.BuildError` when the current app does - not have a URL for the given endpoint and values. When it does, the - :data:`~flask.current_app` calls its :attr:`~Flask.build_error_handler` if - it is not `None`, which can return a string to use as the result of - `url_for` (instead of `url_for`'s default to raise the - :exc:`~werkzeug.routing.BuildError` exception) or re-raise the exception. - An example:: - - def external_url_handler(error, endpoint, **values): - "Looks up an external URL when `url_for` cannot build a URL." - # This is an example of hooking the build_error_handler. - # Here, lookup_url is some utility function you've built - # which looks up the endpoint in some external URL registry. - url = lookup_url(endpoint, **values) - if url is None: - # External lookup did not have a URL. - # Re-raise the BuildError, in context of original traceback. - exc_type, exc_value, tb = sys.exc_info() - if exc_value is error: - raise exc_type, exc_value, tb - else: - raise error - # url_for will use this result, instead of raising BuildError. - return url - - app.build_error_handler = external_url_handler - - Here, `error` is the instance of :exc:`~werkzeug.routing.BuildError`, and - `endpoint` and `**values` are the arguments passed into `url_for`. Note - that this is for building URLs outside the current application, and not for - handling 404 NotFound errors. - - .. versionadded:: 0.9 - The `_anchor` and `_method` parameters were added. - - .. versionadded:: 0.9 - Calls :meth:`Flask.handle_build_error` on - :exc:`~werkzeug.routing.BuildError`. 
- - :param endpoint: the endpoint of the URL (name of the function) - :param values: the variable arguments of the URL rule - :param _external: if set to `True`, an absolute URL is generated. - :param _anchor: if provided this is added as anchor to the URL. - :param _method: if provided this explicitly specifies an HTTP method. - """ - appctx = _app_ctx_stack.top - reqctx = _request_ctx_stack.top - if appctx is None: - raise RuntimeError('Attempted to generate a URL with the application ' - 'context being pushed. This has to be executed ') - - # If request specific information is available we have some extra - # features that support "relative" urls. - if reqctx is not None: - url_adapter = reqctx.url_adapter - blueprint_name = request.blueprint - if not reqctx.request._is_old_module: - if endpoint[:1] == '.': - if blueprint_name is not None: - endpoint = blueprint_name + endpoint - else: - endpoint = endpoint[1:] - else: - # TODO: get rid of this deprecated functionality in 1.0 - if '.' not in endpoint: - if blueprint_name is not None: - endpoint = blueprint_name + '.' + endpoint - elif endpoint.startswith('.'): - endpoint = endpoint[1:] - external = values.pop('_external', False) - - # Otherwise go with the url adapter from the appctx and make - # the urls external by default. - else: - url_adapter = appctx.url_adapter - if url_adapter is None: - raise RuntimeError('Application was not able to create a URL ' - 'adapter for request independent URL generation. ' - 'You might be able to fix this by setting ' - 'the SERVER_NAME config variable.') - external = values.pop('_external', True) - - anchor = values.pop('_anchor', None) - method = values.pop('_method', None) - appctx.app.inject_url_defaults(endpoint, values) - try: - rv = url_adapter.build(endpoint, values, method=method, - force_external=external) - except BuildError, error: - # We need to inject the values again so that the app callback can - # deal with that sort of stuff. 
- values['_external'] = external - values['_anchor'] = anchor - values['_method'] = method - return appctx.app.handle_url_build_error(error, endpoint, values) - - rv = url_adapter.build(endpoint, values, method=method, - force_external=external) - if anchor is not None: - rv += '#' + url_quote(anchor) - return rv - - -def get_template_attribute(template_name, attribute): - """Loads a macro (or variable) a template exports. This can be used to - invoke a macro from within Python code. If you for example have a - template named `_cider.html` with the following contents: - - .. sourcecode:: html+jinja - - {% macro hello(name) %}Hello {{ name }}!{% endmacro %} - - You can access this from Python code like this:: - - hello = get_template_attribute('_cider.html', 'hello') - return hello('World') - - .. versionadded:: 0.2 - - :param template_name: the name of the template - :param attribute: the name of the variable of macro to acccess - """ - return getattr(current_app.jinja_env.get_template(template_name).module, - attribute) - - -def flash(message, category='message'): - """Flashes a message to the next request. In order to remove the - flashed message from the session and to display it to the user, - the template has to call :func:`get_flashed_messages`. - - .. versionchanged:: 0.3 - `category` parameter added. - - :param message: the message to be flashed. - :param category: the category for the message. The following values - are recommended: ``'message'`` for any kind of message, - ``'error'`` for errors, ``'info'`` for information - messages and ``'warning'`` for warnings. However any - kind of string can be used as category. - """ - # Original implementation: - # - # session.setdefault('_flashes', []).append((category, message)) - # - # This assumed that changes made to mutable structures in the session are - # are always in sync with the sess on object, which is not true for session - # implementations that use external storage for keeping their keys/values. 
- flashes = session.get('_flashes', []) - flashes.append((category, message)) - session['_flashes'] = flashes - - -def get_flashed_messages(with_categories=False, category_filter=[]): - """Pulls all flashed messages from the session and returns them. - Further calls in the same request to the function will return - the same messages. By default just the messages are returned, - but when `with_categories` is set to `True`, the return value will - be a list of tuples in the form ``(category, message)`` instead. - - Filter the flashed messages to one or more categories by providing those - categories in `category_filter`. This allows rendering categories in - separate html blocks. The `with_categories` and `category_filter` - arguments are distinct: - - * `with_categories` controls whether categories are returned with message - text (`True` gives a tuple, where `False` gives just the message text). - * `category_filter` filters the messages down to only those matching the - provided categories. - - See :ref:`message-flashing-pattern` for examples. - - .. versionchanged:: 0.3 - `with_categories` parameter added. - - .. versionchanged:: 0.9 - `category_filter` parameter added. - - :param with_categories: set to `True` to also receive categories. - :param category_filter: whitelist of categories to limit return values - """ - flashes = _request_ctx_stack.top.flashes - if flashes is None: - _request_ctx_stack.top.flashes = flashes = session.pop('_flashes') \ - if '_flashes' in session else [] - if category_filter: - flashes = filter(lambda f: f[0] in category_filter, flashes) - if not with_categories: - return [x[1] for x in flashes] - return flashes - - -def send_file(filename_or_fp, mimetype=None, as_attachment=False, - attachment_filename=None, add_etags=True, - cache_timeout=None, conditional=False): - """Sends the contents of a file to the client. This will use the - most efficient method available and configured. 
By default it will - try to use the WSGI server's file_wrapper support. Alternatively - you can set the application's :attr:`~Flask.use_x_sendfile` attribute - to ``True`` to directly emit an `X-Sendfile` header. This however - requires support of the underlying webserver for `X-Sendfile`. - - By default it will try to guess the mimetype for you, but you can - also explicitly provide one. For extra security you probably want - to send certain files as attachment (HTML for instance). The mimetype - guessing requires a `filename` or an `attachment_filename` to be - provided. - - Please never pass filenames to this function from user sources without - checking them first. Something like this is usually sufficient to - avoid security problems:: - - if '..' in filename or filename.startswith('/'): - abort(404) - - .. versionadded:: 0.2 - - .. versionadded:: 0.5 - The `add_etags`, `cache_timeout` and `conditional` parameters were - added. The default behavior is now to attach etags. - - .. versionchanged:: 0.7 - mimetype guessing and etag support for file objects was - deprecated because it was unreliable. Pass a filename if you are - able to, otherwise attach an etag yourself. This functionality - will be removed in Flask 1.0 - - .. versionchanged:: 0.9 - cache_timeout pulls its default from application config, when None. - - :param filename_or_fp: the filename of the file to send. This is - relative to the :attr:`~Flask.root_path` if a - relative path is specified. - Alternatively a file object might be provided - in which case `X-Sendfile` might not work and - fall back to the traditional method. Make sure - that the file pointer is positioned at the start - of data to send before calling :func:`send_file`. - :param mimetype: the mimetype of the file if provided, otherwise - auto detection happens. - :param as_attachment: set to `True` if you want to send this file with - a ``Content-Disposition: attachment`` header. 
- :param attachment_filename: the filename for the attachment if it - differs from the file's filename. - :param add_etags: set to `False` to disable attaching of etags. - :param conditional: set to `True` to enable conditional responses. - - :param cache_timeout: the timeout in seconds for the headers. When `None` - (default), this value is set by - :meth:`~Flask.get_send_file_max_age` of - :data:`~flask.current_app`. - """ - mtime = None - if isinstance(filename_or_fp, basestring): - filename = filename_or_fp - file = None - else: - from warnings import warn - file = filename_or_fp - filename = getattr(file, 'name', None) - - # XXX: this behavior is now deprecated because it was unreliable. - # removed in Flask 1.0 - if not attachment_filename and not mimetype \ - and isinstance(filename, basestring): - warn(DeprecationWarning('The filename support for file objects ' - 'passed to send_file is now deprecated. Pass an ' - 'attach_filename if you want mimetypes to be guessed.'), - stacklevel=2) - if add_etags: - warn(DeprecationWarning('In future flask releases etags will no ' - 'longer be generated for file objects passed to the send_file ' - 'function because this behavior was unreliable. 
Pass ' - 'filenames instead if possible, otherwise attach an etag ' - 'yourself based on another value'), stacklevel=2) - - if filename is not None: - if not os.path.isabs(filename): - filename = os.path.join(current_app.root_path, filename) - if mimetype is None and (filename or attachment_filename): - mimetype = mimetypes.guess_type(filename or attachment_filename)[0] - if mimetype is None: - mimetype = 'application/octet-stream' - - headers = Headers() - if as_attachment: - if attachment_filename is None: - if filename is None: - raise TypeError('filename unavailable, required for ' - 'sending as attachment') - attachment_filename = os.path.basename(filename) - headers.add('Content-Disposition', 'attachment', - filename=attachment_filename) - - if current_app.use_x_sendfile and filename: - if file is not None: - file.close() - headers['X-Sendfile'] = filename - data = None - else: - if file is None: - file = open(filename, 'rb') - mtime = os.path.getmtime(filename) - data = wrap_file(request.environ, file) - - rv = current_app.response_class(data, mimetype=mimetype, headers=headers, - direct_passthrough=True) - - # if we know the file modification date, we can store it as the - # the time of the last modification. - if mtime is not None: - rv.last_modified = int(mtime) - - rv.cache_control.public = True - if cache_timeout is None: - cache_timeout = current_app.get_send_file_max_age(filename) - if cache_timeout is not None: - rv.cache_control.max_age = cache_timeout - rv.expires = int(time() + cache_timeout) - - if add_etags and filename is not None: - rv.set_etag('flask-%s-%s-%s' % ( - os.path.getmtime(filename), - os.path.getsize(filename), - adler32( - filename.encode('utf8') if isinstance(filename, unicode) - else filename - ) & 0xffffffff - )) - if conditional: - rv = rv.make_conditional(request) - # make sure we don't send x-sendfile for servers that - # ignore the 304 status code for x-sendfile. 
- if rv.status_code == 304: - rv.headers.pop('x-sendfile', None) - return rv - - -def safe_join(directory, filename): - """Safely join `directory` and `filename`. - - Example usage:: - - @app.route('/wiki/') - def wiki_page(filename): - filename = safe_join(app.config['WIKI_FOLDER'], filename) - with open(filename, 'rb') as fd: - content = fd.read() # Read and process the file content... - - :param directory: the base directory. - :param filename: the untrusted filename relative to that directory. - :raises: :class:`~werkzeug.exceptions.NotFound` if the resulting path - would fall out of `directory`. - """ - filename = posixpath.normpath(filename) - for sep in _os_alt_seps: - if sep in filename: - raise NotFound() - if os.path.isabs(filename) or filename.startswith('../'): - raise NotFound() - return os.path.join(directory, filename) - - -def send_from_directory(directory, filename, **options): - """Send a file from a given directory with :func:`send_file`. This - is a secure way to quickly expose static files from an upload folder - or something similar. - - Example usage:: - - @app.route('/uploads/') - def download_file(filename): - return send_from_directory(app.config['UPLOAD_FOLDER'], - filename, as_attachment=True) - - .. admonition:: Sending files and Performance - - It is strongly recommended to activate either `X-Sendfile` support in - your webserver or (if no authentication happens) to tell the webserver - to serve files for the given path on its own without calling into the - web application for improved performance. - - .. versionadded:: 0.5 - - :param directory: the directory where all the files are stored. - :param filename: the filename relative to that directory to - download. - :param options: optional keyword arguments that are directly - forwarded to :func:`send_file`. 
- """ - filename = safe_join(directory, filename) - if not os.path.isfile(filename): - raise NotFound() - options.setdefault('conditional', True) - return send_file(filename, **options) - - -def get_root_path(import_name): - """Returns the path to a package or cwd if that cannot be found. This - returns the path of a package or the folder that contains a module. - - Not to be confused with the package path returned by :func:`find_package`. - """ - # Module already imported and has a file attribute. Use that first. - mod = sys.modules.get(import_name) - if mod is not None and hasattr(mod, '__file__'): - return os.path.dirname(os.path.abspath(mod.__file__)) - - # Next attempt: check the loader. - loader = pkgutil.get_loader(import_name) - - # Loader does not exist or we're referring to an unloaded main module - # or a main module without path (interactive sessions), go with the - # current working directory. - if loader is None or import_name == '__main__': - return os.getcwd() - - # For .egg, zipimporter does not have get_filename until Python 2.7. - # Some other loaders might exhibit the same behavior. - if hasattr(loader, 'get_filename'): - filepath = loader.get_filename(import_name) - else: - # Fall back to imports. - __import__(import_name) - filepath = sys.modules[import_name].__file__ - - # filepath is import_name.py for a module, or __init__.py for a package. - return os.path.dirname(os.path.abspath(filepath)) - - -def find_package(import_name): - """Finds a package and returns the prefix (or None if the package is - not installed) as well as the folder that contains the package or - module as a tuple. The package path returned is the module that would - have to be added to the pythonpath in order to make it possible to - import the module. The prefix is the path below which a UNIX like - folder structure exists (lib, share etc.). 
- """ - root_mod_name = import_name.split('.')[0] - loader = pkgutil.get_loader(root_mod_name) - if loader is None or import_name == '__main__': - # import name is not found, or interactive/main module - package_path = os.getcwd() - else: - # For .egg, zipimporter does not have get_filename until Python 2.7. - if hasattr(loader, 'get_filename'): - filename = loader.get_filename(root_mod_name) - elif hasattr(loader, 'archive'): - # zipimporter's loader.archive points to the .egg or .zip - # archive filename is dropped in call to dirname below. - filename = loader.archive - else: - # At least one loader is missing both get_filename and archive: - # Google App Engine's HardenedModulesHook - # - # Fall back to imports. - __import__(import_name) - filename = sys.modules[import_name].__file__ - package_path = os.path.abspath(os.path.dirname(filename)) - # package_path ends with __init__.py for a package - if loader.is_package(root_mod_name): - package_path = os.path.dirname(package_path) - - site_parent, site_folder = os.path.split(package_path) - py_prefix = os.path.abspath(sys.prefix) - if package_path.startswith(py_prefix): - return py_prefix, package_path - elif site_folder.lower() == 'site-packages': - parent, folder = os.path.split(site_parent) - # Windows like installations - if folder.lower() == 'lib': - base_dir = parent - # UNIX like installations - elif os.path.basename(parent).lower() == 'lib': - base_dir = os.path.dirname(parent) - else: - base_dir = site_parent - return base_dir, package_path - return None, package_path - - -class locked_cached_property(object): - """A decorator that converts a function into a lazy property. The - function wrapped is called the first time to retrieve the result - and then that calculated result is used the next time you access - the value. Works like the one in Werkzeug but has a lock for - thread safety. 
- """ - - def __init__(self, func, name=None, doc=None): - self.__name__ = name or func.__name__ - self.__module__ = func.__module__ - self.__doc__ = doc or func.__doc__ - self.func = func - self.lock = RLock() - - def __get__(self, obj, type=None): - if obj is None: - return self - with self.lock: - value = obj.__dict__.get(self.__name__, _missing) - if value is _missing: - value = self.func(obj) - obj.__dict__[self.__name__] = value - return value - - -class _PackageBoundObject(object): - - def __init__(self, import_name, template_folder=None): - #: The name of the package or module. Do not change this once - #: it was set by the constructor. - self.import_name = import_name - - #: location of the templates. `None` if templates should not be - #: exposed. - self.template_folder = template_folder - - #: Where is the app root located? - self.root_path = get_root_path(self.import_name) - - self._static_folder = None - self._static_url_path = None - - def _get_static_folder(self): - if self._static_folder is not None: - return os.path.join(self.root_path, self._static_folder) - def _set_static_folder(self, value): - self._static_folder = value - static_folder = property(_get_static_folder, _set_static_folder) - del _get_static_folder, _set_static_folder - - def _get_static_url_path(self): - if self._static_url_path is None: - if self.static_folder is None: - return None - return '/' + os.path.basename(self.static_folder) - return self._static_url_path - def _set_static_url_path(self, value): - self._static_url_path = value - static_url_path = property(_get_static_url_path, _set_static_url_path) - del _get_static_url_path, _set_static_url_path - - @property - def has_static_folder(self): - """This is `True` if the package bound object's container has a - folder named ``'static'``. - - .. versionadded:: 0.5 - """ - return self.static_folder is not None - - @locked_cached_property - def jinja_loader(self): - """The Jinja loader for this package bound object. - - .. 
versionadded:: 0.5 - """ - if self.template_folder is not None: - return FileSystemLoader(os.path.join(self.root_path, - self.template_folder)) - - def get_send_file_max_age(self, filename): - """Provides default cache_timeout for the :func:`send_file` functions. - - By default, this function returns ``SEND_FILE_MAX_AGE_DEFAULT`` from - the configuration of :data:`~flask.current_app`. - - Static file functions such as :func:`send_from_directory` use this - function, and :func:`send_file` calls this function on - :data:`~flask.current_app` when the given cache_timeout is `None`. If a - cache_timeout is given in :func:`send_file`, that timeout is used; - otherwise, this method is called. - - This allows subclasses to change the behavior when sending files based - on the filename. For example, to set the cache timeout for .js files - to 60 seconds:: - - class MyFlask(flask.Flask): - def get_send_file_max_age(self, name): - if name.lower().endswith('.js'): - return 60 - return flask.Flask.get_send_file_max_age(self, name) - - .. versionadded:: 0.9 - """ - return current_app.config['SEND_FILE_MAX_AGE_DEFAULT'] - - def send_static_file(self, filename): - """Function used internally to send static files from the static - folder to the browser. - - .. versionadded:: 0.5 - """ - if not self.has_static_folder: - raise RuntimeError('No static folder for this object') - # Ensure get_send_file_max_age is called in all cases. - # Here, we ensure get_send_file_max_age is called for Blueprints. - cache_timeout = self.get_send_file_max_age(filename) - return send_from_directory(self.static_folder, filename, - cache_timeout=cache_timeout) - - def open_resource(self, resource, mode='rb'): - """Opens a resource from the application's resource folder. 
To see - how this works, consider the following folder structure:: - - /myapplication.py - /schema.sql - /static - /style.css - /templates - /layout.html - /index.html - - If you want to open the `schema.sql` file you would do the - following:: - - with app.open_resource('schema.sql') as f: - contents = f.read() - do_something_with(contents) - - :param resource: the name of the resource. To access resources within - subfolders use forward slashes as separator. - """ - if mode not in ('r', 'rb'): - raise ValueError('Resources can only be opened for reading') - return open(os.path.join(self.root_path, resource), mode) diff --git a/libs/flask/logging.py b/libs/flask/logging.py deleted file mode 100644 index 9ad641d1d4..0000000000 --- a/libs/flask/logging.py +++ /dev/null @@ -1,45 +0,0 @@ -# -*- coding: utf-8 -*- -""" - flask.logging - ~~~~~~~~~~~~~ - - Implements the logging support for Flask. - - :copyright: (c) 2011 by Armin Ronacher. - :license: BSD, see LICENSE for more details. -""" - -from __future__ import absolute_import - -from logging import getLogger, StreamHandler, Formatter, getLoggerClass, DEBUG - - -def create_logger(app): - """Creates a logger for the given application. This logger works - similar to a regular Python logger but changes the effective logging - level based on the application's debug flag. Furthermore this - function also removes all attached handlers in case there was a - logger with the log name before. - """ - Logger = getLoggerClass() - - class DebugLogger(Logger): - def getEffectiveLevel(x): - if x.level == 0 and app.debug: - return DEBUG - return Logger.getEffectiveLevel(x) - - class DebugHandler(StreamHandler): - def emit(x, record): - StreamHandler.emit(x, record) if app.debug else None - - handler = DebugHandler() - handler.setLevel(DEBUG) - handler.setFormatter(Formatter(app.debug_log_format)) - logger = getLogger(app.logger_name) - # just in case that was not a new logger, get rid of all the handlers - # already attached to it. 
- del logger.handlers[:] - logger.__class__ = DebugLogger - logger.addHandler(handler) - return logger diff --git a/libs/flask/module.py b/libs/flask/module.py deleted file mode 100644 index 1c4f466c5a..0000000000 --- a/libs/flask/module.py +++ /dev/null @@ -1,42 +0,0 @@ -# -*- coding: utf-8 -*- -""" - flask.module - ~~~~~~~~~~~~ - - Implements a class that represents module blueprints. - - :copyright: (c) 2011 by Armin Ronacher. - :license: BSD, see LICENSE for more details. -""" - -import os - -from .blueprints import Blueprint - - -def blueprint_is_module(bp): - """Used to figure out if something is actually a module""" - return isinstance(bp, Module) - - -class Module(Blueprint): - """Deprecated module support. Until Flask 0.6 modules were a different - name of the concept now available as blueprints in Flask. They are - essentially doing the same but have some bad semantics for templates and - static files that were fixed with blueprints. - - .. versionchanged:: 0.7 - Modules were deprecated in favor for blueprints. - """ - - def __init__(self, import_name, name=None, url_prefix=None, - static_path=None, subdomain=None): - if name is None: - assert '.' in import_name, 'name required if package name ' \ - 'does not point to a submodule' - name = import_name.rsplit('.', 1)[1] - Blueprint.__init__(self, name, import_name, url_prefix=url_prefix, - subdomain=subdomain, template_folder='templates') - - if os.path.isdir(os.path.join(self.root_path, 'static')): - self._static_folder = 'static' diff --git a/libs/flask/session.py b/libs/flask/session.py deleted file mode 100644 index 1a43fdc153..0000000000 --- a/libs/flask/session.py +++ /dev/null @@ -1,19 +0,0 @@ -# -*- coding: utf-8 -*- -""" - flask.session - ~~~~~~~~~~~~~ - - This module used to flask with the session global so we moved it - over to flask.sessions - - :copyright: (c) 2011 by Armin Ronacher. - :license: BSD, see LICENSE for more details. 
-""" - -from warnings import warn -warn(DeprecationWarning('please use flask.sessions instead')) - -from .sessions import SecureCookieSession, NullSession - -Session = SecureCookieSession -_NullSession = NullSession diff --git a/libs/flask/sessions.py b/libs/flask/sessions.py deleted file mode 100644 index 2795bb1fbb..0000000000 --- a/libs/flask/sessions.py +++ /dev/null @@ -1,205 +0,0 @@ -# -*- coding: utf-8 -*- -""" - flask.sessions - ~~~~~~~~~~~~~~ - - Implements cookie based sessions based on Werkzeug's secure cookie - system. - - :copyright: (c) 2011 by Armin Ronacher. - :license: BSD, see LICENSE for more details. -""" - -from datetime import datetime -from werkzeug.contrib.securecookie import SecureCookie - - -class SessionMixin(object): - """Expands a basic dictionary with an accessors that are expected - by Flask extensions and users for the session. - """ - - def _get_permanent(self): - return self.get('_permanent', False) - - def _set_permanent(self, value): - self['_permanent'] = bool(value) - - #: this reflects the ``'_permanent'`` key in the dict. - permanent = property(_get_permanent, _set_permanent) - del _get_permanent, _set_permanent - - #: some session backends can tell you if a session is new, but that is - #: not necessarily guaranteed. Use with caution. The default mixin - #: implementation just hardcodes `False` in. - new = False - - #: for some backends this will always be `True`, but some backends will - #: default this to false and detect changes in the dictionary for as - #: long as changes do not happen on mutable structures in the session. - #: The default mixin implementation just hardcodes `True` in. - modified = True - - -class SecureCookieSession(SecureCookie, SessionMixin): - """Expands the session with support for switching between permanent - and non-permanent sessions. - """ - - -class NullSession(SecureCookieSession): - """Class used to generate nicer error messages if sessions are not - available. 
Will still allow read-only access to the empty session - but fail on setting. - """ - - def _fail(self, *args, **kwargs): - raise RuntimeError('the session is unavailable because no secret ' - 'key was set. Set the secret_key on the ' - 'application to something unique and secret.') - __setitem__ = __delitem__ = clear = pop = popitem = \ - update = setdefault = _fail - del _fail - - -class SessionInterface(object): - """The basic interface you have to implement in order to replace the - default session interface which uses werkzeug's securecookie - implementation. The only methods you have to implement are - :meth:`open_session` and :meth:`save_session`, the others have - useful defaults which you don't need to change. - - The session object returned by the :meth:`open_session` method has to - provide a dictionary like interface plus the properties and methods - from the :class:`SessionMixin`. We recommend just subclassing a dict - and adding that mixin:: - - class Session(dict, SessionMixin): - pass - - If :meth:`open_session` returns `None` Flask will call into - :meth:`make_null_session` to create a session that acts as replacement - if the session support cannot work because some requirement is not - fulfilled. The default :class:`NullSession` class that is created - will complain that the secret key was not set. - - To replace the session interface on an application all you have to do - is to assign :attr:`flask.Flask.session_interface`:: - - app = Flask(__name__) - app.session_interface = MySessionInterface() - - .. versionadded:: 0.8 - """ - - #: :meth:`make_null_session` will look here for the class that should - #: be created when a null session is requested. Likewise the - #: :meth:`is_null_session` method will perform a typecheck against - #: this type. 
- null_session_class = NullSession - - def make_null_session(self, app): - """Creates a null session which acts as a replacement object if the - real session support could not be loaded due to a configuration - error. This mainly aids the user experience because the job of the - null session is to still support lookup without complaining but - modifications are answered with a helpful error message of what - failed. - - This creates an instance of :attr:`null_session_class` by default. - """ - return self.null_session_class() - - def is_null_session(self, obj): - """Checks if a given object is a null session. Null sessions are - not asked to be saved. - - This checks if the object is an instance of :attr:`null_session_class` - by default. - """ - return isinstance(obj, self.null_session_class) - - def get_cookie_domain(self, app): - """Helpful helper method that returns the cookie domain that should - be used for the session cookie if session cookies are used. - """ - if app.config['SESSION_COOKIE_DOMAIN'] is not None: - return app.config['SESSION_COOKIE_DOMAIN'] - if app.config['SERVER_NAME'] is not None: - # chop of the port which is usually not supported by browsers - return '.' + app.config['SERVER_NAME'].rsplit(':', 1)[0] - - def get_cookie_path(self, app): - """Returns the path for which the cookie should be valid. The - default implementation uses the value from the SESSION_COOKIE_PATH`` - config var if it's set, and falls back to ``APPLICATION_ROOT`` or - uses ``/`` if it's `None`. - """ - return app.config['SESSION_COOKIE_PATH'] or \ - app.config['APPLICATION_ROOT'] or '/' - - def get_cookie_httponly(self, app): - """Returns True if the session cookie should be httponly. This - currently just returns the value of the ``SESSION_COOKIE_HTTPONLY`` - config var. - """ - return app.config['SESSION_COOKIE_HTTPONLY'] - - def get_cookie_secure(self, app): - """Returns True if the cookie should be secure. 
This currently - just returns the value of the ``SESSION_COOKIE_SECURE`` setting. - """ - return app.config['SESSION_COOKIE_SECURE'] - - def get_expiration_time(self, app, session): - """A helper method that returns an expiration date for the session - or `None` if the session is linked to the browser session. The - default implementation returns now + the permanent session - lifetime configured on the application. - """ - if session.permanent: - return datetime.utcnow() + app.permanent_session_lifetime - - def open_session(self, app, request): - """This method has to be implemented and must either return `None` - in case the loading failed because of a configuration error or an - instance of a session object which implements a dictionary like - interface + the methods and attributes on :class:`SessionMixin`. - """ - raise NotImplementedError() - - def save_session(self, app, session, response): - """This is called for actual sessions returned by :meth:`open_session` - at the end of the request. This is still called during a request - context so if you absolutely need access to the request you can do - that. - """ - raise NotImplementedError() - - -class SecureCookieSessionInterface(SessionInterface): - """The cookie session interface that uses the Werkzeug securecookie - as client side session backend. 
- """ - session_class = SecureCookieSession - - def open_session(self, app, request): - key = app.secret_key - if key is not None: - return self.session_class.load_cookie(request, - app.session_cookie_name, - secret_key=key) - - def save_session(self, app, session, response): - expires = self.get_expiration_time(app, session) - domain = self.get_cookie_domain(app) - path = self.get_cookie_path(app) - httponly = self.get_cookie_httponly(app) - secure = self.get_cookie_secure(app) - if session.modified and not session: - response.delete_cookie(app.session_cookie_name, path=path, - domain=domain) - else: - session.save_cookie(response, app.session_cookie_name, path=path, - expires=expires, httponly=httponly, - secure=secure, domain=domain) diff --git a/libs/flask/signals.py b/libs/flask/signals.py deleted file mode 100644 index 78a77bd556..0000000000 --- a/libs/flask/signals.py +++ /dev/null @@ -1,52 +0,0 @@ -# -*- coding: utf-8 -*- -""" - flask.signals - ~~~~~~~~~~~~~ - - Implements signals based on blinker if available, otherwise - falls silently back to a noop - - :copyright: (c) 2011 by Armin Ronacher. - :license: BSD, see LICENSE for more details. -""" -signals_available = False -try: - from blinker import Namespace - signals_available = True -except ImportError: - class Namespace(object): - def signal(self, name, doc=None): - return _FakeSignal(name, doc) - - class _FakeSignal(object): - """If blinker is unavailable, create a fake class with the same - interface that allows sending of signals but will fail with an - error on anything else. Instead of doing anything on send, it - will just ignore the arguments and do nothing instead. 
- """ - - def __init__(self, name, doc=None): - self.name = name - self.__doc__ = doc - def _fail(self, *args, **kwargs): - raise RuntimeError('signalling support is unavailable ' - 'because the blinker library is ' - 'not installed.') - send = lambda *a, **kw: None - connect = disconnect = has_receivers_for = receivers_for = \ - temporarily_connected_to = connected_to = _fail - del _fail - -# the namespace for code signals. If you are not flask code, do -# not put signals in here. Create your own namespace instead. -_signals = Namespace() - - -# core signals. For usage examples grep the sourcecode or consult -# the API documentation in docs/api.rst as well as docs/signals.rst -template_rendered = _signals.signal('template-rendered') -request_started = _signals.signal('request-started') -request_finished = _signals.signal('request-finished') -request_tearing_down = _signals.signal('request-tearing-down') -got_request_exception = _signals.signal('got-request-exception') -appcontext_tearing_down = _signals.signal('appcontext-tearing-down') diff --git a/libs/flask/templating.py b/libs/flask/templating.py deleted file mode 100644 index c809a63f0b..0000000000 --- a/libs/flask/templating.py +++ /dev/null @@ -1,140 +0,0 @@ -# -*- coding: utf-8 -*- -""" - flask.templating - ~~~~~~~~~~~~~~~~ - - Implements the bridge to Jinja2. - - :copyright: (c) 2011 by Armin Ronacher. - :license: BSD, see LICENSE for more details. -""" -import posixpath -from jinja2 import BaseLoader, Environment as BaseEnvironment, \ - TemplateNotFound - -from .globals import _request_ctx_stack -from .signals import template_rendered -from .module import blueprint_is_module - - -def _default_template_ctx_processor(): - """Default template context processor. Injects `request`, - `session` and `g`. 
- """ - reqctx = _request_ctx_stack.top - return dict( - config=reqctx.app.config, - request=reqctx.request, - session=reqctx.session, - g=reqctx.g - ) - - -class Environment(BaseEnvironment): - """Works like a regular Jinja2 environment but has some additional - knowledge of how Flask's blueprint works so that it can prepend the - name of the blueprint to referenced templates if necessary. - """ - - def __init__(self, app, **options): - if 'loader' not in options: - options['loader'] = app.create_global_jinja_loader() - BaseEnvironment.__init__(self, **options) - self.app = app - - -class DispatchingJinjaLoader(BaseLoader): - """A loader that looks for templates in the application and all - the blueprint folders. - """ - - def __init__(self, app): - self.app = app - - def get_source(self, environment, template): - for loader, local_name in self._iter_loaders(template): - try: - return loader.get_source(environment, local_name) - except TemplateNotFound: - pass - - raise TemplateNotFound(template) - - def _iter_loaders(self, template): - loader = self.app.jinja_loader - if loader is not None: - yield loader, template - - # old style module based loaders in case we are dealing with a - # blueprint that is an old style module - try: - module, local_name = posixpath.normpath(template).split('/', 1) - blueprint = self.app.blueprints[module] - if blueprint_is_module(blueprint): - loader = blueprint.jinja_loader - if loader is not None: - yield loader, local_name - except (ValueError, KeyError): - pass - - for blueprint in self.app.blueprints.itervalues(): - if blueprint_is_module(blueprint): - continue - loader = blueprint.jinja_loader - if loader is not None: - yield loader, template - - def list_templates(self): - result = set() - loader = self.app.jinja_loader - if loader is not None: - result.update(loader.list_templates()) - - for name, blueprint in self.app.blueprints.iteritems(): - loader = blueprint.jinja_loader - if loader is not None: - for template in 
loader.list_templates(): - prefix = '' - if blueprint_is_module(blueprint): - prefix = name + '/' - result.add(prefix + template) - - return list(result) - - -def _render(template, context, app): - """Renders the template and fires the signal""" - rv = template.render(context) - template_rendered.send(app, template=template, context=context) - return rv - - -def render_template(template_name_or_list, **context): - """Renders a template from the template folder with the given - context. - - :param template_name_or_list: the name of the template to be - rendered, or an iterable with template names - the first one existing will be rendered - :param context: the variables that should be available in the - context of the template. - """ - ctx = _request_ctx_stack.top - ctx.app.update_template_context(context) - return _render(ctx.app.jinja_env.get_or_select_template(template_name_or_list), - context, ctx.app) - - -def render_template_string(source, **context): - """Renders a template from the given template source string - with the given context. - - :param template_name: the sourcecode of the template to be - rendered - :param context: the variables that should be available in the - context of the template. - """ - ctx = _request_ctx_stack.top - ctx.app.update_template_context(context) - return _render(ctx.app.jinja_env.from_string(source), - context, ctx.app) diff --git a/libs/flask/testing.py b/libs/flask/testing.py deleted file mode 100644 index 782b40f688..0000000000 --- a/libs/flask/testing.py +++ /dev/null @@ -1,118 +0,0 @@ -# -*- coding: utf-8 -*- -""" - flask.testing - ~~~~~~~~~~~~~ - - Implements test support helpers. This module is lazily imported - and usually not used in production environments. - - :copyright: (c) 2011 by Armin Ronacher. - :license: BSD, see LICENSE for more details. 
-""" - -from __future__ import with_statement - -from contextlib import contextmanager -from werkzeug.test import Client, EnvironBuilder -from flask import _request_ctx_stack - - -def make_test_environ_builder(app, path='/', base_url=None, *args, **kwargs): - """Creates a new test builder with some application defaults thrown in.""" - http_host = app.config.get('SERVER_NAME') - app_root = app.config.get('APPLICATION_ROOT') - if base_url is None: - base_url = 'http://%s/' % (http_host or 'localhost') - if app_root: - base_url += app_root.lstrip('/') - return EnvironBuilder(path, base_url, *args, **kwargs) - - -class FlaskClient(Client): - """Works like a regular Werkzeug test client but has some knowledge about - how Flask works to defer the cleanup of the request context stack to the - end of a with body when used in a with statement. For general information - about how to use this class refer to :class:`werkzeug.test.Client`. - - Basic usage is outlined in the :ref:`testing` chapter. - """ - - preserve_context = False - - @contextmanager - def session_transaction(self, *args, **kwargs): - """When used in combination with a with statement this opens a - session transaction. This can be used to modify the session that - the test client uses. Once the with block is left the session is - stored back. - - with client.session_transaction() as session: - session['value'] = 42 - - Internally this is implemented by going through a temporary test - request context and since session handling could depend on - request variables this function accepts the same arguments as - :meth:`~flask.Flask.test_request_context` which are directly - passed through. 
- """ - if self.cookie_jar is None: - raise RuntimeError('Session transactions only make sense ' - 'with cookies enabled.') - app = self.application - environ_overrides = kwargs.setdefault('environ_overrides', {}) - self.cookie_jar.inject_wsgi(environ_overrides) - outer_reqctx = _request_ctx_stack.top - with app.test_request_context(*args, **kwargs) as c: - sess = app.open_session(c.request) - if sess is None: - raise RuntimeError('Session backend did not open a session. ' - 'Check the configuration') - - # Since we have to open a new request context for the session - # handling we want to make sure that we hide out own context - # from the caller. By pushing the original request context - # (or None) on top of this and popping it we get exactly that - # behavior. It's important to not use the push and pop - # methods of the actual request context object since that would - # mean that cleanup handlers are called - _request_ctx_stack.push(outer_reqctx) - try: - yield sess - finally: - _request_ctx_stack.pop() - - resp = app.response_class() - if not app.session_interface.is_null_session(sess): - app.save_session(sess, resp) - headers = resp.get_wsgi_headers(c.request.environ) - self.cookie_jar.extract_wsgi(c.request.environ, headers) - - def open(self, *args, **kwargs): - kwargs.setdefault('environ_overrides', {}) \ - ['flask._preserve_context'] = self.preserve_context - - as_tuple = kwargs.pop('as_tuple', False) - buffered = kwargs.pop('buffered', False) - follow_redirects = kwargs.pop('follow_redirects', False) - builder = make_test_environ_builder(self.application, *args, **kwargs) - - return Client.open(self, builder, - as_tuple=as_tuple, - buffered=buffered, - follow_redirects=follow_redirects) - - def __enter__(self): - if self.preserve_context: - raise RuntimeError('Cannot nest client invocations') - self.preserve_context = True - return self - - def __exit__(self, exc_type, exc_value, tb): - self.preserve_context = False - - # on exit we want to clean up 
earlier. Normally the request context - # stays preserved until the next request in the same thread comes - # in. See RequestGlobals.push() for the general behavior. - top = _request_ctx_stack.top - if top is not None and top.preserved: - top.pop() diff --git a/libs/flask/views.py b/libs/flask/views.py deleted file mode 100644 index 5192c1c100..0000000000 --- a/libs/flask/views.py +++ /dev/null @@ -1,150 +0,0 @@ -# -*- coding: utf-8 -*- -""" - flask.views - ~~~~~~~~~~~ - - This module provides class-based views inspired by the ones in Django. - - :copyright: (c) 2011 by Armin Ronacher. - :license: BSD, see LICENSE for more details. -""" -from .globals import request - - -http_method_funcs = frozenset(['get', 'post', 'head', 'options', - 'delete', 'put', 'trace', 'patch']) - - -class View(object): - """Alternative way to use view functions. A subclass has to implement - :meth:`dispatch_request` which is called with the view arguments from - the URL routing system. If :attr:`methods` is provided the methods - do not have to be passed to the :meth:`~flask.Flask.add_url_rule` - method explicitly:: - - class MyView(View): - methods = ['GET'] - - def dispatch_request(self, name): - return 'Hello %s!' % name - - app.add_url_rule('/hello/', view_func=MyView.as_view('myview')) - - When you want to decorate a pluggable view you will have to either do that - when the view function is created (by wrapping the return value of - :meth:`as_view`) or you can use the :attr:`decorators` attribute:: - - class SecretView(View): - methods = ['GET'] - decorators = [superuser_required] - - def dispatch_request(self): - ... - - The decorators stored in the decorators list are applied one after another - when the view function is created. Note that you can *not* use the class - based decorators since those would decorate the view class and not the - generated view function! - """ - - #: A for which methods this pluggable view can handle. 
- methods = None - - #: The canonical way to decorate class-based views is to decorate the - #: return value of as_view(). However since this moves parts of the - #: logic from the class declaration to the place where it's hooked - #: into the routing system. - #: - #: You can place one or more decorators in this list and whenever the - #: view function is created the result is automatically decorated. - #: - #: .. versionadded:: 0.8 - decorators = [] - - def dispatch_request(self): - """Subclasses have to override this method to implement the - actual view function code. This method is called with all - the arguments from the URL rule. - """ - raise NotImplementedError() - - @classmethod - def as_view(cls, name, *class_args, **class_kwargs): - """Converts the class into an actual view function that can be used - with the routing system. Internally this generates a function on the - fly which will instantiate the :class:`View` on each request and call - the :meth:`dispatch_request` method on it. - - The arguments passed to :meth:`as_view` are forwarded to the - constructor of the class. - """ - def view(*args, **kwargs): - self = view.view_class(*class_args, **class_kwargs) - return self.dispatch_request(*args, **kwargs) - - if cls.decorators: - view.__name__ = name - view.__module__ = cls.__module__ - for decorator in cls.decorators: - view = decorator(view) - - # we attach the view class to the view function for two reasons: - # first of all it allows us to easily figure out what class-based - # view this thing came from, secondly it's also used for instantiating - # the view class so you can actually replace it with something else - # for testing purposes and debugging. 
- view.view_class = cls - view.__name__ = name - view.__doc__ = cls.__doc__ - view.__module__ = cls.__module__ - view.methods = cls.methods - return view - - -class MethodViewType(type): - - def __new__(cls, name, bases, d): - rv = type.__new__(cls, name, bases, d) - if 'methods' not in d: - methods = set(rv.methods or []) - for key in d: - if key in http_method_funcs: - methods.add(key.upper()) - # if we have no method at all in there we don't want to - # add a method list. (This is for instance the case for - # the baseclass or another subclass of a base method view - # that does not introduce new methods). - if methods: - rv.methods = sorted(methods) - return rv - - -class MethodView(View): - """Like a regular class-based view but that dispatches requests to - particular methods. For instance if you implement a method called - :meth:`get` it means you will response to ``'GET'`` requests and - the :meth:`dispatch_request` implementation will automatically - forward your request to that. 
Also :attr:`options` is set for you - automatically:: - - class CounterAPI(MethodView): - - def get(self): - return session.get('counter', 0) - - def post(self): - session['counter'] = session.get('counter', 0) + 1 - return 'OK' - - app.add_url_rule('/counter', view_func=CounterAPI.as_view('counter')) - """ - __metaclass__ = MethodViewType - - def dispatch_request(self, *args, **kwargs): - meth = getattr(self, request.method.lower(), None) - # if the request method is HEAD and we don't have a handler for it - # retry with GET - if meth is None and request.method == 'HEAD': - meth = getattr(self, 'get', None) - assert meth is not None, 'Unimplemented method %r' % request.method - return meth(*args, **kwargs) diff --git a/libs/flask/wrappers.py b/libs/flask/wrappers.py deleted file mode 100644 index 3ee718ffb8..0000000000 --- a/libs/flask/wrappers.py +++ /dev/null @@ -1,148 +0,0 @@ -# -*- coding: utf-8 -*- -""" - flask.wrappers - ~~~~~~~~~~~~~~ - - Implements the WSGI wrappers (request and response). - - :copyright: (c) 2011 by Armin Ronacher. - :license: BSD, see LICENSE for more details. -""" - -from werkzeug.wrappers import Request as RequestBase, Response as ResponseBase -from werkzeug.utils import cached_property - -from .exceptions import JSONBadRequest -from .debughelpers import attach_enctype_error_multidict -from .helpers import json, _assert_have_json -from .globals import _request_ctx_stack - - -class Request(RequestBase): - """The request object used by default in Flask. Remembers the - matched endpoint and view arguments. - - It is what ends up as :class:`~flask.request`. If you want to replace - the request object used you can subclass this and set - :attr:`~flask.Flask.request_class` to your subclass. - - The request object is a :class:`~werkzeug.wrappers.Request` subclass and - provides all of the attributes Werkzeug defines plus a few Flask - specific ones. - """ - - #: the internal URL rule that matched the request. 
This can be - #: useful to inspect which methods are allowed for the URL from - #: a before/after handler (``request.url_rule.methods``) etc. - #: - #: .. versionadded:: 0.6 - url_rule = None - - #: a dict of view arguments that matched the request. If an exception - #: happened when matching, this will be `None`. - view_args = None - - #: if matching the URL failed, this is the exception that will be - #: raised / was raised as part of the request handling. This is - #: usually a :exc:`~werkzeug.exceptions.NotFound` exception or - #: something similar. - routing_exception = None - - # switched by the request context until 1.0 to opt in deprecated - # module functionality - _is_old_module = False - - @property - def max_content_length(self): - """Read-only view of the `MAX_CONTENT_LENGTH` config key.""" - ctx = _request_ctx_stack.top - if ctx is not None: - return ctx.app.config['MAX_CONTENT_LENGTH'] - - @property - def endpoint(self): - """The endpoint that matched the request. This in combination with - :attr:`view_args` can be used to reconstruct the same or a - modified URL. If an exception happened when matching, this will - be `None`. - """ - if self.url_rule is not None: - return self.url_rule.endpoint - - @property - def module(self): - """The name of the current module if the request was dispatched - to an actual module. This is deprecated functionality, use blueprints - instead. - """ - from warnings import warn - warn(DeprecationWarning('modules were deprecated in favor of ' - 'blueprints. Use request.blueprint ' - 'instead.'), stacklevel=2) - if self._is_old_module: - return self.blueprint - - @property - def blueprint(self): - """The name of the current blueprint""" - if self.url_rule and '.' in self.url_rule.endpoint: - return self.url_rule.endpoint.rsplit('.', 1)[0] - - @cached_property - def json(self): - """If the mimetype is `application/json` this will contain the - parsed JSON data. Otherwise this will be `None`. 
- - This requires Python 2.6 or an installed version of simplejson. - """ - if __debug__: - _assert_have_json() - if self.mimetype == 'application/json': - request_charset = self.mimetype_params.get('charset') - try: - if request_charset is not None: - return json.loads(self.data, encoding=request_charset) - return json.loads(self.data) - except ValueError, e: - return self.on_json_loading_failed(e) - - def on_json_loading_failed(self, e): - """Called if decoding of the JSON data failed. The return value of - this method is used by :attr:`json` when an error ocurred. The default - implementation raises a :class:`JSONBadRequest`, which is a subclass of - :class:`~werkzeug.exceptions.BadRequest` which sets the - ``Content-Type`` to ``application/json`` and provides a JSON-formatted - error description:: - - {"description": "The browser (or proxy) sent a request that \ - this server could not understand."} - - .. versionchanged:: 0.9 - Return a :class:`JSONBadRequest` instead of a - :class:`~werkzeug.exceptions.BadRequest` by default. - - .. versionadded:: 0.8 - """ - raise JSONBadRequest() - - def _load_form_data(self): - RequestBase._load_form_data(self) - - # in debug mode we're replacing the files multidict with an ad-hoc - # subclass that raises a different error for key errors. - ctx = _request_ctx_stack.top - if ctx is not None and ctx.app.debug and \ - self.mimetype != 'multipart/form-data' and not self.files: - attach_enctype_error_multidict(self) - - -class Response(ResponseBase): - """The response object that is used by default in Flask. Works like the - response object from Werkzeug but is set to have an HTML mimetype by - default. Quite often you don't have to create this object yourself because - :meth:`~flask.Flask.make_response` will take care of that for you. - - If you want to replace the response object used you can subclass this and - set :attr:`~flask.Flask.response_class` to your subclass. 
- """ - default_mimetype = 'text/html' diff --git a/libs/gntp/__init__.py b/libs/gntp/__init__.py index eabbfa47d3..e69de29bb2 100755 --- a/libs/gntp/__init__.py +++ b/libs/gntp/__init__.py @@ -1,509 +0,0 @@ -import re -import hashlib -import time -import StringIO - -__version__ = '0.8' - -#GNTP/ [:][ :.] -GNTP_INFO_LINE = re.compile( - 'GNTP/(?P\d+\.\d+) (?PREGISTER|NOTIFY|SUBSCRIBE|\-OK|\-ERROR)' + - ' (?P[A-Z0-9]+(:(?P[A-F0-9]+))?) ?' + - '((?P[A-Z0-9]+):(?P[A-F0-9]+).(?P[A-F0-9]+))?\r\n', - re.IGNORECASE -) - -GNTP_INFO_LINE_SHORT = re.compile( - 'GNTP/(?P\d+\.\d+) (?PREGISTER|NOTIFY|SUBSCRIBE|\-OK|\-ERROR)', - re.IGNORECASE -) - -GNTP_HEADER = re.compile('([\w-]+):(.+)') - -GNTP_EOL = '\r\n' - - -class BaseError(Exception): - def gntp_error(self): - error = GNTPError(self.errorcode, self.errordesc) - return error.encode() - - -class ParseError(BaseError): - errorcode = 500 - errordesc = 'Error parsing the message' - - -class AuthError(BaseError): - errorcode = 400 - errordesc = 'Error with authorization' - - -class UnsupportedError(BaseError): - errorcode = 500 - errordesc = 'Currently unsupported by gntp.py' - - -class _GNTPBuffer(StringIO.StringIO): - """GNTP Buffer class""" - def writefmt(self, message = "", *args): - """Shortcut function for writing GNTP Headers""" - self.write((message % args).encode('utf8', 'replace')) - self.write(GNTP_EOL) - - -class _GNTPBase(object): - """Base initilization - - :param string messagetype: GNTP Message type - :param string version: GNTP Protocol version - :param string encription: Encryption protocol - """ - def __init__(self, messagetype = None, version = '1.0', encryption = None): - self.info = { - 'version': version, - 'messagetype': messagetype, - 'encryptionAlgorithmID': encryption - } - self.headers = {} - self.resources = {} - - def __str__(self): - return self.encode() - - def _parse_info(self, data): - """Parse the first line of a GNTP message to get security and other info values - - :param string data: GNTP 
Message - :return dict: Parsed GNTP Info line - """ - - match = GNTP_INFO_LINE.match(data) - - if not match: - raise ParseError('ERROR_PARSING_INFO_LINE') - - info = match.groupdict() - if info['encryptionAlgorithmID'] == 'NONE': - info['encryptionAlgorithmID'] = None - - return info - - def set_password(self, password, encryptAlgo = 'MD5'): - """Set a password for a GNTP Message - - :param string password: Null to clear password - :param string encryptAlgo: Supports MD5, SHA1, SHA256, SHA512 - """ - hash = { - 'MD5': hashlib.md5, - 'SHA1': hashlib.sha1, - 'SHA256': hashlib.sha256, - 'SHA512': hashlib.sha512, - } - - self.password = password - self.encryptAlgo = encryptAlgo.upper() - if not password: - self.info['encryptionAlgorithmID'] = None - self.info['keyHashAlgorithm'] = None - return - if not self.encryptAlgo in hash.keys(): - raise UnsupportedError('INVALID HASH "%s"' % self.encryptAlgo) - - hashfunction = hash.get(self.encryptAlgo) - - password = password.encode('utf8') - seed = time.ctime() - salt = hashfunction(seed).hexdigest() - saltHash = hashfunction(seed).digest() - keyBasis = password + saltHash - key = hashfunction(keyBasis).digest() - keyHash = hashfunction(key).hexdigest() - - self.info['keyHashAlgorithmID'] = self.encryptAlgo - self.info['keyHash'] = keyHash.upper() - self.info['salt'] = salt.upper() - - def _decode_hex(self, value): - """Helper function to decode hex string to `proper` hex string - - :param string value: Human readable hex string - :return string: Hex string - """ - result = '' - for i in range(0, len(value), 2): - tmp = int(value[i:i + 2], 16) - result += chr(tmp) - return result - - def _decode_binary(self, rawIdentifier, identifier): - rawIdentifier += '\r\n\r\n' - dataLength = int(identifier['Length']) - pointerStart = self.raw.find(rawIdentifier) + len(rawIdentifier) - pointerEnd = pointerStart + dataLength - data = self.raw[pointerStart:pointerEnd] - if not len(data) == dataLength: - raise ParseError('INVALID_DATA_LENGTH 
Expected: %s Recieved %s' % (dataLength, len(data))) - return data - - def _validate_password(self, password): - """Validate GNTP Message against stored password""" - self.password = password - if password == None: - raise AuthError('Missing password') - keyHash = self.info.get('keyHash', None) - if keyHash is None and self.password is None: - return True - if keyHash is None: - raise AuthError('Invalid keyHash') - if self.password is None: - raise AuthError('Missing password') - - password = self.password.encode('utf8') - saltHash = self._decode_hex(self.info['salt']) - - keyBasis = password + saltHash - key = hashlib.md5(keyBasis).digest() - keyHash = hashlib.md5(key).hexdigest() - - if not keyHash.upper() == self.info['keyHash'].upper(): - raise AuthError('Invalid Hash') - return True - - def validate(self): - """Verify required headers""" - for header in self._requiredHeaders: - if not self.headers.get(header, False): - raise ParseError('Missing Notification Header: ' + header) - - def _format_info(self): - """Generate info line for GNTP Message - - :return string: - """ - info = u'GNTP/%s %s' % ( - self.info.get('version'), - self.info.get('messagetype'), - ) - if self.info.get('encryptionAlgorithmID', None): - info += ' %s:%s' % ( - self.info.get('encryptionAlgorithmID'), - self.info.get('ivValue'), - ) - else: - info += ' NONE' - - if self.info.get('keyHashAlgorithmID', None): - info += ' %s:%s.%s' % ( - self.info.get('keyHashAlgorithmID'), - self.info.get('keyHash'), - self.info.get('salt') - ) - - return info - - def _parse_dict(self, data): - """Helper function to parse blocks of GNTP headers into a dictionary - - :param string data: - :return dict: - """ - dict = {} - for line in data.split('\r\n'): - match = GNTP_HEADER.match(line) - if not match: - continue - - key = unicode(match.group(1).strip(), 'utf8', 'replace') - val = unicode(match.group(2).strip(), 'utf8', 'replace') - dict[key] = val - return dict - - def add_header(self, key, value): - if 
isinstance(value, unicode): - self.headers[key] = value - else: - self.headers[key] = unicode('%s' % value, 'utf8', 'replace') - - def add_resource(self, data): - """Add binary resource - - :param string data: Binary Data - """ - identifier = hashlib.md5(data).hexdigest() - self.resources[identifier] = data - return 'x-growl-resource://%s' % identifier - - def decode(self, data, password = None): - """Decode GNTP Message - - :param string data: - """ - self.password = password - self.raw = data - parts = self.raw.split('\r\n\r\n') - self.info = self._parse_info(data) - self.headers = self._parse_dict(parts[0]) - - def encode(self): - """Encode a generic GNTP Message - - :return string: GNTP Message ready to be sent - """ - - buffer = _GNTPBuffer() - - buffer.writefmt(self._format_info()) - - #Headers - for k, v in self.headers.iteritems(): - buffer.writefmt('%s: %s', k, v) - buffer.writefmt() - - #Resources - for resource, data in self.resources.iteritems(): - buffer.writefmt('Identifier: %s', resource) - buffer.writefmt('Length: %d', len(data)) - buffer.writefmt() - buffer.write(data) - buffer.writefmt() - buffer.writefmt() - - return buffer.getvalue() - - -class GNTPRegister(_GNTPBase): - """Represents a GNTP Registration Command - - :param string data: (Optional) See decode() - :param string password: (Optional) Password to use while encoding/decoding messages - """ - _requiredHeaders = [ - 'Application-Name', - 'Notifications-Count' - ] - _requiredNotificationHeaders = ['Notification-Name'] - - def __init__(self, data = None, password = None): - _GNTPBase.__init__(self, 'REGISTER') - self.notifications = [] - - if data: - self.decode(data, password) - else: - self.set_password(password) - self.add_header('Application-Name', 'pygntp') - self.add_header('Notifications-Count', 0) - - def validate(self): - '''Validate required headers and validate notification headers''' - for header in self._requiredHeaders: - if not self.headers.get(header, False): - raise 
ParseError('Missing Registration Header: ' + header) - for notice in self.notifications: - for header in self._requiredNotificationHeaders: - if not notice.get(header, False): - raise ParseError('Missing Notification Header: ' + header) - - def decode(self, data, password): - """Decode existing GNTP Registration message - - :param string data: Message to decode - """ - self.raw = data - parts = self.raw.split('\r\n\r\n') - self.info = self._parse_info(data) - self._validate_password(password) - self.headers = self._parse_dict(parts[0]) - - for i, part in enumerate(parts): - if i == 0: - continue # Skip Header - if part.strip() == '': - continue - notice = self._parse_dict(part) - if notice.get('Notification-Name', False): - self.notifications.append(notice) - elif notice.get('Identifier', False): - notice['Data'] = self._decode_binary(part, notice) - #open('register.png','wblol').write(notice['Data']) - self.resources[notice.get('Identifier')] = notice - - def add_notification(self, name, enabled = True): - """Add new Notification to Registration message - - :param string name: Notification Name - :param boolean enabled: Enable this notification by default - """ - notice = {} - notice['Notification-Name'] = u'%s' % name - notice['Notification-Enabled'] = u'%s' % enabled - - self.notifications.append(notice) - self.add_header('Notifications-Count', len(self.notifications)) - - def encode(self): - """Encode a GNTP Registration Message - - :return string: Encoded GNTP Registration message - """ - - buffer = _GNTPBuffer() - - buffer.writefmt(self._format_info()) - - #Headers - for k, v in self.headers.iteritems(): - buffer.writefmt('%s: %s', k, v) - buffer.writefmt() - - #Notifications - if len(self.notifications) > 0: - for notice in self.notifications: - for k, v in notice.iteritems(): - buffer.writefmt('%s: %s', k, v) - buffer.writefmt() - - #Resources - for resource, data in self.resources.iteritems(): - buffer.writefmt('Identifier: %s', resource) - 
buffer.writefmt('Length: %d', len(data)) - buffer.writefmt() - buffer.write(data) - buffer.writefmt() - buffer.writefmt() - - return buffer.getvalue() - - -class GNTPNotice(_GNTPBase): - """Represents a GNTP Notification Command - - :param string data: (Optional) See decode() - :param string app: (Optional) Set Application-Name - :param string name: (Optional) Set Notification-Name - :param string title: (Optional) Set Notification Title - :param string password: (Optional) Password to use while encoding/decoding messages - """ - _requiredHeaders = [ - 'Application-Name', - 'Notification-Name', - 'Notification-Title' - ] - - def __init__(self, data = None, app = None, name = None, title = None, password = None): - _GNTPBase.__init__(self, 'NOTIFY') - - if data: - self.decode(data, password) - else: - self.set_password(password) - if app: - self.add_header('Application-Name', app) - if name: - self.add_header('Notification-Name', name) - if title: - self.add_header('Notification-Title', title) - - def decode(self, data, password): - """Decode existing GNTP Notification message - - :param string data: Message to decode. 
- """ - self.raw = data - parts = self.raw.split('\r\n\r\n') - self.info = self._parse_info(data) - self._validate_password(password) - self.headers = self._parse_dict(parts[0]) - - for i, part in enumerate(parts): - if i == 0: - continue # Skip Header - if part.strip() == '': - continue - notice = self._parse_dict(part) - if notice.get('Identifier', False): - notice['Data'] = self._decode_binary(part, notice) - #open('notice.png','wblol').write(notice['Data']) - self.resources[notice.get('Identifier')] = notice - - -class GNTPSubscribe(_GNTPBase): - """Represents a GNTP Subscribe Command - - :param string data: (Optional) See decode() - :param string password: (Optional) Password to use while encoding/decoding messages - """ - _requiredHeaders = [ - 'Subscriber-ID', - 'Subscriber-Name', - ] - - def __init__(self, data = None, password = None): - _GNTPBase.__init__(self, 'SUBSCRIBE') - if data: - self.decode(data, password) - else: - self.set_password(password) - - -class GNTPOK(_GNTPBase): - """Represents a GNTP OK Response - - :param string data: (Optional) See _GNTPResponse.decode() - :param string action: (Optional) Set type of action the OK Response is for - """ - _requiredHeaders = ['Response-Action'] - - def __init__(self, data = None, action = None): - _GNTPBase.__init__(self, '-OK') - if data: - self.decode(data) - if action: - self.add_header('Response-Action', action) - - -class GNTPError(_GNTPBase): - """Represents a GNTP Error response - - :param string data: (Optional) See _GNTPResponse.decode() - :param string errorcode: (Optional) Error code - :param string errordesc: (Optional) Error Description - """ - _requiredHeaders = ['Error-Code', 'Error-Description'] - - def __init__(self, data = None, errorcode = None, errordesc = None): - _GNTPBase.__init__(self, '-ERROR') - if data: - self.decode(data) - if errorcode: - self.add_header('Error-Code', errorcode) - self.add_header('Error-Description', errordesc) - - def error(self): - return 
(self.headers.get('Error-Code', None), - self.headers.get('Error-Description', None)) - - -def parse_gntp(data, password = None): - """Attempt to parse a message as a GNTP message - - :param string data: Message to be parsed - :param string password: Optional password to be used to verify the message - """ - match = GNTP_INFO_LINE_SHORT.match(data) - if not match: - raise ParseError('INVALID_GNTP_INFO') - info = match.groupdict() - if info['messagetype'] == 'REGISTER': - return GNTPRegister(data, password = password) - elif info['messagetype'] == 'NOTIFY': - return GNTPNotice(data, password = password) - elif info['messagetype'] == 'SUBSCRIBE': - return GNTPSubscribe(data, password = password) - elif info['messagetype'] == '-OK': - return GNTPOK(data) - elif info['messagetype'] == '-ERROR': - return GNTPError(data) - raise ParseError('INVALID_GNTP_MESSAGE') diff --git a/libs/gntp/cli.py b/libs/gntp/cli.py new file mode 100644 index 0000000000..bc083062b0 --- /dev/null +++ b/libs/gntp/cli.py @@ -0,0 +1,141 @@ +# Copyright: 2013 Paul Traylor +# These sources are released under the terms of the MIT license: see LICENSE + +import logging +import os +import sys +from optparse import OptionParser, OptionGroup + +from gntp.notifier import GrowlNotifier +from gntp.shim import RawConfigParser +from gntp.version import __version__ + +DEFAULT_CONFIG = os.path.expanduser('~/.gntp') + +config = RawConfigParser({ + 'hostname': 'localhost', + 'password': None, + 'port': 23053, +}) +config.read([DEFAULT_CONFIG]) +if not config.has_section('gntp'): + config.add_section('gntp') + + +class ClientParser(OptionParser): + def __init__(self): + OptionParser.__init__(self, version="%%prog %s" % __version__) + + group = OptionGroup(self, "Network Options") + group.add_option("-H", "--host", + dest="host", default=config.get('gntp', 'hostname'), + help="Specify a hostname to which to send a remote notification. 
[%default]") + group.add_option("--port", + dest="port", default=config.getint('gntp', 'port'), type="int", + help="port to listen on [%default]") + group.add_option("-P", "--password", + dest='password', default=config.get('gntp', 'password'), + help="Network password") + self.add_option_group(group) + + group = OptionGroup(self, "Notification Options") + group.add_option("-n", "--name", + dest="app", default='Python GNTP Test Client', + help="Set the name of the application [%default]") + group.add_option("-s", "--sticky", + dest='sticky', default=False, action="store_true", + help="Make the notification sticky [%default]") + group.add_option("--image", + dest="icon", default=None, + help="Icon for notification (URL or /path/to/file)") + group.add_option("-m", "--message", + dest="message", default=None, + help="Sets the message instead of using stdin") + group.add_option("-p", "--priority", + dest="priority", default=0, type="int", + help="-2 to 2 [%default]") + group.add_option("-d", "--identifier", + dest="identifier", + help="Identifier for coalescing") + group.add_option("-t", "--title", + dest="title", default=None, + help="Set the title of the notification [%default]") + group.add_option("-N", "--notification", + dest="name", default='Notification', + help="Set the notification name [%default]") + group.add_option("--callback", + dest="callback", + help="URL callback") + self.add_option_group(group) + + # Extra Options + self.add_option('-v', '--verbose', + dest='verbose', default=0, action='count', + help="Verbosity levels") + + def parse_args(self, args=None, values=None): + values, args = OptionParser.parse_args(self, args, values) + + if values.message is None: + print('Enter a message followed by Ctrl-D') + try: + message = sys.stdin.read() + except KeyboardInterrupt: + exit() + else: + message = values.message + + if values.title is None: + values.title = ' '.join(args) + + # If we still have an empty title, use the + # first bit of the message as 
the title + if values.title == '': + values.title = message[:20] + + values.verbose = logging.WARNING - values.verbose * 10 + + return values, message + + +def main(): + (options, message) = ClientParser().parse_args() + logging.basicConfig(level=options.verbose) + if not os.path.exists(DEFAULT_CONFIG): + logging.info('No config read found at %s', DEFAULT_CONFIG) + + growl = GrowlNotifier( + applicationName=options.app, + notifications=[options.name], + defaultNotifications=[options.name], + hostname=options.host, + password=options.password, + port=options.port, + ) + result = growl.register() + if result is not True: + exit(result) + + # This would likely be better placed within the growl notifier + # class but until I make _checkIcon smarter this is "easier" + if options.icon is not None and not options.icon.startswith('http'): + logging.info('Loading image %s', options.icon) + f = open(options.icon) + options.icon = f.read() + f.close() + + result = growl.notify( + noteType=options.name, + title=options.title, + description=message, + icon=options.icon, + sticky=options.sticky, + priority=options.priority, + callback=options.callback, + identifier=options.identifier, + ) + if result is not True: + exit(result) + +if __name__ == "__main__": + main() diff --git a/libs/gntp/config.py b/libs/gntp/config.py new file mode 100644 index 0000000000..7536bd14c2 --- /dev/null +++ b/libs/gntp/config.py @@ -0,0 +1,77 @@ +# Copyright: 2013 Paul Traylor +# These sources are released under the terms of the MIT license: see LICENSE + +""" +The gntp.config module is provided as an extended GrowlNotifier object that takes +advantage of the ConfigParser module to allow us to setup some default values +(such as hostname, password, and port) in a more global way to be shared among +programs using gntp +""" +import logging +import os + +import gntp.notifier +import gntp.shim + +__all__ = [ + 'mini', + 'GrowlNotifier' +] + +logger = logging.getLogger(__name__) + + +class 
GrowlNotifier(gntp.notifier.GrowlNotifier): + """ + ConfigParser enhanced GrowlNotifier object + + For right now, we are only interested in letting users overide certain + values from ~/.gntp + + :: + + [gntp] + hostname = ? + password = ? + port = ? + """ + def __init__(self, *args, **kwargs): + config = gntp.shim.RawConfigParser({ + 'hostname': kwargs.get('hostname', 'localhost'), + 'password': kwargs.get('password'), + 'port': kwargs.get('port', 23053), + }) + + config.read([os.path.expanduser('~/.gntp')]) + + # If the file does not exist, then there will be no gntp section defined + # and the config.get() lines below will get confused. Since we are not + # saving the config, it should be safe to just add it here so the + # code below doesn't complain + if not config.has_section('gntp'): + logger.info('Error reading ~/.gntp config file') + config.add_section('gntp') + + kwargs['password'] = config.get('gntp', 'password') + kwargs['hostname'] = config.get('gntp', 'hostname') + kwargs['port'] = config.getint('gntp', 'port') + + super(GrowlNotifier, self).__init__(*args, **kwargs) + + +def mini(description, **kwargs): + """Single notification function + + Simple notification function in one line. 
Has only one required parameter + and attempts to use reasonable defaults for everything else + :param string description: Notification message + """ + kwargs['notifierFactory'] = GrowlNotifier + gntp.notifier.mini(description, **kwargs) + + +if __name__ == '__main__': + # If we're running this module directly we're likely running it as a test + # so extra debugging is useful + logging.basicConfig(level=logging.INFO) + mini('Testing mini notification') diff --git a/libs/gntp/core.py b/libs/gntp/core.py new file mode 100644 index 0000000000..ee544d3dde --- /dev/null +++ b/libs/gntp/core.py @@ -0,0 +1,511 @@ +# Copyright: 2013 Paul Traylor +# These sources are released under the terms of the MIT license: see LICENSE + +import hashlib +import re +import time + +import gntp.shim +import gntp.errors as errors + +__all__ = [ + 'GNTPRegister', + 'GNTPNotice', + 'GNTPSubscribe', + 'GNTPOK', + 'GNTPError', + 'parse_gntp', +] + +#GNTP/ [:][ :.] +GNTP_INFO_LINE = re.compile( + 'GNTP/(?P\d+\.\d+) (?PREGISTER|NOTIFY|SUBSCRIBE|\-OK|\-ERROR)' + + ' (?P[A-Z0-9]+(:(?P[A-F0-9]+))?) ?' 
+ + '((?P[A-Z0-9]+):(?P[A-F0-9]+).(?P[A-F0-9]+))?\r\n', + re.IGNORECASE +) + +GNTP_INFO_LINE_SHORT = re.compile( + 'GNTP/(?P\d+\.\d+) (?PREGISTER|NOTIFY|SUBSCRIBE|\-OK|\-ERROR)', + re.IGNORECASE +) + +GNTP_HEADER = re.compile('([\w-]+):(.+)') + +GNTP_EOL = gntp.shim.b('\r\n') +GNTP_SEP = gntp.shim.b(': ') + + +class _GNTPBuffer(gntp.shim.StringIO): + """GNTP Buffer class""" + def writeln(self, value=None): + if value: + self.write(gntp.shim.b(value)) + self.write(GNTP_EOL) + + def writeheader(self, key, value): + if not isinstance(value, str): + value = str(value) + self.write(gntp.shim.b(key)) + self.write(GNTP_SEP) + self.write(gntp.shim.b(value)) + self.write(GNTP_EOL) + + +class _GNTPBase(object): + """Base initilization + + :param string messagetype: GNTP Message type + :param string version: GNTP Protocol version + :param string encription: Encryption protocol + """ + def __init__(self, messagetype=None, version='1.0', encryption=None): + self.info = { + 'version': version, + 'messagetype': messagetype, + 'encryptionAlgorithmID': encryption + } + self.hash_algo = { + 'MD5': hashlib.md5, + 'SHA1': hashlib.sha1, + 'SHA256': hashlib.sha256, + 'SHA512': hashlib.sha512, + } + self.headers = {} + self.resources = {} + + def __str__(self): + return self.encode() + + def _parse_info(self, data): + """Parse the first line of a GNTP message to get security and other info values + + :param string data: GNTP Message + :return dict: Parsed GNTP Info line + """ + + match = GNTP_INFO_LINE.match(data) + + if not match: + raise errors.ParseError('ERROR_PARSING_INFO_LINE') + + info = match.groupdict() + if info['encryptionAlgorithmID'] == 'NONE': + info['encryptionAlgorithmID'] = None + + return info + + def set_password(self, password, encryptAlgo='MD5'): + """Set a password for a GNTP Message + + :param string password: Null to clear password + :param string encryptAlgo: Supports MD5, SHA1, SHA256, SHA512 + """ + if not password: + self.info['encryptionAlgorithmID'] = None + 
self.info['keyHashAlgorithm'] = None + return + + self.password = gntp.shim.b(password) + self.encryptAlgo = encryptAlgo.upper() + + if not self.encryptAlgo in self.hash_algo: + raise errors.UnsupportedError('INVALID HASH "%s"' % self.encryptAlgo) + + hashfunction = self.hash_algo.get(self.encryptAlgo) + + password = password.encode('utf8') + seed = time.ctime().encode('utf8') + salt = hashfunction(seed).hexdigest() + saltHash = hashfunction(seed).digest() + keyBasis = password + saltHash + key = hashfunction(keyBasis).digest() + keyHash = hashfunction(key).hexdigest() + + self.info['keyHashAlgorithmID'] = self.encryptAlgo + self.info['keyHash'] = keyHash.upper() + self.info['salt'] = salt.upper() + + def _decode_hex(self, value): + """Helper function to decode hex string to `proper` hex string + + :param string value: Human readable hex string + :return string: Hex string + """ + result = '' + for i in range(0, len(value), 2): + tmp = int(value[i:i + 2], 16) + result += chr(tmp) + return result + + def _decode_binary(self, rawIdentifier, identifier): + rawIdentifier += '\r\n\r\n' + dataLength = int(identifier['Length']) + pointerStart = self.raw.find(rawIdentifier) + len(rawIdentifier) + pointerEnd = pointerStart + dataLength + data = self.raw[pointerStart:pointerEnd] + if not len(data) == dataLength: + raise errors.ParseError('INVALID_DATA_LENGTH Expected: %s Recieved %s' % (dataLength, len(data))) + return data + + def _validate_password(self, password): + """Validate GNTP Message against stored password""" + self.password = password + if password is None: + raise errors.AuthError('Missing password') + keyHash = self.info.get('keyHash', None) + if keyHash is None and self.password is None: + return True + if keyHash is None: + raise errors.AuthError('Invalid keyHash') + if self.password is None: + raise errors.AuthError('Missing password') + + keyHashAlgorithmID = self.info.get('keyHashAlgorithmID','MD5') + + password = self.password.encode('utf8') + saltHash = 
self._decode_hex(self.info['salt']) + + keyBasis = password + saltHash + self.key = self.hash_algo[keyHashAlgorithmID](keyBasis).digest() + keyHash = self.hash_algo[keyHashAlgorithmID](self.key).hexdigest() + + if not keyHash.upper() == self.info['keyHash'].upper(): + raise errors.AuthError('Invalid Hash') + return True + + def validate(self): + """Verify required headers""" + for header in self._requiredHeaders: + if not self.headers.get(header, False): + raise errors.ParseError('Missing Notification Header: ' + header) + + def _format_info(self): + """Generate info line for GNTP Message + + :return string: + """ + info = 'GNTP/%s %s' % ( + self.info.get('version'), + self.info.get('messagetype'), + ) + if self.info.get('encryptionAlgorithmID', None): + info += ' %s:%s' % ( + self.info.get('encryptionAlgorithmID'), + self.info.get('ivValue'), + ) + else: + info += ' NONE' + + if self.info.get('keyHashAlgorithmID', None): + info += ' %s:%s.%s' % ( + self.info.get('keyHashAlgorithmID'), + self.info.get('keyHash'), + self.info.get('salt') + ) + + return info + + def _parse_dict(self, data): + """Helper function to parse blocks of GNTP headers into a dictionary + + :param string data: + :return dict: Dictionary of parsed GNTP Headers + """ + d = {} + for line in data.split('\r\n'): + match = GNTP_HEADER.match(line) + if not match: + continue + + key = match.group(1).strip() + val = match.group(2).strip() + d[key] = val + return d + + def add_header(self, key, value): + self.headers[key] = value + + def add_resource(self, data): + """Add binary resource + + :param string data: Binary Data + """ + data = gntp.shim.b(data) + identifier = hashlib.md5(data).hexdigest() + self.resources[identifier] = data + return 'x-growl-resource://%s' % identifier + + def decode(self, data, password=None): + """Decode GNTP Message + + :param string data: + """ + self.password = password + self.raw = gntp.shim.u(data) + parts = self.raw.split('\r\n\r\n') + self.info = 
self._parse_info(self.raw) + self.headers = self._parse_dict(parts[0]) + + def encode(self): + """Encode a generic GNTP Message + + :return string: GNTP Message ready to be sent. Returned as a byte string + """ + + buff = _GNTPBuffer() + + buff.writeln(self._format_info()) + + #Headers + for k, v in self.headers.items(): + buff.writeheader(k, v) + buff.writeln() + + #Resources + for resource, data in self.resources.items(): + buff.writeheader('Identifier', resource) + buff.writeheader('Length', len(data)) + buff.writeln() + buff.write(data) + buff.writeln() + buff.writeln() + + return buff.getvalue() + + +class GNTPRegister(_GNTPBase): + """Represents a GNTP Registration Command + + :param string data: (Optional) See decode() + :param string password: (Optional) Password to use while encoding/decoding messages + """ + _requiredHeaders = [ + 'Application-Name', + 'Notifications-Count' + ] + _requiredNotificationHeaders = ['Notification-Name'] + + def __init__(self, data=None, password=None): + _GNTPBase.__init__(self, 'REGISTER') + self.notifications = [] + + if data: + self.decode(data, password) + else: + self.set_password(password) + self.add_header('Application-Name', 'pygntp') + self.add_header('Notifications-Count', 0) + + def validate(self): + '''Validate required headers and validate notification headers''' + for header in self._requiredHeaders: + if not self.headers.get(header, False): + raise errors.ParseError('Missing Registration Header: ' + header) + for notice in self.notifications: + for header in self._requiredNotificationHeaders: + if not notice.get(header, False): + raise errors.ParseError('Missing Notification Header: ' + header) + + def decode(self, data, password): + """Decode existing GNTP Registration message + + :param string data: Message to decode + """ + self.raw = gntp.shim.u(data) + parts = self.raw.split('\r\n\r\n') + self.info = self._parse_info(self.raw) + self._validate_password(password) + self.headers = self._parse_dict(parts[0]) + 
+ for i, part in enumerate(parts): + if i == 0: + continue # Skip Header + if part.strip() == '': + continue + notice = self._parse_dict(part) + if notice.get('Notification-Name', False): + self.notifications.append(notice) + elif notice.get('Identifier', False): + notice['Data'] = self._decode_binary(part, notice) + #open('register.png','wblol').write(notice['Data']) + self.resources[notice.get('Identifier')] = notice + + def add_notification(self, name, enabled=True): + """Add new Notification to Registration message + + :param string name: Notification Name + :param boolean enabled: Enable this notification by default + """ + notice = {} + notice['Notification-Name'] = name + notice['Notification-Enabled'] = enabled + + self.notifications.append(notice) + self.add_header('Notifications-Count', len(self.notifications)) + + def encode(self): + """Encode a GNTP Registration Message + + :return string: Encoded GNTP Registration message. Returned as a byte string + """ + + buff = _GNTPBuffer() + + buff.writeln(self._format_info()) + + #Headers + for k, v in self.headers.items(): + buff.writeheader(k, v) + buff.writeln() + + #Notifications + if len(self.notifications) > 0: + for notice in self.notifications: + for k, v in notice.items(): + buff.writeheader(k, v) + buff.writeln() + + #Resources + for resource, data in self.resources.items(): + buff.writeheader('Identifier', resource) + buff.writeheader('Length', len(data)) + buff.writeln() + buff.write(data) + buff.writeln() + buff.writeln() + + return buff.getvalue() + + +class GNTPNotice(_GNTPBase): + """Represents a GNTP Notification Command + + :param string data: (Optional) See decode() + :param string app: (Optional) Set Application-Name + :param string name: (Optional) Set Notification-Name + :param string title: (Optional) Set Notification Title + :param string password: (Optional) Password to use while encoding/decoding messages + """ + _requiredHeaders = [ + 'Application-Name', + 'Notification-Name', + 
'Notification-Title' + ] + + def __init__(self, data=None, app=None, name=None, title=None, password=None): + _GNTPBase.__init__(self, 'NOTIFY') + + if data: + self.decode(data, password) + else: + self.set_password(password) + if app: + self.add_header('Application-Name', app) + if name: + self.add_header('Notification-Name', name) + if title: + self.add_header('Notification-Title', title) + + def decode(self, data, password): + """Decode existing GNTP Notification message + + :param string data: Message to decode. + """ + self.raw = gntp.shim.u(data) + parts = self.raw.split('\r\n\r\n') + self.info = self._parse_info(self.raw) + self._validate_password(password) + self.headers = self._parse_dict(parts[0]) + + for i, part in enumerate(parts): + if i == 0: + continue # Skip Header + if part.strip() == '': + continue + notice = self._parse_dict(part) + if notice.get('Identifier', False): + notice['Data'] = self._decode_binary(part, notice) + #open('notice.png','wblol').write(notice['Data']) + self.resources[notice.get('Identifier')] = notice + + +class GNTPSubscribe(_GNTPBase): + """Represents a GNTP Subscribe Command + + :param string data: (Optional) See decode() + :param string password: (Optional) Password to use while encoding/decoding messages + """ + _requiredHeaders = [ + 'Subscriber-ID', + 'Subscriber-Name', + ] + + def __init__(self, data=None, password=None): + _GNTPBase.__init__(self, 'SUBSCRIBE') + if data: + self.decode(data, password) + else: + self.set_password(password) + + +class GNTPOK(_GNTPBase): + """Represents a GNTP OK Response + + :param string data: (Optional) See _GNTPResponse.decode() + :param string action: (Optional) Set type of action the OK Response is for + """ + _requiredHeaders = ['Response-Action'] + + def __init__(self, data=None, action=None): + _GNTPBase.__init__(self, '-OK') + if data: + self.decode(data) + if action: + self.add_header('Response-Action', action) + + +class GNTPError(_GNTPBase): + """Represents a GNTP Error 
response + + :param string data: (Optional) See _GNTPResponse.decode() + :param string errorcode: (Optional) Error code + :param string errordesc: (Optional) Error Description + """ + _requiredHeaders = ['Error-Code', 'Error-Description'] + + def __init__(self, data=None, errorcode=None, errordesc=None): + _GNTPBase.__init__(self, '-ERROR') + if data: + self.decode(data) + if errorcode: + self.add_header('Error-Code', errorcode) + self.add_header('Error-Description', errordesc) + + def error(self): + return (self.headers.get('Error-Code', None), + self.headers.get('Error-Description', None)) + + +def parse_gntp(data, password=None): + """Attempt to parse a message as a GNTP message + + :param string data: Message to be parsed + :param string password: Optional password to be used to verify the message + """ + data = gntp.shim.u(data) + match = GNTP_INFO_LINE_SHORT.match(data) + if not match: + raise errors.ParseError('INVALID_GNTP_INFO') + info = match.groupdict() + if info['messagetype'] == 'REGISTER': + return GNTPRegister(data, password=password) + elif info['messagetype'] == 'NOTIFY': + return GNTPNotice(data, password=password) + elif info['messagetype'] == 'SUBSCRIBE': + return GNTPSubscribe(data, password=password) + elif info['messagetype'] == '-OK': + return GNTPOK(data) + elif info['messagetype'] == '-ERROR': + return GNTPError(data) + raise errors.ParseError('INVALID_GNTP_MESSAGE') diff --git a/libs/gntp/errors.py b/libs/gntp/errors.py new file mode 100644 index 0000000000..c006fd680c --- /dev/null +++ b/libs/gntp/errors.py @@ -0,0 +1,25 @@ +# Copyright: 2013 Paul Traylor +# These sources are released under the terms of the MIT license: see LICENSE + +class BaseError(Exception): + pass + + +class ParseError(BaseError): + errorcode = 500 + errordesc = 'Error parsing the message' + + +class AuthError(BaseError): + errorcode = 400 + errordesc = 'Error with authorization' + + +class UnsupportedError(BaseError): + errorcode = 500 + errordesc = 'Currently 
unsupported by gntp.py' + + +class NetworkError(BaseError): + errorcode = 500 + errordesc = "Error connecting to growl server" diff --git a/libs/gntp/notifier.py b/libs/gntp/notifier.py index 539dae2abc..1719ecdf1f 100755 --- a/libs/gntp/notifier.py +++ b/libs/gntp/notifier.py @@ -1,3 +1,6 @@ +# Copyright: 2013 Paul Traylor +# These sources are released under the terms of the MIT license: see LICENSE + """ The gntp.notifier module is provided as a simple way to send notifications using GNTP @@ -9,10 +12,15 @@ `Original Python bindings `_ """ -import gntp -import socket import logging import platform +import socket +import sys + +from gntp.version import __version__ +import gntp.core +import gntp.errors as errors +import gntp.shim __all__ = [ 'mini', @@ -37,9 +45,9 @@ class GrowlNotifier(object): passwordHash = 'MD5' socketTimeout = 3 - def __init__(self, applicationName = 'Python GNTP', notifications = [], - defaultNotifications = None, applicationIcon = None, hostname = 'localhost', - password = None, port = 23053): + def __init__(self, applicationName='Python GNTP', notifications=[], + defaultNotifications=None, applicationIcon=None, hostname='localhost', + password=None, port=23053): self.applicationName = applicationName self.notifications = list(notifications) @@ -61,7 +69,7 @@ def _checkIcon(self, data): then we return False ''' logger.info('Checking icon') - return data.startswith('http') + return gntp.shim.u(data).startswith('http') def register(self): """Send GNTP Registration @@ -71,7 +79,7 @@ def register(self): sent a registration message at least once """ logger.info('Sending registration to %s:%s', self.hostname, self.port) - register = gntp.GNTPRegister() + register = gntp.core.GNTPRegister() register.add_header('Application-Name', self.applicationName) for notification in self.notifications: enabled = notification in self.defaultNotifications @@ -80,16 +88,16 @@ def register(self): if self._checkIcon(self.applicationIcon): 
register.add_header('Application-Icon', self.applicationIcon) else: - id = register.add_resource(self.applicationIcon) - register.add_header('Application-Icon', id) + resource = register.add_resource(self.applicationIcon) + register.add_header('Application-Icon', resource) if self.password: register.set_password(self.password, self.passwordHash) self.add_origin_info(register) self.register_hook(register) return self._send('register', register) - def notify(self, noteType, title, description, icon = None, sticky = False, - priority = None, callback = None, identifier = None): + def notify(self, noteType, title, description, icon=None, sticky=False, + priority=None, callback=None, identifier=None, custom={}): """Send a GNTP notifications .. warning:: @@ -102,6 +110,8 @@ def notify(self, noteType, title, description, icon = None, sticky = False, :param boolean sticky: Sticky notification :param integer priority: Message priority level from -2 to 2 :param string callback: URL callback + :param dict custom: Custom attributes. Key names should be prefixed with X- + according to the spec but this is not enforced by this class .. warning:: For now, only URL callbacks are supported. 
In the future, the @@ -109,7 +119,7 @@ def notify(self, noteType, title, description, icon = None, sticky = False, """ logger.info('Sending notification [%s] to %s:%s', noteType, self.hostname, self.port) assert noteType in self.notifications - notice = gntp.GNTPNotice() + notice = gntp.core.GNTPNotice() notice.add_header('Application-Name', self.applicationName) notice.add_header('Notification-Name', noteType) notice.add_header('Notification-Title', title) @@ -123,8 +133,8 @@ def notify(self, noteType, title, description, icon = None, sticky = False, if self._checkIcon(icon): notice.add_header('Notification-Icon', icon) else: - id = notice.add_resource(icon) - notice.add_header('Notification-Icon', id) + resource = notice.add_resource(icon) + notice.add_header('Notification-Icon', resource) if description: notice.add_header('Notification-Text', description) @@ -133,6 +143,9 @@ def notify(self, noteType, title, description, icon = None, sticky = False, if identifier: notice.add_header('Notification-Coalescing-ID', identifier) + for key in custom: + notice.add_header(key, custom[key]) + self.add_origin_info(notice) self.notify_hook(notice) @@ -140,7 +153,7 @@ def notify(self, noteType, title, description, icon = None, sticky = False, def subscribe(self, id, name, port): """Send a Subscribe request to a remote machine""" - sub = gntp.GNTPSubscribe() + sub = gntp.core.GNTPSubscribe() sub.add_header('Subscriber-ID', id) sub.add_header('Subscriber-Name', name) sub.add_header('Subscriber-Port', port) @@ -156,7 +169,7 @@ def add_origin_info(self, packet): """Add optional Origin headers to message""" packet.add_header('Origin-Machine-Name', platform.node()) packet.add_header('Origin-Software-Name', 'gntp.py') - packet.add_header('Origin-Software-Version', gntp.__version__) + packet.add_header('Origin-Software-Version', __version__) packet.add_header('Origin-Platform-Name', platform.system()) packet.add_header('Origin-Platform-Version', platform.platform()) @@ -179,27 
+192,33 @@ def _send(self, messagetype, packet): s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.settimeout(self.socketTimeout) - s.connect((self.hostname, self.port)) - s.send(data) - recv_data = s.recv(1024) - while not recv_data.endswith("\r\n\r\n"): - recv_data += s.recv(1024) - response = gntp.parse_gntp(recv_data) + try: + s.connect((self.hostname, self.port)) + s.send(data) + recv_data = s.recv(1024) + while not recv_data.endswith(gntp.shim.b("\r\n\r\n")): + recv_data += s.recv(1024) + except socket.error: + # Python2.5 and Python3 compatibile exception + exc = sys.exc_info()[1] + raise errors.NetworkError(exc) + + response = gntp.core.parse_gntp(recv_data) s.close() logger.debug('From : %s:%s <%s>\n%s', self.hostname, self.port, response.__class__, response) - if type(response) == gntp.GNTPOK: + if type(response) == gntp.core.GNTPOK: return True logger.error('Invalid response: %s', response.error()) return response.error() -def mini(description, applicationName = 'PythonMini', noteType = "Message", - title = "Mini Message", applicationIcon = None, hostname = 'localhost', - password = None, port = 23053, sticky = False, priority = None, - callback = None, notificationIcon = None, identifier = None, - notifierFactory = GrowlNotifier): +def mini(description, applicationName='PythonMini', noteType="Message", + title="Mini Message", applicationIcon=None, hostname='localhost', + password=None, port=23053, sticky=False, priority=None, + callback=None, notificationIcon=None, identifier=None, + notifierFactory=GrowlNotifier): """Single notification function Simple notification function in one line. Has only one required parameter @@ -210,32 +229,37 @@ def mini(description, applicationName = 'PythonMini', noteType = "Message", For now, only URL callbacks are supported. 
In the future, the callback argument will also support a function """ - growl = notifierFactory( - applicationName = applicationName, - notifications = [noteType], - defaultNotifications = [noteType], - applicationIcon = applicationIcon, - hostname = hostname, - password = password, - port = port, - ) - result = growl.register() - if result is not True: - return result - - return growl.notify( - noteType = noteType, - title = title, - description = description, - icon = notificationIcon, - sticky = sticky, - priority = priority, - callback = callback, - identifier = identifier, - ) + try: + growl = notifierFactory( + applicationName=applicationName, + notifications=[noteType], + defaultNotifications=[noteType], + applicationIcon=applicationIcon, + hostname=hostname, + password=password, + port=port, + ) + result = growl.register() + if result is not True: + return result + + return growl.notify( + noteType=noteType, + title=title, + description=description, + icon=notificationIcon, + sticky=sticky, + priority=priority, + callback=callback, + identifier=identifier, + ) + except Exception: + # We want the "mini" function to be simple and swallow Exceptions + # in order to be less invasive + logger.exception("Growl error") if __name__ == '__main__': # If we're running this module directly we're likely running it as a test # so extra debugging is useful - logging.basicConfig(level = logging.INFO) + logging.basicConfig(level=logging.INFO) mini('Testing mini notification') diff --git a/libs/gntp/shim.py b/libs/gntp/shim.py new file mode 100644 index 0000000000..3a3878288e --- /dev/null +++ b/libs/gntp/shim.py @@ -0,0 +1,45 @@ +# Copyright: 2013 Paul Traylor +# These sources are released under the terms of the MIT license: see LICENSE + +""" +Python2.5 and Python3.3 compatibility shim + +Heavily inspirted by the "six" library. 
+https://pypi.python.org/pypi/six +""" + +import sys + +PY3 = sys.version_info[0] == 3 + +if PY3: + def b(s): + if isinstance(s, bytes): + return s + return s.encode('utf8', 'replace') + + def u(s): + if isinstance(s, bytes): + return s.decode('utf8', 'replace') + return s + + from io import BytesIO as StringIO + from configparser import RawConfigParser +else: + def b(s): + if isinstance(s, unicode): + return s.encode('utf8', 'replace') + return s + + def u(s): + if isinstance(s, unicode): + return s + if isinstance(s, int): + s = str(s) + return unicode(s, "utf8", "replace") + + from StringIO import StringIO + from ConfigParser import RawConfigParser + +b.__doc__ = "Ensure we have a byte string" +u.__doc__ = "Ensure we have a unicode string" diff --git a/libs/gntp/version.py b/libs/gntp/version.py new file mode 100644 index 0000000000..2166aacaac --- /dev/null +++ b/libs/gntp/version.py @@ -0,0 +1,4 @@ +# Copyright: 2013 Paul Traylor +# These sources are released under the terms of the MIT license: see LICENSE + +__version__ = '1.0.2' diff --git a/libs/guessit/ISO-3166-1_utf8.txt b/libs/guessit/ISO-3166-1_utf8.txt old mode 100644 new mode 100755 diff --git a/libs/guessit/ISO-639-2_utf-8.txt b/libs/guessit/ISO-639-2_utf-8.txt old mode 100644 new mode 100755 diff --git a/libs/guessit/__init__.py b/libs/guessit/__init__.py old mode 100644 new mode 100755 index e19da0955e..e6cfa276af --- a/libs/guessit/__init__.py +++ b/libs/guessit/__init__.py @@ -18,8 +18,9 @@ # along with this program. If not, see . 
# +from __future__ import unicode_literals -__version__ = '0.5.2' +__version__ = '0.6.2' __all__ = ['Guess', 'Language', 'guess_file_info', 'guess_video_info', 'guess_movie_info', 'guess_episode_info'] @@ -73,7 +74,9 @@ def to_hex(x): from guessit.guess import Guess, merge_all from guessit.language import Language from guessit.matcher import IterativeMatcher +from guessit.textutils import clean_string import logging +import json log = logging.getLogger(__name__) @@ -88,6 +91,110 @@ def emit(self, record): log.addHandler(h) +def _guess_filename(filename, filetype): + def find_nodes(tree, props): + """Yields all nodes containing any of the given props.""" + if isinstance(props, base_text_type): + props = [props] + for node in tree.nodes(): + if any(prop in node.guess for prop in props): + yield node + + def warning(title): + log.warning('%s, guesses: %s - %s' % (title, m.nice_string(), m2.nice_string())) + return m + + mtree = IterativeMatcher(filename, filetype=filetype) + + m = mtree.matched() + + second_pass_opts = [] + second_pass_transfo_opts = {} + + # if there are multiple possible years found, we assume the first one is + # part of the title, reparse the tree taking this into account + years = set(n.value for n in find_nodes(mtree.match_tree, 'year')) + if len(years) >= 2: + second_pass_opts.append('skip_first_year') + + to_skip_language_nodes = [] + + title_nodes = set(n for n in find_nodes(mtree.match_tree, ['title', 'series'])) + title_spans = {} + for title_node in title_nodes: + title_spans[title_node.span[0]] = title_node + title_spans[title_node.span[1]] = title_node + + for lang_key in ('language', 'subtitleLanguage'): + langs = {} + lang_nodes = set(n for n in find_nodes(mtree.match_tree, lang_key)) + + for lang_node in lang_nodes: + lang = lang_node.guess.get(lang_key, None) + if len(lang_node.value) > 3 and (lang_node.span[0] in title_spans.keys() or lang_node.span[1] in title_spans.keys()): + # Language is next or before title, and is not a 
language code. Add to skip for 2nd pass. + + # if filetype is subtitle and the language appears last, just before + # the extension, then it is likely a subtitle language + parts = clean_string(lang_node.root.value).split() + if m['type'] in ['moviesubtitle', 'episodesubtitle'] and (parts.index(lang_node.value) == len(parts) - 2): + continue + + to_skip_language_nodes.append(lang_node) + elif not lang in langs: + langs[lang] = lang_node + else: + # The same language was found. Keep the more confident one, and add others to skip for 2nd pass. + existing_lang_node = langs[lang] + to_skip = None + if existing_lang_node.guess.confidence('language') >= lang_node.guess.confidence('language'): + # lang_node is to remove + to_skip = lang_node + else: + # existing_lang_node is to remove + langs[lang] = lang_node + to_skip = existing_lang_node + to_skip_language_nodes.append(to_skip) + + + if to_skip_language_nodes: + second_pass_transfo_opts['guess_language'] = ( + ((), { 'skip': [ { 'node_idx': node.parent.node_idx, + 'span': node.span } + for node in to_skip_language_nodes ] })) + + if second_pass_opts or second_pass_transfo_opts: + # 2nd pass is needed + log.info("Running 2nd pass with options: %s" % second_pass_opts) + log.info("Transfo options: %s" % second_pass_transfo_opts) + mtree = IterativeMatcher(filename, filetype=filetype, + opts=second_pass_opts, + transfo_opts=second_pass_transfo_opts) + + m = mtree.matched() + + if 'language' not in m and 'subtitleLanguage' not in m or 'title' not in m: + return m + + # if we found some language, make sure we didn't cut a title or sth... 
+ mtree2 = IterativeMatcher(filename, filetype=filetype, + opts=['nolanguage', 'nocountry']) + m2 = mtree2.matched() + + if m.get('title') != m2.get('title'): + title = next(find_nodes(mtree.match_tree, 'title')) + title2 = next(find_nodes(mtree2.match_tree, 'title')) + + # if a node is in an explicit group, then the correct title is probably + # the other one + if title.root.node_at(title.node_idx[:2]).is_explicit(): + return m2 + elif title2.root.node_at(title2.node_idx[:2]).is_explicit(): + return m + + return m + + def guess_file_info(filename, filetype, info=None): """info can contain the names of the various plugins, such as 'filename' to detect filename info, or 'hash_md5' to get the md5 hash of the file. @@ -98,6 +205,9 @@ def guess_file_info(filename, filetype, info=None): result = [] hashers = [] + # Force unicode as soon as possible + filename = u(filename) + if info is None: info = ['filename'] @@ -106,8 +216,7 @@ def guess_file_info(filename, filetype, info=None): for infotype in info: if infotype == 'filename': - m = IterativeMatcher(filename, filetype=filetype) - result.append(m.matched()) + result.append(_guess_filename(filename, filetype)) elif infotype == 'hash_mpc': from guessit.hash_mpc import hash_file @@ -161,7 +270,7 @@ def guess_file_info(filename, filetype, info=None): # last minute adjustments # if country is in the guessed properties, make it part of the filename - if 'country' in result: + if 'series' in result and 'country' in result: result['series'] += ' (%s)' % result['country'].alpha2.upper() diff --git a/libs/guessit/__main__.py b/libs/guessit/__main__.py new file mode 100755 index 0000000000..ccfa3af67e --- /dev/null +++ b/libs/guessit/__main__.py @@ -0,0 +1,126 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# GuessIt - A library for guessing information from filenames +# Copyright (c) 2011 Nicolas Wack +# +# GuessIt is free software; you can redistribute it and/or modify it under +# the terms of the Lesser GNU General 
Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# GuessIt is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# Lesser GNU General Public License for more details. +# +# You should have received a copy of the Lesser GNU General Public License +# along with this program. If not, see . +# + +from __future__ import unicode_literals +from __future__ import print_function +from guessit import u +from guessit import slogging, guess_file_info +from optparse import OptionParser +import logging +import sys +import os +import locale + + +def detect_filename(filename, filetype, info=['filename'], advanced = False): + filename = u(filename) + + print('For:', filename) + print('GuessIt found:', guess_file_info(filename, filetype, info).nice_string(advanced)) + + +def run_demo(episodes=True, movies=True, advanced=False): + # NOTE: tests should not be added here but rather in the tests/ folder + # this is just intended as a quick example + if episodes: + testeps = [ 'Series/Californication/Season 2/Californication.2x05.Vaginatown.HDTV.XviD-0TV.[tvu.org.ru].avi', + 'Series/dexter/Dexter.5x02.Hello,.Bandit.ENG.-.sub.FR.HDTV.XviD-AlFleNi-TeaM.[tvu.org.ru].avi', + 'Series/Treme/Treme.1x03.Right.Place,.Wrong.Time.HDTV.XviD-NoTV.[tvu.org.ru].avi', + 'Series/Duckman/Duckman - 101 (01) - 20021107 - I, Duckman.avi', + 'Series/Duckman/Duckman - S1E13 Joking The Chicken (unedited).avi', + 'Series/Simpsons/The_simpsons_s13e18_-_i_am_furious_yellow.mpg', + 'Series/Simpsons/Saison 12 Franц╖ais/Simpsons,.The.12x08.A.Bas.Le.Sergent.Skinner.FR.[tvu.org.ru].avi', + 'Series/Dr._Slump_-_002_DVB-Rip_Catalan_by_kelf.avi', + 'Series/Kaamelott/Kaamelott - Livre V - Second Volet - HD 704x396 Xvid 2 pass - Son 5.1 - TntRip by Slurm.avi' + ] + + for f in testeps: + print('-'*80) + 
detect_filename(f, filetype='episode', advanced=advanced) + + + if movies: + testmovies = [ 'Movies/Fear and Loathing in Las Vegas (1998)/Fear.and.Loathing.in.Las.Vegas.720p.HDDVD.DTS.x264-ESiR.mkv', + 'Movies/El Dia de la Bestia (1995)/El.dia.de.la.bestia.DVDrip.Spanish.DivX.by.Artik[SEDG].avi', + 'Movies/Blade Runner (1982)/Blade.Runner.(1982).(Director\'s.Cut).CD1.DVDRip.XviD.AC3-WAF.avi', + 'Movies/Dark City (1998)/Dark.City.(1998).DC.BDRip.720p.DTS.X264-CHD.mkv', + 'Movies/Sin City (BluRay) (2005)/Sin.City.2005.BDRip.720p.x264.AC3-SEPTiC.mkv', + 'Movies/Borat (2006)/Borat.(2006).R5.PROPER.REPACK.DVDRip.XviD-PUKKA.avi', # FIXME: PROPER and R5 get overwritten + '[XCT].Le.Prestige.(The.Prestige).DVDRip.[x264.HP.He-Aac.{Fr-Eng}.St{Fr-Eng}.Chaps].mkv', # FIXME: title gets overwritten + 'Battle Royale (2000)/Battle.Royale.(Batoru.Rowaiaru).(2000).(Special.Edition).CD1of2.DVDRiP.XviD-[ZeaL].avi', + 'Movies/Brazil (1985)/Brazil_Criterion_Edition_(1985).CD2.English.srt', + 'Movies/Persepolis (2007)/[XCT] Persepolis [H264+Aac-128(Fr-Eng)+ST(Fr-Eng)+Ind].mkv', + 'Movies/Toy Story (1995)/Toy Story [HDTV 720p English-Spanish].mkv', + 'Movies/Pirates of the Caribbean: The Curse of the Black Pearl (2003)/Pirates.Of.The.Carribean.DC.2003.iNT.DVDRip.XviD.AC3-NDRT.CD1.avi', + 'Movies/Office Space (1999)/Office.Space.[Dual-DVDRip].[Spanish-English].[XviD-AC3-AC3].[by.Oswald].avi', + 'Movies/The NeverEnding Story (1984)/The.NeverEnding.Story.1.1984.DVDRip.AC3.Xvid-Monteque.avi', + 'Movies/Juno (2007)/Juno KLAXXON.avi', + 'Movies/Chat noir, chat blanc (1998)/Chat noir, Chat blanc - Emir Kusturica (VO - VF - sub FR - Chapters).mkv', + 'Movies/Wild Zero (2000)/Wild.Zero.DVDivX-EPiC.srt', + 'Movies/El Bosque Animado (1987)/El.Bosque.Animado.[Jose.Luis.Cuerda.1987].[Xvid-Dvdrip-720x432].avi', + 'testsmewt_bugs/movies/Baraka_Edition_Collector.avi' + ] + + for f in testmovies: + print('-'*80) + detect_filename(f, filetype = 'movie', advanced = advanced) + + +def main(): + 
slogging.setupLogging() + + # see http://bugs.python.org/issue2128 + if sys.version_info.major < 3 and os.name == 'nt': + for i, a in enumerate(sys.argv): + sys.argv[i] = a.decode(locale.getpreferredencoding()) + + parser = OptionParser(usage = 'usage: %prog [options] file1 [file2...]') + parser.add_option('-v', '--verbose', action='store_true', dest='verbose', default=False, + help = 'display debug output') + parser.add_option('-i', '--info', dest = 'info', default = 'filename', + help = 'the desired information type: filename, hash_mpc or a hash from python\'s ' + 'hashlib module, such as hash_md5, hash_sha1, ...; or a list of any of ' + 'them, comma-separated') + parser.add_option('-t', '--type', dest = 'filetype', default = 'autodetect', + help = 'the suggested file type: movie, episode or autodetect') + parser.add_option('-a', '--advanced', dest = 'advanced', action='store_true', default = False, + help = 'display advanced information for filename guesses, as json output') + parser.add_option('-d', '--demo', action='store_true', dest='demo', default=False, + help = 'run a few builtin tests instead of analyzing a file') + + options, args = parser.parse_args() + if options.verbose: + logging.getLogger('guessit').setLevel(logging.DEBUG) + + if options.demo: + run_demo(episodes=True, movies=True, advanced=options.advanced) + else: + if args: + for filename in args: + detect_filename(filename, + filetype = options.filetype, + info = options.info.split(','), + advanced = options.advanced) + + else: + parser.print_help() + +if __name__ == '__main__': + main() diff --git a/libs/guessit/country.py b/libs/guessit/country.py old mode 100644 new mode 100755 diff --git a/libs/guessit/date.py b/libs/guessit/date.py old mode 100644 new mode 100755 diff --git a/libs/guessit/fileutils.py b/libs/guessit/fileutils.py old mode 100644 new mode 100755 index 2fca6b7b29..993952afb9 --- a/libs/guessit/fileutils.py +++ b/libs/guessit/fileutils.py @@ -22,6 +22,8 @@ from guessit import 
s, u import os.path import zipfile +import io +import re def split_path(path): @@ -43,13 +45,21 @@ def split_path(path): result = [] while True: head, tail = os.path.split(path) + headlen = len(head) + + # if a string has a : in position 1 it gets splitted in everycase, also if + # there is not a valid drive letter and also if : is not followed by \ + if headlen >= 2 and headlen <= 3 and head[1] == ':' and ( head + tail == path ) and ( head[1:] != ':\\' or not re.match("^[a-zA-Z]:\\\\", head) ): + tail = path + head = '' + headlen = 0 # on Unix systems, the root folder is '/' - if head == '/' and tail == '': + if head and head == '/'*headlen and tail == '': return ['/'] + result # on Windows, the root folder is a drive letter (eg: 'C:\') or for shares \\ - if ((len(head) == 3 and head[1:] == ':\\') or (len(head) == 2 and head == '\\\\')) and tail == '': + if ((headlen == 3 and head[1:] == ':\\') or (headlen == 2 and head == '\\\\')) and tail == '': return [head] + result if head == '' and tail == '': @@ -60,6 +70,7 @@ def split_path(path): path = head continue + # otherwise, add the last path fragment and keep splitting result = [tail] + result path = head @@ -76,7 +87,9 @@ def file_in_same_dir(ref_file, desired_file): def load_file_in_same_dir(ref_file, filename): """Load a given file. 
Works even when the file is contained inside a zip.""" - path = split_path(ref_file)[:-1] + [filename] + + from couchpotato.core.helpers.encoding import toUnicode + path = split_path(toUnicode(ref_file))[:-1] + [filename] for i, p in enumerate(path): if p.endswith('.zip'): @@ -84,4 +97,4 @@ def load_file_in_same_dir(ref_file, filename): zfile = zipfile.ZipFile(zfilename) return zfile.read('/'.join(path[i + 1:])) - return u(open(os.path.join(*path)).read()) + return u(io.open(os.path.join(*path), encoding='utf-8').read()) diff --git a/libs/guessit/guess.py b/libs/guessit/guess.py old mode 100644 new mode 100755 index 801af553a7..73babcebd0 --- a/libs/guessit/guess.py +++ b/libs/guessit/guess.py @@ -41,15 +41,21 @@ def __init__(self, *args, **kwargs): confidence = kwargs.pop('confidence') except KeyError: confidence = 0 + + try: + raw = kwargs.pop('raw') + except KeyError: + raw = None dict.__init__(self, *args, **kwargs) self._confidence = {} + self._raw = {} for prop in self: self._confidence[prop] = confidence - - - def to_dict(self): + self._raw[prop] = raw + + def to_dict(self, advanced=False): data = dict(self) for prop, value in data.items(): if isinstance(value, datetime.date): @@ -58,46 +64,65 @@ def to_dict(self): data[prop] = u(value) elif isinstance(value, list): data[prop] = [u(x) for x in value] + if advanced: + data[prop] = {"value": data[prop], "raw": self.raw(prop), "confidence": self.confidence(prop)} return data - def nice_string(self): - data = self.to_dict() - - parts = json.dumps(data, indent=4).split('\n') - for i, p in enumerate(parts): - if p[:5] != ' "': - continue - - prop = p.split('"')[1] - parts[i] = (' [%.2f] "' % self.confidence(prop)) + p[5:] - - return '\n'.join(parts) + def nice_string(self, advanced=False): + if advanced: + data = self.to_dict(advanced) + return json.dumps(data, indent=4) + else: + data = self.to_dict() + + parts = json.dumps(data, indent=4).split('\n') + for i, p in enumerate(parts): + if p[:5] != ' "': + continue 
+ + prop = p.split('"')[1] + parts[i] = (' [%.2f] "' % self.confidence(prop)) + p[5:] + + return '\n'.join(parts) def __unicode__(self): return u(self.to_dict()) def confidence(self, prop): return self._confidence.get(prop, -1) + + def raw(self, prop): + return self._raw.get(prop, None) - def set(self, prop, value, confidence=None): + def set(self, prop, value, confidence=None, raw=None): self[prop] = value if confidence is not None: self._confidence[prop] = confidence + if raw is not None: + self._raw[prop] = raw def set_confidence(self, prop, value): self._confidence[prop] = value + + def set_raw(self, prop, value): + self._raw[prop] = value - def update(self, other, confidence=None): + def update(self, other, confidence=None, raw=None): dict.update(self, other) if isinstance(other, Guess): for prop in other: self._confidence[prop] = other.confidence(prop) + self._raw[prop] = other.raw(prop) if confidence is not None: for prop in other: self._confidence[prop] = confidence + if raw is not None: + for prop in other: + self._raw[prop] = raw + def update_highest_confidence(self, other): """Update this guess with the values from the given one. In case there is property present in both, only the one with the highest one @@ -110,6 +135,7 @@ def update_highest_confidence(self, other): continue self[prop] = other[prop] self._confidence[prop] = other.confidence(prop) + self._raw[prop] = other.raw(prop) def choose_int(g1, g2): @@ -181,7 +207,7 @@ def choose_string(g1, g2): elif v1l in v2l: return (v1, combined_prob) - # in case of conflict, return the one with highest priority + # in case of conflict, return the one with highest confidence else: if c1 > c2: return (v1, c1 - c2) @@ -253,48 +279,26 @@ def merge_similar_guesses(guesses, prop, choose): merge_similar_guesses(guesses, prop, choose) -def merge_append_guesses(guesses, prop): - """Take a list of guesses and merge those which have the same properties by - appending them in a list. 
- - DEPRECATED, remove with old guessers - - """ - similar = [guess for guess in guesses if prop in guess] - if not similar: - return - - merged = similar[0] - merged[prop] = [merged[prop]] - # TODO: what to do with global confidence? mean of them all? - - for m in similar[1:]: - for prop2 in m: - if prop == prop2: - merged[prop].append(m[prop]) - else: - if prop2 in m: - log.warning('overwriting property "%s" with value %s' % (prop2, m[prop2])) - merged[prop2] = m[prop2] - # TODO: confidence also - - guesses.remove(m) - - def merge_all(guesses, append=None): """Merge all the guesses in a single result, remove very unlikely values, and return it. You can specify a list of properties that should be appended into a list instead of being merged. - >>> s(merge_all([ Guess({ 'season': 2 }, confidence = 0.6), - ... Guess({ 'episodeNumber': 13 }, confidence = 0.8) ])) + >>> s(merge_all([ Guess({'season': 2}, confidence=0.6), + ... Guess({'episodeNumber': 13}, confidence=0.8) ])) {'season': 2, 'episodeNumber': 13} - >>> s(merge_all([ Guess({ 'episodeNumber': 27 }, confidence = 0.02), - ... Guess({ 'season': 1 }, confidence = 0.2) ])) + >>> s(merge_all([ Guess({'episodeNumber': 27}, confidence=0.02), + ... Guess({'season': 1}, confidence=0.2) ])) {'season': 1} + >>> s(merge_all([ Guess({'other': 'PROPER'}, confidence=0.8), + ... Guess({'releaseGroup': '2HD'}, confidence=0.8) ], + ... append=['other'])) + {'releaseGroup': '2HD', 'other': ['PROPER']} + + """ if not guesses: return Guess() @@ -310,14 +314,15 @@ def merge_all(guesses, append=None): result.set(prop, result.get(prop, []) + [g[prop]], # TODO: what to do with confidence here? maybe an # arithmetic mean... - confidence=g.confidence(prop)) + confidence=g.confidence(prop), + raw=g.raw(prop)) del g[prop] # then merge the remaining ones dups = set(result) & set(g) if dups: - log.warning('duplicate properties %s in merged result...' % dups) + log.warning('duplicate properties %s in merged result...' 
% [ (result[p], g[p]) for p in dups] ) result.update_highest_confidence(g) @@ -328,7 +333,13 @@ def merge_all(guesses, append=None): # make sure our appendable properties contain unique values for prop in append: - if prop in result: - result[prop] = list(set(result[prop])) + try: + value = result[prop] + if isinstance(value, list): + result[prop] = list(set(value)) + else: + result[prop] = [ value ] + except KeyError: + pass return result diff --git a/libs/guessit/hash_ed2k.py b/libs/guessit/hash_ed2k.py old mode 100644 new mode 100755 diff --git a/libs/guessit/hash_mpc.py b/libs/guessit/hash_mpc.py old mode 100644 new mode 100755 diff --git a/libs/guessit/language.py b/libs/guessit/language.py old mode 100644 new mode 100755 index ccdd9cadde..4d22cf05dd --- a/libs/guessit/language.py +++ b/libs/guessit/language.py @@ -21,13 +21,14 @@ from __future__ import unicode_literals from guessit import UnicodeMixin, base_text_type, u, s from guessit.fileutils import load_file_in_same_dir +from guessit.textutils import find_words from guessit.country import Country import re import logging __all__ = [ 'is_iso_language', 'is_language', 'lang_set', 'Language', 'ALL_LANGUAGES', 'ALL_LANGUAGES_NAMES', 'UNDETERMINED', - 'search_language' ] + 'search_language', 'guess_language' ] log = logging.getLogger(__name__) @@ -295,7 +296,7 @@ def __repr__(self): ALL_LANGUAGES = frozenset(Language(lng) for lng in lng_all_names) - frozenset([UNDETERMINED]) ALL_LANGUAGES_NAMES = lng_all_names -def search_language(string, lang_filter=None): +def search_language(string, lang_filter=None, skip=None): """Looks for language patterns, and if found return the language object, its group span and an associated confidence. 
@@ -317,7 +318,7 @@ def search_language(string, lang_filter=None): 'is', 'it', 'am', 'mad', 'men', 'man', 'run', 'sin', 'st', 'to', 'no', 'non', 'war', 'min', 'new', 'car', 'day', 'bad', 'bat', 'fan', 'fry', 'cop', 'zen', 'gay', 'fat', 'cherokee', 'got', 'an', 'as', - 'cat', 'her', 'be', 'hat', 'sun', 'may', 'my', 'mr', + 'cat', 'her', 'be', 'hat', 'sun', 'may', 'my', 'mr', 'rum', 'pi', # french words 'bas', 'de', 'le', 'son', 'vo', 'vf', 'ne', 'ca', 'ce', 'et', 'que', 'mal', 'est', 'vol', 'or', 'mon', 'se', @@ -325,7 +326,7 @@ def search_language(string, lang_filter=None): 'la', 'el', 'del', 'por', 'mar', # other 'ind', 'arw', 'ts', 'ii', 'bin', 'chan', 'ss', 'san', 'oss', 'iii', - 'vi' + 'vi', 'ben', 'da', 'lt' ]) sep = r'[](){} \._-+' @@ -334,7 +335,8 @@ def search_language(string, lang_filter=None): slow = ' %s ' % string.lower() confidence = 1.0 # for all of them - for lang in lng_all_names: + + for lang in set(find_words(slow)) & lng_all_names: if lang in lng_common_words: continue @@ -343,6 +345,16 @@ def search_language(string, lang_filter=None): if pos != -1: end = pos + len(lang) + + # skip if span in in skip list + while skip and (pos - 1, end - 1) in skip: + pos = slow.find(lang, end) + if pos == -1: + continue + end = pos + len(lang) + if pos == -1: + continue + # make sure our word is always surrounded by separators if slow[pos - 1] not in sep or slow[end] not in sep: continue @@ -351,7 +363,7 @@ def search_language(string, lang_filter=None): if lang_filter and language not in lang_filter: continue - # only allow those languages that have a 2-letter code, those who + # only allow those languages that have a 2-letter code, those that # don't are too esoteric and probably false matches if language.lang not in lng3_to_lng2: continue @@ -364,9 +376,25 @@ def search_language(string, lang_filter=None): else: # Note: we could either be really confident that we found a # language or assume that full language names are too - # common words + # common words and 
lower their confidence accordingly confidence = 0.3 # going with the low-confidence route here return language, (pos - 1, end - 1), confidence return None, None, None + + +def guess_language(text): + """Guess the language in which a body of text is written. + + This uses the external guess-language python module, and will fail and return + Language(Undetermined) if it is not installed. + """ + try: + from guess_language import guessLanguage + return Language(guessLanguage(text)) + + except ImportError: + log.error('Cannot detect the language of the given text body, missing dependency: guess-language') + log.error('Please install it from PyPI, by doing eg: pip install guess-language') + return UNDETERMINED diff --git a/libs/guessit/matcher.py b/libs/guessit/matcher.py old mode 100644 new mode 100755 index d3392f68b7..1984c01c63 --- a/libs/guessit/matcher.py +++ b/libs/guessit/matcher.py @@ -19,18 +19,16 @@ # from __future__ import unicode_literals -from guessit import PY3, u +from guessit import PY3, u, base_text_type from guessit.matchtree import MatchTree -from guessit.guess import (merge_similar_guesses, merge_all, - choose_int, choose_string) -import copy +from guessit.textutils import normalize_unicode, clean_string import logging log = logging.getLogger(__name__) class IterativeMatcher(object): - def __init__(self, filename, filetype='autodetect'): + def __init__(self, filename, filetype='autodetect', opts=None, transfo_opts=None): """An iterative matcher tries to match different patterns that appear in the filename. @@ -40,7 +38,8 @@ def __init__(self, filename, filetype='autodetect'): a movie. 
The recognized 'filetype' values are: - [ autodetect, subtitle, movie, moviesubtitle, episode, episodesubtitle ] + [ autodetect, subtitle, info, movie, moviesubtitle, movieinfo, episode, + episodesubtitle, episodeinfo ] The IterativeMatcher works mainly in 2 steps: @@ -63,29 +62,58 @@ def __init__(self, filename, filetype='autodetect'): it corresponds to a video codec, denoted by the letter'v' in the 4th line. (for more info, see guess.matchtree.to_string) + Second, it tries to merge all this information into a single object + containing all the found properties, and does some (basic) conflict + resolution when they arise. - Second, it tries to merge all this information into a single object - containing all the found properties, and does some (basic) conflict - resolution when they arise. + + When you create the Matcher, you can pass it: + - a list 'opts' of option names, that act as global flags + - a dict 'transfo_opts' of { transfo_name: (transfo_args, transfo_kwargs) } + with which to call the transfo.process() function. """ - valid_filetypes = ('autodetect', 'subtitle', 'video', - 'movie', 'moviesubtitle', - 'episode', 'episodesubtitle') + valid_filetypes = ('autodetect', 'subtitle', 'info', 'video', + 'movie', 'moviesubtitle', 'movieinfo', + 'episode', 'episodesubtitle', 'episodeinfo') if filetype not in valid_filetypes: raise ValueError("filetype needs to be one of %s" % valid_filetypes) if not PY3 and not isinstance(filename, unicode): log.warning('Given filename to matcher is not unicode...') + filename = filename.decode('utf-8') + + filename = normalize_unicode(filename) + + if opts is None: + opts = [] + if not isinstance(opts, list): + raise ValueError('opts must be a list of option names! Received: type=%s val=%s', + type(opts), opts) + + if transfo_opts is None: + transfo_opts = {} + if not isinstance(transfo_opts, dict): + raise ValueError('transfo_opts must be a dict of { transfo_name: (args, kwargs) }. 
'+ + 'Received: type=%s val=%s', type(transfo_opts), transfo_opts) self.match_tree = MatchTree(filename) + + # sanity check: make sure we don't process a (mostly) empty string + if clean_string(filename) == '': + return + mtree = self.match_tree mtree.guess.set('type', filetype, confidence=1.0) def apply_transfo(transfo_name, *args, **kwargs): transfo = __import__('guessit.transfo.' + transfo_name, globals=globals(), locals=locals(), - fromlist=['process'], level=-1) - transfo.process(mtree, *args, **kwargs) + fromlist=['process'], level=0) + default_args, default_kwargs = transfo_opts.get(transfo_name, ((), {})) + all_args = args or default_args + all_kwargs = dict(default_kwargs) + all_kwargs.update(kwargs) # keep all kwargs merged together + transfo.process(mtree, *all_args, **all_kwargs) # 1- first split our path into dirs + basename + ext apply_transfo('split_path_components') @@ -105,7 +133,7 @@ def apply_transfo(transfo_name, *args, **kwargs): # - language before episodes_rexps # - properties before language (eg: he-aac vs hebrew) # - release_group before properties (eg: XviD-?? 
vs xvid) - if mtree.guess['type'] in ('episode', 'episodesubtitle'): + if mtree.guess['type'] in ('episode', 'episodesubtitle', 'episodeinfo'): strategy = [ 'guess_date', 'guess_website', 'guess_release_group', 'guess_properties', 'guess_language', 'guess_video_rexps', @@ -115,12 +143,22 @@ def apply_transfo(transfo_name, *args, **kwargs): 'guess_properties', 'guess_language', 'guess_video_rexps' ] + if 'nolanguage' in opts: + strategy.remove('guess_language') + + for name in strategy: apply_transfo(name) # more guessers for both movies and episodes - for name in ['guess_bonus_features', 'guess_year', 'guess_country']: - apply_transfo(name) + apply_transfo('guess_bonus_features') + apply_transfo('guess_year', skip_first_year=('skip_first_year' in opts)) + + if 'nocountry' not in opts: + apply_transfo('guess_country') + + apply_transfo('guess_idnumber') + # split into '-' separated subgroups (with required separator chars # around the dash) @@ -128,7 +166,7 @@ def apply_transfo(transfo_name, *args, **kwargs): # 5- try to identify the remaining unknown groups by looking at their # position relative to other known elements - if mtree.guess['type'] in ('episode', 'episodesubtitle'): + if mtree.guess['type'] in ('episode', 'episodesubtitle', 'episodeinfo'): apply_transfo('guess_episode_info_from_position') else: apply_transfo('guess_movie_title_from_position') @@ -139,27 +177,4 @@ def apply_transfo(transfo_name, *args, **kwargs): log.debug('Found match tree:\n%s' % u(mtree)) def matched(self): - # we need to make a copy here, as the merge functions work in place and - # calling them on the match tree would modify it - - parts = [node.guess for node in self.match_tree.nodes() if node.guess] - parts = copy.deepcopy(parts) - - # 1- try to merge similar information together and give it a higher - # confidence - for int_part in ('year', 'season', 'episodeNumber'): - merge_similar_guesses(parts, int_part, choose_int) - - for string_part in ('title', 'series', 'container', 
'format', - 'releaseGroup', 'website', 'audioCodec', - 'videoCodec', 'screenSize', 'episodeFormat', - 'audioChannels'): - merge_similar_guesses(parts, string_part, choose_string) - - # 2- merge the rest, potentially discarding information not properly - # merged before - result = merge_all(parts, - append=['language', 'subtitleLanguage', 'other']) - - log.debug('Final result: ' + result.nice_string()) - return result + return self.match_tree.matched() diff --git a/libs/guessit/matchtree.py b/libs/guessit/matchtree.py old mode 100644 new mode 100755 index 28c8efa2e1..0725e8350d --- a/libs/guessit/matchtree.py +++ b/libs/guessit/matchtree.py @@ -22,6 +22,9 @@ from guessit import UnicodeMixin, base_text_type, Guess from guessit.textutils import clean_string, str_fill from guessit.patterns import group_delimiters +from guessit.guess import (merge_similar_guesses, merge_all, + choose_int, choose_string) +import copy import logging log = logging.getLogger(__name__) @@ -257,3 +260,28 @@ def is_explicit(self): """Return whether the group was explicitly enclosed by parentheses/square brackets/etc.""" return (self.value[0] + self.value[-1]) in group_delimiters + + def matched(self): + # we need to make a copy here, as the merge functions work in place and + # calling them on the match tree would modify it + parts = [node.guess for node in self.nodes() if node.guess] + parts = copy.deepcopy(parts) + + # 1- try to merge similar information together and give it a higher + # confidence + for int_part in ('year', 'season', 'episodeNumber'): + merge_similar_guesses(parts, int_part, choose_int) + + for string_part in ('title', 'series', 'container', 'format', + 'releaseGroup', 'website', 'audioCodec', + 'videoCodec', 'screenSize', 'episodeFormat', + 'audioChannels', 'idNumber'): + merge_similar_guesses(parts, string_part, choose_string) + + # 2- merge the rest, potentially discarding information not properly + # merged before + result = merge_all(parts, + append=['language', 
'subtitleLanguage', 'other']) + + log.debug('Final result: ' + result.nice_string()) + return result diff --git a/libs/guessit/patterns.py b/libs/guessit/patterns.py index b75ca89b36..f803a11c3d 100755 --- a/libs/guessit/patterns.py +++ b/libs/guessit/patterns.py @@ -20,9 +20,12 @@ # from __future__ import unicode_literals +import re -subtitle_exts = [ 'srt', 'idx', 'sub', 'ssa', 'txt' ] +subtitle_exts = [ 'srt', 'idx', 'sub', 'ssa' ] + +info_exts = [ 'nfo' ] video_exts = ['3g2', '3gp', '3gp2', 'asf', 'avi', 'divx', 'flv', 'm4v', 'mk2', 'mka', 'mkv', 'mov', 'mp4', 'mp4a', 'mpeg', 'mpg', 'ogg', 'ogm', @@ -31,7 +34,7 @@ group_delimiters = [ '()', '[]', '{}' ] # separator character regexp -sep = r'[][)(}{+ /\._-]' # regexp art, hehe :D +sep = r'[][,)(}{+ /\._-]' # regexp art, hehe :D # character used to represent a deleted char (when matching groups) deleted = '_' @@ -42,13 +45,13 @@ (r'saison (?P[0-9]+)', 1.0, (0, 0)), # ... s02e13 ... - (r'[Ss](?P[0-9]{1,2}).{,3}(?P(?:[Ee][0-9]{1,2})+)[^0-9]', 1.0, (0, -1)), + (r'[Ss](?P[0-9]{1,3})[^0-9]?(?P(?:-?[eE-][0-9]{1,3})+)[^0-9]', 1.0, (0, -1)), - # ... s03-x02 ... - (r'[Ss](?P[0-9]{1,2}).{,3}(?P(?:[Xx][0-9]{1,2})+)[^0-9]', 1.0, (0, -1)), + # ... s03-x02 ... # FIXME: redundant? remove it? + #(r'[Ss](?P[0-9]{1,3})[^0-9]?(?P(?:-?[xX-][0-9]{1,3})+)[^0-9]', 1.0, (0, -1)), # ... 2x13 ... - (r'[^0-9](?P[0-9]{1,2})(?P(?:[xX][0-9]{1,2})+)[^0-9]', 0.8, (1, -1)), + (r'[^0-9](?P[0-9]{1,2})[^0-9 .-]?(?P(?:-?[xX][0-9]{1,3})+)[^0-9]', 1.0, (1, -1)), # ... s02 ... #(sep + r's(?P[0-9]{1,2})' + sep, 0.6, (1, -1)), @@ -61,7 +64,7 @@ ('ep' + sep + r'(?P[0-9]{1,2})[^0-9]', 0.7, (0, -1)), # ... e13 ... 
for a mini-series without a season number - (r'e(?P[0-9]{1,2})[^0-9]', 0.6, (0, -1)) + (sep + r'e(?P[0-9]{1,2})' + sep, 0.6, (1, -1)) ] @@ -99,92 +102,149 @@ (r'f(?P[0-9]{1,2})', 1.0, (0, 0)) ] -websites = [ 'tvu.org.ru', 'emule-island.com', 'UsaBit.com', 'www.divx-overnet.com', 'sharethefiles.com' ] +websites = [ 'tvu.org.ru', 'emule-island.com', 'UsaBit.com', 'www.divx-overnet.com', + 'sharethefiles.com' ] -unlikely_series = ['series'] +unlikely_series = [ 'series' ] -properties = { 'format': [ 'DVDRip', 'HD-DVD', 'HDDVD', 'HDDVDRip', 'BluRay', 'Blu-ray', 'BDRip', 'BRRip', - 'HDRip', 'DVD', 'DVDivX', 'HDTV', 'DVB', 'DVBRip', 'PDTV', 'WEBRip', - 'DVDSCR', 'Screener', 'VHS', 'VIDEO_TS', 'WEB-DL', 'WEBDL' ], - 'screenSize': [ '720p', '720', '1080p', '1080' ], +# prop_multi is a dict of { property_name: { canonical_form: [ pattern ] } } +# pattern is a string considered as a regexp, with the addition that dashes are +# replaced with '([ \.-_])?' which matches more types of separators (or none) +# note: simpler patterns need to be at the end of the list to not shadow more +# complete ones, eg: 'AAC' needs to come after 'He-AAC' +# ie: from most specific to less specific +prop_multi = { 'format': { 'DVD': [ 'DVD', 'DVD-Rip', 'VIDEO-TS', 'DVDivX' ], + 'HD-DVD': [ 'HD-(?:DVD)?-Rip', 'HD-DVD' ], + 'BluRay': [ 'Blu-ray', 'B[DR]Rip' ], + 'HDTV': [ 'HD-TV' ], + 'DVB': [ 'DVB-Rip', 'DVB', 'PD-TV' ], + 'WEBRip': [ 'WEB-Rip' ], + 'Screener': [ 'DVD-SCR', 'Screener' ], + 'VHS': [ 'VHS' ], + 'WEB-DL': [ 'WEB-DL' ] }, - 'videoCodec': [ 'XviD', 'DivX', 'x264', 'h264', 'Rv10' ], + 'is3D': { True: [ '3D' ] }, - 'audioCodec': [ 'AC3', 'DTS', 'He-AAC', 'AAC-He', 'AAC' ], + 'screenSize': { '480p': [ '480[pi]?' ], + '720p': [ '720[pi]?' 
], + '1080i': [ '1080i' ], + '1080p': [ '1080p', '1080[^i]' ] }, - 'audioChannels': [ '5.1' ], + 'videoCodec': { 'XviD': [ 'Xvid' ], + 'DivX': [ 'DVDivX', 'DivX' ], + 'h264': [ '[hx]-264' ], + 'Rv10': [ 'Rv10' ], + 'Mpeg2': [ 'Mpeg2' ] }, - 'releaseGroup': [ 'ESiR', 'WAF', 'SEPTiC', '[XCT]', 'iNT', 'PUKKA', - 'CHD', 'ViTE', 'TLF', 'DEiTY', 'FLAiTE', - 'MDX', 'GM4F', 'DVL', 'SVD', 'iLUMiNADOS', ' FiNaLe', - 'UnSeeN', 'aXXo', 'KLAXXON', 'NoTV', 'ZeaL', 'LOL', - 'SiNNERS', 'DiRTY', 'REWARD', 'ECI', 'KiNGS', 'CLUE', - 'CtrlHD', 'POD', 'WiKi', 'DIMENSION', 'IMMERSE', 'FQM', - '2HD', 'REPTiLE', 'CTU', 'HALCYON', 'EbP', 'SiTV', 'SAiNTS', - 'HDBRiSe', 'AlFleNi-TeaM', 'EVOLVE', '0TV' ], + # has nothing to do here (or on filenames for that matter), but some + # releases use it and it helps to identify release groups, so we adapt + 'videoApi': { 'DXVA': [ 'DXVA' ] }, - 'episodeFormat': [ 'Minisode', 'Minisodes' ], + 'audioCodec': { 'AC3': [ 'AC3' ], + 'DTS': [ 'DTS' ], + 'AAC': [ 'He-AAC', 'AAC-He', 'AAC' ] }, - 'other': [ '5ch', 'PROPER', 'REPACK', 'LIMITED', 'DualAudio', 'iNTERNAL', 'Audiofixed', 'R5', - 'complete', 'classic', # not so sure about these ones, could appear in a title - 'ws', # widescreen - ], - } + 'audioChannels': { '5.1': [ r'5\.1', 'DD5[._ ]1', '5ch' ] }, + 'episodeFormat': { 'Minisode': [ 'Minisodes?' 
] } + + } -def find_properties(filename): +# prop_single dict of { property_name: [ canonical_form ] } +prop_single = { 'releaseGroup': [ 'ESiR', 'WAF', 'SEPTiC', r'\[XCT\]', 'iNT', 'PUKKA', + 'CHD', 'ViTE', 'TLF', 'FLAiTE', + 'MDX', 'GM4F', 'DVL', 'SVD', 'iLUMiNADOS', + 'aXXo', 'KLAXXON', 'NoTV', 'ZeaL', 'LOL', + 'CtrlHD', 'POD', 'WiKi','IMMERSE', 'FQM', + '2HD', 'CTU', 'HALCYON', 'EbP', 'SiTV', + 'HDBRiSe', 'AlFleNi-TeaM', 'EVOLVE', '0TV', + 'TLA', 'NTB', 'ASAP', 'MOMENTUM', 'FoV', 'D-Z0N3', + 'TrollHD', 'ECI' + ], + + # potentially confusing release group names (they are words) + 'weakReleaseGroup': [ 'DEiTY', 'FiNaLe', 'UnSeeN', 'KiNGS', 'CLUE', 'DIMENSION', + 'SAiNTS', 'ARROW', 'EuReKA', 'SiNNERS', 'DiRTY', 'REWARD', + 'REPTiLE', + ], + + 'other': [ 'PROPER', 'REPACK', 'LIMITED', 'DualAudio', 'Audiofixed', 'R5', + 'complete', 'classic', # not so sure about these ones, could appear in a title + 'ws' ] # widescreen + } + +_dash = '-' +_psep = '[-. _]?' + +def _to_rexp(prop): + return re.compile(prop.replace(_dash, _psep), re.IGNORECASE) + +# properties_rexps dict of { property_name: { canonical_form: [ rexp ] } } +# containing the rexps compiled from both prop_multi and prop_single +properties_rexps = dict((type, dict((canonical_form, + [ _to_rexp(pattern) for pattern in patterns ]) + for canonical_form, patterns in props.items())) + for type, props in prop_multi.items()) + +properties_rexps.update(dict((type, dict((canonical_form, [ _to_rexp(canonical_form) ]) + for canonical_form in props)) + for type, props in prop_single.items())) + + + +def find_properties(string): result = [] - clow = filename.lower() - for prop, values in properties.items(): - for value in values: - pos = clow.find(value.lower()) - if pos != -1: - end = pos + len(value) - # make sure our word is always surrounded by separators - if ((pos > 0 and clow[pos - 1] not in sep) or - (end < len(clow) and clow[end] not in sep)): + for property_name, props in properties_rexps.items(): + # FIXME: 
this should be done in a more flexible way... + if property_name in ['weakReleaseGroup']: + continue + + for canonical_form, rexps in props.items(): + for value_rexp in rexps: + match = value_rexp.search(string) + if match: + start, end = match.span() + # make sure our word is always surrounded by separators # note: sep is a regexp, but in this case using it as - # a sequence achieves the same goal - continue + # a char sequence achieves the same goal + if ((start > 0 and string[start-1] not in sep) or + (end < len(string) and string[end] not in sep)): + continue - result.append((prop, value, pos, end)) + result.append((property_name, canonical_form, start, end)) return result -property_synonyms = { 'DVD': [ 'DVDRip', 'VIDEO_TS' ], - 'HD-DVD': [ 'HDDVD', 'HDDVDRip' ], - 'BluRay': [ 'BDRip', 'BRRip', 'Blu-ray' ], - 'WEB-DL': [ 'WEBDL' ], - 'DVB': [ 'DVBRip', 'PDTV' ], - 'Screener': [ 'DVDSCR' ], - 'DivX': [ 'DVDivX' ], - 'h264': [ 'x264' ], - '720p': [ '720' ], - '1080p': [ '1080' ], - 'AAC': [ 'He-AAC', 'AAC-He' ], - 'Special Edition': [ 'Special' ], +property_synonyms = { 'Special Edition': [ 'Special' ], 'Collector Edition': [ 'Collector' ], - 'Criterion Edition': [ 'Criterion' ], - 'Minisode': [ 'Minisodes' ] + 'Criterion Edition': [ 'Criterion' ] } def revert_synonyms(): reverse = {} - for _, values in properties.items(): - for value in values: - reverse[value.lower()] = value - for canonical, synonyms in property_synonyms.items(): for synonym in synonyms: reverse[synonym.lower()] = canonical return reverse + reverse_synonyms = revert_synonyms() def canonical_form(string): return reverse_synonyms.get(string.lower(), string) + + +def compute_canonical_form(property_name, value): + """Return the canonical form of a property given its type if it is a valid + one, None otherwise.""" + if isinstance(value, basestring): + for canonical_form, rexps in properties_rexps[property_name].items(): + for rexp in rexps: + if rexp.match(value): + return canonical_form + return 
None diff --git a/libs/guessit/slogging.py b/libs/guessit/slogging.py old mode 100644 new mode 100755 index f75773c0fe..39591a20b6 --- a/libs/guessit/slogging.py +++ b/libs/guessit/slogging.py @@ -21,6 +21,8 @@ from __future__ import unicode_literals import logging import sys +import os, os.path + GREEN_FONT = "\x1B[0;32m" YELLOW_FONT = "\x1B[0;33m" @@ -29,33 +31,59 @@ RESET_FONT = "\x1B[0m" -def setupLogging(colored=True): +def setupLogging(colored=True, with_time=False, with_thread=False, filename=None, with_lineno=False): """Set up a nice colored logger as the main application logger.""" class SimpleFormatter(logging.Formatter): - def __init__(self): - self.fmt = '%(levelname)-8s %(module)s:%(funcName)s -- %(message)s' + def __init__(self, with_time, with_thread): + self.fmt = (('%(asctime)s ' if with_time else '') + + '%(levelname)-8s ' + + '[%(name)s:%(funcName)s' + + (':%(lineno)s' if with_lineno else '') + ']' + + ('[%(threadName)s]' if with_thread else '') + + ' -- %(message)s') logging.Formatter.__init__(self, self.fmt) class ColoredFormatter(logging.Formatter): - def __init__(self): - self.fmt = ('%(levelname)-8s ' + - BLUE_FONT + '%(name)s:%(funcName)s' + - RESET_FONT + ' -- %(message)s') + def __init__(self, with_time, with_thread): + self.fmt = (('%(asctime)s ' if with_time else '') + + '-CC-%(levelname)-8s ' + + BLUE_FONT + '[%(name)s:%(funcName)s' + + (':%(lineno)s' if with_lineno else '') + ']' + + RESET_FONT + ('[%(threadName)s]' if with_thread else '') + + ' -- %(message)s') + logging.Formatter.__init__(self, self.fmt) def format(self, record): + modpath = record.name.split('.') + record.mname = modpath[0] + record.mmodule = '.'.join(modpath[1:]) result = logging.Formatter.format(self, record) - if record.levelno in (logging.DEBUG, logging.INFO): - return GREEN_FONT + result + if record.levelno == logging.DEBUG: + color = BLUE_FONT + elif record.levelno == logging.INFO: + color = GREEN_FONT elif record.levelno == logging.WARNING: - return 
YELLOW_FONT + result + color = YELLOW_FONT else: - return RED_FONT + result + color = RED_FONT - ch = logging.StreamHandler() - if colored and sys.platform != 'win32': - ch.setFormatter(ColoredFormatter()) + result = result.replace('-CC-', color) + return result + + if filename is not None: + # make sure we can write to our log file + logdir = os.path.dirname(filename) + if not os.path.exists(logdir): + os.makedirs(logdir) + ch = logging.FileHandler(filename, mode='w') + ch.setFormatter(SimpleFormatter(with_time, with_thread)) else: - ch.setFormatter(SimpleFormatter()) + ch = logging.StreamHandler() + if colored and sys.platform != 'win32': + ch.setFormatter(ColoredFormatter(with_time, with_thread)) + else: + ch.setFormatter(SimpleFormatter(with_time, with_thread)) + logging.getLogger().addHandler(ch) diff --git a/libs/guessit/textutils.py b/libs/guessit/textutils.py old mode 100644 new mode 100755 index 12043e5926..ae9d28c31e --- a/libs/guessit/textutils.py +++ b/libs/guessit/textutils.py @@ -2,7 +2,7 @@ # -*- coding: utf-8 -*- # # Smewt - A smart collection manager -# Copyright (c) 2008 Nicolas Wack +# Copyright (c) 2008-2012 Nicolas Wack # # Smewt is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -23,10 +23,13 @@ from guessit.patterns import sep import functools import unicodedata -import copy +import re # string-related functions +def normalize_unicode(s): + return unicodedata.normalize('NFC', s) + def strip_brackets(s): if not s: @@ -40,10 +43,13 @@ def strip_brackets(s): return s -def clean_string(s): - for c in sep[:-2]: # do not remove dashes ('-') - s = s.replace(c, ' ') - parts = s.split() +def clean_string(st): + for c in sep: + # do not remove certain chars + if c in ['-', ',']: + continue + st = st.replace(c, ' ') + parts = st.split() result = ' '.join(p for p in parts if p != '') # now also remove dashes on the outer part of the string @@ -55,6 +61,21 @@ def 
clean_string(s): return result +_words_rexp = re.compile('\w+', re.UNICODE) + +def find_words(s): + return _words_rexp.findall(s.replace('_', ' ')) + + +def reorder_title(title): + ltitle = title.lower() + if ltitle[-4:] == ',the': + return title[-3:] + ' ' + title[:-4] + if ltitle[-5:] == ', the': + return title[-3:] + ' ' + title[:-5] + return title + + def str_replace(string, pos, c): return string[:pos] + c + string[pos+1:] diff --git a/libs/guessit/transfo/__init__.py b/libs/guessit/transfo/__init__.py old mode 100644 new mode 100755 index 67875dc6ef..a28aa98873 --- a/libs/guessit/transfo/__init__.py +++ b/libs/guessit/transfo/__init__.py @@ -28,7 +28,7 @@ def found_property(node, name, confidence): - node.guess = Guess({name: node.clean_value}, confidence=confidence) + node.guess = Guess({name: node.clean_value}, confidence=confidence, raw=node.value) log.debug('Found with confidence %.2f: %s' % (confidence, node.guess)) @@ -45,18 +45,24 @@ def format_guess(guess): elif isinstance(value, base_text_type): if prop in ('edition',): value = clean_string(value) - guess[prop] = canonical_form(value) + guess[prop] = canonical_form(value).replace('\\', '') return guess def find_and_split_node(node, strategy, logger): string = ' %s ' % node.value # add sentinels - for matcher, confidence in strategy: + for matcher, confidence, args, kwargs in strategy: + all_args = [string] if getattr(matcher, 'use_node', False): - result, span = matcher(string, node) + all_args.append(node) + if args: + all_args.append(args) + + if kwargs: + result, span = matcher(*all_args, **kwargs) else: - result, span = matcher(string) + result, span = matcher(*all_args) if result: # readjust span to compensate for sentinels @@ -69,7 +75,7 @@ def find_and_split_node(node, strategy, logger): if confidence is None: confidence = 1.0 - guess = format_guess(Guess(result, confidence=confidence)) + guess = format_guess(Guess(result, confidence=confidence, raw=string[span[0] + 1:span[1] + 1])) msg = 
'Found with confidence %.2f: %s' % (confidence, guess) (logger or log).debug(msg) @@ -84,10 +90,12 @@ def find_and_split_node(node, strategy, logger): class SingleNodeGuesser(object): - def __init__(self, guess_func, confidence, logger=None): + def __init__(self, guess_func, confidence, logger, *args, **kwargs): self.guess_func = guess_func self.confidence = confidence self.logger = logger + self.args = args + self.kwargs = kwargs def process(self, mtree): # strategy is a list of pairs (guesser, confidence) @@ -95,7 +103,7 @@ def process(self, mtree): # it will override it, otherwise it will leave the guess confidence # - if the guesser returns a simple dict as a guess and confidence is # specified, it will use it, or 1.0 otherwise - strategy = [ (self.guess_func, self.confidence) ] + strategy = [ (self.guess_func, self.confidence, self.args, self.kwargs) ] for node in mtree.unidentified_leaves(): find_and_split_node(node, strategy, self.logger) diff --git a/libs/guessit/transfo/guess_bonus_features.py b/libs/guessit/transfo/guess_bonus_features.py old mode 100644 new mode 100755 diff --git a/libs/guessit/transfo/guess_country.py b/libs/guessit/transfo/guess_country.py old mode 100644 new mode 100755 index f95b62cf01..aadb84f794 --- a/libs/guessit/transfo/guess_country.py +++ b/libs/guessit/transfo/guess_country.py @@ -19,24 +19,30 @@ # from __future__ import unicode_literals -#from guessit.transfo import SingleNodeGuesser -#from guessit.date import search_year from guessit.country import Country from guessit import Guess import logging log = logging.getLogger(__name__) +# list of common words which could be interpreted as countries, but which +# are far too common to be able to say they represent a country +country_common_words = frozenset([ 'bt', 'bb' ]) def process(mtree): for node in mtree.unidentified_leaves(): - # only keep explicit groups (enclosed in parentheses/brackets) if len(node.node_idx) == 2: - try: - country = Country(node.value[1:-1], strict=True) 
- if node.value[0] + node.value[-1] not in ['()', '[]', '{}']: - continue - node.guess = Guess(country=country, confidence=1.0) + c = node.value[1:-1].lower() + if c in country_common_words: + continue + + # only keep explicit groups (enclosed in parentheses/brackets) + if node.value[0] + node.value[-1] not in ['()', '[]', '{}']: + continue + try: + country = Country(c, strict=True) except ValueError: - pass + continue + + node.guess = Guess(country=country, confidence=1.0, raw=c) diff --git a/libs/guessit/transfo/guess_date.py b/libs/guessit/transfo/guess_date.py old mode 100644 new mode 100755 diff --git a/libs/guessit/transfo/guess_episode_info_from_position.py b/libs/guessit/transfo/guess_episode_info_from_position.py old mode 100644 new mode 100755 diff --git a/libs/guessit/transfo/guess_episodes_rexps.py b/libs/guessit/transfo/guess_episodes_rexps.py old mode 100644 new mode 100755 index 4ebfb547e7..30c2ca2feb --- a/libs/guessit/transfo/guess_episodes_rexps.py +++ b/libs/guessit/transfo/guess_episodes_rexps.py @@ -28,33 +28,34 @@ log = logging.getLogger(__name__) def number_list(s): - return list(re.sub('[^0-9]+', ' ', s).split()) + l = [ int(n) for n in re.sub('[^0-9]+', ' ', s).split() ] + + if len(l) == 2: + # it is an episode interval, return all numbers in between + return range(l[0], l[1]+1) + + return l def guess_episodes_rexps(string): for rexp, confidence, span_adjust in episode_rexps: match = re.search(rexp, string, re.IGNORECASE) if match: - guess = Guess(match.groupdict(), confidence=confidence) - span = (match.start() + span_adjust[0], + span = (match.start() + span_adjust[0], match.end() + span_adjust[1]) - - # episodes which have a season > 25 are most likely errors - # (Simpsons is at 24!) 
- if int(guess.get('season', 0)) > 25: - continue + guess = Guess(match.groupdict(), confidence=confidence, raw=string[span[0]:span[1]]) # decide whether we have only a single episode number or an # episode list if guess.get('episodeNumber'): eplist = number_list(guess['episodeNumber']) - guess.set('episodeNumber', int(eplist[0]), confidence=confidence) + guess.set('episodeNumber', eplist[0], confidence=confidence, raw=string[span[0]:span[1]]) if len(eplist) > 1: - guess.set('episodeList', list(map(int, eplist)), confidence=confidence) + guess.set('episodeList', eplist, confidence=confidence, raw=string[span[0]:span[1]]) if guess.get('bonusNumber'): eplist = number_list(guess['bonusNumber']) - guess.set('bonusNumber', int(eplist[0]), confidence=confidence) + guess.set('bonusNumber', eplist[0], confidence=confidence, raw=string[span[0]:span[1]]) return guess, span diff --git a/libs/guessit/transfo/guess_filetype.py b/libs/guessit/transfo/guess_filetype.py old mode 100644 new mode 100755 index cdaf114241..4279c0b06f --- a/libs/guessit/transfo/guess_filetype.py +++ b/libs/guessit/transfo/guess_filetype.py @@ -20,8 +20,8 @@ from __future__ import unicode_literals from guessit import Guess -from guessit.patterns import (subtitle_exts, video_exts, episode_rexps, - find_properties, canonical_form) +from guessit.patterns import (subtitle_exts, info_exts, video_exts, episode_rexps, + find_properties, compute_canonical_form) from guessit.date import valid_year from guessit.textutils import clean_string import os.path @@ -53,12 +53,16 @@ def upgrade_episode(): filetype_container[0] = 'episode' elif filetype_container[0] == 'subtitle': filetype_container[0] = 'episodesubtitle' + elif filetype_container[0] == 'info': + filetype_container[0] = 'episodeinfo' def upgrade_movie(): if filetype_container[0] == 'video': filetype_container[0] = 'movie' elif filetype_container[0] == 'subtitle': filetype_container[0] = 'moviesubtitle' + elif filetype_container[0] == 'info': + 
filetype_container[0] = 'movieinfo' def upgrade_subtitle(): if 'movie' in filetype_container[0]: @@ -68,6 +72,14 @@ def upgrade_subtitle(): else: filetype_container[0] = 'subtitle' + def upgrade_info(): + if 'movie' in filetype_container[0]: + filetype_container[0] = 'movieinfo' + elif 'episode' in filetype_container[0]: + filetype_container[0] = 'episodeinfo' + else: + filetype_container[0] = 'info' + def upgrade(type='unknown'): if filetype_container[0] == 'autodetect': filetype_container[0] = type @@ -78,6 +90,9 @@ def upgrade(type='unknown'): if fileext in subtitle_exts: upgrade_subtitle() other = { 'container': fileext } + elif fileext in info_exts: + upgrade_info() + other = { 'container': fileext } elif fileext in video_exts: upgrade(type='video') other = { 'container': fileext } @@ -89,7 +104,7 @@ def upgrade(type='unknown'): # check whether we are in a 'Movies', 'Tv Shows', ... folder folder_rexps = [ (r'Movies?', upgrade_movie), - (r'Tv ?Shows?', upgrade_episode), + (r'Tv[ _-]?Shows?', upgrade_episode), (r'Series', upgrade_episode) ] for frexp, upgrade_func in folder_rexps: @@ -104,17 +119,20 @@ def upgrade(type='unknown'): fname = clean_string(filename).lower() for m in MOVIES: if m in fname: + log.debug('Found in exception list of movies -> type = movie') upgrade_movie() for s in SERIES: if s in fname: + log.debug('Found in exception list of series -> type = episode') upgrade_episode() # now look whether there are some specific hints for episode vs movie - if filetype_container[0] in ('video', 'subtitle'): + if filetype_container[0] in ('video', 'subtitle', 'info'): # if we have an episode_rexp (eg: s02e13), it is an episode for rexp, _, _ in episode_rexps: match = re.search(rexp, filename, re.IGNORECASE) if match: + log.debug('Found matching regexp: "%s" (string = "%s") -> type = episode', rexp, match.group()) upgrade_episode() break @@ -133,24 +151,29 @@ def upgrade(type='unknown'): possible = False if possible: + log.debug('Found possible episode 
number: %s (from string "%s") -> type = episode', epnumber, match.group()) upgrade_episode() # if we have certain properties characteristic of episodes, it is an ep for prop, value, _, _ in find_properties(filename): log.debug('prop: %s = %s' % (prop, value)) if prop == 'episodeFormat': + log.debug('Found characteristic property of episodes: %s = "%s"', prop, value) upgrade_episode() break - elif canonical_form(value) == 'DVB': + elif compute_canonical_form('format', value) == 'DVB': + log.debug('Found characteristic property of episodes: %s = "%s"', prop, value) upgrade_episode() break # origin-specific type if 'tvu.org.ru' in filename: + log.debug('Found characteristic property of episodes: %s = "%s"', prop, value) upgrade_episode() # if no episode info found, assume it's a movie + log.debug('Nothing characteristic found, assuming type = movie') upgrade_movie() filetype = filetype_container[0] diff --git a/libs/guessit/transfo/guess_idnumber.py b/libs/guessit/transfo/guess_idnumber.py new file mode 100755 index 0000000000..0e15af5c9e --- /dev/null +++ b/libs/guessit/transfo/guess_idnumber.py @@ -0,0 +1,71 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# GuessIt - A library for guessing information from filenames +# Copyright (c) 2013 Nicolas Wack +# +# GuessIt is free software; you can redistribute it and/or modify it under +# the terms of the Lesser GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# GuessIt is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# Lesser GNU General Public License for more details. +# +# You should have received a copy of the Lesser GNU General Public License +# along with this program. If not, see . 
+# + +from __future__ import unicode_literals +from guessit.transfo import SingleNodeGuesser +from guessit.patterns import find_properties +import re +import logging + +log = logging.getLogger(__name__) + + +def guess_properties(string): + try: + prop, value, pos, end = find_properties(string)[0] + return { prop: value }, (pos, end) + except IndexError: + return None, None + +_idnum = re.compile(r'(?P[a-zA-Z0-9-]{10,})') # 1.0, (0, 0)) + +def guess_idnumber(string): + match = _idnum.search(string) + if match is not None: + result = match.groupdict() + switch_count = 0 + DIGIT = 0 + LETTER = 1 + OTHER = 2 + last = LETTER + for c in result['idNumber']: + if c in '0123456789': + ci = DIGIT + elif c in 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ': + ci = LETTER + else: + ci = OTHER + + if ci != last: + switch_count += 1 + + last = ci + + switch_ratio = float(switch_count) / len(result['idNumber']) + + # only return the result as probable if we alternate often between + # char type (more likely for hash values than for common words) + if switch_ratio > 0.4: + return result, match.span() + + return None, None + +def process(mtree): + SingleNodeGuesser(guess_idnumber, 0.4, log).process(mtree) diff --git a/libs/guessit/transfo/guess_language.py b/libs/guessit/transfo/guess_language.py old mode 100644 new mode 100755 index fe547e614d..648a06b126 --- a/libs/guessit/transfo/guess_language.py +++ b/libs/guessit/transfo/guess_language.py @@ -22,27 +22,34 @@ from guessit import Guess from guessit.transfo import SingleNodeGuesser from guessit.language import search_language -from guessit.textutils import clean_string import logging log = logging.getLogger(__name__) -def guess_language(string): - language, span, confidence = search_language(string) +def guess_language(string, node, skip=None): + if skip: + relative_skip = [] + for entry in skip: + node_idx = entry['node_idx'] + span = entry['span'] + if node_idx == node.node_idx[:len(node_idx)]: + relative_span = 
(span[0] - node.offset + 1, span[1] - node.offset + 1) + relative_skip.append(relative_span) + skip = relative_skip + + language, span, confidence = search_language(string, skip=skip) if language: - # is it a subtitle language? - if 'sub' in clean_string(string[:span[0]]).lower().split(' '): - return (Guess({'subtitleLanguage': language}, - confidence=confidence), - span) - else: - return (Guess({'language': language}, - confidence=confidence), - span) + return (Guess({'language': language}, + confidence=confidence, + raw= string[span[0]:span[1]]), + span) return None, None +guess_language.use_node = True + -def process(mtree): - SingleNodeGuesser(guess_language, None, log).process(mtree) +def process(mtree, *args, **kwargs): + SingleNodeGuesser(guess_language, None, log, *args, **kwargs).process(mtree) + # Note: 'language' is promoted to 'subtitleLanguage' in the post_process transfo diff --git a/libs/guessit/transfo/guess_movie_title_from_position.py b/libs/guessit/transfo/guess_movie_title_from_position.py old mode 100644 new mode 100755 index 8b6f5d0a5c..bcb42b4528 --- a/libs/guessit/transfo/guess_movie_title_from_position.py +++ b/libs/guessit/transfo/guess_movie_title_from_position.py @@ -20,6 +20,7 @@ from __future__ import unicode_literals from guessit import Guess +import unicodedata import logging log = logging.getLogger(__name__) @@ -28,7 +29,8 @@ def process(mtree): def found_property(node, name, value, confidence): node.guess = Guess({ name: value }, - confidence=confidence) + confidence=confidence, + raw=value) log.debug('Found with confidence %.2f: %s' % (confidence, node.guess)) def found_title(node, confidence): diff --git a/libs/guessit/transfo/guess_properties.py b/libs/guessit/transfo/guess_properties.py old mode 100644 new mode 100755 diff --git a/libs/guessit/transfo/guess_release_group.py b/libs/guessit/transfo/guess_release_group.py old mode 100644 new mode 100755 index 2cee4b0725..b72c736894 --- a/libs/guessit/transfo/guess_release_group.py 
+++ b/libs/guessit/transfo/guess_release_group.py @@ -20,49 +20,62 @@ from __future__ import unicode_literals from guessit.transfo import SingleNodeGuesser -from guessit.patterns import properties, canonical_form +from guessit.patterns import prop_multi, compute_canonical_form, _dash, _psep import re import logging log = logging.getLogger(__name__) +def get_patterns(property_name): + return [ p.replace(_dash, _psep) for patterns in prop_multi[property_name].values() for p in patterns ] -CODECS = properties['videoCodec'] -FORMATS = properties['format'] +CODECS = get_patterns('videoCodec') +FORMATS = get_patterns('format') +VAPIS = get_patterns('videoApi') + +# RG names following a codec or format, with a potential space or dash inside the name +GROUP_NAMES = [ r'(?P' + codec + r')[ \.-](?P.+?([- \.].*?)??)[ \.]' + for codec in CODECS ] +GROUP_NAMES += [ r'(?P' + fmt + r')[ \.-](?P.+?([- \.].*?)??)[ \.]' + for fmt in FORMATS ] +GROUP_NAMES += [ r'(?P' + api + r')[ \.-](?P.+?([- \.].*?)??)[ \.]' + for api in VAPIS ] + +GROUP_NAMES2 = [ r'\.(?P' + codec + r')-(?P.*?)(-(.*?))?[ \.]' + for codec in CODECS ] +GROUP_NAMES2 += [ r'\.(?P' + fmt + r')-(?P.*?)(-(.*?))?[ \.]' + for fmt in FORMATS ] +GROUP_NAMES2 += [ r'\.(?P' + vapi + r')-(?P.*?)(-(.*?))?[ \.]' + for vapi in VAPIS ] + +GROUP_NAMES = [ re.compile(r, re.IGNORECASE) for r in GROUP_NAMES ] +GROUP_NAMES2 = [ re.compile(r, re.IGNORECASE) for r in GROUP_NAMES2 ] def adjust_metadata(md): - codec = canonical_form(md['videoCodec']) - if codec in FORMATS: - md['format'] = codec - del md['videoCodec'] - return md + return dict((property_name, compute_canonical_form(property_name, value) or value) + for property_name, value in md.items()) def guess_release_group(string): - group_names = [ r'\.(Xvid)-(?P.*?)[ \.]', - r'\.(DivX)-(?P.*?)[\. ]', - r'\.(DVDivX)-(?P.*?)[\. 
]', - ] - # first try to see whether we have both a known codec and a known release group - group_names = [ r'\.(?P' + codec + r')-(?P.*?)[ \.]' - for codec in (CODECS + FORMATS) ] - - for rexp in group_names: - match = re.search(rexp, string, re.IGNORECASE) - if match: + for rexp in GROUP_NAMES: + match = rexp.search(string) + while match: metadata = match.groupdict() - if canonical_form(metadata['releaseGroup']) in properties['releaseGroup']: + # make sure this is an actual release group we caught + release_group = (compute_canonical_form('releaseGroup', metadata['releaseGroup']) or + compute_canonical_form('weakReleaseGroup', metadata['releaseGroup'])) + if release_group: return adjust_metadata(metadata), (match.start(1), match.end(2)) + # we didn't find anything conclusive, keep searching + match = rexp.search(string, match.span()[0]+1) + # pick anything as releaseGroup as long as we have a codec in front # this doesn't include a potential dash ('-') ending the release group # eg: [...].X264-HiS@SiLUHD-English.[...] 
- group_names = [ r'\.(?P' + codec + r')-(?P.*?)(-(.*?))?[ \.]' - for codec in (CODECS + FORMATS) ] - - for rexp in group_names: - match = re.search(rexp, string, re.IGNORECASE) + for rexp in GROUP_NAMES2: + match = rexp.search(string) if match: return adjust_metadata(match.groupdict()), (match.start(1), match.end(2)) diff --git a/libs/guessit/transfo/guess_video_rexps.py b/libs/guessit/transfo/guess_video_rexps.py old mode 100644 new mode 100755 index 8ae9e6c659..1b511f153f --- a/libs/guessit/transfo/guess_video_rexps.py +++ b/libs/guessit/transfo/guess_video_rexps.py @@ -38,9 +38,10 @@ def guess_video_rexps(string): # the soonest that we can catch it) if metadata.get('cdNumberTotal', -1) is None: del metadata['cdNumberTotal'] - return (Guess(metadata, confidence=confidence), - (match.start() + span_adjust[0], - match.end() + span_adjust[1] - 2)) + span = (match.start() + span_adjust[0], + match.end() + span_adjust[1] - 2) + return (Guess(metadata, confidence=confidence, raw=string[span[0]:span[1]]), + span) return None, None diff --git a/libs/guessit/transfo/guess_weak_episodes_rexps.py b/libs/guessit/transfo/guess_weak_episodes_rexps.py old mode 100644 new mode 100755 index 8436ade855..18306b4357 --- a/libs/guessit/transfo/guess_weak_episodes_rexps.py +++ b/libs/guessit/transfo/guess_weak_episodes_rexps.py @@ -48,9 +48,9 @@ def guess_weak_episodes_rexps(string, node): continue return Guess({ 'season': season, 'episodeNumber': epnum }, - confidence=0.6), span + confidence=0.6, raw=string[span[0]:span[1]]), span else: - return Guess(metadata, confidence=0.3), span + return Guess(metadata, confidence=0.3, raw=string[span[0]:span[1]]), span return None, None diff --git a/libs/guessit/transfo/guess_website.py b/libs/guessit/transfo/guess_website.py old mode 100644 new mode 100755 diff --git a/libs/guessit/transfo/guess_year.py b/libs/guessit/transfo/guess_year.py old mode 100644 new mode 100755 index 4bc9b867e0..c193af7a15 --- a/libs/guessit/transfo/guess_year.py +++ 
b/libs/guessit/transfo/guess_year.py @@ -33,6 +33,18 @@ def guess_year(string): else: return None, None +def guess_year_skip_first(string): + year, span = search_year(string) + if year: + year2, span2 = guess_year(string[span[1]:]) + if year2: + return year2, (span2[0]+span[1], span2[1]+span[1]) + + return None, None -def process(mtree): - SingleNodeGuesser(guess_year, 1.0, log).process(mtree) + +def process(mtree, skip_first_year=False): + if skip_first_year: + SingleNodeGuesser(guess_year_skip_first, 1.0, log).process(mtree) + else: + SingleNodeGuesser(guess_year, 1.0, log).process(mtree) diff --git a/libs/guessit/transfo/post_process.py b/libs/guessit/transfo/post_process.py old mode 100644 new mode 100755 index a2a7a336fb..5920e3a438 --- a/libs/guessit/transfo/post_process.py +++ b/libs/guessit/transfo/post_process.py @@ -20,6 +20,7 @@ from __future__ import unicode_literals from guessit.patterns import subtitle_exts +from guessit.textutils import reorder_title, find_words import logging log = logging.getLogger(__name__) @@ -45,6 +46,15 @@ def promote_subtitle(): node == mtree.leaves()[-2]): promote_subtitle() + # - if we find the word 'sub' before the language, and in the same explicit + # group, then upgrade the language + explicit_group = mtree.node_at(node.node_idx[:2]) + group_str = explicit_group.value.lower() + + if ('sub' in find_words(group_str) and + 0 <= group_str.find('sub') < (node.span[0] - explicit_group.span[0])): + promote_subtitle() + # - if a language is in an explicit group just preceded by "st", # it is a subtitle language (eg: '...st[fr-eng]...') try: @@ -60,11 +70,4 @@ def promote_subtitle(): if 'series' not in node.guess: continue - series = node.guess['series'] - lseries = series.lower() - - if lseries[-4:] == ',the': - node.guess['series'] = 'The ' + series[:-4] - - if lseries[-5:] == ', the': - node.guess['series'] = 'The ' + series[:-5] + node.guess['series'] = reorder_title(node.guess['series']) diff --git 
a/libs/guessit/transfo/split_explicit_groups.py b/libs/guessit/transfo/split_explicit_groups.py old mode 100644 new mode 100755 diff --git a/libs/guessit/transfo/split_on_dash.py b/libs/guessit/transfo/split_on_dash.py old mode 100644 new mode 100755 index b4454dcd23..031baff61f --- a/libs/guessit/transfo/split_on_dash.py +++ b/libs/guessit/transfo/split_on_dash.py @@ -38,15 +38,5 @@ def process(mtree): indices.extend([ span[0], span[1] ]) match = pattern.search(node.value, span[1]) - didx = node.value.find('-') - while didx > 0: - if (didx > 10 and - (didx - 1 not in indices and - didx + 2 not in indices)): - - indices.extend([ didx, didx + 1 ]) - - didx = node.value.find('-', didx + 1) - if indices: node.partition(indices) diff --git a/libs/guessit/transfo/split_path_components.py b/libs/guessit/transfo/split_path_components.py old mode 100644 new mode 100755 diff --git a/libs/html5lib/__init__.py b/libs/html5lib/__init__.py index 16537aad3e..19a4b7d692 100644 --- a/libs/html5lib/__init__.py +++ b/libs/html5lib/__init__.py @@ -1,4 +1,4 @@ -""" +""" HTML parsing library based on the WHATWG "HTML5" specification. 
The parser is designed to be compatible with existing HTML found in the wild and implements well-defined error recovery that @@ -8,10 +8,16 @@ import html5lib f = open("my_document.html") -tree = html5lib.parse(f) +tree = html5lib.parse(f) """ -__version__ = "0.95-dev" -from html5parser import HTMLParser, parse, parseFragment -from treebuilders import getTreeBuilder -from treewalkers import getTreeWalker -from serializer import serialize + +from __future__ import absolute_import, division, unicode_literals + +from .html5parser import HTMLParser, parse, parseFragment +from .treebuilders import getTreeBuilder +from .treewalkers import getTreeWalker +from .serializer import serialize + +__all__ = ["HTMLParser", "parse", "parseFragment", "getTreeBuilder", + "getTreeWalker", "serialize"] +__version__ = "0.999" diff --git a/libs/html5lib/constants.py b/libs/html5lib/constants.py index b533018eeb..e7089846d5 100644 --- a/libs/html5lib/constants.py +++ b/libs/html5lib/constants.py @@ -1,302 +1,301 @@ -import string, gettext -_ = gettext.gettext +from __future__ import absolute_import, division, unicode_literals -try: - frozenset -except NameError: - # Import from the sets module for python 2.3 - from sets import Set as set - from sets import ImmutableSet as frozenset +import string +import gettext +_ = gettext.gettext EOF = None E = { - "null-character": - _(u"Null character in input stream, replaced with U+FFFD."), - "invalid-codepoint": - _(u"Invalid codepoint in stream."), + "null-character": + _("Null character in input stream, replaced with U+FFFD."), + "invalid-codepoint": + _("Invalid codepoint in stream."), "incorrectly-placed-solidus": - _(u"Solidus (/) incorrectly placed in tag."), + _("Solidus (/) incorrectly placed in tag."), "incorrect-cr-newline-entity": - _(u"Incorrect CR newline entity, replaced with LF."), + _("Incorrect CR newline entity, replaced with LF."), "illegal-windows-1252-entity": - _(u"Entity used with illegal number (windows-1252 reference)."), 
+ _("Entity used with illegal number (windows-1252 reference)."), "cant-convert-numeric-entity": - _(u"Numeric entity couldn't be converted to character " - u"(codepoint U+%(charAsInt)08x)."), + _("Numeric entity couldn't be converted to character " + "(codepoint U+%(charAsInt)08x)."), "illegal-codepoint-for-numeric-entity": - _(u"Numeric entity represents an illegal codepoint: " - u"U+%(charAsInt)08x."), + _("Numeric entity represents an illegal codepoint: " + "U+%(charAsInt)08x."), "numeric-entity-without-semicolon": - _(u"Numeric entity didn't end with ';'."), + _("Numeric entity didn't end with ';'."), "expected-numeric-entity-but-got-eof": - _(u"Numeric entity expected. Got end of file instead."), + _("Numeric entity expected. Got end of file instead."), "expected-numeric-entity": - _(u"Numeric entity expected but none found."), + _("Numeric entity expected but none found."), "named-entity-without-semicolon": - _(u"Named entity didn't end with ';'."), + _("Named entity didn't end with ';'."), "expected-named-entity": - _(u"Named entity expected. Got none."), + _("Named entity expected. Got none."), "attributes-in-end-tag": - _(u"End tag contains unexpected attributes."), + _("End tag contains unexpected attributes."), 'self-closing-flag-on-end-tag': - _(u"End tag contains unexpected self-closing flag."), + _("End tag contains unexpected self-closing flag."), "expected-tag-name-but-got-right-bracket": - _(u"Expected tag name. Got '>' instead."), + _("Expected tag name. Got '>' instead."), "expected-tag-name-but-got-question-mark": - _(u"Expected tag name. Got '?' instead. (HTML doesn't " - u"support processing instructions.)"), + _("Expected tag name. Got '?' instead. (HTML doesn't " + "support processing instructions.)"), "expected-tag-name": - _(u"Expected tag name. Got something else instead"), + _("Expected tag name. Got something else instead"), "expected-closing-tag-but-got-right-bracket": - _(u"Expected closing tag. Got '>' instead. 
Ignoring ''."), + _("Expected closing tag. Got '>' instead. Ignoring ''."), "expected-closing-tag-but-got-eof": - _(u"Expected closing tag. Unexpected end of file."), + _("Expected closing tag. Unexpected end of file."), "expected-closing-tag-but-got-char": - _(u"Expected closing tag. Unexpected character '%(data)s' found."), + _("Expected closing tag. Unexpected character '%(data)s' found."), "eof-in-tag-name": - _(u"Unexpected end of file in the tag name."), + _("Unexpected end of file in the tag name."), "expected-attribute-name-but-got-eof": - _(u"Unexpected end of file. Expected attribute name instead."), + _("Unexpected end of file. Expected attribute name instead."), "eof-in-attribute-name": - _(u"Unexpected end of file in attribute name."), + _("Unexpected end of file in attribute name."), "invalid-character-in-attribute-name": - _(u"Invalid chracter in attribute name"), + _("Invalid character in attribute name"), "duplicate-attribute": - _(u"Dropped duplicate attribute on tag."), + _("Dropped duplicate attribute on tag."), "expected-end-of-tag-name-but-got-eof": - _(u"Unexpected end of file. Expected = or end of tag."), + _("Unexpected end of file. Expected = or end of tag."), "expected-attribute-value-but-got-eof": - _(u"Unexpected end of file. Expected attribute value."), + _("Unexpected end of file. Expected attribute value."), "expected-attribute-value-but-got-right-bracket": - _(u"Expected attribute value. Got '>' instead."), + _("Expected attribute value. 
Got '>' instead."), 'equals-in-unquoted-attribute-value': - _(u"Unexpected = in unquoted attribute"), + _("Unexpected = in unquoted attribute"), 'unexpected-character-in-unquoted-attribute-value': - _(u"Unexpected character in unquoted attribute"), + _("Unexpected character in unquoted attribute"), "invalid-character-after-attribute-name": - _(u"Unexpected character after attribute name."), + _("Unexpected character after attribute name."), "unexpected-character-after-attribute-value": - _(u"Unexpected character after attribute value."), + _("Unexpected character after attribute value."), "eof-in-attribute-value-double-quote": - _(u"Unexpected end of file in attribute value (\")."), + _("Unexpected end of file in attribute value (\")."), "eof-in-attribute-value-single-quote": - _(u"Unexpected end of file in attribute value (')."), + _("Unexpected end of file in attribute value (')."), "eof-in-attribute-value-no-quotes": - _(u"Unexpected end of file in attribute value."), + _("Unexpected end of file in attribute value."), "unexpected-EOF-after-solidus-in-tag": - _(u"Unexpected end of file in tag. Expected >"), - "unexpected-character-after-soldius-in-tag": - _(u"Unexpected character after / in tag. Expected >"), + _("Unexpected end of file in tag. Expected >"), + "unexpected-character-after-solidus-in-tag": + _("Unexpected character after / in tag. Expected >"), "expected-dashes-or-doctype": - _(u"Expected '--' or 'DOCTYPE'. Not found."), + _("Expected '--' or 'DOCTYPE'. Not found."), "unexpected-bang-after-double-dash-in-comment": - _(u"Unexpected ! after -- in comment"), + _("Unexpected ! 
after -- in comment"), "unexpected-space-after-double-dash-in-comment": - _(u"Unexpected space after -- in comment"), + _("Unexpected space after -- in comment"), "incorrect-comment": - _(u"Incorrect comment."), + _("Incorrect comment."), "eof-in-comment": - _(u"Unexpected end of file in comment."), + _("Unexpected end of file in comment."), "eof-in-comment-end-dash": - _(u"Unexpected end of file in comment (-)"), + _("Unexpected end of file in comment (-)"), "unexpected-dash-after-double-dash-in-comment": - _(u"Unexpected '-' after '--' found in comment."), + _("Unexpected '-' after '--' found in comment."), "eof-in-comment-double-dash": - _(u"Unexpected end of file in comment (--)."), + _("Unexpected end of file in comment (--)."), "eof-in-comment-end-space-state": - _(u"Unexpected end of file in comment."), + _("Unexpected end of file in comment."), "eof-in-comment-end-bang-state": - _(u"Unexpected end of file in comment."), + _("Unexpected end of file in comment."), "unexpected-char-in-comment": - _(u"Unexpected character in comment found."), + _("Unexpected character in comment found."), "need-space-after-doctype": - _(u"No space after literal string 'DOCTYPE'."), + _("No space after literal string 'DOCTYPE'."), "expected-doctype-name-but-got-right-bracket": - _(u"Unexpected > character. Expected DOCTYPE name."), + _("Unexpected > character. Expected DOCTYPE name."), "expected-doctype-name-but-got-eof": - _(u"Unexpected end of file. Expected DOCTYPE name."), + _("Unexpected end of file. Expected DOCTYPE name."), "eof-in-doctype-name": - _(u"Unexpected end of file in DOCTYPE name."), + _("Unexpected end of file in DOCTYPE name."), "eof-in-doctype": - _(u"Unexpected end of file in DOCTYPE."), + _("Unexpected end of file in DOCTYPE."), "expected-space-or-right-bracket-in-doctype": - _(u"Expected space or '>'. Got '%(data)s'"), + _("Expected space or '>'. 
Got '%(data)s'"), "unexpected-end-of-doctype": - _(u"Unexpected end of DOCTYPE."), + _("Unexpected end of DOCTYPE."), "unexpected-char-in-doctype": - _(u"Unexpected character in DOCTYPE."), + _("Unexpected character in DOCTYPE."), "eof-in-innerhtml": - _(u"XXX innerHTML EOF"), + _("XXX innerHTML EOF"), "unexpected-doctype": - _(u"Unexpected DOCTYPE. Ignored."), + _("Unexpected DOCTYPE. Ignored."), "non-html-root": - _(u"html needs to be the first start tag."), + _("html needs to be the first start tag."), "expected-doctype-but-got-eof": - _(u"Unexpected End of file. Expected DOCTYPE."), + _("Unexpected End of file. Expected DOCTYPE."), "unknown-doctype": - _(u"Erroneous DOCTYPE."), + _("Erroneous DOCTYPE."), "expected-doctype-but-got-chars": - _(u"Unexpected non-space characters. Expected DOCTYPE."), + _("Unexpected non-space characters. Expected DOCTYPE."), "expected-doctype-but-got-start-tag": - _(u"Unexpected start tag (%(name)s). Expected DOCTYPE."), + _("Unexpected start tag (%(name)s). Expected DOCTYPE."), "expected-doctype-but-got-end-tag": - _(u"Unexpected end tag (%(name)s). Expected DOCTYPE."), + _("Unexpected end tag (%(name)s). Expected DOCTYPE."), "end-tag-after-implied-root": - _(u"Unexpected end tag (%(name)s) after the (implied) root element."), + _("Unexpected end tag (%(name)s) after the (implied) root element."), "expected-named-closing-tag-but-got-eof": - _(u"Unexpected end of file. Expected end tag (%(name)s)."), + _("Unexpected end of file. Expected end tag (%(name)s)."), "two-heads-are-not-better-than-one": - _(u"Unexpected start tag head in existing head. Ignored."), + _("Unexpected start tag head in existing head. Ignored."), "unexpected-end-tag": - _(u"Unexpected end tag (%(name)s). Ignored."), + _("Unexpected end tag (%(name)s). Ignored."), "unexpected-start-tag-out-of-my-head": - _(u"Unexpected start tag (%(name)s) that can be in head. Moved."), + _("Unexpected start tag (%(name)s) that can be in head. 
Moved."), "unexpected-start-tag": - _(u"Unexpected start tag (%(name)s)."), + _("Unexpected start tag (%(name)s)."), "missing-end-tag": - _(u"Missing end tag (%(name)s)."), + _("Missing end tag (%(name)s)."), "missing-end-tags": - _(u"Missing end tags (%(name)s)."), + _("Missing end tags (%(name)s)."), "unexpected-start-tag-implies-end-tag": - _(u"Unexpected start tag (%(startName)s) " - u"implies end tag (%(endName)s)."), + _("Unexpected start tag (%(startName)s) " + "implies end tag (%(endName)s)."), "unexpected-start-tag-treated-as": - _(u"Unexpected start tag (%(originalName)s). Treated as %(newName)s."), + _("Unexpected start tag (%(originalName)s). Treated as %(newName)s."), "deprecated-tag": - _(u"Unexpected start tag %(name)s. Don't use it!"), + _("Unexpected start tag %(name)s. Don't use it!"), "unexpected-start-tag-ignored": - _(u"Unexpected start tag %(name)s. Ignored."), + _("Unexpected start tag %(name)s. Ignored."), "expected-one-end-tag-but-got-another": - _(u"Unexpected end tag (%(gotName)s). " - u"Missing end tag (%(expectedName)s)."), + _("Unexpected end tag (%(gotName)s). " + "Missing end tag (%(expectedName)s)."), "end-tag-too-early": - _(u"End tag (%(name)s) seen too early. Expected other end tag."), + _("End tag (%(name)s) seen too early. Expected other end tag."), "end-tag-too-early-named": - _(u"Unexpected end tag (%(gotName)s). Expected end tag (%(expectedName)s)."), + _("Unexpected end tag (%(gotName)s). Expected end tag (%(expectedName)s)."), "end-tag-too-early-ignored": - _(u"End tag (%(name)s) seen too early. Ignored."), + _("End tag (%(name)s) seen too early. 
Ignored."), "adoption-agency-1.1": - _(u"End tag (%(name)s) violates step 1, " - u"paragraph 1 of the adoption agency algorithm."), + _("End tag (%(name)s) violates step 1, " + "paragraph 1 of the adoption agency algorithm."), "adoption-agency-1.2": - _(u"End tag (%(name)s) violates step 1, " - u"paragraph 2 of the adoption agency algorithm."), + _("End tag (%(name)s) violates step 1, " + "paragraph 2 of the adoption agency algorithm."), "adoption-agency-1.3": - _(u"End tag (%(name)s) violates step 1, " - u"paragraph 3 of the adoption agency algorithm."), + _("End tag (%(name)s) violates step 1, " + "paragraph 3 of the adoption agency algorithm."), + "adoption-agency-4.4": + _("End tag (%(name)s) violates step 4, " + "paragraph 4 of the adoption agency algorithm."), "unexpected-end-tag-treated-as": - _(u"Unexpected end tag (%(originalName)s). Treated as %(newName)s."), + _("Unexpected end tag (%(originalName)s). Treated as %(newName)s."), "no-end-tag": - _(u"This element (%(name)s) has no end tag."), + _("This element (%(name)s) has no end tag."), "unexpected-implied-end-tag-in-table": - _(u"Unexpected implied end tag (%(name)s) in the table phase."), + _("Unexpected implied end tag (%(name)s) in the table phase."), "unexpected-implied-end-tag-in-table-body": - _(u"Unexpected implied end tag (%(name)s) in the table body phase."), + _("Unexpected implied end tag (%(name)s) in the table body phase."), "unexpected-char-implies-table-voodoo": - _(u"Unexpected non-space characters in " - u"table context caused voodoo mode."), + _("Unexpected non-space characters in " + "table context caused voodoo mode."), "unexpected-hidden-input-in-table": - _(u"Unexpected input with type hidden in table context."), + _("Unexpected input with type hidden in table context."), "unexpected-form-in-table": - _(u"Unexpected form in table context."), + _("Unexpected form in table context."), "unexpected-start-tag-implies-table-voodoo": - _(u"Unexpected start tag (%(name)s) in " - u"table 
context caused voodoo mode."), + _("Unexpected start tag (%(name)s) in " + "table context caused voodoo mode."), "unexpected-end-tag-implies-table-voodoo": - _(u"Unexpected end tag (%(name)s) in " - u"table context caused voodoo mode."), + _("Unexpected end tag (%(name)s) in " + "table context caused voodoo mode."), "unexpected-cell-in-table-body": - _(u"Unexpected table cell start tag (%(name)s) " - u"in the table body phase."), + _("Unexpected table cell start tag (%(name)s) " + "in the table body phase."), "unexpected-cell-end-tag": - _(u"Got table cell end tag (%(name)s) " - u"while required end tags are missing."), + _("Got table cell end tag (%(name)s) " + "while required end tags are missing."), "unexpected-end-tag-in-table-body": - _(u"Unexpected end tag (%(name)s) in the table body phase. Ignored."), + _("Unexpected end tag (%(name)s) in the table body phase. Ignored."), "unexpected-implied-end-tag-in-table-row": - _(u"Unexpected implied end tag (%(name)s) in the table row phase."), + _("Unexpected implied end tag (%(name)s) in the table row phase."), "unexpected-end-tag-in-table-row": - _(u"Unexpected end tag (%(name)s) in the table row phase. Ignored."), + _("Unexpected end tag (%(name)s) in the table row phase. Ignored."), "unexpected-select-in-select": - _(u"Unexpected select start tag in the select phase " - u"treated as select end tag."), + _("Unexpected select start tag in the select phase " + "treated as select end tag."), "unexpected-input-in-select": - _(u"Unexpected input start tag in the select phase."), + _("Unexpected input start tag in the select phase."), "unexpected-start-tag-in-select": - _(u"Unexpected start tag token (%(name)s in the select phase. " - u"Ignored."), + _("Unexpected start tag token (%(name)s in the select phase. " + "Ignored."), "unexpected-end-tag-in-select": - _(u"Unexpected end tag (%(name)s) in the select phase. Ignored."), + _("Unexpected end tag (%(name)s) in the select phase. 
Ignored."), "unexpected-table-element-start-tag-in-select-in-table": - _(u"Unexpected table element start tag (%(name)s) in the select in table phase."), + _("Unexpected table element start tag (%(name)s) in the select in table phase."), "unexpected-table-element-end-tag-in-select-in-table": - _(u"Unexpected table element end tag (%(name)s) in the select in table phase."), + _("Unexpected table element end tag (%(name)s) in the select in table phase."), "unexpected-char-after-body": - _(u"Unexpected non-space characters in the after body phase."), + _("Unexpected non-space characters in the after body phase."), "unexpected-start-tag-after-body": - _(u"Unexpected start tag token (%(name)s)" - u" in the after body phase."), + _("Unexpected start tag token (%(name)s)" + " in the after body phase."), "unexpected-end-tag-after-body": - _(u"Unexpected end tag token (%(name)s)" - u" in the after body phase."), + _("Unexpected end tag token (%(name)s)" + " in the after body phase."), "unexpected-char-in-frameset": - _(u"Unepxected characters in the frameset phase. Characters ignored."), + _("Unexpected characters in the frameset phase. Characters ignored."), "unexpected-start-tag-in-frameset": - _(u"Unexpected start tag token (%(name)s)" - u" in the frameset phase. Ignored."), + _("Unexpected start tag token (%(name)s)" + " in the frameset phase. Ignored."), "unexpected-frameset-in-frameset-innerhtml": - _(u"Unexpected end tag token (frameset) " - u"in the frameset phase (innerHTML)."), + _("Unexpected end tag token (frameset) " + "in the frameset phase (innerHTML)."), "unexpected-end-tag-in-frameset": - _(u"Unexpected end tag token (%(name)s)" - u" in the frameset phase. Ignored."), + _("Unexpected end tag token (%(name)s)" + " in the frameset phase. Ignored."), "unexpected-char-after-frameset": - _(u"Unexpected non-space characters in the " - u"after frameset phase. Ignored."), + _("Unexpected non-space characters in the " + "after frameset phase. 
Ignored."), "unexpected-start-tag-after-frameset": - _(u"Unexpected start tag (%(name)s)" - u" in the after frameset phase. Ignored."), + _("Unexpected start tag (%(name)s)" + " in the after frameset phase. Ignored."), "unexpected-end-tag-after-frameset": - _(u"Unexpected end tag (%(name)s)" - u" in the after frameset phase. Ignored."), + _("Unexpected end tag (%(name)s)" + " in the after frameset phase. Ignored."), "unexpected-end-tag-after-body-innerhtml": - _(u"Unexpected end tag after body(innerHtml)"), + _("Unexpected end tag after body(innerHtml)"), "expected-eof-but-got-char": - _(u"Unexpected non-space characters. Expected end of file."), + _("Unexpected non-space characters. Expected end of file."), "expected-eof-but-got-start-tag": - _(u"Unexpected start tag (%(name)s)" - u". Expected end of file."), + _("Unexpected start tag (%(name)s)" + ". Expected end of file."), "expected-eof-but-got-end-tag": - _(u"Unexpected end tag (%(name)s)" - u". Expected end of file."), + _("Unexpected end tag (%(name)s)" + ". Expected end of file."), "eof-in-table": - _(u"Unexpected end of file. Expected table content."), + _("Unexpected end of file. Expected table content."), "eof-in-select": - _(u"Unexpected end of file. Expected select content."), + _("Unexpected end of file. Expected select content."), "eof-in-frameset": - _(u"Unexpected end of file. Expected frameset content."), + _("Unexpected end of file. Expected frameset content."), "eof-in-script-in-script": - _(u"Unexpected end of file. Expected script content."), + _("Unexpected end of file. Expected script content."), "eof-in-foreign-lands": - _(u"Unexpected end of file. Expected foreign content"), + _("Unexpected end of file. 
Expected foreign content"), "non-void-element-with-trailing-solidus": - _(u"Trailing solidus not allowed on element %(name)s"), + _("Trailing solidus not allowed on element %(name)s"), "unexpected-html-element-in-foreign-content": - _(u"Element %(name)s not allowed in a non-html context"), + _("Element %(name)s not allowed in a non-html context"), "unexpected-end-tag-before-html": - _(u"Unexpected end tag (%(name)s) before html."), + _("Unexpected end tag (%(name)s) before html."), "XXX-undefined-error": - (u"Undefined error (this sucks and should be fixed)"), + _("Undefined error (this sucks and should be fixed)"), } namespaces = { - "html":"http://www.w3.org/1999/xhtml", - "mathml":"http://www.w3.org/1998/Math/MathML", - "svg":"http://www.w3.org/2000/svg", - "xlink":"http://www.w3.org/1999/xlink", - "xml":"http://www.w3.org/XML/1998/namespace", - "xmlns":"http://www.w3.org/2000/xmlns/" + "html": "http://www.w3.org/1999/xhtml", + "mathml": "http://www.w3.org/1998/Math/MathML", + "svg": "http://www.w3.org/2000/svg", + "xlink": "http://www.w3.org/1999/xlink", + "xml": "http://www.w3.org/XML/1998/namespace", + "xmlns": "http://www.w3.org/2000/xmlns/" } scopingElements = frozenset(( @@ -380,7 +379,7 @@ (namespaces["html"], "iframe"), # Note that image is commented out in the spec as "this isn't an # element that can end up on the stack, so it doesn't matter," - (namespaces["html"], "image"), + (namespaces["html"], "image"), (namespaces["html"], "img"), (namespaces["html"], "input"), (namespaces["html"], "isindex"), @@ -434,12 +433,30 @@ (namespaces["mathml"], "mtext") )) +adjustForeignAttributes = { + "xlink:actuate": ("xlink", "actuate", namespaces["xlink"]), + "xlink:arcrole": ("xlink", "arcrole", namespaces["xlink"]), + "xlink:href": ("xlink", "href", namespaces["xlink"]), + "xlink:role": ("xlink", "role", namespaces["xlink"]), + "xlink:show": ("xlink", "show", namespaces["xlink"]), + "xlink:title": ("xlink", "title", namespaces["xlink"]), + "xlink:type": ("xlink", 
"type", namespaces["xlink"]), + "xml:base": ("xml", "base", namespaces["xml"]), + "xml:lang": ("xml", "lang", namespaces["xml"]), + "xml:space": ("xml", "space", namespaces["xml"]), + "xmlns": (None, "xmlns", namespaces["xmlns"]), + "xmlns:xlink": ("xmlns", "xlink", namespaces["xmlns"]) +} + +unadjustForeignAttributes = dict([((ns, local), qname) for qname, (prefix, local, ns) in + adjustForeignAttributes.items()]) + spaceCharacters = frozenset(( - u"\t", - u"\n", - u"\u000C", - u" ", - u"\r" + "\t", + "\n", + "\u000C", + " ", + "\r" )) tableInsertModeElements = frozenset(( @@ -456,8 +473,8 @@ digits = frozenset(string.digits) hexDigits = frozenset(string.hexdigits) -asciiUpper2Lower = dict([(ord(c),ord(c.lower())) - for c in string.ascii_uppercase]) +asciiUpper2Lower = dict([(ord(c), ord(c.lower())) + for c in string.ascii_uppercase]) # Heading elements need to be ordered headingElements = ( @@ -503,8 +520,8 @@ "": frozenset(("irrelevant",)), "style": frozenset(("scoped",)), "img": frozenset(("ismap",)), - "audio": frozenset(("autoplay","controls")), - "video": frozenset(("autoplay","controls")), + "audio": frozenset(("autoplay", "controls")), + "video": frozenset(("autoplay", "controls")), "script": frozenset(("defer", "async")), "details": frozenset(("open",)), "datagrid": frozenset(("multiple", "disabled")), @@ -523,2312 +540,2312 @@ # entitiesWindows1252 has to be _ordered_ and needs to have an index. It # therefore can't be a frozenset. 
entitiesWindows1252 = ( - 8364, # 0x80 0x20AC EURO SIGN - 65533, # 0x81 UNDEFINED - 8218, # 0x82 0x201A SINGLE LOW-9 QUOTATION MARK - 402, # 0x83 0x0192 LATIN SMALL LETTER F WITH HOOK - 8222, # 0x84 0x201E DOUBLE LOW-9 QUOTATION MARK - 8230, # 0x85 0x2026 HORIZONTAL ELLIPSIS - 8224, # 0x86 0x2020 DAGGER - 8225, # 0x87 0x2021 DOUBLE DAGGER - 710, # 0x88 0x02C6 MODIFIER LETTER CIRCUMFLEX ACCENT - 8240, # 0x89 0x2030 PER MILLE SIGN - 352, # 0x8A 0x0160 LATIN CAPITAL LETTER S WITH CARON - 8249, # 0x8B 0x2039 SINGLE LEFT-POINTING ANGLE QUOTATION MARK - 338, # 0x8C 0x0152 LATIN CAPITAL LIGATURE OE - 65533, # 0x8D UNDEFINED - 381, # 0x8E 0x017D LATIN CAPITAL LETTER Z WITH CARON - 65533, # 0x8F UNDEFINED - 65533, # 0x90 UNDEFINED - 8216, # 0x91 0x2018 LEFT SINGLE QUOTATION MARK - 8217, # 0x92 0x2019 RIGHT SINGLE QUOTATION MARK - 8220, # 0x93 0x201C LEFT DOUBLE QUOTATION MARK - 8221, # 0x94 0x201D RIGHT DOUBLE QUOTATION MARK - 8226, # 0x95 0x2022 BULLET - 8211, # 0x96 0x2013 EN DASH - 8212, # 0x97 0x2014 EM DASH - 732, # 0x98 0x02DC SMALL TILDE - 8482, # 0x99 0x2122 TRADE MARK SIGN - 353, # 0x9A 0x0161 LATIN SMALL LETTER S WITH CARON - 8250, # 0x9B 0x203A SINGLE RIGHT-POINTING ANGLE QUOTATION MARK - 339, # 0x9C 0x0153 LATIN SMALL LIGATURE OE - 65533, # 0x9D UNDEFINED - 382, # 0x9E 0x017E LATIN SMALL LETTER Z WITH CARON - 376 # 0x9F 0x0178 LATIN CAPITAL LETTER Y WITH DIAERESIS + 8364, # 0x80 0x20AC EURO SIGN + 65533, # 0x81 UNDEFINED + 8218, # 0x82 0x201A SINGLE LOW-9 QUOTATION MARK + 402, # 0x83 0x0192 LATIN SMALL LETTER F WITH HOOK + 8222, # 0x84 0x201E DOUBLE LOW-9 QUOTATION MARK + 8230, # 0x85 0x2026 HORIZONTAL ELLIPSIS + 8224, # 0x86 0x2020 DAGGER + 8225, # 0x87 0x2021 DOUBLE DAGGER + 710, # 0x88 0x02C6 MODIFIER LETTER CIRCUMFLEX ACCENT + 8240, # 0x89 0x2030 PER MILLE SIGN + 352, # 0x8A 0x0160 LATIN CAPITAL LETTER S WITH CARON + 8249, # 0x8B 0x2039 SINGLE LEFT-POINTING ANGLE QUOTATION MARK + 338, # 0x8C 0x0152 LATIN CAPITAL LIGATURE OE + 65533, # 0x8D UNDEFINED + 381, # 
0x8E 0x017D LATIN CAPITAL LETTER Z WITH CARON + 65533, # 0x8F UNDEFINED + 65533, # 0x90 UNDEFINED + 8216, # 0x91 0x2018 LEFT SINGLE QUOTATION MARK + 8217, # 0x92 0x2019 RIGHT SINGLE QUOTATION MARK + 8220, # 0x93 0x201C LEFT DOUBLE QUOTATION MARK + 8221, # 0x94 0x201D RIGHT DOUBLE QUOTATION MARK + 8226, # 0x95 0x2022 BULLET + 8211, # 0x96 0x2013 EN DASH + 8212, # 0x97 0x2014 EM DASH + 732, # 0x98 0x02DC SMALL TILDE + 8482, # 0x99 0x2122 TRADE MARK SIGN + 353, # 0x9A 0x0161 LATIN SMALL LETTER S WITH CARON + 8250, # 0x9B 0x203A SINGLE RIGHT-POINTING ANGLE QUOTATION MARK + 339, # 0x9C 0x0153 LATIN SMALL LIGATURE OE + 65533, # 0x9D UNDEFINED + 382, # 0x9E 0x017E LATIN SMALL LETTER Z WITH CARON + 376 # 0x9F 0x0178 LATIN CAPITAL LETTER Y WITH DIAERESIS ) xmlEntities = frozenset(('lt;', 'gt;', 'amp;', 'apos;', 'quot;')) entities = { - "AElig": u"\xc6", - "AElig;": u"\xc6", - "AMP": u"&", - "AMP;": u"&", - "Aacute": u"\xc1", - "Aacute;": u"\xc1", - "Abreve;": u"\u0102", - "Acirc": u"\xc2", - "Acirc;": u"\xc2", - "Acy;": u"\u0410", - "Afr;": u"\U0001d504", - "Agrave": u"\xc0", - "Agrave;": u"\xc0", - "Alpha;": u"\u0391", - "Amacr;": u"\u0100", - "And;": u"\u2a53", - "Aogon;": u"\u0104", - "Aopf;": u"\U0001d538", - "ApplyFunction;": u"\u2061", - "Aring": u"\xc5", - "Aring;": u"\xc5", - "Ascr;": u"\U0001d49c", - "Assign;": u"\u2254", - "Atilde": u"\xc3", - "Atilde;": u"\xc3", - "Auml": u"\xc4", - "Auml;": u"\xc4", - "Backslash;": u"\u2216", - "Barv;": u"\u2ae7", - "Barwed;": u"\u2306", - "Bcy;": u"\u0411", - "Because;": u"\u2235", - "Bernoullis;": u"\u212c", - "Beta;": u"\u0392", - "Bfr;": u"\U0001d505", - "Bopf;": u"\U0001d539", - "Breve;": u"\u02d8", - "Bscr;": u"\u212c", - "Bumpeq;": u"\u224e", - "CHcy;": u"\u0427", - "COPY": u"\xa9", - "COPY;": u"\xa9", - "Cacute;": u"\u0106", - "Cap;": u"\u22d2", - "CapitalDifferentialD;": u"\u2145", - "Cayleys;": u"\u212d", - "Ccaron;": u"\u010c", - "Ccedil": u"\xc7", - "Ccedil;": u"\xc7", - "Ccirc;": u"\u0108", - "Cconint;": u"\u2230", 
- "Cdot;": u"\u010a", - "Cedilla;": u"\xb8", - "CenterDot;": u"\xb7", - "Cfr;": u"\u212d", - "Chi;": u"\u03a7", - "CircleDot;": u"\u2299", - "CircleMinus;": u"\u2296", - "CirclePlus;": u"\u2295", - "CircleTimes;": u"\u2297", - "ClockwiseContourIntegral;": u"\u2232", - "CloseCurlyDoubleQuote;": u"\u201d", - "CloseCurlyQuote;": u"\u2019", - "Colon;": u"\u2237", - "Colone;": u"\u2a74", - "Congruent;": u"\u2261", - "Conint;": u"\u222f", - "ContourIntegral;": u"\u222e", - "Copf;": u"\u2102", - "Coproduct;": u"\u2210", - "CounterClockwiseContourIntegral;": u"\u2233", - "Cross;": u"\u2a2f", - "Cscr;": u"\U0001d49e", - "Cup;": u"\u22d3", - "CupCap;": u"\u224d", - "DD;": u"\u2145", - "DDotrahd;": u"\u2911", - "DJcy;": u"\u0402", - "DScy;": u"\u0405", - "DZcy;": u"\u040f", - "Dagger;": u"\u2021", - "Darr;": u"\u21a1", - "Dashv;": u"\u2ae4", - "Dcaron;": u"\u010e", - "Dcy;": u"\u0414", - "Del;": u"\u2207", - "Delta;": u"\u0394", - "Dfr;": u"\U0001d507", - "DiacriticalAcute;": u"\xb4", - "DiacriticalDot;": u"\u02d9", - "DiacriticalDoubleAcute;": u"\u02dd", - "DiacriticalGrave;": u"`", - "DiacriticalTilde;": u"\u02dc", - "Diamond;": u"\u22c4", - "DifferentialD;": u"\u2146", - "Dopf;": u"\U0001d53b", - "Dot;": u"\xa8", - "DotDot;": u"\u20dc", - "DotEqual;": u"\u2250", - "DoubleContourIntegral;": u"\u222f", - "DoubleDot;": u"\xa8", - "DoubleDownArrow;": u"\u21d3", - "DoubleLeftArrow;": u"\u21d0", - "DoubleLeftRightArrow;": u"\u21d4", - "DoubleLeftTee;": u"\u2ae4", - "DoubleLongLeftArrow;": u"\u27f8", - "DoubleLongLeftRightArrow;": u"\u27fa", - "DoubleLongRightArrow;": u"\u27f9", - "DoubleRightArrow;": u"\u21d2", - "DoubleRightTee;": u"\u22a8", - "DoubleUpArrow;": u"\u21d1", - "DoubleUpDownArrow;": u"\u21d5", - "DoubleVerticalBar;": u"\u2225", - "DownArrow;": u"\u2193", - "DownArrowBar;": u"\u2913", - "DownArrowUpArrow;": u"\u21f5", - "DownBreve;": u"\u0311", - "DownLeftRightVector;": u"\u2950", - "DownLeftTeeVector;": u"\u295e", - "DownLeftVector;": u"\u21bd", - 
"DownLeftVectorBar;": u"\u2956", - "DownRightTeeVector;": u"\u295f", - "DownRightVector;": u"\u21c1", - "DownRightVectorBar;": u"\u2957", - "DownTee;": u"\u22a4", - "DownTeeArrow;": u"\u21a7", - "Downarrow;": u"\u21d3", - "Dscr;": u"\U0001d49f", - "Dstrok;": u"\u0110", - "ENG;": u"\u014a", - "ETH": u"\xd0", - "ETH;": u"\xd0", - "Eacute": u"\xc9", - "Eacute;": u"\xc9", - "Ecaron;": u"\u011a", - "Ecirc": u"\xca", - "Ecirc;": u"\xca", - "Ecy;": u"\u042d", - "Edot;": u"\u0116", - "Efr;": u"\U0001d508", - "Egrave": u"\xc8", - "Egrave;": u"\xc8", - "Element;": u"\u2208", - "Emacr;": u"\u0112", - "EmptySmallSquare;": u"\u25fb", - "EmptyVerySmallSquare;": u"\u25ab", - "Eogon;": u"\u0118", - "Eopf;": u"\U0001d53c", - "Epsilon;": u"\u0395", - "Equal;": u"\u2a75", - "EqualTilde;": u"\u2242", - "Equilibrium;": u"\u21cc", - "Escr;": u"\u2130", - "Esim;": u"\u2a73", - "Eta;": u"\u0397", - "Euml": u"\xcb", - "Euml;": u"\xcb", - "Exists;": u"\u2203", - "ExponentialE;": u"\u2147", - "Fcy;": u"\u0424", - "Ffr;": u"\U0001d509", - "FilledSmallSquare;": u"\u25fc", - "FilledVerySmallSquare;": u"\u25aa", - "Fopf;": u"\U0001d53d", - "ForAll;": u"\u2200", - "Fouriertrf;": u"\u2131", - "Fscr;": u"\u2131", - "GJcy;": u"\u0403", - "GT": u">", - "GT;": u">", - "Gamma;": u"\u0393", - "Gammad;": u"\u03dc", - "Gbreve;": u"\u011e", - "Gcedil;": u"\u0122", - "Gcirc;": u"\u011c", - "Gcy;": u"\u0413", - "Gdot;": u"\u0120", - "Gfr;": u"\U0001d50a", - "Gg;": u"\u22d9", - "Gopf;": u"\U0001d53e", - "GreaterEqual;": u"\u2265", - "GreaterEqualLess;": u"\u22db", - "GreaterFullEqual;": u"\u2267", - "GreaterGreater;": u"\u2aa2", - "GreaterLess;": u"\u2277", - "GreaterSlantEqual;": u"\u2a7e", - "GreaterTilde;": u"\u2273", - "Gscr;": u"\U0001d4a2", - "Gt;": u"\u226b", - "HARDcy;": u"\u042a", - "Hacek;": u"\u02c7", - "Hat;": u"^", - "Hcirc;": u"\u0124", - "Hfr;": u"\u210c", - "HilbertSpace;": u"\u210b", - "Hopf;": u"\u210d", - "HorizontalLine;": u"\u2500", - "Hscr;": u"\u210b", - "Hstrok;": u"\u0126", - 
"HumpDownHump;": u"\u224e", - "HumpEqual;": u"\u224f", - "IEcy;": u"\u0415", - "IJlig;": u"\u0132", - "IOcy;": u"\u0401", - "Iacute": u"\xcd", - "Iacute;": u"\xcd", - "Icirc": u"\xce", - "Icirc;": u"\xce", - "Icy;": u"\u0418", - "Idot;": u"\u0130", - "Ifr;": u"\u2111", - "Igrave": u"\xcc", - "Igrave;": u"\xcc", - "Im;": u"\u2111", - "Imacr;": u"\u012a", - "ImaginaryI;": u"\u2148", - "Implies;": u"\u21d2", - "Int;": u"\u222c", - "Integral;": u"\u222b", - "Intersection;": u"\u22c2", - "InvisibleComma;": u"\u2063", - "InvisibleTimes;": u"\u2062", - "Iogon;": u"\u012e", - "Iopf;": u"\U0001d540", - "Iota;": u"\u0399", - "Iscr;": u"\u2110", - "Itilde;": u"\u0128", - "Iukcy;": u"\u0406", - "Iuml": u"\xcf", - "Iuml;": u"\xcf", - "Jcirc;": u"\u0134", - "Jcy;": u"\u0419", - "Jfr;": u"\U0001d50d", - "Jopf;": u"\U0001d541", - "Jscr;": u"\U0001d4a5", - "Jsercy;": u"\u0408", - "Jukcy;": u"\u0404", - "KHcy;": u"\u0425", - "KJcy;": u"\u040c", - "Kappa;": u"\u039a", - "Kcedil;": u"\u0136", - "Kcy;": u"\u041a", - "Kfr;": u"\U0001d50e", - "Kopf;": u"\U0001d542", - "Kscr;": u"\U0001d4a6", - "LJcy;": u"\u0409", - "LT": u"<", - "LT;": u"<", - "Lacute;": u"\u0139", - "Lambda;": u"\u039b", - "Lang;": u"\u27ea", - "Laplacetrf;": u"\u2112", - "Larr;": u"\u219e", - "Lcaron;": u"\u013d", - "Lcedil;": u"\u013b", - "Lcy;": u"\u041b", - "LeftAngleBracket;": u"\u27e8", - "LeftArrow;": u"\u2190", - "LeftArrowBar;": u"\u21e4", - "LeftArrowRightArrow;": u"\u21c6", - "LeftCeiling;": u"\u2308", - "LeftDoubleBracket;": u"\u27e6", - "LeftDownTeeVector;": u"\u2961", - "LeftDownVector;": u"\u21c3", - "LeftDownVectorBar;": u"\u2959", - "LeftFloor;": u"\u230a", - "LeftRightArrow;": u"\u2194", - "LeftRightVector;": u"\u294e", - "LeftTee;": u"\u22a3", - "LeftTeeArrow;": u"\u21a4", - "LeftTeeVector;": u"\u295a", - "LeftTriangle;": u"\u22b2", - "LeftTriangleBar;": u"\u29cf", - "LeftTriangleEqual;": u"\u22b4", - "LeftUpDownVector;": u"\u2951", - "LeftUpTeeVector;": u"\u2960", - "LeftUpVector;": u"\u21bf", - 
"LeftUpVectorBar;": u"\u2958", - "LeftVector;": u"\u21bc", - "LeftVectorBar;": u"\u2952", - "Leftarrow;": u"\u21d0", - "Leftrightarrow;": u"\u21d4", - "LessEqualGreater;": u"\u22da", - "LessFullEqual;": u"\u2266", - "LessGreater;": u"\u2276", - "LessLess;": u"\u2aa1", - "LessSlantEqual;": u"\u2a7d", - "LessTilde;": u"\u2272", - "Lfr;": u"\U0001d50f", - "Ll;": u"\u22d8", - "Lleftarrow;": u"\u21da", - "Lmidot;": u"\u013f", - "LongLeftArrow;": u"\u27f5", - "LongLeftRightArrow;": u"\u27f7", - "LongRightArrow;": u"\u27f6", - "Longleftarrow;": u"\u27f8", - "Longleftrightarrow;": u"\u27fa", - "Longrightarrow;": u"\u27f9", - "Lopf;": u"\U0001d543", - "LowerLeftArrow;": u"\u2199", - "LowerRightArrow;": u"\u2198", - "Lscr;": u"\u2112", - "Lsh;": u"\u21b0", - "Lstrok;": u"\u0141", - "Lt;": u"\u226a", - "Map;": u"\u2905", - "Mcy;": u"\u041c", - "MediumSpace;": u"\u205f", - "Mellintrf;": u"\u2133", - "Mfr;": u"\U0001d510", - "MinusPlus;": u"\u2213", - "Mopf;": u"\U0001d544", - "Mscr;": u"\u2133", - "Mu;": u"\u039c", - "NJcy;": u"\u040a", - "Nacute;": u"\u0143", - "Ncaron;": u"\u0147", - "Ncedil;": u"\u0145", - "Ncy;": u"\u041d", - "NegativeMediumSpace;": u"\u200b", - "NegativeThickSpace;": u"\u200b", - "NegativeThinSpace;": u"\u200b", - "NegativeVeryThinSpace;": u"\u200b", - "NestedGreaterGreater;": u"\u226b", - "NestedLessLess;": u"\u226a", - "NewLine;": u"\n", - "Nfr;": u"\U0001d511", - "NoBreak;": u"\u2060", - "NonBreakingSpace;": u"\xa0", - "Nopf;": u"\u2115", - "Not;": u"\u2aec", - "NotCongruent;": u"\u2262", - "NotCupCap;": u"\u226d", - "NotDoubleVerticalBar;": u"\u2226", - "NotElement;": u"\u2209", - "NotEqual;": u"\u2260", - "NotEqualTilde;": u"\u2242\u0338", - "NotExists;": u"\u2204", - "NotGreater;": u"\u226f", - "NotGreaterEqual;": u"\u2271", - "NotGreaterFullEqual;": u"\u2267\u0338", - "NotGreaterGreater;": u"\u226b\u0338", - "NotGreaterLess;": u"\u2279", - "NotGreaterSlantEqual;": u"\u2a7e\u0338", - "NotGreaterTilde;": u"\u2275", - "NotHumpDownHump;": 
u"\u224e\u0338", - "NotHumpEqual;": u"\u224f\u0338", - "NotLeftTriangle;": u"\u22ea", - "NotLeftTriangleBar;": u"\u29cf\u0338", - "NotLeftTriangleEqual;": u"\u22ec", - "NotLess;": u"\u226e", - "NotLessEqual;": u"\u2270", - "NotLessGreater;": u"\u2278", - "NotLessLess;": u"\u226a\u0338", - "NotLessSlantEqual;": u"\u2a7d\u0338", - "NotLessTilde;": u"\u2274", - "NotNestedGreaterGreater;": u"\u2aa2\u0338", - "NotNestedLessLess;": u"\u2aa1\u0338", - "NotPrecedes;": u"\u2280", - "NotPrecedesEqual;": u"\u2aaf\u0338", - "NotPrecedesSlantEqual;": u"\u22e0", - "NotReverseElement;": u"\u220c", - "NotRightTriangle;": u"\u22eb", - "NotRightTriangleBar;": u"\u29d0\u0338", - "NotRightTriangleEqual;": u"\u22ed", - "NotSquareSubset;": u"\u228f\u0338", - "NotSquareSubsetEqual;": u"\u22e2", - "NotSquareSuperset;": u"\u2290\u0338", - "NotSquareSupersetEqual;": u"\u22e3", - "NotSubset;": u"\u2282\u20d2", - "NotSubsetEqual;": u"\u2288", - "NotSucceeds;": u"\u2281", - "NotSucceedsEqual;": u"\u2ab0\u0338", - "NotSucceedsSlantEqual;": u"\u22e1", - "NotSucceedsTilde;": u"\u227f\u0338", - "NotSuperset;": u"\u2283\u20d2", - "NotSupersetEqual;": u"\u2289", - "NotTilde;": u"\u2241", - "NotTildeEqual;": u"\u2244", - "NotTildeFullEqual;": u"\u2247", - "NotTildeTilde;": u"\u2249", - "NotVerticalBar;": u"\u2224", - "Nscr;": u"\U0001d4a9", - "Ntilde": u"\xd1", - "Ntilde;": u"\xd1", - "Nu;": u"\u039d", - "OElig;": u"\u0152", - "Oacute": u"\xd3", - "Oacute;": u"\xd3", - "Ocirc": u"\xd4", - "Ocirc;": u"\xd4", - "Ocy;": u"\u041e", - "Odblac;": u"\u0150", - "Ofr;": u"\U0001d512", - "Ograve": u"\xd2", - "Ograve;": u"\xd2", - "Omacr;": u"\u014c", - "Omega;": u"\u03a9", - "Omicron;": u"\u039f", - "Oopf;": u"\U0001d546", - "OpenCurlyDoubleQuote;": u"\u201c", - "OpenCurlyQuote;": u"\u2018", - "Or;": u"\u2a54", - "Oscr;": u"\U0001d4aa", - "Oslash": u"\xd8", - "Oslash;": u"\xd8", - "Otilde": u"\xd5", - "Otilde;": u"\xd5", - "Otimes;": u"\u2a37", - "Ouml": u"\xd6", - "Ouml;": u"\xd6", - "OverBar;": u"\u203e", - 
"OverBrace;": u"\u23de", - "OverBracket;": u"\u23b4", - "OverParenthesis;": u"\u23dc", - "PartialD;": u"\u2202", - "Pcy;": u"\u041f", - "Pfr;": u"\U0001d513", - "Phi;": u"\u03a6", - "Pi;": u"\u03a0", - "PlusMinus;": u"\xb1", - "Poincareplane;": u"\u210c", - "Popf;": u"\u2119", - "Pr;": u"\u2abb", - "Precedes;": u"\u227a", - "PrecedesEqual;": u"\u2aaf", - "PrecedesSlantEqual;": u"\u227c", - "PrecedesTilde;": u"\u227e", - "Prime;": u"\u2033", - "Product;": u"\u220f", - "Proportion;": u"\u2237", - "Proportional;": u"\u221d", - "Pscr;": u"\U0001d4ab", - "Psi;": u"\u03a8", - "QUOT": u"\"", - "QUOT;": u"\"", - "Qfr;": u"\U0001d514", - "Qopf;": u"\u211a", - "Qscr;": u"\U0001d4ac", - "RBarr;": u"\u2910", - "REG": u"\xae", - "REG;": u"\xae", - "Racute;": u"\u0154", - "Rang;": u"\u27eb", - "Rarr;": u"\u21a0", - "Rarrtl;": u"\u2916", - "Rcaron;": u"\u0158", - "Rcedil;": u"\u0156", - "Rcy;": u"\u0420", - "Re;": u"\u211c", - "ReverseElement;": u"\u220b", - "ReverseEquilibrium;": u"\u21cb", - "ReverseUpEquilibrium;": u"\u296f", - "Rfr;": u"\u211c", - "Rho;": u"\u03a1", - "RightAngleBracket;": u"\u27e9", - "RightArrow;": u"\u2192", - "RightArrowBar;": u"\u21e5", - "RightArrowLeftArrow;": u"\u21c4", - "RightCeiling;": u"\u2309", - "RightDoubleBracket;": u"\u27e7", - "RightDownTeeVector;": u"\u295d", - "RightDownVector;": u"\u21c2", - "RightDownVectorBar;": u"\u2955", - "RightFloor;": u"\u230b", - "RightTee;": u"\u22a2", - "RightTeeArrow;": u"\u21a6", - "RightTeeVector;": u"\u295b", - "RightTriangle;": u"\u22b3", - "RightTriangleBar;": u"\u29d0", - "RightTriangleEqual;": u"\u22b5", - "RightUpDownVector;": u"\u294f", - "RightUpTeeVector;": u"\u295c", - "RightUpVector;": u"\u21be", - "RightUpVectorBar;": u"\u2954", - "RightVector;": u"\u21c0", - "RightVectorBar;": u"\u2953", - "Rightarrow;": u"\u21d2", - "Ropf;": u"\u211d", - "RoundImplies;": u"\u2970", - "Rrightarrow;": u"\u21db", - "Rscr;": u"\u211b", - "Rsh;": u"\u21b1", - "RuleDelayed;": u"\u29f4", - "SHCHcy;": u"\u0429", - 
"SHcy;": u"\u0428", - "SOFTcy;": u"\u042c", - "Sacute;": u"\u015a", - "Sc;": u"\u2abc", - "Scaron;": u"\u0160", - "Scedil;": u"\u015e", - "Scirc;": u"\u015c", - "Scy;": u"\u0421", - "Sfr;": u"\U0001d516", - "ShortDownArrow;": u"\u2193", - "ShortLeftArrow;": u"\u2190", - "ShortRightArrow;": u"\u2192", - "ShortUpArrow;": u"\u2191", - "Sigma;": u"\u03a3", - "SmallCircle;": u"\u2218", - "Sopf;": u"\U0001d54a", - "Sqrt;": u"\u221a", - "Square;": u"\u25a1", - "SquareIntersection;": u"\u2293", - "SquareSubset;": u"\u228f", - "SquareSubsetEqual;": u"\u2291", - "SquareSuperset;": u"\u2290", - "SquareSupersetEqual;": u"\u2292", - "SquareUnion;": u"\u2294", - "Sscr;": u"\U0001d4ae", - "Star;": u"\u22c6", - "Sub;": u"\u22d0", - "Subset;": u"\u22d0", - "SubsetEqual;": u"\u2286", - "Succeeds;": u"\u227b", - "SucceedsEqual;": u"\u2ab0", - "SucceedsSlantEqual;": u"\u227d", - "SucceedsTilde;": u"\u227f", - "SuchThat;": u"\u220b", - "Sum;": u"\u2211", - "Sup;": u"\u22d1", - "Superset;": u"\u2283", - "SupersetEqual;": u"\u2287", - "Supset;": u"\u22d1", - "THORN": u"\xde", - "THORN;": u"\xde", - "TRADE;": u"\u2122", - "TSHcy;": u"\u040b", - "TScy;": u"\u0426", - "Tab;": u"\t", - "Tau;": u"\u03a4", - "Tcaron;": u"\u0164", - "Tcedil;": u"\u0162", - "Tcy;": u"\u0422", - "Tfr;": u"\U0001d517", - "Therefore;": u"\u2234", - "Theta;": u"\u0398", - "ThickSpace;": u"\u205f\u200a", - "ThinSpace;": u"\u2009", - "Tilde;": u"\u223c", - "TildeEqual;": u"\u2243", - "TildeFullEqual;": u"\u2245", - "TildeTilde;": u"\u2248", - "Topf;": u"\U0001d54b", - "TripleDot;": u"\u20db", - "Tscr;": u"\U0001d4af", - "Tstrok;": u"\u0166", - "Uacute": u"\xda", - "Uacute;": u"\xda", - "Uarr;": u"\u219f", - "Uarrocir;": u"\u2949", - "Ubrcy;": u"\u040e", - "Ubreve;": u"\u016c", - "Ucirc": u"\xdb", - "Ucirc;": u"\xdb", - "Ucy;": u"\u0423", - "Udblac;": u"\u0170", - "Ufr;": u"\U0001d518", - "Ugrave": u"\xd9", - "Ugrave;": u"\xd9", - "Umacr;": u"\u016a", - "UnderBar;": u"_", - "UnderBrace;": u"\u23df", - "UnderBracket;": 
u"\u23b5", - "UnderParenthesis;": u"\u23dd", - "Union;": u"\u22c3", - "UnionPlus;": u"\u228e", - "Uogon;": u"\u0172", - "Uopf;": u"\U0001d54c", - "UpArrow;": u"\u2191", - "UpArrowBar;": u"\u2912", - "UpArrowDownArrow;": u"\u21c5", - "UpDownArrow;": u"\u2195", - "UpEquilibrium;": u"\u296e", - "UpTee;": u"\u22a5", - "UpTeeArrow;": u"\u21a5", - "Uparrow;": u"\u21d1", - "Updownarrow;": u"\u21d5", - "UpperLeftArrow;": u"\u2196", - "UpperRightArrow;": u"\u2197", - "Upsi;": u"\u03d2", - "Upsilon;": u"\u03a5", - "Uring;": u"\u016e", - "Uscr;": u"\U0001d4b0", - "Utilde;": u"\u0168", - "Uuml": u"\xdc", - "Uuml;": u"\xdc", - "VDash;": u"\u22ab", - "Vbar;": u"\u2aeb", - "Vcy;": u"\u0412", - "Vdash;": u"\u22a9", - "Vdashl;": u"\u2ae6", - "Vee;": u"\u22c1", - "Verbar;": u"\u2016", - "Vert;": u"\u2016", - "VerticalBar;": u"\u2223", - "VerticalLine;": u"|", - "VerticalSeparator;": u"\u2758", - "VerticalTilde;": u"\u2240", - "VeryThinSpace;": u"\u200a", - "Vfr;": u"\U0001d519", - "Vopf;": u"\U0001d54d", - "Vscr;": u"\U0001d4b1", - "Vvdash;": u"\u22aa", - "Wcirc;": u"\u0174", - "Wedge;": u"\u22c0", - "Wfr;": u"\U0001d51a", - "Wopf;": u"\U0001d54e", - "Wscr;": u"\U0001d4b2", - "Xfr;": u"\U0001d51b", - "Xi;": u"\u039e", - "Xopf;": u"\U0001d54f", - "Xscr;": u"\U0001d4b3", - "YAcy;": u"\u042f", - "YIcy;": u"\u0407", - "YUcy;": u"\u042e", - "Yacute": u"\xdd", - "Yacute;": u"\xdd", - "Ycirc;": u"\u0176", - "Ycy;": u"\u042b", - "Yfr;": u"\U0001d51c", - "Yopf;": u"\U0001d550", - "Yscr;": u"\U0001d4b4", - "Yuml;": u"\u0178", - "ZHcy;": u"\u0416", - "Zacute;": u"\u0179", - "Zcaron;": u"\u017d", - "Zcy;": u"\u0417", - "Zdot;": u"\u017b", - "ZeroWidthSpace;": u"\u200b", - "Zeta;": u"\u0396", - "Zfr;": u"\u2128", - "Zopf;": u"\u2124", - "Zscr;": u"\U0001d4b5", - "aacute": u"\xe1", - "aacute;": u"\xe1", - "abreve;": u"\u0103", - "ac;": u"\u223e", - "acE;": u"\u223e\u0333", - "acd;": u"\u223f", - "acirc": u"\xe2", - "acirc;": u"\xe2", - "acute": u"\xb4", - "acute;": u"\xb4", - "acy;": u"\u0430", - 
"aelig": u"\xe6", - "aelig;": u"\xe6", - "af;": u"\u2061", - "afr;": u"\U0001d51e", - "agrave": u"\xe0", - "agrave;": u"\xe0", - "alefsym;": u"\u2135", - "aleph;": u"\u2135", - "alpha;": u"\u03b1", - "amacr;": u"\u0101", - "amalg;": u"\u2a3f", - "amp": u"&", - "amp;": u"&", - "and;": u"\u2227", - "andand;": u"\u2a55", - "andd;": u"\u2a5c", - "andslope;": u"\u2a58", - "andv;": u"\u2a5a", - "ang;": u"\u2220", - "ange;": u"\u29a4", - "angle;": u"\u2220", - "angmsd;": u"\u2221", - "angmsdaa;": u"\u29a8", - "angmsdab;": u"\u29a9", - "angmsdac;": u"\u29aa", - "angmsdad;": u"\u29ab", - "angmsdae;": u"\u29ac", - "angmsdaf;": u"\u29ad", - "angmsdag;": u"\u29ae", - "angmsdah;": u"\u29af", - "angrt;": u"\u221f", - "angrtvb;": u"\u22be", - "angrtvbd;": u"\u299d", - "angsph;": u"\u2222", - "angst;": u"\xc5", - "angzarr;": u"\u237c", - "aogon;": u"\u0105", - "aopf;": u"\U0001d552", - "ap;": u"\u2248", - "apE;": u"\u2a70", - "apacir;": u"\u2a6f", - "ape;": u"\u224a", - "apid;": u"\u224b", - "apos;": u"'", - "approx;": u"\u2248", - "approxeq;": u"\u224a", - "aring": u"\xe5", - "aring;": u"\xe5", - "ascr;": u"\U0001d4b6", - "ast;": u"*", - "asymp;": u"\u2248", - "asympeq;": u"\u224d", - "atilde": u"\xe3", - "atilde;": u"\xe3", - "auml": u"\xe4", - "auml;": u"\xe4", - "awconint;": u"\u2233", - "awint;": u"\u2a11", - "bNot;": u"\u2aed", - "backcong;": u"\u224c", - "backepsilon;": u"\u03f6", - "backprime;": u"\u2035", - "backsim;": u"\u223d", - "backsimeq;": u"\u22cd", - "barvee;": u"\u22bd", - "barwed;": u"\u2305", - "barwedge;": u"\u2305", - "bbrk;": u"\u23b5", - "bbrktbrk;": u"\u23b6", - "bcong;": u"\u224c", - "bcy;": u"\u0431", - "bdquo;": u"\u201e", - "becaus;": u"\u2235", - "because;": u"\u2235", - "bemptyv;": u"\u29b0", - "bepsi;": u"\u03f6", - "bernou;": u"\u212c", - "beta;": u"\u03b2", - "beth;": u"\u2136", - "between;": u"\u226c", - "bfr;": u"\U0001d51f", - "bigcap;": u"\u22c2", - "bigcirc;": u"\u25ef", - "bigcup;": u"\u22c3", - "bigodot;": u"\u2a00", - "bigoplus;": 
u"\u2a01", - "bigotimes;": u"\u2a02", - "bigsqcup;": u"\u2a06", - "bigstar;": u"\u2605", - "bigtriangledown;": u"\u25bd", - "bigtriangleup;": u"\u25b3", - "biguplus;": u"\u2a04", - "bigvee;": u"\u22c1", - "bigwedge;": u"\u22c0", - "bkarow;": u"\u290d", - "blacklozenge;": u"\u29eb", - "blacksquare;": u"\u25aa", - "blacktriangle;": u"\u25b4", - "blacktriangledown;": u"\u25be", - "blacktriangleleft;": u"\u25c2", - "blacktriangleright;": u"\u25b8", - "blank;": u"\u2423", - "blk12;": u"\u2592", - "blk14;": u"\u2591", - "blk34;": u"\u2593", - "block;": u"\u2588", - "bne;": u"=\u20e5", - "bnequiv;": u"\u2261\u20e5", - "bnot;": u"\u2310", - "bopf;": u"\U0001d553", - "bot;": u"\u22a5", - "bottom;": u"\u22a5", - "bowtie;": u"\u22c8", - "boxDL;": u"\u2557", - "boxDR;": u"\u2554", - "boxDl;": u"\u2556", - "boxDr;": u"\u2553", - "boxH;": u"\u2550", - "boxHD;": u"\u2566", - "boxHU;": u"\u2569", - "boxHd;": u"\u2564", - "boxHu;": u"\u2567", - "boxUL;": u"\u255d", - "boxUR;": u"\u255a", - "boxUl;": u"\u255c", - "boxUr;": u"\u2559", - "boxV;": u"\u2551", - "boxVH;": u"\u256c", - "boxVL;": u"\u2563", - "boxVR;": u"\u2560", - "boxVh;": u"\u256b", - "boxVl;": u"\u2562", - "boxVr;": u"\u255f", - "boxbox;": u"\u29c9", - "boxdL;": u"\u2555", - "boxdR;": u"\u2552", - "boxdl;": u"\u2510", - "boxdr;": u"\u250c", - "boxh;": u"\u2500", - "boxhD;": u"\u2565", - "boxhU;": u"\u2568", - "boxhd;": u"\u252c", - "boxhu;": u"\u2534", - "boxminus;": u"\u229f", - "boxplus;": u"\u229e", - "boxtimes;": u"\u22a0", - "boxuL;": u"\u255b", - "boxuR;": u"\u2558", - "boxul;": u"\u2518", - "boxur;": u"\u2514", - "boxv;": u"\u2502", - "boxvH;": u"\u256a", - "boxvL;": u"\u2561", - "boxvR;": u"\u255e", - "boxvh;": u"\u253c", - "boxvl;": u"\u2524", - "boxvr;": u"\u251c", - "bprime;": u"\u2035", - "breve;": u"\u02d8", - "brvbar": u"\xa6", - "brvbar;": u"\xa6", - "bscr;": u"\U0001d4b7", - "bsemi;": u"\u204f", - "bsim;": u"\u223d", - "bsime;": u"\u22cd", - "bsol;": u"\\", - "bsolb;": u"\u29c5", - "bsolhsub;": 
u"\u27c8", - "bull;": u"\u2022", - "bullet;": u"\u2022", - "bump;": u"\u224e", - "bumpE;": u"\u2aae", - "bumpe;": u"\u224f", - "bumpeq;": u"\u224f", - "cacute;": u"\u0107", - "cap;": u"\u2229", - "capand;": u"\u2a44", - "capbrcup;": u"\u2a49", - "capcap;": u"\u2a4b", - "capcup;": u"\u2a47", - "capdot;": u"\u2a40", - "caps;": u"\u2229\ufe00", - "caret;": u"\u2041", - "caron;": u"\u02c7", - "ccaps;": u"\u2a4d", - "ccaron;": u"\u010d", - "ccedil": u"\xe7", - "ccedil;": u"\xe7", - "ccirc;": u"\u0109", - "ccups;": u"\u2a4c", - "ccupssm;": u"\u2a50", - "cdot;": u"\u010b", - "cedil": u"\xb8", - "cedil;": u"\xb8", - "cemptyv;": u"\u29b2", - "cent": u"\xa2", - "cent;": u"\xa2", - "centerdot;": u"\xb7", - "cfr;": u"\U0001d520", - "chcy;": u"\u0447", - "check;": u"\u2713", - "checkmark;": u"\u2713", - "chi;": u"\u03c7", - "cir;": u"\u25cb", - "cirE;": u"\u29c3", - "circ;": u"\u02c6", - "circeq;": u"\u2257", - "circlearrowleft;": u"\u21ba", - "circlearrowright;": u"\u21bb", - "circledR;": u"\xae", - "circledS;": u"\u24c8", - "circledast;": u"\u229b", - "circledcirc;": u"\u229a", - "circleddash;": u"\u229d", - "cire;": u"\u2257", - "cirfnint;": u"\u2a10", - "cirmid;": u"\u2aef", - "cirscir;": u"\u29c2", - "clubs;": u"\u2663", - "clubsuit;": u"\u2663", - "colon;": u":", - "colone;": u"\u2254", - "coloneq;": u"\u2254", - "comma;": u",", - "commat;": u"@", - "comp;": u"\u2201", - "compfn;": u"\u2218", - "complement;": u"\u2201", - "complexes;": u"\u2102", - "cong;": u"\u2245", - "congdot;": u"\u2a6d", - "conint;": u"\u222e", - "copf;": u"\U0001d554", - "coprod;": u"\u2210", - "copy": u"\xa9", - "copy;": u"\xa9", - "copysr;": u"\u2117", - "crarr;": u"\u21b5", - "cross;": u"\u2717", - "cscr;": u"\U0001d4b8", - "csub;": u"\u2acf", - "csube;": u"\u2ad1", - "csup;": u"\u2ad0", - "csupe;": u"\u2ad2", - "ctdot;": u"\u22ef", - "cudarrl;": u"\u2938", - "cudarrr;": u"\u2935", - "cuepr;": u"\u22de", - "cuesc;": u"\u22df", - "cularr;": u"\u21b6", - "cularrp;": u"\u293d", - "cup;": u"\u222a", 
- "cupbrcap;": u"\u2a48", - "cupcap;": u"\u2a46", - "cupcup;": u"\u2a4a", - "cupdot;": u"\u228d", - "cupor;": u"\u2a45", - "cups;": u"\u222a\ufe00", - "curarr;": u"\u21b7", - "curarrm;": u"\u293c", - "curlyeqprec;": u"\u22de", - "curlyeqsucc;": u"\u22df", - "curlyvee;": u"\u22ce", - "curlywedge;": u"\u22cf", - "curren": u"\xa4", - "curren;": u"\xa4", - "curvearrowleft;": u"\u21b6", - "curvearrowright;": u"\u21b7", - "cuvee;": u"\u22ce", - "cuwed;": u"\u22cf", - "cwconint;": u"\u2232", - "cwint;": u"\u2231", - "cylcty;": u"\u232d", - "dArr;": u"\u21d3", - "dHar;": u"\u2965", - "dagger;": u"\u2020", - "daleth;": u"\u2138", - "darr;": u"\u2193", - "dash;": u"\u2010", - "dashv;": u"\u22a3", - "dbkarow;": u"\u290f", - "dblac;": u"\u02dd", - "dcaron;": u"\u010f", - "dcy;": u"\u0434", - "dd;": u"\u2146", - "ddagger;": u"\u2021", - "ddarr;": u"\u21ca", - "ddotseq;": u"\u2a77", - "deg": u"\xb0", - "deg;": u"\xb0", - "delta;": u"\u03b4", - "demptyv;": u"\u29b1", - "dfisht;": u"\u297f", - "dfr;": u"\U0001d521", - "dharl;": u"\u21c3", - "dharr;": u"\u21c2", - "diam;": u"\u22c4", - "diamond;": u"\u22c4", - "diamondsuit;": u"\u2666", - "diams;": u"\u2666", - "die;": u"\xa8", - "digamma;": u"\u03dd", - "disin;": u"\u22f2", - "div;": u"\xf7", - "divide": u"\xf7", - "divide;": u"\xf7", - "divideontimes;": u"\u22c7", - "divonx;": u"\u22c7", - "djcy;": u"\u0452", - "dlcorn;": u"\u231e", - "dlcrop;": u"\u230d", - "dollar;": u"$", - "dopf;": u"\U0001d555", - "dot;": u"\u02d9", - "doteq;": u"\u2250", - "doteqdot;": u"\u2251", - "dotminus;": u"\u2238", - "dotplus;": u"\u2214", - "dotsquare;": u"\u22a1", - "doublebarwedge;": u"\u2306", - "downarrow;": u"\u2193", - "downdownarrows;": u"\u21ca", - "downharpoonleft;": u"\u21c3", - "downharpoonright;": u"\u21c2", - "drbkarow;": u"\u2910", - "drcorn;": u"\u231f", - "drcrop;": u"\u230c", - "dscr;": u"\U0001d4b9", - "dscy;": u"\u0455", - "dsol;": u"\u29f6", - "dstrok;": u"\u0111", - "dtdot;": u"\u22f1", - "dtri;": u"\u25bf", - "dtrif;": 
u"\u25be", - "duarr;": u"\u21f5", - "duhar;": u"\u296f", - "dwangle;": u"\u29a6", - "dzcy;": u"\u045f", - "dzigrarr;": u"\u27ff", - "eDDot;": u"\u2a77", - "eDot;": u"\u2251", - "eacute": u"\xe9", - "eacute;": u"\xe9", - "easter;": u"\u2a6e", - "ecaron;": u"\u011b", - "ecir;": u"\u2256", - "ecirc": u"\xea", - "ecirc;": u"\xea", - "ecolon;": u"\u2255", - "ecy;": u"\u044d", - "edot;": u"\u0117", - "ee;": u"\u2147", - "efDot;": u"\u2252", - "efr;": u"\U0001d522", - "eg;": u"\u2a9a", - "egrave": u"\xe8", - "egrave;": u"\xe8", - "egs;": u"\u2a96", - "egsdot;": u"\u2a98", - "el;": u"\u2a99", - "elinters;": u"\u23e7", - "ell;": u"\u2113", - "els;": u"\u2a95", - "elsdot;": u"\u2a97", - "emacr;": u"\u0113", - "empty;": u"\u2205", - "emptyset;": u"\u2205", - "emptyv;": u"\u2205", - "emsp13;": u"\u2004", - "emsp14;": u"\u2005", - "emsp;": u"\u2003", - "eng;": u"\u014b", - "ensp;": u"\u2002", - "eogon;": u"\u0119", - "eopf;": u"\U0001d556", - "epar;": u"\u22d5", - "eparsl;": u"\u29e3", - "eplus;": u"\u2a71", - "epsi;": u"\u03b5", - "epsilon;": u"\u03b5", - "epsiv;": u"\u03f5", - "eqcirc;": u"\u2256", - "eqcolon;": u"\u2255", - "eqsim;": u"\u2242", - "eqslantgtr;": u"\u2a96", - "eqslantless;": u"\u2a95", - "equals;": u"=", - "equest;": u"\u225f", - "equiv;": u"\u2261", - "equivDD;": u"\u2a78", - "eqvparsl;": u"\u29e5", - "erDot;": u"\u2253", - "erarr;": u"\u2971", - "escr;": u"\u212f", - "esdot;": u"\u2250", - "esim;": u"\u2242", - "eta;": u"\u03b7", - "eth": u"\xf0", - "eth;": u"\xf0", - "euml": u"\xeb", - "euml;": u"\xeb", - "euro;": u"\u20ac", - "excl;": u"!", - "exist;": u"\u2203", - "expectation;": u"\u2130", - "exponentiale;": u"\u2147", - "fallingdotseq;": u"\u2252", - "fcy;": u"\u0444", - "female;": u"\u2640", - "ffilig;": u"\ufb03", - "fflig;": u"\ufb00", - "ffllig;": u"\ufb04", - "ffr;": u"\U0001d523", - "filig;": u"\ufb01", - "fjlig;": u"fj", - "flat;": u"\u266d", - "fllig;": u"\ufb02", - "fltns;": u"\u25b1", - "fnof;": u"\u0192", - "fopf;": u"\U0001d557", - 
"forall;": u"\u2200", - "fork;": u"\u22d4", - "forkv;": u"\u2ad9", - "fpartint;": u"\u2a0d", - "frac12": u"\xbd", - "frac12;": u"\xbd", - "frac13;": u"\u2153", - "frac14": u"\xbc", - "frac14;": u"\xbc", - "frac15;": u"\u2155", - "frac16;": u"\u2159", - "frac18;": u"\u215b", - "frac23;": u"\u2154", - "frac25;": u"\u2156", - "frac34": u"\xbe", - "frac34;": u"\xbe", - "frac35;": u"\u2157", - "frac38;": u"\u215c", - "frac45;": u"\u2158", - "frac56;": u"\u215a", - "frac58;": u"\u215d", - "frac78;": u"\u215e", - "frasl;": u"\u2044", - "frown;": u"\u2322", - "fscr;": u"\U0001d4bb", - "gE;": u"\u2267", - "gEl;": u"\u2a8c", - "gacute;": u"\u01f5", - "gamma;": u"\u03b3", - "gammad;": u"\u03dd", - "gap;": u"\u2a86", - "gbreve;": u"\u011f", - "gcirc;": u"\u011d", - "gcy;": u"\u0433", - "gdot;": u"\u0121", - "ge;": u"\u2265", - "gel;": u"\u22db", - "geq;": u"\u2265", - "geqq;": u"\u2267", - "geqslant;": u"\u2a7e", - "ges;": u"\u2a7e", - "gescc;": u"\u2aa9", - "gesdot;": u"\u2a80", - "gesdoto;": u"\u2a82", - "gesdotol;": u"\u2a84", - "gesl;": u"\u22db\ufe00", - "gesles;": u"\u2a94", - "gfr;": u"\U0001d524", - "gg;": u"\u226b", - "ggg;": u"\u22d9", - "gimel;": u"\u2137", - "gjcy;": u"\u0453", - "gl;": u"\u2277", - "glE;": u"\u2a92", - "gla;": u"\u2aa5", - "glj;": u"\u2aa4", - "gnE;": u"\u2269", - "gnap;": u"\u2a8a", - "gnapprox;": u"\u2a8a", - "gne;": u"\u2a88", - "gneq;": u"\u2a88", - "gneqq;": u"\u2269", - "gnsim;": u"\u22e7", - "gopf;": u"\U0001d558", - "grave;": u"`", - "gscr;": u"\u210a", - "gsim;": u"\u2273", - "gsime;": u"\u2a8e", - "gsiml;": u"\u2a90", - "gt": u">", - "gt;": u">", - "gtcc;": u"\u2aa7", - "gtcir;": u"\u2a7a", - "gtdot;": u"\u22d7", - "gtlPar;": u"\u2995", - "gtquest;": u"\u2a7c", - "gtrapprox;": u"\u2a86", - "gtrarr;": u"\u2978", - "gtrdot;": u"\u22d7", - "gtreqless;": u"\u22db", - "gtreqqless;": u"\u2a8c", - "gtrless;": u"\u2277", - "gtrsim;": u"\u2273", - "gvertneqq;": u"\u2269\ufe00", - "gvnE;": u"\u2269\ufe00", - "hArr;": u"\u21d4", - "hairsp;": 
u"\u200a", - "half;": u"\xbd", - "hamilt;": u"\u210b", - "hardcy;": u"\u044a", - "harr;": u"\u2194", - "harrcir;": u"\u2948", - "harrw;": u"\u21ad", - "hbar;": u"\u210f", - "hcirc;": u"\u0125", - "hearts;": u"\u2665", - "heartsuit;": u"\u2665", - "hellip;": u"\u2026", - "hercon;": u"\u22b9", - "hfr;": u"\U0001d525", - "hksearow;": u"\u2925", - "hkswarow;": u"\u2926", - "hoarr;": u"\u21ff", - "homtht;": u"\u223b", - "hookleftarrow;": u"\u21a9", - "hookrightarrow;": u"\u21aa", - "hopf;": u"\U0001d559", - "horbar;": u"\u2015", - "hscr;": u"\U0001d4bd", - "hslash;": u"\u210f", - "hstrok;": u"\u0127", - "hybull;": u"\u2043", - "hyphen;": u"\u2010", - "iacute": u"\xed", - "iacute;": u"\xed", - "ic;": u"\u2063", - "icirc": u"\xee", - "icirc;": u"\xee", - "icy;": u"\u0438", - "iecy;": u"\u0435", - "iexcl": u"\xa1", - "iexcl;": u"\xa1", - "iff;": u"\u21d4", - "ifr;": u"\U0001d526", - "igrave": u"\xec", - "igrave;": u"\xec", - "ii;": u"\u2148", - "iiiint;": u"\u2a0c", - "iiint;": u"\u222d", - "iinfin;": u"\u29dc", - "iiota;": u"\u2129", - "ijlig;": u"\u0133", - "imacr;": u"\u012b", - "image;": u"\u2111", - "imagline;": u"\u2110", - "imagpart;": u"\u2111", - "imath;": u"\u0131", - "imof;": u"\u22b7", - "imped;": u"\u01b5", - "in;": u"\u2208", - "incare;": u"\u2105", - "infin;": u"\u221e", - "infintie;": u"\u29dd", - "inodot;": u"\u0131", - "int;": u"\u222b", - "intcal;": u"\u22ba", - "integers;": u"\u2124", - "intercal;": u"\u22ba", - "intlarhk;": u"\u2a17", - "intprod;": u"\u2a3c", - "iocy;": u"\u0451", - "iogon;": u"\u012f", - "iopf;": u"\U0001d55a", - "iota;": u"\u03b9", - "iprod;": u"\u2a3c", - "iquest": u"\xbf", - "iquest;": u"\xbf", - "iscr;": u"\U0001d4be", - "isin;": u"\u2208", - "isinE;": u"\u22f9", - "isindot;": u"\u22f5", - "isins;": u"\u22f4", - "isinsv;": u"\u22f3", - "isinv;": u"\u2208", - "it;": u"\u2062", - "itilde;": u"\u0129", - "iukcy;": u"\u0456", - "iuml": u"\xef", - "iuml;": u"\xef", - "jcirc;": u"\u0135", - "jcy;": u"\u0439", - "jfr;": u"\U0001d527", - 
"jmath;": u"\u0237", - "jopf;": u"\U0001d55b", - "jscr;": u"\U0001d4bf", - "jsercy;": u"\u0458", - "jukcy;": u"\u0454", - "kappa;": u"\u03ba", - "kappav;": u"\u03f0", - "kcedil;": u"\u0137", - "kcy;": u"\u043a", - "kfr;": u"\U0001d528", - "kgreen;": u"\u0138", - "khcy;": u"\u0445", - "kjcy;": u"\u045c", - "kopf;": u"\U0001d55c", - "kscr;": u"\U0001d4c0", - "lAarr;": u"\u21da", - "lArr;": u"\u21d0", - "lAtail;": u"\u291b", - "lBarr;": u"\u290e", - "lE;": u"\u2266", - "lEg;": u"\u2a8b", - "lHar;": u"\u2962", - "lacute;": u"\u013a", - "laemptyv;": u"\u29b4", - "lagran;": u"\u2112", - "lambda;": u"\u03bb", - "lang;": u"\u27e8", - "langd;": u"\u2991", - "langle;": u"\u27e8", - "lap;": u"\u2a85", - "laquo": u"\xab", - "laquo;": u"\xab", - "larr;": u"\u2190", - "larrb;": u"\u21e4", - "larrbfs;": u"\u291f", - "larrfs;": u"\u291d", - "larrhk;": u"\u21a9", - "larrlp;": u"\u21ab", - "larrpl;": u"\u2939", - "larrsim;": u"\u2973", - "larrtl;": u"\u21a2", - "lat;": u"\u2aab", - "latail;": u"\u2919", - "late;": u"\u2aad", - "lates;": u"\u2aad\ufe00", - "lbarr;": u"\u290c", - "lbbrk;": u"\u2772", - "lbrace;": u"{", - "lbrack;": u"[", - "lbrke;": u"\u298b", - "lbrksld;": u"\u298f", - "lbrkslu;": u"\u298d", - "lcaron;": u"\u013e", - "lcedil;": u"\u013c", - "lceil;": u"\u2308", - "lcub;": u"{", - "lcy;": u"\u043b", - "ldca;": u"\u2936", - "ldquo;": u"\u201c", - "ldquor;": u"\u201e", - "ldrdhar;": u"\u2967", - "ldrushar;": u"\u294b", - "ldsh;": u"\u21b2", - "le;": u"\u2264", - "leftarrow;": u"\u2190", - "leftarrowtail;": u"\u21a2", - "leftharpoondown;": u"\u21bd", - "leftharpoonup;": u"\u21bc", - "leftleftarrows;": u"\u21c7", - "leftrightarrow;": u"\u2194", - "leftrightarrows;": u"\u21c6", - "leftrightharpoons;": u"\u21cb", - "leftrightsquigarrow;": u"\u21ad", - "leftthreetimes;": u"\u22cb", - "leg;": u"\u22da", - "leq;": u"\u2264", - "leqq;": u"\u2266", - "leqslant;": u"\u2a7d", - "les;": u"\u2a7d", - "lescc;": u"\u2aa8", - "lesdot;": u"\u2a7f", - "lesdoto;": u"\u2a81", - 
"lesdotor;": u"\u2a83", - "lesg;": u"\u22da\ufe00", - "lesges;": u"\u2a93", - "lessapprox;": u"\u2a85", - "lessdot;": u"\u22d6", - "lesseqgtr;": u"\u22da", - "lesseqqgtr;": u"\u2a8b", - "lessgtr;": u"\u2276", - "lesssim;": u"\u2272", - "lfisht;": u"\u297c", - "lfloor;": u"\u230a", - "lfr;": u"\U0001d529", - "lg;": u"\u2276", - "lgE;": u"\u2a91", - "lhard;": u"\u21bd", - "lharu;": u"\u21bc", - "lharul;": u"\u296a", - "lhblk;": u"\u2584", - "ljcy;": u"\u0459", - "ll;": u"\u226a", - "llarr;": u"\u21c7", - "llcorner;": u"\u231e", - "llhard;": u"\u296b", - "lltri;": u"\u25fa", - "lmidot;": u"\u0140", - "lmoust;": u"\u23b0", - "lmoustache;": u"\u23b0", - "lnE;": u"\u2268", - "lnap;": u"\u2a89", - "lnapprox;": u"\u2a89", - "lne;": u"\u2a87", - "lneq;": u"\u2a87", - "lneqq;": u"\u2268", - "lnsim;": u"\u22e6", - "loang;": u"\u27ec", - "loarr;": u"\u21fd", - "lobrk;": u"\u27e6", - "longleftarrow;": u"\u27f5", - "longleftrightarrow;": u"\u27f7", - "longmapsto;": u"\u27fc", - "longrightarrow;": u"\u27f6", - "looparrowleft;": u"\u21ab", - "looparrowright;": u"\u21ac", - "lopar;": u"\u2985", - "lopf;": u"\U0001d55d", - "loplus;": u"\u2a2d", - "lotimes;": u"\u2a34", - "lowast;": u"\u2217", - "lowbar;": u"_", - "loz;": u"\u25ca", - "lozenge;": u"\u25ca", - "lozf;": u"\u29eb", - "lpar;": u"(", - "lparlt;": u"\u2993", - "lrarr;": u"\u21c6", - "lrcorner;": u"\u231f", - "lrhar;": u"\u21cb", - "lrhard;": u"\u296d", - "lrm;": u"\u200e", - "lrtri;": u"\u22bf", - "lsaquo;": u"\u2039", - "lscr;": u"\U0001d4c1", - "lsh;": u"\u21b0", - "lsim;": u"\u2272", - "lsime;": u"\u2a8d", - "lsimg;": u"\u2a8f", - "lsqb;": u"[", - "lsquo;": u"\u2018", - "lsquor;": u"\u201a", - "lstrok;": u"\u0142", - "lt": u"<", - "lt;": u"<", - "ltcc;": u"\u2aa6", - "ltcir;": u"\u2a79", - "ltdot;": u"\u22d6", - "lthree;": u"\u22cb", - "ltimes;": u"\u22c9", - "ltlarr;": u"\u2976", - "ltquest;": u"\u2a7b", - "ltrPar;": u"\u2996", - "ltri;": u"\u25c3", - "ltrie;": u"\u22b4", - "ltrif;": u"\u25c2", - "lurdshar;": 
u"\u294a", - "luruhar;": u"\u2966", - "lvertneqq;": u"\u2268\ufe00", - "lvnE;": u"\u2268\ufe00", - "mDDot;": u"\u223a", - "macr": u"\xaf", - "macr;": u"\xaf", - "male;": u"\u2642", - "malt;": u"\u2720", - "maltese;": u"\u2720", - "map;": u"\u21a6", - "mapsto;": u"\u21a6", - "mapstodown;": u"\u21a7", - "mapstoleft;": u"\u21a4", - "mapstoup;": u"\u21a5", - "marker;": u"\u25ae", - "mcomma;": u"\u2a29", - "mcy;": u"\u043c", - "mdash;": u"\u2014", - "measuredangle;": u"\u2221", - "mfr;": u"\U0001d52a", - "mho;": u"\u2127", - "micro": u"\xb5", - "micro;": u"\xb5", - "mid;": u"\u2223", - "midast;": u"*", - "midcir;": u"\u2af0", - "middot": u"\xb7", - "middot;": u"\xb7", - "minus;": u"\u2212", - "minusb;": u"\u229f", - "minusd;": u"\u2238", - "minusdu;": u"\u2a2a", - "mlcp;": u"\u2adb", - "mldr;": u"\u2026", - "mnplus;": u"\u2213", - "models;": u"\u22a7", - "mopf;": u"\U0001d55e", - "mp;": u"\u2213", - "mscr;": u"\U0001d4c2", - "mstpos;": u"\u223e", - "mu;": u"\u03bc", - "multimap;": u"\u22b8", - "mumap;": u"\u22b8", - "nGg;": u"\u22d9\u0338", - "nGt;": u"\u226b\u20d2", - "nGtv;": u"\u226b\u0338", - "nLeftarrow;": u"\u21cd", - "nLeftrightarrow;": u"\u21ce", - "nLl;": u"\u22d8\u0338", - "nLt;": u"\u226a\u20d2", - "nLtv;": u"\u226a\u0338", - "nRightarrow;": u"\u21cf", - "nVDash;": u"\u22af", - "nVdash;": u"\u22ae", - "nabla;": u"\u2207", - "nacute;": u"\u0144", - "nang;": u"\u2220\u20d2", - "nap;": u"\u2249", - "napE;": u"\u2a70\u0338", - "napid;": u"\u224b\u0338", - "napos;": u"\u0149", - "napprox;": u"\u2249", - "natur;": u"\u266e", - "natural;": u"\u266e", - "naturals;": u"\u2115", - "nbsp": u"\xa0", - "nbsp;": u"\xa0", - "nbump;": u"\u224e\u0338", - "nbumpe;": u"\u224f\u0338", - "ncap;": u"\u2a43", - "ncaron;": u"\u0148", - "ncedil;": u"\u0146", - "ncong;": u"\u2247", - "ncongdot;": u"\u2a6d\u0338", - "ncup;": u"\u2a42", - "ncy;": u"\u043d", - "ndash;": u"\u2013", - "ne;": u"\u2260", - "neArr;": u"\u21d7", - "nearhk;": u"\u2924", - "nearr;": u"\u2197", - "nearrow;": 
u"\u2197", - "nedot;": u"\u2250\u0338", - "nequiv;": u"\u2262", - "nesear;": u"\u2928", - "nesim;": u"\u2242\u0338", - "nexist;": u"\u2204", - "nexists;": u"\u2204", - "nfr;": u"\U0001d52b", - "ngE;": u"\u2267\u0338", - "nge;": u"\u2271", - "ngeq;": u"\u2271", - "ngeqq;": u"\u2267\u0338", - "ngeqslant;": u"\u2a7e\u0338", - "nges;": u"\u2a7e\u0338", - "ngsim;": u"\u2275", - "ngt;": u"\u226f", - "ngtr;": u"\u226f", - "nhArr;": u"\u21ce", - "nharr;": u"\u21ae", - "nhpar;": u"\u2af2", - "ni;": u"\u220b", - "nis;": u"\u22fc", - "nisd;": u"\u22fa", - "niv;": u"\u220b", - "njcy;": u"\u045a", - "nlArr;": u"\u21cd", - "nlE;": u"\u2266\u0338", - "nlarr;": u"\u219a", - "nldr;": u"\u2025", - "nle;": u"\u2270", - "nleftarrow;": u"\u219a", - "nleftrightarrow;": u"\u21ae", - "nleq;": u"\u2270", - "nleqq;": u"\u2266\u0338", - "nleqslant;": u"\u2a7d\u0338", - "nles;": u"\u2a7d\u0338", - "nless;": u"\u226e", - "nlsim;": u"\u2274", - "nlt;": u"\u226e", - "nltri;": u"\u22ea", - "nltrie;": u"\u22ec", - "nmid;": u"\u2224", - "nopf;": u"\U0001d55f", - "not": u"\xac", - "not;": u"\xac", - "notin;": u"\u2209", - "notinE;": u"\u22f9\u0338", - "notindot;": u"\u22f5\u0338", - "notinva;": u"\u2209", - "notinvb;": u"\u22f7", - "notinvc;": u"\u22f6", - "notni;": u"\u220c", - "notniva;": u"\u220c", - "notnivb;": u"\u22fe", - "notnivc;": u"\u22fd", - "npar;": u"\u2226", - "nparallel;": u"\u2226", - "nparsl;": u"\u2afd\u20e5", - "npart;": u"\u2202\u0338", - "npolint;": u"\u2a14", - "npr;": u"\u2280", - "nprcue;": u"\u22e0", - "npre;": u"\u2aaf\u0338", - "nprec;": u"\u2280", - "npreceq;": u"\u2aaf\u0338", - "nrArr;": u"\u21cf", - "nrarr;": u"\u219b", - "nrarrc;": u"\u2933\u0338", - "nrarrw;": u"\u219d\u0338", - "nrightarrow;": u"\u219b", - "nrtri;": u"\u22eb", - "nrtrie;": u"\u22ed", - "nsc;": u"\u2281", - "nsccue;": u"\u22e1", - "nsce;": u"\u2ab0\u0338", - "nscr;": u"\U0001d4c3", - "nshortmid;": u"\u2224", - "nshortparallel;": u"\u2226", - "nsim;": u"\u2241", - "nsime;": u"\u2244", - "nsimeq;": 
u"\u2244", - "nsmid;": u"\u2224", - "nspar;": u"\u2226", - "nsqsube;": u"\u22e2", - "nsqsupe;": u"\u22e3", - "nsub;": u"\u2284", - "nsubE;": u"\u2ac5\u0338", - "nsube;": u"\u2288", - "nsubset;": u"\u2282\u20d2", - "nsubseteq;": u"\u2288", - "nsubseteqq;": u"\u2ac5\u0338", - "nsucc;": u"\u2281", - "nsucceq;": u"\u2ab0\u0338", - "nsup;": u"\u2285", - "nsupE;": u"\u2ac6\u0338", - "nsupe;": u"\u2289", - "nsupset;": u"\u2283\u20d2", - "nsupseteq;": u"\u2289", - "nsupseteqq;": u"\u2ac6\u0338", - "ntgl;": u"\u2279", - "ntilde": u"\xf1", - "ntilde;": u"\xf1", - "ntlg;": u"\u2278", - "ntriangleleft;": u"\u22ea", - "ntrianglelefteq;": u"\u22ec", - "ntriangleright;": u"\u22eb", - "ntrianglerighteq;": u"\u22ed", - "nu;": u"\u03bd", - "num;": u"#", - "numero;": u"\u2116", - "numsp;": u"\u2007", - "nvDash;": u"\u22ad", - "nvHarr;": u"\u2904", - "nvap;": u"\u224d\u20d2", - "nvdash;": u"\u22ac", - "nvge;": u"\u2265\u20d2", - "nvgt;": u">\u20d2", - "nvinfin;": u"\u29de", - "nvlArr;": u"\u2902", - "nvle;": u"\u2264\u20d2", - "nvlt;": u"<\u20d2", - "nvltrie;": u"\u22b4\u20d2", - "nvrArr;": u"\u2903", - "nvrtrie;": u"\u22b5\u20d2", - "nvsim;": u"\u223c\u20d2", - "nwArr;": u"\u21d6", - "nwarhk;": u"\u2923", - "nwarr;": u"\u2196", - "nwarrow;": u"\u2196", - "nwnear;": u"\u2927", - "oS;": u"\u24c8", - "oacute": u"\xf3", - "oacute;": u"\xf3", - "oast;": u"\u229b", - "ocir;": u"\u229a", - "ocirc": u"\xf4", - "ocirc;": u"\xf4", - "ocy;": u"\u043e", - "odash;": u"\u229d", - "odblac;": u"\u0151", - "odiv;": u"\u2a38", - "odot;": u"\u2299", - "odsold;": u"\u29bc", - "oelig;": u"\u0153", - "ofcir;": u"\u29bf", - "ofr;": u"\U0001d52c", - "ogon;": u"\u02db", - "ograve": u"\xf2", - "ograve;": u"\xf2", - "ogt;": u"\u29c1", - "ohbar;": u"\u29b5", - "ohm;": u"\u03a9", - "oint;": u"\u222e", - "olarr;": u"\u21ba", - "olcir;": u"\u29be", - "olcross;": u"\u29bb", - "oline;": u"\u203e", - "olt;": u"\u29c0", - "omacr;": u"\u014d", - "omega;": u"\u03c9", - "omicron;": u"\u03bf", - "omid;": u"\u29b6", - 
"ominus;": u"\u2296", - "oopf;": u"\U0001d560", - "opar;": u"\u29b7", - "operp;": u"\u29b9", - "oplus;": u"\u2295", - "or;": u"\u2228", - "orarr;": u"\u21bb", - "ord;": u"\u2a5d", - "order;": u"\u2134", - "orderof;": u"\u2134", - "ordf": u"\xaa", - "ordf;": u"\xaa", - "ordm": u"\xba", - "ordm;": u"\xba", - "origof;": u"\u22b6", - "oror;": u"\u2a56", - "orslope;": u"\u2a57", - "orv;": u"\u2a5b", - "oscr;": u"\u2134", - "oslash": u"\xf8", - "oslash;": u"\xf8", - "osol;": u"\u2298", - "otilde": u"\xf5", - "otilde;": u"\xf5", - "otimes;": u"\u2297", - "otimesas;": u"\u2a36", - "ouml": u"\xf6", - "ouml;": u"\xf6", - "ovbar;": u"\u233d", - "par;": u"\u2225", - "para": u"\xb6", - "para;": u"\xb6", - "parallel;": u"\u2225", - "parsim;": u"\u2af3", - "parsl;": u"\u2afd", - "part;": u"\u2202", - "pcy;": u"\u043f", - "percnt;": u"%", - "period;": u".", - "permil;": u"\u2030", - "perp;": u"\u22a5", - "pertenk;": u"\u2031", - "pfr;": u"\U0001d52d", - "phi;": u"\u03c6", - "phiv;": u"\u03d5", - "phmmat;": u"\u2133", - "phone;": u"\u260e", - "pi;": u"\u03c0", - "pitchfork;": u"\u22d4", - "piv;": u"\u03d6", - "planck;": u"\u210f", - "planckh;": u"\u210e", - "plankv;": u"\u210f", - "plus;": u"+", - "plusacir;": u"\u2a23", - "plusb;": u"\u229e", - "pluscir;": u"\u2a22", - "plusdo;": u"\u2214", - "plusdu;": u"\u2a25", - "pluse;": u"\u2a72", - "plusmn": u"\xb1", - "plusmn;": u"\xb1", - "plussim;": u"\u2a26", - "plustwo;": u"\u2a27", - "pm;": u"\xb1", - "pointint;": u"\u2a15", - "popf;": u"\U0001d561", - "pound": u"\xa3", - "pound;": u"\xa3", - "pr;": u"\u227a", - "prE;": u"\u2ab3", - "prap;": u"\u2ab7", - "prcue;": u"\u227c", - "pre;": u"\u2aaf", - "prec;": u"\u227a", - "precapprox;": u"\u2ab7", - "preccurlyeq;": u"\u227c", - "preceq;": u"\u2aaf", - "precnapprox;": u"\u2ab9", - "precneqq;": u"\u2ab5", - "precnsim;": u"\u22e8", - "precsim;": u"\u227e", - "prime;": u"\u2032", - "primes;": u"\u2119", - "prnE;": u"\u2ab5", - "prnap;": u"\u2ab9", - "prnsim;": u"\u22e8", - "prod;": 
u"\u220f", - "profalar;": u"\u232e", - "profline;": u"\u2312", - "profsurf;": u"\u2313", - "prop;": u"\u221d", - "propto;": u"\u221d", - "prsim;": u"\u227e", - "prurel;": u"\u22b0", - "pscr;": u"\U0001d4c5", - "psi;": u"\u03c8", - "puncsp;": u"\u2008", - "qfr;": u"\U0001d52e", - "qint;": u"\u2a0c", - "qopf;": u"\U0001d562", - "qprime;": u"\u2057", - "qscr;": u"\U0001d4c6", - "quaternions;": u"\u210d", - "quatint;": u"\u2a16", - "quest;": u"?", - "questeq;": u"\u225f", - "quot": u"\"", - "quot;": u"\"", - "rAarr;": u"\u21db", - "rArr;": u"\u21d2", - "rAtail;": u"\u291c", - "rBarr;": u"\u290f", - "rHar;": u"\u2964", - "race;": u"\u223d\u0331", - "racute;": u"\u0155", - "radic;": u"\u221a", - "raemptyv;": u"\u29b3", - "rang;": u"\u27e9", - "rangd;": u"\u2992", - "range;": u"\u29a5", - "rangle;": u"\u27e9", - "raquo": u"\xbb", - "raquo;": u"\xbb", - "rarr;": u"\u2192", - "rarrap;": u"\u2975", - "rarrb;": u"\u21e5", - "rarrbfs;": u"\u2920", - "rarrc;": u"\u2933", - "rarrfs;": u"\u291e", - "rarrhk;": u"\u21aa", - "rarrlp;": u"\u21ac", - "rarrpl;": u"\u2945", - "rarrsim;": u"\u2974", - "rarrtl;": u"\u21a3", - "rarrw;": u"\u219d", - "ratail;": u"\u291a", - "ratio;": u"\u2236", - "rationals;": u"\u211a", - "rbarr;": u"\u290d", - "rbbrk;": u"\u2773", - "rbrace;": u"}", - "rbrack;": u"]", - "rbrke;": u"\u298c", - "rbrksld;": u"\u298e", - "rbrkslu;": u"\u2990", - "rcaron;": u"\u0159", - "rcedil;": u"\u0157", - "rceil;": u"\u2309", - "rcub;": u"}", - "rcy;": u"\u0440", - "rdca;": u"\u2937", - "rdldhar;": u"\u2969", - "rdquo;": u"\u201d", - "rdquor;": u"\u201d", - "rdsh;": u"\u21b3", - "real;": u"\u211c", - "realine;": u"\u211b", - "realpart;": u"\u211c", - "reals;": u"\u211d", - "rect;": u"\u25ad", - "reg": u"\xae", - "reg;": u"\xae", - "rfisht;": u"\u297d", - "rfloor;": u"\u230b", - "rfr;": u"\U0001d52f", - "rhard;": u"\u21c1", - "rharu;": u"\u21c0", - "rharul;": u"\u296c", - "rho;": u"\u03c1", - "rhov;": u"\u03f1", - "rightarrow;": u"\u2192", - "rightarrowtail;": u"\u21a3", - 
"rightharpoondown;": u"\u21c1", - "rightharpoonup;": u"\u21c0", - "rightleftarrows;": u"\u21c4", - "rightleftharpoons;": u"\u21cc", - "rightrightarrows;": u"\u21c9", - "rightsquigarrow;": u"\u219d", - "rightthreetimes;": u"\u22cc", - "ring;": u"\u02da", - "risingdotseq;": u"\u2253", - "rlarr;": u"\u21c4", - "rlhar;": u"\u21cc", - "rlm;": u"\u200f", - "rmoust;": u"\u23b1", - "rmoustache;": u"\u23b1", - "rnmid;": u"\u2aee", - "roang;": u"\u27ed", - "roarr;": u"\u21fe", - "robrk;": u"\u27e7", - "ropar;": u"\u2986", - "ropf;": u"\U0001d563", - "roplus;": u"\u2a2e", - "rotimes;": u"\u2a35", - "rpar;": u")", - "rpargt;": u"\u2994", - "rppolint;": u"\u2a12", - "rrarr;": u"\u21c9", - "rsaquo;": u"\u203a", - "rscr;": u"\U0001d4c7", - "rsh;": u"\u21b1", - "rsqb;": u"]", - "rsquo;": u"\u2019", - "rsquor;": u"\u2019", - "rthree;": u"\u22cc", - "rtimes;": u"\u22ca", - "rtri;": u"\u25b9", - "rtrie;": u"\u22b5", - "rtrif;": u"\u25b8", - "rtriltri;": u"\u29ce", - "ruluhar;": u"\u2968", - "rx;": u"\u211e", - "sacute;": u"\u015b", - "sbquo;": u"\u201a", - "sc;": u"\u227b", - "scE;": u"\u2ab4", - "scap;": u"\u2ab8", - "scaron;": u"\u0161", - "sccue;": u"\u227d", - "sce;": u"\u2ab0", - "scedil;": u"\u015f", - "scirc;": u"\u015d", - "scnE;": u"\u2ab6", - "scnap;": u"\u2aba", - "scnsim;": u"\u22e9", - "scpolint;": u"\u2a13", - "scsim;": u"\u227f", - "scy;": u"\u0441", - "sdot;": u"\u22c5", - "sdotb;": u"\u22a1", - "sdote;": u"\u2a66", - "seArr;": u"\u21d8", - "searhk;": u"\u2925", - "searr;": u"\u2198", - "searrow;": u"\u2198", - "sect": u"\xa7", - "sect;": u"\xa7", - "semi;": u";", - "seswar;": u"\u2929", - "setminus;": u"\u2216", - "setmn;": u"\u2216", - "sext;": u"\u2736", - "sfr;": u"\U0001d530", - "sfrown;": u"\u2322", - "sharp;": u"\u266f", - "shchcy;": u"\u0449", - "shcy;": u"\u0448", - "shortmid;": u"\u2223", - "shortparallel;": u"\u2225", - "shy": u"\xad", - "shy;": u"\xad", - "sigma;": u"\u03c3", - "sigmaf;": u"\u03c2", - "sigmav;": u"\u03c2", - "sim;": u"\u223c", - "simdot;": 
u"\u2a6a", - "sime;": u"\u2243", - "simeq;": u"\u2243", - "simg;": u"\u2a9e", - "simgE;": u"\u2aa0", - "siml;": u"\u2a9d", - "simlE;": u"\u2a9f", - "simne;": u"\u2246", - "simplus;": u"\u2a24", - "simrarr;": u"\u2972", - "slarr;": u"\u2190", - "smallsetminus;": u"\u2216", - "smashp;": u"\u2a33", - "smeparsl;": u"\u29e4", - "smid;": u"\u2223", - "smile;": u"\u2323", - "smt;": u"\u2aaa", - "smte;": u"\u2aac", - "smtes;": u"\u2aac\ufe00", - "softcy;": u"\u044c", - "sol;": u"/", - "solb;": u"\u29c4", - "solbar;": u"\u233f", - "sopf;": u"\U0001d564", - "spades;": u"\u2660", - "spadesuit;": u"\u2660", - "spar;": u"\u2225", - "sqcap;": u"\u2293", - "sqcaps;": u"\u2293\ufe00", - "sqcup;": u"\u2294", - "sqcups;": u"\u2294\ufe00", - "sqsub;": u"\u228f", - "sqsube;": u"\u2291", - "sqsubset;": u"\u228f", - "sqsubseteq;": u"\u2291", - "sqsup;": u"\u2290", - "sqsupe;": u"\u2292", - "sqsupset;": u"\u2290", - "sqsupseteq;": u"\u2292", - "squ;": u"\u25a1", - "square;": u"\u25a1", - "squarf;": u"\u25aa", - "squf;": u"\u25aa", - "srarr;": u"\u2192", - "sscr;": u"\U0001d4c8", - "ssetmn;": u"\u2216", - "ssmile;": u"\u2323", - "sstarf;": u"\u22c6", - "star;": u"\u2606", - "starf;": u"\u2605", - "straightepsilon;": u"\u03f5", - "straightphi;": u"\u03d5", - "strns;": u"\xaf", - "sub;": u"\u2282", - "subE;": u"\u2ac5", - "subdot;": u"\u2abd", - "sube;": u"\u2286", - "subedot;": u"\u2ac3", - "submult;": u"\u2ac1", - "subnE;": u"\u2acb", - "subne;": u"\u228a", - "subplus;": u"\u2abf", - "subrarr;": u"\u2979", - "subset;": u"\u2282", - "subseteq;": u"\u2286", - "subseteqq;": u"\u2ac5", - "subsetneq;": u"\u228a", - "subsetneqq;": u"\u2acb", - "subsim;": u"\u2ac7", - "subsub;": u"\u2ad5", - "subsup;": u"\u2ad3", - "succ;": u"\u227b", - "succapprox;": u"\u2ab8", - "succcurlyeq;": u"\u227d", - "succeq;": u"\u2ab0", - "succnapprox;": u"\u2aba", - "succneqq;": u"\u2ab6", - "succnsim;": u"\u22e9", - "succsim;": u"\u227f", - "sum;": u"\u2211", - "sung;": u"\u266a", - "sup1": u"\xb9", - "sup1;": 
u"\xb9", - "sup2": u"\xb2", - "sup2;": u"\xb2", - "sup3": u"\xb3", - "sup3;": u"\xb3", - "sup;": u"\u2283", - "supE;": u"\u2ac6", - "supdot;": u"\u2abe", - "supdsub;": u"\u2ad8", - "supe;": u"\u2287", - "supedot;": u"\u2ac4", - "suphsol;": u"\u27c9", - "suphsub;": u"\u2ad7", - "suplarr;": u"\u297b", - "supmult;": u"\u2ac2", - "supnE;": u"\u2acc", - "supne;": u"\u228b", - "supplus;": u"\u2ac0", - "supset;": u"\u2283", - "supseteq;": u"\u2287", - "supseteqq;": u"\u2ac6", - "supsetneq;": u"\u228b", - "supsetneqq;": u"\u2acc", - "supsim;": u"\u2ac8", - "supsub;": u"\u2ad4", - "supsup;": u"\u2ad6", - "swArr;": u"\u21d9", - "swarhk;": u"\u2926", - "swarr;": u"\u2199", - "swarrow;": u"\u2199", - "swnwar;": u"\u292a", - "szlig": u"\xdf", - "szlig;": u"\xdf", - "target;": u"\u2316", - "tau;": u"\u03c4", - "tbrk;": u"\u23b4", - "tcaron;": u"\u0165", - "tcedil;": u"\u0163", - "tcy;": u"\u0442", - "tdot;": u"\u20db", - "telrec;": u"\u2315", - "tfr;": u"\U0001d531", - "there4;": u"\u2234", - "therefore;": u"\u2234", - "theta;": u"\u03b8", - "thetasym;": u"\u03d1", - "thetav;": u"\u03d1", - "thickapprox;": u"\u2248", - "thicksim;": u"\u223c", - "thinsp;": u"\u2009", - "thkap;": u"\u2248", - "thksim;": u"\u223c", - "thorn": u"\xfe", - "thorn;": u"\xfe", - "tilde;": u"\u02dc", - "times": u"\xd7", - "times;": u"\xd7", - "timesb;": u"\u22a0", - "timesbar;": u"\u2a31", - "timesd;": u"\u2a30", - "tint;": u"\u222d", - "toea;": u"\u2928", - "top;": u"\u22a4", - "topbot;": u"\u2336", - "topcir;": u"\u2af1", - "topf;": u"\U0001d565", - "topfork;": u"\u2ada", - "tosa;": u"\u2929", - "tprime;": u"\u2034", - "trade;": u"\u2122", - "triangle;": u"\u25b5", - "triangledown;": u"\u25bf", - "triangleleft;": u"\u25c3", - "trianglelefteq;": u"\u22b4", - "triangleq;": u"\u225c", - "triangleright;": u"\u25b9", - "trianglerighteq;": u"\u22b5", - "tridot;": u"\u25ec", - "trie;": u"\u225c", - "triminus;": u"\u2a3a", - "triplus;": u"\u2a39", - "trisb;": u"\u29cd", - "tritime;": u"\u2a3b", - "trpezium;": 
u"\u23e2", - "tscr;": u"\U0001d4c9", - "tscy;": u"\u0446", - "tshcy;": u"\u045b", - "tstrok;": u"\u0167", - "twixt;": u"\u226c", - "twoheadleftarrow;": u"\u219e", - "twoheadrightarrow;": u"\u21a0", - "uArr;": u"\u21d1", - "uHar;": u"\u2963", - "uacute": u"\xfa", - "uacute;": u"\xfa", - "uarr;": u"\u2191", - "ubrcy;": u"\u045e", - "ubreve;": u"\u016d", - "ucirc": u"\xfb", - "ucirc;": u"\xfb", - "ucy;": u"\u0443", - "udarr;": u"\u21c5", - "udblac;": u"\u0171", - "udhar;": u"\u296e", - "ufisht;": u"\u297e", - "ufr;": u"\U0001d532", - "ugrave": u"\xf9", - "ugrave;": u"\xf9", - "uharl;": u"\u21bf", - "uharr;": u"\u21be", - "uhblk;": u"\u2580", - "ulcorn;": u"\u231c", - "ulcorner;": u"\u231c", - "ulcrop;": u"\u230f", - "ultri;": u"\u25f8", - "umacr;": u"\u016b", - "uml": u"\xa8", - "uml;": u"\xa8", - "uogon;": u"\u0173", - "uopf;": u"\U0001d566", - "uparrow;": u"\u2191", - "updownarrow;": u"\u2195", - "upharpoonleft;": u"\u21bf", - "upharpoonright;": u"\u21be", - "uplus;": u"\u228e", - "upsi;": u"\u03c5", - "upsih;": u"\u03d2", - "upsilon;": u"\u03c5", - "upuparrows;": u"\u21c8", - "urcorn;": u"\u231d", - "urcorner;": u"\u231d", - "urcrop;": u"\u230e", - "uring;": u"\u016f", - "urtri;": u"\u25f9", - "uscr;": u"\U0001d4ca", - "utdot;": u"\u22f0", - "utilde;": u"\u0169", - "utri;": u"\u25b5", - "utrif;": u"\u25b4", - "uuarr;": u"\u21c8", - "uuml": u"\xfc", - "uuml;": u"\xfc", - "uwangle;": u"\u29a7", - "vArr;": u"\u21d5", - "vBar;": u"\u2ae8", - "vBarv;": u"\u2ae9", - "vDash;": u"\u22a8", - "vangrt;": u"\u299c", - "varepsilon;": u"\u03f5", - "varkappa;": u"\u03f0", - "varnothing;": u"\u2205", - "varphi;": u"\u03d5", - "varpi;": u"\u03d6", - "varpropto;": u"\u221d", - "varr;": u"\u2195", - "varrho;": u"\u03f1", - "varsigma;": u"\u03c2", - "varsubsetneq;": u"\u228a\ufe00", - "varsubsetneqq;": u"\u2acb\ufe00", - "varsupsetneq;": u"\u228b\ufe00", - "varsupsetneqq;": u"\u2acc\ufe00", - "vartheta;": u"\u03d1", - "vartriangleleft;": u"\u22b2", - "vartriangleright;": u"\u22b3", - 
"vcy;": u"\u0432", - "vdash;": u"\u22a2", - "vee;": u"\u2228", - "veebar;": u"\u22bb", - "veeeq;": u"\u225a", - "vellip;": u"\u22ee", - "verbar;": u"|", - "vert;": u"|", - "vfr;": u"\U0001d533", - "vltri;": u"\u22b2", - "vnsub;": u"\u2282\u20d2", - "vnsup;": u"\u2283\u20d2", - "vopf;": u"\U0001d567", - "vprop;": u"\u221d", - "vrtri;": u"\u22b3", - "vscr;": u"\U0001d4cb", - "vsubnE;": u"\u2acb\ufe00", - "vsubne;": u"\u228a\ufe00", - "vsupnE;": u"\u2acc\ufe00", - "vsupne;": u"\u228b\ufe00", - "vzigzag;": u"\u299a", - "wcirc;": u"\u0175", - "wedbar;": u"\u2a5f", - "wedge;": u"\u2227", - "wedgeq;": u"\u2259", - "weierp;": u"\u2118", - "wfr;": u"\U0001d534", - "wopf;": u"\U0001d568", - "wp;": u"\u2118", - "wr;": u"\u2240", - "wreath;": u"\u2240", - "wscr;": u"\U0001d4cc", - "xcap;": u"\u22c2", - "xcirc;": u"\u25ef", - "xcup;": u"\u22c3", - "xdtri;": u"\u25bd", - "xfr;": u"\U0001d535", - "xhArr;": u"\u27fa", - "xharr;": u"\u27f7", - "xi;": u"\u03be", - "xlArr;": u"\u27f8", - "xlarr;": u"\u27f5", - "xmap;": u"\u27fc", - "xnis;": u"\u22fb", - "xodot;": u"\u2a00", - "xopf;": u"\U0001d569", - "xoplus;": u"\u2a01", - "xotime;": u"\u2a02", - "xrArr;": u"\u27f9", - "xrarr;": u"\u27f6", - "xscr;": u"\U0001d4cd", - "xsqcup;": u"\u2a06", - "xuplus;": u"\u2a04", - "xutri;": u"\u25b3", - "xvee;": u"\u22c1", - "xwedge;": u"\u22c0", - "yacute": u"\xfd", - "yacute;": u"\xfd", - "yacy;": u"\u044f", - "ycirc;": u"\u0177", - "ycy;": u"\u044b", - "yen": u"\xa5", - "yen;": u"\xa5", - "yfr;": u"\U0001d536", - "yicy;": u"\u0457", - "yopf;": u"\U0001d56a", - "yscr;": u"\U0001d4ce", - "yucy;": u"\u044e", - "yuml": u"\xff", - "yuml;": u"\xff", - "zacute;": u"\u017a", - "zcaron;": u"\u017e", - "zcy;": u"\u0437", - "zdot;": u"\u017c", - "zeetrf;": u"\u2128", - "zeta;": u"\u03b6", - "zfr;": u"\U0001d537", - "zhcy;": u"\u0436", - "zigrarr;": u"\u21dd", - "zopf;": u"\U0001d56b", - "zscr;": u"\U0001d4cf", - "zwj;": u"\u200d", - "zwnj;": u"\u200c", + "AElig": "\xc6", + "AElig;": "\xc6", + "AMP": "&", + 
"AMP;": "&", + "Aacute": "\xc1", + "Aacute;": "\xc1", + "Abreve;": "\u0102", + "Acirc": "\xc2", + "Acirc;": "\xc2", + "Acy;": "\u0410", + "Afr;": "\U0001d504", + "Agrave": "\xc0", + "Agrave;": "\xc0", + "Alpha;": "\u0391", + "Amacr;": "\u0100", + "And;": "\u2a53", + "Aogon;": "\u0104", + "Aopf;": "\U0001d538", + "ApplyFunction;": "\u2061", + "Aring": "\xc5", + "Aring;": "\xc5", + "Ascr;": "\U0001d49c", + "Assign;": "\u2254", + "Atilde": "\xc3", + "Atilde;": "\xc3", + "Auml": "\xc4", + "Auml;": "\xc4", + "Backslash;": "\u2216", + "Barv;": "\u2ae7", + "Barwed;": "\u2306", + "Bcy;": "\u0411", + "Because;": "\u2235", + "Bernoullis;": "\u212c", + "Beta;": "\u0392", + "Bfr;": "\U0001d505", + "Bopf;": "\U0001d539", + "Breve;": "\u02d8", + "Bscr;": "\u212c", + "Bumpeq;": "\u224e", + "CHcy;": "\u0427", + "COPY": "\xa9", + "COPY;": "\xa9", + "Cacute;": "\u0106", + "Cap;": "\u22d2", + "CapitalDifferentialD;": "\u2145", + "Cayleys;": "\u212d", + "Ccaron;": "\u010c", + "Ccedil": "\xc7", + "Ccedil;": "\xc7", + "Ccirc;": "\u0108", + "Cconint;": "\u2230", + "Cdot;": "\u010a", + "Cedilla;": "\xb8", + "CenterDot;": "\xb7", + "Cfr;": "\u212d", + "Chi;": "\u03a7", + "CircleDot;": "\u2299", + "CircleMinus;": "\u2296", + "CirclePlus;": "\u2295", + "CircleTimes;": "\u2297", + "ClockwiseContourIntegral;": "\u2232", + "CloseCurlyDoubleQuote;": "\u201d", + "CloseCurlyQuote;": "\u2019", + "Colon;": "\u2237", + "Colone;": "\u2a74", + "Congruent;": "\u2261", + "Conint;": "\u222f", + "ContourIntegral;": "\u222e", + "Copf;": "\u2102", + "Coproduct;": "\u2210", + "CounterClockwiseContourIntegral;": "\u2233", + "Cross;": "\u2a2f", + "Cscr;": "\U0001d49e", + "Cup;": "\u22d3", + "CupCap;": "\u224d", + "DD;": "\u2145", + "DDotrahd;": "\u2911", + "DJcy;": "\u0402", + "DScy;": "\u0405", + "DZcy;": "\u040f", + "Dagger;": "\u2021", + "Darr;": "\u21a1", + "Dashv;": "\u2ae4", + "Dcaron;": "\u010e", + "Dcy;": "\u0414", + "Del;": "\u2207", + "Delta;": "\u0394", + "Dfr;": "\U0001d507", + "DiacriticalAcute;": 
"\xb4", + "DiacriticalDot;": "\u02d9", + "DiacriticalDoubleAcute;": "\u02dd", + "DiacriticalGrave;": "`", + "DiacriticalTilde;": "\u02dc", + "Diamond;": "\u22c4", + "DifferentialD;": "\u2146", + "Dopf;": "\U0001d53b", + "Dot;": "\xa8", + "DotDot;": "\u20dc", + "DotEqual;": "\u2250", + "DoubleContourIntegral;": "\u222f", + "DoubleDot;": "\xa8", + "DoubleDownArrow;": "\u21d3", + "DoubleLeftArrow;": "\u21d0", + "DoubleLeftRightArrow;": "\u21d4", + "DoubleLeftTee;": "\u2ae4", + "DoubleLongLeftArrow;": "\u27f8", + "DoubleLongLeftRightArrow;": "\u27fa", + "DoubleLongRightArrow;": "\u27f9", + "DoubleRightArrow;": "\u21d2", + "DoubleRightTee;": "\u22a8", + "DoubleUpArrow;": "\u21d1", + "DoubleUpDownArrow;": "\u21d5", + "DoubleVerticalBar;": "\u2225", + "DownArrow;": "\u2193", + "DownArrowBar;": "\u2913", + "DownArrowUpArrow;": "\u21f5", + "DownBreve;": "\u0311", + "DownLeftRightVector;": "\u2950", + "DownLeftTeeVector;": "\u295e", + "DownLeftVector;": "\u21bd", + "DownLeftVectorBar;": "\u2956", + "DownRightTeeVector;": "\u295f", + "DownRightVector;": "\u21c1", + "DownRightVectorBar;": "\u2957", + "DownTee;": "\u22a4", + "DownTeeArrow;": "\u21a7", + "Downarrow;": "\u21d3", + "Dscr;": "\U0001d49f", + "Dstrok;": "\u0110", + "ENG;": "\u014a", + "ETH": "\xd0", + "ETH;": "\xd0", + "Eacute": "\xc9", + "Eacute;": "\xc9", + "Ecaron;": "\u011a", + "Ecirc": "\xca", + "Ecirc;": "\xca", + "Ecy;": "\u042d", + "Edot;": "\u0116", + "Efr;": "\U0001d508", + "Egrave": "\xc8", + "Egrave;": "\xc8", + "Element;": "\u2208", + "Emacr;": "\u0112", + "EmptySmallSquare;": "\u25fb", + "EmptyVerySmallSquare;": "\u25ab", + "Eogon;": "\u0118", + "Eopf;": "\U0001d53c", + "Epsilon;": "\u0395", + "Equal;": "\u2a75", + "EqualTilde;": "\u2242", + "Equilibrium;": "\u21cc", + "Escr;": "\u2130", + "Esim;": "\u2a73", + "Eta;": "\u0397", + "Euml": "\xcb", + "Euml;": "\xcb", + "Exists;": "\u2203", + "ExponentialE;": "\u2147", + "Fcy;": "\u0424", + "Ffr;": "\U0001d509", + "FilledSmallSquare;": "\u25fc", + 
"FilledVerySmallSquare;": "\u25aa", + "Fopf;": "\U0001d53d", + "ForAll;": "\u2200", + "Fouriertrf;": "\u2131", + "Fscr;": "\u2131", + "GJcy;": "\u0403", + "GT": ">", + "GT;": ">", + "Gamma;": "\u0393", + "Gammad;": "\u03dc", + "Gbreve;": "\u011e", + "Gcedil;": "\u0122", + "Gcirc;": "\u011c", + "Gcy;": "\u0413", + "Gdot;": "\u0120", + "Gfr;": "\U0001d50a", + "Gg;": "\u22d9", + "Gopf;": "\U0001d53e", + "GreaterEqual;": "\u2265", + "GreaterEqualLess;": "\u22db", + "GreaterFullEqual;": "\u2267", + "GreaterGreater;": "\u2aa2", + "GreaterLess;": "\u2277", + "GreaterSlantEqual;": "\u2a7e", + "GreaterTilde;": "\u2273", + "Gscr;": "\U0001d4a2", + "Gt;": "\u226b", + "HARDcy;": "\u042a", + "Hacek;": "\u02c7", + "Hat;": "^", + "Hcirc;": "\u0124", + "Hfr;": "\u210c", + "HilbertSpace;": "\u210b", + "Hopf;": "\u210d", + "HorizontalLine;": "\u2500", + "Hscr;": "\u210b", + "Hstrok;": "\u0126", + "HumpDownHump;": "\u224e", + "HumpEqual;": "\u224f", + "IEcy;": "\u0415", + "IJlig;": "\u0132", + "IOcy;": "\u0401", + "Iacute": "\xcd", + "Iacute;": "\xcd", + "Icirc": "\xce", + "Icirc;": "\xce", + "Icy;": "\u0418", + "Idot;": "\u0130", + "Ifr;": "\u2111", + "Igrave": "\xcc", + "Igrave;": "\xcc", + "Im;": "\u2111", + "Imacr;": "\u012a", + "ImaginaryI;": "\u2148", + "Implies;": "\u21d2", + "Int;": "\u222c", + "Integral;": "\u222b", + "Intersection;": "\u22c2", + "InvisibleComma;": "\u2063", + "InvisibleTimes;": "\u2062", + "Iogon;": "\u012e", + "Iopf;": "\U0001d540", + "Iota;": "\u0399", + "Iscr;": "\u2110", + "Itilde;": "\u0128", + "Iukcy;": "\u0406", + "Iuml": "\xcf", + "Iuml;": "\xcf", + "Jcirc;": "\u0134", + "Jcy;": "\u0419", + "Jfr;": "\U0001d50d", + "Jopf;": "\U0001d541", + "Jscr;": "\U0001d4a5", + "Jsercy;": "\u0408", + "Jukcy;": "\u0404", + "KHcy;": "\u0425", + "KJcy;": "\u040c", + "Kappa;": "\u039a", + "Kcedil;": "\u0136", + "Kcy;": "\u041a", + "Kfr;": "\U0001d50e", + "Kopf;": "\U0001d542", + "Kscr;": "\U0001d4a6", + "LJcy;": "\u0409", + "LT": "<", + "LT;": "<", + "Lacute;": 
"\u0139", + "Lambda;": "\u039b", + "Lang;": "\u27ea", + "Laplacetrf;": "\u2112", + "Larr;": "\u219e", + "Lcaron;": "\u013d", + "Lcedil;": "\u013b", + "Lcy;": "\u041b", + "LeftAngleBracket;": "\u27e8", + "LeftArrow;": "\u2190", + "LeftArrowBar;": "\u21e4", + "LeftArrowRightArrow;": "\u21c6", + "LeftCeiling;": "\u2308", + "LeftDoubleBracket;": "\u27e6", + "LeftDownTeeVector;": "\u2961", + "LeftDownVector;": "\u21c3", + "LeftDownVectorBar;": "\u2959", + "LeftFloor;": "\u230a", + "LeftRightArrow;": "\u2194", + "LeftRightVector;": "\u294e", + "LeftTee;": "\u22a3", + "LeftTeeArrow;": "\u21a4", + "LeftTeeVector;": "\u295a", + "LeftTriangle;": "\u22b2", + "LeftTriangleBar;": "\u29cf", + "LeftTriangleEqual;": "\u22b4", + "LeftUpDownVector;": "\u2951", + "LeftUpTeeVector;": "\u2960", + "LeftUpVector;": "\u21bf", + "LeftUpVectorBar;": "\u2958", + "LeftVector;": "\u21bc", + "LeftVectorBar;": "\u2952", + "Leftarrow;": "\u21d0", + "Leftrightarrow;": "\u21d4", + "LessEqualGreater;": "\u22da", + "LessFullEqual;": "\u2266", + "LessGreater;": "\u2276", + "LessLess;": "\u2aa1", + "LessSlantEqual;": "\u2a7d", + "LessTilde;": "\u2272", + "Lfr;": "\U0001d50f", + "Ll;": "\u22d8", + "Lleftarrow;": "\u21da", + "Lmidot;": "\u013f", + "LongLeftArrow;": "\u27f5", + "LongLeftRightArrow;": "\u27f7", + "LongRightArrow;": "\u27f6", + "Longleftarrow;": "\u27f8", + "Longleftrightarrow;": "\u27fa", + "Longrightarrow;": "\u27f9", + "Lopf;": "\U0001d543", + "LowerLeftArrow;": "\u2199", + "LowerRightArrow;": "\u2198", + "Lscr;": "\u2112", + "Lsh;": "\u21b0", + "Lstrok;": "\u0141", + "Lt;": "\u226a", + "Map;": "\u2905", + "Mcy;": "\u041c", + "MediumSpace;": "\u205f", + "Mellintrf;": "\u2133", + "Mfr;": "\U0001d510", + "MinusPlus;": "\u2213", + "Mopf;": "\U0001d544", + "Mscr;": "\u2133", + "Mu;": "\u039c", + "NJcy;": "\u040a", + "Nacute;": "\u0143", + "Ncaron;": "\u0147", + "Ncedil;": "\u0145", + "Ncy;": "\u041d", + "NegativeMediumSpace;": "\u200b", + "NegativeThickSpace;": "\u200b", + 
"NegativeThinSpace;": "\u200b", + "NegativeVeryThinSpace;": "\u200b", + "NestedGreaterGreater;": "\u226b", + "NestedLessLess;": "\u226a", + "NewLine;": "\n", + "Nfr;": "\U0001d511", + "NoBreak;": "\u2060", + "NonBreakingSpace;": "\xa0", + "Nopf;": "\u2115", + "Not;": "\u2aec", + "NotCongruent;": "\u2262", + "NotCupCap;": "\u226d", + "NotDoubleVerticalBar;": "\u2226", + "NotElement;": "\u2209", + "NotEqual;": "\u2260", + "NotEqualTilde;": "\u2242\u0338", + "NotExists;": "\u2204", + "NotGreater;": "\u226f", + "NotGreaterEqual;": "\u2271", + "NotGreaterFullEqual;": "\u2267\u0338", + "NotGreaterGreater;": "\u226b\u0338", + "NotGreaterLess;": "\u2279", + "NotGreaterSlantEqual;": "\u2a7e\u0338", + "NotGreaterTilde;": "\u2275", + "NotHumpDownHump;": "\u224e\u0338", + "NotHumpEqual;": "\u224f\u0338", + "NotLeftTriangle;": "\u22ea", + "NotLeftTriangleBar;": "\u29cf\u0338", + "NotLeftTriangleEqual;": "\u22ec", + "NotLess;": "\u226e", + "NotLessEqual;": "\u2270", + "NotLessGreater;": "\u2278", + "NotLessLess;": "\u226a\u0338", + "NotLessSlantEqual;": "\u2a7d\u0338", + "NotLessTilde;": "\u2274", + "NotNestedGreaterGreater;": "\u2aa2\u0338", + "NotNestedLessLess;": "\u2aa1\u0338", + "NotPrecedes;": "\u2280", + "NotPrecedesEqual;": "\u2aaf\u0338", + "NotPrecedesSlantEqual;": "\u22e0", + "NotReverseElement;": "\u220c", + "NotRightTriangle;": "\u22eb", + "NotRightTriangleBar;": "\u29d0\u0338", + "NotRightTriangleEqual;": "\u22ed", + "NotSquareSubset;": "\u228f\u0338", + "NotSquareSubsetEqual;": "\u22e2", + "NotSquareSuperset;": "\u2290\u0338", + "NotSquareSupersetEqual;": "\u22e3", + "NotSubset;": "\u2282\u20d2", + "NotSubsetEqual;": "\u2288", + "NotSucceeds;": "\u2281", + "NotSucceedsEqual;": "\u2ab0\u0338", + "NotSucceedsSlantEqual;": "\u22e1", + "NotSucceedsTilde;": "\u227f\u0338", + "NotSuperset;": "\u2283\u20d2", + "NotSupersetEqual;": "\u2289", + "NotTilde;": "\u2241", + "NotTildeEqual;": "\u2244", + "NotTildeFullEqual;": "\u2247", + "NotTildeTilde;": "\u2249", + 
"NotVerticalBar;": "\u2224", + "Nscr;": "\U0001d4a9", + "Ntilde": "\xd1", + "Ntilde;": "\xd1", + "Nu;": "\u039d", + "OElig;": "\u0152", + "Oacute": "\xd3", + "Oacute;": "\xd3", + "Ocirc": "\xd4", + "Ocirc;": "\xd4", + "Ocy;": "\u041e", + "Odblac;": "\u0150", + "Ofr;": "\U0001d512", + "Ograve": "\xd2", + "Ograve;": "\xd2", + "Omacr;": "\u014c", + "Omega;": "\u03a9", + "Omicron;": "\u039f", + "Oopf;": "\U0001d546", + "OpenCurlyDoubleQuote;": "\u201c", + "OpenCurlyQuote;": "\u2018", + "Or;": "\u2a54", + "Oscr;": "\U0001d4aa", + "Oslash": "\xd8", + "Oslash;": "\xd8", + "Otilde": "\xd5", + "Otilde;": "\xd5", + "Otimes;": "\u2a37", + "Ouml": "\xd6", + "Ouml;": "\xd6", + "OverBar;": "\u203e", + "OverBrace;": "\u23de", + "OverBracket;": "\u23b4", + "OverParenthesis;": "\u23dc", + "PartialD;": "\u2202", + "Pcy;": "\u041f", + "Pfr;": "\U0001d513", + "Phi;": "\u03a6", + "Pi;": "\u03a0", + "PlusMinus;": "\xb1", + "Poincareplane;": "\u210c", + "Popf;": "\u2119", + "Pr;": "\u2abb", + "Precedes;": "\u227a", + "PrecedesEqual;": "\u2aaf", + "PrecedesSlantEqual;": "\u227c", + "PrecedesTilde;": "\u227e", + "Prime;": "\u2033", + "Product;": "\u220f", + "Proportion;": "\u2237", + "Proportional;": "\u221d", + "Pscr;": "\U0001d4ab", + "Psi;": "\u03a8", + "QUOT": "\"", + "QUOT;": "\"", + "Qfr;": "\U0001d514", + "Qopf;": "\u211a", + "Qscr;": "\U0001d4ac", + "RBarr;": "\u2910", + "REG": "\xae", + "REG;": "\xae", + "Racute;": "\u0154", + "Rang;": "\u27eb", + "Rarr;": "\u21a0", + "Rarrtl;": "\u2916", + "Rcaron;": "\u0158", + "Rcedil;": "\u0156", + "Rcy;": "\u0420", + "Re;": "\u211c", + "ReverseElement;": "\u220b", + "ReverseEquilibrium;": "\u21cb", + "ReverseUpEquilibrium;": "\u296f", + "Rfr;": "\u211c", + "Rho;": "\u03a1", + "RightAngleBracket;": "\u27e9", + "RightArrow;": "\u2192", + "RightArrowBar;": "\u21e5", + "RightArrowLeftArrow;": "\u21c4", + "RightCeiling;": "\u2309", + "RightDoubleBracket;": "\u27e7", + "RightDownTeeVector;": "\u295d", + "RightDownVector;": "\u21c2", + 
"RightDownVectorBar;": "\u2955", + "RightFloor;": "\u230b", + "RightTee;": "\u22a2", + "RightTeeArrow;": "\u21a6", + "RightTeeVector;": "\u295b", + "RightTriangle;": "\u22b3", + "RightTriangleBar;": "\u29d0", + "RightTriangleEqual;": "\u22b5", + "RightUpDownVector;": "\u294f", + "RightUpTeeVector;": "\u295c", + "RightUpVector;": "\u21be", + "RightUpVectorBar;": "\u2954", + "RightVector;": "\u21c0", + "RightVectorBar;": "\u2953", + "Rightarrow;": "\u21d2", + "Ropf;": "\u211d", + "RoundImplies;": "\u2970", + "Rrightarrow;": "\u21db", + "Rscr;": "\u211b", + "Rsh;": "\u21b1", + "RuleDelayed;": "\u29f4", + "SHCHcy;": "\u0429", + "SHcy;": "\u0428", + "SOFTcy;": "\u042c", + "Sacute;": "\u015a", + "Sc;": "\u2abc", + "Scaron;": "\u0160", + "Scedil;": "\u015e", + "Scirc;": "\u015c", + "Scy;": "\u0421", + "Sfr;": "\U0001d516", + "ShortDownArrow;": "\u2193", + "ShortLeftArrow;": "\u2190", + "ShortRightArrow;": "\u2192", + "ShortUpArrow;": "\u2191", + "Sigma;": "\u03a3", + "SmallCircle;": "\u2218", + "Sopf;": "\U0001d54a", + "Sqrt;": "\u221a", + "Square;": "\u25a1", + "SquareIntersection;": "\u2293", + "SquareSubset;": "\u228f", + "SquareSubsetEqual;": "\u2291", + "SquareSuperset;": "\u2290", + "SquareSupersetEqual;": "\u2292", + "SquareUnion;": "\u2294", + "Sscr;": "\U0001d4ae", + "Star;": "\u22c6", + "Sub;": "\u22d0", + "Subset;": "\u22d0", + "SubsetEqual;": "\u2286", + "Succeeds;": "\u227b", + "SucceedsEqual;": "\u2ab0", + "SucceedsSlantEqual;": "\u227d", + "SucceedsTilde;": "\u227f", + "SuchThat;": "\u220b", + "Sum;": "\u2211", + "Sup;": "\u22d1", + "Superset;": "\u2283", + "SupersetEqual;": "\u2287", + "Supset;": "\u22d1", + "THORN": "\xde", + "THORN;": "\xde", + "TRADE;": "\u2122", + "TSHcy;": "\u040b", + "TScy;": "\u0426", + "Tab;": "\t", + "Tau;": "\u03a4", + "Tcaron;": "\u0164", + "Tcedil;": "\u0162", + "Tcy;": "\u0422", + "Tfr;": "\U0001d517", + "Therefore;": "\u2234", + "Theta;": "\u0398", + "ThickSpace;": "\u205f\u200a", + "ThinSpace;": "\u2009", + "Tilde;": 
"\u223c", + "TildeEqual;": "\u2243", + "TildeFullEqual;": "\u2245", + "TildeTilde;": "\u2248", + "Topf;": "\U0001d54b", + "TripleDot;": "\u20db", + "Tscr;": "\U0001d4af", + "Tstrok;": "\u0166", + "Uacute": "\xda", + "Uacute;": "\xda", + "Uarr;": "\u219f", + "Uarrocir;": "\u2949", + "Ubrcy;": "\u040e", + "Ubreve;": "\u016c", + "Ucirc": "\xdb", + "Ucirc;": "\xdb", + "Ucy;": "\u0423", + "Udblac;": "\u0170", + "Ufr;": "\U0001d518", + "Ugrave": "\xd9", + "Ugrave;": "\xd9", + "Umacr;": "\u016a", + "UnderBar;": "_", + "UnderBrace;": "\u23df", + "UnderBracket;": "\u23b5", + "UnderParenthesis;": "\u23dd", + "Union;": "\u22c3", + "UnionPlus;": "\u228e", + "Uogon;": "\u0172", + "Uopf;": "\U0001d54c", + "UpArrow;": "\u2191", + "UpArrowBar;": "\u2912", + "UpArrowDownArrow;": "\u21c5", + "UpDownArrow;": "\u2195", + "UpEquilibrium;": "\u296e", + "UpTee;": "\u22a5", + "UpTeeArrow;": "\u21a5", + "Uparrow;": "\u21d1", + "Updownarrow;": "\u21d5", + "UpperLeftArrow;": "\u2196", + "UpperRightArrow;": "\u2197", + "Upsi;": "\u03d2", + "Upsilon;": "\u03a5", + "Uring;": "\u016e", + "Uscr;": "\U0001d4b0", + "Utilde;": "\u0168", + "Uuml": "\xdc", + "Uuml;": "\xdc", + "VDash;": "\u22ab", + "Vbar;": "\u2aeb", + "Vcy;": "\u0412", + "Vdash;": "\u22a9", + "Vdashl;": "\u2ae6", + "Vee;": "\u22c1", + "Verbar;": "\u2016", + "Vert;": "\u2016", + "VerticalBar;": "\u2223", + "VerticalLine;": "|", + "VerticalSeparator;": "\u2758", + "VerticalTilde;": "\u2240", + "VeryThinSpace;": "\u200a", + "Vfr;": "\U0001d519", + "Vopf;": "\U0001d54d", + "Vscr;": "\U0001d4b1", + "Vvdash;": "\u22aa", + "Wcirc;": "\u0174", + "Wedge;": "\u22c0", + "Wfr;": "\U0001d51a", + "Wopf;": "\U0001d54e", + "Wscr;": "\U0001d4b2", + "Xfr;": "\U0001d51b", + "Xi;": "\u039e", + "Xopf;": "\U0001d54f", + "Xscr;": "\U0001d4b3", + "YAcy;": "\u042f", + "YIcy;": "\u0407", + "YUcy;": "\u042e", + "Yacute": "\xdd", + "Yacute;": "\xdd", + "Ycirc;": "\u0176", + "Ycy;": "\u042b", + "Yfr;": "\U0001d51c", + "Yopf;": "\U0001d550", + "Yscr;": 
"\U0001d4b4", + "Yuml;": "\u0178", + "ZHcy;": "\u0416", + "Zacute;": "\u0179", + "Zcaron;": "\u017d", + "Zcy;": "\u0417", + "Zdot;": "\u017b", + "ZeroWidthSpace;": "\u200b", + "Zeta;": "\u0396", + "Zfr;": "\u2128", + "Zopf;": "\u2124", + "Zscr;": "\U0001d4b5", + "aacute": "\xe1", + "aacute;": "\xe1", + "abreve;": "\u0103", + "ac;": "\u223e", + "acE;": "\u223e\u0333", + "acd;": "\u223f", + "acirc": "\xe2", + "acirc;": "\xe2", + "acute": "\xb4", + "acute;": "\xb4", + "acy;": "\u0430", + "aelig": "\xe6", + "aelig;": "\xe6", + "af;": "\u2061", + "afr;": "\U0001d51e", + "agrave": "\xe0", + "agrave;": "\xe0", + "alefsym;": "\u2135", + "aleph;": "\u2135", + "alpha;": "\u03b1", + "amacr;": "\u0101", + "amalg;": "\u2a3f", + "amp": "&", + "amp;": "&", + "and;": "\u2227", + "andand;": "\u2a55", + "andd;": "\u2a5c", + "andslope;": "\u2a58", + "andv;": "\u2a5a", + "ang;": "\u2220", + "ange;": "\u29a4", + "angle;": "\u2220", + "angmsd;": "\u2221", + "angmsdaa;": "\u29a8", + "angmsdab;": "\u29a9", + "angmsdac;": "\u29aa", + "angmsdad;": "\u29ab", + "angmsdae;": "\u29ac", + "angmsdaf;": "\u29ad", + "angmsdag;": "\u29ae", + "angmsdah;": "\u29af", + "angrt;": "\u221f", + "angrtvb;": "\u22be", + "angrtvbd;": "\u299d", + "angsph;": "\u2222", + "angst;": "\xc5", + "angzarr;": "\u237c", + "aogon;": "\u0105", + "aopf;": "\U0001d552", + "ap;": "\u2248", + "apE;": "\u2a70", + "apacir;": "\u2a6f", + "ape;": "\u224a", + "apid;": "\u224b", + "apos;": "'", + "approx;": "\u2248", + "approxeq;": "\u224a", + "aring": "\xe5", + "aring;": "\xe5", + "ascr;": "\U0001d4b6", + "ast;": "*", + "asymp;": "\u2248", + "asympeq;": "\u224d", + "atilde": "\xe3", + "atilde;": "\xe3", + "auml": "\xe4", + "auml;": "\xe4", + "awconint;": "\u2233", + "awint;": "\u2a11", + "bNot;": "\u2aed", + "backcong;": "\u224c", + "backepsilon;": "\u03f6", + "backprime;": "\u2035", + "backsim;": "\u223d", + "backsimeq;": "\u22cd", + "barvee;": "\u22bd", + "barwed;": "\u2305", + "barwedge;": "\u2305", + "bbrk;": "\u23b5", + 
"bbrktbrk;": "\u23b6", + "bcong;": "\u224c", + "bcy;": "\u0431", + "bdquo;": "\u201e", + "becaus;": "\u2235", + "because;": "\u2235", + "bemptyv;": "\u29b0", + "bepsi;": "\u03f6", + "bernou;": "\u212c", + "beta;": "\u03b2", + "beth;": "\u2136", + "between;": "\u226c", + "bfr;": "\U0001d51f", + "bigcap;": "\u22c2", + "bigcirc;": "\u25ef", + "bigcup;": "\u22c3", + "bigodot;": "\u2a00", + "bigoplus;": "\u2a01", + "bigotimes;": "\u2a02", + "bigsqcup;": "\u2a06", + "bigstar;": "\u2605", + "bigtriangledown;": "\u25bd", + "bigtriangleup;": "\u25b3", + "biguplus;": "\u2a04", + "bigvee;": "\u22c1", + "bigwedge;": "\u22c0", + "bkarow;": "\u290d", + "blacklozenge;": "\u29eb", + "blacksquare;": "\u25aa", + "blacktriangle;": "\u25b4", + "blacktriangledown;": "\u25be", + "blacktriangleleft;": "\u25c2", + "blacktriangleright;": "\u25b8", + "blank;": "\u2423", + "blk12;": "\u2592", + "blk14;": "\u2591", + "blk34;": "\u2593", + "block;": "\u2588", + "bne;": "=\u20e5", + "bnequiv;": "\u2261\u20e5", + "bnot;": "\u2310", + "bopf;": "\U0001d553", + "bot;": "\u22a5", + "bottom;": "\u22a5", + "bowtie;": "\u22c8", + "boxDL;": "\u2557", + "boxDR;": "\u2554", + "boxDl;": "\u2556", + "boxDr;": "\u2553", + "boxH;": "\u2550", + "boxHD;": "\u2566", + "boxHU;": "\u2569", + "boxHd;": "\u2564", + "boxHu;": "\u2567", + "boxUL;": "\u255d", + "boxUR;": "\u255a", + "boxUl;": "\u255c", + "boxUr;": "\u2559", + "boxV;": "\u2551", + "boxVH;": "\u256c", + "boxVL;": "\u2563", + "boxVR;": "\u2560", + "boxVh;": "\u256b", + "boxVl;": "\u2562", + "boxVr;": "\u255f", + "boxbox;": "\u29c9", + "boxdL;": "\u2555", + "boxdR;": "\u2552", + "boxdl;": "\u2510", + "boxdr;": "\u250c", + "boxh;": "\u2500", + "boxhD;": "\u2565", + "boxhU;": "\u2568", + "boxhd;": "\u252c", + "boxhu;": "\u2534", + "boxminus;": "\u229f", + "boxplus;": "\u229e", + "boxtimes;": "\u22a0", + "boxuL;": "\u255b", + "boxuR;": "\u2558", + "boxul;": "\u2518", + "boxur;": "\u2514", + "boxv;": "\u2502", + "boxvH;": "\u256a", + "boxvL;": "\u2561", + 
"boxvR;": "\u255e", + "boxvh;": "\u253c", + "boxvl;": "\u2524", + "boxvr;": "\u251c", + "bprime;": "\u2035", + "breve;": "\u02d8", + "brvbar": "\xa6", + "brvbar;": "\xa6", + "bscr;": "\U0001d4b7", + "bsemi;": "\u204f", + "bsim;": "\u223d", + "bsime;": "\u22cd", + "bsol;": "\\", + "bsolb;": "\u29c5", + "bsolhsub;": "\u27c8", + "bull;": "\u2022", + "bullet;": "\u2022", + "bump;": "\u224e", + "bumpE;": "\u2aae", + "bumpe;": "\u224f", + "bumpeq;": "\u224f", + "cacute;": "\u0107", + "cap;": "\u2229", + "capand;": "\u2a44", + "capbrcup;": "\u2a49", + "capcap;": "\u2a4b", + "capcup;": "\u2a47", + "capdot;": "\u2a40", + "caps;": "\u2229\ufe00", + "caret;": "\u2041", + "caron;": "\u02c7", + "ccaps;": "\u2a4d", + "ccaron;": "\u010d", + "ccedil": "\xe7", + "ccedil;": "\xe7", + "ccirc;": "\u0109", + "ccups;": "\u2a4c", + "ccupssm;": "\u2a50", + "cdot;": "\u010b", + "cedil": "\xb8", + "cedil;": "\xb8", + "cemptyv;": "\u29b2", + "cent": "\xa2", + "cent;": "\xa2", + "centerdot;": "\xb7", + "cfr;": "\U0001d520", + "chcy;": "\u0447", + "check;": "\u2713", + "checkmark;": "\u2713", + "chi;": "\u03c7", + "cir;": "\u25cb", + "cirE;": "\u29c3", + "circ;": "\u02c6", + "circeq;": "\u2257", + "circlearrowleft;": "\u21ba", + "circlearrowright;": "\u21bb", + "circledR;": "\xae", + "circledS;": "\u24c8", + "circledast;": "\u229b", + "circledcirc;": "\u229a", + "circleddash;": "\u229d", + "cire;": "\u2257", + "cirfnint;": "\u2a10", + "cirmid;": "\u2aef", + "cirscir;": "\u29c2", + "clubs;": "\u2663", + "clubsuit;": "\u2663", + "colon;": ":", + "colone;": "\u2254", + "coloneq;": "\u2254", + "comma;": ",", + "commat;": "@", + "comp;": "\u2201", + "compfn;": "\u2218", + "complement;": "\u2201", + "complexes;": "\u2102", + "cong;": "\u2245", + "congdot;": "\u2a6d", + "conint;": "\u222e", + "copf;": "\U0001d554", + "coprod;": "\u2210", + "copy": "\xa9", + "copy;": "\xa9", + "copysr;": "\u2117", + "crarr;": "\u21b5", + "cross;": "\u2717", + "cscr;": "\U0001d4b8", + "csub;": "\u2acf", + "csube;": 
"\u2ad1", + "csup;": "\u2ad0", + "csupe;": "\u2ad2", + "ctdot;": "\u22ef", + "cudarrl;": "\u2938", + "cudarrr;": "\u2935", + "cuepr;": "\u22de", + "cuesc;": "\u22df", + "cularr;": "\u21b6", + "cularrp;": "\u293d", + "cup;": "\u222a", + "cupbrcap;": "\u2a48", + "cupcap;": "\u2a46", + "cupcup;": "\u2a4a", + "cupdot;": "\u228d", + "cupor;": "\u2a45", + "cups;": "\u222a\ufe00", + "curarr;": "\u21b7", + "curarrm;": "\u293c", + "curlyeqprec;": "\u22de", + "curlyeqsucc;": "\u22df", + "curlyvee;": "\u22ce", + "curlywedge;": "\u22cf", + "curren": "\xa4", + "curren;": "\xa4", + "curvearrowleft;": "\u21b6", + "curvearrowright;": "\u21b7", + "cuvee;": "\u22ce", + "cuwed;": "\u22cf", + "cwconint;": "\u2232", + "cwint;": "\u2231", + "cylcty;": "\u232d", + "dArr;": "\u21d3", + "dHar;": "\u2965", + "dagger;": "\u2020", + "daleth;": "\u2138", + "darr;": "\u2193", + "dash;": "\u2010", + "dashv;": "\u22a3", + "dbkarow;": "\u290f", + "dblac;": "\u02dd", + "dcaron;": "\u010f", + "dcy;": "\u0434", + "dd;": "\u2146", + "ddagger;": "\u2021", + "ddarr;": "\u21ca", + "ddotseq;": "\u2a77", + "deg": "\xb0", + "deg;": "\xb0", + "delta;": "\u03b4", + "demptyv;": "\u29b1", + "dfisht;": "\u297f", + "dfr;": "\U0001d521", + "dharl;": "\u21c3", + "dharr;": "\u21c2", + "diam;": "\u22c4", + "diamond;": "\u22c4", + "diamondsuit;": "\u2666", + "diams;": "\u2666", + "die;": "\xa8", + "digamma;": "\u03dd", + "disin;": "\u22f2", + "div;": "\xf7", + "divide": "\xf7", + "divide;": "\xf7", + "divideontimes;": "\u22c7", + "divonx;": "\u22c7", + "djcy;": "\u0452", + "dlcorn;": "\u231e", + "dlcrop;": "\u230d", + "dollar;": "$", + "dopf;": "\U0001d555", + "dot;": "\u02d9", + "doteq;": "\u2250", + "doteqdot;": "\u2251", + "dotminus;": "\u2238", + "dotplus;": "\u2214", + "dotsquare;": "\u22a1", + "doublebarwedge;": "\u2306", + "downarrow;": "\u2193", + "downdownarrows;": "\u21ca", + "downharpoonleft;": "\u21c3", + "downharpoonright;": "\u21c2", + "drbkarow;": "\u2910", + "drcorn;": "\u231f", + "drcrop;": "\u230c", 
+ "dscr;": "\U0001d4b9", + "dscy;": "\u0455", + "dsol;": "\u29f6", + "dstrok;": "\u0111", + "dtdot;": "\u22f1", + "dtri;": "\u25bf", + "dtrif;": "\u25be", + "duarr;": "\u21f5", + "duhar;": "\u296f", + "dwangle;": "\u29a6", + "dzcy;": "\u045f", + "dzigrarr;": "\u27ff", + "eDDot;": "\u2a77", + "eDot;": "\u2251", + "eacute": "\xe9", + "eacute;": "\xe9", + "easter;": "\u2a6e", + "ecaron;": "\u011b", + "ecir;": "\u2256", + "ecirc": "\xea", + "ecirc;": "\xea", + "ecolon;": "\u2255", + "ecy;": "\u044d", + "edot;": "\u0117", + "ee;": "\u2147", + "efDot;": "\u2252", + "efr;": "\U0001d522", + "eg;": "\u2a9a", + "egrave": "\xe8", + "egrave;": "\xe8", + "egs;": "\u2a96", + "egsdot;": "\u2a98", + "el;": "\u2a99", + "elinters;": "\u23e7", + "ell;": "\u2113", + "els;": "\u2a95", + "elsdot;": "\u2a97", + "emacr;": "\u0113", + "empty;": "\u2205", + "emptyset;": "\u2205", + "emptyv;": "\u2205", + "emsp13;": "\u2004", + "emsp14;": "\u2005", + "emsp;": "\u2003", + "eng;": "\u014b", + "ensp;": "\u2002", + "eogon;": "\u0119", + "eopf;": "\U0001d556", + "epar;": "\u22d5", + "eparsl;": "\u29e3", + "eplus;": "\u2a71", + "epsi;": "\u03b5", + "epsilon;": "\u03b5", + "epsiv;": "\u03f5", + "eqcirc;": "\u2256", + "eqcolon;": "\u2255", + "eqsim;": "\u2242", + "eqslantgtr;": "\u2a96", + "eqslantless;": "\u2a95", + "equals;": "=", + "equest;": "\u225f", + "equiv;": "\u2261", + "equivDD;": "\u2a78", + "eqvparsl;": "\u29e5", + "erDot;": "\u2253", + "erarr;": "\u2971", + "escr;": "\u212f", + "esdot;": "\u2250", + "esim;": "\u2242", + "eta;": "\u03b7", + "eth": "\xf0", + "eth;": "\xf0", + "euml": "\xeb", + "euml;": "\xeb", + "euro;": "\u20ac", + "excl;": "!", + "exist;": "\u2203", + "expectation;": "\u2130", + "exponentiale;": "\u2147", + "fallingdotseq;": "\u2252", + "fcy;": "\u0444", + "female;": "\u2640", + "ffilig;": "\ufb03", + "fflig;": "\ufb00", + "ffllig;": "\ufb04", + "ffr;": "\U0001d523", + "filig;": "\ufb01", + "fjlig;": "fj", + "flat;": "\u266d", + "fllig;": "\ufb02", + "fltns;": "\u25b1", 
+ "fnof;": "\u0192", + "fopf;": "\U0001d557", + "forall;": "\u2200", + "fork;": "\u22d4", + "forkv;": "\u2ad9", + "fpartint;": "\u2a0d", + "frac12": "\xbd", + "frac12;": "\xbd", + "frac13;": "\u2153", + "frac14": "\xbc", + "frac14;": "\xbc", + "frac15;": "\u2155", + "frac16;": "\u2159", + "frac18;": "\u215b", + "frac23;": "\u2154", + "frac25;": "\u2156", + "frac34": "\xbe", + "frac34;": "\xbe", + "frac35;": "\u2157", + "frac38;": "\u215c", + "frac45;": "\u2158", + "frac56;": "\u215a", + "frac58;": "\u215d", + "frac78;": "\u215e", + "frasl;": "\u2044", + "frown;": "\u2322", + "fscr;": "\U0001d4bb", + "gE;": "\u2267", + "gEl;": "\u2a8c", + "gacute;": "\u01f5", + "gamma;": "\u03b3", + "gammad;": "\u03dd", + "gap;": "\u2a86", + "gbreve;": "\u011f", + "gcirc;": "\u011d", + "gcy;": "\u0433", + "gdot;": "\u0121", + "ge;": "\u2265", + "gel;": "\u22db", + "geq;": "\u2265", + "geqq;": "\u2267", + "geqslant;": "\u2a7e", + "ges;": "\u2a7e", + "gescc;": "\u2aa9", + "gesdot;": "\u2a80", + "gesdoto;": "\u2a82", + "gesdotol;": "\u2a84", + "gesl;": "\u22db\ufe00", + "gesles;": "\u2a94", + "gfr;": "\U0001d524", + "gg;": "\u226b", + "ggg;": "\u22d9", + "gimel;": "\u2137", + "gjcy;": "\u0453", + "gl;": "\u2277", + "glE;": "\u2a92", + "gla;": "\u2aa5", + "glj;": "\u2aa4", + "gnE;": "\u2269", + "gnap;": "\u2a8a", + "gnapprox;": "\u2a8a", + "gne;": "\u2a88", + "gneq;": "\u2a88", + "gneqq;": "\u2269", + "gnsim;": "\u22e7", + "gopf;": "\U0001d558", + "grave;": "`", + "gscr;": "\u210a", + "gsim;": "\u2273", + "gsime;": "\u2a8e", + "gsiml;": "\u2a90", + "gt": ">", + "gt;": ">", + "gtcc;": "\u2aa7", + "gtcir;": "\u2a7a", + "gtdot;": "\u22d7", + "gtlPar;": "\u2995", + "gtquest;": "\u2a7c", + "gtrapprox;": "\u2a86", + "gtrarr;": "\u2978", + "gtrdot;": "\u22d7", + "gtreqless;": "\u22db", + "gtreqqless;": "\u2a8c", + "gtrless;": "\u2277", + "gtrsim;": "\u2273", + "gvertneqq;": "\u2269\ufe00", + "gvnE;": "\u2269\ufe00", + "hArr;": "\u21d4", + "hairsp;": "\u200a", + "half;": "\xbd", + "hamilt;": 
"\u210b", + "hardcy;": "\u044a", + "harr;": "\u2194", + "harrcir;": "\u2948", + "harrw;": "\u21ad", + "hbar;": "\u210f", + "hcirc;": "\u0125", + "hearts;": "\u2665", + "heartsuit;": "\u2665", + "hellip;": "\u2026", + "hercon;": "\u22b9", + "hfr;": "\U0001d525", + "hksearow;": "\u2925", + "hkswarow;": "\u2926", + "hoarr;": "\u21ff", + "homtht;": "\u223b", + "hookleftarrow;": "\u21a9", + "hookrightarrow;": "\u21aa", + "hopf;": "\U0001d559", + "horbar;": "\u2015", + "hscr;": "\U0001d4bd", + "hslash;": "\u210f", + "hstrok;": "\u0127", + "hybull;": "\u2043", + "hyphen;": "\u2010", + "iacute": "\xed", + "iacute;": "\xed", + "ic;": "\u2063", + "icirc": "\xee", + "icirc;": "\xee", + "icy;": "\u0438", + "iecy;": "\u0435", + "iexcl": "\xa1", + "iexcl;": "\xa1", + "iff;": "\u21d4", + "ifr;": "\U0001d526", + "igrave": "\xec", + "igrave;": "\xec", + "ii;": "\u2148", + "iiiint;": "\u2a0c", + "iiint;": "\u222d", + "iinfin;": "\u29dc", + "iiota;": "\u2129", + "ijlig;": "\u0133", + "imacr;": "\u012b", + "image;": "\u2111", + "imagline;": "\u2110", + "imagpart;": "\u2111", + "imath;": "\u0131", + "imof;": "\u22b7", + "imped;": "\u01b5", + "in;": "\u2208", + "incare;": "\u2105", + "infin;": "\u221e", + "infintie;": "\u29dd", + "inodot;": "\u0131", + "int;": "\u222b", + "intcal;": "\u22ba", + "integers;": "\u2124", + "intercal;": "\u22ba", + "intlarhk;": "\u2a17", + "intprod;": "\u2a3c", + "iocy;": "\u0451", + "iogon;": "\u012f", + "iopf;": "\U0001d55a", + "iota;": "\u03b9", + "iprod;": "\u2a3c", + "iquest": "\xbf", + "iquest;": "\xbf", + "iscr;": "\U0001d4be", + "isin;": "\u2208", + "isinE;": "\u22f9", + "isindot;": "\u22f5", + "isins;": "\u22f4", + "isinsv;": "\u22f3", + "isinv;": "\u2208", + "it;": "\u2062", + "itilde;": "\u0129", + "iukcy;": "\u0456", + "iuml": "\xef", + "iuml;": "\xef", + "jcirc;": "\u0135", + "jcy;": "\u0439", + "jfr;": "\U0001d527", + "jmath;": "\u0237", + "jopf;": "\U0001d55b", + "jscr;": "\U0001d4bf", + "jsercy;": "\u0458", + "jukcy;": "\u0454", + "kappa;": 
"\u03ba", + "kappav;": "\u03f0", + "kcedil;": "\u0137", + "kcy;": "\u043a", + "kfr;": "\U0001d528", + "kgreen;": "\u0138", + "khcy;": "\u0445", + "kjcy;": "\u045c", + "kopf;": "\U0001d55c", + "kscr;": "\U0001d4c0", + "lAarr;": "\u21da", + "lArr;": "\u21d0", + "lAtail;": "\u291b", + "lBarr;": "\u290e", + "lE;": "\u2266", + "lEg;": "\u2a8b", + "lHar;": "\u2962", + "lacute;": "\u013a", + "laemptyv;": "\u29b4", + "lagran;": "\u2112", + "lambda;": "\u03bb", + "lang;": "\u27e8", + "langd;": "\u2991", + "langle;": "\u27e8", + "lap;": "\u2a85", + "laquo": "\xab", + "laquo;": "\xab", + "larr;": "\u2190", + "larrb;": "\u21e4", + "larrbfs;": "\u291f", + "larrfs;": "\u291d", + "larrhk;": "\u21a9", + "larrlp;": "\u21ab", + "larrpl;": "\u2939", + "larrsim;": "\u2973", + "larrtl;": "\u21a2", + "lat;": "\u2aab", + "latail;": "\u2919", + "late;": "\u2aad", + "lates;": "\u2aad\ufe00", + "lbarr;": "\u290c", + "lbbrk;": "\u2772", + "lbrace;": "{", + "lbrack;": "[", + "lbrke;": "\u298b", + "lbrksld;": "\u298f", + "lbrkslu;": "\u298d", + "lcaron;": "\u013e", + "lcedil;": "\u013c", + "lceil;": "\u2308", + "lcub;": "{", + "lcy;": "\u043b", + "ldca;": "\u2936", + "ldquo;": "\u201c", + "ldquor;": "\u201e", + "ldrdhar;": "\u2967", + "ldrushar;": "\u294b", + "ldsh;": "\u21b2", + "le;": "\u2264", + "leftarrow;": "\u2190", + "leftarrowtail;": "\u21a2", + "leftharpoondown;": "\u21bd", + "leftharpoonup;": "\u21bc", + "leftleftarrows;": "\u21c7", + "leftrightarrow;": "\u2194", + "leftrightarrows;": "\u21c6", + "leftrightharpoons;": "\u21cb", + "leftrightsquigarrow;": "\u21ad", + "leftthreetimes;": "\u22cb", + "leg;": "\u22da", + "leq;": "\u2264", + "leqq;": "\u2266", + "leqslant;": "\u2a7d", + "les;": "\u2a7d", + "lescc;": "\u2aa8", + "lesdot;": "\u2a7f", + "lesdoto;": "\u2a81", + "lesdotor;": "\u2a83", + "lesg;": "\u22da\ufe00", + "lesges;": "\u2a93", + "lessapprox;": "\u2a85", + "lessdot;": "\u22d6", + "lesseqgtr;": "\u22da", + "lesseqqgtr;": "\u2a8b", + "lessgtr;": "\u2276", + "lesssim;": 
"\u2272", + "lfisht;": "\u297c", + "lfloor;": "\u230a", + "lfr;": "\U0001d529", + "lg;": "\u2276", + "lgE;": "\u2a91", + "lhard;": "\u21bd", + "lharu;": "\u21bc", + "lharul;": "\u296a", + "lhblk;": "\u2584", + "ljcy;": "\u0459", + "ll;": "\u226a", + "llarr;": "\u21c7", + "llcorner;": "\u231e", + "llhard;": "\u296b", + "lltri;": "\u25fa", + "lmidot;": "\u0140", + "lmoust;": "\u23b0", + "lmoustache;": "\u23b0", + "lnE;": "\u2268", + "lnap;": "\u2a89", + "lnapprox;": "\u2a89", + "lne;": "\u2a87", + "lneq;": "\u2a87", + "lneqq;": "\u2268", + "lnsim;": "\u22e6", + "loang;": "\u27ec", + "loarr;": "\u21fd", + "lobrk;": "\u27e6", + "longleftarrow;": "\u27f5", + "longleftrightarrow;": "\u27f7", + "longmapsto;": "\u27fc", + "longrightarrow;": "\u27f6", + "looparrowleft;": "\u21ab", + "looparrowright;": "\u21ac", + "lopar;": "\u2985", + "lopf;": "\U0001d55d", + "loplus;": "\u2a2d", + "lotimes;": "\u2a34", + "lowast;": "\u2217", + "lowbar;": "_", + "loz;": "\u25ca", + "lozenge;": "\u25ca", + "lozf;": "\u29eb", + "lpar;": "(", + "lparlt;": "\u2993", + "lrarr;": "\u21c6", + "lrcorner;": "\u231f", + "lrhar;": "\u21cb", + "lrhard;": "\u296d", + "lrm;": "\u200e", + "lrtri;": "\u22bf", + "lsaquo;": "\u2039", + "lscr;": "\U0001d4c1", + "lsh;": "\u21b0", + "lsim;": "\u2272", + "lsime;": "\u2a8d", + "lsimg;": "\u2a8f", + "lsqb;": "[", + "lsquo;": "\u2018", + "lsquor;": "\u201a", + "lstrok;": "\u0142", + "lt": "<", + "lt;": "<", + "ltcc;": "\u2aa6", + "ltcir;": "\u2a79", + "ltdot;": "\u22d6", + "lthree;": "\u22cb", + "ltimes;": "\u22c9", + "ltlarr;": "\u2976", + "ltquest;": "\u2a7b", + "ltrPar;": "\u2996", + "ltri;": "\u25c3", + "ltrie;": "\u22b4", + "ltrif;": "\u25c2", + "lurdshar;": "\u294a", + "luruhar;": "\u2966", + "lvertneqq;": "\u2268\ufe00", + "lvnE;": "\u2268\ufe00", + "mDDot;": "\u223a", + "macr": "\xaf", + "macr;": "\xaf", + "male;": "\u2642", + "malt;": "\u2720", + "maltese;": "\u2720", + "map;": "\u21a6", + "mapsto;": "\u21a6", + "mapstodown;": "\u21a7", + "mapstoleft;": 
"\u21a4", + "mapstoup;": "\u21a5", + "marker;": "\u25ae", + "mcomma;": "\u2a29", + "mcy;": "\u043c", + "mdash;": "\u2014", + "measuredangle;": "\u2221", + "mfr;": "\U0001d52a", + "mho;": "\u2127", + "micro": "\xb5", + "micro;": "\xb5", + "mid;": "\u2223", + "midast;": "*", + "midcir;": "\u2af0", + "middot": "\xb7", + "middot;": "\xb7", + "minus;": "\u2212", + "minusb;": "\u229f", + "minusd;": "\u2238", + "minusdu;": "\u2a2a", + "mlcp;": "\u2adb", + "mldr;": "\u2026", + "mnplus;": "\u2213", + "models;": "\u22a7", + "mopf;": "\U0001d55e", + "mp;": "\u2213", + "mscr;": "\U0001d4c2", + "mstpos;": "\u223e", + "mu;": "\u03bc", + "multimap;": "\u22b8", + "mumap;": "\u22b8", + "nGg;": "\u22d9\u0338", + "nGt;": "\u226b\u20d2", + "nGtv;": "\u226b\u0338", + "nLeftarrow;": "\u21cd", + "nLeftrightarrow;": "\u21ce", + "nLl;": "\u22d8\u0338", + "nLt;": "\u226a\u20d2", + "nLtv;": "\u226a\u0338", + "nRightarrow;": "\u21cf", + "nVDash;": "\u22af", + "nVdash;": "\u22ae", + "nabla;": "\u2207", + "nacute;": "\u0144", + "nang;": "\u2220\u20d2", + "nap;": "\u2249", + "napE;": "\u2a70\u0338", + "napid;": "\u224b\u0338", + "napos;": "\u0149", + "napprox;": "\u2249", + "natur;": "\u266e", + "natural;": "\u266e", + "naturals;": "\u2115", + "nbsp": "\xa0", + "nbsp;": "\xa0", + "nbump;": "\u224e\u0338", + "nbumpe;": "\u224f\u0338", + "ncap;": "\u2a43", + "ncaron;": "\u0148", + "ncedil;": "\u0146", + "ncong;": "\u2247", + "ncongdot;": "\u2a6d\u0338", + "ncup;": "\u2a42", + "ncy;": "\u043d", + "ndash;": "\u2013", + "ne;": "\u2260", + "neArr;": "\u21d7", + "nearhk;": "\u2924", + "nearr;": "\u2197", + "nearrow;": "\u2197", + "nedot;": "\u2250\u0338", + "nequiv;": "\u2262", + "nesear;": "\u2928", + "nesim;": "\u2242\u0338", + "nexist;": "\u2204", + "nexists;": "\u2204", + "nfr;": "\U0001d52b", + "ngE;": "\u2267\u0338", + "nge;": "\u2271", + "ngeq;": "\u2271", + "ngeqq;": "\u2267\u0338", + "ngeqslant;": "\u2a7e\u0338", + "nges;": "\u2a7e\u0338", + "ngsim;": "\u2275", + "ngt;": "\u226f", + "ngtr;": 
"\u226f", + "nhArr;": "\u21ce", + "nharr;": "\u21ae", + "nhpar;": "\u2af2", + "ni;": "\u220b", + "nis;": "\u22fc", + "nisd;": "\u22fa", + "niv;": "\u220b", + "njcy;": "\u045a", + "nlArr;": "\u21cd", + "nlE;": "\u2266\u0338", + "nlarr;": "\u219a", + "nldr;": "\u2025", + "nle;": "\u2270", + "nleftarrow;": "\u219a", + "nleftrightarrow;": "\u21ae", + "nleq;": "\u2270", + "nleqq;": "\u2266\u0338", + "nleqslant;": "\u2a7d\u0338", + "nles;": "\u2a7d\u0338", + "nless;": "\u226e", + "nlsim;": "\u2274", + "nlt;": "\u226e", + "nltri;": "\u22ea", + "nltrie;": "\u22ec", + "nmid;": "\u2224", + "nopf;": "\U0001d55f", + "not": "\xac", + "not;": "\xac", + "notin;": "\u2209", + "notinE;": "\u22f9\u0338", + "notindot;": "\u22f5\u0338", + "notinva;": "\u2209", + "notinvb;": "\u22f7", + "notinvc;": "\u22f6", + "notni;": "\u220c", + "notniva;": "\u220c", + "notnivb;": "\u22fe", + "notnivc;": "\u22fd", + "npar;": "\u2226", + "nparallel;": "\u2226", + "nparsl;": "\u2afd\u20e5", + "npart;": "\u2202\u0338", + "npolint;": "\u2a14", + "npr;": "\u2280", + "nprcue;": "\u22e0", + "npre;": "\u2aaf\u0338", + "nprec;": "\u2280", + "npreceq;": "\u2aaf\u0338", + "nrArr;": "\u21cf", + "nrarr;": "\u219b", + "nrarrc;": "\u2933\u0338", + "nrarrw;": "\u219d\u0338", + "nrightarrow;": "\u219b", + "nrtri;": "\u22eb", + "nrtrie;": "\u22ed", + "nsc;": "\u2281", + "nsccue;": "\u22e1", + "nsce;": "\u2ab0\u0338", + "nscr;": "\U0001d4c3", + "nshortmid;": "\u2224", + "nshortparallel;": "\u2226", + "nsim;": "\u2241", + "nsime;": "\u2244", + "nsimeq;": "\u2244", + "nsmid;": "\u2224", + "nspar;": "\u2226", + "nsqsube;": "\u22e2", + "nsqsupe;": "\u22e3", + "nsub;": "\u2284", + "nsubE;": "\u2ac5\u0338", + "nsube;": "\u2288", + "nsubset;": "\u2282\u20d2", + "nsubseteq;": "\u2288", + "nsubseteqq;": "\u2ac5\u0338", + "nsucc;": "\u2281", + "nsucceq;": "\u2ab0\u0338", + "nsup;": "\u2285", + "nsupE;": "\u2ac6\u0338", + "nsupe;": "\u2289", + "nsupset;": "\u2283\u20d2", + "nsupseteq;": "\u2289", + "nsupseteqq;": "\u2ac6\u0338", 
+ "ntgl;": "\u2279", + "ntilde": "\xf1", + "ntilde;": "\xf1", + "ntlg;": "\u2278", + "ntriangleleft;": "\u22ea", + "ntrianglelefteq;": "\u22ec", + "ntriangleright;": "\u22eb", + "ntrianglerighteq;": "\u22ed", + "nu;": "\u03bd", + "num;": "#", + "numero;": "\u2116", + "numsp;": "\u2007", + "nvDash;": "\u22ad", + "nvHarr;": "\u2904", + "nvap;": "\u224d\u20d2", + "nvdash;": "\u22ac", + "nvge;": "\u2265\u20d2", + "nvgt;": ">\u20d2", + "nvinfin;": "\u29de", + "nvlArr;": "\u2902", + "nvle;": "\u2264\u20d2", + "nvlt;": "<\u20d2", + "nvltrie;": "\u22b4\u20d2", + "nvrArr;": "\u2903", + "nvrtrie;": "\u22b5\u20d2", + "nvsim;": "\u223c\u20d2", + "nwArr;": "\u21d6", + "nwarhk;": "\u2923", + "nwarr;": "\u2196", + "nwarrow;": "\u2196", + "nwnear;": "\u2927", + "oS;": "\u24c8", + "oacute": "\xf3", + "oacute;": "\xf3", + "oast;": "\u229b", + "ocir;": "\u229a", + "ocirc": "\xf4", + "ocirc;": "\xf4", + "ocy;": "\u043e", + "odash;": "\u229d", + "odblac;": "\u0151", + "odiv;": "\u2a38", + "odot;": "\u2299", + "odsold;": "\u29bc", + "oelig;": "\u0153", + "ofcir;": "\u29bf", + "ofr;": "\U0001d52c", + "ogon;": "\u02db", + "ograve": "\xf2", + "ograve;": "\xf2", + "ogt;": "\u29c1", + "ohbar;": "\u29b5", + "ohm;": "\u03a9", + "oint;": "\u222e", + "olarr;": "\u21ba", + "olcir;": "\u29be", + "olcross;": "\u29bb", + "oline;": "\u203e", + "olt;": "\u29c0", + "omacr;": "\u014d", + "omega;": "\u03c9", + "omicron;": "\u03bf", + "omid;": "\u29b6", + "ominus;": "\u2296", + "oopf;": "\U0001d560", + "opar;": "\u29b7", + "operp;": "\u29b9", + "oplus;": "\u2295", + "or;": "\u2228", + "orarr;": "\u21bb", + "ord;": "\u2a5d", + "order;": "\u2134", + "orderof;": "\u2134", + "ordf": "\xaa", + "ordf;": "\xaa", + "ordm": "\xba", + "ordm;": "\xba", + "origof;": "\u22b6", + "oror;": "\u2a56", + "orslope;": "\u2a57", + "orv;": "\u2a5b", + "oscr;": "\u2134", + "oslash": "\xf8", + "oslash;": "\xf8", + "osol;": "\u2298", + "otilde": "\xf5", + "otilde;": "\xf5", + "otimes;": "\u2297", + "otimesas;": "\u2a36", + 
"ouml": "\xf6", + "ouml;": "\xf6", + "ovbar;": "\u233d", + "par;": "\u2225", + "para": "\xb6", + "para;": "\xb6", + "parallel;": "\u2225", + "parsim;": "\u2af3", + "parsl;": "\u2afd", + "part;": "\u2202", + "pcy;": "\u043f", + "percnt;": "%", + "period;": ".", + "permil;": "\u2030", + "perp;": "\u22a5", + "pertenk;": "\u2031", + "pfr;": "\U0001d52d", + "phi;": "\u03c6", + "phiv;": "\u03d5", + "phmmat;": "\u2133", + "phone;": "\u260e", + "pi;": "\u03c0", + "pitchfork;": "\u22d4", + "piv;": "\u03d6", + "planck;": "\u210f", + "planckh;": "\u210e", + "plankv;": "\u210f", + "plus;": "+", + "plusacir;": "\u2a23", + "plusb;": "\u229e", + "pluscir;": "\u2a22", + "plusdo;": "\u2214", + "plusdu;": "\u2a25", + "pluse;": "\u2a72", + "plusmn": "\xb1", + "plusmn;": "\xb1", + "plussim;": "\u2a26", + "plustwo;": "\u2a27", + "pm;": "\xb1", + "pointint;": "\u2a15", + "popf;": "\U0001d561", + "pound": "\xa3", + "pound;": "\xa3", + "pr;": "\u227a", + "prE;": "\u2ab3", + "prap;": "\u2ab7", + "prcue;": "\u227c", + "pre;": "\u2aaf", + "prec;": "\u227a", + "precapprox;": "\u2ab7", + "preccurlyeq;": "\u227c", + "preceq;": "\u2aaf", + "precnapprox;": "\u2ab9", + "precneqq;": "\u2ab5", + "precnsim;": "\u22e8", + "precsim;": "\u227e", + "prime;": "\u2032", + "primes;": "\u2119", + "prnE;": "\u2ab5", + "prnap;": "\u2ab9", + "prnsim;": "\u22e8", + "prod;": "\u220f", + "profalar;": "\u232e", + "profline;": "\u2312", + "profsurf;": "\u2313", + "prop;": "\u221d", + "propto;": "\u221d", + "prsim;": "\u227e", + "prurel;": "\u22b0", + "pscr;": "\U0001d4c5", + "psi;": "\u03c8", + "puncsp;": "\u2008", + "qfr;": "\U0001d52e", + "qint;": "\u2a0c", + "qopf;": "\U0001d562", + "qprime;": "\u2057", + "qscr;": "\U0001d4c6", + "quaternions;": "\u210d", + "quatint;": "\u2a16", + "quest;": "?", + "questeq;": "\u225f", + "quot": "\"", + "quot;": "\"", + "rAarr;": "\u21db", + "rArr;": "\u21d2", + "rAtail;": "\u291c", + "rBarr;": "\u290f", + "rHar;": "\u2964", + "race;": "\u223d\u0331", + "racute;": "\u0155", + 
"radic;": "\u221a", + "raemptyv;": "\u29b3", + "rang;": "\u27e9", + "rangd;": "\u2992", + "range;": "\u29a5", + "rangle;": "\u27e9", + "raquo": "\xbb", + "raquo;": "\xbb", + "rarr;": "\u2192", + "rarrap;": "\u2975", + "rarrb;": "\u21e5", + "rarrbfs;": "\u2920", + "rarrc;": "\u2933", + "rarrfs;": "\u291e", + "rarrhk;": "\u21aa", + "rarrlp;": "\u21ac", + "rarrpl;": "\u2945", + "rarrsim;": "\u2974", + "rarrtl;": "\u21a3", + "rarrw;": "\u219d", + "ratail;": "\u291a", + "ratio;": "\u2236", + "rationals;": "\u211a", + "rbarr;": "\u290d", + "rbbrk;": "\u2773", + "rbrace;": "}", + "rbrack;": "]", + "rbrke;": "\u298c", + "rbrksld;": "\u298e", + "rbrkslu;": "\u2990", + "rcaron;": "\u0159", + "rcedil;": "\u0157", + "rceil;": "\u2309", + "rcub;": "}", + "rcy;": "\u0440", + "rdca;": "\u2937", + "rdldhar;": "\u2969", + "rdquo;": "\u201d", + "rdquor;": "\u201d", + "rdsh;": "\u21b3", + "real;": "\u211c", + "realine;": "\u211b", + "realpart;": "\u211c", + "reals;": "\u211d", + "rect;": "\u25ad", + "reg": "\xae", + "reg;": "\xae", + "rfisht;": "\u297d", + "rfloor;": "\u230b", + "rfr;": "\U0001d52f", + "rhard;": "\u21c1", + "rharu;": "\u21c0", + "rharul;": "\u296c", + "rho;": "\u03c1", + "rhov;": "\u03f1", + "rightarrow;": "\u2192", + "rightarrowtail;": "\u21a3", + "rightharpoondown;": "\u21c1", + "rightharpoonup;": "\u21c0", + "rightleftarrows;": "\u21c4", + "rightleftharpoons;": "\u21cc", + "rightrightarrows;": "\u21c9", + "rightsquigarrow;": "\u219d", + "rightthreetimes;": "\u22cc", + "ring;": "\u02da", + "risingdotseq;": "\u2253", + "rlarr;": "\u21c4", + "rlhar;": "\u21cc", + "rlm;": "\u200f", + "rmoust;": "\u23b1", + "rmoustache;": "\u23b1", + "rnmid;": "\u2aee", + "roang;": "\u27ed", + "roarr;": "\u21fe", + "robrk;": "\u27e7", + "ropar;": "\u2986", + "ropf;": "\U0001d563", + "roplus;": "\u2a2e", + "rotimes;": "\u2a35", + "rpar;": ")", + "rpargt;": "\u2994", + "rppolint;": "\u2a12", + "rrarr;": "\u21c9", + "rsaquo;": "\u203a", + "rscr;": "\U0001d4c7", + "rsh;": "\u21b1", + 
"rsqb;": "]", + "rsquo;": "\u2019", + "rsquor;": "\u2019", + "rthree;": "\u22cc", + "rtimes;": "\u22ca", + "rtri;": "\u25b9", + "rtrie;": "\u22b5", + "rtrif;": "\u25b8", + "rtriltri;": "\u29ce", + "ruluhar;": "\u2968", + "rx;": "\u211e", + "sacute;": "\u015b", + "sbquo;": "\u201a", + "sc;": "\u227b", + "scE;": "\u2ab4", + "scap;": "\u2ab8", + "scaron;": "\u0161", + "sccue;": "\u227d", + "sce;": "\u2ab0", + "scedil;": "\u015f", + "scirc;": "\u015d", + "scnE;": "\u2ab6", + "scnap;": "\u2aba", + "scnsim;": "\u22e9", + "scpolint;": "\u2a13", + "scsim;": "\u227f", + "scy;": "\u0441", + "sdot;": "\u22c5", + "sdotb;": "\u22a1", + "sdote;": "\u2a66", + "seArr;": "\u21d8", + "searhk;": "\u2925", + "searr;": "\u2198", + "searrow;": "\u2198", + "sect": "\xa7", + "sect;": "\xa7", + "semi;": ";", + "seswar;": "\u2929", + "setminus;": "\u2216", + "setmn;": "\u2216", + "sext;": "\u2736", + "sfr;": "\U0001d530", + "sfrown;": "\u2322", + "sharp;": "\u266f", + "shchcy;": "\u0449", + "shcy;": "\u0448", + "shortmid;": "\u2223", + "shortparallel;": "\u2225", + "shy": "\xad", + "shy;": "\xad", + "sigma;": "\u03c3", + "sigmaf;": "\u03c2", + "sigmav;": "\u03c2", + "sim;": "\u223c", + "simdot;": "\u2a6a", + "sime;": "\u2243", + "simeq;": "\u2243", + "simg;": "\u2a9e", + "simgE;": "\u2aa0", + "siml;": "\u2a9d", + "simlE;": "\u2a9f", + "simne;": "\u2246", + "simplus;": "\u2a24", + "simrarr;": "\u2972", + "slarr;": "\u2190", + "smallsetminus;": "\u2216", + "smashp;": "\u2a33", + "smeparsl;": "\u29e4", + "smid;": "\u2223", + "smile;": "\u2323", + "smt;": "\u2aaa", + "smte;": "\u2aac", + "smtes;": "\u2aac\ufe00", + "softcy;": "\u044c", + "sol;": "/", + "solb;": "\u29c4", + "solbar;": "\u233f", + "sopf;": "\U0001d564", + "spades;": "\u2660", + "spadesuit;": "\u2660", + "spar;": "\u2225", + "sqcap;": "\u2293", + "sqcaps;": "\u2293\ufe00", + "sqcup;": "\u2294", + "sqcups;": "\u2294\ufe00", + "sqsub;": "\u228f", + "sqsube;": "\u2291", + "sqsubset;": "\u228f", + "sqsubseteq;": "\u2291", + "sqsup;": 
"\u2290", + "sqsupe;": "\u2292", + "sqsupset;": "\u2290", + "sqsupseteq;": "\u2292", + "squ;": "\u25a1", + "square;": "\u25a1", + "squarf;": "\u25aa", + "squf;": "\u25aa", + "srarr;": "\u2192", + "sscr;": "\U0001d4c8", + "ssetmn;": "\u2216", + "ssmile;": "\u2323", + "sstarf;": "\u22c6", + "star;": "\u2606", + "starf;": "\u2605", + "straightepsilon;": "\u03f5", + "straightphi;": "\u03d5", + "strns;": "\xaf", + "sub;": "\u2282", + "subE;": "\u2ac5", + "subdot;": "\u2abd", + "sube;": "\u2286", + "subedot;": "\u2ac3", + "submult;": "\u2ac1", + "subnE;": "\u2acb", + "subne;": "\u228a", + "subplus;": "\u2abf", + "subrarr;": "\u2979", + "subset;": "\u2282", + "subseteq;": "\u2286", + "subseteqq;": "\u2ac5", + "subsetneq;": "\u228a", + "subsetneqq;": "\u2acb", + "subsim;": "\u2ac7", + "subsub;": "\u2ad5", + "subsup;": "\u2ad3", + "succ;": "\u227b", + "succapprox;": "\u2ab8", + "succcurlyeq;": "\u227d", + "succeq;": "\u2ab0", + "succnapprox;": "\u2aba", + "succneqq;": "\u2ab6", + "succnsim;": "\u22e9", + "succsim;": "\u227f", + "sum;": "\u2211", + "sung;": "\u266a", + "sup1": "\xb9", + "sup1;": "\xb9", + "sup2": "\xb2", + "sup2;": "\xb2", + "sup3": "\xb3", + "sup3;": "\xb3", + "sup;": "\u2283", + "supE;": "\u2ac6", + "supdot;": "\u2abe", + "supdsub;": "\u2ad8", + "supe;": "\u2287", + "supedot;": "\u2ac4", + "suphsol;": "\u27c9", + "suphsub;": "\u2ad7", + "suplarr;": "\u297b", + "supmult;": "\u2ac2", + "supnE;": "\u2acc", + "supne;": "\u228b", + "supplus;": "\u2ac0", + "supset;": "\u2283", + "supseteq;": "\u2287", + "supseteqq;": "\u2ac6", + "supsetneq;": "\u228b", + "supsetneqq;": "\u2acc", + "supsim;": "\u2ac8", + "supsub;": "\u2ad4", + "supsup;": "\u2ad6", + "swArr;": "\u21d9", + "swarhk;": "\u2926", + "swarr;": "\u2199", + "swarrow;": "\u2199", + "swnwar;": "\u292a", + "szlig": "\xdf", + "szlig;": "\xdf", + "target;": "\u2316", + "tau;": "\u03c4", + "tbrk;": "\u23b4", + "tcaron;": "\u0165", + "tcedil;": "\u0163", + "tcy;": "\u0442", + "tdot;": "\u20db", + "telrec;": 
"\u2315", + "tfr;": "\U0001d531", + "there4;": "\u2234", + "therefore;": "\u2234", + "theta;": "\u03b8", + "thetasym;": "\u03d1", + "thetav;": "\u03d1", + "thickapprox;": "\u2248", + "thicksim;": "\u223c", + "thinsp;": "\u2009", + "thkap;": "\u2248", + "thksim;": "\u223c", + "thorn": "\xfe", + "thorn;": "\xfe", + "tilde;": "\u02dc", + "times": "\xd7", + "times;": "\xd7", + "timesb;": "\u22a0", + "timesbar;": "\u2a31", + "timesd;": "\u2a30", + "tint;": "\u222d", + "toea;": "\u2928", + "top;": "\u22a4", + "topbot;": "\u2336", + "topcir;": "\u2af1", + "topf;": "\U0001d565", + "topfork;": "\u2ada", + "tosa;": "\u2929", + "tprime;": "\u2034", + "trade;": "\u2122", + "triangle;": "\u25b5", + "triangledown;": "\u25bf", + "triangleleft;": "\u25c3", + "trianglelefteq;": "\u22b4", + "triangleq;": "\u225c", + "triangleright;": "\u25b9", + "trianglerighteq;": "\u22b5", + "tridot;": "\u25ec", + "trie;": "\u225c", + "triminus;": "\u2a3a", + "triplus;": "\u2a39", + "trisb;": "\u29cd", + "tritime;": "\u2a3b", + "trpezium;": "\u23e2", + "tscr;": "\U0001d4c9", + "tscy;": "\u0446", + "tshcy;": "\u045b", + "tstrok;": "\u0167", + "twixt;": "\u226c", + "twoheadleftarrow;": "\u219e", + "twoheadrightarrow;": "\u21a0", + "uArr;": "\u21d1", + "uHar;": "\u2963", + "uacute": "\xfa", + "uacute;": "\xfa", + "uarr;": "\u2191", + "ubrcy;": "\u045e", + "ubreve;": "\u016d", + "ucirc": "\xfb", + "ucirc;": "\xfb", + "ucy;": "\u0443", + "udarr;": "\u21c5", + "udblac;": "\u0171", + "udhar;": "\u296e", + "ufisht;": "\u297e", + "ufr;": "\U0001d532", + "ugrave": "\xf9", + "ugrave;": "\xf9", + "uharl;": "\u21bf", + "uharr;": "\u21be", + "uhblk;": "\u2580", + "ulcorn;": "\u231c", + "ulcorner;": "\u231c", + "ulcrop;": "\u230f", + "ultri;": "\u25f8", + "umacr;": "\u016b", + "uml": "\xa8", + "uml;": "\xa8", + "uogon;": "\u0173", + "uopf;": "\U0001d566", + "uparrow;": "\u2191", + "updownarrow;": "\u2195", + "upharpoonleft;": "\u21bf", + "upharpoonright;": "\u21be", + "uplus;": "\u228e", + "upsi;": "\u03c5", + 
"upsih;": "\u03d2", + "upsilon;": "\u03c5", + "upuparrows;": "\u21c8", + "urcorn;": "\u231d", + "urcorner;": "\u231d", + "urcrop;": "\u230e", + "uring;": "\u016f", + "urtri;": "\u25f9", + "uscr;": "\U0001d4ca", + "utdot;": "\u22f0", + "utilde;": "\u0169", + "utri;": "\u25b5", + "utrif;": "\u25b4", + "uuarr;": "\u21c8", + "uuml": "\xfc", + "uuml;": "\xfc", + "uwangle;": "\u29a7", + "vArr;": "\u21d5", + "vBar;": "\u2ae8", + "vBarv;": "\u2ae9", + "vDash;": "\u22a8", + "vangrt;": "\u299c", + "varepsilon;": "\u03f5", + "varkappa;": "\u03f0", + "varnothing;": "\u2205", + "varphi;": "\u03d5", + "varpi;": "\u03d6", + "varpropto;": "\u221d", + "varr;": "\u2195", + "varrho;": "\u03f1", + "varsigma;": "\u03c2", + "varsubsetneq;": "\u228a\ufe00", + "varsubsetneqq;": "\u2acb\ufe00", + "varsupsetneq;": "\u228b\ufe00", + "varsupsetneqq;": "\u2acc\ufe00", + "vartheta;": "\u03d1", + "vartriangleleft;": "\u22b2", + "vartriangleright;": "\u22b3", + "vcy;": "\u0432", + "vdash;": "\u22a2", + "vee;": "\u2228", + "veebar;": "\u22bb", + "veeeq;": "\u225a", + "vellip;": "\u22ee", + "verbar;": "|", + "vert;": "|", + "vfr;": "\U0001d533", + "vltri;": "\u22b2", + "vnsub;": "\u2282\u20d2", + "vnsup;": "\u2283\u20d2", + "vopf;": "\U0001d567", + "vprop;": "\u221d", + "vrtri;": "\u22b3", + "vscr;": "\U0001d4cb", + "vsubnE;": "\u2acb\ufe00", + "vsubne;": "\u228a\ufe00", + "vsupnE;": "\u2acc\ufe00", + "vsupne;": "\u228b\ufe00", + "vzigzag;": "\u299a", + "wcirc;": "\u0175", + "wedbar;": "\u2a5f", + "wedge;": "\u2227", + "wedgeq;": "\u2259", + "weierp;": "\u2118", + "wfr;": "\U0001d534", + "wopf;": "\U0001d568", + "wp;": "\u2118", + "wr;": "\u2240", + "wreath;": "\u2240", + "wscr;": "\U0001d4cc", + "xcap;": "\u22c2", + "xcirc;": "\u25ef", + "xcup;": "\u22c3", + "xdtri;": "\u25bd", + "xfr;": "\U0001d535", + "xhArr;": "\u27fa", + "xharr;": "\u27f7", + "xi;": "\u03be", + "xlArr;": "\u27f8", + "xlarr;": "\u27f5", + "xmap;": "\u27fc", + "xnis;": "\u22fb", + "xodot;": "\u2a00", + "xopf;": "\U0001d569", + 
"xoplus;": "\u2a01", + "xotime;": "\u2a02", + "xrArr;": "\u27f9", + "xrarr;": "\u27f6", + "xscr;": "\U0001d4cd", + "xsqcup;": "\u2a06", + "xuplus;": "\u2a04", + "xutri;": "\u25b3", + "xvee;": "\u22c1", + "xwedge;": "\u22c0", + "yacute": "\xfd", + "yacute;": "\xfd", + "yacy;": "\u044f", + "ycirc;": "\u0177", + "ycy;": "\u044b", + "yen": "\xa5", + "yen;": "\xa5", + "yfr;": "\U0001d536", + "yicy;": "\u0457", + "yopf;": "\U0001d56a", + "yscr;": "\U0001d4ce", + "yucy;": "\u044e", + "yuml": "\xff", + "yuml;": "\xff", + "zacute;": "\u017a", + "zcaron;": "\u017e", + "zcy;": "\u0437", + "zdot;": "\u017c", + "zeetrf;": "\u2128", + "zeta;": "\u03b6", + "zfr;": "\U0001d537", + "zhcy;": "\u0436", + "zigrarr;": "\u21dd", + "zopf;": "\U0001d56b", + "zscr;": "\U0001d4cf", + "zwj;": "\u200d", + "zwnj;": "\u200c", } replacementCharacters = { - 0x0:u"\uFFFD", - 0x0d:u"\u000D", - 0x80:u"\u20AC", - 0x81:u"\u0081", - 0x81:u"\u0081", - 0x82:u"\u201A", - 0x83:u"\u0192", - 0x84:u"\u201E", - 0x85:u"\u2026", - 0x86:u"\u2020", - 0x87:u"\u2021", - 0x88:u"\u02C6", - 0x89:u"\u2030", - 0x8A:u"\u0160", - 0x8B:u"\u2039", - 0x8C:u"\u0152", - 0x8D:u"\u008D", - 0x8E:u"\u017D", - 0x8F:u"\u008F", - 0x90:u"\u0090", - 0x91:u"\u2018", - 0x92:u"\u2019", - 0x93:u"\u201C", - 0x94:u"\u201D", - 0x95:u"\u2022", - 0x96:u"\u2013", - 0x97:u"\u2014", - 0x98:u"\u02DC", - 0x99:u"\u2122", - 0x9A:u"\u0161", - 0x9B:u"\u203A", - 0x9C:u"\u0153", - 0x9D:u"\u009D", - 0x9E:u"\u017E", - 0x9F:u"\u0178", + 0x0: "\uFFFD", + 0x0d: "\u000D", + 0x80: "\u20AC", + 0x81: "\u0081", + 0x81: "\u0081", + 0x82: "\u201A", + 0x83: "\u0192", + 0x84: "\u201E", + 0x85: "\u2026", + 0x86: "\u2020", + 0x87: "\u2021", + 0x88: "\u02C6", + 0x89: "\u2030", + 0x8A: "\u0160", + 0x8B: "\u2039", + 0x8C: "\u0152", + 0x8D: "\u008D", + 0x8E: "\u017D", + 0x8F: "\u008F", + 0x90: "\u0090", + 0x91: "\u2018", + 0x92: "\u2019", + 0x93: "\u201C", + 0x94: "\u201D", + 0x95: "\u2022", + 0x96: "\u2013", + 0x97: "\u2014", + 0x98: "\u02DC", + 0x99: "\u2122", + 0x9A: 
"\u0161", + 0x9B: "\u203A", + 0x9C: "\u0153", + 0x9D: "\u009D", + 0x9E: "\u017E", + 0x9F: "\u0178", } encodings = { @@ -3061,25 +3078,27 @@ 'x-x-big5': 'big5'} tokenTypes = { - "Doctype":0, - "Characters":1, - "SpaceCharacters":2, - "StartTag":3, - "EndTag":4, - "EmptyTag":5, - "Comment":6, - "ParseError":7 + "Doctype": 0, + "Characters": 1, + "SpaceCharacters": 2, + "StartTag": 3, + "EndTag": 4, + "EmptyTag": 5, + "Comment": 6, + "ParseError": 7 } -tagTokenTypes = frozenset((tokenTypes["StartTag"], tokenTypes["EndTag"], +tagTokenTypes = frozenset((tokenTypes["StartTag"], tokenTypes["EndTag"], tokenTypes["EmptyTag"])) -prefixes = dict([(v,k) for k,v in namespaces.iteritems()]) +prefixes = dict([(v, k) for k, v in namespaces.items()]) prefixes["http://www.w3.org/1998/Math/MathML"] = "math" + class DataLossWarning(UserWarning): pass + class ReparseException(Exception): pass diff --git a/libs/html5lib/filters/_base.py b/libs/html5lib/filters/_base.py index bca94ada40..c7dbaed0fa 100644 --- a/libs/html5lib/filters/_base.py +++ b/libs/html5lib/filters/_base.py @@ -1,3 +1,5 @@ +from __future__ import absolute_import, division, unicode_literals + class Filter(object): def __init__(self, source): diff --git a/libs/html5lib/filters/alphabeticalattributes.py b/libs/html5lib/filters/alphabeticalattributes.py new file mode 100644 index 0000000000..fed6996c1d --- /dev/null +++ b/libs/html5lib/filters/alphabeticalattributes.py @@ -0,0 +1,20 @@ +from __future__ import absolute_import, division, unicode_literals + +from . 
import _base + +try: + from collections import OrderedDict +except ImportError: + from ordereddict import OrderedDict + + +class Filter(_base.Filter): + def __iter__(self): + for token in _base.Filter.__iter__(self): + if token["type"] in ("StartTag", "EmptyTag"): + attrs = OrderedDict() + for name, value in sorted(token["data"].items(), + key=lambda x: x[0]): + attrs[name] = value + token["data"] = attrs + yield token diff --git a/libs/html5lib/filters/formfiller.py b/libs/html5lib/filters/formfiller.py deleted file mode 100644 index 940017149b..0000000000 --- a/libs/html5lib/filters/formfiller.py +++ /dev/null @@ -1,127 +0,0 @@ -# -# The goal is to finally have a form filler where you pass data for -# each form, using the algorithm for "Seeding a form with initial values" -# See http://www.whatwg.org/specs/web-forms/current-work/#seeding -# - -import _base - -from html5lib.constants import spaceCharacters -spaceCharacters = u"".join(spaceCharacters) - -class SimpleFilter(_base.Filter): - def __init__(self, source, fieldStorage): - _base.Filter.__init__(self, source) - self.fieldStorage = fieldStorage - - def __iter__(self): - field_indices = {} - state = None - field_name = None - for token in _base.Filter.__iter__(self): - type = token["type"] - if type in ("StartTag", "EmptyTag"): - name = token["name"].lower() - if name == "input": - field_name = None - field_type = None - input_value_index = -1 - input_checked_index = -1 - for i,(n,v) in enumerate(token["data"]): - n = n.lower() - if n == u"name": - field_name = v.strip(spaceCharacters) - elif n == u"type": - field_type = v.strip(spaceCharacters) - elif n == u"checked": - input_checked_index = i - elif n == u"value": - input_value_index = i - - value_list = self.fieldStorage.getlist(field_name) - field_index = field_indices.setdefault(field_name, 0) - if field_index < len(value_list): - value = value_list[field_index] - else: - value = "" - - if field_type in (u"checkbox", u"radio"): - if value_list: - if 
token["data"][input_value_index][1] == value: - if input_checked_index < 0: - token["data"].append((u"checked", u"")) - field_indices[field_name] = field_index + 1 - elif input_checked_index >= 0: - del token["data"][input_checked_index] - - elif field_type not in (u"button", u"submit", u"reset"): - if input_value_index >= 0: - token["data"][input_value_index] = (u"value", value) - else: - token["data"].append((u"value", value)) - field_indices[field_name] = field_index + 1 - - field_type = None - field_name = None - - elif name == "textarea": - field_type = "textarea" - field_name = dict((token["data"])[::-1])["name"] - - elif name == "select": - field_type = "select" - attributes = dict(token["data"][::-1]) - field_name = attributes.get("name") - is_select_multiple = "multiple" in attributes - is_selected_option_found = False - - elif field_type == "select" and field_name and name == "option": - option_selected_index = -1 - option_value = None - for i,(n,v) in enumerate(token["data"]): - n = n.lower() - if n == "selected": - option_selected_index = i - elif n == "value": - option_value = v.strip(spaceCharacters) - if option_value is None: - raise NotImplementedError("