MythTV  master
common_api.py
Go to the documentation of this file.
1 # -*- coding: UTF-8 -*-
2 
3 # ----------------------
4 # Name: common_api.py - Common class libraries for all MythNetvision Mashup processing
5 # Python Script
6 # Author: R.D. Vaughan
7 # Purpose: This python script contains a number of common functions used for processing MythNetvision
8 # Grabbers.
9 #
10 # License:Creative Commons GNU GPL v2
11 # (http://creativecommons.org/licenses/GPL/2.0/)
12 #-------------------------------------
13 __title__ ="common_api - Common class libraries for all MythNetvision Mashup processing"
14 __author__="R.D. Vaughan"
15 __purpose__='''
16 This python script is intended to perform a variety of utility functions for the processing of
17 MythNetvision Grabber scripts that run as a Web application and global functions used by many
18 MNV grabbers.
19 '''
20 
21 __version__="v0.2.3"
22 # 0.0.1 Initial development
23 # 0.1.0 Alpha release
24 # 0.1.1 Added the ability to have a mashup name independant of the mashup title
25 # Added passing on the url the emml hostname and port so a mashup can call other emml mashups
26 # 0.1.2 Modifications to support launching single treeview requests for better integration with MNV
27 # subscription logic.
28 # With the change to allow MNV launching individual tree views the auto shutdown feature had to be
29 # disabled. Unless a safe work around can be found the feature may need to be removed entierly.
30 # 0.1.3 Modifications to support calling grabbers that run on a Web server
31 # Added a class of global functions that could be used by all grabbers
32 # 0.1.4 Changed the rating item element to default to be empty rather than "0.0"
33 # Changed the default logger to stderr
34 # 0.1.5 Added functions and data structures for common "Mashups" grabbers
35 # Changed the api name from "mashups_api" to "common_api"
36 # Added XSLT stylsheets as an alternate process option in the threaded URL download functions
37 # 0.1.6 Removed all logic associated with Web CGI calls as the MNV plugin is now on the backend
38 # Made the pubDate fucntion more adaptable to various input date strings
39 # 0.1.7 Added a common function to get the current selected language (default is 'en' English)
40 # 0.1.8 Fixed a bug with two string functions
41 # Added a customhtml reference for bliptv
42 # 0.1.9 Add a function that allows grabbers to check if an item is already in the data base. This is used
43 # to make grabbers more efficient when processing sources that are largely made up of the same
44 # data. This is particularly important when a grabber is forces to do additional Interent accesses
45 # to aquire all the needed MNV item data.
46 # Added a function that checks if there are any treeview items in the data base for a specific
47 # grabber. Some Mashup grabber's search option only returns results when then there are treeview
48 # items in the database.
49 # 0.2.0 Made the creation of custom HTML page links more flexible so code did not need to be changed
50 # when new custom HTML pages were added.
51 # 0.2.1 Add the ability for a parameters to be passed to a XSLT style sheet
52 # 0.2.2 Added a common XPath extention to test if a string starts or ends with a substring
53 # 0.2.3 Fixed Error messages that were not unicode strings
54 
55 import os, struct, sys, re, datetime, time, subprocess, string
56 import urllib.request, urllib.parse, urllib.error
57 import logging
58 import telnetlib
59 from threading import Thread
60 
61 from .common_exceptions import (WebCgiUrlError, WebCgiHttpError, WebCgiRssError, WebCgiVideoNotFound, WebCgiXmlError, )
62 import io
63 
class OutStreamEncoder(object):
    """Wrap an output stream and transparently encode unicode text.

    Any ``str`` passed to write() is encoded (default: the filesystem
    encoding) and the resulting bytes are written to the underlying
    stream's binary buffer.  Every other attribute access is delegated
    to the wrapped stream.
    """
    def __init__(self, outstream, encoding=None):
        self.out = outstream
        # Fall back to the filesystem encoding when none was supplied.
        self.encoding = encoding if encoding else sys.getfilesystemencoding()

    def write(self, obj):
        """Encode unicode text and write the bytes to the wrapped stream."""
        payload = obj.encode(self.encoding) if isinstance(obj, str) else obj
        self.out.buffer.write(payload)

    def __getattr__(self, attr):
        """Delegate everything but write to the stream."""
        return getattr(self.out, attr)
82 
# Re-wrap stdout/stderr so unicode text is always emitted as UTF-8 bytes,
# but only when stdout is still the interpreter's own text stream (a prior
# redirection/wrap would lack the .buffer attribute write() relies on).
# NOTE(review): only stdout's type is checked before wrapping both streams.
if isinstance(sys.stdout, io.TextIOWrapper):
    sys.stdout = OutStreamEncoder(sys.stdout, 'utf8')
    sys.stderr = OutStreamEncoder(sys.stderr, 'utf8')
86 
87 
# lxml is a hard requirement for every MNV grabber; abort immediately with
# a readable message (rather than a traceback) when it cannot be imported.
try:
    from io import StringIO
    from lxml import etree
except Exception as e:
    sys.stderr.write('\n! Error - Importing the "lxml" python library failed on error(%s)\n' % e)
    sys.exit(1)
94 
95 # Check that the lxml library is current enough
96 # From the lxml documents it states: (http://codespeak.net/lxml/installation.html)
97 # "If you want to use XPath, do not use libxml2 2.6.27. We recommend libxml2 2.7.2 or later"
98 # Testing was performed with the Ubuntu 9.10 "python-lxml" version "2.1.5-1ubuntu2" repository package
99 # >>> from lxml import etree
100 # >>> print "lxml.etree:", etree.LXML_VERSION
101 # lxml.etree: (2, 1, 5, 0)
102 # >>> print "libxml used:", etree.LIBXML_VERSION
103 # libxml used: (2, 7, 5)
104 # >>> print "libxml compiled:", etree.LIBXML_COMPILED_VERSION
105 # libxml compiled: (2, 6, 32)
106 # >>> print "libxslt used:", etree.LIBXSLT_VERSION
107 # libxslt used: (1, 1, 24)
108 # >>> print "libxslt compiled:", etree.LIBXSLT_COMPILED_VERSION
109 # libxslt compiled: (1, 1, 24)
110 
# Build a dotted version string (used only in the error message) and verify
# that the libxml2 behind lxml is at least 2.7.2 (older releases have known
# XPath problems).  The comparison is done on the version *tuple*: comparing
# the dotted string would order "2.10.0" before "2.7.2" lexicographically
# and wrongly reject newer libraries.
version = '.'.join(str(digit) for digit in etree.LIBXML_VERSION)
if etree.LIBXML_VERSION < (2, 7, 2):
    sys.stderr.write('''
! Error - The installed version of the "lxml" python library "libxml" version is too old.
 At least "libxml" version 2.7.2 must be installed. Your version is (%s).
''' % version)
    sys.exit(1)
121 
122 
123 
128 class Common(object):
129  """A collection of common functions used by many grabbers
130  """
131  def __init__(self,
132  logger=False,
133  debug=False,
134  ):
135  self.logger = logger
136  self.debug = debug
137  self.baseProcessingDir = os.path.dirname( os.path.realpath( __file__ )).replace('/nv_python_libs/common', '')
138  self.namespaces = {
139  'xsi': "http://www.w3.org/2001/XMLSchema-instance",
140  'media': "http://search.yahoo.com/mrss/",
141  'xhtml': "http://www.w3.org/1999/xhtml",
142  'atm': "http://www.w3.org/2005/Atom",
143  'mythtv': "http://www.mythtv.org/wiki/MythNetvision_Grabber_Script_Format",
144  'itunes':"http://www.itunes.com/dtds/podcast-1.0.dtd",
145  }
146  self.parsers = {
147  'xml': etree.XMLParser(remove_blank_text=True),
148  'html': etree.HTMLParser(remove_blank_text=True),
149  'xhtml': etree.HTMLParser(remove_blank_text=True),
150  }
151  self.pubDateFormat = '%a, %d %b %Y %H:%M:%S GMT'
152  self.mnvRSS = """
153 <rss version="2.0"
154  xmlns:itunes="http://www.itunes.com/dtds/podcast-1.0.dtd"
155  xmlns:content="http://purl.org/rss/1.0/modules/content/"
156  xmlns:cnettv="http://cnettv.com/mrss/"
157  xmlns:creativeCommons="http://backend.userland.com/creativeCommonsRssModule"
158  xmlns:media="http://search.yahoo.com/mrss/"
159  xmlns:atom="http://www.w3.org/2005/Atom"
160  xmlns:amp="http://www.adobe.com/amp/1.0"
161  xmlns:dc="http://purl.org/dc/elements/1.1/"
162  xmlns:mythtv="http://www.mythtv.org/wiki/MythNetvision_Grabber_Script_Format">
163 """
164  self.mnvItem = '''
165 <item>
166  <title></title>
167  <author></author>
168  <pubDate></pubDate>
169  <description></description>
170  <link></link>
171  <media:group xmlns:media="http://search.yahoo.com/mrss/">
172  <media:thumbnail url=''/>
173  <media:content url='' length='' duration='' width='' height='' lang=''/>
174  </media:group>
175  <rating></rating>
176 </item>
177 '''
178  # Season and Episode detection regex patterns
179  self.s_e_Patterns = [
180  # "Series 7 - Episode 4" or "Series 7 - Episode 4" or "Series 7: On Holiday: Episode 10"
181  re.compile(r'''^.+?Series\\ (?P<seasno>[0-9]+).*.+?Episode\\ (?P<epno>[0-9]+).*$''', re.UNICODE),
182  # Series 5 - 1
183  re.compile('''^.+?Series\\ (?P<seasno>[0-9]+)\\ \\-\\ (?P<epno>[0-9]+).*$''', re.UNICODE),
184  # Series 1 - Warriors of Kudlak - Part 2
185  re.compile('''^.+?Series\\ (?P<seasno>[0-9]+).*.+?Part\\ (?P<epno>[0-9]+).*$''', re.UNICODE),
186  # Series 3: Programme 3
187  re.compile('''^.+?Series\\ (?P<seasno>[0-9]+)\\:\\ Programme\\ (?P<epno>[0-9]+).*$''', re.UNICODE),
188  # Series 3:
189  re.compile('''^.+?Series\\ (?P<seasno>[0-9]+).*$''', re.UNICODE),
190  # Episode 1
191  re.compile('''^.+?Episode\\ (?P<seasno>[0-9]+).*$''', re.UNICODE),
192  # Title: "s18 | e87"
193  re.compile('''^.+?[Ss](?P<seasno>[0-9]+).*.+?[Ee](?P<epno>[0-9]+).*$''', re.UNICODE),
194  # Description: "season 1, episode 5"
195  re.compile('''^.+?season\\ (?P<seasno>[0-9]+).*.+?episode\\ (?P<epno>[0-9]+).*$''', re.UNICODE),
196  # Thumbnail: "http://media.thewb.com/thewb/images/thumbs/firefly/01/firefly_01_07.jpg"
197  re.compile('''(?P<seriesname>[^_]+)\\_(?P<seasno>[0-9]+)\\_(?P<epno>[0-9]+).*$''', re.UNICODE),
198  # Guid: "http://traffic.libsyn.com/divefilm/episode54hd.m4v"
199  re.compile('''^.+?episode(?P<epno>[0-9]+).*$''', re.UNICODE),
200  # Season 3, Episode 8
201  re.compile('''^.+?Season\\ (?P<seasno>[0-9]+).*.+?Episode\\ (?P<epno>[0-9]+).*$''', re.UNICODE),
202  # "Episode 1" anywhere in text
203  re.compile('''^.+?Episode\\ (?P<seasno>[0-9]+).*$''', re.UNICODE),
204  # "Episode 1" at the start of the text
205  re.compile('''Episode\\ (?P<seasno>[0-9]+).*$''', re.UNICODE),
206  # "--0027--" when the episode is in the URL link
207  re.compile('''^.+?--(?P<seasno>[0-9]+)--.*$''', re.UNICODE),
208  ]
209  self.nv_python_libs_path = 'nv_python_libs'
210  self.apiSuffix = '_api'
211  self.language = 'en'
212  self.mythdb = None
213  self.linksWebPage = None
214  self.etree = etree
215  # end __init__()
216 
217  def massageText(self, text):
218  '''Removes HTML markup from a text string.
219  @param text The HTML source.
220  @return The plain text. If the HTML source contains non-ASCII
221  entities or character references, this is a Unicode string.
222  '''
223  def fixup(m):
224  text = m.group(0)
225  if text[:1] == "<":
226  return "" # ignore tags
227  if text[:2] == "&#":
228  try:
229  if text[:3] == "&#x":
230  return chr(int(text[3:-1], 16))
231  else:
232  return chr(int(text[2:-1]))
233  except ValueError:
234  pass
235  elif text[:1] == "&":
236  import html.entities
237  entity = html.entities.entitydefs.get(text[1:-1])
238  if entity:
239  if entity[:2] == "&#":
240  try:
241  return chr(int(entity[2:-1]))
242  except ValueError:
243  pass
244  else:
245  return str(entity, "iso-8859-1")
246  return text # leave as is
247  return self.ampReplace(re.sub(r"(?s)<[^>]*>|&#?\w+;", fixup, self.textUtf8(text))).replace('\n',' ')
248  # end massageText()
249 
250 
251  def initLogger(self, path=sys.stderr, log_name='MNV_Grabber'):
252  """Setups a logger using the logging module, returns a logger object
253  """
254  logger = logging.getLogger(log_name)
255  formatter = logging.Formatter('%(asctime)s-%(levelname)s: %(message)s', '%Y-%m-%dT%H:%M:%S')
256 
257  if path == sys.stderr:
258  hdlr = logging.StreamHandler(sys.stderr)
259  else:
260  hdlr = logging.FileHandler('%s/%s.log' % (path, log_name))
261 
262  hdlr.setFormatter(formatter)
263  logger.addHandler(hdlr)
264 
265  if self.debug:
266  logger.setLevel(logging.DEBUG)
267  else:
268  logger.setLevel(logging.INFO)
269  self.logger = logger
270  return logger
271  #end initLogger
272 
273 
274  def textUtf8(self, text):
275  if text is None:
276  return text
277  try:
278  return str(text, 'utf8')
279  except UnicodeDecodeError:
280  return ''
281  except (UnicodeEncodeError, TypeError):
282  return text
283  # end textUtf8()
284 
285 
286  def ampReplace(self, text):
287  '''Replace all &, ', ", <, and > characters with the predefined XML
288  entities
289  '''
290  text = self.textUtf8(text)
291  text = text.replace('&amp;','~~~~~').replace('&','&amp;').replace('~~~~~', '&amp;')
292  text = text.replace("'", "&apos;").replace('"', '&quot;')
293  text = text.replace('<', '&lt;').replace('>', '&gt;')
294  return text
295  # end ampReplace()
296 
297  def callCommandLine(self, command, stderr=False):
298  '''Perform the requested command line and return an array of stdout strings and
299  stderr strings if stderr=True
300  return array of stdout string array or stdout and stderr string arrays
301  '''
302  stderrarray = []
303  stdoutarray = []
304  try:
305  p = subprocess.Popen(command, shell=True, bufsize=4096, stdin=subprocess.PIPE,
306  stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
307  except Exception as e:
308  if self.logger:
309  self.logger.error('callCommandLine Popen Exception, error(%s)' % e)
310  if stderr:
311  return [[], []]
312  else:
313  return []
314 
315  if stderr:
316  while True:
317  data = p.stderr.readline()
318  if not data:
319  break
320  try:
321  data = str(data, 'utf8')
322  except (UnicodeDecodeError):
323  continue # Skip any line is cannot be cast as utf8 characters
324  except (UnicodeEncodeError, TypeError):
325  pass
326  stderrarray.append(data)
327 
328  while True:
329  data = p.stdout.readline()
330  if not data:
331  break
332  try:
333  data = str(data, 'utf8')
334  except (UnicodeDecodeError):
335  continue # Skip any line that has non-utf8 characters in it
336  except (UnicodeEncodeError, TypeError):
337  pass
338  stdoutarray.append(data)
339 
340  if stderr:
341  return [stdoutarray, stderrarray]
342  else:
343  return stdoutarray
344  # end callCommandLine()
345 
346 
348  '''Get longitude and latitiude to find videos relative to your location. Up to three different
349  servers will be tried before giving up.
350  return a dictionary e.g.
351  {'Latitude': '43.6667', 'Country': 'Canada', 'Longitude': '-79.4167', 'City': 'Toronto'}
352  return an empty dictionary if there were any errors
353  Code found at: http://blog.suinova.com/2009/04/from-ip-to-geolocation-country-city.html
354  '''
355  def getExternalIP():
356  '''Find the external IP address of this computer.
357  '''
358  url = urllib.request.URLopener()
359  try:
360  resp = url.open('http://www.whatismyip.com/automation/n09230945.asp')
361  return resp.read()
362  except:
363  return None
364  # end getExternalIP()
365 
366  ip = getExternalIP()
367 
368  if ip is None:
369  return {}
370 
371  try:
372  gs = urllib.request.urlopen('http://blogama.org/ip_query.php?ip=%s&output=xml' % ip)
373  txt = gs.read()
374  except:
375  try:
376  gs = urllib.request.urlopen('http://www.seomoz.org/ip2location/look.php?ip=%s' % ip)
377  txt = gs.read()
378  except:
379  try:
380  gs = urllib.request.urlopen('http://api.hostip.info/?ip=%s' % ip)
381  txt = gs.read()
382  except:
383  logging.error('GeoIP servers not available')
384  return {}
385  try:
386  if txt.find('<Response>') > 0:
387  countrys = re.findall(r'<CountryName>([\w ]+)<',txt)[0]
388  citys = re.findall(r'<City>([\w ]+)<',txt)[0]
389  lats,lons = re.findall(r'<Latitude>([\d\-\.]+)</Latitude>\s*<Longitude>([\d\-\.]+)<',txt)[0]
390  elif txt.find('GLatLng') > 0:
391  citys,countrys = re.findall(r'<br />\s*([^<]+)<br />\s*([^<]+)<',txt)[0]
392  lats,lons = re.findall(r'LatLng\(([-\d\.]+),([-\d\.]+)',txt)[0]
393  elif txt.find('<gml:coordinates>') > 0:
394  citys = re.findall(r'<Hostip>\s*<gml:name>(\w+)</gml:name>',txt)[0]
395  countrys = re.findall(r'<countryName>([\w ,\.]+)</countryName>',txt)[0]
396  lats,lons = re.findall(r'gml:coordinates>([-\d\.]+),([-\d\.]+)<',txt)[0]
397  else:
398  logging.error('error parsing IP result %s'%txt)
399  return {}
400  return {'Country':countrys,'City':citys,'Latitude':lats,'Longitude':lons}
401  except:
402  logging.error('Error parsing IP result %s'%txt)
403  return {}
404  # end detectUserLocationByIP()
405 
406 
407  def displayCustomHTML(self):
408  """Common name for a custom HTML display. Used to interface with MythTV plugin NetVision
409  """
410  embedFlashVarFilter = etree.XPath('//embed', namespaces=self.namespaces)
411  variables = self.HTMLvideocode.split('?')
412 
413  url = '%s/nv_python_libs/configs/HTML/%s' % (baseProcessingDir, variables[0])
414  try:
415  customHTML = etree.parse(url)
416  except Exception as e:
417  raise Exception("! Error: The Custom HTML file (%s) cause the exception error (%s)\n" % (url, errormsg))
418 
419  # There may be one or more argumants to replace in the HTML code
420  # Example:
421  # "bbciplayer.html?AttribName1/FirstReplace=bc623bc?SecondReplace/AttribName2=wonderland/..."
422  for arg in variables[1:]:
423  (attrib, key_value) = arg.split('/')
424  (key, value) = key_value.split('=')
425  embedFlashVarFilter(customHTML)[0].attrib[attrib] = embedFlashVarFilter(customHTML)[0].attrib[attrib].replace(key, value)
426 
427  sys.stdout.write(etree.tostring(customHTML, encoding='UTF-8', pretty_print=True))
428 
429  sys.exit(0)
430  # end displayCustomHTML()
431 
432 
433  def mnvChannelElement(self, channelDetails):
434  ''' Create a MNV Channel element populated with channel details
435  return the channel element
436  '''
437  mnvChannel = etree.fromstring("""
438 <channel>
439  <title>%(channel_title)s</title>
440  <link>%(channel_link)s</link>
441  <description>%(channel_description)s</description>
442  <numresults>%(channel_numresults)d</numresults>
443  <returned>%(channel_returned)d</returned>
444  <startindex>%(channel_startindex)d</startindex>
445 </channel>
446 """ % channelDetails
447  )
448  return mnvChannel
449  # end mnvChannelElement()
450 
451  # Verify the a URL actually exists
452  def checkURL(self, url):
453  '''Verify that a URL actually exists. Be careful as redirects can lead to false positives. Use
454  the info details to be sure.
455  return True when it exists and info
456  return False when it does not exist and info
457  '''
458  urlOpened = urllib.request.urlopen(url)
459  code = urlOpened.getcode()
460  actualURL = urlOpened.geturl()
461  info = urlOpened.info()
462  urlOpened.close()
463  if code != 200:
464  return [False, info]
465  if url != actualURL:
466  return [False, info]
467  return [True, info]
468  # end checkURL()
469 
470 
    def getUrlData(self, inputUrls, pageFilter=None):
        ''' Fetch url data and extract the desired results using a dynamic filter or XSLT stylesheet.
        The URLs are requested in parallel using threading
        @param inputUrls element tree containing <url> elements, each with a
               <name>, <href>, optional <filter>/<xslt> children, a
               <parserType> and an optional <parameter>
        @param pageFilter optional filter used to detect "more pages"
        @return the extracted data organised into directories
        '''
        urlDictionary = {}

        if self.debug:
            print("inputUrls:")
            sys.stdout.write(etree.tostring(inputUrls, encoding='UTF-8', pretty_print=True))
            print()

        # Build one work-order dictionary entry per <url> element.
        for element in inputUrls.xpath('.//url'):
            key = element.find('name').text
            urlDictionary[key] = {}
            urlDictionary[key]['type'] = 'raw'
            urlDictionary[key]['href'] = element.find('href').text
            # <filter> children switch the entry to XPath extraction.
            urlFilter = element.findall('filter')
            if len(urlFilter):
                urlDictionary[key]['type'] = 'xpath'
                for index in range(len(urlFilter)):
                    urlFilter[index] = urlFilter[index].text
                urlDictionary[key]['filter'] = urlFilter
            # <xslt> children switch the entry to stylesheet processing; each
            # name refers to a .xsl file under configs/XSLT/.
            urlXSLT = element.findall('xslt')
            if len(urlXSLT):
                urlDictionary[key]['type'] = 'xslt'
                for index in range(len(urlXSLT)):
                    urlXSLT[index] = etree.XSLT(etree.parse('%s/nv_python_libs/configs/XSLT/%s.xsl' % (self.baseProcessingDir, urlXSLT[index].text)))
                urlDictionary[key]['xslt'] = urlXSLT
            urlDictionary[key]['pageFilter'] = pageFilter
            # Each entry gets its own parser copy - parsers are not shared
            # between threads.
            urlDictionary[key]['parser'] = self.parsers[element.find('parserType').text].copy()
            urlDictionary[key]['namespaces'] = self.namespaces
            urlDictionary[key]['result'] = []
            urlDictionary[key]['morePages'] = 'false'
            urlDictionary[key]['tmp'] = None
            urlDictionary[key]['tree'] = None
            if element.find('parameter') is not None:
                urlDictionary[key]['parameter'] = element.find('parameter').text

        if self.debug:
            print("urlDictionary:")
            print(urlDictionary)
            print()

        thread_list = []
        # The getURL worker threads share the dictionary through a class
        # attribute.  NOTE(review): getURL is defined elsewhere in this module.
        getURL.urlDictionary = urlDictionary

        # Single threaded (commented out) - Only used to prove that multi-threading does
        # not cause data corruption
#        for key in urlDictionary.keys():
#            current = getURL(key, self.debug)
#            thread_list.append(current)
#            current.start()
#            current.join()

        # Multi-threaded
        for key in list(urlDictionary.keys()):
            current = getURL(key, self.debug)
            thread_list.append(current)
            current.start()
        for thread in thread_list:
            thread.join()

        # Take the results and make the return element tree
        root = etree.XML("<xml></xml>")
        for key in sorted(getURL.urlDictionary.keys()):
            if not len(getURL.urlDictionary[key]['result']):
                continue
            results = etree.SubElement(root, "results")
            etree.SubElement(results, "name").text = key
            etree.SubElement(results, "url").text = urlDictionary[key]['href']
            etree.SubElement(results, "type").text = urlDictionary[key]['type']
            etree.SubElement(results, "pageInfo").text = getURL.urlDictionary[key]['morePages']
            result = etree.SubElement(results, "result")
            # NOTE(review): 'filter'/'xslt' keys are only set above when
            # configured - presumably the getURL worker supplies defaults;
            # verify against the getURL class.
            if len(getURL.urlDictionary[key]['filter']):
                for index in range(len(getURL.urlDictionary[key]['result'])):
                    for element in getURL.urlDictionary[key]['result'][index]:
                        result.append(element)
            elif len(getURL.urlDictionary[key]['xslt']):
                for index in range(len(getURL.urlDictionary[key]['result'])):
                    for element in getURL.urlDictionary[key]['result'][index].getroot():
                        result.append(element)
            else:
                for element in getURL.urlDictionary[key]['result'][0].xpath('/*'):
                    result.append(element)

        if self.debug:
            print("root:")
            sys.stdout.write(etree.tostring(root, encoding='UTF-8', pretty_print=True))
            print()

        return root
        # end getShows()
564 
565 
568  def buildFunctionDict(self):
569  ''' Create a dictionary of functions that manipulate items data. These functions are imported
570  from other MNV grabbers. These functions are meant to be used by the MNV WebCgi type of grabber
571  which aggregates data from a number of different sources (e.g. RSS feeds and HTML Web pages)
572  including sources from other grabbers.
573  Using a dictionary facilitates mixing XSLT functions with pure python functions to use the best
574  capabilities of both technologies when translating source information into MNV compliant item
575  data.
576  return nothing
577  '''
578  # Add the common XPath extention functions
579  self.functionDict = {
580  'pubDate': self.pubDate,
581  'getSeasonEpisode': self.getSeasonEpisode,
582  'convertDuration': self.convertDuration,
583  'getHtmlData': self.getHtmlData,
584  'linkWebPage': self.linkWebPage,
585  'baseDir': self.baseDir,
586  'stringLower': self.stringLower,
587  'stringUpper': self.stringUpper,
588  'stringReplace': self.stringReplace,
589  'stringEscape': self.stringEscape,
590  'removePunc': self.removePunc,
591  'htmlToString': self.htmlToString,
592  'checkIfDBItem': self.checkIfDBItem,
593  'getItemElement': self.getItemElement,
594  'getDBRecords': self.getDBRecords,
595  'createItemElement': self.createItemElement,
596  'testSubString': self.testSubString,
597  }
598  # Get the specific source functions
599  self.addDynamicFunctions('xsltfunctions')
600  return
601  # end buildFunctionDict()
602 
    def addDynamicFunctions(self, dirPath):
        ''' Dynamically add functions to the function dictionary from a specified directory
        @param dirPath sub-directory of nv_python_libs to scan for modules
        return nothing
        '''
        fullPath = '%s/nv_python_libs/%s' % (self.baseProcessingDir, dirPath)
        sys.path.append(fullPath)
        # Make a list of all functions that need to be included
        fileList = []
        for fPath in os.listdir(fullPath):
            filepath, filename = os.path.split( fPath )
            filename, ext = os.path.splitext( filename )
            if filename == '__init__':
                continue
            if ext != '.py':
                continue
            fileList.append(filename)

        # Do not stop when there is an abort on a library just send an error message to stderr
        for fileName in fileList:
            filename = {'filename': fileName, }
            try:
                # Import the module, hand it a reference to this Common
                # instance, then register every advertised XPath class
                # method and XSLT extension in self.functionDict.  The "%%"
                # escapes survive the outer %-substitution so that the
                # nested exec/format calls see single "%" placeholders.
                exec('''
import %(filename)s
%(filename)s.common = self
for xpathClass in %(filename)s.__xpathClassList__:
    exec(u'xpathClass = %(filename)s.%%s()' %% xpathClass)
    for func in xpathClass.functList:
        exec("self.functionDict['%%s'] = %%s" %% (func, u'xpathClass.%%s' %% func))
for xsltExtension in %(filename)s.__xsltExtentionList__:
    exec("self.functionDict['%%s'] = %%s" %% (xsltExtension, u'%(filename)s.%%s' %% xsltExtension))''' % filename )
            except Exception as errmsg:
                sys.stderr.write('! Error: Dynamic import of (%s) XPath and XSLT extention functions\nmessage(%s)\n' % (fileName, errmsg))

        return
        # end addDynamicFunctions()
638 
    def pubDate(self, context, *inputArgs):
        '''Convert a date/time string in a specified format into a pubDate. The default is the
        MNV item format
        inputArgs: (dateString[, strptimeFormat[, outputFormat]])
        @return the formatted pubDate string
        @return on error return the original date string
        '''
        args = []
        for arg in inputArgs:
            args.append(arg)
        if args[0] == '':
            # No date supplied - use "now" in the MNV pubDate format.
            return datetime.datetime.now().strftime(self.pubDateFormat)
        index = args[0].find('+')
        if index == -1:
            index = args[0].find('-')
        # Strip a trailing "+hhmm"/"-hhmm" timezone offset; the "index > 5"
        # guard avoids clipping dashes that are part of the date itself.
        if index != -1 and index > 5:
            args[0] = args[0][:index].strip()
        args[0] = args[0].replace(',', '').replace('.', '')
        try:
            if len(args) > 1:
                args[1] = args[1].replace(',', '').replace('.', '')
                if args[1].find('GMT') != -1:
                    # Drop the "GMT" token from the format and the trailing
                    # zone word from the date string.
                    args[1] = args[1][:args[1].find('GMT')].strip()
                    args[0] = args[0][:args[0].rfind(' ')].strip()
                try:
                    pubdate = time.strptime(args[0], args[1])
                except ValueError:
                    # Retry with abbreviated/full month names swapped.
                    if args[1] == '%a %d %b %Y %H:%M:%S':
                        pubdate = time.strptime(args[0], '%a %d %B %Y %H:%M:%S')
                    elif args[1] == '%a %d %B %Y %H:%M:%S':
                        pubdate = time.strptime(args[0], '%a %d %b %Y %H:%M:%S')
                    # NOTE(review): any other format leaves "pubdate" unbound;
                    # the NameError is caught by the broad except below and
                    # the original date string is returned.
                if len(args) > 2:
                    return time.strftime(args[2], pubdate)
                else:
                    return time.strftime(self.pubDateFormat, pubdate)
            else:
                # Only a date string was supplied - current time is returned.
                return datetime.datetime.now().strftime(self.pubDateFormat)
        except Exception as err:
            sys.stderr.write('! Error: pubDate variables(%s) error(%s)\n' % (args, err))
            return args[0]
        # end pubDate()
679 
680  def getSeasonEpisode(self, context, text):
681  ''' Check is there is any season or episode number information in an item's text
682  return a string of season and/or episode numbers e.g. "2_21"
683  return a string with "None_None" values
684  '''
685  s_e = [None, None]
686  for regexPattern in self.s_e_Patterns:
687  match = regexPattern.match(text)
688  if not match:
689  continue
690  season_episode = match.groups()
691  if len(season_episode) > 1:
692  s_e[0] = season_episode[0]
693  s_e[1] = season_episode[1]
694  else:
695  s_e[1] = season_episode[0]
696  return '%s_%s' % (s_e[0], s_e[1])
697  return '%s_%s' % (s_e[0], s_e[1])
698  # end getSeasonEpisode()
699 
700  def convertDuration(self, context, duration):
701  ''' Take a duration and convert it to seconds
702  return a string of seconds
703  '''
704  min_sec = duration.split(':')
705  seconds = 0
706  for count in range(len(min_sec)):
707  if count != len(min_sec)-1:
708  seconds+=int(min_sec[count])*(60*(len(min_sec)-count-1))
709  else:
710  seconds+=int(min_sec[count])
711  return '%s' % seconds
712  # end convertDuration()
713 
714  def getHtmlData(self, context, *args):
715  ''' Take a HTML string and convert it to an HTML element. Then apply a filter and return
716  that value.
717  return filter value as a string
718  return an empty sting if the filter failed to find any values.
719  '''
720  xpathFilter = None
721  if len(args) > 1:
722  xpathFilter = args[0]
723  htmldata = args[1]
724  else:
725  htmldata = args[0]
726  htmlElement = etree.HTML(htmldata)
727  if not xpathFilter:
728  return htmlElement
729  filteredData = htmlElement.xpath(xpathFilter)
730  if len(filteredData):
731  if xpathFilter.find('@') != -1:
732  return filteredData[0]
733  else:
734  return filteredData[0].text
735  return ''
736  # end getHtmlData()
737 
738  def linkWebPage(self, context, sourceLink):
739  ''' Check if there is a special local HTML page for the link. If not then return a generic
740  download only local HTML url.
741  return a file://.... link to a local HTML web page
742  '''
743  # Currently there are no link specific Web pages
744  if not self.linksWebPage:
745  self.linksWebPage = etree.parse('%s/nv_python_libs/configs/XML/customeHtmlPageList.xml' % (self.baseProcessingDir, ))
746  if self.linksWebPage.find(sourceLink) is not None:
747  return 'file://%s/nv_python_libs/configs/HTML/%s' % (self.baseProcessingDir, self.linksWebPage.find(sourceLink).text)
748  return 'file://%s/nv_python_libs/configs/HTML/%s' % (self.baseProcessingDir, 'nodownloads.html')
749  # end linkWebPage()
750 
    def baseDir(self, context, dummy):
        ''' Return the base directory string.
        The *context* and *dummy* parameters are required by the XPath
        extension calling convention but are unused.
        @return the base directory
        '''
        return self.baseProcessingDir
        # end baseDir()
757 
758  def stringLower(self, context, data):
759  '''
760  return a lower case string
761  '''
762  if not len(data):
763  return ''
764  return data[0].lower()
765  # end stringLower()
766 
767  def stringUpper(self, context, data):
768  '''
769  return a upper case string
770  '''
771  if not len(data):
772  return ''
773  return data[0].upper()
774  # end stringUpper()
775 
776  def stringReplace(self, context, *inputArgs):
777  ''' Replace substring values in a string
778  return the resulting string from a replace operation
779  '''
780  args = []
781  for arg in inputArgs:
782  args.append(arg)
783  if not len(args) or len(args) == 1:
784  return data
785  if len(args) == 2:
786  args[0] = args[0].replace(args[1], "")
787  else:
788  args[0] = args[0].replace(args[1], args[2])
789  return args[0].strip()
790  # end stringReplace()
791 
792  def stringEscape(self, context, *args):
793  ''' Replace substring values in a string
794  return the resulting string from a replace operation
795  '''
796  if not len(args):
797  return ""
798  if len(args) == 1:
799  return urllib.parse.quote_plus(args[0].encode("utf-8"))
800  else :
801  return urllib.parse.quote_plus(args[0].encode("utf-8"), args[1])
802  # end stringEscape()
803 
804  def removePunc(self, context, data):
805  ''' Remove all punctuation for a string
806  return the resulting string
807  '''
808  if not len(data):
809  return ""
810  return re.sub('[%s]' % re.escape(string.punctuation), '', data)
811  # end removePunc()
812 
813  def htmlToString(self, context, html):
814  ''' Remove HTML tags and LFs from a string
815  return the string without HTML tags or LFs
816  '''
817  if not len(html):
818  return ""
819  return self.massageText(html).strip().replace('\n', ' ').replace('’', "&apos;").replace('“', "&apos;")
820  # end htmlToString()
821 
    def getLanguage(self, context, args):
        ''' Return the current selected language code (default is 'en').
        The *context* and *args* parameters are required by the XPath
        extension calling convention but are unused.
        @return language code
        '''
        return self.language
        # end getLanguage()
828 
829  def checkIfDBItem(self, context, arg):
830  ''' Find an 'internetcontentarticles' table record based on fields and values
831  return True if a record was found and an item element created
832  return False if no record was found
833  '''
834  results = self.getDBRecords('dummy', arg)
835  if len(results):
836  self.itemElement = self.createItemElement('dummy', results[0])
837  return True
838  return False
839  # end checkIfDBItem()
840 
    def getItemElement(self, context, arg):
        ''' Return an item element that was created by a previous call to the checkIfDBItem function.
        The *context* and *arg* parameters are required by the XPath
        extension calling convention but are unused.
        '''
        return self.itemElement
        # end getItemElement()
846 
847  def testSubString(self, context, *arg):
848  ''' Return True or False if a substring is at the beginning or end of a string
849  '''
850  if arg[0] == 'starts':
851  return arg[1].startswith(arg[2])
852  elif arg[0] == 'ends':
853  return arg[1].endswith(arg[2])
854  else:
855  index = arg[1].find(arg[2])
856  if index == -1:
857  return False
858  else:
859  return True
860  # end testSubString()
861 
862  def getDBRecords(self, context, *arg):
863  ''' Return a list of 'internetcontentarticles' table records based on field and value matches
864  '''
865  if not self.mythdb:
866  self.initializeMythDB()
867  self.itemThumbnail = etree.XPath('.//media:thumbnail', namespaces=self.namespaces)
868  self.itemContent = etree.XPath('.//media:content', namespaces=self.namespaces)
869  # Encode the search text to UTF-8
870  for key in list(arg[0].keys()):
871  try:
872  arg[0][key] = arg[0][key].encode('UTF-8')
873  except:
874  return []
875  return list(self.mythdb.searchInternetContent(**arg[0]))
876  # end getDBItem()
877 
    def createItemElement(self, context, *arg):
        ''' Create an item element from an 'internetcontentarticles' table record dictionary
        arg[0]: a record dictionary as returned by getDBRecords()
        return the item element (built from the self.mnvItem template)
        '''
        result = arg[0]
        # Start from the MNV item XML template.
        itemElement = etree.XML(self.mnvItem)
        # Insert data into a new item element
        itemElement.find('link').text = result['url']
        if result['title']:
            itemElement.find('title').text = result['title']
        if result['subtitle']:
            etree.SubElement(itemElement, "subtitle").text = result['subtitle']
        if result['description']:
            itemElement.find('description').text = result['description']
        if result['author']:
            itemElement.find('author').text = result['author']
        if result['date']:
            itemElement.find('pubDate').text = result['date'].strftime(self.pubDateFormat)
        # '32576' presumably marks "no rating" and a leading '-' a negative
        # sentinel — confirm against the DB schema.
        # NOTE(review): result['rating'][0] raises IndexError when rating is
        # an empty string — verify ratings are never empty.
        if result['rating'] != '32576' and result['rating'][0] != '-':
            itemElement.find('rating').text = result['rating']
        # self.itemThumbnail / self.itemContent are the compiled XPath helpers
        # set up by getDBRecords(); each selects the matching media:* element
        # in the template so its attributes can be filled in.
        if result['thumbnail']:
            self.itemThumbnail(itemElement)[0].attrib['url'] = result['thumbnail']
        if result['mediaURL']:
            self.itemContent(itemElement)[0].attrib['url'] = result['mediaURL']
        # Numeric media attributes are only emitted when positive.
        if result['filesize'] > 0:
            self.itemContent(itemElement)[0].attrib['length'] = str(result['filesize'])
        if result['time'] > 0:
            self.itemContent(itemElement)[0].attrib['duration'] = str(result['time'])
        if result['width'] > 0:
            self.itemContent(itemElement)[0].attrib['width'] = str(result['width'])
        if result['height'] > 0:
            self.itemContent(itemElement)[0].attrib['height'] = str(result['height'])
        if result['language']:
            self.itemContent(itemElement)[0].attrib['lang'] = result['language']
        # MNV-namespaced extension elements (season/episode/customhtml/country).
        if result['season'] > 0:
            etree.SubElement(itemElement, "{http://www.mythtv.org/wiki/MythNetvision_Grabber_Script_Format}season").text = str(result['season'])
        if result['episode'] > 0:
            etree.SubElement(itemElement, "{http://www.mythtv.org/wiki/MythNetvision_Grabber_Script_Format}episode").text = str(result['episode'])
        if result['customhtml'] == 1:
            etree.SubElement(itemElement, "{http://www.mythtv.org/wiki/MythNetvision_Grabber_Script_Format}customhtml").text = 'true'
        if result['countries']:
            # Countries are stored as a single space-separated string; emit
            # one element per country.
            countries = result['countries'].split(' ')
            for country in countries:
                etree.SubElement(itemElement, "{http://www.mythtv.org/wiki/MythNetvision_Grabber_Script_Format}country").text = country
        return itemElement
    # end createItemElement()
924 
    def initializeMythDB(self):
        ''' Import the MythTV database bindings and open a database connection
        in self.mythdb.
        On failure, writes a diagnostic to stderr and exits the process with
        status 1 (sys.exit) rather than raising to the caller.
        return nothing
        '''
        try:
            from MythTV import MythDB, MythLog, MythError
            try:
                '''Create an instance of each: MythDB
                '''
                MythLog._setlevel('none') # Some non option -M cannot have any logging on stdout
                self.mythdb = MythDB()
            except MythError as e:
                # Most likely a missing or broken ~/.mythtv/config.xml; report
                # which of the two cases applies before exiting.
                sys.stderr.write('\n! Error - %s\n' % e.args[0])
                filename = os.path.expanduser("~")+'/.mythtv/config.xml'
                if not os.path.isfile(filename):
                    sys.stderr.write('\n! Error - A correctly configured (%s) file must exist\n' % filename)
                else:
                    sys.stderr.write('\n! Error - Check that (%s) is correctly configured\n' % filename)
                sys.exit(1)
            except Exception as e:
                # Any other failure while constructing MythDB.
                sys.stderr.write("\n! Error - Creating an instance caused an error for one of: MythDB. error(%s)\n" % e)
                sys.exit(1)
        except Exception as e:
            # The bindings themselves could not be imported.
            sys.stderr.write("\n! Error - MythTV python bindings could not be imported. error(%s)\n" % e)
            sys.exit(1)
    # end initializeMythDB()
951 
952 
953 
957 
class getURL(Thread):
    ''' Threaded download of a URL and filter out the desired data for XML and (X)HTML
    return the filter results

    NOTE(review): self.urlDictionary is never assigned in this class; it is
    expected to be supplied externally (presumably by Common.getUrlData)
    before the thread is started — confirm against the caller.
    '''
    def __init__ (self, urlKey, debug):
        ''' urlKey: key into the shared urlDictionary identifying this thread's URL entry
        debug:  when True, print the raw downloaded data to stdout
        '''
        Thread.__init__(self)
        self.urlKey = urlKey
        self.debug = debug

    def run(self):
        ''' Download and parse the URL, then reduce the tree through either the
        XPath 'filter' expressions or the 'xslt' stylesheets (or pass the raw
        tree through unchanged), appending each result to the entry's 'result'
        list. Sets 'morePages' to 'true' when the optional 'pageFilter'
        expression matches. All results are communicated back through the
        shared urlDictionary entry.
        '''
        entry = self.urlDictionary[self.urlKey]

        if self.debug:
            print("getURL href(%s)" % (entry['href'], ))
            print()

        # Input the data from a url
        try:
            entry['tree'] = etree.parse(entry['href'], entry['parser'])
        except Exception as errormsg:
            sys.stderr.write("! Error: The URL (%s) cause the exception error (%s)\n" % (entry['href'], errormsg))
            return

        if self.debug:
            print("Raw unfiltered URL input:")
            sys.stdout.write(etree.tostring(entry['tree'], encoding='UTF-8', pretty_print=True))
            print()

        if len(entry['filter']):
            for index in range(len(entry['filter'])):
                # Filter out the desired data
                try:
                    entry['tmp'] = entry['tree'].xpath(entry['filter'][index], namespaces=entry['namespaces'])
                except AssertionError as e:
                    sys.stderr.write("No filter results for Name(%s)\n" % self.urlKey)
                    sys.stderr.write("No filter results for url(%s)\n" % entry['href'])
                    sys.stderr.write("! Error:(%s)\n" % e)
                    # Bug fix: the original tested "len(filter) == index-1",
                    # which is never true for a valid loop index, so the early
                    # return on the last filter was dead code.
                    if index == len(entry['filter']) - 1:
                        return
                    continue
                entry['result'].append(entry['tmp'])
        elif len(entry['xslt']):
            for index in range(len(entry['xslt'])):
                # Process the tree through an XSLT stylesheet to extract the desired data
                try:
                    if 'parameter' in entry:
                        entry['tmp'] = entry['xslt'][index](entry['tree'], paraMeter=etree.XSLT.strparam(entry['parameter']))
                    else:
                        entry['tmp'] = entry['xslt'][index](entry['tree'])
                except Exception as e:
                    sys.stderr.write("! XSLT Error:(%s) Key(%s)\n" % (e, self.urlKey))
                    # Bug fix: the original compared against the 'filter' list
                    # (copy/paste from the branch above) and carried the same
                    # dead "index-1" off-by-one; test the 'xslt' list instead.
                    if index == len(entry['xslt']) - 1:
                        return
                    continue
                # Was any data found?
                if entry['tmp'].getroot() is None:
                    sys.stderr.write("No Xslt results for Name(%s)\n" % self.urlKey)
                    sys.stderr.write("No Xslt results for url(%s)\n" % entry['href'])
                    # Same off-by-one / wrong-list fix as above.
                    if index == len(entry['xslt']) - 1:
                        return
                    continue
                entry['result'].append(entry['tmp'])
        else:
            # Just pass back the raw data
            entry['result'] = [entry['tree']]

        # Check whether there are more pages available
        if entry['pageFilter']:
            if len(entry['tree'].xpath(entry['pageFilter'], namespaces=entry['namespaces'])):
                entry['morePages'] = 'true'
        return
    # end run()
# end class getURL()
1033 
1034 
nv_python_libs.common.common_api.OutStreamEncoder.write
def write(self, obj)
Definition: common_api.py:73
nv_python_libs.common.common_api.Common.itemThumbnail
itemThumbnail
Definition: common_api.py:867
nv_python_libs.common.common_api.Common.linkWebPage
def linkWebPage(self, context, sourceLink)
Definition: common_api.py:738
nv_python_libs.common.common_api.Common.itemContent
itemContent
Definition: common_api.py:868
nv_python_libs.common.common_api.getURL.debug
debug
Definition: common_api.py:965
error
static void error(const char *str,...)
Definition: vbi.cpp:36
nv_python_libs.common.common_api.Common.convertDuration
def convertDuration(self, context, duration)
Definition: common_api.py:700
nv_python_libs.common.common_api.Common.parsers
parsers
Definition: common_api.py:143
nv_python_libs.common.common_api.getURL.__init__
def __init__(self, urlKey, debug)
Definition: common_api.py:962
nv_python_libs.common.common_api.Common.logger
logger
Definition: common_api.py:132
nv_python_libs.common.common_api.Common.detectUserLocationByIP
def detectUserLocationByIP(self)
Definition: common_api.py:347
nv_python_libs.common.common_api.Common.etree
etree
Definition: common_api.py:211
nv_python_libs.common.common_api.Common.ampReplace
def ampReplace(self, text)
Definition: common_api.py:286
nv_python_libs.common.common_api.Common.addDynamicFunctions
def addDynamicFunctions(self, dirPath)
Definition: common_api.py:603
nv_python_libs.common.common_api.Common.removePunc
def removePunc(self, context, data)
Definition: common_api.py:804
nv_python_libs.common.common_api.Common.mnvChannelElement
def mnvChannelElement(self, channelDetails)
Definition: common_api.py:433
nv_python_libs.common.common_api.Common.getItemElement
def getItemElement(self, context, arg)
Definition: common_api.py:841
nv_python_libs.common.common_api.Common.linksWebPage
linksWebPage
Definition: common_api.py:210
nv_python_libs.common.common_api.Common.debug
debug
Definition: common_api.py:133
nv_python_libs.common.common_api.Common.getUrlData
def getUrlData(self, inputUrls, pageFilter=None)
Definition: common_api.py:471
nv_python_libs.common.common_api.Common.callCommandLine
def callCommandLine(self, command, stderr=False)
Definition: common_api.py:297
nv_python_libs.common.common_api.Common.s_e_Patterns
s_e_Patterns
Definition: common_api.py:176
nv_python_libs.common.common_api.getURL
Definition: common_api.py:958
nv_python_libs.common.common_api.Common.getDBRecords
def getDBRecords(self, context, *arg)
Definition: common_api.py:862
nv_python_libs.common.common_api.Common.getSeasonEpisode
def getSeasonEpisode(self, context, text)
Definition: common_api.py:680
nv_python_libs.common.common_api.Common.pubDateFormat
pubDateFormat
Definition: common_api.py:148
nv_python_libs.common.common_api.Common.checkURL
def checkURL(self, url)
Definition: common_api.py:452
MythFile::copy
MBASE_PUBLIC long long copy(QFile &dst, QFile &src, uint block_size=0)
Copies src file to dst file.
Definition: mythmiscutil.cpp:264
nv_python_libs.common.common_api.Common.testSubString
def testSubString(self, context, *arg)
Definition: common_api.py:847
nv_python_libs.common.common_api.Common.massageText
def massageText(self, text)
Definition: common_api.py:217
nv_python_libs.common.common_api.Common.apiSuffix
apiSuffix
Definition: common_api.py:207
MythDB
Definition: mythdb.h:15
nv_python_libs.common.common_api.getURL.run
def run(self)
Definition: common_api.py:967
nv_python_libs.common.common_api.Common.functionDict
functionDict
Definition: common_api.py:579
nv_python_libs.common.common_api.Common.mnvItem
mnvItem
Definition: common_api.py:161
nv_python_libs.common.common_api.Common.initializeMythDB
def initializeMythDB(self)
Definition: common_api.py:925
nv_python_libs.common.common_api.Common.baseProcessingDir
baseProcessingDir
Definition: common_api.py:134
nv_python_libs.common.common_api.Common.htmlToString
def htmlToString(self, context, html)
Definition: common_api.py:813
nv_python_libs.common.common_api.Common.baseDir
def baseDir(self, context, dummy)
Definition: common_api.py:751
print
static void print(const QList< uint > &raw_minimas, const QList< uint > &raw_maximas, const QList< float > &minimas, const QList< float > &maximas)
Definition: vbi608extractor.cpp:29
nv_python_libs.common.common_api.Common.createItemElement
def createItemElement(self, context, *arg)
Definition: common_api.py:878
nv_python_libs.common.common_api.Common.initLogger
def initLogger(self, path=sys.stderr, log_name='MNV_Grabber')
Definition: common_api.py:251
nv_python_libs.common.common_api.Common.stringReplace
def stringReplace(self, context, *inputArgs)
Definition: common_api.py:776
nv_python_libs.common.common_api.Common.buildFunctionDict
def buildFunctionDict(self)
Start - Utility functions specifically used to modify MNV item data.
Definition: common_api.py:568
nv_python_libs.common.common_api.Common.nv_python_libs_path
nv_python_libs_path
Definition: common_api.py:206
nv_python_libs.common.common_api.Common.checkIfDBItem
def checkIfDBItem(self, context, arg)
Definition: common_api.py:829
nv_python_libs.common.common_api.Common.stringUpper
def stringUpper(self, context, data)
Definition: common_api.py:767
nv_python_libs.common.common_api.Common.getLanguage
def getLanguage(self, context, args)
Definition: common_api.py:822
nv_python_libs.common.common_api.Common.mythdb
mythdb
Definition: common_api.py:209
nv_python_libs.common.common_api.OutStreamEncoder
Definition: common_api.py:64
nv_python_libs.common.common_api.Common
Start - Utility functions.
Definition: common_api.py:128
nv_python_libs.common.common_api.OutStreamEncoder.__getattr__
def __getattr__(self, attr)
Definition: common_api.py:79
nv_python_libs.common.common_api.Common.namespaces
namespaces
Definition: common_api.py:135
nv_python_libs.common.common_api.Common.mnvRSS
mnvRSS
Definition: common_api.py:149
nv_python_libs.common.common_api.Common.stringLower
def stringLower(self, context, data)
Definition: common_api.py:758
nv_python_libs.common.common_api.getURL.urlKey
urlKey
Definition: common_api.py:964
nv_python_libs.common.common_api.Common.language
language
Definition: common_api.py:208
nv_python_libs.common.common_api.Common.__init__
def __init__(self, logger=False, debug=False)
Definition: common_api.py:131
nv_python_libs.common.common_api.Common.pubDate
def pubDate(self, context, *inputArgs)
Definition: common_api.py:639
nv_python_libs.common.common_api.Common.textUtf8
def textUtf8(self, text)
Definition: common_api.py:274
nv_python_libs.common.common_api.Common.itemElement
itemElement
Definition: common_api.py:836
nv_python_libs.common.common_api.OutStreamEncoder.encoding
encoding
Definition: common_api.py:69
nv_python_libs.common.common_api.OutStreamEncoder.__init__
def __init__(self, outstream, encoding=None)
Definition: common_api.py:66
nv_python_libs.common.common_api.Common.displayCustomHTML
def displayCustomHTML(self)
Definition: common_api.py:407
nv_python_libs.common.common_api.Common.getHtmlData
def getHtmlData(self, context, *args)
Definition: common_api.py:714
nv_python_libs.common.common_api.Common.stringEscape
def stringEscape(self, context, *args)
Definition: common_api.py:792
find
static pid_list_t::iterator find(const PIDInfoMap &map, pid_list_t &list, pid_list_t::iterator begin, pid_list_t::iterator end, bool find_open)
Definition: dvbstreamhandler.cpp:363
nv_python_libs.common.common_api.OutStreamEncoder.out
out
Definition: common_api.py:67