handlers.py 61 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403140414051406140714081409141014111412141314141415141614171418141914201421142214231424142514261427142814291430143114321433143414351436143714381439144014411442144314441445144614471448144914501451145214531454145514561457145814591460146114621463146414651466146714681469147014711472147314741475147614771478147914801481148214831484148514861487148814891490149114921493149414951496149714981499150015011502150315041505150615071508150915101511151215131514151515161517151815191520152115221523152415251526152715281529153015311532153315341535153615371538153915401541154215431544154515461547154815491550155115521553155415551556155715581559156015611562156315641565156615671568156915701571157215731574157515761577157815791580158115821583158415851586158715881589159015911592159315941595159615971598159916001601160216031604160516061607160816091610
  1. # Copyright 2001-2021 by Vinay Sajip. All Rights Reserved.
  2. #
  3. # Permission to use, copy, modify, and distribute this software and its
  4. # documentation for any purpose and without fee is hereby granted,
  5. # provided that the above copyright notice appear in all copies and that
  6. # both that copyright notice and this permission notice appear in
  7. # supporting documentation, and that the name of Vinay Sajip
  8. # not be used in advertising or publicity pertaining to distribution
  9. # of the software without specific, written prior permission.
  10. # VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
  11. # ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
  12. # VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
  13. # ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
  14. # IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
  15. # OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  16. """
  17. Additional handlers for the logging package for Python. The core package is
  18. based on PEP 282 and comments thereto in comp.lang.python.
  19. Copyright (C) 2001-2021 Vinay Sajip. All Rights Reserved.
  20. To use, simply 'import logging.handlers' and log away!
  21. """
  22. import io, logging, socket, os, pickle, struct, time, re
  23. from stat import ST_DEV, ST_INO, ST_MTIME
  24. import queue
  25. import threading
  26. import copy
  27. #
  28. # Some constants...
  29. #
  30. DEFAULT_TCP_LOGGING_PORT = 9020
  31. DEFAULT_UDP_LOGGING_PORT = 9021
  32. DEFAULT_HTTP_LOGGING_PORT = 9022
  33. DEFAULT_SOAP_LOGGING_PORT = 9023
  34. SYSLOG_UDP_PORT = 514
  35. SYSLOG_TCP_PORT = 514
  36. _MIDNIGHT = 24 * 60 * 60 # number of seconds in a day
  37. class BaseRotatingHandler(logging.FileHandler):
  38. """
  39. Base class for handlers that rotate log files at a certain point.
  40. Not meant to be instantiated directly. Instead, use RotatingFileHandler
  41. or TimedRotatingFileHandler.
  42. """
  43. namer = None
  44. rotator = None
  45. def __init__(self, filename, mode, encoding=None, delay=False, errors=None):
  46. """
  47. Use the specified filename for streamed logging
  48. """
  49. logging.FileHandler.__init__(self, filename, mode=mode,
  50. encoding=encoding, delay=delay,
  51. errors=errors)
  52. self.mode = mode
  53. self.encoding = encoding
  54. self.errors = errors
  55. def emit(self, record):
  56. """
  57. Emit a record.
  58. Output the record to the file, catering for rollover as described
  59. in doRollover().
  60. """
  61. try:
  62. if self.shouldRollover(record):
  63. self.doRollover()
  64. logging.FileHandler.emit(self, record)
  65. except Exception:
  66. self.handleError(record)
  67. def rotation_filename(self, default_name):
  68. """
  69. Modify the filename of a log file when rotating.
  70. This is provided so that a custom filename can be provided.
  71. The default implementation calls the 'namer' attribute of the
  72. handler, if it's callable, passing the default name to
  73. it. If the attribute isn't callable (the default is None), the name
  74. is returned unchanged.
  75. :param default_name: The default name for the log file.
  76. """
  77. if not callable(self.namer):
  78. result = default_name
  79. else:
  80. result = self.namer(default_name)
  81. return result
  82. def rotate(self, source, dest):
  83. """
  84. When rotating, rotate the current log.
  85. The default implementation calls the 'rotator' attribute of the
  86. handler, if it's callable, passing the source and dest arguments to
  87. it. If the attribute isn't callable (the default is None), the source
  88. is simply renamed to the destination.
  89. :param source: The source filename. This is normally the base
  90. filename, e.g. 'test.log'
  91. :param dest: The destination filename. This is normally
  92. what the source is rotated to, e.g. 'test.log.1'.
  93. """
  94. if not callable(self.rotator):
  95. # Issue 18940: A file may not have been created if delay is True.
  96. if os.path.exists(source):
  97. os.rename(source, dest)
  98. else:
  99. self.rotator(source, dest)
  100. class RotatingFileHandler(BaseRotatingHandler):
  101. """
  102. Handler for logging to a set of files, which switches from one file
  103. to the next when the current file reaches a certain size.
  104. """
  105. def __init__(self, filename, mode='a', maxBytes=0, backupCount=0,
  106. encoding=None, delay=False, errors=None):
  107. """
  108. Open the specified file and use it as the stream for logging.
  109. By default, the file grows indefinitely. You can specify particular
  110. values of maxBytes and backupCount to allow the file to rollover at
  111. a predetermined size.
  112. Rollover occurs whenever the current log file is nearly maxBytes in
  113. length. If backupCount is >= 1, the system will successively create
  114. new files with the same pathname as the base file, but with extensions
  115. ".1", ".2" etc. appended to it. For example, with a backupCount of 5
  116. and a base file name of "app.log", you would get "app.log",
  117. "app.log.1", "app.log.2", ... through to "app.log.5". The file being
  118. written to is always "app.log" - when it gets filled up, it is closed
  119. and renamed to "app.log.1", and if files "app.log.1", "app.log.2" etc.
  120. exist, then they are renamed to "app.log.2", "app.log.3" etc.
  121. respectively.
  122. If maxBytes is zero, rollover never occurs.
  123. """
  124. # If rotation/rollover is wanted, it doesn't make sense to use another
  125. # mode. If for example 'w' were specified, then if there were multiple
  126. # runs of the calling application, the logs from previous runs would be
  127. # lost if the 'w' is respected, because the log file would be truncated
  128. # on each run.
  129. if maxBytes > 0:
  130. mode = 'a'
  131. if "b" not in mode:
  132. encoding = io.text_encoding(encoding)
  133. BaseRotatingHandler.__init__(self, filename, mode, encoding=encoding,
  134. delay=delay, errors=errors)
  135. self.maxBytes = maxBytes
  136. self.backupCount = backupCount
  137. def doRollover(self):
  138. """
  139. Do a rollover, as described in __init__().
  140. """
  141. if self.stream:
  142. self.stream.close()
  143. self.stream = None
  144. if self.backupCount > 0:
  145. for i in range(self.backupCount - 1, 0, -1):
  146. sfn = self.rotation_filename("%s.%d" % (self.baseFilename, i))
  147. dfn = self.rotation_filename("%s.%d" % (self.baseFilename,
  148. i + 1))
  149. if os.path.exists(sfn):
  150. if os.path.exists(dfn):
  151. os.remove(dfn)
  152. os.rename(sfn, dfn)
  153. dfn = self.rotation_filename(self.baseFilename + ".1")
  154. if os.path.exists(dfn):
  155. os.remove(dfn)
  156. self.rotate(self.baseFilename, dfn)
  157. if not self.delay:
  158. self.stream = self._open()
  159. def shouldRollover(self, record):
  160. """
  161. Determine if rollover should occur.
  162. Basically, see if the supplied record would cause the file to exceed
  163. the size limit we have.
  164. """
  165. # See bpo-45401: Never rollover anything other than regular files
  166. if os.path.exists(self.baseFilename) and not os.path.isfile(self.baseFilename):
  167. return False
  168. if self.stream is None: # delay was set...
  169. self.stream = self._open()
  170. if self.maxBytes > 0: # are we rolling over?
  171. msg = "%s\n" % self.format(record)
  172. self.stream.seek(0, 2) #due to non-posix-compliant Windows feature
  173. if self.stream.tell() + len(msg) >= self.maxBytes:
  174. return True
  175. return False
class TimedRotatingFileHandler(BaseRotatingHandler):
    """
    Handler for logging to a file, rotating the log file at certain timed
    intervals.

    If backupCount is > 0, when rollover is done, no more than backupCount
    files are kept - the oldest ones are deleted.
    """
    def __init__(self, filename, when='h', interval=1, backupCount=0,
                 encoding=None, delay=False, utc=False, atTime=None,
                 errors=None):
        """
        Open *filename* for appending and arrange for timed rollover.

        :param when: unit for *interval*: 'S', 'M', 'H', 'D', 'MIDNIGHT'
                     or 'W0'-'W6' (0 is Monday); case-insensitive.
        :param interval: number of *when* units between rollovers.
        :param backupCount: if > 0, keep at most this many rotated files.
        :param utc: if true, compute rollover times in UTC, else local time.
        :param atTime: optional datetime.time at which 'MIDNIGHT'/'W*'
                       rollovers occur (defaults to midnight).
        :raises ValueError: if *when* is not one of the supported codes.
        """
        encoding = io.text_encoding(encoding)
        BaseRotatingHandler.__init__(self, filename, 'a', encoding=encoding,
                                     delay=delay, errors=errors)
        self.when = when.upper()
        self.backupCount = backupCount
        self.utc = utc
        self.atTime = atTime
        # Calculate the real rollover interval, which is just the number of
        # seconds between rollovers. Also set the filename suffix used when
        # a rollover occurs. Current 'when' events supported:
        # S - Seconds
        # M - Minutes
        # H - Hours
        # D - Days
        # midnight - roll over at midnight
        # W{0-6} - roll over on a certain day; 0 - Monday
        #
        # Case of the 'when' specifier is not important; lower or upper case
        # will work.
        if self.when == 'S':
            self.interval = 1 # one second
            self.suffix = "%Y-%m-%d_%H-%M-%S"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}-\d{2}(\.\w+)?$"
        elif self.when == 'M':
            self.interval = 60 # one minute
            self.suffix = "%Y-%m-%d_%H-%M"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}(\.\w+)?$"
        elif self.when == 'H':
            self.interval = 60 * 60 # one hour
            self.suffix = "%Y-%m-%d_%H"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}(\.\w+)?$"
        elif self.when == 'D' or self.when == 'MIDNIGHT':
            self.interval = 60 * 60 * 24 # one day
            self.suffix = "%Y-%m-%d"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}(\.\w+)?$"
        elif self.when.startswith('W'):
            self.interval = 60 * 60 * 24 * 7 # one week
            if len(self.when) != 2:
                raise ValueError("You must specify a day for weekly rollover from 0 to 6 (0 is Monday): %s" % self.when)
            if self.when[1] < '0' or self.when[1] > '6':
                raise ValueError("Invalid day specified for weekly rollover: %s" % self.when)
            self.dayOfWeek = int(self.when[1])
            self.suffix = "%Y-%m-%d"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}(\.\w+)?$"
        else:
            raise ValueError("Invalid rollover interval specified: %s" % self.when)

        # re.ASCII keeps \d and \w matching ASCII only, so rotated-file
        # date suffixes are matched strictly.
        self.extMatch = re.compile(self.extMatch, re.ASCII)
        self.interval = self.interval * interval # multiply by units requested
        # The following line added because the filename passed in could be a
        # path object (see Issue #27493), but self.baseFilename will be a string
        filename = self.baseFilename
        if os.path.exists(filename):
            # Base the first rollover on the existing file's mtime so a
            # process restart does not reset the rotation schedule.
            t = os.stat(filename)[ST_MTIME]
        else:
            t = int(time.time())
        self.rolloverAt = self.computeRollover(t)

    def computeRollover(self, currentTime):
        """
        Work out the rollover time based on the specified time.
        """
        result = currentTime + self.interval
        # If we are rolling over at midnight or weekly, then the interval is already known.
        # What we need to figure out is WHEN the next interval is. In other words,
        # if you are rolling over at midnight, then your base interval is 1 day,
        # but you want to start that one day clock at midnight, not now. So, we
        # have to fudge the rolloverAt value in order to trigger the first rollover
        # at the right time. After that, the regular interval will take care of
        # the rest. Note that this code doesn't care about leap seconds. :)
        if self.when == 'MIDNIGHT' or self.when.startswith('W'):
            # This could be done with less code, but I wanted it to be clear
            if self.utc:
                t = time.gmtime(currentTime)
            else:
                t = time.localtime(currentTime)
            currentHour = t[3]
            currentMinute = t[4]
            currentSecond = t[5]
            currentDay = t[6]
            # r is the number of seconds left between now and the next rotation
            if self.atTime is None:
                rotate_ts = _MIDNIGHT
            else:
                rotate_ts = ((self.atTime.hour * 60 + self.atTime.minute)*60 +
                    self.atTime.second)

            r = rotate_ts - ((currentHour * 60 + currentMinute) * 60 +
                currentSecond)
            if r < 0:
                # Rotate time is before the current time (for example when
                # self.rotateAt is 13:45 and it now 14:15), rotation is
                # tomorrow.
                r += _MIDNIGHT
                currentDay = (currentDay + 1) % 7
            result = currentTime + r
            # If we are rolling over on a certain day, add in the number of days until
            # the next rollover, but offset by 1 since we just calculated the time
            # until the next day starts. There are three cases:
            # Case 1) The day to rollover is today; in this case, do nothing
            # Case 2) The day to rollover is further in the interval (i.e., today is
            #         day 2 (Wednesday) and rollover is on day 6 (Sunday). Days to
            #         next rollover is simply 6 - 2 - 1, or 3.
            # Case 3) The day to rollover is behind us in the interval (i.e., today
            #         is day 5 (Saturday) and rollover is on day 3 (Thursday).
            #         Days to rollover is 6 - 5 + 3, or 4. In this case, it's the
            #         number of days left in the current week (1) plus the number
            #         of days in the next week until the rollover day (3).
            # The calculations described in 2) and 3) above need to have a day added.
            # This is because the above time calculation takes us to midnight on this
            # day, i.e. the start of the next day.
            if self.when.startswith('W'):
                day = currentDay # 0 is Monday
                if day != self.dayOfWeek:
                    if day < self.dayOfWeek:
                        daysToWait = self.dayOfWeek - day
                    else:
                        daysToWait = 6 - day + self.dayOfWeek + 1
                    newRolloverAt = result + (daysToWait * (60 * 60 * 24))
                    if not self.utc:
                        dstNow = t[-1]
                        dstAtRollover = time.localtime(newRolloverAt)[-1]
                        if dstNow != dstAtRollover:
                            if not dstNow:  # DST kicks in before next rollover, so we need to deduct an hour
                                addend = -3600
                            else:           # DST bows out before next rollover, so we need to add an hour
                                addend = 3600
                            newRolloverAt += addend
                    result = newRolloverAt
        return result

    def shouldRollover(self, record):
        """
        Determine if rollover should occur.

        record is not used, as we are just comparing times, but it is needed so
        the method signatures are the same
        """
        t = int(time.time())
        if t >= self.rolloverAt:
            # See #89564: Never rollover anything other than regular files
            if os.path.exists(self.baseFilename) and not os.path.isfile(self.baseFilename):
                # The file is not a regular file, so do not rollover, but do
                # set the next rollover time to avoid repeated checks.
                self.rolloverAt = self.computeRollover(t)
                return False

            return True
        return False

    def getFilesToDelete(self):
        """
        Determine the files to delete when rolling over.

        More specific than the earlier method, which just used glob.glob().
        """
        dirName, baseName = os.path.split(self.baseFilename)
        fileNames = os.listdir(dirName)
        result = []
        # See bpo-44753: Don't use the extension when computing the prefix.
        n, e = os.path.splitext(baseName)
        prefix = n + '.'
        plen = len(prefix)
        for fileName in fileNames:
            if self.namer is None:
                # Our files will always start with baseName
                if not fileName.startswith(baseName):
                    continue
            else:
                # Our files could be just about anything after custom naming, but
                # likely candidates are of the form
                # foo.log.DATETIME_SUFFIX or foo.DATETIME_SUFFIX.log
                # NOTE(review): a file is skipped only when ALL of these hold
                # simultaneously; verify this filter against the names your
                # custom namer actually produces.
                if (not fileName.startswith(baseName) and fileName.endswith(e) and
                    len(fileName) > (plen + 1) and not fileName[plen+1].isdigit()):
                    continue

            if fileName[:plen] == prefix:
                suffix = fileName[plen:]
                # See bpo-45628: The date/time suffix could be anywhere in the
                # filename
                parts = suffix.split('.')
                for part in parts:
                    if self.extMatch.match(part):
                        result.append(os.path.join(dirName, fileName))
                        break
        if len(result) < self.backupCount:
            result = []
        else:
            result.sort()
            result = result[:len(result) - self.backupCount]
        return result

    def doRollover(self):
        """
        do a rollover; in this case, a date/time stamp is appended to the filename
        when the rollover happens. However, you want the file to be named for the
        start of the interval, not the current time. If there is a backup count,
        then we have to get a list of matching filenames, sort them and remove
        the one with the oldest suffix.
        """
        if self.stream:
            self.stream.close()
            self.stream = None
        # get the time that this sequence started at and make it a TimeTuple
        currentTime = int(time.time())
        dstNow = time.localtime(currentTime)[-1]
        t = self.rolloverAt - self.interval
        if self.utc:
            timeTuple = time.gmtime(t)
        else:
            timeTuple = time.localtime(t)
            dstThen = timeTuple[-1]
            if dstNow != dstThen:
                # DST changed between the interval start and now; shift the
                # timestamp so the suffix reflects the interval's wall-clock
                # start.
                if dstNow:
                    addend = 3600
                else:
                    addend = -3600
                timeTuple = time.localtime(t + addend)
        dfn = self.rotation_filename(self.baseFilename + "." +
                                     time.strftime(self.suffix, timeTuple))
        if os.path.exists(dfn):
            os.remove(dfn)
        self.rotate(self.baseFilename, dfn)
        if self.backupCount > 0:
            for s in self.getFilesToDelete():
                os.remove(s)
        if not self.delay:
            self.stream = self._open()
        newRolloverAt = self.computeRollover(currentTime)
        while newRolloverAt <= currentTime:
            newRolloverAt = newRolloverAt + self.interval
        #If DST changes and midnight or weekly rollover, adjust for this.
        if (self.when == 'MIDNIGHT' or self.when.startswith('W')) and not self.utc:
            dstAtRollover = time.localtime(newRolloverAt)[-1]
            if dstNow != dstAtRollover:
                if not dstNow:  # DST kicks in before next rollover, so we need to deduct an hour
                    addend = -3600
                else:           # DST bows out before next rollover, so we need to add an hour
                    addend = 3600
                newRolloverAt += addend
        self.rolloverAt = newRolloverAt
  417. class WatchedFileHandler(logging.FileHandler):
  418. """
  419. A handler for logging to a file, which watches the file
  420. to see if it has changed while in use. This can happen because of
  421. usage of programs such as newsyslog and logrotate which perform
  422. log file rotation. This handler, intended for use under Unix,
  423. watches the file to see if it has changed since the last emit.
  424. (A file has changed if its device or inode have changed.)
  425. If it has changed, the old file stream is closed, and the file
  426. opened to get a new stream.
  427. This handler is not appropriate for use under Windows, because
  428. under Windows open files cannot be moved or renamed - logging
  429. opens the files with exclusive locks - and so there is no need
  430. for such a handler. Furthermore, ST_INO is not supported under
  431. Windows; stat always returns zero for this value.
  432. This handler is based on a suggestion and patch by Chad J.
  433. Schroeder.
  434. """
  435. def __init__(self, filename, mode='a', encoding=None, delay=False,
  436. errors=None):
  437. if "b" not in mode:
  438. encoding = io.text_encoding(encoding)
  439. logging.FileHandler.__init__(self, filename, mode=mode,
  440. encoding=encoding, delay=delay,
  441. errors=errors)
  442. self.dev, self.ino = -1, -1
  443. self._statstream()
  444. def _statstream(self):
  445. if self.stream:
  446. sres = os.fstat(self.stream.fileno())
  447. self.dev, self.ino = sres[ST_DEV], sres[ST_INO]
  448. def reopenIfNeeded(self):
  449. """
  450. Reopen log file if needed.
  451. Checks if the underlying file has changed, and if it
  452. has, close the old stream and reopen the file to get the
  453. current stream.
  454. """
  455. # Reduce the chance of race conditions by stat'ing by path only
  456. # once and then fstat'ing our new fd if we opened a new log stream.
  457. # See issue #14632: Thanks to John Mulligan for the problem report
  458. # and patch.
  459. try:
  460. # stat the file by path, checking for existence
  461. sres = os.stat(self.baseFilename)
  462. except FileNotFoundError:
  463. sres = None
  464. # compare file system stat with that of our stream file handle
  465. if not sres or sres[ST_DEV] != self.dev or sres[ST_INO] != self.ino:
  466. if self.stream is not None:
  467. # we have an open file handle, clean it up
  468. self.stream.flush()
  469. self.stream.close()
  470. self.stream = None # See Issue #21742: _open () might fail.
  471. # open a new file handle and get new stat info from that fd
  472. self.stream = self._open()
  473. self._statstream()
  474. def emit(self, record):
  475. """
  476. Emit a record.
  477. If underlying file has changed, reopen the file before emitting the
  478. record to it.
  479. """
  480. self.reopenIfNeeded()
  481. logging.FileHandler.emit(self, record)
class SocketHandler(logging.Handler):
    """
    A handler class which writes logging records, in pickle format, to
    a streaming socket. The socket is kept open across logging calls.
    If the peer resets it, an attempt is made to reconnect on the next call.

    The pickle which is sent is that of the LogRecord's attribute dictionary
    (__dict__), so that the receiver does not need to have the logging module
    installed in order to process the logging event.

    To unpickle the record at the receiving end into a LogRecord, use the
    makeLogRecord function.
    """

    def __init__(self, host, port):
        """
        Initializes the handler with a specific host address and port.

        When the attribute *closeOnError* is set to True - if a socket error
        occurs, the socket is silently closed and then reopened on the next
        logging call.
        """
        logging.Handler.__init__(self)
        self.host = host
        self.port = port
        if port is None:
            # A port of None means 'host' is a Unix domain socket path.
            self.address = host
        else:
            self.address = (host, port)
        self.sock = None
        self.closeOnError = False
        self.retryTime = None  # earliest time of the next connect attempt; None = try now
        #
        # Exponential backoff parameters.
        #
        self.retryStart = 1.0   # initial delay after a failed connect
        self.retryMax = 30.0    # cap on the delay between attempts
        self.retryFactor = 2.0  # growth factor applied per failure

    def makeSocket(self, timeout=1):
        """
        A factory method which allows subclasses to define the precise
        type of socket they want.
        """
        if self.port is not None:
            result = socket.create_connection(self.address, timeout=timeout)
        else:
            # Unix domain socket: create it, then connect explicitly.
            result = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            result.settimeout(timeout)
            try:
                result.connect(self.address)
            except OSError:
                result.close()  # Issue 19182
                raise
        return result

    def createSocket(self):
        """
        Try to create a socket, using an exponential backoff with
        a max retry time. Thanks to Robert Olson for the original patch
        (SF #815911) which has been slightly refactored.
        """
        now = time.time()
        # Either retryTime is None, in which case this
        # is the first time back after a disconnect, or
        # we've waited long enough.
        if self.retryTime is None:
            attempt = True
        else:
            attempt = (now >= self.retryTime)
        if attempt:
            try:
                self.sock = self.makeSocket()
                self.retryTime = None # next time, no delay before trying
            except OSError:
                #Creation failed, so set the retry time and return.
                # retryPeriod is created lazily on the first failure, then
                # grown geometrically (capped at retryMax) on each
                # subsequent failure.
                if self.retryTime is None:
                    self.retryPeriod = self.retryStart
                else:
                    self.retryPeriod = self.retryPeriod * self.retryFactor
                    if self.retryPeriod > self.retryMax:
                        self.retryPeriod = self.retryMax
                self.retryTime = now + self.retryPeriod

    def send(self, s):
        """
        Send a pickled string to the socket.

        This function allows for partial sends which can happen when the
        network is busy.
        """
        if self.sock is None:
            self.createSocket()
        #self.sock can be None either because we haven't reached the retry
        #time yet, or because we have reached the retry time and retried,
        #but are still unable to connect.
        if self.sock:
            try:
                self.sock.sendall(s)
            except OSError: #pragma: no cover
                self.sock.close()
                self.sock = None  # so we can call createSocket next time

    def makePickle(self, record):
        """
        Pickles the record in binary format with a length prefix, and
        returns it ready for transmission across the socket.
        """
        ei = record.exc_info
        if ei:
            # just to get traceback text into record.exc_text ...
            dummy = self.format(record)
        # See issue #14436: If msg or args are objects, they may not be
        # available on the receiving end. So we convert the msg % args
        # to a string, save it as msg and zap the args.
        d = dict(record.__dict__)
        d['msg'] = record.getMessage()
        d['args'] = None
        d['exc_info'] = None
        # Issue #25685: delete 'message' if present: redundant with 'msg'
        d.pop('message', None)
        # pickle protocol 1 - presumably kept for compatibility with older
        # receivers; the 4-byte big-endian length prefix lets the receiver
        # frame each record on the stream.
        s = pickle.dumps(d, 1)
        slen = struct.pack(">L", len(s))
        return slen + s

    def handleError(self, record):
        """
        Handle an error during logging.

        An error has occurred during logging. Most likely cause -
        connection lost. Close the socket so that we can retry on the
        next event.
        """
        if self.closeOnError and self.sock:
            self.sock.close()
            self.sock = None        #try to reconnect next time
        else:
            logging.Handler.handleError(self, record)

    def emit(self, record):
        """
        Emit a record.

        Pickles the record and writes it to the socket in binary format.
        If there is an error with the socket, silently drop the packet.
        If there was a problem with the socket, re-establishes the
        socket.
        """
        try:
            s = self.makePickle(record)
            self.send(s)
        except Exception:
            self.handleError(record)

    def close(self):
        """
        Closes the socket.
        """
        # Hold the handler lock so no emit() races with the close.
        self.acquire()
        try:
            sock = self.sock
            if sock:
                self.sock = None
                sock.close()
            logging.Handler.close(self)
        finally:
            self.release()
  635. class DatagramHandler(SocketHandler):
  636. """
  637. A handler class which writes logging records, in pickle format, to
  638. a datagram socket. The pickle which is sent is that of the LogRecord's
  639. attribute dictionary (__dict__), so that the receiver does not need to
  640. have the logging module installed in order to process the logging event.
  641. To unpickle the record at the receiving end into a LogRecord, use the
  642. makeLogRecord function.
  643. """
  644. def __init__(self, host, port):
  645. """
  646. Initializes the handler with a specific host address and port.
  647. """
  648. SocketHandler.__init__(self, host, port)
  649. self.closeOnError = False
  650. def makeSocket(self):
  651. """
  652. The factory method of SocketHandler is here overridden to create
  653. a UDP socket (SOCK_DGRAM).
  654. """
  655. if self.port is None:
  656. family = socket.AF_UNIX
  657. else:
  658. family = socket.AF_INET
  659. s = socket.socket(family, socket.SOCK_DGRAM)
  660. return s
  661. def send(self, s):
  662. """
  663. Send a pickled string to a socket.
  664. This function no longer allows for partial sends which can happen
  665. when the network is busy - UDP does not guarantee delivery and
  666. can deliver packets out of sequence.
  667. """
  668. if self.sock is None:
  669. self.createSocket()
  670. self.sock.sendto(s, self.address)
class SysLogHandler(logging.Handler):
    """
    A handler class which sends formatted logging records to a syslog
    server. Based on Sam Rushing's syslog module:
    http://www.nightmare.com/squirl/python-ext/misc/syslog.py
    Contributed by Nicolas Untz (after which minor refactoring changes
    have been made).
    """

    # from <linux/sys/syslog.h>:
    # ======================================================================
    # priorities/facilities are encoded into a single 32-bit quantity, where
    # the bottom 3 bits are the priority (0-7) and the top 28 bits are the
    # facility (0-big number). Both the priorities and the facilities map
    # roughly one-to-one to strings in the syslogd(8) source code. This
    # mapping is included in this file.
    #
    # priorities (these are ordered)
    LOG_EMERG = 0       # system is unusable
    LOG_ALERT = 1       # action must be taken immediately
    LOG_CRIT = 2        # critical conditions
    LOG_ERR = 3         # error conditions
    LOG_WARNING = 4     # warning conditions
    LOG_NOTICE = 5      # normal but significant condition
    LOG_INFO = 6        # informational
    LOG_DEBUG = 7       # debug-level messages

    # facility codes
    LOG_KERN = 0        # kernel messages
    LOG_USER = 1        # random user-level messages
    LOG_MAIL = 2        # mail system
    LOG_DAEMON = 3      # system daemons
    LOG_AUTH = 4        # security/authorization messages
    LOG_SYSLOG = 5      # messages generated internally by syslogd
    LOG_LPR = 6         # line printer subsystem
    LOG_NEWS = 7        # network news subsystem
    LOG_UUCP = 8        # UUCP subsystem
    LOG_CRON = 9        # clock daemon
    LOG_AUTHPRIV = 10   # security/authorization messages (private)
    LOG_FTP = 11        # FTP daemon
    LOG_NTP = 12        # NTP subsystem
    LOG_SECURITY = 13   # Log audit
    LOG_CONSOLE = 14    # Log alert
    LOG_SOLCRON = 15    # Scheduling daemon (Solaris)

    # other codes through 15 reserved for system use
    LOG_LOCAL0 = 16     # reserved for local use
    LOG_LOCAL1 = 17     # reserved for local use
    LOG_LOCAL2 = 18     # reserved for local use
    LOG_LOCAL3 = 19     # reserved for local use
    LOG_LOCAL4 = 20     # reserved for local use
    LOG_LOCAL5 = 21     # reserved for local use
    LOG_LOCAL6 = 22     # reserved for local use
    LOG_LOCAL7 = 23     # reserved for local use

    # String aliases accepted by encodePriority() for the priority part.
    priority_names = {
        "alert":    LOG_ALERT,
        "crit":     LOG_CRIT,
        "critical": LOG_CRIT,
        "debug":    LOG_DEBUG,
        "emerg":    LOG_EMERG,
        "err":      LOG_ERR,
        "error":    LOG_ERR,        #  DEPRECATED
        "info":     LOG_INFO,
        "notice":   LOG_NOTICE,
        "panic":    LOG_EMERG,      #  DEPRECATED
        "warn":     LOG_WARNING,    #  DEPRECATED
        "warning":  LOG_WARNING,
    }

    # String aliases accepted by encodePriority() for the facility part.
    facility_names = {
        "auth":         LOG_AUTH,
        "authpriv":     LOG_AUTHPRIV,
        "console":      LOG_CONSOLE,
        "cron":         LOG_CRON,
        "daemon":       LOG_DAEMON,
        "ftp":          LOG_FTP,
        "kern":         LOG_KERN,
        "lpr":          LOG_LPR,
        "mail":         LOG_MAIL,
        "news":         LOG_NEWS,
        "ntp":          LOG_NTP,
        "security":     LOG_SECURITY,
        "solaris-cron": LOG_SOLCRON,
        "syslog":       LOG_SYSLOG,
        "user":         LOG_USER,
        "uucp":         LOG_UUCP,
        "local0":       LOG_LOCAL0,
        "local1":       LOG_LOCAL1,
        "local2":       LOG_LOCAL2,
        "local3":       LOG_LOCAL3,
        "local4":       LOG_LOCAL4,
        "local5":       LOG_LOCAL5,
        "local6":       LOG_LOCAL6,
        "local7":       LOG_LOCAL7,
    }

    #The map below appears to be trivially lowercasing the key. However,
    #there's more to it than meets the eye - in some locales, lowercasing
    #gives unexpected results. See SF #1524081: in the Turkish locale,
    #"INFO".lower() != "info"
    priority_map = {
        "DEBUG" : "debug",
        "INFO" : "info",
        "WARNING" : "warning",
        "ERROR" : "error",
        "CRITICAL" : "critical"
    }

    def __init__(self, address=('localhost', SYSLOG_UDP_PORT),
                 facility=LOG_USER, socktype=None):
        """
        Initialize a handler.

        If address is specified as a string, a UNIX socket is used. To log to a
        local syslogd, "SysLogHandler(address="/dev/log")" can be used.
        If facility is not specified, LOG_USER is used. If socktype is
        specified as socket.SOCK_DGRAM or socket.SOCK_STREAM, that specific
        socket type will be used. For Unix sockets, you can also specify a
        socktype of None, in which case socket.SOCK_DGRAM will be used, falling
        back to socket.SOCK_STREAM.
        """
        logging.Handler.__init__(self)

        self.address = address
        self.facility = facility
        self.socktype = socktype
        self.socket = None
        # Connect eagerly; createSocket() tolerates an absent server for
        # UNIX sockets, and emit() retries if self.socket is still None.
        self.createSocket()

    def _connect_unixsocket(self, address):
        # Try the requested socket type, defaulting to SOCK_DGRAM; if the
        # user left socktype as None and the datagram connect fails, fall
        # back to SOCK_STREAM before giving up.
        use_socktype = self.socktype
        if use_socktype is None:
            use_socktype = socket.SOCK_DGRAM
        self.socket = socket.socket(socket.AF_UNIX, use_socktype)
        try:
            self.socket.connect(address)
            # it worked, so set self.socktype to the used type
            self.socktype = use_socktype
        except OSError:
            self.socket.close()
            if self.socktype is not None:
                # user didn't specify falling back, so fail
                raise
            use_socktype = socket.SOCK_STREAM
            self.socket = socket.socket(socket.AF_UNIX, use_socktype)
            try:
                self.socket.connect(address)
                # it worked, so set self.socktype to the used type
                self.socktype = use_socktype
            except OSError:
                self.socket.close()
                raise

    def createSocket(self):
        """
        Try to create a socket and, if it's not a datagram socket, connect it
        to the other end. This method is called during handler initialization,
        but it's not regarded as an error if the other end isn't listening yet
        --- the method will be called again when emitting an event,
        if there is no socket at that point.
        """
        address = self.address
        socktype = self.socktype

        if isinstance(address, str):
            self.unixsocket = True
            # Syslog server may be unavailable during handler initialisation.
            # C's openlog() function also ignores connection errors.
            # Moreover, we ignore these errors while logging, so it's not worse
            # to ignore it also here.
            try:
                self._connect_unixsocket(address)
            except OSError:
                pass
        else:
            self.unixsocket = False
            if socktype is None:
                socktype = socket.SOCK_DGRAM
            host, port = address
            ress = socket.getaddrinfo(host, port, 0, socktype)
            if not ress:
                raise OSError("getaddrinfo returns an empty list")
            # Try each resolved address in turn; keep the first socket that
            # can be created (and, for TCP, connected). Raise the last
            # error only if every candidate failed.
            for res in ress:
                af, socktype, proto, _, sa = res
                err = sock = None
                try:
                    sock = socket.socket(af, socktype, proto)
                    if socktype == socket.SOCK_STREAM:
                        sock.connect(sa)
                    break
                except OSError as exc:
                    err = exc
                    if sock is not None:
                        sock.close()
            if err is not None:
                raise err
            self.socket = sock
            self.socktype = socktype

    def encodePriority(self, facility, priority):
        """
        Encode the facility and priority. You can pass in strings or
        integers - if strings are passed, the facility_names and
        priority_names mapping dictionaries are used to convert them to
        integers.
        """
        if isinstance(facility, str):
            facility = self.facility_names[facility]
        if isinstance(priority, str):
            priority = self.priority_names[priority]
        # Syslog PRI encoding: facility in the high bits, 3-bit priority
        # in the low bits.
        return (facility << 3) | priority

    def close(self):
        """
        Closes the socket.
        """
        self.acquire()
        try:
            sock = self.socket
            if sock:
                self.socket = None
                sock.close()
            logging.Handler.close(self)
        finally:
            self.release()

    def mapPriority(self, levelName):
        """
        Map a logging level name to a key in the priority_names map.
        This is useful in two scenarios: when custom levels are being
        used, and in the case where you can't do a straightforward
        mapping by lowercasing the logging level name because of locale-
        specific issues (see SF #1524081).
        """
        return self.priority_map.get(levelName, "warning")

    ident = ''          # prepended to all messages
    append_nul = True   # some old syslog daemons expect a NUL terminator

    def emit(self, record):
        """
        Emit a record.

        The record is formatted, and then sent to the syslog server. If
        exception information is present, it is NOT sent to the server.
        """
        try:
            msg = self.format(record)
            if self.ident:
                msg = self.ident + msg
            if self.append_nul:
                msg += '\000'

            # We need to convert record level to lowercase, maybe this will
            # change in the future.
            prio = '<%d>' % self.encodePriority(self.facility,
                                                self.mapPriority(record.levelname))
            prio = prio.encode('utf-8')
            # Message is a string. Convert to bytes as required by RFC 5424
            msg = msg.encode('utf-8')
            msg = prio + msg
            if not self.socket:
                self.createSocket()

            if self.unixsocket:
                try:
                    self.socket.send(msg)
                except OSError:
                    # The syslog daemon may have been restarted; reconnect
                    # once and retry the send.
                    self.socket.close()
                    self._connect_unixsocket(self.address)
                    self.socket.send(msg)
            elif self.socktype == socket.SOCK_DGRAM:
                self.socket.sendto(msg, self.address)
            else:
                self.socket.sendall(msg)
        except Exception:
            self.handleError(record)
  929. class SMTPHandler(logging.Handler):
  930. """
  931. A handler class which sends an SMTP email for each logging event.
  932. """
  933. def __init__(self, mailhost, fromaddr, toaddrs, subject,
  934. credentials=None, secure=None, timeout=5.0):
  935. """
  936. Initialize the handler.
  937. Initialize the instance with the from and to addresses and subject
  938. line of the email. To specify a non-standard SMTP port, use the
  939. (host, port) tuple format for the mailhost argument. To specify
  940. authentication credentials, supply a (username, password) tuple
  941. for the credentials argument. To specify the use of a secure
  942. protocol (TLS), pass in a tuple for the secure argument. This will
  943. only be used when authentication credentials are supplied. The tuple
  944. will be either an empty tuple, or a single-value tuple with the name
  945. of a keyfile, or a 2-value tuple with the names of the keyfile and
  946. certificate file. (This tuple is passed to the `starttls` method).
  947. A timeout in seconds can be specified for the SMTP connection (the
  948. default is one second).
  949. """
  950. logging.Handler.__init__(self)
  951. if isinstance(mailhost, (list, tuple)):
  952. self.mailhost, self.mailport = mailhost
  953. else:
  954. self.mailhost, self.mailport = mailhost, None
  955. if isinstance(credentials, (list, tuple)):
  956. self.username, self.password = credentials
  957. else:
  958. self.username = None
  959. self.fromaddr = fromaddr
  960. if isinstance(toaddrs, str):
  961. toaddrs = [toaddrs]
  962. self.toaddrs = toaddrs
  963. self.subject = subject
  964. self.secure = secure
  965. self.timeout = timeout
  966. def getSubject(self, record):
  967. """
  968. Determine the subject for the email.
  969. If you want to specify a subject line which is record-dependent,
  970. override this method.
  971. """
  972. return self.subject
  973. def emit(self, record):
  974. """
  975. Emit a record.
  976. Format the record and send it to the specified addressees.
  977. """
  978. try:
  979. import smtplib
  980. from email.message import EmailMessage
  981. import email.utils
  982. port = self.mailport
  983. if not port:
  984. port = smtplib.SMTP_PORT
  985. smtp = smtplib.SMTP(self.mailhost, port, timeout=self.timeout)
  986. msg = EmailMessage()
  987. msg['From'] = self.fromaddr
  988. msg['To'] = ','.join(self.toaddrs)
  989. msg['Subject'] = self.getSubject(record)
  990. msg['Date'] = email.utils.localtime()
  991. msg.set_content(self.format(record))
  992. if self.username:
  993. if self.secure is not None:
  994. smtp.ehlo()
  995. smtp.starttls(*self.secure)
  996. smtp.ehlo()
  997. smtp.login(self.username, self.password)
  998. smtp.send_message(msg)
  999. smtp.quit()
  1000. except Exception:
  1001. self.handleError(record)
class NTEventLogHandler(logging.Handler):
    """
    A handler class which sends events to the NT Event Log. Adds a
    registry entry for the specified application name. If no dllname is
    provided, win32service.pyd (which contains some basic message
    placeholders) is used. Note that use of these placeholders will make
    your event logs big, as the entire message source is held in the log.
    If you want slimmer logs, you have to pass in the name of your own DLL
    which contains the message definitions you want to use in the event log.
    """
    def __init__(self, appname, dllname=None, logtype="Application"):
        """
        Initialize the handler.

        Requires the pywin32 extensions; if they cannot be imported,
        a message is printed and the handler becomes a no-op
        (self._welu is None).
        """
        logging.Handler.__init__(self)
        try:
            import win32evtlogutil, win32evtlog
            self.appname = appname
            self._welu = win32evtlogutil
            if not dllname:
                # Default to win32service.pyd, located relative to the
                # win32evtlogutil module's own file: two directories up,
                # then the bare filename.
                dllname = os.path.split(self._welu.__file__)
                dllname = os.path.split(dllname[0])
                dllname = os.path.join(dllname[0], r'win32service.pyd')
            self.dllname = dllname
            self.logtype = logtype
            # Administrative privileges are required to add a source to the registry.
            # This may not be available for a user that just wants to add to an
            # existing source - handle this specific case.
            try:
                self._welu.AddSourceToRegistry(appname, dllname, logtype)
            except Exception as e:
                # This will probably be a pywintypes.error. Only raise if it's not
                # an "access denied" error, else let it pass
                if getattr(e, 'winerror', None) != 5:  # not access denied
                    raise
            self.deftype = win32evtlog.EVENTLOG_ERROR_TYPE
            # Map standard logging levels to NT event types; see
            # getEventType() for how unknown levels fall back to deftype.
            self.typemap = {
                logging.DEBUG   : win32evtlog.EVENTLOG_INFORMATION_TYPE,
                logging.INFO    : win32evtlog.EVENTLOG_INFORMATION_TYPE,
                logging.WARNING : win32evtlog.EVENTLOG_WARNING_TYPE,
                logging.ERROR   : win32evtlog.EVENTLOG_ERROR_TYPE,
                logging.CRITICAL: win32evtlog.EVENTLOG_ERROR_TYPE,
            }
        except ImportError:
            print("The Python Win32 extensions for NT (service, event "\
                "logging) appear not to be available.")
            self._welu = None

    def getMessageID(self, record):
        """
        Return the message ID for the event record. If you are using your
        own messages, you could do this by having the msg passed to the
        logger being an ID rather than a formatting string. Then, in here,
        you could use a dictionary lookup to get the message ID. This
        version returns 1, which is the base message ID in win32service.pyd.
        """
        return 1

    def getEventCategory(self, record):
        """
        Return the event category for the record.

        Override this if you want to specify your own categories. This version
        returns 0.
        """
        return 0

    def getEventType(self, record):
        """
        Return the event type for the record.

        Override this if you want to specify your own types. This version does
        a mapping using the handler's typemap attribute, which is set up in
        __init__() to a dictionary which contains mappings for DEBUG, INFO,
        WARNING, ERROR and CRITICAL. If you are using your own levels you will
        either need to override this method or place a suitable dictionary in
        the handler's typemap attribute.
        """
        return self.typemap.get(record.levelno, self.deftype)

    def emit(self, record):
        """
        Emit a record.

        Determine the message ID, event category and event type. Then
        log the message in the NT event log.
        """
        if self._welu:
            try:
                id = self.getMessageID(record)
                cat = self.getEventCategory(record)
                type = self.getEventType(record)
                msg = self.format(record)
                self._welu.ReportEvent(self.appname, id, cat, type, [msg])
            except Exception:
                self.handleError(record)

    def close(self):
        """
        Clean up this handler.

        You can remove the application name from the registry as a
        source of event log entries. However, if you do this, you will
        not be able to see the events as you intended in the Event Log
        Viewer - it needs to be able to access the registry to get the
        DLL name.
        """
        #self._welu.RemoveSourceFromRegistry(self.appname, self.logtype)
        logging.Handler.close(self)
  1099. class HTTPHandler(logging.Handler):
  1100. """
  1101. A class which sends records to a web server, using either GET or
  1102. POST semantics.
  1103. """
  1104. def __init__(self, host, url, method="GET", secure=False, credentials=None,
  1105. context=None):
  1106. """
  1107. Initialize the instance with the host, the request URL, and the method
  1108. ("GET" or "POST")
  1109. """
  1110. logging.Handler.__init__(self)
  1111. method = method.upper()
  1112. if method not in ["GET", "POST"]:
  1113. raise ValueError("method must be GET or POST")
  1114. if not secure and context is not None:
  1115. raise ValueError("context parameter only makes sense "
  1116. "with secure=True")
  1117. self.host = host
  1118. self.url = url
  1119. self.method = method
  1120. self.secure = secure
  1121. self.credentials = credentials
  1122. self.context = context
  1123. def mapLogRecord(self, record):
  1124. """
  1125. Default implementation of mapping the log record into a dict
  1126. that is sent as the CGI data. Overwrite in your class.
  1127. Contributed by Franz Glasner.
  1128. """
  1129. return record.__dict__
  1130. def getConnection(self, host, secure):
  1131. """
  1132. get a HTTP[S]Connection.
  1133. Override when a custom connection is required, for example if
  1134. there is a proxy.
  1135. """
  1136. import http.client
  1137. if secure:
  1138. connection = http.client.HTTPSConnection(host, context=self.context)
  1139. else:
  1140. connection = http.client.HTTPConnection(host)
  1141. return connection
  1142. def emit(self, record):
  1143. """
  1144. Emit a record.
  1145. Send the record to the web server as a percent-encoded dictionary
  1146. """
  1147. try:
  1148. import urllib.parse
  1149. host = self.host
  1150. h = self.getConnection(host, self.secure)
  1151. url = self.url
  1152. data = urllib.parse.urlencode(self.mapLogRecord(record))
  1153. if self.method == "GET":
  1154. if (url.find('?') >= 0):
  1155. sep = '&'
  1156. else:
  1157. sep = '?'
  1158. url = url + "%c%s" % (sep, data)
  1159. h.putrequest(self.method, url)
  1160. # support multiple hosts on one IP address...
  1161. # need to strip optional :port from host, if present
  1162. i = host.find(":")
  1163. if i >= 0:
  1164. host = host[:i]
  1165. # See issue #30904: putrequest call above already adds this header
  1166. # on Python 3.x.
  1167. # h.putheader("Host", host)
  1168. if self.method == "POST":
  1169. h.putheader("Content-type",
  1170. "application/x-www-form-urlencoded")
  1171. h.putheader("Content-length", str(len(data)))
  1172. if self.credentials:
  1173. import base64
  1174. s = ('%s:%s' % self.credentials).encode('utf-8')
  1175. s = 'Basic ' + base64.b64encode(s).strip().decode('ascii')
  1176. h.putheader('Authorization', s)
  1177. h.endheaders()
  1178. if self.method == "POST":
  1179. h.send(data.encode('utf-8'))
  1180. h.getresponse() #can't do anything with the result
  1181. except Exception:
  1182. self.handleError(record)
  1183. class BufferingHandler(logging.Handler):
  1184. """
  1185. A handler class which buffers logging records in memory. Whenever each
  1186. record is added to the buffer, a check is made to see if the buffer should
  1187. be flushed. If it should, then flush() is expected to do what's needed.
  1188. """
  1189. def __init__(self, capacity):
  1190. """
  1191. Initialize the handler with the buffer size.
  1192. """
  1193. logging.Handler.__init__(self)
  1194. self.capacity = capacity
  1195. self.buffer = []
  1196. def shouldFlush(self, record):
  1197. """
  1198. Should the handler flush its buffer?
  1199. Returns true if the buffer is up to capacity. This method can be
  1200. overridden to implement custom flushing strategies.
  1201. """
  1202. return (len(self.buffer) >= self.capacity)
  1203. def emit(self, record):
  1204. """
  1205. Emit a record.
  1206. Append the record. If shouldFlush() tells us to, call flush() to process
  1207. the buffer.
  1208. """
  1209. self.buffer.append(record)
  1210. if self.shouldFlush(record):
  1211. self.flush()
  1212. def flush(self):
  1213. """
  1214. Override to implement custom flushing behaviour.
  1215. This version just zaps the buffer to empty.
  1216. """
  1217. self.acquire()
  1218. try:
  1219. self.buffer.clear()
  1220. finally:
  1221. self.release()
  1222. def close(self):
  1223. """
  1224. Close the handler.
  1225. This version just flushes and chains to the parent class' close().
  1226. """
  1227. try:
  1228. self.flush()
  1229. finally:
  1230. logging.Handler.close(self)
  1231. class MemoryHandler(BufferingHandler):
  1232. """
  1233. A handler class which buffers logging records in memory, periodically
  1234. flushing them to a target handler. Flushing occurs whenever the buffer
  1235. is full, or when an event of a certain severity or greater is seen.
  1236. """
  1237. def __init__(self, capacity, flushLevel=logging.ERROR, target=None,
  1238. flushOnClose=True):
  1239. """
  1240. Initialize the handler with the buffer size, the level at which
  1241. flushing should occur and an optional target.
  1242. Note that without a target being set either here or via setTarget(),
  1243. a MemoryHandler is no use to anyone!
  1244. The ``flushOnClose`` argument is ``True`` for backward compatibility
  1245. reasons - the old behaviour is that when the handler is closed, the
  1246. buffer is flushed, even if the flush level hasn't been exceeded nor the
  1247. capacity exceeded. To prevent this, set ``flushOnClose`` to ``False``.
  1248. """
  1249. BufferingHandler.__init__(self, capacity)
  1250. self.flushLevel = flushLevel
  1251. self.target = target
  1252. # See Issue #26559 for why this has been added
  1253. self.flushOnClose = flushOnClose
  1254. def shouldFlush(self, record):
  1255. """
  1256. Check for buffer full or a record at the flushLevel or higher.
  1257. """
  1258. return (len(self.buffer) >= self.capacity) or \
  1259. (record.levelno >= self.flushLevel)
  1260. def setTarget(self, target):
  1261. """
  1262. Set the target handler for this handler.
  1263. """
  1264. self.acquire()
  1265. try:
  1266. self.target = target
  1267. finally:
  1268. self.release()
  1269. def flush(self):
  1270. """
  1271. For a MemoryHandler, flushing means just sending the buffered
  1272. records to the target, if there is one. Override if you want
  1273. different behaviour.
  1274. The record buffer is also cleared by this operation.
  1275. """
  1276. self.acquire()
  1277. try:
  1278. if self.target:
  1279. for record in self.buffer:
  1280. self.target.handle(record)
  1281. self.buffer.clear()
  1282. finally:
  1283. self.release()
  1284. def close(self):
  1285. """
  1286. Flush, if appropriately configured, set the target to None and lose the
  1287. buffer.
  1288. """
  1289. try:
  1290. if self.flushOnClose:
  1291. self.flush()
  1292. finally:
  1293. self.acquire()
  1294. try:
  1295. self.target = None
  1296. BufferingHandler.close(self)
  1297. finally:
  1298. self.release()
  1299. class QueueHandler(logging.Handler):
  1300. """
  1301. This handler sends events to a queue. Typically, it would be used together
  1302. with a multiprocessing Queue to centralise logging to file in one process
  1303. (in a multi-process application), so as to avoid file write contention
  1304. between processes.
  1305. This code is new in Python 3.2, but this class can be copy pasted into
  1306. user code for use with earlier Python versions.
  1307. """
  1308. def __init__(self, queue):
  1309. """
  1310. Initialise an instance, using the passed queue.
  1311. """
  1312. logging.Handler.__init__(self)
  1313. self.queue = queue
  1314. def enqueue(self, record):
  1315. """
  1316. Enqueue a record.
  1317. The base implementation uses put_nowait. You may want to override
  1318. this method if you want to use blocking, timeouts or custom queue
  1319. implementations.
  1320. """
  1321. self.queue.put_nowait(record)
  1322. def prepare(self, record):
  1323. """
  1324. Prepare a record for queuing. The object returned by this method is
  1325. enqueued.
  1326. The base implementation formats the record to merge the message and
  1327. arguments, and removes unpickleable items from the record in-place.
  1328. Specifically, it overwrites the record's `msg` and
  1329. `message` attributes with the merged message (obtained by
  1330. calling the handler's `format` method), and sets the `args`,
  1331. `exc_info` and `exc_text` attributes to None.
  1332. You might want to override this method if you want to convert
  1333. the record to a dict or JSON string, or send a modified copy
  1334. of the record while leaving the original intact.
  1335. """
  1336. # The format operation gets traceback text into record.exc_text
  1337. # (if there's exception data), and also returns the formatted
  1338. # message. We can then use this to replace the original
  1339. # msg + args, as these might be unpickleable. We also zap the
  1340. # exc_info, exc_text and stack_info attributes, as they are no longer
  1341. # needed and, if not None, will typically not be pickleable.
  1342. msg = self.format(record)
  1343. # bpo-35726: make copy of record to avoid affecting other handlers in the chain.
  1344. record = copy.copy(record)
  1345. record.message = msg
  1346. record.msg = msg
  1347. record.args = None
  1348. record.exc_info = None
  1349. record.exc_text = None
  1350. record.stack_info = None
  1351. return record
  1352. def emit(self, record):
  1353. """
  1354. Emit a record.
  1355. Writes the LogRecord to the queue, preparing it for pickling first.
  1356. """
  1357. try:
  1358. self.enqueue(self.prepare(record))
  1359. except Exception:
  1360. self.handleError(record)
  1361. class QueueListener(object):
  1362. """
  1363. This class implements an internal threaded listener which watches for
  1364. LogRecords being added to a queue, removes them and passes them to a
  1365. list of handlers for processing.
  1366. """
  1367. _sentinel = None
  1368. def __init__(self, queue, *handlers, respect_handler_level=False):
  1369. """
  1370. Initialise an instance with the specified queue and
  1371. handlers.
  1372. """
  1373. self.queue = queue
  1374. self.handlers = handlers
  1375. self._thread = None
  1376. self.respect_handler_level = respect_handler_level
  1377. def dequeue(self, block):
  1378. """
  1379. Dequeue a record and return it, optionally blocking.
  1380. The base implementation uses get. You may want to override this method
  1381. if you want to use timeouts or work with custom queue implementations.
  1382. """
  1383. return self.queue.get(block)
  1384. def start(self):
  1385. """
  1386. Start the listener.
  1387. This starts up a background thread to monitor the queue for
  1388. LogRecords to process.
  1389. """
  1390. self._thread = t = threading.Thread(target=self._monitor)
  1391. t.daemon = True
  1392. t.start()
  1393. def prepare(self, record):
  1394. """
  1395. Prepare a record for handling.
  1396. This method just returns the passed-in record. You may want to
  1397. override this method if you need to do any custom marshalling or
  1398. manipulation of the record before passing it to the handlers.
  1399. """
  1400. return record
  1401. def handle(self, record):
  1402. """
  1403. Handle a record.
  1404. This just loops through the handlers offering them the record
  1405. to handle.
  1406. """
  1407. record = self.prepare(record)
  1408. for handler in self.handlers:
  1409. if not self.respect_handler_level:
  1410. process = True
  1411. else:
  1412. process = record.levelno >= handler.level
  1413. if process:
  1414. handler.handle(record)
  1415. def _monitor(self):
  1416. """
  1417. Monitor the queue for records, and ask the handler
  1418. to deal with them.
  1419. This method runs on a separate, internal thread.
  1420. The thread will terminate if it sees a sentinel object in the queue.
  1421. """
  1422. q = self.queue
  1423. has_task_done = hasattr(q, 'task_done')
  1424. while True:
  1425. try:
  1426. record = self.dequeue(True)
  1427. if record is self._sentinel:
  1428. if has_task_done:
  1429. q.task_done()
  1430. break
  1431. self.handle(record)
  1432. if has_task_done:
  1433. q.task_done()
  1434. except queue.Empty:
  1435. break
  1436. def enqueue_sentinel(self):
  1437. """
  1438. This is used to enqueue the sentinel record.
  1439. The base implementation uses put_nowait. You may want to override this
  1440. method if you want to use timeouts or work with custom queue
  1441. implementations.
  1442. """
  1443. self.queue.put_nowait(self._sentinel)
  1444. def stop(self):
  1445. """
  1446. Stop the listener.
  1447. This asks the thread to terminate, and then waits for it to do so.
  1448. Note that if you don't call this before your application exits, there
  1449. may be some records still left on the queue, which won't be processed.
  1450. """
  1451. self.enqueue_sentinel()
  1452. self._thread.join()
  1453. self._thread = None