##// END OF EJS Templates
encoding: backport paranoid escaping from templatefilters.jsonescape()...
Yuya Nishihara -
r28069:b2d24c28 default
parent child Browse files
Show More
@@ -1,568 +1,572 b''
1 1 # encoding.py - character transcoding support for Mercurial
2 2 #
3 3 # Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import array
11 11 import locale
12 12 import os
13 13 import unicodedata
14 14
15 15 from . import (
16 16 error,
17 17 )
18 18
# These unicode characters are ignored by HFS+ (Apple Technote 1150,
# "Unicode Subtleties"), so we need to ignore them in some places for
# sanity.
_ignore = [unichr(int(x, 16)).encode("utf-8") for x in
    "200c 200d 200e 200f 202a 202b 202c 202d 202e "
    "206a 206b 206c 206d 206e 206f feff".split()]
# verify the next function will work: hfsignoreclean() assumes every
# ignored sequence's UTF-8 encoding starts with "\xe2" or "\xef"
assert set([i[0] for i in _ignore]) == set(["\xe2", "\xef"])
27 27
def hfsignoreclean(s):
    """Strip from s every codepoint that HFS+ ignores.

    >>> hfsignoreclean(u'.h\u200cg'.encode('utf-8'))
    '.hg'
    >>> hfsignoreclean(u'.h\ufeffg'.encode('utf-8'))
    '.hg'
    """
    # every ignored sequence starts with '\xe2' or '\xef'; skip the
    # replace loop entirely when neither lead byte appears in s
    if "\xe2" not in s and "\xef" not in s:
        return s
    for seq in _ignore:
        s = s.replace(seq, '')
    return s
40 40
41 41 def _getpreferredencoding():
42 42 '''
43 43 On darwin, getpreferredencoding ignores the locale environment and
44 44 always returns mac-roman. http://bugs.python.org/issue6202 fixes this
45 45 for Python 2.7 and up. This is the same corrected code for earlier
46 46 Python versions.
47 47
48 48 However, we can't use a version check for this method, as some distributions
49 49 patch Python to fix this. Instead, we use it as a 'fixer' for the mac-roman
50 50 encoding, as it is unlikely that this encoding is the actually expected.
51 51 '''
52 52 try:
53 53 locale.CODESET
54 54 except AttributeError:
55 55 # Fall back to parsing environment variables :-(
56 56 return locale.getdefaultlocale()[1]
57 57
58 58 oldloc = locale.setlocale(locale.LC_CTYPE)
59 59 locale.setlocale(locale.LC_CTYPE, "")
60 60 result = locale.nl_langinfo(locale.CODESET)
61 61 locale.setlocale(locale.LC_CTYPE, oldloc)
62 62
63 63 return result
64 64
# maps encoding names misreported by the locale machinery to a callable
# producing the name to use instead
_encodingfixers = {
    '646': lambda: 'ascii',
    'ANSI_X3.4-1968': lambda: 'ascii',
    'mac-roman': _getpreferredencoding
}

try:
    # HGENCODING in the environment overrides the locale-derived encoding
    encoding = os.environ.get("HGENCODING")
    if not encoding:
        encoding = locale.getpreferredencoding() or 'ascii'
    encoding = _encodingfixers.get(encoding, lambda: encoding)()
except locale.Error:
    encoding = 'ascii'
# codec error-handler mode for decoding local strings: 'strict' (default),
# 'replace' or 'ignore'
encodingmode = os.environ.get("HGENCODINGMODE", "strict")
# encoding assumed for repository data predating locale support (see tolocal)
fallbackencoding = 'ISO-8859-1'
80 80
class localstr(str):
    '''A local-encoding string that remembers its original UTF-8 form.

    Instances behave as the local-encoding byte string, but carry the
    UTF-8 representation alongside so round-trips back to UTF-8 are
    lossless.
    '''
    def __new__(cls, u, l):
        s = str.__new__(cls, l)
        s._utf8 = u
        return s
    def __hash__(self):
        # hash the UTF-8 form so distinct UTF-8 strings that collapse to
        # the same lossy local form do not collide in local string space
        return hash(self._utf8)
90 90
def tolocal(s):
    """
    Convert a string from internal UTF-8 to local encoding

    All internal strings should be UTF-8 but some repos before the
    implementation of locale support may contain latin1 or possibly
    other character sets. We attempt to decode everything strictly
    using UTF-8, then Latin-1, and failing that, we use UTF-8 and
    replace unknown characters.

    The localstr class is used to cache the known UTF-8 encoding of
    strings next to their local representation to allow lossless
    round-trip conversion back to UTF-8.

    >>> u = 'foo: \\xc3\\xa4' # utf-8
    >>> l = tolocal(u)
    >>> l
    'foo: ?'
    >>> fromlocal(l)
    'foo: \\xc3\\xa4'
    >>> u2 = 'foo: \\xc3\\xa1'
    >>> d = { l: 1, tolocal(u2): 2 }
    >>> len(d) # no collision
    2
    >>> 'foo: ?' in d
    False
    >>> l1 = 'foo: \\xe4' # historical latin1 fallback
    >>> l = tolocal(l1)
    >>> l
    'foo: ?'
    >>> fromlocal(l) # magically in utf-8
    'foo: \\xc3\\xa4'
    """

    try:
        try:
            # make sure string is actually stored in UTF-8
            u = s.decode('UTF-8')
            if encoding == 'UTF-8':
                # fast path
                return s
            r = u.encode(encoding, "replace")
            if u == r.decode(encoding):
                # r is a safe, non-lossy encoding of s
                return r
            # lossy conversion: remember the UTF-8 original alongside it
            return localstr(s, r)
        except UnicodeDecodeError:
            # we should only get here if we're looking at an ancient changeset
            # from before locale support; assume fallbackencoding (latin1)
            try:
                u = s.decode(fallbackencoding)
                r = u.encode(encoding, "replace")
                if u == r.decode(encoding):
                    # r is a safe, non-lossy encoding of s
                    return r
                return localstr(u.encode('UTF-8'), r)
            except UnicodeDecodeError:
                u = s.decode("utf-8", "replace") # last ditch
                return u.encode(encoding, "replace") # can't round-trip
    except LookupError as k:
        # 'encoding' names a codec Python doesn't know about
        raise error.Abort(k, hint="please check your locale settings")
151 151
def fromlocal(s):
    """
    Convert a string from the local character encoding to UTF-8

    We attempt to decode strings using the encoding mode set by
    HGENCODINGMODE, which defaults to 'strict'. In this mode, unknown
    characters will cause an error message. Other modes include
    'replace', which replaces unknown characters with a special
    Unicode character, and 'ignore', which drops the character.
    """

    # can we do a lossless round-trip? localstr caches the UTF-8 form
    if isinstance(s, localstr):
        return s._utf8

    try:
        return s.decode(encoding, encodingmode).encode("utf-8")
    except UnicodeDecodeError as inst:
        # show ~10 bytes of context on either side of the bad byte
        sub = s[max(0, inst.start - 10):inst.start + 10]
        raise error.Abort("decoding near '%s': %s!" % (sub, inst))
    except LookupError as k:
        raise error.Abort(k, hint="please check your locale settings")
174 174
# How to treat ambiguous-width characters. Set to 'wide' to treat as wide.
if os.environ.get("HGENCODINGAMBIGUOUS", "narrow") == "wide":
    wide = "WFA"
else:
    wide = "WF"
178 178
def colwidth(s):
    "Return the display column width of local-encoding string s"
    decoded = s.decode(encoding, 'replace')
    return ucolwidth(decoded)
182 182
def ucolwidth(d):
    "Return the display column width of a Unicode string"
    eaw = getattr(unicodedata, 'east_asian_width', None)
    if eaw is None:
        # no east-asian-width data available; assume one column per char
        return len(d)
    return sum([2 if eaw(ch) in wide else 1 for ch in d])
189 189
def getcols(s, start, c):
    '''Return the substring of s beginning at byte index start that
    occupies exactly c display columns (None if no prefix fits).'''
    for end in xrange(start + c, len(s)):
        candidate = s[start:end]
        if colwidth(candidate) == c:
            return candidate
197 197
def trim(s, width, ellipsis='', leftside=False):
    """Trim string 's' to at most 'width' columns (including 'ellipsis').

    If 'leftside' is True, left side of string 's' is trimmed.
    'ellipsis' is always placed at trimmed side.

    >>> ellipsis = '+++'
    >>> from . import encoding
    >>> encoding.encoding = 'utf-8'
    >>> t= '1234567890'
    >>> print trim(t, 12, ellipsis=ellipsis)
    1234567890
    >>> print trim(t, 10, ellipsis=ellipsis)
    1234567890
    >>> print trim(t, 8, ellipsis=ellipsis)
    12345+++
    >>> print trim(t, 8, ellipsis=ellipsis, leftside=True)
    +++67890
    >>> print trim(t, 8)
    12345678
    >>> print trim(t, 8, leftside=True)
    34567890
    >>> print trim(t, 3, ellipsis=ellipsis)
    +++
    >>> print trim(t, 1, ellipsis=ellipsis)
    +
    >>> u = u'\u3042\u3044\u3046\u3048\u304a' # 2 x 5 = 10 columns
    >>> t = u.encode(encoding.encoding)
    >>> print trim(t, 12, ellipsis=ellipsis)
    \xe3\x81\x82\xe3\x81\x84\xe3\x81\x86\xe3\x81\x88\xe3\x81\x8a
    >>> print trim(t, 10, ellipsis=ellipsis)
    \xe3\x81\x82\xe3\x81\x84\xe3\x81\x86\xe3\x81\x88\xe3\x81\x8a
    >>> print trim(t, 8, ellipsis=ellipsis)
    \xe3\x81\x82\xe3\x81\x84+++
    >>> print trim(t, 8, ellipsis=ellipsis, leftside=True)
    +++\xe3\x81\x88\xe3\x81\x8a
    >>> print trim(t, 5)
    \xe3\x81\x82\xe3\x81\x84
    >>> print trim(t, 5, leftside=True)
    \xe3\x81\x88\xe3\x81\x8a
    >>> print trim(t, 4, ellipsis=ellipsis)
    +++
    >>> print trim(t, 4, ellipsis=ellipsis, leftside=True)
    +++
    >>> t = '\x11\x22\x33\x44\x55\x66\x77\x88\x99\xaa' # invalid byte sequence
    >>> print trim(t, 12, ellipsis=ellipsis)
    \x11\x22\x33\x44\x55\x66\x77\x88\x99\xaa
    >>> print trim(t, 10, ellipsis=ellipsis)
    \x11\x22\x33\x44\x55\x66\x77\x88\x99\xaa
    >>> print trim(t, 8, ellipsis=ellipsis)
    \x11\x22\x33\x44\x55+++
    >>> print trim(t, 8, ellipsis=ellipsis, leftside=True)
    +++\x66\x77\x88\x99\xaa
    >>> print trim(t, 8)
    \x11\x22\x33\x44\x55\x66\x77\x88
    >>> print trim(t, 8, leftside=True)
    \x33\x44\x55\x66\x77\x88\x99\xaa
    >>> print trim(t, 3, ellipsis=ellipsis)
    +++
    >>> print trim(t, 1, ellipsis=ellipsis)
    +
    """
    try:
        u = s.decode(encoding)
    except UnicodeDecodeError:
        # undecodable input: trim by raw byte count instead of columns
        if len(s) <= width: # trimming is not needed
            return s
        width -= len(ellipsis)
        if width <= 0: # not enough room even for ellipsis
            return ellipsis[:width + len(ellipsis)]
        if leftside:
            return ellipsis + s[-width:]
        return s[:width] + ellipsis

    if ucolwidth(u) <= width: # trimming is not needed
        return s

    width -= len(ellipsis)
    if width <= 0: # not enough room even for ellipsis
        return ellipsis[:width + len(ellipsis)]

    if leftside:
        uslice = lambda i: u[i:]
        concat = lambda s: ellipsis + s
    else:
        uslice = lambda i: u[:-i]
        concat = lambda s: s + ellipsis
    # drop characters one at a time from the trimmed side until it fits
    for i in xrange(1, len(u)):
        usub = uslice(i)
        if ucolwidth(usub) <= width:
            return concat(usub.encode(encoding))
    return ellipsis # not enough room for multi-column characters
290 290
291 291 def _asciilower(s):
292 292 '''convert a string to lowercase if ASCII
293 293
294 294 Raises UnicodeDecodeError if non-ASCII characters are found.'''
295 295 s.decode('ascii')
296 296 return s.lower()
297 297
def asciilower(s):
    # delayed import breaks the cyclic dependency around "parsers" in the
    # pure Python build (util => i18n => encoding => parsers => util)
    from . import parsers
    fn = getattr(parsers, 'asciilower', _asciilower)
    # memoize: rebind the module-level name so later calls skip this setup
    global asciilower
    asciilower = fn
    return fn(s)
306 306
307 307 def _asciiupper(s):
308 308 '''convert a string to uppercase if ASCII
309 309
310 310 Raises UnicodeDecodeError if non-ASCII characters are found.'''
311 311 s.decode('ascii')
312 312 return s.upper()
313 313
def asciiupper(s):
    # delayed import breaks the cyclic dependency around "parsers" in the
    # pure Python build (util => i18n => encoding => parsers => util)
    from . import parsers
    fn = getattr(parsers, 'asciiupper', _asciiupper)
    # memoize: rebind the module-level name so later calls skip this setup
    global asciiupper
    asciiupper = fn
    return fn(s)
322 322
def lower(s):
    "best-effort encoding-aware case-folding of local string s"
    try:
        # pure-ASCII input takes the fast path
        return asciilower(s)
    except UnicodeDecodeError:
        pass
    try:
        if isinstance(s, localstr):
            # use the cached UTF-8 form rather than re-decoding locally
            u = s._utf8.decode("utf-8")
        else:
            u = s.decode(encoding, encodingmode)

        lu = u.lower()
        if u == lu:
            return s # preserve localstring
        return lu.encode(encoding)
    except UnicodeError:
        return s.lower() # we don't know how to fold this except in ASCII
    except LookupError as k:
        raise error.Abort(k, hint="please check your locale settings")
343 343
def upper(s):
    "Upper-case local string s, best-effort and encoding-aware"
    try:
        return asciiupper(s)
    except UnicodeDecodeError:
        # non-ASCII input: take the slower, encoding-aware path
        return upperfallback(s)
350 350
def upperfallback(s):
    # encoding-aware upper-casing for strings that are not pure ASCII
    try:
        if isinstance(s, localstr):
            # use the cached UTF-8 form rather than re-decoding locally
            u = s._utf8.decode("utf-8")
        else:
            u = s.decode(encoding, encodingmode)

        uu = u.upper()
        if u == uu:
            return s # preserve localstring
        return uu.encode(encoding)
    except UnicodeError:
        return s.upper() # we don't know how to fold this except in ASCII
    except LookupError as k:
        raise error.Abort(k, hint="please check your locale settings")
366 366
class normcasespecs(object):
    '''Constants describing what a platform's normcase does to ASCII.

    This is specified per platform, and should be consistent with what
    normcase on that platform actually does.

    lower: normcase lowercases ASCII strings
    upper: normcase uppercases ASCII strings
    other: the fallback function should always be called

    This should be kept in sync with normcase_spec in util.h.'''
    lower = -1
    upper = 1
    other = 0
381 381
# table mapping a byte value (the index) to its JSON string representation
_jsonmap = []
_jsonmap.extend("\\u%04x" % x for x in xrange(32))  # control characters
_jsonmap.extend(chr(x) for x in xrange(32, 127))    # printable ascii, as-is
_jsonmap.append('\\u007f')
# JSON's short escapes for common characters
_jsonmap[0x09] = '\\t'
_jsonmap[0x0a] = '\\n'
_jsonmap[0x22] = '\\"'
_jsonmap[0x5c] = '\\\\'
_jsonmap[0x08] = '\\b'
_jsonmap[0x0c] = '\\f'
_jsonmap[0x0d] = '\\r'
# the paranoid map is copied *before* the 128-255 entries are appended, so
# any non-ascii byte raises IndexError in jsonescape()'s fast path and gets
# \uXXXX-escaped via the UTF-16 slow path instead
_paranoidjsonmap = _jsonmap[:]
_paranoidjsonmap[0x3c] = '\\u003c' # '<' (e.g. escape "</script>")
_paranoidjsonmap[0x3e] = '\\u003e' # '>'
_jsonmap.extend(chr(x) for x in xrange(128, 256))
395 397
def jsonescape(s, paranoid=False):
    '''returns a string suitable for JSON

    JSON is problematic for us because it doesn't support non-Unicode
    bytes. To deal with this, we take the following approach:

    - localstr objects are converted back to UTF-8
    - valid UTF-8/ASCII strings are passed as-is
    - other strings are converted to UTF-8b surrogate encoding
    - apply JSON-specified string escaping

    (escapes are doubled in these tests)

    >>> jsonescape('this is a test')
    'this is a test'
    >>> jsonescape('escape characters: \\0 \\x0b \\x7f')
    'escape characters: \\\\u0000 \\\\u000b \\\\u007f'
    >>> jsonescape('escape characters: \\t \\n \\r \\" \\\\')
    'escape characters: \\\\t \\\\n \\\\r \\\\" \\\\\\\\'
    >>> jsonescape('a weird byte: \\xdd')
    'a weird byte: \\xed\\xb3\\x9d'
    >>> jsonescape('utf-8: caf\\xc3\\xa9')
    'utf-8: caf\\xc3\\xa9'
    >>> jsonescape('')
    ''

    If paranoid, non-ascii and common troublesome characters are also escaped.
    This is suitable for web output.

    >>> jsonescape('escape boundary: \\x7e \\x7f \\xc2\\x80', paranoid=True)
    'escape boundary: ~ \\\\u007f \\\\u0080'
    >>> jsonescape('a weird byte: \\xdd', paranoid=True)
    'a weird byte: \\\\udcdd'
    >>> jsonescape('utf-8: caf\\xc3\\xa9', paranoid=True)
    'utf-8: caf\\\\u00e9'
    >>> jsonescape('non-BMP: \\xf0\\x9d\\x84\\x9e', paranoid=True)
    'non-BMP: \\\\ud834\\\\udd1e'
    >>> jsonescape('<foo@example.org>', paranoid=True)
    '\\\\u003cfoo@example.org\\\\u003e'
    '''

    if paranoid:
        jm = _paranoidjsonmap
    else:
        jm = _jsonmap

    u8chars = toutf8b(s)
    try:
        return ''.join(jm[x] for x in bytearray(u8chars)) # fast path
    except IndexError:
        # a byte had no table entry (bytes >= 0x80 in paranoid mode);
        # fall through to per-code-unit escaping
        pass
    # non-BMP char is represented as UTF-16 surrogate pair
    u16codes = array.array('H', u8chars.decode('utf-8').encode('utf-16'))
    u16codes.pop(0) # drop BOM
    return ''.join(jm[x] if x < 128 else '\\u%04x' % x for x in u16codes)
449 453
# byte count to attempt decoding for a utf-8 sequence, indexed by the high
# nibble of its first byte; 0 means a plain ascii byte (returned directly
# by getutf8char)
_utf8len = [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 3, 4]
451 455
def getutf8char(s, pos):
    '''Return the full utf-8 character in s starting at byte index pos.

    Raises a UnicodeError if the given location does not start a valid
    utf-8 character.
    '''
    # the first nibble tells us how many bytes to attempt decoding
    nbytes = _utf8len[ord(s[pos]) >> 4]
    if nbytes == 0: # ascii
        return s[pos]

    char = s[pos:pos + nbytes]
    # validate with attempted decode; raises if malformed or truncated
    char.decode("utf-8")
    return char
468 472
def toutf8b(s):
    '''convert a local, possibly-binary string into UTF-8b

    This is intended as a generic method to preserve data when working
    with schemes like JSON and XML that have no provision for
    arbitrary byte strings. As Mercurial often doesn't know
    what encoding data is in, we use so-called UTF-8b.

    If a string is already valid UTF-8 (or ASCII), it passes unmodified.
    Otherwise, unsupported bytes are mapped to UTF-16 surrogate range,
    uDC00-uDCFF.

    Principles of operation:

    - ASCII and UTF-8 data successfully round-trips and is understood
      by Unicode-oriented clients
    - filenames and file contents in arbitrary other encodings can
      be round-tripped or recovered by clueful clients
    - local strings that have a cached known UTF-8 encoding (aka
      localstr) get sent as UTF-8 so Unicode-oriented clients get the
      Unicode data they want
    - because we must preserve UTF-8 bytestring in places such as
      filenames, metadata can't be roundtripped without help

    (Note: "UTF-8b" often refers to decoding a mix of valid UTF-8 and
    arbitrary bytes into an internal Unicode format that can be
    re-encoded back into the original. Here we are exposing the
    internal surrogate encoding as a UTF-8 string.)
    '''

    # "\xed" is the UTF-8 lead byte of the surrogate range; its absence
    # means no re-escaping is needed and the fast paths below are safe
    if "\xed" not in s:
        if isinstance(s, localstr):
            return s._utf8
        try:
            s.decode('utf-8')
            return s
        except UnicodeDecodeError:
            pass

    # slow path: walk the string character by character
    r = ""
    pos = 0
    l = len(s)
    while pos < l:
        try:
            c = getutf8char(s, pos)
            if "\xed\xb0\x80" <= c <= "\xed\xb3\xbf":
                # have to re-escape existing U+DCxx characters
                c = unichr(0xdc00 + ord(s[pos])).encode('utf-8')
                pos += 1
            else:
                pos += len(c)
        except UnicodeDecodeError:
            # invalid byte: map it into the U+DCxx surrogate range
            c = unichr(0xdc00 + ord(s[pos])).encode('utf-8')
            pos += 1
        r += c
    return r
525 529
def fromutf8b(s):
    '''Given a UTF-8b string, return a local, possibly-binary string.

    This returns the original binary string. It is a round-trip process
    for strings like filenames, but metadata that was passed through
    tolocal will remain in UTF-8.

    >>> roundtrip = lambda x: fromutf8b(toutf8b(x)) == x
    >>> m = "\\xc3\\xa9\\x99abcd"
    >>> toutf8b(m)
    '\\xc3\\xa9\\xed\\xb2\\x99abcd'
    >>> roundtrip(m)
    True
    >>> roundtrip("\\xc2\\xc2\\x80")
    True
    >>> roundtrip("\\xef\\xbf\\xbd")
    True
    >>> roundtrip("\\xef\\xef\\xbf\\xbd")
    True
    >>> roundtrip("\\xf1\\x80\\x80\\x80\\x80")
    True
    '''

    # fast path - look for uDxxx prefixes in s ("\xed" is the UTF-8 lead
    # byte of the surrogate range; no "\xed" means nothing to unescape)
    if "\xed" not in s:
        return s

    # We could do this with the unicode type but some Python builds
    # use UTF-16 internally (issue5031) which causes non-BMP code
    # points to be escaped. Instead, we use our handy getutf8char
    # helper again to walk the string without "decoding" it.

    r = ""
    pos = 0
    l = len(s)
    while pos < l:
        c = getutf8char(s, pos)
        pos += len(c)
        # unescape U+DCxx characters back to the original raw byte
        if "\xed\xb0\x80" <= c <= "\xed\xb3\xbf":
            c = chr(ord(c.decode("utf-8")) & 0xff)
        r += c
    return r
General Comments 0
You need to be logged in to leave comments. Login now