py3: slice over bytes to prevent getting ascii values
Pulkit Goyal
r32153:6f173560 default
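
The change is motivated by a Python 3 behavior difference: indexing a bytes object returns an integer (the byte's ordinal value), while a one-byte slice returns a bytes object. A minimal doctest-style sketch of the difference (illustrative only, not part of the patch):

>>> b'abc'[0]           # Python 3: indexing bytes yields an int
97
>>> b'abc'[0:1]         # slicing yields a length-1 bytes object
b'a'
>>> b'abc'[0] == b'a'   # so comparisons against one-byte literals go wrong
False
>>> b'abc'[0:1] == b'a'
True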
@@ -1,537 +1,537 @@
1 1 # changelog.py - changelog class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import collections
11 11
12 12 from .i18n import _
13 13 from .node import (
14 14 bin,
15 15 hex,
16 16 nullid,
17 17 )
18 18
19 19 from . import (
20 20 encoding,
21 21 error,
22 22 revlog,
23 23 util,
24 24 )
25 25
26 26 _defaultextra = {'branch': 'default'}
27 27
28 28 def _string_escape(text):
29 29 """
30 30 >>> d = {'nl': chr(10), 'bs': chr(92), 'cr': chr(13), 'nul': chr(0)}
31 31 >>> s = "ab%(nl)scd%(bs)s%(bs)sn%(nul)sab%(cr)scd%(bs)s%(nl)s" % d
32 32 >>> s
33 33 'ab\\ncd\\\\\\\\n\\x00ab\\rcd\\\\\\n'
34 34 >>> res = _string_escape(s)
35 35 >>> s == util.unescapestr(res)
36 36 True
37 37 """
38 38 # subset of the string_escape codec
39 39 text = text.replace('\\', '\\\\').replace('\n', '\\n').replace('\r', '\\r')
40 40 return text.replace('\0', '\\0')
41 41
42 42 def decodeextra(text):
43 43 """
44 44 >>> sorted(decodeextra(encodeextra({'foo': 'bar', 'baz': chr(0) + '2'})
45 45 ... ).iteritems())
46 46 [('baz', '\\x002'), ('branch', 'default'), ('foo', 'bar')]
47 47 >>> sorted(decodeextra(encodeextra({'foo': 'bar',
48 48 ... 'baz': chr(92) + chr(0) + '2'})
49 49 ... ).iteritems())
50 50 [('baz', '\\\\\\x002'), ('branch', 'default'), ('foo', 'bar')]
51 51 """
52 52 extra = _defaultextra.copy()
53 53 for l in text.split('\0'):
54 54 if l:
55 55 if '\\0' in l:
56 56 # fix up \0 without getting into trouble with \\0
57 57 l = l.replace('\\\\', '\\\\\n')
58 58 l = l.replace('\\0', '\0')
59 59 l = l.replace('\n', '')
60 60 k, v = util.unescapestr(l).split(':', 1)
61 61 extra[k] = v
62 62 return extra
63 63
64 64 def encodeextra(d):
65 65 # keys must be sorted to produce a deterministic changelog entry
66 66 items = [_string_escape('%s:%s' % (k, d[k])) for k in sorted(d)]
67 67 return "\0".join(items)
68 68
69 69 def stripdesc(desc):
70 70 """strip trailing whitespace and leading and trailing empty lines"""
71 71 return '\n'.join([l.rstrip() for l in desc.splitlines()]).strip('\n')
72 72
73 73 class appender(object):
74 74 '''the changelog index must be updated last on disk, so we use this class
75 75 to delay writes to it'''
76 76 def __init__(self, vfs, name, mode, buf):
77 77 self.data = buf
78 78 fp = vfs(name, mode)
79 79 self.fp = fp
80 80 self.offset = fp.tell()
81 81 self.size = vfs.fstat(fp).st_size
82 82 self._end = self.size
83 83
84 84 def end(self):
85 85 return self._end
86 86 def tell(self):
87 87 return self.offset
88 88 def flush(self):
89 89 pass
90 90 def close(self):
91 91 self.fp.close()
92 92
93 93 def seek(self, offset, whence=0):
94 94 '''virtual file offset spans real file and data'''
95 95 if whence == 0:
96 96 self.offset = offset
97 97 elif whence == 1:
98 98 self.offset += offset
99 99 elif whence == 2:
100 100 self.offset = self.end() + offset
101 101 if self.offset < self.size:
102 102 self.fp.seek(self.offset)
103 103
104 104 def read(self, count=-1):
105 105 '''only trick here is reads that span real file and data'''
106 106 ret = ""
107 107 if self.offset < self.size:
108 108 s = self.fp.read(count)
109 109 ret = s
110 110 self.offset += len(s)
111 111 if count > 0:
112 112 count -= len(s)
113 113 if count != 0:
114 114 doff = self.offset - self.size
115 115 self.data.insert(0, "".join(self.data))
116 116 del self.data[1:]
117 117 s = self.data[0][doff:doff + count]
118 118 self.offset += len(s)
119 119 ret += s
120 120 return ret
121 121
122 122 def write(self, s):
123 123 self.data.append(bytes(s))
124 124 self.offset += len(s)
125 125 self._end += len(s)
126 126
127 127 def _divertopener(opener, target):
128 128 """build an opener that writes in 'target.a' instead of 'target'"""
129 129 def _divert(name, mode='r', checkambig=False):
130 130 if name != target:
131 131 return opener(name, mode)
132 132 return opener(name + ".a", mode)
133 133 return _divert
134 134
135 135 def _delayopener(opener, target, buf):
136 136 """build an opener that stores chunks in 'buf' instead of 'target'"""
137 137 def _delay(name, mode='r', checkambig=False):
138 138 if name != target:
139 139 return opener(name, mode)
140 140 return appender(opener, name, mode, buf)
141 141 return _delay
142 142
143 143 _changelogrevision = collections.namedtuple(u'changelogrevision',
144 144 (u'manifest', u'user', u'date',
145 145 u'files', u'description',
146 146 u'extra'))
147 147
148 148 class changelogrevision(object):
149 149 """Holds results of a parsed changelog revision.
150 150
151 151 Changelog revisions consist of multiple pieces of data, including
152 152 the manifest node, user, and date. This object exposes a view into
153 153 the parsed object.
154 154 """
155 155
156 156 __slots__ = (
157 157 u'_offsets',
158 158 u'_text',
159 159 )
160 160
161 161 def __new__(cls, text):
162 162 if not text:
163 163 return _changelogrevision(
164 164 manifest=nullid,
165 165 user='',
166 166 date=(0, 0),
167 167 files=[],
168 168 description='',
169 169 extra=_defaultextra,
170 170 )
171 171
172 172 self = super(changelogrevision, cls).__new__(cls)
173 173 # We could return here and implement the following as an __init__.
174 174 # But doing it here is equivalent and saves an extra function call.
175 175
176 176 # format used:
177 177 # nodeid\n : manifest node in ascii
178 178 # user\n : user, no \n or \r allowed
179 179 # time tz extra\n : date (time is int or float, timezone is int)
180 180 # : extra is metadata, encoded and separated by '\0'
181 181 # : older versions ignore it
182 182 # files\n\n : files modified by the cset, no \n or \r allowed
183 183 # (.*) : comment (free text, ideally utf-8)
184 184 #
185 185 # changelog v0 doesn't use extra
186 186
187 187 nl1 = text.index('\n')
188 188 nl2 = text.index('\n', nl1 + 1)
189 189 nl3 = text.index('\n', nl2 + 1)
190 190
191 191 # The list of files may be empty. Which means nl3 is the first of the
192 192 # double newline that precedes the description.
193 if text[nl3 + 1] == '\n':
193 if text[nl3 + 1:nl3 + 2] == '\n':
194 194 doublenl = nl3
195 195 else:
196 196 doublenl = text.index('\n\n', nl3 + 1)
197 197
198 198 self._offsets = (nl1, nl2, nl3, doublenl)
199 199 self._text = text
200 200
201 201 return self
202 202
203 203 @property
204 204 def manifest(self):
205 205 return bin(self._text[0:self._offsets[0]])
206 206
207 207 @property
208 208 def user(self):
209 209 off = self._offsets
210 210 return encoding.tolocal(self._text[off[0] + 1:off[1]])
211 211
212 212 @property
213 213 def _rawdate(self):
214 214 off = self._offsets
215 215 dateextra = self._text[off[1] + 1:off[2]]
216 216 return dateextra.split(' ', 2)[0:2]
217 217
218 218 @property
219 219 def _rawextra(self):
220 220 off = self._offsets
221 221 dateextra = self._text[off[1] + 1:off[2]]
222 222 fields = dateextra.split(' ', 2)
223 223 if len(fields) != 3:
224 224 return None
225 225
226 226 return fields[2]
227 227
228 228 @property
229 229 def date(self):
230 230 raw = self._rawdate
231 231 time = float(raw[0])
232 232 # Various tools did silly things with the timezone.
233 233 try:
234 234 timezone = int(raw[1])
235 235 except ValueError:
236 236 timezone = 0
237 237
238 238 return time, timezone
239 239
240 240 @property
241 241 def extra(self):
242 242 raw = self._rawextra
243 243 if raw is None:
244 244 return _defaultextra
245 245
246 246 return decodeextra(raw)
247 247
248 248 @property
249 249 def files(self):
250 250 off = self._offsets
251 251 if off[2] == off[3]:
252 252 return []
253 253
254 254 return self._text[off[2] + 1:off[3]].split('\n')
255 255
256 256 @property
257 257 def description(self):
258 258 return encoding.tolocal(self._text[self._offsets[3] + 2:])
259 259
260 260 class changelog(revlog.revlog):
261 261 def __init__(self, opener):
262 262 revlog.revlog.__init__(self, opener, "00changelog.i",
263 263 checkambig=True)
264 264 if self._initempty:
265 265 # changelogs don't benefit from generaldelta
266 266 self.version &= ~revlog.REVLOGGENERALDELTA
267 267 self._generaldelta = False
268 268
269 269 # Delta chains for changelogs tend to be very small because entries
270 270 # tend to be small and don't delta well with each other. So disable delta
271 271 # chains.
272 272 self.storedeltachains = False
273 273
274 274 self._realopener = opener
275 275 self._delayed = False
276 276 self._delaybuf = None
277 277 self._divert = False
278 278 self.filteredrevs = frozenset()
279 279
280 280 def tip(self):
281 281 """filtered version of revlog.tip"""
282 282 for i in xrange(len(self) -1, -2, -1):
283 283 if i not in self.filteredrevs:
284 284 return self.node(i)
285 285
286 286 def __contains__(self, rev):
287 287 """filtered version of revlog.__contains__"""
288 288 return (0 <= rev < len(self)
289 289 and rev not in self.filteredrevs)
290 290
291 291 def __iter__(self):
292 292 """filtered version of revlog.__iter__"""
293 293 if len(self.filteredrevs) == 0:
294 294 return revlog.revlog.__iter__(self)
295 295
296 296 def filterediter():
297 297 for i in xrange(len(self)):
298 298 if i not in self.filteredrevs:
299 299 yield i
300 300
301 301 return filterediter()
302 302
303 303 def revs(self, start=0, stop=None):
304 304 """filtered version of revlog.revs"""
305 305 for i in super(changelog, self).revs(start, stop):
306 306 if i not in self.filteredrevs:
307 307 yield i
308 308
309 309 @util.propertycache
310 310 def nodemap(self):
311 311 # XXX need filtering too
312 312 self.rev(self.node(0))
313 313 return self._nodecache
314 314
315 315 def reachableroots(self, minroot, heads, roots, includepath=False):
316 316 return self.index.reachableroots2(minroot, heads, roots, includepath)
317 317
318 318 def headrevs(self):
319 319 if self.filteredrevs:
320 320 try:
321 321 return self.index.headrevsfiltered(self.filteredrevs)
322 322 # AttributeError covers non-c-extension environments and
323 323 # old c extensions without filter handling.
324 324 except AttributeError:
325 325 return self._headrevs()
326 326
327 327 return super(changelog, self).headrevs()
328 328
329 329 def strip(self, *args, **kwargs):
330 330 # XXX make something better than assert
331 331 # We can't expect proper strip behavior if we are filtered.
332 332 assert not self.filteredrevs
333 333 super(changelog, self).strip(*args, **kwargs)
334 334
335 335 def rev(self, node):
336 336 """filtered version of revlog.rev"""
337 337 r = super(changelog, self).rev(node)
338 338 if r in self.filteredrevs:
339 339 raise error.FilteredLookupError(hex(node), self.indexfile,
340 340 _('filtered node'))
341 341 return r
342 342
343 343 def node(self, rev):
344 344 """filtered version of revlog.node"""
345 345 if rev in self.filteredrevs:
346 346 raise error.FilteredIndexError(rev)
347 347 return super(changelog, self).node(rev)
348 348
349 349 def linkrev(self, rev):
350 350 """filtered version of revlog.linkrev"""
351 351 if rev in self.filteredrevs:
352 352 raise error.FilteredIndexError(rev)
353 353 return super(changelog, self).linkrev(rev)
354 354
355 355 def parentrevs(self, rev):
356 356 """filtered version of revlog.parentrevs"""
357 357 if rev in self.filteredrevs:
358 358 raise error.FilteredIndexError(rev)
359 359 return super(changelog, self).parentrevs(rev)
360 360
361 361 def flags(self, rev):
362 362 """filtered version of revlog.flags"""
363 363 if rev in self.filteredrevs:
364 364 raise error.FilteredIndexError(rev)
365 365 return super(changelog, self).flags(rev)
366 366
367 367 def delayupdate(self, tr):
368 368 "delay visibility of index updates to other readers"
369 369
370 370 if not self._delayed:
371 371 if len(self) == 0:
372 372 self._divert = True
373 373 if self._realopener.exists(self.indexfile + '.a'):
374 374 self._realopener.unlink(self.indexfile + '.a')
375 375 self.opener = _divertopener(self._realopener, self.indexfile)
376 376 else:
377 377 self._delaybuf = []
378 378 self.opener = _delayopener(self._realopener, self.indexfile,
379 379 self._delaybuf)
380 380 self._delayed = True
381 381 tr.addpending('cl-%i' % id(self), self._writepending)
382 382 tr.addfinalize('cl-%i' % id(self), self._finalize)
383 383
384 384 def _finalize(self, tr):
385 385 "finalize index updates"
386 386 self._delayed = False
387 387 self.opener = self._realopener
388 388 # move redirected index data back into place
389 389 if self._divert:
390 390 assert not self._delaybuf
391 391 tmpname = self.indexfile + ".a"
392 392 nfile = self.opener.open(tmpname)
393 393 nfile.close()
394 394 self.opener.rename(tmpname, self.indexfile, checkambig=True)
395 395 elif self._delaybuf:
396 396 fp = self.opener(self.indexfile, 'a', checkambig=True)
397 397 fp.write("".join(self._delaybuf))
398 398 fp.close()
399 399 self._delaybuf = None
400 400 self._divert = False
401 401 # split when we're done
402 402 self.checkinlinesize(tr)
403 403
404 404 def readpending(self, file):
405 405 """read index data from a "pending" file
406 406
407 407 During a transaction, the actual changeset data is already stored in the
408 408 main file, but not yet finalized in the on-disk index. Instead, a
409 409 "pending" index is written by the transaction logic. If this function
410 410 is running, we are likely in a subprocess invoked in a hook. The
411 411 subprocess is informed that it is within a transaction and needs to
412 412 access its content.
413 413
414 414 This function will read all the index data out of the pending file and
415 415 overwrite the main index."""
416 416
417 417 if not self.opener.exists(file):
418 418 return # no pending data for changelog
419 419 r = revlog.revlog(self.opener, file)
420 420 self.index = r.index
421 421 self.nodemap = r.nodemap
422 422 self._nodecache = r._nodecache
423 423 self._chunkcache = r._chunkcache
424 424
425 425 def _writepending(self, tr):
426 426 "create a file containing the unfinalized state for pretxnchangegroup"
427 427 if self._delaybuf:
428 428 # make a temporary copy of the index
429 429 fp1 = self._realopener(self.indexfile)
430 430 pendingfilename = self.indexfile + ".a"
431 431 # register as a temp file to ensure cleanup on failure
432 432 tr.registertmp(pendingfilename)
433 433 # write existing data
434 434 fp2 = self._realopener(pendingfilename, "w")
435 435 fp2.write(fp1.read())
436 436 # add pending data
437 437 fp2.write("".join(self._delaybuf))
438 438 fp2.close()
439 439 # switch modes so finalize can simply rename
440 440 self._delaybuf = None
441 441 self._divert = True
442 442 self.opener = _divertopener(self._realopener, self.indexfile)
443 443
444 444 if self._divert:
445 445 return True
446 446
447 447 return False
448 448
449 449 def checkinlinesize(self, tr, fp=None):
450 450 if not self._delayed:
451 451 revlog.revlog.checkinlinesize(self, tr, fp)
452 452
453 453 def read(self, node):
454 454 """Obtain data from a parsed changelog revision.
455 455
456 456 Returns a 6-tuple of:
457 457
458 458 - manifest node in binary
459 459 - author/user as a localstr
460 460 - date as a 2-tuple of (time, timezone)
461 461 - list of files
462 462 - commit message as a localstr
463 463 - dict of extra metadata
464 464
465 465 Unless you need to access all fields, consider calling
466 466 ``changelogrevision`` instead, as it is faster for partial object
467 467 access.
468 468 """
469 469 c = changelogrevision(self.revision(node))
470 470 return (
471 471 c.manifest,
472 472 c.user,
473 473 c.date,
474 474 c.files,
475 475 c.description,
476 476 c.extra
477 477 )
478 478
479 479 def changelogrevision(self, nodeorrev):
480 480 """Obtain a ``changelogrevision`` for a node or revision."""
481 481 return changelogrevision(self.revision(nodeorrev))
482 482
483 483 def readfiles(self, node):
484 484 """
485 485 short version of read that only returns the files modified by the cset
486 486 """
487 487 text = self.revision(node)
488 488 if not text:
489 489 return []
490 490 last = text.index("\n\n")
491 491 l = text[:last].split('\n')
492 492 return l[3:]
493 493
494 494 def add(self, manifest, files, desc, transaction, p1, p2,
495 495 user, date=None, extra=None):
496 496 # Convert to UTF-8 encoded bytestrings as the very first
497 497 # thing: calling any method on a localstr object will turn it
498 498 # into a str object and the cached UTF-8 string is thus lost.
499 499 user, desc = encoding.fromlocal(user), encoding.fromlocal(desc)
500 500
501 501 user = user.strip()
502 502 # An empty username or a username with a "\n" will make the
503 503 # revision text contain two "\n\n" sequences -> corrupt
504 504 # repository since read cannot unpack the revision.
505 505 if not user:
506 506 raise error.RevlogError(_("empty username"))
507 507 if "\n" in user:
508 508 raise error.RevlogError(_("username %s contains a newline")
509 509 % repr(user))
510 510
511 511 desc = stripdesc(desc)
512 512
513 513 if date:
514 514 parseddate = "%d %d" % util.parsedate(date)
515 515 else:
516 516 parseddate = "%d %d" % util.makedate()
517 517 if extra:
518 518 branch = extra.get("branch")
519 519 if branch in ("default", ""):
520 520 del extra["branch"]
521 521 elif branch in (".", "null", "tip"):
522 522 raise error.RevlogError(_('the name \'%s\' is reserved')
523 523 % branch)
524 524 if extra:
525 525 extra = encodeextra(extra)
526 526 parseddate = "%s %s" % (parseddate, extra)
527 527 l = [hex(manifest), user, parseddate] + sorted(files) + ["", desc]
528 528 text = "\n".join(l)
529 529 return self.addrevision(text, transaction, len(self), p1, p2)
530 530
531 531 def branchinfo(self, rev):
532 532 """return the branch name and open/close state of a revision
533 533
534 534 This function exists because creating a changectx object
535 535 just to access this is costly."""
536 536 extra = self.read(rev)[5]
537 537 return encoding.tolocal(extra.get("branch")), 'close' in extra
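
Both hunks in this changeset apply the same pattern: changelogrevision.__new__ above switches `text[nl3 + 1] == '\n'` to `text[nl3 + 1:nl3 + 2] == '\n'`, and makefilename in the cmdutil.py hunk below switches `c = pat[i]` to `c = pat[i:i + 1]`, so the newline comparison and the expander dict lookup keep working when `text` and `pat` are bytes on Python 3. A doctest-style sketch of the first fix, using a hypothetical `text` value for illustration only:

>>> text = b'n\nu\nd\n\ndesc'
>>> nl3 = text.index(b'\n', 4)     # third newline, as located in __new__
>>> text[nl3 + 1]                  # Python 3: an int, never equal to a newline literal
10
>>> text[nl3 + 1:nl3 + 2]          # the patched one-byte slice compares cleanly
b'\n'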
@@ -1,3486 +1,3486 @@
1 1 # cmdutil.py - help for command processing in mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import itertools
12 12 import os
13 13 import re
14 14 import tempfile
15 15
16 16 from .i18n import _
17 17 from .node import (
18 18 bin,
19 19 hex,
20 20 nullid,
21 21 nullrev,
22 22 short,
23 23 )
24 24
25 25 from . import (
26 26 bookmarks,
27 27 changelog,
28 28 copies,
29 29 crecord as crecordmod,
30 30 encoding,
31 31 error,
32 32 formatter,
33 33 graphmod,
34 34 lock as lockmod,
35 35 match as matchmod,
36 36 obsolete,
37 37 patch,
38 38 pathutil,
39 39 phases,
40 40 pycompat,
41 41 repair,
42 42 revlog,
43 43 revset,
44 44 scmutil,
45 45 smartset,
46 46 templatekw,
47 47 templater,
48 48 util,
49 49 vfs as vfsmod,
50 50 )
51 51 stringio = util.stringio
52 52
53 53 # special string such that everything below this line will be ignored in the
54 54 # editor text
55 55 _linebelow = "^HG: ------------------------ >8 ------------------------$"
56 56
57 57 def ishunk(x):
58 58 hunkclasses = (crecordmod.uihunk, patch.recordhunk)
59 59 return isinstance(x, hunkclasses)
60 60
61 61 def newandmodified(chunks, originalchunks):
62 62 newlyaddedandmodifiedfiles = set()
63 63 for chunk in chunks:
64 64 if ishunk(chunk) and chunk.header.isnewfile() and chunk not in \
65 65 originalchunks:
66 66 newlyaddedandmodifiedfiles.add(chunk.header.filename())
67 67 return newlyaddedandmodifiedfiles
68 68
69 69 def parsealiases(cmd):
70 70 return cmd.lstrip("^").split("|")
71 71
72 72 def setupwrapcolorwrite(ui):
73 73 # wrap ui.write so diff output can be labeled/colorized
74 74 def wrapwrite(orig, *args, **kw):
75 75 label = kw.pop('label', '')
76 76 for chunk, l in patch.difflabel(lambda: args):
77 77 orig(chunk, label=label + l)
78 78
79 79 oldwrite = ui.write
80 80 def wrap(*args, **kwargs):
81 81 return wrapwrite(oldwrite, *args, **kwargs)
82 82 setattr(ui, 'write', wrap)
83 83 return oldwrite
84 84
85 85 def filterchunks(ui, originalhunks, usecurses, testfile, operation=None):
86 86 if usecurses:
87 87 if testfile:
88 88 recordfn = crecordmod.testdecorator(testfile,
89 89 crecordmod.testchunkselector)
90 90 else:
91 91 recordfn = crecordmod.chunkselector
92 92
93 93 return crecordmod.filterpatch(ui, originalhunks, recordfn, operation)
94 94
95 95 else:
96 96 return patch.filterpatch(ui, originalhunks, operation)
97 97
98 98 def recordfilter(ui, originalhunks, operation=None):
99 99 """ Prompts the user to filter the originalhunks and return a list of
100 100 selected hunks.
101 101 *operation* is used to build ui messages to indicate to the user what
102 102 kind of filtering they are doing: reverting, committing, shelving, etc.
103 103 (see patch.filterpatch).
104 104 """
105 105 usecurses = crecordmod.checkcurses(ui)
106 106 testfile = ui.config('experimental', 'crecordtest', None)
107 107 oldwrite = setupwrapcolorwrite(ui)
108 108 try:
109 109 newchunks, newopts = filterchunks(ui, originalhunks, usecurses,
110 110 testfile, operation)
111 111 finally:
112 112 ui.write = oldwrite
113 113 return newchunks, newopts
114 114
115 115 def dorecord(ui, repo, commitfunc, cmdsuggest, backupall,
116 116 filterfn, *pats, **opts):
117 117 from . import merge as mergemod
118 118 opts = pycompat.byteskwargs(opts)
119 119 if not ui.interactive():
120 120 if cmdsuggest:
121 121 msg = _('running non-interactively, use %s instead') % cmdsuggest
122 122 else:
123 123 msg = _('running non-interactively')
124 124 raise error.Abort(msg)
125 125
126 126 # make sure username is set before going interactive
127 127 if not opts.get('user'):
128 128 ui.username() # raise exception, username not provided
129 129
130 130 def recordfunc(ui, repo, message, match, opts):
131 131 """This is generic record driver.
132 132
133 133 Its job is to interactively filter local changes, and
134 134 accordingly prepare working directory into a state in which the
135 135 job can be delegated to a non-interactive commit command such as
136 136 'commit' or 'qrefresh'.
137 137
138 138 After the actual job is done by non-interactive command, the
139 139 working directory is restored to its original state.
140 140
141 141 In the end we'll record interesting changes, and everything else
142 142 will be left in place, so the user can continue working.
143 143 """
144 144
145 145 checkunfinished(repo, commit=True)
146 146 wctx = repo[None]
147 147 merge = len(wctx.parents()) > 1
148 148 if merge:
149 149 raise error.Abort(_('cannot partially commit a merge '
150 150 '(use "hg commit" instead)'))
151 151
152 152 def fail(f, msg):
153 153 raise error.Abort('%s: %s' % (f, msg))
154 154
155 155 force = opts.get('force')
156 156 if not force:
157 157 vdirs = []
158 158 match.explicitdir = vdirs.append
159 159 match.bad = fail
160 160
161 161 status = repo.status(match=match)
162 162 if not force:
163 163 repo.checkcommitpatterns(wctx, vdirs, match, status, fail)
164 164 diffopts = patch.difffeatureopts(ui, opts=opts, whitespace=True)
165 165 diffopts.nodates = True
166 166 diffopts.git = True
167 167 diffopts.showfunc = True
168 168 originaldiff = patch.diff(repo, changes=status, opts=diffopts)
169 169 originalchunks = patch.parsepatch(originaldiff)
170 170
171 171 # 1. filter patch, since we are intending to apply subset of it
172 172 try:
173 173 chunks, newopts = filterfn(ui, originalchunks)
174 174 except patch.PatchError as err:
175 175 raise error.Abort(_('error parsing patch: %s') % err)
176 176 opts.update(newopts)
177 177
178 178 # We need to keep a backup of files that have been newly added and
179 179 # modified during the recording process because there is a previous
180 180 # version without the edit in the workdir
181 181 newlyaddedandmodifiedfiles = newandmodified(chunks, originalchunks)
182 182 contenders = set()
183 183 for h in chunks:
184 184 try:
185 185 contenders.update(set(h.files()))
186 186 except AttributeError:
187 187 pass
188 188
189 189 changed = status.modified + status.added + status.removed
190 190 newfiles = [f for f in changed if f in contenders]
191 191 if not newfiles:
192 192 ui.status(_('no changes to record\n'))
193 193 return 0
194 194
195 195 modified = set(status.modified)
196 196
197 197 # 2. backup changed files, so we can restore them in the end
198 198
199 199 if backupall:
200 200 tobackup = changed
201 201 else:
202 202 tobackup = [f for f in newfiles if f in modified or f in \
203 203 newlyaddedandmodifiedfiles]
204 204 backups = {}
205 205 if tobackup:
206 206 backupdir = repo.vfs.join('record-backups')
207 207 try:
208 208 os.mkdir(backupdir)
209 209 except OSError as err:
210 210 if err.errno != errno.EEXIST:
211 211 raise
212 212 try:
213 213 # backup continues
214 214 for f in tobackup:
215 215 fd, tmpname = tempfile.mkstemp(prefix=f.replace('/', '_')+'.',
216 216 dir=backupdir)
217 217 os.close(fd)
218 218 ui.debug('backup %r as %r\n' % (f, tmpname))
219 219 util.copyfile(repo.wjoin(f), tmpname, copystat=True)
220 220 backups[f] = tmpname
221 221
222 222 fp = stringio()
223 223 for c in chunks:
224 224 fname = c.filename()
225 225 if fname in backups:
226 226 c.write(fp)
227 227 dopatch = fp.tell()
228 228 fp.seek(0)
229 229
230 230 # 2.5 optionally review / modify patch in text editor
231 231 if opts.get('review', False):
232 232 patchtext = (crecordmod.diffhelptext
233 233 + crecordmod.patchhelptext
234 234 + fp.read())
235 235 reviewedpatch = ui.edit(patchtext, "",
236 236 extra={"suffix": ".diff"},
237 237 repopath=repo.path)
238 238 fp.truncate(0)
239 239 fp.write(reviewedpatch)
240 240 fp.seek(0)
241 241
242 242 [os.unlink(repo.wjoin(c)) for c in newlyaddedandmodifiedfiles]
243 243 # 3a. apply filtered patch to clean repo (clean)
244 244 if backups:
245 245 # Equivalent to hg.revert
246 246 m = scmutil.matchfiles(repo, backups.keys())
247 247 mergemod.update(repo, repo.dirstate.p1(),
248 248 False, True, matcher=m)
249 249
250 250 # 3b. (apply)
251 251 if dopatch:
252 252 try:
253 253 ui.debug('applying patch\n')
254 254 ui.debug(fp.getvalue())
255 255 patch.internalpatch(ui, repo, fp, 1, eolmode=None)
256 256 except patch.PatchError as err:
257 257 raise error.Abort(str(err))
258 258 del fp
259 259
260 260 # 4. We prepared working directory according to filtered
261 261 # patch. Now is the time to delegate the job to
262 262 # commit/qrefresh or the like!
263 263
264 264 # Make all of the pathnames absolute.
265 265 newfiles = [repo.wjoin(nf) for nf in newfiles]
266 266 return commitfunc(ui, repo, *newfiles, **opts)
267 267 finally:
268 268 # 5. finally restore backed-up files
269 269 try:
270 270 dirstate = repo.dirstate
271 271 for realname, tmpname in backups.iteritems():
272 272 ui.debug('restoring %r to %r\n' % (tmpname, realname))
273 273
274 274 if dirstate[realname] == 'n':
275 275 # without normallookup, restoring timestamp
276 276 # may cause partially committed files
277 277 # to be treated as unmodified
278 278 dirstate.normallookup(realname)
279 279
280 280 # copystat=True here and above are a hack to trick any
281 281 # editors that have f open into thinking we haven't modified them.
282 282 #
283 283 # Also note that this is racy as an editor could notice the
284 284 # file's mtime before we've finished writing it.
285 285 util.copyfile(tmpname, repo.wjoin(realname), copystat=True)
286 286 os.unlink(tmpname)
287 287 if tobackup:
288 288 os.rmdir(backupdir)
289 289 except OSError:
290 290 pass
291 291
292 292 def recordinwlock(ui, repo, message, match, opts):
293 293 with repo.wlock():
294 294 return recordfunc(ui, repo, message, match, opts)
295 295
296 296 return commit(ui, repo, recordinwlock, pats, opts)
297 297
298 298 def findpossible(cmd, table, strict=False):
299 299 """
300 300 Return cmd -> (aliases, command table entry)
301 301 for each matching command.
302 302 Return debug commands (or their aliases) only if no normal command matches.
303 303 """
304 304 choice = {}
305 305 debugchoice = {}
306 306
307 307 if cmd in table:
308 308 # short-circuit exact matches, "log" alias beats "^log|history"
309 309 keys = [cmd]
310 310 else:
311 311 keys = table.keys()
312 312
313 313 allcmds = []
314 314 for e in keys:
315 315 aliases = parsealiases(e)
316 316 allcmds.extend(aliases)
317 317 found = None
318 318 if cmd in aliases:
319 319 found = cmd
320 320 elif not strict:
321 321 for a in aliases:
322 322 if a.startswith(cmd):
323 323 found = a
324 324 break
325 325 if found is not None:
326 326 if aliases[0].startswith("debug") or found.startswith("debug"):
327 327 debugchoice[found] = (aliases, table[e])
328 328 else:
329 329 choice[found] = (aliases, table[e])
330 330
331 331 if not choice and debugchoice:
332 332 choice = debugchoice
333 333
334 334 return choice, allcmds
335 335
336 336 def findcmd(cmd, table, strict=True):
337 337 """Return (aliases, command table entry) for command string."""
338 338 choice, allcmds = findpossible(cmd, table, strict)
339 339
340 340 if cmd in choice:
341 341 return choice[cmd]
342 342
343 343 if len(choice) > 1:
344 344 clist = choice.keys()
345 345 clist.sort()
346 346 raise error.AmbiguousCommand(cmd, clist)
347 347
348 348 if choice:
349 349 return choice.values()[0]
350 350
351 351 raise error.UnknownCommand(cmd, allcmds)
352 352
353 353 def findrepo(p):
354 354 while not os.path.isdir(os.path.join(p, ".hg")):
355 355 oldp, p = p, os.path.dirname(p)
356 356 if p == oldp:
357 357 return None
358 358
359 359 return p
360 360
361 361 def bailifchanged(repo, merge=True, hint=None):
362 362 """ enforce the precondition that working directory must be clean.
363 363
364 364 'merge' can be set to false if a pending uncommitted merge should be
365 365 ignored (such as when 'update --check' runs).
366 366
367 367 'hint' is the usual hint given to Abort exception.
368 368 """
369 369
370 370 if merge and repo.dirstate.p2() != nullid:
371 371 raise error.Abort(_('outstanding uncommitted merge'), hint=hint)
372 372 modified, added, removed, deleted = repo.status()[:4]
373 373 if modified or added or removed or deleted:
374 374 raise error.Abort(_('uncommitted changes'), hint=hint)
375 375 ctx = repo[None]
376 376 for s in sorted(ctx.substate):
377 377 ctx.sub(s).bailifchanged(hint=hint)
378 378
379 379 def logmessage(ui, opts):
380 380 """ get the log message according to -m and -l option """
381 381 message = opts.get('message')
382 382 logfile = opts.get('logfile')
383 383
384 384 if message and logfile:
385 385 raise error.Abort(_('options --message and --logfile are mutually '
386 386 'exclusive'))
387 387 if not message and logfile:
388 388 try:
389 389 if logfile == '-':
390 390 message = ui.fin.read()
391 391 else:
392 392 message = '\n'.join(util.readfile(logfile).splitlines())
393 393 except IOError as inst:
394 394 raise error.Abort(_("can't read commit message '%s': %s") %
395 395 (logfile, inst.strerror))
396 396 return message
397 397
398 398 def mergeeditform(ctxorbool, baseformname):
399 399 """return appropriate editform name (referencing a committemplate)
400 400
401 401 'ctxorbool' is either a ctx to be committed, or a bool indicating whether
402 402 merging is committed.
403 403
404 404 This returns baseformname with '.merge' appended if it is a merge,
405 405 otherwise '.normal' is appended.
406 406 """
407 407 if isinstance(ctxorbool, bool):
408 408 if ctxorbool:
409 409 return baseformname + ".merge"
410 410 elif 1 < len(ctxorbool.parents()):
411 411 return baseformname + ".merge"
412 412
413 413 return baseformname + ".normal"
414 414
415 415 def getcommiteditor(edit=False, finishdesc=None, extramsg=None,
416 416 editform='', **opts):
417 417 """get appropriate commit message editor according to '--edit' option
418 418
419 419 'finishdesc' is a function to be called with edited commit message
420 420 (= 'description' of the new changeset) just after editing, but
421 421 before checking emptiness. It should return the actual text to be
422 422 stored into history. This allows changing the description before
423 423 storing.
424 424
425 425 'extramsg' is an extra message to be shown in the editor instead of
426 426 'Leave message empty to abort commit' line. 'HG: ' prefix and EOL
427 427 are automatically added.
428 428
429 429 'editform' is a dot-separated list of names, to distinguish
430 430 the purpose of commit text editing.
431 431
432 432 'getcommiteditor' returns 'commitforceeditor' regardless of
433 433 'edit', if one of 'finishdesc' or 'extramsg' is specified, because
434 434 they are specific to usage in MQ.
435 435 """
436 436 if edit or finishdesc or extramsg:
437 437 return lambda r, c, s: commitforceeditor(r, c, s,
438 438 finishdesc=finishdesc,
439 439 extramsg=extramsg,
440 440 editform=editform)
441 441 elif editform:
442 442 return lambda r, c, s: commiteditor(r, c, s, editform=editform)
443 443 else:
444 444 return commiteditor
445 445
446 446 def loglimit(opts):
447 447 """get the log limit according to option -l/--limit"""
448 448 limit = opts.get('limit')
449 449 if limit:
450 450 try:
451 451 limit = int(limit)
452 452 except ValueError:
453 453 raise error.Abort(_('limit must be a positive integer'))
454 454 if limit <= 0:
455 455 raise error.Abort(_('limit must be positive'))
456 456 else:
457 457 limit = None
458 458 return limit
459 459
460 460 def makefilename(repo, pat, node, desc=None,
461 461 total=None, seqno=None, revwidth=None, pathname=None):
462 462 node_expander = {
463 463 'H': lambda: hex(node),
464 464 'R': lambda: str(repo.changelog.rev(node)),
465 465 'h': lambda: short(node),
466 466 'm': lambda: re.sub('[^\w]', '_', str(desc))
467 467 }
468 468 expander = {
469 469 '%': lambda: '%',
470 470 'b': lambda: os.path.basename(repo.root),
471 471 }
472 472
473 473 try:
474 474 if node:
475 475 expander.update(node_expander)
476 476 if node:
477 477 expander['r'] = (lambda:
478 478 str(repo.changelog.rev(node)).zfill(revwidth or 0))
479 479 if total is not None:
480 480 expander['N'] = lambda: str(total)
481 481 if seqno is not None:
482 482 expander['n'] = lambda: str(seqno)
483 483 if total is not None and seqno is not None:
484 484 expander['n'] = lambda: str(seqno).zfill(len(str(total)))
485 485 if pathname is not None:
486 486 expander['s'] = lambda: os.path.basename(pathname)
487 487 expander['d'] = lambda: os.path.dirname(pathname) or '.'
488 488 expander['p'] = lambda: pathname
489 489
490 490 newname = []
491 491 patlen = len(pat)
492 492 i = 0
493 493 while i < patlen:
494 c = pat[i]
494 c = pat[i:i + 1]
495 495 if c == '%':
496 496 i += 1
497 c = pat[i]
497 c = pat[i:i + 1]
498 498 c = expander[c]()
499 499 newname.append(c)
500 500 i += 1
501 501 return ''.join(newname)
502 502 except KeyError as inst:
503 503 raise error.Abort(_("invalid format spec '%%%s' in output filename") %
504 504 inst.args[0])
505 505
506 506 class _unclosablefile(object):
507 507 def __init__(self, fp):
508 508 self._fp = fp
509 509
510 510 def close(self):
511 511 pass
512 512
513 513 def __iter__(self):
514 514 return iter(self._fp)
515 515
516 516 def __getattr__(self, attr):
517 517 return getattr(self._fp, attr)
518 518
519 519 def __enter__(self):
520 520 return self
521 521
522 522 def __exit__(self, exc_type, exc_value, exc_tb):
523 523 pass
524 524
525 525 def makefileobj(repo, pat, node=None, desc=None, total=None,
526 526 seqno=None, revwidth=None, mode='wb', modemap=None,
527 527 pathname=None):
528 528
529 529 writable = mode not in ('r', 'rb')
530 530
531 531 if not pat or pat == '-':
532 532 if writable:
533 533 fp = repo.ui.fout
534 534 else:
535 535 fp = repo.ui.fin
536 536 return _unclosablefile(fp)
537 537 if util.safehasattr(pat, 'write') and writable:
538 538 return pat
539 539 if util.safehasattr(pat, 'read') and 'r' in mode:
540 540 return pat
541 541 fn = makefilename(repo, pat, node, desc, total, seqno, revwidth, pathname)
542 542 if modemap is not None:
543 543 mode = modemap.get(fn, mode)
544 544 if mode == 'wb':
545 545 modemap[fn] = 'ab'
546 546 return open(fn, mode)
547 547
548 548 def openrevlog(repo, cmd, file_, opts):
549 549 """opens the changelog, manifest, a filelog or a given revlog"""
550 550 cl = opts['changelog']
551 551 mf = opts['manifest']
552 552 dir = opts['dir']
553 553 msg = None
554 554 if cl and mf:
555 555 msg = _('cannot specify --changelog and --manifest at the same time')
556 556 elif cl and dir:
557 557 msg = _('cannot specify --changelog and --dir at the same time')
558 558 elif cl or mf or dir:
559 559 if file_:
560 560 msg = _('cannot specify filename with --changelog or --manifest')
561 561 elif not repo:
562 562 msg = _('cannot specify --changelog or --manifest or --dir '
563 563 'without a repository')
564 564 if msg:
565 565 raise error.Abort(msg)
566 566
567 567 r = None
568 568 if repo:
569 569 if cl:
570 570 r = repo.unfiltered().changelog
571 571 elif dir:
572 572 if 'treemanifest' not in repo.requirements:
573 573 raise error.Abort(_("--dir can only be used on repos with "
574 574 "treemanifest enabled"))
575 575 dirlog = repo.manifestlog._revlog.dirlog(dir)
576 576 if len(dirlog):
577 577 r = dirlog
578 578 elif mf:
579 579 r = repo.manifestlog._revlog
580 580 elif file_:
581 581 filelog = repo.file(file_)
582 582 if len(filelog):
583 583 r = filelog
584 584 if not r:
585 585 if not file_:
586 586 raise error.CommandError(cmd, _('invalid arguments'))
587 587 if not os.path.isfile(file_):
588 588 raise error.Abort(_("revlog '%s' not found") % file_)
589 589 r = revlog.revlog(vfsmod.vfs(pycompat.getcwd(), audit=False),
590 590 file_[:-2] + ".i")
591 591 return r
592 592
593 593 def copy(ui, repo, pats, opts, rename=False):
594 594 # called with the repo lock held
595 595 #
596 596 # hgsep => pathname that uses "/" to separate directories
597 597 # ossep => pathname that uses os.sep to separate directories
598 598 cwd = repo.getcwd()
599 599 targets = {}
600 600 after = opts.get("after")
601 601 dryrun = opts.get("dry_run")
602 602 wctx = repo[None]
603 603
604 604 def walkpat(pat):
605 605 srcs = []
606 606 if after:
607 607 badstates = '?'
608 608 else:
609 609 badstates = '?r'
610 610 m = scmutil.match(repo[None], [pat], opts, globbed=True)
611 611 for abs in repo.walk(m):
612 612 state = repo.dirstate[abs]
613 613 rel = m.rel(abs)
614 614 exact = m.exact(abs)
615 615 if state in badstates:
616 616 if exact and state == '?':
617 617 ui.warn(_('%s: not copying - file is not managed\n') % rel)
618 618 if exact and state == 'r':
619 619 ui.warn(_('%s: not copying - file has been marked for'
620 620 ' remove\n') % rel)
621 621 continue
622 622 # abs: hgsep
623 623 # rel: ossep
624 624 srcs.append((abs, rel, exact))
625 625 return srcs
626 626
627 627 # abssrc: hgsep
628 628 # relsrc: ossep
629 629 # otarget: ossep
630 630 def copyfile(abssrc, relsrc, otarget, exact):
631 631 abstarget = pathutil.canonpath(repo.root, cwd, otarget)
632 632 if '/' in abstarget:
633 633 # We cannot normalize abstarget itself, this would prevent
634 634 # case only renames, like a => A.
635 635 abspath, absname = abstarget.rsplit('/', 1)
636 636 abstarget = repo.dirstate.normalize(abspath) + '/' + absname
637 637 reltarget = repo.pathto(abstarget, cwd)
638 638 target = repo.wjoin(abstarget)
639 639 src = repo.wjoin(abssrc)
640 640 state = repo.dirstate[abstarget]
641 641
642 642 scmutil.checkportable(ui, abstarget)
643 643
644 644 # check for collisions
645 645 prevsrc = targets.get(abstarget)
646 646 if prevsrc is not None:
647 647 ui.warn(_('%s: not overwriting - %s collides with %s\n') %
648 648 (reltarget, repo.pathto(abssrc, cwd),
649 649 repo.pathto(prevsrc, cwd)))
650 650 return
651 651
652 652 # check for overwrites
653 653 exists = os.path.lexists(target)
654 654 samefile = False
655 655 if exists and abssrc != abstarget:
656 656 if (repo.dirstate.normalize(abssrc) ==
657 657 repo.dirstate.normalize(abstarget)):
658 658 if not rename:
659 659 ui.warn(_("%s: can't copy - same file\n") % reltarget)
660 660 return
661 661 exists = False
662 662 samefile = True
663 663
664 664 if not after and exists or after and state in 'mn':
665 665 if not opts['force']:
666 666 if state in 'mn':
667 667 msg = _('%s: not overwriting - file already committed\n')
668 668 if after:
669 669 flags = '--after --force'
670 670 else:
671 671 flags = '--force'
672 672 if rename:
673 673 hint = _('(hg rename %s to replace the file by '
674 674 'recording a rename)\n') % flags
675 675 else:
676 676 hint = _('(hg copy %s to replace the file by '
677 677 'recording a copy)\n') % flags
678 678 else:
679 679 msg = _('%s: not overwriting - file exists\n')
680 680 if rename:
681 681 hint = _('(hg rename --after to record the rename)\n')
682 682 else:
683 683 hint = _('(hg copy --after to record the copy)\n')
684 684 ui.warn(msg % reltarget)
685 685 ui.warn(hint)
686 686 return
687 687
688 688 if after:
689 689 if not exists:
690 690 if rename:
691 691 ui.warn(_('%s: not recording move - %s does not exist\n') %
692 692 (relsrc, reltarget))
693 693 else:
694 694 ui.warn(_('%s: not recording copy - %s does not exist\n') %
695 695 (relsrc, reltarget))
696 696 return
697 697 elif not dryrun:
698 698 try:
699 699 if exists:
700 700 os.unlink(target)
701 701 targetdir = os.path.dirname(target) or '.'
702 702 if not os.path.isdir(targetdir):
703 703 os.makedirs(targetdir)
704 704 if samefile:
705 705 tmp = target + "~hgrename"
706 706 os.rename(src, tmp)
707 707 os.rename(tmp, target)
708 708 else:
709 709 util.copyfile(src, target)
710 710 srcexists = True
711 711 except IOError as inst:
712 712 if inst.errno == errno.ENOENT:
713 713 ui.warn(_('%s: deleted in working directory\n') % relsrc)
714 714 srcexists = False
715 715 else:
716 716 ui.warn(_('%s: cannot copy - %s\n') %
717 717 (relsrc, inst.strerror))
718 718 return True # report a failure
719 719
720 720 if ui.verbose or not exact:
721 721 if rename:
722 722 ui.status(_('moving %s to %s\n') % (relsrc, reltarget))
723 723 else:
724 724 ui.status(_('copying %s to %s\n') % (relsrc, reltarget))
725 725
726 726 targets[abstarget] = abssrc
727 727
728 728 # fix up dirstate
729 729 scmutil.dirstatecopy(ui, repo, wctx, abssrc, abstarget,
730 730 dryrun=dryrun, cwd=cwd)
731 731 if rename and not dryrun:
732 732 if not after and srcexists and not samefile:
733 733 repo.wvfs.unlinkpath(abssrc)
734 734 wctx.forget([abssrc])
735 735
736 736 # pat: ossep
737 737 # dest ossep
738 738 # srcs: list of (hgsep, hgsep, ossep, bool)
739 739 # return: function that takes hgsep and returns ossep
740 740 def targetpathfn(pat, dest, srcs):
741 741 if os.path.isdir(pat):
742 742 abspfx = pathutil.canonpath(repo.root, cwd, pat)
743 743 abspfx = util.localpath(abspfx)
744 744 if destdirexists:
745 745 striplen = len(os.path.split(abspfx)[0])
746 746 else:
747 747 striplen = len(abspfx)
748 748 if striplen:
749 749 striplen += len(pycompat.ossep)
750 750 res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
751 751 elif destdirexists:
752 752 res = lambda p: os.path.join(dest,
753 753 os.path.basename(util.localpath(p)))
754 754 else:
755 755 res = lambda p: dest
756 756 return res
757 757
758 758 # pat: ossep
759 759 # dest ossep
760 760 # srcs: list of (hgsep, hgsep, ossep, bool)
761 761 # return: function that takes hgsep and returns ossep
762 762 def targetpathafterfn(pat, dest, srcs):
763 763 if matchmod.patkind(pat):
764 764 # a mercurial pattern
765 765 res = lambda p: os.path.join(dest,
766 766 os.path.basename(util.localpath(p)))
767 767 else:
768 768 abspfx = pathutil.canonpath(repo.root, cwd, pat)
769 769 if len(abspfx) < len(srcs[0][0]):
770 770 # A directory. Either the target path contains the last
771 771 # component of the source path or it does not.
772 772 def evalpath(striplen):
773 773 score = 0
774 774 for s in srcs:
775 775 t = os.path.join(dest, util.localpath(s[0])[striplen:])
776 776 if os.path.lexists(t):
777 777 score += 1
778 778 return score
779 779
780 780 abspfx = util.localpath(abspfx)
781 781 striplen = len(abspfx)
782 782 if striplen:
783 783 striplen += len(pycompat.ossep)
784 784 if os.path.isdir(os.path.join(dest, os.path.split(abspfx)[1])):
785 785 score = evalpath(striplen)
786 786 striplen1 = len(os.path.split(abspfx)[0])
787 787 if striplen1:
788 788 striplen1 += len(pycompat.ossep)
789 789 if evalpath(striplen1) > score:
790 790 striplen = striplen1
791 791 res = lambda p: os.path.join(dest,
792 792 util.localpath(p)[striplen:])
793 793 else:
794 794 # a file
795 795 if destdirexists:
796 796 res = lambda p: os.path.join(dest,
797 797 os.path.basename(util.localpath(p)))
798 798 else:
799 799 res = lambda p: dest
800 800 return res
801 801
802 802 pats = scmutil.expandpats(pats)
803 803 if not pats:
804 804 raise error.Abort(_('no source or destination specified'))
805 805 if len(pats) == 1:
806 806 raise error.Abort(_('no destination specified'))
807 807 dest = pats.pop()
808 808 destdirexists = os.path.isdir(dest) and not os.path.islink(dest)
809 809 if not destdirexists:
810 810 if len(pats) > 1 or matchmod.patkind(pats[0]):
811 811 raise error.Abort(_('with multiple sources, destination must be an '
812 812 'existing directory'))
813 813 if util.endswithsep(dest):
814 814 raise error.Abort(_('destination %s is not a directory') % dest)
815 815
816 816 tfn = targetpathfn
817 817 if after:
818 818 tfn = targetpathafterfn
819 819 copylist = []
820 820 for pat in pats:
821 821 srcs = walkpat(pat)
822 822 if not srcs:
823 823 continue
824 824 copylist.append((tfn(pat, dest, srcs), srcs))
825 825 if not copylist:
826 826 raise error.Abort(_('no files to copy'))
827 827
828 828 errors = 0
829 829 for targetpath, srcs in copylist:
830 830 for abssrc, relsrc, exact in srcs:
831 831 if copyfile(abssrc, relsrc, targetpath(abssrc), exact):
832 832 errors += 1
833 833
834 834 if errors:
835 835 ui.warn(_('(consider using --after)\n'))
836 836
837 837 return errors != 0
838 838
839 839 ## facility to let extension process additional data into an import patch
840 840 # list of identifier to be executed in order
841 841 extrapreimport = [] # run before commit
842 842 extrapostimport = [] # run after commit
843 843 # mapping from identifier to actual import function
844 844 #
845 845 # 'preimport' are run before the commit is made and are provided the following
846 846 # arguments:
847 847 # - repo: the localrepository instance,
848 848 # - patchdata: data extracted from patch header (cf m.patch.patchheadermap),
849 849 # - extra: the future extra dictionary of the changeset, please mutate it,
850 850 # - opts: the import options.
851 851 # XXX ideally, we would just pass an ctx ready to be computed, that would allow
852 852 # mutation of in memory commit and more. Feel free to rework the code to get
853 853 # there.
854 854 extrapreimportmap = {}
855 855 # 'postimport' are run after the commit is made and are provided the following
856 856 # argument:
857 857 # - ctx: the changectx created by import.
858 858 extrapostimportmap = {}
859 859
860 860 def tryimportone(ui, repo, hunk, parents, opts, msgs, updatefunc):
861 861 """Utility function used by commands.import to import a single patch
862 862
863 863 This function is explicitly defined here to help the evolve extension to
864 864 wrap this part of the import logic.
865 865
866 866 The API is currently a bit ugly because it is a simple code translation from
867 867 the import command. Feel free to make it better.
868 868
869 869 :hunk: a patch (as a binary string)
870 870 :parents: nodes that will be parent of the created commit
871 871 :opts: the full dict of options passed to the import command
872 872 :msgs: list to save commit message to.
873 873 (used in case we need to save it when failing)
874 874 :updatefunc: a function that updates a repo to a given node
875 875 updatefunc(<repo>, <node>)
876 876 """
877 877 # avoid cycle context -> subrepo -> cmdutil
878 878 from . import context
879 879 extractdata = patch.extract(ui, hunk)
880 880 tmpname = extractdata.get('filename')
881 881 message = extractdata.get('message')
882 882 user = opts.get('user') or extractdata.get('user')
883 883 date = opts.get('date') or extractdata.get('date')
884 884 branch = extractdata.get('branch')
885 885 nodeid = extractdata.get('nodeid')
886 886 p1 = extractdata.get('p1')
887 887 p2 = extractdata.get('p2')
888 888
889 889 nocommit = opts.get('no_commit')
890 890 importbranch = opts.get('import_branch')
891 891 update = not opts.get('bypass')
892 892 strip = opts["strip"]
893 893 prefix = opts["prefix"]
894 894 sim = float(opts.get('similarity') or 0)
895 895 if not tmpname:
896 896 return (None, None, False)
897 897
898 898 rejects = False
899 899
900 900 try:
901 901 cmdline_message = logmessage(ui, opts)
902 902 if cmdline_message:
903 903 # pickup the cmdline msg
904 904 message = cmdline_message
905 905 elif message:
906 906 # pickup the patch msg
907 907 message = message.strip()
908 908 else:
909 909 # launch the editor
910 910 message = None
911 911 ui.debug('message:\n%s\n' % message)
912 912
913 913 if len(parents) == 1:
914 914 parents.append(repo[nullid])
915 915 if opts.get('exact'):
916 916 if not nodeid or not p1:
917 917 raise error.Abort(_('not a Mercurial patch'))
918 918 p1 = repo[p1]
919 919 p2 = repo[p2 or nullid]
920 920 elif p2:
921 921 try:
922 922 p1 = repo[p1]
923 923 p2 = repo[p2]
924 924 # Without any options, consider p2 only if the
925 925 # patch is being applied on top of the recorded
926 926 # first parent.
927 927 if p1 != parents[0]:
928 928 p1 = parents[0]
929 929 p2 = repo[nullid]
930 930 except error.RepoError:
931 931 p1, p2 = parents
932 932 if p2.node() == nullid:
933 933 ui.warn(_("warning: import the patch as a normal revision\n"
934 934 "(use --exact to import the patch as a merge)\n"))
935 935 else:
936 936 p1, p2 = parents
937 937
938 938 n = None
939 939 if update:
940 940 if p1 != parents[0]:
941 941 updatefunc(repo, p1.node())
942 942 if p2 != parents[1]:
943 943 repo.setparents(p1.node(), p2.node())
944 944
945 945 if opts.get('exact') or importbranch:
946 946 repo.dirstate.setbranch(branch or 'default')
947 947
948 948 partial = opts.get('partial', False)
949 949 files = set()
950 950 try:
951 951 patch.patch(ui, repo, tmpname, strip=strip, prefix=prefix,
952 952 files=files, eolmode=None, similarity=sim / 100.0)
953 953 except patch.PatchError as e:
954 954 if not partial:
955 955 raise error.Abort(str(e))
956 956 if partial:
957 957 rejects = True
958 958
959 959 files = list(files)
960 960 if nocommit:
961 961 if message:
962 962 msgs.append(message)
963 963 else:
964 964 if opts.get('exact') or p2:
965 965 # If you got here, you either use --force and know what
966 966 # you are doing or used --exact or a merge patch while
967 967 # being updated to its first parent.
968 968 m = None
969 969 else:
970 970 m = scmutil.matchfiles(repo, files or [])
971 971 editform = mergeeditform(repo[None], 'import.normal')
972 972 if opts.get('exact'):
973 973 editor = None
974 974 else:
975 975 editor = getcommiteditor(editform=editform, **opts)
976 976 extra = {}
977 977 for idfunc in extrapreimport:
978 978 extrapreimportmap[idfunc](repo, extractdata, extra, opts)
979 979 overrides = {}
980 980 if partial:
981 981 overrides[('ui', 'allowemptycommit')] = True
982 982 with repo.ui.configoverride(overrides, 'import'):
983 983 n = repo.commit(message, user,
984 984 date, match=m,
985 985 editor=editor, extra=extra)
986 986 for idfunc in extrapostimport:
987 987 extrapostimportmap[idfunc](repo[n])
988 988 else:
989 989 if opts.get('exact') or importbranch:
990 990 branch = branch or 'default'
991 991 else:
992 992 branch = p1.branch()
993 993 store = patch.filestore()
994 994 try:
995 995 files = set()
996 996 try:
997 997 patch.patchrepo(ui, repo, p1, store, tmpname, strip, prefix,
998 998 files, eolmode=None)
999 999 except patch.PatchError as e:
1000 1000 raise error.Abort(str(e))
1001 1001 if opts.get('exact'):
1002 1002 editor = None
1003 1003 else:
1004 1004 editor = getcommiteditor(editform='import.bypass')
1005 1005 memctx = context.makememctx(repo, (p1.node(), p2.node()),
1006 1006 message,
1007 1007 user,
1008 1008 date,
1009 1009 branch, files, store,
1010 1010 editor=editor)
1011 1011 n = memctx.commit()
1012 1012 finally:
1013 1013 store.close()
1014 1014 if opts.get('exact') and nocommit:
1015 1015 # --exact with --no-commit is still useful in that it does merge
1016 1016 # and branch bits
1017 1017 ui.warn(_("warning: can't check exact import with --no-commit\n"))
1018 1018 elif opts.get('exact') and hex(n) != nodeid:
1019 1019 raise error.Abort(_('patch is damaged or loses information'))
1020 1020 msg = _('applied to working directory')
1021 1021 if n:
1022 1022 # i18n: refers to a short changeset id
1023 1023 msg = _('created %s') % short(n)
1024 1024 return (msg, n, rejects)
1025 1025 finally:
1026 1026 os.unlink(tmpname)
1027 1027
1028 1028 # facility to let extensions include additional data in an exported patch
1029 1029 # list of identifiers to be executed in order
1030 1030 extraexport = []
1031 1031 # mapping from identifier to actual export function
1032 1032 # function as to return a string to be added to the header or None
1033 1033 # it is given two arguments (sequencenumber, changectx)
1034 1034 extraexportmap = {}
1035 1035
1036 1036 def export(repo, revs, template='hg-%h.patch', fp=None, switch_parent=False,
1037 1037 opts=None, match=None):
1038 1038 '''export changesets as hg patches.'''
1039 1039
1040 1040 total = len(revs)
1041 1041 revwidth = max([len(str(rev)) for rev in revs])
1042 1042 filemode = {}
1043 1043
1044 1044 def single(rev, seqno, fp):
1045 1045 ctx = repo[rev]
1046 1046 node = ctx.node()
1047 1047 parents = [p.node() for p in ctx.parents() if p]
1048 1048 branch = ctx.branch()
1049 1049 if switch_parent:
1050 1050 parents.reverse()
1051 1051
1052 1052 if parents:
1053 1053 prev = parents[0]
1054 1054 else:
1055 1055 prev = nullid
1056 1056
1057 1057 shouldclose = False
1058 1058 if not fp and len(template) > 0:
1059 1059 desc_lines = ctx.description().rstrip().split('\n')
1060 1060 desc = desc_lines[0] #Commit always has a first line.
1061 1061 fp = makefileobj(repo, template, node, desc=desc, total=total,
1062 1062 seqno=seqno, revwidth=revwidth, mode='wb',
1063 1063 modemap=filemode)
1064 1064 shouldclose = True
1065 1065 if fp and not getattr(fp, 'name', '<unnamed>').startswith('<'):
1066 1066 repo.ui.note("%s\n" % fp.name)
1067 1067
1068 1068 if not fp:
1069 1069 write = repo.ui.write
1070 1070 else:
1071 1071 def write(s, **kw):
1072 1072 fp.write(s)
1073 1073
1074 1074 write("# HG changeset patch\n")
1075 1075 write("# User %s\n" % ctx.user())
1076 1076 write("# Date %d %d\n" % ctx.date())
1077 1077 write("# %s\n" % util.datestr(ctx.date()))
1078 1078 if branch and branch != 'default':
1079 1079 write("# Branch %s\n" % branch)
1080 1080 write("# Node ID %s\n" % hex(node))
1081 1081 write("# Parent %s\n" % hex(prev))
1082 1082 if len(parents) > 1:
1083 1083 write("# Parent %s\n" % hex(parents[1]))
1084 1084
1085 1085 for headerid in extraexport:
1086 1086 header = extraexportmap[headerid](seqno, ctx)
1087 1087 if header is not None:
1088 1088 write('# %s\n' % header)
1089 1089 write(ctx.description().rstrip())
1090 1090 write("\n\n")
1091 1091
1092 1092 for chunk, label in patch.diffui(repo, prev, node, match, opts=opts):
1093 1093 write(chunk, label=label)
1094 1094
1095 1095 if shouldclose:
1096 1096 fp.close()
1097 1097
1098 1098 for seqno, rev in enumerate(revs):
1099 1099 single(rev, seqno + 1, fp)
1100 1100
1101 1101 def diffordiffstat(ui, repo, diffopts, node1, node2, match,
1102 1102 changes=None, stat=False, fp=None, prefix='',
1103 1103 root='', listsubrepos=False):
1104 1104 '''show diff or diffstat.'''
1105 1105 if fp is None:
1106 1106 write = ui.write
1107 1107 else:
1108 1108 def write(s, **kw):
1109 1109 fp.write(s)
1110 1110
1111 1111 if root:
1112 1112 relroot = pathutil.canonpath(repo.root, repo.getcwd(), root)
1113 1113 else:
1114 1114 relroot = ''
1115 1115 if relroot != '':
1116 1116 # XXX relative roots currently don't work if the root is within a
1117 1117 # subrepo
1118 1118 uirelroot = match.uipath(relroot)
1119 1119 relroot += '/'
1120 1120 for matchroot in match.files():
1121 1121 if not matchroot.startswith(relroot):
1122 1122 ui.warn(_('warning: %s not inside relative root %s\n') % (
1123 1123 match.uipath(matchroot), uirelroot))
1124 1124
1125 1125 if stat:
1126 1126 diffopts = diffopts.copy(context=0)
1127 1127 width = 80
1128 1128 if not ui.plain():
1129 1129 width = ui.termwidth()
1130 1130 chunks = patch.diff(repo, node1, node2, match, changes, diffopts,
1131 1131 prefix=prefix, relroot=relroot)
1132 1132 for chunk, label in patch.diffstatui(util.iterlines(chunks),
1133 1133 width=width):
1134 1134 write(chunk, label=label)
1135 1135 else:
1136 1136 for chunk, label in patch.diffui(repo, node1, node2, match,
1137 1137 changes, diffopts, prefix=prefix,
1138 1138 relroot=relroot):
1139 1139 write(chunk, label=label)
1140 1140
1141 1141 if listsubrepos:
1142 1142 ctx1 = repo[node1]
1143 1143 ctx2 = repo[node2]
1144 1144 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
1145 1145 tempnode2 = node2
1146 1146 try:
1147 1147 if node2 is not None:
1148 1148 tempnode2 = ctx2.substate[subpath][1]
1149 1149 except KeyError:
1150 1150 # A subrepo that existed in node1 was deleted between node1 and
1151 1151 # node2 (inclusive). Thus, ctx2's substate won't contain that
1152 1152 # subpath. The best we can do is to ignore it.
1153 1153 tempnode2 = None
1154 1154 submatch = matchmod.subdirmatcher(subpath, match)
1155 1155 sub.diff(ui, diffopts, tempnode2, submatch, changes=changes,
1156 1156 stat=stat, fp=fp, prefix=prefix)
1157 1157
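def _examplediffstat(ui, repo):
    # editor's sketch: print a diffstat of the tip revision against its
    # first parent, i.e. the stat=True call showpatch() makes below
    ctx = repo['tip']
    diffordiffstat(ui, repo, patch.diffallopts(ui), ctx.p1().node(),
                   ctx.node(), match=scmutil.matchall(repo), stat=True)
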
1158 1158 def _changesetlabels(ctx):
1159 1159 labels = ['log.changeset', 'changeset.%s' % ctx.phasestr()]
1160 1160 if ctx.obsolete():
1161 1161 labels.append('changeset.obsolete')
1162 1162 if ctx.troubled():
1163 1163 labels.append('changeset.troubled')
1164 1164 for trouble in ctx.troubles():
1165 1165 labels.append('trouble.%s' % trouble)
1166 1166 return ' '.join(labels)
1167 1167
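# e.g. an obsolete draft changeset yields the label string
# 'log.changeset changeset.draft changeset.obsolete' (editor's note)
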
1168 1168 class changeset_printer(object):
1169 1169 '''show changeset information when templating is not requested.'''
1170 1170
1171 1171 def __init__(self, ui, repo, matchfn, diffopts, buffered):
1172 1172 self.ui = ui
1173 1173 self.repo = repo
1174 1174 self.buffered = buffered
1175 1175 self.matchfn = matchfn
1176 1176 self.diffopts = diffopts
1177 1177 self.header = {}
1178 1178 self.hunk = {}
1179 1179 self.lastheader = None
1180 1180 self.footer = None
1181 1181
1182 1182 def flush(self, ctx):
1183 1183 rev = ctx.rev()
1184 1184 if rev in self.header:
1185 1185 h = self.header[rev]
1186 1186 if h != self.lastheader:
1187 1187 self.lastheader = h
1188 1188 self.ui.write(h)
1189 1189 del self.header[rev]
1190 1190 if rev in self.hunk:
1191 1191 self.ui.write(self.hunk[rev])
1192 1192 del self.hunk[rev]
1193 1193 return 1
1194 1194 return 0
1195 1195
1196 1196 def close(self):
1197 1197 if self.footer:
1198 1198 self.ui.write(self.footer)
1199 1199
1200 1200 def show(self, ctx, copies=None, matchfn=None, **props):
1201 1201 if self.buffered:
1202 1202 self.ui.pushbuffer(labeled=True)
1203 1203 self._show(ctx, copies, matchfn, props)
1204 1204 self.hunk[ctx.rev()] = self.ui.popbuffer()
1205 1205 else:
1206 1206 self._show(ctx, copies, matchfn, props)
1207 1207
1208 1208 def _show(self, ctx, copies, matchfn, props):
1209 1209 '''show a single changeset or file revision'''
1210 1210 changenode = ctx.node()
1211 1211 rev = ctx.rev()
1212 1212 if self.ui.debugflag:
1213 1213 hexfunc = hex
1214 1214 else:
1215 1215 hexfunc = short
1216 1216 # as of now, wctx.node() and wctx.rev() return None, but we want to
1217 1217 # show the same values as {node} and {rev} templatekw
1218 1218 revnode = (scmutil.intrev(rev), hexfunc(bin(ctx.hex())))
1219 1219
1220 1220 if self.ui.quiet:
1221 1221 self.ui.write("%d:%s\n" % revnode, label='log.node')
1222 1222 return
1223 1223
1224 1224 date = util.datestr(ctx.date())
1225 1225
1226 1226 # i18n: column positioning for "hg log"
1227 1227 self.ui.write(_("changeset: %d:%s\n") % revnode,
1228 1228 label=_changesetlabels(ctx))
1229 1229
1230 1230 # branches are shown first before any other names due to backwards
1231 1231 # compatibility
1232 1232 branch = ctx.branch()
1233 1233 # don't show the default branch name
1234 1234 if branch != 'default':
1235 1235 # i18n: column positioning for "hg log"
1236 1236 self.ui.write(_("branch: %s\n") % branch,
1237 1237 label='log.branch')
1238 1238
1239 1239 for nsname, ns in self.repo.names.iteritems():
1240 1240 # the 'branches' namespace has special logic already handled above,
1241 1241 # so we just skip it here
1242 1242 if nsname == 'branches':
1243 1243 continue
1244 1244 # we will use the templatename as the color name since those two
1245 1245 # should be the same
1246 1246 for name in ns.names(self.repo, changenode):
1247 1247 self.ui.write(ns.logfmt % name,
1248 1248 label='log.%s' % ns.colorname)
1249 1249 if self.ui.debugflag:
1250 1250 # i18n: column positioning for "hg log"
1251 1251 self.ui.write(_("phase: %s\n") % ctx.phasestr(),
1252 1252 label='log.phase')
1253 1253 for pctx in scmutil.meaningfulparents(self.repo, ctx):
1254 1254 label = 'log.parent changeset.%s' % pctx.phasestr()
1255 1255 # i18n: column positioning for "hg log"
1256 1256 self.ui.write(_("parent: %d:%s\n")
1257 1257 % (pctx.rev(), hexfunc(pctx.node())),
1258 1258 label=label)
1259 1259
1260 1260 if self.ui.debugflag and rev is not None:
1261 1261 mnode = ctx.manifestnode()
1262 1262 # i18n: column positioning for "hg log"
1263 1263 self.ui.write(_("manifest: %d:%s\n") %
1264 1264 (self.repo.manifestlog._revlog.rev(mnode),
1265 1265 hex(mnode)),
1266 1266 label='ui.debug log.manifest')
1267 1267 # i18n: column positioning for "hg log"
1268 1268 self.ui.write(_("user: %s\n") % ctx.user(),
1269 1269 label='log.user')
1270 1270 # i18n: column positioning for "hg log"
1271 1271 self.ui.write(_("date: %s\n") % date,
1272 1272 label='log.date')
1273 1273
1274 1274 if ctx.troubled():
1275 1275 # i18n: column positioning for "hg log"
1276 1276 self.ui.write(_("trouble: %s\n") % ', '.join(ctx.troubles()),
1277 1277 label='log.trouble')
1278 1278
1279 1279 if self.ui.debugflag:
1280 1280 files = ctx.p1().status(ctx)[:3]
1281 1281 for key, value in zip([# i18n: column positioning for "hg log"
1282 1282 _("files:"),
1283 1283 # i18n: column positioning for "hg log"
1284 1284 _("files+:"),
1285 1285 # i18n: column positioning for "hg log"
1286 1286 _("files-:")], files):
1287 1287 if value:
1288 1288 self.ui.write("%-12s %s\n" % (key, " ".join(value)),
1289 1289 label='ui.debug log.files')
1290 1290 elif ctx.files() and self.ui.verbose:
1291 1291 # i18n: column positioning for "hg log"
1292 1292 self.ui.write(_("files: %s\n") % " ".join(ctx.files()),
1293 1293 label='ui.note log.files')
1294 1294 if copies and self.ui.verbose:
1295 1295 copies = ['%s (%s)' % c for c in copies]
1296 1296 # i18n: column positioning for "hg log"
1297 1297 self.ui.write(_("copies: %s\n") % ' '.join(copies),
1298 1298 label='ui.note log.copies')
1299 1299
1300 1300 extra = ctx.extra()
1301 1301 if extra and self.ui.debugflag:
1302 1302 for key, value in sorted(extra.items()):
1303 1303 # i18n: column positioning for "hg log"
1304 1304 self.ui.write(_("extra: %s=%s\n")
1305 1305 % (key, util.escapestr(value)),
1306 1306 label='ui.debug log.extra')
1307 1307
1308 1308 description = ctx.description().strip()
1309 1309 if description:
1310 1310 if self.ui.verbose:
1311 1311 self.ui.write(_("description:\n"),
1312 1312 label='ui.note log.description')
1313 1313 self.ui.write(description,
1314 1314 label='ui.note log.description')
1315 1315 self.ui.write("\n\n")
1316 1316 else:
1317 1317 # i18n: column positioning for "hg log"
1318 1318 self.ui.write(_("summary: %s\n") %
1319 1319 description.splitlines()[0],
1320 1320 label='log.summary')
1321 1321 self.ui.write("\n")
1322 1322
1323 1323 self.showpatch(ctx, matchfn)
1324 1324
1325 1325 def showpatch(self, ctx, matchfn):
1326 1326 if not matchfn:
1327 1327 matchfn = self.matchfn
1328 1328 if matchfn:
1329 1329 stat = self.diffopts.get('stat')
1330 1330 diff = self.diffopts.get('patch')
1331 1331 diffopts = patch.diffallopts(self.ui, self.diffopts)
1332 1332 node = ctx.node()
1333 1333 prev = ctx.p1().node()
1334 1334 if stat:
1335 1335 diffordiffstat(self.ui, self.repo, diffopts, prev, node,
1336 1336 match=matchfn, stat=True)
1337 1337 if diff:
1338 1338 if stat:
1339 1339 self.ui.write("\n")
1340 1340 diffordiffstat(self.ui, self.repo, diffopts, prev, node,
1341 1341 match=matchfn, stat=False)
1342 1342 self.ui.write("\n")
1343 1343
1344 1344 class jsonchangeset(changeset_printer):
1345 1345 '''format changeset information as a JSON list.'''
1346 1346
1347 1347 def __init__(self, ui, repo, matchfn, diffopts, buffered):
1348 1348 changeset_printer.__init__(self, ui, repo, matchfn, diffopts, buffered)
1349 1349 self.cache = {}
1350 1350 self._first = True
1351 1351
1352 1352 def close(self):
1353 1353 if not self._first:
1354 1354 self.ui.write("\n]\n")
1355 1355 else:
1356 1356 self.ui.write("[]\n")
1357 1357
1358 1358 def _show(self, ctx, copies, matchfn, props):
1359 1359 '''show a single changeset or file revision'''
1360 1360 rev = ctx.rev()
1361 1361 if rev is None:
1362 1362 jrev = jnode = 'null'
1363 1363 else:
1364 1364 jrev = str(rev)
1365 1365 jnode = '"%s"' % hex(ctx.node())
1366 1366 j = encoding.jsonescape
1367 1367
1368 1368 if self._first:
1369 1369 self.ui.write("[\n {")
1370 1370 self._first = False
1371 1371 else:
1372 1372 self.ui.write(",\n {")
1373 1373
1374 1374 if self.ui.quiet:
1375 1375 self.ui.write(('\n "rev": %s') % jrev)
1376 1376 self.ui.write((',\n "node": %s') % jnode)
1377 1377 self.ui.write('\n }')
1378 1378 return
1379 1379
1380 1380 self.ui.write(('\n "rev": %s') % jrev)
1381 1381 self.ui.write((',\n "node": %s') % jnode)
1382 1382 self.ui.write((',\n "branch": "%s"') % j(ctx.branch()))
1383 1383 self.ui.write((',\n "phase": "%s"') % ctx.phasestr())
1384 1384 self.ui.write((',\n "user": "%s"') % j(ctx.user()))
1385 1385 self.ui.write((',\n "date": [%d, %d]') % ctx.date())
1386 1386 self.ui.write((',\n "desc": "%s"') % j(ctx.description()))
1387 1387
1388 1388 self.ui.write((',\n "bookmarks": [%s]') %
1389 1389 ", ".join('"%s"' % j(b) for b in ctx.bookmarks()))
1390 1390 self.ui.write((',\n "tags": [%s]') %
1391 1391 ", ".join('"%s"' % j(t) for t in ctx.tags()))
1392 1392 self.ui.write((',\n "parents": [%s]') %
1393 1393 ", ".join('"%s"' % c.hex() for c in ctx.parents()))
1394 1394
1395 1395 if self.ui.debugflag:
1396 1396 if rev is None:
1397 1397 jmanifestnode = 'null'
1398 1398 else:
1399 1399 jmanifestnode = '"%s"' % hex(ctx.manifestnode())
1400 1400 self.ui.write((',\n "manifest": %s') % jmanifestnode)
1401 1401
1402 1402 self.ui.write((',\n "extra": {%s}') %
1403 1403 ", ".join('"%s": "%s"' % (j(k), j(v))
1404 1404 for k, v in ctx.extra().items()))
1405 1405
1406 1406 files = ctx.p1().status(ctx)
1407 1407 self.ui.write((',\n "modified": [%s]') %
1408 1408 ", ".join('"%s"' % j(f) for f in files[0]))
1409 1409 self.ui.write((',\n "added": [%s]') %
1410 1410 ", ".join('"%s"' % j(f) for f in files[1]))
1411 1411 self.ui.write((',\n "removed": [%s]') %
1412 1412 ", ".join('"%s"' % j(f) for f in files[2]))
1413 1413
1414 1414 elif self.ui.verbose:
1415 1415 self.ui.write((',\n "files": [%s]') %
1416 1416 ", ".join('"%s"' % j(f) for f in ctx.files()))
1417 1417
1418 1418 if copies:
1419 1419 self.ui.write((',\n "copies": {%s}') %
1420 1420 ", ".join('"%s": "%s"' % (j(k), j(v))
1421 1421 for k, v in copies))
1422 1422
1423 1423 matchfn = self.matchfn
1424 1424 if matchfn:
1425 1425 stat = self.diffopts.get('stat')
1426 1426 diff = self.diffopts.get('patch')
1427 1427 diffopts = patch.difffeatureopts(self.ui, self.diffopts, git=True)
1428 1428 node, prev = ctx.node(), ctx.p1().node()
1429 1429 if stat:
1430 1430 self.ui.pushbuffer()
1431 1431 diffordiffstat(self.ui, self.repo, diffopts, prev, node,
1432 1432 match=matchfn, stat=True)
1433 1433 self.ui.write((',\n "diffstat": "%s"')
1434 1434 % j(self.ui.popbuffer()))
1435 1435 if diff:
1436 1436 self.ui.pushbuffer()
1437 1437 diffordiffstat(self.ui, self.repo, diffopts, prev, node,
1438 1438 match=matchfn, stat=False)
1439 1439 self.ui.write((',\n "diff": "%s"') % j(self.ui.popbuffer()))
1440 1440
1441 1441 self.ui.write("\n }")
1442 1442
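# Illustrative shape of the JSON emitted above for one changeset in the
# default (non-quiet, non-verbose, non-debug) mode; the values are made
# up (editor's addition):
#
#   [
#    {
#     "rev": 0,
#     "node": "1f0dee641bb7258c56bd60e93edfa2405381c41e",
#     "branch": "default",
#     "phase": "draft",
#     "user": "alice",
#     "date": [1492000000, 0],
#     "desc": "initial commit",
#     "bookmarks": [],
#     "tags": ["tip"],
#     "parents": ["0000000000000000000000000000000000000000"]
#    }
#   ]
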
1443 1443 class changeset_templater(changeset_printer):
1444 1444 '''format changeset information using a template.'''
1445 1445
1446 1446 def __init__(self, ui, repo, matchfn, diffopts, tmpl, mapfile, buffered):
1447 1447 changeset_printer.__init__(self, ui, repo, matchfn, diffopts, buffered)
1448 1448 assert not (tmpl and mapfile)
1449 1449 defaulttempl = templatekw.defaulttempl
1450 1450 if mapfile:
1451 1451 self.t = templater.templater.frommapfile(mapfile,
1452 1452 cache=defaulttempl)
1453 1453 else:
1454 1454 self.t = formatter.maketemplater(ui, 'changeset', tmpl,
1455 1455 cache=defaulttempl)
1456 1456
1457 1457 self._counter = itertools.count()
1458 1458 self.cache = {}
1459 1459
1460 1460 # find correct templates for current mode
1461 1461 tmplmodes = [
1462 1462 (True, None),
1463 1463 (self.ui.verbose, 'verbose'),
1464 1464 (self.ui.quiet, 'quiet'),
1465 1465 (self.ui.debugflag, 'debug'),
1466 1466 ]
1467 1467
1468 1468 self._parts = {'header': '', 'footer': '', 'changeset': 'changeset',
1469 1469 'docheader': '', 'docfooter': ''}
1470 1470 for mode, postfix in tmplmodes:
1471 1471 for t in self._parts:
1472 1472 cur = t
1473 1473 if postfix:
1474 1474 cur += "_" + postfix
1475 1475 if mode and cur in self.t:
1476 1476 self._parts[t] = cur
1477 1477
1478 1478 if self._parts['docheader']:
1479 1479 self.ui.write(templater.stringify(self.t(self._parts['docheader'])))
1480 1480
1481 1481 def close(self):
1482 1482 if self._parts['docfooter']:
1483 1483 if not self.footer:
1484 1484 self.footer = ""
1485 1485 self.footer += templater.stringify(self.t(self._parts['docfooter']))
1486 1486 return super(changeset_templater, self).close()
1487 1487
1488 1488 def _show(self, ctx, copies, matchfn, props):
1489 1489 '''show a single changeset or file revision'''
1490 1490 props = props.copy()
1491 1491 props.update(templatekw.keywords)
1492 1492 props['templ'] = self.t
1493 1493 props['ctx'] = ctx
1494 1494 props['repo'] = self.repo
1495 1495 props['ui'] = self.repo.ui
1496 1496 props['index'] = next(self._counter)
1497 1497 props['revcache'] = {'copies': copies}
1498 1498 props['cache'] = self.cache
1499 1499
1500 1500 # write header
1501 1501 if self._parts['header']:
1502 1502 h = templater.stringify(self.t(self._parts['header'], **props))
1503 1503 if self.buffered:
1504 1504 self.header[ctx.rev()] = h
1505 1505 else:
1506 1506 if self.lastheader != h:
1507 1507 self.lastheader = h
1508 1508 self.ui.write(h)
1509 1509
1510 1510 # write changeset metadata, then patch if requested
1511 1511 key = self._parts['changeset']
1512 1512 self.ui.write(templater.stringify(self.t(key, **props)))
1513 1513 self.showpatch(ctx, matchfn)
1514 1514
1515 1515 if self._parts['footer']:
1516 1516 if not self.footer:
1517 1517 self.footer = templater.stringify(
1518 1518 self.t(self._parts['footer'], **props))
1519 1519
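# The mode lookup above lets a style map file carry per-mode variants,
# with the most specific existing key winning; e.g. (editor's
# illustration of a map file, not taken from this repository):
#
#   changeset = '{rev}:{node|short} {desc|firstline}\n'
#   changeset_verbose = '{rev}:{node|short} {desc}\n'
#   changeset_debug = '{rev}:{node} {desc}\n'
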
1520 1520 def gettemplate(ui, tmpl, style):
1521 1521 """
1522 1522 Find the template matching the given template spec or style.
1523 1523 """
1524 1524
1525 1525 # ui settings
1526 1526 if not tmpl and not style: # templates are stronger than styles
1527 1527 tmpl = ui.config('ui', 'logtemplate')
1528 1528 if tmpl:
1529 1529 return templater.unquotestring(tmpl), None
1530 1530 else:
1531 1531 style = util.expandpath(ui.config('ui', 'style', ''))
1532 1532
1533 1533 if not tmpl and style:
1534 1534 mapfile = style
1535 1535 if not os.path.split(mapfile)[0]:
1536 1536 mapname = (templater.templatepath('map-cmdline.' + mapfile)
1537 1537 or templater.templatepath(mapfile))
1538 1538 if mapname:
1539 1539 mapfile = mapname
1540 1540 return None, mapfile
1541 1541
1542 1542 if not tmpl:
1543 1543 return None, None
1544 1544
1545 1545 return formatter.lookuptemplate(ui, 'changeset', tmpl)
1546 1546
1547 1547 def show_changeset(ui, repo, opts, buffered=False):
1548 1548 """show one changeset using template or regular display.
1549 1549
1550 1550 Display format will be the first non-empty hit of:
1551 1551 1. option 'template'
1552 1552 2. option 'style'
1553 1553 3. [ui] setting 'logtemplate'
1554 1554 4. [ui] setting 'style'
1555 1555 If all of these values are either unset or the empty string,
1556 1556 regular display via changeset_printer() is done.
1557 1557 """
1558 1558 # options
1559 1559 matchfn = None
1560 1560 if opts.get('patch') or opts.get('stat'):
1561 1561 matchfn = scmutil.matchall(repo)
1562 1562
1563 1563 if opts.get('template') == 'json':
1564 1564 return jsonchangeset(ui, repo, matchfn, opts, buffered)
1565 1565
1566 1566 tmpl, mapfile = gettemplate(ui, opts.get('template'), opts.get('style'))
1567 1567
1568 1568 if not tmpl and not mapfile:
1569 1569 return changeset_printer(ui, repo, matchfn, opts, buffered)
1570 1570
1571 1571 return changeset_templater(ui, repo, matchfn, opts, tmpl, mapfile, buffered)
1572 1572
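def _exampleshowchangeset(ui, repo):
    # editor's sketch of the precedence documented above: an explicit
    # 'template' opt beats [ui] logtemplate, which beats [ui] style
    opts = {'template': '{rev}: {desc|firstline}\n'}
    displayer = show_changeset(ui, repo, opts)
    displayer.show(repo['tip'])
    displayer.close()
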
1573 1573 def showmarker(fm, marker, index=None):
1574 1574 """utility function to display obsolescence marker in a readable way
1575 1575
1576 1576 To be used by debug function."""
1577 1577 if index is not None:
1578 1578 fm.write('index', '%i ', index)
1579 1579 fm.write('precnode', '%s ', hex(marker.precnode()))
1580 1580 succs = marker.succnodes()
1581 1581 fm.condwrite(succs, 'succnodes', '%s ',
1582 1582 fm.formatlist(map(hex, succs), name='node'))
1583 1583 fm.write('flag', '%X ', marker.flags())
1584 1584 parents = marker.parentnodes()
1585 1585 if parents is not None:
1586 1586 fm.write('parentnodes', '{%s} ',
1587 1587 fm.formatlist(map(hex, parents), name='node', sep=', '))
1588 1588 fm.write('date', '(%s) ', fm.formatdate(marker.date()))
1589 1589 meta = marker.metadata().copy()
1590 1590 meta.pop('date', None)
1591 1591 fm.write('metadata', '{%s}', fm.formatdict(meta, fmt='%r: %r', sep=', '))
1592 1592 fm.plain('\n')
1593 1593
1594 1594 def finddate(ui, repo, date):
1595 1595 """Find the tipmost changeset that matches the given date spec"""
1596 1596
1597 1597 df = util.matchdate(date)
1598 1598 m = scmutil.matchall(repo)
1599 1599 results = {}
1600 1600
1601 1601 def prep(ctx, fns):
1602 1602 d = ctx.date()
1603 1603 if df(d[0]):
1604 1604 results[ctx.rev()] = d
1605 1605
1606 1606 for ctx in walkchangerevs(repo, m, {'rev': None}, prep):
1607 1607 rev = ctx.rev()
1608 1608 if rev in results:
1609 1609 ui.status(_("found revision %s from %s\n") %
1610 1610 (rev, util.datestr(results[rev])))
1611 1611 return str(rev)
1612 1612
1613 1613 raise error.Abort(_("revision matching date not found"))
1614 1614
1615 1615 def increasingwindows(windowsize=8, sizelimit=512):
1616 1616 while True:
1617 1617 yield windowsize
1618 1618 if windowsize < sizelimit:
1619 1619 windowsize *= 2
1620 1620
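# For illustration (editor's addition):
#   list(itertools.islice(increasingwindows(), 9))
#   == [8, 16, 32, 64, 128, 256, 512, 512, 512]
# small windows keep the first results fast; the cap bounds memory use.
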
1621 1621 class FileWalkError(Exception):
1622 1622 pass
1623 1623
1624 1624 def walkfilerevs(repo, match, follow, revs, fncache):
1625 1625 '''Walks the file history for the matched files.
1626 1626
1627 1627 Returns the changeset revs that are involved in the file history.
1628 1628
1629 1629 Throws FileWalkError if the file history can't be walked using
1630 1630 filelogs alone.
1631 1631 '''
1632 1632 wanted = set()
1633 1633 copies = []
1634 1634 minrev, maxrev = min(revs), max(revs)
1635 1635 def filerevgen(filelog, last):
1636 1636 """
1637 1637 Only files, no patterns. Check the history of each file.
1638 1638
1639 1639 Examines filelog entries within the minrev/maxrev linkrev range and
1640 1640 returns an iterator yielding (linkrev, parentlinkrevs, copied)
1641 1641 tuples in backwards order.
1642 1642 """
1643 1643 cl_count = len(repo)
1644 1644 revs = []
1645 1645 for j in xrange(0, last + 1):
1646 1646 linkrev = filelog.linkrev(j)
1647 1647 if linkrev < minrev:
1648 1648 continue
1649 1649 # only yield rev for which we have the changelog, it can
1650 1650 # happen while doing "hg log" during a pull or commit
1651 1651 if linkrev >= cl_count:
1652 1652 break
1653 1653
1654 1654 parentlinkrevs = []
1655 1655 for p in filelog.parentrevs(j):
1656 1656 if p != nullrev:
1657 1657 parentlinkrevs.append(filelog.linkrev(p))
1658 1658 n = filelog.node(j)
1659 1659 revs.append((linkrev, parentlinkrevs,
1660 1660 follow and filelog.renamed(n)))
1661 1661
1662 1662 return reversed(revs)
1663 1663 def iterfiles():
1664 1664 pctx = repo['.']
1665 1665 for filename in match.files():
1666 1666 if follow:
1667 1667 if filename not in pctx:
1668 1668 raise error.Abort(_('cannot follow file not in parent '
1669 1669 'revision: "%s"') % filename)
1670 1670 yield filename, pctx[filename].filenode()
1671 1671 else:
1672 1672 yield filename, None
1673 1673 for filename_node in copies:
1674 1674 yield filename_node
1675 1675
1676 1676 for file_, node in iterfiles():
1677 1677 filelog = repo.file(file_)
1678 1678 if not len(filelog):
1679 1679 if node is None:
1680 1680 # A zero count may be a directory or deleted file, so
1681 1681 # try to find matching entries on the slow path.
1682 1682 if follow:
1683 1683 raise error.Abort(
1684 1684 _('cannot follow nonexistent file: "%s"') % file_)
1685 1685 raise FileWalkError("Cannot walk via filelog")
1686 1686 else:
1687 1687 continue
1688 1688
1689 1689 if node is None:
1690 1690 last = len(filelog) - 1
1691 1691 else:
1692 1692 last = filelog.rev(node)
1693 1693
1694 1694 # keep track of all ancestors of the file
1695 1695 ancestors = set([filelog.linkrev(last)])
1696 1696
1697 1697 # iterate from latest to oldest revision
1698 1698 for rev, flparentlinkrevs, copied in filerevgen(filelog, last):
1699 1699 if not follow:
1700 1700 if rev > maxrev:
1701 1701 continue
1702 1702 else:
1703 1703 # Note that last might not be the first interesting
1704 1704 # rev to us:
1705 1705 # if the file has been changed after maxrev, we'll
1706 1706 # have linkrev(last) > maxrev, and we still need
1707 1707 # to explore the file graph
1708 1708 if rev not in ancestors:
1709 1709 continue
1710 1710 # XXX insert 1327 fix here
1711 1711 if flparentlinkrevs:
1712 1712 ancestors.update(flparentlinkrevs)
1713 1713
1714 1714 fncache.setdefault(rev, []).append(file_)
1715 1715 wanted.add(rev)
1716 1716 if copied:
1717 1717 copies.append(copied)
1718 1718
1719 1719 return wanted
1720 1720
1721 1721 class _followfilter(object):
1722 1722 def __init__(self, repo, onlyfirst=False):
1723 1723 self.repo = repo
1724 1724 self.startrev = nullrev
1725 1725 self.roots = set()
1726 1726 self.onlyfirst = onlyfirst
1727 1727
1728 1728 def match(self, rev):
1729 1729 def realparents(rev):
1730 1730 if self.onlyfirst:
1731 1731 return self.repo.changelog.parentrevs(rev)[0:1]
1732 1732 else:
1733 1733 return filter(lambda x: x != nullrev,
1734 1734 self.repo.changelog.parentrevs(rev))
1735 1735
1736 1736 if self.startrev == nullrev:
1737 1737 self.startrev = rev
1738 1738 return True
1739 1739
1740 1740 if rev > self.startrev:
1741 1741 # forward: all descendants
1742 1742 if not self.roots:
1743 1743 self.roots.add(self.startrev)
1744 1744 for parent in realparents(rev):
1745 1745 if parent in self.roots:
1746 1746 self.roots.add(rev)
1747 1747 return True
1748 1748 else:
1749 1749 # backwards: all parents
1750 1750 if not self.roots:
1751 1751 self.roots.update(realparents(self.startrev))
1752 1752 if rev in self.roots:
1753 1753 self.roots.remove(rev)
1754 1754 self.roots.update(realparents(rev))
1755 1755 return True
1756 1756
1757 1757 return False
1758 1758
1759 1759 def walkchangerevs(repo, match, opts, prepare):
1760 1760 '''Iterate over files and the revs in which they changed.
1761 1761
1762 1762 Callers most commonly need to iterate backwards over the history
1763 1763 in which they are interested. Doing so has awful (quadratic-looking)
1764 1764 performance, so we use iterators in a "windowed" way.
1765 1765
1766 1766 We walk a window of revisions in the desired order. Within the
1767 1767 window, we first walk forwards to gather data, then in the desired
1768 1768 order (usually backwards) to display it.
1769 1769
1770 1770 This function returns an iterator yielding contexts. Before
1771 1771 yielding each context, the iterator will first call the prepare
1772 1772 function on each context in the window in forward order.'''
1773 1773
1774 1774 follow = opts.get('follow') or opts.get('follow_first')
1775 1775 revs = _logrevs(repo, opts)
1776 1776 if not revs:
1777 1777 return []
1778 1778 wanted = set()
1779 1779 slowpath = match.anypats() or ((match.isexact() or match.prefix()) and
1780 1780 opts.get('removed'))
1781 1781 fncache = {}
1782 1782 change = repo.changectx
1783 1783
1784 1784 # First step is to fill wanted, the set of revisions that we want to yield.
1785 1785 # When it does not induce extra cost, we also fill fncache for revisions in
1786 1786 # wanted: a cache of filenames that were changed (ctx.files()) and that
1787 1787 # match the file filtering conditions.
1788 1788
1789 1789 if match.always():
1790 1790 # No files, no patterns. Display all revs.
1791 1791 wanted = revs
1792 1792 elif not slowpath:
1793 1793 # We only have to read through the filelog to find wanted revisions
1794 1794
1795 1795 try:
1796 1796 wanted = walkfilerevs(repo, match, follow, revs, fncache)
1797 1797 except FileWalkError:
1798 1798 slowpath = True
1799 1799
1800 1800 # We decided to fall back to the slowpath because at least one
1801 1801 # of the paths was not a file. Check to see if at least one of them
1802 1802 # existed in history, otherwise simply return
1803 1803 for path in match.files():
1804 1804 if path == '.' or path in repo.store:
1805 1805 break
1806 1806 else:
1807 1807 return []
1808 1808
1809 1809 if slowpath:
1810 1810 # We have to read the changelog to match filenames against
1811 1811 # changed files
1812 1812
1813 1813 if follow:
1814 1814 raise error.Abort(_('can only follow copies/renames for explicit '
1815 1815 'filenames'))
1816 1816
1817 1817 # The slow path checks files modified in every changeset.
1818 1818 # This is really slow on large repos, so compute the set lazily.
1819 1819 class lazywantedset(object):
1820 1820 def __init__(self):
1821 1821 self.set = set()
1822 1822 self.revs = set(revs)
1823 1823
1824 1824 # No need to worry about locality here because it will be accessed
1825 1825 # in the same order as the increasing window below.
1826 1826 def __contains__(self, value):
1827 1827 if value in self.set:
1828 1828 return True
1829 1829 elif value not in self.revs:
1830 1830 return False
1831 1831 else:
1832 1832 self.revs.discard(value)
1833 1833 ctx = change(value)
1834 1834 matches = filter(match, ctx.files())
1835 1835 if matches:
1836 1836 fncache[value] = matches
1837 1837 self.set.add(value)
1838 1838 return True
1839 1839 return False
1840 1840
1841 1841 def discard(self, value):
1842 1842 self.revs.discard(value)
1843 1843 self.set.discard(value)
1844 1844
1845 1845 wanted = lazywantedset()
1846 1846
1847 1847 # it might be worthwhile to do this in the iterator if the rev range
1848 1848 # is descending and the prune args are all within that range
1849 1849 for rev in opts.get('prune', ()):
1850 1850 rev = repo[rev].rev()
1851 1851 ff = _followfilter(repo)
1852 1852 stop = min(revs[0], revs[-1])
1853 1853 for x in xrange(rev, stop - 1, -1):
1854 1854 if ff.match(x):
1855 1855 wanted = wanted - [x]
1856 1856
1857 1857 # Now that wanted is correctly initialized, we can iterate over the
1858 1858 # revision range, yielding only revisions in wanted.
1859 1859 def iterate():
1860 1860 if follow and match.always():
1861 1861 ff = _followfilter(repo, onlyfirst=opts.get('follow_first'))
1862 1862 def want(rev):
1863 1863 return ff.match(rev) and rev in wanted
1864 1864 else:
1865 1865 def want(rev):
1866 1866 return rev in wanted
1867 1867
1868 1868 it = iter(revs)
1869 1869 stopiteration = False
1870 1870 for windowsize in increasingwindows():
1871 1871 nrevs = []
1872 1872 for i in xrange(windowsize):
1873 1873 rev = next(it, None)
1874 1874 if rev is None:
1875 1875 stopiteration = True
1876 1876 break
1877 1877 elif want(rev):
1878 1878 nrevs.append(rev)
1879 1879 for rev in sorted(nrevs):
1880 1880 fns = fncache.get(rev)
1881 1881 ctx = change(rev)
1882 1882 if not fns:
1883 1883 def fns_generator():
1884 1884 for f in ctx.files():
1885 1885 if match(f):
1886 1886 yield f
1887 1887 fns = fns_generator()
1888 1888 prepare(ctx, fns)
1889 1889 for rev in nrevs:
1890 1890 yield change(rev)
1891 1891
1892 1892 if stopiteration:
1893 1893 break
1894 1894
1895 1895 return iterate()
1896 1896
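def _examplewalk(repo):
    # editor's sketch of driving walkchangerevs(), patterned on
    # finddate() above: 'prep' sees every context in a window before
    # the iterator yields it
    m = scmutil.matchall(repo)
    seen = []
    def prep(ctx, fns):
        seen.append(ctx.rev())
    for ctx in walkchangerevs(repo, m, {'rev': None}, prep):
        pass
    return seen
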
1897 1897 def _makefollowlogfilematcher(repo, files, followfirst):
1898 1898 # When displaying a revision with --patch --follow FILE, we have
1899 1899 # to know which file of the revision must be diffed. With
1900 1900 # --follow, we want the names of the ancestors of FILE in the
1901 1901 # revision, stored in "fcache". "fcache" is populated by
1902 1902 # reproducing the graph traversal already done by --follow revset
1903 1903 # and relating revs to file names (which is not "correct" but
1904 1904 # good enough).
1905 1905 fcache = {}
1906 1906 fcacheready = [False]
1907 1907 pctx = repo['.']
1908 1908
1909 1909 def populate():
1910 1910 for fn in files:
1911 1911 fctx = pctx[fn]
1912 1912 fcache.setdefault(fctx.introrev(), set()).add(fctx.path())
1913 1913 for c in fctx.ancestors(followfirst=followfirst):
1914 1914 fcache.setdefault(c.rev(), set()).add(c.path())
1915 1915
1916 1916 def filematcher(rev):
1917 1917 if not fcacheready[0]:
1918 1918 # Lazy initialization
1919 1919 fcacheready[0] = True
1920 1920 populate()
1921 1921 return scmutil.matchfiles(repo, fcache.get(rev, []))
1922 1922
1923 1923 return filematcher
1924 1924
1925 1925 def _makenofollowlogfilematcher(repo, pats, opts):
1926 1926 '''hook for extensions to override the filematcher for non-follow cases'''
1927 1927 return None
1928 1928
1929 1929 def _makelogrevset(repo, pats, opts, revs):
1930 1930 """Return (expr, filematcher) where expr is a revset string built
1931 1931 from log options and file patterns, or None. If --stat or --patch
1932 1932 are not passed, filematcher is None. Otherwise it is a callable
1933 1933 taking a revision number and returning a match object filtering
1934 1934 the files to be detailed when displaying the revision.
1935 1935 """
1936 1936 opt2revset = {
1937 1937 'no_merges': ('not merge()', None),
1938 1938 'only_merges': ('merge()', None),
1939 1939 '_ancestors': ('ancestors(%(val)s)', None),
1940 1940 '_fancestors': ('_firstancestors(%(val)s)', None),
1941 1941 '_descendants': ('descendants(%(val)s)', None),
1942 1942 '_fdescendants': ('_firstdescendants(%(val)s)', None),
1943 1943 '_matchfiles': ('_matchfiles(%(val)s)', None),
1944 1944 'date': ('date(%(val)r)', None),
1945 1945 'branch': ('branch(%(val)r)', ' or '),
1946 1946 '_patslog': ('filelog(%(val)r)', ' or '),
1947 1947 '_patsfollow': ('follow(%(val)r)', ' or '),
1948 1948 '_patsfollowfirst': ('_followfirst(%(val)r)', ' or '),
1949 1949 'keyword': ('keyword(%(val)r)', ' or '),
1950 1950 'prune': ('not (%(val)r or ancestors(%(val)r))', ' and '),
1951 1951 'user': ('user(%(val)r)', ' or '),
1952 1952 }
1953 1953
1954 1954 opts = dict(opts)
1955 1955 # follow or not follow?
1956 1956 follow = opts.get('follow') or opts.get('follow_first')
1957 1957 if opts.get('follow_first'):
1958 1958 followfirst = 1
1959 1959 else:
1960 1960 followfirst = 0
1961 1961 # --follow with FILE behavior depends on revs...
1962 1962 it = iter(revs)
1963 1963 startrev = next(it)
1964 1964 followdescendants = startrev < next(it, startrev)
1965 1965
1966 1966 # branch and only_branch are really aliases and must be handled at
1967 1967 # the same time
1968 1968 opts['branch'] = opts.get('branch', []) + opts.get('only_branch', [])
1969 1969 opts['branch'] = [repo.lookupbranch(b) for b in opts['branch']]
1970 1970 # pats/include/exclude are passed to match.match() directly in
1971 1971 # _matchfiles() revset but walkchangerevs() builds its matcher with
1972 1972 # scmutil.match(). The difference is input pats are globbed on
1973 1973 # platforms without shell expansion (windows).
1974 1974 wctx = repo[None]
1975 1975 match, pats = scmutil.matchandpats(wctx, pats, opts)
1976 1976 slowpath = match.anypats() or ((match.isexact() or match.prefix()) and
1977 1977 opts.get('removed'))
1978 1978 if not slowpath:
1979 1979 for f in match.files():
1980 1980 if follow and f not in wctx:
1981 1981 # If the file exists, it may be a directory, so let it
1982 1982 # take the slow path.
1983 1983 if os.path.exists(repo.wjoin(f)):
1984 1984 slowpath = True
1985 1985 continue
1986 1986 else:
1987 1987 raise error.Abort(_('cannot follow file not in parent '
1988 1988 'revision: "%s"') % f)
1989 1989 filelog = repo.file(f)
1990 1990 if not filelog:
1991 1991 # A zero count may be a directory or deleted file, so
1992 1992 # try to find matching entries on the slow path.
1993 1993 if follow:
1994 1994 raise error.Abort(
1995 1995 _('cannot follow nonexistent file: "%s"') % f)
1996 1996 slowpath = True
1997 1997
1998 1998 # We decided to fall back to the slowpath because at least one
1999 1999 # of the paths was not a file. Check to see if at least one of them
2000 2000 # existed in history - in that case, we'll continue down the
2001 2001 # slowpath; otherwise, we can turn off the slowpath
2002 2002 if slowpath:
2003 2003 for path in match.files():
2004 2004 if path == '.' or path in repo.store:
2005 2005 break
2006 2006 else:
2007 2007 slowpath = False
2008 2008
2009 2009 fpats = ('_patsfollow', '_patsfollowfirst')
2010 2010 fnopats = (('_ancestors', '_fancestors'),
2011 2011 ('_descendants', '_fdescendants'))
2012 2012 if slowpath:
2013 2013 # See walkchangerevs() slow path.
2014 2014 #
2015 2015 # pats/include/exclude cannot be represented as separate
2016 2016 # revset expressions as their filtering logic applies at file
2017 2017 # level. For instance "-I a -X a" matches a revision touching
2018 2018 # "a" and "b" while "file(a) and not file(b)" does
2019 2019 # not. Besides, filesets are evaluated against the working
2020 2020 # directory.
2021 2021 matchargs = ['r:', 'd:relpath']
2022 2022 for p in pats:
2023 2023 matchargs.append('p:' + p)
2024 2024 for p in opts.get('include', []):
2025 2025 matchargs.append('i:' + p)
2026 2026 for p in opts.get('exclude', []):
2027 2027 matchargs.append('x:' + p)
2028 2028 matchargs = ','.join(('%r' % p) for p in matchargs)
2029 2029 opts['_matchfiles'] = matchargs
2030 2030 if follow:
2031 2031 opts[fnopats[0][followfirst]] = '.'
2032 2032 else:
2033 2033 if follow:
2034 2034 if pats:
2035 2035 # follow() revset interprets its file argument as a
2036 2036 # manifest entry, so use match.files(), not pats.
2037 2037 opts[fpats[followfirst]] = list(match.files())
2038 2038 else:
2039 2039 op = fnopats[followdescendants][followfirst]
2040 2040 opts[op] = 'rev(%d)' % startrev
2041 2041 else:
2042 2042 opts['_patslog'] = list(pats)
2043 2043
2044 2044 filematcher = None
2045 2045 if opts.get('patch') or opts.get('stat'):
2046 2046 # When following files, track renames via a special matcher.
2047 2047 # If we're forced to take the slowpath it means we're following
2048 2048 # at least one pattern/directory, so don't bother with rename tracking.
2049 2049 if follow and not match.always() and not slowpath:
2050 2050 # _makefollowlogfilematcher expects its files argument to be
2051 2051 # relative to the repo root, so use match.files(), not pats.
2052 2052 filematcher = _makefollowlogfilematcher(repo, match.files(),
2053 2053 followfirst)
2054 2054 else:
2055 2055 filematcher = _makenofollowlogfilematcher(repo, pats, opts)
2056 2056 if filematcher is None:
2057 2057 filematcher = lambda rev: match
2058 2058
2059 2059 expr = []
2060 2060 for op, val in sorted(opts.iteritems()):
2061 2061 if not val:
2062 2062 continue
2063 2063 if op not in opt2revset:
2064 2064 continue
2065 2065 revop, andor = opt2revset[op]
2066 2066 if '%(val)' not in revop:
2067 2067 expr.append(revop)
2068 2068 else:
2069 2069 if not isinstance(val, list):
2070 2070 e = revop % {'val': val}
2071 2071 else:
2072 2072 e = '(' + andor.join((revop % {'val': v}) for v in val) + ')'
2073 2073 expr.append(e)
2074 2074
2075 2075 if expr:
2076 2076 expr = '(' + ' and '.join(expr) + ')'
2077 2077 else:
2078 2078 expr = None
2079 2079 return expr, filematcher
2080 2080
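# For illustration (editor's sketch), 'hg log -k bug -u alice b.txt'
# builds roughly
#   (filelog('b.txt') and keyword('bug') and user('alice'))
# which the callers below hand to revset.match() to filter revisions.
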
2081 2081 def _logrevs(repo, opts):
2082 2082 # Default --rev value depends on --follow but --follow behavior
2083 2083 # depends on revisions resolved from --rev...
2084 2084 follow = opts.get('follow') or opts.get('follow_first')
2085 2085 if opts.get('rev'):
2086 2086 revs = scmutil.revrange(repo, opts['rev'])
2087 2087 elif follow and repo.dirstate.p1() == nullid:
2088 2088 revs = smartset.baseset()
2089 2089 elif follow:
2090 2090 revs = repo.revs('reverse(:.)')
2091 2091 else:
2092 2092 revs = smartset.spanset(repo)
2093 2093 revs.reverse()
2094 2094 return revs
2095 2095
2096 2096 def getgraphlogrevs(repo, pats, opts):
2097 2097 """Return (revs, expr, filematcher) where revs is an iterable of
2098 2098 revision numbers, expr is a revset string built from log options
2099 2099 and file patterns or None, and used to filter 'revs'. If --stat or
2100 2100 --patch are not passed, filematcher is None. Otherwise it is a
2101 2101 callable taking a revision number and returning a match object
2102 2102 filtering the files to be detailed when displaying the revision.
2103 2103 """
2104 2104 limit = loglimit(opts)
2105 2105 revs = _logrevs(repo, opts)
2106 2106 if not revs:
2107 2107 return smartset.baseset(), None, None
2108 2108 expr, filematcher = _makelogrevset(repo, pats, opts, revs)
2109 2109 if opts.get('rev'):
2110 2110 # User-specified revs might be unsorted, but don't sort before
2111 2111 # _makelogrevset because it might depend on the order of revs
2112 2112 if not (revs.isdescending() or revs.istopo()):
2113 2113 revs.sort(reverse=True)
2114 2114 if expr:
2115 2115 matcher = revset.match(repo.ui, expr, order=revset.followorder)
2116 2116 revs = matcher(repo, revs)
2117 2117 if limit is not None:
2118 2118 limitedrevs = []
2119 2119 for idx, rev in enumerate(revs):
2120 2120 if idx >= limit:
2121 2121 break
2122 2122 limitedrevs.append(rev)
2123 2123 revs = smartset.baseset(limitedrevs)
2124 2124
2125 2125 return revs, expr, filematcher
2126 2126
2127 2127 def getlogrevs(repo, pats, opts):
2128 2128 """Return (revs, expr, filematcher) where revs is an iterable of
2129 2129 revision numbers, expr is a revset string built from log options
2130 2130 and file patterns or None, and used to filter 'revs'. If --stat or
2131 2131 --patch are not passed, filematcher is None. Otherwise it is a
2132 2132 callable taking a revision number and returning a match object
2133 2133 filtering the files to be detailed when displaying the revision.
2134 2134 """
2135 2135 limit = loglimit(opts)
2136 2136 revs = _logrevs(repo, opts)
2137 2137 if not revs:
2138 2138 return smartset.baseset([]), None, None
2139 2139 expr, filematcher = _makelogrevset(repo, pats, opts, revs)
2140 2140 if expr:
2141 2141 matcher = revset.match(repo.ui, expr, order=revset.followorder)
2142 2142 revs = matcher(repo, revs)
2143 2143 if limit is not None:
2144 2144 limitedrevs = []
2145 2145 for idx, r in enumerate(revs):
2146 2146 if limit <= idx:
2147 2147 break
2148 2148 limitedrevs.append(r)
2149 2149 revs = smartset.baseset(limitedrevs)
2150 2150
2151 2151 return revs, expr, filematcher
2152 2152
2153 2153 def _graphnodeformatter(ui, displayer):
2154 2154 spec = ui.config('ui', 'graphnodetemplate')
2155 2155 if not spec:
2156 2156 return templatekw.showgraphnode # fast path for "{graphnode}"
2157 2157
2158 2158 spec = templater.unquotestring(spec)
2159 2159 templ = formatter.gettemplater(ui, 'graphnode', spec)
2160 2160 cache = {}
2161 2161 if isinstance(displayer, changeset_templater):
2162 2162 cache = displayer.cache # reuse cache of slow templates
2163 2163 props = templatekw.keywords.copy()
2164 2164 props['templ'] = templ
2165 2165 props['cache'] = cache
2166 2166 def formatnode(repo, ctx):
2167 2167 props['ctx'] = ctx
2168 2168 props['repo'] = repo
2169 2169 props['ui'] = repo.ui
2170 2170 props['revcache'] = {}
2171 2171 return templater.stringify(templ('graphnode', **props))
2172 2172 return formatnode
2173 2173
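# The node symbol is templatable; e.g. (editor's illustration):
#   [ui]
#   graphnodetemplate = {if(bookmarks, "*", graphnode)}
# marks bookmarked changesets with '*' and otherwise falls back to the
# default '{graphnode}' symbol.
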
2174 2174 def displaygraph(ui, repo, dag, displayer, edgefn, getrenamed=None,
2175 2175 filematcher=None):
2176 2176 formatnode = _graphnodeformatter(ui, displayer)
2177 2177 state = graphmod.asciistate()
2178 2178 styles = state['styles']
2179 2179
2180 2180 # only set graph styling if HGPLAIN is not set.
2181 2181 if ui.plain('graph'):
2182 2182 # set all edge styles to |, the default pre-3.8 behaviour
2183 2183 styles.update(dict.fromkeys(styles, '|'))
2184 2184 else:
2185 2185 edgetypes = {
2186 2186 'parent': graphmod.PARENT,
2187 2187 'grandparent': graphmod.GRANDPARENT,
2188 2188 'missing': graphmod.MISSINGPARENT
2189 2189 }
2190 2190 for name, key in edgetypes.items():
2191 2191 # experimental config: experimental.graphstyle.*
2192 2192 styles[key] = ui.config('experimental', 'graphstyle.%s' % name,
2193 2193 styles[key])
2194 2194 if not styles[key]:
2195 2195 styles[key] = None
2196 2196
2197 2197 # experimental config: experimental.graphshorten
2198 2198 state['graphshorten'] = ui.configbool('experimental', 'graphshorten')
2199 2199
2200 2200 for rev, type, ctx, parents in dag:
2201 2201 char = formatnode(repo, ctx)
2202 2202 copies = None
2203 2203 if getrenamed and ctx.rev():
2204 2204 copies = []
2205 2205 for fn in ctx.files():
2206 2206 rename = getrenamed(fn, ctx.rev())
2207 2207 if rename:
2208 2208 copies.append((fn, rename[0]))
2209 2209 revmatchfn = None
2210 2210 if filematcher is not None:
2211 2211 revmatchfn = filematcher(ctx.rev())
2212 2212 displayer.show(ctx, copies=copies, matchfn=revmatchfn)
2213 2213 lines = displayer.hunk.pop(rev).split('\n')
2214 2214 if not lines[-1]:
2215 2215 del lines[-1]
2216 2216 displayer.flush(ctx)
2217 2217 edges = edgefn(type, char, lines, state, rev, parents)
2218 2218 for type, char, lines, coldata in edges:
2219 2219 graphmod.ascii(ui, state, type, char, lines, coldata)
2220 2220 displayer.close()
2221 2221
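# The styling knobs read above can be set like this (editor's
# illustration):
#   [experimental]
#   graphstyle.parent = |
#   graphstyle.grandparent = :
#   graphstyle.missing =
#   graphshorten = true
# an empty value is normalized to None above before graphmod sees it.
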
2222 2222 def graphlog(ui, repo, pats, opts):
2223 2223 # Parameters are identical to log command ones
2224 2224 revs, expr, filematcher = getgraphlogrevs(repo, pats, opts)
2225 2225 revdag = graphmod.dagwalker(repo, revs)
2226 2226
2227 2227 getrenamed = None
2228 2228 if opts.get('copies'):
2229 2229 endrev = None
2230 2230 if opts.get('rev'):
2231 2231 endrev = scmutil.revrange(repo, opts.get('rev')).max() + 1
2232 2232 getrenamed = templatekw.getrenamedfn(repo, endrev=endrev)
2233 2233
2234 2234 ui.pager('log')
2235 2235 displayer = show_changeset(ui, repo, opts, buffered=True)
2236 2236 displaygraph(ui, repo, revdag, displayer, graphmod.asciiedges, getrenamed,
2237 2237 filematcher)
2238 2238
2239 2239 def checkunsupportedgraphflags(pats, opts):
2240 2240 for op in ["newest_first"]:
2241 2241 if op in opts and opts[op]:
2242 2242 raise error.Abort(_("-G/--graph option is incompatible with --%s")
2243 2243 % op.replace("_", "-"))
2244 2244
2245 2245 def graphrevs(repo, nodes, opts):
2246 2246 limit = loglimit(opts)
2247 2247 nodes.reverse()
2248 2248 if limit is not None:
2249 2249 nodes = nodes[:limit]
2250 2250 return graphmod.nodes(repo, nodes)
2251 2251
2252 2252 def add(ui, repo, match, prefix, explicitonly, **opts):
2253 2253 join = lambda f: os.path.join(prefix, f)
2254 2254 bad = []
2255 2255
2256 2256 badfn = lambda x, y: bad.append(x) or match.bad(x, y)
2257 2257 names = []
2258 2258 wctx = repo[None]
2259 2259 cca = None
2260 2260 abort, warn = scmutil.checkportabilityalert(ui)
2261 2261 if abort or warn:
2262 2262 cca = scmutil.casecollisionauditor(ui, abort, repo.dirstate)
2263 2263
2264 2264 badmatch = matchmod.badmatch(match, badfn)
2265 2265 dirstate = repo.dirstate
2266 2266 # We don't want to just call wctx.walk here, since it would return a lot of
2267 2267 # clean files, which we aren't interested in, and doing so takes time.
2268 2268 for f in sorted(dirstate.walk(badmatch, sorted(wctx.substate),
2269 2269 True, False, full=False)):
2270 2270 exact = match.exact(f)
2271 2271 if exact or not explicitonly and f not in wctx and repo.wvfs.lexists(f):
2272 2272 if cca:
2273 2273 cca(f)
2274 2274 names.append(f)
2275 2275 if ui.verbose or not exact:
2276 2276 ui.status(_('adding %s\n') % match.rel(f))
2277 2277
2278 2278 for subpath in sorted(wctx.substate):
2279 2279 sub = wctx.sub(subpath)
2280 2280 try:
2281 2281 submatch = matchmod.subdirmatcher(subpath, match)
2282 2282 if opts.get(r'subrepos'):
2283 2283 bad.extend(sub.add(ui, submatch, prefix, False, **opts))
2284 2284 else:
2285 2285 bad.extend(sub.add(ui, submatch, prefix, True, **opts))
2286 2286 except error.LookupError:
2287 2287 ui.status(_("skipping missing subrepository: %s\n")
2288 2288 % join(subpath))
2289 2289
2290 2290 if not opts.get(r'dry_run'):
2291 2291 rejected = wctx.add(names, prefix)
2292 2292 bad.extend(f for f in rejected if f in match.files())
2293 2293 return bad
2294 2294
2295 2295 def addwebdirpath(repo, serverpath, webconf):
2296 2296 webconf[serverpath] = repo.root
2297 2297 repo.ui.debug('adding %s = %s\n' % (serverpath, repo.root))
2298 2298
2299 2299 for r in repo.revs('filelog("path:.hgsub")'):
2300 2300 ctx = repo[r]
2301 2301 for subpath in ctx.substate:
2302 2302 ctx.sub(subpath).addwebdirpath(serverpath, webconf)
2303 2303
2304 2304 def forget(ui, repo, match, prefix, explicitonly):
2305 2305 join = lambda f: os.path.join(prefix, f)
2306 2306 bad = []
2307 2307 badfn = lambda x, y: bad.append(x) or match.bad(x, y)
2308 2308 wctx = repo[None]
2309 2309 forgot = []
2310 2310
2311 2311 s = repo.status(match=matchmod.badmatch(match, badfn), clean=True)
2312 2312 forget = sorted(s[0] + s[1] + s[3] + s[6])
2313 2313 if explicitonly:
2314 2314 forget = [f for f in forget if match.exact(f)]
2315 2315
2316 2316 for subpath in sorted(wctx.substate):
2317 2317 sub = wctx.sub(subpath)
2318 2318 try:
2319 2319 submatch = matchmod.subdirmatcher(subpath, match)
2320 2320 subbad, subforgot = sub.forget(submatch, prefix)
2321 2321 bad.extend([subpath + '/' + f for f in subbad])
2322 2322 forgot.extend([subpath + '/' + f for f in subforgot])
2323 2323 except error.LookupError:
2324 2324 ui.status(_("skipping missing subrepository: %s\n")
2325 2325 % join(subpath))
2326 2326
2327 2327 if not explicitonly:
2328 2328 for f in match.files():
2329 2329 if f not in repo.dirstate and not repo.wvfs.isdir(f):
2330 2330 if f not in forgot:
2331 2331 if repo.wvfs.exists(f):
2332 2332 # Don't complain if the exact case match wasn't given.
2333 2333 # But don't do this until after checking 'forgot', so
2334 2334 # that subrepo files aren't normalized, and this op is
2335 2335 # purely from data cached by the status walk above.
2336 2336 if repo.dirstate.normalize(f) in repo.dirstate:
2337 2337 continue
2338 2338 ui.warn(_('not removing %s: '
2339 2339 'file is already untracked\n')
2340 2340 % match.rel(f))
2341 2341 bad.append(f)
2342 2342
2343 2343 for f in forget:
2344 2344 if ui.verbose or not match.exact(f):
2345 2345 ui.status(_('removing %s\n') % match.rel(f))
2346 2346
2347 2347 rejected = wctx.forget(forget, prefix)
2348 2348 bad.extend(f for f in rejected if f in match.files())
2349 2349 forgot.extend(f for f in forget if f not in rejected)
2350 2350 return bad, forgot
2351 2351
2352 2352 def files(ui, ctx, m, fm, fmt, subrepos):
2353 2353 rev = ctx.rev()
2354 2354 ret = 1
2355 2355 ds = ctx.repo().dirstate
2356 2356
2357 2357 for f in ctx.matches(m):
2358 2358 if rev is None and ds[f] == 'r':
2359 2359 continue
2360 2360 fm.startitem()
2361 2361 if ui.verbose:
2362 2362 fc = ctx[f]
2363 2363 fm.write('size flags', '% 10d % 1s ', fc.size(), fc.flags())
2364 2364 fm.data(abspath=f)
2365 2365 fm.write('path', fmt, m.rel(f))
2366 2366 ret = 0
2367 2367
2368 2368 for subpath in sorted(ctx.substate):
2369 2369 submatch = matchmod.subdirmatcher(subpath, m)
2370 2370 if (subrepos or m.exact(subpath) or any(submatch.files())):
2371 2371 sub = ctx.sub(subpath)
2372 2372 try:
2373 2373 recurse = m.exact(subpath) or subrepos
2374 2374 if sub.printfiles(ui, submatch, fm, fmt, recurse) == 0:
2375 2375 ret = 0
2376 2376 except error.LookupError:
2377 2377 ui.status(_("skipping missing subrepository: %s\n")
2378 2378 % m.abs(subpath))
2379 2379
2380 2380 return ret
2381 2381
2382 2382 def remove(ui, repo, m, prefix, after, force, subrepos, warnings=None):
2383 2383 join = lambda f: os.path.join(prefix, f)
2384 2384 ret = 0
2385 2385 s = repo.status(match=m, clean=True)
2386 2386 modified, added, deleted, clean = s[0], s[1], s[3], s[6]
2387 2387
2388 2388 wctx = repo[None]
2389 2389
2390 2390 if warnings is None:
2391 2391 warnings = []
2392 2392 warn = True
2393 2393 else:
2394 2394 warn = False
2395 2395
2396 2396 subs = sorted(wctx.substate)
2397 2397 total = len(subs)
2398 2398 count = 0
2399 2399 for subpath in subs:
2400 2400 count += 1
2401 2401 submatch = matchmod.subdirmatcher(subpath, m)
2402 2402 if subrepos or m.exact(subpath) or any(submatch.files()):
2403 2403 ui.progress(_('searching'), count, total=total, unit=_('subrepos'))
2404 2404 sub = wctx.sub(subpath)
2405 2405 try:
2406 2406 if sub.removefiles(submatch, prefix, after, force, subrepos,
2407 2407 warnings):
2408 2408 ret = 1
2409 2409 except error.LookupError:
2410 2410 warnings.append(_("skipping missing subrepository: %s\n")
2411 2411 % join(subpath))
2412 2412 ui.progress(_('searching'), None)
2413 2413
2414 2414 # warn about failure to delete explicit files/dirs
2415 2415 deleteddirs = util.dirs(deleted)
2416 2416 files = m.files()
2417 2417 total = len(files)
2418 2418 count = 0
2419 2419 for f in files:
2420 2420 def insubrepo():
2421 2421 for subpath in wctx.substate:
2422 2422 if f.startswith(subpath + '/'):
2423 2423 return True
2424 2424 return False
2425 2425
2426 2426 count += 1
2427 2427 ui.progress(_('deleting'), count, total=total, unit=_('files'))
2428 2428 isdir = f in deleteddirs or wctx.hasdir(f)
2429 2429 if (f in repo.dirstate or isdir or f == '.'
2430 2430 or insubrepo() or f in subs):
2431 2431 continue
2432 2432
2433 2433 if repo.wvfs.exists(f):
2434 2434 if repo.wvfs.isdir(f):
2435 2435 warnings.append(_('not removing %s: no tracked files\n')
2436 2436 % m.rel(f))
2437 2437 else:
2438 2438 warnings.append(_('not removing %s: file is untracked\n')
2439 2439 % m.rel(f))
2440 2440 # missing files will generate a warning elsewhere
2441 2441 ret = 1
2442 2442 ui.progress(_('deleting'), None)
2443 2443
2444 2444 if force:
2445 2445 list = modified + deleted + clean + added
2446 2446 elif after:
2447 2447 list = deleted
2448 2448 remaining = modified + added + clean
2449 2449 total = len(remaining)
2450 2450 count = 0
2451 2451 for f in remaining:
2452 2452 count += 1
2453 2453 ui.progress(_('skipping'), count, total=total, unit=_('files'))
2454 2454 warnings.append(_('not removing %s: file still exists\n')
2455 2455 % m.rel(f))
2456 2456 ret = 1
2457 2457 ui.progress(_('skipping'), None)
2458 2458 else:
2459 2459 list = deleted + clean
2460 2460 total = len(modified) + len(added)
2461 2461 count = 0
2462 2462 for f in modified:
2463 2463 count += 1
2464 2464 ui.progress(_('skipping'), count, total=total, unit=_('files'))
2465 2465 warnings.append(_('not removing %s: file is modified (use -f'
2466 2466 ' to force removal)\n') % m.rel(f))
2467 2467 ret = 1
2468 2468 for f in added:
2469 2469 count += 1
2470 2470 ui.progress(_('skipping'), count, total=total, unit=_('files'))
2471 2471 warnings.append(_("not removing %s: file has been marked for add"
2472 2472 " (use 'hg forget' to undo add)\n") % m.rel(f))
2473 2473 ret = 1
2474 2474 ui.progress(_('skipping'), None)
2475 2475
2476 2476 list = sorted(list)
2477 2477 total = len(list)
2478 2478 count = 0
2479 2479 for f in list:
2480 2480 count += 1
2481 2481 if ui.verbose or not m.exact(f):
2482 2482 ui.progress(_('deleting'), count, total=total, unit=_('files'))
2483 2483 ui.status(_('removing %s\n') % m.rel(f))
2484 2484 ui.progress(_('deleting'), None)
2485 2485
2486 2486 with repo.wlock():
2487 2487 if not after:
2488 2488 for f in list:
2489 2489 if f in added:
2490 2490 continue # we never unlink added files on remove
2491 2491 repo.wvfs.unlinkpath(f, ignoremissing=True)
2492 2492 repo[None].forget(list)
2493 2493
2494 2494 if warn:
2495 2495 for warning in warnings:
2496 2496 ui.warn(warning)
2497 2497
2498 2498 return ret
2499 2499
2500 2500 def cat(ui, repo, ctx, matcher, prefix, **opts):
2501 2501 err = 1
2502 2502
2503 2503 def write(path):
2504 2504 fp = makefileobj(repo, opts.get('output'), ctx.node(),
2505 2505 pathname=os.path.join(prefix, path))
2506 2506 data = ctx[path].data()
2507 2507 if opts.get('decode'):
2508 2508 data = repo.wwritedata(path, data)
2509 2509 fp.write(data)
2510 2510 fp.close()
2511 2511
2512 2512 # Automation often uses hg cat on single files, so special case it
2513 2513 # for performance to avoid the cost of parsing the manifest.
2514 2514 if len(matcher.files()) == 1 and not matcher.anypats():
2515 2515 file = matcher.files()[0]
2516 2516 mfl = repo.manifestlog
2517 2517 mfnode = ctx.manifestnode()
2518 2518 try:
2519 2519 if mfnode and mfl[mfnode].find(file)[0]:
2520 2520 write(file)
2521 2521 return 0
2522 2522 except KeyError:
2523 2523 pass
2524 2524
2525 2525 for abs in ctx.walk(matcher):
2526 2526 write(abs)
2527 2527 err = 0
2528 2528
2529 2529 for subpath in sorted(ctx.substate):
2530 2530 sub = ctx.sub(subpath)
2531 2531 try:
2532 2532 submatch = matchmod.subdirmatcher(subpath, matcher)
2533 2533
2534 2534 if not sub.cat(submatch, os.path.join(prefix, sub._path),
2535 2535 **opts):
2536 2536 err = 0
2537 2537 except error.RepoLookupError:
2538 2538 ui.status(_("skipping missing subrepository: %s\n")
2539 2539 % os.path.join(prefix, subpath))
2540 2540
2541 2541 return err
2542 2542
2543 2543 def commit(ui, repo, commitfunc, pats, opts):
2544 2544 '''commit the specified files or all outstanding changes'''
2545 2545 date = opts.get('date')
2546 2546 if date:
2547 2547 opts['date'] = util.parsedate(date)
2548 2548 message = logmessage(ui, opts)
2549 2549 matcher = scmutil.match(repo[None], pats, opts)
2550 2550
2551 2551 # extract addremove carefully -- this function can be called from a command
2552 2552 # that doesn't support addremove
2553 2553 if opts.get('addremove'):
2554 2554 if scmutil.addremove(repo, matcher, "", opts) != 0:
2555 2555 raise error.Abort(
2556 2556 _("failed to mark all new/missing files as added/removed"))
2557 2557
2558 2558 return commitfunc(ui, repo, message, matcher, opts)
2559 2559
2560 2560 def samefile(f, ctx1, ctx2):
2561 2561 if f in ctx1.manifest():
2562 2562 a = ctx1.filectx(f)
2563 2563 if f in ctx2.manifest():
2564 2564 b = ctx2.filectx(f)
2565 2565 return (not a.cmp(b)
2566 2566 and a.flags() == b.flags())
2567 2567 else:
2568 2568 return False
2569 2569 else:
2570 2570 return f not in ctx2.manifest()
2571 2571
2572 2572 def amend(ui, repo, commitfunc, old, extra, pats, opts):
2573 2573 # avoid cycle context -> subrepo -> cmdutil
2574 2574 from . import context
2575 2575
2576 2576 # amend will reuse the existing user if not specified, but the obsolete
2577 2577 # marker creation requires that the current user's name is specified.
2578 2578 if obsolete.isenabled(repo, obsolete.createmarkersopt):
2579 2579 ui.username() # raise exception if username not set
2580 2580
2581 2581 ui.note(_('amending changeset %s\n') % old)
2582 2582 base = old.p1()
2583 2583 createmarkers = obsolete.isenabled(repo, obsolete.createmarkersopt)
2584 2584
2585 2585 wlock = lock = newid = None
2586 2586 try:
2587 2587 wlock = repo.wlock()
2588 2588 lock = repo.lock()
2589 2589 with repo.transaction('amend') as tr:
2590 2590 # See if we got a message from -m or -l, if not, open the editor
2591 2591 # with the message of the changeset to amend
2592 2592 message = logmessage(ui, opts)
2593 2593 # ensure logfile does not conflict with later enforcement of the
2594 2594 # message. potential logfile content has been processed by
2595 2595 # `logmessage` anyway.
2596 2596 opts.pop('logfile')
2597 2597 # First, do a regular commit to record all changes in the working
2598 2598 # directory (if there are any)
2599 2599 ui.callhooks = False
2600 2600 activebookmark = repo._bookmarks.active
2601 2601 try:
2602 2602 repo._bookmarks.active = None
2603 2603 opts['message'] = 'temporary amend commit for %s' % old
2604 2604 node = commit(ui, repo, commitfunc, pats, opts)
2605 2605 finally:
2606 2606 repo._bookmarks.active = activebookmark
2607 2607 repo._bookmarks.recordchange(tr)
2608 2608 ui.callhooks = True
2609 2609 ctx = repo[node]
2610 2610
2611 2611 # Participating changesets:
2612 2612 #
2613 2613 # node/ctx o - new (intermediate) commit that contains changes
2614 2614 # | from working dir to go into amending commit
2615 2615 # | (or a workingctx if there were no changes)
2616 2616 # |
2617 2617 # old o - changeset to amend
2618 2618 # |
2619 2619 # base o - parent of amending changeset
2620 2620
2621 2621 # Update extra dict from amended commit (e.g. to preserve graft
2622 2622 # source)
2623 2623 extra.update(old.extra())
2624 2624
2625 2625 # Also update it from the intermediate commit or from the wctx
2626 2626 extra.update(ctx.extra())
2627 2627
2628 2628 if len(old.parents()) > 1:
2629 2629 # ctx.files() isn't reliable for merges, so fall back to the
2630 2630 # slower repo.status() method
2631 2631 files = set([fn for st in repo.status(base, old)[:3]
2632 2632 for fn in st])
2633 2633 else:
2634 2634 files = set(old.files())
2635 2635
2636 2636             # Second, we use either the commit we just did or, if there were no
2637 2637             # changes, the parent of the working directory as the version of the
2638 2638             # files in the final amend commit
2639 2639 if node:
2640 2640 ui.note(_('copying changeset %s to %s\n') % (ctx, base))
2641 2641
2642 2642 user = ctx.user()
2643 2643 date = ctx.date()
2644 2644 # Recompute copies (avoid recording a -> b -> a)
2645 2645 copied = copies.pathcopies(base, ctx)
2646 2646             if old.p2():
2647 2647 copied.update(copies.pathcopies(old.p2(), ctx))
2648 2648
2649 2649 # Prune files which were reverted by the updates: if old
2650 2650 # introduced file X and our intermediate commit, node,
2651 2651 # renamed that file, then those two files are the same and
2652 2652 # we can discard X from our list of files. Likewise if X
2653 2653 # was deleted, it's no longer relevant
2654 2654 files.update(ctx.files())
2655 2655 files = [f for f in files if not samefile(f, ctx, base)]
2656 2656
2657 2657 def filectxfn(repo, ctx_, path):
2658 2658 try:
2659 2659 fctx = ctx[path]
2660 2660 flags = fctx.flags()
2661 2661 mctx = context.memfilectx(repo,
2662 2662 fctx.path(), fctx.data(),
2663 2663 islink='l' in flags,
2664 2664 isexec='x' in flags,
2665 2665 copied=copied.get(path))
2666 2666 return mctx
2667 2667 except KeyError:
2668 2668 return None
2669 2669 else:
2670 2670 ui.note(_('copying changeset %s to %s\n') % (old, base))
2671 2671
2672 2672 # Use version of files as in the old cset
2673 2673 def filectxfn(repo, ctx_, path):
2674 2674 try:
2675 2675 return old.filectx(path)
2676 2676 except KeyError:
2677 2677 return None
2678 2678
2679 2679 user = opts.get('user') or old.user()
2680 2680 date = opts.get('date') or old.date()
2681 2681 editform = mergeeditform(old, 'commit.amend')
2682 2682 editor = getcommiteditor(editform=editform, **opts)
2683 2683 if not message:
2684 2684 editor = getcommiteditor(edit=True, editform=editform)
2685 2685 message = old.description()
2686 2686
2687 2687 pureextra = extra.copy()
2688 2688 extra['amend_source'] = old.hex()
2689 2689
2690 2690 new = context.memctx(repo,
2691 2691 parents=[base.node(), old.p2().node()],
2692 2692 text=message,
2693 2693 files=files,
2694 2694 filectxfn=filectxfn,
2695 2695 user=user,
2696 2696 date=date,
2697 2697 extra=extra,
2698 2698 editor=editor)
2699 2699
2700 2700 newdesc = changelog.stripdesc(new.description())
2701 2701 if ((not node)
2702 2702 and newdesc == old.description()
2703 2703 and user == old.user()
2704 2704 and date == old.date()
2705 2705 and pureextra == old.extra()):
2706 2706 # nothing changed. continuing here would create a new node
2707 2707 # anyway because of the amend_source noise.
2708 2708 #
2709 2709                 # This is not what we expect from amend.
2710 2710 return old.node()
2711 2711
2712 2712 ph = repo.ui.config('phases', 'new-commit', phases.draft)
2713 2713 try:
2714 2714 if opts.get('secret'):
2715 2715 commitphase = 'secret'
2716 2716 else:
2717 2717 commitphase = old.phase()
2718 2718 repo.ui.setconfig('phases', 'new-commit', commitphase, 'amend')
2719 2719 newid = repo.commitctx(new)
2720 2720 finally:
2721 2721 repo.ui.setconfig('phases', 'new-commit', ph, 'amend')
2722 2722 if newid != old.node():
2723 2723 # Reroute the working copy parent to the new changeset
2724 2724 repo.setparents(newid, nullid)
2725 2725
2726 2726 # Move bookmarks from old parent to amend commit
2727 2727 bms = repo.nodebookmarks(old.node())
2728 2728 if bms:
2729 2729 marks = repo._bookmarks
2730 2730 for bm in bms:
2731 2731                     ui.debug('moving bookmark %r from %s to %s\n' %
2732 2732                              (bm, old.hex(), hex(newid)))
2733 2733 marks[bm] = newid
2734 2734 marks.recordchange(tr)
2735 2735             # commit the whole amend process
2736 2736 if createmarkers:
2737 2737 # mark the new changeset as successor of the rewritten one
2738 2738 new = repo[newid]
2739 2739 obs = [(old, (new,))]
2740 2740 if node:
2741 2741 obs.append((ctx, ()))
2742 2742
2743 2743 obsolete.createmarkers(repo, obs)
2744 2744 if not createmarkers and newid != old.node():
2745 2745 # Strip the intermediate commit (if there was one) and the amended
2746 2746 # commit
2747 2747 if node:
2748 2748 ui.note(_('stripping intermediate changeset %s\n') % ctx)
2749 2749 ui.note(_('stripping amended changeset %s\n') % old)
2750 2750 repair.strip(ui, repo, old.node(), topic='amend-backup')
2751 2751 finally:
2752 2752 lockmod.release(lock, wlock)
2753 2753 return newid
2754 2754
2755 2755 def commiteditor(repo, ctx, subs, editform=''):
2756 2756 if ctx.description():
2757 2757 return ctx.description()
2758 2758 return commitforceeditor(repo, ctx, subs, editform=editform,
2759 2759 unchangedmessagedetection=True)
2760 2760
2761 2761 def commitforceeditor(repo, ctx, subs, finishdesc=None, extramsg=None,
2762 2762 editform='', unchangedmessagedetection=False):
2763 2763 if not extramsg:
2764 2764 extramsg = _("Leave message empty to abort commit.")
2765 2765
2766 2766 forms = [e for e in editform.split('.') if e]
2767 2767 forms.insert(0, 'changeset')
2768 2768 templatetext = None
2769 2769 while forms:
2770 2770 tmpl = repo.ui.config('committemplate', '.'.join(forms))
2771 2771 if tmpl:
2772 2772 tmpl = templater.unquotestring(tmpl)
2773 2773 templatetext = committext = buildcommittemplate(
2774 2774 repo, ctx, subs, extramsg, tmpl)
2775 2775 break
2776 2776 forms.pop()
2777 2777 else:
2778 2778 committext = buildcommittext(repo, ctx, subs, extramsg)
2779 2779
2780 2780 # run editor in the repository root
2781 2781 olddir = pycompat.getcwd()
2782 2782 os.chdir(repo.root)
2783 2783
2784 2784 # make in-memory changes visible to external process
2785 2785 tr = repo.currenttransaction()
2786 2786 repo.dirstate.write(tr)
2787 2787 pending = tr and tr.writepending() and repo.root
2788 2788
2789 2789 editortext = repo.ui.edit(committext, ctx.user(), ctx.extra(),
2790 2790 editform=editform, pending=pending,
2791 2791 repopath=repo.path)
2792 2792 text = editortext
2793 2793
2794 2794 # strip away anything below this special string (used for editors that want
2795 2795 # to display the diff)
2796 2796 stripbelow = re.search(_linebelow, text, flags=re.MULTILINE)
2797 2797 if stripbelow:
2798 2798 text = text[:stripbelow.start()]
2799 2799
2800 2800 text = re.sub("(?m)^HG:.*(\n|$)", "", text)
2801 2801 os.chdir(olddir)
2802 2802
2803 2803 if finishdesc:
2804 2804 text = finishdesc(text)
2805 2805 if not text.strip():
2806 2806 raise error.Abort(_("empty commit message"))
2807 2807 if unchangedmessagedetection and editortext == templatetext:
2808 2808 raise error.Abort(_("commit message unchanged"))
2809 2809
2810 2810 return text
2811 2811
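# For reference, a committemplate configuration sketch (template values here
# are hypothetical, not from this module): the editform lookup loop in
# commitforceeditor() resolves keys from most to least specific, so the first
# matching key below wins, falling back to buildcommittext() if none match:
#
#   [committemplate]
#   changeset.commit.amend = {desc}\n\nHG: amending {node|short}
#   changeset.commit = {desc}\n\nHG: committing on branch {branch}
#   changeset = {desc}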
2812 2812 def buildcommittemplate(repo, ctx, subs, extramsg, tmpl):
2813 2813 ui = repo.ui
2814 2814 tmpl, mapfile = gettemplate(ui, tmpl, None)
2815 2815
2816 2816 t = changeset_templater(ui, repo, None, {}, tmpl, mapfile, False)
2817 2817
2818 2818 for k, v in repo.ui.configitems('committemplate'):
2819 2819 if k != 'changeset':
2820 2820 t.t.cache[k] = v
2821 2821
2822 2822 if not extramsg:
2823 2823         extramsg = '' # ensure that extramsg is a string
2824 2824
2825 2825 ui.pushbuffer()
2826 2826 t.show(ctx, extramsg=extramsg)
2827 2827 return ui.popbuffer()
2828 2828
2829 2829 def hgprefix(msg):
2830 2830 return "\n".join(["HG: %s" % a for a in msg.split("\n") if a])
2831 2831
2832 2832 def buildcommittext(repo, ctx, subs, extramsg):
2833 2833 edittext = []
2834 2834 modified, added, removed = ctx.modified(), ctx.added(), ctx.removed()
2835 2835 if ctx.description():
2836 2836 edittext.append(ctx.description())
2837 2837 edittext.append("")
2838 2838 edittext.append("") # Empty line between message and comments.
2839 2839 edittext.append(hgprefix(_("Enter commit message."
2840 2840 " Lines beginning with 'HG:' are removed.")))
2841 2841 edittext.append(hgprefix(extramsg))
2842 2842 edittext.append("HG: --")
2843 2843 edittext.append(hgprefix(_("user: %s") % ctx.user()))
2844 2844 if ctx.p2():
2845 2845 edittext.append(hgprefix(_("branch merge")))
2846 2846 if ctx.branch():
2847 2847 edittext.append(hgprefix(_("branch '%s'") % ctx.branch()))
2848 2848 if bookmarks.isactivewdirparent(repo):
2849 2849 edittext.append(hgprefix(_("bookmark '%s'") % repo._activebookmark))
2850 2850 edittext.extend([hgprefix(_("subrepo %s") % s) for s in subs])
2851 2851 edittext.extend([hgprefix(_("added %s") % f) for f in added])
2852 2852 edittext.extend([hgprefix(_("changed %s") % f) for f in modified])
2853 2853 edittext.extend([hgprefix(_("removed %s") % f) for f in removed])
2854 2854 if not added and not modified and not removed:
2855 2855 edittext.append(hgprefix(_("no files changed")))
2856 2856 edittext.append("")
2857 2857
2858 2858 return "\n".join(edittext)
2859 2859
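# For illustration only (the user and file names are hypothetical), the text
# assembled by buildcommittext() for a non-merge commit looks roughly like:
#
#   <existing description, if any>
#
#
#   HG: Enter commit message. Lines beginning with 'HG:' are removed.
#   HG: Leave message empty to abort commit.
#   HG: --
#   HG: user: alice
#   HG: branch 'default'
#   HG: added foo.txt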
2860 2860 def commitstatus(repo, node, branch, bheads=None, opts=None):
2861 2861 if opts is None:
2862 2862 opts = {}
2863 2863 ctx = repo[node]
2864 2864 parents = ctx.parents()
2865 2865
2866 2866 if (not opts.get('amend') and bheads and node not in bheads and not
2867 2867 [x for x in parents if x.node() in bheads and x.branch() == branch]):
2868 2868 repo.ui.status(_('created new head\n'))
2869 2869 # The message is not printed for initial roots. For the other
2870 2870 # changesets, it is printed in the following situations:
2871 2871 #
2872 2872 # Par column: for the 2 parents with ...
2873 2873 # N: null or no parent
2874 2874 # B: parent is on another named branch
2875 2875 # C: parent is a regular non head changeset
2876 2876 # H: parent was a branch head of the current branch
2877 2877 # Msg column: whether we print "created new head" message
2878 2878         # In the following, it is assumed that there already exist some
2879 2879 # initial branch heads of the current branch, otherwise nothing is
2880 2880 # printed anyway.
2881 2881 #
2882 2882 # Par Msg Comment
2883 2883 # N N y additional topo root
2884 2884 #
2885 2885 # B N y additional branch root
2886 2886 # C N y additional topo head
2887 2887 # H N n usual case
2888 2888 #
2889 2889 # B B y weird additional branch root
2890 2890 # C B y branch merge
2891 2891 # H B n merge with named branch
2892 2892 #
2893 2893 # C C y additional head from merge
2894 2894 # C H n merge with a head
2895 2895 #
2896 2896 # H H n head merge: head count decreases
2897 2897
2898 2898 if not opts.get('close_branch'):
2899 2899 for r in parents:
2900 2900 if r.closesbranch() and r.branch() == branch:
2901 2901 repo.ui.status(_('reopening closed branch head %d\n') % r)
2902 2902
2903 2903 if repo.ui.debugflag:
2904 2904 repo.ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx.hex()))
2905 2905 elif repo.ui.verbose:
2906 2906 repo.ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx))
2907 2907
2908 2908 def postcommitstatus(repo, pats, opts):
2909 2909 return repo.status(match=scmutil.match(repo[None], pats, opts))
2910 2910
2911 2911 def revert(ui, repo, ctx, parents, *pats, **opts):
2912 2912 parent, p2 = parents
2913 2913 node = ctx.node()
2914 2914
2915 2915 mf = ctx.manifest()
2916 2916 if node == p2:
2917 2917 parent = p2
2918 2918
2919 2919 # need all matching names in dirstate and manifest of target rev,
2920 2920 # so have to walk both. do not print errors if files exist in one
2921 2921     # but not the other. in both cases, filesets should be evaluated against
2922 2922 # workingctx to get consistent result (issue4497). this means 'set:**'
2923 2923 # cannot be used to select missing files from target rev.
2924 2924
2925 2925 # `names` is a mapping for all elements in working copy and target revision
2926 2926 # The mapping is in the form:
2927 2927     # <abs path in repo> -> (<path from CWD>, <exactly specified by matcher?>)
2928 2928 names = {}
2929 2929
2930 2930 with repo.wlock():
2931 2931 ## filling of the `names` mapping
2932 2932 # walk dirstate to fill `names`
2933 2933
2934 2934 interactive = opts.get('interactive', False)
2935 2935 wctx = repo[None]
2936 2936 m = scmutil.match(wctx, pats, opts)
2937 2937
2938 2938 # we'll need this later
2939 2939 targetsubs = sorted(s for s in wctx.substate if m(s))
2940 2940
2941 2941 if not m.always():
2942 2942 for abs in repo.walk(matchmod.badmatch(m, lambda x, y: False)):
2943 2943 names[abs] = m.rel(abs), m.exact(abs)
2944 2944
2945 2945 # walk target manifest to fill `names`
2946 2946
2947 2947 def badfn(path, msg):
2948 2948 if path in names:
2949 2949 return
2950 2950 if path in ctx.substate:
2951 2951 return
2952 2952 path_ = path + '/'
2953 2953 for f in names:
2954 2954 if f.startswith(path_):
2955 2955 return
2956 2956 ui.warn("%s: %s\n" % (m.rel(path), msg))
2957 2957
2958 2958 for abs in ctx.walk(matchmod.badmatch(m, badfn)):
2959 2959 if abs not in names:
2960 2960 names[abs] = m.rel(abs), m.exact(abs)
2961 2961
2962 2962         # Find the status of all files in `names`.
2963 2963 m = scmutil.matchfiles(repo, names)
2964 2964
2965 2965 changes = repo.status(node1=node, match=m,
2966 2966 unknown=True, ignored=True, clean=True)
2967 2967 else:
2968 2968 changes = repo.status(node1=node, match=m)
2969 2969 for kind in changes:
2970 2970 for abs in kind:
2971 2971 names[abs] = m.rel(abs), m.exact(abs)
2972 2972
2973 2973 m = scmutil.matchfiles(repo, names)
2974 2974
2975 2975 modified = set(changes.modified)
2976 2976 added = set(changes.added)
2977 2977 removed = set(changes.removed)
2978 2978 _deleted = set(changes.deleted)
2979 2979 unknown = set(changes.unknown)
2980 2980 unknown.update(changes.ignored)
2981 2981 clean = set(changes.clean)
2982 2982 modadded = set()
2983 2983
2984 2984 # We need to account for the state of the file in the dirstate,
2985 2985         # even when we revert against something other than the parent. This
2986 2986         # will slightly alter the behavior of revert (backing up or not,
2987 2987         # deleting or just forgetting, etc).
2988 2988 if parent == node:
2989 2989 dsmodified = modified
2990 2990 dsadded = added
2991 2991 dsremoved = removed
2992 2992 # store all local modifications, useful later for rename detection
2993 2993 localchanges = dsmodified | dsadded
2994 2994 modified, added, removed = set(), set(), set()
2995 2995 else:
2996 2996 changes = repo.status(node1=parent, match=m)
2997 2997 dsmodified = set(changes.modified)
2998 2998 dsadded = set(changes.added)
2999 2999 dsremoved = set(changes.removed)
3000 3000 # store all local modifications, useful later for rename detection
3001 3001 localchanges = dsmodified | dsadded
3002 3002
3003 3003         # only take removes between wc and target into account
3004 3004 clean |= dsremoved - removed
3005 3005 dsremoved &= removed
3006 3006         # distinguish between dirstate removes and the others
3007 3007 removed -= dsremoved
3008 3008
3009 3009 modadded = added & dsmodified
3010 3010 added -= modadded
3011 3011
3012 3012         # tell newly modified files apart.
3013 3013 dsmodified &= modified
3014 3014 dsmodified |= modified & dsadded # dirstate added may need backup
3015 3015 modified -= dsmodified
3016 3016
3017 3017 # We need to wait for some post-processing to update this set
3018 3018 # before making the distinction. The dirstate will be used for
3019 3019 # that purpose.
3020 3020 dsadded = added
3021 3021
3022 3022 # in case of merge, files that are actually added can be reported as
3023 3023     # modified; we need to post-process the result
3024 3024 if p2 != nullid:
3025 3025 mergeadd = set(dsmodified)
3026 3026 for path in dsmodified:
3027 3027 if path in mf:
3028 3028 mergeadd.remove(path)
3029 3029 dsadded |= mergeadd
3030 3030 dsmodified -= mergeadd
3031 3031
3032 3032 # if f is a rename, update `names` to also revert the source
3033 3033 cwd = repo.getcwd()
3034 3034 for f in localchanges:
3035 3035 src = repo.dirstate.copied(f)
3036 3036 # XXX should we check for rename down to target node?
3037 3037 if src and src not in names and repo.dirstate[src] == 'r':
3038 3038 dsremoved.add(src)
3039 3039 names[src] = (repo.pathto(src, cwd), True)
3040 3040
3041 3041         # determine the exact nature of the deleted files
3042 3042 deladded = set(_deleted)
3043 3043 for path in _deleted:
3044 3044 if path in mf:
3045 3045 deladded.remove(path)
3046 3046 deleted = _deleted - deladded
3047 3047
3048 3048         # distinguish between files to forget and the others
3049 3049 added = set()
3050 3050 for abs in dsadded:
3051 3051 if repo.dirstate[abs] != 'a':
3052 3052 added.add(abs)
3053 3053 dsadded -= added
3054 3054
3055 3055 for abs in deladded:
3056 3056 if repo.dirstate[abs] == 'a':
3057 3057 dsadded.add(abs)
3058 3058 deladded -= dsadded
3059 3059
3060 3060 # For files marked as removed, we check if an unknown file is present at
3061 3061         # the same path. If such a file exists it may need to be backed up.
3062 3062         # Making the distinction at this stage keeps the backup logic
3063 3063         # simpler.
3064 3064 removunk = set()
3065 3065 for abs in removed:
3066 3066 target = repo.wjoin(abs)
3067 3067 if os.path.lexists(target):
3068 3068 removunk.add(abs)
3069 3069 removed -= removunk
3070 3070
3071 3071 dsremovunk = set()
3072 3072 for abs in dsremoved:
3073 3073 target = repo.wjoin(abs)
3074 3074 if os.path.lexists(target):
3075 3075 dsremovunk.add(abs)
3076 3076 dsremoved -= dsremovunk
3077 3077
3078 3078         # actions to actually be performed by revert
3079 3079         # (<list of files>, <message>) tuple
3080 3080 actions = {'revert': ([], _('reverting %s\n')),
3081 3081 'add': ([], _('adding %s\n')),
3082 3082 'remove': ([], _('removing %s\n')),
3083 3083 'drop': ([], _('removing %s\n')),
3084 3084 'forget': ([], _('forgetting %s\n')),
3085 3085 'undelete': ([], _('undeleting %s\n')),
3086 3086 'noop': (None, _('no changes needed to %s\n')),
3087 3087 'unknown': (None, _('file not managed: %s\n')),
3088 3088 }
3089 3089
3090 3090         # "constants" that convey the backup strategy.
3091 3091         # All are set to `discard` if `no-backup` is set, to avoid checking
3092 3092 # no_backup lower in the code.
3093 3093 # These values are ordered for comparison purposes
3094 3094 backupinteractive = 3 # do backup if interactively modified
3095 3095 backup = 2 # unconditionally do backup
3096 3096 check = 1 # check if the existing file differs from target
3097 3097 discard = 0 # never do backup
3098 3098 if opts.get('no_backup'):
3099 3099 backupinteractive = backup = check = discard
3100 3100 if interactive:
3101 3101 dsmodifiedbackup = backupinteractive
3102 3102 else:
3103 3103 dsmodifiedbackup = backup
3104 3104 tobackup = set()
3105 3105
3106 3106 backupanddel = actions['remove']
3107 3107 if not opts.get('no_backup'):
3108 3108 backupanddel = actions['drop']
3109 3109
3110 3110 disptable = (
3111 3111 # dispatch table:
3112 3112 # file state
3113 3113 # action
3114 3114 # make backup
3115 3115
3116 3116             ## Sets whose results will change files on disk
3117 3117 # Modified compared to target, no local change
3118 3118 (modified, actions['revert'], discard),
3119 3119 # Modified compared to target, but local file is deleted
3120 3120 (deleted, actions['revert'], discard),
3121 3121 # Modified compared to target, local change
3122 3122 (dsmodified, actions['revert'], dsmodifiedbackup),
3123 3123 # Added since target
3124 3124 (added, actions['remove'], discard),
3125 3125 # Added in working directory
3126 3126 (dsadded, actions['forget'], discard),
3127 3127 # Added since target, have local modification
3128 3128 (modadded, backupanddel, backup),
3129 3129 # Added since target but file is missing in working directory
3130 3130 (deladded, actions['drop'], discard),
3131 3131 # Removed since target, before working copy parent
3132 3132 (removed, actions['add'], discard),
3133 3133 # Same as `removed` but an unknown file exists at the same path
3134 3134 (removunk, actions['add'], check),
3135 3135             # Removed since target, marked as such in working copy parent
3136 3136 (dsremoved, actions['undelete'], discard),
3137 3137 # Same as `dsremoved` but an unknown file exists at the same path
3138 3138 (dsremovunk, actions['undelete'], check),
3139 3139             ## the following sets do not result in any file changes
3140 3140 # File with no modification
3141 3141 (clean, actions['noop'], discard),
3142 3142 # Existing file, not tracked anywhere
3143 3143 (unknown, actions['unknown'], discard),
3144 3144 )
3145 3145
3146 3146 for abs, (rel, exact) in sorted(names.items()):
3147 3147             # target file to be touched on disk
3148 3148 target = repo.wjoin(abs)
3149 3149 # search the entry in the dispatch table.
3150 3150 # if the file is in any of these sets, it was touched in the working
3151 3151 # directory parent and we are sure it needs to be reverted.
3152 3152 for table, (xlist, msg), dobackup in disptable:
3153 3153 if abs not in table:
3154 3154 continue
3155 3155 if xlist is not None:
3156 3156 xlist.append(abs)
3157 3157 if dobackup:
3158 3158 # If in interactive mode, don't automatically create
3159 3159 # .orig files (issue4793)
3160 3160 if dobackup == backupinteractive:
3161 3161 tobackup.add(abs)
3162 3162 elif (backup <= dobackup or wctx[abs].cmp(ctx[abs])):
3163 3163 bakname = scmutil.origpath(ui, repo, rel)
3164 3164 ui.note(_('saving current version of %s as %s\n') %
3165 3165 (rel, bakname))
3166 3166 if not opts.get('dry_run'):
3167 3167 if interactive:
3168 3168 util.copyfile(target, bakname)
3169 3169 else:
3170 3170 util.rename(target, bakname)
3171 3171 if ui.verbose or not exact:
3172 3172 if not isinstance(msg, basestring):
3173 3173 msg = msg(abs)
3174 3174 ui.status(msg % rel)
3175 3175 elif exact:
3176 3176 ui.warn(msg % rel)
3177 3177 break
3178 3178
3179 3179 if not opts.get('dry_run'):
3180 3180 needdata = ('revert', 'add', 'undelete')
3181 3181 _revertprefetch(repo, ctx, *[actions[name][0] for name in needdata])
3182 3182 _performrevert(repo, parents, ctx, actions, interactive, tobackup)
3183 3183
3184 3184 if targetsubs:
3185 3185 # Revert the subrepos on the revert list
3186 3186 for sub in targetsubs:
3187 3187 try:
3188 3188 wctx.sub(sub).revert(ctx.substate[sub], *pats, **opts)
3189 3189 except KeyError:
3190 3190 raise error.Abort("subrepository '%s' does not exist in %s!"
3191 3191 % (sub, short(ctx.node())))
3192 3192
3193 3193 def _revertprefetch(repo, ctx, *files):
3194 3194     """Let extensions that change the storage layer prefetch content"""
3195 3195 pass
3196 3196
3197 3197 def _performrevert(repo, parents, ctx, actions, interactive=False,
3198 3198 tobackup=None):
3199 3199     """function that actually performs all the actions computed for revert
3200 3200 
3201 3201     This is an independent function to let extensions plug in and react to
3202 3202 the imminent revert.
3203 3203
3204 3204 Make sure you have the working directory locked when calling this function.
3205 3205 """
3206 3206 parent, p2 = parents
3207 3207 node = ctx.node()
3208 3208 excluded_files = []
3209 3209 matcher_opts = {"exclude": excluded_files}
3210 3210
3211 3211 def checkout(f):
3212 3212 fc = ctx[f]
3213 3213 repo.wwrite(f, fc.data(), fc.flags())
3214 3214
3215 3215 def doremove(f):
3216 3216 try:
3217 3217 repo.wvfs.unlinkpath(f)
3218 3218 except OSError:
3219 3219 pass
3220 3220 repo.dirstate.remove(f)
3221 3221
3222 3222 audit_path = pathutil.pathauditor(repo.root)
3223 3223 for f in actions['forget'][0]:
3224 3224 if interactive:
3225 3225 choice = repo.ui.promptchoice(
3226 3226 _("forget added file %s (Yn)?$$ &Yes $$ &No") % f)
3227 3227 if choice == 0:
3228 3228 repo.dirstate.drop(f)
3229 3229 else:
3230 3230 excluded_files.append(repo.wjoin(f))
3231 3231 else:
3232 3232 repo.dirstate.drop(f)
3233 3233 for f in actions['remove'][0]:
3234 3234 audit_path(f)
3235 3235 if interactive:
3236 3236 choice = repo.ui.promptchoice(
3237 3237 _("remove added file %s (Yn)?$$ &Yes $$ &No") % f)
3238 3238 if choice == 0:
3239 3239 doremove(f)
3240 3240 else:
3241 3241 excluded_files.append(repo.wjoin(f))
3242 3242 else:
3243 3243 doremove(f)
3244 3244 for f in actions['drop'][0]:
3245 3245 audit_path(f)
3246 3246 repo.dirstate.remove(f)
3247 3247
3248 3248 normal = None
3249 3249 if node == parent:
3250 3250 # We're reverting to our parent. If possible, we'd like status
3251 3251 # to report the file as clean. We have to use normallookup for
3252 3252 # merges to avoid losing information about merged/dirty files.
3253 3253 if p2 != nullid:
3254 3254 normal = repo.dirstate.normallookup
3255 3255 else:
3256 3256 normal = repo.dirstate.normal
3257 3257
3258 3258 newlyaddedandmodifiedfiles = set()
3259 3259 if interactive:
3260 3260 # Prompt the user for changes to revert
3261 3261 torevert = [repo.wjoin(f) for f in actions['revert'][0]]
3262 3262 m = scmutil.match(ctx, torevert, matcher_opts)
3263 3263 diffopts = patch.difffeatureopts(repo.ui, whitespace=True)
3264 3264 diffopts.nodates = True
3265 3265 diffopts.git = True
3266 3266 operation = 'discard'
3267 3267 reversehunks = True
3268 3268 if node != parent:
3269 3269 operation = 'revert'
3270 3270 reversehunks = repo.ui.configbool('experimental',
3271 3271 'revertalternateinteractivemode',
3272 3272 True)
3273 3273 if reversehunks:
3274 3274 diff = patch.diff(repo, ctx.node(), None, m, opts=diffopts)
3275 3275 else:
3276 3276 diff = patch.diff(repo, None, ctx.node(), m, opts=diffopts)
3277 3277 originalchunks = patch.parsepatch(diff)
3278 3278
3279 3279 try:
3280 3280
3281 3281 chunks, opts = recordfilter(repo.ui, originalchunks,
3282 3282 operation=operation)
3283 3283 if reversehunks:
3284 3284 chunks = patch.reversehunks(chunks)
3285 3285
3286 3286 except patch.PatchError as err:
3287 3287 raise error.Abort(_('error parsing patch: %s') % err)
3288 3288
3289 3289 newlyaddedandmodifiedfiles = newandmodified(chunks, originalchunks)
3290 3290 if tobackup is None:
3291 3291 tobackup = set()
3292 3292 # Apply changes
3293 3293 fp = stringio()
3294 3294 for c in chunks:
3295 3295 # Create a backup file only if this hunk should be backed up
3296 3296 if ishunk(c) and c.header.filename() in tobackup:
3297 3297 abs = c.header.filename()
3298 3298 target = repo.wjoin(abs)
3299 3299 bakname = scmutil.origpath(repo.ui, repo, m.rel(abs))
3300 3300 util.copyfile(target, bakname)
3301 3301 tobackup.remove(abs)
3302 3302 c.write(fp)
3303 3303 dopatch = fp.tell()
3304 3304 fp.seek(0)
3305 3305 if dopatch:
3306 3306 try:
3307 3307 patch.internalpatch(repo.ui, repo, fp, 1, eolmode=None)
3308 3308 except patch.PatchError as err:
3309 3309 raise error.Abort(str(err))
3310 3310 del fp
3311 3311 else:
3312 3312 for f in actions['revert'][0]:
3313 3313 checkout(f)
3314 3314 if normal:
3315 3315 normal(f)
3316 3316
3317 3317 for f in actions['add'][0]:
3318 3318 # Don't checkout modified files, they are already created by the diff
3319 3319 if f not in newlyaddedandmodifiedfiles:
3320 3320 checkout(f)
3321 3321 repo.dirstate.add(f)
3322 3322
3323 3323 normal = repo.dirstate.normallookup
3324 3324 if node == parent and p2 == nullid:
3325 3325 normal = repo.dirstate.normal
3326 3326 for f in actions['undelete'][0]:
3327 3327 checkout(f)
3328 3328 normal(f)
3329 3329
3330 3330 copied = copies.pathcopies(repo[parent], ctx)
3331 3331
3332 3332 for f in actions['add'][0] + actions['undelete'][0] + actions['revert'][0]:
3333 3333 if f in copied:
3334 3334 repo.dirstate.copy(copied[f], f)
3335 3335
3336 3336 def command(table):
3337 3337 """Returns a function object to be used as a decorator for making commands.
3338 3338
3339 3339 This function receives a command table as its argument. The table should
3340 3340 be a dict.
3341 3341
3342 3342 The returned function can be used as a decorator for adding commands
3343 3343 to that command table. This function accepts multiple arguments to define
3344 3344 a command.
3345 3345
3346 3346 The first argument is the command name.
3347 3347
3348 3348 The options argument is an iterable of tuples defining command arguments.
3349 3349 See ``mercurial.fancyopts.fancyopts()`` for the format of each tuple.
3350 3350
3351 3351 The synopsis argument defines a short, one line summary of how to use the
3352 3352 command. This shows up in the help output.
3353 3353
3354 3354 The norepo argument defines whether the command does not require a
3355 3355 local repository. Most commands operate against a repository, thus the
3356 3356 default is False.
3357 3357
3358 3358 The optionalrepo argument defines whether the command optionally requires
3359 3359 a local repository.
3360 3360
3361 3361 The inferrepo argument defines whether to try to find a repository from the
3362 3362 command line arguments. If True, arguments will be examined for potential
3363 3363 repository locations. See ``findrepo()``. If a repository is found, it
3364 3364 will be used.
3365 3365 """
3366 3366 def cmd(name, options=(), synopsis=None, norepo=False, optionalrepo=False,
3367 3367 inferrepo=False):
3368 3368 def decorator(func):
3369 3369 func.norepo = norepo
3370 3370 func.optionalrepo = optionalrepo
3371 3371 func.inferrepo = inferrepo
3372 3372 if synopsis:
3373 3373 table[name] = func, list(options), synopsis
3374 3374 else:
3375 3375 table[name] = func, list(options)
3376 3376 return func
3377 3377 return decorator
3378 3378
3379 3379 return cmd
3380 3380
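# A minimal usage sketch for the decorator factory above ('mycmd' and its
# option are hypothetical, not part of this module):
#
#   cmdtable = {}
#   mycommand = command(cmdtable)
#
#   @mycommand('mycmd', [('', 'flag', False, 'an example flag')],
#              'hg mycmd [--flag]', norepo=True)
#   def mycmd(ui, *args, **opts):
#       ui.write('hello\n')
#
# after which cmdtable maps 'mycmd' to (mycmd, [<the option tuple>],
# 'hg mycmd [--flag]') and mycmd.norepo is True.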
3381 3381 # a list of (ui, repo, otherpeer, opts, missing) functions called by
3382 3382 # commands.outgoing. "missing" is the "missing" attribute of the result of
3383 3383 # "findcommonoutgoing()"
3384 3384 outgoinghooks = util.hooks()
3385 3385
3386 3386 # a list of (ui, repo) functions called by commands.summary
3387 3387 summaryhooks = util.hooks()
3388 3388
3389 3389 # a list of (ui, repo, opts, changes) functions called by commands.summary.
3390 3390 #
3391 3391 # functions should return tuple of booleans below, if 'changes' is None:
3392 3392 # (whether-incomings-are-needed, whether-outgoings-are-needed)
3393 3393 #
3394 3394 # otherwise, 'changes' is a tuple of tuples below:
3395 3395 # - (sourceurl, sourcebranch, sourcepeer, incoming)
3396 3396 # - (desturl, destbranch, destpeer, outgoing)
3397 3397 summaryremotehooks = util.hooks()
3398 3398
3399 3399 # A list of state files kept by multistep operations like graft.
3400 3400 # Since graft cannot be aborted, it is considered 'clearable' by update.
3401 3401 # note: bisect is intentionally excluded
3402 3402 # (state file, clearable, allowcommit, error, hint)
3403 3403 unfinishedstates = [
3404 3404 ('graftstate', True, False, _('graft in progress'),
3405 3405 _("use 'hg graft --continue' or 'hg update' to abort")),
3406 3406 ('updatestate', True, False, _('last update was interrupted'),
3407 3407 _("use 'hg update' to get a consistent checkout"))
3408 3408 ]
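# Extensions that add their own multistep operations typically append to this
# list; a hypothetical sketch (the state file and messages are illustrative
# only):
#
#   unfinishedstates.append(
#       ('mystate', False, False, _('myoperation in progress'),
#        _("use 'hg myoperation --continue' to resume")))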
3409 3409
3410 3410 def checkunfinished(repo, commit=False):
3411 3411 '''Look for an unfinished multistep operation, like graft, and abort
3412 3412 if found. It's probably good to check this right before
3413 3413 bailifchanged().
3414 3414 '''
3415 3415 for f, clearable, allowcommit, msg, hint in unfinishedstates:
3416 3416 if commit and allowcommit:
3417 3417 continue
3418 3418 if repo.vfs.exists(f):
3419 3419 raise error.Abort(msg, hint=hint)
3420 3420
3421 3421 def clearunfinished(repo):
3422 3422 '''Check for unfinished operations (as above), and clear the ones
3423 3423 that are clearable.
3424 3424 '''
3425 3425 for f, clearable, allowcommit, msg, hint in unfinishedstates:
3426 3426 if not clearable and repo.vfs.exists(f):
3427 3427 raise error.Abort(msg, hint=hint)
3428 3428 for f, clearable, allowcommit, msg, hint in unfinishedstates:
3429 3429 if clearable and repo.vfs.exists(f):
3430 3430 util.unlink(repo.vfs.join(f))
3431 3431
3432 3432 afterresolvedstates = [
3433 3433 ('graftstate',
3434 3434 _('hg graft --continue')),
3435 3435 ]
3436 3436
3437 3437 def howtocontinue(repo):
3438 3438 '''Check for an unfinished operation and return the command to finish
3439 3439 it.
3440 3440
3441 3441 afterresolvedstates tuples define a .hg/{file} and the corresponding
3442 3442 command needed to finish it.
3443 3443
3444 3444 Returns a (msg, warning) tuple. 'msg' is a string and 'warning' is
3445 3445 a boolean.
3446 3446 '''
3447 3447 contmsg = _("continue: %s")
3448 3448 for f, msg in afterresolvedstates:
3449 3449 if repo.vfs.exists(f):
3450 3450 return contmsg % msg, True
3451 3451 workingctx = repo[None]
3452 3452 dirty = any(repo.status()) or any(workingctx.sub(s).dirty()
3453 3453 for s in workingctx.substate)
3454 3454 if dirty:
3455 3455 return contmsg % _("hg commit"), False
3456 3456 return None, None
3457 3457
3458 3458 def checkafterresolved(repo):
3459 3459 '''Inform the user about the next action after completing hg resolve
3460 3460
3461 3461 If there's a matching afterresolvedstates, howtocontinue will yield
3462 3462 repo.ui.warn as the reporter.
3463 3463
3464 3464 Otherwise, it will yield repo.ui.note.
3465 3465 '''
3466 3466 msg, warning = howtocontinue(repo)
3467 3467 if msg is not None:
3468 3468 if warning:
3469 3469 repo.ui.warn("%s\n" % msg)
3470 3470 else:
3471 3471 repo.ui.note("%s\n" % msg)
3472 3472
3473 3473 def wrongtooltocontinue(repo, task):
3474 3474 '''Raise an abort suggesting how to properly continue if there is an
3475 3475 active task.
3476 3476
3477 3477 Uses howtocontinue() to find the active task.
3478 3478
3479 3479 If there's no task (repo.ui.note for 'hg commit'), it does not offer
3480 3480 a hint.
3481 3481 '''
3482 3482 after = howtocontinue(repo)
3483 3483 hint = None
3484 3484 if after[1]:
3485 3485 hint = after[0]
3486 3486 raise error.Abort(_('no %s in progress') % task, hint=hint)
@@ -1,3745 +1,3745 b''
1 1 # util.py - Mercurial utility functions and platform specific implementations
2 2 #
3 3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2 or any later version.
9 9
10 10 """Mercurial utility functions and platform specific implementations.
11 11
12 12 This contains helper routines that are independent of the SCM core and
13 13 hide platform-specific details from the core.
14 14 """
15 15
16 16 from __future__ import absolute_import
17 17
18 18 import bz2
19 19 import calendar
20 20 import codecs
21 21 import collections
22 22 import datetime
23 23 import errno
24 24 import gc
25 25 import hashlib
26 26 import imp
27 27 import os
28 28 import platform as pyplatform
29 29 import re as remod
30 30 import shutil
31 31 import signal
32 32 import socket
33 33 import stat
34 34 import string
35 35 import subprocess
36 36 import sys
37 37 import tempfile
38 38 import textwrap
39 39 import time
40 40 import traceback
41 41 import warnings
42 42 import zlib
43 43
44 44 from . import (
45 45 encoding,
46 46 error,
47 47 i18n,
48 48 osutil,
49 49 parsers,
50 50 pycompat,
51 51 )
52 52
53 53 cookielib = pycompat.cookielib
54 54 empty = pycompat.empty
55 55 httplib = pycompat.httplib
56 56 httpserver = pycompat.httpserver
57 57 pickle = pycompat.pickle
58 58 queue = pycompat.queue
59 59 socketserver = pycompat.socketserver
60 60 stderr = pycompat.stderr
61 61 stdin = pycompat.stdin
62 62 stdout = pycompat.stdout
63 63 stringio = pycompat.stringio
64 64 urlerr = pycompat.urlerr
65 65 urlreq = pycompat.urlreq
66 66 xmlrpclib = pycompat.xmlrpclib
67 67
68 68 def isatty(fp):
69 69 try:
70 70 return fp.isatty()
71 71 except AttributeError:
72 72 return False
73 73
74 74 # glibc determines buffering on first write to stdout - if we replace a TTY
75 75 # destined stdout with a pipe destined stdout (e.g. pager), we want line
76 76 # buffering
77 77 if isatty(stdout):
78 78 stdout = os.fdopen(stdout.fileno(), pycompat.sysstr('wb'), 1)
79 79
80 80 if pycompat.osname == 'nt':
81 81 from . import windows as platform
82 82 stdout = platform.winstdout(stdout)
83 83 else:
84 84 from . import posix as platform
85 85
86 86 _ = i18n._
87 87
88 88 bindunixsocket = platform.bindunixsocket
89 89 cachestat = platform.cachestat
90 90 checkexec = platform.checkexec
91 91 checklink = platform.checklink
92 92 copymode = platform.copymode
93 93 executablepath = platform.executablepath
94 94 expandglobs = platform.expandglobs
95 95 explainexit = platform.explainexit
96 96 findexe = platform.findexe
97 97 gethgcmd = platform.gethgcmd
98 98 getuser = platform.getuser
99 99 getpid = os.getpid
100 100 groupmembers = platform.groupmembers
101 101 groupname = platform.groupname
102 102 hidewindow = platform.hidewindow
103 103 isexec = platform.isexec
104 104 isowner = platform.isowner
105 105 localpath = platform.localpath
106 106 lookupreg = platform.lookupreg
107 107 makedir = platform.makedir
108 108 nlinks = platform.nlinks
109 109 normpath = platform.normpath
110 110 normcase = platform.normcase
111 111 normcasespec = platform.normcasespec
112 112 normcasefallback = platform.normcasefallback
113 113 openhardlinks = platform.openhardlinks
114 114 oslink = platform.oslink
115 115 parsepatchoutput = platform.parsepatchoutput
116 116 pconvert = platform.pconvert
117 117 poll = platform.poll
118 118 popen = platform.popen
119 119 posixfile = platform.posixfile
120 120 quotecommand = platform.quotecommand
121 121 readpipe = platform.readpipe
122 122 rename = platform.rename
123 123 removedirs = platform.removedirs
124 124 samedevice = platform.samedevice
125 125 samefile = platform.samefile
126 126 samestat = platform.samestat
127 127 setbinary = platform.setbinary
128 128 setflags = platform.setflags
129 129 setsignalhandler = platform.setsignalhandler
130 130 shellquote = platform.shellquote
131 131 spawndetached = platform.spawndetached
132 132 split = platform.split
133 133 sshargs = platform.sshargs
134 134 statfiles = getattr(osutil, 'statfiles', platform.statfiles)
135 135 statisexec = platform.statisexec
136 136 statislink = platform.statislink
137 137 testpid = platform.testpid
138 138 umask = platform.umask
139 139 unlink = platform.unlink
140 140 username = platform.username
141 141
142 142 # Python compatibility
143 143
144 144 _notset = object()
145 145
146 146 # disable Python's problematic floating point timestamps (issue4836)
147 147 # (Python hypocritically says you shouldn't change this behavior in
148 148 # libraries, and sure enough Mercurial is not a library.)
149 149 os.stat_float_times(False)
150 150
151 151 def safehasattr(thing, attr):
152 152 return getattr(thing, attr, _notset) is not _notset
153 153
154 154 def bitsfrom(container):
155 155 bits = 0
156 156 for bit in container:
157 157 bits |= bit
158 158 return bits
159 159
160 160 # Python 2.6 still has deprecation warnings enabled by default. We do not want
161 161 # to display anything to standard users, so detect if we are running tests and
162 162 # only use Python deprecation warnings in that case.
163 163 _dowarn = bool(encoding.environ.get('HGEMITWARNINGS'))
164 164 if _dowarn:
165 165 # explicitly unfilter our warning for python 2.7
166 166 #
167 167 # The option of setting PYTHONWARNINGS in the test runner was investigated.
168 168 # However, module name set through PYTHONWARNINGS was exactly matched, so
169 169 # we cannot set 'mercurial' and have it match eg: 'mercurial.scmutil'. This
170 170 # makes the whole PYTHONWARNINGS thing useless for our usecase.
171 171 warnings.filterwarnings(r'default', r'', DeprecationWarning, r'mercurial')
172 172 warnings.filterwarnings(r'default', r'', DeprecationWarning, r'hgext')
173 173 warnings.filterwarnings(r'default', r'', DeprecationWarning, r'hgext3rd')
174 174
175 175 def nouideprecwarn(msg, version, stacklevel=1):
176 176     """Issue a Python-native deprecation warning
177 177
178 178 This is a noop outside of tests, use 'ui.deprecwarn' when possible.
179 179 """
180 180 if _dowarn:
181 181 msg += ("\n(compatibility will be dropped after Mercurial-%s,"
182 182 " update your code.)") % version
183 183 warnings.warn(msg, DeprecationWarning, stacklevel + 1)
184 184
185 185 DIGESTS = {
186 186 'md5': hashlib.md5,
187 187 'sha1': hashlib.sha1,
188 188 'sha512': hashlib.sha512,
189 189 }
190 190 # List of digest types from strongest to weakest
191 191 DIGESTS_BY_STRENGTH = ['sha512', 'sha1', 'md5']
192 192
193 193 for k in DIGESTS_BY_STRENGTH:
194 194 assert k in DIGESTS
195 195
196 196 class digester(object):
197 197 """helper to compute digests.
198 198
199 199 This helper can be used to compute one or more digests given their name.
200 200
201 201 >>> d = digester(['md5', 'sha1'])
202 202 >>> d.update('foo')
203 203 >>> [k for k in sorted(d)]
204 204 ['md5', 'sha1']
205 205 >>> d['md5']
206 206 'acbd18db4cc2f85cedef654fccc4a4d8'
207 207 >>> d['sha1']
208 208 '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
209 209 >>> digester.preferred(['md5', 'sha1'])
210 210 'sha1'
211 211 """
212 212
213 213 def __init__(self, digests, s=''):
214 214 self._hashes = {}
215 215 for k in digests:
216 216 if k not in DIGESTS:
217 217 raise Abort(_('unknown digest type: %s') % k)
218 218 self._hashes[k] = DIGESTS[k]()
219 219 if s:
220 220 self.update(s)
221 221
222 222 def update(self, data):
223 223 for h in self._hashes.values():
224 224 h.update(data)
225 225
226 226 def __getitem__(self, key):
227 227 if key not in DIGESTS:
228 228             raise Abort(_('unknown digest type: %s') % key)
229 229 return self._hashes[key].hexdigest()
230 230
231 231 def __iter__(self):
232 232 return iter(self._hashes)
233 233
234 234 @staticmethod
235 235 def preferred(supported):
236 236 """returns the strongest digest type in both supported and DIGESTS."""
237 237
238 238 for k in DIGESTS_BY_STRENGTH:
239 239 if k in supported:
240 240 return k
241 241 return None
242 242
243 243 class digestchecker(object):
244 244 """file handle wrapper that additionally checks content against a given
245 245 size and digests.
246 246
247 247 d = digestchecker(fh, size, {'md5': '...'})
248 248
249 249 When multiple digests are given, all of them are validated.
250 250 """
251 251
252 252 def __init__(self, fh, size, digests):
253 253 self._fh = fh
254 254 self._size = size
255 255 self._got = 0
256 256 self._digests = dict(digests)
257 257 self._digester = digester(self._digests.keys())
258 258
259 259 def read(self, length=-1):
260 260 content = self._fh.read(length)
261 261 self._digester.update(content)
262 262 self._got += len(content)
263 263 return content
264 264
265 265 def validate(self):
266 266 if self._size != self._got:
267 267 raise Abort(_('size mismatch: expected %d, got %d') %
268 268 (self._size, self._got))
269 269 for k, v in self._digests.items():
270 270 if v != self._digester[k]:
271 271 # i18n: first parameter is a digest name
272 272 raise Abort(_('%s mismatch: expected %s, got %s') %
273 273 (k, v, self._digester[k]))
274 274
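# A usage sketch for digestchecker ('fh', 'size' and the expected digest are
# hypothetical): drain the wrapped handle, then validate:
#
#   wrapped = digestchecker(fh, size, {'sha1': expectedhexdigest})
#   while wrapped.read(4096):
#       pass
#   wrapped.validate()   # raises Abort on a size or digest mismatch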
275 275 try:
276 276 buffer = buffer
277 277 except NameError:
278 278 if not pycompat.ispy3:
279 279 def buffer(sliceable, offset=0, length=None):
280 280 if length is not None:
281 281 return sliceable[offset:offset + length]
282 282 return sliceable[offset:]
283 283 else:
284 284 def buffer(sliceable, offset=0, length=None):
285 285 if length is not None:
286 286 return memoryview(sliceable)[offset:offset + length]
287 287 return memoryview(sliceable)[offset:]
288 288
289 289 closefds = pycompat.osname == 'posix'
290 290
291 291 _chunksize = 4096
292 292
293 293 class bufferedinputpipe(object):
294 294 """a manually buffered input pipe
295 295
296 296 Python will not let us use buffered IO and lazy reading with 'polling' at
297 297 the same time. We cannot probe the buffer state and select will not detect
298 298 that data are ready to read if they are already buffered.
299 299
300 300     This class lets us work around that by implementing its own buffering
301 301 (allowing efficient readline) while offering a way to know if the buffer is
302 302 empty from the output (allowing collaboration of the buffer with polling).
303 303
304 304 This class lives in the 'util' module because it makes use of the 'os'
305 305 module from the python stdlib.
306 306 """
307 307
308 308 def __init__(self, input):
309 309 self._input = input
310 310 self._buffer = []
311 311 self._eof = False
312 312 self._lenbuf = 0
313 313
314 314 @property
315 315 def hasbuffer(self):
316 316         """True if any data is currently buffered
317 317 
318 318         This will be used externally as a pre-step for polling IO. If there
319 319         is already data, then no polling should be set in place."""
320 320 return bool(self._buffer)
321 321
322 322 @property
323 323 def closed(self):
324 324 return self._input.closed
325 325
326 326 def fileno(self):
327 327 return self._input.fileno()
328 328
329 329 def close(self):
330 330 return self._input.close()
331 331
332 332 def read(self, size):
333 333 while (not self._eof) and (self._lenbuf < size):
334 334 self._fillbuffer()
335 335 return self._frombuffer(size)
336 336
337 337 def readline(self, *args, **kwargs):
338 338 if 1 < len(self._buffer):
339 339 # this should not happen because both read and readline end with a
340 340             # _frombuffer call that collapses it.
341 341 self._buffer = [''.join(self._buffer)]
342 342 self._lenbuf = len(self._buffer[0])
343 343 lfi = -1
344 344 if self._buffer:
345 345 lfi = self._buffer[-1].find('\n')
346 346 while (not self._eof) and lfi < 0:
347 347 self._fillbuffer()
348 348 if self._buffer:
349 349 lfi = self._buffer[-1].find('\n')
350 350 size = lfi + 1
351 351 if lfi < 0: # end of file
352 352 size = self._lenbuf
353 353 elif 1 < len(self._buffer):
354 354 # we need to take previous chunks into account
355 355 size += self._lenbuf - len(self._buffer[-1])
356 356 return self._frombuffer(size)
357 357
358 358 def _frombuffer(self, size):
359 359 """return at most 'size' data from the buffer
360 360
361 361 The data are removed from the buffer."""
362 362 if size == 0 or not self._buffer:
363 363 return ''
364 364 buf = self._buffer[0]
365 365 if 1 < len(self._buffer):
366 366 buf = ''.join(self._buffer)
367 367
368 368 data = buf[:size]
369 369 buf = buf[len(data):]
370 370 if buf:
371 371 self._buffer = [buf]
372 372 self._lenbuf = len(buf)
373 373 else:
374 374 self._buffer = []
375 375 self._lenbuf = 0
376 376 return data
377 377
378 378 def _fillbuffer(self):
379 379 """read data to the buffer"""
380 380 data = os.read(self._input.fileno(), _chunksize)
381 381 if not data:
382 382 self._eof = True
383 383 else:
384 384 self._lenbuf += len(data)
385 385 self._buffer.append(data)
386 386
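# A usage sketch for bufferedinputpipe (the subprocess is hypothetical): the
# wrapper keeps lazy line reads compatible with polling:
#
#   proc = subprocess.Popen(['cat'], stdin=subprocess.PIPE,
#                           stdout=subprocess.PIPE)
#   pipe = bufferedinputpipe(proc.stdout)
#   if not pipe.hasbuffer:
#       pass  # nothing buffered: select()/poll() on pipe.fileno() is safe
#   line = pipe.readline()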
387 387 def popen2(cmd, env=None, newlines=False):
388 388 # Setting bufsize to -1 lets the system decide the buffer size.
389 389 # The default for bufsize is 0, meaning unbuffered. This leads to
390 390 # poor performance on Mac OS X: http://bugs.python.org/issue4194
391 391 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
392 392 close_fds=closefds,
393 393 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
394 394 universal_newlines=newlines,
395 395 env=env)
396 396 return p.stdin, p.stdout
397 397
398 398 def popen3(cmd, env=None, newlines=False):
399 399 stdin, stdout, stderr, p = popen4(cmd, env, newlines)
400 400 return stdin, stdout, stderr
401 401
402 402 def popen4(cmd, env=None, newlines=False, bufsize=-1):
403 403 p = subprocess.Popen(cmd, shell=True, bufsize=bufsize,
404 404 close_fds=closefds,
405 405 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
406 406 stderr=subprocess.PIPE,
407 407 universal_newlines=newlines,
408 408 env=env)
409 409 return p.stdin, p.stdout, p.stderr, p
410 410
411 411 def version():
412 412 """Return version information if available."""
413 413 try:
414 414 from . import __version__
415 415 return __version__.version
416 416 except ImportError:
417 417 return 'unknown'
418 418
419 419 def versiontuple(v=None, n=4):
420 420 """Parses a Mercurial version string into an N-tuple.
421 421
422 422 The version string to be parsed is specified with the ``v`` argument.
423 423 If it isn't defined, the current Mercurial version string will be parsed.
424 424
425 425 ``n`` can be 2, 3, or 4. Here is how some version strings map to
426 426 returned values:
427 427
428 428 >>> v = '3.6.1+190-df9b73d2d444'
429 429 >>> versiontuple(v, 2)
430 430 (3, 6)
431 431 >>> versiontuple(v, 3)
432 432 (3, 6, 1)
433 433 >>> versiontuple(v, 4)
434 434 (3, 6, 1, '190-df9b73d2d444')
435 435
436 436 >>> versiontuple('3.6.1+190-df9b73d2d444+20151118')
437 437 (3, 6, 1, '190-df9b73d2d444+20151118')
438 438
439 439 >>> v = '3.6'
440 440 >>> versiontuple(v, 2)
441 441 (3, 6)
442 442 >>> versiontuple(v, 3)
443 443 (3, 6, None)
444 444 >>> versiontuple(v, 4)
445 445 (3, 6, None, None)
446 446
447 447 >>> v = '3.9-rc'
448 448 >>> versiontuple(v, 2)
449 449 (3, 9)
450 450 >>> versiontuple(v, 3)
451 451 (3, 9, None)
452 452 >>> versiontuple(v, 4)
453 453 (3, 9, None, 'rc')
454 454
455 455 >>> v = '3.9-rc+2-02a8fea4289b'
456 456 >>> versiontuple(v, 2)
457 457 (3, 9)
458 458 >>> versiontuple(v, 3)
459 459 (3, 9, None)
460 460 >>> versiontuple(v, 4)
461 461 (3, 9, None, 'rc+2-02a8fea4289b')
462 462 """
463 463 if not v:
464 464 v = version()
465 465 parts = remod.split('[\+-]', v, 1)
466 466 if len(parts) == 1:
467 467 vparts, extra = parts[0], None
468 468 else:
469 469 vparts, extra = parts
470 470
471 471 vints = []
472 472 for i in vparts.split('.'):
473 473 try:
474 474 vints.append(int(i))
475 475 except ValueError:
476 476 break
477 477 # (3, 6) -> (3, 6, None)
478 478 while len(vints) < 3:
479 479 vints.append(None)
480 480
481 481 if n == 2:
482 482 return (vints[0], vints[1])
483 483 if n == 3:
484 484 return (vints[0], vints[1], vints[2])
485 485 if n == 4:
486 486 return (vints[0], vints[1], vints[2], extra)
487 487
488 488 # used by parsedate
489 489 defaultdateformats = (
490 490 '%Y-%m-%dT%H:%M:%S', # the 'real' ISO8601
491 491 '%Y-%m-%dT%H:%M', # without seconds
492 492 '%Y-%m-%dT%H%M%S', # another awful but legal variant without :
493 493 '%Y-%m-%dT%H%M', # without seconds
494 494 '%Y-%m-%d %H:%M:%S', # our common legal variant
495 495 '%Y-%m-%d %H:%M', # without seconds
496 496 '%Y-%m-%d %H%M%S', # without :
497 497 '%Y-%m-%d %H%M', # without seconds
498 498 '%Y-%m-%d %I:%M:%S%p',
499 499 '%Y-%m-%d %H:%M',
500 500 '%Y-%m-%d %I:%M%p',
501 501 '%Y-%m-%d',
502 502 '%m-%d',
503 503 '%m/%d',
504 504 '%m/%d/%y',
505 505 '%m/%d/%Y',
506 506 '%a %b %d %H:%M:%S %Y',
507 507 '%a %b %d %I:%M:%S%p %Y',
508 508 '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
509 509 '%b %d %H:%M:%S %Y',
510 510 '%b %d %I:%M:%S%p %Y',
511 511 '%b %d %H:%M:%S',
512 512 '%b %d %I:%M:%S%p',
513 513 '%b %d %H:%M',
514 514 '%b %d %I:%M%p',
515 515 '%b %d %Y',
516 516 '%b %d',
517 517 '%H:%M:%S',
518 518 '%I:%M:%S%p',
519 519 '%H:%M',
520 520 '%I:%M%p',
521 521 )
522 522
523 523 extendeddateformats = defaultdateformats + (
524 524 "%Y",
525 525 "%Y-%m",
526 526 "%b",
527 527 "%b %Y",
528 528 )
529 529
530 530 def cachefunc(func):
531 531 '''cache the result of function calls'''
532 532 # XXX doesn't handle keywords args
533 533 if func.__code__.co_argcount == 0:
534 534 cache = []
535 535 def f():
536 536 if len(cache) == 0:
537 537 cache.append(func())
538 538 return cache[0]
539 539 return f
540 540 cache = {}
541 541 if func.__code__.co_argcount == 1:
542 542 # we gain a small amount of time because
543 543 # we don't need to pack/unpack the list
544 544 def f(arg):
545 545 if arg not in cache:
546 546 cache[arg] = func(arg)
547 547 return cache[arg]
548 548 else:
549 549 def f(*args):
550 550 if args not in cache:
551 551 cache[args] = func(*args)
552 552 return cache[args]
553 553
554 554 return f
555 555
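# A usage sketch for cachefunc ('fib' is hypothetical): repeated calls with
# the same argument hit the cache instead of recomputing:
#
#   def fib(n):
#       return n if n < 2 else fib(n - 1) + fib(n - 2)
#   fib = cachefunc(fib)  # recursion now memoizes every intermediate result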
556 556 class sortdict(dict):
557 557 '''a simple sorted dictionary'''
558 558 def __init__(self, data=None):
559 559 self._list = []
560 560 if data:
561 561 self.update(data)
562 562 def copy(self):
563 563 return sortdict(self)
564 564 def __setitem__(self, key, val):
565 565 if key in self:
566 566 self._list.remove(key)
567 567 self._list.append(key)
568 568 dict.__setitem__(self, key, val)
569 569 def __iter__(self):
570 570 return self._list.__iter__()
571 571 def update(self, src):
572 572 if isinstance(src, dict):
573 573 src = src.iteritems()
574 574 for k, v in src:
575 575 self[k] = v
576 576 def clear(self):
577 577 dict.clear(self)
578 578 self._list = []
579 579 def items(self):
580 580 return [(k, self[k]) for k in self._list]
581 581 def __delitem__(self, key):
582 582 dict.__delitem__(self, key)
583 583 self._list.remove(key)
584 584 def pop(self, key, *args, **kwargs):
585 585 try:
586 586 self._list.remove(key)
587 587 except ValueError:
588 588 pass
589 589 return dict.pop(self, key, *args, **kwargs)
590 590 def keys(self):
591 591 return self._list[:]
592 592 def iterkeys(self):
593 593 return self._list.__iter__()
594 594 def iteritems(self):
595 595 for k in self._list:
596 596 yield k, self[k]
597 597 def insert(self, index, key, val):
598 598 self._list.insert(index, key)
599 599 dict.__setitem__(self, key, val)
600 600 def __repr__(self):
601 601 if not self:
602 602 return '%s()' % self.__class__.__name__
603 603 return '%s(%r)' % (self.__class__.__name__, self.items())
604 604
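# A small sketch of sortdict behavior: iteration follows insertion order, and
# re-assigning a key moves it to the end:
#
#   d = sortdict()
#   d['b'] = 1
#   d['a'] = 2
#   d['b'] = 3
#   list(d)   # -> ['a', 'b']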
605 605 class _lrucachenode(object):
606 606 """A node in a doubly linked list.
607 607
608 608 Holds a reference to nodes on either side as well as a key-value
609 609 pair for the dictionary entry.
610 610 """
611 611 __slots__ = (u'next', u'prev', u'key', u'value')
612 612
613 613 def __init__(self):
614 614 self.next = None
615 615 self.prev = None
616 616
617 617 self.key = _notset
618 618 self.value = None
619 619
620 620 def markempty(self):
621 621 """Mark the node as emptied."""
622 622 self.key = _notset
623 623
624 624 class lrucachedict(object):
625 625 """Dict that caches most recent accesses and sets.
626 626
627 627 The dict consists of an actual backing dict - indexed by original
628 628 key - and a doubly linked circular list defining the order of entries in
629 629 the cache.
630 630
631 631 The head node is the newest entry in the cache. If the cache is full,
632 632 we recycle head.prev and make it the new head. Cache accesses result in
633 633 the node being moved to before the existing head and being marked as the
634 634 new head node.
635 635 """
636 636 def __init__(self, max):
637 637 self._cache = {}
638 638
639 639 self._head = head = _lrucachenode()
640 640 head.prev = head
641 641 head.next = head
642 642 self._size = 1
643 643 self._capacity = max
644 644
645 645 def __len__(self):
646 646 return len(self._cache)
647 647
648 648 def __contains__(self, k):
649 649 return k in self._cache
650 650
651 651 def __iter__(self):
652 652 # We don't have to iterate in cache order, but why not.
653 653 n = self._head
654 654 for i in range(len(self._cache)):
655 655 yield n.key
656 656 n = n.next
657 657
658 658 def __getitem__(self, k):
659 659 node = self._cache[k]
660 660 self._movetohead(node)
661 661 return node.value
662 662
663 663 def __setitem__(self, k, v):
664 664 node = self._cache.get(k)
665 665 # Replace existing value and mark as newest.
666 666 if node is not None:
667 667 node.value = v
668 668 self._movetohead(node)
669 669 return
670 670
671 671 if self._size < self._capacity:
672 672 node = self._addcapacity()
673 673 else:
674 674 # Grab the last/oldest item.
675 675 node = self._head.prev
676 676
677 677 # At capacity. Kill the old entry.
678 678 if node.key is not _notset:
679 679 del self._cache[node.key]
680 680
681 681 node.key = k
682 682 node.value = v
683 683 self._cache[k] = node
684 684 # And mark it as newest entry. No need to adjust order since it
685 685 # is already self._head.prev.
686 686 self._head = node
687 687
688 688 def __delitem__(self, k):
689 689 node = self._cache.pop(k)
690 690 node.markempty()
691 691
692 692 # Temporarily mark as newest item before re-adjusting head to make
693 693 # this node the oldest item.
694 694 self._movetohead(node)
695 695 self._head = node.next
696 696
697 697 # Additional dict methods.
698 698
699 699 def get(self, k, default=None):
700 700 try:
701 701 return self._cache[k].value
702 702 except KeyError:
703 703 return default
704 704
705 705 def clear(self):
706 706 n = self._head
707 707 while n.key is not _notset:
708 708 n.markempty()
709 709 n = n.next
710 710
711 711 self._cache.clear()
712 712
713 713 def copy(self):
714 714 result = lrucachedict(self._capacity)
715 715 n = self._head.prev
716 716 # Iterate in oldest-to-newest order, so the copy has the right ordering
717 717 for i in range(len(self._cache)):
718 718 result[n.key] = n.value
719 719 n = n.prev
720 720 return result
721 721
722 722 def _movetohead(self, node):
723 723 """Mark a node as the newest, making it the new head.
724 724
725 725 When a node is accessed, it becomes the freshest entry in the LRU
726 726 list, which is denoted by self._head.
727 727
728 728 Visually, let's make ``N`` the new head node (* denotes head):
729 729
730 730 previous/oldest <-> head <-> next/next newest
731 731
732 732 ----<->--- A* ---<->-----
733 733 | |
734 734 E <-> D <-> N <-> C <-> B
735 735
736 736 To:
737 737
738 738 ----<->--- N* ---<->-----
739 739 | |
740 740 E <-> D <-> C <-> B <-> A
741 741
742 742 This requires the following moves:
743 743
744 744 C.next = D (node.prev.next = node.next)
745 745 D.prev = C (node.next.prev = node.prev)
746 746 E.next = N (head.prev.next = node)
747 747 N.prev = E (node.prev = head.prev)
748 748 N.next = A (node.next = head)
749 749 A.prev = N (head.prev = node)
750 750 """
751 751 head = self._head
752 752 # C.next = D
753 753 node.prev.next = node.next
754 754 # D.prev = C
755 755 node.next.prev = node.prev
756 756 # N.prev = E
757 757 node.prev = head.prev
758 758 # N.next = A
759 759 # It is tempting to do just "head" here, however if node is
760 760 # adjacent to head, this will do bad things.
761 761 node.next = head.prev.next
762 762 # E.next = N
763 763 node.next.prev = node
764 764 # A.prev = N
765 765 node.prev.next = node
766 766
767 767 self._head = node
768 768
769 769 def _addcapacity(self):
770 770 """Add a node to the circular linked list.
771 771
772 772 The new node is inserted before the head node.
773 773 """
774 774 head = self._head
775 775 node = _lrucachenode()
776 776 head.prev.next = node
777 777 node.prev = head.prev
778 778 node.next = head
779 779 head.prev = node
780 780 self._size += 1
781 781 return node
782 782
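# Illustrative usage of lrucachedict (a minimal sketch, not part of the
# original file): eviction follows least-recently-used order.
#
#   d = lrucachedict(2)
#   d['a'] = 1
#   d['b'] = 2
#   d['a']         # touching 'a' makes it the newest entry
#   d['c'] = 3     # at capacity: evicts 'b', the least recently used
#   'b' in d       # -> False
#   'a' in d       # -> True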
783 783 def lrucachefunc(func):
784 784 '''cache most recent results of function calls'''
785 785 cache = {}
786 786 order = collections.deque()
787 787 if func.__code__.co_argcount == 1:
788 788 def f(arg):
789 789 if arg not in cache:
790 790 if len(cache) > 20:
791 791 del cache[order.popleft()]
792 792 cache[arg] = func(arg)
793 793 else:
794 794 order.remove(arg)
795 795 order.append(arg)
796 796 return cache[arg]
797 797 else:
798 798 def f(*args):
799 799 if args not in cache:
800 800 if len(cache) > 20:
801 801 del cache[order.popleft()]
802 802 cache[args] = func(*args)
803 803 else:
804 804 order.remove(args)
805 805 order.append(args)
806 806 return cache[args]
807 807
808 808 return f
809 809
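# Sketch of lrucachefunc in use (illustrative; 'expensive' is a
# hypothetical stand-in for a costly computation). The wrapper keeps
# roughly the 21 most recently used argument values before evicting.
#
#   def expensive(x):
#       return x * x
#   cached = lrucachefunc(expensive)
#   cached(3)      # computed and stored
#   cached(3)      # served from the cache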
810 810 class propertycache(object):
811 811 def __init__(self, func):
812 812 self.func = func
813 813 self.name = func.__name__
814 814 def __get__(self, obj, type=None):
815 815 result = self.func(obj)
816 816 self.cachevalue(obj, result)
817 817 return result
818 818
819 819 def cachevalue(self, obj, value):
820 820 # __dict__ assignment required to bypass __setattr__ (eg: repoview)
821 821 obj.__dict__[self.name] = value
822 822
823 823 def pipefilter(s, cmd):
824 824 '''filter string S through command CMD, returning its output'''
825 825 p = subprocess.Popen(cmd, shell=True, close_fds=closefds,
826 826 stdin=subprocess.PIPE, stdout=subprocess.PIPE)
827 827 pout, perr = p.communicate(s)
828 828 return pout
829 829
830 830 def tempfilter(s, cmd):
831 831 '''filter string S through a pair of temporary files with CMD.
832 832 CMD is used as a template to create the real command to be run,
833 833 with the strings INFILE and OUTFILE replaced by the real names of
834 834 the temporary files generated.'''
835 835 inname, outname = None, None
836 836 try:
837 837 infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
838 838 fp = os.fdopen(infd, pycompat.sysstr('wb'))
839 839 fp.write(s)
840 840 fp.close()
841 841 outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
842 842 os.close(outfd)
843 843 cmd = cmd.replace('INFILE', inname)
844 844 cmd = cmd.replace('OUTFILE', outname)
845 845 code = os.system(cmd)
846 846 if pycompat.sysplatform == 'OpenVMS' and code & 1:
847 847 code = 0
848 848 if code:
849 849 raise Abort(_("command '%s' failed: %s") %
850 850 (cmd, explainexit(code)))
851 851 return readfile(outname)
852 852 finally:
853 853 try:
854 854 if inname:
855 855 os.unlink(inname)
856 856 except OSError:
857 857 pass
858 858 try:
859 859 if outname:
860 860 os.unlink(outname)
861 861 except OSError:
862 862 pass
863 863
864 864 filtertable = {
865 865 'tempfile:': tempfilter,
866 866 'pipe:': pipefilter,
867 867 }
868 868
869 869 def filter(s, cmd):
870 870 "filter a string through a command that transforms its input to its output"
871 871 for name, fn in filtertable.iteritems():
872 872 if cmd.startswith(name):
873 873 return fn(s, cmd[len(name):].lstrip())
874 874 return pipefilter(s, cmd)
875 875
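# Sketch of the prefix dispatch above (illustrative; assumes a POSIX
# 'sort' binary on PATH). Unknown prefixes fall through to pipefilter.
#
#   filter('b\na\n', 'pipe: sort')      # feed via stdin/stdout
#   filter('b\na\n', 'tempfile: sort INFILE > OUTFILE')
#   filter('b\na\n', 'sort')            # no known prefix: pipefilter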
876 876 def binary(s):
877 877 """return true if a string is binary data"""
878 878 return bool(s and '\0' in s)
879 879
880 880 def increasingchunks(source, min=1024, max=65536):
881 881 '''return no less than min bytes per chunk while data remains,
882 882 doubling min after each chunk until it reaches max'''
883 883 def log2(x):
884 884 if not x:
885 885 return 0
886 886 i = 0
887 887 while x:
888 888 x >>= 1
889 889 i += 1
890 890 return i - 1
891 891
892 892 buf = []
893 893 blen = 0
894 894 for chunk in source:
895 895 buf.append(chunk)
896 896 blen += len(chunk)
897 897 if blen >= min:
898 898 if min < max:
899 899 min = min << 1
900 900 nmin = 1 << log2(blen)
901 901 if nmin > min:
902 902 min = nmin
903 903 if min > max:
904 904 min = max
905 905 yield ''.join(buf)
906 906 blen = 0
907 907 buf = []
908 908 if buf:
909 909 yield ''.join(buf)
910 910
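# Rough illustration of increasingchunks (not from the original file):
# yielded chunk sizes start at 'min' and double toward 'max'.
#
#   source = ('x' * 512 for _ in xrange(64))
#   [len(c) for c in increasingchunks(source, min=1024, max=4096)]
#   # -> [1024, 2048, 4096, 4096, 4096, 4096, 4096, 4096, 4096, 1024]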
911 911 Abort = error.Abort
912 912
913 913 def always(fn):
914 914 return True
915 915
916 916 def never(fn):
917 917 return False
918 918
919 919 def nogc(func):
920 920 """disable garbage collector
921 921
922 922 Python's garbage collector triggers a GC each time a certain number of
923 923 container objects (the number being defined by gc.get_threshold()) are
924 924 allocated even when marked not to be tracked by the collector. Tracking has
925 925 no effect on when GCs are triggered, only on what objects the GC looks
926 926 into. As a workaround, disable GC while building complex (huge)
927 927 containers.
928 928
929 929 This garbage collector issue has been fixed in 2.7.
930 930 """
931 931 if sys.version_info >= (2, 7):
932 932 return func
933 933 def wrapper(*args, **kwargs):
934 934 gcenabled = gc.isenabled()
935 935 gc.disable()
936 936 try:
937 937 return func(*args, **kwargs)
938 938 finally:
939 939 if gcenabled:
940 940 gc.enable()
941 941 return wrapper
942 942
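# Sketch (illustrative; 'buildmany' is hypothetical): wrap a function
# that allocates many containers so pre-2.7 GC passes don't dominate.
#
#   @nogc
#   def buildmany():
#       return [{} for i in xrange(100000)]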
943 943 def pathto(root, n1, n2):
944 944 '''return the relative path from one place to another.
945 945 root should use os.sep to separate directories
946 946 n1 should use os.sep to separate directories
947 947 n2 should use "/" to separate directories
948 948 returns an os.sep-separated path.
949 949
950 950 If n1 is a relative path, it's assumed it's
951 951 relative to root.
952 952 n2 should always be relative to root.
953 953 '''
954 954 if not n1:
955 955 return localpath(n2)
956 956 if os.path.isabs(n1):
957 957 if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
958 958 return os.path.join(root, localpath(n2))
959 959 n2 = '/'.join((pconvert(root), n2))
960 960 a, b = splitpath(n1), n2.split('/')
961 961 a.reverse()
962 962 b.reverse()
963 963 while a and b and a[-1] == b[-1]:
964 964 a.pop()
965 965 b.pop()
966 966 b.reverse()
967 967 return pycompat.ossep.join((['..'] * len(a)) + b) or '.'
968 968
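# Worked example for pathto (illustrative, assuming a POSIX os.sep):
#
#   pathto('/repo', 'a/b', 'c/d')   # -> '../../c/d'
#
# 'a/b' is taken as relative to root; no common components remain
# after the comparison, so one '..' is emitted per component of n1.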
969 969 def mainfrozen():
970 970 """return True if we are a frozen executable.
971 971
972 972 The code supports py2exe (most common, Windows only) and tools/freeze
973 973 (portable, not much used).
974 974 """
975 975 return (safehasattr(sys, "frozen") or # new py2exe
976 976 safehasattr(sys, "importers") or # old py2exe
977 977 imp.is_frozen(u"__main__")) # tools/freeze
978 978
979 979 # the location of data files matching the source code
980 980 if mainfrozen() and getattr(sys, 'frozen', None) != 'macosx_app':
981 981 # executable version (py2exe) doesn't support __file__
982 982 datapath = os.path.dirname(pycompat.sysexecutable)
983 983 else:
984 984 datapath = os.path.dirname(pycompat.fsencode(__file__))
985 985
986 986 i18n.setdatapath(datapath)
987 987
988 988 _hgexecutable = None
989 989
990 990 def hgexecutable():
991 991 """return location of the 'hg' executable.
992 992
993 993 Defaults to $HG or 'hg' in the search path.
994 994 """
995 995 if _hgexecutable is None:
996 996 hg = encoding.environ.get('HG')
997 997 mainmod = sys.modules[pycompat.sysstr('__main__')]
998 998 if hg:
999 999 _sethgexecutable(hg)
1000 1000 elif mainfrozen():
1001 1001 if getattr(sys, 'frozen', None) == 'macosx_app':
1002 1002 # Env variable set by py2app
1003 1003 _sethgexecutable(encoding.environ['EXECUTABLEPATH'])
1004 1004 else:
1005 1005 _sethgexecutable(pycompat.sysexecutable)
1006 1006 elif (os.path.basename(
1007 1007 pycompat.fsencode(getattr(mainmod, '__file__', ''))) == 'hg'):
1008 1008 _sethgexecutable(pycompat.fsencode(mainmod.__file__))
1009 1009 else:
1010 1010 exe = findexe('hg') or os.path.basename(sys.argv[0])
1011 1011 _sethgexecutable(exe)
1012 1012 return _hgexecutable
1013 1013
1014 1014 def _sethgexecutable(path):
1015 1015 """set location of the 'hg' executable"""
1016 1016 global _hgexecutable
1017 1017 _hgexecutable = path
1018 1018
1019 1019 def _isstdout(f):
1020 1020 fileno = getattr(f, 'fileno', None)
1021 1021 return fileno and fileno() == sys.__stdout__.fileno()
1022 1022
1023 1023 def shellenviron(environ=None):
1024 1024 """return environ with optional override, useful for shelling out"""
1025 1025 def py2shell(val):
1026 1026 'convert python object into string that is useful to shell'
1027 1027 if val is None or val is False:
1028 1028 return '0'
1029 1029 if val is True:
1030 1030 return '1'
1031 1031 return str(val)
1032 1032 env = dict(encoding.environ)
1033 1033 if environ:
1034 1034 env.update((k, py2shell(v)) for k, v in environ.iteritems())
1035 1035 env['HG'] = hgexecutable()
1036 1036 return env
1037 1037
1038 1038 def system(cmd, environ=None, cwd=None, out=None):
1039 1039 '''enhanced shell command execution.
1040 1040 run with environment maybe modified, maybe in different dir.
1041 1041
1042 1042 if out is specified, it is assumed to be a file-like object that has a
1043 1043 write() method. stdout and stderr will be redirected to out.'''
1044 1044 try:
1045 1045 stdout.flush()
1046 1046 except Exception:
1047 1047 pass
1048 1048 cmd = quotecommand(cmd)
1049 1049 if pycompat.sysplatform == 'plan9' and (sys.version_info[0] == 2
1050 1050 and sys.version_info[1] < 7):
1051 1051 # subprocess kludge to work around issues in half-baked Python
1052 1052 # ports, notably bichued/python:
1053 1053 if not cwd is None:
1054 1054 os.chdir(cwd)
1055 1055 rc = os.system(cmd)
1056 1056 else:
1057 1057 env = shellenviron(environ)
1058 1058 if out is None or _isstdout(out):
1059 1059 rc = subprocess.call(cmd, shell=True, close_fds=closefds,
1060 1060 env=env, cwd=cwd)
1061 1061 else:
1062 1062 proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
1063 1063 env=env, cwd=cwd, stdout=subprocess.PIPE,
1064 1064 stderr=subprocess.STDOUT)
1065 1065 for line in iter(proc.stdout.readline, ''):
1066 1066 out.write(line)
1067 1067 proc.wait()
1068 1068 rc = proc.returncode
1069 1069 if pycompat.sysplatform == 'OpenVMS' and rc & 1:
1070 1070 rc = 0
1071 1071 return rc
1072 1072
1073 1073 def checksignature(func):
1074 1074 '''wrap a function with code to check for calling errors'''
1075 1075 def check(*args, **kwargs):
1076 1076 try:
1077 1077 return func(*args, **kwargs)
1078 1078 except TypeError:
1079 1079 if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
1080 1080 raise error.SignatureError
1081 1081 raise
1082 1082
1083 1083 return check
1084 1084
1085 1085 # a whitelist of known filesystems where hardlinks work reliably
1086 1086 _hardlinkfswhitelist = set([
1087 1087 'btrfs',
1088 1088 'ext2',
1089 1089 'ext3',
1090 1090 'ext4',
1091 1091 'hfs',
1092 1092 'jfs',
1093 1093 'reiserfs',
1094 1094 'tmpfs',
1095 1095 'ufs',
1096 1096 'xfs',
1097 1097 'zfs',
1098 1098 ])
1099 1099
1100 1100 def copyfile(src, dest, hardlink=False, copystat=False, checkambig=False):
1101 1101 '''copy a file, preserving mode and optionally other stat info like
1102 1102 atime/mtime
1103 1103
1104 1104 checkambig argument is used with filestat, and is useful only if
1105 1105 destination file is guarded by any lock (e.g. repo.lock or
1106 1106 repo.wlock).
1107 1107
1108 1108 copystat and checkambig should be exclusive.
1109 1109 '''
1110 1110 assert not (copystat and checkambig)
1111 1111 oldstat = None
1112 1112 if os.path.lexists(dest):
1113 1113 if checkambig:
1114 1114 oldstat = checkambig and filestat(dest)
1115 1115 unlink(dest)
1116 1116 if hardlink:
1117 1117 # Hardlinks are problematic on CIFS (issue4546), do not allow hardlinks
1118 1118 # unless we are confident that dest is on a whitelisted filesystem.
1119 1119 try:
1120 1120 fstype = getfstype(os.path.dirname(dest))
1121 1121 except OSError:
1122 1122 fstype = None
1123 1123 if fstype not in _hardlinkfswhitelist:
1124 1124 hardlink = False
1125 1125 if hardlink:
1126 1126 try:
1127 1127 oslink(src, dest)
1128 1128 return
1129 1129 except (IOError, OSError):
1130 1130 pass # fall back to normal copy
1131 1131 if os.path.islink(src):
1132 1132 os.symlink(os.readlink(src), dest)
1133 1133 # copystat is ignored for symlinks, but in general copying stat info
1134 1134 # isn't needed for them anyway
1135 1135 else:
1136 1136 try:
1137 1137 shutil.copyfile(src, dest)
1138 1138 if copystat:
1139 1139 # copystat also copies mode
1140 1140 shutil.copystat(src, dest)
1141 1141 else:
1142 1142 shutil.copymode(src, dest)
1143 1143 if oldstat and oldstat.stat:
1144 1144 newstat = filestat(dest)
1145 1145 if newstat.isambig(oldstat):
1146 1146 # stat of copied file is ambiguous to original one
1147 1147 advanced = (oldstat.stat.st_mtime + 1) & 0x7fffffff
1148 1148 os.utime(dest, (advanced, advanced))
1149 1149 except shutil.Error as inst:
1150 1150 raise Abort(str(inst))
1151 1151
1152 1152 def copyfiles(src, dst, hardlink=None, progress=lambda t, pos: None):
1153 1153 """Copy a directory tree using hardlinks if possible."""
1154 1154 num = 0
1155 1155
1156 1156 gettopic = lambda: hardlink and _('linking') or _('copying')
1157 1157
1158 1158 if os.path.isdir(src):
1159 1159 if hardlink is None:
1160 1160 hardlink = (os.stat(src).st_dev ==
1161 1161 os.stat(os.path.dirname(dst)).st_dev)
1162 1162 topic = gettopic()
1163 1163 os.mkdir(dst)
1164 1164 for name, kind in osutil.listdir(src):
1165 1165 srcname = os.path.join(src, name)
1166 1166 dstname = os.path.join(dst, name)
1167 1167 def nprog(t, pos):
1168 1168 if pos is not None:
1169 1169 return progress(t, pos + num)
1170 1170 hardlink, n = copyfiles(srcname, dstname, hardlink, progress=nprog)
1171 1171 num += n
1172 1172 else:
1173 1173 if hardlink is None:
1174 1174 hardlink = (os.stat(os.path.dirname(src)).st_dev ==
1175 1175 os.stat(os.path.dirname(dst)).st_dev)
1176 1176 topic = gettopic()
1177 1177
1178 1178 if hardlink:
1179 1179 try:
1180 1180 oslink(src, dst)
1181 1181 except (IOError, OSError):
1182 1182 hardlink = False
1183 1183 shutil.copy(src, dst)
1184 1184 else:
1185 1185 shutil.copy(src, dst)
1186 1186 num += 1
1187 1187 progress(topic, num)
1188 1188 progress(topic, None)
1189 1189
1190 1190 return hardlink, num
1191 1191
1192 1192 _winreservednames = '''con prn aux nul
1193 1193 com1 com2 com3 com4 com5 com6 com7 com8 com9
1194 1194 lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
1195 1195 _winreservedchars = ':*?"<>|'
1196 1196 def checkwinfilename(path):
1197 1197 r'''Check that the base-relative path is a valid filename on Windows.
1198 1198 Returns None if the path is ok, or a UI string describing the problem.
1199 1199
1200 1200 >>> checkwinfilename("just/a/normal/path")
1201 1201 >>> checkwinfilename("foo/bar/con.xml")
1202 1202 "filename contains 'con', which is reserved on Windows"
1203 1203 >>> checkwinfilename("foo/con.xml/bar")
1204 1204 "filename contains 'con', which is reserved on Windows"
1205 1205 >>> checkwinfilename("foo/bar/xml.con")
1206 1206 >>> checkwinfilename("foo/bar/AUX/bla.txt")
1207 1207 "filename contains 'AUX', which is reserved on Windows"
1208 1208 >>> checkwinfilename("foo/bar/bla:.txt")
1209 1209 "filename contains ':', which is reserved on Windows"
1210 1210 >>> checkwinfilename("foo/bar/b\07la.txt")
1211 1211 "filename contains '\\x07', which is invalid on Windows"
1212 1212 >>> checkwinfilename("foo/bar/bla ")
1213 1213 "filename ends with ' ', which is not allowed on Windows"
1214 1214 >>> checkwinfilename("../bar")
1215 1215 >>> checkwinfilename("foo\\")
1216 1216 "filename ends with '\\', which is invalid on Windows"
1217 1217 >>> checkwinfilename("foo\\/bar")
1218 1218 "directory name ends with '\\', which is invalid on Windows"
1219 1219 '''
1220 1220 if path.endswith('\\'):
1221 1221 return _("filename ends with '\\', which is invalid on Windows")
1222 1222 if '\\/' in path:
1223 1223 return _("directory name ends with '\\', which is invalid on Windows")
1224 1224 for n in path.replace('\\', '/').split('/'):
1225 1225 if not n:
1226 1226 continue
1227 1227 for c in pycompat.bytestr(n):
1228 1228 if c in _winreservedchars:
1229 1229 return _("filename contains '%s', which is reserved "
1230 1230 "on Windows") % c
1231 1231 if ord(c) <= 31:
1232 1232 return _("filename contains %r, which is invalid "
1233 1233 "on Windows") % c
1234 1234 base = n.split('.')[0]
1235 1235 if base and base.lower() in _winreservednames:
1236 1236 return _("filename contains '%s', which is reserved "
1237 1237 "on Windows") % base
1238 1238 t = n[-1]
1239 1239 if t in '. ' and n not in '..':
1240 1240 return _("filename ends with '%s', which is not allowed "
1241 1241 "on Windows") % t
1242 1242
1243 1243 if pycompat.osname == 'nt':
1244 1244 checkosfilename = checkwinfilename
1245 1245 timer = time.clock
1246 1246 else:
1247 1247 checkosfilename = platform.checkosfilename
1248 1248 timer = time.time
1249 1249
1250 1250 if safehasattr(time, "perf_counter"):
1251 1251 timer = time.perf_counter
1252 1252
1253 1253 def makelock(info, pathname):
1254 1254 try:
1255 1255 return os.symlink(info, pathname)
1256 1256 except OSError as why:
1257 1257 if why.errno == errno.EEXIST:
1258 1258 raise
1259 1259 except AttributeError: # no symlink in os
1260 1260 pass
1261 1261
1262 1262 ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
1263 1263 os.write(ld, info)
1264 1264 os.close(ld)
1265 1265
1266 1266 def readlock(pathname):
1267 1267 try:
1268 1268 return os.readlink(pathname)
1269 1269 except OSError as why:
1270 1270 if why.errno not in (errno.EINVAL, errno.ENOSYS):
1271 1271 raise
1272 1272 except AttributeError: # no symlink in os
1273 1273 pass
1274 1274 fp = posixfile(pathname)
1275 1275 r = fp.read()
1276 1276 fp.close()
1277 1277 return r
1278 1278
1279 1279 def fstat(fp):
1280 1280 '''stat file object that may not have fileno method.'''
1281 1281 try:
1282 1282 return os.fstat(fp.fileno())
1283 1283 except AttributeError:
1284 1284 return os.stat(fp.name)
1285 1285
1286 1286 # File system features
1287 1287
1288 1288 def fscasesensitive(path):
1289 1289 """
1290 1290 Return true if the given path is on a case-sensitive filesystem
1291 1291
1292 1292 Requires a path (like /foo/.hg) ending with a foldable final
1293 1293 directory component.
1294 1294 """
1295 1295 s1 = os.lstat(path)
1296 1296 d, b = os.path.split(path)
1297 1297 b2 = b.upper()
1298 1298 if b == b2:
1299 1299 b2 = b.lower()
1300 1300 if b == b2:
1301 1301 return True # no evidence against case sensitivity
1302 1302 p2 = os.path.join(d, b2)
1303 1303 try:
1304 1304 s2 = os.lstat(p2)
1305 1305 if s2 == s1:
1306 1306 return False
1307 1307 return True
1308 1308 except OSError:
1309 1309 return True
1310 1310
1311 1311 try:
1312 1312 import re2
1313 1313 _re2 = None
1314 1314 except ImportError:
1315 1315 _re2 = False
1316 1316
1317 1317 class _re(object):
1318 1318 def _checkre2(self):
1319 1319 global _re2
1320 1320 try:
1321 1321 # check if match works, see issue3964
1322 1322 _re2 = bool(re2.match(r'\[([^\[]+)\]', '[ui]'))
1323 1323 except ImportError:
1324 1324 _re2 = False
1325 1325
1326 1326 def compile(self, pat, flags=0):
1327 1327 '''Compile a regular expression, using re2 if possible
1328 1328
1329 1329 For best performance, use only re2-compatible regexp features. The
1330 1330 only flags from the re module that are re2-compatible are
1331 1331 IGNORECASE and MULTILINE.'''
1332 1332 if _re2 is None:
1333 1333 self._checkre2()
1334 1334 if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
1335 1335 if flags & remod.IGNORECASE:
1336 1336 pat = '(?i)' + pat
1337 1337 if flags & remod.MULTILINE:
1338 1338 pat = '(?m)' + pat
1339 1339 try:
1340 1340 return re2.compile(pat)
1341 1341 except re2.error:
1342 1342 pass
1343 1343 return remod.compile(pat, flags)
1344 1344
1345 1345 @propertycache
1346 1346 def escape(self):
1347 1347 '''Return the version of escape corresponding to self.compile.
1348 1348
1349 1349 This is imperfect because whether re2 or re is used for a particular
1350 1350 function depends on the flags, etc, but it's the best we can do.
1351 1351 '''
1352 1352 global _re2
1353 1353 if _re2 is None:
1354 1354 self._checkre2()
1355 1355 if _re2:
1356 1356 return re2.escape
1357 1357 else:
1358 1358 return remod.escape
1359 1359
1360 1360 re = _re()
1361 1361
1362 1362 _fspathcache = {}
1363 1363 def fspath(name, root):
1364 1364 '''Get name in the case stored in the filesystem
1365 1365
1366 1366 The name should be relative to root, and be normcase-ed for efficiency.
1367 1367
1368 1368 Note that this function is unnecessary, and should not be
1369 1369 called, for case-sensitive filesystems (simply because it's expensive).
1370 1370
1371 1371 The root should be normcase-ed, too.
1372 1372 '''
1373 1373 def _makefspathcacheentry(dir):
1374 1374 return dict((normcase(n), n) for n in os.listdir(dir))
1375 1375
1376 1376 seps = pycompat.ossep
1377 1377 if pycompat.osaltsep:
1378 1378 seps = seps + pycompat.osaltsep
1379 1379 # Protect backslashes. This gets silly very quickly.
1380 1380 seps = seps.replace('\\','\\\\')
1381 1381 pattern = remod.compile(br'([^%s]+)|([%s]+)' % (seps, seps))
1382 1382 dir = os.path.normpath(root)
1383 1383 result = []
1384 1384 for part, sep in pattern.findall(name):
1385 1385 if sep:
1386 1386 result.append(sep)
1387 1387 continue
1388 1388
1389 1389 if dir not in _fspathcache:
1390 1390 _fspathcache[dir] = _makefspathcacheentry(dir)
1391 1391 contents = _fspathcache[dir]
1392 1392
1393 1393 found = contents.get(part)
1394 1394 if not found:
1395 1395 # retry "once per directory" per "dirstate.walk" which
1396 1396 # may take place for each patch of "hg qpush", for example
1397 1397 _fspathcache[dir] = contents = _makefspathcacheentry(dir)
1398 1398 found = contents.get(part)
1399 1399
1400 1400 result.append(found or part)
1401 1401 dir = os.path.join(dir, part)
1402 1402
1403 1403 return ''.join(result)
1404 1404
1405 1405 def getfstype(dirpath):
1406 1406 '''Get the filesystem type name from a directory (best-effort)
1407 1407
1408 1408 Returns None if we are unsure. Raises OSError on ENOENT, EPERM, etc.
1409 1409 '''
1410 1410 return getattr(osutil, 'getfstype', lambda x: None)(dirpath)
1411 1411
1412 1412 def checknlink(testfile):
1413 1413 '''check whether hardlink count reporting works properly'''
1414 1414
1415 1415 # testfile may be open, so we need a separate file for checking to
1416 1416 # work around issue2543 (or testfile may get lost on Samba shares)
1417 1417 f1 = testfile + ".hgtmp1"
1418 1418 if os.path.lexists(f1):
1419 1419 return False
1420 1420 try:
1421 1421 posixfile(f1, 'w').close()
1422 1422 except IOError:
1423 1423 try:
1424 1424 os.unlink(f1)
1425 1425 except OSError:
1426 1426 pass
1427 1427 return False
1428 1428
1429 1429 f2 = testfile + ".hgtmp2"
1430 1430 fd = None
1431 1431 try:
1432 1432 oslink(f1, f2)
1433 1433 # nlinks() may behave differently for files on Windows shares if
1434 1434 # the file is open.
1435 1435 fd = posixfile(f2)
1436 1436 return nlinks(f2) > 1
1437 1437 except OSError:
1438 1438 return False
1439 1439 finally:
1440 1440 if fd is not None:
1441 1441 fd.close()
1442 1442 for f in (f1, f2):
1443 1443 try:
1444 1444 os.unlink(f)
1445 1445 except OSError:
1446 1446 pass
1447 1447
1448 1448 def endswithsep(path):
1449 1449 '''Check path ends with os.sep or os.altsep.'''
1450 1450 return (path.endswith(pycompat.ossep)
1451 1451 or pycompat.osaltsep and path.endswith(pycompat.osaltsep))
1452 1452
1453 1453 def splitpath(path):
1454 1454 '''Split path by os.sep.
1455 1455 Note that this function does not use os.altsep because this is
1456 1456 an alternative to a simple "xxx.split(os.sep)".
1457 1457 It is recommended to use os.path.normpath() before using this
1458 1458 function if needed.'''
1459 1459 return path.split(pycompat.ossep)
1460 1460
1461 1461 def gui():
1462 1462 '''Are we running in a GUI?'''
1463 1463 if pycompat.sysplatform == 'darwin':
1464 1464 if 'SSH_CONNECTION' in encoding.environ:
1465 1465 # handle SSH access to a box where the user is logged in
1466 1466 return False
1467 1467 elif getattr(osutil, 'isgui', None):
1468 1468 # check if a CoreGraphics session is available
1469 1469 return osutil.isgui()
1470 1470 else:
1471 1471 # pure build; use a safe default
1472 1472 return True
1473 1473 else:
1474 1474 return pycompat.osname == "nt" or encoding.environ.get("DISPLAY")
1475 1475
1476 1476 def mktempcopy(name, emptyok=False, createmode=None):
1477 1477 """Create a temporary file with the same contents from name
1478 1478
1479 1479 The permission bits are copied from the original file.
1480 1480
1481 1481 If the temporary file is going to be truncated immediately, you
1482 1482 can use emptyok=True as an optimization.
1483 1483
1484 1484 Returns the name of the temporary file.
1485 1485 """
1486 1486 d, fn = os.path.split(name)
1487 1487 fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
1488 1488 os.close(fd)
1489 1489 # Temporary files are created with mode 0600, which is usually not
1490 1490 # what we want. If the original file already exists, just copy
1491 1491 # its mode. Otherwise, manually obey umask.
1492 1492 copymode(name, temp, createmode)
1493 1493 if emptyok:
1494 1494 return temp
1495 1495 try:
1496 1496 try:
1497 1497 ifp = posixfile(name, "rb")
1498 1498 except IOError as inst:
1499 1499 if inst.errno == errno.ENOENT:
1500 1500 return temp
1501 1501 if not getattr(inst, 'filename', None):
1502 1502 inst.filename = name
1503 1503 raise
1504 1504 ofp = posixfile(temp, "wb")
1505 1505 for chunk in filechunkiter(ifp):
1506 1506 ofp.write(chunk)
1507 1507 ifp.close()
1508 1508 ofp.close()
1509 1509 except: # re-raises
1510 1510 try: os.unlink(temp)
1511 1511 except OSError: pass
1512 1512 raise
1513 1513 return temp
1514 1514
1515 1515 class filestat(object):
1516 1516 """help to exactly detect change of a file
1517 1517
1518 1518 The 'stat' attribute is the result of 'os.stat()' if the specified
1519 1519 'path' exists. Otherwise, it is None. This avoids a preparatory
1520 1520 'exists()' check on the caller's side.
1521 1521 """
1522 1522 def __init__(self, path):
1523 1523 try:
1524 1524 self.stat = os.stat(path)
1525 1525 except OSError as err:
1526 1526 if err.errno != errno.ENOENT:
1527 1527 raise
1528 1528 self.stat = None
1529 1529
1530 1530 __hash__ = object.__hash__
1531 1531
1532 1532 def __eq__(self, old):
1533 1533 try:
1534 1534 # if ambiguity between stat of new and old file is
1535 1535 # avoided, comparison of size, ctime and mtime is enough
1536 1536 # to exactly detect change of a file regardless of platform
1537 1537 return (self.stat.st_size == old.stat.st_size and
1538 1538 self.stat.st_ctime == old.stat.st_ctime and
1539 1539 self.stat.st_mtime == old.stat.st_mtime)
1540 1540 except AttributeError:
1541 1541 return False
1542 1542
1543 1543 def isambig(self, old):
1544 1544 """Examine whether new (= self) stat is ambiguous against old one
1545 1545
1546 1546 "S[N]" below means stat of a file at N-th change:
1547 1547
1548 1548 - S[n-1].ctime < S[n].ctime: can detect change of a file
1549 1549 - S[n-1].ctime == S[n].ctime
1550 1550 - S[n-1].ctime < S[n].mtime: means natural advancing (*1)
1551 1551 - S[n-1].ctime == S[n].mtime: is ambiguous (*2)
1552 1552 - S[n-1].ctime > S[n].mtime: never occurs naturally (don't care)
1553 1553 - S[n-1].ctime > S[n].ctime: never occurs naturally (don't care)
1554 1554
1555 1555 Case (*2) above means that a file was changed twice or more
1556 1556 within the same second (= S[n-1].ctime), so comparison of
1557 1557 timestamps is ambiguous.
1558 1558 
1559 1559 The basic idea for avoiding such ambiguity is to "advance mtime
1560 1560 by 1 sec, if the timestamp is ambiguous".
1561 1561
1562 1562 But advancing mtime only in case (*2) doesn't work as
1563 1563 expected, because naturally advanced S[n].mtime in case (*1)
1564 1564 might be equal to manually advanced S[n-1 or earlier].mtime.
1565 1565
1566 1566 Therefore, all "S[n-1].ctime == S[n].ctime" cases should be
1567 1567 treated as ambiguous regardless of mtime, to avoid overlooking
1568 1568 a collision between such mtimes.
1569 1569
1570 1570 Advancing mtime "if isambig(oldstat)" ensures "S[n-1].mtime !=
1571 1571 S[n].mtime", even if size of a file isn't changed.
1572 1572 """
1573 1573 try:
1574 1574 return (self.stat.st_ctime == old.stat.st_ctime)
1575 1575 except AttributeError:
1576 1576 return False
1577 1577
1578 1578 def avoidambig(self, path, old):
1579 1579 """Change file stat of specified path to avoid ambiguity
1580 1580
1581 1581 'old' should be previous filestat of 'path'.
1582 1582
1583 1583 This skips avoiding ambiguity, if a process doesn't have
1584 1584 appropriate privileges for 'path'.
1585 1585 """
1586 1586 advanced = (old.stat.st_mtime + 1) & 0x7fffffff
1587 1587 try:
1588 1588 os.utime(path, (advanced, advanced))
1589 1589 except OSError as inst:
1590 1590 if inst.errno == errno.EPERM:
1591 1591 # utime() on the file created by another user causes EPERM,
1592 1592 # if a process doesn't have appropriate privileges
1593 1593 return
1594 1594 raise
1595 1595
1596 1596 def __ne__(self, other):
1597 1597 return not self == other
1598 1598
1599 1599 class atomictempfile(object):
1600 1600 '''writable file object that atomically updates a file
1601 1601
1602 1602 All writes will go to a temporary copy of the original file. Call
1603 1603 close() when you are done writing, and atomictempfile will rename
1604 1604 the temporary copy to the original name, making the changes
1605 1605 visible. If the object is destroyed without being closed, all your
1606 1606 writes are discarded.
1607 1607
1608 1608 checkambig argument of constructor is used with filestat, and is
1609 1609 useful only if target file is guarded by any lock (e.g. repo.lock
1610 1610 or repo.wlock).
1611 1611 '''
1612 1612 def __init__(self, name, mode='w+b', createmode=None, checkambig=False):
1613 1613 self.__name = name # permanent name
1614 1614 self._tempname = mktempcopy(name, emptyok=('w' in mode),
1615 1615 createmode=createmode)
1616 1616 self._fp = posixfile(self._tempname, mode)
1617 1617 self._checkambig = checkambig
1618 1618
1619 1619 # delegated methods
1620 1620 self.read = self._fp.read
1621 1621 self.write = self._fp.write
1622 1622 self.seek = self._fp.seek
1623 1623 self.tell = self._fp.tell
1624 1624 self.fileno = self._fp.fileno
1625 1625
1626 1626 def close(self):
1627 1627 if not self._fp.closed:
1628 1628 self._fp.close()
1629 1629 filename = localpath(self.__name)
1630 1630 oldstat = self._checkambig and filestat(filename)
1631 1631 if oldstat and oldstat.stat:
1632 1632 rename(self._tempname, filename)
1633 1633 newstat = filestat(filename)
1634 1634 if newstat.isambig(oldstat):
1635 1635 # stat of changed file is ambiguous to original one
1636 1636 advanced = (oldstat.stat.st_mtime + 1) & 0x7fffffff
1637 1637 os.utime(filename, (advanced, advanced))
1638 1638 else:
1639 1639 rename(self._tempname, filename)
1640 1640
1641 1641 def discard(self):
1642 1642 if not self._fp.closed:
1643 1643 try:
1644 1644 os.unlink(self._tempname)
1645 1645 except OSError:
1646 1646 pass
1647 1647 self._fp.close()
1648 1648
1649 1649 def __del__(self):
1650 1650 if safehasattr(self, '_fp'): # constructor actually did something
1651 1651 self.discard()
1652 1652
1653 1653 def __enter__(self):
1654 1654 return self
1655 1655
1656 1656 def __exit__(self, exctype, excvalue, traceback):
1657 1657 if exctype is not None:
1658 1658 self.discard()
1659 1659 else:
1660 1660 self.close()
1661 1661
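# Illustrative use of atomictempfile as a context manager (a sketch,
# not part of the original file; 'somefile' is hypothetical): writes
# become visible only on a clean exit, while an exception discards
# the temporary copy instead.
#
#   with atomictempfile('somefile', 'wb') as fp:
#       fp.write('new content')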
1662 1662 def unlinkpath(f, ignoremissing=False):
1663 1663 """unlink and remove the directory if it is empty"""
1664 1664 if ignoremissing:
1665 1665 tryunlink(f)
1666 1666 else:
1667 1667 unlink(f)
1668 1668 # try removing directories that might now be empty
1669 1669 try:
1670 1670 removedirs(os.path.dirname(f))
1671 1671 except OSError:
1672 1672 pass
1673 1673
1674 1674 def tryunlink(f):
1675 1675 """Attempt to remove a file, ignoring ENOENT errors."""
1676 1676 try:
1677 1677 unlink(f)
1678 1678 except OSError as e:
1679 1679 if e.errno != errno.ENOENT:
1680 1680 raise
1681 1681
1682 1682 def makedirs(name, mode=None, notindexed=False):
1683 1683 """recursive directory creation with parent mode inheritance
1684 1684
1685 1685 Newly created directories are marked as "not to be indexed by
1686 1686 the content indexing service", if ``notindexed`` is specified
1687 1687 for "write" mode access.
1688 1688 """
1689 1689 try:
1690 1690 makedir(name, notindexed)
1691 1691 except OSError as err:
1692 1692 if err.errno == errno.EEXIST:
1693 1693 return
1694 1694 if err.errno != errno.ENOENT or not name:
1695 1695 raise
1696 1696 parent = os.path.dirname(os.path.abspath(name))
1697 1697 if parent == name:
1698 1698 raise
1699 1699 makedirs(parent, mode, notindexed)
1700 1700 try:
1701 1701 makedir(name, notindexed)
1702 1702 except OSError as err:
1703 1703 # Catch EEXIST to handle races
1704 1704 if err.errno == errno.EEXIST:
1705 1705 return
1706 1706 raise
1707 1707 if mode is not None:
1708 1708 os.chmod(name, mode)
1709 1709
1710 1710 def readfile(path):
1711 1711 with open(path, 'rb') as fp:
1712 1712 return fp.read()
1713 1713
1714 1714 def writefile(path, text):
1715 1715 with open(path, 'wb') as fp:
1716 1716 fp.write(text)
1717 1717
1718 1718 def appendfile(path, text):
1719 1719 with open(path, 'ab') as fp:
1720 1720 fp.write(text)
1721 1721
1722 1722 class chunkbuffer(object):
1723 1723 """Allow arbitrary sized chunks of data to be efficiently read from an
1724 1724 iterator over chunks of arbitrary size."""
1725 1725
1726 1726 def __init__(self, in_iter):
1727 1727 """in_iter is the iterator that's iterating over the input chunks."""
1728 1728 def splitbig(chunks):
1729 1729 for chunk in chunks:
1730 1730 if len(chunk) > 2**20:
1731 1731 pos = 0
1732 1732 while pos < len(chunk):
1733 1733 end = pos + 2 ** 18
1734 1734 yield chunk[pos:end]
1735 1735 pos = end
1736 1736 else:
1737 1737 yield chunk
1738 1738 self.iter = splitbig(in_iter)
1739 1739 self._queue = collections.deque()
1740 1740 self._chunkoffset = 0
1741 1741
1742 1742 def read(self, l=None):
1743 1743 """Read L bytes of data from the iterator of chunks of data.
1744 1744 Returns less than L bytes if the iterator runs dry.
1745 1745
1746 1746 If size parameter is omitted, read everything"""
1747 1747 if l is None:
1748 1748 return ''.join(self.iter)
1749 1749
1750 1750 left = l
1751 1751 buf = []
1752 1752 queue = self._queue
1753 1753 while left > 0:
1754 1754 # refill the queue
1755 1755 if not queue:
1756 1756 target = 2**18
1757 1757 for chunk in self.iter:
1758 1758 queue.append(chunk)
1759 1759 target -= len(chunk)
1760 1760 if target <= 0:
1761 1761 break
1762 1762 if not queue:
1763 1763 break
1764 1764
1765 1765 # The easy way to do this would be to queue.popleft(), modify the
1766 1766 # chunk (if necessary), then queue.appendleft(). However, for cases
1767 1767 # where we read partial chunk content, this incurs 2 dequeue
1768 1768 # mutations and creates a new str for the remaining chunk in the
1769 1769 # queue. Our code below avoids this overhead.
1770 1770
1771 1771 chunk = queue[0]
1772 1772 chunkl = len(chunk)
1773 1773 offset = self._chunkoffset
1774 1774
1775 1775 # Use full chunk.
1776 1776 if offset == 0 and left >= chunkl:
1777 1777 left -= chunkl
1778 1778 queue.popleft()
1779 1779 buf.append(chunk)
1780 1780 # self._chunkoffset remains at 0.
1781 1781 continue
1782 1782
1783 1783 chunkremaining = chunkl - offset
1784 1784
1785 1785 # Use all of unconsumed part of chunk.
1786 1786 if left >= chunkremaining:
1787 1787 left -= chunkremaining
1788 1788 queue.popleft()
1789 1789 # offset == 0 is enabled by block above, so this won't merely
1790 1790 # copy via ``chunk[0:]``.
1791 1791 buf.append(chunk[offset:])
1792 1792 self._chunkoffset = 0
1793 1793
1794 1794 # Partial chunk needed.
1795 1795 else:
1796 1796 buf.append(chunk[offset:offset + left])
1797 1797 self._chunkoffset += left
1798 1798 left -= chunkremaining
1799 1799
1800 1800 return ''.join(buf)
1801 1801
1802 1802 def filechunkiter(f, size=131072, limit=None):
1803 1803 """Create a generator that produces the data in the file size
1804 1804 (default 131072) bytes at a time, up to optional limit (default is
1805 1805 to read all data). Chunks may be less than size bytes if the
1806 1806 chunk is the last chunk in the file, or the file is a socket or
1807 1807 some other type of file that sometimes reads less data than is
1808 1808 requested."""
1809 1809 assert size >= 0
1810 1810 assert limit is None or limit >= 0
1811 1811 while True:
1812 1812 if limit is None:
1813 1813 nbytes = size
1814 1814 else:
1815 1815 nbytes = min(limit, size)
1816 1816 s = nbytes and f.read(nbytes)
1817 1817 if not s:
1818 1818 break
1819 1819 if limit:
1820 1820 limit -= len(s)
1821 1821 yield s
1822 1822
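# Sketch combining the two helpers above (illustrative; 'somefile' is
# hypothetical): re-chunk a file into exact-size reads.
#
#   fp = posixfile('somefile', 'rb')
#   buf = chunkbuffer(filechunkiter(fp))
#   header = buf.read(16)   # exactly 16 bytes unless the data runs dry
#   rest = buf.read()       # everything remaining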
1823 1823 def makedate(timestamp=None):
1824 1824 '''Return a unix timestamp (or the current time) as a (unixtime,
1825 1825 offset) tuple based on the local timezone.'''
1826 1826 if timestamp is None:
1827 1827 timestamp = time.time()
1828 1828 if timestamp < 0:
1829 1829 hint = _("check your clock")
1830 1830 raise Abort(_("negative timestamp: %d") % timestamp, hint=hint)
1831 1831 delta = (datetime.datetime.utcfromtimestamp(timestamp) -
1832 1832 datetime.datetime.fromtimestamp(timestamp))
1833 1833 tz = delta.days * 86400 + delta.seconds
1834 1834 return timestamp, tz
1835 1835
1836 1836 def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
1837 1837 """represent a (unixtime, offset) tuple as a localized time.
1838 1838 unixtime is seconds since the epoch, and offset is the time zone's
1839 1839 number of seconds away from UTC.
1840 1840
1841 1841 >>> datestr((0, 0))
1842 1842 'Thu Jan 01 00:00:00 1970 +0000'
1843 1843 >>> datestr((42, 0))
1844 1844 'Thu Jan 01 00:00:42 1970 +0000'
1845 1845 >>> datestr((-42, 0))
1846 1846 'Wed Dec 31 23:59:18 1969 +0000'
1847 1847 >>> datestr((0x7fffffff, 0))
1848 1848 'Tue Jan 19 03:14:07 2038 +0000'
1849 1849 >>> datestr((-0x80000000, 0))
1850 1850 'Fri Dec 13 20:45:52 1901 +0000'
1851 1851 """
1852 1852 t, tz = date or makedate()
1853 1853 if "%1" in format or "%2" in format or "%z" in format:
1854 1854 sign = (tz > 0) and "-" or "+"
1855 1855 minutes = abs(tz) // 60
1856 1856 q, r = divmod(minutes, 60)
1857 1857 format = format.replace("%z", "%1%2")
1858 1858 format = format.replace("%1", "%c%02d" % (sign, q))
1859 1859 format = format.replace("%2", "%02d" % r)
1860 1860 d = t - tz
1861 1861 if d > 0x7fffffff:
1862 1862 d = 0x7fffffff
1863 1863 elif d < -0x80000000:
1864 1864 d = -0x80000000
1865 1865 # Never use time.gmtime() and datetime.datetime.fromtimestamp()
1866 1866 # because they use the gmtime() system call which is buggy on Windows
1867 1867 # for negative values.
1868 1868 t = datetime.datetime(1970, 1, 1) + datetime.timedelta(seconds=d)
1869 1869 s = encoding.strtolocal(t.strftime(encoding.strfromlocal(format)))
1870 1870 return s
1871 1871
1872 1872 def shortdate(date=None):
1873 1873 """turn (timestamp, tzoff) tuple into iso 8631 date."""
1874 1874 return datestr(date, format='%Y-%m-%d')
1875 1875
1876 1876 def parsetimezone(s):
1877 1877 """find a trailing timezone, if any, in string, and return a
1878 1878 (offset, remainder) pair"""
1879 1879
1880 1880 if s.endswith("GMT") or s.endswith("UTC"):
1881 1881 return 0, s[:-3].rstrip()
1882 1882
1883 1883 # Unix-style timezones [+-]hhmm
1884 1884 if len(s) >= 5 and s[-5] in "+-" and s[-4:].isdigit():
1885 1885 sign = (s[-5] == "+") and 1 or -1
1886 1886 hours = int(s[-4:-2])
1887 1887 minutes = int(s[-2:])
1888 1888 return -sign * (hours * 60 + minutes) * 60, s[:-5].rstrip()
1889 1889
1890 1890 # ISO8601 trailing Z
1891 1891 if s.endswith("Z") and s[-2:-1].isdigit():
1892 1892 return 0, s[:-1]
1893 1893
1894 1894 # ISO8601-style [+-]hh:mm
1895 1895 if (len(s) >= 6 and s[-6] in "+-" and s[-3] == ":" and
1896 1896 s[-5:-3].isdigit() and s[-2:].isdigit()):
1897 1897 sign = (s[-6] == "+") and 1 or -1
1898 1898 hours = int(s[-5:-3])
1899 1899 minutes = int(s[-2:])
1900 1900 return -sign * (hours * 60 + minutes) * 60, s[:-6]
1901 1901
1902 1902 return None, s
1903 1903
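# Expected behavior of parsetimezone, sketched (illustrative values):
#
#   parsetimezone('Fri Apr 21 13:00:00 2017 +0530')
#   # -> (-19800, 'Fri Apr 21 13:00:00 2017')
#   parsetimezone('2017-04-21T13:00:00Z')  # -> (0, '2017-04-21T13:00:00')
#   parsetimezone('no timezone here')      # -> (None, 'no timezone here')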
1904 1904 def strdate(string, format, defaults=None):
1905 1905 """parse a localized time string and return a (unixtime, offset) tuple.
1906 1906 if the string cannot be parsed, ValueError is raised."""
1907 1907 if defaults is None:
1908 1908 defaults = {}
1909 1909
1910 1910 # NOTE: unixtime = localunixtime + offset
1911 1911 offset, date = parsetimezone(string)
1912 1912
1913 1913 # add missing elements from defaults
1914 1914 usenow = False # default to using biased defaults
1915 1915 for part in ("S", "M", "HI", "d", "mb", "yY"): # decreasing specificity
1916 1916 found = [True for p in part if ("%"+p) in format]
1917 1917 if not found:
1918 1918 date += "@" + defaults[part][usenow]
1919 1919 format += "@%" + part[0]
1920 1920 else:
1921 1921 # We've found a specific time element, less specific time
1922 1922 # elements are relative to today
1923 1923 usenow = True
1924 1924
1925 1925 timetuple = time.strptime(date, format)
1926 1926 localunixtime = int(calendar.timegm(timetuple))
1927 1927 if offset is None:
1928 1928 # local timezone
1929 1929 unixtime = int(time.mktime(timetuple))
1930 1930 offset = unixtime - localunixtime
1931 1931 else:
1932 1932 unixtime = localunixtime + offset
1933 1933 return unixtime, offset
1934 1934
1935 1935 def parsedate(date, formats=None, bias=None):
1936 1936 """parse a localized date/time and return a (unixtime, offset) tuple.
1937 1937
1938 1938 The date may be a "unixtime offset" string or in one of the specified
1939 1939 formats. If the date already is a (unixtime, offset) tuple, it is returned.
1940 1940
1941 1941 >>> parsedate(' today ') == parsedate(\
1942 1942 datetime.date.today().strftime('%b %d'))
1943 1943 True
1944 1944 >>> parsedate( 'yesterday ') == parsedate((datetime.date.today() -\
1945 1945 datetime.timedelta(days=1)\
1946 1946 ).strftime('%b %d'))
1947 1947 True
1948 1948 >>> now, tz = makedate()
1949 1949 >>> strnow, strtz = parsedate('now')
1950 1950 >>> (strnow - now) < 1
1951 1951 True
1952 1952 >>> tz == strtz
1953 1953 True
1954 1954 """
1955 1955 if bias is None:
1956 1956 bias = {}
1957 1957 if not date:
1958 1958 return 0, 0
1959 1959 if isinstance(date, tuple) and len(date) == 2:
1960 1960 return date
1961 1961 if not formats:
1962 1962 formats = defaultdateformats
1963 1963 date = date.strip()
1964 1964
1965 1965 if date == 'now' or date == _('now'):
1966 1966 return makedate()
1967 1967 if date == 'today' or date == _('today'):
1968 1968 date = datetime.date.today().strftime('%b %d')
1969 1969 elif date == 'yesterday' or date == _('yesterday'):
1970 1970 date = (datetime.date.today() -
1971 1971 datetime.timedelta(days=1)).strftime('%b %d')
1972 1972
1973 1973 try:
1974 1974 when, offset = map(int, date.split(' '))
1975 1975 except ValueError:
1976 1976 # fill out defaults
1977 1977 now = makedate()
1978 1978 defaults = {}
1979 1979 for part in ("d", "mb", "yY", "HI", "M", "S"):
1980 1980 # this piece is for rounding the specific end of unknowns
1981 1981 b = bias.get(part)
1982 1982 if b is None:
1983 if part[0] in "HMS":
1983 if part[0:1] in "HMS":
1984 1984 b = "00"
1985 1985 else:
1986 1986 b = "0"
1987 1987
1988 1988 # this piece is for matching the generic end to today's date
1989 n = datestr(now, "%" + part[0])
1989 n = datestr(now, "%" + part[0:1])
1990 1990
1991 1991 defaults[part] = (b, n)
1992 1992
1993 1993 for format in formats:
1994 1994 try:
1995 1995 when, offset = strdate(date, format, defaults)
1996 1996 except (ValueError, OverflowError):
1997 1997 pass
1998 1998 else:
1999 1999 break
2000 2000 else:
2001 2001 raise Abort(_('invalid date: %r') % date)
2002 2002 # validate explicit (probably user-specified) date and
2003 2003 # time zone offset. values must fit in signed 32 bits for
2004 2004 # current 32-bit linux runtimes. timezones go from UTC-12
2005 2005 # to UTC+14
2006 2006 if when < -0x80000000 or when > 0x7fffffff:
2007 2007 raise Abort(_('date exceeds 32 bits: %d') % when)
2008 2008 if offset < -50400 or offset > 43200:
2009 2009 raise Abort(_('impossible time zone offset: %d') % offset)
2010 2010 return when, offset
2011 2011
2012 2012 def matchdate(date):
2013 2013 """Return a function that matches a given date match specifier
2014 2014
2015 2015 Formats include:
2016 2016
2017 2017 '{date}' match a given date to the accuracy provided
2018 2018
2019 2019 '<{date}' on or before a given date
2020 2020
2021 2021 '>{date}' on or after a given date
2022 2022
2023 2023 >>> p1 = parsedate("10:29:59")
2024 2024 >>> p2 = parsedate("10:30:00")
2025 2025 >>> p3 = parsedate("10:30:59")
2026 2026 >>> p4 = parsedate("10:31:00")
2027 2027 >>> p5 = parsedate("Sep 15 10:30:00 1999")
2028 2028 >>> f = matchdate("10:30")
2029 2029 >>> f(p1[0])
2030 2030 False
2031 2031 >>> f(p2[0])
2032 2032 True
2033 2033 >>> f(p3[0])
2034 2034 True
2035 2035 >>> f(p4[0])
2036 2036 False
2037 2037 >>> f(p5[0])
2038 2038 False
2039 2039 """
2040 2040
2041 2041 def lower(date):
2042 2042 d = {'mb': "1", 'd': "1"}
2043 2043 return parsedate(date, extendeddateformats, d)[0]
2044 2044
2045 2045 def upper(date):
2046 2046 d = {'mb': "12", 'HI': "23", 'M': "59", 'S': "59"}
2047 2047 for days in ("31", "30", "29"):
2048 2048 try:
2049 2049 d["d"] = days
2050 2050 return parsedate(date, extendeddateformats, d)[0]
2051 2051 except Abort:
2052 2052 pass
2053 2053 d["d"] = "28"
2054 2054 return parsedate(date, extendeddateformats, d)[0]
2055 2055
2056 2056 date = date.strip()
2057 2057
2058 2058 if not date:
2059 2059 raise Abort(_("dates cannot consist entirely of whitespace"))
2060 2060 elif date[0] == "<":
2061 2061 if not date[1:]:
2062 2062 raise Abort(_("invalid day spec, use '<DATE'"))
2063 2063 when = upper(date[1:])
2064 2064 return lambda x: x <= when
2065 2065 elif date[0] == ">":
2066 2066 if not date[1:]:
2067 2067 raise Abort(_("invalid day spec, use '>DATE'"))
2068 2068 when = lower(date[1:])
2069 2069 return lambda x: x >= when
2070 2070 elif date[0] == "-":
2071 2071 try:
2072 2072 days = int(date[1:])
2073 2073 except ValueError:
2074 2074 raise Abort(_("invalid day spec: %s") % date[1:])
2075 2075 if days < 0:
2076 2076 raise Abort(_("%s must be nonnegative (see 'hg help dates')")
2077 2077 % date[1:])
2078 2078 when = makedate()[0] - days * 3600 * 24
2079 2079 return lambda x: x >= when
2080 2080 elif " to " in date:
2081 2081 a, b = date.split(" to ")
2082 2082 start, stop = lower(a), upper(b)
2083 2083 return lambda x: x >= start and x <= stop
2084 2084 else:
2085 2085 start, stop = lower(date), upper(date)
2086 2086 return lambda x: x >= start and x <= stop
2087 2087
2088 2088 def stringmatcher(pattern, casesensitive=True):
2089 2089 """
2090 2090 accepts a string, possibly starting with 're:' or 'literal:' prefix.
2091 2091 returns the matcher name, pattern, and matcher function.
2092 2092 missing or unknown prefixes are treated as literal matches.
2093 2093
2094 2094 helper for tests:
2095 2095 >>> def test(pattern, *tests):
2096 2096 ... kind, pattern, matcher = stringmatcher(pattern)
2097 2097 ... return (kind, pattern, [bool(matcher(t)) for t in tests])
2098 2098 >>> def itest(pattern, *tests):
2099 2099 ... kind, pattern, matcher = stringmatcher(pattern, casesensitive=False)
2100 2100 ... return (kind, pattern, [bool(matcher(t)) for t in tests])
2101 2101
2102 2102 exact matching (no prefix):
2103 2103 >>> test('abcdefg', 'abc', 'def', 'abcdefg')
2104 2104 ('literal', 'abcdefg', [False, False, True])
2105 2105
2106 2106 regex matching ('re:' prefix)
2107 2107 >>> test('re:a.+b', 'nomatch', 'fooadef', 'fooadefbar')
2108 2108 ('re', 'a.+b', [False, False, True])
2109 2109
2110 2110 force exact matches ('literal:' prefix)
2111 2111 >>> test('literal:re:foobar', 'foobar', 're:foobar')
2112 2112 ('literal', 're:foobar', [False, True])
2113 2113
2114 2114 unknown prefixes are ignored and treated as literals
2115 2115 >>> test('foo:bar', 'foo', 'bar', 'foo:bar')
2116 2116 ('literal', 'foo:bar', [False, False, True])
2117 2117
2118 2118 case insensitive regex matches
2119 2119 >>> itest('re:A.+b', 'nomatch', 'fooadef', 'fooadefBar')
2120 2120 ('re', 'A.+b', [False, False, True])
2121 2121
2122 2122 case insensitive literal matches
2123 2123 >>> itest('ABCDEFG', 'abc', 'def', 'abcdefg')
2124 2124 ('literal', 'ABCDEFG', [False, False, True])
2125 2125 """
2126 2126 if pattern.startswith('re:'):
2127 2127 pattern = pattern[3:]
2128 2128 try:
2129 2129 flags = 0
2130 2130 if not casesensitive:
2131 2131 flags = remod.I
2132 2132 regex = remod.compile(pattern, flags)
2133 2133 except remod.error as e:
2134 2134 raise error.ParseError(_('invalid regular expression: %s')
2135 2135 % e)
2136 2136 return 're', pattern, regex.search
2137 2137 elif pattern.startswith('literal:'):
2138 2138 pattern = pattern[8:]
2139 2139
2140 2140 match = pattern.__eq__
2141 2141
2142 2142 if not casesensitive:
2143 2143 ipat = encoding.lower(pattern)
2144 2144 match = lambda s: ipat == encoding.lower(s)
2145 2145 return 'literal', pattern, match
2146 2146
2147 2147 def shortuser(user):
2148 2148 """Return a short representation of a user name or email address."""
2149 2149 f = user.find('@')
2150 2150 if f >= 0:
2151 2151 user = user[:f]
2152 2152 f = user.find('<')
2153 2153 if f >= 0:
2154 2154 user = user[f + 1:]
2155 2155 f = user.find(' ')
2156 2156 if f >= 0:
2157 2157 user = user[:f]
2158 2158 f = user.find('.')
2159 2159 if f >= 0:
2160 2160 user = user[:f]
2161 2161 return user
2162 2162
2163 2163 def emailuser(user):
2164 2164 """Return the user portion of an email address."""
2165 2165 f = user.find('@')
2166 2166 if f >= 0:
2167 2167 user = user[:f]
2168 2168 f = user.find('<')
2169 2169 if f >= 0:
2170 2170 user = user[f + 1:]
2171 2171 return user
2172 2172
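# Sketches for the two helpers above (illustrative address):
#
#   shortuser('John Doe <john.doe@example.com>')   # -> 'john'
#   emailuser('John Doe <john.doe@example.com>')   # -> 'john.doe'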
2173 2173 def email(author):
2174 2174 '''get email of author.'''
2175 2175 r = author.find('>')
2176 2176 if r == -1:
2177 2177 r = None
2178 2178 return author[author.find('<') + 1:r]
2179 2179
2180 2180 def ellipsis(text, maxlength=400):
2181 2181 """Trim string to at most maxlength (default: 400) columns in display."""
2182 2182 return encoding.trim(text, maxlength, ellipsis='...')
2183 2183
2184 2184 def unitcountfn(*unittable):
2185 2185 '''return a function that renders a readable count of some quantity'''
2186 2186
2187 2187 def go(count):
2188 2188 for multiplier, divisor, format in unittable:
2189 2189 if abs(count) >= divisor * multiplier:
2190 2190 return format % (count / float(divisor))
2191 2191 return unittable[-1][2] % count
2192 2192
2193 2193 return go
2194 2194
2195 2195 def processlinerange(fromline, toline):
2196 2196 """Check that linerange <fromline>:<toline> makes sense and return a
2197 2197 0-based range.
2198 2198
2199 2199 >>> processlinerange(10, 20)
2200 2200 (9, 20)
2201 2201 >>> processlinerange(2, 1)
2202 2202 Traceback (most recent call last):
2203 2203 ...
2204 2204 ParseError: line range must be positive
2205 2205 >>> processlinerange(0, 5)
2206 2206 Traceback (most recent call last):
2207 2207 ...
2208 2208 ParseError: fromline must be strictly positive
2209 2209 """
2210 2210 if toline - fromline < 0:
2211 2211 raise error.ParseError(_("line range must be positive"))
2212 2212 if fromline < 1:
2213 2213 raise error.ParseError(_("fromline must be strictly positive"))
2214 2214 return fromline - 1, toline
2215 2215
2216 2216 bytecount = unitcountfn(
2217 2217 (100, 1 << 30, _('%.0f GB')),
2218 2218 (10, 1 << 30, _('%.1f GB')),
2219 2219 (1, 1 << 30, _('%.2f GB')),
2220 2220 (100, 1 << 20, _('%.0f MB')),
2221 2221 (10, 1 << 20, _('%.1f MB')),
2222 2222 (1, 1 << 20, _('%.2f MB')),
2223 2223 (100, 1 << 10, _('%.0f KB')),
2224 2224 (10, 1 << 10, _('%.1f KB')),
2225 2225 (1, 1 << 10, _('%.2f KB')),
2226 2226 (1, 1, _('%.0f bytes')),
2227 2227 )
2228 2228
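# The table above picks the first row whose threshold fits; a sketch
# with illustrative values:
#
#   bytecount(512)       # -> '512 bytes'
#   bytecount(1234567)   # -> '1.18 MB'  (two decimals below 10 MB)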
2229 2229 # Matches a single EOL which can either be a CRLF where repeated CR
2230 2230 # are removed or a LF. We do not care about old Macintosh files, so a
2231 2231 # stray CR is an error.
2232 2232 _eolre = remod.compile(br'\r*\n')
2233 2233
2234 2234 def tolf(s):
2235 2235 return _eolre.sub('\n', s)
2236 2236
2237 2237 def tocrlf(s):
2238 2238 return _eolre.sub('\r\n', s)
2239 2239
2240 2240 if pycompat.oslinesep == '\r\n':
2241 2241 tonativeeol = tocrlf
2242 2242 fromnativeeol = tolf
2243 2243 else:
2244 2244 tonativeeol = pycompat.identity
2245 2245 fromnativeeol = pycompat.identity
2246 2246
2247 2247 def escapestr(s):
2248 2248 # call underlying function of s.encode('string_escape') directly for
2249 2249 # Python 3 compatibility
2250 2250 return codecs.escape_encode(s)[0]
2251 2251
2252 2252 def unescapestr(s):
2253 2253 return codecs.escape_decode(s)[0]
2254 2254
2255 2255 def uirepr(s):
2256 2256 # Avoid double backslash in Windows path repr()
2257 2257 return repr(s).replace('\\\\', '\\')
2258 2258
2259 2259 # delay import of textwrap
2260 2260 def MBTextWrapper(**kwargs):
2261 2261 class tw(textwrap.TextWrapper):
2262 2262 """
2263 2263 Extend TextWrapper for width-awareness.
2264 2264
2265 2265 Neither the number of 'bytes' in any encoding nor the number of
2266 2266 'characters' is appropriate for calculating the terminal columns
2267 2267 occupied by a given string.
2268 2268 The original TextWrapper implementation uses the built-in 'len()'
2269 2269 directly, so overriding is needed to use the width of each character.
2270 2270 
2271 2271 In addition, characters classified as 'ambiguous' width are
2272 2272 treated as wide in East Asian locales, but as narrow elsewhere.
2273 2273 
2274 2274 This requires a user decision to determine the width of such characters.
2275 2275 """
2276 2276 def _cutdown(self, ucstr, space_left):
2277 2277 l = 0
2278 2278 colwidth = encoding.ucolwidth
2279 2279 for i in xrange(len(ucstr)):
2280 2280 l += colwidth(ucstr[i])
2281 2281 if space_left < l:
2282 2282 return (ucstr[:i], ucstr[i:])
2283 2283 return ucstr, ''
2284 2284
2285 2285 # overriding of base class
2286 2286 def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
2287 2287 space_left = max(width - cur_len, 1)
2288 2288
2289 2289 if self.break_long_words:
2290 2290 cut, res = self._cutdown(reversed_chunks[-1], space_left)
2291 2291 cur_line.append(cut)
2292 2292 reversed_chunks[-1] = res
2293 2293 elif not cur_line:
2294 2294 cur_line.append(reversed_chunks.pop())
2295 2295
2296 2296 # this overriding code is imported from TextWrapper of Python 2.6
2297 2297 # to calculate columns of string by 'encoding.ucolwidth()'
2298 2298 def _wrap_chunks(self, chunks):
2299 2299 colwidth = encoding.ucolwidth
2300 2300
2301 2301 lines = []
2302 2302 if self.width <= 0:
2303 2303 raise ValueError("invalid width %r (must be > 0)" % self.width)
2304 2304
2305 2305 # Arrange in reverse order so items can be efficiently popped
2306 2306 # from a stack of chunks.
2307 2307 chunks.reverse()

            while chunks:

                # Start the list of chunks that will make up the current line.
                # cur_len is just the length of all the chunks in cur_line.
                cur_line = []
                cur_len = 0

                # Figure out which static string will prefix this line.
                if lines:
                    indent = self.subsequent_indent
                else:
                    indent = self.initial_indent

                # Maximum width for this line.
                width = self.width - len(indent)

                # First chunk on line is whitespace -- drop it, unless this
                # is the very beginning of the text (i.e. no lines started yet).
                if self.drop_whitespace and chunks[-1].strip() == '' and lines:
                    del chunks[-1]

                while chunks:
                    l = colwidth(chunks[-1])

                    # Can at least squeeze this chunk onto the current line.
                    if cur_len + l <= width:
                        cur_line.append(chunks.pop())
                        cur_len += l

                    # Nope, this line is full.
                    else:
                        break

                # The current line is full, and the next chunk is too big to
                # fit on *any* line (not just this one).
                if chunks and colwidth(chunks[-1]) > width:
                    self._handle_long_word(chunks, cur_line, cur_len, width)

                # If the last chunk on this line is all whitespace, drop it.
                if (self.drop_whitespace and
                    cur_line and cur_line[-1].strip() == ''):
                    del cur_line[-1]

                # Convert current line back to a string and store it in list
                # of all lines (return value).
                if cur_line:
                    lines.append(indent + ''.join(cur_line))

            return lines

    global MBTextWrapper
    MBTextWrapper = tw
    return tw(**kwargs)

def wrap(line, width, initindent='', hangindent=''):
    maxindent = max(len(hangindent), len(initindent))
    if width <= maxindent:
        # adjust for weird terminal size
        width = max(78, maxindent + 1)
    line = line.decode(pycompat.sysstr(encoding.encoding),
                       pycompat.sysstr(encoding.encodingmode))
    initindent = initindent.decode(pycompat.sysstr(encoding.encoding),
                                   pycompat.sysstr(encoding.encodingmode))
    hangindent = hangindent.decode(pycompat.sysstr(encoding.encoding),
                                   pycompat.sysstr(encoding.encodingmode))
    wrapper = MBTextWrapper(width=width,
                            initial_indent=initindent,
                            subsequent_indent=hangindent)
    return wrapper.fill(line).encode(pycompat.sysstr(encoding.encoding))
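
# Editor's note: an illustrative call (editor's sketch). wrap() decodes the
# byte string using the local encoding, wraps on terminal columns rather than
# byte length, and re-encodes the result:
#
#   >>> wrap('aaa bbb ccc', width=7)
#   'aaa bbb\nccc'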

if (pyplatform.python_implementation() == 'CPython' and
    sys.version_info < (3, 0)):
    # There is an issue in CPython that some IO methods do not handle EINTR
    # correctly. The following table shows which CPython versions (and
    # functions) are affected (buggy: has the EINTR bug, okay: otherwise):
    #
    #                | < 2.7.4 | 2.7.4 to 2.7.12 | >= 3.0
    #   --------------------------------------------------
    #    fp.__iter__ | buggy   | buggy           | okay
    #    fp.read*    | buggy   | okay [1]        | okay
    #
    # [1]: fixed by changeset 67dc99a989cd in the cpython hg repo.
    #
    # Here we work around the EINTR issue for fileobj.__iter__. Other methods
    # like "read*" are ignored for now, as Python < 2.7.4 is a minority.
    #
    # Although we can work around the EINTR issue for fp.__iter__, it is
    # slower: "for x in fp" is 4x faster than "for x in iter(fp.readline, '')"
    # in CPython 2, because CPython 2 maintains an internal readahead buffer
    # for fp.__iter__ but not for the other fp.read* methods.
    #
    # On modern systems like Linux, the "read" syscall cannot be interrupted
    # when reading "fast" files like on-disk files. So the EINTR issue only
    # affects things like pipes, sockets, ttys etc. We treat "normal" (S_ISREG)
    # files approximately as "fast" files and use the fast (unsafe) code path,
    # to minimize the performance impact.
    if sys.version_info >= (2, 7, 4):
        # fp.readline deals with EINTR correctly, use it as a workaround.
        def _safeiterfile(fp):
            return iter(fp.readline, '')
    else:
        # fp.read* are broken too, manually deal with EINTR in a stupid way.
        # note: this may block longer than necessary because of bufsize.
        def _safeiterfile(fp, bufsize=4096):
            fd = fp.fileno()
            line = ''
            while True:
                try:
                    buf = os.read(fd, bufsize)
                except OSError as ex:
                    # os.read only raises EINTR before any data is read
                    if ex.errno == errno.EINTR:
                        continue
                    else:
                        raise
                line += buf
                if '\n' in buf:
                    splitted = line.splitlines(True)
                    line = ''
                    for l in splitted:
                        if l[-1] == '\n':
                            yield l
                        else:
                            line = l
                if not buf:
                    break
            if line:
                yield line

    def iterfile(fp):
        fastpath = True
        if type(fp) is file:
            fastpath = stat.S_ISREG(os.fstat(fp.fileno()).st_mode)
        if fastpath:
            return fp
        else:
            return _safeiterfile(fp)
else:
    # PyPy and CPython 3 do not have the EINTR issue thus no workaround needed.
    def iterfile(fp):
        return fp
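
# Editor's note: a usage sketch ('process' and the fifo path are
# hypothetical). Wrapping a file object in iterfile() makes "for line in ..."
# safe against EINTR on the affected CPython 2 versions, while regular
# on-disk (S_ISREG) files keep the fast built-in iterator:
#
#   fp = posixfile('some-fifo', 'rb')
#   for line in iterfile(fp):
#       process(line)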

def iterlines(iterator):
    for chunk in iterator:
        for line in chunk.splitlines():
            yield line

def expandpath(path):
    return os.path.expanduser(os.path.expandvars(path))

def hgcmd():
    """Return the command used to execute the current hg

    This is different from hgexecutable() because on Windows we want
    to avoid things that open new shell windows, like batch files, so
    we get either the python call or the current executable.
    """
    if mainfrozen():
        if getattr(sys, 'frozen', None) == 'macosx_app':
            # Env variable set by py2app
            return [encoding.environ['EXECUTABLEPATH']]
        else:
            return [pycompat.sysexecutable]
    return gethgcmd()

def rundetached(args, condfn):
    """Execute the argument list in a detached process.

    condfn is a callable which is called repeatedly and should return
    True once the child process is known to have started successfully.
    At this point, the child process PID is returned. If the child
    process fails to start or finishes before condfn() evaluates to
    True, return -1.
    """
    # The Windows case is easier because the child process either
    # successfully starts and validates the condition or exits on
    # failure. We just poll on its PID. On Unix, if the child process
    # fails to start, it will be left in a zombie state until the
    # parent waits on it, which we cannot do since we expect a
    # long-running process on success. Instead we listen for SIGCHLD
    # telling us our child process terminated.
    terminated = set()
    def handler(signum, frame):
        terminated.add(os.wait())
    prevhandler = None
    SIGCHLD = getattr(signal, 'SIGCHLD', None)
    if SIGCHLD is not None:
        prevhandler = signal.signal(SIGCHLD, handler)
    try:
        pid = spawndetached(args)
        while not condfn():
            if ((pid in terminated or not testpid(pid))
                and not condfn()):
                return -1
            time.sleep(0.1)
        return pid
    finally:
        if prevhandler is not None:
            signal.signal(signal.SIGCHLD, prevhandler)

def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
    """Return the result of interpolating items in the mapping into string s.

    prefix is a single character string, or a two character string with
    a backslash as the first character if the prefix needs to be escaped in
    a regular expression.

    fn is an optional function that will be applied to the replacement text
    just before replacement.

    escape_prefix is an optional flag that allows a doubled prefix to be
    used as an escape for a literal prefix character.
    """
    fn = fn or (lambda s: s)
    patterns = '|'.join(mapping.keys())
    if escape_prefix:
        patterns += '|' + prefix
        if len(prefix) > 1:
            prefix_char = prefix[1:]
        else:
            prefix_char = prefix
        mapping[prefix_char] = prefix_char
    r = remod.compile(r'%s(%s)' % (prefix, patterns))
    return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)
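
# Editor's note: a doctest-style sketch of interpolate(). With prefix '%',
# each '%key' occurrence is replaced by mapping['key']:
#
#   >>> interpolate('%', {'foo': 'bar'}, 'say %foo')
#   'say bar'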

def getport(port):
    """Return the port for a given network service.

    If port is an integer, it's returned as is. If it's a string, it's
    looked up using socket.getservbyname(). If there's no matching
    service, error.Abort is raised.
    """
    try:
        return int(port)
    except ValueError:
        pass

    try:
        return socket.getservbyname(port)
    except socket.error:
        raise Abort(_("no port number associated with service '%s'") % port)
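
# Editor's note: illustrative calls (editor's sketch; the 'http' lookup
# assumes a standard services database is present on the host):
#
#   >>> getport(8080)
#   8080
#   >>> getport('8080')
#   8080
#   >>> getport('http')
#   80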

_booleans = {'1': True, 'yes': True, 'true': True, 'on': True, 'always': True,
             '0': False, 'no': False, 'false': False, 'off': False,
             'never': False}

def parsebool(s):
    """Parse s into a boolean.

    If s is not a valid boolean, returns None.
    """
    return _booleans.get(s.lower(), None)
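
# Editor's note: a quick sketch of parsebool()'s tri-state result:
#
#   >>> parsebool('yes'), parsebool('Off'), parsebool('maybe')
#   (True, False, None)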

_hextochr = dict((a + b, chr(int(a + b, 16)))
                 for a in string.hexdigits for b in string.hexdigits)

class url(object):
    r"""Reliable URL parser.

    This parses URLs and provides attributes for the following
    components:

    <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>

    Missing components are set to None. The only exception is
    fragment, which is set to '' if present but empty.

    If parsefragment is False, fragment is included in query. If
    parsequery is False, query is included in path. If both are
    False, both fragment and query are included in path.

    See http://www.ietf.org/rfc/rfc2396.txt for more information.

    Note that for backward compatibility reasons, bundle URLs do not
    take host names. That means 'bundle://../' has a path of '../'.

    Examples:

    >>> url('http://www.ietf.org/rfc/rfc2396.txt')
    <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
    >>> url('ssh://[::1]:2200//home/joe/repo')
    <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
    >>> url('file:///home/joe/repo')
    <url scheme: 'file', path: '/home/joe/repo'>
    >>> url('file:///c:/temp/foo/')
    <url scheme: 'file', path: 'c:/temp/foo/'>
    >>> url('bundle:foo')
    <url scheme: 'bundle', path: 'foo'>
    >>> url('bundle://../foo')
    <url scheme: 'bundle', path: '../foo'>
    >>> url(r'c:\foo\bar')
    <url path: 'c:\\foo\\bar'>
    >>> url(r'\\blah\blah\blah')
    <url path: '\\\\blah\\blah\\blah'>
    >>> url(r'\\blah\blah\blah#baz')
    <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
    >>> url(r'file:///C:\users\me')
    <url scheme: 'file', path: 'C:\\users\\me'>

    Authentication credentials:

    >>> url('ssh://joe:xyz@x/repo')
    <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
    >>> url('ssh://joe@x/repo')
    <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>

    Query strings and fragments:

    >>> url('http://host/a?b#c')
    <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
    >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
    <url scheme: 'http', host: 'host', path: 'a?b#c'>

    Empty path:

    >>> url('')
    <url path: ''>
    >>> url('#a')
    <url path: '', fragment: 'a'>
    >>> url('http://host/')
    <url scheme: 'http', host: 'host', path: ''>
    >>> url('http://host/#a')
    <url scheme: 'http', host: 'host', path: '', fragment: 'a'>

    Only scheme:

    >>> url('http:')
    <url scheme: 'http'>
    """

    _safechars = "!~*'()+"
    _safepchars = "/!~*'()+:\\"
    _matchscheme = remod.compile('^[a-zA-Z0-9+.\\-]+:').match

    def __init__(self, path, parsequery=True, parsefragment=True):
        # We slowly chomp away at path until we have only the path left
        self.scheme = self.user = self.passwd = self.host = None
        self.port = self.path = self.query = self.fragment = None
        self._localpath = True
        self._hostport = ''
        self._origpath = path

        if parsefragment and '#' in path:
            path, self.fragment = path.split('#', 1)

        # special case for Windows drive letters and UNC paths
        if hasdriveletter(path) or path.startswith('\\\\'):
            self.path = path
            return

        # For compatibility reasons, we can't handle bundle paths as
        # normal URLs
        if path.startswith('bundle:'):
            self.scheme = 'bundle'
            path = path[7:]
            if path.startswith('//'):
                path = path[2:]
            self.path = path
            return

        if self._matchscheme(path):
            parts = path.split(':', 1)
            if parts[0]:
                self.scheme, path = parts
                self._localpath = False

        if not path:
            path = None
            if self._localpath:
                self.path = ''
                return
        else:
            if self._localpath:
                self.path = path
                return

        if parsequery and '?' in path:
            path, self.query = path.split('?', 1)
            if not path:
                path = None
            if not self.query:
                self.query = None

        # // is required to specify a host/authority
        if path and path.startswith('//'):
            parts = path[2:].split('/', 1)
            if len(parts) > 1:
                self.host, path = parts
            else:
                self.host = parts[0]
                path = None
            if not self.host:
                self.host = None
                # path of file:///d is /d
                # path of file:///d:/ is d:/, not /d:/
                if path and not hasdriveletter(path):
                    path = '/' + path

        if self.host and '@' in self.host:
            self.user, self.host = self.host.rsplit('@', 1)
            if ':' in self.user:
                self.user, self.passwd = self.user.split(':', 1)
            if not self.host:
                self.host = None

        # Don't split on colons in IPv6 addresses without ports
        if (self.host and ':' in self.host and
            not (self.host.startswith('[') and self.host.endswith(']'))):
            self._hostport = self.host
            self.host, self.port = self.host.rsplit(':', 1)
            if not self.host:
                self.host = None

        if (self.host and self.scheme == 'file' and
            self.host not in ('localhost', '127.0.0.1', '[::1]')):
            raise Abort(_('file:// URLs can only refer to localhost'))

        self.path = path

        # leave the query string escaped
        for a in ('user', 'passwd', 'host', 'port',
                  'path', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                setattr(self, a, urlreq.unquote(v))

    def __repr__(self):
        attrs = []
        for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
                  'query', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                attrs.append('%s: %r' % (a, v))
        return '<url %s>' % ', '.join(attrs)

    def __str__(self):
        r"""Join the URL's components back into a URL string.

        Examples:

        >>> str(url('http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
        'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
        >>> str(url('http://user:pw@host:80/?foo=bar&baz=42'))
        'http://user:pw@host:80/?foo=bar&baz=42'
        >>> str(url('http://user:pw@host:80/?foo=bar%3dbaz'))
        'http://user:pw@host:80/?foo=bar%3dbaz'
        >>> str(url('ssh://user:pw@[::1]:2200//home/joe#'))
        'ssh://user:pw@[::1]:2200//home/joe#'
        >>> str(url('http://localhost:80//'))
        'http://localhost:80//'
        >>> str(url('http://localhost:80/'))
        'http://localhost:80/'
        >>> str(url('http://localhost:80'))
        'http://localhost:80/'
        >>> str(url('bundle:foo'))
        'bundle:foo'
        >>> str(url('bundle://../foo'))
        'bundle:../foo'
        >>> str(url('path'))
        'path'
        >>> str(url('file:///tmp/foo/bar'))
        'file:///tmp/foo/bar'
        >>> str(url('file:///c:/tmp/foo/bar'))
        'file:///c:/tmp/foo/bar'
        >>> print url(r'bundle:foo\bar')
        bundle:foo\bar
        >>> print url(r'file:///D:\data\hg')
        file:///D:\data\hg
        """
        return encoding.strfromlocal(self.__bytes__())

    def __bytes__(self):
        if self._localpath:
            s = self.path
            if self.scheme == 'bundle':
                s = 'bundle:' + s
            if self.fragment:
                s += '#' + self.fragment
            return s

        s = self.scheme + ':'
        if self.user or self.passwd or self.host:
            s += '//'
        elif self.scheme and (not self.path or self.path.startswith('/')
                              or hasdriveletter(self.path)):
            s += '//'
            if hasdriveletter(self.path):
                s += '/'
        if self.user:
            s += urlreq.quote(self.user, safe=self._safechars)
        if self.passwd:
            s += ':' + urlreq.quote(self.passwd, safe=self._safechars)
        if self.user or self.passwd:
            s += '@'
        if self.host:
            if not (self.host.startswith('[') and self.host.endswith(']')):
                s += urlreq.quote(self.host)
            else:
                s += self.host
        if self.port:
            s += ':' + urlreq.quote(self.port)
        if self.host:
            s += '/'
        if self.path:
            # TODO: similar to the query string, we should not unescape the
            # path when we store it, the path might contain '%2f' = '/',
            # which we should *not* escape.
            s += urlreq.quote(self.path, safe=self._safepchars)
        if self.query:
            # we store the query in escaped form.
            s += '?' + self.query
        if self.fragment is not None:
            s += '#' + urlreq.quote(self.fragment, safe=self._safepchars)
        return s

    def authinfo(self):
        user, passwd = self.user, self.passwd
        try:
            self.user, self.passwd = None, None
            s = bytes(self)
        finally:
            self.user, self.passwd = user, passwd
        if not self.user:
            return (s, None)
        # authinfo[1] is passed to urllib2 password manager, and its
        # URIs must not contain credentials. The host is passed in the
        # URIs list because Python < 2.4.3 uses only that to search for
        # a password.
        return (s, (None, (s, self.host),
                    self.user, self.passwd or ''))
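
# Editor's note (sketch): authinfo() is how the HTTP layer splits a URL into
# a credential-free URI plus urllib2 password-manager data:
#
#   >>> u = url('http://joe:xyz@example.com/repo')
#   >>> u.authinfo()
#   ('http://example.com/repo', (None, ('http://example.com/repo', 'example.com'), 'joe', 'xyz'))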

    def isabs(self):
        if self.scheme and self.scheme != 'file':
            return True # remote URL
        if hasdriveletter(self.path):
            return True # absolute for our purposes - can't be joined()
        if self.path.startswith(r'\\'):
            return True # Windows UNC path
        if self.path.startswith('/'):
            return True # POSIX-style
        return False

    def localpath(self):
        if self.scheme == 'file' or self.scheme == 'bundle':
            path = self.path or '/'
            # For Windows, we need to promote hosts containing drive
            # letters to paths with drive letters.
            if hasdriveletter(self._hostport):
                path = self._hostport + '/' + self.path
            elif (self.host is not None and self.path
                  and not hasdriveletter(path)):
                path = '/' + path
            return path
        return self._origpath

    def islocal(self):
        '''whether localpath will return something that posixfile can open'''
        return (not self.scheme or self.scheme == 'file'
                or self.scheme == 'bundle')

def hasscheme(path):
    return bool(url(path).scheme)

def hasdriveletter(path):
    return path and path[1:2] == ':' and path[0:1].isalpha()

def urllocalpath(path):
    return url(path, parsequery=False, parsefragment=False).localpath()

def hidepassword(u):
    '''hide user credential in a url string'''
    u = url(u)
    if u.passwd:
        u.passwd = '***'
    return bytes(u)

def removeauth(u):
    '''remove all authentication information from a url string'''
    u = url(u)
    u.user = u.passwd = None
    return str(u)
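
# Editor's note: quick examples of the two credential scrubbers (sketch):
#
#   >>> hidepassword('http://joe:secret@example.com/repo')
#   'http://joe:***@example.com/repo'
#   >>> removeauth('http://joe:secret@example.com/repo')
#   'http://example.com/repo'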

timecount = unitcountfn(
    (1, 1e3, _('%.0f s')),
    (100, 1, _('%.1f s')),
    (10, 1, _('%.2f s')),
    (1, 1, _('%.3f s')),
    (100, 0.001, _('%.1f ms')),
    (10, 0.001, _('%.2f ms')),
    (1, 0.001, _('%.3f ms')),
    (100, 0.000001, _('%.1f us')),
    (10, 0.000001, _('%.2f us')),
    (1, 0.000001, _('%.3f us')),
    (100, 0.000000001, _('%.1f ns')),
    (10, 0.000000001, _('%.2f ns')),
    (1, 0.000000001, _('%.3f ns')),
    )
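
# Editor's note: a sketch of the resulting formatter, assuming unitcountfn's
# usual first-match semantics (pick the first row whose threshold the
# magnitude of the value meets):
#
#   >>> timecount(1.5)
#   '1.500 s'
#   >>> timecount(0.0015)
#   '1.500 ms'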

_timenesting = [0]

def timed(func):
    '''Report the execution time of a function call to stderr.

    During development, use as a decorator when you need to measure
    the cost of a function, e.g. as follows:

    @util.timed
    def foo(a, b, c):
        pass
    '''

    def wrapper(*args, **kwargs):
        start = timer()
        indent = 2
        _timenesting[0] += indent
        try:
            return func(*args, **kwargs)
        finally:
            elapsed = timer() - start
            _timenesting[0] -= indent
            stderr.write('%s%s: %s\n' %
                         (' ' * _timenesting[0], func.__name__,
                          timecount(elapsed)))
    return wrapper

_sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
              ('kb', 2**10), ('mb', 2**20), ('gb', 2**30), ('b', 1))

def sizetoint(s):
    '''Convert a size specifier to a byte count.

    >>> sizetoint('30')
    30
    >>> sizetoint('2.2kb')
    2252
    >>> sizetoint('6M')
    6291456
    '''
    t = s.strip().lower()
    try:
        for k, u in _sizeunits:
            if t.endswith(k):
                return int(float(t[:-len(k)]) * u)
        return int(t)
    except ValueError:
        raise error.ParseError(_("couldn't parse size: %s") % s)

class hooks(object):
    '''A collection of hook functions that can be used to extend a
    function's behavior. Hooks are called in lexicographic order,
    based on the names of their sources.'''

    def __init__(self):
        self._hooks = []

    def add(self, source, hook):
        self._hooks.append((source, hook))

    def __call__(self, *args):
        self._hooks.sort(key=lambda x: x[0])
        results = []
        for source, hook in self._hooks:
            results.append(hook(*args))
        return results
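
# Editor's note: a minimal usage sketch. Hooks run sorted by source name, so
# 'a-ext' fires before 'b-ext' regardless of registration order:
#
#   >>> h = hooks()
#   >>> h.add('b-ext', lambda x: x * 2)
#   >>> h.add('a-ext', lambda x: x + 1)
#   >>> h(3)
#   [4, 6]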

def getstackframes(skip=0, line=' %-*s in %s\n', fileline='%s:%s', depth=0):
    '''Yields lines for a nicely formatted stacktrace.
    Skips the 'skip' last entries, then returns the last 'depth' entries.
    Each file+linenumber is formatted according to fileline.
    Each line is formatted according to line.
    If line is None, it yields:
      length of longest filepath+line number,
      filepath+linenumber,
      function

    Not to be used in production code but very convenient while developing.
    '''
    entries = [(fileline % (fn, ln), func)
               for fn, ln, func, _text in traceback.extract_stack()[:-skip - 1]
               ][-depth:]
    if entries:
        fnmax = max(len(entry[0]) for entry in entries)
        for fnln, func in entries:
            if line is None:
                yield (fnmax, fnln, func)
            else:
                yield line % (fnmax, fnln, func)

def debugstacktrace(msg='stacktrace', skip=0,
                    f=stderr, otherf=stdout, depth=0):
    '''Writes a message to f (stderr) with a nicely formatted stacktrace.
    Skips the 'skip' entries closest to the call, then shows 'depth' entries.
    By default it will flush stdout first.
    It can be used everywhere and intentionally does not require an ui object.
    Not to be used in production code but very convenient while developing.
    '''
    if otherf:
        otherf.flush()
    f.write('%s at:\n' % msg.rstrip())
    for line in getstackframes(skip + 1, depth=depth):
        f.write(line)
    f.flush()

class dirs(object):
    '''a multiset of directory names from a dirstate or manifest'''

    def __init__(self, map, skip=None):
        self._dirs = {}
        addpath = self.addpath
        if safehasattr(map, 'iteritems') and skip is not None:
            for f, s in map.iteritems():
                if s[0] != skip:
                    addpath(f)
        else:
            for f in map:
                addpath(f)

    def addpath(self, path):
        dirs = self._dirs
        for base in finddirs(path):
            if base in dirs:
                dirs[base] += 1
                return
            dirs[base] = 1

    def delpath(self, path):
        dirs = self._dirs
        for base in finddirs(path):
            if dirs[base] > 1:
                dirs[base] -= 1
                return
            del dirs[base]

    def __iter__(self):
        return iter(self._dirs)

    def __contains__(self, d):
        return d in self._dirs

if safehasattr(parsers, 'dirs'):
    dirs = parsers.dirs

def finddirs(path):
    pos = path.rfind('/')
    while pos != -1:
        yield path[:pos]
        pos = path.rfind('/', 0, pos)
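
# Editor's note (sketch): finddirs() walks ancestors from deepest to
# shallowest, which is what dirs.addpath/delpath rely on for their early
# return:
#
#   >>> list(finddirs('a/b/c'))
#   ['a/b', 'a']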

class ctxmanager(object):
    '''A context manager for use in 'with' blocks to allow multiple
    contexts to be entered at once. This is both safer and more
    flexible than contextlib.nested.

    Once Mercurial supports Python 2.7+, this will become mostly
    unnecessary.
    '''

    def __init__(self, *args):
        '''Accepts a list of no-argument functions that return context
        managers. These will be invoked at __call__ time.'''
        self._pending = args
        self._atexit = []

    def __enter__(self):
        return self

    def enter(self):
        '''Create and enter context managers in the order in which they were
        passed to the constructor.'''
        values = []
        for func in self._pending:
            obj = func()
            values.append(obj.__enter__())
            self._atexit.append(obj.__exit__)
        del self._pending
        return values

    def atexit(self, func, *args, **kwargs):
        '''Add a function to call when this context manager exits. The
        ordering of multiple atexit calls is unspecified, save that
        they will happen before any __exit__ functions.'''
        def wrapper(exc_type, exc_val, exc_tb):
            func(*args, **kwargs)
        self._atexit.append(wrapper)
        return func

    def __exit__(self, exc_type, exc_val, exc_tb):
        '''Context managers are exited in the reverse order from which
        they were created.'''
        received = exc_type is not None
        suppressed = False
        pending = None
        self._atexit.reverse()
        for exitfunc in self._atexit:
            try:
                if exitfunc(exc_type, exc_val, exc_tb):
                    suppressed = True
                    exc_type = None
                    exc_val = None
                    exc_tb = None
            except BaseException:
                # remember the most recent exception raised by an exit
                # function; it is re-raised once all exit functions have run
                exc_type, exc_val, exc_tb = pending = sys.exc_info()
        del self._atexit
        if pending:
            raise exc_val
        return received and suppressed
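
# Editor's note: a usage sketch ('cleanup' and the file names are
# hypothetical). Each constructor argument is a zero-argument callable
# returning a context manager; enter() returns their __enter__ values in
# order:
#
#   with ctxmanager(lambda: open('a'), lambda: open('b')) as c:
#       fa, fb = c.enter()
#       c.atexit(cleanup)
#       fa.read()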

# compression code

SERVERROLE = 'server'
CLIENTROLE = 'client'

compewireprotosupport = collections.namedtuple(u'compenginewireprotosupport',
                                               (u'name', u'serverpriority',
                                                u'clientpriority'))

class compressormanager(object):
    """Holds registrations of various compression engines.

    This class essentially abstracts the differences between compression
    engines to allow new compression formats to be added easily, possibly from
    extensions.

    Compressors are registered against the global instance by calling its
    ``register()`` method.
    """
    def __init__(self):
        self._engines = {}
        # Bundle spec human name to engine name.
        self._bundlenames = {}
        # Internal bundle identifier to engine name.
        self._bundletypes = {}
        # Revlog header to engine name.
        self._revlogheaders = {}
        # Wire proto identifier to engine name.
        self._wiretypes = {}

    def __getitem__(self, key):
        return self._engines[key]

    def __contains__(self, key):
        return key in self._engines

    def __iter__(self):
        return iter(self._engines.keys())

    def register(self, engine):
        """Register a compression engine with the manager.

        The argument must be a ``compressionengine`` instance.
        """
        if not isinstance(engine, compressionengine):
            raise ValueError(_('argument must be a compressionengine'))

        name = engine.name()

        if name in self._engines:
            raise error.Abort(_('compression engine %s already registered') %
                              name)

        bundleinfo = engine.bundletype()
        if bundleinfo:
            bundlename, bundletype = bundleinfo

            if bundlename in self._bundlenames:
                raise error.Abort(_('bundle name %s already registered') %
                                  bundlename)
            if bundletype in self._bundletypes:
                raise error.Abort(_('bundle type %s already registered by %s') %
                                  (bundletype, self._bundletypes[bundletype]))

            # A bundle name of None means the engine declares no external
            # facing name for use in bundle specs.
            if bundlename:
                self._bundlenames[bundlename] = name

            self._bundletypes[bundletype] = name

        wiresupport = engine.wireprotosupport()
        if wiresupport:
            wiretype = wiresupport.name
            if wiretype in self._wiretypes:
                raise error.Abort(_('wire protocol compression %s already '
                                    'registered by %s') %
                                  (wiretype, self._wiretypes[wiretype]))

            self._wiretypes[wiretype] = name

        revlogheader = engine.revlogheader()
        if revlogheader and revlogheader in self._revlogheaders:
            raise error.Abort(_('revlog header %s already registered by %s') %
                              (revlogheader, self._revlogheaders[revlogheader]))

        if revlogheader:
            self._revlogheaders[revlogheader] = name

        self._engines[name] = engine

    @property
    def supportedbundlenames(self):
        return set(self._bundlenames.keys())

    @property
    def supportedbundletypes(self):
        return set(self._bundletypes.keys())

    def forbundlename(self, bundlename):
        """Obtain a compression engine registered to a bundle name.

        Will raise KeyError if the bundle name isn't registered.

        Will abort if the engine is known but not available.
        """
        engine = self._engines[self._bundlenames[bundlename]]
        if not engine.available():
            raise error.Abort(_('compression engine %s could not be loaded') %
                              engine.name())
        return engine

    def forbundletype(self, bundletype):
        """Obtain a compression engine registered to a bundle type.

        Will raise KeyError if the bundle type isn't registered.

        Will abort if the engine is known but not available.
        """
        engine = self._engines[self._bundletypes[bundletype]]
        if not engine.available():
            raise error.Abort(_('compression engine %s could not be loaded') %
                              engine.name())
        return engine

    def supportedwireengines(self, role, onlyavailable=True):
        """Obtain compression engines that support the wire protocol.

        Returns a list of engines in prioritized order, most desired first.

        If ``onlyavailable`` is set, filter out engines that can't be
        loaded.
        """
        assert role in (SERVERROLE, CLIENTROLE)

        attr = 'serverpriority' if role == SERVERROLE else 'clientpriority'

        engines = [self._engines[e] for e in self._wiretypes.values()]
        if onlyavailable:
            engines = [e for e in engines if e.available()]

        def getkey(e):
            # Sort first by priority, highest first. In case of tie, sort
            # alphabetically. This is arbitrary, but ensures output is
            # stable.
            w = e.wireprotosupport()
            return -1 * getattr(w, attr), w.name

        return list(sorted(engines, key=getkey))

    def forwiretype(self, wiretype):
        engine = self._engines[self._wiretypes[wiretype]]
        if not engine.available():
            raise error.Abort(_('compression engine %s could not be loaded') %
                              engine.name())
        return engine

    def forrevlogheader(self, header):
        """Obtain a compression engine registered to a revlog header.

        Will raise KeyError if the revlog header value isn't registered.
        """
        return self._engines[self._revlogheaders[header]]

compengines = compressormanager()
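
# Editor's note: a sketch of typical lookup and streaming use, assuming the
# zlib engine's 'GZ' bundle type registered further below:
#
#   engine = compengines.forbundletype('GZ')
#   compressed = ''.join(engine.compressstream(iter(['some ', 'data'])))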

class compressionengine(object):
    """Base class for compression engines.

    Compression engines must implement the interface defined by this class.
    """
    def name(self):
        """Returns the name of the compression engine.

        This is the key the engine is registered under.

        This method must be implemented.
        """
        raise NotImplementedError()

    def available(self):
        """Whether the compression engine is available.

        The intent of this method is to allow optional compression engines
        that may not be available in all installations (such as engines relying
        on C extensions that may not be present).
        """
        return True

    def bundletype(self):
        """Describes bundle identifiers for this engine.

        If this compression engine isn't supported for bundles, returns None.

        If this engine can be used for bundles, returns a 2-tuple of strings of
        the user-facing "bundle spec" compression name and an internal
        identifier used to denote the compression format within bundles. To
        exclude the name from external usage, set the first element to ``None``.

        If bundle compression is supported, the class must also implement
        ``compressstream`` and ``decompressorreader``.

        The docstring of this method is used in the help system to tell users
        about this engine.
        """
        return None

    def wireprotosupport(self):
        """Declare support for this compression format on the wire protocol.

        If this compression engine isn't supported for compressing wire
        protocol payloads, returns None.

        Otherwise, returns ``compenginewireprotosupport`` with the following
        fields:

        * String format identifier
        * Integer priority for the server
        * Integer priority for the client

        The integer priorities are used to order the advertisement of format
        support by server and client. The highest integer is advertised
        first. Integers with non-positive values aren't advertised.

        The priority values are somewhat arbitrary and only used for default
        ordering. The relative order can be changed via config options.

        If wire protocol compression is supported, the class must also
        implement ``compressstream`` and ``decompressorreader``.
        """
        return None

    def revlogheader(self):
        """Header added to revlog chunks that identifies this engine.

        If this engine can be used to compress revlogs, this method should
        return the bytes used to identify chunks compressed with this engine.
        Else, the method should return ``None`` to indicate it does not
        participate in revlog compression.
        """
        return None

    def compressstream(self, it, opts=None):
        """Compress an iterator of chunks.

        The method receives an iterator (ideally a generator) of chunks of
        bytes to be compressed. It returns an iterator (ideally a generator)
        of bytes of chunks representing the compressed output.

        Optionally accepts an argument defining how to perform compression.
        Each engine treats this argument differently.
        """
        raise NotImplementedError()

    def decompressorreader(self, fh):
        """Perform decompression on a file object.

        Argument is an object with a ``read(size)`` method that returns
        compressed data. Return value is an object with a ``read(size)`` that
        returns uncompressed data.
        """
        raise NotImplementedError()

    def revlogcompressor(self, opts=None):
        """Obtain an object that can be used to compress revlog entries.

        The object has a ``compress(data)`` method that compresses binary
        data. This method returns compressed binary data or ``None`` if
        the data could not be compressed (too small, not compressible, etc).
        The returned data should have a header uniquely identifying this
        compression format so decompression can be routed to this engine.
        This header should be identified by the ``revlogheader()`` return
        value.

        The object has a ``decompress(data)`` method that decompresses
        data. The method will only be called if ``data`` begins with
        ``revlogheader()``. The method should return the raw, uncompressed
        data or raise a ``RevlogError``.

        The object is reusable but is not thread safe.
        """
        raise NotImplementedError()

class _zlibengine(compressionengine):
    def name(self):
        return 'zlib'

    def bundletype(self):
        """zlib compression using the DEFLATE algorithm.

        All Mercurial clients should support this format. The compression
        algorithm strikes a reasonable balance between compression ratio
        and speed.
        """
        return 'gzip', 'GZ'

    def wireprotosupport(self):
        return compewireprotosupport('zlib', 20, 20)

    def revlogheader(self):
        return 'x'

    def compressstream(self, it, opts=None):
        opts = opts or {}

        z = zlib.compressobj(opts.get('level', -1))
        for chunk in it:
            data = z.compress(chunk)
            # Not all calls to compress emit data. It is cheaper to inspect
            # here than to feed empty chunks through the generator.
            if data:
                yield data

        yield z.flush()

    def decompressorreader(self, fh):
        def gen():
            d = zlib.decompressobj()
            for chunk in filechunkiter(fh):
                while chunk:
                    # Limit output size to limit memory.
                    yield d.decompress(chunk, 2 ** 18)
                    chunk = d.unconsumed_tail

        return chunkbuffer(gen())

    class zlibrevlogcompressor(object):
        def compress(self, data):
            insize = len(data)
            # Caller handles empty input case.
            assert insize > 0

            if insize < 44:
                return None

            elif insize <= 1000000:
                compressed = zlib.compress(data)
                if len(compressed) < insize:
                    return compressed
                return None

            # zlib makes an internal copy of the input buffer, doubling
            # memory usage for large inputs. So do streaming compression
            # on large inputs.
            else:
                z = zlib.compressobj()
                parts = []
                pos = 0
                while pos < insize:
                    pos2 = pos + 2**20
                    parts.append(z.compress(data[pos:pos2]))
                    pos = pos2
                parts.append(z.flush())

                if sum(map(len, parts)) < insize:
                    return ''.join(parts)
                return None

        def decompress(self, data):
            try:
                return zlib.decompress(data)
            except zlib.error as e:
                raise error.RevlogError(_('revlog decompress error: %s') %
                                        str(e))

    def revlogcompressor(self, opts=None):
        return self.zlibrevlogcompressor()

compengines.register(_zlibengine())

class _bz2engine(compressionengine):
    def name(self):
        return 'bz2'

    def bundletype(self):
        """An algorithm that produces smaller bundles than ``gzip``.

        All Mercurial clients should support this format.

        This engine will likely produce smaller bundles than ``gzip`` but
        will be significantly slower, both during compression and
        decompression.

        If available, the ``zstd`` engine can yield similar or better
        compression at much higher speeds.
        """
        return 'bzip2', 'BZ'

    # We declare a protocol name but don't advertise by default because
    # it is slow.
    def wireprotosupport(self):
        return compewireprotosupport('bzip2', 0, 0)

    def compressstream(self, it, opts=None):
        opts = opts or {}
        z = bz2.BZ2Compressor(opts.get('level', 9))
        for chunk in it:
            data = z.compress(chunk)
            if data:
                yield data

        yield z.flush()

    def decompressorreader(self, fh):
        def gen():
            d = bz2.BZ2Decompressor()
            for chunk in filechunkiter(fh):
                yield d.decompress(chunk)

        return chunkbuffer(gen())

compengines.register(_bz2engine())

class _truncatedbz2engine(compressionengine):
    def name(self):
        return 'bz2truncated'

    def bundletype(self):
        return None, '_truncatedBZ'

    # We don't implement compressstream because it is hackily handled elsewhere.

    def decompressorreader(self, fh):
        def gen():
            # The input stream doesn't have the 'BZ' header. So add it back.
            d = bz2.BZ2Decompressor()
            d.decompress('BZ')
            for chunk in filechunkiter(fh):
                yield d.decompress(chunk)

        return chunkbuffer(gen())

compengines.register(_truncatedbz2engine())

class _noopengine(compressionengine):
    def name(self):
        return 'none'

    def bundletype(self):
        """No compression is performed.

        Use this compression engine to explicitly disable compression.
        """
        return 'none', 'UN'

    # Clients always support uncompressed payloads. Servers don't because,
    # unless you are on a fast network, uncompressed payloads can easily
    # saturate your network pipe.
    def wireprotosupport(self):
        return compewireprotosupport('none', 0, 10)

    # We don't implement revlogheader because it is handled specially
    # in the revlog class.

    def compressstream(self, it, opts=None):
        return it

    def decompressorreader(self, fh):
        return fh

    class nooprevlogcompressor(object):
        def compress(self, data):
            return None

    def revlogcompressor(self, opts=None):
        return self.nooprevlogcompressor()

compengines.register(_noopengine())

class _zstdengine(compressionengine):
    def name(self):
        return 'zstd'

    @propertycache
    def _module(self):
        # Not all installs have the zstd module available. So defer importing
        # until first access.
        try:
            from . import zstd
            # Force delayed import.
            zstd.__version__
            return zstd
        except ImportError:
            return None

    def available(self):
        return bool(self._module)

    def bundletype(self):
        """A modern compression algorithm that is fast and highly flexible.

        Only supported by Mercurial 4.1 and newer clients.

        With the default settings, zstd compression is both faster and yields
        better compression than ``gzip``. It also frequently yields better
        compression than ``bzip2`` while operating at much higher speeds.

        If this engine is available and backwards compatibility is not a
        concern, it is likely the best available engine.
        """
        return 'zstd', 'ZS'

    def wireprotosupport(self):
        return compewireprotosupport('zstd', 50, 50)

    def revlogheader(self):
        return '\x28'

    def compressstream(self, it, opts=None):
        opts = opts or {}
        # zstd level 3 is almost always significantly faster than zlib
        # while providing no worse compression. It strikes a good balance
        # between speed and compression.
        level = opts.get('level', 3)

        zstd = self._module
        z = zstd.ZstdCompressor(level=level).compressobj()
        for chunk in it:
            data = z.compress(chunk)
            if data:
                yield data

        yield z.flush()

    def decompressorreader(self, fh):
        zstd = self._module
        dctx = zstd.ZstdDecompressor()
        return chunkbuffer(dctx.read_from(fh))

    class zstdrevlogcompressor(object):
        def __init__(self, zstd, level=3):
            # Writing the content size adds a few bytes to the output. However,
            # it allows decompression to be more optimal since we can
            # pre-allocate a buffer to hold the result.
            self._cctx = zstd.ZstdCompressor(level=level,
                                             write_content_size=True)
            self._dctx = zstd.ZstdDecompressor()
            self._compinsize = zstd.COMPRESSION_RECOMMENDED_INPUT_SIZE
            self._decompinsize = zstd.DECOMPRESSION_RECOMMENDED_INPUT_SIZE

        def compress(self, data):
            insize = len(data)
            # Caller handles empty input case.
            assert insize > 0

            if insize < 50:
                return None

            elif insize <= 1000000:
                compressed = self._cctx.compress(data)
                if len(compressed) < insize:
                    return compressed
                return None
            else:
                z = self._cctx.compressobj()
                chunks = []
                pos = 0
                while pos < insize:
                    pos2 = pos + self._compinsize
                    chunk = z.compress(data[pos:pos2])
                    if chunk:
                        chunks.append(chunk)
                    pos = pos2
                chunks.append(z.flush())

                if sum(map(len, chunks)) < insize:
                    return ''.join(chunks)
                return None

        def decompress(self, data):
            insize = len(data)

            try:
                # This was measured to be faster than other streaming
                # decompressors.
                dobj = self._dctx.decompressobj()
                chunks = []
                pos = 0
                while pos < insize:
                    pos2 = pos + self._decompinsize
                    chunk = dobj.decompress(data[pos:pos2])
                    if chunk:
                        chunks.append(chunk)
                    pos = pos2
                # Frame should be exhausted, so no finish() API.

                return ''.join(chunks)
            except Exception as e:
                raise error.RevlogError(_('revlog decompress error: %s') %
                                        str(e))

    def revlogcompressor(self, opts=None):
        opts = opts or {}
        return self.zstdrevlogcompressor(self._module,
                                         level=opts.get('level', 3))

compengines.register(_zstdengine())

def bundlecompressiontopics():
    """Obtains a list of available bundle compressions for use in help."""
    # help.makeitemsdocs() expects a dict of names to items with a .__doc__.
    items = {}

    # We need to format the docstring. So use a dummy object/type to hold it
    # rather than mutating the original.
    class docobject(object):
        pass

    for name in compengines:
        engine = compengines[name]

        if not engine.available():
            continue

        bt = engine.bundletype()
        if not bt or not bt[0]:
            continue

        doc = pycompat.sysstr('``%s``\n    %s') % (
            bt[0], engine.bundletype.__doc__)

        value = docobject()
        value.__doc__ = doc

        items[bt[0]] = value

    return items

# convenient shortcut
dst = debugstacktrace