##// END OF EJS Templates
util: take propertycache from context.py
Matt Mackall -
r8207:dd8d5be5 default
parent child Browse files
Show More
@@ -1,806 +1,799 b''
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import nullid, nullrev, short, hex
9 9 from i18n import _
10 10 import ancestor, bdiff, error, util, os, errno
11 11
# propertycache was hoisted into util so other modules can share it;
# keep a module-level alias so the @propertycache decorators below
# continue to work unchanged.
propertycache = util.propertycache

class changectx(object):
    """A changecontext object makes access to data related to a particular
    changeset convenient."""
    def __init__(self, repo, changeid=''):
        """changeid is a revision number, node, or tag"""
        if changeid == '':
            # default to the working directory's first parent
            changeid = '.'
        self._repo = repo
        if isinstance(changeid, (long, int)):
            self._rev = changeid
            self._node = self._repo.changelog.node(changeid)
        else:
            self._node = self._repo.lookup(changeid)
            self._rev = self._repo.changelog.rev(self._node)

    def __str__(self):
        return short(self.node())

    def __int__(self):
        return self.rev()

    def __repr__(self):
        return "<changectx %s>" % str(self)

    def __hash__(self):
        try:
            return hash(self._rev)
        except AttributeError:
            # subclasses (e.g. workingctx) may have _rev = None-like state;
            # fall back to identity hashing
            return id(self)

    def __eq__(self, other):
        try:
            return self._rev == other._rev
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def __nonzero__(self):
        return self._rev != nullrev

    @propertycache
    def _changeset(self):
        # raw changelog entry; indices observed below: 0 manifest node,
        # 1 user, 2 date, 3 files, 4 description, 5 extra dict
        return self._repo.changelog.read(self.node())

    @propertycache
    def _manifest(self):
        return self._repo.manifest.read(self._changeset[0])

    @propertycache
    def _manifestdelta(self):
        # delta against the parent manifest; cheaper than a full read when
        # only files touched by this changeset are of interest
        return self._repo.manifest.readdelta(self._changeset[0])

    @propertycache
    def _parents(self):
        p = self._repo.changelog.parentrevs(self._rev)
        if p[1] == nullrev:
            # drop the null second parent for non-merge changesets
            p = p[:-1]
        return [changectx(self._repo, x) for x in p]

    def __contains__(self, key):
        return key in self._manifest

    def __getitem__(self, key):
        return self.filectx(key)

    def __iter__(self):
        # yield tracked filenames in sorted order
        for f in util.sort(self._manifest):
            yield f

    def changeset(self): return self._changeset
    def manifest(self): return self._manifest

    def rev(self): return self._rev
    def node(self): return self._node
    def hex(self): return hex(self._node)
    def user(self): return self._changeset[1]
    def date(self): return self._changeset[2]
    def files(self): return self._changeset[3]
    def description(self): return self._changeset[4]
    def branch(self): return self._changeset[5].get("branch")
    def extra(self): return self._changeset[5]
    def tags(self): return self._repo.nodetags(self._node)

    def parents(self):
        """return contexts for each parent changeset"""
        return self._parents

    def children(self):
        """return contexts for each child changeset"""
        c = self._repo.changelog.children(self._node)
        return [changectx(self._repo, x) for x in c]

    def ancestors(self):
        for a in self._repo.changelog.ancestors(self._rev):
            yield changectx(self._repo, a)

    def descendants(self):
        for d in self._repo.changelog.descendants(self._rev):
            yield changectx(self._repo, d)

    def _fileinfo(self, path):
        # Return (filenode, flags) for path, choosing the cheapest lookup:
        # cached full manifest, cached/applicable manifest delta, or a
        # direct revlog search; raises LookupError when the file is absent.
        if '_manifest' in self.__dict__:
            try:
                return self._manifest[path], self._manifest.flags(path)
            except KeyError:
                raise error.LookupError(self._node, path,
                                        _('not found in manifest'))
        if '_manifestdelta' in self.__dict__ or path in self.files():
            # the delta suffices when the file changed in this changeset
            if path in self._manifestdelta:
                return self._manifestdelta[path], self._manifestdelta.flags(path)
        node, flag = self._repo.manifest.find(self._changeset[0], path)
        if not node:
            raise error.LookupError(self._node, path,
                                    _('not found in manifest'))

        return node, flag

    def filenode(self, path):
        return self._fileinfo(path)[0]

    def flags(self, path):
        try:
            return self._fileinfo(path)[1]
        except error.LookupError:
            # missing file -> no flags
            return ''

    def filectx(self, path, fileid=None, filelog=None):
        """get a file context from this changeset"""
        if fileid is None:
            fileid = self.filenode(path)
        return filectx(self._repo, path, fileid=fileid,
                       changectx=self, filelog=filelog)

    def ancestor(self, c2):
        """
        return the ancestor context of self and c2
        """
        n = self._repo.changelog.ancestor(self._node, c2._node)
        return changectx(self._repo, n)

    def walk(self, match):
        # Yield filenames in this changeset matched by 'match'; files that
        # were explicitly requested but do not exist are reported through
        # match.bad.
        fdict = dict.fromkeys(match.files())
        # for dirstate.walk, files=['.'] means "walk the whole tree".
        # follow that here, too
        fdict.pop('.', None)
        for fn in self:
            for ffn in fdict:
                # match if the file is the exact name or a directory
                if ffn == fn or fn.startswith("%s/" % ffn):
                    del fdict[ffn]
                    break
            if match(fn):
                yield fn
        # anything left in fdict was requested but not found in this rev
        for fn in util.sort(fdict):
            if match.bad(fn, 'No such file in rev ' + str(self)) and match(fn):
                yield fn
179 172
class filectx(object):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""
    def __init__(self, repo, path, changeid=None, fileid=None,
                 filelog=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        # at least one way of pinning down the file revision must be
        # supplied; everything else is derived lazily via propertycache
        assert (changeid is not None
                or fileid is not None
                or changectx is not None)

        if filelog:
            self._filelog = filelog

        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

    @propertycache
    def _changectx(self):
        return changectx(self._repo, self._changeid)

    @propertycache
    def _filelog(self):
        return self._repo.file(self._path)

    @propertycache
    def _changeid(self):
        if '_changectx' in self.__dict__:
            return self._changectx.rev()
        else:
            return self._filelog.linkrev(self._filerev)

    @propertycache
    def _filenode(self):
        if '_fileid' in self.__dict__:
            return self._filelog.lookup(self._fileid)
        else:
            return self._changectx.filenode(self._path)

    @propertycache
    def _filerev(self):
        return self._filelog.rev(self._filenode)

    @propertycache
    def _repopath(self):
        return self._path

    def __nonzero__(self):
        try:
            self._filenode
            return True
        except error.LookupError:
            # file is missing
            return False

    def __str__(self):
        return "%s@%s" % (self.path(), short(self.node()))

    def __repr__(self):
        return "<filectx %s>" % str(self)

    def __hash__(self):
        try:
            return hash((self._path, self._fileid))
        except AttributeError:
            return id(self)

    def __eq__(self, other):
        try:
            return (self._path == other._path
                    and self._fileid == other._fileid)
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def filectx(self, fileid):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        return filectx(self._repo, self._path, fileid=fileid,
                       filelog=self._filelog)

    def filerev(self): return self._filerev
    def filenode(self): return self._filenode
    def flags(self): return self._changectx.flags(self._path)
    def filelog(self): return self._filelog

    def rev(self):
        # prefer an existing/derivable changectx; fall back to linkrev
        if '_changectx' in self.__dict__:
            return self._changectx.rev()
        if '_changeid' in self.__dict__:
            return self._changectx.rev()
        return self._filelog.linkrev(self._filerev)

    def linkrev(self): return self._filelog.linkrev(self._filerev)
    def node(self): return self._changectx.node()
    def user(self): return self._changectx.user()
    def date(self): return self._changectx.date()
    def files(self): return self._changectx.files()
    def description(self): return self._changectx.description()
    def branch(self): return self._changectx.branch()
    def manifest(self): return self._changectx.manifest()
    def changectx(self): return self._changectx

    def data(self): return self._filelog.read(self._filenode)
    def path(self): return self._path
    def size(self): return self._filelog.size(self._filerev)

    def cmp(self, text): return self._filelog.cmp(self._filenode, text)

    def renamed(self):
        """check if file was actually renamed in this changeset revision

        If rename logged in file revision, we report copy for changeset only
        if file revisions linkrev points back to the changeset in question
        or both changeset parents contain different file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return renamed

        if self.rev() == self.linkrev():
            return renamed

        name = self.path()
        fnode = self._filenode
        for p in self._changectx.parents():
            try:
                if fnode == p.filenode(name):
                    # unchanged from a parent: not a rename here
                    return None
            except error.LookupError:
                pass
        return renamed

    def parents(self):
        """return parent filectxs, following the copy source if renamed"""
        p = self._path
        fl = self._filelog
        pl = [(p, n, fl) for n in self._filelog.parents(self._filenode)]

        r = self._filelog.renamed(self._filenode)
        if r:
            # first parent is the copy source in another filelog
            pl[0] = (r[0], r[1], None)

        return [filectx(self._repo, p, fileid=n, filelog=l)
                for p,n,l in pl if n != nullid]

    def children(self):
        # hard for renames
        c = self._filelog.children(self._filenode)
        return [filectx(self._repo, self._path, fileid=x,
                        filelog=self._filelog) for x in c]

    def annotate(self, follow=False, linenumber=None):
        '''returns a list of tuples of (ctx, line) for each line
        in the file, where ctx is the filectx of the node where
        that line was last changed.
        This returns tuples of ((ctx, linenumber), line) for each line,
        if "linenumber" parameter is NOT "None".
        In such tuples, linenumber means one at the first appearance
        in the managed file.
        To reduce annotation cost,
        this returns fixed value(False is used) as linenumber,
        if "linenumber" parameter is "False".'''

        def decorate_compat(text, rev):
            return ([rev] * len(text.splitlines()), text)

        def without_linenumber(text, rev):
            return ([(rev, False)] * len(text.splitlines()), text)

        def with_linenumber(text, rev):
            size = len(text.splitlines())
            return ([(rev, i) for i in xrange(1, size + 1)], text)

        # pick the per-line decoration based on the linenumber mode
        decorate = (((linenumber is None) and decorate_compat) or
                    (linenumber and with_linenumber) or
                    without_linenumber)

        def pair(parent, child):
            # lines unchanged between parent and child keep the parent's
            # annotation
            for a1, a2, b1, b2 in bdiff.blocks(parent[1], child[1]):
                child[0][b1:b2] = parent[0][a1:a2]
            return child

        getlog = util.cachefunc(lambda x: self._repo.file(x))
        def getctx(path, fileid):
            log = path == self._path and self._filelog or getlog(path)
            return filectx(self._repo, path, fileid=fileid, filelog=log)
        getctx = util.cachefunc(getctx)

        def parents(f):
            # we want to reuse filectx objects as much as possible
            p = f._path
            if f._filerev is None: # working dir
                pl = [(n.path(), n.filerev()) for n in f.parents()]
            else:
                pl = [(p, n) for n in f._filelog.parentrevs(f._filerev)]

            if follow:
                r = f.renamed()
                if r:
                    pl[0] = (r[0], getlog(r[0]).rev(r[1]))

            return [getctx(p, n) for p, n in pl if n != nullrev]

        # use linkrev to find the first changeset where self appeared
        if self.rev() != self.linkrev():
            base = self.filectx(self.filerev())
        else:
            base = self

        # find all ancestors
        needed = {base: 1}
        visit = [base]
        files = [base._path]
        while visit:
            f = visit.pop(0)
            for p in parents(f):
                if p not in needed:
                    needed[p] = 1
                    visit.append(p)
                    if p._path not in files:
                        files.append(p._path)
                else:
                    # count how many times we'll use this
                    needed[p] += 1

        # sort by revision (per file) which is a topological order
        visit = []
        for f in files:
            fn = [(n.rev(), n) for n in needed if n._path == f]
            visit.extend(fn)

        hist = {}
        for r, f in util.sort(visit):
            curr = decorate(f.data(), f)
            for p in parents(f):
                if p != nullid:
                    curr = pair(hist[p], curr)
                    # trim the history of unneeded revs
                    needed[p] -= 1
                    if not needed[p]:
                        del hist[p]
            hist[f] = curr

        return zip(hist[f][0], hist[f][1].splitlines(1))

    def ancestor(self, fc2):
        """
        find the common ancestor file context, if any, of self, and fc2
        """

        acache = {}

        # prime the ancestor cache for the working directory
        for c in (self, fc2):
            # FIX: use identity comparison with None ("is None") instead of
            # "== None"; matches the same working-dir test in annotate()
            if c._filerev is None:
                pl = [(n.path(), n.filenode()) for n in c.parents()]
                acache[(c._path, None)] = pl

        flcache = {self._repopath:self._filelog, fc2._repopath:fc2._filelog}
        def parents(vertex):
            if vertex in acache:
                return acache[vertex]
            f, n = vertex
            if f not in flcache:
                flcache[f] = self._repo.file(f)
            fl = flcache[f]
            pl = [(f, p) for p in fl.parents(n) if p != nullid]
            re = fl.renamed(n)
            if re:
                pl.append(re)
            acache[vertex] = pl
            return pl

        a, b = (self._path, self._filenode), (fc2._path, fc2._filenode)
        v = ancestor.ancestor(a, b, parents)
        if v:
            f, n = v
            return filectx(self._repo, f, fileid=n, filelog=flcache[f])

        return None
470 463
class workingctx(changectx):
    """A workingctx object makes access to data related to
    the current working directory convenient.
    parents - a pair of parent nodeids, or None to use the dirstate.
    date - any valid date string or (unixtime, offset), or None.
    user - username string, or None.
    extra - a dictionary of extra values, or None.
    changes - a list of file lists as returned by localrepo.status()
              or None to use the repository status.
    """
    def __init__(self, repo, parents=None, text="", user=None, date=None,
                 extra=None, changes=None):
        self._repo = repo
        self._rev = None
        self._node = None
        self._text = text
        # only pre-set the lazy attributes that were explicitly supplied;
        # the rest fall back to the propertycaches below
        if date:
            self._date = util.parsedate(date)
        if user:
            self._user = user
        if parents:
            self._parents = [changectx(self._repo, p) for p in parents]
        if changes:
            self._status = list(changes)

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if 'branch' not in self._extra:
            branch = self._repo.dirstate.branch()
            try:
                # round-trip to validate the stored branch name is UTF-8
                branch = branch.decode('UTF-8').encode('UTF-8')
            except UnicodeDecodeError:
                raise util.Abort(_('branch name not in UTF-8!'))
            self._extra['branch'] = branch
        if self._extra['branch'] == '':
            self._extra['branch'] = 'default'

    def __str__(self):
        return str(self._parents[0]) + "+"

    def __nonzero__(self):
        return True

    def __contains__(self, key):
        # tracked means any dirstate entry except unknown ('?') / removed ('r')
        return self._repo.dirstate[key] not in "?r"

    @propertycache
    def _manifest(self):
        """generate a manifest corresponding to the working directory"""

        man = self._parents[0].manifest().copy()
        copied = self._repo.dirstate.copies()
        cf = lambda x: man.flags(copied.get(x, x))
        ff = self._repo.dirstate.flagfunc(cf)
        modified, added, removed, deleted, unknown = self._status[:5]
        for i, l in (("a", added), ("m", modified), ("u", unknown)):
            for f in l:
                man[f] = man.get(copied.get(f, f), nullid) + i
                try:
                    man.set(f, ff(f))
                except OSError:
                    # file vanished from disk; keep manifest entry as-is
                    pass

        for f in deleted + removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        return self._repo.status(unknown=True)

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        return util.makedate()

    @propertycache
    def _parents(self):
        p = self._repo.dirstate.parents()
        if p[1] == nullid:
            # drop the null second parent when not in a merge
            p = p[:-1]
        self._parents = [changectx(self._repo, x) for x in p]
        return self._parents

    def manifest(self): return self._manifest

    def user(self): return self._user or self._repo.ui.username()
    def date(self): return self._date
    def description(self): return self._text
    def files(self):
        # modified + added + removed, sorted
        return util.sort(self._status[0] + self._status[1] + self._status[2])

    def modified(self): return self._status[0]
    def added(self): return self._status[1]
    def removed(self): return self._status[2]
    def deleted(self): return self._status[3]
    def unknown(self): return self._status[4]
    def clean(self): return self._status[5]
    def branch(self): return self._extra['branch']
    def extra(self): return self._extra

    def tags(self):
        t = []
        [t.extend(p.tags()) for p in self.parents()]
        return t

    def children(self):
        return []

    def flags(self, path):
        if '_manifest' in self.__dict__:
            try:
                return self._manifest.flags(path)
            except KeyError:
                return ''

        # manifest not built yet: derive flags from the first parent's
        # manifest entry plus the on-disk state
        pnode = self._parents[0].changeset()[0]
        orig = self._repo.dirstate.copies().get(path, path)
        node, flag = self._repo.manifest.find(pnode, orig)
        try:
            ff = self._repo.dirstate.flagfunc(lambda x: flag or '')
            return ff(path)
        except OSError:
            pass

        if not node or path in self.deleted() or path in self.removed():
            return ''
        return flag

    def filectx(self, path, filelog=None):
        """get a file context from the working directory"""
        return workingfilectx(self._repo, path, workingctx=self,
                              filelog=filelog)

    def ancestor(self, c2):
        """return the ancestor context of self and c2"""
        return self._parents[0].ancestor(c2) # punt on two parents for now

    def walk(self, match):
        return util.sort(self._repo.dirstate.walk(match, True, False).keys())
617 610
class workingfilectx(filectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""
    def __init__(self, repo, path, filelog=None, workingctx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path
        # no changeset/filerev: data comes from the working directory
        self._changeid = None
        self._filerev = self._filenode = None

        if filelog:
            self._filelog = filelog
        if workingctx:
            self._changectx = workingctx

    @propertycache
    def _changectx(self):
        return workingctx(self._repo)

    @propertycache
    def _repopath(self):
        # the copy source, if this file was copied in the working dir
        return self._repo.dirstate.copied(self._path) or self._path

    @propertycache
    def _filelog(self):
        return self._repo.file(self._repopath)

    def __nonzero__(self):
        return True

    def __str__(self):
        return "%s@%s" % (self.path(), self._changectx)

    def filectx(self, fileid):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        return filectx(self._repo, self._repopath, fileid=fileid,
                       filelog=self._filelog)

    def rev(self):
        if '_changectx' in self.__dict__:
            return self._changectx.rev()
        return self._filelog.linkrev(self._filerev)

    def data(self): return self._repo.wread(self._path)
    def renamed(self):
        # copied in the working directory iff _repopath differs from _path
        rp = self._repopath
        if rp == self._path:
            return None
        return rp, self._changectx._parents[0]._manifest.get(rp, nullid)

    def parents(self):
        '''return parent filectxs, following copies if necessary'''
        p = self._path
        rp = self._repopath
        pcl = self._changectx._parents
        fl = self._filelog
        pl = [(rp, pcl[0]._manifest.get(rp, nullid), fl)]
        if len(pcl) > 1:
            if rp != p:
                # copied: second parent is under the original name, in a
                # different filelog
                fl = None
            pl.append((p, pcl[1]._manifest.get(p, nullid), fl))

        return [filectx(self._repo, p, fileid=n, filelog=l)
                for p,n,l in pl if n != nullid]

    def children(self):
        return []

    def size(self): return os.stat(self._repo.wjoin(self._path)).st_size
    def date(self):
        # prefer the on-disk mtime; fall back to the changeset date when
        # the file has been deleted from the working directory
        t, tz = self._changectx.date()
        try:
            return (int(os.lstat(self._repo.wjoin(self._path)).st_mtime), tz)
        except OSError, err:
            if err.errno != errno.ENOENT: raise
            return (t, tz)

    def cmp(self, text): return self._repo.wread(self._path) == text
698 691
class memctx(object):
    """Use memctx to perform in-memory commits via localrepo.commitctx().

    Revision information is supplied at initialization time while
    related files data and is made available through a callback
    mechanism.  'repo' is the current localrepo, 'parents' is a
    sequence of two parent revisions identifiers (pass None for every
    missing parent), 'text' is the commit message and 'files' lists
    names of files touched by the revision (normalized and relative to
    repository root).

    filectxfn(repo, memctx, path) is a callable receiving the
    repository, the current memctx object and the normalized path of
    requested file, relative to repository root. It is fired by the
    commit function for every file in 'files', but calls order is
    undefined. If the file is available in the revision being
    committed (updated or added), filectxfn returns a memfilectx
    object. If the file was removed, filectxfn raises an
    IOError. Moved files are represented by marking the source file
    removed and the new file added with copy information (see
    memfilectx).

    user receives the committer name and defaults to current
    repository username, date is the commit date in any format
    supported by util.parsedate() and defaults to current date, extra
    is a dictionary of metadata or is left empty.
    """
    def __init__(self, repo, parents, text, files, filectxfn, user=None,
                 date=None, extra=None):
        self._repo = repo
        self._rev = None
        self._node = None
        self._text = text
        self._date = date and util.parsedate(date) or util.makedate()
        self._user = user
        # substitute nullid for missing parents
        parents = [(p or nullid) for p in parents]
        p1, p2 = parents
        self._parents = [changectx(self._repo, p) for p in (p1, p2)]
        files = util.sort(set(files))
        # status layout: modified, added, removed, deleted, unknown, clean.
        # FIX: the list previously had only five entries, so clean()
        # (self._status[5]) raised IndexError; add the empty clean list.
        self._status = [files, [], [], [], [], []]
        self._filectxfn = filectxfn

        self._extra = extra and extra.copy() or {}
        if 'branch' not in self._extra:
            self._extra['branch'] = 'default'
        elif self._extra.get('branch') == '':
            self._extra['branch'] = 'default'

    def __str__(self):
        return str(self._parents[0]) + "+"

    def __int__(self):
        return self._rev

    def __nonzero__(self):
        return True

    def user(self): return self._user or self._repo.ui.username()
    def date(self): return self._date
    def description(self): return self._text
    def files(self): return self.modified()
    def modified(self): return self._status[0]
    def added(self): return self._status[1]
    def removed(self): return self._status[2]
    def deleted(self): return self._status[3]
    def unknown(self): return self._status[4]
    def clean(self): return self._status[5]
    def branch(self): return self._extra['branch']
    def extra(self): return self._extra
    def flags(self, f): return self[f].flags()

    def parents(self):
        """return contexts for each parent changeset"""
        return self._parents

    def filectx(self, path, filelog=None):
        """get a file context from the working directory"""
        return self._filectxfn(self._repo, self, path)
777 770
class memfilectx(object):
    """memfilectx represents an in-memory file to commit.

    See memctx for more details.
    """
    def __init__(self, path, data, islink, isexec, copied):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copied is the source file path if current file was copied in the
        revision being committed, or None."""
        self._path = path
        self._data = data
        # encode the mode bits as manifest-style flag characters
        flags = ''
        if islink:
            flags = flags + 'l'
        if isexec:
            flags = flags + 'x'
        self._flags = flags
        if copied:
            self._copied = (copied, nullid)
        else:
            self._copied = None

    def __nonzero__(self):
        return True

    def __str__(self):
        # NOTE(review): memfilectx never sets _changectx, so this looks
        # like it would raise AttributeError if ever called -- confirm
        return "%s@%s" % (self.path(), self._changectx)

    def path(self):
        return self._path

    def data(self):
        return self._data

    def flags(self):
        return self._flags

    def isexec(self):
        return 'x' in self._flags

    def islink(self):
        return 'l' in self._flags

    def renamed(self):
        return self._copied
806 799
@@ -1,1474 +1,1483 b''
1 1 """
2 2 util.py - Mercurial utility functions and platform specfic implementations
3 3
4 4 Copyright 2005 K. Thananchayan <thananck@yahoo.com>
5 5 Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
6 6 Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
7 7
8 8 This software may be used and distributed according to the terms
9 9 of the GNU General Public License, incorporated herein by reference.
10 10
11 11 This contains helper routines that are independent of the SCM core and hide
12 12 platform-specific details from the core.
13 13 """
14 14
15 15 from i18n import _
16 16 import cStringIO, errno, re, shutil, sys, tempfile, traceback, error
17 17 import os, stat, threading, time, calendar, glob, osutil
18 18 import imp
19 19
20 20 # Python compatibility
21 21
_md5 = None
def md5(s):
    """Return an md5 hash object for s.

    The backend constructor is resolved lazily on first use: hashlib on
    modern Pythons, the legacy md5 module otherwise.
    """
    global _md5
    if _md5 is None:
        try:
            from hashlib import md5 as _digest
        except ImportError:
            from md5 import md5 as _digest
        _md5 = _digest
    return _md5(s)
33 33
_sha1 = None
def sha1(s):
    """Return a sha1 hash object for s.

    The backend constructor is resolved lazily on first use: hashlib on
    modern Pythons, the legacy sha module otherwise.
    """
    global _sha1
    if _sha1 is None:
        try:
            from hashlib import sha1 as _digest
        except ImportError:
            from sha import sha as _digest
        _sha1 = _digest
    return _sha1(s)
45 45
# Prefer the subprocess module for shelling out; fall back to the
# deprecated popen2 module on Pythons that lack subprocess.
try:
    import subprocess
    subprocess.Popen # trigger ImportError early
    # inherited fds can only be closed safely on posix
    closefds = os.name == 'posix'
    def popen2(cmd, mode='t', bufsize=-1):
        # run cmd through the shell, returning (stdin, stdout) pipes
        p = subprocess.Popen(cmd, shell=True, bufsize=bufsize,
                             close_fds=closefds,
                             stdin=subprocess.PIPE, stdout=subprocess.PIPE)
        return p.stdin, p.stdout
    def popen3(cmd, mode='t', bufsize=-1):
        # like popen2, but additionally capture stderr
        p = subprocess.Popen(cmd, shell=True, bufsize=bufsize,
                             close_fds=closefds,
                             stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        return p.stdin, p.stdout, p.stderr
    def Popen3(cmd, capturestderr=False, bufsize=-1):
        # emulate the popen2.Popen3 interface
        # (fromchild/tochild/childerr attributes)
        stderr = capturestderr and subprocess.PIPE or None
        p = subprocess.Popen(cmd, shell=True, bufsize=bufsize,
                             close_fds=closefds,
                             stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                             stderr=stderr)
        p.fromchild = p.stdout
        p.tochild = p.stdin
        p.childerr = p.stderr
        return p
except ImportError:
    subprocess = None
    from popen2 import Popen3
    popen2 = os.popen2
    popen3 = os.popen3
76 76
77 77
def version():
    """Return version information if available."""
    try:
        import __version__
    except ImportError:
        # no generated version module (e.g. running from a source checkout)
        return 'unknown'
    return __version__.version
85 85
# used by parsedate: formats tried in order when parsing a user-supplied date
defaultdateformats = (
    '%Y-%m-%d %H:%M:%S',
    '%Y-%m-%d %I:%M:%S%p',
    '%Y-%m-%d %H:%M',
    '%Y-%m-%d %I:%M%p',
    '%Y-%m-%d',
    '%m-%d',
    '%m/%d',
    '%m/%d/%y',
    '%m/%d/%Y',
    '%a %b %d %H:%M:%S %Y',
    '%a %b %d %I:%M:%S%p %Y',
    '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
    '%b %d %H:%M:%S %Y',
    '%b %d %I:%M:%S%p %Y',
    '%b %d %H:%M:%S',
    '%b %d %I:%M:%S%p',
    '%b %d %H:%M',
    '%b %d %I:%M%p',
    '%b %d %Y',
    '%b %d',
    '%H:%M:%S',
    # NOTE(review): trailing literal 'P' looks like a typo for '%I:%M:%S%p'
    # — confirm before changing, since it alters date parsing
    '%I:%M:%SP',
    '%H:%M',
    '%I:%M%p',
)

# additionally accepted when parsing date *ranges* (coarser granularity)
extendeddateformats = defaultdateformats + (
    "%Y",
    "%Y-%m",
    "%b",
    "%b %Y",
    )
120 120
def cachefunc(func):
    '''cache the result of function calls'''
    # XXX doesn't handle keywords args
    cache = {}
    # func.func_code is the Python 2 spelling of func.__code__
    if func.func_code.co_argcount == 1:
        # we gain a small amount of time because
        # we don't need to pack/unpack the list
        def f(arg):
            if arg not in cache:
                cache[arg] = func(arg)
            return cache[arg]
    else:
        def f(*args):
            if args not in cache:
                cache[args] = func(*args)
            return cache[args]

    return f
139 139
class propertycache(object):
    """Descriptor that computes a value once and caches it on the instance.

    The first attribute access runs the decorated function and stores the
    result on the instance under the same name, shadowing this descriptor
    for every later access.
    """
    def __init__(self, func):
        self.func = func
        self.name = func.__name__

    def __get__(self, obj, type=None):
        value = self.func(obj)
        setattr(obj, self.name, value)
        return value
148
def pipefilter(s, cmd):
    '''filter string S through command CMD, returning its output'''
    (pin, pout) = popen2(cmd, 'b')
    def writer():
        # feed s to the child's stdin from a separate thread so that a
        # large payload cannot deadlock against the child's output buffer
        try:
            pin.write(s)
            pin.close()
        except IOError, inst:
            # EPIPE just means the command stopped reading early
            if inst.errno != errno.EPIPE:
                raise

    # we should use select instead on UNIX, but this will work on most
    # systems, including Windows
    w = threading.Thread(target=writer)
    w.start()
    f = pout.read()
    pout.close()
    w.join()
    return f
159 168
def tempfilter(s, cmd):
    '''filter string S through a pair of temporary files with CMD.
    CMD is used as a template to create the real command to be run,
    with the strings INFILE and OUTFILE replaced by the real names of
    the temporary files generated.'''
    inname, outname = None, None
    try:
        infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
        fp = os.fdopen(infd, 'wb')
        fp.write(s)
        fp.close()
        outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
        os.close(outfd)
        cmd = cmd.replace('INFILE', inname)
        cmd = cmd.replace('OUTFILE', outname)
        code = os.system(cmd)
        if sys.platform == 'OpenVMS' and code & 1:
            # on OpenVMS an odd exit status means success
            code = 0
        if code: raise Abort(_("command '%s' failed: %s") %
                             (cmd, explain_exit(code)))
        return open(outname, 'rb').read()
    finally:
        # best-effort cleanup of both temp files; failures are ignored
        try:
            if inname: os.unlink(inname)
        except: pass
        try:
            if outname: os.unlink(outname)
        except: pass
188 197
# maps a filter-command prefix to its implementation; commands without a
# known prefix are handled by pipefilter
filtertable = {
    'tempfile:': tempfilter,
    'pipe:': pipefilter,
    }
193 202
def filter(s, cmd):
    "filter a string through a command that transforms its input to its output"
    # NOTE: intentionally shadows the builtin filter(); kept for API stability
    for name, fn in filtertable.iteritems():
        if cmd.startswith(name):
            return fn(s, cmd[len(name):].lstrip())
    return pipefilter(s, cmd)
200 209
def binary(s):
    """return true if a string is binary data"""
    # empty/None input is never considered binary; otherwise the presence
    # of a NUL byte is the heuristic
    if not s:
        return False
    return '\0' in s
204 213
def sort(l):
    """sort l in place and return it when it is a list; otherwise return
    a new sorted list built from l"""
    if isinstance(l, list):
        l.sort()
        return l
    return sorted(l)
210 219
def increasingchunks(source, min=1024, max=65536):
    '''return no less than min bytes per chunk while data remains,
    doubling min after each chunk until it reaches max'''
    def log2(x):
        # index of the highest set bit (0 for x == 0)
        if not x:
            return 0
        bits = 0
        while x:
            x >>= 1
            bits += 1
        return bits - 1

    pending = []
    size = 0
    for piece in source:
        pending.append(piece)
        size += len(piece)
        if size >= min:
            if min < max:
                # double the threshold, but jump straight to the size we
                # actually saw if it was even larger, capped at max
                min = min << 1
                nmin = 1 << log2(size)
                if nmin > min:
                    min = nmin
                if min > max:
                    min = max
            yield ''.join(pending)
            size = 0
            pending = []
    if pending:
        yield ''.join(pending)
241 250
# re-export the abort exception so callers need not import error directly
Abort = error.Abort

# trivial match-all / match-none predicates, used as matcher defaults
def always(fn): return True
def never(fn): return False
246 255
def patkind(name, default):
    """Split a string into an optional pattern kind prefix and the
    actual pattern.

    A recognized 'kind:' prefix yields the two-element list from split();
    otherwise (default, name) is returned as a tuple.
    """
    for kind in ('re', 'glob', 'path', 'relglob', 'relpath', 'relre'):
        if name.startswith(kind + ':'):
            return name.split(':', 1)
    return default, name
253 262
def globre(pat, head='^', tail='$'):
    "convert a glob pattern into a regexp"
    i, n = 0, len(pat)
    res = ''
    group = 0
    # one-character lookahead; returns False at end of input
    def peek(): return i < n and pat[i]
    while i < n:
        c = pat[i]
        i = i+1
        if c == '*':
            if peek() == '*':
                # '**' crosses directory boundaries, '*' does not
                i += 1
                res += '.*'
            else:
                res += '[^/]*'
        elif c == '?':
            res += '.'
        elif c == '[':
            # scan for the matching ']'; a '!' or ']' directly after the
            # '[' is part of the class, not its terminator
            j = i
            if j < n and pat[j] in '!]':
                j += 1
            while j < n and pat[j] != ']':
                j += 1
            if j >= n:
                # unterminated class: treat '[' as a literal
                res += '\\['
            else:
                stuff = pat[i:j].replace('\\','\\\\')
                i = j + 1
                if stuff[0] == '!':
                    # glob negation becomes regex class negation
                    stuff = '^' + stuff[1:]
                elif stuff[0] == '^':
                    stuff = '\\' + stuff
                res = '%s[%s]' % (res, stuff)
        elif c == '{':
            group += 1
            res += '(?:'
        elif c == '}' and group:
            res += ')'
            group -= 1
        elif c == ',' and group:
            # ',' separates alternatives only inside {...}
            res += '|'
        elif c == '\\':
            p = peek()
            if p:
                i += 1
                res += re.escape(p)
            else:
                res += re.escape(c)
        else:
            res += re.escape(c)
    return head + res + tail
305 314
# characters that mark a glob pattern (a dict is used for fast membership)
_globchars = {'[': 1, '{': 1, '*': 1, '?': 1}
307 316
def pathto(root, n1, n2):
    '''return the relative path from one place to another.
    root should use os.sep to separate directories
    n1 should use os.sep to separate directories
    n2 should use "/" to separate directories
    returns an os.sep-separated path.

    If n1 is a relative path, it's assumed it's
    relative to root.
    n2 should always be relative to root.
    '''
    if not n1: return localpath(n2)
    if os.path.isabs(n1):
        if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
            # different windows drives: no relative path exists
            return os.path.join(root, localpath(n2))
        n2 = '/'.join((pconvert(root), n2))
    a, b = splitpath(n1), n2.split('/')
    a.reverse()
    b.reverse()
    # drop the common prefix, then climb out of whatever remains of n1
    while a and b and a[-1] == b[-1]:
        a.pop()
        b.pop()
    b.reverse()
    return os.sep.join((['..'] * len(a)) + b) or '.'
332 341
def canonpath(root, cwd, myname):
    """return the canonical path of myname, given cwd and root"""
    # normalize root so it always ends with exactly one separator
    if root == os.sep:
        rootsep = os.sep
    elif endswithsep(root):
        rootsep = root
    else:
        rootsep = root + os.sep
    name = myname
    if not os.path.isabs(name):
        name = os.path.join(root, cwd, name)
    name = os.path.normpath(name)
    audit_path = path_auditor(root)
    if name != rootsep and name.startswith(rootsep):
        # simple case: name lies textually under root
        name = name[len(rootsep):]
        audit_path(name)
        return pconvert(name)
    elif name == root:
        return ''
    else:
        # Determine whether `name' is in the hierarchy at or beneath `root',
        # by iterating name=dirname(name) until that causes no change (can't
        # check name == '/', because that doesn't work on windows). For each
        # `name', compare dev/inode numbers. If they match, the list `rel'
        # holds the reversed list of components making up the relative file
        # name we want.
        root_st = os.stat(root)
        rel = []
        while True:
            try:
                name_st = os.stat(name)
            except OSError:
                break
            if samestat(name_st, root_st):
                if not rel:
                    # name was actually the same as root (maybe a symlink)
                    return ''
                rel.reverse()
                name = os.path.join(*rel)
                audit_path(name)
                return pconvert(name)
            dirname, basename = os.path.split(name)
            rel.append(basename)
            if dirname == name:
                break
            name = dirname

        raise Abort('%s not under root' % myname)
381 390
def matcher(canonroot, cwd='', names=[], inc=[], exc=[], src=None, dflt_pat='glob'):
    """build a function to match a set of file patterns

    arguments:
    canonroot - the canonical root of the tree you're matching against
    cwd - the current working directory, if relevant
    names - patterns to find
    inc - patterns to include
    exc - patterns to exclude
    dflt_pat - if a pattern in names has no explicit type, assume this one
    src - where these patterns came from (e.g. .hgignore)

    a pattern is one of:
    'glob:<glob>' - a glob relative to cwd
    're:<regexp>' - a regular expression
    'path:<path>' - a path relative to canonroot
    'relglob:<glob>' - an unrooted glob (*.c matches C files in all dirs)
    'relpath:<path>' - a path relative to cwd
    'relre:<regexp>' - a regexp that doesn't have to match the start of a name
    '<something>' - one of the cases above, selected by the dflt_pat argument

    returns:
    a 3-tuple containing
    - list of roots (places where one should start a recursive walk of the fs);
      this often matches the explicit non-pattern names passed in, but also
      includes the initial part of glob: patterns that has no glob characters
    - a bool match(filename) function
    - a bool indicating if any patterns were passed in
    """
    # NOTE(review): the [] defaults are the classic mutable-default pitfall,
    # but they are only read here, never mutated

    # a common case: no patterns at all
    if not names and not inc and not exc:
        return [], always, False

    def contains_glob(name):
        for c in name:
            if c in _globchars: return True
        return False

    def regex(kind, name, tail):
        '''convert a pattern into a regular expression'''
        if not name:
            return ''
        if kind == 're':
            return name
        elif kind == 'path':
            return '^' + re.escape(name) + '(?:/|$)'
        elif kind == 'relglob':
            return globre(name, '(?:|.*/)', tail)
        elif kind == 'relpath':
            return re.escape(name) + '(?:/|$)'
        elif kind == 'relre':
            if name.startswith('^'):
                return name
            return '.*' + name
        return globre(name, '', tail)

    def matchfn(pats, tail):
        """build a matching function from a set of patterns"""
        if not pats:
            return
        try:
            pat = '(?:%s)' % '|'.join([regex(k, p, tail) for (k, p) in pats])
            if len(pat) > 20000:
                raise OverflowError()
            return re.compile(pat).match
        except OverflowError:
            # We're using a Python with a tiny regex engine and we
            # made it explode, so we'll divide the pattern list in two
            # until it works
            l = len(pats)
            if l < 2:
                raise
            a, b = matchfn(pats[:l//2], tail), matchfn(pats[l//2:], tail)
            return lambda s: a(s) or b(s)
        except re.error:
            # recompile one-by-one to pinpoint the offending pattern
            for k, p in pats:
                try:
                    re.compile('(?:%s)' % regex(k, p, tail))
                except re.error:
                    if src:
                        raise Abort("%s: invalid pattern (%s): %s" %
                                    (src, k, p))
                    else:
                        raise Abort("invalid pattern (%s): %s" % (k, p))
            raise Abort("invalid pattern")

    def globprefix(pat):
        '''return the non-glob prefix of a path, e.g. foo/* -> foo'''
        root = []
        for p in pat.split('/'):
            if contains_glob(p): break
            root.append(p)
        return '/'.join(root) or '.'

    def normalizepats(names, default):
        # canonicalize every pattern and collect walk roots along the way
        pats = []
        roots = []
        anypats = False
        for kind, name in [patkind(p, default) for p in names]:
            if kind in ('glob', 'relpath'):
                name = canonpath(canonroot, cwd, name)
            elif kind in ('relglob', 'path'):
                name = normpath(name)

            pats.append((kind, name))

            if kind in ('glob', 're', 'relglob', 'relre'):
                anypats = True

            if kind == 'glob':
                root = globprefix(name)
                roots.append(root)
            elif kind in ('relpath', 'path'):
                roots.append(name or '.')
            elif kind == 'relglob':
                roots.append('.')
        return roots, pats, anypats

    roots, pats, anypats = normalizepats(names, dflt_pat)

    patmatch = matchfn(pats, '$') or always
    incmatch = always
    if inc:
        dummy, inckinds, dummy = normalizepats(inc, 'glob')
        incmatch = matchfn(inckinds, '(?:/|$)')
    excmatch = never
    if exc:
        dummy, exckinds, dummy = normalizepats(exc, 'glob')
        excmatch = matchfn(exckinds, '(?:/|$)')

    if not names and inc and not exc:
        # common case: hgignore patterns
        match = incmatch
    else:
        match = lambda fn: incmatch(fn) and not excmatch(fn) and patmatch(fn)

    return (roots, match, (inc or exc or anypats) and True)
520 529
# cached path of the 'hg' executable; resolved lazily by hgexecutable()
_hgexecutable = None
522 531
def main_is_frozen():
    """return True if we are a frozen executable.

    The code supports py2exe (most common, Windows only) and tools/freeze
    (portable, not much used).
    """
    return (hasattr(sys, "frozen") or # new py2exe
            hasattr(sys, "importers") or # old py2exe
            imp.is_frozen("__main__")) # tools/freeze
532 541
def hgexecutable():
    """return location of the 'hg' executable.

    Defaults to $HG or 'hg' in the search path.
    """
    if _hgexecutable is None:
        hg = os.environ.get('HG')
        if hg:
            set_hgexecutable(hg)
        elif main_is_frozen():
            # in a frozen binary the interpreter itself is 'hg'
            set_hgexecutable(sys.executable)
        else:
            set_hgexecutable(find_exe('hg') or 'hg')
    return _hgexecutable
547 556
def set_hgexecutable(path):
    """set location of the 'hg' executable"""
    # stored in the module-level cache read by hgexecutable()
    global _hgexecutable
    _hgexecutable = path
552 561
def system(cmd, environ={}, cwd=None, onerr=None, errprefix=None):
    '''enhanced shell command execution.
    run with environment maybe modified, maybe in different dir.

    if command fails and onerr is None, return status. if ui object,
    print error message and return status, else raise onerr object as
    exception.'''
    # NOTE: the environ={} default is shared between calls but only read here
    def py2shell(val):
        'convert python object into string that is useful to shell'
        if val in (None, False):
            return '0'
        if val == True:
            return '1'
        return str(val)
    # remember the values we are about to clobber so they can be restored
    oldenv = {}
    for k in environ:
        oldenv[k] = os.environ.get(k)
    if cwd is not None:
        oldcwd = os.getcwd()
    origcmd = cmd
    if os.name == 'nt':
        # quote the whole command line for cmd.exe
        cmd = '"%s"' % cmd
    try:
        for k, v in environ.iteritems():
            os.environ[k] = py2shell(v)
        os.environ['HG'] = hgexecutable()
        if cwd is not None and oldcwd != cwd:
            os.chdir(cwd)
        rc = os.system(cmd)
        if sys.platform == 'OpenVMS' and rc & 1:
            # on OpenVMS an odd exit status means success
            rc = 0
        if rc and onerr:
            errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
                                explain_exit(rc)[0])
            if errprefix:
                errmsg = '%s: %s' % (errprefix, errmsg)
            try:
                # a ui-like object gets a warning; anything else is raised
                onerr.warn(errmsg + '\n')
            except AttributeError:
                raise onerr(errmsg)
        return rc
    finally:
        # always restore the environment and working directory
        for k, v in oldenv.iteritems():
            if v is None:
                del os.environ[k]
            else:
                os.environ[k] = v
        if cwd is not None and oldcwd != cwd:
            os.chdir(oldcwd)
602 611
def checksignature(func):
    '''wrap a function with code to check for calling errors'''
    def check(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except TypeError:
            # a traceback depth of 1 means the TypeError came from the call
            # itself (mismatched signature), not from inside func's body
            if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
                raise error.SignatureError
            raise

    return check
614 623
# os.path.lexists is not available on python2.3
def lexists(filename):
    "test whether a file with this name exists. does not follow symlinks"
    try:
        os.lstat(filename)
        return True
    except:
        return False
623 632
def rename(src, dst):
    """forcibly rename a file"""
    try:
        os.rename(src, dst)
    except OSError, err: # FIXME: check err (EEXIST ?)
        # on windows, rename to existing file is not allowed, so we
        # must delete destination first. but if file is open, unlink
        # schedules it for delete but does not delete it. rename
        # happens immediately even for open files, so we rename
        # destination to a temporary name, then delete that. then
        # rename is safe to do.
        temp = dst + "-force-rename"
        os.rename(dst, temp)
        os.unlink(temp)
        os.rename(src, dst)
639 648
def unlink(f):
    """unlink and remove the directory if it is empty"""
    os.unlink(f)
    # try removing directories that might now be empty
    try:
        os.removedirs(os.path.dirname(f))
    except OSError:
        # a non-empty (or otherwise unremovable) parent is fine
        pass
648 657
def copyfile(src, dest):
    "copy a file, preserving mode and atime/mtime"
    if os.path.islink(src):
        # recreate the symlink itself rather than copying its target
        try:
            os.unlink(dest)
        except:
            pass
        os.symlink(os.readlink(src), dest)
    else:
        try:
            shutil.copyfile(src, dest)
            shutil.copystat(src, dest)
        except shutil.Error, inst:
            raise Abort(str(inst))
663 672
def copyfiles(src, dst, hardlink=None):
    """Copy a directory tree using hardlinks if possible"""

    if hardlink is None:
        # default to hardlinking only when src and dst share a device
        hardlink = (os.stat(src).st_dev ==
                    os.stat(os.path.dirname(dst)).st_dev)

    if os.path.isdir(src):
        os.mkdir(dst)
        for name, kind in osutil.listdir(src):
            srcname = os.path.join(src, name)
            dstname = os.path.join(dst, name)
            copyfiles(srcname, dstname, hardlink)
    else:
        if hardlink:
            try:
                os_link(src, dst)
            except (IOError, OSError):
                # hardlinking failed: fall back to copying for the rest
                hardlink = False
                shutil.copy(src, dst)
        else:
            shutil.copy(src, dst)
686 695
class path_auditor(object):
    '''ensure that a filesystem path contains no banned components.
    the following properties of a path are checked:

    - under top-level .hg
    - starts at the root of a windows drive
    - contains ".."
    - traverses a symlink (e.g. a/symlink_here/b)
    - inside a nested repository'''

    def __init__(self, root):
        # caches of already-validated full paths and directory prefixes
        self.audited = set()
        self.auditeddir = set()
        self.root = root

    def __call__(self, path):
        if path in self.audited:
            return
        normpath = os.path.normcase(path)
        parts = splitpath(normpath)
        if (os.path.splitdrive(path)[0]
            or parts[0].lower() in ('.hg', '.hg.', '')
            or os.pardir in parts):
            raise Abort(_("path contains illegal component: %s") % path)
        if '.hg' in path.lower():
            # cheap substring test first, then a per-component check
            lparts = [p.lower() for p in parts]
            for p in '.hg', '.hg.':
                if p in lparts[1:]:
                    pos = lparts.index(p)
                    base = os.path.join(*parts[:pos])
                    raise Abort(_('path %r is inside repo %r') % (path, base))
        def check(prefix):
            # lstat one directory prefix, rejecting symlinks and nested repos
            curpath = os.path.join(self.root, prefix)
            try:
                st = os.lstat(curpath)
            except OSError, err:
                # EINVAL can be raised as invalid path syntax under win32.
                # They must be ignored for patterns can be checked too.
                if err.errno not in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL):
                    raise
            else:
                if stat.S_ISLNK(st.st_mode):
                    raise Abort(_('path %r traverses symbolic link %r') %
                                (path, prefix))
                elif (stat.S_ISDIR(st.st_mode) and
                      os.path.isdir(os.path.join(curpath, '.hg'))):
                    raise Abort(_('path %r is inside repo %r') %
                                (path, prefix))
        parts.pop()
        prefixes = []
        for n in range(len(parts)):
            prefix = os.sep.join(parts)
            if prefix in self.auditeddir:
                break
            check(prefix)
            prefixes.append(prefix)
            parts.pop()

        self.audited.add(path)
        # only add prefixes to the cache after checking everything: we don't
        # want to add "foo/bar/baz" before checking if there's a "foo/.hg"
        self.auditeddir.update(prefixes)
749 758
def nlinks(pathname):
    """Return number of hardlinks for the given file."""
    # lstat so a symlink reports its own link count, not its target's
    st = os.lstat(pathname)
    return st.st_nlink
753 762
if hasattr(os, 'link'):
    os_link = os.link
else:
    def os_link(src, dst):
        # platforms whose os module lacks hardlink support
        raise OSError(0, _("Hardlinks not supported"))
759 768
def lookup_reg(key, name=None, scope=None):
    # stub for non-Windows platforms; the windows module's star-import
    # below replaces it with a real registry lookup
    return None
762 771
# pull in the platform-specific implementations (posixfile, pconvert, ...)
if os.name == 'nt':
    from windows import *
    def expand_glob(pats):
        '''On Windows, expand the implicit globs in a list of patterns'''
        ret = []
        for p in pats:
            kind, name = patkind(p, None)
            if kind is None:
                # untyped pattern: let the glob module expand it
                globbed = glob.glob(name)
                if globbed:
                    ret.extend(globbed)
                    continue
            # if we couldn't expand the glob, just keep it around
            ret.append(p)
        return ret
else:
    from posix import *
780 789
def makelock(info, pathname):
    # preferred form: a symlink whose target stores info (atomic creation)
    try:
        return os.symlink(info, pathname)
    except OSError, why:
        if why.errno == errno.EEXIST:
            raise
    except AttributeError: # no symlink in os
        pass

    # fallback: an exclusively-created regular file containing info
    ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
    os.write(ld, info)
    os.close(ld)
793 802
def readlock(pathname):
    # symlink-style lock first (see makelock); EINVAL means it is a
    # regular file, ENOSYS that symlinks are unsupported here
    try:
        return os.readlink(pathname)
    except OSError, why:
        if why.errno not in (errno.EINVAL, errno.ENOSYS):
            raise
    except AttributeError: # no symlink in os
        pass
    return posixfile(pathname).read()
803 812
def fstat(fp):
    '''stat file object that may not have fileno method.'''
    try:
        return os.fstat(fp.fileno())
    except AttributeError:
        # no fileno(): fall back to stat'ing by name
        return os.stat(fp.name)
810 819
811 820 # File system features
812 821
def checkcase(path):
    """
    Check whether the given path is on a case-sensitive filesystem

    Requires a path (like /foo/.hg) ending with a foldable final
    directory component.
    """
    st = os.stat(path)
    dirname, base = os.path.split(path)
    flipped = os.path.join(dirname, base.upper())
    if flipped == path:
        flipped = os.path.join(dirname, base.lower())
    try:
        # the same stat result means the case-flipped name is the same file,
        # i.e. the filesystem is case-insensitive
        return os.stat(flipped) != st
    except:
        # the flipped name does not exist at all: definitely case-sensitive
        return True
832 841
# per-directory listing cache shared by all fspath() calls
_fspathcache = {}
def fspath(name, root):
    '''Get name in the case stored in the filesystem

    The name is either relative to root, or it is an absolute path starting
    with root. Note that this function is unnecessary, and should not be
    called, for case-sensitive filesystems (simply because it's expensive).

    Returns None when name does not exist under root.
    '''
    # If name is absolute, make it relative
    if name.lower().startswith(root.lower()):
        l = len(root)
        # NOTE(review): name == root exactly would make name[l] raise
        # IndexError — presumably callers always pass a path below root
        if name[l] == os.sep or name[l] == os.altsep:
            l = l + 1
        name = name[l:]

    if not os.path.exists(os.path.join(root, name)):
        return None

    seps = os.sep
    if os.altsep:
        seps = seps + os.altsep
    # Protect backslashes. This gets silly very quickly.
    # Fix: str.replace returns a new string; the previous code discarded the
    # result, leaving '\' unescaped inside the character class built below.
    seps = seps.replace('\\', '\\\\')
    pattern = re.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
    dir = os.path.normcase(os.path.normpath(root))
    result = []
    for part, sep in pattern.findall(name):
        if sep:
            # separator runs are passed through untouched
            result.append(sep)
            continue

        if dir not in _fspathcache:
            _fspathcache[dir] = os.listdir(dir)
        contents = _fspathcache[dir]

        # replace each component with its on-disk spelling
        lpart = part.lower()
        for n in contents:
            if n.lower() == lpart:
                result.append(n)
                break
        else:
            # Cannot happen, as the file exists!
            result.append(part)
        dir = os.path.join(dir, lpart)

    return ''.join(result)
879 888
def checkexec(path):
    """
    Check whether the given path is on a filesystem with UNIX-like exec flags

    Requires a directory (like /foo/.hg)
    """

    # VFAT on some Linux versions can flip mode but it doesn't persist
    # a FS remount. Frequently we can detect it if files are created
    # with exec bit on.

    try:
        EXECFLAGS = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
        fh, fn = tempfile.mkstemp("", "", path)
        try:
            os.close(fh)
            m = os.stat(fn).st_mode & 0777
            # probe: flip the exec bits and see whether the change sticks
            new_file_has_exec = m & EXECFLAGS
            os.chmod(fn, m ^ EXECFLAGS)
            exec_flags_cannot_flip = ((os.stat(fn).st_mode & 0777) == m)
        finally:
            os.unlink(fn)
    except (IOError, OSError):
        # we don't care, the user probably won't be able to commit anyway
        return False
    return not (new_file_has_exec or exec_flags_cannot_flip)
906 915
def checklink(path):
    """check whether the given path is on a symlink-capable filesystem"""
    # mktemp is not racy because symlink creation will fail if the
    # file already exists
    probe = tempfile.mktemp(dir=path)
    try:
        os.symlink(".", probe)
        os.unlink(probe)
    except (OSError, AttributeError):
        # OSError: fs refuses symlinks; AttributeError: os has no symlink
        return False
    return True
918 927
def needbinarypatch():
    """return True if patches should be applied in binary mode by default."""
    # only Windows defaults to binary-mode patching
    return os.name == 'nt'
922 931
def endswithsep(path):
    '''Check path ends with os.sep or os.altsep.

    Note: falsy results may be False or None (the raw os.altsep and-chain
    is preserved for compatibility).
    '''
    if path.endswith(os.sep):
        return True
    return os.altsep and path.endswith(os.altsep)
926 935
def splitpath(path):
    '''Split path by os.sep.
    Note that this function does not use os.altsep because this is
    an alternative of simple "xxx.split(os.sep)".
    It is recommended to use os.path.normpath() before using this
    function if need.'''
    return path.split(os.sep)
934 943
def gui():
    '''Are we running in a GUI?'''
    # windows and mac always have one; elsewhere require $DISPLAY
    # (note: may return the $DISPLAY string rather than a strict bool)
    return os.name == "nt" or os.name == "mac" or os.environ.get("DISPLAY")
938 947
def mktempcopy(name, emptyok=False, createmode=None):
    """Create a temporary file with the same contents from name

    The permission bits are copied from the original file.

    If the temporary file is going to be truncated immediately, you
    can use emptyok=True as an optimization.

    Returns the name of the temporary file.
    """
    d, fn = os.path.split(name)
    fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
    os.close(fd)
    # Temporary files are created with mode 0600, which is usually not
    # what we want. If the original file already exists, just copy
    # its mode. Otherwise, manually obey umask.
    try:
        st_mode = os.lstat(name).st_mode & 0777
    except OSError, inst:
        if inst.errno != errno.ENOENT:
            raise
        st_mode = createmode
        if st_mode is None:
            st_mode = ~umask
        st_mode &= 0666
    os.chmod(temp, st_mode)
    if emptyok:
        return temp
    try:
        try:
            ifp = posixfile(name, "rb")
        except IOError, inst:
            if inst.errno == errno.ENOENT:
                # original is gone: the empty temp file is a faithful copy
                return temp
            if not getattr(inst, 'filename', None):
                inst.filename = name
            raise
        ofp = posixfile(temp, "wb")
        for chunk in filechunkiter(ifp):
            ofp.write(chunk)
        ifp.close()
        ofp.close()
    except:
        # do not leave a half-written temp file behind on any failure
        try: os.unlink(temp)
        except: pass
        raise
    return temp
986 995
class atomictempfile(posixfile):
    """file-like object that atomically updates a file

    All writes will be redirected to a temporary copy of the original
    file. When rename is called, the copy is renamed to the original
    name, making the changes visible.
    """
    def __init__(self, name, mode, createmode):
        # name-mangled (__name) to avoid clashing with file attributes
        self.__name = name
        self.temp = mktempcopy(name, emptyok=('w' in mode),
                               createmode=createmode)
        posixfile.__init__(self, self.temp, mode)

    def rename(self):
        # close first so all buffered data reaches the temp file, then
        # atomically move it over the original
        if not self.closed:
            posixfile.close(self)
            rename(self.temp, localpath(self.__name))

    def __del__(self):
        # rename() was never called: discard the temp copy
        if not self.closed:
            try:
                os.unlink(self.temp)
            except: pass
            posixfile.close(self)
1011 1020
def makedirs(name, mode=None):
    """recursive directory creation with parent mode inheritance"""
    try:
        os.mkdir(name)
        if mode is not None:
            os.chmod(name, mode)
        return
    except OSError, err:
        if err.errno == errno.EEXIST:
            # already there (possibly created concurrently); done
            return
        if err.errno != errno.ENOENT:
            raise
    # mkdir failed because the parent is missing: build the parent
    # chain first, then retry this directory
    parent = os.path.abspath(os.path.dirname(name))
    makedirs(parent, mode)
    makedirs(name, mode)
1027 1036
class opener(object):
    """Open files relative to a base directory

    This class is used to hide the details of COW semantics and
    remote file access from higher level code.
    """
    def __init__(self, base, audit=True):
        # base: directory against which all relative paths are resolved
        # audit: when True, reject paths that escape base or look unsafe
        self.base = base
        if audit:
            self.audit_path = path_auditor(base)
        else:
            self.audit_path = always
        self.createmode = None

    def __getattr__(self, name):
        # lazily determine whether base supports symlinks; assigning
        # the attribute caches the answer so this runs at most once
        if name == '_can_symlink':
            self._can_symlink = checklink(self.base)
            return self._can_symlink
        raise AttributeError(name)

    def _fixfilemode(self, name):
        # apply the configured creation mode, minus execute bits
        if self.createmode is None:
            return
        os.chmod(name, self.createmode & 0666)

    def __call__(self, path, mode="r", text=False, atomictemp=False):
        self.audit_path(path)
        f = os.path.join(self.base, path)

        if not text and "b" not in mode:
            mode += "b" # for that other OS

        nlink = -1
        if mode not in ("r", "rb"):
            try:
                nlink = nlinks(f)
            except OSError:
                # file does not exist yet; make sure its directory does
                nlink = 0
                d = os.path.dirname(f)
                if not os.path.isdir(d):
                    makedirs(d, self.createmode)
            if atomictemp:
                return atomictempfile(f, mode, self.createmode)
            if nlink > 1:
                # hardlinked elsewhere: break the link before writing
                # (copy-on-write)
                rename(mktempcopy(f), f)
        fp = posixfile(f, mode)
        if nlink == 0:
            # freshly created file: fix up its permission bits
            self._fixfilemode(f)
        return fp

    def symlink(self, src, dst):
        self.audit_path(dst)
        linkname = os.path.join(self.base, dst)
        try:
            os.unlink(linkname)
        except OSError:
            pass

        dirname = os.path.dirname(linkname)
        if not os.path.exists(dirname):
            makedirs(dirname, self.createmode)

        if self._can_symlink:
            try:
                os.symlink(src, linkname)
            except OSError, err:
                raise OSError(err.errno, _('could not symlink to %r: %s') %
                              (src, err.strerror), linkname)
        else:
            # no symlink support: store the link target as file content
            f = self(dst, "w")
            f.write(src)
            f.close()
            self._fixfilemode(dst)
1101 1110
class chunkbuffer(object):
    """Allow arbitrary sized chunks of data to be efficiently read from an
    iterator over chunks of arbitrary size."""

    def __init__(self, in_iter):
        """in_iter is the iterator that's iterating over the input chunks.
        targetsize is how big a buffer to try to maintain."""
        self.iter = iter(in_iter)
        self.buf = ''
        self.targetsize = 2**16

    def read(self, l):
        """Read L bytes of data from the iterator of chunks of data.
        Returns less than L bytes if the iterator runs dry."""
        if l > len(self.buf) and self.iter:
            # Clamp to a multiple of self.targetsize
            targetsize = max(l, self.targetsize)
            collector = cStringIO.StringIO()
            collector.write(self.buf)
            collected = len(self.buf)
            for chunk in self.iter:
                collector.write(chunk)
                collected += len(chunk)
                if collected >= targetsize:
                    break
            if collected < targetsize:
                # the source ran dry; mark it exhausted (False is
                # falsy) so later reads just drain self.buf
                self.iter = False
            self.buf = collector.getvalue()
        if len(self.buf) == l:
            s, self.buf = str(self.buf), ''
        else:
            # buffer() gives a zero-copy view of the remaining bytes
            s, self.buf = self.buf[:l], buffer(self.buf, l)
        return s
1135 1144
def filechunkiter(f, size=65536, limit=None):
    """Create a generator that produces the data in the file size
    (default 65536) bytes at a time, up to optional limit (default is
    to read all data).  Chunks may be less than size bytes if the
    chunk is the last chunk in the file, or the file is a socket or
    some other type of file that sometimes reads less data than is
    requested."""
    assert size >= 0
    assert limit is None or limit >= 0
    while True:
        # never request more than what remains of the limit
        if limit is None:
            nbytes = size
        else:
            nbytes = min(limit, size)
        # a zero-byte request means we are done (size or limit hit 0)
        if not nbytes:
            break
        chunk = f.read(nbytes)
        if not chunk:
            break
        if limit:
            limit -= len(chunk)
        yield chunk
1152 1161
def makedate():
    """Return the current local time as a (unixtime, tzoffset) pair.

    tzoffset is the local zone's distance from UTC in seconds (west of
    UTC is positive), honoring DST when it is currently in effect.
    """
    now = time.localtime()
    offset = time.timezone
    if time.daylight and now[8] == 1:
        # tm_isdst is set: use the DST-adjusted zone offset
        offset = time.altzone
    return time.mktime(now), offset
1160 1169
def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
    """represent a (unixtime, offset) tuple as a localized time.
    unixtime is seconds since the epoch, and offset is the time zone's
    number of seconds away from UTC.  "%1" and "%2" in the format
    expand to the sign+hours and the minutes of the numeric offset."""
    t, tz = date or makedate()
    if "%1" in format or "%2" in format:
        # positive offsets are west of UTC and display with a '-' sign
        if tz > 0:
            sign = "-"
        else:
            sign = "+"
        minutes = abs(tz) // 60
        format = format.replace("%1", "%c%02d" % (sign, minutes // 60))
        format = format.replace("%2", "%02d" % (minutes % 60))
    return time.strftime(format, time.gmtime(float(t) - tz))
1174 1183
def shortdate(date=None):
    """turn (timestamp, tzoff) tuple into ISO 8601 date."""
    return datestr(date, format='%Y-%m-%d')
1178 1187
def strdate(string, format, defaults=[]):
    """parse a localized time string and return a (unixtime, offset) tuple.
    if the string cannot be parsed, ValueError is raised."""
    def hastimezone(s):
        # return the UTC offset in seconds if the last word of s is a
        # recognizable time zone, or None otherwise
        tz = s.split()[-1]
        if tz[0] in "+-" and len(tz) == 5 and tz[1:].isdigit():
            sign = (tz[0] == "+") and 1 or -1
            hours = int(tz[1:3])
            minutes = int(tz[3:5])
            return -sign * (hours * 60 + minutes) * 60
        if tz in ("GMT", "UTC"):
            return 0
        return None

    # NOTE: unixtime = localunixtime + offset
    offset = hastimezone(string)
    date = string
    if offset is not None:
        # strip the zone word before handing the rest to strptime
        date = " ".join(string.split()[:-1])

    # fill in any elements missing from the input using defaults
    for part in defaults:
        present = [True for p in part if ("%" + p) in format]
        if not present:
            date += "@" + defaults[part]
            format += "@%" + part[0]

    timetuple = time.strptime(date, format)
    localunixtime = int(calendar.timegm(timetuple))
    if offset is None:
        # no explicit zone: interpret the string in the local timezone
        unixtime = int(time.mktime(timetuple))
        offset = unixtime - localunixtime
    else:
        unixtime = localunixtime + offset
    return unixtime, offset
1214 1223
def parsedate(date, formats=None, defaults=None):
    """parse a localized date/time string and return a (unixtime, offset) tuple.

    The date may be a "unixtime offset" string or in one of the specified
    formats. If the date already is a (unixtime, offset) tuple, it is returned.
    """
    if not date:
        return 0, 0
    if isinstance(date, tuple) and len(date) == 2:
        return date
    if not formats:
        formats = defaultdateformats
    date = date.strip()
    try:
        # fast path: the internal "unixtime offset" representation
        when, offset = map(int, date.split(' '))
    except ValueError:
        # fill out defaults
        if not defaults:
            defaults = {}
        now = makedate()
        for part in "d mb yY HI M S".split():
            if part not in defaults:
                if part[0] in "HMS":
                    # missing time-of-day parts default to zero
                    defaults[part] = "00"
                else:
                    # missing date parts default to today
                    defaults[part] = datestr(now, "%" + part[0])

        # try each candidate format until one parses; the for/else
        # fires Abort only when every format failed
        for format in formats:
            try:
                when, offset = strdate(date, format, defaults)
            except (ValueError, OverflowError):
                pass
            else:
                break
        else:
            raise Abort(_('invalid date: %r ') % date)
    # validate explicit (probably user-specified) date and
    # time zone offset. values must fit in signed 32 bits for
    # current 32-bit linux runtimes. timezones go from UTC-12
    # to UTC+14
    if abs(when) > 0x7fffffff:
        raise Abort(_('date exceeds 32 bits: %d') % when)
    if offset < -50400 or offset > 43200:
        raise Abort(_('impossible time zone offset: %d') % offset)
    return when, offset
1260 1269
def matchdate(date):
    """Return a function that matches a given date match specifier

    Formats include:

    '{date}' match a given date to the accuracy provided

    '<{date}' on or before a given date

    '>{date}' on or after a given date

    """

    def lower(date):
        # earliest timestamp the (possibly partial) date could mean
        d = dict(mb="1", d="1")
        return parsedate(date, extendeddateformats, d)[0]

    def upper(date):
        # latest timestamp the (possibly partial) date could mean; try
        # the longest month lengths first, falling back for months
        # with fewer days
        d = dict(mb="12", HI="23", M="59", S="59")
        for days in "31 30 29".split():
            try:
                d["d"] = days
                return parsedate(date, extendeddateformats, d)[0]
            except:
                pass
        d["d"] = "28"
        return parsedate(date, extendeddateformats, d)[0]

    date = date.strip()
    if date[0] == "<":
        when = upper(date[1:])
        return lambda x: x <= when
    elif date[0] == ">":
        when = lower(date[1:])
        return lambda x: x >= when
    elif date[0] == "-":
        # '-N': match anything within the last N days
        try:
            days = int(date[1:])
        except ValueError:
            raise Abort(_("invalid day spec: %s") % date[1:])
        when = makedate()[0] - days * 3600 * 24
        return lambda x: x >= when
    elif " to " in date:
        # 'A to B': inclusive range between the two dates
        a, b = date.split(" to ")
        start, stop = lower(a), upper(b)
        return lambda x: x >= start and x <= stop
    else:
        start, stop = lower(date), upper(date)
        return lambda x: x >= start and x <= stop
1310 1319
def shortuser(user):
    """Return a short representation of a user name or email address."""
    # drop everything from the first '@' on (the mail domain)
    pos = user.find('@')
    if pos >= 0:
        user = user[:pos]
    # keep only what follows the first '<' (drop the real name)
    pos = user.find('<')
    if pos >= 0:
        user = user[pos + 1:]
    # truncate at the first space, then at the first dot
    for sep in (' ', '.'):
        pos = user.find(sep)
        if pos >= 0:
            user = user[:pos]
    return user
1326 1335
def email(author):
    '''get email of author.'''
    # take the text between '<' and '>'; with neither present the
    # whole string is assumed to be a bare address
    start = author.find('<') + 1
    end = author.find('>')
    if end == -1:
        end = None
    return author[start:end]
1332 1341
def ellipsis(text, maxlength=400):
    """Trim string to at most maxlength (default: 400) characters."""
    if len(text) > maxlength:
        # reserve three characters for the trailing dots
        return text[:maxlength - 3] + "..."
    return text
1339 1348
def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
    '''yield every hg repository under path, recursively.'''
    def errhandler(err):
        # only errors on the root path itself are fatal
        if err.filename == path:
            raise err
    if followsym and hasattr(os.path, 'samestat'):
        def _add_dir_if_not_there(dirlst, dirname):
            # record dirname's stat in dirlst unless an equivalent
            # directory (same device/inode per samestat) is already
            # present; returns True when the directory was new.  This
            # is how symlink loops are detected.
            match = False
            samestat = os.path.samestat
            dirstat = os.stat(dirname)
            for lstdirstat in dirlst:
                if samestat(dirstat, lstdirstat):
                    match = True
                    break
            if not match:
                dirlst.append(dirstat)
            return not match
    else:
        # without samestat we cannot detect symlink cycles safely
        followsym = False

    if (seen_dirs is None) and followsym:
        seen_dirs = []
        _add_dir_if_not_there(seen_dirs, path)
    for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
        if '.hg' in dirs:
            yield root # found a repository
            qroot = os.path.join(root, '.hg', 'patches')
            if os.path.isdir(os.path.join(qroot, '.hg')):
                yield qroot # we have a patch queue repo here
            if recurse:
                # avoid recursing inside the .hg directory
                dirs.remove('.hg')
            else:
                dirs[:] = [] # don't descend further
        elif followsym:
            newdirs = []
            for d in dirs:
                fname = os.path.join(root, d)
                if _add_dir_if_not_there(seen_dirs, fname):
                    if os.path.islink(fname):
                        # walk the link target with the shared seen
                        # list so cycles terminate
                        for hgname in walkrepos(fname, True, seen_dirs):
                            yield hgname
                    else:
                        newdirs.append(d)
            dirs[:] = newdirs
1385 1394
# cached hgrc search path, lazily filled in by rcpath()
_rcpath = None
1387 1396
def os_rcpath():
    '''return default os-specific hgrc search path'''
    paths = system_rcpath()
    paths.extend(user_rcpath())
    # normalize every entry so equivalent paths compare equal
    return [os.path.normpath(p) for p in paths]
1394 1403
def rcpath():
    '''return hgrc search path. if env var HGRCPATH is set, use it.
    for each item in path, if directory, use files ending in .rc,
    else use item.
    make HGRCPATH empty to only look in .hg/hgrc of current repo.
    if no HGRCPATH, use default os-specific path.'''
    global _rcpath
    if _rcpath is None:
        # computed once per process and cached in the module global
        if 'HGRCPATH' in os.environ:
            _rcpath = []
            for p in os.environ['HGRCPATH'].split(os.pathsep):
                if not p: continue
                if os.path.isdir(p):
                    # a directory entry contributes all its *.rc files
                    for f, kind in osutil.listdir(p):
                        if f.endswith('.rc'):
                            _rcpath.append(os.path.join(p, f))
                else:
                    _rcpath.append(p)
        else:
            _rcpath = os_rcpath()
    return _rcpath
1416 1425
def bytecount(nbytes):
    '''return byte count formatted as readable string, with units'''

    # each entry: (threshold multiplier, divisor, localized format);
    # scanned largest-first, so the first match picks the unit that
    # shows the value with two to four significant digits
    units = (
        (100, 1<<30, _('%.0f GB')),
        (10, 1<<30, _('%.1f GB')),
        (1, 1<<30, _('%.2f GB')),
        (100, 1<<20, _('%.0f MB')),
        (10, 1<<20, _('%.1f MB')),
        (1, 1<<20, _('%.2f MB')),
        (100, 1<<10, _('%.0f KB')),
        (10, 1<<10, _('%.1f KB')),
        (1, 1<<10, _('%.2f KB')),
        (1, 1, _('%.0f bytes')),
        )

    for threshold, divisor, fmt in units:
        if nbytes >= divisor * threshold:
            return fmt % (nbytes / float(divisor))
    # smaller than every threshold (i.e. zero): plain byte count
    return units[-1][2] % nbytes
1437 1446
def drop_scheme(scheme, path):
    """Strip 'scheme:' (and any '//' authority marker) from path."""
    prefix = scheme + ':'
    if path.startswith(prefix):
        path = path[len(prefix):]
        if path.startswith('//'):
            path = path[2:]
    return path
1445 1454
def uirepr(s):
    # repr() doubles every backslash, which makes Windows paths
    # unreadable; collapse them back to single backslashes
    r = repr(s)
    return r.replace('\\\\', '\\')
1449 1458
def termwidth():
    """return the terminal width in characters (default 80)"""
    # an explicit COLUMNS setting always wins
    if 'COLUMNS' in os.environ:
        try:
            return int(os.environ['COLUMNS'])
        except ValueError:
            pass
    try:
        # ask the tty driver for the window size; prefer stdout,
        # then fall back to stdin
        import termios, array, fcntl
        for dev in (sys.stdout, sys.stdin):
            try:
                fd = dev.fileno()
                if not os.isatty(fd):
                    continue
                arri = fcntl.ioctl(fd, termios.TIOCGWINSZ, '\0' * 8)
                return array.array('h', arri)[1]
            except ValueError:
                pass
    except ImportError:
        # termios/fcntl are unavailable (e.g. on Windows)
        pass
    return 80
1470 1479
def iterlines(iterator):
    """Flatten an iterable of text chunks into individual lines."""
    for block in iterator:
        lines = block.splitlines()
        for line in lines:
            yield line
General Comments 0
You need to be logged in to leave comments. Login now