Add endswithsep() and use it instead of using os.sep and os.altsep directly....
Shun-ichi GOTO
r5843:83c354c4 default
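
For reference, the endswithsep() helper this changeset introduces lives in util.py, outside the hunks shown below. A minimal sketch, assuming it simply wraps the expression being replaced in the diff (path.endswith(os.sep) or os.altsep and path.endswith(os.altsep)); the exact implementation in r5843 may differ:

import os

def endswithsep(path):
    # Return a true value if path ends with the platform's directory
    # separator (os.sep) or, where one exists, the alternate separator
    # (os.altsep). Sketch only -- see util.py in this changeset for the
    # real definition.
    return path.endswith(os.sep) or (os.altsep and path.endswith(os.altsep))

Call sites such as cmdutil.copy() and dirstate.getcwd() below then use util.endswithsep(...) instead of spelling out both separators.
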
cmdutil.py
@@ -1,1159 +1,1159
1 1 # cmdutil.py - help for command processing in mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import *
9 9 from i18n import _
10 10 import os, sys, bisect, stat
11 11 import mdiff, bdiff, util, templater, patch, errno
12 12
13 13 revrangesep = ':'
14 14
15 15 class UnknownCommand(Exception):
16 16 """Exception raised if command is not in the command table."""
17 17 class AmbiguousCommand(Exception):
18 18 """Exception raised if command shortcut matches more than one command."""
19 19
20 20 def findpossible(ui, cmd, table):
21 21 """
22 22 Return cmd -> (aliases, command table entry)
23 23 for each matching command.
24 24 Return debug commands (or their aliases) only if no normal command matches.
25 25 """
26 26 choice = {}
27 27 debugchoice = {}
28 28 for e in table.keys():
29 29 aliases = e.lstrip("^").split("|")
30 30 found = None
31 31 if cmd in aliases:
32 32 found = cmd
33 33 elif not ui.config("ui", "strict"):
34 34 for a in aliases:
35 35 if a.startswith(cmd):
36 36 found = a
37 37 break
38 38 if found is not None:
39 39 if aliases[0].startswith("debug") or found.startswith("debug"):
40 40 debugchoice[found] = (aliases, table[e])
41 41 else:
42 42 choice[found] = (aliases, table[e])
43 43
44 44 if not choice and debugchoice:
45 45 choice = debugchoice
46 46
47 47 return choice
48 48
49 49 def findcmd(ui, cmd, table):
50 50 """Return (aliases, command table entry) for command string."""
51 51 choice = findpossible(ui, cmd, table)
52 52
53 53 if choice.has_key(cmd):
54 54 return choice[cmd]
55 55
56 56 if len(choice) > 1:
57 57 clist = choice.keys()
58 58 clist.sort()
59 59 raise AmbiguousCommand(cmd, clist)
60 60
61 61 if choice:
62 62 return choice.values()[0]
63 63
64 64 raise UnknownCommand(cmd)
65 65
66 66 def bail_if_changed(repo):
67 67 if repo.dirstate.parents()[1] != nullid:
68 68 raise util.Abort(_('outstanding uncommitted merge'))
69 69 modified, added, removed, deleted = repo.status()[:4]
70 70 if modified or added or removed or deleted:
71 71 raise util.Abort(_("outstanding uncommitted changes"))
72 72
73 73 def logmessage(opts):
74 74 """ get the log message according to -m and -l option """
75 75 message = opts['message']
76 76 logfile = opts['logfile']
77 77
78 78 if message and logfile:
79 79 raise util.Abort(_('options --message and --logfile are mutually '
80 80 'exclusive'))
81 81 if not message and logfile:
82 82 try:
83 83 if logfile == '-':
84 84 message = sys.stdin.read()
85 85 else:
86 86 message = open(logfile).read()
87 87 except IOError, inst:
88 88 raise util.Abort(_("can't read commit message '%s': %s") %
89 89 (logfile, inst.strerror))
90 90 return message
91 91
92 92 def setremoteconfig(ui, opts):
93 93 "copy remote options to ui tree"
94 94 if opts.get('ssh'):
95 95 ui.setconfig("ui", "ssh", opts['ssh'])
96 96 if opts.get('remotecmd'):
97 97 ui.setconfig("ui", "remotecmd", opts['remotecmd'])
98 98
99 99 def revpair(repo, revs):
100 100 '''return pair of nodes, given list of revisions. second item can
101 101 be None, meaning use working dir.'''
102 102
103 103 def revfix(repo, val, defval):
104 104 if not val and val != 0 and defval is not None:
105 105 val = defval
106 106 return repo.lookup(val)
107 107
108 108 if not revs:
109 109 return repo.dirstate.parents()[0], None
110 110 end = None
111 111 if len(revs) == 1:
112 112 if revrangesep in revs[0]:
113 113 start, end = revs[0].split(revrangesep, 1)
114 114 start = revfix(repo, start, 0)
115 115 end = revfix(repo, end, repo.changelog.count() - 1)
116 116 else:
117 117 start = revfix(repo, revs[0], None)
118 118 elif len(revs) == 2:
119 119 if revrangesep in revs[0] or revrangesep in revs[1]:
120 120 raise util.Abort(_('too many revisions specified'))
121 121 start = revfix(repo, revs[0], None)
122 122 end = revfix(repo, revs[1], None)
123 123 else:
124 124 raise util.Abort(_('too many revisions specified'))
125 125 return start, end
126 126
127 127 def revrange(repo, revs):
128 128 """Yield revision as strings from a list of revision specifications."""
129 129
130 130 def revfix(repo, val, defval):
131 131 if not val and val != 0 and defval is not None:
132 132 return defval
133 133 return repo.changelog.rev(repo.lookup(val))
134 134
135 135 seen, l = {}, []
136 136 for spec in revs:
137 137 if revrangesep in spec:
138 138 start, end = spec.split(revrangesep, 1)
139 139 start = revfix(repo, start, 0)
140 140 end = revfix(repo, end, repo.changelog.count() - 1)
141 141 step = start > end and -1 or 1
142 142 for rev in xrange(start, end+step, step):
143 143 if rev in seen:
144 144 continue
145 145 seen[rev] = 1
146 146 l.append(rev)
147 147 else:
148 148 rev = revfix(repo, spec, None)
149 149 if rev in seen:
150 150 continue
151 151 seen[rev] = 1
152 152 l.append(rev)
153 153
154 154 return l
155 155
156 156 def make_filename(repo, pat, node,
157 157 total=None, seqno=None, revwidth=None, pathname=None):
158 158 node_expander = {
159 159 'H': lambda: hex(node),
160 160 'R': lambda: str(repo.changelog.rev(node)),
161 161 'h': lambda: short(node),
162 162 }
163 163 expander = {
164 164 '%': lambda: '%',
165 165 'b': lambda: os.path.basename(repo.root),
166 166 }
167 167
168 168 try:
169 169 if node:
170 170 expander.update(node_expander)
171 171 if node:
172 172 expander['r'] = (lambda:
173 173 str(repo.changelog.rev(node)).zfill(revwidth or 0))
174 174 if total is not None:
175 175 expander['N'] = lambda: str(total)
176 176 if seqno is not None:
177 177 expander['n'] = lambda: str(seqno)
178 178 if total is not None and seqno is not None:
179 179 expander['n'] = lambda: str(seqno).zfill(len(str(total)))
180 180 if pathname is not None:
181 181 expander['s'] = lambda: os.path.basename(pathname)
182 182 expander['d'] = lambda: os.path.dirname(pathname) or '.'
183 183 expander['p'] = lambda: pathname
184 184
185 185 newname = []
186 186 patlen = len(pat)
187 187 i = 0
188 188 while i < patlen:
189 189 c = pat[i]
190 190 if c == '%':
191 191 i += 1
192 192 c = pat[i]
193 193 c = expander[c]()
194 194 newname.append(c)
195 195 i += 1
196 196 return ''.join(newname)
197 197 except KeyError, inst:
198 198 raise util.Abort(_("invalid format spec '%%%s' in output file name") %
199 199 inst.args[0])
200 200
201 201 def make_file(repo, pat, node=None,
202 202 total=None, seqno=None, revwidth=None, mode='wb', pathname=None):
203 203 if not pat or pat == '-':
204 204 return 'w' in mode and sys.stdout or sys.stdin
205 205 if hasattr(pat, 'write') and 'w' in mode:
206 206 return pat
207 207 if hasattr(pat, 'read') and 'r' in mode:
208 208 return pat
209 209 return open(make_filename(repo, pat, node, total, seqno, revwidth,
210 210 pathname),
211 211 mode)
212 212
213 213 def matchpats(repo, pats=[], opts={}, globbed=False, default=None):
214 214 cwd = repo.getcwd()
215 215 return util.cmdmatcher(repo.root, cwd, pats or [], opts.get('include'),
216 216 opts.get('exclude'), globbed=globbed,
217 217 default=default)
218 218
219 219 def walk(repo, pats=[], opts={}, node=None, badmatch=None, globbed=False,
220 220 default=None):
221 221 files, matchfn, anypats = matchpats(repo, pats, opts, globbed=globbed,
222 222 default=default)
223 223 exact = dict.fromkeys(files)
224 224 cwd = repo.getcwd()
225 225 for src, fn in repo.walk(node=node, files=files, match=matchfn,
226 226 badmatch=badmatch):
227 227 yield src, fn, repo.pathto(fn, cwd), fn in exact
228 228
229 229 def findrenames(repo, added=None, removed=None, threshold=0.5):
230 230 '''find renamed files -- yields (before, after, score) tuples'''
231 231 if added is None or removed is None:
232 232 added, removed = repo.status()[1:3]
233 233 ctx = repo.changectx()
234 234 for a in added:
235 235 aa = repo.wread(a)
236 236 bestname, bestscore = None, threshold
237 237 for r in removed:
238 238 rr = ctx.filectx(r).data()
239 239
240 240 # bdiff.blocks() returns blocks of matching lines
241 241 # count the number of bytes in each
242 242 equal = 0
243 243 alines = mdiff.splitnewlines(aa)
244 244 matches = bdiff.blocks(aa, rr)
245 245 for x1,x2,y1,y2 in matches:
246 246 for line in alines[x1:x2]:
247 247 equal += len(line)
248 248
249 249 lengths = len(aa) + len(rr)
250 250 if lengths:
251 251 myscore = equal*2.0 / lengths
252 252 if myscore >= bestscore:
253 253 bestname, bestscore = r, myscore
254 254 if bestname:
255 255 yield bestname, a, bestscore
256 256
257 257 def addremove(repo, pats=[], opts={}, dry_run=None, similarity=None):
258 258 if dry_run is None:
259 259 dry_run = opts.get('dry_run')
260 260 if similarity is None:
261 261 similarity = float(opts.get('similarity') or 0)
262 262 add, remove = [], []
263 263 mapping = {}
264 264 for src, abs, rel, exact in walk(repo, pats, opts):
265 265 target = repo.wjoin(abs)
266 266 if src == 'f' and abs not in repo.dirstate:
267 267 add.append(abs)
268 268 mapping[abs] = rel, exact
269 269 if repo.ui.verbose or not exact:
270 270 repo.ui.status(_('adding %s\n') % ((pats and rel) or abs))
271 271 if repo.dirstate[abs] != 'r' and (not util.lexists(target)
272 272 or (os.path.isdir(target) and not os.path.islink(target))):
273 273 remove.append(abs)
274 274 mapping[abs] = rel, exact
275 275 if repo.ui.verbose or not exact:
276 276 repo.ui.status(_('removing %s\n') % ((pats and rel) or abs))
277 277 if not dry_run:
278 278 repo.remove(remove)
279 279 repo.add(add)
280 280 if similarity > 0:
281 281 for old, new, score in findrenames(repo, add, remove, similarity):
282 282 oldrel, oldexact = mapping[old]
283 283 newrel, newexact = mapping[new]
284 284 if repo.ui.verbose or not oldexact or not newexact:
285 285 repo.ui.status(_('recording removal of %s as rename to %s '
286 286 '(%d%% similar)\n') %
287 287 (oldrel, newrel, score * 100))
288 288 if not dry_run:
289 289 repo.copy(old, new)
290 290
291 291 def copy(ui, repo, pats, opts, rename=False):
292 292 # called with the repo lock held
293 293 #
294 294 # hgsep => pathname that uses "/" to separate directories
295 295 # ossep => pathname that uses os.sep to separate directories
296 296 cwd = repo.getcwd()
297 297 targets = {}
298 298 after = opts.get("after")
299 299 dryrun = opts.get("dry_run")
300 300
301 301 def walkpat(pat):
302 302 srcs = []
303 303 for tag, abs, rel, exact in walk(repo, [pat], opts, globbed=True):
304 304 state = repo.dirstate[abs]
305 305 if state in '?r':
306 306 if exact and state == '?':
307 307 ui.warn(_('%s: not copying - file is not managed\n') % rel)
308 308 if exact and state == 'r':
309 309 ui.warn(_('%s: not copying - file has been marked for'
310 310 ' remove\n') % rel)
311 311 continue
312 312 # abs: hgsep
313 313 # rel: ossep
314 314 srcs.append((abs, rel, exact))
315 315 return srcs
316 316
317 317 # abssrc: hgsep
318 318 # relsrc: ossep
319 319 # otarget: ossep
320 320 def copyfile(abssrc, relsrc, otarget, exact):
321 321 abstarget = util.canonpath(repo.root, cwd, otarget)
322 322 reltarget = repo.pathto(abstarget, cwd)
323 323 target = repo.wjoin(abstarget)
324 324 src = repo.wjoin(abssrc)
325 325 state = repo.dirstate[abstarget]
326 326
327 327 # check for collisions
328 328 prevsrc = targets.get(abstarget)
329 329 if prevsrc is not None:
330 330 ui.warn(_('%s: not overwriting - %s collides with %s\n') %
331 331 (reltarget, repo.pathto(abssrc, cwd),
332 332 repo.pathto(prevsrc, cwd)))
333 333 return
334 334
335 335 # check for overwrites
336 336 exists = os.path.exists(target)
337 337 if (not after and exists or after and state in 'mn'):
338 338 if not opts['force']:
339 339 ui.warn(_('%s: not overwriting - file exists\n') %
340 340 reltarget)
341 341 return
342 342
343 343 if after:
344 344 if not exists:
345 345 return
346 346 elif not dryrun:
347 347 try:
348 348 if exists:
349 349 os.unlink(target)
350 350 targetdir = os.path.dirname(target) or '.'
351 351 if not os.path.isdir(targetdir):
352 352 os.makedirs(targetdir)
353 353 util.copyfile(src, target)
354 354 except IOError, inst:
355 355 if inst.errno == errno.ENOENT:
356 356 ui.warn(_('%s: deleted in working copy\n') % relsrc)
357 357 else:
358 358 ui.warn(_('%s: cannot copy - %s\n') %
359 359 (relsrc, inst.strerror))
360 360 return True # report a failure
361 361
362 362 if ui.verbose or not exact:
363 363 action = rename and "moving" or "copying"
364 364 ui.status(_('%s %s to %s\n') % (action, relsrc, reltarget))
365 365
366 366 targets[abstarget] = abssrc
367 367
368 368 # fix up dirstate
369 369 origsrc = repo.dirstate.copied(abssrc) or abssrc
370 370 if abstarget == origsrc: # copying back a copy?
371 371 if state not in 'mn' and not dryrun:
372 372 repo.dirstate.normallookup(abstarget)
373 373 else:
374 374 if repo.dirstate[origsrc] == 'a':
375 375 if not ui.quiet:
376 376 ui.warn(_("%s has not been committed yet, so no copy "
377 377 "data will be stored for %s.\n")
378 378 % (repo.pathto(origsrc, cwd), reltarget))
379 379 if abstarget not in repo.dirstate and not dryrun:
380 380 repo.add([abstarget])
381 381 elif not dryrun:
382 382 repo.copy(origsrc, abstarget)
383 383
384 384 if rename and not dryrun:
385 385 repo.remove([abssrc], True)
386 386
387 387 # pat: ossep
388 388 # dest ossep
389 389 # srcs: list of (hgsep, hgsep, ossep, bool)
390 390 # return: function that takes hgsep and returns ossep
391 391 def targetpathfn(pat, dest, srcs):
392 392 if os.path.isdir(pat):
393 393 abspfx = util.canonpath(repo.root, cwd, pat)
394 394 abspfx = util.localpath(abspfx)
395 395 if destdirexists:
396 396 striplen = len(os.path.split(abspfx)[0])
397 397 else:
398 398 striplen = len(abspfx)
399 399 if striplen:
400 400 striplen += len(os.sep)
401 401 res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
402 402 elif destdirexists:
403 403 res = lambda p: os.path.join(dest,
404 404 os.path.basename(util.localpath(p)))
405 405 else:
406 406 res = lambda p: dest
407 407 return res
408 408
409 409 # pat: ossep
410 410 # dest ossep
411 411 # srcs: list of (hgsep, hgsep, ossep, bool)
412 412 # return: function that takes hgsep and returns ossep
413 413 def targetpathafterfn(pat, dest, srcs):
414 414 if util.patkind(pat, None)[0]:
415 415 # a mercurial pattern
416 416 res = lambda p: os.path.join(dest,
417 417 os.path.basename(util.localpath(p)))
418 418 else:
419 419 abspfx = util.canonpath(repo.root, cwd, pat)
420 420 if len(abspfx) < len(srcs[0][0]):
421 421 # A directory. Either the target path contains the last
422 422 # component of the source path or it does not.
423 423 def evalpath(striplen):
424 424 score = 0
425 425 for s in srcs:
426 426 t = os.path.join(dest, util.localpath(s[0])[striplen:])
427 427 if os.path.exists(t):
428 428 score += 1
429 429 return score
430 430
431 431 abspfx = util.localpath(abspfx)
432 432 striplen = len(abspfx)
433 433 if striplen:
434 434 striplen += len(os.sep)
435 435 if os.path.isdir(os.path.join(dest, os.path.split(abspfx)[1])):
436 436 score = evalpath(striplen)
437 437 striplen1 = len(os.path.split(abspfx)[0])
438 438 if striplen1:
439 439 striplen1 += len(os.sep)
440 440 if evalpath(striplen1) > score:
441 441 striplen = striplen1
442 442 res = lambda p: os.path.join(dest,
443 443 util.localpath(p)[striplen:])
444 444 else:
445 445 # a file
446 446 if destdirexists:
447 447 res = lambda p: os.path.join(dest,
448 448 os.path.basename(util.localpath(p)))
449 449 else:
450 450 res = lambda p: dest
451 451 return res
452 452
453 453
454 454 pats = util.expand_glob(pats)
455 455 if not pats:
456 456 raise util.Abort(_('no source or destination specified'))
457 457 if len(pats) == 1:
458 458 raise util.Abort(_('no destination specified'))
459 459 dest = pats.pop()
460 460 destdirexists = os.path.isdir(dest)
461 461 if not destdirexists:
462 462 if len(pats) > 1 or util.patkind(pats[0], None)[0]:
463 463 raise util.Abort(_('with multiple sources, destination must be an '
464 464 'existing directory'))
465 if dest.endswith(os.sep) or os.altsep and dest.endswith(os.altsep):
465 if util.endswithsep(dest):
466 466 raise util.Abort(_('destination %s is not a directory') % dest)
467 467
468 468 tfn = targetpathfn
469 469 if after:
470 470 tfn = targetpathafterfn
471 471 copylist = []
472 472 for pat in pats:
473 473 srcs = walkpat(pat)
474 474 if not srcs:
475 475 continue
476 476 copylist.append((tfn(pat, dest, srcs), srcs))
477 477 if not copylist:
478 478 raise util.Abort(_('no files to copy'))
479 479
480 480 errors = 0
481 481 for targetpath, srcs in copylist:
482 482 for abssrc, relsrc, exact in srcs:
483 483 if copyfile(abssrc, relsrc, targetpath(abssrc), exact):
484 484 errors += 1
485 485
486 486 if errors:
487 487 ui.warn(_('(consider using --after)\n'))
488 488
489 489 return errors
490 490
491 491 def service(opts, parentfn=None, initfn=None, runfn=None):
492 492 '''Run a command as a service.'''
493 493
494 494 if opts['daemon'] and not opts['daemon_pipefds']:
495 495 rfd, wfd = os.pipe()
496 496 args = sys.argv[:]
497 497 args.append('--daemon-pipefds=%d,%d' % (rfd, wfd))
498 498 # Don't pass --cwd to the child process, because we've already
499 499 # changed directory.
500 500 for i in xrange(1,len(args)):
501 501 if args[i].startswith('--cwd='):
502 502 del args[i]
503 503 break
504 504 elif args[i].startswith('--cwd'):
505 505 del args[i:i+2]
506 506 break
507 507 pid = os.spawnvp(os.P_NOWAIT | getattr(os, 'P_DETACH', 0),
508 508 args[0], args)
509 509 os.close(wfd)
510 510 os.read(rfd, 1)
511 511 if parentfn:
512 512 return parentfn(pid)
513 513 else:
514 514 os._exit(0)
515 515
516 516 if initfn:
517 517 initfn()
518 518
519 519 if opts['pid_file']:
520 520 fp = open(opts['pid_file'], 'w')
521 521 fp.write(str(os.getpid()) + '\n')
522 522 fp.close()
523 523
524 524 if opts['daemon_pipefds']:
525 525 rfd, wfd = [int(x) for x in opts['daemon_pipefds'].split(',')]
526 526 os.close(rfd)
527 527 try:
528 528 os.setsid()
529 529 except AttributeError:
530 530 pass
531 531 os.write(wfd, 'y')
532 532 os.close(wfd)
533 533 sys.stdout.flush()
534 534 sys.stderr.flush()
535 535 fd = os.open(util.nulldev, os.O_RDWR)
536 536 if fd != 0: os.dup2(fd, 0)
537 537 if fd != 1: os.dup2(fd, 1)
538 538 if fd != 2: os.dup2(fd, 2)
539 539 if fd not in (0, 1, 2): os.close(fd)
540 540
541 541 if runfn:
542 542 return runfn()
543 543
544 544 class changeset_printer(object):
545 545 '''show changeset information when templating not requested.'''
546 546
547 547 def __init__(self, ui, repo, patch, buffered):
548 548 self.ui = ui
549 549 self.repo = repo
550 550 self.buffered = buffered
551 551 self.patch = patch
552 552 self.header = {}
553 553 self.hunk = {}
554 554 self.lastheader = None
555 555
556 556 def flush(self, rev):
557 557 if rev in self.header:
558 558 h = self.header[rev]
559 559 if h != self.lastheader:
560 560 self.lastheader = h
561 561 self.ui.write(h)
562 562 del self.header[rev]
563 563 if rev in self.hunk:
564 564 self.ui.write(self.hunk[rev])
565 565 del self.hunk[rev]
566 566 return 1
567 567 return 0
568 568
569 569 def show(self, rev=0, changenode=None, copies=(), **props):
570 570 if self.buffered:
571 571 self.ui.pushbuffer()
572 572 self._show(rev, changenode, copies, props)
573 573 self.hunk[rev] = self.ui.popbuffer()
574 574 else:
575 575 self._show(rev, changenode, copies, props)
576 576
577 577 def _show(self, rev, changenode, copies, props):
578 578 '''show a single changeset or file revision'''
579 579 log = self.repo.changelog
580 580 if changenode is None:
581 581 changenode = log.node(rev)
582 582 elif not rev:
583 583 rev = log.rev(changenode)
584 584
585 585 if self.ui.quiet:
586 586 self.ui.write("%d:%s\n" % (rev, short(changenode)))
587 587 return
588 588
589 589 changes = log.read(changenode)
590 590 date = util.datestr(changes[2])
591 591 extra = changes[5]
592 592 branch = extra.get("branch")
593 593
594 594 hexfunc = self.ui.debugflag and hex or short
595 595
596 596 parents = [(p, hexfunc(log.node(p)))
597 597 for p in self._meaningful_parentrevs(log, rev)]
598 598
599 599 self.ui.write(_("changeset: %d:%s\n") % (rev, hexfunc(changenode)))
600 600
601 601 # don't show the default branch name
602 602 if branch != 'default':
603 603 branch = util.tolocal(branch)
604 604 self.ui.write(_("branch: %s\n") % branch)
605 605 for tag in self.repo.nodetags(changenode):
606 606 self.ui.write(_("tag: %s\n") % tag)
607 607 for parent in parents:
608 608 self.ui.write(_("parent: %d:%s\n") % parent)
609 609
610 610 if self.ui.debugflag:
611 611 self.ui.write(_("manifest: %d:%s\n") %
612 612 (self.repo.manifest.rev(changes[0]), hex(changes[0])))
613 613 self.ui.write(_("user: %s\n") % changes[1])
614 614 self.ui.write(_("date: %s\n") % date)
615 615
616 616 if self.ui.debugflag:
617 617 files = self.repo.status(log.parents(changenode)[0], changenode)[:3]
618 618 for key, value in zip([_("files:"), _("files+:"), _("files-:")],
619 619 files):
620 620 if value:
621 621 self.ui.write("%-12s %s\n" % (key, " ".join(value)))
622 622 elif changes[3] and self.ui.verbose:
623 623 self.ui.write(_("files: %s\n") % " ".join(changes[3]))
624 624 if copies and self.ui.verbose:
625 625 copies = ['%s (%s)' % c for c in copies]
626 626 self.ui.write(_("copies: %s\n") % ' '.join(copies))
627 627
628 628 if extra and self.ui.debugflag:
629 629 extraitems = extra.items()
630 630 extraitems.sort()
631 631 for key, value in extraitems:
632 632 self.ui.write(_("extra: %s=%s\n")
633 633 % (key, value.encode('string_escape')))
634 634
635 635 description = changes[4].strip()
636 636 if description:
637 637 if self.ui.verbose:
638 638 self.ui.write(_("description:\n"))
639 639 self.ui.write(description)
640 640 self.ui.write("\n\n")
641 641 else:
642 642 self.ui.write(_("summary: %s\n") %
643 643 description.splitlines()[0])
644 644 self.ui.write("\n")
645 645
646 646 self.showpatch(changenode)
647 647
648 648 def showpatch(self, node):
649 649 if self.patch:
650 650 prev = self.repo.changelog.parents(node)[0]
651 651 patch.diff(self.repo, prev, node, match=self.patch, fp=self.ui,
652 652 opts=patch.diffopts(self.ui))
653 653 self.ui.write("\n")
654 654
655 655 def _meaningful_parentrevs(self, log, rev):
656 656 """Return list of meaningful (or all if debug) parentrevs for rev.
657 657
658 658 For merges (two non-nullrev revisions) both parents are meaningful.
659 659 Otherwise the first parent revision is considered meaningful if it
660 660 is not the preceding revision.
661 661 """
662 662 parents = log.parentrevs(rev)
663 663 if not self.ui.debugflag and parents[1] == nullrev:
664 664 if parents[0] >= rev - 1:
665 665 parents = []
666 666 else:
667 667 parents = [parents[0]]
668 668 return parents
669 669
670 670
671 671 class changeset_templater(changeset_printer):
672 672 '''format changeset information.'''
673 673
674 674 def __init__(self, ui, repo, patch, mapfile, buffered):
675 675 changeset_printer.__init__(self, ui, repo, patch, buffered)
676 676 filters = templater.common_filters.copy()
677 677 filters['formatnode'] = (ui.debugflag and (lambda x: x)
678 678 or (lambda x: x[:12]))
679 679 self.t = templater.templater(mapfile, filters,
680 680 cache={
681 681 'parent': '{rev}:{node|formatnode} ',
682 682 'manifest': '{rev}:{node|formatnode}',
683 683 'filecopy': '{name} ({source})'})
684 684
685 685 def use_template(self, t):
686 686 '''set template string to use'''
687 687 self.t.cache['changeset'] = t
688 688
689 689 def _show(self, rev, changenode, copies, props):
690 690 '''show a single changeset or file revision'''
691 691 log = self.repo.changelog
692 692 if changenode is None:
693 693 changenode = log.node(rev)
694 694 elif not rev:
695 695 rev = log.rev(changenode)
696 696
697 697 changes = log.read(changenode)
698 698
699 699 def showlist(name, values, plural=None, **args):
700 700 '''expand set of values.
701 701 name is name of key in template map.
702 702 values is list of strings or dicts.
703 703 plural is plural of name, if not simply name + 's'.
704 704
705 705 expansion works like this, given name 'foo'.
706 706
707 707 if values is empty, expand 'no_foos'.
708 708
709 709 if 'foo' not in template map, return values as a string,
710 710 joined by space.
711 711
712 712 expand 'start_foos'.
713 713
714 714 for each value, expand 'foo'. if 'last_foo' in template
715 715 map, expand it instead of 'foo' for last key.
716 716
717 717 expand 'end_foos'.
718 718 '''
719 719 if plural: names = plural
720 720 else: names = name + 's'
721 721 if not values:
722 722 noname = 'no_' + names
723 723 if noname in self.t:
724 724 yield self.t(noname, **args)
725 725 return
726 726 if name not in self.t:
727 727 if isinstance(values[0], str):
728 728 yield ' '.join(values)
729 729 else:
730 730 for v in values:
731 731 yield dict(v, **args)
732 732 return
733 733 startname = 'start_' + names
734 734 if startname in self.t:
735 735 yield self.t(startname, **args)
736 736 vargs = args.copy()
737 737 def one(v, tag=name):
738 738 try:
739 739 vargs.update(v)
740 740 except (AttributeError, ValueError):
741 741 try:
742 742 for a, b in v:
743 743 vargs[a] = b
744 744 except ValueError:
745 745 vargs[name] = v
746 746 return self.t(tag, **vargs)
747 747 lastname = 'last_' + name
748 748 if lastname in self.t:
749 749 last = values.pop()
750 750 else:
751 751 last = None
752 752 for v in values:
753 753 yield one(v)
754 754 if last is not None:
755 755 yield one(last, tag=lastname)
756 756 endname = 'end_' + names
757 757 if endname in self.t:
758 758 yield self.t(endname, **args)
759 759
760 760 def showbranches(**args):
761 761 branch = changes[5].get("branch")
762 762 if branch != 'default':
763 763 branch = util.tolocal(branch)
764 764 return showlist('branch', [branch], plural='branches', **args)
765 765
766 766 def showparents(**args):
767 767 parents = [[('rev', p), ('node', hex(log.node(p)))]
768 768 for p in self._meaningful_parentrevs(log, rev)]
769 769 return showlist('parent', parents, **args)
770 770
771 771 def showtags(**args):
772 772 return showlist('tag', self.repo.nodetags(changenode), **args)
773 773
774 774 def showextras(**args):
775 775 extras = changes[5].items()
776 776 extras.sort()
777 777 for key, value in extras:
778 778 args = args.copy()
779 779 args.update(dict(key=key, value=value))
780 780 yield self.t('extra', **args)
781 781
782 782 def showcopies(**args):
783 783 c = [{'name': x[0], 'source': x[1]} for x in copies]
784 784 return showlist('file_copy', c, plural='file_copies', **args)
785 785
786 786 files = []
787 787 def getfiles():
788 788 if not files:
789 789 files[:] = self.repo.status(
790 790 log.parents(changenode)[0], changenode)[:3]
791 791 return files
792 792 def showfiles(**args):
793 793 return showlist('file', changes[3], **args)
794 794 def showmods(**args):
795 795 return showlist('file_mod', getfiles()[0], **args)
796 796 def showadds(**args):
797 797 return showlist('file_add', getfiles()[1], **args)
798 798 def showdels(**args):
799 799 return showlist('file_del', getfiles()[2], **args)
800 800 def showmanifest(**args):
801 801 args = args.copy()
802 802 args.update(dict(rev=self.repo.manifest.rev(changes[0]),
803 803 node=hex(changes[0])))
804 804 return self.t('manifest', **args)
805 805
806 806 defprops = {
807 807 'author': changes[1],
808 808 'branches': showbranches,
809 809 'date': changes[2],
810 810 'desc': changes[4].strip(),
811 811 'file_adds': showadds,
812 812 'file_dels': showdels,
813 813 'file_mods': showmods,
814 814 'files': showfiles,
815 815 'file_copies': showcopies,
816 816 'manifest': showmanifest,
817 817 'node': hex(changenode),
818 818 'parents': showparents,
819 819 'rev': rev,
820 820 'tags': showtags,
821 821 'extras': showextras,
822 822 }
823 823 props = props.copy()
824 824 props.update(defprops)
825 825
826 826 try:
827 827 if self.ui.debugflag and 'header_debug' in self.t:
828 828 key = 'header_debug'
829 829 elif self.ui.quiet and 'header_quiet' in self.t:
830 830 key = 'header_quiet'
831 831 elif self.ui.verbose and 'header_verbose' in self.t:
832 832 key = 'header_verbose'
833 833 elif 'header' in self.t:
834 834 key = 'header'
835 835 else:
836 836 key = ''
837 837 if key:
838 838 h = templater.stringify(self.t(key, **props))
839 839 if self.buffered:
840 840 self.header[rev] = h
841 841 else:
842 842 self.ui.write(h)
843 843 if self.ui.debugflag and 'changeset_debug' in self.t:
844 844 key = 'changeset_debug'
845 845 elif self.ui.quiet and 'changeset_quiet' in self.t:
846 846 key = 'changeset_quiet'
847 847 elif self.ui.verbose and 'changeset_verbose' in self.t:
848 848 key = 'changeset_verbose'
849 849 else:
850 850 key = 'changeset'
851 851 self.ui.write(templater.stringify(self.t(key, **props)))
852 852 self.showpatch(changenode)
853 853 except KeyError, inst:
854 854 raise util.Abort(_("%s: no key named '%s'") % (self.t.mapfile,
855 855 inst.args[0]))
856 856 except SyntaxError, inst:
857 857 raise util.Abort(_('%s: %s') % (self.t.mapfile, inst.args[0]))
858 858
859 859 def show_changeset(ui, repo, opts, buffered=False, matchfn=False):
860 860 """show one changeset using template or regular display.
861 861
862 862 Display format will be the first non-empty hit of:
863 863 1. option 'template'
864 864 2. option 'style'
865 865 3. [ui] setting 'logtemplate'
866 866 4. [ui] setting 'style'
867 867 If all of these values are either the unset or the empty string,
868 868 regular display via changeset_printer() is done.
869 869 """
870 870 # options
871 871 patch = False
872 872 if opts.get('patch'):
873 873 patch = matchfn or util.always
874 874
875 875 tmpl = opts.get('template')
876 876 mapfile = None
877 877 if tmpl:
878 878 tmpl = templater.parsestring(tmpl, quoted=False)
879 879 else:
880 880 mapfile = opts.get('style')
881 881 # ui settings
882 882 if not mapfile:
883 883 tmpl = ui.config('ui', 'logtemplate')
884 884 if tmpl:
885 885 tmpl = templater.parsestring(tmpl)
886 886 else:
887 887 mapfile = ui.config('ui', 'style')
888 888
889 889 if tmpl or mapfile:
890 890 if mapfile:
891 891 if not os.path.split(mapfile)[0]:
892 892 mapname = (templater.templatepath('map-cmdline.' + mapfile)
893 893 or templater.templatepath(mapfile))
894 894 if mapname: mapfile = mapname
895 895 try:
896 896 t = changeset_templater(ui, repo, patch, mapfile, buffered)
897 897 except SyntaxError, inst:
898 898 raise util.Abort(inst.args[0])
899 899 if tmpl: t.use_template(tmpl)
900 900 return t
901 901 return changeset_printer(ui, repo, patch, buffered)
902 902
903 903 def finddate(ui, repo, date):
904 904 """Find the tipmost changeset that matches the given date spec"""
905 905 df = util.matchdate(date)
906 906 get = util.cachefunc(lambda r: repo.changectx(r).changeset())
907 907 changeiter, matchfn = walkchangerevs(ui, repo, [], get, {'rev':None})
908 908 results = {}
909 909 for st, rev, fns in changeiter:
910 910 if st == 'add':
911 911 d = get(rev)[2]
912 912 if df(d[0]):
913 913 results[rev] = d
914 914 elif st == 'iter':
915 915 if rev in results:
916 916 ui.status("Found revision %s from %s\n" %
917 917 (rev, util.datestr(results[rev])))
918 918 return str(rev)
919 919
920 920 raise util.Abort(_("revision matching date not found"))
921 921
922 922 def walkchangerevs(ui, repo, pats, change, opts):
923 923 '''Iterate over files and the revs they changed in.
924 924
925 925 Callers most commonly need to iterate backwards over the history
926 926 it is interested in. Doing so has awful (quadratic-looking)
927 927 performance, so we use iterators in a "windowed" way.
928 928
929 929 We walk a window of revisions in the desired order. Within the
930 930 window, we first walk forwards to gather data, then in the desired
931 931 order (usually backwards) to display it.
932 932
933 933 This function returns an (iterator, matchfn) tuple. The iterator
934 934 yields 3-tuples. They will be of one of the following forms:
935 935
936 936 "window", incrementing, lastrev: stepping through a window,
937 937 positive if walking forwards through revs, last rev in the
938 938 sequence iterated over - use to reset state for the current window
939 939
940 940 "add", rev, fns: out-of-order traversal of the given file names
941 941 fns, which changed during revision rev - use to gather data for
942 942 possible display
943 943
944 944 "iter", rev, None: in-order traversal of the revs earlier iterated
945 945 over with "add" - use to display data'''
946 946
947 947 def increasing_windows(start, end, windowsize=8, sizelimit=512):
948 948 if start < end:
949 949 while start < end:
950 950 yield start, min(windowsize, end-start)
951 951 start += windowsize
952 952 if windowsize < sizelimit:
953 953 windowsize *= 2
954 954 else:
955 955 while start > end:
956 956 yield start, min(windowsize, start-end-1)
957 957 start -= windowsize
958 958 if windowsize < sizelimit:
959 959 windowsize *= 2
960 960
961 961 files, matchfn, anypats = matchpats(repo, pats, opts)
962 962 follow = opts.get('follow') or opts.get('follow_first')
963 963
964 964 if repo.changelog.count() == 0:
965 965 return [], matchfn
966 966
967 967 if follow:
968 968 defrange = '%s:0' % repo.changectx().rev()
969 969 else:
970 970 defrange = 'tip:0'
971 971 revs = revrange(repo, opts['rev'] or [defrange])
972 972 wanted = {}
973 973 slowpath = anypats or opts.get('removed')
974 974 fncache = {}
975 975
976 976 if not slowpath and not files:
977 977 # No files, no patterns. Display all revs.
978 978 wanted = dict.fromkeys(revs)
979 979 copies = []
980 980 if not slowpath:
981 981 # Only files, no patterns. Check the history of each file.
982 982 def filerevgen(filelog, node):
983 983 cl_count = repo.changelog.count()
984 984 if node is None:
985 985 last = filelog.count() - 1
986 986 else:
987 987 last = filelog.rev(node)
988 988 for i, window in increasing_windows(last, nullrev):
989 989 revs = []
990 990 for j in xrange(i - window, i + 1):
991 991 n = filelog.node(j)
992 992 revs.append((filelog.linkrev(n),
993 993 follow and filelog.renamed(n)))
994 994 revs.reverse()
995 995 for rev in revs:
996 996 # only yield rev for which we have the changelog, it can
997 997 # happen while doing "hg log" during a pull or commit
998 998 if rev[0] < cl_count:
999 999 yield rev
1000 1000 def iterfiles():
1001 1001 for filename in files:
1002 1002 yield filename, None
1003 1003 for filename_node in copies:
1004 1004 yield filename_node
1005 1005 minrev, maxrev = min(revs), max(revs)
1006 1006 for file_, node in iterfiles():
1007 1007 filelog = repo.file(file_)
1008 1008 # A zero count may be a directory or deleted file, so
1009 1009 # try to find matching entries on the slow path.
1010 1010 if filelog.count() == 0:
1011 1011 slowpath = True
1012 1012 break
1013 1013 for rev, copied in filerevgen(filelog, node):
1014 1014 if rev <= maxrev:
1015 1015 if rev < minrev:
1016 1016 break
1017 1017 fncache.setdefault(rev, [])
1018 1018 fncache[rev].append(file_)
1019 1019 wanted[rev] = 1
1020 1020 if follow and copied:
1021 1021 copies.append(copied)
1022 1022 if slowpath:
1023 1023 if follow:
1024 1024 raise util.Abort(_('can only follow copies/renames for explicit '
1025 1025 'file names'))
1026 1026
1027 1027 # The slow path checks files modified in every changeset.
1028 1028 def changerevgen():
1029 1029 for i, window in increasing_windows(repo.changelog.count()-1,
1030 1030 nullrev):
1031 1031 for j in xrange(i - window, i + 1):
1032 1032 yield j, change(j)[3]
1033 1033
1034 1034 for rev, changefiles in changerevgen():
1035 1035 matches = filter(matchfn, changefiles)
1036 1036 if matches:
1037 1037 fncache[rev] = matches
1038 1038 wanted[rev] = 1
1039 1039
1040 1040 class followfilter:
1041 1041 def __init__(self, onlyfirst=False):
1042 1042 self.startrev = nullrev
1043 1043 self.roots = []
1044 1044 self.onlyfirst = onlyfirst
1045 1045
1046 1046 def match(self, rev):
1047 1047 def realparents(rev):
1048 1048 if self.onlyfirst:
1049 1049 return repo.changelog.parentrevs(rev)[0:1]
1050 1050 else:
1051 1051 return filter(lambda x: x != nullrev,
1052 1052 repo.changelog.parentrevs(rev))
1053 1053
1054 1054 if self.startrev == nullrev:
1055 1055 self.startrev = rev
1056 1056 return True
1057 1057
1058 1058 if rev > self.startrev:
1059 1059 # forward: all descendants
1060 1060 if not self.roots:
1061 1061 self.roots.append(self.startrev)
1062 1062 for parent in realparents(rev):
1063 1063 if parent in self.roots:
1064 1064 self.roots.append(rev)
1065 1065 return True
1066 1066 else:
1067 1067 # backwards: all parents
1068 1068 if not self.roots:
1069 1069 self.roots.extend(realparents(self.startrev))
1070 1070 if rev in self.roots:
1071 1071 self.roots.remove(rev)
1072 1072 self.roots.extend(realparents(rev))
1073 1073 return True
1074 1074
1075 1075 return False
1076 1076
1077 1077 # it might be worthwhile to do this in the iterator if the rev range
1078 1078 # is descending and the prune args are all within that range
1079 1079 for rev in opts.get('prune', ()):
1080 1080 rev = repo.changelog.rev(repo.lookup(rev))
1081 1081 ff = followfilter()
1082 1082 stop = min(revs[0], revs[-1])
1083 1083 for x in xrange(rev, stop-1, -1):
1084 1084 if ff.match(x) and x in wanted:
1085 1085 del wanted[x]
1086 1086
1087 1087 def iterate():
1088 1088 if follow and not files:
1089 1089 ff = followfilter(onlyfirst=opts.get('follow_first'))
1090 1090 def want(rev):
1091 1091 if ff.match(rev) and rev in wanted:
1092 1092 return True
1093 1093 return False
1094 1094 else:
1095 1095 def want(rev):
1096 1096 return rev in wanted
1097 1097
1098 1098 for i, window in increasing_windows(0, len(revs)):
1099 1099 yield 'window', revs[0] < revs[-1], revs[-1]
1100 1100 nrevs = [rev for rev in revs[i:i+window] if want(rev)]
1101 1101 srevs = list(nrevs)
1102 1102 srevs.sort()
1103 1103 for rev in srevs:
1104 1104 fns = fncache.get(rev)
1105 1105 if not fns:
1106 1106 def fns_generator():
1107 1107 for f in change(rev)[3]:
1108 1108 if matchfn(f):
1109 1109 yield f
1110 1110 fns = fns_generator()
1111 1111 yield 'add', rev, fns
1112 1112 for rev in nrevs:
1113 1113 yield 'iter', rev, None
1114 1114 return iterate(), matchfn
1115 1115
1116 1116 def commit(ui, repo, commitfunc, pats, opts):
1117 1117 '''commit the specified files or all outstanding changes'''
1118 1118 message = logmessage(opts)
1119 1119
1120 1120 # extract addremove carefully -- this function can be called from a command
1121 1121 # that doesn't support addremove
1122 1122 if opts.get('addremove'):
1123 1123 addremove(repo, pats, opts)
1124 1124
1125 1125 fns, match, anypats = matchpats(repo, pats, opts)
1126 1126 if pats:
1127 1127 status = repo.status(files=fns, match=match)
1128 1128 modified, added, removed, deleted, unknown = status[:5]
1129 1129 files = modified + added + removed
1130 1130 slist = None
1131 1131 for f in fns:
1132 1132 if f == '.':
1133 1133 continue
1134 1134 if f not in files:
1135 1135 rf = repo.wjoin(f)
1136 1136 try:
1137 1137 mode = os.lstat(rf)[stat.ST_MODE]
1138 1138 except OSError:
1139 1139 raise util.Abort(_("file %s not found!") % rf)
1140 1140 if stat.S_ISDIR(mode):
1141 1141 name = f + '/'
1142 1142 if slist is None:
1143 1143 slist = list(files)
1144 1144 slist.sort()
1145 1145 i = bisect.bisect(slist, name)
1146 1146 if i >= len(slist) or not slist[i].startswith(name):
1147 1147 raise util.Abort(_("no match under directory %s!")
1148 1148 % rf)
1149 1149 elif not (stat.S_ISREG(mode) or stat.S_ISLNK(mode)):
1150 1150 raise util.Abort(_("can't commit %s: "
1151 1151 "unsupported file type!") % rf)
1152 1152 elif f not in repo.dirstate:
1153 1153 raise util.Abort(_("file %s not tracked!") % rf)
1154 1154 else:
1155 1155 files = []
1156 1156 try:
1157 1157 return commitfunc(ui, repo, files, message, match, opts)
1158 1158 except ValueError, inst:
1159 1159 raise util.Abort(str(inst))
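
Why the helper has to test both separators: on Windows os.sep is '\' while os.altsep is '/', so a check against os.sep alone would miss destinations written with forward slashes; on POSIX os.altsep is None and the extra test is a no-op. A hypothetical walk-through of the sketch above (values shown are standard os module behaviour, not taken from the changeset):

# Windows: os.sep == '\\', os.altsep == '/'
#   endswithsep('build\\') -> True    (matches os.sep)
#   endswithsep('build/')  -> True    (matches os.altsep)
# POSIX: os.sep == '/', os.altsep is None
#   endswithsep('build/')  -> True
#   endswithsep('build')   -> falsy   (no trailing separator)
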
dirstate.py
@@ -1,582 +1,582
1 1 """
2 2 dirstate.py - working directory tracking for mercurial
3 3
4 4 Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 5
6 6 This software may be used and distributed according to the terms
7 7 of the GNU General Public License, incorporated herein by reference.
8 8 """
9 9
10 10 from node import *
11 11 from i18n import _
12 12 import struct, os, time, bisect, stat, strutil, util, re, errno, ignore
13 13 import cStringIO, osutil
14 14
15 15 _unknown = ('?', 0, 0, 0)
16 16 _format = ">cllll"
17 17
18 18 class dirstate(object):
19 19
20 20 def __init__(self, opener, ui, root):
21 21 self._opener = opener
22 22 self._root = root
23 23 self._dirty = False
24 24 self._dirtypl = False
25 25 self._ui = ui
26 26
27 27 def __getattr__(self, name):
28 28 if name == '_map':
29 29 self._read()
30 30 return self._map
31 31 elif name == '_copymap':
32 32 self._read()
33 33 return self._copymap
34 34 elif name == '_branch':
35 35 try:
36 36 self._branch = (self._opener("branch").read().strip()
37 37 or "default")
38 38 except IOError:
39 39 self._branch = "default"
40 40 return self._branch
41 41 elif name == '_pl':
42 42 self._pl = [nullid, nullid]
43 43 try:
44 44 st = self._opener("dirstate").read(40)
45 45 if len(st) == 40:
46 46 self._pl = st[:20], st[20:40]
47 47 except IOError, err:
48 48 if err.errno != errno.ENOENT: raise
49 49 return self._pl
50 50 elif name == '_dirs':
51 51 self._dirs = {}
52 52 for f in self._map:
53 53 if self[f] != 'r':
54 54 self._incpath(f)
55 55 return self._dirs
56 56 elif name == '_ignore':
57 57 files = [self._join('.hgignore')]
58 58 for name, path in self._ui.configitems("ui"):
59 59 if name == 'ignore' or name.startswith('ignore.'):
60 60 files.append(os.path.expanduser(path))
61 61 self._ignore = ignore.ignore(self._root, files, self._ui.warn)
62 62 return self._ignore
63 63 elif name == '_slash':
64 64 self._slash = self._ui.configbool('ui', 'slash') and os.sep != '/'
65 65 return self._slash
66 66 else:
67 67 raise AttributeError, name
68 68
69 69 def _join(self, f):
70 70 return os.path.join(self._root, f)
71 71
72 72 def getcwd(self):
73 73 cwd = os.getcwd()
74 74 if cwd == self._root: return ''
75 75 # self._root ends with a path separator if self._root is '/' or 'C:\'
76 76 rootsep = self._root
77 if not rootsep.endswith(os.sep):
77 if not util.endswithsep(rootsep):
78 78 rootsep += os.sep
79 79 if cwd.startswith(rootsep):
80 80 return cwd[len(rootsep):]
81 81 else:
82 82 # we're outside the repo. return an absolute path.
83 83 return cwd
84 84
85 85 def pathto(self, f, cwd=None):
86 86 if cwd is None:
87 87 cwd = self.getcwd()
88 88 path = util.pathto(self._root, cwd, f)
89 89 if self._slash:
90 90 return util.normpath(path)
91 91 return path
92 92
93 93 def __getitem__(self, key):
94 94 ''' current states:
95 95 n normal
96 96 m needs merging
97 97 r marked for removal
98 98 a marked for addition
99 99 ? not tracked'''
100 100 return self._map.get(key, ("?",))[0]
101 101
102 102 def __contains__(self, key):
103 103 return key in self._map
104 104
105 105 def __iter__(self):
106 106 a = self._map.keys()
107 107 a.sort()
108 108 for x in a:
109 109 yield x
110 110
111 111 def parents(self):
112 112 return self._pl
113 113
114 114 def branch(self):
115 115 return self._branch
116 116
117 117 def setparents(self, p1, p2=nullid):
118 118 self._dirty = self._dirtypl = True
119 119 self._pl = p1, p2
120 120
121 121 def setbranch(self, branch):
122 122 self._branch = branch
123 123 self._opener("branch", "w").write(branch + '\n')
124 124
125 125 def _read(self):
126 126 self._map = {}
127 127 self._copymap = {}
128 128 if not self._dirtypl:
129 129 self._pl = [nullid, nullid]
130 130 try:
131 131 st = self._opener("dirstate").read()
132 132 except IOError, err:
133 133 if err.errno != errno.ENOENT: raise
134 134 return
135 135 if not st:
136 136 return
137 137
138 138 if not self._dirtypl:
139 139 self._pl = [st[:20], st[20: 40]]
140 140
141 141 # deref fields so they will be local in loop
142 142 dmap = self._map
143 143 copymap = self._copymap
144 144 unpack = struct.unpack
145 145 e_size = struct.calcsize(_format)
146 146 pos1 = 40
147 147 l = len(st)
148 148
149 149 # the inner loop
150 150 while pos1 < l:
151 151 pos2 = pos1 + e_size
152 152 e = unpack(">cllll", st[pos1:pos2]) # a literal here is faster
153 153 pos1 = pos2 + e[4]
154 154 f = st[pos2:pos1]
155 155 if '\0' in f:
156 156 f, c = f.split('\0')
157 157 copymap[f] = c
158 158 dmap[f] = e # we hold onto e[4] because making a subtuple is slow
159 159
160 160 def invalidate(self):
161 161 for a in "_map _copymap _branch _pl _dirs _ignore".split():
162 162 if a in self.__dict__:
163 163 delattr(self, a)
164 164 self._dirty = False
165 165
166 166 def copy(self, source, dest):
167 167 self._dirty = True
168 168 self._copymap[dest] = source
169 169
170 170 def copied(self, file):
171 171 return self._copymap.get(file, None)
172 172
173 173 def copies(self):
174 174 return self._copymap
175 175
176 176 def _incpath(self, path):
177 177 c = path.rfind('/')
178 178 if c >= 0:
179 179 dirs = self._dirs
180 180 base = path[:c]
181 181 if base not in dirs:
182 182 self._incpath(base)
183 183 dirs[base] = 1
184 184 else:
185 185 dirs[base] += 1
186 186
187 187 def _decpath(self, path):
188 188 c = path.rfind('/')
189 189 if c >= 0:
190 190 base = path[:c]
191 191 dirs = self._dirs
192 192 if dirs[base] == 1:
193 193 del dirs[base]
194 194 self._decpath(base)
195 195 else:
196 196 dirs[base] -= 1
197 197
198 198 def _incpathcheck(self, f):
199 199 if '\r' in f or '\n' in f:
200 200 raise util.Abort(_("'\\n' and '\\r' disallowed in filenames"))
201 201 # shadows
202 202 if f in self._dirs:
203 203 raise util.Abort(_('directory %r already in dirstate') % f)
204 204 for c in strutil.rfindall(f, '/'):
205 205 d = f[:c]
206 206 if d in self._dirs:
207 207 break
208 208 if d in self._map and self[d] != 'r':
209 209 raise util.Abort(_('file %r in dirstate clashes with %r') %
210 210 (d, f))
211 211 self._incpath(f)
212 212
213 213 def _changepath(self, f, newstate, relaxed=False):
214 214 # handle upcoming path changes
215 215 oldstate = self[f]
216 216 if oldstate not in "?r" and newstate in "?r":
217 217 if "_dirs" in self.__dict__:
218 218 self._decpath(f)
219 219 return
220 220 if oldstate in "?r" and newstate not in "?r":
221 221 if relaxed and oldstate == '?':
222 222 # XXX
223 223 # in relaxed mode we assume the caller knows
224 224 # what it is doing, workaround for updating
225 225 # dir-to-file revisions
226 226 if "_dirs" in self.__dict__:
227 227 self._incpath(f)
228 228 return
229 229 self._incpathcheck(f)
230 230 return
231 231
232 232 def normal(self, f):
233 233 'mark a file normal and clean'
234 234 self._dirty = True
235 235 self._changepath(f, 'n', True)
236 236 s = os.lstat(self._join(f))
237 237 self._map[f] = ('n', s.st_mode, s.st_size, s.st_mtime, 0)
238 238 if self._copymap.has_key(f):
239 239 del self._copymap[f]
240 240
241 241 def normallookup(self, f):
242 242 'mark a file normal, but possibly dirty'
243 243 self._dirty = True
244 244 self._changepath(f, 'n', True)
245 245 self._map[f] = ('n', 0, -1, -1, 0)
246 246 if f in self._copymap:
247 247 del self._copymap[f]
248 248
249 249 def normaldirty(self, f):
250 250 'mark a file normal, but dirty'
251 251 self._dirty = True
252 252 self._changepath(f, 'n', True)
253 253 self._map[f] = ('n', 0, -2, -1, 0)
254 254 if f in self._copymap:
255 255 del self._copymap[f]
256 256
257 257 def add(self, f):
258 258 'mark a file added'
259 259 self._dirty = True
260 260 self._changepath(f, 'a')
261 261 self._map[f] = ('a', 0, -1, -1, 0)
262 262 if f in self._copymap:
263 263 del self._copymap[f]
264 264
265 265 def remove(self, f):
266 266 'mark a file removed'
267 267 self._dirty = True
268 268 self._changepath(f, 'r')
269 269 self._map[f] = ('r', 0, 0, 0, 0)
270 270 if f in self._copymap:
271 271 del self._copymap[f]
272 272
273 273 def merge(self, f):
274 274 'mark a file merged'
275 275 self._dirty = True
276 276 s = os.lstat(self._join(f))
277 277 self._changepath(f, 'm', True)
278 278 self._map[f] = ('m', s.st_mode, s.st_size, s.st_mtime, 0)
279 279 if f in self._copymap:
280 280 del self._copymap[f]
281 281
282 282 def forget(self, f):
283 283 'forget a file'
284 284 self._dirty = True
285 285 try:
286 286 self._changepath(f, '?')
287 287 del self._map[f]
288 288 except KeyError:
289 289 self._ui.warn(_("not in dirstate: %s!\n") % f)
290 290
291 291 def clear(self):
292 292 self._map = {}
293 293 if "_dirs" in self.__dict__:
294 294 delattr(self, "_dirs");
295 295 self._copymap = {}
296 296 self._pl = [nullid, nullid]
297 297 self._dirty = True
298 298
299 299 def rebuild(self, parent, files):
300 300 self.clear()
301 301 for f in files:
302 302 if files.execf(f):
303 303 self._map[f] = ('n', 0777, -1, 0, 0)
304 304 else:
305 305 self._map[f] = ('n', 0666, -1, 0, 0)
306 306 self._pl = (parent, nullid)
307 307 self._dirty = True
308 308
309 309 def write(self):
310 310 if not self._dirty:
311 311 return
312 312 cs = cStringIO.StringIO()
313 313 copymap = self._copymap
314 314 pack = struct.pack
315 315 write = cs.write
316 316 write("".join(self._pl))
317 317 for f, e in self._map.iteritems():
318 318 if f in copymap:
319 319 f = "%s\0%s" % (f, copymap[f])
320 320 e = pack(_format, e[0], e[1], e[2], e[3], len(f))
321 321 write(e)
322 322 write(f)
323 323 st = self._opener("dirstate", "w", atomictemp=True)
324 324 st.write(cs.getvalue())
325 325 st.rename()
326 326 self._dirty = self._dirtypl = False
327 327
328 328 def _filter(self, files):
329 329 ret = {}
330 330 unknown = []
331 331
332 332 for x in files:
333 333 if x == '.':
334 334 return self._map.copy()
335 335 if x not in self._map:
336 336 unknown.append(x)
337 337 else:
338 338 ret[x] = self._map[x]
339 339
340 340 if not unknown:
341 341 return ret
342 342
343 343 b = self._map.keys()
344 344 b.sort()
345 345 blen = len(b)
346 346
347 347 for x in unknown:
348 348 bs = bisect.bisect(b, "%s%s" % (x, '/'))
349 349 while bs < blen:
350 350 s = b[bs]
351 351 if len(s) > len(x) and s.startswith(x):
352 352 ret[s] = self._map[s]
353 353 else:
354 354 break
355 355 bs += 1
356 356 return ret
357 357
358 358 def _supported(self, f, mode, verbose=False):
359 359 if stat.S_ISREG(mode) or stat.S_ISLNK(mode):
360 360 return True
361 361 if verbose:
362 362 kind = 'unknown'
363 363 if stat.S_ISCHR(mode): kind = _('character device')
364 364 elif stat.S_ISBLK(mode): kind = _('block device')
365 365 elif stat.S_ISFIFO(mode): kind = _('fifo')
366 366 elif stat.S_ISSOCK(mode): kind = _('socket')
367 367 elif stat.S_ISDIR(mode): kind = _('directory')
368 368 self._ui.warn(_('%s: unsupported file type (type is %s)\n')
369 369 % (self.pathto(f), kind))
370 370 return False
371 371
372 372 def walk(self, files=None, match=util.always, badmatch=None):
373 373 # filter out the stat
374 374 for src, f, st in self.statwalk(files, match, badmatch=badmatch):
375 375 yield src, f
376 376
377 377 def statwalk(self, files=None, match=util.always, ignored=False,
378 378 badmatch=None, directories=False):
379 379 '''
380 380 walk recursively through the directory tree, finding all files
381 381 matched by the match function
382 382
383 383 results are yielded in a tuple (src, filename, st), where src
384 384 is one of:
385 385 'f' the file was found in the directory tree
386 386 'd' the file is a directory of the tree
387 387 'm' the file was only in the dirstate and not in the tree
388 388 'b' file was not found and matched badmatch
389 389
390 390 and st is the stat result if the file was found in the directory.
391 391 '''
392 392
393 393 # walk all files by default
394 394 if not files:
395 395 files = ['.']
396 396 dc = self._map.copy()
397 397 else:
398 398 files = util.unique(files)
399 399 dc = self._filter(files)
400 400
401 401 def imatch(file_):
402 402 if file_ not in dc and self._ignore(file_):
403 403 return False
404 404 return match(file_)
405 405
406 406 ignore = self._ignore
407 407 if ignored:
408 408 imatch = match
409 409 ignore = util.never
410 410
411 411 # self._root may end with a path separator when self._root == '/'
412 412 common_prefix_len = len(self._root)
413 if not self._root.endswith(os.sep):
413 if not util.endswithsep(self._root):
414 414 common_prefix_len += 1
415 415
416 416 normpath = util.normpath
417 417 listdir = osutil.listdir
418 418 lstat = os.lstat
419 419 bisect_left = bisect.bisect_left
420 420 isdir = os.path.isdir
421 421 pconvert = util.pconvert
422 422 join = os.path.join
423 423 s_isdir = stat.S_ISDIR
424 424 supported = self._supported
425 425 _join = self._join
426 426 known = {'.hg': 1}
427 427
428 428 # recursion free walker, faster than os.walk.
429 429 def findfiles(s):
430 430 work = [s]
431 431 wadd = work.append
432 432 found = []
433 433 add = found.append
434 434 if directories:
435 435 add((normpath(s[common_prefix_len:]), 'd', lstat(s)))
436 436 while work:
437 437 top = work.pop()
438 438 entries = listdir(top, stat=True)
439 439 # nd is the top of the repository dir tree
440 440 nd = normpath(top[common_prefix_len:])
441 441 if nd == '.':
442 442 nd = ''
443 443 else:
444 444 # do not recurse into a repo contained in this
445 445 # one. use bisect to find .hg directory so speed
446 446 # is good on big directory.
447 447 names = [e[0] for e in entries]
448 448 hg = bisect_left(names, '.hg')
449 449 if hg < len(names) and names[hg] == '.hg':
450 450 if isdir(join(top, '.hg')):
451 451 continue
452 452 for f, kind, st in entries:
453 453 np = pconvert(join(nd, f))
454 454 if np in known:
455 455 continue
456 456 known[np] = 1
457 457 p = join(top, f)
458 458 # don't trip over symlinks
459 459 if kind == stat.S_IFDIR:
460 460 if not ignore(np):
461 461 wadd(p)
462 462 if directories:
463 463 add((np, 'd', st))
464 464 if np in dc and match(np):
465 465 add((np, 'm', st))
466 466 elif imatch(np):
467 467 if supported(np, st.st_mode):
468 468 add((np, 'f', st))
469 469 elif np in dc:
470 470 add((np, 'm', st))
471 471 found.sort()
472 472 return found
473 473
474 474 # step one, find all files that match our criteria
475 475 files.sort()
476 476 for ff in files:
477 477 nf = normpath(ff)
478 478 f = _join(ff)
479 479 try:
480 480 st = lstat(f)
481 481 except OSError, inst:
482 482 found = False
483 483 for fn in dc:
484 484 if nf == fn or (fn.startswith(nf) and fn[len(nf)] == '/'):
485 485 found = True
486 486 break
487 487 if not found:
488 488 if inst.errno != errno.ENOENT or not badmatch:
489 489 self._ui.warn('%s: %s\n' %
490 490 (self.pathto(ff), inst.strerror))
491 491 elif badmatch and badmatch(ff) and imatch(nf):
492 492 yield 'b', ff, None
493 493 continue
494 494 if s_isdir(st.st_mode):
495 495 for f, src, st in findfiles(f):
496 496 yield src, f, st
497 497 else:
498 498 if nf in known:
499 499 continue
500 500 known[nf] = 1
501 501 if match(nf):
502 502 if supported(ff, st.st_mode, verbose=True):
503 503 yield 'f', nf, st
504 504 elif ff in dc:
505 505 yield 'm', nf, st
506 506
507 507 # step two run through anything left in the dc hash and yield
508 508 # if we haven't already seen it
509 509 ks = dc.keys()
510 510 ks.sort()
511 511 for k in ks:
512 512 if k in known:
513 513 continue
514 514 known[k] = 1
515 515 if imatch(k):
516 516 yield 'm', k, None
517 517
518 518 def status(self, files, match, list_ignored, list_clean):
519 519 lookup, modified, added, unknown, ignored = [], [], [], [], []
520 520 removed, deleted, clean = [], [], []
521 521
522 522 _join = self._join
523 523 lstat = os.lstat
524 524 cmap = self._copymap
525 525 dmap = self._map
526 526 ladd = lookup.append
527 527 madd = modified.append
528 528 aadd = added.append
529 529 uadd = unknown.append
530 530 iadd = ignored.append
531 531 radd = removed.append
532 532 dadd = deleted.append
533 533 cadd = clean.append
534 534
535 535 for src, fn, st in self.statwalk(files, match, ignored=list_ignored):
536 536 if fn in dmap:
537 537 type_, mode, size, time, foo = dmap[fn]
538 538 else:
539 539 if list_ignored and self._ignore(fn):
540 540 iadd(fn)
541 541 else:
542 542 uadd(fn)
543 543 continue
544 544 if src == 'm':
545 545 nonexistent = True
546 546 if not st:
547 547 try:
548 548 st = lstat(_join(fn))
549 549 except OSError, inst:
550 550 if inst.errno not in (errno.ENOENT, errno.ENOTDIR):
551 551 raise
552 552 st = None
553 553 # We need to re-check that it is a valid file
554 554 if st and self._supported(fn, st.st_mode):
555 555 nonexistent = False
556 556 # XXX: what to do with file no longer present in the fs
557 557 # who are not removed in the dirstate ?
558 558 if nonexistent and type_ in "nm":
559 559 dadd(fn)
560 560 continue
561 561 # check the common case first
562 562 if type_ == 'n':
563 563 if not st:
564 564 st = lstat(_join(fn))
565 565 if (size >= 0 and (size != st.st_size
566 566 or (mode ^ st.st_mode) & 0100)
567 567 or size == -2
568 568 or fn in self._copymap):
569 569 madd(fn)
570 570 elif time != int(st.st_mtime):
571 571 ladd(fn)
572 572 elif list_clean:
573 573 cadd(fn)
574 574 elif type_ == 'm':
575 575 madd(fn)
576 576 elif type_ == 'a':
577 577 aadd(fn)
578 578 elif type_ == 'r':
579 579 radd(fn)
580 580
581 581 return (lookup, modified, added, removed, deleted, unknown, ignored,
582 582 clean)
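
The status() method above classifies every walked file into one of eight buckets and binds each list's append method to a short local name (madd, aadd, ...) to save attribute lookups in the hot loop. A minimal standalone sketch of that idiom, with invented file names and only three buckets:

# Sketch of the bound-append idiom used in status(); the walk results
# below are invented for illustration.
modified, added, unknown = [], [], []
madd, aadd, uadd = modified.append, added.append, unknown.append

for state, fn in [('m', 'a.txt'), ('a', 'b.txt'), ('?', 'c.txt')]:
    if state == 'm':
        madd(fn)        # same as modified.append(fn), one lookup cheaper
    elif state == 'a':
        aadd(fn)
    else:
        uadd(fn)

assert modified == ['a.txt'] and added == ['b.txt'] and unknown == ['c.txt']
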
@@ -1,1720 +1,1724
1 1 """
2 2 util.py - Mercurial utility functions and platform specific implementations
3 3
4 4 Copyright 2005 K. Thananchayan <thananck@yahoo.com>
5 5 Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
6 6 Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
7 7
8 8 This software may be used and distributed according to the terms
9 9 of the GNU General Public License, incorporated herein by reference.
10 10
11 11 This contains helper routines that are independent of the SCM core and hide
12 12 platform-specific details from the core.
13 13 """
14 14
15 15 from i18n import _
16 16 import cStringIO, errno, getpass, popen2, re, shutil, sys, tempfile, strutil
17 17 import os, stat, threading, time, calendar, ConfigParser, locale, glob, osutil
18 18 import re, urlparse
19 19
20 20 try:
21 21 set = set
22 22 frozenset = frozenset
23 23 except NameError:
24 24 from sets import Set as set, ImmutableSet as frozenset
25 25
26 26 try:
27 27 _encoding = os.environ.get("HGENCODING")
28 28 if sys.platform == 'darwin' and not _encoding:
29 29 # On darwin, getpreferredencoding ignores the locale environment and
30 30 # always returns mac-roman. We override this if the environment is
31 31 # not C (has been customized by the user).
32 32 locale.setlocale(locale.LC_CTYPE, '')
33 33 _encoding = locale.getlocale()[1]
34 34 if not _encoding:
35 35 _encoding = locale.getpreferredencoding() or 'ascii'
36 36 except locale.Error:
37 37 _encoding = 'ascii'
38 38 _encodingmode = os.environ.get("HGENCODINGMODE", "strict")
39 39 _fallbackencoding = 'ISO-8859-1'
40 40
41 41 def tolocal(s):
42 42 """
43 43 Convert a string from internal UTF-8 to local encoding
44 44
45 45 All internal strings should be UTF-8 but some repos before the
46 46 implementation of locale support may contain latin1 or possibly
47 47 other character sets. We attempt to decode everything strictly
48 48 using UTF-8, then Latin-1, and failing that, we use UTF-8 and
49 49 replace unknown characters.
50 50 """
51 51 for e in ('UTF-8', _fallbackencoding):
52 52 try:
53 53 u = s.decode(e) # attempt strict decoding
54 54 return u.encode(_encoding, "replace")
55 55 except LookupError, k:
56 56 raise Abort(_("%s, please check your locale settings") % k)
57 57 except UnicodeDecodeError:
58 58 pass
59 59 u = s.decode("utf-8", "replace") # last ditch
60 60 return u.encode(_encoding, "replace")
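
tolocal() above tries a strict UTF-8 decode, then the Latin-1 fallback, and finally a lossy decode before re-encoding into the local charset. A self-contained sketch of that cascade, with the local encoding hardcoded to 'ascii' instead of being read from HGENCODING:

# Standalone sketch of the tolocal() fallback cascade; the local
# encoding is fixed to 'ascii' here, unlike the real function.
def tolocal_sketch(s, local='ascii'):
    for enc in ('utf-8', 'iso-8859-1'):
        try:
            return s.decode(enc).encode(local, 'replace')
        except UnicodeDecodeError:
            continue
    return s.decode('utf-8', 'replace').encode(local, 'replace')

assert tolocal_sketch(b'caf\xc3\xa9') == b'caf?'   # valid UTF-8, lossy ASCII output
assert tolocal_sketch(b'caf\xe9') == b'caf?'       # falls back to Latin-1
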
61 61
62 62 def fromlocal(s):
63 63 """
64 64 Convert a string from the local character encoding to UTF-8
65 65
66 66 We attempt to decode strings using the encoding mode set by
67 67 HGENCODINGMODE, which defaults to 'strict'. In this mode, unknown
68 68 characters will cause an error message. Other modes include
69 69 'replace', which replaces unknown characters with a special
70 70 Unicode character, and 'ignore', which drops the character.
71 71 """
72 72 try:
73 73 return s.decode(_encoding, _encodingmode).encode("utf-8")
74 74 except UnicodeDecodeError, inst:
75 75 sub = s[max(0, inst.start-10):inst.start+10]
76 76 raise Abort("decoding near '%s': %s!" % (sub, inst))
77 77 except LookupError, k:
78 78 raise Abort(_("%s, please check your locale settings") % k)
79 79
80 80 def locallen(s):
81 81 """Find the length in characters of a local string"""
82 82 return len(s.decode(_encoding, "replace"))
83 83
84 84 # used by parsedate
85 85 defaultdateformats = (
86 86 '%Y-%m-%d %H:%M:%S',
87 87 '%Y-%m-%d %I:%M:%S%p',
88 88 '%Y-%m-%d %H:%M',
89 89 '%Y-%m-%d %I:%M%p',
90 90 '%Y-%m-%d',
91 91 '%m-%d',
92 92 '%m/%d',
93 93 '%m/%d/%y',
94 94 '%m/%d/%Y',
95 95 '%a %b %d %H:%M:%S %Y',
96 96 '%a %b %d %I:%M:%S%p %Y',
97 97 '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
98 98 '%b %d %H:%M:%S %Y',
99 99 '%b %d %I:%M:%S%p %Y',
100 100 '%b %d %H:%M:%S',
101 101 '%b %d %I:%M:%S%p',
102 102 '%b %d %H:%M',
103 103 '%b %d %I:%M%p',
104 104 '%b %d %Y',
105 105 '%b %d',
106 106 '%H:%M:%S',
107 107 '%I:%M:%S%p',
108 108 '%H:%M',
109 109 '%I:%M%p',
110 110 )
111 111
112 112 extendeddateformats = defaultdateformats + (
113 113 "%Y",
114 114 "%Y-%m",
115 115 "%b",
116 116 "%b %Y",
117 117 )
118 118
119 119 class SignalInterrupt(Exception):
120 120 """Exception raised on SIGTERM and SIGHUP."""
121 121
122 122 # differences from SafeConfigParser:
123 123 # - case-sensitive keys
124 124 # - allows values that are not strings (this means that you may not
125 125 # be able to save the configuration to a file)
126 126 class configparser(ConfigParser.SafeConfigParser):
127 127 def optionxform(self, optionstr):
128 128 return optionstr
129 129
130 130 def set(self, section, option, value):
131 131 return ConfigParser.ConfigParser.set(self, section, option, value)
132 132
133 133 def _interpolate(self, section, option, rawval, vars):
134 134 if not isinstance(rawval, basestring):
135 135 return rawval
136 136 return ConfigParser.SafeConfigParser._interpolate(self, section,
137 137 option, rawval, vars)
138 138
139 139 def cachefunc(func):
140 140 '''cache the result of function calls'''
141 141 # XXX doesn't handle keywords args
142 142 cache = {}
143 143 if func.func_code.co_argcount == 1:
144 144 # we gain a small amount of time because
145 145 # we don't need to pack/unpack the list
146 146 def f(arg):
147 147 if arg not in cache:
148 148 cache[arg] = func(arg)
149 149 return cache[arg]
150 150 else:
151 151 def f(*args):
152 152 if args not in cache:
153 153 cache[args] = func(*args)
154 154 return cache[args]
155 155
156 156 return f
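
cachefunc() memoizes by positional arguments only (keyword arguments are not handled, as the XXX notes). A hypothetical usage sketch with a stand-in memoizer of the same shape:

# Hypothetical use of a cachefunc-style memoizer: the wrapped function
# runs once per distinct argument tuple.
calls = []

def slow_square(x):
    calls.append(x)
    return x * x

def cachefunc_sketch(func):          # same structure as cachefunc() above
    cache = {}
    def f(*args):
        if args not in cache:
            cache[args] = func(*args)
        return cache[args]
    return f

fast_square = cachefunc_sketch(slow_square)
assert fast_square(3) == 9 and fast_square(3) == 9
assert calls == [3]                  # the underlying function ran only once
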
157 157
158 158 def pipefilter(s, cmd):
159 159 '''filter string S through command CMD, returning its output'''
160 160 (pin, pout) = os.popen2(cmd, 'b')
161 161 def writer():
162 162 try:
163 163 pin.write(s)
164 164 pin.close()
165 165 except IOError, inst:
166 166 if inst.errno != errno.EPIPE:
167 167 raise
168 168
169 169 # we should use select instead on UNIX, but this will work on most
170 170 # systems, including Windows
171 171 w = threading.Thread(target=writer)
172 172 w.start()
173 173 f = pout.read()
174 174 pout.close()
175 175 w.join()
176 176 return f
177 177
178 178 def tempfilter(s, cmd):
179 179 '''filter string S through a pair of temporary files with CMD.
180 180 CMD is used as a template to create the real command to be run,
181 181 with the strings INFILE and OUTFILE replaced by the real names of
182 182 the temporary files generated.'''
183 183 inname, outname = None, None
184 184 try:
185 185 infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
186 186 fp = os.fdopen(infd, 'wb')
187 187 fp.write(s)
188 188 fp.close()
189 189 outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
190 190 os.close(outfd)
191 191 cmd = cmd.replace('INFILE', inname)
192 192 cmd = cmd.replace('OUTFILE', outname)
193 193 code = os.system(cmd)
194 194 if sys.platform == 'OpenVMS' and code & 1:
195 195 code = 0
196 196 if code: raise Abort(_("command '%s' failed: %s") %
197 197 (cmd, explain_exit(code)))
198 198 return open(outname, 'rb').read()
199 199 finally:
200 200 try:
201 201 if inname: os.unlink(inname)
202 202 except: pass
203 203 try:
204 204 if outname: os.unlink(outname)
205 205 except: pass
206 206
207 207 filtertable = {
208 208 'tempfile:': tempfilter,
209 209 'pipe:': pipefilter,
210 210 }
211 211
212 212 def filter(s, cmd):
213 213 "filter a string through a command that transforms its input to its output"
214 214 for name, fn in filtertable.iteritems():
215 215 if cmd.startswith(name):
216 216 return fn(s, cmd[len(name):].lstrip())
217 217 return pipefilter(s, cmd)
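
filter() dispatches on a known prefix ('tempfile:' or 'pipe:') and otherwise treats the whole string as a pipe command. A small sketch of the same prefix dispatch with stand-in handlers, so it runs without spawning any process:

# Sketch of the prefix dispatch in filter(); the handlers are stand-ins,
# not the real pipefilter/tempfilter.
def pipe_handler(s, cmd):
    return 'pipe:%s:%s' % (cmd, s)

def tempfile_handler(s, cmd):
    return 'tempfile:%s:%s' % (cmd, s)

table = {'tempfile:': tempfile_handler, 'pipe:': pipe_handler}

def filter_sketch(s, cmd):
    for name, fn in table.items():
        if cmd.startswith(name):
            return fn(s, cmd[len(name):].lstrip())
    return pipe_handler(s, cmd)        # default: treat cmd as a shell pipe

assert filter_sketch('data', 'tempfile: sed -e s/a/b/') == 'tempfile:sed -e s/a/b/:data'
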
218 218
219 219 def binary(s):
220 220 """return true if a string is binary data using diff's heuristic"""
221 221 if s and '\0' in s[:4096]:
222 222 return True
223 223 return False
224 224
225 225 def unique(g):
226 226 """return the uniq elements of iterable g"""
227 227 return dict.fromkeys(g).keys()
228 228
229 229 class Abort(Exception):
230 230 """Raised if a command needs to print an error and exit."""
231 231
232 232 class UnexpectedOutput(Abort):
233 233 """Raised to print an error with part of output and exit."""
234 234
235 235 def always(fn): return True
236 236 def never(fn): return False
237 237
238 238 def expand_glob(pats):
239 239 '''On Windows, expand the implicit globs in a list of patterns'''
240 240 if os.name != 'nt':
241 241 return list(pats)
242 242 ret = []
243 243 for p in pats:
244 244 kind, name = patkind(p, None)
245 245 if kind is None:
246 246 globbed = glob.glob(name)
247 247 if globbed:
248 248 ret.extend(globbed)
249 249 continue
250 250 # if we couldn't expand the glob, just keep it around
251 251 ret.append(p)
252 252 return ret
253 253
254 254 def patkind(name, dflt_pat='glob'):
255 255 """Split a string into an optional pattern kind prefix and the
256 256 actual pattern."""
257 257 for prefix in 're', 'glob', 'path', 'relglob', 'relpath', 'relre':
258 258 if name.startswith(prefix + ':'): return name.split(':', 1)
259 259 return dflt_pat, name
260 260
261 261 def globre(pat, head='^', tail='$'):
262 262 "convert a glob pattern into a regexp"
263 263 i, n = 0, len(pat)
264 264 res = ''
265 265 group = False
266 266 def peek(): return i < n and pat[i]
267 267 while i < n:
268 268 c = pat[i]
269 269 i = i+1
270 270 if c == '*':
271 271 if peek() == '*':
272 272 i += 1
273 273 res += '.*'
274 274 else:
275 275 res += '[^/]*'
276 276 elif c == '?':
277 277 res += '.'
278 278 elif c == '[':
279 279 j = i
280 280 if j < n and pat[j] in '!]':
281 281 j += 1
282 282 while j < n and pat[j] != ']':
283 283 j += 1
284 284 if j >= n:
285 285 res += '\\['
286 286 else:
287 287 stuff = pat[i:j].replace('\\','\\\\')
288 288 i = j + 1
289 289 if stuff[0] == '!':
290 290 stuff = '^' + stuff[1:]
291 291 elif stuff[0] == '^':
292 292 stuff = '\\' + stuff
293 293 res = '%s[%s]' % (res, stuff)
294 294 elif c == '{':
295 295 group = True
296 296 res += '(?:'
297 297 elif c == '}' and group:
298 298 res += ')'
299 299 group = False
300 300 elif c == ',' and group:
301 301 res += '|'
302 302 elif c == '\\':
303 303 p = peek()
304 304 if p:
305 305 i += 1
306 306 res += re.escape(p)
307 307 else:
308 308 res += re.escape(c)
309 309 else:
310 310 res += re.escape(c)
311 311 return head + res + tail
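
globre() maps glob syntax onto an anchored regular expression: '*' becomes '[^/]*' (it cannot cross a slash), '**' becomes '.*', and '{a,b}' becomes '(?:a|b)'. A hedged, hand-built illustration of the first two rules; the patterns below were written from the rules in the code, not produced by calling globre:

import re

# '*' must not cross '/', while '**' may; regexes built by hand.
single = re.compile('^' + '[^/]*' + re.escape('.py') + '$')   # like glob '*.py'
double = re.compile('^' + '.*' + re.escape('.py') + '$')      # like glob '**.py'

assert single.match('foo.py') and not single.match('dir/foo.py')
assert double.match('dir/foo.py')
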
312 312
313 313 _globchars = {'[': 1, '{': 1, '*': 1, '?': 1}
314 314
315 315 def pathto(root, n1, n2):
316 316 '''return the relative path from one place to another.
317 317 root should use os.sep to separate directories
318 318 n1 should use os.sep to separate directories
319 319 n2 should use "/" to separate directories
320 320 returns an os.sep-separated path.
321 321
322 322 If n1 is a relative path, it's assumed it's
323 323 relative to root.
324 324 n2 should always be relative to root.
325 325 '''
326 326 if not n1: return localpath(n2)
327 327 if os.path.isabs(n1):
328 328 if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
329 329 return os.path.join(root, localpath(n2))
330 330 n2 = '/'.join((pconvert(root), n2))
331 331 a, b = n1.split(os.sep), n2.split('/')
332 332 a.reverse()
333 333 b.reverse()
334 334 while a and b and a[-1] == b[-1]:
335 335 a.pop()
336 336 b.pop()
337 337 b.reverse()
338 338 return os.sep.join((['..'] * len(a)) + b)
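
pathto() strips the shared leading components of the two paths and climbs out of what remains of n1 with '..' entries. A standalone sketch of that core step, with the separator fixed to '/' instead of os.sep:

# Standalone sketch of the common-prefix stripping in pathto(),
# with '/' hardcoded as the separator.
def pathto_sketch(n1, n2):
    a, b = n1.split('/'), n2.split('/')
    a.reverse()
    b.reverse()
    while a and b and a[-1] == b[-1]:
        a.pop()
        b.pop()
    b.reverse()
    return '/'.join(['..'] * len(a) + b)

assert pathto_sketch('src/ui', 'src/util.py') == '../util.py'
assert pathto_sketch('doc', 'src/util.py') == '../src/util.py'
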
339 339
340 340 def canonpath(root, cwd, myname):
341 341 """return the canonical path of myname, given cwd and root"""
342 342 if root == os.sep:
343 343 rootsep = os.sep
344 elif root.endswith(os.sep):
344 elif endswithsep(root):
345 345 rootsep = root
346 346 else:
347 347 rootsep = root + os.sep
348 348 name = myname
349 349 if not os.path.isabs(name):
350 350 name = os.path.join(root, cwd, name)
351 351 name = os.path.normpath(name)
352 352 audit_path = path_auditor(root)
353 353 if name != rootsep and name.startswith(rootsep):
354 354 name = name[len(rootsep):]
355 355 audit_path(name)
356 356 return pconvert(name)
357 357 elif name == root:
358 358 return ''
359 359 else:
360 360 # Determine whether `name' is in the hierarchy at or beneath `root',
361 361 # by iterating name=dirname(name) until that causes no change (can't
362 362 # check name == '/', because that doesn't work on windows). For each
363 363 # `name', compare dev/inode numbers. If they match, the list `rel'
364 364 # holds the reversed list of components making up the relative file
365 365 # name we want.
366 366 root_st = os.stat(root)
367 367 rel = []
368 368 while True:
369 369 try:
370 370 name_st = os.stat(name)
371 371 except OSError:
372 372 break
373 373 if samestat(name_st, root_st):
374 374 if not rel:
375 375 # name was actually the same as root (maybe a symlink)
376 376 return ''
377 377 rel.reverse()
378 378 name = os.path.join(*rel)
379 379 audit_path(name)
380 380 return pconvert(name)
381 381 dirname, basename = os.path.split(name)
382 382 rel.append(basename)
383 383 if dirname == name:
384 384 break
385 385 name = dirname
386 386
387 387 raise Abort('%s not under root' % myname)
388 388
389 389 def matcher(canonroot, cwd='', names=[], inc=[], exc=[], src=None):
390 390 return _matcher(canonroot, cwd, names, inc, exc, 'glob', src)
391 391
392 392 def cmdmatcher(canonroot, cwd='', names=[], inc=[], exc=[], src=None,
393 393 globbed=False, default=None):
394 394 default = default or 'relpath'
395 395 if default == 'relpath' and not globbed:
396 396 names = expand_glob(names)
397 397 return _matcher(canonroot, cwd, names, inc, exc, default, src)
398 398
399 399 def _matcher(canonroot, cwd, names, inc, exc, dflt_pat, src):
400 400 """build a function to match a set of file patterns
401 401
402 402 arguments:
403 403 canonroot - the canonical root of the tree you're matching against
404 404 cwd - the current working directory, if relevant
405 405 names - patterns to find
406 406 inc - patterns to include
407 407 exc - patterns to exclude
408 408 dflt_pat - if a pattern in names has no explicit type, assume this one
409 409 src - where these patterns came from (e.g. .hgignore)
410 410
411 411 a pattern is one of:
412 412 'glob:<glob>' - a glob relative to cwd
413 413 're:<regexp>' - a regular expression
414 414 'path:<path>' - a path relative to canonroot
415 415 'relglob:<glob>' - an unrooted glob (*.c matches C files in all dirs)
416 416 'relpath:<path>' - a path relative to cwd
417 417 'relre:<regexp>' - a regexp that doesn't have to match the start of a name
418 418 '<something>' - one of the cases above, selected by the dflt_pat argument
419 419
420 420 returns:
421 421 a 3-tuple containing
422 422 - list of roots (places where one should start a recursive walk of the fs);
423 423 this often matches the explicit non-pattern names passed in, but also
424 424 includes the initial part of glob: patterns that has no glob characters
425 425 - a bool match(filename) function
426 426 - a bool indicating if any patterns were passed in
427 427 """
428 428
429 429 # a common case: no patterns at all
430 430 if not names and not inc and not exc:
431 431 return [], always, False
432 432
433 433 def contains_glob(name):
434 434 for c in name:
435 435 if c in _globchars: return True
436 436 return False
437 437
438 438 def regex(kind, name, tail):
439 439 '''convert a pattern into a regular expression'''
440 440 if not name:
441 441 return ''
442 442 if kind == 're':
443 443 return name
444 444 elif kind == 'path':
445 445 return '^' + re.escape(name) + '(?:/|$)'
446 446 elif kind == 'relglob':
447 447 return globre(name, '(?:|.*/)', tail)
448 448 elif kind == 'relpath':
449 449 return re.escape(name) + '(?:/|$)'
450 450 elif kind == 'relre':
451 451 if name.startswith('^'):
452 452 return name
453 453 return '.*' + name
454 454 return globre(name, '', tail)
455 455
456 456 def matchfn(pats, tail):
457 457 """build a matching function from a set of patterns"""
458 458 if not pats:
459 459 return
460 460 try:
461 461 pat = '(?:%s)' % '|'.join([regex(k, p, tail) for (k, p) in pats])
462 462 return re.compile(pat).match
463 463 except OverflowError:
464 464 # We're using a Python with a tiny regex engine and we
465 465 # made it explode, so we'll divide the pattern list in two
466 466 # until it works
467 467 l = len(pats)
468 468 if l < 2:
469 469 raise
470 470 a, b = matchfn(pats[:l//2], tail), matchfn(pats[l//2:], tail)
471 471 return lambda s: a(s) or b(s)
472 472 except re.error:
473 473 for k, p in pats:
474 474 try:
475 475 re.compile('(?:%s)' % regex(k, p, tail))
476 476 except re.error:
477 477 if src:
478 478 raise Abort("%s: invalid pattern (%s): %s" %
479 479 (src, k, p))
480 480 else:
481 481 raise Abort("invalid pattern (%s): %s" % (k, p))
482 482 raise Abort("invalid pattern")
483 483
484 484 def globprefix(pat):
485 485 '''return the non-glob prefix of a path, e.g. foo/* -> foo'''
486 486 root = []
487 487 for p in pat.split('/'):
488 488 if contains_glob(p): break
489 489 root.append(p)
490 490 return '/'.join(root) or '.'
491 491
492 492 def normalizepats(names, default):
493 493 pats = []
494 494 roots = []
495 495 anypats = False
496 496 for kind, name in [patkind(p, default) for p in names]:
497 497 if kind in ('glob', 'relpath'):
498 498 name = canonpath(canonroot, cwd, name)
499 499 elif kind in ('relglob', 'path'):
500 500 name = normpath(name)
501 501
502 502 pats.append((kind, name))
503 503
504 504 if kind in ('glob', 're', 'relglob', 'relre'):
505 505 anypats = True
506 506
507 507 if kind == 'glob':
508 508 root = globprefix(name)
509 509 roots.append(root)
510 510 elif kind in ('relpath', 'path'):
511 511 roots.append(name or '.')
512 512 elif kind == 'relglob':
513 513 roots.append('.')
514 514 return roots, pats, anypats
515 515
516 516 roots, pats, anypats = normalizepats(names, dflt_pat)
517 517
518 518 patmatch = matchfn(pats, '$') or always
519 519 incmatch = always
520 520 if inc:
521 521 dummy, inckinds, dummy = normalizepats(inc, 'glob')
522 522 incmatch = matchfn(inckinds, '(?:/|$)')
523 523 excmatch = lambda fn: False
524 524 if exc:
525 525 dummy, exckinds, dummy = normalizepats(exc, 'glob')
526 526 excmatch = matchfn(exckinds, '(?:/|$)')
527 527
528 528 if not names and inc and not exc:
529 529 # common case: hgignore patterns
530 530 match = incmatch
531 531 else:
532 532 match = lambda fn: incmatch(fn) and not excmatch(fn) and patmatch(fn)
533 533
534 534 return (roots, match, (inc or exc or anypats) and True)
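
The final predicate built by _matcher() is the conjunction shown just above: a file must pass the include patterns, miss the excludes, and hit the positional patterns. A self-contained sketch of that composition with hand-written stand-in regexes; they mirror the 'relglob' and 'path' translations from regex() above but are not produced by it:

import re

patmatch = re.compile(r'(?:(?:|.*/)[^/]*\.py$)').match      # like 'relglob:*.py'
incmatch = re.compile(r'(?:^src(?:/|$))').match             # like include 'path:src'
excmatch = re.compile(r'(?:^src/vendor(?:/|$))').match      # like exclude 'path:src/vendor'

match = lambda fn: incmatch(fn) and not excmatch(fn) and patmatch(fn)

assert match('src/foo.py')
assert not match('src/vendor/x.py')     # excluded subtree
assert not match('src/README')          # fails the positional pattern
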
535 535
536 536 _hgexecutable = None
537 537
538 538 def hgexecutable():
539 539 """return location of the 'hg' executable.
540 540
541 541 Defaults to $HG or 'hg' in the search path.
542 542 """
543 543 if _hgexecutable is None:
544 544 set_hgexecutable(os.environ.get('HG') or find_exe('hg', 'hg'))
545 545 return _hgexecutable
546 546
547 547 def set_hgexecutable(path):
548 548 """set location of the 'hg' executable"""
549 549 global _hgexecutable
550 550 _hgexecutable = path
551 551
552 552 def system(cmd, environ={}, cwd=None, onerr=None, errprefix=None):
553 553 '''enhanced shell command execution.
554 554 run with environment maybe modified, maybe in different dir.
555 555
556 556 if command fails and onerr is None, return status. if ui object,
557 557 print error message and return status, else raise onerr object as
558 558 exception.'''
559 559 def py2shell(val):
560 560 'convert python object into string that is useful to shell'
561 561 if val in (None, False):
562 562 return '0'
563 563 if val == True:
564 564 return '1'
565 565 return str(val)
566 566 oldenv = {}
567 567 for k in environ:
568 568 oldenv[k] = os.environ.get(k)
569 569 if cwd is not None:
570 570 oldcwd = os.getcwd()
571 571 origcmd = cmd
572 572 if os.name == 'nt':
573 573 cmd = '"%s"' % cmd
574 574 try:
575 575 for k, v in environ.iteritems():
576 576 os.environ[k] = py2shell(v)
577 577 os.environ['HG'] = hgexecutable()
578 578 if cwd is not None and oldcwd != cwd:
579 579 os.chdir(cwd)
580 580 rc = os.system(cmd)
581 581 if sys.platform == 'OpenVMS' and rc & 1:
582 582 rc = 0
583 583 if rc and onerr:
584 584 errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
585 585 explain_exit(rc)[0])
586 586 if errprefix:
587 587 errmsg = '%s: %s' % (errprefix, errmsg)
588 588 try:
589 589 onerr.warn(errmsg + '\n')
590 590 except AttributeError:
591 591 raise onerr(errmsg)
592 592 return rc
593 593 finally:
594 594 for k, v in oldenv.iteritems():
595 595 if v is None:
596 596 del os.environ[k]
597 597 else:
598 598 os.environ[k] = v
599 599 if cwd is not None and oldcwd != cwd:
600 600 os.chdir(oldcwd)
601 601
602 602 # os.path.lexists is not available on python2.3
603 603 def lexists(filename):
604 604 "test whether a file with this name exists. does not follow symlinks"
605 605 try:
606 606 os.lstat(filename)
607 607 except:
608 608 return False
609 609 return True
610 610
611 611 def rename(src, dst):
612 612 """forcibly rename a file"""
613 613 try:
614 614 os.rename(src, dst)
615 615 except OSError, err: # FIXME: check err (EEXIST ?)
616 616 # on windows, rename to existing file is not allowed, so we
617 617 # must delete destination first. but if file is open, unlink
618 618 # schedules it for delete but does not delete it. rename
619 619 # happens immediately even for open files, so we create
620 620 # temporary file, delete it, rename destination to that name,
621 621 # then delete that. then rename is safe to do.
622 622 fd, temp = tempfile.mkstemp(dir=os.path.dirname(dst) or '.')
623 623 os.close(fd)
624 624 os.unlink(temp)
625 625 os.rename(dst, temp)
626 626 os.unlink(temp)
627 627 os.rename(src, dst)
628 628
629 629 def unlink(f):
630 630 """unlink and remove the directory if it is empty"""
631 631 os.unlink(f)
632 632 # try removing directories that might now be empty
633 633 try:
634 634 os.removedirs(os.path.dirname(f))
635 635 except OSError:
636 636 pass
637 637
638 638 def copyfile(src, dest):
639 639 "copy a file, preserving mode"
640 640 if os.path.islink(src):
641 641 try:
642 642 os.unlink(dest)
643 643 except:
644 644 pass
645 645 os.symlink(os.readlink(src), dest)
646 646 else:
647 647 try:
648 648 shutil.copyfile(src, dest)
649 649 shutil.copymode(src, dest)
650 650 except shutil.Error, inst:
651 651 raise Abort(str(inst))
652 652
653 653 def copyfiles(src, dst, hardlink=None):
654 654 """Copy a directory tree using hardlinks if possible"""
655 655
656 656 if hardlink is None:
657 657 hardlink = (os.stat(src).st_dev ==
658 658 os.stat(os.path.dirname(dst)).st_dev)
659 659
660 660 if os.path.isdir(src):
661 661 os.mkdir(dst)
662 662 for name, kind in osutil.listdir(src):
663 663 srcname = os.path.join(src, name)
664 664 dstname = os.path.join(dst, name)
665 665 copyfiles(srcname, dstname, hardlink)
666 666 else:
667 667 if hardlink:
668 668 try:
669 669 os_link(src, dst)
670 670 except (IOError, OSError):
671 671 hardlink = False
672 672 shutil.copy(src, dst)
673 673 else:
674 674 shutil.copy(src, dst)
675 675
676 676 class path_auditor(object):
677 677 '''ensure that a filesystem path contains no banned components.
678 678 the following properties of a path are checked:
679 679
680 680 - under top-level .hg
681 681 - starts at the root of a windows drive
682 682 - contains ".."
683 683 - traverses a symlink (e.g. a/symlink_here/b)
684 684 - inside a nested repository'''
685 685
686 686 def __init__(self, root):
687 687 self.audited = set()
688 688 self.auditeddir = set()
689 689 self.root = root
690 690
691 691 def __call__(self, path):
692 692 if path in self.audited:
693 693 return
694 694 normpath = os.path.normcase(path)
695 695 parts = normpath.split(os.sep)
696 696 if (os.path.splitdrive(path)[0] or parts[0] in ('.hg', '')
697 697 or os.pardir in parts):
698 698 raise Abort(_("path contains illegal component: %s") % path)
699 699 def check(prefix):
700 700 curpath = os.path.join(self.root, prefix)
701 701 try:
702 702 st = os.lstat(curpath)
703 703 except OSError, err:
704 704 # EINVAL can be raised as invalid path syntax under win32.
705 705 # They must be ignored for patterns can be checked too.
706 706 if err.errno not in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL):
707 707 raise
708 708 else:
709 709 if stat.S_ISLNK(st.st_mode):
710 710 raise Abort(_('path %r traverses symbolic link %r') %
711 711 (path, prefix))
712 712 elif (stat.S_ISDIR(st.st_mode) and
713 713 os.path.isdir(os.path.join(curpath, '.hg'))):
714 714 raise Abort(_('path %r is inside repo %r') %
715 715 (path, prefix))
716 716
717 717 prefixes = []
718 718 for c in strutil.rfindall(normpath, os.sep):
719 719 prefix = normpath[:c]
720 720 if prefix in self.auditeddir:
721 721 break
722 722 check(prefix)
723 723 prefixes.append(prefix)
724 724
725 725 self.audited.add(path)
726 726 # only add prefixes to the cache after checking everything: we don't
727 727 # want to add "foo/bar/baz" before checking if there's a "foo/.hg"
728 728 self.auditeddir.update(prefixes)
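
Before any filesystem checks, the auditor rejects paths on purely lexical grounds: a drive prefix, a leading '.hg' or empty component, or any '..' component. A minimal sketch of just that lexical test; the symlink and nested-repository checks need a real filesystem and are omitted:

import os

# Lexical part of path_auditor.__call__ only; stat-based checks omitted.
def lexically_bad(path):
    parts = path.split('/')
    return bool(os.path.splitdrive(path)[0]
                or parts[0] in ('.hg', '')
                or os.pardir in parts)

assert lexically_bad('../etc/passwd')    # contains '..'
assert lexically_bad('.hg/hgrc')         # reaches into the metadata area
assert lexically_bad('/etc/passwd')      # absolute: empty first component
assert not lexically_bad('src/util.py')
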
729 729
730 730 def _makelock_file(info, pathname):
731 731 ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
732 732 os.write(ld, info)
733 733 os.close(ld)
734 734
735 735 def _readlock_file(pathname):
736 736 return posixfile(pathname).read()
737 737
738 738 def nlinks(pathname):
739 739 """Return number of hardlinks for the given file."""
740 740 return os.lstat(pathname).st_nlink
741 741
742 742 if hasattr(os, 'link'):
743 743 os_link = os.link
744 744 else:
745 745 def os_link(src, dst):
746 746 raise OSError(0, _("Hardlinks not supported"))
747 747
748 748 def fstat(fp):
749 749 '''stat file object that may not have fileno method.'''
750 750 try:
751 751 return os.fstat(fp.fileno())
752 752 except AttributeError:
753 753 return os.stat(fp.name)
754 754
755 755 posixfile = file
756 756
757 757 def openhardlinks():
758 758 '''return true if it is safe to hold open file handles to hardlinks'''
759 759 return True
760 760
761 761 getuser_fallback = None
762 762
763 763 def getuser():
764 764 '''return name of current user'''
765 765 try:
766 766 return getpass.getuser()
767 767 except ImportError:
768 768 # import of pwd will fail on windows - try fallback
769 769 if getuser_fallback:
770 770 return getuser_fallback()
771 771 # raised if win32api not available
772 772 raise Abort(_('user name not available - set USERNAME '
773 773 'environment variable'))
774 774
775 775 def username(uid=None):
776 776 """Return the name of the user with the given uid.
777 777
778 778 If uid is None, return the name of the current user."""
779 779 try:
780 780 import pwd
781 781 if uid is None:
782 782 uid = os.getuid()
783 783 try:
784 784 return pwd.getpwuid(uid)[0]
785 785 except KeyError:
786 786 return str(uid)
787 787 except ImportError:
788 788 return None
789 789
790 790 def groupname(gid=None):
791 791 """Return the name of the group with the given gid.
792 792
793 793 If gid is None, return the name of the current group."""
794 794 try:
795 795 import grp
796 796 if gid is None:
797 797 gid = os.getgid()
798 798 try:
799 799 return grp.getgrgid(gid)[0]
800 800 except KeyError:
801 801 return str(gid)
802 802 except ImportError:
803 803 return None
804 804
805 805 # File system features
806 806
807 807 def checkfolding(path):
808 808 """
809 809 Check whether the given path is on a case-sensitive filesystem
810 810
811 811 Requires a path (like /foo/.hg) ending with a foldable final
812 812 directory component.
813 813 """
814 814 s1 = os.stat(path)
815 815 d, b = os.path.split(path)
816 816 p2 = os.path.join(d, b.upper())
817 817 if path == p2:
818 818 p2 = os.path.join(d, b.lower())
819 819 try:
820 820 s2 = os.stat(p2)
821 821 if s2 == s1:
822 822 return False
823 823 return True
824 824 except:
825 825 return True
826 826
827 827 def checkexec(path):
828 828 """
829 829 Check whether the given path is on a filesystem with UNIX-like exec flags
830 830
831 831 Requires a directory (like /foo/.hg)
832 832 """
833 833
834 834 # VFAT on some Linux versions can flip mode but it doesn't persist
835 835 # a FS remount. Frequently we can detect it if files are created
836 836 # with exec bit on.
837 837
838 838 try:
839 839 EXECFLAGS = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
840 840 fh, fn = tempfile.mkstemp("", "", path)
841 841 try:
842 842 os.close(fh)
843 843 m = os.stat(fn).st_mode & 0777
844 844 new_file_has_exec = m & EXECFLAGS
845 845 os.chmod(fn, m ^ EXECFLAGS)
846 846 exec_flags_cannot_flip = ((os.stat(fn).st_mode & 0777) == m)
847 847 finally:
848 848 os.unlink(fn)
849 849 except (IOError, OSError):
850 850 # we don't care, the user probably won't be able to commit anyway
851 851 return False
852 852 return not (new_file_has_exec or exec_flags_cannot_flip)
853 853
854 854 def execfunc(path, fallback):
855 855 '''return an is_exec() function with default to fallback'''
856 856 if checkexec(path):
857 857 return lambda x: is_exec(os.path.join(path, x))
858 858 return fallback
859 859
860 860 def checklink(path):
861 861 """check whether the given path is on a symlink-capable filesystem"""
862 862 # mktemp is not racy because symlink creation will fail if the
863 863 # file already exists
864 864 name = tempfile.mktemp(dir=path)
865 865 try:
866 866 os.symlink(".", name)
867 867 os.unlink(name)
868 868 return True
869 869 except (OSError, AttributeError):
870 870 return False
871 871
872 872 def linkfunc(path, fallback):
873 873 '''return an is_link() function with default to fallback'''
874 874 if checklink(path):
875 875 return lambda x: os.path.islink(os.path.join(path, x))
876 876 return fallback
877 877
878 878 _umask = os.umask(0)
879 879 os.umask(_umask)
880 880
881 881 def needbinarypatch():
882 882 """return True if patches should be applied in binary mode by default."""
883 883 return os.name == 'nt'
884 884
885 def endswithsep(path):
886 '''Check path ends with os.sep or os.altsep.'''
887 return path.endswith(os.sep) or os.altsep and path.endswith(os.altsep)
888
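
endswithsep(), added here, treats both os.sep and os.altsep as trailing separators. That matters on Windows, where os.sep is '\\' but os.altsep is '/', so a plain endswith(os.sep) misses paths written with forward slashes. An illustration with the Windows separator values hardcoded rather than taken from os:

# Windows separator values hardcoded for the demo; the real function
# reads os.sep and os.altsep.
sep, altsep = '\\', '/'

def endswithsep_sketch(path):
    return path.endswith(sep) or (altsep and path.endswith(altsep))

assert endswithsep_sketch('C:\\repo\\')
assert endswithsep_sketch('C:/repo/')     # caught only because of altsep
assert not endswithsep_sketch('C:/repo')
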
885 889 # Platform specific variants
886 890 if os.name == 'nt':
887 891 import msvcrt
888 892 nulldev = 'NUL:'
889 893
890 894 class winstdout:
891 895 '''stdout on windows misbehaves if sent through a pipe'''
892 896
893 897 def __init__(self, fp):
894 898 self.fp = fp
895 899
896 900 def __getattr__(self, key):
897 901 return getattr(self.fp, key)
898 902
899 903 def close(self):
900 904 try:
901 905 self.fp.close()
902 906 except: pass
903 907
904 908 def write(self, s):
905 909 try:
906 910 # This is a workaround for the "Not enough space" error when
907 911 # writing a large amount of data to the console.
908 912 limit = 16000
909 913 l = len(s)
910 914 start = 0
911 915 while start < l:
912 916 end = start + limit
913 917 self.fp.write(s[start:end])
914 918 start = end
915 919 except IOError, inst:
916 920 if inst.errno != 0: raise
917 921 self.close()
918 922 raise IOError(errno.EPIPE, 'Broken pipe')
919 923
920 924 def flush(self):
921 925 try:
922 926 return self.fp.flush()
923 927 except IOError, inst:
924 928 if inst.errno != errno.EINVAL: raise
925 929 self.close()
926 930 raise IOError(errno.EPIPE, 'Broken pipe')
927 931
928 932 sys.stdout = winstdout(sys.stdout)
929 933
930 934 def _is_win_9x():
931 935 '''return true if run on windows 95, 98 or me.'''
932 936 try:
933 937 return sys.getwindowsversion()[3] == 1
934 938 except AttributeError:
935 939 return 'command' in os.environ.get('comspec', '')
936 940
937 941 def openhardlinks():
938 942 return not _is_win_9x() and "win32api" in locals()
939 943
940 944 def system_rcpath():
941 945 try:
942 946 return system_rcpath_win32()
943 947 except:
944 948 return [r'c:\mercurial\mercurial.ini']
945 949
946 950 def user_rcpath():
947 951 '''return os-specific hgrc search path to the user dir'''
948 952 try:
949 953 userrc = user_rcpath_win32()
950 954 except:
951 955 userrc = os.path.join(os.path.expanduser('~'), 'mercurial.ini')
952 956 path = [userrc]
953 957 userprofile = os.environ.get('USERPROFILE')
954 958 if userprofile:
955 959 path.append(os.path.join(userprofile, 'mercurial.ini'))
956 960 return path
957 961
958 962 def parse_patch_output(output_line):
959 963 """parses the output produced by patch and returns the file name"""
960 964 pf = output_line[14:]
961 965 if pf[0] == '`':
962 966 pf = pf[1:-1] # Remove the quotes
963 967 return pf
964 968
965 969 def sshargs(sshcmd, host, user, port):
966 970 '''Build argument list for ssh or Plink'''
967 971 pflag = 'plink' in sshcmd.lower() and '-P' or '-p'
968 972 args = user and ("%s@%s" % (user, host)) or host
969 973 return port and ("%s %s %s" % (args, pflag, port)) or args
970 974
971 975 def testpid(pid):
972 976 '''return False if pid dead, True if running or not known'''
973 977 return True
974 978
975 979 def set_flags(f, flags):
976 980 pass
977 981
978 982 def set_binary(fd):
979 983 msvcrt.setmode(fd.fileno(), os.O_BINARY)
980 984
981 985 def pconvert(path):
982 986 return path.replace("\\", "/")
983 987
984 988 def localpath(path):
985 989 return path.replace('/', '\\')
986 990
987 991 def normpath(path):
988 992 return pconvert(os.path.normpath(path))
989 993
990 994 makelock = _makelock_file
991 995 readlock = _readlock_file
992 996
993 997 def samestat(s1, s2):
994 998 return False
995 999
996 1000 # A sequence of backslashes is special iff it precedes a double quote:
997 1001 # - if there's an even number of backslashes, the double quote is not
998 1002 # quoted (i.e. it ends the quoted region)
999 1003 # - if there's an odd number of backslashes, the double quote is quoted
1000 1004 # - in both cases, every pair of backslashes is unquoted into a single
1001 1005 # backslash
1002 1006 # (See http://msdn2.microsoft.com/en-us/library/a1y7w461.aspx )
1003 1007 # So, to quote a string, we must surround it in double quotes, double
1004 1008 # the number of backslashes that precede double quotes and add another
1005 1009 # backslash before every double quote (being careful with the double
1006 1010 # quote we've appended to the end)
1007 1011 _quotere = None
1008 1012 def shellquote(s):
1009 1013 global _quotere
1010 1014 if _quotere is None:
1011 1015 _quotere = re.compile(r'(\\*)("|\\$)')
1012 1016 return '"%s"' % _quotere.sub(r'\1\1\\\2', s)
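
A worked example of the quoting rule described in the comment block above, reusing the same regular expression: backslashes in front of a double quote (or at the end of the string) are doubled, and the quote itself gets one more backslash.

import re

quotere = re.compile(r'(\\*)("|\\$)')      # same pattern as shellquote() above

def shellquote_sketch(s):
    return '"%s"' % quotere.sub(r'\1\1\\\2', s)

assert shellquote_sketch(r'say "hi"') == r'"say \"hi\""'
assert shellquote_sketch('C:\\dir\\') == '"C:\\dir\\\\"'    # trailing backslash doubled
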
1013 1017
1014 1018 def quotecommand(cmd):
1015 1019 """Build a command string suitable for os.popen* calls."""
1016 1020 # The extra quotes are needed because popen* runs the command
1017 1021 # through the current COMSPEC. cmd.exe suppress enclosing quotes.
1018 1022 return '"' + cmd + '"'
1019 1023
1020 1024 def popen(command):
1021 1025 # Work around "popen spawned process may not write to stdout
1022 1026 # under windows"
1023 1027 # http://bugs.python.org/issue1366
1024 1028 command += " 2> %s" % nulldev
1025 1029 return os.popen(quotecommand(command))
1026 1030
1027 1031 def explain_exit(code):
1028 1032 return _("exited with status %d") % code, code
1029 1033
1030 1034 # if you change this stub into a real check, please try to implement the
1031 1035 # username and groupname functions above, too.
1032 1036 def isowner(fp, st=None):
1033 1037 return True
1034 1038
1035 1039 def find_in_path(name, path, default=None):
1036 1040 '''find name in search path. path can be string (will be split
1037 1041 with os.pathsep), or iterable thing that returns strings. if name
1038 1042 found, return path to name. else return default. name is looked up
1039 1043 using cmd.exe rules, using PATHEXT.'''
1040 1044 if isinstance(path, str):
1041 1045 path = path.split(os.pathsep)
1042 1046
1043 1047 pathext = os.environ.get('PATHEXT', '.COM;.EXE;.BAT;.CMD')
1044 1048 pathext = pathext.lower().split(os.pathsep)
1045 1049 isexec = os.path.splitext(name)[1].lower() in pathext
1046 1050
1047 1051 for p in path:
1048 1052 p_name = os.path.join(p, name)
1049 1053
1050 1054 if isexec and os.path.exists(p_name):
1051 1055 return p_name
1052 1056
1053 1057 for ext in pathext:
1054 1058 p_name_ext = p_name + ext
1055 1059 if os.path.exists(p_name_ext):
1056 1060 return p_name_ext
1057 1061 return default
1058 1062
1059 1063 def set_signal_handler():
1060 1064 try:
1061 1065 set_signal_handler_win32()
1062 1066 except NameError:
1063 1067 pass
1064 1068
1065 1069 try:
1066 1070 # override functions with win32 versions if possible
1067 1071 from util_win32 import *
1068 1072 if not _is_win_9x():
1069 1073 posixfile = posixfile_nt
1070 1074 except ImportError:
1071 1075 pass
1072 1076
1073 1077 else:
1074 1078 nulldev = '/dev/null'
1075 1079
1076 1080 def rcfiles(path):
1077 1081 rcs = [os.path.join(path, 'hgrc')]
1078 1082 rcdir = os.path.join(path, 'hgrc.d')
1079 1083 try:
1080 1084 rcs.extend([os.path.join(rcdir, f)
1081 1085 for f, kind in osutil.listdir(rcdir)
1082 1086 if f.endswith(".rc")])
1083 1087 except OSError:
1084 1088 pass
1085 1089 return rcs
1086 1090
1087 1091 def system_rcpath():
1088 1092 path = []
1089 1093 # old mod_python does not set sys.argv
1090 1094 if len(getattr(sys, 'argv', [])) > 0:
1091 1095 path.extend(rcfiles(os.path.dirname(sys.argv[0]) +
1092 1096 '/../etc/mercurial'))
1093 1097 path.extend(rcfiles('/etc/mercurial'))
1094 1098 return path
1095 1099
1096 1100 def user_rcpath():
1097 1101 return [os.path.expanduser('~/.hgrc')]
1098 1102
1099 1103 def parse_patch_output(output_line):
1100 1104 """parses the output produced by patch and returns the file name"""
1101 1105 pf = output_line[14:]
1102 1106 if os.sys.platform == 'OpenVMS':
1103 1107 if pf[0] == '`':
1104 1108 pf = pf[1:-1] # Remove the quotes
1105 1109 else:
1106 1110 if pf.startswith("'") and pf.endswith("'") and " " in pf:
1107 1111 pf = pf[1:-1] # Remove the quotes
1108 1112 return pf
1109 1113
1110 1114 def sshargs(sshcmd, host, user, port):
1111 1115 '''Build argument list for ssh'''
1112 1116 args = user and ("%s@%s" % (user, host)) or host
1113 1117 return port and ("%s -p %s" % (args, port)) or args
1114 1118
1115 1119 def is_exec(f):
1116 1120 """check whether a file is executable"""
1117 1121 return (os.lstat(f).st_mode & 0100 != 0)
1118 1122
1119 1123 def set_flags(f, flags):
1120 1124 s = os.lstat(f).st_mode
1121 1125 x = "x" in flags
1122 1126 l = "l" in flags
1123 1127 if l:
1124 1128 if not stat.S_ISLNK(s):
1125 1129 # switch file to link
1126 1130 data = file(f).read()
1127 1131 os.unlink(f)
1128 1132 os.symlink(data, f)
1129 1133 # no chmod needed at this point
1130 1134 return
1131 1135 if stat.S_ISLNK(s):
1132 1136 # switch link to file
1133 1137 data = os.readlink(f)
1134 1138 os.unlink(f)
1135 1139 file(f, "w").write(data)
1136 1140 s = 0666 & ~_umask # avoid restatting for chmod
1137 1141
1138 1142 sx = s & 0100
1139 1143 if x and not sx:
1140 1144 # Turn on +x for every +r bit when making a file executable
1141 1145 # and obey umask.
1142 1146 os.chmod(f, s | (s & 0444) >> 2 & ~_umask)
1143 1147 elif not x and sx:
1144 1148 # Turn off all +x bits
1145 1149 os.chmod(f, s & 0666)
1146 1150
1147 1151 def set_binary(fd):
1148 1152 pass
1149 1153
1150 1154 def pconvert(path):
1151 1155 return path
1152 1156
1153 1157 def localpath(path):
1154 1158 return path
1155 1159
1156 1160 normpath = os.path.normpath
1157 1161 samestat = os.path.samestat
1158 1162
1159 1163 def makelock(info, pathname):
1160 1164 try:
1161 1165 os.symlink(info, pathname)
1162 1166 except OSError, why:
1163 1167 if why.errno == errno.EEXIST:
1164 1168 raise
1165 1169 else:
1166 1170 _makelock_file(info, pathname)
1167 1171
1168 1172 def readlock(pathname):
1169 1173 try:
1170 1174 return os.readlink(pathname)
1171 1175 except OSError, why:
1172 1176 if why.errno in (errno.EINVAL, errno.ENOSYS):
1173 1177 return _readlock_file(pathname)
1174 1178 else:
1175 1179 raise
1176 1180
1177 1181 def shellquote(s):
1178 1182 if os.sys.platform == 'OpenVMS':
1179 1183 return '"%s"' % s
1180 1184 else:
1181 1185 return "'%s'" % s.replace("'", "'\\''")
1182 1186
1183 1187 def quotecommand(cmd):
1184 1188 return cmd
1185 1189
1186 1190 def popen(command):
1187 1191 return os.popen(command)
1188 1192
1189 1193 def testpid(pid):
1190 1194 '''return False if pid dead, True if running or not sure'''
1191 1195 if os.sys.platform == 'OpenVMS':
1192 1196 return True
1193 1197 try:
1194 1198 os.kill(pid, 0)
1195 1199 return True
1196 1200 except OSError, inst:
1197 1201 return inst.errno != errno.ESRCH
1198 1202
1199 1203 def explain_exit(code):
1200 1204 """return a 2-tuple (desc, code) describing a process's status"""
1201 1205 if os.WIFEXITED(code):
1202 1206 val = os.WEXITSTATUS(code)
1203 1207 return _("exited with status %d") % val, val
1204 1208 elif os.WIFSIGNALED(code):
1205 1209 val = os.WTERMSIG(code)
1206 1210 return _("killed by signal %d") % val, val
1207 1211 elif os.WIFSTOPPED(code):
1208 1212 val = os.WSTOPSIG(code)
1209 1213 return _("stopped by signal %d") % val, val
1210 1214 raise ValueError(_("invalid exit code"))
1211 1215
1212 1216 def isowner(fp, st=None):
1213 1217 """Return True if the file object f belongs to the current user.
1214 1218
1215 1219 The return value of a util.fstat(f) may be passed as the st argument.
1216 1220 """
1217 1221 if st is None:
1218 1222 st = fstat(fp)
1219 1223 return st.st_uid == os.getuid()
1220 1224
1221 1225 def find_in_path(name, path, default=None):
1222 1226 '''find name in search path. path can be string (will be split
1223 1227 with os.pathsep), or iterable thing that returns strings. if name
1224 1228 found, return path to name. else return default.'''
1225 1229 if isinstance(path, str):
1226 1230 path = path.split(os.pathsep)
1227 1231 for p in path:
1228 1232 p_name = os.path.join(p, name)
1229 1233 if os.path.exists(p_name):
1230 1234 return p_name
1231 1235 return default
1232 1236
1233 1237 def set_signal_handler():
1234 1238 pass
1235 1239
1236 1240 def find_exe(name, default=None):
1237 1241 '''find path of an executable.
1238 1242 if name contains a path component, return it as is. otherwise,
1239 1243 use normal executable search path.'''
1240 1244
1241 1245 if os.sep in name or sys.platform == 'OpenVMS':
1242 1246 # don't check the executable bit. if the file isn't
1243 1247 # executable, whoever tries to actually run it will give a
1244 1248 # much more useful error message.
1245 1249 return name
1246 1250 return find_in_path(name, os.environ.get('PATH', ''), default=default)
1247 1251
1248 1252 def _buildencodefun():
1249 1253 e = '_'
1250 1254 win_reserved = [ord(x) for x in '\\:*?"<>|']
1251 1255 cmap = dict([ (chr(x), chr(x)) for x in xrange(127) ])
1252 1256 for x in (range(32) + range(126, 256) + win_reserved):
1253 1257 cmap[chr(x)] = "~%02x" % x
1254 1258 for x in range(ord("A"), ord("Z")+1) + [ord(e)]:
1255 1259 cmap[chr(x)] = e + chr(x).lower()
1256 1260 dmap = {}
1257 1261 for k, v in cmap.iteritems():
1258 1262 dmap[v] = k
1259 1263 def decode(s):
1260 1264 i = 0
1261 1265 while i < len(s):
1262 1266 for l in xrange(1, 4):
1263 1267 try:
1264 1268 yield dmap[s[i:i+l]]
1265 1269 i += l
1266 1270 break
1267 1271 except KeyError:
1268 1272 pass
1269 1273 else:
1270 1274 raise KeyError
1271 1275 return (lambda s: "".join([cmap[c] for c in s]),
1272 1276 lambda s: "".join(list(decode(s))))
1273 1277
1274 1278 encodefilename, decodefilename = _buildencodefun()
1275 1279
1276 1280 def encodedopener(openerfn, fn):
1277 1281 def o(path, *args, **kw):
1278 1282 return openerfn(fn(path), *args, **kw)
1279 1283 return o
1280 1284
1281 1285 def mktempcopy(name, emptyok=False):
1282 1286 """Create a temporary file with the same contents from name
1283 1287
1284 1288 The permission bits are copied from the original file.
1285 1289
1286 1290 If the temporary file is going to be truncated immediately, you
1287 1291 can use emptyok=True as an optimization.
1288 1292
1289 1293 Returns the name of the temporary file.
1290 1294 """
1291 1295 d, fn = os.path.split(name)
1292 1296 fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
1293 1297 os.close(fd)
1294 1298 # Temporary files are created with mode 0600, which is usually not
1295 1299 # what we want. If the original file already exists, just copy
1296 1300 # its mode. Otherwise, manually obey umask.
1297 1301 try:
1298 1302 st_mode = os.lstat(name).st_mode & 0777
1299 1303 except OSError, inst:
1300 1304 if inst.errno != errno.ENOENT:
1301 1305 raise
1302 1306 st_mode = 0666 & ~_umask
1303 1307 os.chmod(temp, st_mode)
1304 1308 if emptyok:
1305 1309 return temp
1306 1310 try:
1307 1311 try:
1308 1312 ifp = posixfile(name, "rb")
1309 1313 except IOError, inst:
1310 1314 if inst.errno == errno.ENOENT:
1311 1315 return temp
1312 1316 if not getattr(inst, 'filename', None):
1313 1317 inst.filename = name
1314 1318 raise
1315 1319 ofp = posixfile(temp, "wb")
1316 1320 for chunk in filechunkiter(ifp):
1317 1321 ofp.write(chunk)
1318 1322 ifp.close()
1319 1323 ofp.close()
1320 1324 except:
1321 1325 try: os.unlink(temp)
1322 1326 except: pass
1323 1327 raise
1324 1328 return temp
1325 1329
1326 1330 class atomictempfile(posixfile):
1327 1331 """file-like object that atomically updates a file
1328 1332
1329 1333 All writes will be redirected to a temporary copy of the original
1330 1334 file. When rename is called, the copy is renamed to the original
1331 1335 name, making the changes visible.
1332 1336 """
1333 1337 def __init__(self, name, mode):
1334 1338 self.__name = name
1335 1339 self.temp = mktempcopy(name, emptyok=('w' in mode))
1336 1340 posixfile.__init__(self, self.temp, mode)
1337 1341
1338 1342 def rename(self):
1339 1343 if not self.closed:
1340 1344 posixfile.close(self)
1341 1345 rename(self.temp, localpath(self.__name))
1342 1346
1343 1347 def __del__(self):
1344 1348 if not self.closed:
1345 1349 try:
1346 1350 os.unlink(self.temp)
1347 1351 except: pass
1348 1352 posixfile.close(self)
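
atomictempfile writes into a temporary copy and only renames it over the original on success, so readers never observe a half-written file. A self-contained sketch of that write-temp-then-rename pattern; error cleanup is simplified, and on Windows the module's rename() wrapper would be needed because os.rename will not replace an existing file there:

import os
import tempfile

def atomic_write(name, data):
    d = os.path.dirname(name) or '.'
    fd, temp = tempfile.mkstemp(prefix='.tmp-', dir=d)
    os.write(fd, data)
    os.close(fd)
    os.rename(temp, name)      # readers see either the old or the new file

atomic_write('demo.txt', b'hello\n')
assert open('demo.txt', 'rb').read() == b'hello\n'
os.unlink('demo.txt')
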
1349 1353
1350 1354 class opener(object):
1351 1355 """Open files relative to a base directory
1352 1356
1353 1357 This class is used to hide the details of COW semantics and
1354 1358 remote file access from higher level code.
1355 1359 """
1356 1360 def __init__(self, base, audit=True):
1357 1361 self.base = base
1358 1362 if audit:
1359 1363 self.audit_path = path_auditor(base)
1360 1364 else:
1361 1365 self.audit_path = always
1362 1366
1363 1367 def __getattr__(self, name):
1364 1368 if name == '_can_symlink':
1365 1369 self._can_symlink = checklink(self.base)
1366 1370 return self._can_symlink
1367 1371 raise AttributeError(name)
1368 1372
1369 1373 def __call__(self, path, mode="r", text=False, atomictemp=False):
1370 1374 self.audit_path(path)
1371 1375 f = os.path.join(self.base, path)
1372 1376
1373 1377 if not text and "b" not in mode:
1374 1378 mode += "b" # for that other OS
1375 1379
1376 1380 if mode[0] != "r":
1377 1381 try:
1378 1382 nlink = nlinks(f)
1379 1383 except OSError:
1380 1384 nlink = 0
1381 1385 d = os.path.dirname(f)
1382 1386 if not os.path.isdir(d):
1383 1387 os.makedirs(d)
1384 1388 if atomictemp:
1385 1389 return atomictempfile(f, mode)
1386 1390 if nlink > 1:
1387 1391 rename(mktempcopy(f), f)
1388 1392 return posixfile(f, mode)
1389 1393
1390 1394 def symlink(self, src, dst):
1391 1395 self.audit_path(dst)
1392 1396 linkname = os.path.join(self.base, dst)
1393 1397 try:
1394 1398 os.unlink(linkname)
1395 1399 except OSError:
1396 1400 pass
1397 1401
1398 1402 dirname = os.path.dirname(linkname)
1399 1403 if not os.path.exists(dirname):
1400 1404 os.makedirs(dirname)
1401 1405
1402 1406 if self._can_symlink:
1403 1407 try:
1404 1408 os.symlink(src, linkname)
1405 1409 except OSError, err:
1406 1410 raise OSError(err.errno, _('could not symlink to %r: %s') %
1407 1411 (src, err.strerror), linkname)
1408 1412 else:
1409 1413 f = self(dst, "w")
1410 1414 f.write(src)
1411 1415 f.close()
1412 1416
1413 1417 class chunkbuffer(object):
1414 1418 """Allow arbitrary sized chunks of data to be efficiently read from an
1415 1419 iterator over chunks of arbitrary size."""
1416 1420
1417 1421 def __init__(self, in_iter):
1418 1422 """in_iter is the iterator that's iterating over the input chunks.
1419 1423 targetsize is how big a buffer to try to maintain."""
1420 1424 self.iter = iter(in_iter)
1421 1425 self.buf = ''
1422 1426 self.targetsize = 2**16
1423 1427
1424 1428 def read(self, l):
1425 1429 """Read L bytes of data from the iterator of chunks of data.
1426 1430 Returns less than L bytes if the iterator runs dry."""
1427 1431 if l > len(self.buf) and self.iter:
1428 1432 # Clamp to a multiple of self.targetsize
1429 1433 targetsize = max(l, self.targetsize)
1430 1434 collector = cStringIO.StringIO()
1431 1435 collector.write(self.buf)
1432 1436 collected = len(self.buf)
1433 1437 for chunk in self.iter:
1434 1438 collector.write(chunk)
1435 1439 collected += len(chunk)
1436 1440 if collected >= targetsize:
1437 1441 break
1438 1442 if collected < targetsize:
1439 1443 self.iter = False
1440 1444 self.buf = collector.getvalue()
1441 1445 if len(self.buf) == l:
1442 1446 s, self.buf = str(self.buf), ''
1443 1447 else:
1444 1448 s, self.buf = self.buf[:l], buffer(self.buf, l)
1445 1449 return s
1446 1450
1447 1451 def filechunkiter(f, size=65536, limit=None):
1448 1452 """Create a generator that produces the data in the file size
1449 1453 (default 65536) bytes at a time, up to optional limit (default is
1450 1454 to read all data). Chunks may be less than size bytes if the
1451 1455 chunk is the last chunk in the file, or the file is a socket or
1452 1456 some other type of file that sometimes reads less data than is
1453 1457 requested."""
1454 1458 assert size >= 0
1455 1459 assert limit is None or limit >= 0
1456 1460 while True:
1457 1461 if limit is None: nbytes = size
1458 1462 else: nbytes = min(limit, size)
1459 1463 s = nbytes and f.read(nbytes)
1460 1464 if not s: break
1461 1465 if limit: limit -= len(s)
1462 1466 yield s
1463 1467
1464 1468 def makedate():
1465 1469 lt = time.localtime()
1466 1470 if lt[8] == 1 and time.daylight:
1467 1471 tz = time.altzone
1468 1472 else:
1469 1473 tz = time.timezone
1470 1474 return time.mktime(lt), tz
1471 1475
1472 1476 def datestr(date=None, format='%a %b %d %H:%M:%S %Y', timezone=True, timezone_format=" %+03d%02d"):
1473 1477 """represent a (unixtime, offset) tuple as a localized time.
1474 1478 unixtime is seconds since the epoch, and offset is the time zone's
1475 1479 number of seconds away from UTC. if timezone is false, do not
1476 1480 append time zone to string."""
1477 1481 t, tz = date or makedate()
1478 1482 s = time.strftime(format, time.gmtime(float(t) - tz))
1479 1483 if timezone:
1480 1484 s += timezone_format % (-tz / 3600, ((-tz % 3600) / 60))
1481 1485 return s
1482 1486
1483 1487 def strdate(string, format, defaults=[]):
1484 1488 """parse a localized time string and return a (unixtime, offset) tuple.
1485 1489 if the string cannot be parsed, ValueError is raised."""
1486 1490 def timezone(string):
1487 1491 tz = string.split()[-1]
1488 1492 if tz[0] in "+-" and len(tz) == 5 and tz[1:].isdigit():
1489 1493 tz = int(tz)
1490 1494 offset = - 3600 * (tz / 100) - 60 * (tz % 100)
1491 1495 return offset
1492 1496 if tz == "GMT" or tz == "UTC":
1493 1497 return 0
1494 1498 return None
1495 1499
1496 1500 # NOTE: unixtime = localunixtime + offset
1497 1501 offset, date = timezone(string), string
1498 1502 if offset != None:
1499 1503 date = " ".join(string.split()[:-1])
1500 1504
1501 1505 # add missing elements from defaults
1502 1506 for part in defaults:
1503 1507 found = [True for p in part if ("%"+p) in format]
1504 1508 if not found:
1505 1509 date += "@" + defaults[part]
1506 1510 format += "@%" + part[0]
1507 1511
1508 1512 timetuple = time.strptime(date, format)
1509 1513 localunixtime = int(calendar.timegm(timetuple))
1510 1514 if offset is None:
1511 1515 # local timezone
1512 1516 unixtime = int(time.mktime(timetuple))
1513 1517 offset = unixtime - localunixtime
1514 1518 else:
1515 1519 unixtime = localunixtime + offset
1516 1520 return unixtime, offset
1517 1521
1518 1522 def parsedate(string, formats=None, defaults=None):
1519 1523 """parse a localized time string and return a (unixtime, offset) tuple.
1520 1524 The date may be a "unixtime offset" string or in one of the specified
1521 1525 formats."""
1522 1526 if not string:
1523 1527 return 0, 0
1524 1528 if not formats:
1525 1529 formats = defaultdateformats
1526 1530 string = string.strip()
1527 1531 try:
1528 1532 when, offset = map(int, string.split(' '))
1529 1533 except ValueError:
1530 1534 # fill out defaults
1531 1535 if not defaults:
1532 1536 defaults = {}
1533 1537 now = makedate()
1534 1538 for part in "d mb yY HI M S".split():
1535 1539 if part not in defaults:
1536 1540 if part[0] in "HMS":
1537 1541 defaults[part] = "00"
1538 1542 elif part[0] in "dm":
1539 1543 defaults[part] = "1"
1540 1544 else:
1541 1545 defaults[part] = datestr(now, "%" + part[0], False)
1542 1546
1543 1547 for format in formats:
1544 1548 try:
1545 1549 when, offset = strdate(string, format, defaults)
1546 1550 except ValueError:
1547 1551 pass
1548 1552 else:
1549 1553 break
1550 1554 else:
1551 1555 raise Abort(_('invalid date: %r ') % string)
1552 1556 # validate explicit (probably user-specified) date and
1553 1557 # time zone offset. values must fit in signed 32 bits for
1554 1558 # current 32-bit linux runtimes. timezones go from UTC-12
1555 1559 # to UTC+14
1556 1560 if abs(when) > 0x7fffffff:
1557 1561 raise Abort(_('date exceeds 32 bits: %d') % when)
1558 1562 if offset < -50400 or offset > 43200:
1559 1563 raise Abort(_('impossible time zone offset: %d') % offset)
1560 1564 return when, offset
1561 1565
1562 1566 def matchdate(date):
1563 1567 """Return a function that matches a given date match specifier
1564 1568
1565 1569 Formats include:
1566 1570
1567 1571 '{date}' match a given date to the accuracy provided
1568 1572
1569 1573 '<{date}' on or before a given date
1570 1574
1571 1575 '>{date}' on or after a given date
1572 1576
1573 1577 """
1574 1578
1575 1579 def lower(date):
1576 1580 return parsedate(date, extendeddateformats)[0]
1577 1581
1578 1582 def upper(date):
1579 1583 d = dict(mb="12", HI="23", M="59", S="59")
1580 1584 for days in "31 30 29".split():
1581 1585 try:
1582 1586 d["d"] = days
1583 1587 return parsedate(date, extendeddateformats, d)[0]
1584 1588 except:
1585 1589 pass
1586 1590 d["d"] = "28"
1587 1591 return parsedate(date, extendeddateformats, d)[0]
1588 1592
1589 1593 if date[0] == "<":
1590 1594 when = upper(date[1:])
1591 1595 return lambda x: x <= when
1592 1596 elif date[0] == ">":
1593 1597 when = lower(date[1:])
1594 1598 return lambda x: x >= when
1595 1599 elif date[0] == "-":
1596 1600 try:
1597 1601 days = int(date[1:])
1598 1602 except ValueError:
1599 1603 raise Abort(_("invalid day spec: %s") % date[1:])
1600 1604 when = makedate()[0] - days * 3600 * 24
1601 1605 return lambda x: x >= when
1602 1606 elif " to " in date:
1603 1607 a, b = date.split(" to ")
1604 1608 start, stop = lower(a), upper(b)
1605 1609 return lambda x: x >= start and x <= stop
1606 1610 else:
1607 1611 start, stop = lower(date), upper(date)
1608 1612 return lambda x: x >= start and x <= stop
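
Each branch of matchdate() returns a closure comparing a commit's timestamp against a precomputed bound. A standalone sketch of the '-{days}' branch, which accepts anything from the last N days:

import time

def lastdays(n):
    cutoff = time.time() - n * 24 * 3600
    return lambda when: when >= cutoff

recent = lastdays(7)
assert recent(time.time())                        # right now is inside the window
assert not recent(time.time() - 30 * 24 * 3600)   # a month ago is not
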
1609 1613
1610 1614 def shortuser(user):
1611 1615 """Return a short representation of a user name or email address."""
1612 1616 f = user.find('@')
1613 1617 if f >= 0:
1614 1618 user = user[:f]
1615 1619 f = user.find('<')
1616 1620 if f >= 0:
1617 1621 user = user[f+1:]
1618 1622 f = user.find(' ')
1619 1623 if f >= 0:
1620 1624 user = user[:f]
1621 1625 f = user.find('.')
1622 1626 if f >= 0:
1623 1627 user = user[:f]
1624 1628 return user
1625 1629
1626 1630 def ellipsis(text, maxlength=400):
1627 1631 """Trim string to at most maxlength (default: 400) characters."""
1628 1632 if len(text) <= maxlength:
1629 1633 return text
1630 1634 else:
1631 1635 return "%s..." % (text[:maxlength-3])
1632 1636
1633 1637 def walkrepos(path):
1634 1638 '''yield every hg repository under path, recursively.'''
1635 1639 def errhandler(err):
1636 1640 if err.filename == path:
1637 1641 raise err
1638 1642
1639 1643 for root, dirs, files in os.walk(path, onerror=errhandler):
1640 1644 for d in dirs:
1641 1645 if d == '.hg':
1642 1646 yield root
1643 1647 dirs[:] = []
1644 1648 break
1645 1649
1646 1650 _rcpath = None
1647 1651
1648 1652 def os_rcpath():
1649 1653 '''return default os-specific hgrc search path'''
1650 1654 path = system_rcpath()
1651 1655 path.extend(user_rcpath())
1652 1656 path = [os.path.normpath(f) for f in path]
1653 1657 return path
1654 1658
1655 1659 def rcpath():
1656 1660 '''return hgrc search path. if env var HGRCPATH is set, use it.
1657 1661 for each item in path, if directory, use files ending in .rc,
1658 1662 else use item.
1659 1663 make HGRCPATH empty to only look in .hg/hgrc of current repo.
1660 1664 if no HGRCPATH, use default os-specific path.'''
1661 1665 global _rcpath
1662 1666 if _rcpath is None:
1663 1667 if 'HGRCPATH' in os.environ:
1664 1668 _rcpath = []
1665 1669 for p in os.environ['HGRCPATH'].split(os.pathsep):
1666 1670 if not p: continue
1667 1671 if os.path.isdir(p):
1668 1672 for f, kind in osutil.listdir(p):
1669 1673 if f.endswith('.rc'):
1670 1674 _rcpath.append(os.path.join(p, f))
1671 1675 else:
1672 1676 _rcpath.append(p)
1673 1677 else:
1674 1678 _rcpath = os_rcpath()
1675 1679 return _rcpath
1676 1680
1677 1681 def bytecount(nbytes):
1678 1682 '''return byte count formatted as readable string, with units'''
1679 1683
1680 1684 units = (
1681 1685 (100, 1<<30, _('%.0f GB')),
1682 1686 (10, 1<<30, _('%.1f GB')),
1683 1687 (1, 1<<30, _('%.2f GB')),
1684 1688 (100, 1<<20, _('%.0f MB')),
1685 1689 (10, 1<<20, _('%.1f MB')),
1686 1690 (1, 1<<20, _('%.2f MB')),
1687 1691 (100, 1<<10, _('%.0f KB')),
1688 1692 (10, 1<<10, _('%.1f KB')),
1689 1693 (1, 1<<10, _('%.2f KB')),
1690 1694 (1, 1, _('%.0f bytes')),
1691 1695 )
1692 1696
1693 1697 for multiplier, divisor, format in units:
1694 1698 if nbytes >= divisor * multiplier:
1695 1699 return format % (nbytes / float(divisor))
1696 1700 return units[-1][2] % nbytes
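
The unit table keeps roughly three significant digits: the 100x rows print no decimals, the 10x rows one, and the 1x rows two. A couple of hand-applied spot checks of the formats the table would select, computed by hand rather than by calling bytecount:

# 1234 bytes falls through to the (1, 1<<10) row; 123456789 hits (100, 1<<20).
assert '%.2f KB' % (1234 / 1024.0) == '1.21 KB'
assert '%.0f MB' % (123456789 / 1048576.0) == '118 MB'
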
1697 1701
1698 1702 def drop_scheme(scheme, path):
1699 1703 sc = scheme + ':'
1700 1704 if path.startswith(sc):
1701 1705 path = path[len(sc):]
1702 1706 if path.startswith('//'):
1703 1707 path = path[2:]
1704 1708 return path
1705 1709
1706 1710 def uirepr(s):
1707 1711 # Avoid double backslash in Windows path repr()
1708 1712 return repr(s).replace('\\\\', '\\')
1709 1713
1710 1714 def hidepassword(url):
1711 1715 '''hide user credential in a url string'''
1712 1716 scheme, netloc, path, params, query, fragment = urlparse.urlparse(url)
1713 1717 netloc = re.sub('([^:]*):([^@]*)@(.*)', r'\1:***@\3', netloc)
1714 1718 return urlparse.urlunparse((scheme, netloc, path, params, query, fragment))
1715 1719
1716 1720 def removeauth(url):
1717 1721 '''remove all authentication information from a url string'''
1718 1722 scheme, netloc, path, params, query, fragment = urlparse.urlparse(url)
1719 1723 netloc = netloc[netloc.find('@')+1:]
1720 1724 return urlparse.urlunparse((scheme, netloc, path, params, query, fragment))
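
hidepassword() only rewrites the netloc portion: everything between the first ':' and the '@' is replaced with '***'. A hand-run version of that substitution using the same regular expression:

import re

netloc = 'alice:s3cret@example.com'
assert re.sub('([^:]*):([^@]*)@(.*)', r'\1:***@\3', netloc) == 'alice:***@example.com'
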