cmdutil: hide child window created by win32 spawndetached()...
Patrick Mezard
r10240:3af4b39a default
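This changeset makes the detached hg service/daemon child call a new util.hidewindow() hook right after it signals startup, so the console window that win32 spawndetached() opens for the child process no longer stays visible. The diff below only adds a no-op stub to util.py (the real work happens in the platform-specific module imported when os.name == 'nt', which is not part of this hunk). For orientation, here is a minimal illustrative sketch of what such a win32 override could look like, using ctypes and standard Win32 calls (EnumWindows, GetWindowThreadProcessId, ShowWindow); the structure and names are assumptions for illustration, not the actual windows.py code.

    # Illustrative sketch only -- not the actual Mercurial windows.py code.
    import ctypes
    from ctypes import wintypes

    _user32 = ctypes.windll.user32        # only available on Windows
    _kernel32 = ctypes.windll.kernel32

    _SW_HIDE = 0
    # EnumWindows callback signature: BOOL CALLBACK (HWND, LPARAM)
    _WNDENUMPROC = ctypes.WINFUNCTYPE(wintypes.BOOL, wintypes.HWND,
                                      wintypes.LPARAM)

    def hidewindow():
        '''Hide the top-level window owned by the current (detached) process.'''
        pid = _kernel32.GetCurrentProcessId()

        def callback(hwnd, lparam):
            wpid = wintypes.DWORD()
            _user32.GetWindowThreadProcessId(hwnd, ctypes.byref(wpid))
            if wpid.value == pid:
                _user32.ShowWindow(hwnd, _SW_HIDE)
                return False   # stop enumerating once our window is hidden
            return True        # keep enumerating other windows

        _user32.EnumWindows(_WNDENUMPROC(callback), 0)

In the actual change, cmdutil.service() simply calls util.hidewindow() right after removing the startup lock file (see the added line in the first hunk), so non-Windows platforms fall through to the no-op stub added to util.py.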
@@ -1,1172 +1,1173 b''
1 1 # cmdutil.py - help for command processing in mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2, incorporated herein by reference.
7 7
8 8 from node import hex, nullid, nullrev, short
9 9 from i18n import _
10 10 import os, sys, errno, re, glob, tempfile, time
11 11 import mdiff, bdiff, util, templater, patch, error, encoding, templatekw
12 12 import match as _match
13 13
14 14 revrangesep = ':'
15 15
16 16 def findpossible(cmd, table, strict=False):
17 17 """
18 18 Return cmd -> (aliases, command table entry)
19 19 for each matching command.
20 20 Return debug commands (or their aliases) only if no normal command matches.
21 21 """
22 22 choice = {}
23 23 debugchoice = {}
24 24 for e in table.keys():
25 25 aliases = e.lstrip("^").split("|")
26 26 found = None
27 27 if cmd in aliases:
28 28 found = cmd
29 29 elif not strict:
30 30 for a in aliases:
31 31 if a.startswith(cmd):
32 32 found = a
33 33 break
34 34 if found is not None:
35 35 if aliases[0].startswith("debug") or found.startswith("debug"):
36 36 debugchoice[found] = (aliases, table[e])
37 37 else:
38 38 choice[found] = (aliases, table[e])
39 39
40 40 if not choice and debugchoice:
41 41 choice = debugchoice
42 42
43 43 return choice
44 44
45 45 def findcmd(cmd, table, strict=True):
46 46 """Return (aliases, command table entry) for command string."""
47 47 choice = findpossible(cmd, table, strict)
48 48
49 49 if cmd in choice:
50 50 return choice[cmd]
51 51
52 52 if len(choice) > 1:
53 53 clist = choice.keys()
54 54 clist.sort()
55 55 raise error.AmbiguousCommand(cmd, clist)
56 56
57 57 if choice:
58 58 return choice.values()[0]
59 59
60 60 raise error.UnknownCommand(cmd)
61 61
62 62 def bail_if_changed(repo):
63 63 if repo.dirstate.parents()[1] != nullid:
64 64 raise util.Abort(_('outstanding uncommitted merge'))
65 65 modified, added, removed, deleted = repo.status()[:4]
66 66 if modified or added or removed or deleted:
67 67 raise util.Abort(_("outstanding uncommitted changes"))
68 68
69 69 def logmessage(opts):
70 70 """ get the log message according to -m and -l option """
71 71 message = opts.get('message')
72 72 logfile = opts.get('logfile')
73 73
74 74 if message and logfile:
75 75 raise util.Abort(_('options --message and --logfile are mutually '
76 76 'exclusive'))
77 77 if not message and logfile:
78 78 try:
79 79 if logfile == '-':
80 80 message = sys.stdin.read()
81 81 else:
82 82 message = open(logfile).read()
83 83 except IOError, inst:
84 84 raise util.Abort(_("can't read commit message '%s': %s") %
85 85 (logfile, inst.strerror))
86 86 return message
87 87
88 88 def loglimit(opts):
89 89 """get the log limit according to option -l/--limit"""
90 90 limit = opts.get('limit')
91 91 if limit:
92 92 try:
93 93 limit = int(limit)
94 94 except ValueError:
95 95 raise util.Abort(_('limit must be a positive integer'))
96 96 if limit <= 0: raise util.Abort(_('limit must be positive'))
97 97 else:
98 98 limit = None
99 99 return limit
100 100
101 101 def remoteui(src, opts):
102 102 'build a remote ui from ui or repo and opts'
103 103 if hasattr(src, 'baseui'): # looks like a repository
104 104 dst = src.baseui.copy() # drop repo-specific config
105 105 src = src.ui # copy target options from repo
106 106 else: # assume it's a global ui object
107 107 dst = src.copy() # keep all global options
108 108
109 109 # copy ssh-specific options
110 110 for o in 'ssh', 'remotecmd':
111 111 v = opts.get(o) or src.config('ui', o)
112 112 if v:
113 113 dst.setconfig("ui", o, v)
114 114
115 115 # copy bundle-specific options
116 116 r = src.config('bundle', 'mainreporoot')
117 117 if r:
118 118 dst.setconfig('bundle', 'mainreporoot', r)
119 119
120 120 # copy auth section settings
121 121 for key, val in src.configitems('auth'):
122 122 dst.setconfig('auth', key, val)
123 123
124 124 return dst
125 125
126 126 def revpair(repo, revs):
127 127 '''return pair of nodes, given list of revisions. second item can
128 128 be None, meaning use working dir.'''
129 129
130 130 def revfix(repo, val, defval):
131 131 if not val and val != 0 and defval is not None:
132 132 val = defval
133 133 return repo.lookup(val)
134 134
135 135 if not revs:
136 136 return repo.dirstate.parents()[0], None
137 137 end = None
138 138 if len(revs) == 1:
139 139 if revrangesep in revs[0]:
140 140 start, end = revs[0].split(revrangesep, 1)
141 141 start = revfix(repo, start, 0)
142 142 end = revfix(repo, end, len(repo) - 1)
143 143 else:
144 144 start = revfix(repo, revs[0], None)
145 145 elif len(revs) == 2:
146 146 if revrangesep in revs[0] or revrangesep in revs[1]:
147 147 raise util.Abort(_('too many revisions specified'))
148 148 start = revfix(repo, revs[0], None)
149 149 end = revfix(repo, revs[1], None)
150 150 else:
151 151 raise util.Abort(_('too many revisions specified'))
152 152 return start, end
153 153
154 154 def revrange(repo, revs):
155 155 """Yield revision as strings from a list of revision specifications."""
156 156
157 157 def revfix(repo, val, defval):
158 158 if not val and val != 0 and defval is not None:
159 159 return defval
160 160 return repo.changelog.rev(repo.lookup(val))
161 161
162 162 seen, l = set(), []
163 163 for spec in revs:
164 164 if revrangesep in spec:
165 165 start, end = spec.split(revrangesep, 1)
166 166 start = revfix(repo, start, 0)
167 167 end = revfix(repo, end, len(repo) - 1)
168 168 step = start > end and -1 or 1
169 169 for rev in xrange(start, end+step, step):
170 170 if rev in seen:
171 171 continue
172 172 seen.add(rev)
173 173 l.append(rev)
174 174 else:
175 175 rev = revfix(repo, spec, None)
176 176 if rev in seen:
177 177 continue
178 178 seen.add(rev)
179 179 l.append(rev)
180 180
181 181 return l
182 182
183 183 def make_filename(repo, pat, node,
184 184 total=None, seqno=None, revwidth=None, pathname=None):
185 185 node_expander = {
186 186 'H': lambda: hex(node),
187 187 'R': lambda: str(repo.changelog.rev(node)),
188 188 'h': lambda: short(node),
189 189 }
190 190 expander = {
191 191 '%': lambda: '%',
192 192 'b': lambda: os.path.basename(repo.root),
193 193 }
194 194
195 195 try:
196 196 if node:
197 197 expander.update(node_expander)
198 198 if node:
199 199 expander['r'] = (lambda:
200 200 str(repo.changelog.rev(node)).zfill(revwidth or 0))
201 201 if total is not None:
202 202 expander['N'] = lambda: str(total)
203 203 if seqno is not None:
204 204 expander['n'] = lambda: str(seqno)
205 205 if total is not None and seqno is not None:
206 206 expander['n'] = lambda: str(seqno).zfill(len(str(total)))
207 207 if pathname is not None:
208 208 expander['s'] = lambda: os.path.basename(pathname)
209 209 expander['d'] = lambda: os.path.dirname(pathname) or '.'
210 210 expander['p'] = lambda: pathname
211 211
212 212 newname = []
213 213 patlen = len(pat)
214 214 i = 0
215 215 while i < patlen:
216 216 c = pat[i]
217 217 if c == '%':
218 218 i += 1
219 219 c = pat[i]
220 220 c = expander[c]()
221 221 newname.append(c)
222 222 i += 1
223 223 return ''.join(newname)
224 224 except KeyError, inst:
225 225 raise util.Abort(_("invalid format spec '%%%s' in output filename") %
226 226 inst.args[0])
227 227
228 228 def make_file(repo, pat, node=None,
229 229 total=None, seqno=None, revwidth=None, mode='wb', pathname=None):
230 230
231 231 writable = 'w' in mode or 'a' in mode
232 232
233 233 if not pat or pat == '-':
234 234 return writable and sys.stdout or sys.stdin
235 235 if hasattr(pat, 'write') and writable:
236 236 return pat
237 237 if hasattr(pat, 'read') and 'r' in mode:
238 238 return pat
239 239 return open(make_filename(repo, pat, node, total, seqno, revwidth,
240 240 pathname),
241 241 mode)
242 242
243 243 def expandpats(pats):
244 244 if not util.expandglobs:
245 245 return list(pats)
246 246 ret = []
247 247 for p in pats:
248 248 kind, name = _match._patsplit(p, None)
249 249 if kind is None:
250 250 try:
251 251 globbed = glob.glob(name)
252 252 except re.error:
253 253 globbed = [name]
254 254 if globbed:
255 255 ret.extend(globbed)
256 256 continue
257 257 ret.append(p)
258 258 return ret
259 259
260 260 def match(repo, pats=[], opts={}, globbed=False, default='relpath'):
261 261 if not globbed and default == 'relpath':
262 262 pats = expandpats(pats or [])
263 263 m = _match.match(repo.root, repo.getcwd(), pats,
264 264 opts.get('include'), opts.get('exclude'), default)
265 265 def badfn(f, msg):
266 266 repo.ui.warn("%s: %s\n" % (m.rel(f), msg))
267 267 m.bad = badfn
268 268 return m
269 269
270 270 def matchall(repo):
271 271 return _match.always(repo.root, repo.getcwd())
272 272
273 273 def matchfiles(repo, files):
274 274 return _match.exact(repo.root, repo.getcwd(), files)
275 275
276 276 def findrenames(repo, added, removed, threshold):
277 277 '''find renamed files -- yields (before, after, score) tuples'''
278 278 copies = {}
279 279 ctx = repo['.']
280 280 for r in removed:
281 281 if r not in ctx:
282 282 continue
283 283 fctx = ctx.filectx(r)
284 284
285 285 def score(text):
286 286 if not len(text):
287 287 return 0.0
288 288 if not fctx.cmp(text):
289 289 return 1.0
290 290 if threshold == 1.0:
291 291 return 0.0
292 292 orig = fctx.data()
293 293 # bdiff.blocks() returns blocks of matching lines
294 294 # count the number of bytes in each
295 295 equal = 0
296 296 alines = mdiff.splitnewlines(text)
297 297 matches = bdiff.blocks(text, orig)
298 298 for x1, x2, y1, y2 in matches:
299 299 for line in alines[x1:x2]:
300 300 equal += len(line)
301 301
302 302 lengths = len(text) + len(orig)
303 303 return equal * 2.0 / lengths
304 304
305 305 for a in added:
306 306 bestscore = copies.get(a, (None, threshold))[1]
307 307 myscore = score(repo.wread(a))
308 308 if myscore >= bestscore:
309 309 copies[a] = (r, myscore)
310 310
311 311 for dest, v in copies.iteritems():
312 312 source, score = v
313 313 yield source, dest, score
314 314
315 315 def addremove(repo, pats=[], opts={}, dry_run=None, similarity=None):
316 316 if dry_run is None:
317 317 dry_run = opts.get('dry_run')
318 318 if similarity is None:
319 319 similarity = float(opts.get('similarity') or 0)
320 320 # we'd use status here, except handling of symlinks and ignore is tricky
321 321 added, unknown, deleted, removed = [], [], [], []
322 322 audit_path = util.path_auditor(repo.root)
323 323 m = match(repo, pats, opts)
324 324 for abs in repo.walk(m):
325 325 target = repo.wjoin(abs)
326 326 good = True
327 327 try:
328 328 audit_path(abs)
329 329 except:
330 330 good = False
331 331 rel = m.rel(abs)
332 332 exact = m.exact(abs)
333 333 if good and abs not in repo.dirstate:
334 334 unknown.append(abs)
335 335 if repo.ui.verbose or not exact:
336 336 repo.ui.status(_('adding %s\n') % ((pats and rel) or abs))
337 337 elif repo.dirstate[abs] != 'r' and (not good or not util.lexists(target)
338 338 or (os.path.isdir(target) and not os.path.islink(target))):
339 339 deleted.append(abs)
340 340 if repo.ui.verbose or not exact:
341 341 repo.ui.status(_('removing %s\n') % ((pats and rel) or abs))
342 342 # for finding renames
343 343 elif repo.dirstate[abs] == 'r':
344 344 removed.append(abs)
345 345 elif repo.dirstate[abs] == 'a':
346 346 added.append(abs)
347 347 if not dry_run:
348 348 repo.remove(deleted)
349 349 repo.add(unknown)
350 350 if similarity > 0:
351 351 for old, new, score in findrenames(repo, added + unknown,
352 352 removed + deleted, similarity):
353 353 if repo.ui.verbose or not m.exact(old) or not m.exact(new):
354 354 repo.ui.status(_('recording removal of %s as rename to %s '
355 355 '(%d%% similar)\n') %
356 356 (m.rel(old), m.rel(new), score * 100))
357 357 if not dry_run:
358 358 repo.copy(old, new)
359 359
360 360 def copy(ui, repo, pats, opts, rename=False):
361 361 # called with the repo lock held
362 362 #
363 363 # hgsep => pathname that uses "/" to separate directories
364 364 # ossep => pathname that uses os.sep to separate directories
365 365 cwd = repo.getcwd()
366 366 targets = {}
367 367 after = opts.get("after")
368 368 dryrun = opts.get("dry_run")
369 369
370 370 def walkpat(pat):
371 371 srcs = []
372 372 m = match(repo, [pat], opts, globbed=True)
373 373 for abs in repo.walk(m):
374 374 state = repo.dirstate[abs]
375 375 rel = m.rel(abs)
376 376 exact = m.exact(abs)
377 377 if state in '?r':
378 378 if exact and state == '?':
379 379 ui.warn(_('%s: not copying - file is not managed\n') % rel)
380 380 if exact and state == 'r':
381 381 ui.warn(_('%s: not copying - file has been marked for'
382 382 ' remove\n') % rel)
383 383 continue
384 384 # abs: hgsep
385 385 # rel: ossep
386 386 srcs.append((abs, rel, exact))
387 387 return srcs
388 388
389 389 # abssrc: hgsep
390 390 # relsrc: ossep
391 391 # otarget: ossep
392 392 def copyfile(abssrc, relsrc, otarget, exact):
393 393 abstarget = util.canonpath(repo.root, cwd, otarget)
394 394 reltarget = repo.pathto(abstarget, cwd)
395 395 target = repo.wjoin(abstarget)
396 396 src = repo.wjoin(abssrc)
397 397 state = repo.dirstate[abstarget]
398 398
399 399 # check for collisions
400 400 prevsrc = targets.get(abstarget)
401 401 if prevsrc is not None:
402 402 ui.warn(_('%s: not overwriting - %s collides with %s\n') %
403 403 (reltarget, repo.pathto(abssrc, cwd),
404 404 repo.pathto(prevsrc, cwd)))
405 405 return
406 406
407 407 # check for overwrites
408 408 exists = os.path.exists(target)
409 409 if not after and exists or after and state in 'mn':
410 410 if not opts['force']:
411 411 ui.warn(_('%s: not overwriting - file exists\n') %
412 412 reltarget)
413 413 return
414 414
415 415 if after:
416 416 if not exists:
417 417 return
418 418 elif not dryrun:
419 419 try:
420 420 if exists:
421 421 os.unlink(target)
422 422 targetdir = os.path.dirname(target) or '.'
423 423 if not os.path.isdir(targetdir):
424 424 os.makedirs(targetdir)
425 425 util.copyfile(src, target)
426 426 except IOError, inst:
427 427 if inst.errno == errno.ENOENT:
428 428 ui.warn(_('%s: deleted in working copy\n') % relsrc)
429 429 else:
430 430 ui.warn(_('%s: cannot copy - %s\n') %
431 431 (relsrc, inst.strerror))
432 432 return True # report a failure
433 433
434 434 if ui.verbose or not exact:
435 435 if rename:
436 436 ui.status(_('moving %s to %s\n') % (relsrc, reltarget))
437 437 else:
438 438 ui.status(_('copying %s to %s\n') % (relsrc, reltarget))
439 439
440 440 targets[abstarget] = abssrc
441 441
442 442 # fix up dirstate
443 443 origsrc = repo.dirstate.copied(abssrc) or abssrc
444 444 if abstarget == origsrc: # copying back a copy?
445 445 if state not in 'mn' and not dryrun:
446 446 repo.dirstate.normallookup(abstarget)
447 447 else:
448 448 if repo.dirstate[origsrc] == 'a' and origsrc == abssrc:
449 449 if not ui.quiet:
450 450 ui.warn(_("%s has not been committed yet, so no copy "
451 451 "data will be stored for %s.\n")
452 452 % (repo.pathto(origsrc, cwd), reltarget))
453 453 if repo.dirstate[abstarget] in '?r' and not dryrun:
454 454 repo.add([abstarget])
455 455 elif not dryrun:
456 456 repo.copy(origsrc, abstarget)
457 457
458 458 if rename and not dryrun:
459 459 repo.remove([abssrc], not after)
460 460
461 461 # pat: ossep
462 462 # dest ossep
463 463 # srcs: list of (hgsep, hgsep, ossep, bool)
464 464 # return: function that takes hgsep and returns ossep
465 465 def targetpathfn(pat, dest, srcs):
466 466 if os.path.isdir(pat):
467 467 abspfx = util.canonpath(repo.root, cwd, pat)
468 468 abspfx = util.localpath(abspfx)
469 469 if destdirexists:
470 470 striplen = len(os.path.split(abspfx)[0])
471 471 else:
472 472 striplen = len(abspfx)
473 473 if striplen:
474 474 striplen += len(os.sep)
475 475 res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
476 476 elif destdirexists:
477 477 res = lambda p: os.path.join(dest,
478 478 os.path.basename(util.localpath(p)))
479 479 else:
480 480 res = lambda p: dest
481 481 return res
482 482
483 483 # pat: ossep
484 484 # dest ossep
485 485 # srcs: list of (hgsep, hgsep, ossep, bool)
486 486 # return: function that takes hgsep and returns ossep
487 487 def targetpathafterfn(pat, dest, srcs):
488 488 if _match.patkind(pat):
489 489 # a mercurial pattern
490 490 res = lambda p: os.path.join(dest,
491 491 os.path.basename(util.localpath(p)))
492 492 else:
493 493 abspfx = util.canonpath(repo.root, cwd, pat)
494 494 if len(abspfx) < len(srcs[0][0]):
495 495 # A directory. Either the target path contains the last
496 496 # component of the source path or it does not.
497 497 def evalpath(striplen):
498 498 score = 0
499 499 for s in srcs:
500 500 t = os.path.join(dest, util.localpath(s[0])[striplen:])
501 501 if os.path.exists(t):
502 502 score += 1
503 503 return score
504 504
505 505 abspfx = util.localpath(abspfx)
506 506 striplen = len(abspfx)
507 507 if striplen:
508 508 striplen += len(os.sep)
509 509 if os.path.isdir(os.path.join(dest, os.path.split(abspfx)[1])):
510 510 score = evalpath(striplen)
511 511 striplen1 = len(os.path.split(abspfx)[0])
512 512 if striplen1:
513 513 striplen1 += len(os.sep)
514 514 if evalpath(striplen1) > score:
515 515 striplen = striplen1
516 516 res = lambda p: os.path.join(dest,
517 517 util.localpath(p)[striplen:])
518 518 else:
519 519 # a file
520 520 if destdirexists:
521 521 res = lambda p: os.path.join(dest,
522 522 os.path.basename(util.localpath(p)))
523 523 else:
524 524 res = lambda p: dest
525 525 return res
526 526
527 527
528 528 pats = expandpats(pats)
529 529 if not pats:
530 530 raise util.Abort(_('no source or destination specified'))
531 531 if len(pats) == 1:
532 532 raise util.Abort(_('no destination specified'))
533 533 dest = pats.pop()
534 534 destdirexists = os.path.isdir(dest) and not os.path.islink(dest)
535 535 if not destdirexists:
536 536 if len(pats) > 1 or _match.patkind(pats[0]):
537 537 raise util.Abort(_('with multiple sources, destination must be an '
538 538 'existing directory'))
539 539 if util.endswithsep(dest):
540 540 raise util.Abort(_('destination %s is not a directory') % dest)
541 541
542 542 tfn = targetpathfn
543 543 if after:
544 544 tfn = targetpathafterfn
545 545 copylist = []
546 546 for pat in pats:
547 547 srcs = walkpat(pat)
548 548 if not srcs:
549 549 continue
550 550 copylist.append((tfn(pat, dest, srcs), srcs))
551 551 if not copylist:
552 552 raise util.Abort(_('no files to copy'))
553 553
554 554 errors = 0
555 555 for targetpath, srcs in copylist:
556 556 for abssrc, relsrc, exact in srcs:
557 557 if copyfile(abssrc, relsrc, targetpath(abssrc), exact):
558 558 errors += 1
559 559
560 560 if errors:
561 561 ui.warn(_('(consider using --after)\n'))
562 562
563 563 return errors
564 564
565 565 def service(opts, parentfn=None, initfn=None, runfn=None, logfile=None,
566 566 runargs=None, appendpid=False):
567 567 '''Run a command as a service.'''
568 568
569 569 if opts['daemon'] and not opts['daemon_pipefds']:
570 570 # Signal child process startup with file removal
571 571 lockfd, lockpath = tempfile.mkstemp(prefix='hg-service-')
572 572 os.close(lockfd)
573 573 try:
574 574 if not runargs:
575 575 runargs = util.hgcmd() + sys.argv[1:]
576 576 runargs.append('--daemon-pipefds=%s' % lockpath)
577 577 # Don't pass --cwd to the child process, because we've already
578 578 # changed directory.
579 579 for i in xrange(1,len(runargs)):
580 580 if runargs[i].startswith('--cwd='):
581 581 del runargs[i]
582 582 break
583 583 elif runargs[i].startswith('--cwd'):
584 584 del runargs[i:i+2]
585 585 break
586 586 pid = util.spawndetached(runargs)
587 587 while os.path.exists(lockpath):
588 588 time.sleep(0.1)
589 589 finally:
590 590 try:
591 591 os.unlink(lockpath)
592 592 except OSError, e:
593 593 if e.errno != errno.ENOENT:
594 594 raise
595 595 if parentfn:
596 596 return parentfn(pid)
597 597 else:
598 598 return
599 599
600 600 if initfn:
601 601 initfn()
602 602
603 603 if opts['pid_file']:
604 604 mode = appendpid and 'a' or 'w'
605 605 fp = open(opts['pid_file'], mode)
606 606 fp.write(str(os.getpid()) + '\n')
607 607 fp.close()
608 608
609 609 if opts['daemon_pipefds']:
610 610 lockpath = opts['daemon_pipefds']
611 611 try:
612 612 os.setsid()
613 613 except AttributeError:
614 614 pass
615 615 os.unlink(lockpath)
616 util.hidewindow()
616 617 sys.stdout.flush()
617 618 sys.stderr.flush()
618 619
619 620 nullfd = os.open(util.nulldev, os.O_RDWR)
620 621 logfilefd = nullfd
621 622 if logfile:
622 623 logfilefd = os.open(logfile, os.O_RDWR | os.O_CREAT | os.O_APPEND)
623 624 os.dup2(nullfd, 0)
624 625 os.dup2(logfilefd, 1)
625 626 os.dup2(logfilefd, 2)
626 627 if nullfd not in (0, 1, 2):
627 628 os.close(nullfd)
628 629 if logfile and logfilefd not in (0, 1, 2):
629 630 os.close(logfilefd)
630 631
631 632 if runfn:
632 633 return runfn()
633 634
634 635 class changeset_printer(object):
635 636 '''show changeset information when templating not requested.'''
636 637
637 638 def __init__(self, ui, repo, patch, diffopts, buffered):
638 639 self.ui = ui
639 640 self.repo = repo
640 641 self.buffered = buffered
641 642 self.patch = patch
642 643 self.diffopts = diffopts
643 644 self.header = {}
644 645 self.hunk = {}
645 646 self.lastheader = None
646 647 self.footer = None
647 648
648 649 def flush(self, rev):
649 650 if rev in self.header:
650 651 h = self.header[rev]
651 652 if h != self.lastheader:
652 653 self.lastheader = h
653 654 self.ui.write(h)
654 655 del self.header[rev]
655 656 if rev in self.hunk:
656 657 self.ui.write(self.hunk[rev])
657 658 del self.hunk[rev]
658 659 return 1
659 660 return 0
660 661
661 662 def close(self):
662 663 if self.footer:
663 664 self.ui.write(self.footer)
664 665
665 666 def show(self, ctx, copies=None, **props):
666 667 if self.buffered:
667 668 self.ui.pushbuffer()
668 669 self._show(ctx, copies, props)
669 670 self.hunk[ctx.rev()] = self.ui.popbuffer()
670 671 else:
671 672 self._show(ctx, copies, props)
672 673
673 674 def _show(self, ctx, copies, props):
674 675 '''show a single changeset or file revision'''
675 676 changenode = ctx.node()
676 677 rev = ctx.rev()
677 678
678 679 if self.ui.quiet:
679 680 self.ui.write("%d:%s\n" % (rev, short(changenode)))
680 681 return
681 682
682 683 log = self.repo.changelog
683 684 date = util.datestr(ctx.date())
684 685
685 686 hexfunc = self.ui.debugflag and hex or short
686 687
687 688 parents = [(p, hexfunc(log.node(p)))
688 689 for p in self._meaningful_parentrevs(log, rev)]
689 690
690 691 self.ui.write(_("changeset: %d:%s\n") % (rev, hexfunc(changenode)))
691 692
692 693 branch = ctx.branch()
693 694 # don't show the default branch name
694 695 if branch != 'default':
695 696 branch = encoding.tolocal(branch)
696 697 self.ui.write(_("branch: %s\n") % branch)
697 698 for tag in self.repo.nodetags(changenode):
698 699 self.ui.write(_("tag: %s\n") % tag)
699 700 for parent in parents:
700 701 self.ui.write(_("parent: %d:%s\n") % parent)
701 702
702 703 if self.ui.debugflag:
703 704 mnode = ctx.manifestnode()
704 705 self.ui.write(_("manifest: %d:%s\n") %
705 706 (self.repo.manifest.rev(mnode), hex(mnode)))
706 707 self.ui.write(_("user: %s\n") % ctx.user())
707 708 self.ui.write(_("date: %s\n") % date)
708 709
709 710 if self.ui.debugflag:
710 711 files = self.repo.status(log.parents(changenode)[0], changenode)[:3]
711 712 for key, value in zip([_("files:"), _("files+:"), _("files-:")],
712 713 files):
713 714 if value:
714 715 self.ui.write("%-12s %s\n" % (key, " ".join(value)))
715 716 elif ctx.files() and self.ui.verbose:
716 717 self.ui.write(_("files: %s\n") % " ".join(ctx.files()))
717 718 if copies and self.ui.verbose:
718 719 copies = ['%s (%s)' % c for c in copies]
719 720 self.ui.write(_("copies: %s\n") % ' '.join(copies))
720 721
721 722 extra = ctx.extra()
722 723 if extra and self.ui.debugflag:
723 724 for key, value in sorted(extra.items()):
724 725 self.ui.write(_("extra: %s=%s\n")
725 726 % (key, value.encode('string_escape')))
726 727
727 728 description = ctx.description().strip()
728 729 if description:
729 730 if self.ui.verbose:
730 731 self.ui.write(_("description:\n"))
731 732 self.ui.write(description)
732 733 self.ui.write("\n\n")
733 734 else:
734 735 self.ui.write(_("summary: %s\n") %
735 736 description.splitlines()[0])
736 737 self.ui.write("\n")
737 738
738 739 self.showpatch(changenode)
739 740
740 741 def showpatch(self, node):
741 742 if self.patch:
742 743 prev = self.repo.changelog.parents(node)[0]
743 744 chunks = patch.diff(self.repo, prev, node, match=self.patch,
744 745 opts=patch.diffopts(self.ui, self.diffopts))
745 746 for chunk in chunks:
746 747 self.ui.write(chunk)
747 748 self.ui.write("\n")
748 749
749 750 def _meaningful_parentrevs(self, log, rev):
750 751 """Return list of meaningful (or all if debug) parentrevs for rev.
751 752
752 753 For merges (two non-nullrev revisions) both parents are meaningful.
753 754 Otherwise the first parent revision is considered meaningful if it
754 755 is not the preceding revision.
755 756 """
756 757 parents = log.parentrevs(rev)
757 758 if not self.ui.debugflag and parents[1] == nullrev:
758 759 if parents[0] >= rev - 1:
759 760 parents = []
760 761 else:
761 762 parents = [parents[0]]
762 763 return parents
763 764
764 765
765 766 class changeset_templater(changeset_printer):
766 767 '''format changeset information.'''
767 768
768 769 def __init__(self, ui, repo, patch, diffopts, mapfile, buffered):
769 770 changeset_printer.__init__(self, ui, repo, patch, diffopts, buffered)
770 771 formatnode = ui.debugflag and (lambda x: x) or (lambda x: x[:12])
771 772 defaulttempl = {
772 773 'parent': '{rev}:{node|formatnode} ',
773 774 'manifest': '{rev}:{node|formatnode}',
774 775 'file_copy': '{name} ({source})',
775 776 'extra': '{key}={value|stringescape}'
776 777 }
777 778 # filecopy is preserved for compatibility reasons
778 779 defaulttempl['filecopy'] = defaulttempl['file_copy']
779 780 self.t = templater.templater(mapfile, {'formatnode': formatnode},
780 781 cache=defaulttempl)
781 782 self.cache = {}
782 783
783 784 def use_template(self, t):
784 785 '''set template string to use'''
785 786 self.t.cache['changeset'] = t
786 787
787 788 def _meaningful_parentrevs(self, ctx):
788 789 """Return list of meaningful (or all if debug) parentrevs for rev.
789 790 """
790 791 parents = ctx.parents()
791 792 if len(parents) > 1:
792 793 return parents
793 794 if self.ui.debugflag:
794 795 return [parents[0], self.repo['null']]
795 796 if parents[0].rev() >= ctx.rev() - 1:
796 797 return []
797 798 return parents
798 799
799 800 def _show(self, ctx, copies, props):
800 801 '''show a single changeset or file revision'''
801 802
802 803 showlist = templatekw.showlist
803 804
804 805 # showparents() behaviour depends on ui trace level which
805 806 # causes unexpected behaviours at templating level and makes
806 807 # it harder to extract it in a standalone function. Its
807 808 # behaviour cannot be changed so leave it here for now.
808 809 def showparents(repo, ctx, templ, **args):
809 810 parents = [[('rev', p.rev()), ('node', p.hex())]
810 811 for p in self._meaningful_parentrevs(ctx)]
811 812 return showlist(templ, 'parent', parents, **args)
812 813
813 814 props = props.copy()
814 815 props.update(templatekw.keywords)
815 816 props['parents'] = showparents
816 817 props['templ'] = self.t
817 818 props['ctx'] = ctx
818 819 props['repo'] = self.repo
819 820 props['revcache'] = {'copies': copies}
820 821 props['cache'] = self.cache
821 822
822 823 # find correct templates for current mode
823 824
824 825 tmplmodes = [
825 826 (True, None),
826 827 (self.ui.verbose, 'verbose'),
827 828 (self.ui.quiet, 'quiet'),
828 829 (self.ui.debugflag, 'debug'),
829 830 ]
830 831
831 832 types = {'header': '', 'footer':'', 'changeset': 'changeset'}
832 833 for mode, postfix in tmplmodes:
833 834 for type in types:
834 835 cur = postfix and ('%s_%s' % (type, postfix)) or type
835 836 if mode and cur in self.t:
836 837 types[type] = cur
837 838
838 839 try:
839 840
840 841 # write header
841 842 if types['header']:
842 843 h = templater.stringify(self.t(types['header'], **props))
843 844 if self.buffered:
844 845 self.header[ctx.rev()] = h
845 846 else:
846 847 self.ui.write(h)
847 848
848 849 # write changeset metadata, then patch if requested
849 850 key = types['changeset']
850 851 self.ui.write(templater.stringify(self.t(key, **props)))
851 852 self.showpatch(ctx.node())
852 853
853 854 if types['footer']:
854 855 if not self.footer:
855 856 self.footer = templater.stringify(self.t(types['footer'],
856 857 **props))
857 858
858 859 except KeyError, inst:
859 860 msg = _("%s: no key named '%s'")
860 861 raise util.Abort(msg % (self.t.mapfile, inst.args[0]))
861 862 except SyntaxError, inst:
862 863 raise util.Abort(_('%s: %s') % (self.t.mapfile, inst.args[0]))
863 864
864 865 def show_changeset(ui, repo, opts, buffered=False, matchfn=False):
865 866 """show one changeset using template or regular display.
866 867
867 868 Display format will be the first non-empty hit of:
868 869 1. option 'template'
869 870 2. option 'style'
870 871 3. [ui] setting 'logtemplate'
871 872 4. [ui] setting 'style'
872 873 If all of these values are either the unset or the empty string,
873 874 regular display via changeset_printer() is done.
874 875 """
875 876 # options
876 877 patch = False
877 878 if opts.get('patch'):
878 879 patch = matchfn or matchall(repo)
879 880
880 881 tmpl = opts.get('template')
881 882 style = None
882 883 if tmpl:
883 884 tmpl = templater.parsestring(tmpl, quoted=False)
884 885 else:
885 886 style = opts.get('style')
886 887
887 888 # ui settings
888 889 if not (tmpl or style):
889 890 tmpl = ui.config('ui', 'logtemplate')
890 891 if tmpl:
891 892 tmpl = templater.parsestring(tmpl)
892 893 else:
893 894 style = ui.config('ui', 'style')
894 895
895 896 if not (tmpl or style):
896 897 return changeset_printer(ui, repo, patch, opts, buffered)
897 898
898 899 mapfile = None
899 900 if style and not tmpl:
900 901 mapfile = style
901 902 if not os.path.split(mapfile)[0]:
902 903 mapname = (templater.templatepath('map-cmdline.' + mapfile)
903 904 or templater.templatepath(mapfile))
904 905 if mapname: mapfile = mapname
905 906
906 907 try:
907 908 t = changeset_templater(ui, repo, patch, opts, mapfile, buffered)
908 909 except SyntaxError, inst:
909 910 raise util.Abort(inst.args[0])
910 911 if tmpl: t.use_template(tmpl)
911 912 return t
912 913
913 914 def finddate(ui, repo, date):
914 915 """Find the tipmost changeset that matches the given date spec"""
915 916
916 917 df = util.matchdate(date)
917 918 m = matchall(repo)
918 919 results = {}
919 920
920 921 def prep(ctx, fns):
921 922 d = ctx.date()
922 923 if df(d[0]):
923 924 results[ctx.rev()] = d
924 925
925 926 for ctx in walkchangerevs(repo, m, {'rev': None}, prep):
926 927 rev = ctx.rev()
927 928 if rev in results:
928 929 ui.status(_("Found revision %s from %s\n") %
929 930 (rev, util.datestr(results[rev])))
930 931 return str(rev)
931 932
932 933 raise util.Abort(_("revision matching date not found"))
933 934
934 935 def walkchangerevs(repo, match, opts, prepare):
935 936 '''Iterate over files and the revs in which they changed.
936 937
937 938 Callers most commonly need to iterate backwards over the history
938 939 in which they are interested. Doing so has awful (quadratic-looking)
939 940 performance, so we use iterators in a "windowed" way.
940 941
941 942 We walk a window of revisions in the desired order. Within the
942 943 window, we first walk forwards to gather data, then in the desired
943 944 order (usually backwards) to display it.
944 945
945 946 This function returns an iterator yielding contexts. Before
946 947 yielding each context, the iterator will first call the prepare
947 948 function on each context in the window in forward order.'''
948 949
949 950 def increasing_windows(start, end, windowsize=8, sizelimit=512):
950 951 if start < end:
951 952 while start < end:
952 953 yield start, min(windowsize, end-start)
953 954 start += windowsize
954 955 if windowsize < sizelimit:
955 956 windowsize *= 2
956 957 else:
957 958 while start > end:
958 959 yield start, min(windowsize, start-end-1)
959 960 start -= windowsize
960 961 if windowsize < sizelimit:
961 962 windowsize *= 2
962 963
963 964 follow = opts.get('follow') or opts.get('follow_first')
964 965
965 966 if not len(repo):
966 967 return []
967 968
968 969 if follow:
969 970 defrange = '%s:0' % repo['.'].rev()
970 971 else:
971 972 defrange = '-1:0'
972 973 revs = revrange(repo, opts['rev'] or [defrange])
973 974 wanted = set()
974 975 slowpath = match.anypats() or (match.files() and opts.get('removed'))
975 976 fncache = {}
976 977 change = util.cachefunc(repo.changectx)
977 978
978 979 if not slowpath and not match.files():
979 980 # No files, no patterns. Display all revs.
980 981 wanted = set(revs)
981 982 copies = []
982 983
983 984 if not slowpath:
984 985 # Only files, no patterns. Check the history of each file.
985 986 def filerevgen(filelog, node):
986 987 cl_count = len(repo)
987 988 if node is None:
988 989 last = len(filelog) - 1
989 990 else:
990 991 last = filelog.rev(node)
991 992 for i, window in increasing_windows(last, nullrev):
992 993 revs = []
993 994 for j in xrange(i - window, i + 1):
994 995 n = filelog.node(j)
995 996 revs.append((filelog.linkrev(j),
996 997 follow and filelog.renamed(n)))
997 998 for rev in reversed(revs):
998 999 # only yield rev for which we have the changelog, it can
999 1000 # happen while doing "hg log" during a pull or commit
1000 1001 if rev[0] < cl_count:
1001 1002 yield rev
1002 1003 def iterfiles():
1003 1004 for filename in match.files():
1004 1005 yield filename, None
1005 1006 for filename_node in copies:
1006 1007 yield filename_node
1007 1008 minrev, maxrev = min(revs), max(revs)
1008 1009 for file_, node in iterfiles():
1009 1010 filelog = repo.file(file_)
1010 1011 if not len(filelog):
1011 1012 if node is None:
1012 1013 # A zero count may be a directory or deleted file, so
1013 1014 # try to find matching entries on the slow path.
1014 1015 if follow:
1015 1016 raise util.Abort(_('cannot follow nonexistent file: "%s"') % file_)
1016 1017 slowpath = True
1017 1018 break
1018 1019 else:
1019 1020 continue
1020 1021 for rev, copied in filerevgen(filelog, node):
1021 1022 if rev <= maxrev:
1022 1023 if rev < minrev:
1023 1024 break
1024 1025 fncache.setdefault(rev, [])
1025 1026 fncache[rev].append(file_)
1026 1027 wanted.add(rev)
1027 1028 if follow and copied:
1028 1029 copies.append(copied)
1029 1030 if slowpath:
1030 1031 if follow:
1031 1032 raise util.Abort(_('can only follow copies/renames for explicit '
1032 1033 'filenames'))
1033 1034
1034 1035 # The slow path checks files modified in every changeset.
1035 1036 def changerevgen():
1036 1037 for i, window in increasing_windows(len(repo) - 1, nullrev):
1037 1038 for j in xrange(i - window, i + 1):
1038 1039 yield change(j)
1039 1040
1040 1041 for ctx in changerevgen():
1041 1042 matches = filter(match, ctx.files())
1042 1043 if matches:
1043 1044 fncache[ctx.rev()] = matches
1044 1045 wanted.add(ctx.rev())
1045 1046
1046 1047 class followfilter(object):
1047 1048 def __init__(self, onlyfirst=False):
1048 1049 self.startrev = nullrev
1049 1050 self.roots = set()
1050 1051 self.onlyfirst = onlyfirst
1051 1052
1052 1053 def match(self, rev):
1053 1054 def realparents(rev):
1054 1055 if self.onlyfirst:
1055 1056 return repo.changelog.parentrevs(rev)[0:1]
1056 1057 else:
1057 1058 return filter(lambda x: x != nullrev,
1058 1059 repo.changelog.parentrevs(rev))
1059 1060
1060 1061 if self.startrev == nullrev:
1061 1062 self.startrev = rev
1062 1063 return True
1063 1064
1064 1065 if rev > self.startrev:
1065 1066 # forward: all descendants
1066 1067 if not self.roots:
1067 1068 self.roots.add(self.startrev)
1068 1069 for parent in realparents(rev):
1069 1070 if parent in self.roots:
1070 1071 self.roots.add(rev)
1071 1072 return True
1072 1073 else:
1073 1074 # backwards: all parents
1074 1075 if not self.roots:
1075 1076 self.roots.update(realparents(self.startrev))
1076 1077 if rev in self.roots:
1077 1078 self.roots.remove(rev)
1078 1079 self.roots.update(realparents(rev))
1079 1080 return True
1080 1081
1081 1082 return False
1082 1083
1083 1084 # it might be worthwhile to do this in the iterator if the rev range
1084 1085 # is descending and the prune args are all within that range
1085 1086 for rev in opts.get('prune', ()):
1086 1087 rev = repo.changelog.rev(repo.lookup(rev))
1087 1088 ff = followfilter()
1088 1089 stop = min(revs[0], revs[-1])
1089 1090 for x in xrange(rev, stop-1, -1):
1090 1091 if ff.match(x):
1091 1092 wanted.discard(x)
1092 1093
1093 1094 def iterate():
1094 1095 if follow and not match.files():
1095 1096 ff = followfilter(onlyfirst=opts.get('follow_first'))
1096 1097 def want(rev):
1097 1098 return ff.match(rev) and rev in wanted
1098 1099 else:
1099 1100 def want(rev):
1100 1101 return rev in wanted
1101 1102
1102 1103 for i, window in increasing_windows(0, len(revs)):
1103 1104 change = util.cachefunc(repo.changectx)
1104 1105 nrevs = [rev for rev in revs[i:i+window] if want(rev)]
1105 1106 for rev in sorted(nrevs):
1106 1107 fns = fncache.get(rev)
1107 1108 ctx = change(rev)
1108 1109 if not fns:
1109 1110 def fns_generator():
1110 1111 for f in ctx.files():
1111 1112 if match(f):
1112 1113 yield f
1113 1114 fns = fns_generator()
1114 1115 prepare(ctx, fns)
1115 1116 for rev in nrevs:
1116 1117 yield change(rev)
1117 1118 return iterate()
1118 1119
1119 1120 def commit(ui, repo, commitfunc, pats, opts):
1120 1121 '''commit the specified files or all outstanding changes'''
1121 1122 date = opts.get('date')
1122 1123 if date:
1123 1124 opts['date'] = util.parsedate(date)
1124 1125 message = logmessage(opts)
1125 1126
1126 1127 # extract addremove carefully -- this function can be called from a command
1127 1128 # that doesn't support addremove
1128 1129 if opts.get('addremove'):
1129 1130 addremove(repo, pats, opts)
1130 1131
1131 1132 return commitfunc(ui, repo, message, match(repo, pats, opts), opts)
1132 1133
1133 1134 def commiteditor(repo, ctx, subs):
1134 1135 if ctx.description():
1135 1136 return ctx.description()
1136 1137 return commitforceeditor(repo, ctx, subs)
1137 1138
1138 1139 def commitforceeditor(repo, ctx, subs):
1139 1140 edittext = []
1140 1141 modified, added, removed = ctx.modified(), ctx.added(), ctx.removed()
1141 1142 if ctx.description():
1142 1143 edittext.append(ctx.description())
1143 1144 edittext.append("")
1144 1145 edittext.append("") # Empty line between message and comments.
1145 1146 edittext.append(_("HG: Enter commit message."
1146 1147 " Lines beginning with 'HG:' are removed."))
1147 1148 edittext.append(_("HG: Leave message empty to abort commit."))
1148 1149 edittext.append("HG: --")
1149 1150 edittext.append(_("HG: user: %s") % ctx.user())
1150 1151 if ctx.p2():
1151 1152 edittext.append(_("HG: branch merge"))
1152 1153 if ctx.branch():
1153 1154 edittext.append(_("HG: branch '%s'")
1154 1155 % encoding.tolocal(ctx.branch()))
1155 1156 edittext.extend([_("HG: subrepo %s") % s for s in subs])
1156 1157 edittext.extend([_("HG: added %s") % f for f in added])
1157 1158 edittext.extend([_("HG: changed %s") % f for f in modified])
1158 1159 edittext.extend([_("HG: removed %s") % f for f in removed])
1159 1160 if not added and not modified and not removed:
1160 1161 edittext.append(_("HG: no files changed"))
1161 1162 edittext.append("")
1162 1163 # run editor in the repository root
1163 1164 olddir = os.getcwd()
1164 1165 os.chdir(repo.root)
1165 1166 text = repo.ui.edit("\n".join(edittext), ctx.user())
1166 1167 text = re.sub("(?m)^HG:.*\n", "", text)
1167 1168 os.chdir(olddir)
1168 1169
1169 1170 if not text.strip():
1170 1171 raise util.Abort(_("empty commit message"))
1171 1172
1172 1173 return text
@@ -1,1287 +1,1295 b''
1 1 # util.py - Mercurial utility functions and platform specific implementations
2 2 #
3 3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2, incorporated herein by reference.
9 9
10 10 """Mercurial utility functions and platform specfic implementations.
11 11
12 12 This contains helper routines that are independent of the SCM core and
13 13 hide platform-specific details from the core.
14 14 """
15 15
16 16 from i18n import _
17 17 import error, osutil, encoding
18 18 import cStringIO, errno, re, shutil, sys, tempfile, traceback
19 19 import os, stat, time, calendar, textwrap
20 20 import imp
21 21
22 22 # Python compatibility
23 23
24 24 def sha1(s):
25 25 return _fastsha1(s)
26 26
27 27 def _fastsha1(s):
28 28 # This function will import sha1 from hashlib or sha (whichever is
29 29 # available) and overwrite itself with it on the first call.
30 30 # Subsequent calls will go directly to the imported function.
31 31 try:
32 32 from hashlib import sha1 as _sha1
33 33 except ImportError:
34 34 from sha import sha as _sha1
35 35 global _fastsha1, sha1
36 36 _fastsha1 = sha1 = _sha1
37 37 return _sha1(s)
38 38
39 39 import subprocess
40 40 closefds = os.name == 'posix'
41 41
42 42 def popen2(cmd, env=None, newlines=False):
43 43 # Setting bufsize to -1 lets the system decide the buffer size.
44 44 # The default for bufsize is 0, meaning unbuffered. This leads to
45 45 # poor performance on Mac OS X: http://bugs.python.org/issue4194
46 46 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
47 47 close_fds=closefds,
48 48 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
49 49 universal_newlines=newlines,
50 50 env=env)
51 51 return p.stdin, p.stdout
52 52
53 53 def popen3(cmd, env=None, newlines=False):
54 54 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
55 55 close_fds=closefds,
56 56 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
57 57 stderr=subprocess.PIPE,
58 58 universal_newlines=newlines,
59 59 env=env)
60 60 return p.stdin, p.stdout, p.stderr
61 61
62 62 def version():
63 63 """Return version information if available."""
64 64 try:
65 65 import __version__
66 66 return __version__.version
67 67 except ImportError:
68 68 return 'unknown'
69 69
70 70 # used by parsedate
71 71 defaultdateformats = (
72 72 '%Y-%m-%d %H:%M:%S',
73 73 '%Y-%m-%d %I:%M:%S%p',
74 74 '%Y-%m-%d %H:%M',
75 75 '%Y-%m-%d %I:%M%p',
76 76 '%Y-%m-%d',
77 77 '%m-%d',
78 78 '%m/%d',
79 79 '%m/%d/%y',
80 80 '%m/%d/%Y',
81 81 '%a %b %d %H:%M:%S %Y',
82 82 '%a %b %d %I:%M:%S%p %Y',
83 83 '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
84 84 '%b %d %H:%M:%S %Y',
85 85 '%b %d %I:%M:%S%p %Y',
86 86 '%b %d %H:%M:%S',
87 87 '%b %d %I:%M:%S%p',
88 88 '%b %d %H:%M',
89 89 '%b %d %I:%M%p',
90 90 '%b %d %Y',
91 91 '%b %d',
92 92 '%H:%M:%S',
93 93 '%I:%M:%S%p',
94 94 '%H:%M',
95 95 '%I:%M%p',
96 96 )
97 97
98 98 extendeddateformats = defaultdateformats + (
99 99 "%Y",
100 100 "%Y-%m",
101 101 "%b",
102 102 "%b %Y",
103 103 )
104 104
105 105 def cachefunc(func):
106 106 '''cache the result of function calls'''
107 107 # XXX doesn't handle keywords args
108 108 cache = {}
109 109 if func.func_code.co_argcount == 1:
110 110 # we gain a small amount of time because
111 111 # we don't need to pack/unpack the list
112 112 def f(arg):
113 113 if arg not in cache:
114 114 cache[arg] = func(arg)
115 115 return cache[arg]
116 116 else:
117 117 def f(*args):
118 118 if args not in cache:
119 119 cache[args] = func(*args)
120 120 return cache[args]
121 121
122 122 return f
123 123
124 124 def lrucachefunc(func):
125 125 '''cache most recent results of function calls'''
126 126 cache = {}
127 127 order = []
128 128 if func.func_code.co_argcount == 1:
129 129 def f(arg):
130 130 if arg not in cache:
131 131 if len(cache) > 20:
132 132 del cache[order.pop(0)]
133 133 cache[arg] = func(arg)
134 134 else:
135 135 order.remove(arg)
136 136 order.append(arg)
137 137 return cache[arg]
138 138 else:
139 139 def f(*args):
140 140 if args not in cache:
141 141 if len(cache) > 20:
142 142 del cache[order.pop(0)]
143 143 cache[args] = func(*args)
144 144 else:
145 145 order.remove(args)
146 146 order.append(args)
147 147 return cache[args]
148 148
149 149 return f
150 150
151 151 class propertycache(object):
152 152 def __init__(self, func):
153 153 self.func = func
154 154 self.name = func.__name__
155 155 def __get__(self, obj, type=None):
156 156 result = self.func(obj)
157 157 setattr(obj, self.name, result)
158 158 return result
159 159
160 160 def pipefilter(s, cmd):
161 161 '''filter string S through command CMD, returning its output'''
162 162 p = subprocess.Popen(cmd, shell=True, close_fds=closefds,
163 163 stdin=subprocess.PIPE, stdout=subprocess.PIPE)
164 164 pout, perr = p.communicate(s)
165 165 return pout
166 166
167 167 def tempfilter(s, cmd):
168 168 '''filter string S through a pair of temporary files with CMD.
169 169 CMD is used as a template to create the real command to be run,
170 170 with the strings INFILE and OUTFILE replaced by the real names of
171 171 the temporary files generated.'''
172 172 inname, outname = None, None
173 173 try:
174 174 infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
175 175 fp = os.fdopen(infd, 'wb')
176 176 fp.write(s)
177 177 fp.close()
178 178 outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
179 179 os.close(outfd)
180 180 cmd = cmd.replace('INFILE', inname)
181 181 cmd = cmd.replace('OUTFILE', outname)
182 182 code = os.system(cmd)
183 183 if sys.platform == 'OpenVMS' and code & 1:
184 184 code = 0
185 185 if code: raise Abort(_("command '%s' failed: %s") %
186 186 (cmd, explain_exit(code)))
187 187 return open(outname, 'rb').read()
188 188 finally:
189 189 try:
190 190 if inname: os.unlink(inname)
191 191 except: pass
192 192 try:
193 193 if outname: os.unlink(outname)
194 194 except: pass
195 195
196 196 filtertable = {
197 197 'tempfile:': tempfilter,
198 198 'pipe:': pipefilter,
199 199 }
200 200
201 201 def filter(s, cmd):
202 202 "filter a string through a command that transforms its input to its output"
203 203 for name, fn in filtertable.iteritems():
204 204 if cmd.startswith(name):
205 205 return fn(s, cmd[len(name):].lstrip())
206 206 return pipefilter(s, cmd)
207 207
208 208 def binary(s):
209 209 """return true if a string is binary data"""
210 210 return bool(s and '\0' in s)
211 211
212 212 def increasingchunks(source, min=1024, max=65536):
213 213 '''return no less than min bytes per chunk while data remains,
214 214 doubling min after each chunk until it reaches max'''
215 215 def log2(x):
216 216 if not x:
217 217 return 0
218 218 i = 0
219 219 while x:
220 220 x >>= 1
221 221 i += 1
222 222 return i - 1
223 223
224 224 buf = []
225 225 blen = 0
226 226 for chunk in source:
227 227 buf.append(chunk)
228 228 blen += len(chunk)
229 229 if blen >= min:
230 230 if min < max:
231 231 min = min << 1
232 232 nmin = 1 << log2(blen)
233 233 if nmin > min:
234 234 min = nmin
235 235 if min > max:
236 236 min = max
237 237 yield ''.join(buf)
238 238 blen = 0
239 239 buf = []
240 240 if buf:
241 241 yield ''.join(buf)
242 242
243 243 Abort = error.Abort
244 244
245 245 def always(fn): return True
246 246 def never(fn): return False
247 247
248 248 def pathto(root, n1, n2):
249 249 '''return the relative path from one place to another.
250 250 root should use os.sep to separate directories
251 251 n1 should use os.sep to separate directories
252 252 n2 should use "/" to separate directories
253 253 returns an os.sep-separated path.
254 254
255 255 If n1 is a relative path, it's assumed it's
256 256 relative to root.
257 257 n2 should always be relative to root.
258 258 '''
259 259 if not n1: return localpath(n2)
260 260 if os.path.isabs(n1):
261 261 if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
262 262 return os.path.join(root, localpath(n2))
263 263 n2 = '/'.join((pconvert(root), n2))
264 264 a, b = splitpath(n1), n2.split('/')
265 265 a.reverse()
266 266 b.reverse()
267 267 while a and b and a[-1] == b[-1]:
268 268 a.pop()
269 269 b.pop()
270 270 b.reverse()
271 271 return os.sep.join((['..'] * len(a)) + b) or '.'
272 272
273 273 def canonpath(root, cwd, myname):
274 274 """return the canonical path of myname, given cwd and root"""
275 275 if endswithsep(root):
276 276 rootsep = root
277 277 else:
278 278 rootsep = root + os.sep
279 279 name = myname
280 280 if not os.path.isabs(name):
281 281 name = os.path.join(root, cwd, name)
282 282 name = os.path.normpath(name)
283 283 audit_path = path_auditor(root)
284 284 if name != rootsep and name.startswith(rootsep):
285 285 name = name[len(rootsep):]
286 286 audit_path(name)
287 287 return pconvert(name)
288 288 elif name == root:
289 289 return ''
290 290 else:
291 291 # Determine whether `name' is in the hierarchy at or beneath `root',
292 292 # by iterating name=dirname(name) until that causes no change (can't
293 293 # check name == '/', because that doesn't work on windows). For each
294 294 # `name', compare dev/inode numbers. If they match, the list `rel'
295 295 # holds the reversed list of components making up the relative file
296 296 # name we want.
297 297 root_st = os.stat(root)
298 298 rel = []
299 299 while True:
300 300 try:
301 301 name_st = os.stat(name)
302 302 except OSError:
303 303 break
304 304 if samestat(name_st, root_st):
305 305 if not rel:
306 306 # name was actually the same as root (maybe a symlink)
307 307 return ''
308 308 rel.reverse()
309 309 name = os.path.join(*rel)
310 310 audit_path(name)
311 311 return pconvert(name)
312 312 dirname, basename = os.path.split(name)
313 313 rel.append(basename)
314 314 if dirname == name:
315 315 break
316 316 name = dirname
317 317
318 318 raise Abort('%s not under root' % myname)
319 319
320 320 _hgexecutable = None
321 321
322 322 def main_is_frozen():
323 323 """return True if we are a frozen executable.
324 324
325 325 The code supports py2exe (most common, Windows only) and tools/freeze
326 326 (portable, not much used).
327 327 """
328 328 return (hasattr(sys, "frozen") or # new py2exe
329 329 hasattr(sys, "importers") or # old py2exe
330 330 imp.is_frozen("__main__")) # tools/freeze
331 331
332 332 def hgexecutable():
333 333 """return location of the 'hg' executable.
334 334
335 335 Defaults to $HG or 'hg' in the search path.
336 336 """
337 337 if _hgexecutable is None:
338 338 hg = os.environ.get('HG')
339 339 if hg:
340 340 set_hgexecutable(hg)
341 341 elif main_is_frozen():
342 342 set_hgexecutable(sys.executable)
343 343 else:
344 344 exe = find_exe('hg') or os.path.basename(sys.argv[0])
345 345 set_hgexecutable(exe)
346 346 return _hgexecutable
347 347
348 348 def set_hgexecutable(path):
349 349 """set location of the 'hg' executable"""
350 350 global _hgexecutable
351 351 _hgexecutable = path
352 352
353 353 def system(cmd, environ={}, cwd=None, onerr=None, errprefix=None):
354 354 '''enhanced shell command execution.
355 355 run with environment maybe modified, maybe in different dir.
356 356
357 357 if command fails and onerr is None, return status. if ui object,
358 358 print error message and return status, else raise onerr object as
359 359 exception.'''
360 360 def py2shell(val):
361 361 'convert python object into string that is useful to shell'
362 362 if val is None or val is False:
363 363 return '0'
364 364 if val is True:
365 365 return '1'
366 366 return str(val)
367 367 origcmd = cmd
368 368 if os.name == 'nt':
369 369 cmd = '"%s"' % cmd
370 370 env = dict(os.environ)
371 371 env.update((k, py2shell(v)) for k, v in environ.iteritems())
372 372 env['HG'] = hgexecutable()
373 373 rc = subprocess.call(cmd, shell=True, close_fds=closefds,
374 374 env=env, cwd=cwd)
375 375 if sys.platform == 'OpenVMS' and rc & 1:
376 376 rc = 0
377 377 if rc and onerr:
378 378 errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
379 379 explain_exit(rc)[0])
380 380 if errprefix:
381 381 errmsg = '%s: %s' % (errprefix, errmsg)
382 382 try:
383 383 onerr.warn(errmsg + '\n')
384 384 except AttributeError:
385 385 raise onerr(errmsg)
386 386 return rc
387 387
388 388 def checksignature(func):
389 389 '''wrap a function with code to check for calling errors'''
390 390 def check(*args, **kwargs):
391 391 try:
392 392 return func(*args, **kwargs)
393 393 except TypeError:
394 394 if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
395 395 raise error.SignatureError
396 396 raise
397 397
398 398 return check
399 399
400 400 # os.path.lexists is not available on python2.3
401 401 def lexists(filename):
402 402 "test whether a file with this name exists. does not follow symlinks"
403 403 try:
404 404 os.lstat(filename)
405 405 except:
406 406 return False
407 407 return True
408 408
409 409 def unlink(f):
410 410 """unlink and remove the directory if it is empty"""
411 411 os.unlink(f)
412 412 # try removing directories that might now be empty
413 413 try:
414 414 os.removedirs(os.path.dirname(f))
415 415 except OSError:
416 416 pass
417 417
418 418 def copyfile(src, dest):
419 419 "copy a file, preserving mode and atime/mtime"
420 420 if os.path.islink(src):
421 421 try:
422 422 os.unlink(dest)
423 423 except:
424 424 pass
425 425 os.symlink(os.readlink(src), dest)
426 426 else:
427 427 try:
428 428 shutil.copyfile(src, dest)
429 429 shutil.copystat(src, dest)
430 430 except shutil.Error, inst:
431 431 raise Abort(str(inst))
432 432
433 433 def copyfiles(src, dst, hardlink=None):
434 434 """Copy a directory tree using hardlinks if possible"""
435 435
436 436 if hardlink is None:
437 437 hardlink = (os.stat(src).st_dev ==
438 438 os.stat(os.path.dirname(dst)).st_dev)
439 439
440 440 if os.path.isdir(src):
441 441 os.mkdir(dst)
442 442 for name, kind in osutil.listdir(src):
443 443 srcname = os.path.join(src, name)
444 444 dstname = os.path.join(dst, name)
445 445 copyfiles(srcname, dstname, hardlink)
446 446 else:
447 447 if hardlink:
448 448 try:
449 449 os_link(src, dst)
450 450 except (IOError, OSError):
451 451 hardlink = False
452 452 shutil.copy(src, dst)
453 453 else:
454 454 shutil.copy(src, dst)
455 455
456 456 class path_auditor(object):
457 457 '''ensure that a filesystem path contains no banned components.
458 458 the following properties of a path are checked:
459 459
460 460 - under top-level .hg
461 461 - starts at the root of a windows drive
462 462 - contains ".."
463 463 - traverses a symlink (e.g. a/symlink_here/b)
464 464 - inside a nested repository'''
465 465
466 466 def __init__(self, root):
467 467 self.audited = set()
468 468 self.auditeddir = set()
469 469 self.root = root
470 470
471 471 def __call__(self, path):
472 472 if path in self.audited:
473 473 return
474 474 normpath = os.path.normcase(path)
475 475 parts = splitpath(normpath)
476 476 if (os.path.splitdrive(path)[0]
477 477 or parts[0].lower() in ('.hg', '.hg.', '')
478 478 or os.pardir in parts):
479 479 raise Abort(_("path contains illegal component: %s") % path)
480 480 if '.hg' in path.lower():
481 481 lparts = [p.lower() for p in parts]
482 482 for p in '.hg', '.hg.':
483 483 if p in lparts[1:]:
484 484 pos = lparts.index(p)
485 485 base = os.path.join(*parts[:pos])
486 486 raise Abort(_('path %r is inside repo %r') % (path, base))
487 487 def check(prefix):
488 488 curpath = os.path.join(self.root, prefix)
489 489 try:
490 490 st = os.lstat(curpath)
491 491 except OSError, err:
492 492 # EINVAL can be raised as invalid path syntax under win32.
493 493 # It must be ignored so that patterns can still be checked.
494 494 if err.errno not in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL):
495 495 raise
496 496 else:
497 497 if stat.S_ISLNK(st.st_mode):
498 498 raise Abort(_('path %r traverses symbolic link %r') %
499 499 (path, prefix))
500 500 elif (stat.S_ISDIR(st.st_mode) and
501 501 os.path.isdir(os.path.join(curpath, '.hg'))):
502 502 raise Abort(_('path %r is inside repo %r') %
503 503 (path, prefix))
504 504 parts.pop()
505 505 prefixes = []
506 506 while parts:
507 507 prefix = os.sep.join(parts)
508 508 if prefix in self.auditeddir:
509 509 break
510 510 check(prefix)
511 511 prefixes.append(prefix)
512 512 parts.pop()
513 513
514 514 self.audited.add(path)
515 515 # only add prefixes to the cache after checking everything: we don't
516 516 # want to add "foo/bar/baz" before checking if there's a "foo/.hg"
517 517 self.auditeddir.update(prefixes)
518 518
519 519 def nlinks(pathname):
520 520 """Return number of hardlinks for the given file."""
521 521 return os.lstat(pathname).st_nlink
522 522
523 523 if hasattr(os, 'link'):
524 524 os_link = os.link
525 525 else:
526 526 def os_link(src, dst):
527 527 raise OSError(0, _("Hardlinks not supported"))
528 528
529 529 def lookup_reg(key, name=None, scope=None):
530 530 return None
531 531
532 def hidewindow():
533 """Hide current shell window.
534
535 Used to hide the window opened when starting an asynchronous
536 child process under Windows; it is unneeded on other systems.
537 """
538 pass
539
532 540 if os.name == 'nt':
533 541 from windows import *
534 542 else:
535 543 from posix import *
536 544
537 545 def makelock(info, pathname):
538 546 try:
539 547 return os.symlink(info, pathname)
540 548 except OSError, why:
541 549 if why.errno == errno.EEXIST:
542 550 raise
543 551 except AttributeError: # no symlink in os
544 552 pass
545 553
546 554 ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
547 555 os.write(ld, info)
548 556 os.close(ld)
549 557
550 558 def readlock(pathname):
551 559 try:
552 560 return os.readlink(pathname)
553 561 except OSError, why:
554 562 if why.errno not in (errno.EINVAL, errno.ENOSYS):
555 563 raise
556 564 except AttributeError: # no symlink in os
557 565 pass
558 566 return posixfile(pathname).read()
559 567
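# Illustrative sketch (hypothetical lock file and payload): the lock payload
# is stored in a symlink where supported, or in an O_EXCL-created regular
# file otherwise; readlock() handles both representations.
#
#   makelock('myhost:1234', '.hg/store/lock')
#   readlock('.hg/store/lock')    # -> 'myhost:1234'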
560 568 def fstat(fp):
561 569 '''stat file object that may not have fileno method.'''
562 570 try:
563 571 return os.fstat(fp.fileno())
564 572 except AttributeError:
565 573 return os.stat(fp.name)
566 574
567 575 # File system features
568 576
569 577 def checkcase(path):
570 578 """
571 579 Check whether the given path is on a case-sensitive filesystem
572 580
573 581 Requires a path (like /foo/.hg) ending with a foldable final
574 582 directory component.
575 583 """
576 584 s1 = os.stat(path)
577 585 d, b = os.path.split(path)
578 586 p2 = os.path.join(d, b.upper())
579 587 if path == p2:
580 588 p2 = os.path.join(d, b.lower())
581 589 try:
582 590 s2 = os.stat(p2)
583 591 if s2 == s1:
584 592 return False
585 593 return True
586 594 except:
587 595 return True
588 596
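# Illustrative sketch: checkcase() stats the path and a case-flipped variant
# of its last component; identical results mean the filesystem folds case.
#
#   checkcase('/repo/.hg')   # True on ext3/ext4, typically False on NTFS/HFS+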
589 597 _fspathcache = {}
590 598 def fspath(name, root):
591 599 '''Get name in the case stored in the filesystem
592 600
593 601 The name is either relative to root, or it is an absolute path starting
594 602 with root. Note that this function is unnecessary, and should not be
595 603 called, for case-sensitive filesystems (simply because it's expensive).
596 604 '''
597 605 # If name is absolute, make it relative
598 606 if name.lower().startswith(root.lower()):
599 607 l = len(root)
600 608 if name[l] == os.sep or name[l] == os.altsep:
601 609 l = l + 1
602 610 name = name[l:]
603 611
604 612 if not os.path.exists(os.path.join(root, name)):
605 613 return None
606 614
607 615 seps = os.sep
608 616 if os.altsep:
609 617 seps = seps + os.altsep
610 618 # Protect backslashes. This gets silly very quickly.
611 619 seps = seps.replace('\\','\\\\')
612 620 pattern = re.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
613 621 dir = os.path.normcase(os.path.normpath(root))
614 622 result = []
615 623 for part, sep in pattern.findall(name):
616 624 if sep:
617 625 result.append(sep)
618 626 continue
619 627
620 628 if dir not in _fspathcache:
621 629 _fspathcache[dir] = os.listdir(dir)
622 630 contents = _fspathcache[dir]
623 631
624 632 lpart = part.lower()
625 633 lenp = len(part)
626 634 for n in contents:
627 635 if lenp == len(n) and n.lower() == lpart:
628 636 result.append(n)
629 637 break
630 638 else:
631 639 # Cannot happen, as the file exists!
632 640 result.append(part)
633 641 dir = os.path.join(dir, lpart)
634 642
635 643 return ''.join(result)
636 644
637 645 def checkexec(path):
638 646 """
639 647 Check whether the given path is on a filesystem with UNIX-like exec flags
640 648
641 649 Requires a directory (like /foo/.hg)
642 650 """
643 651
644 652 # VFAT on some Linux versions can flip mode but it doesn't persist
645 653 # a FS remount. Frequently we can detect it if files are created
646 654 # with exec bit on.
647 655
648 656 try:
649 657 EXECFLAGS = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
650 658 fh, fn = tempfile.mkstemp("", "", path)
651 659 try:
652 660 os.close(fh)
653 661 m = os.stat(fn).st_mode & 0777
654 662 new_file_has_exec = m & EXECFLAGS
655 663 os.chmod(fn, m ^ EXECFLAGS)
656 664 exec_flags_cannot_flip = ((os.stat(fn).st_mode & 0777) == m)
657 665 finally:
658 666 os.unlink(fn)
659 667 except (IOError, OSError):
660 668 # we don't care, the user probably won't be able to commit anyway
661 669 return False
662 670 return not (new_file_has_exec or exec_flags_cannot_flip)
663 671
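# Illustrative sketch: checkexec() creates a scratch file in the directory,
# flips its exec bits and reports whether the change both starts off clear
# and sticks across the chmod.
#
#   checkexec('/repo/.hg')   # True on common POSIX filesystems, False on VFAT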
664 672 def checklink(path):
665 673 """check whether the given path is on a symlink-capable filesystem"""
666 674 # mktemp is not racy because symlink creation will fail if the
667 675 # file already exists
668 676 name = tempfile.mktemp(dir=path)
669 677 try:
670 678 os.symlink(".", name)
671 679 os.unlink(name)
672 680 return True
673 681 except (OSError, AttributeError):
674 682 return False
675 683
676 684 def needbinarypatch():
677 685 """return True if patches should be applied in binary mode by default."""
678 686 return os.name == 'nt'
679 687
680 688 def endswithsep(path):
681 689 '''Check path ends with os.sep or os.altsep.'''
682 690 return path.endswith(os.sep) or os.altsep and path.endswith(os.altsep)
683 691
684 692 def splitpath(path):
685 693 '''Split path by os.sep.
686 694 Note that this function does not use os.altsep because this is
687 695 an alternative of simple "xxx.split(os.sep)".
688 696 It is recommended to use os.path.normpath() before using this
689 697 function if need.'''
690 698 return path.split(os.sep)
691 699
692 700 def gui():
693 701 '''Are we running in a GUI?'''
694 702 return os.name == "nt" or os.name == "mac" or os.environ.get("DISPLAY")
695 703
696 704 def mktempcopy(name, emptyok=False, createmode=None):
697 705 """Create a temporary file with the same contents from name
698 706
699 707 The permission bits are copied from the original file.
700 708
701 709 If the temporary file is going to be truncated immediately, you
702 710 can use emptyok=True as an optimization.
703 711
704 712 Returns the name of the temporary file.
705 713 """
706 714 d, fn = os.path.split(name)
707 715 fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
708 716 os.close(fd)
709 717 # Temporary files are created with mode 0600, which is usually not
710 718 # what we want. If the original file already exists, just copy
711 719 # its mode. Otherwise, manually obey umask.
712 720 try:
713 721 st_mode = os.lstat(name).st_mode & 0777
714 722 except OSError, inst:
715 723 if inst.errno != errno.ENOENT:
716 724 raise
717 725 st_mode = createmode
718 726 if st_mode is None:
719 727 st_mode = ~umask
720 728 st_mode &= 0666
721 729 os.chmod(temp, st_mode)
722 730 if emptyok:
723 731 return temp
724 732 try:
725 733 try:
726 734 ifp = posixfile(name, "rb")
727 735 except IOError, inst:
728 736 if inst.errno == errno.ENOENT:
729 737 return temp
730 738 if not getattr(inst, 'filename', None):
731 739 inst.filename = name
732 740 raise
733 741 ofp = posixfile(temp, "wb")
734 742 for chunk in filechunkiter(ifp):
735 743 ofp.write(chunk)
736 744 ifp.close()
737 745 ofp.close()
738 746 except:
739 747 try: os.unlink(temp)
740 748 except: pass
741 749 raise
742 750 return temp
743 751
744 752 class atomictempfile(object):
745 753 """file-like object that atomically updates a file
746 754
747 755 All writes will be redirected to a temporary copy of the original
748 756 file. When rename is called, the copy is renamed to the original
749 757 name, making the changes visible.
750 758 """
751 759 def __init__(self, name, mode, createmode):
752 760 self.__name = name
753 761 self._fp = None
754 762 self.temp = mktempcopy(name, emptyok=('w' in mode),
755 763 createmode=createmode)
756 764 self._fp = posixfile(self.temp, mode)
757 765
758 766 def __getattr__(self, name):
759 767 return getattr(self._fp, name)
760 768
761 769 def rename(self):
762 770 if not self._fp.closed:
763 771 self._fp.close()
764 772 rename(self.temp, localpath(self.__name))
765 773
766 774 def __del__(self):
767 775 if not self._fp:
768 776 return
769 777 if not self._fp.closed:
770 778 try:
771 779 os.unlink(self.temp)
772 780 except: pass
773 781 self._fp.close()
774 782
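# Illustrative sketch (hypothetical target file): all writes land in a
# temporary copy; rename() publishes them in one step, and dropping the
# object without calling rename() discards the temporary file instead.
#
#   f = atomictempfile('data/state', 'w', createmode=None)
#   f.write('all or nothing\n')
#   f.rename()                  # atomically replaces data/state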
775 783 def makedirs(name, mode=None):
776 784 """recursive directory creation with parent mode inheritance"""
777 785 try:
778 786 os.mkdir(name)
779 787 if mode is not None:
780 788 os.chmod(name, mode)
781 789 return
782 790 except OSError, err:
783 791 if err.errno == errno.EEXIST:
784 792 return
785 793 if err.errno != errno.ENOENT:
786 794 raise
787 795 parent = os.path.abspath(os.path.dirname(name))
788 796 makedirs(parent, mode)
789 797 makedirs(name, mode)
790 798
791 799 class opener(object):
792 800 """Open files relative to a base directory
793 801
794 802 This class is used to hide the details of COW semantics and
795 803 remote file access from higher level code.
796 804 """
797 805 def __init__(self, base, audit=True):
798 806 self.base = base
799 807 if audit:
800 808 self.audit_path = path_auditor(base)
801 809 else:
802 810 self.audit_path = always
803 811 self.createmode = None
804 812
805 813 @propertycache
806 814 def _can_symlink(self):
807 815 return checklink(self.base)
808 816
809 817 def _fixfilemode(self, name):
810 818 if self.createmode is None:
811 819 return
812 820 os.chmod(name, self.createmode & 0666)
813 821
814 822 def __call__(self, path, mode="r", text=False, atomictemp=False):
815 823 self.audit_path(path)
816 824 f = os.path.join(self.base, path)
817 825
818 826 if not text and "b" not in mode:
819 827 mode += "b" # for that other OS
820 828
821 829 nlink = -1
822 830 if mode not in ("r", "rb"):
823 831 try:
824 832 nlink = nlinks(f)
825 833 except OSError:
826 834 nlink = 0
827 835 d = os.path.dirname(f)
828 836 if not os.path.isdir(d):
829 837 makedirs(d, self.createmode)
830 838 if atomictemp:
831 839 return atomictempfile(f, mode, self.createmode)
832 840 if nlink > 1:
833 841 rename(mktempcopy(f), f)
834 842 fp = posixfile(f, mode)
835 843 if nlink == 0:
836 844 self._fixfilemode(f)
837 845 return fp
838 846
839 847 def symlink(self, src, dst):
840 848 self.audit_path(dst)
841 849 linkname = os.path.join(self.base, dst)
842 850 try:
843 851 os.unlink(linkname)
844 852 except OSError:
845 853 pass
846 854
847 855 dirname = os.path.dirname(linkname)
848 856 if not os.path.exists(dirname):
849 857 makedirs(dirname, self.createmode)
850 858
851 859 if self._can_symlink:
852 860 try:
853 861 os.symlink(src, linkname)
854 862 except OSError, err:
855 863 raise OSError(err.errno, _('could not symlink to %r: %s') %
856 864 (src, err.strerror), linkname)
857 865 else:
858 866 f = self(dst, "w")
859 867 f.write(src)
860 868 f.close()
861 869 self._fixfilemode(dst)
862 870
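# Illustrative sketch (hypothetical repository layout): an opener rooted at
# .hg audits every relative path, creates missing directories on write, and
# can hand back an atomictempfile instead of a plain file object.
#
#   op = opener('/repo/.hg')
#   data = op('store/00changelog.i', 'rb').read()
#   w = op('branch.cache', 'w', atomictemp=True)
#   w.write('tip line\n')
#   w.rename()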
863 871 class chunkbuffer(object):
864 872 """Allow arbitrary sized chunks of data to be efficiently read from an
865 873 iterator over chunks of arbitrary size."""
866 874
867 875 def __init__(self, in_iter):
868 876 """in_iter is the iterator that's iterating over the input chunks.
869 877 targetsize is how big a buffer to try to maintain."""
870 878 self.iter = iter(in_iter)
871 879 self.buf = ''
872 880 self.targetsize = 2**16
873 881
874 882 def read(self, l):
875 883 """Read L bytes of data from the iterator of chunks of data.
876 884 Returns less than L bytes if the iterator runs dry."""
877 885 if l > len(self.buf) and self.iter:
878 886 # Clamp to a multiple of self.targetsize
879 887 targetsize = max(l, self.targetsize)
880 888 collector = cStringIO.StringIO()
881 889 collector.write(self.buf)
882 890 collected = len(self.buf)
883 891 for chunk in self.iter:
884 892 collector.write(chunk)
885 893 collected += len(chunk)
886 894 if collected >= targetsize:
887 895 break
888 896 if collected < targetsize:
889 897 self.iter = False
890 898 self.buf = collector.getvalue()
891 899 if len(self.buf) == l:
892 900 s, self.buf = str(self.buf), ''
893 901 else:
894 902 s, self.buf = self.buf[:l], buffer(self.buf, l)
895 903 return s
896 904
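# Illustrative sketch: chunkbuffer turns an iterator of arbitrarily sized
# strings into something that can be read() in exact byte counts.
#
#   cb = chunkbuffer(iter(['abc', 'defgh', 'ij']))
#   cb.read(4)     # -> 'abcd'
#   cb.read(100)   # -> 'efghij' (iterator exhausted, short read)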
897 905 def filechunkiter(f, size=65536, limit=None):
898 906 """Create a generator that produces the data in the file size
899 907 (default 65536) bytes at a time, up to optional limit (default is
900 908 to read all data). Chunks may be less than size bytes if the
901 909 chunk is the last chunk in the file, or the file is a socket or
902 910 some other type of file that sometimes reads less data than is
903 911 requested."""
904 912 assert size >= 0
905 913 assert limit is None or limit >= 0
906 914 while True:
907 915 if limit is None: nbytes = size
908 916 else: nbytes = min(limit, size)
909 917 s = nbytes and f.read(nbytes)
910 918 if not s: break
911 919 if limit: limit -= len(s)
912 920 yield s
913 921
914 922 def makedate():
915 923 lt = time.localtime()
916 924 if lt[8] == 1 and time.daylight:
917 925 tz = time.altzone
918 926 else:
919 927 tz = time.timezone
920 928 return time.mktime(lt), tz
921 929
922 930 def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
923 931 """represent a (unixtime, offset) tuple as a localized time.
924 932 unixtime is seconds since the epoch, and offset is the time zone's
925 933 number of seconds away from UTC. "%1" and "%2" in the format
926 934 string expand to the offset's signed hours and its minutes."""
927 935 t, tz = date or makedate()
928 936 if "%1" in format or "%2" in format:
929 937 sign = (tz > 0) and "-" or "+"
930 938 minutes = abs(tz) // 60
931 939 format = format.replace("%1", "%c%02d" % (sign, minutes // 60))
932 940 format = format.replace("%2", "%02d" % (minutes % 60))
933 941 s = time.strftime(format, time.gmtime(float(t) - tz))
934 942 return s
935 943
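# Illustrative sketch: dates are (unixtime, offset) pairs, offset being the
# zone's distance from UTC in seconds; "%1"/"%2" in the format expand to the
# offset's signed hours and minutes.
#
#   datestr((0, -3600))                     # -> 'Thu Jan 01 01:00:00 1970 +0100'
#   datestr((0, -3600), format='%Y-%m-%d')  # -> '1970-01-01'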
936 944 def shortdate(date=None):
937 945 """turn (timestamp, tzoff) tuple into iso 8631 date."""
938 946 return datestr(date, format='%Y-%m-%d')
939 947
940 948 def strdate(string, format, defaults=[]):
941 949 """parse a localized time string and return a (unixtime, offset) tuple.
942 950 if the string cannot be parsed, ValueError is raised."""
943 951 def timezone(string):
944 952 tz = string.split()[-1]
945 953 if tz[0] in "+-" and len(tz) == 5 and tz[1:].isdigit():
946 954 sign = (tz[0] == "+") and 1 or -1
947 955 hours = int(tz[1:3])
948 956 minutes = int(tz[3:5])
949 957 return -sign * (hours * 60 + minutes) * 60
950 958 if tz == "GMT" or tz == "UTC":
951 959 return 0
952 960 return None
953 961
954 962 # NOTE: unixtime = localunixtime + offset
955 963 offset, date = timezone(string), string
956 964 if offset != None:
957 965 date = " ".join(string.split()[:-1])
958 966
959 967 # add missing elements from defaults
960 968 for part in defaults:
961 969 found = [True for p in part if ("%"+p) in format]
962 970 if not found:
963 971 date += "@" + defaults[part]
964 972 format += "@%" + part[0]
965 973
966 974 timetuple = time.strptime(date, format)
967 975 localunixtime = int(calendar.timegm(timetuple))
968 976 if offset is None:
969 977 # local timezone
970 978 unixtime = int(time.mktime(timetuple))
971 979 offset = unixtime - localunixtime
972 980 else:
973 981 unixtime = localunixtime + offset
974 982 return unixtime, offset
975 983
976 984 def parsedate(date, formats=None, defaults=None):
977 985 """parse a localized date/time string and return a (unixtime, offset) tuple.
978 986
979 987 The date may be a "unixtime offset" string or in one of the specified
980 988 formats. If the date already is a (unixtime, offset) tuple, it is returned.
981 989 """
982 990 if not date:
983 991 return 0, 0
984 992 if isinstance(date, tuple) and len(date) == 2:
985 993 return date
986 994 if not formats:
987 995 formats = defaultdateformats
988 996 date = date.strip()
989 997 try:
990 998 when, offset = map(int, date.split(' '))
991 999 except ValueError:
992 1000 # fill out defaults
993 1001 if not defaults:
994 1002 defaults = {}
995 1003 now = makedate()
996 1004 for part in "d mb yY HI M S".split():
997 1005 if part not in defaults:
998 1006 if part[0] in "HMS":
999 1007 defaults[part] = "00"
1000 1008 else:
1001 1009 defaults[part] = datestr(now, "%" + part[0])
1002 1010
1003 1011 for format in formats:
1004 1012 try:
1005 1013 when, offset = strdate(date, format, defaults)
1006 1014 except (ValueError, OverflowError):
1007 1015 pass
1008 1016 else:
1009 1017 break
1010 1018 else:
1011 1019 raise Abort(_('invalid date: %r ') % date)
1012 1020 # validate explicit (probably user-specified) date and
1013 1021 # time zone offset. values must fit in signed 32 bits for
1014 1022 # current 32-bit linux runtimes. timezones go from UTC-12
1015 1023 # to UTC+14
1016 1024 if abs(when) > 0x7fffffff:
1017 1025 raise Abort(_('date exceeds 32 bits: %d') % when)
1018 1026 if offset < -50400 or offset > 43200:
1019 1027 raise Abort(_('impossible time zone offset: %d') % offset)
1020 1028 return when, offset
1021 1029
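# Illustrative sketch: parsedate() accepts a raw "unixtime offset" string, an
# already-parsed (unixtime, offset) pair, or any of the known date formats.
#
#   parsedate('1234567890 -3600')           # -> (1234567890, -3600)
#   parsedate('2010-01-15 13:00:00 +0100')  # -> (1263556800, -3600)
#   parsedate((0, 0))                       # returned unchanged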
1022 1030 def matchdate(date):
1023 1031 """Return a function that matches a given date match specifier
1024 1032
1025 1033 Formats include:
1026 1034
1027 1035 '{date}' match a given date to the accuracy provided
1028 1036
1029 1037 '<{date}' on or before a given date
1030 1038
1031 1039 '>{date}' on or after a given date
1032 1040
1033 1041 """
1034 1042
1035 1043 def lower(date):
1036 1044 d = dict(mb="1", d="1")
1037 1045 return parsedate(date, extendeddateformats, d)[0]
1038 1046
1039 1047 def upper(date):
1040 1048 d = dict(mb="12", HI="23", M="59", S="59")
1041 1049 for days in "31 30 29".split():
1042 1050 try:
1043 1051 d["d"] = days
1044 1052 return parsedate(date, extendeddateformats, d)[0]
1045 1053 except:
1046 1054 pass
1047 1055 d["d"] = "28"
1048 1056 return parsedate(date, extendeddateformats, d)[0]
1049 1057
1050 1058 date = date.strip()
1051 1059 if date[0] == "<":
1052 1060 when = upper(date[1:])
1053 1061 return lambda x: x <= when
1054 1062 elif date[0] == ">":
1055 1063 when = lower(date[1:])
1056 1064 return lambda x: x >= when
1057 1065 elif date[0] == "-":
1058 1066 try:
1059 1067 days = int(date[1:])
1060 1068 except ValueError:
1061 1069 raise Abort(_("invalid day spec: %s") % date[1:])
1062 1070 when = makedate()[0] - days * 3600 * 24
1063 1071 return lambda x: x >= when
1064 1072 elif " to " in date:
1065 1073 a, b = date.split(" to ")
1066 1074 start, stop = lower(a), upper(b)
1067 1075 return lambda x: x >= start and x <= stop
1068 1076 else:
1069 1077 start, stop = lower(date), upper(date)
1070 1078 return lambda x: x >= start and x <= stop
1071 1079
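# Illustrative sketch: matchdate() compiles a date specifier into a predicate
# over unixtime values.
#
#   m = matchdate('>2010-01-01')
#   m(parsedate('2010-06-01')[0])   # True
#   recent = matchdate('-7')        # within the last seven days
#   recent(makedate()[0])           # True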
1072 1080 def shortuser(user):
1073 1081 """Return a short representation of a user name or email address."""
1074 1082 f = user.find('@')
1075 1083 if f >= 0:
1076 1084 user = user[:f]
1077 1085 f = user.find('<')
1078 1086 if f >= 0:
1079 1087 user = user[f+1:]
1080 1088 f = user.find(' ')
1081 1089 if f >= 0:
1082 1090 user = user[:f]
1083 1091 f = user.find('.')
1084 1092 if f >= 0:
1085 1093 user = user[:f]
1086 1094 return user
1087 1095
1088 1096 def email(author):
1089 1097 '''get email of author.'''
1090 1098 r = author.find('>')
1091 1099 if r == -1: r = None
1092 1100 return author[author.find('<')+1:r]
1093 1101
1094 1102 def ellipsis(text, maxlength=400):
1095 1103 """Trim string to at most maxlength (default: 400) characters."""
1096 1104 if len(text) <= maxlength:
1097 1105 return text
1098 1106 else:
1099 1107 return "%s..." % (text[:maxlength-3])
1100 1108
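# Illustrative sketch (hypothetical author string):
#
#   shortuser('John Doe <john.doe@example.com>')   # -> 'john'
#   email('John Doe <john.doe@example.com>')       # -> 'john.doe@example.com'
#   ellipsis('x' * 500, maxlength=10)              # -> 'xxxxxxx...'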
1101 1109 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
1102 1110 '''yield every hg repository under path, recursively.'''
1103 1111 def errhandler(err):
1104 1112 if err.filename == path:
1105 1113 raise err
1106 1114 if followsym and hasattr(os.path, 'samestat'):
1107 1115 def _add_dir_if_not_there(dirlst, dirname):
1108 1116 match = False
1109 1117 samestat = os.path.samestat
1110 1118 dirstat = os.stat(dirname)
1111 1119 for lstdirstat in dirlst:
1112 1120 if samestat(dirstat, lstdirstat):
1113 1121 match = True
1114 1122 break
1115 1123 if not match:
1116 1124 dirlst.append(dirstat)
1117 1125 return not match
1118 1126 else:
1119 1127 followsym = False
1120 1128
1121 1129 if (seen_dirs is None) and followsym:
1122 1130 seen_dirs = []
1123 1131 _add_dir_if_not_there(seen_dirs, path)
1124 1132 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
1125 1133 dirs.sort()
1126 1134 if '.hg' in dirs:
1127 1135 yield root # found a repository
1128 1136 qroot = os.path.join(root, '.hg', 'patches')
1129 1137 if os.path.isdir(os.path.join(qroot, '.hg')):
1130 1138 yield qroot # we have a patch queue repo here
1131 1139 if recurse:
1132 1140 # avoid recursing inside the .hg directory
1133 1141 dirs.remove('.hg')
1134 1142 else:
1135 1143 dirs[:] = [] # don't descend further
1136 1144 elif followsym:
1137 1145 newdirs = []
1138 1146 for d in dirs:
1139 1147 fname = os.path.join(root, d)
1140 1148 if _add_dir_if_not_there(seen_dirs, fname):
1141 1149 if os.path.islink(fname):
1142 1150 for hgname in walkrepos(fname, True, seen_dirs):
1143 1151 yield hgname
1144 1152 else:
1145 1153 newdirs.append(d)
1146 1154 dirs[:] = newdirs
1147 1155
1148 1156 _rcpath = None
1149 1157
1150 1158 def os_rcpath():
1151 1159 '''return default os-specific hgrc search path'''
1152 1160 path = system_rcpath()
1153 1161 path.extend(user_rcpath())
1154 1162 path = [os.path.normpath(f) for f in path]
1155 1163 return path
1156 1164
1157 1165 def rcpath():
1158 1166 '''return hgrc search path. if env var HGRCPATH is set, use it.
1159 1167 for each item in path, if directory, use files ending in .rc,
1160 1168 else use item.
1161 1169 make HGRCPATH empty to only look in .hg/hgrc of current repo.
1162 1170 if no HGRCPATH, use default os-specific path.'''
1163 1171 global _rcpath
1164 1172 if _rcpath is None:
1165 1173 if 'HGRCPATH' in os.environ:
1166 1174 _rcpath = []
1167 1175 for p in os.environ['HGRCPATH'].split(os.pathsep):
1168 1176 if not p: continue
1169 1177 p = expandpath(p)
1170 1178 if os.path.isdir(p):
1171 1179 for f, kind in osutil.listdir(p):
1172 1180 if f.endswith('.rc'):
1173 1181 _rcpath.append(os.path.join(p, f))
1174 1182 else:
1175 1183 _rcpath.append(p)
1176 1184 else:
1177 1185 _rcpath = os_rcpath()
1178 1186 return _rcpath
1179 1187
1180 1188 def bytecount(nbytes):
1181 1189 '''return byte count formatted as readable string, with units'''
1182 1190
1183 1191 units = (
1184 1192 (100, 1<<30, _('%.0f GB')),
1185 1193 (10, 1<<30, _('%.1f GB')),
1186 1194 (1, 1<<30, _('%.2f GB')),
1187 1195 (100, 1<<20, _('%.0f MB')),
1188 1196 (10, 1<<20, _('%.1f MB')),
1189 1197 (1, 1<<20, _('%.2f MB')),
1190 1198 (100, 1<<10, _('%.0f KB')),
1191 1199 (10, 1<<10, _('%.1f KB')),
1192 1200 (1, 1<<10, _('%.2f KB')),
1193 1201 (1, 1, _('%.0f bytes')),
1194 1202 )
1195 1203
1196 1204 for multiplier, divisor, format in units:
1197 1205 if nbytes >= divisor * multiplier:
1198 1206 return format % (nbytes / float(divisor))
1199 1207 return units[-1][2] % nbytes
1200 1208
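# Illustrative sketch:
#
#   bytecount(500)            # -> '500 bytes'
#   bytecount(150 * 1024)     # -> '150 KB'
#   bytecount(2200000000)     # -> '2.05 GB'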
1201 1209 def drop_scheme(scheme, path):
1202 1210 sc = scheme + ':'
1203 1211 if path.startswith(sc):
1204 1212 path = path[len(sc):]
1205 1213 if path.startswith('//'):
1206 1214 if scheme == 'file':
1207 1215 i = path.find('/', 2)
1208 1216 if i == -1:
1209 1217 return ''
1210 1218 # On Windows, absolute paths are rooted at the current drive
1211 1219 # root. On POSIX they are rooted at the file system root.
1212 1220 if os.name == 'nt':
1213 1221 droot = os.path.splitdrive(os.getcwd())[0] + '/'
1214 1222 path = os.path.join(droot, path[i+1:])
1215 1223 else:
1216 1224 path = path[i:]
1217 1225 else:
1218 1226 path = path[2:]
1219 1227 return path
1220 1228
1221 1229 def uirepr(s):
1222 1230 # Avoid double backslash in Windows path repr()
1223 1231 return repr(s).replace('\\\\', '\\')
1224 1232
1225 1233 def termwidth():
1226 1234 if 'COLUMNS' in os.environ:
1227 1235 try:
1228 1236 return int(os.environ['COLUMNS'])
1229 1237 except ValueError:
1230 1238 pass
1231 1239 try:
1232 1240 import termios, array, fcntl
1233 1241 for dev in (sys.stdout, sys.stdin):
1234 1242 try:
1235 1243 try:
1236 1244 fd = dev.fileno()
1237 1245 except AttributeError:
1238 1246 continue
1239 1247 if not os.isatty(fd):
1240 1248 continue
1241 1249 arri = fcntl.ioctl(fd, termios.TIOCGWINSZ, '\0' * 8)
1242 1250 return array.array('h', arri)[1]
1243 1251 except ValueError:
1244 1252 pass
1245 1253 except IOError, e:
1246 1254 if e[0] == errno.EINVAL:
1247 1255 pass
1248 1256 else:
1249 1257 raise
1250 1258 except ImportError:
1251 1259 pass
1252 1260 return 80
1253 1261
1254 1262 def wrap(line, hangindent, width=None):
1255 1263 if width is None:
1256 1264 width = termwidth() - 2
1257 1265 if width <= hangindent:
1258 1266 # adjust for weird terminal size
1259 1267 width = max(78, hangindent + 1)
1260 1268 padding = '\n' + ' ' * hangindent
1261 1269 # To avoid corrupting multi-byte characters in line, we must wrap
1262 1270 # a Unicode string instead of a bytestring.
1263 1271 try:
1264 1272 u = line.decode(encoding.encoding)
1265 1273 w = padding.join(textwrap.wrap(u, width=width - hangindent))
1266 1274 return w.encode(encoding.encoding)
1267 1275 except UnicodeDecodeError:
1268 1276 return padding.join(textwrap.wrap(line, width=width - hangindent))
1269 1277
1270 1278 def iterlines(iterator):
1271 1279 for chunk in iterator:
1272 1280 for line in chunk.splitlines():
1273 1281 yield line
1274 1282
1275 1283 def expandpath(path):
1276 1284 return os.path.expanduser(os.path.expandvars(path))
1277 1285
1278 1286 def hgcmd():
1279 1287 """Return the command used to execute current hg
1280 1288
1281 1289 This is different from hgexecutable() because on Windows we want
1282 1290 to avoid things opening new shell windows like batch files, so we
1283 1291 get either the python call or current executable.
1284 1292 """
1285 1293 if main_is_frozen():
1286 1294 return [sys.executable]
1287 1295 return gethgcmd()
@@ -1,174 +1,183 b''
1 1 # win32.py - utility functions that use win32 API
2 2 #
3 3 # Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2, incorporated herein by reference.
7 7
8 8 """Utility functions that use win32 API.
9 9
10 10 Mark Hammond's win32all package allows better functionality on
11 11 Windows. This module overrides definitions in util.py. If not
12 12 available, import of this module will fail, and generic code will be
13 13 used.
14 14 """
15 15
16 16 import win32api
17 17
18 18 import errno, os, sys, pywintypes, win32con, win32file, win32process
19 import winerror
19 import winerror, win32gui
20 20 import osutil, encoding
21 21 from win32com.shell import shell, shellcon
22 22
23 23 def os_link(src, dst):
24 24 try:
25 25 win32file.CreateHardLink(dst, src)
26 26 # CreateHardLink sometimes succeeds on mapped drives but
27 27 # following nlinks() returns 1. Check it now and bail out.
28 28 if nlinks(src) < 2:
29 29 try:
30 30 win32file.DeleteFile(dst)
31 31 except:
32 32 pass
33 33 # Fake hardlinking error
34 34 raise OSError(errno.EINVAL, 'Hardlinking not supported')
35 35 except pywintypes.error, details:
36 36 raise OSError(errno.EINVAL, 'target implements hardlinks improperly')
37 37 except NotImplementedError: # Another fake error on Win98
38 38 raise OSError(errno.EINVAL, 'Hardlinking not supported')
39 39
40 40 def _getfileinfo(pathname):
41 41 """Return number of hardlinks for the given file."""
42 42 try:
43 43 fh = win32file.CreateFile(pathname,
44 44 win32file.GENERIC_READ, win32file.FILE_SHARE_READ,
45 45 None, win32file.OPEN_EXISTING, 0, None)
46 46 try:
47 47 return win32file.GetFileInformationByHandle(fh)
48 48 finally:
49 49 fh.Close()
50 50 except pywintypes.error:
51 51 return None
52 52
53 53 def nlinks(pathname):
54 54 """Return number of hardlinks for the given file."""
55 55 res = _getfileinfo(pathname)
56 56 if res is not None:
57 57 return res[7]
58 58 else:
59 59 return os.lstat(pathname).st_nlink
60 60
61 61 def samefile(fpath1, fpath2):
62 62 """Returns whether fpath1 and fpath2 refer to the same file. This is only
63 63 guaranteed to work for files, not directories."""
64 64 res1 = _getfileinfo(fpath1)
65 65 res2 = _getfileinfo(fpath2)
66 66 if res1 is not None and res2 is not None:
67 67 # Index 4 is the volume serial number, and 8 and 9 contain the file ID
68 68 return res1[4] == res2[4] and res1[8] == res2[8] and res1[9] == res2[9]
69 69 else:
70 70 return False
71 71
72 72 def samedevice(fpath1, fpath2):
73 73 """Returns whether fpath1 and fpath2 are on the same device. This is only
74 74 guaranteed to work for files, not directories."""
75 75 res1 = _getfileinfo(fpath1)
76 76 res2 = _getfileinfo(fpath2)
77 77 if res1 is not None and res2 is not None:
78 78 return res1[4] == res2[4]
79 79 else:
80 80 return False
81 81
82 82 def testpid(pid):
83 83 '''return True if pid is still running or unable to
84 84 determine, False otherwise'''
85 85 try:
86 86 handle = win32api.OpenProcess(
87 87 win32con.PROCESS_QUERY_INFORMATION, False, pid)
88 88 if handle:
89 89 status = win32process.GetExitCodeProcess(handle)
90 90 return status == win32con.STILL_ACTIVE
91 91 except pywintypes.error, details:
92 92 return details[0] != winerror.ERROR_INVALID_PARAMETER
93 93 return True
94 94
95 95 def lookup_reg(key, valname=None, scope=None):
96 96 ''' Look up a key/value name in the Windows registry.
97 97
98 98 valname: value name. If unspecified, the default value for the key
99 99 is used.
100 100 scope: optionally specify scope for registry lookup, this can be
101 101 a sequence of scopes to look up in order. Default (CURRENT_USER,
102 102 LOCAL_MACHINE).
103 103 '''
104 104 try:
105 105 from _winreg import HKEY_CURRENT_USER, HKEY_LOCAL_MACHINE, \
106 106 QueryValueEx, OpenKey
107 107 except ImportError:
108 108 return None
109 109
110 110 if scope is None:
111 111 scope = (HKEY_CURRENT_USER, HKEY_LOCAL_MACHINE)
112 112 elif not isinstance(scope, (list, tuple)):
113 113 scope = (scope,)
114 114 for s in scope:
115 115 try:
116 116 val = QueryValueEx(OpenKey(s, key), valname)[0]
117 117 # never let a Unicode string escape into the wild
118 118 return encoding.tolocal(val.encode('UTF-8'))
119 119 except EnvironmentError:
120 120 pass
121 121
122 122 def system_rcpath_win32():
123 123 '''return default os-specific hgrc search path'''
124 124 proc = win32api.GetCurrentProcess()
125 125 try:
126 126 # This will fail on windows < NT
127 127 filename = win32process.GetModuleFileNameEx(proc, 0)
128 128 except:
129 129 filename = win32api.GetModuleFileName(0)
130 130 # Use mercurial.ini found in directory with hg.exe
131 131 progrc = os.path.join(os.path.dirname(filename), 'mercurial.ini')
132 132 if os.path.isfile(progrc):
133 133 return [progrc]
134 134 # else look for a system rcpath in the registry
135 135 try:
136 136 value = win32api.RegQueryValue(
137 137 win32con.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Mercurial')
138 138 rcpath = []
139 139 for p in value.split(os.pathsep):
140 140 if p.lower().endswith('mercurial.ini'):
141 141 rcpath.append(p)
142 142 elif os.path.isdir(p):
143 143 for f, kind in osutil.listdir(p):
144 144 if f.endswith('.rc'):
145 145 rcpath.append(os.path.join(p, f))
146 146 return rcpath
147 147 except pywintypes.error:
148 148 return []
149 149
150 150 def user_rcpath_win32():
151 151 '''return os-specific hgrc search path to the user dir'''
152 152 userdir = os.path.expanduser('~')
153 153 if sys.getwindowsversion()[3] != 2 and userdir == '~':
154 154 # We are on win < nt: fetch the APPDATA directory location and use
155 155 # the parent directory as the user home dir.
156 156 appdir = shell.SHGetPathFromIDList(
157 157 shell.SHGetSpecialFolderLocation(0, shellcon.CSIDL_APPDATA))
158 158 userdir = os.path.dirname(appdir)
159 159 return [os.path.join(userdir, 'mercurial.ini'),
160 160 os.path.join(userdir, '.hgrc')]
161 161
162 162 def getuser():
163 163 '''return name of current user'''
164 164 return win32api.GetUserName()
165 165
166 166 def set_signal_handler_win32():
167 167 """Register a termination handler for console events including
168 168 CTRL+C. python signal handlers do not work well with socket
169 169 operations.
170 170 """
171 171 def handler(event):
172 172 win32process.ExitProcess(1)
173 173 win32api.SetConsoleCtrlHandler(handler)
174 174
175 def hidewindow():
176 def callback(*args, **kwargs):
177 hwnd, pid = args
178 wpid = win32process.GetWindowThreadProcessId(hwnd)[1]
179 if pid == wpid:
180 win32gui.ShowWindow(hwnd, win32con.SW_HIDE)
181
182 pid = win32process.GetCurrentProcessId()
183 win32gui.EnumWindows(callback, pid)
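# EnumWindows() calls the callback once for every top-level window, passing
# the window handle and the extra argument (the current process id here);
# GetWindowThreadProcessId() returns a (thread id, process id) pair, so only
# windows belonging to this hg process get hidden. This provides the win32
# counterpart to the no-op hidewindow() stub added earlier in this changeset.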