move filename encoding functions from util.py to new store.py
Adrian Buehlmann
r6839:01db3e10 default
@@ -0,0 +1,39 @@
1 # store.py - repository store handling for Mercurial
2 #
3 # Copyright 2008 Matt Mackall <mpm@selenic.com>
4 #
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
7
8 def _buildencodefun():
9 e = '_'
10 win_reserved = [ord(x) for x in '\\:*?"<>|']
11 cmap = dict([ (chr(x), chr(x)) for x in xrange(127) ])
12 for x in (range(32) + range(126, 256) + win_reserved):
13 cmap[chr(x)] = "~%02x" % x
14 for x in range(ord("A"), ord("Z")+1) + [ord(e)]:
15 cmap[chr(x)] = e + chr(x).lower()
16 dmap = {}
17 for k, v in cmap.iteritems():
18 dmap[v] = k
19 def decode(s):
20 i = 0
21 while i < len(s):
22 for l in xrange(1, 4):
23 try:
24 yield dmap[s[i:i+l]]
25 i += l
26 break
27 except KeyError:
28 pass
29 else:
30 raise KeyError
31 return (lambda s: "".join([cmap[c] for c in s]),
32 lambda s: "".join(list(decode(s))))
33
34 encodefilename, decodefilename = _buildencodefun()
35
36 def encodedopener(openerfn, fn):
37 def o(path, *args, **kw):
38 return openerfn(fn(path), *args, **kw)
39 return o
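
A quick illustration of the scheme store.py introduces (an editorial sketch, not part of the commit): encodefilename maps each uppercase letter to '_' plus its lowercase form, doubles the '_' escape character itself, and rewrites control bytes, bytes above 125, and Windows-reserved characters as '~' plus two hex digits; decodefilename inverts this exactly. Assuming the module is importable as "store":

    import store

    # uppercase letters are escaped, so case-insensitive filesystems
    # cannot collide two tracked filenames
    assert store.encodefilename('data/README.i') == 'data/_r_e_a_d_m_e.i'
    # the escape character itself is doubled
    assert store.encodefilename('foo_bar') == 'foo__bar'
    # Windows-reserved characters become '~' + two hex digits ('?' is 0x3f)
    assert store.encodefilename('a?b') == 'a~3fb'
    # decodefilename is the exact inverse
    assert store.decodefilename('a~3fb') == 'a?b'

    # encodedopener wraps an opener so every path is encoded on the way in,
    # as localrepo.py now does for the store directory:
    #   sopener = store.encodedopener(util.opener(spath), store.encodefilename)
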
@@ -1,2076 +1,2076 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import bin, hex, nullid, nullrev, short
9 9 from i18n import _
10 10 import repo, changegroup
11 11 import changelog, dirstate, filelog, manifest, context, weakref
12 import lock, transaction, stat, errno, ui
12 import lock, transaction, stat, errno, ui, store
13 13 import os, revlog, time, util, extensions, hook, inspect
14 14 import match as match_
15 15
16 16 class localrepository(repo.repository):
17 17 capabilities = util.set(('lookup', 'changegroupsubset'))
18 18 supported = ('revlogv1', 'store')
19 19
20 20 def __init__(self, parentui, path=None, create=0):
21 21 repo.repository.__init__(self)
22 22 self.root = os.path.realpath(path)
23 23 self.path = os.path.join(self.root, ".hg")
24 24 self.origroot = path
25 25 self.opener = util.opener(self.path)
26 26 self.wopener = util.opener(self.root)
27 27
28 28 if not os.path.isdir(self.path):
29 29 if create:
30 30 if not os.path.exists(path):
31 31 os.mkdir(path)
32 32 os.mkdir(self.path)
33 33 requirements = ["revlogv1"]
34 34 if parentui.configbool('format', 'usestore', True):
35 35 os.mkdir(os.path.join(self.path, "store"))
36 36 requirements.append("store")
37 37 # create an invalid changelog
38 38 self.opener("00changelog.i", "a").write(
39 39 '\0\0\0\2' # represents revlogv2
40 40 ' dummy changelog to prevent using the old repo layout'
41 41 )
42 42 reqfile = self.opener("requires", "w")
43 43 for r in requirements:
44 44 reqfile.write("%s\n" % r)
45 45 reqfile.close()
46 46 else:
47 47 raise repo.RepoError(_("repository %s not found") % path)
48 48 elif create:
49 49 raise repo.RepoError(_("repository %s already exists") % path)
50 50 else:
51 51 # find requirements
52 52 try:
53 53 requirements = self.opener("requires").read().splitlines()
54 54 except IOError, inst:
55 55 if inst.errno != errno.ENOENT:
56 56 raise
57 57 requirements = []
58 58 # check them
59 59 for r in requirements:
60 60 if r not in self.supported:
61 61 raise repo.RepoError(_("requirement '%s' not supported") % r)
62 62
63 63 # setup store
64 64 if "store" in requirements:
65 self.encodefn = util.encodefilename
66 self.decodefn = util.decodefilename
65 self.encodefn = store.encodefilename
66 self.decodefn = store.decodefilename
67 67 self.spath = os.path.join(self.path, "store")
68 68 else:
69 69 self.encodefn = lambda x: x
70 70 self.decodefn = lambda x: x
71 71 self.spath = self.path
72 72
73 73 try:
74 74 # files in .hg/ will be created using this mode
75 75 mode = os.stat(self.spath).st_mode
76 76 # avoid some useless chmods
77 77 if (0777 & ~util._umask) == (0777 & mode):
78 78 mode = None
79 79 except OSError:
80 80 mode = None
81 81
82 82 self._createmode = mode
83 83 self.opener.createmode = mode
84 84 sopener = util.opener(self.spath)
85 85 sopener.createmode = mode
86 self.sopener = util.encodedopener(sopener, self.encodefn)
86 self.sopener = store.encodedopener(sopener, self.encodefn)
87 87
88 88 self.ui = ui.ui(parentui=parentui)
89 89 try:
90 90 self.ui.readconfig(self.join("hgrc"), self.root)
91 91 extensions.loadall(self.ui)
92 92 except IOError:
93 93 pass
94 94
95 95 self.tagscache = None
96 96 self._tagstypecache = None
97 97 self.branchcache = None
98 98 self._ubranchcache = None # UTF-8 version of branchcache
99 99 self._branchcachetip = None
100 100 self.nodetagscache = None
101 101 self.filterpats = {}
102 102 self._datafilters = {}
103 103 self._transref = self._lockref = self._wlockref = None
104 104
105 105 def __getattr__(self, name):
106 106 if name == 'changelog':
107 107 self.changelog = changelog.changelog(self.sopener)
108 108 self.sopener.defversion = self.changelog.version
109 109 return self.changelog
110 110 if name == 'manifest':
111 111 self.changelog
112 112 self.manifest = manifest.manifest(self.sopener)
113 113 return self.manifest
114 114 if name == 'dirstate':
115 115 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
116 116 return self.dirstate
117 117 else:
118 118 raise AttributeError, name
119 119
120 120 def __getitem__(self, changeid):
121 121 if changeid == None:
122 122 return context.workingctx(self)
123 123 return context.changectx(self, changeid)
124 124
125 125 def __nonzero__(self):
126 126 return True
127 127
128 128 def __len__(self):
129 129 return len(self.changelog)
130 130
131 131 def __iter__(self):
132 132 for i in xrange(len(self)):
133 133 yield i
134 134
135 135 def url(self):
136 136 return 'file:' + self.root
137 137
138 138 def hook(self, name, throw=False, **args):
139 139 return hook.hook(self.ui, self, name, throw, **args)
140 140
141 141 tag_disallowed = ':\r\n'
142 142
143 143 def _tag(self, names, node, message, local, user, date, parent=None,
144 144 extra={}):
145 145 use_dirstate = parent is None
146 146
147 147 if isinstance(names, str):
148 148 allchars = names
149 149 names = (names,)
150 150 else:
151 151 allchars = ''.join(names)
152 152 for c in self.tag_disallowed:
153 153 if c in allchars:
154 154 raise util.Abort(_('%r cannot be used in a tag name') % c)
155 155
156 156 for name in names:
157 157 self.hook('pretag', throw=True, node=hex(node), tag=name,
158 158 local=local)
159 159
160 160 def writetags(fp, names, munge, prevtags):
161 161 fp.seek(0, 2)
162 162 if prevtags and prevtags[-1] != '\n':
163 163 fp.write('\n')
164 164 for name in names:
165 165 m = munge and munge(name) or name
166 166 if self._tagstypecache and name in self._tagstypecache:
167 167 old = self.tagscache.get(name, nullid)
168 168 fp.write('%s %s\n' % (hex(old), m))
169 169 fp.write('%s %s\n' % (hex(node), m))
170 170 fp.close()
171 171
172 172 prevtags = ''
173 173 if local:
174 174 try:
175 175 fp = self.opener('localtags', 'r+')
176 176 except IOError, err:
177 177 fp = self.opener('localtags', 'a')
178 178 else:
179 179 prevtags = fp.read()
180 180
181 181 # local tags are stored in the current charset
182 182 writetags(fp, names, None, prevtags)
183 183 for name in names:
184 184 self.hook('tag', node=hex(node), tag=name, local=local)
185 185 return
186 186
187 187 if use_dirstate:
188 188 try:
189 189 fp = self.wfile('.hgtags', 'rb+')
190 190 except IOError, err:
191 191 fp = self.wfile('.hgtags', 'ab')
192 192 else:
193 193 prevtags = fp.read()
194 194 else:
195 195 try:
196 196 prevtags = self.filectx('.hgtags', parent).data()
197 197 except revlog.LookupError:
198 198 pass
199 199 fp = self.wfile('.hgtags', 'wb')
200 200 if prevtags:
201 201 fp.write(prevtags)
202 202
203 203 # committed tags are stored in UTF-8
204 204 writetags(fp, names, util.fromlocal, prevtags)
205 205
206 206 if use_dirstate and '.hgtags' not in self.dirstate:
207 207 self.add(['.hgtags'])
208 208
209 209 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
210 210 extra=extra)
211 211
212 212 for name in names:
213 213 self.hook('tag', node=hex(node), tag=name, local=local)
214 214
215 215 return tagnode
216 216
217 217 def tag(self, names, node, message, local, user, date):
218 218 '''tag a revision with one or more symbolic names.
219 219
220 220 names is a list of strings or, when adding a single tag, names may be a
221 221 string.
222 222
223 223 if local is True, the tags are stored in a per-repository file.
224 224 otherwise, they are stored in the .hgtags file, and a new
225 225 changeset is committed with the change.
226 226
227 227 keyword arguments:
228 228
229 229 local: whether to store tags in non-version-controlled file
230 230 (default False)
231 231
232 232 message: commit message to use if committing
233 233
234 234 user: name of user to use if committing
235 235
236 236 date: date tuple to use if committing'''
237 237
238 238 for x in self.status()[:5]:
239 239 if '.hgtags' in x:
240 240 raise util.Abort(_('working copy of .hgtags is changed '
241 241 '(please commit .hgtags manually)'))
242 242
243 243 self._tag(names, node, message, local, user, date)
244 244
245 245 def tags(self):
246 246 '''return a mapping of tag to node'''
247 247 if self.tagscache:
248 248 return self.tagscache
249 249
250 250 globaltags = {}
251 251 tagtypes = {}
252 252
253 253 def readtags(lines, fn, tagtype):
254 254 filetags = {}
255 255 count = 0
256 256
257 257 def warn(msg):
258 258 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
259 259
260 260 for l in lines:
261 261 count += 1
262 262 if not l:
263 263 continue
264 264 s = l.split(" ", 1)
265 265 if len(s) != 2:
266 266 warn(_("cannot parse entry"))
267 267 continue
268 268 node, key = s
269 269 key = util.tolocal(key.strip()) # stored in UTF-8
270 270 try:
271 271 bin_n = bin(node)
272 272 except TypeError:
273 273 warn(_("node '%s' is not well formed") % node)
274 274 continue
275 275 if bin_n not in self.changelog.nodemap:
276 276 warn(_("tag '%s' refers to unknown node") % key)
277 277 continue
278 278
279 279 h = []
280 280 if key in filetags:
281 281 n, h = filetags[key]
282 282 h.append(n)
283 283 filetags[key] = (bin_n, h)
284 284
285 285 for k, nh in filetags.items():
286 286 if k not in globaltags:
287 287 globaltags[k] = nh
288 288 tagtypes[k] = tagtype
289 289 continue
290 290
291 291 # we prefer the global tag if:
292 292 # it supersedes us OR
293 293 # mutual supersedes and it has a higher rank
294 294 # otherwise we win because we're tip-most
295 295 an, ah = nh
296 296 bn, bh = globaltags[k]
297 297 if (bn != an and an in bh and
298 298 (bn not in ah or len(bh) > len(ah))):
299 299 an = bn
300 300 ah.extend([n for n in bh if n not in ah])
301 301 globaltags[k] = an, ah
302 302 tagtypes[k] = tagtype
303 303
304 304 # read the tags file from each head, ending with the tip
305 305 f = None
306 306 for rev, node, fnode in self._hgtagsnodes():
307 307 f = (f and f.filectx(fnode) or
308 308 self.filectx('.hgtags', fileid=fnode))
309 309 readtags(f.data().splitlines(), f, "global")
310 310
311 311 try:
312 312 data = util.fromlocal(self.opener("localtags").read())
313 313 # localtags are stored in the local character set
314 314 # while the internal tag table is stored in UTF-8
315 315 readtags(data.splitlines(), "localtags", "local")
316 316 except IOError:
317 317 pass
318 318
319 319 self.tagscache = {}
320 320 self._tagstypecache = {}
321 321 for k,nh in globaltags.items():
322 322 n = nh[0]
323 323 if n != nullid:
324 324 self.tagscache[k] = n
325 325 self._tagstypecache[k] = tagtypes[k]
326 326 self.tagscache['tip'] = self.changelog.tip()
327 327 return self.tagscache
328 328
329 329 def tagtype(self, tagname):
330 330 '''
331 331 return the type of the given tag. result can be:
332 332
333 333 'local' : a local tag
334 334 'global' : a global tag
335 335 None : tag does not exist
336 336 '''
337 337
338 338 self.tags()
339 339
340 340 return self._tagstypecache.get(tagname)
341 341
342 342 def _hgtagsnodes(self):
343 343 heads = self.heads()
344 344 heads.reverse()
345 345 last = {}
346 346 ret = []
347 347 for node in heads:
348 348 c = self[node]
349 349 rev = c.rev()
350 350 try:
351 351 fnode = c.filenode('.hgtags')
352 352 except revlog.LookupError:
353 353 continue
354 354 ret.append((rev, node, fnode))
355 355 if fnode in last:
356 356 ret[last[fnode]] = None
357 357 last[fnode] = len(ret) - 1
358 358 return [item for item in ret if item]
359 359
360 360 def tagslist(self):
361 361 '''return a list of tags ordered by revision'''
362 362 l = []
363 363 for t, n in self.tags().items():
364 364 try:
365 365 r = self.changelog.rev(n)
366 366 except:
367 367 r = -2 # sort to the beginning of the list if unknown
368 368 l.append((r, t, n))
369 369 return [(t, n) for r, t, n in util.sort(l)]
370 370
371 371 def nodetags(self, node):
372 372 '''return the tags associated with a node'''
373 373 if not self.nodetagscache:
374 374 self.nodetagscache = {}
375 375 for t, n in self.tags().items():
376 376 self.nodetagscache.setdefault(n, []).append(t)
377 377 return self.nodetagscache.get(node, [])
378 378
379 379 def _branchtags(self, partial, lrev):
380 380 tiprev = len(self) - 1
381 381 if lrev != tiprev:
382 382 self._updatebranchcache(partial, lrev+1, tiprev+1)
383 383 self._writebranchcache(partial, self.changelog.tip(), tiprev)
384 384
385 385 return partial
386 386
387 387 def branchtags(self):
388 388 tip = self.changelog.tip()
389 389 if self.branchcache is not None and self._branchcachetip == tip:
390 390 return self.branchcache
391 391
392 392 oldtip = self._branchcachetip
393 393 self._branchcachetip = tip
394 394 if self.branchcache is None:
395 395 self.branchcache = {} # avoid recursion in changectx
396 396 else:
397 397 self.branchcache.clear() # keep using the same dict
398 398 if oldtip is None or oldtip not in self.changelog.nodemap:
399 399 partial, last, lrev = self._readbranchcache()
400 400 else:
401 401 lrev = self.changelog.rev(oldtip)
402 402 partial = self._ubranchcache
403 403
404 404 self._branchtags(partial, lrev)
405 405
406 406 # the branch cache is stored on disk as UTF-8, but in the local
407 407 # charset internally
408 408 for k, v in partial.items():
409 409 self.branchcache[util.tolocal(k)] = v
410 410 self._ubranchcache = partial
411 411 return self.branchcache
412 412
413 413 def _readbranchcache(self):
414 414 partial = {}
415 415 try:
416 416 f = self.opener("branch.cache")
417 417 lines = f.read().split('\n')
418 418 f.close()
419 419 except (IOError, OSError):
420 420 return {}, nullid, nullrev
421 421
422 422 try:
423 423 last, lrev = lines.pop(0).split(" ", 1)
424 424 last, lrev = bin(last), int(lrev)
425 425 if lrev >= len(self) or self[lrev].node() != last:
426 426 # invalidate the cache
427 427 raise ValueError('invalidating branch cache (tip differs)')
428 428 for l in lines:
429 429 if not l: continue
430 430 node, label = l.split(" ", 1)
431 431 partial[label.strip()] = bin(node)
432 432 except (KeyboardInterrupt, util.SignalInterrupt):
433 433 raise
434 434 except Exception, inst:
435 435 if self.ui.debugflag:
436 436 self.ui.warn(str(inst), '\n')
437 437 partial, last, lrev = {}, nullid, nullrev
438 438 return partial, last, lrev
439 439
440 440 def _writebranchcache(self, branches, tip, tiprev):
441 441 try:
442 442 f = self.opener("branch.cache", "w", atomictemp=True)
443 443 f.write("%s %s\n" % (hex(tip), tiprev))
444 444 for label, node in branches.iteritems():
445 445 f.write("%s %s\n" % (hex(node), label))
446 446 f.rename()
447 447 except (IOError, OSError):
448 448 pass
449 449
450 450 def _updatebranchcache(self, partial, start, end):
451 451 for r in xrange(start, end):
452 452 c = self[r]
453 453 b = c.branch()
454 454 partial[b] = c.node()
455 455
456 456 def lookup(self, key):
457 457 if key == '.':
458 458 return self.dirstate.parents()[0]
459 459 elif key == 'null':
460 460 return nullid
461 461 n = self.changelog._match(key)
462 462 if n:
463 463 return n
464 464 if key in self.tags():
465 465 return self.tags()[key]
466 466 if key in self.branchtags():
467 467 return self.branchtags()[key]
468 468 n = self.changelog._partialmatch(key)
469 469 if n:
470 470 return n
471 471 try:
472 472 if len(key) == 20:
473 473 key = hex(key)
474 474 except:
475 475 pass
476 476 raise repo.RepoError(_("unknown revision '%s'") % key)
477 477
478 478 def local(self):
479 479 return True
480 480
481 481 def join(self, f):
482 482 return os.path.join(self.path, f)
483 483
484 484 def sjoin(self, f):
485 485 f = self.encodefn(f)
486 486 return os.path.join(self.spath, f)
487 487
488 488 def wjoin(self, f):
489 489 return os.path.join(self.root, f)
490 490
491 491 def rjoin(self, f):
492 492 return os.path.join(self.root, util.pconvert(f))
493 493
494 494 def file(self, f):
495 495 if f[0] == '/':
496 496 f = f[1:]
497 497 return filelog.filelog(self.sopener, f)
498 498
499 499 def changectx(self, changeid):
500 500 return self[changeid]
501 501
502 502 def parents(self, changeid=None):
503 503 '''get list of changectxs for parents of changeid'''
504 504 return self[changeid].parents()
505 505
506 506 def filectx(self, path, changeid=None, fileid=None):
507 507 """changeid can be a changeset revision, node, or tag.
508 508 fileid can be a file revision or node."""
509 509 return context.filectx(self, path, changeid, fileid)
510 510
511 511 def getcwd(self):
512 512 return self.dirstate.getcwd()
513 513
514 514 def pathto(self, f, cwd=None):
515 515 return self.dirstate.pathto(f, cwd)
516 516
517 517 def wfile(self, f, mode='r'):
518 518 return self.wopener(f, mode)
519 519
520 520 def _link(self, f):
521 521 return os.path.islink(self.wjoin(f))
522 522
523 523 def _filter(self, filter, filename, data):
524 524 if filter not in self.filterpats:
525 525 l = []
526 526 for pat, cmd in self.ui.configitems(filter):
527 527 mf = util.matcher(self.root, "", [pat], [], [])[1]
528 528 fn = None
529 529 params = cmd
530 530 for name, filterfn in self._datafilters.iteritems():
531 531 if cmd.startswith(name):
532 532 fn = filterfn
533 533 params = cmd[len(name):].lstrip()
534 534 break
535 535 if not fn:
536 536 fn = lambda s, c, **kwargs: util.filter(s, c)
537 537 # Wrap old filters not supporting keyword arguments
538 538 if not inspect.getargspec(fn)[2]:
539 539 oldfn = fn
540 540 fn = lambda s, c, **kwargs: oldfn(s, c)
541 541 l.append((mf, fn, params))
542 542 self.filterpats[filter] = l
543 543
544 544 for mf, fn, cmd in self.filterpats[filter]:
545 545 if mf(filename):
546 546 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
547 547 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
548 548 break
549 549
550 550 return data
551 551
552 552 def adddatafilter(self, name, filter):
553 553 self._datafilters[name] = filter
554 554
555 555 def wread(self, filename):
556 556 if self._link(filename):
557 557 data = os.readlink(self.wjoin(filename))
558 558 else:
559 559 data = self.wopener(filename, 'r').read()
560 560 return self._filter("encode", filename, data)
561 561
562 562 def wwrite(self, filename, data, flags):
563 563 data = self._filter("decode", filename, data)
564 564 try:
565 565 os.unlink(self.wjoin(filename))
566 566 except OSError:
567 567 pass
568 568 self.wopener(filename, 'w').write(data)
569 569 util.set_flags(self.wjoin(filename), flags)
570 570
571 571 def wwritedata(self, filename, data):
572 572 return self._filter("decode", filename, data)
573 573
574 574 def transaction(self):
575 575 if self._transref and self._transref():
576 576 return self._transref().nest()
577 577
578 578 # abort here if the journal already exists
579 579 if os.path.exists(self.sjoin("journal")):
580 580 raise repo.RepoError(_("journal already exists - run hg recover"))
581 581
582 582 # save dirstate for rollback
583 583 try:
584 584 ds = self.opener("dirstate").read()
585 585 except IOError:
586 586 ds = ""
587 587 self.opener("journal.dirstate", "w").write(ds)
588 588 self.opener("journal.branch", "w").write(self.dirstate.branch())
589 589
590 590 renames = [(self.sjoin("journal"), self.sjoin("undo")),
591 591 (self.join("journal.dirstate"), self.join("undo.dirstate")),
592 592 (self.join("journal.branch"), self.join("undo.branch"))]
593 593 tr = transaction.transaction(self.ui.warn, self.sopener,
594 594 self.sjoin("journal"),
595 595 aftertrans(renames),
596 596 self._createmode)
597 597 self._transref = weakref.ref(tr)
598 598 return tr
599 599
600 600 def recover(self):
601 601 l = self.lock()
602 602 try:
603 603 if os.path.exists(self.sjoin("journal")):
604 604 self.ui.status(_("rolling back interrupted transaction\n"))
605 605 transaction.rollback(self.sopener, self.sjoin("journal"))
606 606 self.invalidate()
607 607 return True
608 608 else:
609 609 self.ui.warn(_("no interrupted transaction available\n"))
610 610 return False
611 611 finally:
612 612 del l
613 613
614 614 def rollback(self):
615 615 wlock = lock = None
616 616 try:
617 617 wlock = self.wlock()
618 618 lock = self.lock()
619 619 if os.path.exists(self.sjoin("undo")):
620 620 self.ui.status(_("rolling back last transaction\n"))
621 621 transaction.rollback(self.sopener, self.sjoin("undo"))
622 622 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
623 623 try:
624 624 branch = self.opener("undo.branch").read()
625 625 self.dirstate.setbranch(branch)
626 626 except IOError:
627 627 self.ui.warn(_("Named branch could not be reset, "
628 628 "current branch still is: %s\n")
629 629 % util.tolocal(self.dirstate.branch()))
630 630 self.invalidate()
631 631 self.dirstate.invalidate()
632 632 else:
633 633 self.ui.warn(_("no rollback information available\n"))
634 634 finally:
635 635 del lock, wlock
636 636
637 637 def invalidate(self):
638 638 for a in "changelog manifest".split():
639 639 if a in self.__dict__:
640 640 delattr(self, a)
641 641 self.tagscache = None
642 642 self._tagstypecache = None
643 643 self.nodetagscache = None
644 644 self.branchcache = None
645 645 self._ubranchcache = None
646 646 self._branchcachetip = None
647 647
648 648 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
649 649 try:
650 650 l = lock.lock(lockname, 0, releasefn, desc=desc)
651 651 except lock.LockHeld, inst:
652 652 if not wait:
653 653 raise
654 654 self.ui.warn(_("waiting for lock on %s held by %r\n") %
655 655 (desc, inst.locker))
656 656 # default to 600 seconds timeout
657 657 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
658 658 releasefn, desc=desc)
659 659 if acquirefn:
660 660 acquirefn()
661 661 return l
662 662
663 663 def lock(self, wait=True):
664 664 if self._lockref and self._lockref():
665 665 return self._lockref()
666 666
667 667 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
668 668 _('repository %s') % self.origroot)
669 669 self._lockref = weakref.ref(l)
670 670 return l
671 671
672 672 def wlock(self, wait=True):
673 673 if self._wlockref and self._wlockref():
674 674 return self._wlockref()
675 675
676 676 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
677 677 self.dirstate.invalidate, _('working directory of %s') %
678 678 self.origroot)
679 679 self._wlockref = weakref.ref(l)
680 680 return l
681 681
682 682 def filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
683 683 """
684 684 commit an individual file as part of a larger transaction
685 685 """
686 686
687 687 fn = fctx.path()
688 688 t = fctx.data()
689 689 fl = self.file(fn)
690 690 fp1 = manifest1.get(fn, nullid)
691 691 fp2 = manifest2.get(fn, nullid)
692 692
693 693 meta = {}
694 694 cp = fctx.renamed()
695 695 if cp and cp[0] != fn:
696 696 cp = cp[0]
697 697 # Mark the new revision of this file as a copy of another
698 698 # file. This copy data will effectively act as a parent
699 699 # of this new revision. If this is a merge, the first
700 700 # parent will be the nullid (meaning "look up the copy data")
701 701 # and the second one will be the other parent. For example:
702 702 #
703 703 # 0 --- 1 --- 3 rev1 changes file foo
704 704 # \ / rev2 renames foo to bar and changes it
705 705 # \- 2 -/ rev3 should have bar with all changes and
706 706 # should record that bar descends from
707 707 # bar in rev2 and foo in rev1
708 708 #
709 709 # this allows this merge to succeed:
710 710 #
711 711 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
712 712 # \ / merging rev3 and rev4 should use bar@rev2
713 713 # \- 2 --- 4 as the merge base
714 714 #
715 715 meta["copy"] = cp
716 716 if not manifest2: # not a branch merge
717 717 meta["copyrev"] = hex(manifest1[cp])
718 718 fp2 = nullid
719 719 elif fp2 != nullid: # copied on remote side
720 720 meta["copyrev"] = hex(manifest1[cp])
721 721 elif fp1 != nullid: # copied on local side, reversed
722 722 meta["copyrev"] = hex(manifest2[cp])
723 723 fp2 = fp1
724 724 elif cp in manifest2: # directory rename on local side
725 725 meta["copyrev"] = hex(manifest2[cp])
726 726 else: # directory rename on remote side
727 727 meta["copyrev"] = hex(manifest1[cp])
728 728 self.ui.debug(_(" %s: copy %s:%s\n") %
729 729 (fn, cp, meta["copyrev"]))
730 730 fp1 = nullid
731 731 elif fp2 != nullid:
732 732 # is one parent an ancestor of the other?
733 733 fpa = fl.ancestor(fp1, fp2)
734 734 if fpa == fp1:
735 735 fp1, fp2 = fp2, nullid
736 736 elif fpa == fp2:
737 737 fp2 = nullid
738 738
739 739 # is the file unmodified from the parent? report existing entry
740 740 if fp2 == nullid and not fl.cmp(fp1, t) and not meta:
741 741 return fp1
742 742
743 743 changelist.append(fn)
744 744 return fl.add(t, meta, tr, linkrev, fp1, fp2)
745 745
746 746 def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}):
747 747 if p1 is None:
748 748 p1, p2 = self.dirstate.parents()
749 749 return self.commit(files=files, text=text, user=user, date=date,
750 750 p1=p1, p2=p2, extra=extra, empty_ok=True)
751 751
752 752 def commit(self, files=None, text="", user=None, date=None,
753 753 match=None, force=False, force_editor=False,
754 754 p1=None, p2=None, extra={}, empty_ok=False):
755 755 wlock = lock = None
756 756 if files:
757 757 files = util.unique(files)
758 758 try:
759 759 wlock = self.wlock()
760 760 lock = self.lock()
761 761 use_dirstate = (p1 is None) # not rawcommit
762 762
763 763 if use_dirstate:
764 764 p1, p2 = self.dirstate.parents()
765 765 update_dirstate = True
766 766
767 767 if (not force and p2 != nullid and
768 768 (match and (match.files() or match.anypats()))):
769 769 raise util.Abort(_('cannot partially commit a merge '
770 770 '(do not specify files or patterns)'))
771 771
772 772 if files:
773 773 modified, removed = [], []
774 774 for f in files:
775 775 s = self.dirstate[f]
776 776 if s in 'nma':
777 777 modified.append(f)
778 778 elif s == 'r':
779 779 removed.append(f)
780 780 else:
781 781 self.ui.warn(_("%s not tracked!\n") % f)
782 782 changes = [modified, [], removed, [], []]
783 783 else:
784 784 changes = self.status(match=match)
785 785 else:
786 786 p1, p2 = p1, p2 or nullid
787 787 update_dirstate = (self.dirstate.parents()[0] == p1)
788 788 changes = [files, [], [], [], []]
789 789
790 790 wctx = context.workingctx(self, (p1, p2), text, user, date,
791 791 extra, changes)
792 792 return self._commitctx(wctx, force, force_editor, empty_ok,
793 793 use_dirstate, update_dirstate)
794 794 finally:
795 795 del lock, wlock
796 796
797 797 def commitctx(self, ctx):
798 798 wlock = lock = None
799 799 try:
800 800 wlock = self.wlock()
801 801 lock = self.lock()
802 802 return self._commitctx(ctx, force=True, force_editor=False,
803 803 empty_ok=True, use_dirstate=False,
804 804 update_dirstate=False)
805 805 finally:
806 806 del lock, wlock
807 807
808 808 def _commitctx(self, wctx, force=False, force_editor=False, empty_ok=False,
809 809 use_dirstate=True, update_dirstate=True):
810 810 tr = None
811 811 valid = 0 # don't save the dirstate if this isn't set
812 812 try:
813 813 commit = util.sort(wctx.modified() + wctx.added())
814 814 remove = wctx.removed()
815 815 extra = wctx.extra().copy()
816 816 branchname = extra['branch']
817 817 user = wctx.user()
818 818 text = wctx.description()
819 819
820 820 p1, p2 = [p.node() for p in wctx.parents()]
821 821 c1 = self.changelog.read(p1)
822 822 c2 = self.changelog.read(p2)
823 823 m1 = self.manifest.read(c1[0]).copy()
824 824 m2 = self.manifest.read(c2[0])
825 825
826 826 if use_dirstate:
827 827 oldname = c1[5].get("branch") # stored in UTF-8
828 828 if (not commit and not remove and not force and p2 == nullid
829 829 and branchname == oldname):
830 830 self.ui.status(_("nothing changed\n"))
831 831 return None
832 832
833 833 xp1 = hex(p1)
834 834 if p2 == nullid: xp2 = ''
835 835 else: xp2 = hex(p2)
836 836
837 837 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
838 838
839 839 tr = self.transaction()
840 840 trp = weakref.proxy(tr)
841 841
842 842 # check in files
843 843 new = {}
844 844 changed = []
845 845 linkrev = len(self)
846 846 for f in commit:
847 847 self.ui.note(f + "\n")
848 848 try:
849 849 fctx = wctx.filectx(f)
850 850 newflags = fctx.flags()
851 851 new[f] = self.filecommit(fctx, m1, m2, linkrev, trp, changed)
852 852 if ((not changed or changed[-1] != f) and
853 853 m2.get(f) != new[f]):
854 854 # mention the file in the changelog if some
855 855 # flag changed, even if there was no content
856 856 # change.
857 857 if m1.flags(f) != newflags:
858 858 changed.append(f)
859 859 m1.set(f, newflags)
860 860 if use_dirstate:
861 861 self.dirstate.normal(f)
862 862
863 863 except (OSError, IOError):
864 864 if use_dirstate:
865 865 self.ui.warn(_("trouble committing %s!\n") % f)
866 866 raise
867 867 else:
868 868 remove.append(f)
869 869
870 870 # update manifest
871 871 m1.update(new)
872 872 removed = []
873 873
874 874 for f in util.sort(remove):
875 875 if f in m1:
876 876 del m1[f]
877 877 removed.append(f)
878 878 elif f in m2:
879 879 removed.append(f)
880 880 mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
881 881 (new, removed))
882 882
883 883 # add changeset
884 884 if (not empty_ok and not text) or force_editor:
885 885 edittext = []
886 886 if text:
887 887 edittext.append(text)
888 888 edittext.append("")
889 889 edittext.append(_("HG: Enter commit message."
890 890 " Lines beginning with 'HG:' are removed."))
891 891 edittext.append("HG: --")
892 892 edittext.append("HG: user: %s" % user)
893 893 if p2 != nullid:
894 894 edittext.append("HG: branch merge")
895 895 if branchname:
896 896 edittext.append("HG: branch '%s'" % util.tolocal(branchname))
897 897 edittext.extend(["HG: changed %s" % f for f in changed])
898 898 edittext.extend(["HG: removed %s" % f for f in removed])
899 899 if not changed and not remove:
900 900 edittext.append("HG: no files changed")
901 901 edittext.append("")
902 902 # run editor in the repository root
903 903 olddir = os.getcwd()
904 904 os.chdir(self.root)
905 905 text = self.ui.edit("\n".join(edittext), user)
906 906 os.chdir(olddir)
907 907
908 908 lines = [line.rstrip() for line in text.rstrip().splitlines()]
909 909 while lines and not lines[0]:
910 910 del lines[0]
911 911 if not lines and use_dirstate:
912 912 raise util.Abort(_("empty commit message"))
913 913 text = '\n'.join(lines)
914 914
915 915 n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
916 916 user, wctx.date(), extra)
917 917 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
918 918 parent2=xp2)
919 919 tr.close()
920 920
921 921 if self.branchcache:
922 922 self.branchtags()
923 923
924 924 if use_dirstate or update_dirstate:
925 925 self.dirstate.setparents(n)
926 926 if use_dirstate:
927 927 for f in removed:
928 928 self.dirstate.forget(f)
929 929 valid = 1 # our dirstate updates are complete
930 930
931 931 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
932 932 return n
933 933 finally:
934 934 if not valid: # don't save our updated dirstate
935 935 self.dirstate.invalidate()
936 936 del tr
937 937
938 938 def walk(self, match, node=None):
939 939 '''
940 940 walk recursively through the directory tree or a given
941 941 changeset, finding all files matched by the match
942 942 function
943 943 '''
944 944 return self[node].walk(match)
945 945
946 946 def status(self, node1='.', node2=None, match=None,
947 947 ignored=False, clean=False, unknown=False):
948 948 """return status of files between two nodes or node and working directory
949 949
950 950 If node1 is None, use the first dirstate parent instead.
951 951 If node2 is None, compare node1 with working directory.
952 952 """
953 953
954 954 def mfmatches(ctx):
955 955 mf = ctx.manifest().copy()
956 956 for fn in mf.keys():
957 957 if not match(fn):
958 958 del mf[fn]
959 959 return mf
960 960
961 961 ctx1 = self[node1]
962 962 ctx2 = self[node2]
963 963 working = ctx2 == self[None]
964 964 parentworking = working and ctx1 == self['.']
965 965 match = match or match_.always(self.root, self.getcwd())
966 966 listignored, listclean, listunknown = ignored, clean, unknown
967 967
968 968 if working: # we need to scan the working dir
969 969 s = self.dirstate.status(match, listignored, listclean, listunknown)
970 970 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
971 971
972 972 # check for any possibly clean files
973 973 if parentworking and cmp:
974 974 fixup = []
975 975 # do a full compare of any files that might have changed
976 976 for f in cmp:
977 977 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
978 978 or ctx1[f].cmp(ctx2[f].data())):
979 979 modified.append(f)
980 980 else:
981 981 fixup.append(f)
982 982
983 983 if listclean:
984 984 clean += fixup
985 985
986 986 # update dirstate for files that are actually clean
987 987 if fixup:
988 988 wlock = None
989 989 try:
990 990 try:
991 991 wlock = self.wlock(False)
992 992 for f in fixup:
993 993 self.dirstate.normal(f)
994 994 except lock.LockException:
995 995 pass
996 996 finally:
997 997 del wlock
998 998
999 999 if not parentworking:
1000 1000 mf1 = mfmatches(ctx1)
1001 1001 if working:
1002 1002 # we are comparing working dir against non-parent
1003 1003 # generate a pseudo-manifest for the working dir
1004 1004 mf2 = mfmatches(self['.'])
1005 1005 for f in cmp + modified + added:
1006 1006 mf2[f] = None
1007 1007 mf2.set(f, ctx2.flags(f))
1008 1008 for f in removed:
1009 1009 if f in mf2:
1010 1010 del mf2[f]
1011 1011 else:
1012 1012 # we are comparing two revisions
1013 1013 deleted, unknown, ignored = [], [], []
1014 1014 mf2 = mfmatches(ctx2)
1015 1015
1016 1016 modified, added, clean = [], [], []
1017 1017 for fn in mf2:
1018 1018 if fn in mf1:
1019 1019 if (mf1.flags(fn) != mf2.flags(fn) or
1020 1020 (mf1[fn] != mf2[fn] and
1021 1021 (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
1022 1022 modified.append(fn)
1023 1023 elif listclean:
1024 1024 clean.append(fn)
1025 1025 del mf1[fn]
1026 1026 else:
1027 1027 added.append(fn)
1028 1028 removed = mf1.keys()
1029 1029
1030 1030 r = modified, added, removed, deleted, unknown, ignored, clean
1031 1031 [l.sort() for l in r]
1032 1032 return r
1033 1033
1034 1034 def add(self, list):
1035 1035 wlock = self.wlock()
1036 1036 try:
1037 1037 rejected = []
1038 1038 for f in list:
1039 1039 p = self.wjoin(f)
1040 1040 try:
1041 1041 st = os.lstat(p)
1042 1042 except:
1043 1043 self.ui.warn(_("%s does not exist!\n") % f)
1044 1044 rejected.append(f)
1045 1045 continue
1046 1046 if st.st_size > 10000000:
1047 1047 self.ui.warn(_("%s: files over 10MB may cause memory and"
1048 1048 " performance problems\n"
1049 1049 "(use 'hg revert %s' to unadd the file)\n")
1050 1050 % (f, f))
1051 1051 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1052 1052 self.ui.warn(_("%s not added: only files and symlinks "
1053 1053 "supported currently\n") % f)
1054 1054 rejected.append(p)
1055 1055 elif self.dirstate[f] in 'amn':
1056 1056 self.ui.warn(_("%s already tracked!\n") % f)
1057 1057 elif self.dirstate[f] == 'r':
1058 1058 self.dirstate.normallookup(f)
1059 1059 else:
1060 1060 self.dirstate.add(f)
1061 1061 return rejected
1062 1062 finally:
1063 1063 del wlock
1064 1064
1065 1065 def forget(self, list):
1066 1066 wlock = self.wlock()
1067 1067 try:
1068 1068 for f in list:
1069 1069 if self.dirstate[f] != 'a':
1070 1070 self.ui.warn(_("%s not added!\n") % f)
1071 1071 else:
1072 1072 self.dirstate.forget(f)
1073 1073 finally:
1074 1074 del wlock
1075 1075
1076 1076 def remove(self, list, unlink=False):
1077 1077 wlock = None
1078 1078 try:
1079 1079 if unlink:
1080 1080 for f in list:
1081 1081 try:
1082 1082 util.unlink(self.wjoin(f))
1083 1083 except OSError, inst:
1084 1084 if inst.errno != errno.ENOENT:
1085 1085 raise
1086 1086 wlock = self.wlock()
1087 1087 for f in list:
1088 1088 if unlink and os.path.exists(self.wjoin(f)):
1089 1089 self.ui.warn(_("%s still exists!\n") % f)
1090 1090 elif self.dirstate[f] == 'a':
1091 1091 self.dirstate.forget(f)
1092 1092 elif f not in self.dirstate:
1093 1093 self.ui.warn(_("%s not tracked!\n") % f)
1094 1094 else:
1095 1095 self.dirstate.remove(f)
1096 1096 finally:
1097 1097 del wlock
1098 1098
1099 1099 def undelete(self, list):
1100 1100 wlock = None
1101 1101 try:
1102 1102 manifests = [self.manifest.read(self.changelog.read(p)[0])
1103 1103 for p in self.dirstate.parents() if p != nullid]
1104 1104 wlock = self.wlock()
1105 1105 for f in list:
1106 1106 if self.dirstate[f] != 'r':
1107 1107 self.ui.warn("%s not removed!\n" % f)
1108 1108 else:
1109 1109 m = f in manifests[0] and manifests[0] or manifests[1]
1110 1110 t = self.file(f).read(m[f])
1111 1111 self.wwrite(f, t, m.flags(f))
1112 1112 self.dirstate.normal(f)
1113 1113 finally:
1114 1114 del wlock
1115 1115
1116 1116 def copy(self, source, dest):
1117 1117 wlock = None
1118 1118 try:
1119 1119 p = self.wjoin(dest)
1120 1120 if not (os.path.exists(p) or os.path.islink(p)):
1121 1121 self.ui.warn(_("%s does not exist!\n") % dest)
1122 1122 elif not (os.path.isfile(p) or os.path.islink(p)):
1123 1123 self.ui.warn(_("copy failed: %s is not a file or a "
1124 1124 "symbolic link\n") % dest)
1125 1125 else:
1126 1126 wlock = self.wlock()
1127 1127 if dest not in self.dirstate:
1128 1128 self.dirstate.add(dest)
1129 1129 self.dirstate.copy(source, dest)
1130 1130 finally:
1131 1131 del wlock
1132 1132
1133 1133 def heads(self, start=None):
1134 1134 heads = self.changelog.heads(start)
1135 1135 # sort the output in rev descending order
1136 1136 heads = [(-self.changelog.rev(h), h) for h in heads]
1137 1137 return [n for (r, n) in util.sort(heads)]
1138 1138
1139 1139 def branchheads(self, branch=None, start=None):
1140 1140 if branch is None:
1141 1141 branch = self[None].branch()
1142 1142 branches = self.branchtags()
1143 1143 if branch not in branches:
1144 1144 return []
1145 1145 # The basic algorithm is this:
1146 1146 #
1147 1147 # Start from the branch tip since there are no later revisions that can
1148 1148 # possibly be in this branch, and the tip is a guaranteed head.
1149 1149 #
1150 1150 # Remember the tip's parents as the first ancestors, since these by
1151 1151 # definition are not heads.
1152 1152 #
1153 1153 # Step backwards from the branch tip through all the revisions. We are
1154 1154 # guaranteed by the rules of Mercurial that we will now be visiting the
1155 1155 # nodes in reverse topological order (children before parents).
1156 1156 #
1157 1157 # If a revision is one of the ancestors of a head then we can toss it
1158 1158 # out of the ancestors set (we've already found it and won't be
1159 1159 # visiting it again) and put its parents in the ancestors set.
1160 1160 #
1161 1161 # Otherwise, if a revision is in the branch it's another head, since it
1162 1162 # wasn't in the ancestor list of an existing head. So add it to the
1163 1163 # head list, and add its parents to the ancestor list.
1164 1164 #
1165 1165 # If it is not in the branch ignore it.
1166 1166 #
1167 1167 # Once we have a list of heads, use nodesbetween to filter out all the
1168 1168 # heads that cannot be reached from startrev. There may be a more
1169 1169 # efficient way to do this as part of the previous algorithm.
1170 1170
1171 1171 set = util.set
1172 1172 heads = [self.changelog.rev(branches[branch])]
1173 1173 # Don't care if ancestors contains nullrev or not.
1174 1174 ancestors = set(self.changelog.parentrevs(heads[0]))
1175 1175 for rev in xrange(heads[0] - 1, nullrev, -1):
1176 1176 if rev in ancestors:
1177 1177 ancestors.update(self.changelog.parentrevs(rev))
1178 1178 ancestors.remove(rev)
1179 1179 elif self[rev].branch() == branch:
1180 1180 heads.append(rev)
1181 1181 ancestors.update(self.changelog.parentrevs(rev))
1182 1182 heads = [self.changelog.node(rev) for rev in heads]
1183 1183 if start is not None:
1184 1184 heads = self.changelog.nodesbetween([start], heads)[2]
1185 1185 return heads
1186 1186
1187 1187 def branches(self, nodes):
1188 1188 if not nodes:
1189 1189 nodes = [self.changelog.tip()]
1190 1190 b = []
1191 1191 for n in nodes:
1192 1192 t = n
1193 1193 while 1:
1194 1194 p = self.changelog.parents(n)
1195 1195 if p[1] != nullid or p[0] == nullid:
1196 1196 b.append((t, n, p[0], p[1]))
1197 1197 break
1198 1198 n = p[0]
1199 1199 return b
1200 1200
1201 1201 def between(self, pairs):
1202 1202 r = []
1203 1203
1204 1204 for top, bottom in pairs:
1205 1205 n, l, i = top, [], 0
1206 1206 f = 1
1207 1207
1208 1208 while n != bottom:
1209 1209 p = self.changelog.parents(n)[0]
1210 1210 if i == f:
1211 1211 l.append(n)
1212 1212 f = f * 2
1213 1213 n = p
1214 1214 i += 1
1215 1215
1216 1216 r.append(l)
1217 1217
1218 1218 return r
1219 1219
1220 1220 def findincoming(self, remote, base=None, heads=None, force=False):
1221 1221 """Return list of roots of the subsets of missing nodes from remote
1222 1222
1223 1223 If base dict is specified, assume that these nodes and their parents
1224 1224 exist on the remote side and that no child of a node of base exists
1225 1225 in both remote and self.
1226 1226 Furthermore base will be updated to include the nodes that exist
1227 1227 in self and remote but no children exist in self and remote.
1228 1228 If a list of heads is specified, return only nodes which are heads
1229 1229 or ancestors of these heads.
1230 1230
1231 1231 All the ancestors of base are in self and in remote.
1232 1232 All the descendants of the list returned are missing in self.
1233 1233 (and so we know that the rest of the nodes are missing in remote, see
1234 1234 outgoing)
1235 1235 """
1236 1236 m = self.changelog.nodemap
1237 1237 search = []
1238 1238 fetch = {}
1239 1239 seen = {}
1240 1240 seenbranch = {}
1241 1241 if base == None:
1242 1242 base = {}
1243 1243
1244 1244 if not heads:
1245 1245 heads = remote.heads()
1246 1246
1247 1247 if self.changelog.tip() == nullid:
1248 1248 base[nullid] = 1
1249 1249 if heads != [nullid]:
1250 1250 return [nullid]
1251 1251 return []
1252 1252
1253 1253 # assume we're closer to the tip than the root
1254 1254 # and start by examining the heads
1255 1255 self.ui.status(_("searching for changes\n"))
1256 1256
1257 1257 unknown = []
1258 1258 for h in heads:
1259 1259 if h not in m:
1260 1260 unknown.append(h)
1261 1261 else:
1262 1262 base[h] = 1
1263 1263
1264 1264 if not unknown:
1265 1265 return []
1266 1266
1267 1267 req = dict.fromkeys(unknown)
1268 1268 reqcnt = 0
1269 1269
1270 1270 # search through remote branches
1271 1271 # a 'branch' here is a linear segment of history, with four parts:
1272 1272 # head, root, first parent, second parent
1273 1273 # (a branch always has two parents (or none) by definition)
1274 1274 unknown = remote.branches(unknown)
1275 1275 while unknown:
1276 1276 r = []
1277 1277 while unknown:
1278 1278 n = unknown.pop(0)
1279 1279 if n[0] in seen:
1280 1280 continue
1281 1281
1282 1282 self.ui.debug(_("examining %s:%s\n")
1283 1283 % (short(n[0]), short(n[1])))
1284 1284 if n[0] == nullid: # found the end of the branch
1285 1285 pass
1286 1286 elif n in seenbranch:
1287 1287 self.ui.debug(_("branch already found\n"))
1288 1288 continue
1289 1289 elif n[1] and n[1] in m: # do we know the base?
1290 1290 self.ui.debug(_("found incomplete branch %s:%s\n")
1291 1291 % (short(n[0]), short(n[1])))
1292 1292 search.append(n) # schedule branch range for scanning
1293 1293 seenbranch[n] = 1
1294 1294 else:
1295 1295 if n[1] not in seen and n[1] not in fetch:
1296 1296 if n[2] in m and n[3] in m:
1297 1297 self.ui.debug(_("found new changeset %s\n") %
1298 1298 short(n[1]))
1299 1299 fetch[n[1]] = 1 # earliest unknown
1300 1300 for p in n[2:4]:
1301 1301 if p in m:
1302 1302 base[p] = 1 # latest known
1303 1303
1304 1304 for p in n[2:4]:
1305 1305 if p not in req and p not in m:
1306 1306 r.append(p)
1307 1307 req[p] = 1
1308 1308 seen[n[0]] = 1
1309 1309
1310 1310 if r:
1311 1311 reqcnt += 1
1312 1312 self.ui.debug(_("request %d: %s\n") %
1313 1313 (reqcnt, " ".join(map(short, r))))
1314 1314 for p in xrange(0, len(r), 10):
1315 1315 for b in remote.branches(r[p:p+10]):
1316 1316 self.ui.debug(_("received %s:%s\n") %
1317 1317 (short(b[0]), short(b[1])))
1318 1318 unknown.append(b)
1319 1319
1320 1320 # do binary search on the branches we found
1321 1321 while search:
1322 1322 n = search.pop(0)
1323 1323 reqcnt += 1
1324 1324 l = remote.between([(n[0], n[1])])[0]
1325 1325 l.append(n[1])
1326 1326 p = n[0]
1327 1327 f = 1
1328 1328 for i in l:
1329 1329 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1330 1330 if i in m:
1331 1331 if f <= 2:
1332 1332 self.ui.debug(_("found new branch changeset %s\n") %
1333 1333 short(p))
1334 1334 fetch[p] = 1
1335 1335 base[i] = 1
1336 1336 else:
1337 1337 self.ui.debug(_("narrowed branch search to %s:%s\n")
1338 1338 % (short(p), short(i)))
1339 1339 search.append((p, i))
1340 1340 break
1341 1341 p, f = i, f * 2
1342 1342
1343 1343 # sanity check our fetch list
1344 1344 for f in fetch.keys():
1345 1345 if f in m:
1346 1346 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1347 1347
1348 1348 if base.keys() == [nullid]:
1349 1349 if force:
1350 1350 self.ui.warn(_("warning: repository is unrelated\n"))
1351 1351 else:
1352 1352 raise util.Abort(_("repository is unrelated"))
1353 1353
1354 1354 self.ui.debug(_("found new changesets starting at ") +
1355 1355 " ".join([short(f) for f in fetch]) + "\n")
1356 1356
1357 1357 self.ui.debug(_("%d total queries\n") % reqcnt)
1358 1358
1359 1359 return fetch.keys()
1360 1360
1361 1361 def findoutgoing(self, remote, base=None, heads=None, force=False):
1362 1362 """Return list of nodes that are roots of subsets not in remote
1363 1363
1364 1364 If base dict is specified, assume that these nodes and their parents
1365 1365 exist on the remote side.
1366 1366 If a list of heads is specified, return only nodes which are heads
1367 1367 or ancestors of these heads, and return a second element which
1368 1368 contains all remote heads which get new children.
1369 1369 """
1370 1370 if base == None:
1371 1371 base = {}
1372 1372 self.findincoming(remote, base, heads, force=force)
1373 1373
1374 1374 self.ui.debug(_("common changesets up to ")
1375 1375 + " ".join(map(short, base.keys())) + "\n")
1376 1376
1377 1377 remain = dict.fromkeys(self.changelog.nodemap)
1378 1378
1379 1379 # prune everything remote has from the tree
1380 1380 del remain[nullid]
1381 1381 remove = base.keys()
1382 1382 while remove:
1383 1383 n = remove.pop(0)
1384 1384 if n in remain:
1385 1385 del remain[n]
1386 1386 for p in self.changelog.parents(n):
1387 1387 remove.append(p)
1388 1388
1389 1389 # find every node whose parents have been pruned
1390 1390 subset = []
1391 1391 # find every remote head that will get new children
1392 1392 updated_heads = {}
1393 1393 for n in remain:
1394 1394 p1, p2 = self.changelog.parents(n)
1395 1395 if p1 not in remain and p2 not in remain:
1396 1396 subset.append(n)
1397 1397 if heads:
1398 1398 if p1 in heads:
1399 1399 updated_heads[p1] = True
1400 1400 if p2 in heads:
1401 1401 updated_heads[p2] = True
1402 1402
1403 1403 # this is the set of all roots we have to push
1404 1404 if heads:
1405 1405 return subset, updated_heads.keys()
1406 1406 else:
1407 1407 return subset
1408 1408
1409 1409 def pull(self, remote, heads=None, force=False):
1410 1410 lock = self.lock()
1411 1411 try:
1412 1412 fetch = self.findincoming(remote, heads=heads, force=force)
1413 1413 if fetch == [nullid]:
1414 1414 self.ui.status(_("requesting all changes\n"))
1415 1415
1416 1416 if not fetch:
1417 1417 self.ui.status(_("no changes found\n"))
1418 1418 return 0
1419 1419
1420 1420 if heads is None:
1421 1421 cg = remote.changegroup(fetch, 'pull')
1422 1422 else:
1423 1423 if 'changegroupsubset' not in remote.capabilities:
1424 1424 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1425 1425 cg = remote.changegroupsubset(fetch, heads, 'pull')
1426 1426 return self.addchangegroup(cg, 'pull', remote.url())
1427 1427 finally:
1428 1428 del lock
1429 1429
1430 1430 def push(self, remote, force=False, revs=None):
1431 1431 # there are two ways to push to remote repo:
1432 1432 #
1433 1433 # addchangegroup assumes local user can lock remote
1434 1434 # repo (local filesystem, old ssh servers).
1435 1435 #
1436 1436 # unbundle assumes local user cannot lock remote repo (new ssh
1437 1437 # servers, http servers).
1438 1438
1439 1439 if remote.capable('unbundle'):
1440 1440 return self.push_unbundle(remote, force, revs)
1441 1441 return self.push_addchangegroup(remote, force, revs)
1442 1442
1443 1443 def prepush(self, remote, force, revs):
1444 1444 base = {}
1445 1445 remote_heads = remote.heads()
1446 1446 inc = self.findincoming(remote, base, remote_heads, force=force)
1447 1447
1448 1448 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1449 1449 if revs is not None:
1450 1450 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1451 1451 else:
1452 1452 bases, heads = update, self.changelog.heads()
1453 1453
1454 1454 if not bases:
1455 1455 self.ui.status(_("no changes found\n"))
1456 1456 return None, 1
1457 1457 elif not force:
1458 1458 # check if we're creating new remote heads
1459 1459 # to be a remote head after push, node must be either
1460 1460 # - unknown locally
1461 1461 # - a local outgoing head descended from update
1462 1462 # - a remote head that's known locally and not
1463 1463 # ancestral to an outgoing head
1464 1464
1465 1465 warn = 0
1466 1466
1467 1467 if remote_heads == [nullid]:
1468 1468 warn = 0
1469 1469 elif not revs and len(heads) > len(remote_heads):
1470 1470 warn = 1
1471 1471 else:
1472 1472 newheads = list(heads)
1473 1473 for r in remote_heads:
1474 1474 if r in self.changelog.nodemap:
1475 1475 desc = self.changelog.heads(r, heads)
1476 1476 l = [h for h in heads if h in desc]
1477 1477 if not l:
1478 1478 newheads.append(r)
1479 1479 else:
1480 1480 newheads.append(r)
1481 1481 if len(newheads) > len(remote_heads):
1482 1482 warn = 1
1483 1483
1484 1484 if warn:
1485 1485 self.ui.warn(_("abort: push creates new remote heads!\n"))
1486 1486 self.ui.status(_("(did you forget to merge?"
1487 1487 " use push -f to force)\n"))
1488 1488 return None, 0
1489 1489 elif inc:
1490 1490 self.ui.warn(_("note: unsynced remote changes!\n"))
1491 1491
1492 1492
1493 1493 if revs is None:
1494 1494 cg = self.changegroup(update, 'push')
1495 1495 else:
1496 1496 cg = self.changegroupsubset(update, revs, 'push')
1497 1497 return cg, remote_heads
1498 1498
1499 1499 def push_addchangegroup(self, remote, force, revs):
1500 1500 lock = remote.lock()
1501 1501 try:
1502 1502 ret = self.prepush(remote, force, revs)
1503 1503 if ret[0] is not None:
1504 1504 cg, remote_heads = ret
1505 1505 return remote.addchangegroup(cg, 'push', self.url())
1506 1506 return ret[1]
1507 1507 finally:
1508 1508 del lock
1509 1509
1510 1510 def push_unbundle(self, remote, force, revs):
1511 1511 # local repo finds heads on server, finds out what revs it
1512 1512 # must push. once revs transferred, if server finds it has
1513 1513 # different heads (someone else won commit/push race), server
1514 1514 # aborts.
1515 1515
1516 1516 ret = self.prepush(remote, force, revs)
1517 1517 if ret[0] is not None:
1518 1518 cg, remote_heads = ret
1519 1519 if force: remote_heads = ['force']
1520 1520 return remote.unbundle(cg, remote_heads, 'push')
1521 1521 return ret[1]
1522 1522
1523 1523 def changegroupinfo(self, nodes, source):
1524 1524 if self.ui.verbose or source == 'bundle':
1525 1525 self.ui.status(_("%d changesets found\n") % len(nodes))
1526 1526 if self.ui.debugflag:
1527 1527 self.ui.debug(_("List of changesets:\n"))
1528 1528 for node in nodes:
1529 1529 self.ui.debug("%s\n" % hex(node))
1530 1530
1531 1531 def changegroupsubset(self, bases, heads, source, extranodes=None):
1532 1532 """This function generates a changegroup consisting of all the nodes
1533 1533 that are descendants of any of the bases, and ancestors of any of
1534 1534 the heads.
1535 1535
1536 1536 It is fairly complex as determining which filenodes and which
1537 1537 manifest nodes need to be included for the changeset to be complete
1538 1538 is non-trivial.
1539 1539
1540 1540 Another wrinkle is doing the reverse, figuring out which changeset in
1541 1541 the changegroup a particular filenode or manifestnode belongs to.
1542 1542
1543 1543 The caller can specify some nodes that must be included in the
1544 1544 changegroup using the extranodes argument. It should be a dict
1545 1545 where the keys are the filenames (or 1 for the manifest), and the
1546 1546 values are lists of (node, linknode) tuples, where node is a wanted
1547 1547 node and linknode is the changelog node that should be transmitted as
1548 1548 the linkrev.
1549 1549 """
1550 1550
1551 1551 self.hook('preoutgoing', throw=True, source=source)
1552 1552
1553 1553 # Set up some initial variables
1554 1554 # Make it easy to refer to self.changelog
1555 1555 cl = self.changelog
1556 1556 # msng is short for missing - compute the list of changesets in this
1557 1557 # changegroup.
1558 1558 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1559 1559 self.changegroupinfo(msng_cl_lst, source)
1560 1560 # Some bases may turn out to be superfluous, and some heads may be
1561 1561 # too. nodesbetween will return the minimal set of bases and heads
1562 1562 # necessary to re-create the changegroup.
1563 1563
1564 1564 # Known heads are the list of heads that it is assumed the recipient
1565 1565 # of this changegroup will know about.
1566 1566 knownheads = {}
1567 1567 # We assume that all parents of bases are known heads.
1568 1568 for n in bases:
1569 1569 for p in cl.parents(n):
1570 1570 if p != nullid:
1571 1571 knownheads[p] = 1
1572 1572 knownheads = knownheads.keys()
1573 1573 if knownheads:
1574 1574 # Now that we know what heads are known, we can compute which
1575 1575 # changesets are known. The recipient must know about all
1576 1576 # changesets required to reach the known heads from the null
1577 1577 # changeset.
1578 1578 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1579 1579 junk = None
1580 1580 # Transform the list into an ersatz set.
1581 1581 has_cl_set = dict.fromkeys(has_cl_set)
1582 1582 else:
1583 1583 # If there were no known heads, the recipient cannot be assumed to
1584 1584 # know about any changesets.
1585 1585 has_cl_set = {}
1586 1586
1587 1587 # Make it easy to refer to self.manifest
1588 1588 mnfst = self.manifest
1589 1589 # We don't know which manifests are missing yet
1590 1590 msng_mnfst_set = {}
1591 1591 # Nor do we know which filenodes are missing.
1592 1592 msng_filenode_set = {}
1593 1593
1594 1594 junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
1595 1595 junk = None
1596 1596
1597 1597 # A changeset always belongs to itself, so the changenode lookup
1598 1598 # function for a changenode is identity.
1599 1599 def identity(x):
1600 1600 return x
1601 1601
1602 1602 # A function generating function. Sets up an environment for the
1603 1603 # inner function.
1604 1604 def cmp_by_rev_func(revlog):
1605 1605 # Compare two nodes by their revision number in the environment's
1606 1606 # revision history. Since the revision number both represents the
1607 1607 # most efficient order to read the nodes in, and represents a
1608 1608 # topological sorting of the nodes, this function is often useful.
1609 1609 def cmp_by_rev(a, b):
1610 1610 return cmp(revlog.rev(a), revlog.rev(b))
1611 1611 return cmp_by_rev
1612 1612
1613 1613 # If we determine that a particular file or manifest node must be a
1614 1614 # node that the recipient of the changegroup will already have, we can
1615 1615 # also assume the recipient will have all the parents. This function
1616 1616 # prunes them from the set of missing nodes.
1617 1617 def prune_parents(revlog, hasset, msngset):
1618 1618 haslst = hasset.keys()
1619 1619 haslst.sort(cmp_by_rev_func(revlog))
1620 1620 for node in haslst:
1621 1621 parentlst = [p for p in revlog.parents(node) if p != nullid]
1622 1622 while parentlst:
1623 1623 n = parentlst.pop()
1624 1624 if n not in hasset:
1625 1625 hasset[n] = 1
1626 1626 p = [p for p in revlog.parents(n) if p != nullid]
1627 1627 parentlst.extend(p)
1628 1628 for n in hasset:
1629 1629 msngset.pop(n, None)
1630 1630
1631 1631 # This is a function generating function used to set up an environment
1632 1632 # for the inner function to execute in.
1633 1633 def manifest_and_file_collector(changedfileset):
1634 1634 # This is an information gathering function that gathers
1635 1635 # information from each changeset node that goes out as part of
1636 1636 # the changegroup. The information gathered is a list of which
1637 1637 # manifest nodes are potentially required (the recipient may
1638 1638 # already have them) and the total list of all files that were
1639 1639 # changed in any changeset in the changegroup.
1640 1640 #
1641 1641 # We also remember the first changenode in which we saw each manifest
1642 1642 # referenced, so we can later determine which changenode 'owns'
1643 1643 # the manifest.
1644 1644 def collect_manifests_and_files(clnode):
1645 1645 c = cl.read(clnode)
1646 1646 for f in c[3]:
1647 1647 # This is to make sure we only have one instance of each
1648 1648 # filename string for each filename.
1649 1649 changedfileset.setdefault(f, f)
1650 1650 msng_mnfst_set.setdefault(c[0], clnode)
1651 1651 return collect_manifests_and_files
1652 1652
1653 1653 # Figure out which manifest nodes (of the ones we think might be part
1654 1654 # of the changegroup) the recipient must know about and remove them
1655 1655 # from the changegroup.
1656 1656 def prune_manifests():
1657 1657 has_mnfst_set = {}
1658 1658 for n in msng_mnfst_set:
1659 1659 # If a 'missing' manifest thinks it belongs to a changenode
1660 1660 # the recipient is assumed to have, obviously the recipient
1661 1661 # must have that manifest.
1662 1662 linknode = cl.node(mnfst.linkrev(n))
1663 1663 if linknode in has_cl_set:
1664 1664 has_mnfst_set[n] = 1
1665 1665 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1666 1666
1667 1667 # Use the information collected in collect_manifests_and_files to say
1668 1668 # which changenode any manifestnode belongs to.
1669 1669 def lookup_manifest_link(mnfstnode):
1670 1670 return msng_mnfst_set[mnfstnode]
1671 1671
1672 1672 # A function generating function that sets up the initial environment
1673 1673 # for the inner function.
1674 1674 def filenode_collector(changedfiles):
1675 1675 next_rev = [0]
1676 1676 # This gathers information from each manifestnode included in the
1677 1677 # changegroup about which filenodes the manifest node references
1678 1678 # so we can include those in the changegroup too.
1679 1679 #
1680 1680 # It also remembers which changenode each filenode belongs to. It
1681 1681 # does this by assuming a filenode belongs to the changenode that
1682 1682 # the first manifest referencing it belongs to.
1683 1683 def collect_msng_filenodes(mnfstnode):
1684 1684 r = mnfst.rev(mnfstnode)
1685 1685 if r == next_rev[0]:
1686 1686 # If the last rev we looked at was the one just previous,
1687 1687 # we only need to see a diff.
1688 1688 deltamf = mnfst.readdelta(mnfstnode)
1689 1689 # For each line in the delta
1690 1690 for f, fnode in deltamf.items():
1691 1691 f = changedfiles.get(f, None)
1692 1692 # And if the file is in the list of files we care
1693 1693 # about.
1694 1694 if f is not None:
1695 1695 # Get the changenode this manifest belongs to
1696 1696 clnode = msng_mnfst_set[mnfstnode]
1697 1697 # Create the set of filenodes for the file if
1698 1698 # there isn't one already.
1699 1699 ndset = msng_filenode_set.setdefault(f, {})
1700 1700 # And set the filenode's changelog node to the
1701 1701 # manifest's if it hasn't been set already.
1702 1702 ndset.setdefault(fnode, clnode)
1703 1703 else:
1704 1704 # Otherwise we need a full manifest.
1705 1705 m = mnfst.read(mnfstnode)
1706 1706 # For every file we care about.
1707 1707 for f in changedfiles:
1708 1708 fnode = m.get(f, None)
1709 1709 # If it's in the manifest
1710 1710 if fnode is not None:
1711 1711 # See comments above.
1712 1712 clnode = msng_mnfst_set[mnfstnode]
1713 1713 ndset = msng_filenode_set.setdefault(f, {})
1714 1714 ndset.setdefault(fnode, clnode)
1715 1715 # Remember the revision we hope to see next.
1716 1716 next_rev[0] = r + 1
1717 1717 return collect_msng_filenodes
1718 1718
1719 1719 # We have a list of filenodes we think we need for a file, let's remove
1720 1720 # all those we know the recipient must have.
1721 1721 def prune_filenodes(f, filerevlog):
1722 1722 msngset = msng_filenode_set[f]
1723 1723 hasset = {}
1724 1724 # If a 'missing' filenode thinks it belongs to a changenode we
1725 1725 # assume the recipient must have, then the recipient must have
1726 1726 # that filenode.
1727 1727 for n in msngset:
1728 1728 clnode = cl.node(filerevlog.linkrev(n))
1729 1729 if clnode in has_cl_set:
1730 1730 hasset[n] = 1
1731 1731 prune_parents(filerevlog, hasset, msngset)
1732 1732
1733 1733 # A function generating function that sets up a context for the
1734 1734 # inner function.
1735 1735 def lookup_filenode_link_func(fname):
1736 1736 msngset = msng_filenode_set[fname]
1737 1737 # Lookup the changenode the filenode belongs to.
1738 1738 def lookup_filenode_link(fnode):
1739 1739 return msngset[fnode]
1740 1740 return lookup_filenode_link
1741 1741
1742 1742 # Add the nodes that were explicitly requested.
1743 1743 def add_extra_nodes(name, nodes):
1744 1744 if not extranodes or name not in extranodes:
1745 1745 return
1746 1746
1747 1747 for node, linknode in extranodes[name]:
1748 1748 if node not in nodes:
1749 1749 nodes[node] = linknode
1750 1750
1751 1751 # Now that we have all these utility functions to help out and
1752 1752 # logically divide up the task, generate the group.
1753 1753 def gengroup():
1754 1754 # The set of changed files starts empty.
1755 1755 changedfiles = {}
1756 1756 # Create a changenode group generator that will call our functions
1757 1757 # back to lookup the owning changenode and collect information.
1758 1758 group = cl.group(msng_cl_lst, identity,
1759 1759 manifest_and_file_collector(changedfiles))
1760 1760 for chnk in group:
1761 1761 yield chnk
1762 1762
1763 1763 # The list of manifests has been collected by the generator
1764 1764 # calling our functions back.
1765 1765 prune_manifests()
1766 1766 add_extra_nodes(1, msng_mnfst_set)
1767 1767 msng_mnfst_lst = msng_mnfst_set.keys()
1768 1768 # Sort the manifestnodes by revision number.
1769 1769 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1770 1770 # Create a generator for the manifestnodes that calls our lookup
1771 1771 # and data collection functions back.
1772 1772 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1773 1773 filenode_collector(changedfiles))
1774 1774 for chnk in group:
1775 1775 yield chnk
1776 1776
1777 1777 # These are no longer needed, dereference and toss the memory for
1778 1778 # them.
1779 1779 msng_mnfst_lst = None
1780 1780 msng_mnfst_set.clear()
1781 1781
1782 1782 if extranodes:
1783 1783 for fname in extranodes:
1784 1784 if isinstance(fname, int):
1785 1785 continue
1786 1786 add_extra_nodes(fname,
1787 1787 msng_filenode_set.setdefault(fname, {}))
1788 1788 changedfiles[fname] = 1
1789 1789 # Go through all our files in order sorted by name.
1790 1790 for fname in util.sort(changedfiles):
1791 1791 filerevlog = self.file(fname)
1792 1792 if not len(filerevlog):
1793 1793 raise util.Abort(_("empty or missing revlog for %s") % fname)
1794 1794 # Toss out the filenodes that the recipient isn't really
1795 1795 # missing.
1796 1796 if fname in msng_filenode_set:
1797 1797 prune_filenodes(fname, filerevlog)
1798 1798 msng_filenode_lst = msng_filenode_set[fname].keys()
1799 1799 else:
1800 1800 msng_filenode_lst = []
1801 1801 # If any filenodes are left, generate the group for them,
1802 1802 # otherwise don't bother.
1803 1803 if len(msng_filenode_lst) > 0:
1804 1804 yield changegroup.chunkheader(len(fname))
1805 1805 yield fname
1806 1806 # Sort the filenodes by their revision number.
1807 1807 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1808 1808 # Create a group generator and only pass in a changenode
1809 1809 # lookup function as we need to collect no information
1810 1810 # from filenodes.
1811 1811 group = filerevlog.group(msng_filenode_lst,
1812 1812 lookup_filenode_link_func(fname))
1813 1813 for chnk in group:
1814 1814 yield chnk
1815 1815 if fname in msng_filenode_set:
1816 1816 # Don't need this anymore, toss it to free memory.
1817 1817 del msng_filenode_set[fname]
1818 1818 # Signal that no more groups are left.
1819 1819 yield changegroup.closechunk()
1820 1820
1821 1821 if msng_cl_lst:
1822 1822 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1823 1823
1824 1824 return util.chunkbuffer(gengroup())
1825 1825
1826 1826 def changegroup(self, basenodes, source):
1827 1827 """Generate a changegroup of all nodes that we have that a recipient
1828 1828 doesn't.
1829 1829
1830 1830 This is much easier than the previous function as we can assume that
1831 1831 the recipient has any changenode we aren't sending them."""
1832 1832
1833 1833 self.hook('preoutgoing', throw=True, source=source)
1834 1834
1835 1835 cl = self.changelog
1836 1836 nodes = cl.nodesbetween(basenodes, None)[0]
1837 1837 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1838 1838 self.changegroupinfo(nodes, source)
1839 1839
1840 1840 def identity(x):
1841 1841 return x
1842 1842
1843 1843 def gennodelst(log):
1844 1844 for r in log:
1845 1845 n = log.node(r)
1846 1846 if log.linkrev(n) in revset:
1847 1847 yield n
1848 1848
1849 1849 def changed_file_collector(changedfileset):
1850 1850 def collect_changed_files(clnode):
1851 1851 c = cl.read(clnode)
1852 1852 for fname in c[3]:
1853 1853 changedfileset[fname] = 1
1854 1854 return collect_changed_files
1855 1855
1856 1856 def lookuprevlink_func(revlog):
1857 1857 def lookuprevlink(n):
1858 1858 return cl.node(revlog.linkrev(n))
1859 1859 return lookuprevlink
1860 1860
1861 1861 def gengroup():
1862 1862 # construct a list of all changed files
1863 1863 changedfiles = {}
1864 1864
1865 1865 for chnk in cl.group(nodes, identity,
1866 1866 changed_file_collector(changedfiles)):
1867 1867 yield chnk
1868 1868
1869 1869 mnfst = self.manifest
1870 1870 nodeiter = gennodelst(mnfst)
1871 1871 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1872 1872 yield chnk
1873 1873
1874 1874 for fname in util.sort(changedfiles):
1875 1875 filerevlog = self.file(fname)
1876 1876 if not len(filerevlog):
1877 1877 raise util.Abort(_("empty or missing revlog for %s") % fname)
1878 1878 nodeiter = gennodelst(filerevlog)
1879 1879 nodeiter = list(nodeiter)
1880 1880 if nodeiter:
1881 1881 yield changegroup.chunkheader(len(fname))
1882 1882 yield fname
1883 1883 lookup = lookuprevlink_func(filerevlog)
1884 1884 for chnk in filerevlog.group(nodeiter, lookup):
1885 1885 yield chnk
1886 1886
1887 1887 yield changegroup.closechunk()
1888 1888
1889 1889 if nodes:
1890 1890 self.hook('outgoing', node=hex(nodes[0]), source=source)
1891 1891
1892 1892 return util.chunkbuffer(gengroup())
1893 1893
1894 1894 def addchangegroup(self, source, srctype, url, emptyok=False):
1895 1895 """add changegroup to repo.
1896 1896
1897 1897 return values:
1898 1898 - nothing changed or no source: 0
1899 1899 - more heads than before: 1+added heads (2..n)
1900 1900 - fewer heads than before: -1-removed heads (-2..-n)
1901 1901 - number of heads stays the same: 1
1902 1902 """
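# An illustrative decoding of the return value (a sketch, not part of
# the API): given ret = repo.addchangegroup(...),
#   ret == 0           -> nothing was added
#   ret == 1           -> changesets added, head count unchanged
#   ret == n, n >= 2   -> n - 1 new heads appeared
#   ret == -n, n >= 2  -> n - 1 heads went away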
1903 1903 def csmap(x):
1904 1904 self.ui.debug(_("add changeset %s\n") % short(x))
1905 1905 return len(cl)
1906 1906
1907 1907 def revmap(x):
1908 1908 return cl.rev(x)
1909 1909
1910 1910 if not source:
1911 1911 return 0
1912 1912
1913 1913 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1914 1914
1915 1915 changesets = files = revisions = 0
1916 1916
1917 1917 # write changelog data to temp files so concurrent readers will not see
1918 1918 # an inconsistent view
1919 1919 cl = self.changelog
1920 1920 cl.delayupdate()
1921 1921 oldheads = len(cl.heads())
1922 1922
1923 1923 tr = self.transaction()
1924 1924 try:
1925 1925 trp = weakref.proxy(tr)
1926 1926 # pull off the changeset group
1927 1927 self.ui.status(_("adding changesets\n"))
1928 1928 cor = len(cl) - 1
1929 1929 chunkiter = changegroup.chunkiter(source)
1930 1930 if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
1931 1931 raise util.Abort(_("received changelog group is empty"))
1932 1932 cnr = len(cl) - 1
1933 1933 changesets = cnr - cor
1934 1934
1935 1935 # pull off the manifest group
1936 1936 self.ui.status(_("adding manifests\n"))
1937 1937 chunkiter = changegroup.chunkiter(source)
1938 1938 # no need to check for empty manifest group here:
1939 1939 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1940 1940 # no new manifest will be created and the manifest group will
1941 1941 # be empty during the pull
1942 1942 self.manifest.addgroup(chunkiter, revmap, trp)
1943 1943
1944 1944 # process the files
1945 1945 self.ui.status(_("adding file changes\n"))
1946 1946 while 1:
1947 1947 f = changegroup.getchunk(source)
1948 1948 if not f:
1949 1949 break
1950 1950 self.ui.debug(_("adding %s revisions\n") % f)
1951 1951 fl = self.file(f)
1952 1952 o = len(fl)
1953 1953 chunkiter = changegroup.chunkiter(source)
1954 1954 if fl.addgroup(chunkiter, revmap, trp) is None:
1955 1955 raise util.Abort(_("received file revlog group is empty"))
1956 1956 revisions += len(fl) - o
1957 1957 files += 1
1958 1958
1959 1959 # make changelog see real files again
1960 1960 cl.finalize(trp)
1961 1961
1962 1962 newheads = len(self.changelog.heads())
1963 1963 heads = ""
1964 1964 if oldheads and newheads != oldheads:
1965 1965 heads = _(" (%+d heads)") % (newheads - oldheads)
1966 1966
1967 1967 self.ui.status(_("added %d changesets"
1968 1968 " with %d changes to %d files%s\n")
1969 1969 % (changesets, revisions, files, heads))
1970 1970
1971 1971 if changesets > 0:
1972 1972 self.hook('pretxnchangegroup', throw=True,
1973 1973 node=hex(self.changelog.node(cor+1)), source=srctype,
1974 1974 url=url)
1975 1975
1976 1976 tr.close()
1977 1977 finally:
1978 1978 del tr
1979 1979
1980 1980 if changesets > 0:
1981 1981 # forcefully update the on-disk branch cache
1982 1982 self.ui.debug(_("updating the branch cache\n"))
1983 1983 self.branchtags()
1984 1984 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1985 1985 source=srctype, url=url)
1986 1986
1987 1987 for i in xrange(cor + 1, cnr + 1):
1988 1988 self.hook("incoming", node=hex(self.changelog.node(i)),
1989 1989 source=srctype, url=url)
1990 1990
1991 1991 # never return 0 here:
1992 1992 if newheads < oldheads:
1993 1993 return newheads - oldheads - 1
1994 1994 else:
1995 1995 return newheads - oldheads + 1
1996 1996
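# Worked example of the arithmetic above: oldheads == 2 and newheads == 3
# returns +2 (one head added); newheads == 1 returns -2 (one head
# removed); newheads == 2 returns +1 -- a non-empty pull never returns 0.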
1997 1997
1998 1998 def stream_in(self, remote):
1999 1999 fp = remote.stream_out()
2000 2000 l = fp.readline()
2001 2001 try:
2002 2002 resp = int(l)
2003 2003 except ValueError:
2004 2004 raise util.UnexpectedOutput(
2005 2005 _('Unexpected response from remote server:'), l)
2006 2006 if resp == 1:
2007 2007 raise util.Abort(_('operation forbidden by server'))
2008 2008 elif resp == 2:
2009 2009 raise util.Abort(_('locking the remote repository failed'))
2010 2010 elif resp != 0:
2011 2011 raise util.Abort(_('the server sent an unknown error code'))
2012 2012 self.ui.status(_('streaming all changes\n'))
2013 2013 l = fp.readline()
2014 2014 try:
2015 2015 total_files, total_bytes = map(int, l.split(' ', 1))
2016 2016 except (ValueError, TypeError):
2017 2017 raise util.UnexpectedOutput(
2018 2018 _('Unexpected response from remote server:'), l)
2019 2019 self.ui.status(_('%d files to transfer, %s of data\n') %
2020 2020 (total_files, util.bytecount(total_bytes)))
2021 2021 start = time.time()
2022 2022 for i in xrange(total_files):
2023 2023 # XXX doesn't support '\n' or '\r' in filenames
2024 2024 l = fp.readline()
2025 2025 try:
2026 2026 name, size = l.split('\0', 1)
2027 2027 size = int(size)
2028 2028 except (ValueError, TypeError):
2029 2029 raise util.UnexpectedOutput(
2030 2030 _('Unexpected response from remote server:'), l)
2031 2031 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
2032 2032 ofp = self.sopener(name, 'w')
2033 2033 for chunk in util.filechunkiter(fp, limit=size):
2034 2034 ofp.write(chunk)
2035 2035 ofp.close()
2036 2036 elapsed = time.time() - start
2037 2037 if elapsed <= 0:
2038 2038 elapsed = 0.001
2039 2039 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2040 2040 (util.bytecount(total_bytes), elapsed,
2041 2041 util.bytecount(total_bytes / elapsed)))
2042 2042 self.invalidate()
2043 2043 return len(self.heads()) + 1
2044 2044
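# Sketch of the wire format consumed above, inferred from this method's
# parsing alone (not an authoritative protocol description):
#
#   <status>\n                        "0" on success
#   <total files> <total bytes>\n
#   then, for each file:
#   <store path>\0<size>\n            followed by exactly <size> raw bytes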
2045 2045 def clone(self, remote, heads=[], stream=False):
2046 2046 '''clone remote repository.
2047 2047
2048 2048 keyword arguments:
2049 2049 heads: list of revs to clone (forces use of pull)
2050 2050 stream: use streaming clone if possible'''
2051 2051
2052 2052 # now, all clients that can request uncompressed clones can
2053 2053 # read repo formats supported by all servers that can serve
2054 2054 # them.
2055 2055
2056 2056 # if revlog format changes, client will have to check version
2057 2057 # and format flags on "stream" capability, and use
2058 2058 # uncompressed only if compatible.
2059 2059
2060 2060 if stream and not heads and remote.capable('stream'):
2061 2061 return self.stream_in(remote)
2062 2062 return self.pull(remote, heads)
2063 2063
2064 2064 # used to avoid circular references so destructors work
2065 2065 def aftertrans(files):
2066 2066 renamefiles = [tuple(t) for t in files]
2067 2067 def a():
2068 2068 for src, dest in renamefiles:
2069 2069 util.rename(src, dest)
2070 2070 return a
2071 2071
2072 2072 def instance(ui, path, create):
2073 2073 return localrepository(ui, util.drop_scheme('file', path), create)
2074 2074
2075 2075 def islocal(path):
2076 2076 return True
@@ -1,83 +1,83 b''
1 1 # statichttprepo.py - simple http repository class for mercurial
2 2 #
3 3 # This provides read-only repo access to repositories exported via static http
4 4 #
5 5 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
6 6 #
7 7 # This software may be used and distributed according to the terms
8 8 # of the GNU General Public License, incorporated herein by reference.
9 9
10 10 from i18n import _
11 11 import changelog, httprangereader
12 import repo, localrepo, manifest, util
12 import repo, localrepo, manifest, util, store
13 13 import urllib, urllib2, errno
14 14
15 15 class rangereader(httprangereader.httprangereader):
16 16 def read(self, size=None):
17 17 try:
18 18 return httprangereader.httprangereader.read(self, size)
19 19 except urllib2.HTTPError, inst:
20 20 num = inst.code == 404 and errno.ENOENT or None
21 21 raise IOError(num, inst)
22 22 except urllib2.URLError, inst:
23 23 raise IOError(None, inst.reason[1])
24 24
25 25 def opener(base):
26 26 """return a function that opens files over http"""
27 27 p = base
28 28 def o(path, mode="r"):
29 29 f = "/".join((p, urllib.quote(path)))
30 30 return rangereader(f)
31 31 return o
32 32
33 33 class statichttprepository(localrepo.localrepository):
34 34 def __init__(self, ui, path):
35 35 self._url = path
36 36 self.ui = ui
37 37
38 38 self.path = path.rstrip('/') + "/.hg"
39 39 self.opener = opener(self.path)
40 40
41 41 # find requirements
42 42 try:
43 43 requirements = self.opener("requires").read().splitlines()
44 44 except IOError, inst:
45 45 if inst.errno == errno.ENOENT:
46 46 msg = _("'%s' does not appear to be an hg repository") % path
47 47 raise repo.RepoError(msg)
48 48 else:
49 49 requirements = []
50 50
51 51 # check them
52 52 for r in requirements:
53 53 if r not in self.supported:
54 54 raise repo.RepoError(_("requirement '%s' not supported") % r)
55 55
56 56 # setup store
57 57 if "store" in requirements:
58 self.encodefn = util.encodefilename
59 self.decodefn = util.decodefilename
58 self.encodefn = store.encodefilename
59 self.decodefn = store.decodefilename
60 60 self.spath = self.path + "/store"
61 61 else:
62 62 self.encodefn = lambda x: x
63 63 self.decodefn = lambda x: x
64 64 self.spath = self.path
65 self.sopener = util.encodedopener(opener(self.spath), self.encodefn)
65 self.sopener = store.encodedopener(opener(self.spath), self.encodefn)
66 66
67 67 self.manifest = manifest.manifest(self.sopener)
68 68 self.changelog = changelog.changelog(self.sopener)
69 69 self.tagscache = None
70 70 self.nodetagscache = None
71 71 self.encodepats = None
72 72 self.decodepats = None
73 73
74 74 def url(self):
75 75 return 'static-' + self._url
76 76
77 77 def local(self):
78 78 return False
79 79
80 80 def instance(ui, path, create):
81 81 if create:
82 82 raise util.Abort(_('cannot create new static-http repository'))
83 83 return statichttprepository(ui, path[7:])
@@ -1,1897 +1,1864 b''
1 1 """
2 2 util.py - Mercurial utility functions and platform specific implementations
3 3
4 4 Copyright 2005 K. Thananchayan <thananck@yahoo.com>
5 5 Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
6 6 Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
7 7
8 8 This software may be used and distributed according to the terms
9 9 of the GNU General Public License, incorporated herein by reference.
10 10
11 11 This contains helper routines that are independent of the SCM core and hide
12 12 platform-specific details from the core.
13 13 """
14 14
15 15 from i18n import _
16 16 import cStringIO, errno, getpass, re, shutil, sys, tempfile
17 17 import os, stat, threading, time, calendar, ConfigParser, locale, glob, osutil
18 18 import imp, urlparse
19 19
20 20 # Python compatibility
21 21
22 22 try:
23 23 set = set
24 24 frozenset = frozenset
25 25 except NameError:
26 26 from sets import Set as set, ImmutableSet as frozenset
27 27
28 28 _md5 = None
29 29 def md5(s):
30 30 global _md5
31 31 if _md5 is None:
32 32 try:
33 33 import hashlib
34 34 _md5 = hashlib.md5
35 35 except ImportError:
36 36 import md5
37 37 _md5 = md5.md5
38 38 return _md5(s)
39 39
40 40 _sha1 = None
41 41 def sha1(s):
42 42 global _sha1
43 43 if _sha1 is None:
44 44 try:
45 45 import hashlib
46 46 _sha1 = hashlib.sha1
47 47 except ImportError:
48 48 import sha
49 49 _sha1 = sha.sha
50 50 return _sha1(s)
51 51
52 52 try:
53 53 _encoding = os.environ.get("HGENCODING")
54 54 if sys.platform == 'darwin' and not _encoding:
55 55 # On darwin, getpreferredencoding ignores the locale environment and
56 56 # always returns mac-roman. We override this if the environment is
57 57 # not C (has been customized by the user).
58 58 locale.setlocale(locale.LC_CTYPE, '')
59 59 _encoding = locale.getlocale()[1]
60 60 if not _encoding:
61 61 _encoding = locale.getpreferredencoding() or 'ascii'
62 62 except locale.Error:
63 63 _encoding = 'ascii'
64 64 _encodingmode = os.environ.get("HGENCODINGMODE", "strict")
65 65 _fallbackencoding = 'ISO-8859-1'
66 66
67 67 def tolocal(s):
68 68 """
69 69 Convert a string from internal UTF-8 to local encoding
70 70
71 71 All internal strings should be UTF-8 but some repos before the
72 72 implementation of locale support may contain latin1 or possibly
73 73 other character sets. We attempt to decode everything strictly
74 74 using UTF-8, then Latin-1, and failing that, we use UTF-8 and
75 75 replace unknown characters.
76 76 """
77 77 for e in ('UTF-8', _fallbackencoding):
78 78 try:
79 79 u = s.decode(e) # attempt strict decoding
80 80 return u.encode(_encoding, "replace")
81 81 except LookupError, k:
82 82 raise Abort(_("%s, please check your locale settings") % k)
83 83 except UnicodeDecodeError:
84 84 pass
85 85 u = s.decode("utf-8", "replace") # last ditch
86 86 return u.encode(_encoding, "replace")
87 87
88 88 def fromlocal(s):
89 89 """
90 90 Convert a string from the local character encoding to UTF-8
91 91
92 92 We attempt to decode strings using the encoding mode set by
93 93 HGENCODINGMODE, which defaults to 'strict'. In this mode, unknown
94 94 characters will cause an error message. Other modes include
95 95 'replace', which replaces unknown characters with a special
96 96 Unicode character, and 'ignore', which drops the character.
97 97 """
98 98 try:
99 99 return s.decode(_encoding, _encodingmode).encode("utf-8")
100 100 except UnicodeDecodeError, inst:
101 101 sub = s[max(0, inst.start-10):inst.start+10]
102 102 raise Abort("decoding near '%s': %s!" % (sub, inst))
103 103 except LookupError, k:
104 104 raise Abort(_("%s, please check your locale settings") % k)
105 105
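# An illustrative round trip through the two converters above, assuming
# HGENCODING=latin-1 (hypothetical values, shown as byte strings):
#
#   fromlocal('caf\xe9')    -> 'caf\xc3\xa9'    # latin-1 -> UTF-8
#   tolocal('caf\xc3\xa9')  -> 'caf\xe9'        # UTF-8 -> latin-1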
106 106 def locallen(s):
107 107 """Find the length in characters of a local string"""
108 108 return len(s.decode(_encoding, "replace"))
109 109
110 110 # used by parsedate
111 111 defaultdateformats = (
112 112 '%Y-%m-%d %H:%M:%S',
113 113 '%Y-%m-%d %I:%M:%S%p',
114 114 '%Y-%m-%d %H:%M',
115 115 '%Y-%m-%d %I:%M%p',
116 116 '%Y-%m-%d',
117 117 '%m-%d',
118 118 '%m/%d',
119 119 '%m/%d/%y',
120 120 '%m/%d/%Y',
121 121 '%a %b %d %H:%M:%S %Y',
122 122 '%a %b %d %I:%M:%S%p %Y',
123 123 '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
124 124 '%b %d %H:%M:%S %Y',
125 125 '%b %d %I:%M:%S%p %Y',
126 126 '%b %d %H:%M:%S',
127 127 '%b %d %I:%M:%S%p',
128 128 '%b %d %H:%M',
129 129 '%b %d %I:%M%p',
130 130 '%b %d %Y',
131 131 '%b %d',
132 132 '%H:%M:%S',
133 133 '%I:%M:%SP',
134 134 '%H:%M',
135 135 '%I:%M%p',
136 136 )
137 137
138 138 extendeddateformats = defaultdateformats + (
139 139 "%Y",
140 140 "%Y-%m",
141 141 "%b",
142 142 "%b %Y",
143 143 )
144 144
145 145 class SignalInterrupt(Exception):
146 146 """Exception raised on SIGTERM and SIGHUP."""
147 147
148 148 # differences from SafeConfigParser:
149 149 # - case-sensitive keys
150 150 # - allows values that are not strings (this means that you may not
151 151 # be able to save the configuration to a file)
152 152 class configparser(ConfigParser.SafeConfigParser):
153 153 def optionxform(self, optionstr):
154 154 return optionstr
155 155
156 156 def set(self, section, option, value):
157 157 return ConfigParser.ConfigParser.set(self, section, option, value)
158 158
159 159 def _interpolate(self, section, option, rawval, vars):
160 160 if not isinstance(rawval, basestring):
161 161 return rawval
162 162 return ConfigParser.SafeConfigParser._interpolate(self, section,
163 163 option, rawval, vars)
164 164
165 165 def cachefunc(func):
166 166 '''cache the result of function calls'''
167 167 # XXX doesn't handle keyword args
168 168 cache = {}
169 169 if func.func_code.co_argcount == 1:
170 170 # we gain a small amount of time because
171 171 # we don't need to pack/unpack the list
172 172 def f(arg):
173 173 if arg not in cache:
174 174 cache[arg] = func(arg)
175 175 return cache[arg]
176 176 else:
177 177 def f(*args):
178 178 if args not in cache:
179 179 cache[args] = func(*args)
180 180 return cache[args]
181 181
182 182 return f
183 183
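# A minimal sketch of using the memoizer above ('slowlen' is a
# hypothetical stand-in for an expensive computation):
#
#   def slowlen(s):
#       return len(s)        # imagine real work here
#   fastlen = cachefunc(slowlen)
#   fastlen('abc')           # computed and cached
#   fastlen('abc')           # served from the cache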
184 184 def pipefilter(s, cmd):
185 185 '''filter string S through command CMD, returning its output'''
186 186 (pin, pout) = os.popen2(cmd, 'b')
187 187 def writer():
188 188 try:
189 189 pin.write(s)
190 190 pin.close()
191 191 except IOError, inst:
192 192 if inst.errno != errno.EPIPE:
193 193 raise
194 194
195 195 # we should use select instead on UNIX, but this will work on most
196 196 # systems, including Windows
197 197 w = threading.Thread(target=writer)
198 198 w.start()
199 199 f = pout.read()
200 200 pout.close()
201 201 w.join()
202 202 return f
203 203
204 204 def tempfilter(s, cmd):
205 205 '''filter string S through a pair of temporary files with CMD.
206 206 CMD is used as a template to create the real command to be run,
207 207 with the strings INFILE and OUTFILE replaced by the real names of
208 208 the temporary files generated.'''
209 209 inname, outname = None, None
210 210 try:
211 211 infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
212 212 fp = os.fdopen(infd, 'wb')
213 213 fp.write(s)
214 214 fp.close()
215 215 outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
216 216 os.close(outfd)
217 217 cmd = cmd.replace('INFILE', inname)
218 218 cmd = cmd.replace('OUTFILE', outname)
219 219 code = os.system(cmd)
220 220 if sys.platform == 'OpenVMS' and code & 1:
221 221 code = 0
222 222 if code: raise Abort(_("command '%s' failed: %s") %
223 223 (cmd, explain_exit(code)))
224 224 return open(outname, 'rb').read()
225 225 finally:
226 226 try:
227 227 if inname: os.unlink(inname)
228 228 except: pass
229 229 try:
230 230 if outname: os.unlink(outname)
231 231 except: pass
232 232
233 233 filtertable = {
234 234 'tempfile:': tempfilter,
235 235 'pipe:': pipefilter,
236 236 }
237 237
238 238 def filter(s, cmd):
239 239 "filter a string through a command that transforms its input to its output"
240 240 for name, fn in filtertable.iteritems():
241 241 if cmd.startswith(name):
242 242 return fn(s, cmd[len(name):].lstrip())
243 243 return pipefilter(s, cmd)
244 244
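# Illustrative filter specifications dispatched above (the shell
# commands are examples only):
#
#   filter(s, 'pipe: tr a-z A-Z')                  # stream through a pipe
#   filter(s, 'tempfile: sort INFILE > OUTFILE')   # via temporary files
#   filter(s, 'tr a-z A-Z')                        # no prefix: pipe default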
245 245 def binary(s):
246 246 """return true if a string is binary data"""
247 247 if s and '\0' in s:
248 248 return True
249 249 return False
250 250
251 251 def unique(g):
252 252 """return the unique elements of iterable g"""
253 253 return dict.fromkeys(g).keys()
254 254
255 255 def sort(l):
256 256 if not isinstance(l, list):
257 257 l = list(l)
258 258 l.sort()
259 259 return l
260 260
261 261 class Abort(Exception):
262 262 """Raised if a command needs to print an error and exit."""
263 263
264 264 class UnexpectedOutput(Abort):
265 265 """Raised to print an error with part of output and exit."""
266 266
267 267 def always(fn): return True
268 268 def never(fn): return False
269 269
270 270 def expand_glob(pats):
271 271 '''On Windows, expand the implicit globs in a list of patterns'''
272 272 if os.name != 'nt':
273 273 return list(pats)
274 274 ret = []
275 275 for p in pats:
276 276 kind, name = patkind(p, None)
277 277 if kind is None:
278 278 globbed = glob.glob(name)
279 279 if globbed:
280 280 ret.extend(globbed)
281 281 continue
282 282 # if we couldn't expand the glob, just keep it around
283 283 ret.append(p)
284 284 return ret
285 285
286 286 def patkind(name, default):
287 287 """Split a string into an optional pattern kind prefix and the
288 288 actual pattern."""
289 289 for prefix in 're', 'glob', 'path', 'relglob', 'relpath', 'relre':
290 290 if name.startswith(prefix + ':'): return name.split(':', 1)
291 291 return default, name
292 292
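# Examples of the split performed above; note the prefixed form returns
# a list while the default form returns a tuple:
#
#   patkind('glob:*.c', None)  -> ['glob', '*.c']
#   patkind('re:a+b', None)    -> ['re', 'a+b']
#   patkind('*.c', 'glob')     -> ('glob', '*.c')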
293 293 def globre(pat, head='^', tail='$'):
294 294 "convert a glob pattern into a regexp"
295 295 i, n = 0, len(pat)
296 296 res = ''
297 297 group = 0
298 298 def peek(): return i < n and pat[i]
299 299 while i < n:
300 300 c = pat[i]
301 301 i = i+1
302 302 if c == '*':
303 303 if peek() == '*':
304 304 i += 1
305 305 res += '.*'
306 306 else:
307 307 res += '[^/]*'
308 308 elif c == '?':
309 309 res += '.'
310 310 elif c == '[':
311 311 j = i
312 312 if j < n and pat[j] in '!]':
313 313 j += 1
314 314 while j < n and pat[j] != ']':
315 315 j += 1
316 316 if j >= n:
317 317 res += '\\['
318 318 else:
319 319 stuff = pat[i:j].replace('\\','\\\\')
320 320 i = j + 1
321 321 if stuff[0] == '!':
322 322 stuff = '^' + stuff[1:]
323 323 elif stuff[0] == '^':
324 324 stuff = '\\' + stuff
325 325 res = '%s[%s]' % (res, stuff)
326 326 elif c == '{':
327 327 group += 1
328 328 res += '(?:'
329 329 elif c == '}' and group:
330 330 res += ')'
331 331 group -= 1
332 332 elif c == ',' and group:
333 333 res += '|'
334 334 elif c == '\\':
335 335 p = peek()
336 336 if p:
337 337 i += 1
338 338 res += re.escape(p)
339 339 else:
340 340 res += re.escape(c)
341 341 else:
342 342 res += re.escape(c)
343 343 return head + res + tail
344 344
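# Illustrative translations performed above:
#
#   globre('*.py')    -> r'^[^/]*\.py$'    '*' does not cross slashes
#   globre('**')      -> r'^.*$'           '**' does
#   globre('a{b,c}')  -> r'^a(?:b|c)$'     {} becomes alternation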
345 345 _globchars = {'[': 1, '{': 1, '*': 1, '?': 1}
346 346
347 347 def pathto(root, n1, n2):
348 348 '''return the relative path from one place to another.
349 349 root should use os.sep to separate directories
350 350 n1 should use os.sep to separate directories
351 351 n2 should use "/" to separate directories
352 352 returns an os.sep-separated path.
353 353
354 354 If n1 is a relative path, it's assumed it's
355 355 relative to root.
356 356 n2 should always be relative to root.
357 357 '''
358 358 if not n1: return localpath(n2)
359 359 if os.path.isabs(n1):
360 360 if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
361 361 return os.path.join(root, localpath(n2))
362 362 n2 = '/'.join((pconvert(root), n2))
363 363 a, b = splitpath(n1), n2.split('/')
364 364 a.reverse()
365 365 b.reverse()
366 366 while a and b and a[-1] == b[-1]:
367 367 a.pop()
368 368 b.pop()
369 369 b.reverse()
370 370 return os.sep.join((['..'] * len(a)) + b) or '.'
371 371
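# An illustrative call with POSIX separators (paths hypothetical):
#
#   pathto('/repo', 'src/lib', 'doc/README')   # -> '../../doc/README'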
372 372 def canonpath(root, cwd, myname):
373 373 """return the canonical path of myname, given cwd and root"""
374 374 if root == os.sep:
375 375 rootsep = os.sep
376 376 elif endswithsep(root):
377 377 rootsep = root
378 378 else:
379 379 rootsep = root + os.sep
380 380 name = myname
381 381 if not os.path.isabs(name):
382 382 name = os.path.join(root, cwd, name)
383 383 name = os.path.normpath(name)
384 384 audit_path = path_auditor(root)
385 385 if name != rootsep and name.startswith(rootsep):
386 386 name = name[len(rootsep):]
387 387 audit_path(name)
388 388 return pconvert(name)
389 389 elif name == root:
390 390 return ''
391 391 else:
392 392 # Determine whether `name' is in the hierarchy at or beneath `root',
393 393 # by iterating name=dirname(name) until that causes no change (can't
394 394 # check name == '/', because that doesn't work on windows). For each
395 395 # `name', compare dev/inode numbers. If they match, the list `rel'
396 396 # holds the reversed list of components making up the relative file
397 397 # name we want.
398 398 root_st = os.stat(root)
399 399 rel = []
400 400 while True:
401 401 try:
402 402 name_st = os.stat(name)
403 403 except OSError:
404 404 break
405 405 if samestat(name_st, root_st):
406 406 if not rel:
407 407 # name was actually the same as root (maybe a symlink)
408 408 return ''
409 409 rel.reverse()
410 410 name = os.path.join(*rel)
411 411 audit_path(name)
412 412 return pconvert(name)
413 413 dirname, basename = os.path.split(name)
414 414 rel.append(basename)
415 415 if dirname == name:
416 416 break
417 417 name = dirname
418 418
419 419 raise Abort('%s not under root' % myname)
420 420
421 421 def matcher(canonroot, cwd='', names=[], inc=[], exc=[], src=None, dflt_pat='glob'):
422 422 """build a function to match a set of file patterns
423 423
424 424 arguments:
425 425 canonroot - the canonical root of the tree you're matching against
426 426 cwd - the current working directory, if relevant
427 427 names - patterns to find
428 428 inc - patterns to include
429 429 exc - patterns to exclude
430 430 dflt_pat - if a pattern in names has no explicit type, assume this one
431 431 src - where these patterns came from (e.g. .hgignore)
432 432
433 433 a pattern is one of:
434 434 'glob:<glob>' - a glob relative to cwd
435 435 're:<regexp>' - a regular expression
436 436 'path:<path>' - a path relative to canonroot
437 437 'relglob:<glob>' - an unrooted glob (*.c matches C files in all dirs)
438 438 'relpath:<path>' - a path relative to cwd
439 439 'relre:<regexp>' - a regexp that doesn't have to match the start of a name
440 440 '<something>' - one of the cases above, selected by the dflt_pat argument
441 441
442 442 returns:
443 443 a 3-tuple containing
444 444 - list of roots (places where one should start a recursive walk of the fs);
445 445 this often matches the explicit non-pattern names passed in, but also
446 446 includes the initial part of glob: patterns that has no glob characters
447 447 - a bool match(filename) function
448 448 - a bool indicating if any patterns were passed in
449 449 """
450 450
451 451 # a common case: no patterns at all
452 452 if not names and not inc and not exc:
453 453 return [], always, False
454 454
455 455 def contains_glob(name):
456 456 for c in name:
457 457 if c in _globchars: return True
458 458 return False
459 459
460 460 def regex(kind, name, tail):
461 461 '''convert a pattern into a regular expression'''
462 462 if not name:
463 463 return ''
464 464 if kind == 're':
465 465 return name
466 466 elif kind == 'path':
467 467 return '^' + re.escape(name) + '(?:/|$)'
468 468 elif kind == 'relglob':
469 469 return globre(name, '(?:|.*/)', tail)
470 470 elif kind == 'relpath':
471 471 return re.escape(name) + '(?:/|$)'
472 472 elif kind == 'relre':
473 473 if name.startswith('^'):
474 474 return name
475 475 return '.*' + name
476 476 return globre(name, '', tail)
477 477
478 478 def matchfn(pats, tail):
479 479 """build a matching function from a set of patterns"""
480 480 if not pats:
481 481 return
482 482 try:
483 483 pat = '(?:%s)' % '|'.join([regex(k, p, tail) for (k, p) in pats])
484 484 if len(pat) > 20000:
485 485 raise OverflowError()
486 486 return re.compile(pat).match
487 487 except OverflowError:
488 488 # We're using a Python with a tiny regex engine and we
489 489 # made it explode, so we'll divide the pattern list in two
490 490 # until it works
491 491 l = len(pats)
492 492 if l < 2:
493 493 raise
494 494 a, b = matchfn(pats[:l//2], tail), matchfn(pats[l//2:], tail)
495 495 return lambda s: a(s) or b(s)
496 496 except re.error:
497 497 for k, p in pats:
498 498 try:
499 499 re.compile('(?:%s)' % regex(k, p, tail))
500 500 except re.error:
501 501 if src:
502 502 raise Abort("%s: invalid pattern (%s): %s" %
503 503 (src, k, p))
504 504 else:
505 505 raise Abort("invalid pattern (%s): %s" % (k, p))
506 506 raise Abort("invalid pattern")
507 507
508 508 def globprefix(pat):
509 509 '''return the non-glob prefix of a path, e.g. foo/* -> foo'''
510 510 root = []
511 511 for p in pat.split('/'):
512 512 if contains_glob(p): break
513 513 root.append(p)
514 514 return '/'.join(root) or '.'
515 515
516 516 def normalizepats(names, default):
517 517 pats = []
518 518 roots = []
519 519 anypats = False
520 520 for kind, name in [patkind(p, default) for p in names]:
521 521 if kind in ('glob', 'relpath'):
522 522 name = canonpath(canonroot, cwd, name)
523 523 elif kind in ('relglob', 'path'):
524 524 name = normpath(name)
525 525
526 526 pats.append((kind, name))
527 527
528 528 if kind in ('glob', 're', 'relglob', 'relre'):
529 529 anypats = True
530 530
531 531 if kind == 'glob':
532 532 root = globprefix(name)
533 533 roots.append(root)
534 534 elif kind in ('relpath', 'path'):
535 535 roots.append(name or '.')
536 536 elif kind == 'relglob':
537 537 roots.append('.')
538 538 return roots, pats, anypats
539 539
540 540 roots, pats, anypats = normalizepats(names, dflt_pat)
541 541
542 542 patmatch = matchfn(pats, '$') or always
543 543 incmatch = always
544 544 if inc:
545 545 dummy, inckinds, dummy = normalizepats(inc, 'glob')
546 546 incmatch = matchfn(inckinds, '(?:/|$)')
547 547 excmatch = lambda fn: False
548 548 if exc:
549 549 dummy, exckinds, dummy = normalizepats(exc, 'glob')
550 550 excmatch = matchfn(exckinds, '(?:/|$)')
551 551
552 552 if not names and inc and not exc:
553 553 # common case: hgignore patterns
554 554 match = incmatch
555 555 else:
556 556 match = lambda fn: incmatch(fn) and not excmatch(fn) and patmatch(fn)
557 557
558 558 return (roots, match, (inc or exc or anypats) and True)
559 559
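# A minimal sketch of driving the matcher factory above (paths are
# hypothetical):
#
#   roots, match, anypats = matcher('/repo', names=['glob:src/*.c'])
#   roots                   # ['src'] -- where a walk should start
#   bool(match('src/a.c'))  # True
#   bool(match('doc/a.c'))  # False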
560 560 _hgexecutable = None
561 561
562 562 def main_is_frozen():
563 563 """return True if we are a frozen executable.
564 564
565 565 The code supports py2exe (most common, Windows only) and tools/freeze
566 566 (portable, not much used).
567 567 """
568 568 return (hasattr(sys, "frozen") or # new py2exe
569 569 hasattr(sys, "importers") or # old py2exe
570 570 imp.is_frozen("__main__")) # tools/freeze
571 571
572 572 def hgexecutable():
573 573 """return location of the 'hg' executable.
574 574
575 575 Defaults to $HG or 'hg' in the search path.
576 576 """
577 577 if _hgexecutable is None:
578 578 hg = os.environ.get('HG')
579 579 if hg:
580 580 set_hgexecutable(hg)
581 581 elif main_is_frozen():
582 582 set_hgexecutable(sys.executable)
583 583 else:
584 584 set_hgexecutable(find_exe('hg', 'hg'))
585 585 return _hgexecutable
586 586
587 587 def set_hgexecutable(path):
588 588 """set location of the 'hg' executable"""
589 589 global _hgexecutable
590 590 _hgexecutable = path
591 591
592 592 def system(cmd, environ={}, cwd=None, onerr=None, errprefix=None):
593 593 '''enhanced shell command execution.
594 594 run with environment maybe modified, maybe in different dir.
595 595
596 596 if command fails and onerr is None, return status. if ui object,
597 597 print error message and return status, else raise onerr object as
598 598 exception.'''
599 599 def py2shell(val):
600 600 'convert python object into string that is useful to shell'
601 601 if val in (None, False):
602 602 return '0'
603 603 if val == True:
604 604 return '1'
605 605 return str(val)
606 606 oldenv = {}
607 607 for k in environ:
608 608 oldenv[k] = os.environ.get(k)
609 609 if cwd is not None:
610 610 oldcwd = os.getcwd()
611 611 origcmd = cmd
612 612 if os.name == 'nt':
613 613 cmd = '"%s"' % cmd
614 614 try:
615 615 for k, v in environ.iteritems():
616 616 os.environ[k] = py2shell(v)
617 617 os.environ['HG'] = hgexecutable()
618 618 if cwd is not None and oldcwd != cwd:
619 619 os.chdir(cwd)
620 620 rc = os.system(cmd)
621 621 if sys.platform == 'OpenVMS' and rc & 1:
622 622 rc = 0
623 623 if rc and onerr:
624 624 errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
625 625 explain_exit(rc)[0])
626 626 if errprefix:
627 627 errmsg = '%s: %s' % (errprefix, errmsg)
628 628 try:
629 629 onerr.warn(errmsg + '\n')
630 630 except AttributeError:
631 631 raise onerr(errmsg)
632 632 return rc
633 633 finally:
634 634 for k, v in oldenv.iteritems():
635 635 if v is None:
636 636 del os.environ[k]
637 637 else:
638 638 os.environ[k] = v
639 639 if cwd is not None and oldcwd != cwd:
640 640 os.chdir(oldcwd)
641 641
642 642 # os.path.lexists is not available on python2.3
643 643 def lexists(filename):
644 644 "test whether a file with this name exists. does not follow symlinks"
645 645 try:
646 646 os.lstat(filename)
647 647 except:
648 648 return False
649 649 return True
650 650
651 651 def rename(src, dst):
652 652 """forcibly rename a file"""
653 653 try:
654 654 os.rename(src, dst)
655 655 except OSError, err: # FIXME: check err (EEXIST ?)
656 656 # on windows, rename to existing file is not allowed, so we
657 657 # must delete destination first. but if file is open, unlink
658 658 # schedules it for delete but does not delete it. rename
659 659 # happens immediately even for open files, so we create
660 660 # temporary file, delete it, rename destination to that name,
661 661 # then delete that. then rename is safe to do.
662 662 fd, temp = tempfile.mkstemp(dir=os.path.dirname(dst) or '.')
663 663 os.close(fd)
664 664 os.unlink(temp)
665 665 os.rename(dst, temp)
666 666 os.unlink(temp)
667 667 os.rename(src, dst)
668 668
669 669 def unlink(f):
670 670 """unlink and remove the directory if it is empty"""
671 671 os.unlink(f)
672 672 # try removing directories that might now be empty
673 673 try:
674 674 os.removedirs(os.path.dirname(f))
675 675 except OSError:
676 676 pass
677 677
678 678 def copyfile(src, dest):
679 679 "copy a file, preserving mode"
680 680 if os.path.islink(src):
681 681 try:
682 682 os.unlink(dest)
683 683 except:
684 684 pass
685 685 os.symlink(os.readlink(src), dest)
686 686 else:
687 687 try:
688 688 shutil.copyfile(src, dest)
689 689 shutil.copymode(src, dest)
690 690 except shutil.Error, inst:
691 691 raise Abort(str(inst))
692 692
693 693 def copyfiles(src, dst, hardlink=None):
694 694 """Copy a directory tree using hardlinks if possible"""
695 695
696 696 if hardlink is None:
697 697 hardlink = (os.stat(src).st_dev ==
698 698 os.stat(os.path.dirname(dst)).st_dev)
699 699
700 700 if os.path.isdir(src):
701 701 os.mkdir(dst)
702 702 for name, kind in osutil.listdir(src):
703 703 srcname = os.path.join(src, name)
704 704 dstname = os.path.join(dst, name)
705 705 copyfiles(srcname, dstname, hardlink)
706 706 else:
707 707 if hardlink:
708 708 try:
709 709 os_link(src, dst)
710 710 except (IOError, OSError):
711 711 hardlink = False
712 712 shutil.copy(src, dst)
713 713 else:
714 714 shutil.copy(src, dst)
715 715
716 716 class path_auditor(object):
717 717 '''ensure that a filesystem path contains no banned components.
718 718 the following properties of a path are checked:
719 719
720 720 - under top-level .hg
721 721 - starts at the root of a windows drive
722 722 - contains ".."
723 723 - traverses a symlink (e.g. a/symlink_here/b)
724 724 - inside a nested repository'''
725 725
726 726 def __init__(self, root):
727 727 self.audited = set()
728 728 self.auditeddir = set()
729 729 self.root = root
730 730
731 731 def __call__(self, path):
732 732 if path in self.audited:
733 733 return
734 734 normpath = os.path.normcase(path)
735 735 parts = splitpath(normpath)
736 736 if (os.path.splitdrive(path)[0] or parts[0] in ('.hg', '')
737 737 or os.pardir in parts):
738 738 raise Abort(_("path contains illegal component: %s") % path)
739 739 def check(prefix):
740 740 curpath = os.path.join(self.root, prefix)
741 741 try:
742 742 st = os.lstat(curpath)
743 743 except OSError, err:
744 744 # EINVAL can be raised for invalid path syntax under win32.
745 745 # Such errors must be ignored so that patterns can still be checked.
746 746 if err.errno not in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL):
747 747 raise
748 748 else:
749 749 if stat.S_ISLNK(st.st_mode):
750 750 raise Abort(_('path %r traverses symbolic link %r') %
751 751 (path, prefix))
752 752 elif (stat.S_ISDIR(st.st_mode) and
753 753 os.path.isdir(os.path.join(curpath, '.hg'))):
754 754 raise Abort(_('path %r is inside repo %r') %
755 755 (path, prefix))
756 756 parts.pop()
757 757 prefixes = []
758 758 for n in range(len(parts)):
759 759 prefix = os.sep.join(parts)
760 760 if prefix in self.auditeddir:
761 761 break
762 762 check(prefix)
763 763 prefixes.append(prefix)
764 764 parts.pop()
765 765
766 766 self.audited.add(path)
767 767 # only add prefixes to the cache after checking everything: we don't
768 768 # want to add "foo/bar/baz" before checking if there's a "foo/.hg"
769 769 self.auditeddir.update(prefixes)
770 770
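# A sketch of the auditor above at work (paths hypothetical):
#
#   audit = path_auditor('/repo')
#   audit('src/main.c')   # passes
#   audit('.hg/hgrc')     # raises Abort: path contains illegal component
#   audit('../escape')    # raises Abort: contains ".."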
771 771 def _makelock_file(info, pathname):
772 772 ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
773 773 os.write(ld, info)
774 774 os.close(ld)
775 775
776 776 def _readlock_file(pathname):
777 777 return posixfile(pathname).read()
778 778
779 779 def nlinks(pathname):
780 780 """Return number of hardlinks for the given file."""
781 781 return os.lstat(pathname).st_nlink
782 782
783 783 if hasattr(os, 'link'):
784 784 os_link = os.link
785 785 else:
786 786 def os_link(src, dst):
787 787 raise OSError(0, _("Hardlinks not supported"))
788 788
789 789 def fstat(fp):
790 790 '''stat file object that may not have fileno method.'''
791 791 try:
792 792 return os.fstat(fp.fileno())
793 793 except AttributeError:
794 794 return os.stat(fp.name)
795 795
796 796 posixfile = file
797 797
798 798 def openhardlinks():
799 799 '''return true if it is safe to hold open file handles to hardlinks'''
800 800 return True
801 801
802 802 getuser_fallback = None
803 803
804 804 def getuser():
805 805 '''return name of current user'''
806 806 try:
807 807 return getpass.getuser()
808 808 except ImportError:
809 809 # import of pwd will fail on windows - try fallback
810 810 if getuser_fallback:
811 811 return getuser_fallback()
812 812 # raised if win32api not available
813 813 raise Abort(_('user name not available - set USERNAME '
814 814 'environment variable'))
815 815
816 816 def username(uid=None):
817 817 """Return the name of the user with the given uid.
818 818
819 819 If uid is None, return the name of the current user."""
820 820 try:
821 821 import pwd
822 822 if uid is None:
823 823 uid = os.getuid()
824 824 try:
825 825 return pwd.getpwuid(uid)[0]
826 826 except KeyError:
827 827 return str(uid)
828 828 except ImportError:
829 829 return None
830 830
831 831 def groupname(gid=None):
832 832 """Return the name of the group with the given gid.
833 833
834 834 If gid is None, return the name of the current group."""
835 835 try:
836 836 import grp
837 837 if gid is None:
838 838 gid = os.getgid()
839 839 try:
840 840 return grp.getgrgid(gid)[0]
841 841 except KeyError:
842 842 return str(gid)
843 843 except ImportError:
844 844 return None
845 845
846 846 # File system features
847 847
848 848 def checkcase(path):
849 849 """
850 850 Check whether the given path is on a case-sensitive filesystem
851 851
852 852 Requires a path (like /foo/.hg) ending with a foldable final
853 853 directory component.
854 854 """
855 855 s1 = os.stat(path)
856 856 d, b = os.path.split(path)
857 857 p2 = os.path.join(d, b.upper())
858 858 if path == p2:
859 859 p2 = os.path.join(d, b.lower())
860 860 try:
861 861 s2 = os.stat(p2)
862 862 if s2 == s1:
863 863 return False
864 864 return True
865 865 except:
866 866 return True
867 867
868 868 _fspathcache = {}
869 869 def fspath(name, root):
870 870 '''Get name in the case stored in the filesystem
871 871
872 872 The name is either relative to root, or it is an absolute path starting
873 873 with root. Note that this function is unnecessary, and should not be
874 874 called, for case-sensitive filesystems (simply because it's expensive).
875 875 '''
876 876 # If name is absolute, make it relative
877 877 if name.lower().startswith(root.lower()):
878 878 l = len(root)
879 879 if name[l] == os.sep or name[l] == os.altsep:
880 880 l = l + 1
881 881 name = name[l:]
882 882
883 883 if not os.path.exists(os.path.join(root, name)):
884 884 return None
885 885
886 886 seps = os.sep
887 887 if os.altsep:
888 888 seps = seps + os.altsep
889 889 # Protect backslashes. This gets silly very quickly.
890 890 seps = seps.replace('\\','\\\\')
891 891 pattern = re.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
892 892 dir = os.path.normcase(os.path.normpath(root))
893 893 result = []
894 894 for part, sep in pattern.findall(name):
895 895 if sep:
896 896 result.append(sep)
897 897 continue
898 898
899 899 if dir not in _fspathcache:
900 900 _fspathcache[dir] = os.listdir(dir)
901 901 contents = _fspathcache[dir]
902 902
903 903 lpart = part.lower()
904 904 for n in contents:
905 905 if n.lower() == lpart:
906 906 result.append(n)
907 907 break
908 908 else:
909 909 # Cannot happen, as the file exists!
910 910 result.append(part)
911 911 dir = os.path.join(dir, lpart)
912 912
913 913 return ''.join(result)
914 914
915 915 def checkexec(path):
916 916 """
917 917 Check whether the given path is on a filesystem with UNIX-like exec flags
918 918
919 919 Requires a directory (like /foo/.hg)
920 920 """
921 921
922 922 # VFAT on some Linux versions can flip mode but it doesn't persist
923 923 # across a FS remount. Frequently we can detect it if files are created
924 924 # with the exec bit on.
925 925
926 926 try:
927 927 EXECFLAGS = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
928 928 fh, fn = tempfile.mkstemp("", "", path)
929 929 try:
930 930 os.close(fh)
931 931 m = os.stat(fn).st_mode & 0777
932 932 new_file_has_exec = m & EXECFLAGS
933 933 os.chmod(fn, m ^ EXECFLAGS)
934 934 exec_flags_cannot_flip = ((os.stat(fn).st_mode & 0777) == m)
935 935 finally:
936 936 os.unlink(fn)
937 937 except (IOError, OSError):
938 938 # we don't care, the user probably won't be able to commit anyway
939 939 return False
940 940 return not (new_file_has_exec or exec_flags_cannot_flip)
941 941
942 942 def checklink(path):
943 943 """check whether the given path is on a symlink-capable filesystem"""
944 944 # mktemp is not racy because symlink creation will fail if the
945 945 # file already exists
946 946 name = tempfile.mktemp(dir=path)
947 947 try:
948 948 os.symlink(".", name)
949 949 os.unlink(name)
950 950 return True
951 951 except (OSError, AttributeError):
952 952 return False
953 953
954 954 _umask = os.umask(0)
955 955 os.umask(_umask)
956 956
957 957 def needbinarypatch():
958 958 """return True if patches should be applied in binary mode by default."""
959 959 return os.name == 'nt'
960 960
961 961 def endswithsep(path):
962 962 '''Check path ends with os.sep or os.altsep.'''
963 963 return path.endswith(os.sep) or os.altsep and path.endswith(os.altsep)
964 964
965 965 def splitpath(path):
966 966 '''Split path by os.sep.
967 967 Note that this function does not use os.altsep because this is
968 968 an alternative of simple "xxx.split(os.sep)".
969 969 It is recommended to use os.path.normpath() before using this
970 970 function if need.'''
971 971 return path.split(os.sep)
972 972
973 973 def gui():
974 974 '''Are we running in a GUI?'''
975 975 return os.name == "nt" or os.name == "mac" or os.environ.get("DISPLAY")
976 976
977 977 def lookup_reg(key, name=None, scope=None):
978 978 return None
979 979
980 980 # Platform specific variants
981 981 if os.name == 'nt':
982 982 import msvcrt
983 983 nulldev = 'NUL:'
984 984
985 985 class winstdout:
986 986 '''stdout on windows misbehaves if sent through a pipe'''
987 987
988 988 def __init__(self, fp):
989 989 self.fp = fp
990 990
991 991 def __getattr__(self, key):
992 992 return getattr(self.fp, key)
993 993
994 994 def close(self):
995 995 try:
996 996 self.fp.close()
997 997 except: pass
998 998
999 999 def write(self, s):
1000 1000 try:
1001 1001 # This is a workaround for the "Not enough space" error when
1002 1002 # writing a large amount of data to the console.
1003 1003 limit = 16000
1004 1004 l = len(s)
1005 1005 start = 0
1006 1006 while start < l:
1007 1007 end = start + limit
1008 1008 self.fp.write(s[start:end])
1009 1009 start = end
1010 1010 except IOError, inst:
1011 1011 if inst.errno != 0: raise
1012 1012 self.close()
1013 1013 raise IOError(errno.EPIPE, 'Broken pipe')
1014 1014
1015 1015 def flush(self):
1016 1016 try:
1017 1017 return self.fp.flush()
1018 1018 except IOError, inst:
1019 1019 if inst.errno != errno.EINVAL: raise
1020 1020 self.close()
1021 1021 raise IOError(errno.EPIPE, 'Broken pipe')
1022 1022
1023 1023 sys.stdout = winstdout(sys.stdout)
1024 1024
1025 1025 def _is_win_9x():
1026 1026 '''return true if run on windows 95, 98 or me.'''
1027 1027 try:
1028 1028 return sys.getwindowsversion()[3] == 1
1029 1029 except AttributeError:
1030 1030 return 'command' in os.environ.get('comspec', '')
1031 1031
1032 1032 def openhardlinks():
1033 1033 return not _is_win_9x() and "win32api" in globals()
1034 1034
1035 1035 def system_rcpath():
1036 1036 try:
1037 1037 return system_rcpath_win32()
1038 1038 except:
1039 1039 return [r'c:\mercurial\mercurial.ini']
1040 1040
1041 1041 def user_rcpath():
1042 1042 '''return os-specific hgrc search path to the user dir'''
1043 1043 try:
1044 1044 path = user_rcpath_win32()
1045 1045 except:
1046 1046 home = os.path.expanduser('~')
1047 1047 path = [os.path.join(home, 'mercurial.ini'),
1048 1048 os.path.join(home, '.hgrc')]
1049 1049 userprofile = os.environ.get('USERPROFILE')
1050 1050 if userprofile:
1051 1051 path.append(os.path.join(userprofile, 'mercurial.ini'))
1052 1052 path.append(os.path.join(userprofile, '.hgrc'))
1053 1053 return path
1054 1054
1055 1055 def parse_patch_output(output_line):
1056 1056 """parses the output produced by patch and returns the file name"""
1057 1057 pf = output_line[14:]
1058 1058 if pf[0] == '`':
1059 1059 pf = pf[1:-1] # Remove the quotes
1060 1060 return pf
1061 1061
1062 1062 def sshargs(sshcmd, host, user, port):
1063 1063 '''Build argument list for ssh or Plink'''
1064 1064 pflag = 'plink' in sshcmd.lower() and '-P' or '-p'
1065 1065 args = user and ("%s@%s" % (user, host)) or host
1066 1066 return port and ("%s %s %s" % (args, pflag, port)) or args
1067 1067
1068 1068 def testpid(pid):
1069 1069 '''return False if pid dead, True if running or not known'''
1070 1070 return True
1071 1071
1072 1072 def set_flags(f, flags):
1073 1073 pass
1074 1074
1075 1075 def set_binary(fd):
1076 1076 # When run without a console, pipes may expose invalid
1077 1077 # fileno(), usually set to -1.
1078 1078 if hasattr(fd, 'fileno') and fd.fileno() >= 0:
1079 1079 msvcrt.setmode(fd.fileno(), os.O_BINARY)
1080 1080
1081 1081 def pconvert(path):
1082 1082 return '/'.join(splitpath(path))
1083 1083
1084 1084 def localpath(path):
1085 1085 return path.replace('/', '\\')
1086 1086
1087 1087 def normpath(path):
1088 1088 return pconvert(os.path.normpath(path))
1089 1089
1090 1090 makelock = _makelock_file
1091 1091 readlock = _readlock_file
1092 1092
1093 1093 def samestat(s1, s2):
1094 1094 return False
1095 1095
1096 1096 # A sequence of backslashes is special iff it precedes a double quote:
1097 1097 # - if there's an even number of backslashes, the double quote is not
1098 1098 # quoted (i.e. it ends the quoted region)
1099 1099 # - if there's an odd number of backslashes, the double quote is quoted
1100 1100 # - in both cases, every pair of backslashes is unquoted into a single
1101 1101 # backslash
1102 1102 # (See http://msdn2.microsoft.com/en-us/library/a1y7w461.aspx )
1103 1103 # So, to quote a string, we must surround it in double quotes, double
1104 1104     # the number of backslashes that precede double quotes and add another
1105 1105 # backslash before every double quote (being careful with the double
1106 1106 # quote we've appended to the end)
1107 1107 _quotere = None
1108 1108 def shellquote(s):
1109 1109 global _quotere
1110 1110 if _quotere is None:
1111 1111 _quotere = re.compile(r'(\\*)("|\\$)')
1112 1112 return '"%s"' % _quotere.sub(r'\1\1\\\2', s)
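
A worked example of those backslash rules (illustrative strings; the results
shown are the literal command-line text produced by the regex above):

    shellquote('say "hi"')   # -> "say \"hi\""
    shellquote('C:\\dir\\')  # -> "C:\dir\\"  (trailing backslash doubled)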
1113 1113
1114 1114 def quotecommand(cmd):
1115 1115 """Build a command string suitable for os.popen* calls."""
1116 1116 # The extra quotes are needed because popen* runs the command
1117 1117         # through the current COMSPEC. cmd.exe suppresses enclosing quotes.
1118 1118 return '"' + cmd + '"'
1119 1119
1120 1120 def popen(command, mode='r'):
1121 1121 # Work around "popen spawned process may not write to stdout
1122 1122 # under windows"
1123 1123 # http://bugs.python.org/issue1366
1124 1124 command += " 2> %s" % nulldev
1125 1125 return os.popen(quotecommand(command), mode)
1126 1126
1127 1127 def explain_exit(code):
1128 1128 return _("exited with status %d") % code, code
1129 1129
1130 1130 # if you change this stub into a real check, please try to implement the
1131 1131 # username and groupname functions above, too.
1132 1132 def isowner(fp, st=None):
1133 1133 return True
1134 1134
1135 1135 def find_in_path(name, path, default=None):
1136 1136 '''find name in search path. path can be string (will be split
1137 1137 with os.pathsep), or iterable thing that returns strings. if name
1138 1138 found, return path to name. else return default. name is looked up
1139 1139     using cmd.exe rules, honoring PATHEXT.'''
1140 1140 if isinstance(path, str):
1141 1141 path = path.split(os.pathsep)
1142 1142
1143 1143 pathext = os.environ.get('PATHEXT', '.COM;.EXE;.BAT;.CMD')
1144 1144 pathext = pathext.lower().split(os.pathsep)
1145 1145 isexec = os.path.splitext(name)[1].lower() in pathext
1146 1146
1147 1147 for p in path:
1148 1148 p_name = os.path.join(p, name)
1149 1149
1150 1150 if isexec and os.path.exists(p_name):
1151 1151 return p_name
1152 1152
1153 1153 for ext in pathext:
1154 1154 p_name_ext = p_name + ext
1155 1155 if os.path.exists(p_name_ext):
1156 1156 return p_name_ext
1157 1157 return default
1158 1158
1159 1159 def set_signal_handler():
1160 1160 try:
1161 1161 set_signal_handler_win32()
1162 1162 except NameError:
1163 1163 pass
1164 1164
1165 1165 try:
1166 1166 # override functions with win32 versions if possible
1167 1167 from util_win32 import *
1168 1168 if not _is_win_9x():
1169 1169 posixfile = posixfile_nt
1170 1170 except ImportError:
1171 1171 pass
1172 1172
1173 1173 else:
1174 1174 nulldev = '/dev/null'
1175 1175
1176 1176 def rcfiles(path):
1177 1177 rcs = [os.path.join(path, 'hgrc')]
1178 1178 rcdir = os.path.join(path, 'hgrc.d')
1179 1179 try:
1180 1180 rcs.extend([os.path.join(rcdir, f)
1181 1181 for f, kind in osutil.listdir(rcdir)
1182 1182 if f.endswith(".rc")])
1183 1183 except OSError:
1184 1184 pass
1185 1185 return rcs
1186 1186
1187 1187 def system_rcpath():
1188 1188 path = []
1189 1189 # old mod_python does not set sys.argv
1190 1190 if len(getattr(sys, 'argv', [])) > 0:
1191 1191 path.extend(rcfiles(os.path.dirname(sys.argv[0]) +
1192 1192 '/../etc/mercurial'))
1193 1193 path.extend(rcfiles('/etc/mercurial'))
1194 1194 return path
1195 1195
1196 1196 def user_rcpath():
1197 1197 return [os.path.expanduser('~/.hgrc')]
1198 1198
1199 1199 def parse_patch_output(output_line):
1200 1200 """parses the output produced by patch and returns the file name"""
1201 1201 pf = output_line[14:]
1202 1202 if os.sys.platform == 'OpenVMS':
1203 1203 if pf[0] == '`':
1204 1204 pf = pf[1:-1] # Remove the quotes
1205 1205 else:
1206 1206 if pf.startswith("'") and pf.endswith("'") and " " in pf:
1207 1207 pf = pf[1:-1] # Remove the quotes
1208 1208 return pf
1209 1209
1210 1210 def sshargs(sshcmd, host, user, port):
1211 1211 '''Build argument list for ssh'''
1212 1212 args = user and ("%s@%s" % (user, host)) or host
1213 1213 return port and ("%s -p %s" % (args, port)) or args
1214 1214
1215 1215 def is_exec(f):
1216 1216 """check whether a file is executable"""
1217 1217 return (os.lstat(f).st_mode & 0100 != 0)
1218 1218
1219 1219 def set_flags(f, flags):
1220 1220 s = os.lstat(f).st_mode
1221 1221 x = "x" in flags
1222 1222 l = "l" in flags
1223 1223 if l:
1224 1224 if not stat.S_ISLNK(s):
1225 1225 # switch file to link
1226 1226 data = file(f).read()
1227 1227 os.unlink(f)
1228 1228 os.symlink(data, f)
1229 1229 # no chmod needed at this point
1230 1230 return
1231 1231 if stat.S_ISLNK(s):
1232 1232 # switch link to file
1233 1233 data = os.readlink(f)
1234 1234 os.unlink(f)
1235 1235 file(f, "w").write(data)
1236 1236 s = 0666 & ~_umask # avoid restatting for chmod
1237 1237
1238 1238 sx = s & 0100
1239 1239 if x and not sx:
1240 1240 # Turn on +x for every +r bit when making a file executable
1241 1241 # and obey umask.
1242 1242 os.chmod(f, s | (s & 0444) >> 2 & ~_umask)
1243 1243 elif not x and sx:
1244 1244 # Turn off all +x bits
1245 1245 os.chmod(f, s & 0666)
1246 1246
1247 1247 def set_binary(fd):
1248 1248 pass
1249 1249
1250 1250 def pconvert(path):
1251 1251 return path
1252 1252
1253 1253 def localpath(path):
1254 1254 return path
1255 1255
1256 1256 normpath = os.path.normpath
1257 1257 samestat = os.path.samestat
1258 1258
1259 1259 def makelock(info, pathname):
1260 1260 try:
1261 1261 os.symlink(info, pathname)
1262 1262 except OSError, why:
1263 1263 if why.errno == errno.EEXIST:
1264 1264 raise
1265 1265 else:
1266 1266 _makelock_file(info, pathname)
1267 1267
1268 1268 def readlock(pathname):
1269 1269 try:
1270 1270 return os.readlink(pathname)
1271 1271 except OSError, why:
1272 1272 if why.errno in (errno.EINVAL, errno.ENOSYS):
1273 1273 return _readlock_file(pathname)
1274 1274 else:
1275 1275 raise
1276 1276
1277 1277 def shellquote(s):
1278 1278 if os.sys.platform == 'OpenVMS':
1279 1279 return '"%s"' % s
1280 1280 else:
1281 1281 return "'%s'" % s.replace("'", "'\\''")
1282 1282
1283 1283 def quotecommand(cmd):
1284 1284 return cmd
1285 1285
1286 1286 def popen(command, mode='r'):
1287 1287 return os.popen(command, mode)
1288 1288
1289 1289 def testpid(pid):
1290 1290 '''return False if pid dead, True if running or not sure'''
1291 1291 if os.sys.platform == 'OpenVMS':
1292 1292 return True
1293 1293 try:
1294 1294 os.kill(pid, 0)
1295 1295 return True
1296 1296 except OSError, inst:
1297 1297 return inst.errno != errno.ESRCH
1298 1298
1299 1299 def explain_exit(code):
1300 1300 """return a 2-tuple (desc, code) describing a process's status"""
1301 1301 if os.WIFEXITED(code):
1302 1302 val = os.WEXITSTATUS(code)
1303 1303 return _("exited with status %d") % val, val
1304 1304 elif os.WIFSIGNALED(code):
1305 1305 val = os.WTERMSIG(code)
1306 1306 return _("killed by signal %d") % val, val
1307 1307 elif os.WIFSTOPPED(code):
1308 1308 val = os.WSTOPSIG(code)
1309 1309 return _("stopped by signal %d") % val, val
1310 1310 raise ValueError(_("invalid exit code"))
1311 1311
1312 1312 def isowner(fp, st=None):
1313 1313         """Return True if the file object fp belongs to the current user.
1314 1314
1315 1315         The return value of util.fstat(fp) may be passed as the st argument.
1316 1316 """
1317 1317 if st is None:
1318 1318 st = fstat(fp)
1319 1319 return st.st_uid == os.getuid()
1320 1320
1321 1321 def find_in_path(name, path, default=None):
1322 1322 '''find name in search path. path can be string (will be split
1323 1323 with os.pathsep), or iterable thing that returns strings. if name
1324 1324 found, return path to name. else return default.'''
1325 1325 if isinstance(path, str):
1326 1326 path = path.split(os.pathsep)
1327 1327 for p in path:
1328 1328 p_name = os.path.join(p, name)
1329 1329 if os.path.exists(p_name):
1330 1330 return p_name
1331 1331 return default
1332 1332
1333 1333 def set_signal_handler():
1334 1334 pass
1335 1335
1336 1336 def find_exe(name, default=None):
1337 1337 '''find path of an executable.
1338 1338 if name contains a path component, return it as is. otherwise,
1339 1339 use normal executable search path.'''
1340 1340
1341 1341 if os.sep in name or sys.platform == 'OpenVMS':
1342 1342 # don't check the executable bit. if the file isn't
1343 1343 # executable, whoever tries to actually run it will give a
1344 1344 # much more useful error message.
1345 1345 return name
1346 1346 return find_in_path(name, os.environ.get('PATH', ''), default=default)
1347 1347
1348 def _buildencodefun():
1349 e = '_'
1350 win_reserved = [ord(x) for x in '\\:*?"<>|']
1351 cmap = dict([ (chr(x), chr(x)) for x in xrange(127) ])
1352 for x in (range(32) + range(126, 256) + win_reserved):
1353 cmap[chr(x)] = "~%02x" % x
1354 for x in range(ord("A"), ord("Z")+1) + [ord(e)]:
1355 cmap[chr(x)] = e + chr(x).lower()
1356 dmap = {}
1357 for k, v in cmap.iteritems():
1358 dmap[v] = k
1359 def decode(s):
1360 i = 0
1361 while i < len(s):
1362 for l in xrange(1, 4):
1363 try:
1364 yield dmap[s[i:i+l]]
1365 i += l
1366 break
1367 except KeyError:
1368 pass
1369 else:
1370 raise KeyError
1371 return (lambda s: "".join([cmap[c] for c in s]),
1372 lambda s: "".join(list(decode(s))))
1373
1374 encodefilename, decodefilename = _buildencodefun()
1375
1376 def encodedopener(openerfn, fn):
1377 def o(path, *args, **kw):
1378 return openerfn(fn(path), *args, **kw)
1379 return o
1380
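The tables built above escape control, non-ASCII and Windows-reserved bytes as
'~XX' and prefix uppercase letters (and '_') with '_'; a round-trip sketch with
a hypothetical store path:

    enc = encodefilename('data/FOO:BAR.i')
    # -> 'data/_f_o_o~3a_b_a_r.i'
    assert decodefilename(enc) == 'data/FOO:BAR.i'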
1381 1348 def mktempcopy(name, emptyok=False, createmode=None):
1382 1349 """Create a temporary file with the same contents from name
1383 1350
1384 1351 The permission bits are copied from the original file.
1385 1352
1386 1353 If the temporary file is going to be truncated immediately, you
1387 1354 can use emptyok=True as an optimization.
1388 1355
1389 1356 Returns the name of the temporary file.
1390 1357 """
1391 1358 d, fn = os.path.split(name)
1392 1359 fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
1393 1360 os.close(fd)
1394 1361 # Temporary files are created with mode 0600, which is usually not
1395 1362 # what we want. If the original file already exists, just copy
1396 1363 # its mode. Otherwise, manually obey umask.
1397 1364 try:
1398 1365 st_mode = os.lstat(name).st_mode & 0777
1399 1366 except OSError, inst:
1400 1367 if inst.errno != errno.ENOENT:
1401 1368 raise
1402 1369 st_mode = createmode
1403 1370 if st_mode is None:
1404 1371 st_mode = ~_umask
1405 1372 st_mode &= 0666
1406 1373 os.chmod(temp, st_mode)
1407 1374 if emptyok:
1408 1375 return temp
1409 1376 try:
1410 1377 try:
1411 1378 ifp = posixfile(name, "rb")
1412 1379 except IOError, inst:
1413 1380 if inst.errno == errno.ENOENT:
1414 1381 return temp
1415 1382 if not getattr(inst, 'filename', None):
1416 1383 inst.filename = name
1417 1384 raise
1418 1385 ofp = posixfile(temp, "wb")
1419 1386 for chunk in filechunkiter(ifp):
1420 1387 ofp.write(chunk)
1421 1388 ifp.close()
1422 1389 ofp.close()
1423 1390 except:
1424 1391 try: os.unlink(temp)
1425 1392 except: pass
1426 1393 raise
1427 1394 return temp
1428 1395
1429 1396 class atomictempfile(posixfile):
1430 1397 """file-like object that atomically updates a file
1431 1398
1432 1399 All writes will be redirected to a temporary copy of the original
1433 1400 file. When rename is called, the copy is renamed to the original
1434 1401 name, making the changes visible.
1435 1402 """
1436 1403 def __init__(self, name, mode, createmode):
1437 1404 self.__name = name
1438 1405 self.temp = mktempcopy(name, emptyok=('w' in mode),
1439 1406 createmode=createmode)
1440 1407 posixfile.__init__(self, self.temp, mode)
1441 1408
1442 1409 def rename(self):
1443 1410 if not self.closed:
1444 1411 posixfile.close(self)
1445 1412 rename(self.temp, localpath(self.__name))
1446 1413
1447 1414 def __del__(self):
1448 1415 if not self.closed:
1449 1416 try:
1450 1417 os.unlink(self.temp)
1451 1418 except: pass
1452 1419 posixfile.close(self)
1453 1420
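
A minimal usage sketch (hypothetical file name; the target file is untouched
until rename() succeeds):

    f = atomictempfile('some/file.txt', 'w', createmode=None)
    f.write('new contents\n')
    f.rename()  # atomically replaces some/file.txt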
1454 1421 def makedirs(name, mode=None):
1455 1422 """recursive directory creation with parent mode inheritance"""
1456 1423 try:
1457 1424 os.mkdir(name)
1458 1425 if mode is not None:
1459 1426 os.chmod(name, mode)
1460 1427 return
1461 1428 except OSError, err:
1462 1429 if err.errno == errno.EEXIST:
1463 1430 return
1464 1431 if err.errno != errno.ENOENT:
1465 1432 raise
1466 1433 parent = os.path.abspath(os.path.dirname(name))
1467 1434 makedirs(parent, mode)
1468 1435 makedirs(name, mode)
1469 1436
1470 1437 class opener(object):
1471 1438 """Open files relative to a base directory
1472 1439
1473 1440 This class is used to hide the details of COW semantics and
1474 1441 remote file access from higher level code.
1475 1442 """
1476 1443 def __init__(self, base, audit=True):
1477 1444 self.base = base
1478 1445 if audit:
1479 1446 self.audit_path = path_auditor(base)
1480 1447 else:
1481 1448 self.audit_path = always
1482 1449 self.createmode = None
1483 1450
1484 1451 def __getattr__(self, name):
1485 1452 if name == '_can_symlink':
1486 1453 self._can_symlink = checklink(self.base)
1487 1454 return self._can_symlink
1488 1455 raise AttributeError(name)
1489 1456
1490 1457 def _fixfilemode(self, name):
1491 1458 if self.createmode is None:
1492 1459 return
1493 1460 os.chmod(name, self.createmode & 0666)
1494 1461
1495 1462 def __call__(self, path, mode="r", text=False, atomictemp=False):
1496 1463 self.audit_path(path)
1497 1464 f = os.path.join(self.base, path)
1498 1465
1499 1466 if not text and "b" not in mode:
1500 1467 mode += "b" # for that other OS
1501 1468
1502 1469 nlink = -1
1503 1470 if mode not in ("r", "rb"):
1504 1471 try:
1505 1472 nlink = nlinks(f)
1506 1473 except OSError:
1507 1474 nlink = 0
1508 1475 d = os.path.dirname(f)
1509 1476 if not os.path.isdir(d):
1510 1477 makedirs(d, self.createmode)
1511 1478 if atomictemp:
1512 1479 return atomictempfile(f, mode, self.createmode)
1513 1480 if nlink > 1:
1514 1481 rename(mktempcopy(f), f)
1515 1482 fp = posixfile(f, mode)
1516 1483 if nlink == 0:
1517 1484 self._fixfilemode(f)
1518 1485 return fp
1519 1486
1520 1487 def symlink(self, src, dst):
1521 1488 self.audit_path(dst)
1522 1489 linkname = os.path.join(self.base, dst)
1523 1490 try:
1524 1491 os.unlink(linkname)
1525 1492 except OSError:
1526 1493 pass
1527 1494
1528 1495 dirname = os.path.dirname(linkname)
1529 1496 if not os.path.exists(dirname):
1530 1497 makedirs(dirname, self.createmode)
1531 1498
1532 1499 if self._can_symlink:
1533 1500 try:
1534 1501 os.symlink(src, linkname)
1535 1502 except OSError, err:
1536 1503 raise OSError(err.errno, _('could not symlink to %r: %s') %
1537 1504 (src, err.strerror), linkname)
1538 1505 else:
1539 1506 f = self(dst, "w")
1540 1507 f.write(src)
1541 1508 f.close()
1542 1509 self._fixfilemode(dst)
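
Typical use is one opener per tree, called with repository-relative paths
(hypothetical base and file names):

    op = opener('/repo/.hg')
    data = op('requires').read()          # opens /repo/.hg/requires
    op('store/somefile', 'w').write('x')  # parent directories made on demand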
1543 1510
1544 1511 class chunkbuffer(object):
1545 1512 """Allow arbitrary sized chunks of data to be efficiently read from an
1546 1513 iterator over chunks of arbitrary size."""
1547 1514
1548 1515 def __init__(self, in_iter):
1549 1516         """in_iter is the iterator that's iterating over the input chunks.
1550 1517         the buffer is refilled in runs of at least self.targetsize bytes."""
1551 1518 self.iter = iter(in_iter)
1552 1519 self.buf = ''
1553 1520 self.targetsize = 2**16
1554 1521
1555 1522 def read(self, l):
1556 1523 """Read L bytes of data from the iterator of chunks of data.
1557 1524 Returns less than L bytes if the iterator runs dry."""
1558 1525 if l > len(self.buf) and self.iter:
1559 1526 # Clamp to a multiple of self.targetsize
1560 1527 targetsize = max(l, self.targetsize)
1561 1528 collector = cStringIO.StringIO()
1562 1529 collector.write(self.buf)
1563 1530 collected = len(self.buf)
1564 1531 for chunk in self.iter:
1565 1532 collector.write(chunk)
1566 1533 collected += len(chunk)
1567 1534 if collected >= targetsize:
1568 1535 break
1569 1536 if collected < targetsize:
1570 1537 self.iter = False
1571 1538 self.buf = collector.getvalue()
1572 1539 if len(self.buf) == l:
1573 1540 s, self.buf = str(self.buf), ''
1574 1541 else:
1575 1542 s, self.buf = self.buf[:l], buffer(self.buf, l)
1576 1543 return s
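
For example, re-chunking an iterator of unevenly sized strings (illustrative
data only):

    cb = chunkbuffer(iter(['ab', 'cde', 'f']))
    cb.read(4)  # -> 'abcd'
    cb.read(2)  # -> 'ef'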
1577 1544
1578 1545 def filechunkiter(f, size=65536, limit=None):
1579 1546 """Create a generator that produces the data in the file size
1580 1547 (default 65536) bytes at a time, up to optional limit (default is
1581 1548 to read all data). Chunks may be less than size bytes if the
1582 1549 chunk is the last chunk in the file, or the file is a socket or
1583 1550 some other type of file that sometimes reads less data than is
1584 1551 requested."""
1585 1552 assert size >= 0
1586 1553 assert limit is None or limit >= 0
1587 1554 while True:
1588 1555 if limit is None: nbytes = size
1589 1556 else: nbytes = min(limit, size)
1590 1557 s = nbytes and f.read(nbytes)
1591 1558 if not s: break
1592 1559 if limit: limit -= len(s)
1593 1560 yield s
1594 1561
1595 1562 def makedate():
1596 1563 lt = time.localtime()
1597 1564 if lt[8] == 1 and time.daylight:
1598 1565 tz = time.altzone
1599 1566 else:
1600 1567 tz = time.timezone
1601 1568 return time.mktime(lt), tz
1602 1569
1603 1570 def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
1604 1571 """represent a (unixtime, offset) tuple as a localized time.
1605 1572 unixtime is seconds since the epoch, and offset is the time zone's
1606 1573 number of seconds away from UTC. if timezone is false, do not
1607 1574 append time zone to string."""
1608 1575 t, tz = date or makedate()
1609 1576 if "%1" in format or "%2" in format:
1610 1577 sign = (tz > 0) and "-" or "+"
1611 1578 minutes = abs(tz) / 60
1612 1579 format = format.replace("%1", "%c%02d" % (sign, minutes / 60))
1613 1580 format = format.replace("%2", "%02d" % (minutes % 60))
1614 1581 s = time.strftime(format, time.gmtime(float(t) - tz))
1615 1582 return s
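
For instance (offset is seconds west of UTC, so a negative offset renders as a
zone east of UTC):

    datestr((0, 0))      # -> 'Thu Jan 01 00:00:00 1970 +0000'
    datestr((0, -3600))  # -> 'Thu Jan 01 01:00:00 1970 +0100'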
1616 1583
1617 1584 def shortdate(date=None):
1618 1585         """turn (timestamp, tzoff) tuple into an ISO 8601 date."""
1619 1586 return datestr(date, format='%Y-%m-%d')
1620 1587
1621 1588 def strdate(string, format, defaults=[]):
1622 1589 """parse a localized time string and return a (unixtime, offset) tuple.
1623 1590 if the string cannot be parsed, ValueError is raised."""
1624 1591 def timezone(string):
1625 1592 tz = string.split()[-1]
1626 1593 if tz[0] in "+-" and len(tz) == 5 and tz[1:].isdigit():
1627 1594 sign = (tz[0] == "+") and 1 or -1
1628 1595 hours = int(tz[1:3])
1629 1596 minutes = int(tz[3:5])
1630 1597 return -sign * (hours * 60 + minutes) * 60
1631 1598 if tz == "GMT" or tz == "UTC":
1632 1599 return 0
1633 1600 return None
1634 1601
1635 1602 # NOTE: unixtime = localunixtime + offset
1636 1603 offset, date = timezone(string), string
1637 1604     if offset is not None:
1638 1605 date = " ".join(string.split()[:-1])
1639 1606
1640 1607 # add missing elements from defaults
1641 1608 for part in defaults:
1642 1609 found = [True for p in part if ("%"+p) in format]
1643 1610 if not found:
1644 1611 date += "@" + defaults[part]
1645 1612 format += "@%" + part[0]
1646 1613
1647 1614 timetuple = time.strptime(date, format)
1648 1615 localunixtime = int(calendar.timegm(timetuple))
1649 1616 if offset is None:
1650 1617 # local timezone
1651 1618 unixtime = int(time.mktime(timetuple))
1652 1619 offset = unixtime - localunixtime
1653 1620 else:
1654 1621 unixtime = localunixtime + offset
1655 1622 return unixtime, offset
1656 1623
1657 1624 def parsedate(date, formats=None, defaults=None):
1658 1625 """parse a localized date/time string and return a (unixtime, offset) tuple.
1659 1626
1660 1627 The date may be a "unixtime offset" string or in one of the specified
1661 1628 formats. If the date already is a (unixtime, offset) tuple, it is returned.
1662 1629 """
1663 1630 if not date:
1664 1631 return 0, 0
1665 1632 if isinstance(date, tuple) and len(date) == 2:
1666 1633 return date
1667 1634 if not formats:
1668 1635 formats = defaultdateformats
1669 1636 date = date.strip()
1670 1637 try:
1671 1638 when, offset = map(int, date.split(' '))
1672 1639 except ValueError:
1673 1640 # fill out defaults
1674 1641 if not defaults:
1675 1642 defaults = {}
1676 1643 now = makedate()
1677 1644 for part in "d mb yY HI M S".split():
1678 1645 if part not in defaults:
1679 1646 if part[0] in "HMS":
1680 1647 defaults[part] = "00"
1681 1648 else:
1682 1649 defaults[part] = datestr(now, "%" + part[0])
1683 1650
1684 1651 for format in formats:
1685 1652 try:
1686 1653 when, offset = strdate(date, format, defaults)
1687 1654 except (ValueError, OverflowError):
1688 1655 pass
1689 1656 else:
1690 1657 break
1691 1658 else:
1692 1659         raise Abort(_('invalid date: %r') % date)
1693 1660 # validate explicit (probably user-specified) date and
1694 1661 # time zone offset. values must fit in signed 32 bits for
1695 1662 # current 32-bit linux runtimes. timezones go from UTC-12
1696 1663 # to UTC+14
1697 1664 if abs(when) > 0x7fffffff:
1698 1665 raise Abort(_('date exceeds 32 bits: %d') % when)
1699 1666 if offset < -50400 or offset > 43200:
1700 1667 raise Abort(_('impossible time zone offset: %d') % offset)
1701 1668 return when, offset
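
Illustrative calls; the second form takes the fast "unixtime offset" path:

    parsedate('')              # -> (0, 0)
    parsedate('1165432709 0')  # -> (1165432709, 0)
    parsedate((1165432709, 0)) # tuples are returned unchanged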
1702 1669
1703 1670 def matchdate(date):
1704 1671 """Return a function that matches a given date match specifier
1705 1672
1706 1673 Formats include:
1707 1674
1708 1675 '{date}' match a given date to the accuracy provided
1709 1676
1710 1677 '<{date}' on or before a given date
1711 1678
1712 1679 '>{date}' on or after a given date
1713 1680
1714 1681 """
1715 1682
1716 1683 def lower(date):
1717 1684 d = dict(mb="1", d="1")
1718 1685 return parsedate(date, extendeddateformats, d)[0]
1719 1686
1720 1687 def upper(date):
1721 1688 d = dict(mb="12", HI="23", M="59", S="59")
1722 1689 for days in "31 30 29".split():
1723 1690 try:
1724 1691 d["d"] = days
1725 1692 return parsedate(date, extendeddateformats, d)[0]
1726 1693 except:
1727 1694 pass
1728 1695 d["d"] = "28"
1729 1696 return parsedate(date, extendeddateformats, d)[0]
1730 1697
1731 1698 if date[0] == "<":
1732 1699 when = upper(date[1:])
1733 1700 return lambda x: x <= when
1734 1701 elif date[0] == ">":
1735 1702 when = lower(date[1:])
1736 1703 return lambda x: x >= when
1737 1704 elif date[0] == "-":
1738 1705 try:
1739 1706 days = int(date[1:])
1740 1707 except ValueError:
1741 1708 raise Abort(_("invalid day spec: %s") % date[1:])
1742 1709 when = makedate()[0] - days * 3600 * 24
1743 1710 return lambda x: x >= when
1744 1711 elif " to " in date:
1745 1712 a, b = date.split(" to ")
1746 1713 start, stop = lower(a), upper(b)
1747 1714 return lambda x: x >= start and x <= stop
1748 1715 else:
1749 1716 start, stop = lower(date), upper(date)
1750 1717 return lambda x: x >= start and x <= stop
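
For instance, the '-{days}' branch above yields a predicate over unixtimes:

    m = matchdate('-7')   # changesets from the last seven days
    m(makedate()[0])      # -> True, "now" falls inside the window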
1751 1718
1752 1719 def shortuser(user):
1753 1720 """Return a short representation of a user name or email address."""
1754 1721 f = user.find('@')
1755 1722 if f >= 0:
1756 1723 user = user[:f]
1757 1724 f = user.find('<')
1758 1725 if f >= 0:
1759 1726 user = user[f+1:]
1760 1727 f = user.find(' ')
1761 1728 if f >= 0:
1762 1729 user = user[:f]
1763 1730 f = user.find('.')
1764 1731 if f >= 0:
1765 1732 user = user[:f]
1766 1733 return user
1767 1734
1768 1735 def email(author):
1769 1736 '''get email of author.'''
1770 1737 r = author.find('>')
1771 1738 if r == -1: r = None
1772 1739 return author[author.find('<')+1:r]
1773 1740
1774 1741 def ellipsis(text, maxlength=400):
1775 1742 """Trim string to at most maxlength (default: 400) characters."""
1776 1743 if len(text) <= maxlength:
1777 1744 return text
1778 1745 else:
1779 1746 return "%s..." % (text[:maxlength-3])
1780 1747
1781 1748 def walkrepos(path, followsym=False, seen_dirs=None):
1782 1749 '''yield every hg repository under path, recursively.'''
1783 1750 def errhandler(err):
1784 1751 if err.filename == path:
1785 1752 raise err
1786 1753 if followsym and hasattr(os.path, 'samestat'):
1787 1754 def _add_dir_if_not_there(dirlst, dirname):
1788 1755 match = False
1789 1756 samestat = os.path.samestat
1790 1757 dirstat = os.stat(dirname)
1791 1758 for lstdirstat in dirlst:
1792 1759 if samestat(dirstat, lstdirstat):
1793 1760 match = True
1794 1761 break
1795 1762 if not match:
1796 1763 dirlst.append(dirstat)
1797 1764 return not match
1798 1765 else:
1799 1766 followsym = False
1800 1767
1801 1768 if (seen_dirs is None) and followsym:
1802 1769 seen_dirs = []
1803 1770 _add_dir_if_not_there(seen_dirs, path)
1804 1771 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
1805 1772 if '.hg' in dirs:
1806 1773 dirs[:] = [] # don't descend further
1807 1774 yield root # found a repository
1808 1775 qroot = os.path.join(root, '.hg', 'patches')
1809 1776 if os.path.isdir(os.path.join(qroot, '.hg')):
1810 1777 yield qroot # we have a patch queue repo here
1811 1778 elif followsym:
1812 1779 newdirs = []
1813 1780 for d in dirs:
1814 1781 fname = os.path.join(root, d)
1815 1782 if _add_dir_if_not_there(seen_dirs, fname):
1816 1783 if os.path.islink(fname):
1817 1784 for hgname in walkrepos(fname, True, seen_dirs):
1818 1785 yield hgname
1819 1786 else:
1820 1787 newdirs.append(d)
1821 1788 dirs[:] = newdirs
1822 1789
1823 1790 _rcpath = None
1824 1791
1825 1792 def os_rcpath():
1826 1793 '''return default os-specific hgrc search path'''
1827 1794 path = system_rcpath()
1828 1795 path.extend(user_rcpath())
1829 1796 path = [os.path.normpath(f) for f in path]
1830 1797 return path
1831 1798
1832 1799 def rcpath():
1833 1800 '''return hgrc search path. if env var HGRCPATH is set, use it.
1834 1801 for each item in path, if directory, use files ending in .rc,
1835 1802 else use item.
1836 1803 make HGRCPATH empty to only look in .hg/hgrc of current repo.
1837 1804 if no HGRCPATH, use default os-specific path.'''
1838 1805 global _rcpath
1839 1806 if _rcpath is None:
1840 1807 if 'HGRCPATH' in os.environ:
1841 1808 _rcpath = []
1842 1809 for p in os.environ['HGRCPATH'].split(os.pathsep):
1843 1810 if not p: continue
1844 1811 if os.path.isdir(p):
1845 1812 for f, kind in osutil.listdir(p):
1846 1813 if f.endswith('.rc'):
1847 1814 _rcpath.append(os.path.join(p, f))
1848 1815 else:
1849 1816 _rcpath.append(p)
1850 1817 else:
1851 1818 _rcpath = os_rcpath()
1852 1819 return _rcpath
1853 1820
1854 1821 def bytecount(nbytes):
1855 1822 '''return byte count formatted as readable string, with units'''
1856 1823
1857 1824 units = (
1858 1825 (100, 1<<30, _('%.0f GB')),
1859 1826 (10, 1<<30, _('%.1f GB')),
1860 1827 (1, 1<<30, _('%.2f GB')),
1861 1828 (100, 1<<20, _('%.0f MB')),
1862 1829 (10, 1<<20, _('%.1f MB')),
1863 1830 (1, 1<<20, _('%.2f MB')),
1864 1831 (100, 1<<10, _('%.0f KB')),
1865 1832 (10, 1<<10, _('%.1f KB')),
1866 1833 (1, 1<<10, _('%.2f KB')),
1867 1834 (1, 1, _('%.0f bytes')),
1868 1835 )
1869 1836
1870 1837 for multiplier, divisor, format in units:
1871 1838 if nbytes >= divisor * multiplier:
1872 1839 return format % (nbytes / float(divisor))
1873 1840 return units[-1][2] % nbytes
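
Sample outputs, read straight off the unit table above:

    bytecount(0)        # -> '0 bytes'
    bytecount(2252)     # -> '2.20 KB'
    bytecount(5 << 20)  # -> '5.00 MB'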
1874 1841
1875 1842 def drop_scheme(scheme, path):
1876 1843 sc = scheme + ':'
1877 1844 if path.startswith(sc):
1878 1845 path = path[len(sc):]
1879 1846 if path.startswith('//'):
1880 1847 path = path[2:]
1881 1848 return path
1882 1849
1883 1850 def uirepr(s):
1884 1851 # Avoid double backslash in Windows path repr()
1885 1852 return repr(s).replace('\\\\', '\\')
1886 1853
1887 1854 def hidepassword(url):
1888 1855 '''hide user credential in a url string'''
1889 1856 scheme, netloc, path, params, query, fragment = urlparse.urlparse(url)
1890 1857 netloc = re.sub('([^:]*):([^@]*)@(.*)', r'\1:***@\3', netloc)
1891 1858 return urlparse.urlunparse((scheme, netloc, path, params, query, fragment))
1892 1859
1893 1860 def removeauth(url):
1894 1861 '''remove all authentication information from a url string'''
1895 1862 scheme, netloc, path, params, query, fragment = urlparse.urlparse(url)
1896 1863 netloc = netloc[netloc.find('@')+1:]
1897 1864 return urlparse.urlunparse((scheme, netloc, path, params, query, fragment))
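
With a hypothetical URL:

    hidepassword('http://alice:secret@example.com/repo')
    # -> 'http://alice:***@example.com/repo'
    removeauth('http://alice:secret@example.com/repo')
    # -> 'http://example.com/repo'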