Merge with crew-stable
Martin Geisler
r9482:ca3390c1 merge default
@@ -1,2172 +1,2171 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2, incorporated herein by reference.
7 7
8 8 from node import bin, hex, nullid, nullrev, short
9 9 from i18n import _
10 10 import repo, changegroup, subrepo
11 11 import changelog, dirstate, filelog, manifest, context
12 12 import lock, transaction, store, encoding
13 13 import util, extensions, hook, error
14 14 import match as match_
15 15 import merge as merge_
16 16 import tags as tags_
17 17 from lock import release
18 18 import weakref, stat, errno, os, time, inspect
19 19 propertycache = util.propertycache
20 20
21 21 class localrepository(repo.repository):
22 22 capabilities = set(('lookup', 'changegroupsubset', 'branchmap'))
23 23 supported = set('revlogv1 store fncache shared'.split())
24 24
25 25 def __init__(self, baseui, path=None, create=0):
26 26 repo.repository.__init__(self)
27 27 self.root = os.path.realpath(path)
28 28 self.path = os.path.join(self.root, ".hg")
29 29 self.origroot = path
30 30 self.opener = util.opener(self.path)
31 31 self.wopener = util.opener(self.root)
32 32 self.baseui = baseui
33 33 self.ui = baseui.copy()
34 34
35 35 try:
36 36 self.ui.readconfig(self.join("hgrc"), self.root)
37 37 extensions.loadall(self.ui)
38 38 except IOError:
39 39 pass
40 40
41 41 if not os.path.isdir(self.path):
42 42 if create:
43 43 if not os.path.exists(path):
44 44 os.mkdir(path)
45 45 os.mkdir(self.path)
46 46 requirements = ["revlogv1"]
47 47 if self.ui.configbool('format', 'usestore', True):
48 48 os.mkdir(os.path.join(self.path, "store"))
49 49 requirements.append("store")
50 50 if self.ui.configbool('format', 'usefncache', True):
51 51 requirements.append("fncache")
52 52 # create an invalid changelog
53 53 self.opener("00changelog.i", "a").write(
54 54 '\0\0\0\2' # represents revlogv2
55 55 ' dummy changelog to prevent using the old repo layout'
56 56 )
57 57 reqfile = self.opener("requires", "w")
58 58 for r in requirements:
59 59 reqfile.write("%s\n" % r)
60 60 reqfile.close()
61 61 else:
62 62 raise error.RepoError(_("repository %s not found") % path)
63 63 elif create:
64 64 raise error.RepoError(_("repository %s already exists") % path)
65 65 else:
66 66 # find requirements
67 67 requirements = set()
68 68 try:
69 69 requirements = set(self.opener("requires").read().splitlines())
70 70 except IOError, inst:
71 71 if inst.errno != errno.ENOENT:
72 72 raise
73 73 for r in requirements - self.supported:
74 74 raise error.RepoError(_("requirement '%s' not supported") % r)
75 75
76 76 self.sharedpath = self.path
77 77 try:
78 78 s = os.path.realpath(self.opener("sharedpath").read())
79 79 if not os.path.exists(s):
80 80 raise error.RepoError(
81 81 _('.hg/sharedpath points to nonexistent directory %s') % s)
82 82 self.sharedpath = s
83 83 except IOError, inst:
84 84 if inst.errno != errno.ENOENT:
85 85 raise
86 86
87 87 self.store = store.store(requirements, self.sharedpath, util.opener)
88 88 self.spath = self.store.path
89 89 self.sopener = self.store.opener
90 90 self.sjoin = self.store.join
91 91 self.opener.createmode = self.store.createmode
92 92
93 93 # These two define the set of tags for this repository. _tags
94 94 # maps tag name to node; _tagtypes maps tag name to 'global' or
95 95 # 'local'. (Global tags are defined by .hgtags across all
96 96 # heads, and local tags are defined in .hg/localtags.) They
97 97 # constitute the in-memory cache of tags.
98 98 self._tags = None
99 99 self._tagtypes = None
100 100
101 101 self.branchcache = None
102 102 self._ubranchcache = None # UTF-8 version of branchcache
103 103 self._branchcachetip = None
104 104 self.nodetagscache = None
105 105 self.filterpats = {}
106 106 self._datafilters = {}
107 107 self._transref = self._lockref = self._wlockref = None
108 108
109 109 @propertycache
110 110 def changelog(self):
111 111 c = changelog.changelog(self.sopener)
112 112 if 'HG_PENDING' in os.environ:
113 113 p = os.environ['HG_PENDING']
114 114 if p.startswith(self.root):
115 115 c.readpending('00changelog.i.a')
116 116 self.sopener.defversion = c.version
117 117 return c
118 118
119 119 @propertycache
120 120 def manifest(self):
121 121 return manifest.manifest(self.sopener)
122 122
123 123 @propertycache
124 124 def dirstate(self):
125 125 return dirstate.dirstate(self.opener, self.ui, self.root)
126 126
127 127 def __getitem__(self, changeid):
128 128 if changeid is None:
129 129 return context.workingctx(self)
130 130 return context.changectx(self, changeid)
131 131
132 132 def __nonzero__(self):
133 133 return True
134 134
135 135 def __len__(self):
136 136 return len(self.changelog)
137 137
138 138 def __iter__(self):
139 139 for i in xrange(len(self)):
140 140 yield i
141 141
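# A minimal usage sketch (assumes an open repo object): __len__, __iter__
# and __getitem__ together make the repository iterable by revision number:
#
#   for rev in repo:         # yields 0, 1, ..., len(repo) - 1
#       ctx = repo[rev]      # changectx for that revision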
142 142 def url(self):
143 143 return 'file:' + self.root
144 144
145 145 def hook(self, name, throw=False, **args):
146 146 return hook.hook(self.ui, self, name, throw, **args)
147 147
148 148 tag_disallowed = ':\r\n'
149 149
150 150 def _tag(self, names, node, message, local, user, date, extra={}):
151 151 if isinstance(names, str):
152 152 allchars = names
153 153 names = (names,)
154 154 else:
155 155 allchars = ''.join(names)
156 156 for c in self.tag_disallowed:
157 157 if c in allchars:
158 158 raise util.Abort(_('%r cannot be used in a tag name') % c)
159 159
160 160 for name in names:
161 161 self.hook('pretag', throw=True, node=hex(node), tag=name,
162 162 local=local)
163 163
164 164 def writetags(fp, names, munge, prevtags):
165 165 fp.seek(0, 2)
166 166 if prevtags and prevtags[-1] != '\n':
167 167 fp.write('\n')
168 168 for name in names:
169 169 m = munge and munge(name) or name
170 170 if self._tagtypes and name in self._tagtypes:
171 171 old = self._tags.get(name, nullid)
172 172 fp.write('%s %s\n' % (hex(old), m))
173 173 fp.write('%s %s\n' % (hex(node), m))
174 174 fp.close()
175 175
176 176 prevtags = ''
177 177 if local:
178 178 try:
179 179 fp = self.opener('localtags', 'r+')
180 180 except IOError:
181 181 fp = self.opener('localtags', 'a')
182 182 else:
183 183 prevtags = fp.read()
184 184
185 185 # local tags are stored in the current charset
186 186 writetags(fp, names, None, prevtags)
187 187 for name in names:
188 188 self.hook('tag', node=hex(node), tag=name, local=local)
189 189 return
190 190
191 191 try:
192 192 fp = self.wfile('.hgtags', 'rb+')
193 193 except IOError:
194 194 fp = self.wfile('.hgtags', 'ab')
195 195 else:
196 196 prevtags = fp.read()
197 197
198 198 # committed tags are stored in UTF-8
199 199 writetags(fp, names, encoding.fromlocal, prevtags)
200 200
201 201 if '.hgtags' not in self.dirstate:
202 202 self.add(['.hgtags'])
203 203
204 204 m = match_.exact(self.root, '', ['.hgtags'])
205 205 tagnode = self.commit(message, user, date, extra=extra, match=m)
206 206
207 207 for name in names:
208 208 self.hook('tag', node=hex(node), tag=name, local=local)
209 209
210 210 return tagnode
211 211
212 212 def tag(self, names, node, message, local, user, date):
213 213 '''tag a revision with one or more symbolic names.
214 214
215 215 names is a list of strings or, when adding a single tag, names may be a
216 216 string.
217 217
218 218 if local is True, the tags are stored in a per-repository file.
219 219 otherwise, they are stored in the .hgtags file, and a new
220 220 changeset is committed with the change.
221 221
222 222 keyword arguments:
223 223
224 224 local: whether to store tags in non-version-controlled file
225 225 (default False)
226 226
227 227 message: commit message to use if committing
228 228
229 229 user: name of user to use if committing
230 230
231 231 date: date tuple to use if committing'''
232 232
233 233 for x in self.status()[:5]:
234 234 if '.hgtags' in x:
235 235 raise util.Abort(_('working copy of .hgtags is changed '
236 236 '(please commit .hgtags manually)'))
237 237
238 238 self.tags() # instantiate the cache
239 239 self._tag(names, node, message, local, user, date)
240 240
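# Illustrative sketch, assuming an open repo object ('v1.0' and 'wip' are
# hypothetical tag names): names may be a single string or a list, and the
# local flag selects .hg/localtags over a committed .hgtags change:
#
#   node = repo.lookup('tip')
#   repo.tag(['v1.0'], node, 'Added tag v1.0', False, None, None)  # global
#   repo.tag('wip', node, 'work in progress', True, None, None)    # local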
241 241 def tags(self):
242 242 '''return a mapping of tag to node'''
243 243 if self._tags is None:
244 244 (self._tags, self._tagtypes) = self._findtags()
245 245
246 246 return self._tags
247 247
248 248 def _findtags(self):
249 249 '''Do the hard work of finding tags. Return a pair of dicts
250 250 (tags, tagtypes) where tags maps tag name to node, and tagtypes
251 251 maps tag name to a string like \'global\' or \'local\'.
252 252 Subclasses or extensions are free to add their own tags, but
253 253 should be aware that the returned dicts will be retained for the
254 254 duration of the localrepo object.'''
255 255
256 256 # XXX what tagtype should subclasses/extensions use? Currently
257 257 # mq and bookmarks add tags, but do not set the tagtype at all.
258 258 # Should each extension invent its own tag type? Should there
259 259 # be one tagtype for all such "virtual" tags? Or is the status
260 260 # quo fine?
261 261
262 262 alltags = {} # map tag name to (node, hist)
263 263 tagtypes = {}
264 264
265 265 tags_.findglobaltags(self.ui, self, alltags, tagtypes)
266 266 tags_.readlocaltags(self.ui, self, alltags, tagtypes)
267 267
268 268 # Build the return dicts. Have to re-encode tag names because
269 269 # the tags module always uses UTF-8 (in order not to lose info
270 270 # writing to the cache), but the rest of Mercurial wants them in
271 271 # local encoding.
272 272 tags = {}
273 273 for (name, (node, hist)) in alltags.iteritems():
274 274 if node != nullid:
275 275 tags[encoding.tolocal(name)] = node
276 276 tags['tip'] = self.changelog.tip()
277 277 tagtypes = dict([(encoding.tolocal(name), value)
278 278 for (name, value) in tagtypes.iteritems()])
279 279 return (tags, tagtypes)
280 280
281 281 def tagtype(self, tagname):
282 282 '''
283 283 return the type of the given tag. result can be:
284 284
285 285 'local' : a local tag
286 286 'global' : a global tag
287 287 None : tag does not exist
288 288 '''
289 289
290 290 self.tags()
291 291
292 292 return self._tagtypes.get(tagname)
293 293
294 294 def tagslist(self):
295 295 '''return a list of tags ordered by revision'''
296 296 l = []
297 297 for t, n in self.tags().iteritems():
298 298 try:
299 299 r = self.changelog.rev(n)
300 300 except:
301 301 r = -2 # sort to the beginning of the list if unknown
302 302 l.append((r, t, n))
303 303 return [(t, n) for r, t, n in sorted(l)]
304 304
305 305 def nodetags(self, node):
306 306 '''return the tags associated with a node'''
307 307 if not self.nodetagscache:
308 308 self.nodetagscache = {}
309 309 for t, n in self.tags().iteritems():
310 310 self.nodetagscache.setdefault(n, []).append(t)
311 311 return self.nodetagscache.get(node, [])
312 312
313 313 def _branchtags(self, partial, lrev):
314 314 # TODO: rename this function?
315 315 tiprev = len(self) - 1
316 316 if lrev != tiprev:
317 317 self._updatebranchcache(partial, lrev+1, tiprev+1)
318 318 self._writebranchcache(partial, self.changelog.tip(), tiprev)
319 319
320 320 return partial
321 321
322 322 def branchmap(self):
323 323 tip = self.changelog.tip()
324 324 if self.branchcache is not None and self._branchcachetip == tip:
325 325 return self.branchcache
326 326
327 327 oldtip = self._branchcachetip
328 328 self._branchcachetip = tip
329 329 if self.branchcache is None:
330 330 self.branchcache = {} # avoid recursion in changectx
331 331 else:
332 332 self.branchcache.clear() # keep using the same dict
333 333 if oldtip is None or oldtip not in self.changelog.nodemap:
334 334 partial, last, lrev = self._readbranchcache()
335 335 else:
336 336 lrev = self.changelog.rev(oldtip)
337 337 partial = self._ubranchcache
338 338
339 339 self._branchtags(partial, lrev)
340 340 # this private cache holds all heads (not just tips)
341 341 self._ubranchcache = partial
342 342
343 343 # the branch cache is stored on disk as UTF-8, but in the local
344 344 # charset internally
345 345 for k, v in partial.iteritems():
346 346 self.branchcache[encoding.tolocal(k)] = v
347 347 return self.branchcache
348 348
349 349
350 350 def branchtags(self):
351 351 '''return a dict where branch names map to the tipmost head of
352 352 the branch; open heads come before closed'''
353 353 bt = {}
354 354 for bn, heads in self.branchmap().iteritems():
355 355 head = None
356 356 for i in range(len(heads)-1, -1, -1):
357 357 h = heads[i]
358 358 if 'close' not in self.changelog.read(h)[5]:
359 359 head = h
360 360 break
361 361 # no open heads were found
362 362 if head is None:
363 363 head = heads[-1]
364 364 bt[bn] = head
365 365 return bt
366 366
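# Worked sketch of the selection above (heads are hypothetical): if a
# branch's heads are [h1, h2, h3], ordered oldest to newest, and h3 has
# 'close' in its changeset extra, the loop checks h3, then h2; h2 is open,
# so bt[branch] = h2. Only when every head is closed does the branch fall
# back to heads[-1], the newest closed head.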
367 367
368 368 def _readbranchcache(self):
369 369 partial = {}
370 370 try:
371 371 f = self.opener("branchheads.cache")
372 372 lines = f.read().split('\n')
373 373 f.close()
374 374 except (IOError, OSError):
375 375 return {}, nullid, nullrev
376 376
377 377 try:
378 378 last, lrev = lines.pop(0).split(" ", 1)
379 379 last, lrev = bin(last), int(lrev)
380 380 if lrev >= len(self) or self[lrev].node() != last:
381 381 # invalidate the cache
382 382 raise ValueError('invalidating branch cache (tip differs)')
383 383 for l in lines:
384 384 if not l: continue
385 385 node, label = l.split(" ", 1)
386 386 partial.setdefault(label.strip(), []).append(bin(node))
387 387 except KeyboardInterrupt:
388 388 raise
389 389 except Exception, inst:
390 390 if self.ui.debugflag:
391 391 self.ui.warn(str(inst), '\n')
392 392 partial, last, lrev = {}, nullid, nullrev
393 393 return partial, last, lrev
394 394
395 395 def _writebranchcache(self, branches, tip, tiprev):
396 396 try:
397 397 f = self.opener("branchheads.cache", "w", atomictemp=True)
398 398 f.write("%s %s\n" % (hex(tip), tiprev))
399 399 for label, nodes in branches.iteritems():
400 400 for node in nodes:
401 401 f.write("%s %s\n" % (hex(node), label))
402 402 f.rename()
403 403 except (IOError, OSError):
404 404 pass
405 405
406 406 def _updatebranchcache(self, partial, start, end):
407 407 # collect new branch entries
408 408 newbranches = {}
409 409 for r in xrange(start, end):
410 410 c = self[r]
411 411 newbranches.setdefault(c.branch(), []).append(c.node())
412 412 # if older branchheads are reachable from new ones, they aren't
413 413 # really branchheads. Note checking parents is insufficient:
414 414 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
415 415 for branch, newnodes in newbranches.iteritems():
416 416 bheads = partial.setdefault(branch, [])
417 417 bheads.extend(newnodes)
418 418 if len(bheads) < 2:
419 419 continue
420 420 newbheads = []
421 421 # starting from tip means fewer passes over reachable
422 422 while newnodes:
423 423 latest = newnodes.pop()
424 424 if latest not in bheads:
425 425 continue
426 426 minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
427 427 reachable = self.changelog.reachable(latest, minbhrev)
428 428 bheads = [b for b in bheads if b not in reachable]
429 429 newbheads.insert(0, latest)
430 430 bheads.extend(newbheads)
431 431 partial[branch] = bheads
432 432
433 433 def lookup(self, key):
434 434 if isinstance(key, int):
435 435 return self.changelog.node(key)
436 436 elif key == '.':
437 437 return self.dirstate.parents()[0]
438 438 elif key == 'null':
439 439 return nullid
440 440 elif key == 'tip':
441 441 return self.changelog.tip()
442 442 n = self.changelog._match(key)
443 443 if n:
444 444 return n
445 445 if key in self.tags():
446 446 return self.tags()[key]
447 447 if key in self.branchtags():
448 448 return self.branchtags()[key]
449 449 n = self.changelog._partialmatch(key)
450 450 if n:
451 451 return n
452 452
453 453 # can't find key, check if it might have come from damaged dirstate
454 454 if key in self.dirstate.parents():
455 455 raise error.Abort(_("working directory has unknown parent '%s'!")
456 456 % short(key))
457 457 try:
458 458 if len(key) == 20:
459 459 key = hex(key)
460 460 except:
461 461 pass
462 462 raise error.RepoLookupError(_("unknown revision '%s'") % key)
463 463
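# The resolution order above, summarized as illustrative calls (the node
# prefix is hypothetical):
#
#   repo.lookup(0)         # integer -> changelog.node(0)
#   repo.lookup('.')       # first dirstate parent
#   repo.lookup('null')    # nullid
#   repo.lookup('tip')     # changelog tip
#   repo.lookup('a1b2c3')  # then exact match, tag name, branch name, and
#                          # finally an unambiguous node prefix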
464 464 def local(self):
465 465 return True
466 466
467 467 def join(self, f):
468 468 return os.path.join(self.path, f)
469 469
470 470 def wjoin(self, f):
471 471 return os.path.join(self.root, f)
472 472
473 473 def rjoin(self, f):
474 474 return os.path.join(self.root, util.pconvert(f))
475 475
476 476 def file(self, f):
477 477 if f[0] == '/':
478 478 f = f[1:]
479 479 return filelog.filelog(self.sopener, f)
480 480
481 481 def changectx(self, changeid):
482 482 return self[changeid]
483 483
484 484 def parents(self, changeid=None):
485 485 '''get list of changectxs for parents of changeid'''
486 486 return self[changeid].parents()
487 487
488 488 def filectx(self, path, changeid=None, fileid=None):
489 489 """changeid can be a changeset revision, node, or tag.
490 490 fileid can be a file revision or node."""
491 491 return context.filectx(self, path, changeid, fileid)
492 492
493 493 def getcwd(self):
494 494 return self.dirstate.getcwd()
495 495
496 496 def pathto(self, f, cwd=None):
497 497 return self.dirstate.pathto(f, cwd)
498 498
499 499 def wfile(self, f, mode='r'):
500 500 return self.wopener(f, mode)
501 501
502 502 def _link(self, f):
503 503 return os.path.islink(self.wjoin(f))
504 504
505 505 def _filter(self, filter, filename, data):
506 506 if filter not in self.filterpats:
507 507 l = []
508 508 for pat, cmd in self.ui.configitems(filter):
509 509 if cmd == '!':
510 510 continue
511 511 mf = match_.match(self.root, '', [pat])
512 512 fn = None
513 513 params = cmd
514 514 for name, filterfn in self._datafilters.iteritems():
515 515 if cmd.startswith(name):
516 516 fn = filterfn
517 517 params = cmd[len(name):].lstrip()
518 518 break
519 519 if not fn:
520 520 fn = lambda s, c, **kwargs: util.filter(s, c)
521 521 # Wrap old filters not supporting keyword arguments
522 522 if not inspect.getargspec(fn)[2]:
523 523 oldfn = fn
524 524 fn = lambda s, c, **kwargs: oldfn(s, c)
525 525 l.append((mf, fn, params))
526 526 self.filterpats[filter] = l
527 527
528 528 for mf, fn, cmd in self.filterpats[filter]:
529 529 if mf(filename):
530 530 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
531 531 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
532 532 break
533 533
534 534 return data
535 535
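# The patterns come from the hgrc section named after the filter ("encode"
# for wread, "decode" for wwrite/wwritedata). A minimal sketch, assuming a
# plain shell-command filter (pattern and command are hypothetical):
#
#   [encode]
#   *.txt = sed -e 's/\r$//'
#
# A command of '!' disables a pattern, and a command starting with a name
# registered through adddatafilter() dispatches to that Python filter
# instead of a shell pipe.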
536 536 def adddatafilter(self, name, filter):
537 537 self._datafilters[name] = filter
538 538
539 539 def wread(self, filename):
540 540 if self._link(filename):
541 541 data = os.readlink(self.wjoin(filename))
542 542 else:
543 543 data = self.wopener(filename, 'r').read()
544 544 return self._filter("encode", filename, data)
545 545
546 546 def wwrite(self, filename, data, flags):
547 547 data = self._filter("decode", filename, data)
548 548 try:
549 549 os.unlink(self.wjoin(filename))
550 550 except OSError:
551 551 pass
552 552 if 'l' in flags:
553 553 self.wopener.symlink(data, filename)
554 554 else:
555 555 self.wopener(filename, 'w').write(data)
556 556 if 'x' in flags:
557 557 util.set_flags(self.wjoin(filename), False, True)
558 558
559 559 def wwritedata(self, filename, data):
560 560 return self._filter("decode", filename, data)
561 561
562 562 def transaction(self):
563 563 tr = self._transref and self._transref() or None
564 564 if tr and tr.running():
565 565 return tr.nest()
566 566
567 567 # abort here if the journal already exists
568 568 if os.path.exists(self.sjoin("journal")):
569 569 raise error.RepoError(_("journal already exists - run hg recover"))
570 570
571 571 # save dirstate for rollback
572 572 try:
573 573 ds = self.opener("dirstate").read()
574 574 except IOError:
575 575 ds = ""
576 576 self.opener("journal.dirstate", "w").write(ds)
577 577 self.opener("journal.branch", "w").write(self.dirstate.branch())
578 578
579 579 renames = [(self.sjoin("journal"), self.sjoin("undo")),
580 580 (self.join("journal.dirstate"), self.join("undo.dirstate")),
581 581 (self.join("journal.branch"), self.join("undo.branch"))]
582 582 tr = transaction.transaction(self.ui.warn, self.sopener,
583 583 self.sjoin("journal"),
584 584 aftertrans(renames),
585 585 self.store.createmode)
586 586 self._transref = weakref.ref(tr)
587 587 return tr
588 588
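# Note on the rename list above: while the transaction runs, the journal
# files hold the pre-transaction state; aftertrans(renames) turns them into
# the undo files ("undo", "undo.dirstate", "undo.branch") on close, which
# is what rollback() below restores from.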
589 589 def recover(self):
590 590 lock = self.lock()
591 591 try:
592 592 if os.path.exists(self.sjoin("journal")):
593 593 self.ui.status(_("rolling back interrupted transaction\n"))
594 594 transaction.rollback(self.sopener, self.sjoin("journal"), self.ui.warn)
595 595 self.invalidate()
596 596 return True
597 597 else:
598 598 self.ui.warn(_("no interrupted transaction available\n"))
599 599 return False
600 600 finally:
601 601 lock.release()
602 602
603 603 def rollback(self):
604 604 wlock = lock = None
605 605 try:
606 606 wlock = self.wlock()
607 607 lock = self.lock()
608 608 if os.path.exists(self.sjoin("undo")):
609 609 self.ui.status(_("rolling back last transaction\n"))
610 610 transaction.rollback(self.sopener, self.sjoin("undo"), self.ui.warn)
611 611 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
612 612 try:
613 613 branch = self.opener("undo.branch").read()
614 614 self.dirstate.setbranch(branch)
615 615 except IOError:
616 616 self.ui.warn(_("Named branch could not be reset, "
617 617 "current branch still is: %s\n")
618 618 % encoding.tolocal(self.dirstate.branch()))
619 619 self.invalidate()
620 620 self.dirstate.invalidate()
621 621 self.destroyed()
622 622 else:
623 623 self.ui.warn(_("no rollback information available\n"))
624 624 finally:
625 625 release(lock, wlock)
626 626
627 627 def invalidate(self):
628 628 for a in "changelog manifest".split():
629 629 if a in self.__dict__:
630 630 delattr(self, a)
631 631 self._tags = None
632 632 self._tagtypes = None
633 633 self.nodetagscache = None
634 634 self.branchcache = None
635 635 self._ubranchcache = None
636 636 self._branchcachetip = None
637 637
638 638 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
639 639 try:
640 640 l = lock.lock(lockname, 0, releasefn, desc=desc)
641 641 except error.LockHeld, inst:
642 642 if not wait:
643 643 raise
644 644 self.ui.warn(_("waiting for lock on %s held by %r\n") %
645 645 (desc, inst.locker))
646 646 # default to 600 seconds timeout
647 647 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
648 648 releasefn, desc=desc)
649 649 if acquirefn:
650 650 acquirefn()
651 651 return l
652 652
653 653 def lock(self, wait=True):
654 654 '''Lock the repository store (.hg/store) and return a weak reference
655 655 to the lock. Use this before modifying the store (e.g. committing or
656 656 stripping). If you are opening a transaction, get a lock as well.'''
657 657 l = self._lockref and self._lockref()
658 658 if l is not None and l.held:
659 659 l.lock()
660 660 return l
661 661
662 662 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
663 663 _('repository %s') % self.origroot)
664 664 self._lockref = weakref.ref(l)
665 665 return l
666 666
667 667 def wlock(self, wait=True):
668 668 '''Lock the non-store parts of the repository (everything under
669 669 .hg except .hg/store) and return a weak reference to the lock.
670 670 Use this before modifying files in .hg.'''
671 671 l = self._wlockref and self._wlockref()
672 672 if l is not None and l.held:
673 673 l.lock()
674 674 return l
675 675
676 676 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
677 677 self.dirstate.invalidate, _('working directory of %s') %
678 678 self.origroot)
679 679 self._wlockref = weakref.ref(l)
680 680 return l
681 681
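# Acquisition order, as used by rollback() above and commit() below: when
# both locks are needed, wlock() is taken before lock(), keeping the
# working-directory lock outermost.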
682 682 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
683 683 """
684 684 commit an individual file as part of a larger transaction
685 685 """
686 686
687 687 fname = fctx.path()
688 688 text = fctx.data()
689 689 flog = self.file(fname)
690 690 fparent1 = manifest1.get(fname, nullid)
691 691 fparent2 = fparent2o = manifest2.get(fname, nullid)
692 692
693 693 meta = {}
694 694 copy = fctx.renamed()
695 695 if copy and copy[0] != fname:
696 696 # Mark the new revision of this file as a copy of another
697 697 # file. This copy data will effectively act as a parent
698 698 # of this new revision. If this is a merge, the first
699 699 # parent will be the nullid (meaning "look up the copy data")
700 700 # and the second one will be the other parent. For example:
701 701 #
702 702 # 0 --- 1 --- 3 rev1 changes file foo
703 703 # \ / rev2 renames foo to bar and changes it
704 704 # \- 2 -/ rev3 should have bar with all changes and
705 705 # should record that bar descends from
706 706 # bar in rev2 and foo in rev1
707 707 #
708 708 # this allows this merge to succeed:
709 709 #
710 710 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
711 711 # \ / merging rev3 and rev4 should use bar@rev2
712 712 # \- 2 --- 4 as the merge base
713 713 #
714 714
715 715 cfname = copy[0]
716 716 crev = manifest1.get(cfname)
717 717 newfparent = fparent2
718 718
719 719 if manifest2: # branch merge
720 720 if fparent2 == nullid or crev is None: # copied on remote side
721 721 if cfname in manifest2:
722 722 crev = manifest2[cfname]
723 723 newfparent = fparent1
724 724
725 725 # find source in nearest ancestor if we've lost track
726 726 if not crev:
727 727 self.ui.debug(" %s: searching for copy revision for %s\n" %
728 728 (fname, cfname))
729 729 for ancestor in self['.'].ancestors():
730 730 if cfname in ancestor:
731 731 crev = ancestor[cfname].filenode()
732 732 break
733 733
734 734 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
735 735 meta["copy"] = cfname
736 736 meta["copyrev"] = hex(crev)
737 737 fparent1, fparent2 = nullid, newfparent
738 738 elif fparent2 != nullid:
739 739 # is one parent an ancestor of the other?
740 740 fparentancestor = flog.ancestor(fparent1, fparent2)
741 741 if fparentancestor == fparent1:
742 742 fparent1, fparent2 = fparent2, nullid
743 743 elif fparentancestor == fparent2:
744 744 fparent2 = nullid
745 745
746 746 # is the file changed?
747 747 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
748 748 changelist.append(fname)
749 749 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
750 750
751 751 # are just the flags changed during merge?
752 752 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
753 753 changelist.append(fname)
754 754
755 755 return fparent1
756 756
757 757 def commit(self, text="", user=None, date=None, match=None, force=False,
758 758 editor=False, extra={}):
759 759 """Add a new revision to current repository.
760 760
761 761 Revision information is gathered from the working directory,
762 762 match can be used to filter the committed files. If editor is
763 763 supplied, it is called to get a commit message.
764 764 """
765 765
766 766 def fail(f, msg):
767 767 raise util.Abort('%s: %s' % (f, msg))
768 768
769 769 if not match:
770 770 match = match_.always(self.root, '')
771 771
772 772 if not force:
773 773 vdirs = []
774 774 match.dir = vdirs.append
775 775 match.bad = fail
776 776
777 777 wlock = self.wlock()
778 778 try:
779 779 p1, p2 = self.dirstate.parents()
780 780 wctx = self[None]
781 781
782 782 if (not force and p2 != nullid and match and
783 783 (match.files() or match.anypats())):
784 784 raise util.Abort(_('cannot partially commit a merge '
785 785 '(do not specify files or patterns)'))
786 786
787 787 changes = self.status(match=match, clean=force)
788 788 if force:
789 789 changes[0].extend(changes[6]) # mq may commit unchanged files
790 790
791 791 # check subrepos
792 792 subs = []
793 793 for s in wctx.substate:
794 794 if match(s) and wctx.sub(s).dirty():
795 795 subs.append(s)
796 796 if subs and '.hgsubstate' not in changes[0]:
797 797 changes[0].insert(0, '.hgsubstate')
798 798
799 799 # make sure all explicit patterns are matched
800 800 if not force and match.files():
801 801 matched = set(changes[0] + changes[1] + changes[2])
802 802
803 803 for f in match.files():
804 804 if f == '.' or f in matched or f in wctx.substate:
805 805 continue
806 806 if f in changes[3]: # missing
807 807 fail(f, _('file not found!'))
808 808 if f in vdirs: # visited directory
809 809 d = f + '/'
810 810 for mf in matched:
811 811 if mf.startswith(d):
812 812 break
813 813 else:
814 814 fail(f, _("no match under directory!"))
815 815 elif f not in self.dirstate:
816 816 fail(f, _("file not tracked!"))
817 817
818 818 if (not force and not extra.get("close") and p2 == nullid
819 819 and not (changes[0] or changes[1] or changes[2])
820 820 and self[None].branch() == self['.'].branch()):
821 821 return None
822 822
823 823 ms = merge_.mergestate(self)
824 824 for f in changes[0]:
825 825 if f in ms and ms[f] == 'u':
826 826 raise util.Abort(_("unresolved merge conflicts "
827 827 "(see hg resolve)"))
828 828
829 829 cctx = context.workingctx(self, (p1, p2), text, user, date,
830 830 extra, changes)
831 831 if editor:
832 832 cctx._text = editor(self, cctx, subs)
833 833
834 834 # commit subs
835 835 if subs:
836 836 state = wctx.substate.copy()
837 837 for s in subs:
838 838 self.ui.status(_('committing subrepository %s\n') % s)
839 839 sr = wctx.sub(s).commit(cctx._text, user, date)
840 840 state[s] = (state[s][0], sr)
841 841 subrepo.writestate(self, state)
842 842
843 843 ret = self.commitctx(cctx, True)
844 844
845 845 # update dirstate and mergestate
846 846 for f in changes[0] + changes[1]:
847 847 self.dirstate.normal(f)
848 848 for f in changes[2]:
849 849 self.dirstate.forget(f)
850 850 self.dirstate.setparents(ret)
851 851 ms.reset()
852 852
853 853 return ret
854 854
855 855 finally:
856 856 wlock.release()
857 857
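# Illustrative sketch, assuming an open repo object (message and user are
# hypothetical); match narrows the commit to specific files, as _tag()
# above does for .hgtags:
#
#   node = repo.commit('fix typo', user='alice')    # commit all changes
#   m = match_.exact(repo.root, '', ['README'])
#   node = repo.commit('tweak README', user='alice', match=m)
#
# The new changeset node is returned, or None when there is nothing to
# commit.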
858 858 def commitctx(self, ctx, error=False):
859 859 """Add a new revision to current repository.
860 860
861 861 Revision information is passed via the context argument.
862 862 """
863 863
864 864 tr = lock = None
865 865 removed = ctx.removed()
866 866 p1, p2 = ctx.p1(), ctx.p2()
867 867 m1 = p1.manifest().copy()
868 868 m2 = p2.manifest()
869 869 user = ctx.user()
870 870
871 871 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
872 872 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
873 873
874 874 lock = self.lock()
875 875 try:
876 876 tr = self.transaction()
877 877 trp = weakref.proxy(tr)
878 878
879 879 # check in files
880 880 new = {}
881 881 changed = []
882 882 linkrev = len(self)
883 883 for f in sorted(ctx.modified() + ctx.added()):
884 884 self.ui.note(f + "\n")
885 885 try:
886 886 fctx = ctx[f]
887 887 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
888 888 changed)
889 889 m1.set(f, fctx.flags())
890 890 except (OSError, IOError):
891 891 if error:
892 892 self.ui.warn(_("trouble committing %s!\n") % f)
893 893 raise
894 894 else:
895 895 removed.append(f)
896 896
897 897 # update manifest
898 898 m1.update(new)
899 899 removed = [f for f in sorted(removed) if f in m1 or f in m2]
900 900 drop = [f for f in removed if f in m1]
901 901 for f in drop:
902 902 del m1[f]
903 903 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
904 904 p2.manifestnode(), (new, drop))
905 905
906 906 # update changelog
907 907 self.changelog.delayupdate()
908 908 n = self.changelog.add(mn, changed + removed, ctx.description(),
909 909 trp, p1.node(), p2.node(),
910 910 user, ctx.date(), ctx.extra().copy())
911 911 p = lambda: self.changelog.writepending() and self.root or ""
912 912 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
913 913 parent2=xp2, pending=p)
914 914 self.changelog.finalize(trp)
915 915 tr.close()
916 916
917 917 if self.branchcache:
918 918 self.branchtags()
919 919
920 920 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
921 921 return n
922 922 finally:
923 923 del tr
924 924 lock.release()
925 925
926 926 def destroyed(self):
927 927 '''Inform the repository that nodes have been destroyed.
928 928 Intended for use by strip and rollback, so there's a common
929 929 place for anything that has to be done after destroying history.'''
930 930 # XXX it might be nice if we could take the list of destroyed
931 931 # nodes, but I don't see an easy way for rollback() to do that
932 932
933 933 # Ensure the persistent tag cache is updated. Doing it now
934 934 # means that the tag cache only has to worry about destroyed
935 935 # heads immediately after a strip/rollback. That in turn
936 936 # guarantees that "cachetip == currenttip" (comparing both rev
937 937 # and node) always means no nodes have been added or destroyed.
938 938
939 939 # XXX this is suboptimal when qrefresh'ing: we strip the current
940 940 # head, refresh the tag cache, then immediately add a new head.
941 941 # But I think doing it this way is necessary for the "instant
942 942 # tag cache retrieval" case to work.
943 943 tags_.findglobaltags(self.ui, self, {}, {})
944 944
945 945 def walk(self, match, node=None):
946 946 '''
947 947 walk recursively through the directory tree or a given
948 948 changeset, finding all files matched by the match
949 949 function
950 950 '''
951 951 return self[node].walk(match)
952 952
953 953 def status(self, node1='.', node2=None, match=None,
954 954 ignored=False, clean=False, unknown=False):
955 955 """return status of files between two nodes or node and working directory
956 956
957 957 If node1 is None, use the first dirstate parent instead.
958 958 If node2 is None, compare node1 with working directory.
959 959 """
960 960
961 961 def mfmatches(ctx):
962 962 mf = ctx.manifest().copy()
963 963 for fn in mf.keys():
964 964 if not match(fn):
965 965 del mf[fn]
966 966 return mf
967 967
968 968 if isinstance(node1, context.changectx):
969 969 ctx1 = node1
970 970 else:
971 971 ctx1 = self[node1]
972 972 if isinstance(node2, context.changectx):
973 973 ctx2 = node2
974 974 else:
975 975 ctx2 = self[node2]
976 976
977 977 working = ctx2.rev() is None
978 978 parentworking = working and ctx1 == self['.']
979 979 match = match or match_.always(self.root, self.getcwd())
980 980 listignored, listclean, listunknown = ignored, clean, unknown
981 981
982 982 # load earliest manifest first for caching reasons
983 983 if not working and ctx2.rev() < ctx1.rev():
984 984 ctx2.manifest()
985 985
986 986 if not parentworking:
987 987 def bad(f, msg):
988 988 if f not in ctx1:
989 989 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
990 990 match.bad = bad
991 991
992 992 if working: # we need to scan the working dir
993 993 s = self.dirstate.status(match, listignored, listclean, listunknown)
994 994 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
995 995
996 996 # check for any possibly clean files
997 997 if parentworking and cmp:
998 998 fixup = []
999 999 # do a full compare of any files that might have changed
1000 1000 for f in sorted(cmp):
1001 1001 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1002 1002 or ctx1[f].cmp(ctx2[f].data())):
1003 1003 modified.append(f)
1004 1004 else:
1005 1005 fixup.append(f)
1006 1006
1007 1007 if listclean:
1008 1008 clean += fixup
1009 1009
1010 1010 # update dirstate for files that are actually clean
1011 1011 if fixup:
1012 1012 try:
1013 1013 # updating the dirstate is optional
1014 1014 # so we don't wait on the lock
1015 1015 wlock = self.wlock(False)
1016 1016 try:
1017 1017 for f in fixup:
1018 1018 self.dirstate.normal(f)
1019 1019 finally:
1020 1020 wlock.release()
1021 1021 except error.LockError:
1022 1022 pass
1023 1023
1024 1024 if not parentworking:
1025 1025 mf1 = mfmatches(ctx1)
1026 1026 if working:
1027 1027 # we are comparing working dir against non-parent
1028 1028 # generate a pseudo-manifest for the working dir
1029 1029 mf2 = mfmatches(self['.'])
1030 1030 for f in cmp + modified + added:
1031 1031 mf2[f] = None
1032 1032 mf2.set(f, ctx2.flags(f))
1033 1033 for f in removed:
1034 1034 if f in mf2:
1035 1035 del mf2[f]
1036 1036 else:
1037 1037 # we are comparing two revisions
1038 1038 deleted, unknown, ignored = [], [], []
1039 1039 mf2 = mfmatches(ctx2)
1040 1040
1041 1041 modified, added, clean = [], [], []
1042 1042 for fn in mf2:
1043 1043 if fn in mf1:
1044 1044 if (mf1.flags(fn) != mf2.flags(fn) or
1045 1045 (mf1[fn] != mf2[fn] and
1046 1046 (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
1047 1047 modified.append(fn)
1048 1048 elif listclean:
1049 1049 clean.append(fn)
1050 1050 del mf1[fn]
1051 1051 else:
1052 1052 added.append(fn)
1053 1053 removed = mf1.keys()
1054 1054
1055 1055 r = modified, added, removed, deleted, unknown, ignored, clean
1056 1056 [l.sort() for l in r]
1057 1057 return r
1058 1058
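# The seven lists always come back in the order built above; an
# illustrative unpacking:
#
#   modified, added, removed, deleted, unknown, ignored, clean = \
#       repo.status(ignored=True, clean=True, unknown=True)
#
# The unknown, ignored and clean lists stay empty unless the matching
# keyword argument is set.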
1059 1059 def add(self, list):
1060 1060 wlock = self.wlock()
1061 1061 try:
1062 1062 rejected = []
1063 1063 for f in list:
1064 1064 p = self.wjoin(f)
1065 1065 try:
1066 1066 st = os.lstat(p)
1067 1067 except:
1068 1068 self.ui.warn(_("%s does not exist!\n") % f)
1069 1069 rejected.append(f)
1070 1070 continue
1071 1071 if st.st_size > 10000000:
1072 1072 self.ui.warn(_("%s: files over 10MB may cause memory and"
1073 1073 " performance problems\n"
1074 1074 "(use 'hg revert %s' to unadd the file)\n")
1075 1075 % (f, f))
1076 1076 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1077 1077 self.ui.warn(_("%s not added: only files and symlinks "
1078 1078 "supported currently\n") % f)
1079 1079 rejected.append(p)
1080 1080 elif self.dirstate[f] in 'amn':
1081 1081 self.ui.warn(_("%s already tracked!\n") % f)
1082 1082 elif self.dirstate[f] == 'r':
1083 1083 self.dirstate.normallookup(f)
1084 1084 else:
1085 1085 self.dirstate.add(f)
1086 1086 return rejected
1087 1087 finally:
1088 1088 wlock.release()
1089 1089
1090 1090 def forget(self, list):
1091 1091 wlock = self.wlock()
1092 1092 try:
1093 1093 for f in list:
1094 1094 if self.dirstate[f] != 'a':
1095 1095 self.ui.warn(_("%s not added!\n") % f)
1096 1096 else:
1097 1097 self.dirstate.forget(f)
1098 1098 finally:
1099 1099 wlock.release()
1100 1100
1101 1101 def remove(self, list, unlink=False):
1102 1102 if unlink:
1103 1103 for f in list:
1104 1104 try:
1105 1105 util.unlink(self.wjoin(f))
1106 1106 except OSError, inst:
1107 1107 if inst.errno != errno.ENOENT:
1108 1108 raise
1109 1109 wlock = self.wlock()
1110 1110 try:
1111 1111 for f in list:
1112 1112 if unlink and os.path.exists(self.wjoin(f)):
1113 1113 self.ui.warn(_("%s still exists!\n") % f)
1114 1114 elif self.dirstate[f] == 'a':
1115 1115 self.dirstate.forget(f)
1116 1116 elif f not in self.dirstate:
1117 1117 self.ui.warn(_("%s not tracked!\n") % f)
1118 1118 else:
1119 1119 self.dirstate.remove(f)
1120 1120 finally:
1121 1121 wlock.release()
1122 1122
1123 1123 def undelete(self, list):
1124 1124 manifests = [self.manifest.read(self.changelog.read(p)[0])
1125 1125 for p in self.dirstate.parents() if p != nullid]
1126 1126 wlock = self.wlock()
1127 1127 try:
1128 1128 for f in list:
1129 1129 if self.dirstate[f] != 'r':
1130 1130 self.ui.warn(_("%s not removed!\n") % f)
1131 1131 else:
1132 1132 m = f in manifests[0] and manifests[0] or manifests[1]
1133 1133 t = self.file(f).read(m[f])
1134 1134 self.wwrite(f, t, m.flags(f))
1135 1135 self.dirstate.normal(f)
1136 1136 finally:
1137 1137 wlock.release()
1138 1138
1139 1139 def copy(self, source, dest):
1140 1140 p = self.wjoin(dest)
1141 1141 if not (os.path.exists(p) or os.path.islink(p)):
1142 1142 self.ui.warn(_("%s does not exist!\n") % dest)
1143 1143 elif not (os.path.isfile(p) or os.path.islink(p)):
1144 1144 self.ui.warn(_("copy failed: %s is not a file or a "
1145 1145 "symbolic link\n") % dest)
1146 1146 else:
1147 1147 wlock = self.wlock()
1148 1148 try:
1149 1149 if self.dirstate[dest] in '?r':
1150 1150 self.dirstate.add(dest)
1151 1151 self.dirstate.copy(source, dest)
1152 1152 finally:
1153 1153 wlock.release()
1154 1154
1155 1155 def heads(self, start=None):
1156 1156 heads = self.changelog.heads(start)
1157 1157 # sort the output in rev descending order
1158 1158 heads = [(-self.changelog.rev(h), h) for h in heads]
1159 1159 return [n for (r, n) in sorted(heads)]
1160 1160
1161 1161 def branchheads(self, branch=None, start=None, closed=False):
1162 1162 '''return a (possibly filtered) list of heads for the given branch
1163 1163
1164 1164 Heads are returned in topological order, from newest to oldest.
1165 1165 If branch is None, use the dirstate branch.
1166 1166 If start is not None, return only heads reachable from start.
1167 1167 If closed is True, return heads that are marked as closed as well.
1168 1168 '''
1169 1169 if branch is None:
1170 1170 branch = self[None].branch()
1171 1171 branches = self.branchmap()
1172 1172 if branch not in branches:
1173 1173 return []
1174 1174 # the cache returns heads ordered lowest to highest
1175 1175 bheads = list(reversed(branches[branch]))
1176 1176 if start is not None:
1177 1177 # filter out the heads that cannot be reached from startrev
1178 1178 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1179 1179 bheads = [h for h in bheads if h in fbheads]
1180 1180 if not closed:
1181 1181 bheads = [h for h in bheads if
1182 1182 ('close' not in self.changelog.read(h)[5])]
1183 1183 return bheads
1184 1184
1185 1185 def branches(self, nodes):
1186 1186 if not nodes:
1187 1187 nodes = [self.changelog.tip()]
1188 1188 b = []
1189 1189 for n in nodes:
1190 1190 t = n
1191 1191 while 1:
1192 1192 p = self.changelog.parents(n)
1193 1193 if p[1] != nullid or p[0] == nullid:
1194 1194 b.append((t, n, p[0], p[1]))
1195 1195 break
1196 1196 n = p[0]
1197 1197 return b
1198 1198
1199 1199 def between(self, pairs):
1200 1200 r = []
1201 1201
1202 1202 for top, bottom in pairs:
1203 1203 n, l, i = top, [], 0
1204 1204 f = 1
1205 1205
1206 1206 while n != bottom and n != nullid:
1207 1207 p = self.changelog.parents(n)[0]
1208 1208 if i == f:
1209 1209 l.append(n)
1210 1210 f = f * 2
1211 1211 n = p
1212 1212 i += 1
1213 1213
1214 1214 r.append(l)
1215 1215
1216 1216 return r
1217 1217
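# Sketch of the sampling above: walking first parents from top towards
# bottom, a node is recorded whenever its distance i from top equals f,
# and f doubles after each hit, so each returned list holds the nodes at
# distances 1, 2, 4, 8, ... from top, the spacing that the binary search
# in findcommonincoming relies on.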
1218 1218 def findincoming(self, remote, base=None, heads=None, force=False):
1219 1219 """Return list of roots of the subsets of missing nodes from remote
1220 1220
1221 1221 If base dict is specified, assume that these nodes and their parents
1222 1222 exist on the remote side and that no child of a node of base exists
1223 1223 in both remote and self.
1224 1224 Furthermore, base will be updated to include the nodes that exist
1225 1225 in both self and remote but whose children do not exist in both.
1226 1226 If a list of heads is specified, return only nodes which are heads
1227 1227 or ancestors of these heads.
1228 1228
1229 1229 All the ancestors of base are in self and in remote.
1230 1230 All the descendants of the list returned are missing in self.
1231 1231 (and so we know that the rest of the nodes are missing in remote, see
1232 1232 outgoing)
1233 1233 """
1234 1234 return self.findcommonincoming(remote, base, heads, force)[1]
1235 1235
1236 1236 def findcommonincoming(self, remote, base=None, heads=None, force=False):
1237 1237 """Return a tuple (common, missing roots, heads) used to identify
1238 1238 missing nodes from remote.
1239 1239
1240 1240 If base dict is specified, assume that these nodes and their parents
1241 1241 exist on the remote side and that no child of a node of base exists
1242 1242 in both remote and self.
1243 1243 Furthermore, base will be updated to include the nodes that exist
1244 1244 in both self and remote but whose children do not exist in both.
1245 1245 If a list of heads is specified, return only nodes which are heads
1246 1246 or ancestors of these heads.
1247 1247
1248 1248 All the ancestors of base are in self and in remote.
1249 1249 """
1250 1250 m = self.changelog.nodemap
1251 1251 search = []
1252 1252 fetch = set()
1253 1253 seen = set()
1254 1254 seenbranch = set()
1255 1255 if base is None:
1256 1256 base = {}
1257 1257
1258 1258 if not heads:
1259 1259 heads = remote.heads()
1260 1260
1261 1261 if self.changelog.tip() == nullid:
1262 1262 base[nullid] = 1
1263 1263 if heads != [nullid]:
1264 1264 return [nullid], [nullid], list(heads)
1265 1265 return [nullid], [], []
1266 1266
1267 1267 # assume we're closer to the tip than the root
1268 1268 # and start by examining the heads
1269 1269 self.ui.status(_("searching for changes\n"))
1270 1270
1271 1271 unknown = []
1272 1272 for h in heads:
1273 1273 if h not in m:
1274 1274 unknown.append(h)
1275 1275 else:
1276 1276 base[h] = 1
1277 1277
1278 1278 heads = unknown
1279 1279 if not unknown:
1280 1280 return base.keys(), [], []
1281 1281
1282 1282 req = set(unknown)
1283 1283 reqcnt = 0
1284 1284
1285 1285 # search through remote branches
1286 1286 # a 'branch' here is a linear segment of history, with four parts:
1287 1287 # head, root, first parent, second parent
1288 1288 # (a branch always has two parents (or none) by definition)
1289 1289 unknown = remote.branches(unknown)
1290 1290 while unknown:
1291 1291 r = []
1292 1292 while unknown:
1293 1293 n = unknown.pop(0)
1294 1294 if n[0] in seen:
1295 1295 continue
1296 1296
1297 1297 self.ui.debug("examining %s:%s\n"
1298 1298 % (short(n[0]), short(n[1])))
1299 1299 if n[0] == nullid: # found the end of the branch
1300 1300 pass
1301 1301 elif n in seenbranch:
1302 1302 self.ui.debug("branch already found\n")
1303 1303 continue
1304 1304 elif n[1] and n[1] in m: # do we know the base?
1305 1305 self.ui.debug("found incomplete branch %s:%s\n"
1306 1306 % (short(n[0]), short(n[1])))
1307 1307 search.append(n[0:2]) # schedule branch range for scanning
1308 1308 seenbranch.add(n)
1309 1309 else:
1310 1310 if n[1] not in seen and n[1] not in fetch:
1311 1311 if n[2] in m and n[3] in m:
1312 1312 self.ui.debug("found new changeset %s\n" %
1313 1313 short(n[1]))
1314 1314 fetch.add(n[1]) # earliest unknown
1315 1315 for p in n[2:4]:
1316 1316 if p in m:
1317 1317 base[p] = 1 # latest known
1318 1318
1319 1319 for p in n[2:4]:
1320 1320 if p not in req and p not in m:
1321 1321 r.append(p)
1322 1322 req.add(p)
1323 1323 seen.add(n[0])
1324 1324
1325 1325 if r:
1326 1326 reqcnt += 1
1327 1327 self.ui.debug("request %d: %s\n" %
1328 1328 (reqcnt, " ".join(map(short, r))))
1329 1329 for p in xrange(0, len(r), 10):
1330 1330 for b in remote.branches(r[p:p+10]):
1331 1331 self.ui.debug("received %s:%s\n" %
1332 1332 (short(b[0]), short(b[1])))
1333 1333 unknown.append(b)
1334 1334
1335 1335 # do binary search on the branches we found
1336 1336 while search:
1337 1337 newsearch = []
1338 1338 reqcnt += 1
1339 1339 for n, l in zip(search, remote.between(search)):
1340 1340 l.append(n[1])
1341 1341 p = n[0]
1342 1342 f = 1
1343 1343 for i in l:
1344 1344 self.ui.debug("narrowing %d:%d %s\n" % (f, len(l), short(i)))
1345 1345 if i in m:
1346 1346 if f <= 2:
1347 1347 self.ui.debug("found new branch changeset %s\n" %
1348 1348 short(p))
1349 1349 fetch.add(p)
1350 1350 base[i] = 1
1351 1351 else:
1352 1352 self.ui.debug("narrowed branch search to %s:%s\n"
1353 1353 % (short(p), short(i)))
1354 1354 newsearch.append((p, i))
1355 1355 break
1356 1356 p, f = i, f * 2
1357 1357 search = newsearch
1358 1358
1359 1359 # sanity check our fetch list
1360 1360 for f in fetch:
1361 1361 if f in m:
1362 1362 raise error.RepoError(_("already have changeset ")
1363 1363 + short(f))
1364 1364
1365 1365 if base.keys() == [nullid]:
1366 1366 if force:
1367 1367 self.ui.warn(_("warning: repository is unrelated\n"))
1368 1368 else:
1369 1369 raise util.Abort(_("repository is unrelated"))
1370 1370
1371 1371 self.ui.debug("found new changesets starting at " +
1372 1372 " ".join([short(f) for f in fetch]) + "\n")
1373 1373
1374 1374 self.ui.debug("%d total queries\n" % reqcnt)
1375 1375
1376 1376 return base.keys(), list(fetch), heads
1377 1377
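# An illustrative call, mirroring how pull() below uses it:
#
#   common, fetch, rheads = repo.findcommonincoming(remote)
#
# common lists nodes known to both sides, fetch the roots of the missing
# subsets (earliest unknown nodes), and rheads the remote heads.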
1378 1378 def findoutgoing(self, remote, base=None, heads=None, force=False):
1379 1379 """Return list of nodes that are roots of subsets not in remote
1380 1380
1381 1381 If base dict is specified, assume that these nodes and their parents
1382 1382 exist on the remote side.
1383 1383 If a list of heads is specified, return only nodes which are heads
1384 1384 or ancestors of these heads, and return a second element which
1385 1385 contains all remote heads which get new children.
1386 1386 """
1387 1387 if base is None:
1388 1388 base = {}
1389 1389 self.findincoming(remote, base, heads, force=force)
1390 1390
1391 1391 self.ui.debug("common changesets up to "
1392 1392 + " ".join(map(short, base.keys())) + "\n")
1393 1393
1394 1394 remain = set(self.changelog.nodemap)
1395 1395
1396 1396 # prune everything remote has from the tree
1397 1397 remain.remove(nullid)
1398 1398 remove = base.keys()
1399 1399 while remove:
1400 1400 n = remove.pop(0)
1401 1401 if n in remain:
1402 1402 remain.remove(n)
1403 1403 for p in self.changelog.parents(n):
1404 1404 remove.append(p)
1405 1405
1406 1406 # find every node whose parents have been pruned
1407 1407 subset = []
1408 1408 # find every remote head that will get new children
1409 1409 updated_heads = set()
1410 1410 for n in remain:
1411 1411 p1, p2 = self.changelog.parents(n)
1412 1412 if p1 not in remain and p2 not in remain:
1413 1413 subset.append(n)
1414 1414 if heads:
1415 1415 if p1 in heads:
1416 1416 updated_heads.add(p1)
1417 1417 if p2 in heads:
1418 1418 updated_heads.add(p2)
1419 1419
1420 1420 # this is the set of all roots we have to push
1421 1421 if heads:
1422 1422 return subset, list(updated_heads)
1423 1423 else:
1424 1424 return subset
1425 1425
1426 1426 def pull(self, remote, heads=None, force=False):
1427 1427 lock = self.lock()
1428 1428 try:
1429 1429 common, fetch, rheads = self.findcommonincoming(remote, heads=heads,
1430 1430 force=force)
1431 1431 if fetch == [nullid]:
1432 1432 self.ui.status(_("requesting all changes\n"))
1433 1433
1434 1434 if not fetch:
1435 1435 self.ui.status(_("no changes found\n"))
1436 1436 return 0
1437 1437
1438 1438 if heads is None and remote.capable('changegroupsubset'):
1439 1439 heads = rheads
1440 1440
1441 1441 if heads is None:
1442 1442 cg = remote.changegroup(fetch, 'pull')
1443 1443 else:
1444 1444 if not remote.capable('changegroupsubset'):
1445 1445 raise util.Abort(_("partial pull cannot be done because "
1446 1446 "other repository doesn't support "
1447 1447 "changegroupsubset"))
1448 1448 cg = remote.changegroupsubset(fetch, heads, 'pull')
1449 1449 return self.addchangegroup(cg, 'pull', remote.url())
1450 1450 finally:
1451 1451 lock.release()
1452 1452
1453 1453 def push(self, remote, force=False, revs=None):
1454 1454 # there are two ways to push to remote repo:
1455 1455 #
1456 1456 # addchangegroup assumes local user can lock remote
1457 1457 # repo (local filesystem, old ssh servers).
1458 1458 #
1459 1459 # unbundle assumes local user cannot lock remote repo (new ssh
1460 1460 # servers, http servers).
1461 1461
1462 1462 if remote.capable('unbundle'):
1463 1463 return self.push_unbundle(remote, force, revs)
1464 1464 return self.push_addchangegroup(remote, force, revs)
1465 1465
1466 1466 def prepush(self, remote, force, revs):
1467 1467 '''Analyze the local and remote repositories and determine which
1468 1468 changesets need to be pushed to the remote. Return a tuple
1469 1469 (changegroup, remoteheads). changegroup is a readable file-like
1470 1470 object whose read() returns successive changegroup chunks ready to
1471 1471 be sent over the wire. remoteheads is the list of remote heads.
1472 1472 '''
1473 1473 common = {}
1474 1474 remote_heads = remote.heads()
1475 1475 inc = self.findincoming(remote, common, remote_heads, force=force)
1476 1476
1477 1477 update, updated_heads = self.findoutgoing(remote, common, remote_heads)
1478 if revs is not None:
1479 1478 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1480 else:
1481 bases, heads = update, self.changelog.heads()
1482 1479
1483 def checkbranch(lheads, rheads, updatelh):
1480 def checkbranch(lheads, rheads, updatelb):
1484 1481 '''
1485 1482 check whether there are more local heads than remote heads on
1486 1483 a specific branch.
1487 1484
1488 1485 lheads: local branch heads
1489 1486 rheads: remote branch heads
1490 updatelh: outgoing local branch heads
1487 updatelb: outgoing local branch bases
1491 1488 '''
1492 1489
1493 1490 warn = 0
1494 1491
1495 1492 if not revs and len(lheads) > len(rheads):
1496 1493 warn = 1
1497 1494 else:
1495 # add local heads involved in the push
1498 1496 updatelheads = [self.changelog.heads(x, lheads)
1499 for x in updatelh]
1497 for x in updatelb]
1500 1498 newheads = set(sum(updatelheads, [])) & set(lheads)
1501 1499
1502 1500 if not newheads:
1503 1501 return True
1504 1502
1503 # add heads we don't have or that are not involved in the push
1505 1504 for r in rheads:
1506 1505 if r in self.changelog.nodemap:
1507 1506 desc = self.changelog.heads(r, heads)
1508 1507 l = [h for h in heads if h in desc]
1509 1508 if not l:
1510 1509 newheads.add(r)
1511 1510 else:
1512 1511 newheads.add(r)
1513 1512 if len(newheads) > len(rheads):
1514 1513 warn = 1
1515 1514
1516 1515 if warn:
1517 1516 if not rheads: # new branch requires --force
1518 1517 self.ui.warn(_("abort: push creates new"
1519 1518 " remote branch '%s'!\n") %
1520 self[updatelh[0]].branch())
1519 self[updatelb[0]].branch())
1521 1520 else:
1522 1521 self.ui.warn(_("abort: push creates new remote heads!\n"))
1523 1522
1524 1523 self.ui.status(_("(did you forget to merge?"
1525 1524 " use push -f to force)\n"))
1526 1525 return False
1527 1526 return True
1528 1527
1529 1528 if not bases:
1530 1529 self.ui.status(_("no changes found\n"))
1531 1530 return None, 1
1532 1531 elif not force:
1533 1532 # Check for each named branch if we're creating new remote heads.
1534 1533 # To be a remote head after push, node must be either:
1535 1534 # - unknown locally
1536 1535 # - a local outgoing head descended from update
1537 1536 # - a remote head that's known locally and not
1538 1537 # ancestral to an outgoing head
1539 1538 #
1540 1539 # New named branches cannot be created without --force.
1541 1540
1542 1541 if remote_heads != [nullid]:
1543 1542 if remote.capable('branchmap'):
1544 1543 localhds = {}
1545 1544 if not revs:
1546 1545 localhds = self.branchmap()
1547 1546 else:
1548 1547 for n in heads:
1549 1548 branch = self[n].branch()
1550 1549 if branch in localhds:
1551 1550 localhds[branch].append(n)
1552 1551 else:
1553 1552 localhds[branch] = [n]
1554 1553
1555 1554 remotehds = remote.branchmap()
1556 1555
1557 1556 for lh in localhds:
1558 1557 if lh in remotehds:
1559 1558 rheads = remotehds[lh]
1560 1559 else:
1561 1560 rheads = []
1562 1561 lheads = localhds[lh]
1563 updatelh = [upd for upd in update
1562 updatelb = [upd for upd in update
1564 1563 if self[upd].branch() == lh]
1565 if not updatelh:
1564 if not updatelb:
1566 1565 continue
1567 if not checkbranch(lheads, rheads, updatelh):
1566 if not checkbranch(lheads, rheads, updatelb):
1568 1567 return None, 0
1569 1568 else:
1570 1569 if not checkbranch(heads, remote_heads, update):
1571 1570 return None, 0
1572 1571
1573 1572 if inc:
1574 1573 self.ui.warn(_("note: unsynced remote changes!\n"))
1575 1574
1576 1575
1577 1576 if revs is None:
1578 1577 # use the fast path, no race possible on push
1579 1578 cg = self._changegroup(common.keys(), 'push')
1580 1579 else:
1581 1580 cg = self.changegroupsubset(update, revs, 'push')
1582 1581 return cg, remote_heads
1583 1582
1584 1583 def push_addchangegroup(self, remote, force, revs):
1585 1584 lock = remote.lock()
1586 1585 try:
1587 1586 ret = self.prepush(remote, force, revs)
1588 1587 if ret[0] is not None:
1589 1588 cg, remote_heads = ret
1590 1589 return remote.addchangegroup(cg, 'push', self.url())
1591 1590 return ret[1]
1592 1591 finally:
1593 1592 lock.release()
1594 1593
1595 1594 def push_unbundle(self, remote, force, revs):
1596 1595 # local repo finds heads on server, finds out what revs it
1597 1596 # must push. once revs transferred, if server finds it has
1598 1597 # different heads (someone else won commit/push race), server
1599 1598 # aborts.
1600 1599
1601 1600 ret = self.prepush(remote, force, revs)
1602 1601 if ret[0] is not None:
1603 1602 cg, remote_heads = ret
1604 1603 if force: remote_heads = ['force']
1605 1604 return remote.unbundle(cg, remote_heads, 'push')
1606 1605 return ret[1]
1607 1606
1608 1607 def changegroupinfo(self, nodes, source):
1609 1608 if self.ui.verbose or source == 'bundle':
1610 1609 self.ui.status(_("%d changesets found\n") % len(nodes))
1611 1610 if self.ui.debugflag:
1612 1611 self.ui.debug("list of changesets:\n")
1613 1612 for node in nodes:
1614 1613 self.ui.debug("%s\n" % hex(node))
1615 1614
1616 1615 def changegroupsubset(self, bases, heads, source, extranodes=None):
1617 1616 """Compute a changegroup consisting of all the nodes that are
1618 1617 descendants of any of the bases and ancestors of any of the heads.
1619 1618 Return a chunkbuffer object whose read() method will return
1620 1619 successive changegroup chunks.
1621 1620
1622 1621 It is fairly complex as determining which filenodes and which
1623 1622 manifest nodes need to be included for the changeset to be complete
1624 1623 is non-trivial.
1625 1624
1626 1625 Another wrinkle is doing the reverse, figuring out which changeset in
1627 1626 the changegroup a particular filenode or manifestnode belongs to.
1628 1627
1629 1628 The caller can specify some nodes that must be included in the
1630 1629 changegroup using the extranodes argument. It should be a dict
1631 1630 where the keys are the filenames (or 1 for the manifest), and the
1632 1631 values are lists of (node, linknode) tuples, where node is a wanted
1633 1632 node and linknode is the changelog node that should be transmitted as
1634 1633 the linkrev.
1635 1634 """
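# An illustrative sketch of the extranodes shape described above (the
# node names below are hypothetical, not from the original source):
#
#   extranodes = {
#       'foo.txt': [(filenode1, clnode1)],      # filenodes keyed by filename
#       1:         [(manifestnode1, clnode1)],  # manifest nodes keyed by 1
#   }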
1636 1635
1637 1636 if extranodes is None:
1638 1637 # can we go through the fast path?
1639 1638 heads.sort()
1640 1639 allheads = self.heads()
1641 1640 allheads.sort()
1642 1641 if heads == allheads:
1643 1642 common = []
1644 1643 # parents of bases are known from both sides
1645 1644 for n in bases:
1646 1645 for p in self.changelog.parents(n):
1647 1646 if p != nullid:
1648 1647 common.append(p)
1649 1648 return self._changegroup(common, source)
1650 1649
1651 1650 self.hook('preoutgoing', throw=True, source=source)
1652 1651
1653 1652 # Set up some initial variables
1654 1653 # Make it easy to refer to self.changelog
1655 1654 cl = self.changelog
1656 1655 # msng is short for missing - compute the list of changesets in this
1657 1656 # changegroup.
1658 1657 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1659 1658 self.changegroupinfo(msng_cl_lst, source)
1660 1659 # Some bases may turn out to be superfluous, and some heads may be
1661 1660 # too. nodesbetween will return the minimal set of bases and heads
1662 1661 # necessary to re-create the changegroup.
1663 1662
1664 1663 # Known heads are the list of heads that it is assumed the recipient
1665 1664 # of this changegroup will know about.
1666 1665 knownheads = set()
1667 1666 # We assume that all parents of bases are known heads.
1668 1667 for n in bases:
1669 1668 knownheads.update(cl.parents(n))
1670 1669 knownheads.discard(nullid)
1671 1670 knownheads = list(knownheads)
1672 1671 if knownheads:
1673 1672 # Now that we know what heads are known, we can compute which
1674 1673 # changesets are known. The recipient must know about all
1675 1674 # changesets required to reach the known heads from the null
1676 1675 # changeset.
1677 1676 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1678 1677 junk = None
1679 1678 # Transform the list into a set.
1680 1679 has_cl_set = set(has_cl_set)
1681 1680 else:
1682 1681 # If there were no known heads, the recipient cannot be assumed to
1683 1682 # know about any changesets.
1684 1683 has_cl_set = set()
1685 1684
1686 1685 # Make it easy to refer to self.manifest
1687 1686 mnfst = self.manifest
1688 1687 # We don't know which manifests are missing yet
1689 1688 msng_mnfst_set = {}
1690 1689 # Nor do we know which filenodes are missing.
1691 1690 msng_filenode_set = {}
1692 1691
1693 1692 junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
1694 1693 junk = None
1695 1694
1696 1695 # A changeset always belongs to itself, so the changenode lookup
1697 1696 # function for a changenode is identity.
1698 1697 def identity(x):
1699 1698 return x
1700 1699
1701 1700 # If we determine that a particular file or manifest node must be a
1702 1701 # node that the recipient of the changegroup will already have, we can
1703 1702 # also assume the recipient will have all the parents. This function
1704 1703 # prunes them from the set of missing nodes.
1705 1704 def prune_parents(revlog, hasset, msngset):
1706 1705 haslst = list(hasset)
1707 1706 haslst.sort(key=revlog.rev)
1708 1707 for node in haslst:
1709 1708 parentlst = [p for p in revlog.parents(node) if p != nullid]
1710 1709 while parentlst:
1711 1710 n = parentlst.pop()
1712 1711 if n not in hasset:
1713 1712 hasset.add(n)
1714 1713 p = [p for p in revlog.parents(n) if p != nullid]
1715 1714 parentlst.extend(p)
1716 1715 for n in hasset:
1717 1716 msngset.pop(n, None)
1718 1717
1719 1718 # This is a function generating function used to set up an environment
1720 1719 # for the inner function to execute in.
1721 1720 def manifest_and_file_collector(changedfileset):
1722 1721 # This is an information gathering function that gathers
1723 1722 # information from each changeset node that goes out as part of
1724 1723 # the changegroup. The information gathered is a list of which
1725 1724 # manifest nodes are potentially required (the recipient may
1726 1725 # already have them) and total list of all files which were
1727 1726 # changed in any changeset in the changegroup.
1728 1727 #
1729 1728 # We also remember, for each manifest, the first changenode we
1730 1729 # saw it referenced by, so we can later determine which changenode
1731 1730 # 'owns' the manifest.
1732 1731 def collect_manifests_and_files(clnode):
1733 1732 c = cl.read(clnode)
1734 1733 for f in c[3]:
1735 1734 # This is to make sure we only have one instance of each
1736 1735 # filename string for each filename.
1737 1736 changedfileset.setdefault(f, f)
1738 1737 msng_mnfst_set.setdefault(c[0], clnode)
1739 1738 return collect_manifests_and_files
1740 1739
1741 1740 # Figure out which manifest nodes (of the ones we think might be part
1742 1741 # of the changegroup) the recipient must know about and remove them
1743 1742 # from the changegroup.
1744 1743 def prune_manifests():
1745 1744 has_mnfst_set = set()
1746 1745 for n in msng_mnfst_set:
1747 1746 # If a 'missing' manifest thinks it belongs to a changenode
1748 1747 # the recipient is assumed to have, obviously the recipient
1749 1748 # must have that manifest.
1750 1749 linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
1751 1750 if linknode in has_cl_set:
1752 1751 has_mnfst_set.add(n)
1753 1752 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1754 1753
1755 1754 # Use the information collected in collect_manifests_and_files to say
1756 1755 # which changenode any manifestnode belongs to.
1757 1756 def lookup_manifest_link(mnfstnode):
1758 1757 return msng_mnfst_set[mnfstnode]
1759 1758
1760 1759 # A function generating function that sets up the initial environment
1761 1760 # for the inner function.
1762 1761 def filenode_collector(changedfiles):
1763 1762 next_rev = [0]
1764 1763 # This gathers information from each manifestnode included in the
1765 1764 # changegroup about which filenodes the manifest node references
1766 1765 # so we can include those in the changegroup too.
1767 1766 #
1768 1767 # It also remembers which changenode each filenode belongs to. It
1769 1768 # does this by assuming that a filenode belongs to the changenode
1770 1769 # that the first manifest referencing it belongs to.
1771 1770 def collect_msng_filenodes(mnfstnode):
1772 1771 r = mnfst.rev(mnfstnode)
1773 1772 if r == next_rev[0]:
1774 1773 # If the last rev we looked at was the one just previous,
1775 1774 # we only need to see a diff.
1776 1775 deltamf = mnfst.readdelta(mnfstnode)
1777 1776 # For each line in the delta
1778 1777 for f, fnode in deltamf.iteritems():
1779 1778 f = changedfiles.get(f, None)
1780 1779 # And if the file is in the list of files we care
1781 1780 # about.
1782 1781 if f is not None:
1783 1782 # Get the changenode this manifest belongs to
1784 1783 clnode = msng_mnfst_set[mnfstnode]
1785 1784 # Create the set of filenodes for the file if
1786 1785 # there isn't one already.
1787 1786 ndset = msng_filenode_set.setdefault(f, {})
1788 1787 # And set the filenode's changelog node to the
1789 1788 # manifest's if it hasn't been set already.
1790 1789 ndset.setdefault(fnode, clnode)
1791 1790 else:
1792 1791 # Otherwise we need a full manifest.
1793 1792 m = mnfst.read(mnfstnode)
1794 1793 # For every file we care about.
1795 1794 for f in changedfiles:
1796 1795 fnode = m.get(f, None)
1797 1796 # If it's in the manifest
1798 1797 if fnode is not None:
1799 1798 # See comments above.
1800 1799 clnode = msng_mnfst_set[mnfstnode]
1801 1800 ndset = msng_filenode_set.setdefault(f, {})
1802 1801 ndset.setdefault(fnode, clnode)
1803 1802 # Remember the revision we hope to see next.
1804 1803 next_rev[0] = r + 1
1805 1804 return collect_msng_filenodes
1806 1805
1807 1806 # We have a list of filenodes we think we need for a file, let's remove
1808 1807 # all those we know the recipient must have.
1809 1808 def prune_filenodes(f, filerevlog):
1810 1809 msngset = msng_filenode_set[f]
1811 1810 hasset = set()
1812 1811 # If a 'missing' filenode thinks it belongs to a changenode we
1813 1812 # assume the recipient must have, then the recipient must have
1814 1813 # that filenode.
1815 1814 for n in msngset:
1816 1815 clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
1817 1816 if clnode in has_cl_set:
1818 1817 hasset.add(n)
1819 1818 prune_parents(filerevlog, hasset, msngset)
1820 1819
1821 1820 # A function generator function that sets up a context for the
1822 1821 # inner function.
1823 1822 def lookup_filenode_link_func(fname):
1824 1823 msngset = msng_filenode_set[fname]
1825 1824 # Lookup the changenode the filenode belongs to.
1826 1825 def lookup_filenode_link(fnode):
1827 1826 return msngset[fnode]
1828 1827 return lookup_filenode_link
1829 1828
1830 1829 # Add the nodes that were explicitly requested.
1831 1830 def add_extra_nodes(name, nodes):
1832 1831 if not extranodes or name not in extranodes:
1833 1832 return
1834 1833
1835 1834 for node, linknode in extranodes[name]:
1836 1835 if node not in nodes:
1837 1836 nodes[node] = linknode
1838 1837
1839 1838 # Now that we have all these utility functions to help out and
1840 1839 # logically divide up the task, generate the group.
1841 1840 def gengroup():
1842 1841 # The set of changed files starts empty.
1843 1842 changedfiles = {}
1844 1843 # Create a changenode group generator that will call our functions
1845 1844 # back to lookup the owning changenode and collect information.
1846 1845 group = cl.group(msng_cl_lst, identity,
1847 1846 manifest_and_file_collector(changedfiles))
1848 1847 for chnk in group:
1849 1848 yield chnk
1850 1849
1851 1850 # The list of manifests has been collected by the generator
1852 1851 # calling our functions back.
1853 1852 prune_manifests()
1854 1853 add_extra_nodes(1, msng_mnfst_set)
1855 1854 msng_mnfst_lst = msng_mnfst_set.keys()
1856 1855 # Sort the manifestnodes by revision number.
1857 1856 msng_mnfst_lst.sort(key=mnfst.rev)
1858 1857 # Create a generator for the manifestnodes that calls our lookup
1859 1858 # and data collection functions back.
1860 1859 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1861 1860 filenode_collector(changedfiles))
1862 1861 for chnk in group:
1863 1862 yield chnk
1864 1863
1865 1864 # These are no longer needed, dereference and toss the memory for
1866 1865 # them.
1867 1866 msng_mnfst_lst = None
1868 1867 msng_mnfst_set.clear()
1869 1868
1870 1869 if extranodes:
1871 1870 for fname in extranodes:
1872 1871 if isinstance(fname, int):
1873 1872 continue
1874 1873 msng_filenode_set.setdefault(fname, {})
1875 1874 changedfiles[fname] = 1
1876 1875 # Go through all our files in order sorted by name.
1877 1876 for fname in sorted(changedfiles):
1878 1877 filerevlog = self.file(fname)
1879 1878 if not len(filerevlog):
1880 1879 raise util.Abort(_("empty or missing revlog for %s") % fname)
1881 1880 # Toss out the filenodes that the recipient isn't really
1882 1881 # missing.
1883 1882 if fname in msng_filenode_set:
1884 1883 prune_filenodes(fname, filerevlog)
1885 1884 add_extra_nodes(fname, msng_filenode_set[fname])
1886 1885 msng_filenode_lst = msng_filenode_set[fname].keys()
1887 1886 else:
1888 1887 msng_filenode_lst = []
1889 1888 # If any filenodes are left, generate the group for them,
1890 1889 # otherwise don't bother.
1891 1890 if len(msng_filenode_lst) > 0:
1892 1891 yield changegroup.chunkheader(len(fname))
1893 1892 yield fname
1894 1893 # Sort the filenodes by their revision #
1895 1894 msng_filenode_lst.sort(key=filerevlog.rev)
1896 1895 # Create a group generator and only pass in a changenode
1897 1896 # lookup function as we need to collect no information
1898 1897 # from filenodes.
1899 1898 group = filerevlog.group(msng_filenode_lst,
1900 1899 lookup_filenode_link_func(fname))
1901 1900 for chnk in group:
1902 1901 yield chnk
1903 1902 if fname in msng_filenode_set:
1904 1903 # Don't need this anymore, toss it to free memory.
1905 1904 del msng_filenode_set[fname]
1906 1905 # Signal that no more groups are left.
1907 1906 yield changegroup.closechunk()
1908 1907
1909 1908 if msng_cl_lst:
1910 1909 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1911 1910
1912 1911 return util.chunkbuffer(gengroup())
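# A minimal consumption sketch (not part of the original source; repo,
# bases, heads and outfile are hypothetical): the returned chunkbuffer
# is read incrementally until it runs dry.
#
#   cg = repo.changegroupsubset(bases, heads, 'bundle')
#   while True:
#       chunk = cg.read(4096)
#       if not chunk:
#           break
#       outfile.write(chunk)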
1913 1912
1914 1913 def changegroup(self, basenodes, source):
1915 1914 # to avoid a race we use changegroupsubset() (issue1320)
1916 1915 return self.changegroupsubset(basenodes, self.heads(), source)
1917 1916
1918 1917 def _changegroup(self, common, source):
1919 1918 """Compute the changegroup of all nodes that we have that a recipient
1920 1919 doesn't. Return a chunkbuffer object whose read() method will return
1921 1920 successive changegroup chunks.
1922 1921
1923 1922 This is much easier than the previous function as we can assume that
1924 1923 the recipient has any changenode we aren't sending them.
1925 1924
1926 1925 common is the set of common nodes between remote and self"""
1927 1926
1928 1927 self.hook('preoutgoing', throw=True, source=source)
1929 1928
1930 1929 cl = self.changelog
1931 1930 nodes = cl.findmissing(common)
1932 1931 revset = set([cl.rev(n) for n in nodes])
1933 1932 self.changegroupinfo(nodes, source)
1934 1933
1935 1934 def identity(x):
1936 1935 return x
1937 1936
1938 1937 def gennodelst(log):
1939 1938 for r in log:
1940 1939 if log.linkrev(r) in revset:
1941 1940 yield log.node(r)
1942 1941
1943 1942 def changed_file_collector(changedfileset):
1944 1943 def collect_changed_files(clnode):
1945 1944 c = cl.read(clnode)
1946 1945 changedfileset.update(c[3])
1947 1946 return collect_changed_files
1948 1947
1949 1948 def lookuprevlink_func(revlog):
1950 1949 def lookuprevlink(n):
1951 1950 return cl.node(revlog.linkrev(revlog.rev(n)))
1952 1951 return lookuprevlink
1953 1952
1954 1953 def gengroup():
1955 1954 '''yield a sequence of changegroup chunks (strings)'''
1956 1955 # construct a list of all changed files
1957 1956 changedfiles = set()
1958 1957
1959 1958 for chnk in cl.group(nodes, identity,
1960 1959 changed_file_collector(changedfiles)):
1961 1960 yield chnk
1962 1961
1963 1962 mnfst = self.manifest
1964 1963 nodeiter = gennodelst(mnfst)
1965 1964 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1966 1965 yield chnk
1967 1966
1968 1967 for fname in sorted(changedfiles):
1969 1968 filerevlog = self.file(fname)
1970 1969 if not len(filerevlog):
1971 1970 raise util.Abort(_("empty or missing revlog for %s") % fname)
1972 1971 nodeiter = gennodelst(filerevlog)
1973 1972 nodeiter = list(nodeiter)
1974 1973 if nodeiter:
1975 1974 yield changegroup.chunkheader(len(fname))
1976 1975 yield fname
1977 1976 lookup = lookuprevlink_func(filerevlog)
1978 1977 for chnk in filerevlog.group(nodeiter, lookup):
1979 1978 yield chnk
1980 1979
1981 1980 yield changegroup.closechunk()
1982 1981
1983 1982 if nodes:
1984 1983 self.hook('outgoing', node=hex(nodes[0]), source=source)
1985 1984
1986 1985 return util.chunkbuffer(gengroup())
1987 1986
1988 1987 def addchangegroup(self, source, srctype, url, emptyok=False):
1989 1988 """add changegroup to repo.
1990 1989
1991 1990 return values:
1992 1991 - nothing changed or no source: 0
1993 1992 - more heads than before: 1+added heads (2..n)
1994 1993 - fewer heads than before: -1-removed heads (-2..-n)
1995 1994 - number of heads stays the same: 1
1996 1995 """
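# Worked examples of the encoding above (illustrative only): going from
# 1 head to 3 heads returns 3 (1 + 2 added); going from 3 heads to 1
# returns -3 (-1 - 2 removed); an unchanged number of heads returns 1.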
1997 1996 def csmap(x):
1998 1997 self.ui.debug("add changeset %s\n" % short(x))
1999 1998 return len(cl)
2000 1999
2001 2000 def revmap(x):
2002 2001 return cl.rev(x)
2003 2002
2004 2003 if not source:
2005 2004 return 0
2006 2005
2007 2006 self.hook('prechangegroup', throw=True, source=srctype, url=url)
2008 2007
2009 2008 changesets = files = revisions = 0
2010 2009
2011 2010 # write changelog data to temp files so concurrent readers will not see
2012 2011 # an inconsistent view
2013 2012 cl = self.changelog
2014 2013 cl.delayupdate()
2015 2014 oldheads = len(cl.heads())
2016 2015
2017 2016 tr = self.transaction()
2018 2017 try:
2019 2018 trp = weakref.proxy(tr)
2020 2019 # pull off the changeset group
2021 2020 self.ui.status(_("adding changesets\n"))
2022 2021 clstart = len(cl)
2023 2022 chunkiter = changegroup.chunkiter(source)
2024 2023 if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
2025 2024 raise util.Abort(_("received changelog group is empty"))
2026 2025 clend = len(cl)
2027 2026 changesets = clend - clstart
2028 2027
2029 2028 # pull off the manifest group
2030 2029 self.ui.status(_("adding manifests\n"))
2031 2030 chunkiter = changegroup.chunkiter(source)
2032 2031 # no need to check for empty manifest group here:
2033 2032 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2034 2033 # no new manifest will be created and the manifest group will
2035 2034 # be empty during the pull
2036 2035 self.manifest.addgroup(chunkiter, revmap, trp)
2037 2036
2038 2037 # process the files
2039 2038 self.ui.status(_("adding file changes\n"))
2040 2039 while 1:
2041 2040 f = changegroup.getchunk(source)
2042 2041 if not f:
2043 2042 break
2044 2043 self.ui.debug("adding %s revisions\n" % f)
2045 2044 fl = self.file(f)
2046 2045 o = len(fl)
2047 2046 chunkiter = changegroup.chunkiter(source)
2048 2047 if fl.addgroup(chunkiter, revmap, trp) is None:
2049 2048 raise util.Abort(_("received file revlog group is empty"))
2050 2049 revisions += len(fl) - o
2051 2050 files += 1
2052 2051
2053 2052 newheads = len(cl.heads())
2054 2053 heads = ""
2055 2054 if oldheads and newheads != oldheads:
2056 2055 heads = _(" (%+d heads)") % (newheads - oldheads)
2057 2056
2058 2057 self.ui.status(_("added %d changesets"
2059 2058 " with %d changes to %d files%s\n")
2060 2059 % (changesets, revisions, files, heads))
2061 2060
2062 2061 if changesets > 0:
2063 2062 p = lambda: cl.writepending() and self.root or ""
2064 2063 self.hook('pretxnchangegroup', throw=True,
2065 2064 node=hex(cl.node(clstart)), source=srctype,
2066 2065 url=url, pending=p)
2067 2066
2068 2067 # make changelog see real files again
2069 2068 cl.finalize(trp)
2070 2069
2071 2070 tr.close()
2072 2071 finally:
2073 2072 del tr
2074 2073
2075 2074 if changesets > 0:
2076 2075 # forcefully update the on-disk branch cache
2077 2076 self.ui.debug("updating the branch cache\n")
2078 2077 self.branchtags()
2079 2078 self.hook("changegroup", node=hex(cl.node(clstart)),
2080 2079 source=srctype, url=url)
2081 2080
2082 2081 for i in xrange(clstart, clend):
2083 2082 self.hook("incoming", node=hex(cl.node(i)),
2084 2083 source=srctype, url=url)
2085 2084
2086 2085 # never return 0 here:
2087 2086 if newheads < oldheads:
2088 2087 return newheads - oldheads - 1
2089 2088 else:
2090 2089 return newheads - oldheads + 1
2091 2090
2092 2091
2093 2092 def stream_in(self, remote):
2094 2093 fp = remote.stream_out()
2095 2094 l = fp.readline()
2096 2095 try:
2097 2096 resp = int(l)
2098 2097 except ValueError:
2099 2098 raise error.ResponseError(
2100 2099 _('Unexpected response from remote server:'), l)
2101 2100 if resp == 1:
2102 2101 raise util.Abort(_('operation forbidden by server'))
2103 2102 elif resp == 2:
2104 2103 raise util.Abort(_('locking the remote repository failed'))
2105 2104 elif resp != 0:
2106 2105 raise util.Abort(_('the server sent an unknown error code'))
2107 2106 self.ui.status(_('streaming all changes\n'))
2108 2107 l = fp.readline()
2109 2108 try:
2110 2109 total_files, total_bytes = map(int, l.split(' ', 1))
2111 2110 except (ValueError, TypeError):
2112 2111 raise error.ResponseError(
2113 2112 _('Unexpected response from remote server:'), l)
2114 2113 self.ui.status(_('%d files to transfer, %s of data\n') %
2115 2114 (total_files, util.bytecount(total_bytes)))
2116 2115 start = time.time()
2117 2116 for i in xrange(total_files):
2118 2117 # XXX doesn't support '\n' or '\r' in filenames
2119 2118 l = fp.readline()
2120 2119 try:
2121 2120 name, size = l.split('\0', 1)
2122 2121 size = int(size)
2123 2122 except (ValueError, TypeError):
2124 2123 raise error.ResponseError(
2125 2124 _('Unexpected response from remote server:'), l)
2126 2125 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
2127 2126 # for backwards compat, name was partially encoded
2128 2127 ofp = self.sopener(store.decodedir(name), 'w')
2129 2128 for chunk in util.filechunkiter(fp, limit=size):
2130 2129 ofp.write(chunk)
2131 2130 ofp.close()
2132 2131 elapsed = time.time() - start
2133 2132 if elapsed <= 0:
2134 2133 elapsed = 0.001
2135 2134 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2136 2135 (util.bytecount(total_bytes), elapsed,
2137 2136 util.bytecount(total_bytes / elapsed)))
2138 2137 self.invalidate()
2139 2138 return len(self.heads()) + 1
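# The stream_out wire format consumed above, illustrated with
# hypothetical values (not an actual server transcript):
#
#   0\n                     response code (0 = OK)
#   2 8192\n                total files, total bytes
#   data/foo.i\x004096\n    "<name>\0<size>" header, then 4096 raw bytes
#   data/bar.i\x004096\n    next file, and so on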
2140 2139
2141 2140 def clone(self, remote, heads=[], stream=False):
2142 2141 '''clone remote repository.
2143 2142
2144 2143 keyword arguments:
2145 2144 heads: list of revs to clone (forces use of pull)
2146 2145 stream: use streaming clone if possible'''
2147 2146
2148 2147 # now, all clients that can request uncompressed clones can
2149 2148 # read repo formats supported by all servers that can serve
2150 2149 # them.
2151 2150
2152 2151 # if revlog format changes, client will have to check version
2153 2152 # and format flags on "stream" capability, and use
2154 2153 # uncompressed only if compatible.
2155 2154
2156 2155 if stream and not heads and remote.capable('stream'):
2157 2156 return self.stream_in(remote)
2158 2157 return self.pull(remote, heads)
2159 2158
2160 2159 # used to avoid circular references so destructors work
2161 2160 def aftertrans(files):
2162 2161 renamefiles = [tuple(t) for t in files]
2163 2162 def a():
2164 2163 for src, dest in renamefiles:
2165 2164 util.rename(src, dest)
2166 2165 return a
2167 2166
2168 2167 def instance(ui, path, create):
2169 2168 return localrepository(ui, util.drop_scheme('file', path), create)
2170 2169
2171 2170 def islocal(path):
2172 2171 return True
@@ -1,1286 +1,1290 b''
1 1 # util.py - Mercurial utility functions and platform specific implementations
2 2 #
3 3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2, incorporated herein by reference.
9 9
10 10 """Mercurial utility functions and platform specfic implementations.
11 11
12 12 This contains helper routines that are independent of the SCM core and
13 13 hide platform-specific details from the core.
14 14 """
15 15
16 16 from i18n import _
17 import error, osutil
17 import error, osutil, encoding
18 18 import cStringIO, errno, re, shutil, sys, tempfile, traceback
19 19 import os, stat, time, calendar, random, textwrap
20 20 import imp
21 21
22 22 # Python compatibility
23 23
24 24 def sha1(s):
25 25 return _fastsha1(s)
26 26
27 27 def _fastsha1(s):
28 28 # This function will import sha1 from hashlib or sha (whichever is
29 29 # available) and overwrite itself with it on the first call.
30 30 # Subsequent calls will go directly to the imported function.
31 31 try:
32 32 from hashlib import sha1 as _sha1
33 33 except ImportError:
34 34 from sha import sha as _sha1
35 35 global _fastsha1, sha1
36 36 _fastsha1 = sha1 = _sha1
37 37 return _sha1(s)
38 38
39 39 import subprocess
40 40 closefds = os.name == 'posix'
41 41 def popen2(cmd):
42 42 # Setting bufsize to -1 lets the system decide the buffer size.
43 43 # The default for bufsize is 0, meaning unbuffered. This leads to
44 44 # poor performance on Mac OS X: http://bugs.python.org/issue4194
45 45 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
46 46 close_fds=closefds,
47 47 stdin=subprocess.PIPE, stdout=subprocess.PIPE)
48 48 return p.stdin, p.stdout
49 49 def popen3(cmd):
50 50 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
51 51 close_fds=closefds,
52 52 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
53 53 stderr=subprocess.PIPE)
54 54 return p.stdin, p.stdout, p.stderr
55 55
56 56 def version():
57 57 """Return version information if available."""
58 58 try:
59 59 import __version__
60 60 return __version__.version
61 61 except ImportError:
62 62 return 'unknown'
63 63
64 64 # used by parsedate
65 65 defaultdateformats = (
66 66 '%Y-%m-%d %H:%M:%S',
67 67 '%Y-%m-%d %I:%M:%S%p',
68 68 '%Y-%m-%d %H:%M',
69 69 '%Y-%m-%d %I:%M%p',
70 70 '%Y-%m-%d',
71 71 '%m-%d',
72 72 '%m/%d',
73 73 '%m/%d/%y',
74 74 '%m/%d/%Y',
75 75 '%a %b %d %H:%M:%S %Y',
76 76 '%a %b %d %I:%M:%S%p %Y',
77 77 '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
78 78 '%b %d %H:%M:%S %Y',
79 79 '%b %d %I:%M:%S%p %Y',
80 80 '%b %d %H:%M:%S',
81 81 '%b %d %I:%M:%S%p',
82 82 '%b %d %H:%M',
83 83 '%b %d %I:%M%p',
84 84 '%b %d %Y',
85 85 '%b %d',
86 86 '%H:%M:%S',
87 87 '%I:%M:%S%p',
88 88 '%H:%M',
89 89 '%I:%M%p',
90 90 )
91 91
92 92 extendeddateformats = defaultdateformats + (
93 93 "%Y",
94 94 "%Y-%m",
95 95 "%b",
96 96 "%b %Y",
97 97 )
98 98
99 99 def cachefunc(func):
100 100 '''cache the result of function calls'''
101 101 # XXX doesn't handle keyword args
102 102 cache = {}
103 103 if func.func_code.co_argcount == 1:
104 104 # we gain a small amount of time because
105 105 # we don't need to pack/unpack the list
106 106 def f(arg):
107 107 if arg not in cache:
108 108 cache[arg] = func(arg)
109 109 return cache[arg]
110 110 else:
111 111 def f(*args):
112 112 if args not in cache:
113 113 cache[args] = func(*args)
114 114 return cache[args]
115 115
116 116 return f
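# Illustrative use (not part of the original source): memoize a
# recursive function; recursive calls go through the cached wrapper.
#
#   def _fib(n):
#       if n < 2:
#           return n
#       return fib(n - 1) + fib(n - 2)
#   fib = cachefunc(_fib)
#   fib(100)   # each value is computed only once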
117 117
118 118 def lrucachefunc(func):
119 119 '''cache most recent results of function calls'''
120 120 cache = {}
121 121 order = []
122 122 if func.func_code.co_argcount == 1:
123 123 def f(arg):
124 124 if arg not in cache:
125 125 if len(cache) > 20:
126 126 del cache[order.pop(0)]
127 127 cache[arg] = func(arg)
128 128 else:
129 129 order.remove(arg)
130 130 order.append(arg)
131 131 return cache[arg]
132 132 else:
133 133 def f(*args):
134 134 if args not in cache:
135 135 if len(cache) > 20:
136 136 del cache[order.pop(0)]
137 137 cache[args] = func(*args)
138 138 else:
139 139 order.remove(args)
140 140 order.append(args)
141 141 return cache[args]
142 142
143 143 return f
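# Illustrative use (hypothetical): cache stat results while keeping
# only the most recently used arguments (the cache holds roughly the
# 20 most recent entries; older ones are evicted).
#
#   lstat = lrucachefunc(os.lstat)
#   lstat('/tmp')   # first call runs os.lstat
#   lstat('/tmp')   # second call is served from the cache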
144 144
145 145 class propertycache(object):
146 146 def __init__(self, func):
147 147 self.func = func
148 148 self.name = func.__name__
149 149 def __get__(self, obj, type=None):
150 150 result = self.func(obj)
151 151 setattr(obj, self.name, result)
152 152 return result
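# Illustrative use (hypothetical class and loader): the first access
# runs the decorated function and stores the result as a plain instance
# attribute, so later accesses bypass the function entirely.
#
#   class repo(object):
#       @propertycache
#       def changelog(self):
#           return load_changelog(self)   # hypothetical expensive load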
153 153
154 154 def pipefilter(s, cmd):
155 155 '''filter string S through command CMD, returning its output'''
156 156 p = subprocess.Popen(cmd, shell=True, close_fds=closefds,
157 157 stdin=subprocess.PIPE, stdout=subprocess.PIPE)
158 158 pout, perr = p.communicate(s)
159 159 return pout
160 160
161 161 def tempfilter(s, cmd):
162 162 '''filter string S through a pair of temporary files with CMD.
163 163 CMD is used as a template to create the real command to be run,
164 164 with the strings INFILE and OUTFILE replaced by the real names of
165 165 the temporary files generated.'''
166 166 inname, outname = None, None
167 167 try:
168 168 infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
169 169 fp = os.fdopen(infd, 'wb')
170 170 fp.write(s)
171 171 fp.close()
172 172 outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
173 173 os.close(outfd)
174 174 cmd = cmd.replace('INFILE', inname)
175 175 cmd = cmd.replace('OUTFILE', outname)
176 176 code = os.system(cmd)
177 177 if sys.platform == 'OpenVMS' and code & 1:
178 178 code = 0
179 179 if code: raise Abort(_("command '%s' failed: %s") %
180 180 (cmd, explain_exit(code)))
181 181 return open(outname, 'rb').read()
182 182 finally:
183 183 try:
184 184 if inname: os.unlink(inname)
185 185 except: pass
186 186 try:
187 187 if outname: os.unlink(outname)
188 188 except: pass
189 189
190 190 filtertable = {
191 191 'tempfile:': tempfilter,
192 192 'pipe:': pipefilter,
193 193 }
194 194
195 195 def filter(s, cmd):
196 196 "filter a string through a command that transforms its input to its output"
197 197 for name, fn in filtertable.iteritems():
198 198 if cmd.startswith(name):
199 199 return fn(s, cmd[len(name):].lstrip())
200 200 return pipefilter(s, cmd)
201 201
202 202 def binary(s):
203 203 """return true if a string is binary data"""
204 204 return bool(s and '\0' in s)
205 205
206 206 def increasingchunks(source, min=1024, max=65536):
207 207 '''return no less than min bytes per chunk while data remains,
208 208 doubling min after each chunk until it reaches max'''
209 209 def log2(x):
210 210 if not x:
211 211 return 0
212 212 i = 0
213 213 while x:
214 214 x >>= 1
215 215 i += 1
216 216 return i - 1
217 217
218 218 buf = []
219 219 blen = 0
220 220 for chunk in source:
221 221 buf.append(chunk)
222 222 blen += len(chunk)
223 223 if blen >= min:
224 224 if min < max:
225 225 min = min << 1
226 226 nmin = 1 << log2(blen)
227 227 if nmin > min:
228 228 min = nmin
229 229 if min > max:
230 230 min = max
231 231 yield ''.join(buf)
232 232 blen = 0
233 233 buf = []
234 234 if buf:
235 235 yield ''.join(buf)
236 236
237 237 Abort = error.Abort
238 238
239 239 def always(fn): return True
240 240 def never(fn): return False
241 241
242 242 def pathto(root, n1, n2):
243 243 '''return the relative path from one place to another.
244 244 root should use os.sep to separate directories
245 245 n1 should use os.sep to separate directories
246 246 n2 should use "/" to separate directories
247 247 returns an os.sep-separated path.
248 248
249 249 If n1 is a relative path, it's assumed it's
250 250 relative to root.
251 251 n2 should always be relative to root.
252 252 '''
253 253 if not n1: return localpath(n2)
254 254 if os.path.isabs(n1):
255 255 if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
256 256 return os.path.join(root, localpath(n2))
257 257 n2 = '/'.join((pconvert(root), n2))
258 258 a, b = splitpath(n1), n2.split('/')
259 259 a.reverse()
260 260 b.reverse()
261 261 while a and b and a[-1] == b[-1]:
262 262 a.pop()
263 263 b.pop()
264 264 b.reverse()
265 265 return os.sep.join((['..'] * len(a)) + b) or '.'
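# Worked examples on POSIX (illustrative, not from the original source):
#
#   pathto('/repo', 'a/b', 'c/d')      -> '../../c/d'
#   pathto('/repo', '/repo/a', 'c/d')  -> '../c/d'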
266 266
267 267 def canonpath(root, cwd, myname):
268 268 """return the canonical path of myname, given cwd and root"""
269 269 if endswithsep(root):
270 270 rootsep = root
271 271 else:
272 272 rootsep = root + os.sep
273 273 name = myname
274 274 if not os.path.isabs(name):
275 275 name = os.path.join(root, cwd, name)
276 276 name = os.path.normpath(name)
277 277 audit_path = path_auditor(root)
278 278 if name != rootsep and name.startswith(rootsep):
279 279 name = name[len(rootsep):]
280 280 audit_path(name)
281 281 return pconvert(name)
282 282 elif name == root:
283 283 return ''
284 284 else:
285 285 # Determine whether `name' is in the hierarchy at or beneath `root',
286 286 # by iterating name=dirname(name) until that causes no change (can't
287 287 # check name == '/', because that doesn't work on windows). For each
288 288 # `name', compare dev/inode numbers. If they match, the list `rel'
289 289 # holds the reversed list of components making up the relative file
290 290 # name we want.
291 291 root_st = os.stat(root)
292 292 rel = []
293 293 while True:
294 294 try:
295 295 name_st = os.stat(name)
296 296 except OSError:
297 297 break
298 298 if samestat(name_st, root_st):
299 299 if not rel:
300 300 # name was actually the same as root (maybe a symlink)
301 301 return ''
302 302 rel.reverse()
303 303 name = os.path.join(*rel)
304 304 audit_path(name)
305 305 return pconvert(name)
306 306 dirname, basename = os.path.split(name)
307 307 rel.append(basename)
308 308 if dirname == name:
309 309 break
310 310 name = dirname
311 311
312 312 raise Abort('%s not under root' % myname)
313 313
314 314 _hgexecutable = None
315 315
316 316 def main_is_frozen():
317 317 """return True if we are a frozen executable.
318 318
319 319 The code supports py2exe (most common, Windows only) and tools/freeze
320 320 (portable, not much used).
321 321 """
322 322 return (hasattr(sys, "frozen") or # new py2exe
323 323 hasattr(sys, "importers") or # old py2exe
324 324 imp.is_frozen("__main__")) # tools/freeze
325 325
326 326 def hgexecutable():
327 327 """return location of the 'hg' executable.
328 328
329 329 Defaults to $HG or 'hg' in the search path.
330 330 """
331 331 if _hgexecutable is None:
332 332 hg = os.environ.get('HG')
333 333 if hg:
334 334 set_hgexecutable(hg)
335 335 elif main_is_frozen():
336 336 set_hgexecutable(sys.executable)
337 337 else:
338 338 set_hgexecutable(find_exe('hg') or 'hg')
339 339 return _hgexecutable
340 340
341 341 def set_hgexecutable(path):
342 342 """set location of the 'hg' executable"""
343 343 global _hgexecutable
344 344 _hgexecutable = path
345 345
346 346 def system(cmd, environ={}, cwd=None, onerr=None, errprefix=None):
347 347 '''enhanced shell command execution.
348 348 run with environment maybe modified, maybe in different dir.
349 349
350 350 if command fails and onerr is None, return status. if ui object,
351 351 print error message and return status, else raise onerr object as
352 352 exception.'''
353 353 def py2shell(val):
354 354 'convert python object into string that is useful to shell'
355 355 if val is None or val is False:
356 356 return '0'
357 357 if val is True:
358 358 return '1'
359 359 return str(val)
360 360 oldenv = {}
361 361 for k in environ:
362 362 oldenv[k] = os.environ.get(k)
363 363 if cwd is not None:
364 364 oldcwd = os.getcwd()
365 365 origcmd = cmd
366 366 if os.name == 'nt':
367 367 cmd = '"%s"' % cmd
368 368 try:
369 369 for k, v in environ.iteritems():
370 370 os.environ[k] = py2shell(v)
371 371 os.environ['HG'] = hgexecutable()
372 372 if cwd is not None and oldcwd != cwd:
373 373 os.chdir(cwd)
374 374 rc = os.system(cmd)
375 375 if sys.platform == 'OpenVMS' and rc & 1:
376 376 rc = 0
377 377 if rc and onerr:
378 378 errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
379 379 explain_exit(rc)[0])
380 380 if errprefix:
381 381 errmsg = '%s: %s' % (errprefix, errmsg)
382 382 try:
383 383 onerr.warn(errmsg + '\n')
384 384 except AttributeError:
385 385 raise onerr(errmsg)
386 386 return rc
387 387 finally:
388 388 for k, v in oldenv.iteritems():
389 389 if v is None:
390 390 del os.environ[k]
391 391 else:
392 392 os.environ[k] = v
393 393 if cwd is not None and oldcwd != cwd:
394 394 os.chdir(oldcwd)
395 395
396 396 def checksignature(func):
397 397 '''wrap a function with code to check for calling errors'''
398 398 def check(*args, **kwargs):
399 399 try:
400 400 return func(*args, **kwargs)
401 401 except TypeError:
402 402 if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
403 403 raise error.SignatureError
404 404 raise
405 405
406 406 return check
407 407
408 408 # os.path.lexists is not available on python2.3
409 409 def lexists(filename):
410 410 "test whether a file with this name exists. does not follow symlinks"
411 411 try:
412 412 os.lstat(filename)
413 413 except:
414 414 return False
415 415 return True
416 416
417 417 def rename(src, dst):
418 418 """forcibly rename a file"""
419 419 try:
420 420 os.rename(src, dst)
421 421 except OSError, err: # FIXME: check err (EEXIST ?)
422 422
423 423 # On windows, rename to existing file is not allowed, so we
424 424 # must delete destination first. But if a file is open, unlink
425 425 # schedules it for delete but does not delete it. Rename
426 426 # happens immediately even for open files, so we rename
427 427 # destination to a temporary name, then delete that. Then
428 428 # rename is safe to do.
429 429 # The temporary name is chosen at random to avoid the situation
430 430 # where a file is left lying around from a previous aborted run.
431 431 # The usual race condition this introduces can't be avoided as
432 432 # we need the name to rename into, and not the file itself. Due
433 433 # to the nature of the operation however, any races will at worst
434 434 # lead to the rename failing and the current operation aborting.
435 435
436 436 def tempname(prefix):
437 437 for tries in xrange(10):
438 438 temp = '%s-%08x' % (prefix, random.randint(0, 0xffffffff))
439 439 if not os.path.exists(temp):
440 440 return temp
441 441 raise IOError, (errno.EEXIST, "No usable temporary filename found")
442 442
443 443 temp = tempname(dst)
444 444 os.rename(dst, temp)
445 445 os.unlink(temp)
446 446 os.rename(src, dst)
447 447
448 448 def unlink(f):
449 449 """unlink and remove the directory if it is empty"""
450 450 os.unlink(f)
451 451 # try removing directories that might now be empty
452 452 try:
453 453 os.removedirs(os.path.dirname(f))
454 454 except OSError:
455 455 pass
456 456
457 457 def copyfile(src, dest):
458 458 "copy a file, preserving mode and atime/mtime"
459 459 if os.path.islink(src):
460 460 try:
461 461 os.unlink(dest)
462 462 except:
463 463 pass
464 464 os.symlink(os.readlink(src), dest)
465 465 else:
466 466 try:
467 467 shutil.copyfile(src, dest)
468 468 shutil.copystat(src, dest)
469 469 except shutil.Error, inst:
470 470 raise Abort(str(inst))
471 471
472 472 def copyfiles(src, dst, hardlink=None):
473 473 """Copy a directory tree using hardlinks if possible"""
474 474
475 475 if hardlink is None:
476 476 hardlink = (os.stat(src).st_dev ==
477 477 os.stat(os.path.dirname(dst)).st_dev)
478 478
479 479 if os.path.isdir(src):
480 480 os.mkdir(dst)
481 481 for name, kind in osutil.listdir(src):
482 482 srcname = os.path.join(src, name)
483 483 dstname = os.path.join(dst, name)
484 484 copyfiles(srcname, dstname, hardlink)
485 485 else:
486 486 if hardlink:
487 487 try:
488 488 os_link(src, dst)
489 489 except (IOError, OSError):
490 490 hardlink = False
491 491 shutil.copy(src, dst)
492 492 else:
493 493 shutil.copy(src, dst)
494 494
495 495 class path_auditor(object):
496 496 '''ensure that a filesystem path contains no banned components.
497 497 the following properties of a path are checked:
498 498
499 499 - under top-level .hg
500 500 - starts at the root of a windows drive
501 501 - contains ".."
502 502 - traverses a symlink (e.g. a/symlink_here/b)
503 503 - inside a nested repository'''
504 504
505 505 def __init__(self, root):
506 506 self.audited = set()
507 507 self.auditeddir = set()
508 508 self.root = root
509 509
510 510 def __call__(self, path):
511 511 if path in self.audited:
512 512 return
513 513 normpath = os.path.normcase(path)
514 514 parts = splitpath(normpath)
515 515 if (os.path.splitdrive(path)[0]
516 516 or parts[0].lower() in ('.hg', '.hg.', '')
517 517 or os.pardir in parts):
518 518 raise Abort(_("path contains illegal component: %s") % path)
519 519 if '.hg' in path.lower():
520 520 lparts = [p.lower() for p in parts]
521 521 for p in '.hg', '.hg.':
522 522 if p in lparts[1:]:
523 523 pos = lparts.index(p)
524 524 base = os.path.join(*parts[:pos])
525 525 raise Abort(_('path %r is inside repo %r') % (path, base))
526 526 def check(prefix):
527 527 curpath = os.path.join(self.root, prefix)
528 528 try:
529 529 st = os.lstat(curpath)
530 530 except OSError, err:
531 531 # EINVAL can be raised as invalid path syntax under win32.
532 532 # They must be ignored so that patterns can be checked too.
533 533 if err.errno not in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL):
534 534 raise
535 535 else:
536 536 if stat.S_ISLNK(st.st_mode):
537 537 raise Abort(_('path %r traverses symbolic link %r') %
538 538 (path, prefix))
539 539 elif (stat.S_ISDIR(st.st_mode) and
540 540 os.path.isdir(os.path.join(curpath, '.hg'))):
541 541 raise Abort(_('path %r is inside repo %r') %
542 542 (path, prefix))
543 543 parts.pop()
544 544 prefixes = []
545 545 while parts:
546 546 prefix = os.sep.join(parts)
547 547 if prefix in self.auditeddir:
548 548 break
549 549 check(prefix)
550 550 prefixes.append(prefix)
551 551 parts.pop()
552 552
553 553 self.audited.add(path)
554 554 # only add prefixes to the cache after checking everything: we don't
555 555 # want to add "foo/bar/baz" before checking if there's a "foo/.hg"
556 556 self.auditeddir.update(prefixes)
557 557
558 558 def nlinks(pathname):
559 559 """Return number of hardlinks for the given file."""
560 560 return os.lstat(pathname).st_nlink
561 561
562 562 if hasattr(os, 'link'):
563 563 os_link = os.link
564 564 else:
565 565 def os_link(src, dst):
566 566 raise OSError(0, _("Hardlinks not supported"))
567 567
568 568 def lookup_reg(key, name=None, scope=None):
569 569 return None
570 570
571 571 if os.name == 'nt':
572 572 from windows import *
573 573 else:
574 574 from posix import *
575 575
576 576 def makelock(info, pathname):
577 577 try:
578 578 return os.symlink(info, pathname)
579 579 except OSError, why:
580 580 if why.errno == errno.EEXIST:
581 581 raise
582 582 except AttributeError: # no symlink in os
583 583 pass
584 584
585 585 ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
586 586 os.write(ld, info)
587 587 os.close(ld)
588 588
589 589 def readlock(pathname):
590 590 try:
591 591 return os.readlink(pathname)
592 592 except OSError, why:
593 593 if why.errno not in (errno.EINVAL, errno.ENOSYS):
594 594 raise
595 595 except AttributeError: # no symlink in os
596 596 pass
597 597 return posixfile(pathname).read()
598 598
599 599 def fstat(fp):
600 600 '''stat file object that may not have fileno method.'''
601 601 try:
602 602 return os.fstat(fp.fileno())
603 603 except AttributeError:
604 604 return os.stat(fp.name)
605 605
606 606 # File system features
607 607
608 608 def checkcase(path):
609 609 """
610 610 Check whether the given path is on a case-sensitive filesystem
611 611
612 612 Requires a path (like /foo/.hg) ending with a foldable final
613 613 directory component.
614 614 """
615 615 s1 = os.stat(path)
616 616 d, b = os.path.split(path)
617 617 p2 = os.path.join(d, b.upper())
618 618 if path == p2:
619 619 p2 = os.path.join(d, b.lower())
620 620 try:
621 621 s2 = os.stat(p2)
622 622 if s2 == s1:
623 623 return False
624 624 return True
625 625 except:
626 626 return True
627 627
628 628 _fspathcache = {}
629 629 def fspath(name, root):
630 630 '''Get name in the case stored in the filesystem
631 631
632 632 The name is either relative to root, or it is an absolute path starting
633 633 with root. Note that this function is unnecessary, and should not be
634 634 called, for case-sensitive filesystems (simply because it's expensive).
635 635 '''
636 636 # If name is absolute, make it relative
637 637 if name.lower().startswith(root.lower()):
638 638 l = len(root)
639 639 if name[l] == os.sep or name[l] == os.altsep:
640 640 l = l + 1
641 641 name = name[l:]
642 642
643 643 if not os.path.exists(os.path.join(root, name)):
644 644 return None
645 645
646 646 seps = os.sep
647 647 if os.altsep:
648 648 seps = seps + os.altsep
649 649 # Protect backslashes. This gets silly very quickly.
650 650 seps = seps.replace('\\','\\\\')
651 651 pattern = re.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
652 652 dir = os.path.normcase(os.path.normpath(root))
653 653 result = []
654 654 for part, sep in pattern.findall(name):
655 655 if sep:
656 656 result.append(sep)
657 657 continue
658 658
659 659 if dir not in _fspathcache:
660 660 _fspathcache[dir] = os.listdir(dir)
661 661 contents = _fspathcache[dir]
662 662
663 663 lpart = part.lower()
664 664 lenp = len(part)
665 665 for n in contents:
666 666 if lenp == len(n) and n.lower() == lpart:
667 667 result.append(n)
668 668 break
669 669 else:
670 670 # Cannot happen, as the file exists!
671 671 result.append(part)
672 672 dir = os.path.join(dir, lpart)
673 673
674 674 return ''.join(result)
675 675
676 676 def checkexec(path):
677 677 """
678 678 Check whether the given path is on a filesystem with UNIX-like exec flags
679 679
680 680 Requires a directory (like /foo/.hg)
681 681 """
682 682
683 683 # VFAT on some Linux versions can flip mode but it doesn't persist
684 684 # across a FS remount. Frequently we can detect it if files are created
685 685 # with exec bit on.
686 686
687 687 try:
688 688 EXECFLAGS = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
689 689 fh, fn = tempfile.mkstemp("", "", path)
690 690 try:
691 691 os.close(fh)
692 692 m = os.stat(fn).st_mode & 0777
693 693 new_file_has_exec = m & EXECFLAGS
694 694 os.chmod(fn, m ^ EXECFLAGS)
695 695 exec_flags_cannot_flip = ((os.stat(fn).st_mode & 0777) == m)
696 696 finally:
697 697 os.unlink(fn)
698 698 except (IOError, OSError):
699 699 # we don't care, the user probably won't be able to commit anyway
700 700 return False
701 701 return not (new_file_has_exec or exec_flags_cannot_flip)
702 702
703 703 def checklink(path):
704 704 """check whether the given path is on a symlink-capable filesystem"""
705 705 # mktemp is not racy because symlink creation will fail if the
706 706 # file already exists
707 707 name = tempfile.mktemp(dir=path)
708 708 try:
709 709 os.symlink(".", name)
710 710 os.unlink(name)
711 711 return True
712 712 except (OSError, AttributeError):
713 713 return False
714 714
715 715 def needbinarypatch():
716 716 """return True if patches should be applied in binary mode by default."""
717 717 return os.name == 'nt'
718 718
719 719 def endswithsep(path):
720 720 '''Check path ends with os.sep or os.altsep.'''
721 721 return path.endswith(os.sep) or os.altsep and path.endswith(os.altsep)
722 722
723 723 def splitpath(path):
724 724 '''Split path by os.sep.
725 725 Note that this function does not use os.altsep because this is
726 726 an alternative to the simple "xxx.split(os.sep)".
727 727 It is recommended to use os.path.normpath() before using this
728 728 function if needed.'''
729 729 return path.split(os.sep)
730 730
731 731 def gui():
732 732 '''Are we running in a GUI?'''
733 733 return os.name == "nt" or os.name == "mac" or os.environ.get("DISPLAY")
734 734
735 735 def mktempcopy(name, emptyok=False, createmode=None):
736 736 """Create a temporary file with the same contents from name
737 737
738 738 The permission bits are copied from the original file.
739 739
740 740 If the temporary file is going to be truncated immediately, you
741 741 can use emptyok=True as an optimization.
742 742
743 743 Returns the name of the temporary file.
744 744 """
745 745 d, fn = os.path.split(name)
746 746 fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
747 747 os.close(fd)
748 748 # Temporary files are created with mode 0600, which is usually not
749 749 # what we want. If the original file already exists, just copy
750 750 # its mode. Otherwise, manually obey umask.
751 751 try:
752 752 st_mode = os.lstat(name).st_mode & 0777
753 753 except OSError, inst:
754 754 if inst.errno != errno.ENOENT:
755 755 raise
756 756 st_mode = createmode
757 757 if st_mode is None:
758 758 st_mode = ~umask
759 759 st_mode &= 0666
760 760 os.chmod(temp, st_mode)
761 761 if emptyok:
762 762 return temp
763 763 try:
764 764 try:
765 765 ifp = posixfile(name, "rb")
766 766 except IOError, inst:
767 767 if inst.errno == errno.ENOENT:
768 768 return temp
769 769 if not getattr(inst, 'filename', None):
770 770 inst.filename = name
771 771 raise
772 772 ofp = posixfile(temp, "wb")
773 773 for chunk in filechunkiter(ifp):
774 774 ofp.write(chunk)
775 775 ifp.close()
776 776 ofp.close()
777 777 except:
778 778 try: os.unlink(temp)
779 779 except: pass
780 780 raise
781 781 return temp
782 782
783 783 class atomictempfile(object):
784 784 """file-like object that atomically updates a file
785 785
786 786 All writes will be redirected to a temporary copy of the original
787 787 file. When rename is called, the copy is renamed to the original
788 788 name, making the changes visible.
789 789 """
790 790 def __init__(self, name, mode, createmode):
791 791 self.__name = name
792 792 self._fp = None
793 793 self.temp = mktempcopy(name, emptyok=('w' in mode),
794 794 createmode=createmode)
795 795 self._fp = posixfile(self.temp, mode)
796 796
797 797 def __getattr__(self, name):
798 798 return getattr(self._fp, name)
799 799
800 800 def rename(self):
801 801 if not self._fp.closed:
802 802 self._fp.close()
803 803 rename(self.temp, localpath(self.__name))
804 804
805 805 def __del__(self):
806 806 if not self._fp:
807 807 return
808 808 if not self._fp.closed:
809 809 try:
810 810 os.unlink(self.temp)
811 811 except: pass
812 812 self._fp.close()
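# Illustrative use (hypothetical path): writes go to a temporary copy;
# rename() publishes them atomically, while an unrenamed file is
# discarded by __del__.
#
#   f = atomictempfile('/tmp/config', 'w', None)
#   f.write('contents\n')
#   f.rename()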
813 813
814 814 def makedirs(name, mode=None):
815 815 """recursive directory creation with parent mode inheritance"""
816 816 try:
817 817 os.mkdir(name)
818 818 if mode is not None:
819 819 os.chmod(name, mode)
820 820 return
821 821 except OSError, err:
822 822 if err.errno == errno.EEXIST:
823 823 return
824 824 if err.errno != errno.ENOENT:
825 825 raise
826 826 parent = os.path.abspath(os.path.dirname(name))
827 827 makedirs(parent, mode)
828 828 makedirs(name, mode)
829 829
830 830 class opener(object):
831 831 """Open files relative to a base directory
832 832
833 833 This class is used to hide the details of COW semantics and
834 834 remote file access from higher level code.
835 835 """
836 836 def __init__(self, base, audit=True):
837 837 self.base = base
838 838 if audit:
839 839 self.audit_path = path_auditor(base)
840 840 else:
841 841 self.audit_path = always
842 842 self.createmode = None
843 843
844 844 @propertycache
845 845 def _can_symlink(self):
846 846 return checklink(self.base)
847 847
848 848 def _fixfilemode(self, name):
849 849 if self.createmode is None:
850 850 return
851 851 os.chmod(name, self.createmode & 0666)
852 852
853 853 def __call__(self, path, mode="r", text=False, atomictemp=False):
854 854 self.audit_path(path)
855 855 f = os.path.join(self.base, path)
856 856
857 857 if not text and "b" not in mode:
858 858 mode += "b" # for that other OS
859 859
860 860 nlink = -1
861 861 if mode not in ("r", "rb"):
862 862 try:
863 863 nlink = nlinks(f)
864 864 except OSError:
865 865 nlink = 0
866 866 d = os.path.dirname(f)
867 867 if not os.path.isdir(d):
868 868 makedirs(d, self.createmode)
869 869 if atomictemp:
870 870 return atomictempfile(f, mode, self.createmode)
871 871 if nlink > 1:
872 872 rename(mktempcopy(f), f)
873 873 fp = posixfile(f, mode)
874 874 if nlink == 0:
875 875 self._fixfilemode(f)
876 876 return fp
877 877
878 878 def symlink(self, src, dst):
879 879 self.audit_path(dst)
880 880 linkname = os.path.join(self.base, dst)
881 881 try:
882 882 os.unlink(linkname)
883 883 except OSError:
884 884 pass
885 885
886 886 dirname = os.path.dirname(linkname)
887 887 if not os.path.exists(dirname):
888 888 makedirs(dirname, self.createmode)
889 889
890 890 if self._can_symlink:
891 891 try:
892 892 os.symlink(src, linkname)
893 893 except OSError, err:
894 894 raise OSError(err.errno, _('could not symlink to %r: %s') %
895 895 (src, err.strerror), linkname)
896 896 else:
897 897 f = self(dst, "w")
898 898 f.write(src)
899 899 f.close()
900 900 self._fixfilemode(dst)
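# Illustrative use (hypothetical base and path): paths are audited
# relative to the base directory and missing parent directories are
# created on demand.
#
#   op = opener('/repo')
#   f = op('notes/todo.txt', 'w')
#   f.write('hello\n')
#   f.close()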
901 901
902 902 class chunkbuffer(object):
903 903 """Allow arbitrary sized chunks of data to be efficiently read from an
904 904 iterator over chunks of arbitrary size."""
905 905
906 906 def __init__(self, in_iter):
907 907 """in_iter is the iterator that's iterating over the input chunks.
908 908 The internal target buffer size is fixed at 2**16 bytes."""
909 909 self.iter = iter(in_iter)
910 910 self.buf = ''
911 911 self.targetsize = 2**16
912 912
913 913 def read(self, l):
914 914 """Read L bytes of data from the iterator of chunks of data.
915 915 Returns less than L bytes if the iterator runs dry."""
916 916 if l > len(self.buf) and self.iter:
917 917 # Clamp to a multiple of self.targetsize
918 918 targetsize = max(l, self.targetsize)
919 919 collector = cStringIO.StringIO()
920 920 collector.write(self.buf)
921 921 collected = len(self.buf)
922 922 for chunk in self.iter:
923 923 collector.write(chunk)
924 924 collected += len(chunk)
925 925 if collected >= targetsize:
926 926 break
927 927 if collected < targetsize:
928 928 self.iter = False
929 929 self.buf = collector.getvalue()
930 930 if len(self.buf) == l:
931 931 s, self.buf = str(self.buf), ''
932 932 else:
933 933 s, self.buf = self.buf[:l], buffer(self.buf, l)
934 934 return s
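# Illustrative reads (not from the original source):
#
#   buf = chunkbuffer(iter(['abc', 'defg', 'h']))
#   buf.read(4)    # -> 'abcd'
#   buf.read(10)   # -> 'efgh' (short read: the iterator ran dry)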
935 935
936 936 def filechunkiter(f, size=65536, limit=None):
937 937 """Create a generator that produces the data in the file size
938 938 (default 65536) bytes at a time, up to optional limit (default is
939 939 to read all data). Chunks may be less than size bytes if the
940 940 chunk is the last chunk in the file, or the file is a socket or
941 941 some other type of file that sometimes reads less data than is
942 942 requested."""
943 943 assert size >= 0
944 944 assert limit is None or limit >= 0
945 945 while True:
946 946 if limit is None: nbytes = size
947 947 else: nbytes = min(limit, size)
948 948 s = nbytes and f.read(nbytes)
949 949 if not s: break
950 950 if limit: limit -= len(s)
951 951 yield s
952 952
953 953 def makedate():
954 954 lt = time.localtime()
955 955 if lt[8] == 1 and time.daylight:
956 956 tz = time.altzone
957 957 else:
958 958 tz = time.timezone
959 959 return time.mktime(lt), tz
960 960
961 961 def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
962 962 """represent a (unixtime, offset) tuple as a localized time.
963 963 unixtime is seconds since the epoch, and offset is the time zone's
964 964 number of seconds away from UTC. "%1" in the format string
965 965 expands to the offset's sign and two-digit hours; "%2" to its two-digit minutes."""
966 966 t, tz = date or makedate()
967 967 if "%1" in format or "%2" in format:
968 968 sign = (tz > 0) and "-" or "+"
969 969 minutes = abs(tz) // 60
970 970 format = format.replace("%1", "%c%02d" % (sign, minutes // 60))
971 971 format = format.replace("%2", "%02d" % (minutes % 60))
972 972 s = time.strftime(format, time.gmtime(float(t) - tz))
973 973 return s
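# Illustrative calls (epoch values chosen for clarity):
#
#   datestr((0, 0))      # -> 'Thu Jan 01 00:00:00 1970 +0000'
#   datestr((0, -3600))  # -> 'Thu Jan 01 01:00:00 1970 +0100'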
974 974
975 975 def shortdate(date=None):
976 976 """turn (timestamp, tzoff) tuple into iso 8631 date."""
977 977 return datestr(date, format='%Y-%m-%d')
978 978
979 979 def strdate(string, format, defaults=[]):
980 980 """parse a localized time string and return a (unixtime, offset) tuple.
981 981 if the string cannot be parsed, ValueError is raised."""
982 982 def timezone(string):
983 983 tz = string.split()[-1]
984 984 if tz[0] in "+-" and len(tz) == 5 and tz[1:].isdigit():
985 985 sign = (tz[0] == "+") and 1 or -1
986 986 hours = int(tz[1:3])
987 987 minutes = int(tz[3:5])
988 988 return -sign * (hours * 60 + minutes) * 60
989 989 if tz == "GMT" or tz == "UTC":
990 990 return 0
991 991 return None
992 992
993 993 # NOTE: unixtime = localunixtime + offset
994 994 offset, date = timezone(string), string
995 995 if offset != None:
996 996 date = " ".join(string.split()[:-1])
997 997
998 998 # add missing elements from defaults
999 999 for part in defaults:
1000 1000 found = [True for p in part if ("%"+p) in format]
1001 1001 if not found:
1002 1002 date += "@" + defaults[part]
1003 1003 format += "@%" + part[0]
1004 1004
1005 1005 timetuple = time.strptime(date, format)
1006 1006 localunixtime = int(calendar.timegm(timetuple))
1007 1007 if offset is None:
1008 1008 # local timezone
1009 1009 unixtime = int(time.mktime(timetuple))
1010 1010 offset = unixtime - localunixtime
1011 1011 else:
1012 1012 unixtime = localunixtime + offset
1013 1013 return unixtime, offset
1014 1014
1015 1015 def parsedate(date, formats=None, defaults=None):
1016 1016 """parse a localized date/time string and return a (unixtime, offset) tuple.
1017 1017
1018 1018 The date may be a "unixtime offset" string or in one of the specified
1019 1019 formats. If the date already is a (unixtime, offset) tuple, it is returned.
1020 1020 """
1021 1021 if not date:
1022 1022 return 0, 0
1023 1023 if isinstance(date, tuple) and len(date) == 2:
1024 1024 return date
1025 1025 if not formats:
1026 1026 formats = defaultdateformats
1027 1027 date = date.strip()
1028 1028 try:
1029 1029 when, offset = map(int, date.split(' '))
1030 1030 except ValueError:
1031 1031 # fill out defaults
1032 1032 if not defaults:
1033 1033 defaults = {}
1034 1034 now = makedate()
1035 1035 for part in "d mb yY HI M S".split():
1036 1036 if part not in defaults:
1037 1037 if part[0] in "HMS":
1038 1038 defaults[part] = "00"
1039 1039 else:
1040 1040 defaults[part] = datestr(now, "%" + part[0])
1041 1041
1042 1042 for format in formats:
1043 1043 try:
1044 1044 when, offset = strdate(date, format, defaults)
1045 1045 except (ValueError, OverflowError):
1046 1046 pass
1047 1047 else:
1048 1048 break
1049 1049 else:
1050 1050         raise Abort(_('invalid date: %r') % date)
1051 1051 # validate explicit (probably user-specified) date and
1052 1052 # time zone offset. values must fit in signed 32 bits for
1053 1053 # current 32-bit linux runtimes. timezones go from UTC-12
1054 1054 # to UTC+14
1055 1055 if abs(when) > 0x7fffffff:
1056 1056 raise Abort(_('date exceeds 32 bits: %d') % when)
1057 1057 if offset < -50400 or offset > 43200:
1058 1058 raise Abort(_('impossible time zone offset: %d') % offset)
1059 1059 return when, offset
1060 1060
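A minimal sketch of the sign convention these helpers share (unixtime = localunixtime + offset, with offset in seconds west of UTC); the date string is illustrative:

    import calendar, time

    # "2009-08-18 13:00 +0200": two hours east of UTC, so offset = -7200.
    timetuple = time.strptime("2009-08-18 13:00", "%Y-%m-%d %H:%M")
    localunixtime = int(calendar.timegm(timetuple))
    offset = -7200
    unixtime = localunixtime + offset
    print unixtime, offset   # 1250593200 -7200, i.e. 11:00 UTC
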
1061 1061 def matchdate(date):
1062 1062 """Return a function that matches a given date match specifier
1063 1063
1064 1064 Formats include:
1065 1065
1066 1066 '{date}' match a given date to the accuracy provided
1067 1067
1068 1068 '<{date}' on or before a given date
1069 1069
1070 1070 '>{date}' on or after a given date
1071 1071
1072 1072 """
1073 1073
1074 1074 def lower(date):
1075 1075 d = dict(mb="1", d="1")
1076 1076 return parsedate(date, extendeddateformats, d)[0]
1077 1077
1078 1078 def upper(date):
1079 1079 d = dict(mb="12", HI="23", M="59", S="59")
1080 1080 for days in "31 30 29".split():
1081 1081 try:
1082 1082 d["d"] = days
1083 1083 return parsedate(date, extendeddateformats, d)[0]
1084 1084             except Abort: # day out of range for this month; retry shorter
1085 1085 pass
1086 1086 d["d"] = "28"
1087 1087 return parsedate(date, extendeddateformats, d)[0]
1088 1088
1089 1089 date = date.strip()
1090 1090 if date[0] == "<":
1091 1091 when = upper(date[1:])
1092 1092 return lambda x: x <= when
1093 1093 elif date[0] == ">":
1094 1094 when = lower(date[1:])
1095 1095 return lambda x: x >= when
1096 1096 elif date[0] == "-":
1097 1097 try:
1098 1098 days = int(date[1:])
1099 1099 except ValueError:
1100 1100 raise Abort(_("invalid day spec: %s") % date[1:])
1101 1101 when = makedate()[0] - days * 3600 * 24
1102 1102 return lambda x: x >= when
1103 1103 elif " to " in date:
1104 1104 a, b = date.split(" to ")
1105 1105 start, stop = lower(a), upper(b)
1106 1106 return lambda x: x >= start and x <= stop
1107 1107 else:
1108 1108 start, stop = lower(date), upper(date)
1109 1109 return lambda x: x >= start and x <= stop
1110 1110
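A hedged usage sketch of the "-{days}" branch; the cutoff arithmetic mirrors the code above and the timestamps are synthetic:

    import time

    days = 7
    cutoff = time.time() - days * 3600 * 24   # what matchdate("-7") computes
    match = lambda x: x >= cutoff
    print match(time.time())     # True: now falls within the last week
    print match(cutoff - 1)      # False: one second too old
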
1111 1111 def shortuser(user):
1112 1112 """Return a short representation of a user name or email address."""
1113 1113 f = user.find('@')
1114 1114 if f >= 0:
1115 1115 user = user[:f]
1116 1116 f = user.find('<')
1117 1117 if f >= 0:
1118 1118 user = user[f+1:]
1119 1119 f = user.find(' ')
1120 1120 if f >= 0:
1121 1121 user = user[:f]
1122 1122 f = user.find('.')
1123 1123 if f >= 0:
1124 1124 user = user[:f]
1125 1125 return user
1126 1126
1127 1127 def email(author):
1128 1128 '''get email of author.'''
1129 1129 r = author.find('>')
1130 1130 if r == -1: r = None
1131 1131 return author[author.find('<')+1:r]
1132 1132
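A condensed re-derivation of what the two helpers above return for a synthetic author string (illustrative, not Mercurial's own code):

    author = "John Doe <john.doe@example.com>"
    addr = author[author.find('<') + 1:author.find('>')]
    user = addr.split('@')[0].split('.')[0]
    print addr   # john.doe@example.com -- what email(author) returns
    print user   # john                 -- what shortuser(author) returns
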
1133 1133 def ellipsis(text, maxlength=400):
1134 1134 """Trim string to at most maxlength (default: 400) characters."""
1135 1135 if len(text) <= maxlength:
1136 1136 return text
1137 1137 else:
1138 1138 return "%s..." % (text[:maxlength-3])
1139 1139
1140 1140 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
1141 1141 '''yield every hg repository under path, recursively.'''
1142 1142 def errhandler(err):
1143 1143 if err.filename == path:
1144 1144 raise err
1145 1145 if followsym and hasattr(os.path, 'samestat'):
1146 1146 def _add_dir_if_not_there(dirlst, dirname):
1147 1147 match = False
1148 1148 samestat = os.path.samestat
1149 1149 dirstat = os.stat(dirname)
1150 1150 for lstdirstat in dirlst:
1151 1151 if samestat(dirstat, lstdirstat):
1152 1152 match = True
1153 1153 break
1154 1154 if not match:
1155 1155 dirlst.append(dirstat)
1156 1156 return not match
1157 1157 else:
1158 1158 followsym = False
1159 1159
1160 1160 if (seen_dirs is None) and followsym:
1161 1161 seen_dirs = []
1162 1162 _add_dir_if_not_there(seen_dirs, path)
1163 1163 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
1164 1164 if '.hg' in dirs:
1165 1165 yield root # found a repository
1166 1166 qroot = os.path.join(root, '.hg', 'patches')
1167 1167 if os.path.isdir(os.path.join(qroot, '.hg')):
1168 1168 yield qroot # we have a patch queue repo here
1169 1169 if recurse:
1170 1170 # avoid recursing inside the .hg directory
1171 1171 dirs.remove('.hg')
1172 1172 else:
1173 1173 dirs[:] = [] # don't descend further
1174 1174 elif followsym:
1175 1175 newdirs = []
1176 1176 for d in dirs:
1177 1177 fname = os.path.join(root, d)
1178 1178 if _add_dir_if_not_there(seen_dirs, fname):
1179 1179 if os.path.islink(fname):
1180 1180 for hgname in walkrepos(fname, True, seen_dirs):
1181 1181 yield hgname
1182 1182 else:
1183 1183 newdirs.append(d)
1184 1184 dirs[:] = newdirs
1185 1185
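A hedged usage sketch; '/srv/repos' is a hypothetical directory tree, and the import assumes Mercurial is on the Python path:

    from mercurial import util

    # Yield every repository under the tree, following symlinks but not
    # descending into the repositories themselves (recurse=False).
    for repo in util.walkrepos('/srv/repos', followsym=True):
        print repo
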
1186 1186 _rcpath = None
1187 1187
1188 1188 def os_rcpath():
1189 1189 '''return default os-specific hgrc search path'''
1190 1190 path = system_rcpath()
1191 1191 path.extend(user_rcpath())
1192 1192 path = [os.path.normpath(f) for f in path]
1193 1193 return path
1194 1194
1195 1195 def rcpath():
1196 1196 '''return hgrc search path. if env var HGRCPATH is set, use it.
1197 1197 for each item in path, if directory, use files ending in .rc,
1198 1198 else use item.
1199 1199 make HGRCPATH empty to only look in .hg/hgrc of current repo.
1200 1200 if no HGRCPATH, use default os-specific path.'''
1201 1201 global _rcpath
1202 1202 if _rcpath is None:
1203 1203 if 'HGRCPATH' in os.environ:
1204 1204 _rcpath = []
1205 1205 for p in os.environ['HGRCPATH'].split(os.pathsep):
1206 1206 if not p: continue
1207 1207 if os.path.isdir(p):
1208 1208 for f, kind in osutil.listdir(p):
1209 1209 if f.endswith('.rc'):
1210 1210 _rcpath.append(os.path.join(p, f))
1211 1211 else:
1212 1212 _rcpath.append(p)
1213 1213 else:
1214 1214 _rcpath = os_rcpath()
1215 1215 return _rcpath
1216 1216
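A hedged sketch of the HGRCPATH splitting rule from the docstring above; the path is hypothetical, and os.pathsep is ':' on POSIX, ';' on Windows:

    import os

    os.environ['HGRCPATH'] = os.pathsep.join(['/etc/mercurial/extra.rc', ''])
    # Empty entries are skipped, so this yields only the .rc file; a fully
    # empty HGRCPATH therefore leaves just the repository's own .hg/hgrc.
    print [p for p in os.environ['HGRCPATH'].split(os.pathsep) if p]
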
1217 1217 def bytecount(nbytes):
1218 1218 '''return byte count formatted as readable string, with units'''
1219 1219
1220 1220 units = (
1221 1221 (100, 1<<30, _('%.0f GB')),
1222 1222 (10, 1<<30, _('%.1f GB')),
1223 1223 (1, 1<<30, _('%.2f GB')),
1224 1224 (100, 1<<20, _('%.0f MB')),
1225 1225 (10, 1<<20, _('%.1f MB')),
1226 1226 (1, 1<<20, _('%.2f MB')),
1227 1227 (100, 1<<10, _('%.0f KB')),
1228 1228 (10, 1<<10, _('%.1f KB')),
1229 1229 (1, 1<<10, _('%.2f KB')),
1230 1230 (1, 1, _('%.0f bytes')),
1231 1231 )
1232 1232
1233 1233 for multiplier, divisor, format in units:
1234 1234 if nbytes >= divisor * multiplier:
1235 1235 return format % (nbytes / float(divisor))
1236 1236 return units[-1][2] % nbytes
1237 1237
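To make the unit table concrete: the first row whose threshold (multiplier * divisor) the value meets wins, which keeps roughly three significant digits. A hedged worked example with an illustrative value:

    nbytes = 1536
    # 1536 misses every GB and MB row and the 100/10 KB rows, and first
    # meets (1, 1<<10, '%.2f KB'), so bytecount would render:
    print '%.2f KB' % (nbytes / 1024.0)   # 1.50 KB
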
1238 1238 def drop_scheme(scheme, path):
1239 1239 sc = scheme + ':'
1240 1240 if path.startswith(sc):
1241 1241 path = path[len(sc):]
1242 1242 if path.startswith('//'):
1243 1243 path = path[2:]
1244 1244 return path
1245 1245
1246 1246 def uirepr(s):
1247 1247 # Avoid double backslash in Windows path repr()
1248 1248 return repr(s).replace('\\\\', '\\')
1249 1249
1250 1250 def termwidth():
1251 1251 if 'COLUMNS' in os.environ:
1252 1252 try:
1253 1253 return int(os.environ['COLUMNS'])
1254 1254 except ValueError:
1255 1255 pass
1256 1256 try:
1257 1257 import termios, array, fcntl
1258 1258 for dev in (sys.stdout, sys.stdin):
1259 1259 try:
1260 1260 try:
1261 1261 fd = dev.fileno()
1262 1262 except AttributeError:
1263 1263 continue
1264 1264 if not os.isatty(fd):
1265 1265 continue
1266 1266 arri = fcntl.ioctl(fd, termios.TIOCGWINSZ, '\0' * 8)
1267 1267 return array.array('h', arri)[1]
1268 1268 except ValueError:
1269 1269 pass
1270 1270 except ImportError:
1271 1271 pass
1272 1272 return 80
1273 1273
1274 1274 def wrap(line, hangindent, width=None):
1275 1275 if width is None:
1276 1276 width = termwidth() - 2
1277 1277 if width <= hangindent:
1278 1278 # adjust for weird terminal size
1279 1279 width = max(78, hangindent + 1)
1280 1280 padding = '\n' + ' ' * hangindent
1281 return padding.join(textwrap.wrap(line, width=width - hangindent))
1281 # To avoid corrupting multi-byte characters in line, we must wrap
1282 # a Unicode string instead of a bytestring.
1283 u = line.decode(encoding.encoding)
1284 w = padding.join(textwrap.wrap(u, width=width - hangindent))
1285 return w.encode(encoding.encoding)
1282 1286
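A hedged illustration of why the replacement code wraps a Unicode string rather than bytes; Python 2 semantics are assumed, matching the code above:

    import textwrap

    line = u'\xe9' * 30                          # 30 copies of e-acute
    raw = line.encode('utf-8')                   # 60 bytes, 2 per character
    chunk = textwrap.wrap(raw, width=15)[0]      # byte-oriented wrapping
    print len(chunk) % 2                         # 1: a character cut in half
    print len(textwrap.wrap(line, width=15)[0])  # 15 whole characters
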
1283 1287 def iterlines(iterator):
1284 1288 for chunk in iterator:
1285 1289 for line in chunk.splitlines():
1286 1290 yield line
@@ -1,126 +1,143 b''
1 1 #!/bin/sh
2 2
3 3 mkdir a
4 4 cd a
5 5 hg init
6 6 echo foo > t1
7 7 hg add t1
8 8 hg commit -m "1" -d "1000000 0"
9 9
10 10 cd ..
11 11 hg clone a b
12 12
13 13 cd a
14 14 echo foo > t2
15 15 hg add t2
16 16 hg commit -m "2" -d "1000000 0"
17 17
18 18 cd ../b
19 19 echo foo > t3
20 20 hg add t3
21 21 hg commit -m "3" -d "1000000 0"
22 22
23 23 hg push ../a
24 24 hg pull ../a
25 25 hg push ../a
26 26 hg merge
27 27 hg commit -m "4" -d "1000000 0"
28 28 hg push ../a
29 29 cd ..
30 30
31 31 hg init c
32 32 cd c
33 33 for i in 0 1 2; do
34 34 echo $i >> foo
35 35 hg ci -Am $i -d "1000000 0"
36 36 done
37 37 cd ..
38 38
39 39 hg clone c d
40 40 cd d
41 41 for i in 0 1; do
42 42 hg co -C $i
43 43 echo d-$i >> foo
44 44 hg ci -m d-$i -d "1000000 0"
45 45 done
46 46
47 47 HGMERGE=true hg merge 3
48 48 hg ci -m c-d -d "1000000 0"
49 49
50 50 hg push ../c; echo $?
51 51 hg push -r 2 ../c; echo $?
52 52 hg push -r 3 ../c; echo $?
53 53 hg push -r 3 -r 4 ../c; echo $?
54 54 hg push -f -r 3 -r 4 ../c; echo $?
55 55 hg push -r 5 ../c; echo $?
56 56
57 57 # issue 450
58 58 hg init ../e
59 59 hg push -r 0 ../e ; echo $?
60 60 hg push -r 1 ../e ; echo $?
61 61
62 62 cd ..
63 63
64 64 # issue 736
65 65 echo % issue 736
66 66 hg init f
67 67 cd f
68 68 hg -q branch a
69 69 echo 0 > foo
70 70 hg -q ci -d "1000000 0" -Am 0
71 71 echo 1 > foo
72 72 hg -q ci -d "1000000 0" -m 1
73 73 hg -q up 0
74 74 echo 2 > foo
75 75 hg -q ci -d "1000000 0" -m 2
76 76 hg -q up 0
77 77 hg -q branch b
78 78 echo 3 > foo
79 79 hg -q ci -d "1000000 0" -m 3
80 80 cd ..
81 81
82 82 hg -q clone f g
83 83 cd g
84 84
85 85 echo % push on existing branch and new branch
86 86 hg -q up 1
87 87 echo 4 > foo
88 88 hg -q ci -d "1000000 0" -m 4
89 89 hg -q up 0
90 90 echo 5 > foo
91 91 hg -q branch c
92 92 hg -q ci -d "1000000 0" -m 5
93 93 hg push -r 4 -r 5 ../f; echo $?
94 94
95 95 echo % fail on multiple head push
96 96 hg -q up 1
97 97 echo 6 > foo
98 98 hg -q ci -d "1000000 0" -m 6
99 99 hg push -r 4 -r 6 ../f; echo $?
100 100
101 101 echo % push replacement head on existing branches
102 102 hg -q up 3
103 103 echo 7 > foo
104 104 hg -q ci -d "1000000 0" -m 7
105 105 hg push -r 6 -r 7 ../f; echo $?
106 106
107 107 echo % merge of branch a to other branch b followed by unrelated push on branch a
108 108 hg -q up 6
109 109 HGMERGE=true hg -q merge 7
110 110 hg -q ci -d "1000000 0" -m 8
111 111 hg -q up 7
112 112 echo 9 > foo
113 113 hg -q ci -d "1000000 0" -m 9
114 114 hg push -r 8 ../f; echo $?
115 115 hg push -r 9 ../f; echo $?
116 116
117 117 echo % cheating the counting algorithm
118 118 hg -q up 8
119 119 HGMERGE=true hg -q merge 2
120 120 hg -q ci -d "1000000 0" -m 10
121 121 hg -q up 1
122 122 echo 11 > foo
123 123 hg -q ci -d "1000000 0" -m 11
124 124 hg push -r 10 -r 11 ../f; echo $?
125 125
126 echo % checking prepush logic does not allow silently pushing multiple new heads
127 cd ..
128 hg init g
129 echo init > g/init
130 hg -R g ci -Am init
131 echo a > g/a
132 hg -R g ci -Am a
133 hg clone g h
134 hg -R g up 0
135 echo b > g/b
136 hg -R g ci -Am b
137 hg -R h up 0
138 echo c > h/c
139 hg -R h ci -Am c
140 hg -R h push g
141 echo
142
126 143 exit 0
@@ -1,126 +1,143 b''
1 1 updating working directory
2 2 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
3 3 pushing to ../a
4 4 searching for changes
5 5 abort: push creates new remote heads!
6 6 (did you forget to merge? use push -f to force)
7 7 pulling from ../a
8 8 searching for changes
9 9 adding changesets
10 10 adding manifests
11 11 adding file changes
12 12 added 1 changesets with 1 changes to 1 files (+1 heads)
13 13 (run 'hg heads' to see heads, 'hg merge' to merge)
14 14 pushing to ../a
15 15 searching for changes
16 16 abort: push creates new remote heads!
17 17 (did you forget to merge? use push -f to force)
18 18 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
19 19 (branch merge, don't forget to commit)
20 20 pushing to ../a
21 21 searching for changes
22 22 adding changesets
23 23 adding manifests
24 24 adding file changes
25 25 added 2 changesets with 1 changes to 1 files
26 26 adding foo
27 27 updating working directory
28 28 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
29 29 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
30 30 created new head
31 31 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
32 32 created new head
33 33 merging foo
34 34 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
35 35 (branch merge, don't forget to commit)
36 36 pushing to ../c
37 37 searching for changes
38 38 abort: push creates new remote heads!
39 39 (did you forget to merge? use push -f to force)
40 40 1
41 41 pushing to ../c
42 42 searching for changes
43 43 no changes found
44 44 0
45 45 pushing to ../c
46 46 searching for changes
47 47 abort: push creates new remote heads!
48 48 (did you forget to merge? use push -f to force)
49 49 1
50 50 pushing to ../c
51 51 searching for changes
52 52 abort: push creates new remote heads!
53 53 (did you forget to merge? use push -f to force)
54 54 1
55 55 pushing to ../c
56 56 searching for changes
57 57 adding changesets
58 58 adding manifests
59 59 adding file changes
60 60 added 2 changesets with 2 changes to 1 files (+2 heads)
61 61 0
62 62 pushing to ../c
63 63 searching for changes
64 64 adding changesets
65 65 adding manifests
66 66 adding file changes
67 67 added 1 changesets with 1 changes to 1 files (-1 heads)
68 68 0
69 69 pushing to ../e
70 70 searching for changes
71 71 adding changesets
72 72 adding manifests
73 73 adding file changes
74 74 added 1 changesets with 1 changes to 1 files
75 75 0
76 76 pushing to ../e
77 77 searching for changes
78 78 adding changesets
79 79 adding manifests
80 80 adding file changes
81 81 added 1 changesets with 1 changes to 1 files
82 82 0
83 83 % issue 736
84 84 % push on existing branch and new branch
85 85 pushing to ../f
86 86 searching for changes
87 87 abort: push creates new remote branch 'c'!
88 88 (did you forget to merge? use push -f to force)
89 89 1
90 90 % fail on multiple head push
91 91 pushing to ../f
92 92 searching for changes
93 93 abort: push creates new remote heads!
94 94 (did you forget to merge? use push -f to force)
95 95 1
96 96 % push replacement head on existing branches
97 97 pushing to ../f
98 98 searching for changes
99 99 adding changesets
100 100 adding manifests
101 101 adding file changes
102 102 added 2 changesets with 2 changes to 1 files
103 103 0
104 104 % merge of branch a to other branch b followed by unrelated push on branch a
105 105 pushing to ../f
106 106 searching for changes
107 107 adding changesets
108 108 adding manifests
109 109 adding file changes
110 110 added 1 changesets with 1 changes to 1 files (-1 heads)
111 111 0
112 112 pushing to ../f
113 113 searching for changes
114 114 adding changesets
115 115 adding manifests
116 116 adding file changes
117 117 added 1 changesets with 1 changes to 1 files (+1 heads)
118 118 0
119 119 % cheating the counting algorithm
120 120 pushing to ../f
121 121 searching for changes
122 122 adding changesets
123 123 adding manifests
124 124 adding file changes
125 125 added 2 changesets with 2 changes to 1 files
126 126 0
127 % checking prepush logic does not allow silently pushing multiple new heads
128 abort: repository g already exists!
129 adding init
130 adding a
131 updating working directory
132 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
133 1 files updated, 0 files merged, 2 files removed, 0 files unresolved
134 adding b
135 created new head
136 1 files updated, 0 files merged, 2 files removed, 0 files unresolved
137 adding c
138 created new head
139 pushing to g
140 searching for changes
141 abort: push creates new remote heads!
142 (did you forget to merge? use push -f to force)
143