localrepo: support 'rev in repo' syntax
Alexander Solovyov
r9924:ea3acaae default
@@ -1,2158 +1,2164 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2, incorporated herein by reference.
7 7
8 8 from node import bin, hex, nullid, nullrev, short
9 9 from i18n import _
10 10 import repo, changegroup, subrepo
11 11 import changelog, dirstate, filelog, manifest, context
12 12 import lock, transaction, store, encoding
13 13 import util, extensions, hook, error
14 14 import match as match_
15 15 import merge as merge_
16 16 import tags as tags_
17 17 from lock import release
18 18 import weakref, stat, errno, os, time, inspect
19 19 propertycache = util.propertycache
20 20
21 21 class localrepository(repo.repository):
22 22 capabilities = set(('lookup', 'changegroupsubset', 'branchmap'))
23 23 supported = set('revlogv1 store fncache shared'.split())
24 24
25 25 def __init__(self, baseui, path=None, create=0):
26 26 repo.repository.__init__(self)
27 27 self.root = os.path.realpath(path)
28 28 self.path = os.path.join(self.root, ".hg")
29 29 self.origroot = path
30 30 self.opener = util.opener(self.path)
31 31 self.wopener = util.opener(self.root)
32 32 self.baseui = baseui
33 33 self.ui = baseui.copy()
34 34
35 35 try:
36 36 self.ui.readconfig(self.join("hgrc"), self.root)
37 37 extensions.loadall(self.ui)
38 38 except IOError:
39 39 pass
40 40
41 41 if not os.path.isdir(self.path):
42 42 if create:
43 43 if not os.path.exists(path):
44 44 os.mkdir(path)
45 45 os.mkdir(self.path)
46 46 requirements = ["revlogv1"]
47 47 if self.ui.configbool('format', 'usestore', True):
48 48 os.mkdir(os.path.join(self.path, "store"))
49 49 requirements.append("store")
50 50 if self.ui.configbool('format', 'usefncache', True):
51 51 requirements.append("fncache")
52 52 # create an invalid changelog
53 53 self.opener("00changelog.i", "a").write(
54 54 '\0\0\0\2' # represents revlogv2
55 55 ' dummy changelog to prevent using the old repo layout'
56 56 )
57 57 reqfile = self.opener("requires", "w")
58 58 for r in requirements:
59 59 reqfile.write("%s\n" % r)
60 60 reqfile.close()
61 61 else:
62 62 raise error.RepoError(_("repository %s not found") % path)
63 63 elif create:
64 64 raise error.RepoError(_("repository %s already exists") % path)
65 65 else:
66 66 # find requirements
67 67 requirements = set()
68 68 try:
69 69 requirements = set(self.opener("requires").read().splitlines())
70 70 except IOError, inst:
71 71 if inst.errno != errno.ENOENT:
72 72 raise
73 73 for r in requirements - self.supported:
74 74 raise error.RepoError(_("requirement '%s' not supported") % r)
75 75
76 76 self.sharedpath = self.path
77 77 try:
78 78 s = os.path.realpath(self.opener("sharedpath").read())
79 79 if not os.path.exists(s):
80 80 raise error.RepoError(
81 81 _('.hg/sharedpath points to nonexistent directory %s') % s)
82 82 self.sharedpath = s
83 83 except IOError, inst:
84 84 if inst.errno != errno.ENOENT:
85 85 raise
86 86
87 87 self.store = store.store(requirements, self.sharedpath, util.opener)
88 88 self.spath = self.store.path
89 89 self.sopener = self.store.opener
90 90 self.sjoin = self.store.join
91 91 self.opener.createmode = self.store.createmode
92 92
93 93 # These two define the set of tags for this repository. _tags
94 94 # maps tag name to node; _tagtypes maps tag name to 'global' or
95 95 # 'local'. (Global tags are defined by .hgtags across all
96 96 # heads, and local tags are defined in .hg/localtags.) They
97 97 # constitute the in-memory cache of tags.
98 98 self._tags = None
99 99 self._tagtypes = None
100 100
101 101 self._branchcache = None # in UTF-8
102 102 self._branchcachetip = None
103 103 self.nodetagscache = None
104 104 self.filterpats = {}
105 105 self._datafilters = {}
106 106 self._transref = self._lockref = self._wlockref = None
107 107
108 108 @propertycache
109 109 def changelog(self):
110 110 c = changelog.changelog(self.sopener)
111 111 if 'HG_PENDING' in os.environ:
112 112 p = os.environ['HG_PENDING']
113 113 if p.startswith(self.root):
114 114 c.readpending('00changelog.i.a')
115 115 self.sopener.defversion = c.version
116 116 return c
117 117
118 118 @propertycache
119 119 def manifest(self):
120 120 return manifest.manifest(self.sopener)
121 121
122 122 @propertycache
123 123 def dirstate(self):
124 124 return dirstate.dirstate(self.opener, self.ui, self.root)
125 125
126 126 def __getitem__(self, changeid):
127 127 if changeid is None:
128 128 return context.workingctx(self)
129 129 return context.changectx(self, changeid)
130 130
131 def __contains__(self, changeid):
132 try:
133 return bool(self.lookup(changeid))
134 except error.RepoLookupError:
135 return False
136
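# Editor's sketch of the new 'rev in repo' support (illustrative, not part
# of the changeset): __contains__ delegates to lookup(), so any key that
# lookup() resolves works with the 'in' operator, and an unknown key is
# reported as absent because RepoLookupError is caught. Assuming 'repo' is
# an open localrepository:
#
#     'tip' in repo           # True for any repository
#     0 in repo               # integer revision numbers work too
#     'ea3acaae' in repo      # hex prefix, True if it resolves
#     'no-such-rev' in repo   # False rather than an exception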
131 137 def __nonzero__(self):
132 138 return True
133 139
134 140 def __len__(self):
135 141 return len(self.changelog)
136 142
137 143 def __iter__(self):
138 144 for i in xrange(len(self)):
139 145 yield i
140 146
141 147 def url(self):
142 148 return 'file:' + self.root
143 149
144 150 def hook(self, name, throw=False, **args):
145 151 return hook.hook(self.ui, self, name, throw, **args)
146 152
147 153 tag_disallowed = ':\r\n'
148 154
149 155 def _tag(self, names, node, message, local, user, date, extra={}):
150 156 if isinstance(names, str):
151 157 allchars = names
152 158 names = (names,)
153 159 else:
154 160 allchars = ''.join(names)
155 161 for c in self.tag_disallowed:
156 162 if c in allchars:
157 163 raise util.Abort(_('%r cannot be used in a tag name') % c)
158 164
159 165 for name in names:
160 166 self.hook('pretag', throw=True, node=hex(node), tag=name,
161 167 local=local)
162 168
163 169 def writetags(fp, names, munge, prevtags):
164 170 fp.seek(0, 2)
165 171 if prevtags and prevtags[-1] != '\n':
166 172 fp.write('\n')
167 173 for name in names:
168 174 m = munge and munge(name) or name
169 175 if self._tagtypes and name in self._tagtypes:
170 176 old = self._tags.get(name, nullid)
171 177 fp.write('%s %s\n' % (hex(old), m))
172 178 fp.write('%s %s\n' % (hex(node), m))
173 179 fp.close()
174 180
175 181 prevtags = ''
176 182 if local:
177 183 try:
178 184 fp = self.opener('localtags', 'r+')
179 185 except IOError:
180 186 fp = self.opener('localtags', 'a')
181 187 else:
182 188 prevtags = fp.read()
183 189
184 190 # local tags are stored in the current charset
185 191 writetags(fp, names, None, prevtags)
186 192 for name in names:
187 193 self.hook('tag', node=hex(node), tag=name, local=local)
188 194 return
189 195
190 196 try:
191 197 fp = self.wfile('.hgtags', 'rb+')
192 198 except IOError:
193 199 fp = self.wfile('.hgtags', 'ab')
194 200 else:
195 201 prevtags = fp.read()
196 202
197 203 # committed tags are stored in UTF-8
198 204 writetags(fp, names, encoding.fromlocal, prevtags)
199 205
200 206 if '.hgtags' not in self.dirstate:
201 207 self.add(['.hgtags'])
202 208
203 209 m = match_.exact(self.root, '', ['.hgtags'])
204 210 tagnode = self.commit(message, user, date, extra=extra, match=m)
205 211
206 212 for name in names:
207 213 self.hook('tag', node=hex(node), tag=name, local=local)
208 214
209 215 return tagnode
210 216
211 217 def tag(self, names, node, message, local, user, date):
212 218 '''tag a revision with one or more symbolic names.
213 219
214 220 names is a list of strings or, when adding a single tag, names may be a
215 221 string.
216 222
217 223 if local is True, the tags are stored in a per-repository file.
218 224 otherwise, they are stored in the .hgtags file, and a new
219 225 changeset is committed with the change.
220 226
221 227 keyword arguments:
222 228
223 229 local: whether to store tags in non-version-controlled file
224 230 (default False)
225 231
226 232 message: commit message to use if committing
227 233
228 234 user: name of user to use if committing
229 235
230 236 date: date tuple to use if committing'''
231 237
232 238 for x in self.status()[:5]:
233 239 if '.hgtags' in x:
234 240 raise util.Abort(_('working copy of .hgtags is changed '
235 241 '(please commit .hgtags manually)'))
236 242
237 243 self.tags() # instantiate the cache
238 244 self._tag(names, node, message, local, user, date)
239 245
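# Illustrative caller for tag() (names and values assumed): tag the current
# tip as 'stable', committing the change to .hgtags. Note that tag() refuses
# to run while .hgtags has uncommitted changes in the working copy.
#
#     node = repo.changelog.tip()
#     repo.tag('stable', node, 'Added tag stable for release', local=False,
#              user='editor <editor@example.com>', date=None)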
240 246 def tags(self):
241 247 '''return a mapping of tag to node'''
242 248 if self._tags is None:
243 249 (self._tags, self._tagtypes) = self._findtags()
244 250
245 251 return self._tags
246 252
247 253 def _findtags(self):
248 254 '''Do the hard work of finding tags. Return a pair of dicts
249 255 (tags, tagtypes) where tags maps tag name to node, and tagtypes
250 256 maps tag name to a string like \'global\' or \'local\'.
251 257 Subclasses or extensions are free to add their own tags, but
252 258 should be aware that the returned dicts will be retained for the
253 259 duration of the localrepo object.'''
254 260
255 261 # XXX what tagtype should subclasses/extensions use? Currently
256 262 # mq and bookmarks add tags, but do not set the tagtype at all.
257 263 # Should each extension invent its own tag type? Should there
258 264 # be one tagtype for all such "virtual" tags? Or is the status
259 265 # quo fine?
260 266
261 267 alltags = {} # map tag name to (node, hist)
262 268 tagtypes = {}
263 269
264 270 tags_.findglobaltags(self.ui, self, alltags, tagtypes)
265 271 tags_.readlocaltags(self.ui, self, alltags, tagtypes)
266 272
267 273 # Build the return dicts. Have to re-encode tag names because
268 274 # the tags module always uses UTF-8 (in order not to lose info
269 275 # writing to the cache), but the rest of Mercurial wants them in
270 276 # local encoding.
271 277 tags = {}
272 278 for (name, (node, hist)) in alltags.iteritems():
273 279 if node != nullid:
274 280 tags[encoding.tolocal(name)] = node
275 281 tags['tip'] = self.changelog.tip()
276 282 tagtypes = dict([(encoding.tolocal(name), value)
277 283 for (name, value) in tagtypes.iteritems()])
278 284 return (tags, tagtypes)
279 285
280 286 def tagtype(self, tagname):
281 287 '''
282 288 return the type of the given tag. result can be:
283 289
284 290 'local' : a local tag
285 291 'global' : a global tag
286 292 None : tag does not exist
287 293 '''
288 294
289 295 self.tags()
290 296
291 297 return self._tagtypes.get(tagname)
292 298
293 299 def tagslist(self):
294 300 '''return a list of tags ordered by revision'''
295 301 l = []
296 302 for t, n in self.tags().iteritems():
297 303 try:
298 304 r = self.changelog.rev(n)
299 305 except:
300 306 r = -2 # sort to the beginning of the list if unknown
301 307 l.append((r, t, n))
302 308 return [(t, n) for r, t, n in sorted(l)]
303 309
304 310 def nodetags(self, node):
305 311 '''return the tags associated with a node'''
306 312 if not self.nodetagscache:
307 313 self.nodetagscache = {}
308 314 for t, n in self.tags().iteritems():
309 315 self.nodetagscache.setdefault(n, []).append(t)
310 316 return self.nodetagscache.get(node, [])
311 317
312 318 def _branchtags(self, partial, lrev):
313 319 # TODO: rename this function?
314 320 tiprev = len(self) - 1
315 321 if lrev != tiprev:
316 322 self._updatebranchcache(partial, lrev+1, tiprev+1)
317 323 self._writebranchcache(partial, self.changelog.tip(), tiprev)
318 324
319 325 return partial
320 326
321 327 def branchmap(self):
322 328 tip = self.changelog.tip()
323 329 if self._branchcache is not None and self._branchcachetip == tip:
324 330 return self._branchcache
325 331
326 332 oldtip = self._branchcachetip
327 333 self._branchcachetip = tip
328 334 if oldtip is None or oldtip not in self.changelog.nodemap:
329 335 partial, last, lrev = self._readbranchcache()
330 336 else:
331 337 lrev = self.changelog.rev(oldtip)
332 338 partial = self._branchcache
333 339
334 340 self._branchtags(partial, lrev)
335 341 # this private cache holds all heads (not just tips)
336 342 self._branchcache = partial
337 343
338 344 return self._branchcache
339 345
340 346 def branchtags(self):
341 347 '''return a dict where branch names map to the tipmost head of
342 348 the branch, open heads come before closed'''
343 349 bt = {}
344 350 for bn, heads in self.branchmap().iteritems():
345 351 head = None
346 352 for i in range(len(heads)-1, -1, -1):
347 353 h = heads[i]
348 354 if 'close' not in self.changelog.read(h)[5]:
349 355 head = h
350 356 break
351 357 # no open heads were found
352 358 if head is None:
353 359 head = heads[-1]
354 360 bt[bn] = head
355 361 return bt
356 362
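# Sketch (repository contents assumed): branchtags() collapses branchmap()
# to one node per branch, preferring the tipmost head whose changeset extra
# does not carry the 'close' marker.
#
#     repo.branchtags().get('default')   # tipmost open head, if any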
357 363
358 364 def _readbranchcache(self):
359 365 partial = {}
360 366 try:
361 367 f = self.opener("branchheads.cache")
362 368 lines = f.read().split('\n')
363 369 f.close()
364 370 except (IOError, OSError):
365 371 return {}, nullid, nullrev
366 372
367 373 try:
368 374 last, lrev = lines.pop(0).split(" ", 1)
369 375 last, lrev = bin(last), int(lrev)
370 376 if lrev >= len(self) or self[lrev].node() != last:
371 377 # invalidate the cache
372 378 raise ValueError('invalidating branch cache (tip differs)')
373 379 for l in lines:
374 380 if not l: continue
375 381 node, label = l.split(" ", 1)
376 382 partial.setdefault(label.strip(), []).append(bin(node))
377 383 except KeyboardInterrupt:
378 384 raise
379 385 except Exception, inst:
380 386 if self.ui.debugflag:
381 387 self.ui.warn(str(inst), '\n')
382 388 partial, last, lrev = {}, nullid, nullrev
383 389 return partial, last, lrev
384 390
385 391 def _writebranchcache(self, branches, tip, tiprev):
386 392 try:
387 393 f = self.opener("branchheads.cache", "w", atomictemp=True)
388 394 f.write("%s %s\n" % (hex(tip), tiprev))
389 395 for label, nodes in branches.iteritems():
390 396 for node in nodes:
391 397 f.write("%s %s\n" % (hex(node), label))
392 398 f.rename()
393 399 except (IOError, OSError):
394 400 pass
395 401
396 402 def _updatebranchcache(self, partial, start, end):
397 403 # collect new branch entries
398 404 newbranches = {}
399 405 for r in xrange(start, end):
400 406 c = self[r]
401 407 newbranches.setdefault(c.branch(), []).append(c.node())
402 408 # if older branchheads are reachable from new ones, they aren't
403 409 # really branchheads. Note checking parents is insufficient:
404 410 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
405 411 for branch, newnodes in newbranches.iteritems():
406 412 bheads = partial.setdefault(branch, [])
407 413 bheads.extend(newnodes)
408 414 if len(bheads) < 2:
409 415 continue
410 416 newbheads = []
411 417 # starting from tip means fewer passes over reachable
412 418 while newnodes:
413 419 latest = newnodes.pop()
414 420 if latest not in bheads:
415 421 continue
416 422 minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
417 423 reachable = self.changelog.reachable(latest, minbhrev)
418 424 bheads = [b for b in bheads if b not in reachable]
419 425 newbheads.insert(0, latest)
420 426 bheads.extend(newbheads)
421 427 partial[branch] = bheads
422 428
423 429 def lookup(self, key):
424 430 if isinstance(key, int):
425 431 return self.changelog.node(key)
426 432 elif key == '.':
427 433 return self.dirstate.parents()[0]
428 434 elif key == 'null':
429 435 return nullid
430 436 elif key == 'tip':
431 437 return self.changelog.tip()
432 438 n = self.changelog._match(key)
433 439 if n:
434 440 return n
435 441 if key in self.tags():
436 442 return self.tags()[key]
437 443 if key in self.branchtags():
438 444 return self.branchtags()[key]
439 445 n = self.changelog._partialmatch(key)
440 446 if n:
441 447 return n
442 448
443 449 # can't find key, check if it might have come from damaged dirstate
444 450 if key in self.dirstate.parents():
445 451 raise error.Abort(_("working directory has unknown parent '%s'!")
446 452 % short(key))
447 453 try:
448 454 if len(key) == 20:
449 455 key = hex(key)
450 456 except:
451 457 pass
452 458 raise error.RepoLookupError(_("unknown revision '%s'") % key)
453 459
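# lookup() resolution order, illustrated with assumed inputs: integer revs
# first, then the special names '.', 'null' and 'tip', then exact nodes,
# tags, branch names, and finally unambiguous hex prefixes.
#
#     repo.lookup(0)         # binary node of revision 0
#     repo.lookup('tip')     # current tip
#     repo.lookup('ea3ac')   # prefix match, if unambiguous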
454 460 def local(self):
455 461 return True
456 462
457 463 def join(self, f):
458 464 return os.path.join(self.path, f)
459 465
460 466 def wjoin(self, f):
461 467 return os.path.join(self.root, f)
462 468
463 469 def rjoin(self, f):
464 470 return os.path.join(self.root, util.pconvert(f))
465 471
466 472 def file(self, f):
467 473 if f[0] == '/':
468 474 f = f[1:]
469 475 return filelog.filelog(self.sopener, f)
470 476
471 477 def changectx(self, changeid):
472 478 return self[changeid]
473 479
474 480 def parents(self, changeid=None):
475 481 '''get list of changectxs for parents of changeid'''
476 482 return self[changeid].parents()
477 483
478 484 def filectx(self, path, changeid=None, fileid=None):
479 485 """changeid can be a changeset revision, node, or tag.
480 486 fileid can be a file revision or node."""
481 487 return context.filectx(self, path, changeid, fileid)
482 488
483 489 def getcwd(self):
484 490 return self.dirstate.getcwd()
485 491
486 492 def pathto(self, f, cwd=None):
487 493 return self.dirstate.pathto(f, cwd)
488 494
489 495 def wfile(self, f, mode='r'):
490 496 return self.wopener(f, mode)
491 497
492 498 def _link(self, f):
493 499 return os.path.islink(self.wjoin(f))
494 500
495 501 def _filter(self, filter, filename, data):
496 502 if filter not in self.filterpats:
497 503 l = []
498 504 for pat, cmd in self.ui.configitems(filter):
499 505 if cmd == '!':
500 506 continue
501 507 mf = match_.match(self.root, '', [pat])
502 508 fn = None
503 509 params = cmd
504 510 for name, filterfn in self._datafilters.iteritems():
505 511 if cmd.startswith(name):
506 512 fn = filterfn
507 513 params = cmd[len(name):].lstrip()
508 514 break
509 515 if not fn:
510 516 fn = lambda s, c, **kwargs: util.filter(s, c)
511 517 # Wrap old filters not supporting keyword arguments
512 518 if not inspect.getargspec(fn)[2]:
513 519 oldfn = fn
514 520 fn = lambda s, c, **kwargs: oldfn(s, c)
515 521 l.append((mf, fn, params))
516 522 self.filterpats[filter] = l
517 523
518 524 for mf, fn, cmd in self.filterpats[filter]:
519 525 if mf(filename):
520 526 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
521 527 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
522 528 break
523 529
524 530 return data
525 531
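# _filter() is driven by the [encode] and [decode] hgrc sections; a minimal
# configuration sketch (the pattern and commands here are assumptions, not
# defaults). Plain commands are run through util.filter; the 'pipe:' prefix
# makes the piping explicit.
#
#     [encode]
#     **.gz = pipe: gunzip
#     [decode]
#     **.gz = pipe: gzip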
526 532 def adddatafilter(self, name, filter):
527 533 self._datafilters[name] = filter
528 534
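# Sketch of registering an in-process data filter (names assumed): once a
# filter is registered, any [encode]/[decode] command string starting with
# its name is routed to the Python function instead of a shell command,
# with the remainder of the string passed as params.
#
#     def upperfilter(s, params, **kwargs):
#         return s.upper()
#     repo.adddatafilter('upper:', upperfilter)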
529 535 def wread(self, filename):
530 536 if self._link(filename):
531 537 data = os.readlink(self.wjoin(filename))
532 538 else:
533 539 data = self.wopener(filename, 'r').read()
534 540 return self._filter("encode", filename, data)
535 541
536 542 def wwrite(self, filename, data, flags):
537 543 data = self._filter("decode", filename, data)
538 544 try:
539 545 os.unlink(self.wjoin(filename))
540 546 except OSError:
541 547 pass
542 548 if 'l' in flags:
543 549 self.wopener.symlink(data, filename)
544 550 else:
545 551 self.wopener(filename, 'w').write(data)
546 552 if 'x' in flags:
547 553 util.set_flags(self.wjoin(filename), False, True)
548 554
549 555 def wwritedata(self, filename, data):
550 556 return self._filter("decode", filename, data)
551 557
552 558 def transaction(self):
553 559 tr = self._transref and self._transref() or None
554 560 if tr and tr.running():
555 561 return tr.nest()
556 562
557 563 # abort here if the journal already exists
558 564 if os.path.exists(self.sjoin("journal")):
559 565 raise error.RepoError(_("abandoned transaction found - run hg recover"))
560 566
561 567 # save dirstate for rollback
562 568 try:
563 569 ds = self.opener("dirstate").read()
564 570 except IOError:
565 571 ds = ""
566 572 self.opener("journal.dirstate", "w").write(ds)
567 573 self.opener("journal.branch", "w").write(self.dirstate.branch())
568 574
569 575 renames = [(self.sjoin("journal"), self.sjoin("undo")),
570 576 (self.join("journal.dirstate"), self.join("undo.dirstate")),
571 577 (self.join("journal.branch"), self.join("undo.branch"))]
572 578 tr = transaction.transaction(self.ui.warn, self.sopener,
573 579 self.sjoin("journal"),
574 580 aftertrans(renames),
575 581 self.store.createmode)
576 582 self._transref = weakref.ref(tr)
577 583 return tr
578 584
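# Typical transaction discipline, sketched (not part of this diff): take
# the store lock first, then run all revlog writes under one transaction so
# that aftertrans() leaves consistent undo files on close. commitctx()
# below follows the same pattern.
#
#     lock = repo.lock()
#     tr = None
#     try:
#         tr = repo.transaction()
#         # ... append to changelog/manifest/filelogs through tr ...
#         tr.close()
#     finally:
#         del tr
#         lock.release()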
579 585 def recover(self):
580 586 lock = self.lock()
581 587 try:
582 588 if os.path.exists(self.sjoin("journal")):
583 589 self.ui.status(_("rolling back interrupted transaction\n"))
584 590 transaction.rollback(self.sopener, self.sjoin("journal"), self.ui.warn)
585 591 self.invalidate()
586 592 return True
587 593 else:
588 594 self.ui.warn(_("no interrupted transaction available\n"))
589 595 return False
590 596 finally:
591 597 lock.release()
592 598
593 599 def rollback(self):
594 600 wlock = lock = None
595 601 try:
596 602 wlock = self.wlock()
597 603 lock = self.lock()
598 604 if os.path.exists(self.sjoin("undo")):
599 605 self.ui.status(_("rolling back last transaction\n"))
600 606 transaction.rollback(self.sopener, self.sjoin("undo"), self.ui.warn)
601 607 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
602 608 try:
603 609 branch = self.opener("undo.branch").read()
604 610 self.dirstate.setbranch(branch)
605 611 except IOError:
606 612 self.ui.warn(_("Named branch could not be reset, "
607 613 "current branch still is: %s\n")
608 614 % encoding.tolocal(self.dirstate.branch()))
609 615 self.invalidate()
610 616 self.dirstate.invalidate()
611 617 self.destroyed()
612 618 else:
613 619 self.ui.warn(_("no rollback information available\n"))
614 620 finally:
615 621 release(lock, wlock)
616 622
617 623 def invalidate(self):
618 624 for a in "changelog manifest".split():
619 625 if a in self.__dict__:
620 626 delattr(self, a)
621 627 self._tags = None
622 628 self._tagtypes = None
623 629 self.nodetagscache = None
624 630 self._branchcache = None # in UTF-8
625 631 self._branchcachetip = None
626 632
627 633 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
628 634 try:
629 635 l = lock.lock(lockname, 0, releasefn, desc=desc)
630 636 except error.LockHeld, inst:
631 637 if not wait:
632 638 raise
633 639 self.ui.warn(_("waiting for lock on %s held by %r\n") %
634 640 (desc, inst.locker))
635 641 # default to 600 seconds timeout
636 642 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
637 643 releasefn, desc=desc)
638 644 if acquirefn:
639 645 acquirefn()
640 646 return l
641 647
642 648 def lock(self, wait=True):
643 649 '''Lock the repository store (.hg/store) and return a weak reference
644 650 to the lock. Use this before modifying the store (e.g. committing or
645 651 stripping). If you are opening a transaction, get a lock as well.'''
646 652 l = self._lockref and self._lockref()
647 653 if l is not None and l.held:
648 654 l.lock()
649 655 return l
650 656
651 657 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
652 658 _('repository %s') % self.origroot)
653 659 self._lockref = weakref.ref(l)
654 660 return l
655 661
656 662 def wlock(self, wait=True):
657 663 '''Lock the non-store parts of the repository (everything under
658 664 .hg except .hg/store) and return a weak reference to the lock.
659 665 Use this before modifying files in .hg.'''
660 666 l = self._wlockref and self._wlockref()
661 667 if l is not None and l.held:
662 668 l.lock()
663 669 return l
664 670
665 671 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
666 672 self.dirstate.invalidate, _('working directory of %s') %
667 673 self.origroot)
668 674 self._wlockref = weakref.ref(l)
669 675 return l
670 676
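# Lock ordering convention, as rollback() above demonstrates: when both
# locks are needed, take wlock before lock and release them together with
# the release() helper imported from the lock module:
#
#     wlock = lock = None
#     try:
#         wlock = repo.wlock()
#         lock = repo.lock()
#         # ... modify store and working state ...
#     finally:
#         release(lock, wlock)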
671 677 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
672 678 """
673 679 commit an individual file as part of a larger transaction
674 680 """
675 681
676 682 fname = fctx.path()
677 683 text = fctx.data()
678 684 flog = self.file(fname)
679 685 fparent1 = manifest1.get(fname, nullid)
680 686 fparent2 = fparent2o = manifest2.get(fname, nullid)
681 687
682 688 meta = {}
683 689 copy = fctx.renamed()
684 690 if copy and copy[0] != fname:
685 691 # Mark the new revision of this file as a copy of another
686 692 # file. This copy data will effectively act as a parent
687 693 # of this new revision. If this is a merge, the first
688 694 # parent will be the nullid (meaning "look up the copy data")
689 695 # and the second one will be the other parent. For example:
690 696 #
691 697 # 0 --- 1 --- 3 rev1 changes file foo
692 698 # \ / rev2 renames foo to bar and changes it
693 699 # \- 2 -/ rev3 should have bar with all changes and
694 700 # should record that bar descends from
695 701 # bar in rev2 and foo in rev1
696 702 #
697 703 # this allows this merge to succeed:
698 704 #
699 705 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
700 706 # \ / merging rev3 and rev4 should use bar@rev2
701 707 # \- 2 --- 4 as the merge base
702 708 #
703 709
704 710 cfname = copy[0]
705 711 crev = manifest1.get(cfname)
706 712 newfparent = fparent2
707 713
708 714 if manifest2: # branch merge
709 715 if fparent2 == nullid or crev is None: # copied on remote side
710 716 if cfname in manifest2:
711 717 crev = manifest2[cfname]
712 718 newfparent = fparent1
713 719
714 720 # find source in nearest ancestor if we've lost track
715 721 if not crev:
716 722 self.ui.debug(" %s: searching for copy revision for %s\n" %
717 723 (fname, cfname))
718 724 for ancestor in self['.'].ancestors():
719 725 if cfname in ancestor:
720 726 crev = ancestor[cfname].filenode()
721 727 break
722 728
723 729 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
724 730 meta["copy"] = cfname
725 731 meta["copyrev"] = hex(crev)
726 732 fparent1, fparent2 = nullid, newfparent
727 733 elif fparent2 != nullid:
728 734 # is one parent an ancestor of the other?
729 735 fparentancestor = flog.ancestor(fparent1, fparent2)
730 736 if fparentancestor == fparent1:
731 737 fparent1, fparent2 = fparent2, nullid
732 738 elif fparentancestor == fparent2:
733 739 fparent2 = nullid
734 740
735 741 # is the file changed?
736 742 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
737 743 changelist.append(fname)
738 744 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
739 745
740 746 # are just the flags changed during merge?
741 747 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
742 748 changelist.append(fname)
743 749
744 750 return fparent1
745 751
746 752 def commit(self, text="", user=None, date=None, match=None, force=False,
747 753 editor=False, extra={}):
748 754 """Add a new revision to current repository.
749 755
750 756 Revision information is gathered from the working directory,
751 757 match can be used to filter the committed files. If editor is
752 758 supplied, it is called to get a commit message.
753 759 """
754 760
755 761 def fail(f, msg):
756 762 raise util.Abort('%s: %s' % (f, msg))
757 763
758 764 if not match:
759 765 match = match_.always(self.root, '')
760 766
761 767 if not force:
762 768 vdirs = []
763 769 match.dir = vdirs.append
764 770 match.bad = fail
765 771
766 772 wlock = self.wlock()
767 773 try:
768 774 p1, p2 = self.dirstate.parents()
769 775 wctx = self[None]
770 776
771 777 if (not force and p2 != nullid and match and
772 778 (match.files() or match.anypats())):
773 779 raise util.Abort(_('cannot partially commit a merge '
774 780 '(do not specify files or patterns)'))
775 781
776 782 changes = self.status(match=match, clean=force)
777 783 if force:
778 784 changes[0].extend(changes[6]) # mq may commit unchanged files
779 785
780 786 # check subrepos
781 787 subs = []
782 788 for s in wctx.substate:
783 789 if match(s) and wctx.sub(s).dirty():
784 790 subs.append(s)
785 791 if subs and '.hgsubstate' not in changes[0]:
786 792 changes[0].insert(0, '.hgsubstate')
787 793
788 794 # make sure all explicit patterns are matched
789 795 if not force and match.files():
790 796 matched = set(changes[0] + changes[1] + changes[2])
791 797
792 798 for f in match.files():
793 799 if f == '.' or f in matched or f in wctx.substate:
794 800 continue
795 801 if f in changes[3]: # missing
796 802 fail(f, _('file not found!'))
797 803 if f in vdirs: # visited directory
798 804 d = f + '/'
799 805 for mf in matched:
800 806 if mf.startswith(d):
801 807 break
802 808 else:
803 809 fail(f, _("no match under directory!"))
804 810 elif f not in self.dirstate:
805 811 fail(f, _("file not tracked!"))
806 812
807 813 if (not force and not extra.get("close") and p2 == nullid
808 814 and not (changes[0] or changes[1] or changes[2])
809 815 and self[None].branch() == self['.'].branch()):
810 816 return None
811 817
812 818 ms = merge_.mergestate(self)
813 819 for f in changes[0]:
814 820 if f in ms and ms[f] == 'u':
815 821 raise util.Abort(_("unresolved merge conflicts "
816 822 "(see hg resolve)"))
817 823
818 824 cctx = context.workingctx(self, (p1, p2), text, user, date,
819 825 extra, changes)
820 826 if editor:
821 827 cctx._text = editor(self, cctx, subs)
822 828
823 829 # commit subs
824 830 if subs:
825 831 state = wctx.substate.copy()
826 832 for s in subs:
827 833 self.ui.status(_('committing subrepository %s\n') % s)
828 834 sr = wctx.sub(s).commit(cctx._text, user, date)
829 835 state[s] = (state[s][0], sr)
830 836 subrepo.writestate(self, state)
831 837
832 838 ret = self.commitctx(cctx, True)
833 839
834 840 # update dirstate and mergestate
835 841 for f in changes[0] + changes[1]:
836 842 self.dirstate.normal(f)
837 843 for f in changes[2]:
838 844 self.dirstate.forget(f)
839 845 self.dirstate.setparents(ret)
840 846 ms.reset()
841 847
842 848 return ret
843 849
844 850 finally:
845 851 wlock.release()
846 852
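# Minimal commit driver, sketched with assumed file names: restrict the
# commit to one file with an exact matcher and detect the no-op case, in
# which commit() returns None.
#
#     m = match_.exact(repo.root, '', ['a.txt'])
#     node = repo.commit('fix a.txt', user='editor <editor@example.com>',
#                        date=None, match=m)
#     if node is None:
#         repo.ui.status('nothing changed\n')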
847 853 def commitctx(self, ctx, error=False):
848 854 """Add a new revision to current repository.
849 855
850 856 Revision information is passed via the context argument.
851 857 """
852 858
853 859 tr = lock = None
854 860 removed = ctx.removed()
855 861 p1, p2 = ctx.p1(), ctx.p2()
856 862 m1 = p1.manifest().copy()
857 863 m2 = p2.manifest()
858 864 user = ctx.user()
859 865
860 866 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
861 867 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
862 868
863 869 lock = self.lock()
864 870 try:
865 871 tr = self.transaction()
866 872 trp = weakref.proxy(tr)
867 873
868 874 # check in files
869 875 new = {}
870 876 changed = []
871 877 linkrev = len(self)
872 878 for f in sorted(ctx.modified() + ctx.added()):
873 879 self.ui.note(f + "\n")
874 880 try:
875 881 fctx = ctx[f]
876 882 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
877 883 changed)
878 884 m1.set(f, fctx.flags())
879 885 except (OSError, IOError):
880 886 if error:
881 887 self.ui.warn(_("trouble committing %s!\n") % f)
882 888 raise
883 889 else:
884 890 removed.append(f)
885 891
886 892 # update manifest
887 893 m1.update(new)
888 894 removed = [f for f in sorted(removed) if f in m1 or f in m2]
889 895 drop = [f for f in removed if f in m1]
890 896 for f in drop:
891 897 del m1[f]
892 898 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
893 899 p2.manifestnode(), (new, drop))
894 900
895 901 # update changelog
896 902 self.changelog.delayupdate()
897 903 n = self.changelog.add(mn, changed + removed, ctx.description(),
898 904 trp, p1.node(), p2.node(),
899 905 user, ctx.date(), ctx.extra().copy())
900 906 p = lambda: self.changelog.writepending() and self.root or ""
901 907 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
902 908 parent2=xp2, pending=p)
903 909 self.changelog.finalize(trp)
904 910 tr.close()
905 911
906 912 if self._branchcache:
907 913 self.branchtags()
908 914
909 915 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
910 916 return n
911 917 finally:
912 918 del tr
913 919 lock.release()
914 920
915 921 def destroyed(self):
916 922 '''Inform the repository that nodes have been destroyed.
917 923 Intended for use by strip and rollback, so there's a common
918 924 place for anything that has to be done after destroying history.'''
919 925 # XXX it might be nice if we could take the list of destroyed
920 926 # nodes, but I don't see an easy way for rollback() to do that
921 927
922 928 # Ensure the persistent tag cache is updated. Doing it now
923 929 # means that the tag cache only has to worry about destroyed
924 930 # heads immediately after a strip/rollback. That in turn
925 931 # guarantees that "cachetip == currenttip" (comparing both rev
926 932 # and node) always means no nodes have been added or destroyed.
927 933
928 934 # XXX this is suboptimal when qrefresh'ing: we strip the current
929 935 # head, refresh the tag cache, then immediately add a new head.
930 936 # But I think doing it this way is necessary for the "instant
931 937 # tag cache retrieval" case to work.
932 938 tags_.findglobaltags(self.ui, self, {}, {})
933 939
934 940 def walk(self, match, node=None):
935 941 '''
936 942 walk recursively through the directory tree or a given
937 943 changeset, finding all files matched by the match
938 944 function
939 945 '''
940 946 return self[node].walk(match)
941 947
942 948 def status(self, node1='.', node2=None, match=None,
943 949 ignored=False, clean=False, unknown=False):
944 950 """return status of files between two nodes or node and working directory
945 951
946 952 If node1 is None, use the first dirstate parent instead.
947 953 If node2 is None, compare node1 with working directory.
948 954 """
949 955
950 956 def mfmatches(ctx):
951 957 mf = ctx.manifest().copy()
952 958 for fn in mf.keys():
953 959 if not match(fn):
954 960 del mf[fn]
955 961 return mf
956 962
957 963 if isinstance(node1, context.changectx):
958 964 ctx1 = node1
959 965 else:
960 966 ctx1 = self[node1]
961 967 if isinstance(node2, context.changectx):
962 968 ctx2 = node2
963 969 else:
964 970 ctx2 = self[node2]
965 971
966 972 working = ctx2.rev() is None
967 973 parentworking = working and ctx1 == self['.']
968 974 match = match or match_.always(self.root, self.getcwd())
969 975 listignored, listclean, listunknown = ignored, clean, unknown
970 976
971 977 # load earliest manifest first for caching reasons
972 978 if not working and ctx2.rev() < ctx1.rev():
973 979 ctx2.manifest()
974 980
975 981 if not parentworking:
976 982 def bad(f, msg):
977 983 if f not in ctx1:
978 984 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
979 985 match.bad = bad
980 986
981 987 if working: # we need to scan the working dir
982 988 s = self.dirstate.status(match, listignored, listclean, listunknown)
983 989 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
984 990
985 991 # check for any possibly clean files
986 992 if parentworking and cmp:
987 993 fixup = []
988 994 # do a full compare of any files that might have changed
989 995 for f in sorted(cmp):
990 996 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
991 997 or ctx1[f].cmp(ctx2[f].data())):
992 998 modified.append(f)
993 999 else:
994 1000 fixup.append(f)
995 1001
996 1002 if listclean:
997 1003 clean += fixup
998 1004
999 1005 # update dirstate for files that are actually clean
1000 1006 if fixup:
1001 1007 try:
1002 1008 # updating the dirstate is optional
1003 1009 # so we don't wait on the lock
1004 1010 wlock = self.wlock(False)
1005 1011 try:
1006 1012 for f in fixup:
1007 1013 self.dirstate.normal(f)
1008 1014 finally:
1009 1015 wlock.release()
1010 1016 except error.LockError:
1011 1017 pass
1012 1018
1013 1019 if not parentworking:
1014 1020 mf1 = mfmatches(ctx1)
1015 1021 if working:
1016 1022 # we are comparing working dir against non-parent
1017 1023 # generate a pseudo-manifest for the working dir
1018 1024 mf2 = mfmatches(self['.'])
1019 1025 for f in cmp + modified + added:
1020 1026 mf2[f] = None
1021 1027 mf2.set(f, ctx2.flags(f))
1022 1028 for f in removed:
1023 1029 if f in mf2:
1024 1030 del mf2[f]
1025 1031 else:
1026 1032 # we are comparing two revisions
1027 1033 deleted, unknown, ignored = [], [], []
1028 1034 mf2 = mfmatches(ctx2)
1029 1035
1030 1036 modified, added, clean = [], [], []
1031 1037 for fn in mf2:
1032 1038 if fn in mf1:
1033 1039 if (mf1.flags(fn) != mf2.flags(fn) or
1034 1040 (mf1[fn] != mf2[fn] and
1035 1041 (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
1036 1042 modified.append(fn)
1037 1043 elif listclean:
1038 1044 clean.append(fn)
1039 1045 del mf1[fn]
1040 1046 else:
1041 1047 added.append(fn)
1042 1048 removed = mf1.keys()
1043 1049
1044 1050 r = modified, added, removed, deleted, unknown, ignored, clean
1045 1051 [l.sort() for l in r]
1046 1052 return r
1047 1053
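# status() returns a 7-tuple in the order fixed just above; unknown and
# ignored entries are only gathered when requested:
#
#     (modified, added, removed, deleted,
#      unknown, ignored, clean) = repo.status(unknown=True, clean=True)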
1048 1054 def add(self, list):
1049 1055 wlock = self.wlock()
1050 1056 try:
1051 1057 rejected = []
1052 1058 for f in list:
1053 1059 p = self.wjoin(f)
1054 1060 try:
1055 1061 st = os.lstat(p)
1056 1062 except:
1057 1063 self.ui.warn(_("%s does not exist!\n") % f)
1058 1064 rejected.append(f)
1059 1065 continue
1060 1066 if st.st_size > 10000000:
1061 1067 self.ui.warn(_("%s: files over 10MB may cause memory and"
1062 1068 " performance problems\n"
1063 1069 "(use 'hg revert %s' to unadd the file)\n")
1064 1070 % (f, f))
1065 1071 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1066 1072 self.ui.warn(_("%s not added: only files and symlinks "
1067 1073 "supported currently\n") % f)
1068 1074 rejected.append(p)
1069 1075 elif self.dirstate[f] in 'amn':
1070 1076 self.ui.warn(_("%s already tracked!\n") % f)
1071 1077 elif self.dirstate[f] == 'r':
1072 1078 self.dirstate.normallookup(f)
1073 1079 else:
1074 1080 self.dirstate.add(f)
1075 1081 return rejected
1076 1082 finally:
1077 1083 wlock.release()
1078 1084
1079 1085 def forget(self, list):
1080 1086 wlock = self.wlock()
1081 1087 try:
1082 1088 for f in list:
1083 1089 if self.dirstate[f] != 'a':
1084 1090 self.ui.warn(_("%s not added!\n") % f)
1085 1091 else:
1086 1092 self.dirstate.forget(f)
1087 1093 finally:
1088 1094 wlock.release()
1089 1095
1090 1096 def remove(self, list, unlink=False):
1091 1097 if unlink:
1092 1098 for f in list:
1093 1099 try:
1094 1100 util.unlink(self.wjoin(f))
1095 1101 except OSError, inst:
1096 1102 if inst.errno != errno.ENOENT:
1097 1103 raise
1098 1104 wlock = self.wlock()
1099 1105 try:
1100 1106 for f in list:
1101 1107 if unlink and os.path.exists(self.wjoin(f)):
1102 1108 self.ui.warn(_("%s still exists!\n") % f)
1103 1109 elif self.dirstate[f] == 'a':
1104 1110 self.dirstate.forget(f)
1105 1111 elif f not in self.dirstate:
1106 1112 self.ui.warn(_("%s not tracked!\n") % f)
1107 1113 else:
1108 1114 self.dirstate.remove(f)
1109 1115 finally:
1110 1116 wlock.release()
1111 1117
1112 1118 def undelete(self, list):
1113 1119 manifests = [self.manifest.read(self.changelog.read(p)[0])
1114 1120 for p in self.dirstate.parents() if p != nullid]
1115 1121 wlock = self.wlock()
1116 1122 try:
1117 1123 for f in list:
1118 1124 if self.dirstate[f] != 'r':
1119 1125 self.ui.warn(_("%s not removed!\n") % f)
1120 1126 else:
1121 1127 m = f in manifests[0] and manifests[0] or manifests[1]
1122 1128 t = self.file(f).read(m[f])
1123 1129 self.wwrite(f, t, m.flags(f))
1124 1130 self.dirstate.normal(f)
1125 1131 finally:
1126 1132 wlock.release()
1127 1133
1128 1134 def copy(self, source, dest):
1129 1135 p = self.wjoin(dest)
1130 1136 if not (os.path.exists(p) or os.path.islink(p)):
1131 1137 self.ui.warn(_("%s does not exist!\n") % dest)
1132 1138 elif not (os.path.isfile(p) or os.path.islink(p)):
1133 1139 self.ui.warn(_("copy failed: %s is not a file or a "
1134 1140 "symbolic link\n") % dest)
1135 1141 else:
1136 1142 wlock = self.wlock()
1137 1143 try:
1138 1144 if self.dirstate[dest] in '?r':
1139 1145 self.dirstate.add(dest)
1140 1146 self.dirstate.copy(source, dest)
1141 1147 finally:
1142 1148 wlock.release()
1143 1149
1144 1150 def heads(self, start=None):
1145 1151 heads = self.changelog.heads(start)
1146 1152 # sort the output in rev descending order
1147 1153 heads = [(-self.changelog.rev(h), h) for h in heads]
1148 1154 return [n for (r, n) in sorted(heads)]
1149 1155
1150 1156 def branchheads(self, branch=None, start=None, closed=False):
1151 1157 '''return a (possibly filtered) list of heads for the given branch
1152 1158
1153 1159 Heads are returned in topological order, from newest to oldest.
1154 1160 If branch is None, use the dirstate branch.
1155 1161 If start is not None, return only heads reachable from start.
1156 1162 If closed is True, return heads that are marked as closed as well.
1157 1163 '''
1158 1164 if branch is None:
1159 1165 branch = self[None].branch()
1160 1166 branches = self.branchmap()
1161 1167 if branch not in branches:
1162 1168 return []
1163 1169 # the cache returns heads ordered lowest to highest
1164 1170 bheads = list(reversed(branches[branch]))
1165 1171 if start is not None:
1166 1172 # filter out the heads that cannot be reached from startrev
1167 1173 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1168 1174 bheads = [h for h in bheads if h in fbheads]
1169 1175 if not closed:
1170 1176 bheads = [h for h in bheads if
1171 1177 ('close' not in self.changelog.read(h)[5])]
1172 1178 return bheads
1173 1179
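# Example call with assumed arguments: all heads of the default branch,
# including closed ones, newest first.
#
#     heads = repo.branchheads('default', closed=True)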
1174 1180 def branches(self, nodes):
1175 1181 if not nodes:
1176 1182 nodes = [self.changelog.tip()]
1177 1183 b = []
1178 1184 for n in nodes:
1179 1185 t = n
1180 1186 while 1:
1181 1187 p = self.changelog.parents(n)
1182 1188 if p[1] != nullid or p[0] == nullid:
1183 1189 b.append((t, n, p[0], p[1]))
1184 1190 break
1185 1191 n = p[0]
1186 1192 return b
1187 1193
1188 1194 def between(self, pairs):
1189 1195 r = []
1190 1196
1191 1197 for top, bottom in pairs:
1192 1198 n, l, i = top, [], 0
1193 1199 f = 1
1194 1200
1195 1201 while n != bottom and n != nullid:
1196 1202 p = self.changelog.parents(n)[0]
1197 1203 if i == f:
1198 1204 l.append(n)
1199 1205 f = f * 2
1200 1206 n = p
1201 1207 i += 1
1202 1208
1203 1209 r.append(l)
1204 1210
1205 1211 return r
1206 1212
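# between() samples each (top, bottom) range at exponentially growing
# distances: with i counting steps from top and f doubling at each hit, the
# nodes 1, 2, 4, 8, ... parents below top are recorded. This spacing is
# what lets the discovery code below binary-search long linear branches
# with few round trips.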
1207 1213 def findincoming(self, remote, base=None, heads=None, force=False):
1208 1214 """Return list of roots of the subsets of missing nodes from remote
1209 1215
1210 1216 If base dict is specified, assume that these nodes and their parents
1211 1217 exist on the remote side and that no child of a node of base exists
1212 1218 in both remote and self.
1213 1219 Furthermore base will be updated to include the nodes that exist
1214 1220 in both self and remote but whose children do not exist in both.
1215 1221 If a list of heads is specified, return only nodes which are heads
1216 1222 or ancestors of these heads.
1217 1223
1218 1224 All the ancestors of base are in self and in remote.
1219 1225 All the descendants of the list returned are missing in self.
1220 1226 (and so we know that the rest of the nodes are missing in remote, see
1221 1227 outgoing)
1222 1228 """
1223 1229 return self.findcommonincoming(remote, base, heads, force)[1]
1224 1230
1225 1231 def findcommonincoming(self, remote, base=None, heads=None, force=False):
1226 1232 """Return a tuple (common, missing roots, heads) used to identify
1227 1233 missing nodes from remote.
1228 1234
1229 1235 If base dict is specified, assume that these nodes and their parents
1230 1236 exist on the remote side and that no child of a node of base exists
1231 1237 in both remote and self.
1232 1238 Furthermore base will be updated to include the nodes that exist
1233 1239 in both self and remote but whose children do not exist in both.
1234 1240 If a list of heads is specified, return only nodes which are heads
1235 1241 or ancestors of these heads.
1236 1242
1237 1243 All the ancestors of base are in self and in remote.
1238 1244 """
1239 1245 m = self.changelog.nodemap
1240 1246 search = []
1241 1247 fetch = set()
1242 1248 seen = set()
1243 1249 seenbranch = set()
1244 1250 if base is None:
1245 1251 base = {}
1246 1252
1247 1253 if not heads:
1248 1254 heads = remote.heads()
1249 1255
1250 1256 if self.changelog.tip() == nullid:
1251 1257 base[nullid] = 1
1252 1258 if heads != [nullid]:
1253 1259 return [nullid], [nullid], list(heads)
1254 1260 return [nullid], [], []
1255 1261
1256 1262 # assume we're closer to the tip than the root
1257 1263 # and start by examining the heads
1258 1264 self.ui.status(_("searching for changes\n"))
1259 1265
1260 1266 unknown = []
1261 1267 for h in heads:
1262 1268 if h not in m:
1263 1269 unknown.append(h)
1264 1270 else:
1265 1271 base[h] = 1
1266 1272
1267 1273 heads = unknown
1268 1274 if not unknown:
1269 1275 return base.keys(), [], []
1270 1276
1271 1277 req = set(unknown)
1272 1278 reqcnt = 0
1273 1279
1274 1280 # search through remote branches
1275 1281 # a 'branch' here is a linear segment of history, with four parts:
1276 1282 # head, root, first parent, second parent
1277 1283 # (a branch always has two parents (or none) by definition)
1278 1284 unknown = remote.branches(unknown)
1279 1285 while unknown:
1280 1286 r = []
1281 1287 while unknown:
1282 1288 n = unknown.pop(0)
1283 1289 if n[0] in seen:
1284 1290 continue
1285 1291
1286 1292 self.ui.debug("examining %s:%s\n"
1287 1293 % (short(n[0]), short(n[1])))
1288 1294 if n[0] == nullid: # found the end of the branch
1289 1295 pass
1290 1296 elif n in seenbranch:
1291 1297 self.ui.debug("branch already found\n")
1292 1298 continue
1293 1299 elif n[1] and n[1] in m: # do we know the base?
1294 1300 self.ui.debug("found incomplete branch %s:%s\n"
1295 1301 % (short(n[0]), short(n[1])))
1296 1302 search.append(n[0:2]) # schedule branch range for scanning
1297 1303 seenbranch.add(n)
1298 1304 else:
1299 1305 if n[1] not in seen and n[1] not in fetch:
1300 1306 if n[2] in m and n[3] in m:
1301 1307 self.ui.debug("found new changeset %s\n" %
1302 1308 short(n[1]))
1303 1309 fetch.add(n[1]) # earliest unknown
1304 1310 for p in n[2:4]:
1305 1311 if p in m:
1306 1312 base[p] = 1 # latest known
1307 1313
1308 1314 for p in n[2:4]:
1309 1315 if p not in req and p not in m:
1310 1316 r.append(p)
1311 1317 req.add(p)
1312 1318 seen.add(n[0])
1313 1319
1314 1320 if r:
1315 1321 reqcnt += 1
1316 1322 self.ui.debug("request %d: %s\n" %
1317 1323 (reqcnt, " ".join(map(short, r))))
1318 1324 for p in xrange(0, len(r), 10):
1319 1325 for b in remote.branches(r[p:p+10]):
1320 1326 self.ui.debug("received %s:%s\n" %
1321 1327 (short(b[0]), short(b[1])))
1322 1328 unknown.append(b)
1323 1329
1324 1330 # do binary search on the branches we found
1325 1331 while search:
1326 1332 newsearch = []
1327 1333 reqcnt += 1
1328 1334 for n, l in zip(search, remote.between(search)):
1329 1335 l.append(n[1])
1330 1336 p = n[0]
1331 1337 f = 1
1332 1338 for i in l:
1333 1339 self.ui.debug("narrowing %d:%d %s\n" % (f, len(l), short(i)))
1334 1340 if i in m:
1335 1341 if f <= 2:
1336 1342 self.ui.debug("found new branch changeset %s\n" %
1337 1343 short(p))
1338 1344 fetch.add(p)
1339 1345 base[i] = 1
1340 1346 else:
1341 1347 self.ui.debug("narrowed branch search to %s:%s\n"
1342 1348 % (short(p), short(i)))
1343 1349 newsearch.append((p, i))
1344 1350 break
1345 1351 p, f = i, f * 2
1346 1352 search = newsearch
1347 1353
1348 1354 # sanity check our fetch list
1349 1355 for f in fetch:
1350 1356 if f in m:
1351 1357 raise error.RepoError(_("already have changeset ")
1352 1358 + short(f[:4]))
1353 1359
1354 1360 if base.keys() == [nullid]:
1355 1361 if force:
1356 1362 self.ui.warn(_("warning: repository is unrelated\n"))
1357 1363 else:
1358 1364 raise util.Abort(_("repository is unrelated"))
1359 1365
1360 1366 self.ui.debug("found new changesets starting at " +
1361 1367 " ".join([short(f) for f in fetch]) + "\n")
1362 1368
1363 1369 self.ui.debug("%d total queries\n" % reqcnt)
1364 1370
1365 1371 return base.keys(), list(fetch), heads
1366 1372
1367 1373 def findoutgoing(self, remote, base=None, heads=None, force=False):
1368 1374 """Return list of nodes that are roots of subsets not in remote
1369 1375
1370 1376 If base dict is specified, assume that these nodes and their parents
1371 1377 exist on the remote side.
1372 1378 If a list of heads is specified, return only nodes which are heads
1373 1379 or ancestors of these heads, and return a second element which
1374 1380 contains all remote heads which get new children.
1375 1381 """
1376 1382 if base is None:
1377 1383 base = {}
1378 1384 self.findincoming(remote, base, heads, force=force)
1379 1385
1380 1386 self.ui.debug("common changesets up to "
1381 1387 + " ".join(map(short, base.keys())) + "\n")
1382 1388
1383 1389 remain = set(self.changelog.nodemap)
1384 1390
1385 1391 # prune everything remote has from the tree
1386 1392 remain.remove(nullid)
1387 1393 remove = base.keys()
1388 1394 while remove:
1389 1395 n = remove.pop(0)
1390 1396 if n in remain:
1391 1397 remain.remove(n)
1392 1398 for p in self.changelog.parents(n):
1393 1399 remove.append(p)
1394 1400
1395 1401 # find every node whose parents have been pruned
1396 1402 subset = []
1397 1403 # find every remote head that will get new children
1398 1404 updated_heads = set()
1399 1405 for n in remain:
1400 1406 p1, p2 = self.changelog.parents(n)
1401 1407 if p1 not in remain and p2 not in remain:
1402 1408 subset.append(n)
1403 1409 if heads:
1404 1410 if p1 in heads:
1405 1411 updated_heads.add(p1)
1406 1412 if p2 in heads:
1407 1413 updated_heads.add(p2)
1408 1414
1409 1415 # this is the set of all roots we have to push
1410 1416 if heads:
1411 1417 return subset, list(updated_heads)
1412 1418 else:
1413 1419 return subset
1414 1420
1415 1421 def pull(self, remote, heads=None, force=False):
1416 1422 lock = self.lock()
1417 1423 try:
1418 1424 common, fetch, rheads = self.findcommonincoming(remote, heads=heads,
1419 1425 force=force)
1420 1426 if fetch == [nullid]:
1421 1427 self.ui.status(_("requesting all changes\n"))
1422 1428
1423 1429 if not fetch:
1424 1430 self.ui.status(_("no changes found\n"))
1425 1431 return 0
1426 1432
1427 1433 if heads is None and remote.capable('changegroupsubset'):
1428 1434 heads = rheads
1429 1435
1430 1436 if heads is None:
1431 1437 cg = remote.changegroup(fetch, 'pull')
1432 1438 else:
1433 1439 if not remote.capable('changegroupsubset'):
1434 1440 raise util.Abort(_("Partial pull cannot be done because "
1435 1441 "other repository doesn't support "
1436 1442 "changegroupsubset."))
1437 1443 cg = remote.changegroupsubset(fetch, heads, 'pull')
1438 1444 return self.addchangegroup(cg, 'pull', remote.url())
1439 1445 finally:
1440 1446 lock.release()
1441 1447
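# Sketch of driving pull() directly (peer construction assumed, using the
# era's hg.repository() helper):
#
#     from mercurial import hg
#     other = hg.repository(repo.ui, 'http://hg.example.com/repo')
#     repo.pull(other, heads=None, force=False)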
1442 1448 def push(self, remote, force=False, revs=None):
1443 1449 # there are two ways to push to remote repo:
1444 1450 #
1445 1451 # addchangegroup assumes local user can lock remote
1446 1452 # repo (local filesystem, old ssh servers).
1447 1453 #
1448 1454 # unbundle assumes local user cannot lock remote repo (new ssh
1449 1455 # servers, http servers).
1450 1456
1451 1457 if remote.capable('unbundle'):
1452 1458 return self.push_unbundle(remote, force, revs)
1453 1459 return self.push_addchangegroup(remote, force, revs)
1454 1460
1455 1461 def prepush(self, remote, force, revs):
1456 1462 '''Analyze the local and remote repositories and determine which
1457 1463 changesets need to be pushed to the remote. Return a tuple
1458 1464 (changegroup, remoteheads). changegroup is a readable file-like
1459 1465 object whose read() returns successive changegroup chunks ready to
1460 1466 be sent over the wire. remoteheads is the list of remote heads.
1461 1467 '''
1462 1468 common = {}
1463 1469 remote_heads = remote.heads()
1464 1470 inc = self.findincoming(remote, common, remote_heads, force=force)
1465 1471
1466 1472 update, updated_heads = self.findoutgoing(remote, common, remote_heads)
1467 1473 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1468 1474
1469 1475 def checkbranch(lheads, rheads, updatelb):
1470 1476 '''
1471 1477 check whether there are more local heads than remote heads on
1472 1478 a specific branch.
1473 1479
1474 1480 lheads: local branch heads
1475 1481 rheads: remote branch heads
1476 1482 updatelb: outgoing local branch bases
1477 1483 '''
1478 1484
1479 1485 warn = 0
1480 1486
1481 1487 if not revs and len(lheads) > len(rheads):
1482 1488 warn = 1
1483 1489 else:
1484 1490 # add local heads involved in the push
1485 1491 updatelheads = [self.changelog.heads(x, lheads)
1486 1492 for x in updatelb]
1487 1493 newheads = set(sum(updatelheads, [])) & set(lheads)
1488 1494
1489 1495 if not newheads:
1490 1496 return True
1491 1497
1492 1498 # add heads we don't have or that are not involved in the push
1493 1499 for r in rheads:
1494 1500 if r in self.changelog.nodemap:
1495 1501 desc = self.changelog.heads(r, heads)
1496 1502 l = [h for h in heads if h in desc]
1497 1503 if not l:
1498 1504 newheads.add(r)
1499 1505 else:
1500 1506 newheads.add(r)
1501 1507 if len(newheads) > len(rheads):
1502 1508 warn = 1
1503 1509
1504 1510 if warn:
1505 1511 if not rheads: # new branch requires --force
1506 1512 self.ui.warn(_("abort: push creates new"
1507 1513 " remote branch '%s'!\n") %
1508 1514 self[updatelb[0]].branch())
1509 1515 else:
1510 1516 self.ui.warn(_("abort: push creates new remote heads!\n"))
1511 1517
1512 1518 self.ui.status(_("(did you forget to merge?"
1513 1519 " use push -f to force)\n"))
1514 1520 return False
1515 1521 return True
1516 1522
1517 1523 if not bases:
1518 1524 self.ui.status(_("no changes found\n"))
1519 1525 return None, 1
1520 1526 elif not force:
1521 1527 # Check for each named branch if we're creating new remote heads.
1522 1528 # To be a remote head after push, node must be either:
1523 1529 # - unknown locally
1524 1530 # - a local outgoing head descended from update
1525 1531 # - a remote head that's known locally and not
1526 1532 # ancestral to an outgoing head
1527 1533 #
1528 1534 # New named branches cannot be created without --force.
1529 1535
1530 1536 if remote_heads != [nullid]:
1531 1537 if remote.capable('branchmap'):
1532 1538 localhds = {}
1533 1539 if not revs:
1534 1540 localhds = self.branchmap()
1535 1541 else:
1536 1542 for n in heads:
1537 1543 branch = self[n].branch()
1538 1544 if branch in localhds:
1539 1545 localhds[branch].append(n)
1540 1546 else:
1541 1547 localhds[branch] = [n]
1542 1548
1543 1549 remotehds = remote.branchmap()
1544 1550
1545 1551 for lh in localhds:
1546 1552 if lh in remotehds:
1547 1553 rheads = remotehds[lh]
1548 1554 else:
1549 1555 rheads = []
1550 1556 lheads = localhds[lh]
1551 1557 updatelb = [upd for upd in update
1552 1558 if self[upd].branch() == lh]
1553 1559 if not updatelb:
1554 1560 continue
1555 1561 if not checkbranch(lheads, rheads, updatelb):
1556 1562 return None, 0
1557 1563 else:
1558 1564 if not checkbranch(heads, remote_heads, update):
1559 1565 return None, 0
1560 1566
1561 1567 if inc:
1562 1568 self.ui.warn(_("note: unsynced remote changes!\n"))
1563 1569
1564 1570
1565 1571 if revs is None:
1566 1572 # use the fast path, no race possible on push
1567 1573 nodes = self.changelog.findmissing(common.keys())
1568 1574 cg = self._changegroup(nodes, 'push')
1569 1575 else:
1570 1576 cg = self.changegroupsubset(update, revs, 'push')
1571 1577 return cg, remote_heads
1572 1578
1573 1579 def push_addchangegroup(self, remote, force, revs):
1574 1580 lock = remote.lock()
1575 1581 try:
1576 1582 ret = self.prepush(remote, force, revs)
1577 1583 if ret[0] is not None:
1578 1584 cg, remote_heads = ret
1579 1585 return remote.addchangegroup(cg, 'push', self.url())
1580 1586 return ret[1]
1581 1587 finally:
1582 1588 lock.release()
1583 1589
1584 1590 def push_unbundle(self, remote, force, revs):
1585 1591 # local repo finds heads on server, finds out what revs it
1586 1592 # must push. once revs transferred, if server finds it has
1587 1593 # different heads (someone else won commit/push race), server
1588 1594 # aborts.
1589 1595
1590 1596 ret = self.prepush(remote, force, revs)
1591 1597 if ret[0] is not None:
1592 1598 cg, remote_heads = ret
1593 1599 if force: remote_heads = ['force']
1594 1600 return remote.unbundle(cg, remote_heads, 'push')
1595 1601 return ret[1]
1596 1602
1597 1603 def changegroupinfo(self, nodes, source):
1598 1604 if self.ui.verbose or source == 'bundle':
1599 1605 self.ui.status(_("%d changesets found\n") % len(nodes))
1600 1606 if self.ui.debugflag:
1601 1607 self.ui.debug("list of changesets:\n")
1602 1608 for node in nodes:
1603 1609 self.ui.debug("%s\n" % hex(node))
1604 1610
1605 1611 def changegroupsubset(self, bases, heads, source, extranodes=None):
1606 1612 """Compute a changegroup consisting of all the nodes that are
1607 1613 descendants of any of the bases and ancestors of any of the heads.
1608 1614 Return a chunkbuffer object whose read() method will return
1609 1615 successive changegroup chunks.
1610 1616
1611 1617 It is fairly complex as determining which filenodes and which
1612 1618 manifest nodes need to be included for the changeset to be complete
1613 1619 is non-trivial.
1614 1620
1615 1621 Another wrinkle is doing the reverse, figuring out which changeset in
1616 1622 the changegroup a particular filenode or manifestnode belongs to.
1617 1623
1618 1624 The caller can specify some nodes that must be included in the
1619 1625 changegroup using the extranodes argument. It should be a dict
1620 1626 where the keys are the filenames (or 1 for the manifest), and the
1621 1627 values are lists of (node, linknode) tuples, where node is a wanted
1622 1628 node and linknode is the changelog node that should be transmitted as
1623 1629 the linkrev.
1624 1630 """
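# A sketch of the extranodes shape described above (names illustrative;
# every node is a binary node id):
#
#   extranodes = {
#       'path/to/file': [(filenode, linknode)],
#       1: [(manifestnode, linknode)],   # the key 1 stands for the manifest
#   }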
1625 1631
1626 1632 # Set up some initial variables
1627 1633 # Make it easy to refer to self.changelog
1628 1634 cl = self.changelog
1629 1635 # msng is short for missing - compute the list of changesets in this
1630 1636 # changegroup.
1631 1637 if not bases:
1632 1638 bases = [nullid]
1633 1639 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1634 1640
1635 1641 if extranodes is None:
1636 1642 # can we go through the fast path ?
1637 1643 heads.sort()
1638 1644 allheads = self.heads()
1639 1645 allheads.sort()
1640 1646 if heads == allheads:
1641 1647 return self._changegroup(msng_cl_lst, source)
1642 1648
1643 1649 # slow path
1644 1650 self.hook('preoutgoing', throw=True, source=source)
1645 1651
1646 1652 self.changegroupinfo(msng_cl_lst, source)
1647 1653 # Some bases may turn out to be superfluous, and some heads may be
1648 1654 # too. nodesbetween will return the minimal set of bases and heads
1649 1655 # necessary to re-create the changegroup.
1650 1656
1651 1657 # Known heads are the list of heads that it is assumed the recipient
1652 1658 # of this changegroup will know about.
1653 1659 knownheads = set()
1654 1660 # We assume that all parents of bases are known heads.
1655 1661 for n in bases:
1656 1662 knownheads.update(cl.parents(n))
1657 1663 knownheads.discard(nullid)
1658 1664 knownheads = list(knownheads)
1659 1665 if knownheads:
1660 1666 # Now that we know what heads are known, we can compute which
1661 1667 # changesets are known. The recipient must know about all
1662 1668 # changesets required to reach the known heads from the null
1663 1669 # changeset.
1664 1670 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1665 1671 junk = None
1666 1672 # Transform the list into a set.
1667 1673 has_cl_set = set(has_cl_set)
1668 1674 else:
1669 1675 # If there were no known heads, the recipient cannot be assumed to
1670 1676 # know about any changesets.
1671 1677 has_cl_set = set()
1672 1678
1673 1679 # Make it easy to refer to self.manifest
1674 1680 mnfst = self.manifest
1675 1681 # We don't know which manifests are missing yet
1676 1682 msng_mnfst_set = {}
1677 1683 # Nor do we know which filenodes are missing.
1678 1684 msng_filenode_set = {}
1679 1685
1680 1686 junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
1681 1687 junk = None
1682 1688
1683 1689 # A changeset always belongs to itself, so the changenode lookup
1684 1690 # function for a changenode is identity.
1685 1691 def identity(x):
1686 1692 return x
1687 1693
1688 1694 # If we determine that a particular file or manifest node must be a
1689 1695 # node that the recipient of the changegroup will already have, we can
1690 1696 # also assume the recipient will have all the parents. This function
1691 1697 # prunes them from the set of missing nodes.
1692 1698 def prune_parents(revlog, hasset, msngset):
1693 1699 haslst = list(hasset)
1694 1700 haslst.sort(key=revlog.rev)
1695 1701 for node in haslst:
1696 1702 parentlst = [p for p in revlog.parents(node) if p != nullid]
1697 1703 while parentlst:
1698 1704 n = parentlst.pop()
1699 1705 if n not in hasset:
1700 1706 hasset.add(n)
1701 1707 p = [p for p in revlog.parents(n) if p != nullid]
1702 1708 parentlst.extend(p)
1703 1709 for n in hasset:
1704 1710 msngset.pop(n, None)
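# Worked example (illustrative): if hasset starts as set([a]) and a's
# only non-null parent is b, the walk grows hasset to set([a, b]) plus
# b's ancestors, and every node in hasset is then dropped from msngset,
# since the recipient is assumed to have all of them.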
1705 1711
1706 1712 # This is a function generating function used to set up an environment
1707 1713 # for the inner function to execute in.
1708 1714 def manifest_and_file_collector(changedfileset):
1709 1715 # This function gathers information from each changeset node
1710 1716 # that goes out as part of the changegroup. The information
1711 1717 # gathered is the list of manifest nodes that are potentially
1712 1718 # required (the recipient may already have them) and the full
1713 1719 # list of files that were
1714 1720 # changed in any changeset in the changegroup.
1715 1721 #
1716 1722 # We also remember the first changenode that referenced each
1717 1723 # manifest, so we can later determine which changenode 'owns'
1718 1724 # the manifest.
1719 1725 def collect_manifests_and_files(clnode):
1720 1726 c = cl.read(clnode)
1721 1727 for f in c[3]:
1722 1728 # This is to make sure we only have one instance of each
1723 1729 # filename string for each filename.
1724 1730 changedfileset.setdefault(f, f)
1725 1731 msng_mnfst_set.setdefault(c[0], clnode)
1726 1732 return collect_manifests_and_files
1727 1733
1728 1734 # Figure out which manifest nodes (of the ones we think might be part
1729 1735 # of the changegroup) the recipient must know about and remove them
1730 1736 # from the changegroup.
1731 1737 def prune_manifests():
1732 1738 has_mnfst_set = set()
1733 1739 for n in msng_mnfst_set:
1734 1740 # If a 'missing' manifest thinks it belongs to a changenode
1735 1741 # the recipient is assumed to have, obviously the recipient
1736 1742 # must have that manifest.
1737 1743 linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
1738 1744 if linknode in has_cl_set:
1739 1745 has_mnfst_set.add(n)
1740 1746 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1741 1747
1742 1748 # Use the information collected in collect_manifests_and_files to say
1743 1749 # which changenode any manifestnode belongs to.
1744 1750 def lookup_manifest_link(mnfstnode):
1745 1751 return msng_mnfst_set[mnfstnode]
1746 1752
1747 1753 # A function generating function that sets up the initial environment
1748 1754 # for the inner function.
1749 1755 def filenode_collector(changedfiles):
1750 1756 next_rev = [0]
1751 1757 # This gathers information from each manifestnode included in the
1752 1758 # changegroup about which filenodes the manifest node references
1753 1759 # so we can include those in the changegroup too.
1754 1760 #
1755 1761 # It also remembers which changenode each filenode belongs to. It
1756 1762 # does this by assuming that a filenode belongs to the changenode
1757 1763 # that the first manifest referencing it belongs to.
1758 1764 def collect_msng_filenodes(mnfstnode):
1759 1765 r = mnfst.rev(mnfstnode)
1760 1766 if r == next_rev[0]:
1761 1767 # If this manifest immediately follows the last rev we looked
1762 1768 # at, reading the delta is enough.
1763 1769 deltamf = mnfst.readdelta(mnfstnode)
1764 1770 # For each line in the delta
1765 1771 for f, fnode in deltamf.iteritems():
1766 1772 f = changedfiles.get(f, None)
1767 1773 # And if the file is in the list of files we care
1768 1774 # about.
1769 1775 if f is not None:
1770 1776 # Get the changenode this manifest belongs to
1771 1777 clnode = msng_mnfst_set[mnfstnode]
1772 1778 # Create the set of filenodes for the file if
1773 1779 # there isn't one already.
1774 1780 ndset = msng_filenode_set.setdefault(f, {})
1775 1781 # And set the filenode's changelog node to the
1776 1782 # manifest's if it hasn't been set already.
1777 1783 ndset.setdefault(fnode, clnode)
1778 1784 else:
1779 1785 # Otherwise we need a full manifest.
1780 1786 m = mnfst.read(mnfstnode)
1781 1787 # For every file we care about.
1782 1788 for f in changedfiles:
1783 1789 fnode = m.get(f, None)
1784 1790 # If it's in the manifest
1785 1791 if fnode is not None:
1786 1792 # See comments above.
1787 1793 clnode = msng_mnfst_set[mnfstnode]
1788 1794 ndset = msng_filenode_set.setdefault(f, {})
1789 1795 ndset.setdefault(fnode, clnode)
1790 1796 # Remember the revision we hope to see next.
1791 1797 next_rev[0] = r + 1
1792 1798 return collect_msng_filenodes
1793 1799
1794 1800 # We have a list of filenodes we think we need for a file; let's remove
1795 1801 # all those we know the recipient must have.
1796 1802 def prune_filenodes(f, filerevlog):
1797 1803 msngset = msng_filenode_set[f]
1798 1804 hasset = set()
1799 1805 # If a 'missing' filenode thinks it belongs to a changenode we
1800 1806 # assume the recipient must have, then the recipient must have
1801 1807 # that filenode.
1802 1808 for n in msngset:
1803 1809 clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
1804 1810 if clnode in has_cl_set:
1805 1811 hasset.add(n)
1806 1812 prune_parents(filerevlog, hasset, msngset)
1807 1813
1808 1814 # A function generating function that sets up a context for the
1809 1815 # inner function.
1810 1816 def lookup_filenode_link_func(fname):
1811 1817 msngset = msng_filenode_set[fname]
1812 1818 # Lookup the changenode the filenode belongs to.
1813 1819 def lookup_filenode_link(fnode):
1814 1820 return msngset[fnode]
1815 1821 return lookup_filenode_link
1816 1822
1817 1823 # Add the nodes that were explicitly requested.
1818 1824 def add_extra_nodes(name, nodes):
1819 1825 if not extranodes or name not in extranodes:
1820 1826 return
1821 1827
1822 1828 for node, linknode in extranodes[name]:
1823 1829 if node not in nodes:
1824 1830 nodes[node] = linknode
1825 1831
1826 1832 # Now that we have all these utility functions to help out and
1827 1833 # logically divide up the task, generate the group.
1828 1834 def gengroup():
1829 1835 # The set of changed files starts empty.
1830 1836 changedfiles = {}
1831 1837 # Create a changenode group generator that will call our functions
1832 1838 # back to lookup the owning changenode and collect information.
1833 1839 group = cl.group(msng_cl_lst, identity,
1834 1840 manifest_and_file_collector(changedfiles))
1835 1841 for chnk in group:
1836 1842 yield chnk
1837 1843
1838 1844 # The list of manifests has been collected by the generator
1839 1845 # calling our functions back.
1840 1846 prune_manifests()
1841 1847 add_extra_nodes(1, msng_mnfst_set)
1842 1848 msng_mnfst_lst = msng_mnfst_set.keys()
1843 1849 # Sort the manifestnodes by revision number.
1844 1850 msng_mnfst_lst.sort(key=mnfst.rev)
1845 1851 # Create a generator for the manifestnodes that calls our lookup
1846 1852 # and data collection functions back.
1847 1853 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1848 1854 filenode_collector(changedfiles))
1849 1855 for chnk in group:
1850 1856 yield chnk
1851 1857
1852 1858 # These are no longer needed, dereference and toss the memory for
1853 1859 # them.
1854 1860 msng_mnfst_lst = None
1855 1861 msng_mnfst_set.clear()
1856 1862
1857 1863 if extranodes:
1858 1864 for fname in extranodes:
1859 1865 if isinstance(fname, int):
1860 1866 continue
1861 1867 msng_filenode_set.setdefault(fname, {})
1862 1868 changedfiles[fname] = 1
1863 1869 # Go through all our files in order sorted by name.
1864 1870 for fname in sorted(changedfiles):
1865 1871 filerevlog = self.file(fname)
1866 1872 if not len(filerevlog):
1867 1873 raise util.Abort(_("empty or missing revlog for %s") % fname)
1868 1874 # Toss out the filenodes that the recipient isn't really
1869 1875 # missing.
1870 1876 if fname in msng_filenode_set:
1871 1877 prune_filenodes(fname, filerevlog)
1872 1878 add_extra_nodes(fname, msng_filenode_set[fname])
1873 1879 msng_filenode_lst = msng_filenode_set[fname].keys()
1874 1880 else:
1875 1881 msng_filenode_lst = []
1876 1882 # If any filenodes are left, generate the group for them,
1877 1883 # otherwise don't bother.
1878 1884 if len(msng_filenode_lst) > 0:
1879 1885 yield changegroup.chunkheader(len(fname))
1880 1886 yield fname
1881 1887 # Sort the filenodes by their revision #
1882 1888 msng_filenode_lst.sort(key=filerevlog.rev)
1883 1889 # Create a group generator and only pass in a changenode
1884 1890 # lookup function as we need to collect no information
1885 1891 # from filenodes.
1886 1892 group = filerevlog.group(msng_filenode_lst,
1887 1893 lookup_filenode_link_func(fname))
1888 1894 for chnk in group:
1889 1895 yield chnk
1890 1896 if fname in msng_filenode_set:
1891 1897 # Don't need this anymore, toss it to free memory.
1892 1898 del msng_filenode_set[fname]
1893 1899 # Signal that no more groups are left.
1894 1900 yield changegroup.closechunk()
1895 1901
1896 1902 if msng_cl_lst:
1897 1903 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1898 1904
1899 1905 return util.chunkbuffer(gengroup())
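# A minimal usage sketch (illustrative; basenode and headnode stand for
# binary node ids known to the caller):
#
#   cg = repo.changegroupsubset([basenode], [headnode], 'push')
#   chunk = cg.read(4096)   # successive changegroup chunks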
1900 1906
1901 1907 def changegroup(self, basenodes, source):
1902 1908 # to avoid a race we use changegroupsubset() (issue1320)
1903 1909 return self.changegroupsubset(basenodes, self.heads(), source)
1904 1910
1905 1911 def _changegroup(self, nodes, source):
1906 1912 """Compute the changegroup of all nodes that we have that a recipient
1907 1913 doesn't. Return a chunkbuffer object whose read() method will return
1908 1914 successive changegroup chunks.
1909 1915
1910 1916 This is much easier than the previous function as we can assume that
1911 1917 the recipient has any changenode we aren't sending them.
1912 1918
1913 1919 nodes is the set of nodes to send"""
1914 1920
1915 1921 self.hook('preoutgoing', throw=True, source=source)
1916 1922
1917 1923 cl = self.changelog
1918 1924 revset = set([cl.rev(n) for n in nodes])
1919 1925 self.changegroupinfo(nodes, source)
1920 1926
1921 1927 def identity(x):
1922 1928 return x
1923 1929
1924 1930 def gennodelst(log):
1925 1931 for r in log:
1926 1932 if log.linkrev(r) in revset:
1927 1933 yield log.node(r)
1928 1934
1929 1935 def changed_file_collector(changedfileset):
1930 1936 def collect_changed_files(clnode):
1931 1937 c = cl.read(clnode)
1932 1938 changedfileset.update(c[3])
1933 1939 return collect_changed_files
1934 1940
1935 1941 def lookuprevlink_func(revlog):
1936 1942 def lookuprevlink(n):
1937 1943 return cl.node(revlog.linkrev(revlog.rev(n)))
1938 1944 return lookuprevlink
1939 1945
1940 1946 def gengroup():
1941 1947 '''yield a sequence of changegroup chunks (strings)'''
1942 1948 # construct a list of all changed files
1943 1949 changedfiles = set()
1944 1950
1945 1951 for chnk in cl.group(nodes, identity,
1946 1952 changed_file_collector(changedfiles)):
1947 1953 yield chnk
1948 1954
1949 1955 mnfst = self.manifest
1950 1956 nodeiter = gennodelst(mnfst)
1951 1957 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1952 1958 yield chnk
1953 1959
1954 1960 for fname in sorted(changedfiles):
1955 1961 filerevlog = self.file(fname)
1956 1962 if not len(filerevlog):
1957 1963 raise util.Abort(_("empty or missing revlog for %s") % fname)
1958 1964 nodeiter = gennodelst(filerevlog)
1959 1965 nodeiter = list(nodeiter)
1960 1966 if nodeiter:
1961 1967 yield changegroup.chunkheader(len(fname))
1962 1968 yield fname
1963 1969 lookup = lookuprevlink_func(filerevlog)
1964 1970 for chnk in filerevlog.group(nodeiter, lookup):
1965 1971 yield chnk
1966 1972
1967 1973 yield changegroup.closechunk()
1968 1974
1969 1975 if nodes:
1970 1976 self.hook('outgoing', node=hex(nodes[0]), source=source)
1971 1977
1972 1978 return util.chunkbuffer(gengroup())
1973 1979
1974 1980 def addchangegroup(self, source, srctype, url, emptyok=False):
1975 1981 """add changegroup to repo.
1976 1982
1977 1983 return values:
1978 1984 - nothing changed or no source: 0
1979 1985 - more heads than before: 1+added heads (2..n)
1980 1986 - fewer heads than before: -1-removed heads (-2..-n)
1981 1987 - number of heads stays the same: 1
1982 1988 """
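# Illustrative values, matching the head arithmetic at the end of this
# method:
#   oldheads == 1, newheads == 3  ->  3   (two heads added)
#   oldheads == 3, newheads == 1  -> -3   (two heads removed)
#   newheads == oldheads          ->  1   (head count unchanged)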
1983 1989 def csmap(x):
1984 1990 self.ui.debug("add changeset %s\n" % short(x))
1985 1991 return len(cl)
1986 1992
1987 1993 def revmap(x):
1988 1994 return cl.rev(x)
1989 1995
1990 1996 if not source:
1991 1997 return 0
1992 1998
1993 1999 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1994 2000
1995 2001 changesets = files = revisions = 0
1996 2002
1997 2003 # write changelog data to temp files so concurrent readers will not see
1998 2004 # inconsistent view
1999 2005 cl = self.changelog
2000 2006 cl.delayupdate()
2001 2007 oldheads = len(cl.heads())
2002 2008
2003 2009 tr = self.transaction()
2004 2010 try:
2005 2011 trp = weakref.proxy(tr)
2006 2012 # pull off the changeset group
2007 2013 self.ui.status(_("adding changesets\n"))
2008 2014 clstart = len(cl)
2009 2015 chunkiter = changegroup.chunkiter(source)
2010 2016 if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
2011 2017 raise util.Abort(_("received changelog group is empty"))
2012 2018 clend = len(cl)
2013 2019 changesets = clend - clstart
2014 2020
2015 2021 # pull off the manifest group
2016 2022 self.ui.status(_("adding manifests\n"))
2017 2023 chunkiter = changegroup.chunkiter(source)
2018 2024 # no need to check for empty manifest group here:
2019 2025 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2020 2026 # no new manifest will be created and the manifest group will
2021 2027 # be empty during the pull
2022 2028 self.manifest.addgroup(chunkiter, revmap, trp)
2023 2029
2024 2030 # process the files
2025 2031 self.ui.status(_("adding file changes\n"))
2026 2032 while 1:
2027 2033 f = changegroup.getchunk(source)
2028 2034 if not f:
2029 2035 break
2030 2036 self.ui.debug("adding %s revisions\n" % f)
2031 2037 fl = self.file(f)
2032 2038 o = len(fl)
2033 2039 chunkiter = changegroup.chunkiter(source)
2034 2040 if fl.addgroup(chunkiter, revmap, trp) is None:
2035 2041 raise util.Abort(_("received file revlog group is empty"))
2036 2042 revisions += len(fl) - o
2037 2043 files += 1
2038 2044
2039 2045 newheads = len(cl.heads())
2040 2046 heads = ""
2041 2047 if oldheads and newheads != oldheads:
2042 2048 heads = _(" (%+d heads)") % (newheads - oldheads)
2043 2049
2044 2050 self.ui.status(_("added %d changesets"
2045 2051 " with %d changes to %d files%s\n")
2046 2052 % (changesets, revisions, files, heads))
2047 2053
2048 2054 if changesets > 0:
2049 2055 p = lambda: cl.writepending() and self.root or ""
2050 2056 self.hook('pretxnchangegroup', throw=True,
2051 2057 node=hex(cl.node(clstart)), source=srctype,
2052 2058 url=url, pending=p)
2053 2059
2054 2060 # make changelog see real files again
2055 2061 cl.finalize(trp)
2056 2062
2057 2063 tr.close()
2058 2064 finally:
2059 2065 del tr
2060 2066
2061 2067 if changesets > 0:
2062 2068 # forcefully update the on-disk branch cache
2063 2069 self.ui.debug("updating the branch cache\n")
2064 2070 self.branchtags()
2065 2071 self.hook("changegroup", node=hex(cl.node(clstart)),
2066 2072 source=srctype, url=url)
2067 2073
2068 2074 for i in xrange(clstart, clend):
2069 2075 self.hook("incoming", node=hex(cl.node(i)),
2070 2076 source=srctype, url=url)
2071 2077
2072 2078 # never return 0 here:
2073 2079 if newheads < oldheads:
2074 2080 return newheads - oldheads - 1
2075 2081 else:
2076 2082 return newheads - oldheads + 1
2077 2083
2078 2084
2079 2085 def stream_in(self, remote):
2080 2086 fp = remote.stream_out()
2081 2087 l = fp.readline()
2082 2088 try:
2083 2089 resp = int(l)
2084 2090 except ValueError:
2085 2091 raise error.ResponseError(
2086 2092 _('Unexpected response from remote server:'), l)
2087 2093 if resp == 1:
2088 2094 raise util.Abort(_('operation forbidden by server'))
2089 2095 elif resp == 2:
2090 2096 raise util.Abort(_('locking the remote repository failed'))
2091 2097 elif resp != 0:
2092 2098 raise util.Abort(_('the server sent an unknown error code'))
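# The stream parsed below has this layout (illustrative rendering):
#   <resp>\n
#   <total_files> <total_bytes>\n
#   then, per file: <name>\0<size>\n followed by <size> raw bytes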
2093 2099 self.ui.status(_('streaming all changes\n'))
2094 2100 l = fp.readline()
2095 2101 try:
2096 2102 total_files, total_bytes = map(int, l.split(' ', 1))
2097 2103 except (ValueError, TypeError):
2098 2104 raise error.ResponseError(
2099 2105 _('Unexpected response from remote server:'), l)
2100 2106 self.ui.status(_('%d files to transfer, %s of data\n') %
2101 2107 (total_files, util.bytecount(total_bytes)))
2102 2108 start = time.time()
2103 2109 for i in xrange(total_files):
2104 2110 # XXX doesn't support '\n' or '\r' in filenames
2105 2111 l = fp.readline()
2106 2112 try:
2107 2113 name, size = l.split('\0', 1)
2108 2114 size = int(size)
2109 2115 except (ValueError, TypeError):
2110 2116 raise error.ResponseError(
2111 2117 _('Unexpected response from remote server:'), l)
2112 2118 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
2113 2119 # for backwards compat, name was partially encoded
2114 2120 ofp = self.sopener(store.decodedir(name), 'w')
2115 2121 for chunk in util.filechunkiter(fp, limit=size):
2116 2122 ofp.write(chunk)
2117 2123 ofp.close()
2118 2124 elapsed = time.time() - start
2119 2125 if elapsed <= 0:
2120 2126 elapsed = 0.001
2121 2127 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2122 2128 (util.bytecount(total_bytes), elapsed,
2123 2129 util.bytecount(total_bytes / elapsed)))
2124 2130 self.invalidate()
2125 2131 return len(self.heads()) + 1
2126 2132
2127 2133 def clone(self, remote, heads=[], stream=False):
2128 2134 '''clone remote repository.
2129 2135
2130 2136 keyword arguments:
2131 2137 heads: list of revs to clone (forces use of pull)
2132 2138 stream: use streaming clone if possible'''
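# Illustrative call (assuming a remote peer object): repo.clone(remote,
# stream=True) uses stream_in() only when no heads are requested and the
# server advertises the 'stream' capability; otherwise it falls back to
# a regular pull.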
2133 2139
2134 2140 # now, all clients that can request uncompressed clones can
2135 2141 # read repo formats supported by all servers that can serve
2136 2142 # them.
2137 2143
2138 2144 # if revlog format changes, client will have to check version
2139 2145 # and format flags on "stream" capability, and use
2140 2146 # uncompressed only if compatible.
2141 2147
2142 2148 if stream and not heads and remote.capable('stream'):
2143 2149 return self.stream_in(remote)
2144 2150 return self.pull(remote, heads)
2145 2151
2146 2152 # used to avoid circular references so destructors work
2147 2153 def aftertrans(files):
2148 2154 renamefiles = [tuple(t) for t in files]
2149 2155 def a():
2150 2156 for src, dest in renamefiles:
2151 2157 util.rename(src, dest)
2152 2158 return a
2153 2159
2154 2160 def instance(ui, path, create):
2155 2161 return localrepository(ui, util.drop_scheme('file', path), create)
2156 2162
2157 2163 def islocal(path):
2158 2164 return True