merge with main
Benoit Boissinot - r10321:6e721636 merge default
@@ -1,2169 +1,2169 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import bin, hex, nullid, nullrev, short
9 9 from i18n import _
10 10 import repo, changegroup, subrepo
11 11 import changelog, dirstate, filelog, manifest, context
12 12 import lock, transaction, store, encoding
13 13 import util, extensions, hook, error
14 14 import match as match_
15 15 import merge as merge_
16 16 import tags as tags_
17 17 from lock import release
18 18 import weakref, stat, errno, os, time, inspect
19 19 propertycache = util.propertycache
20 20
21 21 class localrepository(repo.repository):
22 22 capabilities = set(('lookup', 'changegroupsubset', 'branchmap'))
23 23 supported = set('revlogv1 store fncache shared'.split())
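# 'capabilities' lists wire-protocol features advertised to peers;
# 'supported' lists the on-disk requirements (.hg/requires entries)
# this class can handle -- see the check in __init__ below.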
24 24
25 25 def __init__(self, baseui, path=None, create=0):
26 26 repo.repository.__init__(self)
27 27 self.root = os.path.realpath(path)
28 28 self.path = os.path.join(self.root, ".hg")
29 29 self.origroot = path
30 30 self.opener = util.opener(self.path)
31 31 self.wopener = util.opener(self.root)
32 32 self.baseui = baseui
33 33 self.ui = baseui.copy()
34 34
35 35 try:
36 36 self.ui.readconfig(self.join("hgrc"), self.root)
37 37 extensions.loadall(self.ui)
38 38 except IOError:
39 39 pass
40 40
41 41 if not os.path.isdir(self.path):
42 42 if create:
43 43 if not os.path.exists(path):
44 44 os.mkdir(path)
45 45 os.mkdir(self.path)
46 46 requirements = ["revlogv1"]
47 47 if self.ui.configbool('format', 'usestore', True):
48 48 os.mkdir(os.path.join(self.path, "store"))
49 49 requirements.append("store")
50 50 if self.ui.configbool('format', 'usefncache', True):
51 51 requirements.append("fncache")
52 52 # create an invalid changelog
53 53 self.opener("00changelog.i", "a").write(
54 54 '\0\0\0\2' # represents revlogv2
55 55 ' dummy changelog to prevent using the old repo layout'
56 56 )
57 57 reqfile = self.opener("requires", "w")
58 58 for r in requirements:
59 59 reqfile.write("%s\n" % r)
60 60 reqfile.close()
61 61 else:
62 62 raise error.RepoError(_("repository %s not found") % path)
63 63 elif create:
64 64 raise error.RepoError(_("repository %s already exists") % path)
65 65 else:
66 66 # find requirements
67 67 requirements = set()
68 68 try:
69 69 requirements = set(self.opener("requires").read().splitlines())
70 70 except IOError, inst:
71 71 if inst.errno != errno.ENOENT:
72 72 raise
73 73 for r in requirements - self.supported:
74 74 raise error.RepoError(_("requirement '%s' not supported") % r)
75 75
76 76 self.sharedpath = self.path
77 77 try:
78 78 s = os.path.realpath(self.opener("sharedpath").read())
79 79 if not os.path.exists(s):
80 80 raise error.RepoError(
81 81 _('.hg/sharedpath points to nonexistent directory %s') % s)
82 82 self.sharedpath = s
83 83 except IOError, inst:
84 84 if inst.errno != errno.ENOENT:
85 85 raise
86 86
87 87 self.store = store.store(requirements, self.sharedpath, util.opener)
88 88 self.spath = self.store.path
89 89 self.sopener = self.store.opener
90 90 self.sjoin = self.store.join
91 91 self.opener.createmode = self.store.createmode
92 92
93 93 # These two define the set of tags for this repository. _tags
94 94 # maps tag name to node; _tagtypes maps tag name to 'global' or
95 95 # 'local'. (Global tags are defined by .hgtags across all
96 96 # heads, and local tags are defined in .hg/localtags.) They
97 97 # constitute the in-memory cache of tags.
98 98 self._tags = None
99 99 self._tagtypes = None
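# Illustrative cache shapes (hypothetical values):
#   self._tags     = {'tip': <20-byte node>, 'v1.0': <20-byte node>}
#   self._tagtypes = {'v1.0': 'global', 'wip': 'local'}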
100 100
101 101 self._branchcache = None # in UTF-8
102 102 self._branchcachetip = None
103 103 self.nodetagscache = None
104 104 self.filterpats = {}
105 105 self._datafilters = {}
106 106 self._transref = self._lockref = self._wlockref = None
107 107
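# util.propertycache computes each value on first access and stores it
# in the instance dict, so later reads are plain attribute lookups;
# invalidate() below deletes the cached changelog/manifest entries to
# force a reload.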
108 108 @propertycache
109 109 def changelog(self):
110 110 c = changelog.changelog(self.sopener)
111 111 if 'HG_PENDING' in os.environ:
112 112 p = os.environ['HG_PENDING']
113 113 if p.startswith(self.root):
114 114 c.readpending('00changelog.i.a')
115 115 self.sopener.defversion = c.version
116 116 return c
117 117
118 118 @propertycache
119 119 def manifest(self):
120 120 return manifest.manifest(self.sopener)
121 121
122 122 @propertycache
123 123 def dirstate(self):
124 124 return dirstate.dirstate(self.opener, self.ui, self.root)
125 125
126 126 def __getitem__(self, changeid):
127 127 if changeid is None:
128 128 return context.workingctx(self)
129 129 return context.changectx(self, changeid)
130 130
131 131 def __contains__(self, changeid):
132 132 try:
133 133 return bool(self.lookup(changeid))
134 134 except error.RepoLookupError:
135 135 return False
136 136
137 137 def __nonzero__(self):
138 138 return True
139 139
140 140 def __len__(self):
141 141 return len(self.changelog)
142 142
143 143 def __iter__(self):
144 144 for i in xrange(len(self)):
145 145 yield i
146 146
147 147 def url(self):
148 148 return 'file:' + self.root
149 149
150 150 def hook(self, name, throw=False, **args):
151 151 return hook.hook(self.ui, self, name, throw, **args)
152 152
153 153 tag_disallowed = ':\r\n'
154 154
155 155 def _tag(self, names, node, message, local, user, date, extra={}):
156 156 if isinstance(names, str):
157 157 allchars = names
158 158 names = (names,)
159 159 else:
160 160 allchars = ''.join(names)
161 161 for c in self.tag_disallowed:
162 162 if c in allchars:
163 163 raise util.Abort(_('%r cannot be used in a tag name') % c)
164 164
165 165 for name in names:
166 166 self.hook('pretag', throw=True, node=hex(node), tag=name,
167 167 local=local)
168 168
169 169 def writetags(fp, names, munge, prevtags):
170 170 fp.seek(0, 2)
171 171 if prevtags and prevtags[-1] != '\n':
172 172 fp.write('\n')
173 173 for name in names:
174 174 m = munge and munge(name) or name
175 175 if self._tagtypes and name in self._tagtypes:
176 176 old = self._tags.get(name, nullid)
177 177 fp.write('%s %s\n' % (hex(old), m))
178 178 fp.write('%s %s\n' % (hex(node), m))
179 179 fp.close()
180 180
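# Both .hg/localtags and .hgtags use the line format writetags() emits
# above: '<40-char hex node> <tag name>'. When re-tagging, the previous
# node is recorded on its own line before the new one.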
181 181 prevtags = ''
182 182 if local:
183 183 try:
184 184 fp = self.opener('localtags', 'r+')
185 185 except IOError:
186 186 fp = self.opener('localtags', 'a')
187 187 else:
188 188 prevtags = fp.read()
189 189
190 190 # local tags are stored in the current charset
191 191 writetags(fp, names, None, prevtags)
192 192 for name in names:
193 193 self.hook('tag', node=hex(node), tag=name, local=local)
194 194 return
195 195
196 196 try:
197 197 fp = self.wfile('.hgtags', 'rb+')
198 198 except IOError:
199 199 fp = self.wfile('.hgtags', 'ab')
200 200 else:
201 201 prevtags = fp.read()
202 202
203 203 # committed tags are stored in UTF-8
204 204 writetags(fp, names, encoding.fromlocal, prevtags)
205 205
206 206 if '.hgtags' not in self.dirstate:
207 207 self.add(['.hgtags'])
208 208
209 209 m = match_.exact(self.root, '', ['.hgtags'])
210 210 tagnode = self.commit(message, user, date, extra=extra, match=m)
211 211
212 212 for name in names:
213 213 self.hook('tag', node=hex(node), tag=name, local=local)
214 214
215 215 return tagnode
216 216
217 217 def tag(self, names, node, message, local, user, date):
218 218 '''tag a revision with one or more symbolic names.
219 219
220 220 names is a list of strings or, when adding a single tag, names may be a
221 221 string.
222 222
223 223 if local is True, the tags are stored in a per-repository file.
224 224 otherwise, they are stored in the .hgtags file, and a new
225 225 changeset is committed with the change.
226 226
227 227 keyword arguments:
228 228
229 229 local: whether to store tags in non-version-controlled file
230 230 (default False)
231 231
232 232 message: commit message to use if committing
233 233
234 234 user: name of user to use if committing
235 235
236 236 date: date tuple to use if committing'''
237 237
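# Illustrative call (hypothetical values):
#   repo.tag(['v1.0'], repo.lookup('tip'), 'Added tag v1.0',
#            False, 'user@example.com', None)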
238 238 for x in self.status()[:5]:
239 239 if '.hgtags' in x:
240 240 raise util.Abort(_('working copy of .hgtags is changed '
241 241 '(please commit .hgtags manually)'))
242 242
243 243 self.tags() # instantiate the cache
244 244 self._tag(names, node, message, local, user, date)
245 245
246 246 def tags(self):
247 247 '''return a mapping of tag to node'''
248 248 if self._tags is None:
249 249 (self._tags, self._tagtypes) = self._findtags()
250 250
251 251 return self._tags
252 252
253 253 def _findtags(self):
254 254 '''Do the hard work of finding tags. Return a pair of dicts
255 255 (tags, tagtypes) where tags maps tag name to node, and tagtypes
256 256 maps tag name to a string like \'global\' or \'local\'.
257 257 Subclasses or extensions are free to add their own tags, but
258 258 should be aware that the returned dicts will be retained for the
259 259 duration of the localrepo object.'''
260 260
261 261 # XXX what tagtype should subclasses/extensions use? Currently
262 262 # mq and bookmarks add tags, but do not set the tagtype at all.
263 263 # Should each extension invent its own tag type? Should there
264 264 # be one tagtype for all such "virtual" tags? Or is the status
265 265 # quo fine?
266 266
267 267 alltags = {} # map tag name to (node, hist)
268 268 tagtypes = {}
269 269
270 270 tags_.findglobaltags(self.ui, self, alltags, tagtypes)
271 271 tags_.readlocaltags(self.ui, self, alltags, tagtypes)
272 272
273 273 # Build the return dicts. Have to re-encode tag names because
274 274 # the tags module always uses UTF-8 (in order not to lose info
275 275 # writing to the cache), but the rest of Mercurial wants them in
276 276 # local encoding.
277 277 tags = {}
278 278 for (name, (node, hist)) in alltags.iteritems():
279 279 if node != nullid:
280 280 tags[encoding.tolocal(name)] = node
281 281 tags['tip'] = self.changelog.tip()
282 282 tagtypes = dict([(encoding.tolocal(name), value)
283 283 for (name, value) in tagtypes.iteritems()])
284 284 return (tags, tagtypes)
285 285
286 286 def tagtype(self, tagname):
287 287 '''
288 288 return the type of the given tag. result can be:
289 289
290 290 'local' : a local tag
291 291 'global' : a global tag
292 292 None : tag does not exist
293 293 '''
294 294
295 295 self.tags()
296 296
297 297 return self._tagtypes.get(tagname)
298 298
299 299 def tagslist(self):
300 300 '''return a list of tags ordered by revision'''
301 301 l = []
302 302 for t, n in self.tags().iteritems():
303 303 try:
304 304 r = self.changelog.rev(n)
305 305 except:
306 306 r = -2 # sort to the beginning of the list if unknown
307 307 l.append((r, t, n))
308 308 return [(t, n) for r, t, n in sorted(l)]
309 309
310 310 def nodetags(self, node):
311 311 '''return the tags associated with a node'''
312 312 if not self.nodetagscache:
313 313 self.nodetagscache = {}
314 314 for t, n in self.tags().iteritems():
315 315 self.nodetagscache.setdefault(n, []).append(t)
316 316 return self.nodetagscache.get(node, [])
317 317
318 318 def _branchtags(self, partial, lrev):
319 319 # TODO: rename this function?
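# 'partial' maps branch name -> list of head nodes and may be stale;
# 'lrev' is the revision the cache was last valid at. Revisions in
# (lrev, tiprev] are folded in and the result is written back to
# branchheads.cache.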
320 320 tiprev = len(self) - 1
321 321 if lrev != tiprev:
322 322 self._updatebranchcache(partial, lrev + 1, tiprev + 1)
323 323 self._writebranchcache(partial, self.changelog.tip(), tiprev)
324 324
325 325 return partial
326 326
327 327 def branchmap(self):
328 328 tip = self.changelog.tip()
329 329 if self._branchcache is not None and self._branchcachetip == tip:
330 330 return self._branchcache
331 331
332 332 oldtip = self._branchcachetip
333 333 self._branchcachetip = tip
334 334 if oldtip is None or oldtip not in self.changelog.nodemap:
335 335 partial, last, lrev = self._readbranchcache()
336 336 else:
337 337 lrev = self.changelog.rev(oldtip)
338 338 partial = self._branchcache
339 339
340 340 self._branchtags(partial, lrev)
341 341 # this private cache holds all heads (not just tips)
342 342 self._branchcache = partial
343 343
344 344 return self._branchcache
345 345
346 346 def branchtags(self):
347 347 '''return a dict where branch names map to the tipmost head of
348 348 the branch; open heads come before closed'''
349 349 bt = {}
350 350 for bn, heads in self.branchmap().iteritems():
351 351 head = None
352 352 for i in range(len(heads)-1, -1, -1):
353 353 h = heads[i]
354 354 if 'close' not in self.changelog.read(h)[5]:
355 355 head = h
356 356 break
357 357 # no open heads were found
358 358 if head is None:
359 359 head = heads[-1]
360 360 bt[bn] = head
361 361 return bt
362 362
363 363
364 364 def _readbranchcache(self):
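# branchheads.cache layout (written by _writebranchcache below):
#   <hex tip node> <tip rev>         -- first line, validity check
#   <hex head node> <branch name>    -- one line per cached head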
365 365 partial = {}
366 366 try:
367 367 f = self.opener("branchheads.cache")
368 368 lines = f.read().split('\n')
369 369 f.close()
370 370 except (IOError, OSError):
371 371 return {}, nullid, nullrev
372 372
373 373 try:
374 374 last, lrev = lines.pop(0).split(" ", 1)
375 375 last, lrev = bin(last), int(lrev)
376 376 if lrev >= len(self) or self[lrev].node() != last:
377 377 # invalidate the cache
378 378 raise ValueError('invalidating branch cache (tip differs)')
379 379 for l in lines:
380 380 if not l:
381 381 continue
382 382 node, label = l.split(" ", 1)
383 383 partial.setdefault(label.strip(), []).append(bin(node))
384 384 except KeyboardInterrupt:
385 385 raise
386 386 except Exception, inst:
387 387 if self.ui.debugflag:
388 388 self.ui.warn(str(inst), '\n')
389 389 partial, last, lrev = {}, nullid, nullrev
390 390 return partial, last, lrev
391 391
392 392 def _writebranchcache(self, branches, tip, tiprev):
393 393 try:
394 394 f = self.opener("branchheads.cache", "w", atomictemp=True)
395 395 f.write("%s %s\n" % (hex(tip), tiprev))
396 396 for label, nodes in branches.iteritems():
397 397 for node in nodes:
398 398 f.write("%s %s\n" % (hex(node), label))
399 399 f.rename()
400 400 except (IOError, OSError):
401 401 pass
402 402
403 403 def _updatebranchcache(self, partial, start, end):
404 404 # collect new branch entries
405 405 newbranches = {}
406 406 for r in xrange(start, end):
407 407 c = self[r]
408 408 newbranches.setdefault(c.branch(), []).append(c.node())
409 409 # if older branchheads are reachable from new ones, they aren't
410 410 # really branchheads. Note checking parents is insufficient:
411 411 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
412 412 for branch, newnodes in newbranches.iteritems():
413 413 bheads = partial.setdefault(branch, [])
414 414 bheads.extend(newnodes)
415 415 if len(bheads) < 2:
416 416 continue
417 417 newbheads = []
418 418 # starting from tip means fewer passes over reachable
419 419 while newnodes:
420 420 latest = newnodes.pop()
421 421 if latest not in bheads:
422 422 continue
423 423 minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
424 424 reachable = self.changelog.reachable(latest, minbhrev)
425 425 bheads = [b for b in bheads if b not in reachable]
426 426 newbheads.insert(0, latest)
427 427 bheads.extend(newbheads)
428 428 partial[branch] = bheads
429 429
430 430 def lookup(self, key):
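# Resolution order: integer revision, '.', 'null', 'tip', exact node
# or identifier match (_match), tag name, branch name, then an
# unambiguous hex prefix (_partialmatch).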
431 431 if isinstance(key, int):
432 432 return self.changelog.node(key)
433 433 elif key == '.':
434 434 return self.dirstate.parents()[0]
435 435 elif key == 'null':
436 436 return nullid
437 437 elif key == 'tip':
438 438 return self.changelog.tip()
439 439 n = self.changelog._match(key)
440 440 if n:
441 441 return n
442 442 if key in self.tags():
443 443 return self.tags()[key]
444 444 if key in self.branchtags():
445 445 return self.branchtags()[key]
446 446 n = self.changelog._partialmatch(key)
447 447 if n:
448 448 return n
449 449
450 450 # can't find key, check if it might have come from damaged dirstate
451 451 if key in self.dirstate.parents():
452 452 raise error.Abort(_("working directory has unknown parent '%s'!")
453 453 % short(key))
454 454 try:
455 455 if len(key) == 20:
456 456 key = hex(key)
457 457 except:
458 458 pass
459 459 raise error.RepoLookupError(_("unknown revision '%s'") % key)
460 460
461 461 def local(self):
462 462 return True
463 463
464 464 def join(self, f):
465 465 return os.path.join(self.path, f)
466 466
467 467 def wjoin(self, f):
468 468 return os.path.join(self.root, f)
469 469
470 470 def rjoin(self, f):
471 471 return os.path.join(self.root, util.pconvert(f))
472 472
473 473 def file(self, f):
474 474 if f[0] == '/':
475 475 f = f[1:]
476 476 return filelog.filelog(self.sopener, f)
477 477
478 478 def changectx(self, changeid):
479 479 return self[changeid]
480 480
481 481 def parents(self, changeid=None):
482 482 '''get list of changectxs for parents of changeid'''
483 483 return self[changeid].parents()
484 484
485 485 def filectx(self, path, changeid=None, fileid=None):
486 486 """changeid can be a changeset revision, node, or tag.
487 487 fileid can be a file revision or node."""
488 488 return context.filectx(self, path, changeid, fileid)
489 489
490 490 def getcwd(self):
491 491 return self.dirstate.getcwd()
492 492
493 493 def pathto(self, f, cwd=None):
494 494 return self.dirstate.pathto(f, cwd)
495 495
496 496 def wfile(self, f, mode='r'):
497 497 return self.wopener(f, mode)
498 498
499 499 def _link(self, f):
500 500 return os.path.islink(self.wjoin(f))
501 501
502 502 def _filter(self, filter, filename, data):
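# 'filter' names a config section ('encode' on read, 'decode' on
# write); each item maps a file pattern either to a shell command or,
# by prefix, to a filter registered with adddatafilter(). Compiled
# matchers are memoized in self.filterpats.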
503 503 if filter not in self.filterpats:
504 504 l = []
505 505 for pat, cmd in self.ui.configitems(filter):
506 506 if cmd == '!':
507 507 continue
508 508 mf = match_.match(self.root, '', [pat])
509 509 fn = None
510 510 params = cmd
511 511 for name, filterfn in self._datafilters.iteritems():
512 512 if cmd.startswith(name):
513 513 fn = filterfn
514 514 params = cmd[len(name):].lstrip()
515 515 break
516 516 if not fn:
517 517 fn = lambda s, c, **kwargs: util.filter(s, c)
518 518 # Wrap old filters not supporting keyword arguments
519 519 if not inspect.getargspec(fn)[2]:
520 520 oldfn = fn
521 521 fn = lambda s, c, **kwargs: oldfn(s, c)
522 522 l.append((mf, fn, params))
523 523 self.filterpats[filter] = l
524 524
525 525 for mf, fn, cmd in self.filterpats[filter]:
526 526 if mf(filename):
527 527 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
528 528 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
529 529 break
530 530
531 531 return data
532 532
533 533 def adddatafilter(self, name, filter):
534 534 self._datafilters[name] = filter
535 535
536 536 def wread(self, filename):
537 537 if self._link(filename):
538 538 data = os.readlink(self.wjoin(filename))
539 539 else:
540 540 data = self.wopener(filename, 'r').read()
541 541 return self._filter("encode", filename, data)
542 542
543 543 def wwrite(self, filename, data, flags):
544 544 data = self._filter("decode", filename, data)
545 545 try:
546 546 os.unlink(self.wjoin(filename))
547 547 except OSError:
548 548 pass
549 549 if 'l' in flags:
550 550 self.wopener.symlink(data, filename)
551 551 else:
552 552 self.wopener(filename, 'w').write(data)
553 553 if 'x' in flags:
554 554 util.set_flags(self.wjoin(filename), False, True)
555 555
556 556 def wwritedata(self, filename, data):
557 557 return self._filter("decode", filename, data)
558 558
559 559 def transaction(self):
560 560 tr = self._transref and self._transref() or None
561 561 if tr and tr.running():
562 562 return tr.nest()
563 563
564 564 # abort here if the journal already exists
565 565 if os.path.exists(self.sjoin("journal")):
566 566 raise error.RepoError(
567 567 _("abandoned transaction found - run hg recover"))
568 568
569 569 # save dirstate for rollback
570 570 try:
571 571 ds = self.opener("dirstate").read()
572 572 except IOError:
573 573 ds = ""
574 574 self.opener("journal.dirstate", "w").write(ds)
575 575 self.opener("journal.branch", "w").write(self.dirstate.branch())
576 576
577 577 renames = [(self.sjoin("journal"), self.sjoin("undo")),
578 578 (self.join("journal.dirstate"), self.join("undo.dirstate")),
579 579 (self.join("journal.branch"), self.join("undo.branch"))]
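# aftertrans() renames these journal files to their undo.* counterparts
# once the transaction closes successfully; rollback() replays undo.*,
# while a leftover 'journal' file is what 'hg recover' cleans up.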
580 580 tr = transaction.transaction(self.ui.warn, self.sopener,
581 581 self.sjoin("journal"),
582 582 aftertrans(renames),
583 583 self.store.createmode)
584 584 self._transref = weakref.ref(tr)
585 585 return tr
586 586
587 587 def recover(self):
588 588 lock = self.lock()
589 589 try:
590 590 if os.path.exists(self.sjoin("journal")):
591 591 self.ui.status(_("rolling back interrupted transaction\n"))
592 592 transaction.rollback(self.sopener, self.sjoin("journal"),
593 593 self.ui.warn)
594 594 self.invalidate()
595 595 return True
596 596 else:
597 597 self.ui.warn(_("no interrupted transaction available\n"))
598 598 return False
599 599 finally:
600 600 lock.release()
601 601
602 602 def rollback(self):
603 603 wlock = lock = None
604 604 try:
605 605 wlock = self.wlock()
606 606 lock = self.lock()
607 607 if os.path.exists(self.sjoin("undo")):
608 608 self.ui.status(_("rolling back last transaction\n"))
609 609 transaction.rollback(self.sopener, self.sjoin("undo"),
610 610 self.ui.warn)
611 611 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
612 612 try:
613 613 branch = self.opener("undo.branch").read()
614 614 self.dirstate.setbranch(branch)
615 615 except IOError:
616 616 self.ui.warn(_("named branch could not be reset: "
617 617 "current branch is still: %s\n")
618 618 % encoding.tolocal(self.dirstate.branch()))
619 619 self.invalidate()
620 620 self.dirstate.invalidate()
621 621 self.destroyed()
622 622 else:
623 623 self.ui.warn(_("no rollback information available\n"))
624 624 finally:
625 625 release(lock, wlock)
626 626
627 627 def invalidate(self):
628 628 for a in "changelog manifest".split():
629 629 if a in self.__dict__:
630 630 delattr(self, a)
631 631 self._tags = None
632 632 self._tagtypes = None
633 633 self.nodetagscache = None
634 634 self._branchcache = None # in UTF-8
635 635 self._branchcachetip = None
636 636
637 637 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
638 638 try:
639 639 l = lock.lock(lockname, 0, releasefn, desc=desc)
640 640 except error.LockHeld, inst:
641 641 if not wait:
642 642 raise
643 643 self.ui.warn(_("waiting for lock on %s held by %r\n") %
644 644 (desc, inst.locker))
645 645 # default to 600 seconds timeout
646 646 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
647 647 releasefn, desc=desc)
648 648 if acquirefn:
649 649 acquirefn()
650 650 return l
651 651
652 652 def lock(self, wait=True):
653 653 '''Lock the repository store (.hg/store) and return a weak reference
654 654 to the lock. Use this before modifying the store (e.g. committing or
655 655 stripping). If you are opening a transaction, get a lock as well.'''
656 656 l = self._lockref and self._lockref()
657 657 if l is not None and l.held:
658 658 l.lock()
659 659 return l
660 660
661 661 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
662 662 _('repository %s') % self.origroot)
663 663 self._lockref = weakref.ref(l)
664 664 return l
665 665
666 666 def wlock(self, wait=True):
667 667 '''Lock the non-store parts of the repository (everything under
668 668 .hg except .hg/store) and return a weak reference to the lock.
669 669 Use this before modifying files in .hg.'''
670 670 l = self._wlockref and self._wlockref()
671 671 if l is not None and l.held:
672 672 l.lock()
673 673 return l
674 674
675 675 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
676 676 self.dirstate.invalidate, _('working directory of %s') %
677 677 self.origroot)
678 678 self._wlockref = weakref.ref(l)
679 679 return l
680 680
681 681 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
682 682 """
683 683 commit an individual file as part of a larger transaction
684 684 """
685 685
686 686 fname = fctx.path()
687 687 text = fctx.data()
688 688 flog = self.file(fname)
689 689 fparent1 = manifest1.get(fname, nullid)
690 690 fparent2 = fparent2o = manifest2.get(fname, nullid)
691 691
692 692 meta = {}
693 693 copy = fctx.renamed()
694 694 if copy and copy[0] != fname:
695 695 # Mark the new revision of this file as a copy of another
696 696 # file. This copy data will effectively act as a parent
697 697 # of this new revision. If this is a merge, the first
698 698 # parent will be the nullid (meaning "look up the copy data")
699 699 # and the second one will be the other parent. For example:
700 700 #
701 701 # 0 --- 1 --- 3 rev1 changes file foo
702 702 # \ / rev2 renames foo to bar and changes it
703 703 # \- 2 -/ rev3 should have bar with all changes and
704 704 # should record that bar descends from
705 705 # bar in rev2 and foo in rev1
706 706 #
707 707 # this allows this merge to succeed:
708 708 #
709 709 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
710 710 # \ / merging rev3 and rev4 should use bar@rev2
711 711 # \- 2 --- 4 as the merge base
712 712 #
713 713
714 714 cfname = copy[0]
715 715 crev = manifest1.get(cfname)
716 716 newfparent = fparent2
717 717
718 718 if manifest2: # branch merge
719 719 if fparent2 == nullid or crev is None: # copied on remote side
720 720 if cfname in manifest2:
721 721 crev = manifest2[cfname]
722 722 newfparent = fparent1
723 723
724 724 # find source in nearest ancestor if we've lost track
725 725 if not crev:
726 726 self.ui.debug(" %s: searching for copy revision for %s\n" %
727 727 (fname, cfname))
728 728 for ancestor in self['.'].ancestors():
729 729 if cfname in ancestor:
730 730 crev = ancestor[cfname].filenode()
731 731 break
732 732
733 733 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
734 734 meta["copy"] = cfname
735 735 meta["copyrev"] = hex(crev)
736 736 fparent1, fparent2 = nullid, newfparent
737 737 elif fparent2 != nullid:
738 738 # is one parent an ancestor of the other?
739 739 fparentancestor = flog.ancestor(fparent1, fparent2)
740 740 if fparentancestor == fparent1:
741 741 fparent1, fparent2 = fparent2, nullid
742 742 elif fparentancestor == fparent2:
743 743 fparent2 = nullid
744 744
745 745 # is the file changed?
746 746 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
747 747 changelist.append(fname)
748 748 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
749 749
750 750 # are just the flags changed during merge?
751 751 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
752 752 changelist.append(fname)
753 753
754 754 return fparent1
755 755
756 756 def commit(self, text="", user=None, date=None, match=None, force=False,
757 757 editor=False, extra={}):
758 758 """Add a new revision to current repository.
759 759
760 760 Revision information is gathered from the working directory,
761 761 match can be used to filter the committed files. If editor is
762 762 supplied, it is called to get a commit message.
763 763 """
764 764
765 765 def fail(f, msg):
766 766 raise util.Abort('%s: %s' % (f, msg))
767 767
768 768 if not match:
769 769 match = match_.always(self.root, '')
770 770
771 771 if not force:
772 772 vdirs = []
773 773 match.dir = vdirs.append
774 774 match.bad = fail
775 775
776 776 wlock = self.wlock()
777 777 try:
778 778 p1, p2 = self.dirstate.parents()
779 779 wctx = self[None]
780 780
781 781 if (not force and p2 != nullid and match and
782 782 (match.files() or match.anypats())):
783 783 raise util.Abort(_('cannot partially commit a merge '
784 784 '(do not specify files or patterns)'))
785 785
786 786 changes = self.status(match=match, clean=force)
787 787 if force:
788 788 changes[0].extend(changes[6]) # mq may commit unchanged files
789 789
790 790 # check subrepos
791 791 subs = []
792 792 for s in wctx.substate:
793 793 if match(s) and wctx.sub(s).dirty():
794 794 subs.append(s)
795 795 if subs and '.hgsubstate' not in changes[0]:
796 796 changes[0].insert(0, '.hgsubstate')
797 797
798 798 # make sure all explicit patterns are matched
799 799 if not force and match.files():
800 800 matched = set(changes[0] + changes[1] + changes[2])
801 801
802 802 for f in match.files():
803 803 if f == '.' or f in matched or f in wctx.substate:
804 804 continue
805 805 if f in changes[3]: # missing
806 806 fail(f, _('file not found!'))
807 807 if f in vdirs: # visited directory
808 808 d = f + '/'
809 809 for mf in matched:
810 810 if mf.startswith(d):
811 811 break
812 812 else:
813 813 fail(f, _("no match under directory!"))
814 814 elif f not in self.dirstate:
815 815 fail(f, _("file not tracked!"))
816 816
817 817 if (not force and not extra.get("close") and p2 == nullid
818 818 and not (changes[0] or changes[1] or changes[2])
819 819 and self[None].branch() == self['.'].branch()):
820 820 return None
821 821
822 822 ms = merge_.mergestate(self)
823 823 for f in changes[0]:
824 824 if f in ms and ms[f] == 'u':
825 825 raise util.Abort(_("unresolved merge conflicts "
826 826 "(see hg resolve)"))
827 827
828 828 cctx = context.workingctx(self, (p1, p2), text, user, date,
829 829 extra, changes)
830 830 if editor:
831 831 cctx._text = editor(self, cctx, subs)
832 832 edited = (text != cctx._text)
833 833
834 834 # commit subs
835 835 if subs:
836 836 state = wctx.substate.copy()
837 837 for s in subs:
838 838 self.ui.status(_('committing subrepository %s\n') % s)
839 839 sr = wctx.sub(s).commit(cctx._text, user, date)
840 840 state[s] = (state[s][0], sr)
841 841 subrepo.writestate(self, state)
842 842
843 843 # Save commit message in case this transaction gets rolled back
844 844 # (e.g. by a pretxncommit hook). Leave the content alone on
845 845 # the assumption that the user will use the same editor again.
846 846 msgfile = self.opener('last-message.txt', 'wb')
847 847 msgfile.write(cctx._text)
848 848 msgfile.close()
849 849
850 850 try:
851 851 ret = self.commitctx(cctx, True)
852 852 except:
853 853 if edited:
854 854 msgfn = self.pathto(msgfile.name[len(self.root)+1:])
855 855 self.ui.write(
856 856 _('note: commit message saved in %s\n') % msgfn)
857 857 raise
858 858
859 859 # update dirstate and mergestate
860 860 for f in changes[0] + changes[1]:
861 861 self.dirstate.normal(f)
862 862 for f in changes[2]:
863 863 self.dirstate.forget(f)
864 864 self.dirstate.setparents(ret)
865 865 ms.reset()
866 866
867 867 return ret
868 868
869 869 finally:
870 870 wlock.release()
871 871
872 872 def commitctx(self, ctx, error=False):
873 873 """Add a new revision to current repository.
874 874
875 875 Revision information is passed via the context argument.
876 876 """
877 877
878 878 tr = lock = None
879 879 removed = ctx.removed()
880 880 p1, p2 = ctx.p1(), ctx.p2()
881 881 m1 = p1.manifest().copy()
882 882 m2 = p2.manifest()
883 883 user = ctx.user()
884 884
885 885 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
886 886 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
887 887
888 888 lock = self.lock()
889 889 try:
890 890 tr = self.transaction()
891 891 trp = weakref.proxy(tr)
892 892
893 893 # check in files
894 894 new = {}
895 895 changed = []
896 896 linkrev = len(self)
897 897 for f in sorted(ctx.modified() + ctx.added()):
898 898 self.ui.note(f + "\n")
899 899 try:
900 900 fctx = ctx[f]
901 901 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
902 902 changed)
903 903 m1.set(f, fctx.flags())
904 904 except (OSError, IOError):
905 905 if error:
906 906 self.ui.warn(_("trouble committing %s!\n") % f)
907 907 raise
908 908 else:
909 909 removed.append(f)
910 910
911 911 # update manifest
912 912 m1.update(new)
913 913 removed = [f for f in sorted(removed) if f in m1 or f in m2]
914 914 drop = [f for f in removed if f in m1]
915 915 for f in drop:
916 916 del m1[f]
917 917 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
918 918 p2.manifestnode(), (new, drop))
919 919
920 920 # update changelog
921 921 self.changelog.delayupdate()
922 922 n = self.changelog.add(mn, changed + removed, ctx.description(),
923 923 trp, p1.node(), p2.node(),
924 924 user, ctx.date(), ctx.extra().copy())
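# Give pretxncommit hooks a view of the not-yet-finalized changelog:
# writepending() flushes pending revisions to 00changelog.i.a and the
# hook machinery passes self.root along (cf. the HG_PENDING check in
# the changelog propertycache above).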
925 925 p = lambda: self.changelog.writepending() and self.root or ""
926 926 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
927 927 parent2=xp2, pending=p)
928 928 self.changelog.finalize(trp)
929 929 tr.close()
930 930
931 931 if self._branchcache:
932 932 self.branchtags()
933 933
934 934 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
935 935 return n
936 936 finally:
937 937 del tr
938 938 lock.release()
939 939
940 940 def destroyed(self):
941 941 '''Inform the repository that nodes have been destroyed.
942 942 Intended for use by strip and rollback, so there's a common
943 943 place for anything that has to be done after destroying history.'''
944 944 # XXX it might be nice if we could take the list of destroyed
945 945 # nodes, but I don't see an easy way for rollback() to do that
946 946
947 947 # Ensure the persistent tag cache is updated. Doing it now
948 948 # means that the tag cache only has to worry about destroyed
949 949 # heads immediately after a strip/rollback. That in turn
950 950 # guarantees that "cachetip == currenttip" (comparing both rev
951 951 # and node) always means no nodes have been added or destroyed.
952 952
953 953 # XXX this is suboptimal when qrefresh'ing: we strip the current
954 954 # head, refresh the tag cache, then immediately add a new head.
955 955 # But I think doing it this way is necessary for the "instant
956 956 # tag cache retrieval" case to work.
957 957 tags_.findglobaltags(self.ui, self, {}, {})
958 958
959 959 def walk(self, match, node=None):
960 960 '''
961 961 walk recursively through the directory tree or a given
962 962 changeset, finding all files matched by the match
963 963 function
964 964 '''
965 965 return self[node].walk(match)
966 966
967 967 def status(self, node1='.', node2=None, match=None,
968 968 ignored=False, clean=False, unknown=False):
969 969 """return status of files between two nodes or node and working directory
970 970
971 971 If node1 is None, use the first dirstate parent instead.
972 972 If node2 is None, compare node1 with working directory.
973 973 """
974 974
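# Returns a tuple of seven sorted lists, in this order:
#   modified, added, removed, deleted, unknown, ignored, clean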
975 975 def mfmatches(ctx):
976 976 mf = ctx.manifest().copy()
977 977 for fn in mf.keys():
978 978 if not match(fn):
979 979 del mf[fn]
980 980 return mf
981 981
982 982 if isinstance(node1, context.changectx):
983 983 ctx1 = node1
984 984 else:
985 985 ctx1 = self[node1]
986 986 if isinstance(node2, context.changectx):
987 987 ctx2 = node2
988 988 else:
989 989 ctx2 = self[node2]
990 990
991 991 working = ctx2.rev() is None
992 992 parentworking = working and ctx1 == self['.']
993 993 match = match or match_.always(self.root, self.getcwd())
994 994 listignored, listclean, listunknown = ignored, clean, unknown
995 995
996 996 # load earliest manifest first for caching reasons
997 997 if not working and ctx2.rev() < ctx1.rev():
998 998 ctx2.manifest()
999 999
1000 1000 if not parentworking:
1001 1001 def bad(f, msg):
1002 1002 if f not in ctx1:
1003 1003 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1004 1004 match.bad = bad
1005 1005
1006 1006 if working: # we need to scan the working dir
1007 1007 subrepos = ctx1.substate.keys()
1008 1008 s = self.dirstate.status(match, subrepos, listignored,
1009 1009 listclean, listunknown)
1010 1010 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1011 1011
1012 1012 # check for any possibly clean files
1013 1013 if parentworking and cmp:
1014 1014 fixup = []
1015 1015 # do a full compare of any files that might have changed
1016 1016 for f in sorted(cmp):
1017 1017 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1018 1018 or ctx1[f].cmp(ctx2[f].data())):
1019 1019 modified.append(f)
1020 1020 else:
1021 1021 fixup.append(f)
1022 1022
1023 1023 if listclean:
1024 1024 clean += fixup
1025 1025
1026 1026 # update dirstate for files that are actually clean
1027 1027 if fixup:
1028 1028 try:
1029 1029 # updating the dirstate is optional
1030 1030 # so we don't wait on the lock
1031 1031 wlock = self.wlock(False)
1032 1032 try:
1033 1033 for f in fixup:
1034 1034 self.dirstate.normal(f)
1035 1035 finally:
1036 1036 wlock.release()
1037 1037 except error.LockError:
1038 1038 pass
1039 1039
1040 1040 if not parentworking:
1041 1041 mf1 = mfmatches(ctx1)
1042 1042 if working:
1043 1043 # we are comparing working dir against non-parent
1044 1044 # generate a pseudo-manifest for the working dir
1045 1045 mf2 = mfmatches(self['.'])
1046 1046 for f in cmp + modified + added:
1047 1047 mf2[f] = None
1048 1048 mf2.set(f, ctx2.flags(f))
1049 1049 for f in removed:
1050 1050 if f in mf2:
1051 1051 del mf2[f]
1052 1052 else:
1053 1053 # we are comparing two revisions
1054 1054 deleted, unknown, ignored = [], [], []
1055 1055 mf2 = mfmatches(ctx2)
1056 1056
1057 1057 modified, added, clean = [], [], []
1058 1058 for fn in mf2:
1059 1059 if fn in mf1:
1060 1060 if (mf1.flags(fn) != mf2.flags(fn) or
1061 1061 (mf1[fn] != mf2[fn] and
1062 1062 (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
1063 1063 modified.append(fn)
1064 1064 elif listclean:
1065 1065 clean.append(fn)
1066 1066 del mf1[fn]
1067 1067 else:
1068 1068 added.append(fn)
1069 1069 removed = mf1.keys()
1070 1070
1071 1071 r = modified, added, removed, deleted, unknown, ignored, clean
1072 1072 [l.sort() for l in r]
1073 1073 return r
1074 1074
1075 1075 def add(self, list):
1076 1076 wlock = self.wlock()
1077 1077 try:
1078 1078 rejected = []
1079 1079 for f in list:
1080 1080 p = self.wjoin(f)
1081 1081 try:
1082 1082 st = os.lstat(p)
1083 1083 except:
1084 1084 self.ui.warn(_("%s does not exist!\n") % f)
1085 1085 rejected.append(f)
1086 1086 continue
1087 1087 if st.st_size > 10000000:
1088 1088 self.ui.warn(_("%s: files over 10MB may cause memory and"
1089 1089 " performance problems\n"
1090 1090 "(use 'hg revert %s' to unadd the file)\n")
1091 1091 % (f, f))
1092 1092 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1093 1093 self.ui.warn(_("%s not added: only files and symlinks "
1094 1094 "supported currently\n") % f)
1095 1095 rejected.append(p)
1096 1096 elif self.dirstate[f] in 'amn':
1097 1097 self.ui.warn(_("%s already tracked!\n") % f)
1098 1098 elif self.dirstate[f] == 'r':
1099 1099 self.dirstate.normallookup(f)
1100 1100 else:
1101 1101 self.dirstate.add(f)
1102 1102 return rejected
1103 1103 finally:
1104 1104 wlock.release()
1105 1105
1106 1106 def forget(self, list):
1107 1107 wlock = self.wlock()
1108 1108 try:
1109 1109 for f in list:
1110 1110 if self.dirstate[f] != 'a':
1111 1111 self.ui.warn(_("%s not added!\n") % f)
1112 1112 else:
1113 1113 self.dirstate.forget(f)
1114 1114 finally:
1115 1115 wlock.release()
1116 1116
1117 1117 def remove(self, list, unlink=False):
1118 1118 if unlink:
1119 1119 for f in list:
1120 1120 try:
1121 1121 util.unlink(self.wjoin(f))
1122 1122 except OSError, inst:
1123 1123 if inst.errno != errno.ENOENT:
1124 1124 raise
1125 1125 wlock = self.wlock()
1126 1126 try:
1127 1127 for f in list:
1128 1128 if unlink and os.path.exists(self.wjoin(f)):
1129 1129 self.ui.warn(_("%s still exists!\n") % f)
1130 1130 elif self.dirstate[f] == 'a':
1131 1131 self.dirstate.forget(f)
1132 1132 elif f not in self.dirstate:
1133 1133 self.ui.warn(_("%s not tracked!\n") % f)
1134 1134 else:
1135 1135 self.dirstate.remove(f)
1136 1136 finally:
1137 1137 wlock.release()
1138 1138
1139 1139 def undelete(self, list):
1140 1140 manifests = [self.manifest.read(self.changelog.read(p)[0])
1141 1141 for p in self.dirstate.parents() if p != nullid]
1142 1142 wlock = self.wlock()
1143 1143 try:
1144 1144 for f in list:
1145 1145 if self.dirstate[f] != 'r':
1146 1146 self.ui.warn(_("%s not removed!\n") % f)
1147 1147 else:
1148 1148 m = f in manifests[0] and manifests[0] or manifests[1]
1149 1149 t = self.file(f).read(m[f])
1150 1150 self.wwrite(f, t, m.flags(f))
1151 1151 self.dirstate.normal(f)
1152 1152 finally:
1153 1153 wlock.release()
1154 1154
1155 1155 def copy(self, source, dest):
1156 1156 p = self.wjoin(dest)
1157 1157 if not (os.path.exists(p) or os.path.islink(p)):
1158 1158 self.ui.warn(_("%s does not exist!\n") % dest)
1159 1159 elif not (os.path.isfile(p) or os.path.islink(p)):
1160 1160 self.ui.warn(_("copy failed: %s is not a file or a "
1161 1161 "symbolic link\n") % dest)
1162 1162 else:
1163 1163 wlock = self.wlock()
1164 1164 try:
1165 1165 if self.dirstate[dest] in '?r':
1166 1166 self.dirstate.add(dest)
1167 1167 self.dirstate.copy(source, dest)
1168 1168 finally:
1169 1169 wlock.release()
1170 1170
1171 1171 def heads(self, start=None):
1172 1172 heads = self.changelog.heads(start)
1173 1173 # sort the output in rev descending order
1174 1174 heads = [(-self.changelog.rev(h), h) for h in heads]
1175 1175 return [n for (r, n) in sorted(heads)]
1176 1176
1177 1177 def branchheads(self, branch=None, start=None, closed=False):
1178 1178 '''return a (possibly filtered) list of heads for the given branch
1179 1179
1180 1180 Heads are returned in topological order, from newest to oldest.
1181 1181 If branch is None, use the dirstate branch.
1182 1182 If start is not None, return only heads reachable from start.
1183 1183 If closed is True, return heads that are marked as closed as well.
1184 1184 '''
1185 1185 if branch is None:
1186 1186 branch = self[None].branch()
1187 1187 branches = self.branchmap()
1188 1188 if branch not in branches:
1189 1189 return []
1190 1190 # the cache returns heads ordered lowest to highest
1191 1191 bheads = list(reversed(branches[branch]))
1192 1192 if start is not None:
1193 1193 # filter out the heads that cannot be reached from startrev
1194 1194 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1195 1195 bheads = [h for h in bheads if h in fbheads]
1196 1196 if not closed:
1197 1197 bheads = [h for h in bheads if
1198 1198 ('close' not in self.changelog.read(h)[5])]
1199 1199 return bheads
1200 1200
1201 1201 def branches(self, nodes):
1202 1202 if not nodes:
1203 1203 nodes = [self.changelog.tip()]
1204 1204 b = []
1205 1205 for n in nodes:
1206 1206 t = n
1207 1207 while 1:
1208 1208 p = self.changelog.parents(n)
1209 1209 if p[1] != nullid or p[0] == nullid:
1210 1210 b.append((t, n, p[0], p[1]))
1211 1211 break
1212 1212 n = p[0]
1213 1213 return b
1214 1214
1215 1215 def between(self, pairs):
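# For each (top, bottom) pair, walk first parents from top and sample
# the nodes at exponentially growing distances 1, 2, 4, ... (note
# i == f, then f doubles). findcommonincoming() uses these samples for
# its binary search over remote branches.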
1216 1216 r = []
1217 1217
1218 1218 for top, bottom in pairs:
1219 1219 n, l, i = top, [], 0
1220 1220 f = 1
1221 1221
1222 1222 while n != bottom and n != nullid:
1223 1223 p = self.changelog.parents(n)[0]
1224 1224 if i == f:
1225 1225 l.append(n)
1226 1226 f = f * 2
1227 1227 n = p
1228 1228 i += 1
1229 1229
1230 1230 r.append(l)
1231 1231
1232 1232 return r
1233 1233
1234 1234 def findincoming(self, remote, base=None, heads=None, force=False):
1235 1235 """Return list of roots of the subsets of missing nodes from remote
1236 1236
1237 1237 If base dict is specified, assume that these nodes and their parents
1238 1238 exist on the remote side and that no child of a node of base exists
1239 1239 in both remote and self.
1240 1240 Furthermore, base will be updated to include the nodes that exist
1241 1241 in both self and remote but none of whose children exist in both.
1242 1242 If a list of heads is specified, return only nodes which are heads
1243 1243 or ancestors of these heads.
1244 1244
1245 1245 All the ancestors of base are in self and in remote.
1246 1246 All the descendants of the list returned are missing in self.
1247 1247 (and so we know that the rest of the nodes are missing in remote, see
1248 1248 outgoing)
1249 1249 """
1250 1250 return self.findcommonincoming(remote, base, heads, force)[1]
1251 1251
1252 1252 def findcommonincoming(self, remote, base=None, heads=None, force=False):
1253 1253 """Return a tuple (common, missing roots, heads) used to identify
1254 1254 missing nodes from remote.
1255 1255
1256 1256 If base dict is specified, assume that these nodes and their parents
1257 1257 exist on the remote side and that no child of a node of base exists
1258 1258 in both remote and self.
1259 1259 Furthermore, base will be updated to include the nodes that exist
1260 1260 in both self and remote but none of whose children exist in both.
1261 1261 If a list of heads is specified, return only nodes which are heads
1262 1262 or ancestors of these heads.
1263 1263
1264 1264 All the ancestors of base are in self and in remote.
1265 1265 """
1266 1266 m = self.changelog.nodemap
1267 1267 search = []
1268 1268 fetch = set()
1269 1269 seen = set()
1270 1270 seenbranch = set()
1271 1271 if base is None:
1272 1272 base = {}
1273 1273
1274 1274 if not heads:
1275 1275 heads = remote.heads()
1276 1276
1277 1277 if self.changelog.tip() == nullid:
1278 1278 base[nullid] = 1
1279 1279 if heads != [nullid]:
1280 1280 return [nullid], [nullid], list(heads)
1281 1281 return [nullid], [], []
1282 1282
1283 1283 # assume we're closer to the tip than the root
1284 1284 # and start by examining the heads
1285 1285 self.ui.status(_("searching for changes\n"))
1286 1286
1287 1287 unknown = []
1288 1288 for h in heads:
1289 1289 if h not in m:
1290 1290 unknown.append(h)
1291 1291 else:
1292 1292 base[h] = 1
1293 1293
1294 1294 heads = unknown
1295 1295 if not unknown:
1296 1296 return base.keys(), [], []
1297 1297
1298 1298 req = set(unknown)
1299 1299 reqcnt = 0
1300 1300
1301 1301 # search through remote branches
1302 1302 # a 'branch' here is a linear segment of history, with four parts:
1303 1303 # head, root, first parent, second parent
1304 1304 # (a branch always has two parents (or none) by definition)
1305 1305 unknown = remote.branches(unknown)
1306 1306 while unknown:
1307 1307 r = []
1308 1308 while unknown:
1309 1309 n = unknown.pop(0)
1310 1310 if n[0] in seen:
1311 1311 continue
1312 1312
1313 1313 self.ui.debug("examining %s:%s\n"
1314 1314 % (short(n[0]), short(n[1])))
1315 1315 if n[0] == nullid: # found the end of the branch
1316 1316 pass
1317 1317 elif n in seenbranch:
1318 1318 self.ui.debug("branch already found\n")
1319 1319 continue
1320 1320 elif n[1] and n[1] in m: # do we know the base?
1321 1321 self.ui.debug("found incomplete branch %s:%s\n"
1322 1322 % (short(n[0]), short(n[1])))
1323 1323 search.append(n[0:2]) # schedule branch range for scanning
1324 1324 seenbranch.add(n)
1325 1325 else:
1326 1326 if n[1] not in seen and n[1] not in fetch:
1327 1327 if n[2] in m and n[3] in m:
1328 1328 self.ui.debug("found new changeset %s\n" %
1329 1329 short(n[1]))
1330 1330 fetch.add(n[1]) # earliest unknown
1331 1331 for p in n[2:4]:
1332 1332 if p in m:
1333 1333 base[p] = 1 # latest known
1334 1334
1335 1335 for p in n[2:4]:
1336 1336 if p not in req and p not in m:
1337 1337 r.append(p)
1338 1338 req.add(p)
1339 1339 seen.add(n[0])
1340 1340
1341 1341 if r:
1342 1342 reqcnt += 1
1343 1343 self.ui.debug("request %d: %s\n" %
1344 1344 (reqcnt, " ".join(map(short, r))))
1345 1345 for p in xrange(0, len(r), 10):
1346 1346 for b in remote.branches(r[p:p + 10]):
1347 1347 self.ui.debug("received %s:%s\n" %
1348 1348 (short(b[0]), short(b[1])))
1349 1349 unknown.append(b)
1350 1350
1351 1351 # do binary search on the branches we found
1352 1352 while search:
1353 1353 newsearch = []
1354 1354 reqcnt += 1
1355 1355 for n, l in zip(search, remote.between(search)):
1356 1356 l.append(n[1])
1357 1357 p = n[0]
1358 1358 f = 1
1359 1359 for i in l:
1360 1360 self.ui.debug("narrowing %d:%d %s\n" % (f, len(l), short(i)))
1361 1361 if i in m:
1362 1362 if f <= 2:
1363 1363 self.ui.debug("found new branch changeset %s\n" %
1364 1364 short(p))
1365 1365 fetch.add(p)
1366 1366 base[i] = 1
1367 1367 else:
1368 1368 self.ui.debug("narrowed branch search to %s:%s\n"
1369 1369 % (short(p), short(i)))
1370 1370 newsearch.append((p, i))
1371 1371 break
1372 1372 p, f = i, f * 2
1373 1373 search = newsearch
1374 1374
1375 1375 # sanity check our fetch list
1376 1376 for f in fetch:
1377 1377 if f in m:
1378 1378 raise error.RepoError(_("already have changeset ")
1379 1379 + short(f[:4]))
1380 1380
1381 1381 if base.keys() == [nullid]:
1382 1382 if force:
1383 1383 self.ui.warn(_("warning: repository is unrelated\n"))
1384 1384 else:
1385 1385 raise util.Abort(_("repository is unrelated"))
1386 1386
1387 1387 self.ui.debug("found new changesets starting at " +
1388 1388 " ".join([short(f) for f in fetch]) + "\n")
1389 1389
1390 1390 self.ui.debug("%d total queries\n" % reqcnt)
1391 1391
1392 1392 return base.keys(), list(fetch), heads
1393 1393
1394 1394 def findoutgoing(self, remote, base=None, heads=None, force=False):
1395 1395 """Return list of nodes that are roots of subsets not in remote
1396 1396
1397 1397 If base dict is specified, assume that these nodes and their parents
1398 1398 exist on the remote side.
1399 1399 If a list of heads is specified, return only nodes which are heads
1400 1400 or ancestors of these heads, and return a second element which
1401 1401 contains all remote heads which get new children.
1402 1402 """
1403 1403 if base is None:
1404 1404 base = {}
1405 1405 self.findincoming(remote, base, heads, force=force)
1406 1406
1407 1407 self.ui.debug("common changesets up to "
1408 1408 + " ".join(map(short, base.keys())) + "\n")
1409 1409
1410 1410 remain = set(self.changelog.nodemap)
1411 1411
1412 1412 # prune everything remote has from the tree
1413 1413 remain.remove(nullid)
1414 1414 remove = base.keys()
1415 1415 while remove:
1416 1416 n = remove.pop(0)
1417 1417 if n in remain:
1418 1418 remain.remove(n)
1419 1419 for p in self.changelog.parents(n):
1420 1420 remove.append(p)
1421 1421
1422 1422 # find every node whose parents have been pruned
1423 1423 subset = []
1424 1424 # find every remote head that will get new children
1425 1425 updated_heads = set()
1426 1426 for n in remain:
1427 1427 p1, p2 = self.changelog.parents(n)
1428 1428 if p1 not in remain and p2 not in remain:
1429 1429 subset.append(n)
1430 1430 if heads:
1431 1431 if p1 in heads:
1432 1432 updated_heads.add(p1)
1433 1433 if p2 in heads:
1434 1434 updated_heads.add(p2)
1435 1435
1436 1436 # this is the set of all roots we have to push
1437 1437 if heads:
1438 1438 return subset, list(updated_heads)
1439 1439 else:
1440 1440 return subset
1441 1441
1442 1442 def pull(self, remote, heads=None, force=False):
1443 1443 lock = self.lock()
1444 1444 try:
1445 1445 common, fetch, rheads = self.findcommonincoming(remote, heads=heads,
1446 1446 force=force)
1447 1447 if fetch == [nullid]:
1448 1448 self.ui.status(_("requesting all changes\n"))
1449 1449
1450 1450 if not fetch:
1451 1451 self.ui.status(_("no changes found\n"))
1452 1452 return 0
1453 1453
1454 1454 if heads is None and remote.capable('changegroupsubset'):
1455 1455 heads = rheads
1456 1456
1457 1457 if heads is None:
1458 1458 cg = remote.changegroup(fetch, 'pull')
1459 1459 else:
1460 1460 if not remote.capable('changegroupsubset'):
1461 1461 raise util.Abort(_("Partial pull cannot be done because "
1462 1462 "other repository doesn't support "
1463 1463 "changegroupsubset."))
1464 1464 cg = remote.changegroupsubset(fetch, heads, 'pull')
1465 1465 return self.addchangegroup(cg, 'pull', remote.url())
1466 1466 finally:
1467 1467 lock.release()
1468 1468
1469 1469 def push(self, remote, force=False, revs=None):
1470 1470 # there are two ways to push to remote repo:
1471 1471 #
1472 1472 # addchangegroup assumes local user can lock remote
1473 1473 # repo (local filesystem, old ssh servers).
1474 1474 #
1475 1475 # unbundle assumes local user cannot lock remote repo (new ssh
1476 1476 # servers, http servers).
1477 1477
1478 1478 if remote.capable('unbundle'):
1479 1479 return self.push_unbundle(remote, force, revs)
1480 1480 return self.push_addchangegroup(remote, force, revs)
1481 1481
1482 1482 def prepush(self, remote, force, revs):
1483 1483 '''Analyze the local and remote repositories and determine which
1484 1484 changesets need to be pushed to the remote. Return a tuple
1485 1485 (changegroup, remoteheads). changegroup is a readable file-like
1486 1486 object whose read() returns successive changegroup chunks ready to
1487 1487 be sent over the wire. remoteheads is the list of remote heads.
1488 1488 '''
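# Return conventions used below: (None, 1) when there is nothing to
# push, (None, 0) when the push is vetoed (new remote heads or a new
# named branch without --force), and (cg, remote_heads) on success.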
1489 1489 common = {}
1490 1490 remote_heads = remote.heads()
1491 1491 inc = self.findincoming(remote, common, remote_heads, force=force)
1492 1492
1493 1493 update, updated_heads = self.findoutgoing(remote, common, remote_heads)
1494 1494 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1495 1495
1496 1496 def checkbranch(lheads, rheads, updatelb):
1497 1497 '''
1498 1498 check whether there are more local heads than remote heads on
1499 1499 a specific branch.
1500 1500
1501 1501 lheads: local branch heads
1502 1502 rheads: remote branch heads
1503 1503 updatelb: outgoing local branch bases
1504 1504 '''
1505 1505
1506 1506 warn = 0
1507 1507
1508 1508 if not revs and len(lheads) > len(rheads):
1509 1509 warn = 1
1510 1510 else:
1511 1511 # add local heads involved in the push
1512 1512 updatelheads = [self.changelog.heads(x, lheads)
1513 1513 for x in updatelb]
1514 1514 newheads = set(sum(updatelheads, [])) & set(lheads)
1515 1515
1516 1516 if not newheads:
1517 1517 return True
1518 1518
1519 1519 # add heads we don't have or that are not involved in the push
1520 1520 for r in rheads:
1521 1521 if r in self.changelog.nodemap:
1522 1522 desc = self.changelog.heads(r, heads)
1523 1523 l = [h for h in heads if h in desc]
1524 1524 if not l:
1525 1525 newheads.add(r)
1526 1526 else:
1527 1527 newheads.add(r)
1528 1528 if len(newheads) > len(rheads):
1529 1529 warn = 1
1530 1530
1531 1531 if warn:
1532 1532 if not rheads: # new branch requires --force
1533 1533 self.ui.warn(_("abort: push creates new"
1534 1534 " remote branch '%s'!\n") %
1535 1535 self[lheads[0]].branch())
1536 1536 else:
1537 1537 self.ui.warn(_("abort: push creates new remote heads!\n"))
1538 1538
1539 1539 self.ui.status(_("(did you forget to merge?"
1540 1540 " use push -f to force)\n"))
1541 1541 return False
1542 1542 return True
1543 1543
1544 1544 if not bases:
1545 1545 self.ui.status(_("no changes found\n"))
1546 1546 return None, 1
1547 1547 elif not force:
1548 1548 # Check for each named branch if we're creating new remote heads.
1549 1549 # To be a remote head after push, node must be either:
1550 1550 # - unknown locally
1551 1551 # - a local outgoing head descended from update
1552 1552 # - a remote head that's known locally and not
1553 1553 # ancestral to an outgoing head
1554 1554 #
1555 1555 # New named branches cannot be created without --force.
1556 1556
1557 1557 if remote_heads != [nullid]:
1558 1558 if remote.capable('branchmap'):
1559 1559 localhds = {}
1560 1560 if not revs:
1561 1561 localhds = self.branchmap()
1562 1562 else:
1563 1563 for n in heads:
1564 1564 branch = self[n].branch()
1565 1565 if branch in localhds:
1566 1566 localhds[branch].append(n)
1567 1567 else:
1568 1568 localhds[branch] = [n]
1569 1569
1570 1570 remotehds = remote.branchmap()
1571 1571
1572 1572 for lh in localhds:
1573 1573 if lh in remotehds:
1574 1574 rheads = remotehds[lh]
1575 1575 else:
1576 1576 rheads = []
1577 1577 lheads = localhds[lh]
1578 1578 if not checkbranch(lheads, rheads, update):
1579 1579 return None, 0
1580 1580 else:
1581 1581 if not checkbranch(heads, remote_heads, update):
1582 1582 return None, 0
1583 1583
1584 1584 if inc:
1585 1585 self.ui.warn(_("note: unsynced remote changes!\n"))
1586 1586
1587 1587
1588 1588 if revs is None:
1589 1589 # use the fast path, no race possible on push
1590 1590 nodes = self.changelog.findmissing(common.keys())
1591 1591 cg = self._changegroup(nodes, 'push')
1592 1592 else:
1593 1593 cg = self.changegroupsubset(update, revs, 'push')
1594 1594 return cg, remote_heads

    def push_addchangegroup(self, remote, force, revs):
        lock = remote.lock()
        try:
            ret = self.prepush(remote, force, revs)
            if ret[0] is not None:
                cg, remote_heads = ret
                return remote.addchangegroup(cg, 'push', self.url())
            return ret[1]
        finally:
            lock.release()

    def push_unbundle(self, remote, force, revs):
        # local repo finds heads on server, finds out what revs it
        # must push. once revs transferred, if server finds it has
        # different heads (someone else won commit/push race), server
        # aborts.

        ret = self.prepush(remote, force, revs)
        if ret[0] is not None:
            cg, remote_heads = ret
            if force:
                remote_heads = ['force']
            return remote.unbundle(cg, remote_heads, 'push')
        return ret[1]

    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug("list of changesets:\n")
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source, extranodes=None):
        """Compute a changegroup consisting of all the nodes that are
        descendants of any of the bases and ancestors of any of the heads.
        Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        The caller can specify some nodes that must be included in the
        changegroup using the extranodes argument. It should be a dict
        where the keys are the filenames (or 1 for the manifest), and the
        values are lists of (node, linknode) tuples, where node is a wanted
        node and linknode is the changelog node that should be transmitted as
        the linkrev.
        """
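
        # Illustrative extranodes value (hypothetical nodes, following the
        # shape described in the docstring above):
        #   {1: [(manifest_node, linknode)],
        #    'foo/bar.txt': [(file_node, linknode)]}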

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        if not bases:
            bases = [nullid]
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)

        if extranodes is None:
            # can we go through the fast path ?
            heads.sort()
            allheads = self.heads()
            allheads.sort()
            if heads == allheads:
                return self._changegroup(msng_cl_lst, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)

        self.changegroupinfo(msng_cl_lst, source)
        # Some bases may turn out to be superfluous, and some heads may be
        # too. nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = set()
        # We assume that all parents of bases are known heads.
        for n in bases:
            knownheads.update(cl.parents(n))
        knownheads.discard(nullid)
        knownheads = list(knownheads)
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known. The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into a set.
            has_cl_set = set(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = set()

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents. This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            for r in revlog.ancestors(*[revlog.rev(n) for n in hasset]):
                msngset.pop(revlog.node(r), None)

        # This is a function generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup. The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and a total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode we saw any manifest
            # referenced by so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = set()
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
                if linknode in has_cl_set:
                    has_mnfst_set.add(n)
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # for the inner function.
        def filenode_collector(changedfiles):
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to. It
            # does this by assuming that a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r - 1 in mnfst.parentrevs(r):
                    # If the previous rev is one of the parents,
                    # we only need to see a diff.
                    deltamf = mnfst.readdelta(mnfstnode)
                    # For each line in the delta
                    for f, fnode in deltamf.iteritems():
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
            return collect_msng_filenodes

        # We have a list of filenodes we think we need for a file; let's
        # remove all those we know the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = set()
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
                if clnode in has_cl_set:
                    hasset.add(n)
            prune_parents(filerevlog, hasset, msngset)

        # A function generating function that sets up a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Add the nodes that were explicitly requested.
        def add_extra_nodes(name, nodes):
            if not extranodes or name not in extranodes:
                return

            for node, linknode in extranodes[name]:
                if node not in nodes:
                    nodes[node] = linknode

        # Now that we have all these utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            add_extra_nodes(1, msng_mnfst_set)
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(key=mnfst.rev)
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            if extranodes:
                for fname in extranodes:
                    if isinstance(fname, int):
                        continue
                    msng_filenode_set.setdefault(fname, {})
                    changedfiles[fname] = 1
            # Go through all our files in order sorted by name.
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if fname in msng_filenode_set:
                    prune_filenodes(fname, filerevlog)
                    add_extra_nodes(fname, msng_filenode_set[fname])
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(key=filerevlog.rev)
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if fname in msng_filenode_set:
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()

        if msng_cl_lst:
            self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())
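
    # Usage sketch (illustrative): the returned chunkbuffer is consumed
    # incrementally via read(), e.g.
    #   cg = repo.changegroupsubset(bases, heads, 'bundle')
    #   while True:
    #       chunk = cg.read(4096)
    #       if not chunk:
    #           break
    #       # ... write chunk to a bundle file or the wire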

    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        revset = set([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes, source)

        def identity(x):
            return x

        def gennodelst(log):
            for r in log:
                if log.linkrev(r) in revset:
                    yield log.node(r)

        def changed_file_collector(changedfileset):
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                changedfileset.update(c[3])
            return collect_changed_files

        def lookuprevlink_func(revlog):
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(revlog.rev(n)))
            return lookuprevlink

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files
            changedfiles = set()

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())

    def addchangegroup(self, source, srctype, url, emptyok=False):
        """add changegroup to repo.

        return values:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
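
        # Arithmetic sketch of the convention above (illustrative): pulling
        # two extra heads onto one existing head returns 3 (1 + 2 added
        # heads); going from two heads to one returns -2 (-1 - 1 removed
        # head); an unchanged head count returns 1.
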
        def csmap(x):
            self.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        # write changelog data to temp files so concurrent readers will not
        # see an inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = len(cl.heads())

        tr = self.transaction()
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, trp)

            # process the files
            self.ui.status(_("adding file changes\n"))
            while 1:
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug("adding %s revisions\n" % f)
                fl = self.file(f)
                o = len(fl)
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1

            newheads = len(cl.heads())
            heads = ""
            if oldheads and newheads != oldheads:
                heads = _(" (%+d heads)") % (newheads - oldheads)

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, heads))

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()
        finally:
            del tr

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug("updating the branch cache\n")
            self.branchtags()
            self.hook("changegroup", node=hex(cl.node(clstart)),
                      source=srctype, url=url)

            for i in xrange(clstart, clend):
                self.hook("incoming", node=hex(cl.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1

    def stream_in(self, remote):
        fp = remote.stream_out()
        l = fp.readline()
        try:
            resp = int(l)
        except ValueError:
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        if resp == 1:
            raise util.Abort(_('operation forbidden by server'))
        elif resp == 2:
            raise util.Abort(_('locking the remote repository failed'))
        elif resp != 0:
            raise util.Abort(_('the server sent an unknown error code'))
        self.ui.status(_('streaming all changes\n'))
        l = fp.readline()
        try:
            total_files, total_bytes = map(int, l.split(' ', 1))
        except (ValueError, TypeError):
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        self.ui.status(_('%d files to transfer, %s of data\n') %
                       (total_files, util.bytecount(total_bytes)))
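
        # Wire format sketch (illustrative name and size): each file arrives
        # as a header line of the form  name + '\0' + size + '\n'  (e.g.
        # "data/foo.txt.i\x002468\n"), followed by exactly `size` bytes of
        # raw store data, which is how the loop below parses the stream.
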
        start = time.time()
        for i in xrange(total_files):
            # XXX doesn't support '\n' or '\r' in filenames
            l = fp.readline()
            try:
                name, size = l.split('\0', 1)
                size = int(size)
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
            # for backwards compat, name was partially encoded
            ofp = self.sopener(store.decodedir(name), 'w')
            for chunk in util.filechunkiter(fp, limit=size):
                ofp.write(chunk)
            ofp.close()
        elapsed = time.time() - start
        if elapsed <= 0:
            elapsed = 0.001
        self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(total_bytes), elapsed,
                        util.bytecount(total_bytes / elapsed)))
        self.invalidate()
        return len(self.heads()) + 1
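
    # Return convention sketch (an assumption, by analogy with
    # addchangegroup's head-count convention above): reporting
    # len(self.heads()) + 1 lets callers treat a streaming clone like a
    # successful pull that added heads.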

    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream and not heads and remote.capable('stream'):
            return self.stream_in(remote)
        return self.pull(remote, heads)

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a
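
# Usage sketch (illustrative file names): a transaction can be handed the
# returned callback so that, once it completes, its journal is renamed to
# an undo backup:
#   ondone = aftertrans([('journal', 'undo')])
#   ondone()  # renames journal -> undo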

def instance(ui, path, create):
    return localrepository(ui, util.drop_scheme('file', path), create)

def islocal(path):
    return True