repo: add internal support for sharing store directories...
Matt Mackall
r8799:87d1fd40 default
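The change below teaches localrepository to follow a .hg/sharedpath pointer: if that file exists, the store is opened from the directory it names instead of the repository's own .hg. A hand-rolled sketch of what a share built on this amounts to on disk, assuming the source repository uses the default revlogv1/store/fncache layout; all paths are illustrative:

    import os

    def makeshare(source, dest):
        # roughly what a share command built on this patch would do:
        # a fresh .hg that keeps its own dirstate but points its store
        # at the source repository via the new 'sharedpath' file
        sourcehg = os.path.join(os.path.realpath(source), '.hg')
        desthg = os.path.join(dest, '.hg')
        os.makedirs(desthg)
        # 'shared' in requires makes clients without this patch refuse
        # the repo instead of misreading it
        reqfile = open(os.path.join(desthg, 'requires'), 'w')
        reqfile.write('revlogv1\nstore\nfncache\nshared\n')
        reqfile.close()
        # no trailing newline: __init__ below realpaths the raw contents
        sp = open(os.path.join(desthg, 'sharedpath'), 'w')
        sp.write(sourcehg)
        sp.close()

    makeshare('/path/to/original', '/path/to/share')

After this, opening the share reads history from the original's store; an update would then populate the share's working copy.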
@@ -1,2149 +1,2160 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2, incorporated herein by reference.
7 7
8 8 from node import bin, hex, nullid, nullrev, short
9 9 from i18n import _
10 10 import repo, changegroup
11 11 import changelog, dirstate, filelog, manifest, context
12 12 import lock, transaction, store, encoding
13 13 import util, extensions, hook, error
14 14 import match as match_
15 15 import merge as merge_
16 16 from lock import release
17 17 import weakref, stat, errno, os, time, inspect
18 18 propertycache = util.propertycache
19 19
20 20 class localrepository(repo.repository):
21 21 capabilities = set(('lookup', 'changegroupsubset', 'branchmap'))
22 - supported = set('revlogv1 store fncache'.split())
22 + supported = set('revlogv1 store fncache shared'.split())
23 23
24 24 def __init__(self, baseui, path=None, create=0):
25 25 repo.repository.__init__(self)
26 26 self.root = os.path.realpath(path)
27 27 self.path = os.path.join(self.root, ".hg")
28 28 self.origroot = path
29 29 self.opener = util.opener(self.path)
30 30 self.wopener = util.opener(self.root)
31 31 self.baseui = baseui
32 32 self.ui = baseui.copy()
33 33
34 34 try:
35 35 self.ui.readconfig(self.join("hgrc"), self.root)
36 36 extensions.loadall(self.ui)
37 37 except IOError:
38 38 pass
39 39
40 40 if not os.path.isdir(self.path):
41 41 if create:
42 42 if not os.path.exists(path):
43 43 os.mkdir(path)
44 44 os.mkdir(self.path)
45 45 requirements = ["revlogv1"]
46 46 if self.ui.configbool('format', 'usestore', True):
47 47 os.mkdir(os.path.join(self.path, "store"))
48 48 requirements.append("store")
49 49 if self.ui.configbool('format', 'usefncache', True):
50 50 requirements.append("fncache")
51 51 # create an invalid changelog
52 52 self.opener("00changelog.i", "a").write(
53 53 '\0\0\0\2' # represents revlogv2
54 54 ' dummy changelog to prevent using the old repo layout'
55 55 )
56 56 reqfile = self.opener("requires", "w")
57 57 for r in requirements:
58 58 reqfile.write("%s\n" % r)
59 59 reqfile.close()
60 60 else:
61 61 raise error.RepoError(_("repository %s not found") % path)
62 62 elif create:
63 63 raise error.RepoError(_("repository %s already exists") % path)
64 64 else:
65 65 # find requirements
66 66 requirements = set()
67 67 try:
68 68 requirements = set(self.opener("requires").read().splitlines())
69 69 except IOError, inst:
70 70 if inst.errno != errno.ENOENT:
71 71 raise
72 72 for r in requirements - self.supported:
73 73 raise error.RepoError(_("requirement '%s' not supported") % r)
74 74
75 - self.store = store.store(requirements, self.path, util.opener)
75 + self.sharedpath = self.path
76 + try:
77 + s = os.path.realpath(self.opener("sharedpath").read())
78 + if not os.path.exists(s):
79 + raise error.RepoError(
80 + _('.hg/sharedpath points to nonexistent directory %s') % s)
81 + self.sharedpath = s
82 + except IOError, inst:
83 + if inst.errno != errno.ENOENT:
84 + raise
85 +
86 + self.store = store.store(requirements, self.sharedpath, util.opener)
76 87 self.spath = self.store.path
77 88 self.sopener = self.store.opener
78 89 self.sjoin = self.store.join
79 90 self.opener.createmode = self.store.createmode
80 91
81 92 self.tagscache = None
82 93 self._tagstypecache = None
83 94 self.branchcache = None
84 95 self._ubranchcache = None # UTF-8 version of branchcache
85 96 self._branchcachetip = None
86 97 self.nodetagscache = None
87 98 self.filterpats = {}
88 99 self._datafilters = {}
89 100 self._transref = self._lockref = self._wlockref = None
90 101
91 102 @propertycache
92 103 def changelog(self):
93 104 c = changelog.changelog(self.sopener)
94 105 if 'HG_PENDING' in os.environ:
95 106 p = os.environ['HG_PENDING']
96 107 if p.startswith(self.root):
97 108 c.readpending('00changelog.i.a')
98 109 self.sopener.defversion = c.version
99 110 return c
100 111
101 112 @propertycache
102 113 def manifest(self):
103 114 return manifest.manifest(self.sopener)
104 115
105 116 @propertycache
106 117 def dirstate(self):
107 118 return dirstate.dirstate(self.opener, self.ui, self.root)
108 119
109 120 def __getitem__(self, changeid):
110 121 if changeid is None:
111 122 return context.workingctx(self)
112 123 return context.changectx(self, changeid)
113 124
114 125 def __nonzero__(self):
115 126 return True
116 127
117 128 def __len__(self):
118 129 return len(self.changelog)
119 130
120 131 def __iter__(self):
121 132 for i in xrange(len(self)):
122 133 yield i
123 134
124 135 def url(self):
125 136 return 'file:' + self.root
126 137
127 138 def hook(self, name, throw=False, **args):
128 139 return hook.hook(self.ui, self, name, throw, **args)
129 140
130 141 tag_disallowed = ':\r\n'
131 142
132 143 def _tag(self, names, node, message, local, user, date, extra={}):
133 144 if isinstance(names, str):
134 145 allchars = names
135 146 names = (names,)
136 147 else:
137 148 allchars = ''.join(names)
138 149 for c in self.tag_disallowed:
139 150 if c in allchars:
140 151 raise util.Abort(_('%r cannot be used in a tag name') % c)
141 152
142 153 for name in names:
143 154 self.hook('pretag', throw=True, node=hex(node), tag=name,
144 155 local=local)
145 156
146 157 def writetags(fp, names, munge, prevtags):
147 158 fp.seek(0, 2)
148 159 if prevtags and prevtags[-1] != '\n':
149 160 fp.write('\n')
150 161 for name in names:
151 162 m = munge and munge(name) or name
152 163 if self._tagstypecache and name in self._tagstypecache:
153 164 old = self.tagscache.get(name, nullid)
154 165 fp.write('%s %s\n' % (hex(old), m))
155 166 fp.write('%s %s\n' % (hex(node), m))
156 167 fp.close()
157 168
158 169 prevtags = ''
159 170 if local:
160 171 try:
161 172 fp = self.opener('localtags', 'r+')
162 173 except IOError:
163 174 fp = self.opener('localtags', 'a')
164 175 else:
165 176 prevtags = fp.read()
166 177
167 178 # local tags are stored in the current charset
168 179 writetags(fp, names, None, prevtags)
169 180 for name in names:
170 181 self.hook('tag', node=hex(node), tag=name, local=local)
171 182 return
172 183
173 184 try:
174 185 fp = self.wfile('.hgtags', 'rb+')
175 186 except IOError:
176 187 fp = self.wfile('.hgtags', 'ab')
177 188 else:
178 189 prevtags = fp.read()
179 190
180 191 # committed tags are stored in UTF-8
181 192 writetags(fp, names, encoding.fromlocal, prevtags)
182 193
183 194 if '.hgtags' not in self.dirstate:
184 195 self.add(['.hgtags'])
185 196
186 197 m = match_.exact(self.root, '', ['.hgtags'])
187 198 tagnode = self.commit(message, user, date, extra=extra, match=m)
188 199
189 200 for name in names:
190 201 self.hook('tag', node=hex(node), tag=name, local=local)
191 202
192 203 return tagnode
193 204
194 205 def tag(self, names, node, message, local, user, date):
195 206 '''tag a revision with one or more symbolic names.
196 207
197 208 names is a list of strings or, when adding a single tag, names may be a
198 209 string.
199 210
200 211 if local is True, the tags are stored in a per-repository file.
201 212 otherwise, they are stored in the .hgtags file, and a new
202 213 changeset is committed with the change.
203 214
204 215 keyword arguments:
205 216
206 217 local: whether to store tags in non-version-controlled file
207 218 (default False)
208 219
209 220 message: commit message to use if committing
210 221
211 222 user: name of user to use if committing
212 223
213 224 date: date tuple to use if committing'''
214 225
215 226 for x in self.status()[:5]:
216 227 if '.hgtags' in x:
217 228 raise util.Abort(_('working copy of .hgtags is changed '
218 229 '(please commit .hgtags manually)'))
219 230
220 231 self.tags() # instantiate the cache
221 232 self._tag(names, node, message, local, user, date)
222 233
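A usage note for the API above — a minimal sketch, assuming an existing repository at an illustrative path:

    from mercurial import hg, ui

    repo = hg.repository(ui.ui(), '/path/to/repo')  # illustrative path
    node = repo.lookup('tip')

    # local tag: appended to .hg/localtags, nothing is committed
    repo.tag('seen', node, 'tag message', True, None, None)

    # global tag(s): .hgtags is rewritten and committed on the spot
    repo.tag(['v1.0'], node, 'Added tag v1.0', False,
             'an.editor@example.com', None)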
223 234 def tags(self):
224 235 '''return a mapping of tag to node'''
225 236 if self.tagscache:
226 237 return self.tagscache
227 238
228 239 globaltags = {}
229 240 tagtypes = {}
230 241
231 242 def readtags(lines, fn, tagtype):
232 243 filetags = {}
233 244 count = 0
234 245
235 246 def warn(msg):
236 247 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
237 248
238 249 for l in lines:
239 250 count += 1
240 251 if not l:
241 252 continue
242 253 s = l.split(" ", 1)
243 254 if len(s) != 2:
244 255 warn(_("cannot parse entry"))
245 256 continue
246 257 node, key = s
247 258 key = encoding.tolocal(key.strip()) # stored in UTF-8
248 259 try:
249 260 bin_n = bin(node)
250 261 except TypeError:
251 262 warn(_("node '%s' is not well formed") % node)
252 263 continue
253 264 if bin_n not in self.changelog.nodemap:
254 265 warn(_("tag '%s' refers to unknown node") % key)
255 266 continue
256 267
257 268 h = []
258 269 if key in filetags:
259 270 n, h = filetags[key]
260 271 h.append(n)
261 272 filetags[key] = (bin_n, h)
262 273
263 274 for k, nh in filetags.iteritems():
264 275 if k not in globaltags:
265 276 globaltags[k] = nh
266 277 tagtypes[k] = tagtype
267 278 continue
268 279
269 280 # we prefer the global tag if:
270 281 # it supersedes us OR
271 282 # mutual supersedes and it has a higher rank
272 283 # otherwise we win because we're tip-most
273 284 an, ah = nh
274 285 bn, bh = globaltags[k]
275 286 if (bn != an and an in bh and
276 287 (bn not in ah or len(bh) > len(ah))):
277 288 an = bn
278 289 ah.extend([n for n in bh if n not in ah])
279 290 globaltags[k] = an, ah
280 291 tagtypes[k] = tagtype
281 292
282 293 # read the tags file from each head, ending with the tip
283 294 f = None
284 295 for rev, node, fnode in self._hgtagsnodes():
285 296 f = (f and f.filectx(fnode) or
286 297 self.filectx('.hgtags', fileid=fnode))
287 298 readtags(f.data().splitlines(), f, "global")
288 299
289 300 try:
290 301 data = encoding.fromlocal(self.opener("localtags").read())
291 302 # localtags are stored in the local character set
292 303 # while the internal tag table is stored in UTF-8
293 304 readtags(data.splitlines(), "localtags", "local")
294 305 except IOError:
295 306 pass
296 307
297 308 self.tagscache = {}
298 309 self._tagstypecache = {}
299 310 for k, nh in globaltags.iteritems():
300 311 n = nh[0]
301 312 if n != nullid:
302 313 self.tagscache[k] = n
303 314 self._tagstypecache[k] = tagtypes[k]
304 315 self.tagscache['tip'] = self.changelog.tip()
305 316 return self.tagscache
306 317
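For reference, the format readtags() parses — one '<40-digit hex node> <tag name>' pair per line, split on the first space only, so tag names may contain spaces; a later line for the same tag supersedes an earlier one. The node values here are made up:

    0123456789abcdef0123456789abcdef01234567 v1.0
    0123456789abcdef0123456789abcdef01234567 release candidate 2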
307 318 def tagtype(self, tagname):
308 319 '''
309 320 return the type of the given tag. result can be:
310 321
311 322 'local' : a local tag
312 323 'global' : a global tag
313 324 None : tag does not exist
314 325 '''
315 326
316 327 self.tags()
317 328
318 329 return self._tagstypecache.get(tagname)
319 330
320 331 def _hgtagsnodes(self):
321 332 last = {}
322 333 ret = []
323 334 for node in reversed(self.heads()):
324 335 c = self[node]
325 336 rev = c.rev()
326 337 try:
327 338 fnode = c.filenode('.hgtags')
328 339 except error.LookupError:
329 340 continue
330 341 ret.append((rev, node, fnode))
331 342 if fnode in last:
332 343 ret[last[fnode]] = None
333 344 last[fnode] = len(ret) - 1
334 345 return [item for item in ret if item]
335 346
336 347 def tagslist(self):
337 348 '''return a list of tags ordered by revision'''
338 349 l = []
339 350 for t, n in self.tags().iteritems():
340 351 try:
341 352 r = self.changelog.rev(n)
342 353 except:
343 354 r = -2 # sort to the beginning of the list if unknown
344 355 l.append((r, t, n))
345 356 return [(t, n) for r, t, n in sorted(l)]
346 357
347 358 def nodetags(self, node):
348 359 '''return the tags associated with a node'''
349 360 if not self.nodetagscache:
350 361 self.nodetagscache = {}
351 362 for t, n in self.tags().iteritems():
352 363 self.nodetagscache.setdefault(n, []).append(t)
353 364 return self.nodetagscache.get(node, [])
354 365
355 366 def _branchtags(self, partial, lrev):
356 367 # TODO: rename this function?
357 368 tiprev = len(self) - 1
358 369 if lrev != tiprev:
359 370 self._updatebranchcache(partial, lrev+1, tiprev+1)
360 371 self._writebranchcache(partial, self.changelog.tip(), tiprev)
361 372
362 373 return partial
363 374
364 375 def branchmap(self):
365 376 tip = self.changelog.tip()
366 377 if self.branchcache is not None and self._branchcachetip == tip:
367 378 return self.branchcache
368 379
369 380 oldtip = self._branchcachetip
370 381 self._branchcachetip = tip
371 382 if self.branchcache is None:
372 383 self.branchcache = {} # avoid recursion in changectx
373 384 else:
374 385 self.branchcache.clear() # keep using the same dict
375 386 if oldtip is None or oldtip not in self.changelog.nodemap:
376 387 partial, last, lrev = self._readbranchcache()
377 388 else:
378 389 lrev = self.changelog.rev(oldtip)
379 390 partial = self._ubranchcache
380 391
381 392 self._branchtags(partial, lrev)
382 393 # this private cache holds all heads (not just tips)
383 394 self._ubranchcache = partial
384 395
385 396 # the branch cache is stored on disk as UTF-8, but in the local
386 397 # charset internally
387 398 for k, v in partial.iteritems():
388 399 self.branchcache[encoding.tolocal(k)] = v
389 400 return self.branchcache
390 401
391 402
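A sketch of what branchmap() hands back, assuming a repository at an illustrative path:

    from mercurial import hg, ui

    repo = hg.repository(ui.ui(), '/path/to/repo')  # illustrative path
    for branch, heads in repo.branchmap().iteritems():
        # branch: name in the local charset; heads: all head nodes on
        # that branch, ordered from lowest to highest revision --
        # branchtags() below reduces each list to one tipmost head
        print branch, [repo.changelog.rev(h) for h in heads]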
392 403 def branchtags(self):
393 404 '''return a dict where branch names map to the tipmost head of
394 405 the branch; open heads come before closed'''
395 406 bt = {}
396 407 for bn, heads in self.branchmap().iteritems():
397 408 head = None
398 409 for i in range(len(heads)-1, -1, -1):
399 410 h = heads[i]
400 411 if 'close' not in self.changelog.read(h)[5]:
401 412 head = h
402 413 break
403 414 # no open heads were found
404 415 if head is None:
405 416 head = heads[-1]
406 417 bt[bn] = head
407 418 return bt
408 419
409 420
410 421 def _readbranchcache(self):
411 422 partial = {}
412 423 try:
413 424 f = self.opener("branchheads.cache")
414 425 lines = f.read().split('\n')
415 426 f.close()
416 427 except (IOError, OSError):
417 428 return {}, nullid, nullrev
418 429
419 430 try:
420 431 last, lrev = lines.pop(0).split(" ", 1)
421 432 last, lrev = bin(last), int(lrev)
422 433 if lrev >= len(self) or self[lrev].node() != last:
423 434 # invalidate the cache
424 435 raise ValueError('invalidating branch cache (tip differs)')
425 436 for l in lines:
426 437 if not l: continue
427 438 node, label = l.split(" ", 1)
428 439 partial.setdefault(label.strip(), []).append(bin(node))
429 440 except KeyboardInterrupt:
430 441 raise
431 442 except Exception, inst:
432 443 if self.ui.debugflag:
433 444 self.ui.warn(str(inst), '\n')
434 445 partial, last, lrev = {}, nullid, nullrev
435 446 return partial, last, lrev
436 447
437 448 def _writebranchcache(self, branches, tip, tiprev):
438 449 try:
439 450 f = self.opener("branchheads.cache", "w", atomictemp=True)
440 451 f.write("%s %s\n" % (hex(tip), tiprev))
441 452 for label, nodes in branches.iteritems():
442 453 for node in nodes:
443 454 f.write("%s %s\n" % (hex(node), label))
444 455 f.rename()
445 456 except (IOError, OSError):
446 457 pass
447 458
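The cache file written above, for reference: the first line records the tip node and revision the cache was computed against (_readbranchcache discards the whole file when that line no longer matches the changelog), and each following line is one head node plus its branch name. The hashes here are made up:

    0123456789abcdef0123456789abcdef01234567 1042
    76543210fedcba9876543210fedcba9876543210 default
    89abcdef0123456789abcdef0123456789abcdef stable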
448 459 def _updatebranchcache(self, partial, start, end):
449 460 for r in xrange(start, end):
450 461 c = self[r]
451 462 b = c.branch()
452 463 bheads = partial.setdefault(b, [])
453 464 bheads.append(c.node())
454 465 for p in c.parents():
455 466 pn = p.node()
456 467 if pn in bheads:
457 468 bheads.remove(pn)
458 469
459 470 def lookup(self, key):
460 471 if isinstance(key, int):
461 472 return self.changelog.node(key)
462 473 elif key == '.':
463 474 return self.dirstate.parents()[0]
464 475 elif key == 'null':
465 476 return nullid
466 477 elif key == 'tip':
467 478 return self.changelog.tip()
468 479 n = self.changelog._match(key)
469 480 if n:
470 481 return n
471 482 if key in self.tags():
472 483 return self.tags()[key]
473 484 if key in self.branchtags():
474 485 return self.branchtags()[key]
475 486 n = self.changelog._partialmatch(key)
476 487 if n:
477 488 return n
478 489
479 490 # can't find key, check if it might have come from damaged dirstate
480 491 if key in self.dirstate.parents():
481 492 raise error.Abort(_("working directory has unknown parent '%s'!")
482 493 % short(key))
483 494 try:
484 495 if len(key) == 20:
485 496 key = hex(key)
486 497 except:
487 498 pass
488 499 raise error.RepoError(_("unknown revision '%s'") % key)
489 500
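Summarizing the fall-through order above in a usage sketch (illustrative repo path and values); every successful call returns a 20-byte binary node:

    from mercurial import hg, ui

    repo = hg.repository(ui.ui(), '/path/to/repo')  # illustrative path
    repo.lookup(0)          # plain int: changelog revision number
    repo.lookup('.')        # first dirstate parent
    repo.lookup('null')     # the null revision
    repo.lookup('tip')      # changelog tip
    repo.lookup('v1.0')     # a tag, if present
    repo.lookup('default')  # a branch name, via branchtags()
    repo.lookup('0123abc')  # finally, an unambiguous hex node prefix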
490 501 def local(self):
491 502 return True
492 503
493 504 def join(self, f):
494 505 return os.path.join(self.path, f)
495 506
496 507 def wjoin(self, f):
497 508 return os.path.join(self.root, f)
498 509
499 510 def rjoin(self, f):
500 511 return os.path.join(self.root, util.pconvert(f))
501 512
502 513 def file(self, f):
503 514 if f[0] == '/':
504 515 f = f[1:]
505 516 return filelog.filelog(self.sopener, f)
506 517
507 518 def changectx(self, changeid):
508 519 return self[changeid]
509 520
510 521 def parents(self, changeid=None):
511 522 '''get list of changectxs for parents of changeid'''
512 523 return self[changeid].parents()
513 524
514 525 def filectx(self, path, changeid=None, fileid=None):
515 526 """changeid can be a changeset revision, node, or tag.
516 527 fileid can be a file revision or node."""
517 528 return context.filectx(self, path, changeid, fileid)
518 529
519 530 def getcwd(self):
520 531 return self.dirstate.getcwd()
521 532
522 533 def pathto(self, f, cwd=None):
523 534 return self.dirstate.pathto(f, cwd)
524 535
525 536 def wfile(self, f, mode='r'):
526 537 return self.wopener(f, mode)
527 538
528 539 def _link(self, f):
529 540 return os.path.islink(self.wjoin(f))
530 541
531 542 def _filter(self, filter, filename, data):
532 543 if filter not in self.filterpats:
533 544 l = []
534 545 for pat, cmd in self.ui.configitems(filter):
535 546 if cmd == '!':
536 547 continue
537 548 mf = match_.match(self.root, '', [pat])
538 549 fn = None
539 550 params = cmd
540 551 for name, filterfn in self._datafilters.iteritems():
541 552 if cmd.startswith(name):
542 553 fn = filterfn
543 554 params = cmd[len(name):].lstrip()
544 555 break
545 556 if not fn:
546 557 fn = lambda s, c, **kwargs: util.filter(s, c)
547 558 # Wrap old filters not supporting keyword arguments
548 559 if not inspect.getargspec(fn)[2]:
549 560 oldfn = fn
550 561 fn = lambda s, c, **kwargs: oldfn(s, c)
551 562 l.append((mf, fn, params))
552 563 self.filterpats[filter] = l
553 564
554 565 for mf, fn, cmd in self.filterpats[filter]:
555 566 if mf(filename):
556 567 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
557 568 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
558 569 break
559 570
560 571 return data
561 572
562 573 def adddatafilter(self, name, filter):
563 574 self._datafilters[name] = filter
564 575
565 576 def wread(self, filename):
566 577 if self._link(filename):
567 578 data = os.readlink(self.wjoin(filename))
568 579 else:
569 580 data = self.wopener(filename, 'r').read()
570 581 return self._filter("encode", filename, data)
571 582
572 583 def wwrite(self, filename, data, flags):
573 584 data = self._filter("decode", filename, data)
574 585 try:
575 586 os.unlink(self.wjoin(filename))
576 587 except OSError:
577 588 pass
578 589 if 'l' in flags:
579 590 self.wopener.symlink(data, filename)
580 591 else:
581 592 self.wopener(filename, 'w').write(data)
582 593 if 'x' in flags:
583 594 util.set_flags(self.wjoin(filename), False, True)
584 595
585 596 def wwritedata(self, filename, data):
586 597 return self._filter("decode", filename, data)
587 598
588 599 def transaction(self):
589 600 tr = self._transref and self._transref() or None
590 601 if tr and tr.running():
591 602 return tr.nest()
592 603
593 604 # abort here if the journal already exists
594 605 if os.path.exists(self.sjoin("journal")):
595 606 raise error.RepoError(_("journal already exists - run hg recover"))
596 607
597 608 # save dirstate for rollback
598 609 try:
599 610 ds = self.opener("dirstate").read()
600 611 except IOError:
601 612 ds = ""
602 613 self.opener("journal.dirstate", "w").write(ds)
603 614 self.opener("journal.branch", "w").write(self.dirstate.branch())
604 615
605 616 renames = [(self.sjoin("journal"), self.sjoin("undo")),
606 617 (self.join("journal.dirstate"), self.join("undo.dirstate")),
607 618 (self.join("journal.branch"), self.join("undo.branch"))]
608 619 tr = transaction.transaction(self.ui.warn, self.sopener,
609 620 self.sjoin("journal"),
610 621 aftertrans(renames),
611 622 self.store.createmode)
612 623 self._transref = weakref.ref(tr)
613 624 return tr
614 625
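How callers in this file drive the transaction (commitctx below follows the same shape) — a minimal sketch with an illustrative path:

    from mercurial import hg, ui

    repo = hg.repository(ui.ui(), '/path/to/repo')  # illustrative path
    lock = repo.lock()
    try:
        tr = repo.transaction()
        try:
            pass  # ... append to revlogs through repo.sopener ...
            tr.close()  # success: journal* files become the undo* files
        finally:
            del tr      # dropping an unclosed transaction aborts it
    finally:
        lock.release()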
615 626 def recover(self):
616 627 lock = self.lock()
617 628 try:
618 629 if os.path.exists(self.sjoin("journal")):
619 630 self.ui.status(_("rolling back interrupted transaction\n"))
620 631 transaction.rollback(self.sopener, self.sjoin("journal"), self.ui.warn)
621 632 self.invalidate()
622 633 return True
623 634 else:
624 635 self.ui.warn(_("no interrupted transaction available\n"))
625 636 return False
626 637 finally:
627 638 lock.release()
628 639
629 640 def rollback(self):
630 641 wlock = lock = None
631 642 try:
632 643 wlock = self.wlock()
633 644 lock = self.lock()
634 645 if os.path.exists(self.sjoin("undo")):
635 646 self.ui.status(_("rolling back last transaction\n"))
636 647 transaction.rollback(self.sopener, self.sjoin("undo"), self.ui.warn)
637 648 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
638 649 try:
639 650 branch = self.opener("undo.branch").read()
640 651 self.dirstate.setbranch(branch)
641 652 except IOError:
642 653 self.ui.warn(_("Named branch could not be reset, "
643 654 "current branch still is: %s\n")
644 655 % encoding.tolocal(self.dirstate.branch()))
645 656 self.invalidate()
646 657 self.dirstate.invalidate()
647 658 else:
648 659 self.ui.warn(_("no rollback information available\n"))
649 660 finally:
650 661 release(lock, wlock)
651 662
652 663 def invalidate(self):
653 664 for a in "changelog manifest".split():
654 665 if a in self.__dict__:
655 666 delattr(self, a)
656 667 self.tagscache = None
657 668 self._tagstypecache = None
658 669 self.nodetagscache = None
659 670 self.branchcache = None
660 671 self._ubranchcache = None
661 672 self._branchcachetip = None
662 673
663 674 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
664 675 try:
665 676 l = lock.lock(lockname, 0, releasefn, desc=desc)
666 677 except error.LockHeld, inst:
667 678 if not wait:
668 679 raise
669 680 self.ui.warn(_("waiting for lock on %s held by %r\n") %
670 681 (desc, inst.locker))
671 682 # default to 600 seconds timeout
672 683 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
673 684 releasefn, desc=desc)
674 685 if acquirefn:
675 686 acquirefn()
676 687 return l
677 688
678 689 def lock(self, wait=True):
679 690 l = self._lockref and self._lockref()
680 691 if l is not None and l.held:
681 692 l.lock()
682 693 return l
683 694
684 695 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
685 696 _('repository %s') % self.origroot)
686 697 self._lockref = weakref.ref(l)
687 698 return l
688 699
689 700 def wlock(self, wait=True):
690 701 l = self._wlockref and self._wlockref()
691 702 if l is not None and l.held:
692 703 l.lock()
693 704 return l
694 705
695 706 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
696 707 self.dirstate.invalidate, _('working directory of %s') %
697 708 self.origroot)
698 709 self._wlockref = weakref.ref(l)
699 710 return l
700 711
701 712 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
702 713 """
703 714 commit an individual file as part of a larger transaction
704 715 """
705 716
706 717 fname = fctx.path()
707 718 text = fctx.data()
708 719 flog = self.file(fname)
709 720 fparent1 = manifest1.get(fname, nullid)
710 721 fparent2 = fparent2o = manifest2.get(fname, nullid)
711 722
712 723 meta = {}
713 724 copy = fctx.renamed()
714 725 if copy and copy[0] != fname:
715 726 # Mark the new revision of this file as a copy of another
716 727 # file. This copy data will effectively act as a parent
717 728 # of this new revision. If this is a merge, the first
718 729 # parent will be the nullid (meaning "look up the copy data")
719 730 # and the second one will be the other parent. For example:
720 731 #
721 732 # 0 --- 1 --- 3 rev1 changes file foo
722 733 # \ / rev2 renames foo to bar and changes it
723 734 # \- 2 -/ rev3 should have bar with all changes and
724 735 # should record that bar descends from
725 736 # bar in rev2 and foo in rev1
726 737 #
727 738 # this allows this merge to succeed:
728 739 #
729 740 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
730 741 # \ / merging rev3 and rev4 should use bar@rev2
731 742 # \- 2 --- 4 as the merge base
732 743 #
733 744
734 745 cfname = copy[0]
735 746 crev = manifest1.get(cfname)
736 747 newfparent = fparent2
737 748
738 749 if manifest2: # branch merge
739 750 if fparent2 == nullid or crev is None: # copied on remote side
740 751 if cfname in manifest2:
741 752 crev = manifest2[cfname]
742 753 newfparent = fparent1
743 754
744 755 # find source in nearest ancestor if we've lost track
745 756 if not crev:
746 757 self.ui.debug(_(" %s: searching for copy revision for %s\n") %
747 758 (fname, cfname))
748 759 for ancestor in self['.'].ancestors():
749 760 if cfname in ancestor:
750 761 crev = ancestor[cfname].filenode()
751 762 break
752 763
753 764 self.ui.debug(_(" %s: copy %s:%s\n") % (fname, cfname, hex(crev)))
754 765 meta["copy"] = cfname
755 766 meta["copyrev"] = hex(crev)
756 767 fparent1, fparent2 = nullid, newfparent
757 768 elif fparent2 != nullid:
758 769 # is one parent an ancestor of the other?
759 770 fparentancestor = flog.ancestor(fparent1, fparent2)
760 771 if fparentancestor == fparent1:
761 772 fparent1, fparent2 = fparent2, nullid
762 773 elif fparentancestor == fparent2:
763 774 fparent2 = nullid
764 775
765 776 # is the file changed?
766 777 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
767 778 changelist.append(fname)
768 779 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
769 780
770 781 # are just the flags changed during merge?
771 782 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
772 783 changelist.append(fname)
773 784
774 785 return fparent1
775 786
776 787 def commit(self, text="", user=None, date=None, match=None, force=False,
777 788 editor=False, extra={}):
778 789 """Add a new revision to current repository.
779 790
780 791 Revision information is gathered from the working directory,
781 792 match can be used to filter the committed files. If editor is
782 793 supplied, it is called to get a commit message.
783 794 """
784 795
785 796 def fail(f, msg):
786 797 raise util.Abort('%s: %s' % (f, msg))
787 798
788 799 if not match:
789 800 match = match_.always(self.root, '')
790 801
791 802 if not force:
792 803 vdirs = []
793 804 match.dir = vdirs.append
794 805 match.bad = fail
795 806
796 807 wlock = self.wlock()
797 808 try:
798 809 p1, p2 = self.dirstate.parents()
799 810
800 811 if (not force and p2 != nullid and match and
801 812 (match.files() or match.anypats())):
802 813 raise util.Abort(_('cannot partially commit a merge '
803 814 '(do not specify files or patterns)'))
804 815
805 816 changes = self.status(match=match, clean=force)
806 817 if force:
807 818 changes[0].extend(changes[6]) # mq may commit unchanged files
808 819
809 820 # make sure all explicit patterns are matched
810 821 if not force and match.files():
811 822 matched = set(changes[0] + changes[1] + changes[2])
812 823
813 824 for f in match.files():
814 825 if f == '.' or f in matched: # matched
815 826 continue
816 827 if f in changes[3]: # missing
817 828 fail(f, _('file not found!'))
818 829 if f in vdirs: # visited directory
819 830 d = f + '/'
820 831 for mf in matched:
821 832 if mf.startswith(d):
822 833 break
823 834 else:
824 835 fail(f, _("no match under directory!"))
825 836 elif f not in self.dirstate:
826 837 fail(f, _("file not tracked!"))
827 838
828 839 if (not force and not extra.get("close") and p2 == nullid
829 840 and not (changes[0] or changes[1] or changes[2])
830 841 and self[None].branch() == self['.'].branch()):
831 842 self.ui.status(_("nothing changed\n"))
832 843 return None
833 844
834 845 ms = merge_.mergestate(self)
835 846 for f in changes[0]:
836 847 if f in ms and ms[f] == 'u':
837 848 raise util.Abort(_("unresolved merge conflicts "
838 849 "(see hg resolve)"))
839 850
840 851 cctx = context.workingctx(self, (p1, p2), text, user, date,
841 852 extra, changes)
842 853 if editor:
843 854 cctx._text = editor(self, cctx)
844 855 ret = self.commitctx(cctx, True)
845 856
846 857 # update dirstate and mergestate
847 858 for f in changes[0] + changes[1]:
848 859 self.dirstate.normal(f)
849 860 for f in changes[2]:
850 861 self.dirstate.forget(f)
851 862 self.dirstate.setparents(ret)
852 863 ms.reset()
853 864
854 865 return ret
855 866
856 867 finally:
857 868 wlock.release()
858 869
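A usage sketch for the two common shapes, full and partial commits; path, file name, and user are illustrative:

    from mercurial import hg, ui
    from mercurial import match as match_

    repo = hg.repository(ui.ui(), '/path/to/repo')  # illustrative path

    # commit everything modified/added/removed in the working directory
    node = repo.commit(text='fix parser', user='editor@example.com')

    # partial commit: restrict to explicit files with a match object,
    # the same way _tag() above commits just .hgtags
    m = match_.exact(repo.root, '', ['src/parser.py'])
    node = repo.commit(text='fix parser only', match=m)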
859 870 def commitctx(self, ctx, error=False):
860 871 """Add a new revision to current repository.
861 872
862 873 Revision information is passed via the context argument.
863 874 """
864 875
865 876 tr = lock = None
866 877 removed = ctx.removed()
867 878 p1, p2 = ctx.p1(), ctx.p2()
868 879 m1 = p1.manifest().copy()
869 880 m2 = p2.manifest()
870 881 user = ctx.user()
871 882
872 883 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
873 884 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
874 885
875 886 lock = self.lock()
876 887 try:
877 888 tr = self.transaction()
878 889 trp = weakref.proxy(tr)
879 890
880 891 # check in files
881 892 new = {}
882 893 changed = []
883 894 linkrev = len(self)
884 895 for f in sorted(ctx.modified() + ctx.added()):
885 896 self.ui.note(f + "\n")
886 897 try:
887 898 fctx = ctx[f]
888 899 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
889 900 changed)
890 901 m1.set(f, fctx.flags())
891 902 except (OSError, IOError):
892 903 if error:
893 904 self.ui.warn(_("trouble committing %s!\n") % f)
894 905 raise
895 906 else:
896 907 removed.append(f)
897 908
898 909 # update manifest
899 910 m1.update(new)
900 911 removed = [f for f in sorted(removed) if f in m1 or f in m2]
901 912 drop = [f for f in removed if f in m1]
902 913 for f in drop:
903 914 del m1[f]
904 915 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
905 916 p2.manifestnode(), (new, drop))
906 917
907 918 # update changelog
908 919 self.changelog.delayupdate()
909 920 n = self.changelog.add(mn, changed + removed, ctx.description(),
910 921 trp, p1.node(), p2.node(),
911 922 user, ctx.date(), ctx.extra().copy())
912 923 p = lambda: self.changelog.writepending() and self.root or ""
913 924 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
914 925 parent2=xp2, pending=p)
915 926 self.changelog.finalize(trp)
916 927 tr.close()
917 928
918 929 if self.branchcache:
919 930 self.branchtags()
920 931
921 932 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
922 933 return n
923 934 finally:
924 935 del tr
925 936 lock.release()
926 937
927 938 def walk(self, match, node=None):
928 939 '''
929 940 walk recursively through the directory tree or a given
930 941 changeset, finding all files matched by the match
931 942 function
932 943 '''
933 944 return self[node].walk(match)
934 945
935 946 def status(self, node1='.', node2=None, match=None,
936 947 ignored=False, clean=False, unknown=False):
937 948 """return status of files between two nodes or node and working directory
938 949
939 950 If node1 is None, use the first dirstate parent instead.
940 951 If node2 is None, compare node1 with working directory.
941 952 """
942 953
943 954 def mfmatches(ctx):
944 955 mf = ctx.manifest().copy()
945 956 for fn in mf.keys():
946 957 if not match(fn):
947 958 del mf[fn]
948 959 return mf
949 960
950 961 if isinstance(node1, context.changectx):
951 962 ctx1 = node1
952 963 else:
953 964 ctx1 = self[node1]
954 965 if isinstance(node2, context.changectx):
955 966 ctx2 = node2
956 967 else:
957 968 ctx2 = self[node2]
958 969
959 970 working = ctx2.rev() is None
960 971 parentworking = working and ctx1 == self['.']
961 972 match = match or match_.always(self.root, self.getcwd())
962 973 listignored, listclean, listunknown = ignored, clean, unknown
963 974
964 975 # load earliest manifest first for caching reasons
965 976 if not working and ctx2.rev() < ctx1.rev():
966 977 ctx2.manifest()
967 978
968 979 if not parentworking:
969 980 def bad(f, msg):
970 981 if f not in ctx1:
971 982 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
972 983 match.bad = bad
973 984
974 985 if working: # we need to scan the working dir
975 986 s = self.dirstate.status(match, listignored, listclean, listunknown)
976 987 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
977 988
978 989 # check for any possibly clean files
979 990 if parentworking and cmp:
980 991 fixup = []
981 992 # do a full compare of any files that might have changed
982 993 for f in sorted(cmp):
983 994 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
984 995 or ctx1[f].cmp(ctx2[f].data())):
985 996 modified.append(f)
986 997 else:
987 998 fixup.append(f)
988 999
989 1000 if listclean:
990 1001 clean += fixup
991 1002
992 1003 # update dirstate for files that are actually clean
993 1004 if fixup:
994 1005 try:
995 1006 # updating the dirstate is optional
996 1007 # so we don't wait on the lock
997 1008 wlock = self.wlock(False)
998 1009 try:
999 1010 for f in fixup:
1000 1011 self.dirstate.normal(f)
1001 1012 finally:
1002 1013 wlock.release()
1003 1014 except error.LockError:
1004 1015 pass
1005 1016
1006 1017 if not parentworking:
1007 1018 mf1 = mfmatches(ctx1)
1008 1019 if working:
1009 1020 # we are comparing working dir against non-parent
1010 1021 # generate a pseudo-manifest for the working dir
1011 1022 mf2 = mfmatches(self['.'])
1012 1023 for f in cmp + modified + added:
1013 1024 mf2[f] = None
1014 1025 mf2.set(f, ctx2.flags(f))
1015 1026 for f in removed:
1016 1027 if f in mf2:
1017 1028 del mf2[f]
1018 1029 else:
1019 1030 # we are comparing two revisions
1020 1031 deleted, unknown, ignored = [], [], []
1021 1032 mf2 = mfmatches(ctx2)
1022 1033
1023 1034 modified, added, clean = [], [], []
1024 1035 for fn in mf2:
1025 1036 if fn in mf1:
1026 1037 if (mf1.flags(fn) != mf2.flags(fn) or
1027 1038 (mf1[fn] != mf2[fn] and
1028 1039 (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
1029 1040 modified.append(fn)
1030 1041 elif listclean:
1031 1042 clean.append(fn)
1032 1043 del mf1[fn]
1033 1044 else:
1034 1045 added.append(fn)
1035 1046 removed = mf1.keys()
1036 1047
1037 1048 r = modified, added, removed, deleted, unknown, ignored, clean
1038 1049 [l.sort() for l in r]
1039 1050 return r
1040 1051
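The seven lists come back in a fixed order; a minimal unpacking sketch (illustrative path), remembering that the unknown/ignored/clean lists stay empty unless requested:

    from mercurial import hg, ui

    repo = hg.repository(ui.ui(), '/path/to/repo')  # illustrative path
    modified, added, removed, deleted, unknown, ignored, clean = \
        repo.status(unknown=True, ignored=True, clean=True)
    for f in modified:
        print 'M', f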
1041 1052 def add(self, list):
1042 1053 wlock = self.wlock()
1043 1054 try:
1044 1055 rejected = []
1045 1056 for f in list:
1046 1057 p = self.wjoin(f)
1047 1058 try:
1048 1059 st = os.lstat(p)
1049 1060 except:
1050 1061 self.ui.warn(_("%s does not exist!\n") % f)
1051 1062 rejected.append(f)
1052 1063 continue
1053 1064 if st.st_size > 10000000:
1054 1065 self.ui.warn(_("%s: files over 10MB may cause memory and"
1055 1066 " performance problems\n"
1056 1067 "(use 'hg revert %s' to unadd the file)\n")
1057 1068 % (f, f))
1058 1069 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1059 1070 self.ui.warn(_("%s not added: only files and symlinks "
1060 1071 "supported currently\n") % f)
1061 1072 rejected.append(p)
1062 1073 elif self.dirstate[f] in 'amn':
1063 1074 self.ui.warn(_("%s already tracked!\n") % f)
1064 1075 elif self.dirstate[f] == 'r':
1065 1076 self.dirstate.normallookup(f)
1066 1077 else:
1067 1078 self.dirstate.add(f)
1068 1079 return rejected
1069 1080 finally:
1070 1081 wlock.release()
1071 1082
1072 1083 def forget(self, list):
1073 1084 wlock = self.wlock()
1074 1085 try:
1075 1086 for f in list:
1076 1087 if self.dirstate[f] != 'a':
1077 1088 self.ui.warn(_("%s not added!\n") % f)
1078 1089 else:
1079 1090 self.dirstate.forget(f)
1080 1091 finally:
1081 1092 wlock.release()
1082 1093
1083 1094 def remove(self, list, unlink=False):
1084 1095 if unlink:
1085 1096 for f in list:
1086 1097 try:
1087 1098 util.unlink(self.wjoin(f))
1088 1099 except OSError, inst:
1089 1100 if inst.errno != errno.ENOENT:
1090 1101 raise
1091 1102 wlock = self.wlock()
1092 1103 try:
1093 1104 for f in list:
1094 1105 if unlink and os.path.exists(self.wjoin(f)):
1095 1106 self.ui.warn(_("%s still exists!\n") % f)
1096 1107 elif self.dirstate[f] == 'a':
1097 1108 self.dirstate.forget(f)
1098 1109 elif f not in self.dirstate:
1099 1110 self.ui.warn(_("%s not tracked!\n") % f)
1100 1111 else:
1101 1112 self.dirstate.remove(f)
1102 1113 finally:
1103 1114 wlock.release()
1104 1115
1105 1116 def undelete(self, list):
1106 1117 manifests = [self.manifest.read(self.changelog.read(p)[0])
1107 1118 for p in self.dirstate.parents() if p != nullid]
1108 1119 wlock = self.wlock()
1109 1120 try:
1110 1121 for f in list:
1111 1122 if self.dirstate[f] != 'r':
1112 1123 self.ui.warn(_("%s not removed!\n") % f)
1113 1124 else:
1114 1125 m = f in manifests[0] and manifests[0] or manifests[1]
1115 1126 t = self.file(f).read(m[f])
1116 1127 self.wwrite(f, t, m.flags(f))
1117 1128 self.dirstate.normal(f)
1118 1129 finally:
1119 1130 wlock.release()
1120 1131
1121 1132 def copy(self, source, dest):
1122 1133 p = self.wjoin(dest)
1123 1134 if not (os.path.exists(p) or os.path.islink(p)):
1124 1135 self.ui.warn(_("%s does not exist!\n") % dest)
1125 1136 elif not (os.path.isfile(p) or os.path.islink(p)):
1126 1137 self.ui.warn(_("copy failed: %s is not a file or a "
1127 1138 "symbolic link\n") % dest)
1128 1139 else:
1129 1140 wlock = self.wlock()
1130 1141 try:
1131 1142 if self.dirstate[dest] in '?r':
1132 1143 self.dirstate.add(dest)
1133 1144 self.dirstate.copy(source, dest)
1134 1145 finally:
1135 1146 wlock.release()
1136 1147
1137 1148 def heads(self, start=None):
1138 1149 heads = self.changelog.heads(start)
1139 1150 # sort the output in rev descending order
1140 1151 heads = [(-self.changelog.rev(h), h) for h in heads]
1141 1152 return [n for (r, n) in sorted(heads)]
1142 1153
1143 1154 def branchheads(self, branch=None, start=None, closed=False):
1144 1155 if branch is None:
1145 1156 branch = self[None].branch()
1146 1157 branches = self.branchmap()
1147 1158 if branch not in branches:
1148 1159 return []
1149 1160 bheads = branches[branch]
1150 1161 # the cache returns heads ordered lowest to highest
1151 1162 bheads.reverse()
1152 1163 if start is not None:
1153 1164 # filter out the heads that cannot be reached from startrev
1154 1165 bheads = self.changelog.nodesbetween([start], bheads)[2]
1155 1166 if not closed:
1156 1167 bheads = [h for h in bheads if
1157 1168 ('close' not in self.changelog.read(h)[5])]
1158 1169 return bheads
1159 1170
1160 1171 def branches(self, nodes):
1161 1172 if not nodes:
1162 1173 nodes = [self.changelog.tip()]
1163 1174 b = []
1164 1175 for n in nodes:
1165 1176 t = n
1166 1177 while 1:
1167 1178 p = self.changelog.parents(n)
1168 1179 if p[1] != nullid or p[0] == nullid:
1169 1180 b.append((t, n, p[0], p[1]))
1170 1181 break
1171 1182 n = p[0]
1172 1183 return b
1173 1184
1174 1185 def between(self, pairs):
1175 1186 r = []
1176 1187
1177 1188 for top, bottom in pairs:
1178 1189 n, l, i = top, [], 0
1179 1190 f = 1
1180 1191
1181 1192 while n != bottom and n != nullid:
1182 1193 p = self.changelog.parents(n)[0]
1183 1194 if i == f:
1184 1195 l.append(n)
1185 1196 f = f * 2
1186 1197 n = p
1187 1198 i += 1
1188 1199
1189 1200 r.append(l)
1190 1201
1191 1202 return r
1192 1203
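The i/f bookkeeping above records first-parent ancestors at exponentially growing distances from top, which is what lets the incoming search below binary-search a branch segment. A pure-Python rendering of just the spacing, for clarity:

    def sampledistances(length):
        # distances from 'top' at which between() keeps a node:
        # append when i == f, then double f -- giving 1, 2, 4, 8, ...
        kept, f, i = [], 1, 0
        while i < length:
            if i == f:
                kept.append(i)
                f *= 2
            i += 1
        return kept

    sampledistances(20)  # -> [1, 2, 4, 8, 16]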
1193 1204 def findincoming(self, remote, base=None, heads=None, force=False):
1194 1205 """Return list of roots of the subsets of missing nodes from remote
1195 1206
1196 1207 If base dict is specified, assume that these nodes and their parents
1197 1208 exist on the remote side and that no child of a node of base exists
1198 1209 in both remote and self.
1199 1210 Furthermore base will be updated to include the nodes that exist
1200 1211 in self and remote but have no children in both self and remote.
1201 1212 If a list of heads is specified, return only nodes which are heads
1202 1213 or ancestors of these heads.
1203 1214
1204 1215 All the ancestors of base are in self and in remote.
1205 1216 All the descendants of the list returned are missing in self.
1206 1217 (and so we know that the rest of the nodes are missing in remote, see
1207 1218 outgoing)
1208 1219 """
1209 1220 return self.findcommonincoming(remote, base, heads, force)[1]
1210 1221
1211 1222 def findcommonincoming(self, remote, base=None, heads=None, force=False):
1212 1223 """Return a tuple (common, missing roots, heads) used to identify
1213 1224 missing nodes from remote.
1214 1225
1215 1226 If base dict is specified, assume that these nodes and their parents
1216 1227 exist on the remote side and that no child of a node of base exists
1217 1228 in both remote and self.
1218 1229 Furthermore base will be updated to include the nodes that exist
1219 1230 in self and remote but have no children in both self and remote.
1220 1231 If a list of heads is specified, return only nodes which are heads
1221 1232 or ancestors of these heads.
1222 1233
1223 1234 All the ancestors of base are in self and in remote.
1224 1235 """
1225 1236 m = self.changelog.nodemap
1226 1237 search = []
1227 1238 fetch = set()
1228 1239 seen = set()
1229 1240 seenbranch = set()
1230 1241 if base is None:
1231 1242 base = {}
1232 1243
1233 1244 if not heads:
1234 1245 heads = remote.heads()
1235 1246
1236 1247 if self.changelog.tip() == nullid:
1237 1248 base[nullid] = 1
1238 1249 if heads != [nullid]:
1239 1250 return [nullid], [nullid], list(heads)
1240 1251 return [nullid], [], []
1241 1252
1242 1253 # assume we're closer to the tip than the root
1243 1254 # and start by examining the heads
1244 1255 self.ui.status(_("searching for changes\n"))
1245 1256
1246 1257 unknown = []
1247 1258 for h in heads:
1248 1259 if h not in m:
1249 1260 unknown.append(h)
1250 1261 else:
1251 1262 base[h] = 1
1252 1263
1253 1264 heads = unknown
1254 1265 if not unknown:
1255 1266 return base.keys(), [], []
1256 1267
1257 1268 req = set(unknown)
1258 1269 reqcnt = 0
1259 1270
1260 1271 # search through remote branches
1261 1272 # a 'branch' here is a linear segment of history, with four parts:
1262 1273 # head, root, first parent, second parent
1263 1274 # (a branch always has two parents (or none) by definition)
1264 1275 unknown = remote.branches(unknown)
1265 1276 while unknown:
1266 1277 r = []
1267 1278 while unknown:
1268 1279 n = unknown.pop(0)
1269 1280 if n[0] in seen:
1270 1281 continue
1271 1282
1272 1283 self.ui.debug(_("examining %s:%s\n")
1273 1284 % (short(n[0]), short(n[1])))
1274 1285 if n[0] == nullid: # found the end of the branch
1275 1286 pass
1276 1287 elif n in seenbranch:
1277 1288 self.ui.debug(_("branch already found\n"))
1278 1289 continue
1279 1290 elif n[1] and n[1] in m: # do we know the base?
1280 1291 self.ui.debug(_("found incomplete branch %s:%s\n")
1281 1292 % (short(n[0]), short(n[1])))
1282 1293 search.append(n[0:2]) # schedule branch range for scanning
1283 1294 seenbranch.add(n)
1284 1295 else:
1285 1296 if n[1] not in seen and n[1] not in fetch:
1286 1297 if n[2] in m and n[3] in m:
1287 1298 self.ui.debug(_("found new changeset %s\n") %
1288 1299 short(n[1]))
1289 1300 fetch.add(n[1]) # earliest unknown
1290 1301 for p in n[2:4]:
1291 1302 if p in m:
1292 1303 base[p] = 1 # latest known
1293 1304
1294 1305 for p in n[2:4]:
1295 1306 if p not in req and p not in m:
1296 1307 r.append(p)
1297 1308 req.add(p)
1298 1309 seen.add(n[0])
1299 1310
1300 1311 if r:
1301 1312 reqcnt += 1
1302 1313 self.ui.debug(_("request %d: %s\n") %
1303 1314 (reqcnt, " ".join(map(short, r))))
1304 1315 for p in xrange(0, len(r), 10):
1305 1316 for b in remote.branches(r[p:p+10]):
1306 1317 self.ui.debug(_("received %s:%s\n") %
1307 1318 (short(b[0]), short(b[1])))
1308 1319 unknown.append(b)
1309 1320
1310 1321 # do binary search on the branches we found
1311 1322 while search:
1312 1323 newsearch = []
1313 1324 reqcnt += 1
1314 1325 for n, l in zip(search, remote.between(search)):
1315 1326 l.append(n[1])
1316 1327 p = n[0]
1317 1328 f = 1
1318 1329 for i in l:
1319 1330 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1320 1331 if i in m:
1321 1332 if f <= 2:
1322 1333 self.ui.debug(_("found new branch changeset %s\n") %
1323 1334 short(p))
1324 1335 fetch.add(p)
1325 1336 base[i] = 1
1326 1337 else:
1327 1338 self.ui.debug(_("narrowed branch search to %s:%s\n")
1328 1339 % (short(p), short(i)))
1329 1340 newsearch.append((p, i))
1330 1341 break
1331 1342 p, f = i, f * 2
1332 1343 search = newsearch
1333 1344
1334 1345 # sanity check our fetch list
1335 1346 for f in fetch:
1336 1347 if f in m:
1337 1348 raise error.RepoError(_("already have changeset ")
1338 1349 + short(f[:4]))
1339 1350
1340 1351 if base.keys() == [nullid]:
1341 1352 if force:
1342 1353 self.ui.warn(_("warning: repository is unrelated\n"))
1343 1354 else:
1344 1355 raise util.Abort(_("repository is unrelated"))
1345 1356
1346 1357 self.ui.debug(_("found new changesets starting at ") +
1347 1358 " ".join([short(f) for f in fetch]) + "\n")
1348 1359
1349 1360 self.ui.debug(_("%d total queries\n") % reqcnt)
1350 1361
1351 1362 return base.keys(), list(fetch), heads
1352 1363
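A sketch of driving the discovery above directly, with two illustrative local paths (any peer supporting heads/branches/between works as the remote):

    from mercurial import hg, ui

    local = hg.repository(ui.ui(), '/path/to/local')    # illustrative
    remote = hg.repository(ui.ui(), '/path/to/remote')  # illustrative
    common, fetch, heads = local.findcommonincoming(remote)
    # common: bases known on both sides
    # fetch:  roots of the changesets we are missing
    # heads:  the remote heads the search was limited to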
1353 1364 def findoutgoing(self, remote, base=None, heads=None, force=False):
1354 1365 """Return list of nodes that are roots of subsets not in remote
1355 1366
1356 1367 If base dict is specified, assume that these nodes and their parents
1357 1368 exist on the remote side.
1358 1369 If a list of heads is specified, return only nodes which are heads
1359 1370 or ancestors of these heads, and return a second element which
1360 1371 contains all remote heads which get new children.
1361 1372 """
1362 1373 if base is None:
1363 1374 base = {}
1364 1375 self.findincoming(remote, base, heads, force=force)
1365 1376
1366 1377 self.ui.debug(_("common changesets up to ")
1367 1378 + " ".join(map(short, base.keys())) + "\n")
1368 1379
1369 1380 remain = set(self.changelog.nodemap)
1370 1381
1371 1382 # prune everything remote has from the tree
1372 1383 remain.remove(nullid)
1373 1384 remove = base.keys()
1374 1385 while remove:
1375 1386 n = remove.pop(0)
1376 1387 if n in remain:
1377 1388 remain.remove(n)
1378 1389 for p in self.changelog.parents(n):
1379 1390 remove.append(p)
1380 1391
1381 1392 # find every node whose parents have been pruned
1382 1393 subset = []
1383 1394 # find every remote head that will get new children
1384 1395 updated_heads = set()
1385 1396 for n in remain:
1386 1397 p1, p2 = self.changelog.parents(n)
1387 1398 if p1 not in remain and p2 not in remain:
1388 1399 subset.append(n)
1389 1400 if heads:
1390 1401 if p1 in heads:
1391 1402 updated_heads.add(p1)
1392 1403 if p2 in heads:
1393 1404 updated_heads.add(p2)
1394 1405
1395 1406 # this is the set of all roots we have to push
1396 1407 if heads:
1397 1408 return subset, list(updated_heads)
1398 1409 else:
1399 1410 return subset
1400 1411
1401 1412 def pull(self, remote, heads=None, force=False):
1402 1413 lock = self.lock()
1403 1414 try:
1404 1415 common, fetch, rheads = self.findcommonincoming(remote, heads=heads,
1405 1416 force=force)
1406 1417 if fetch == [nullid]:
1407 1418 self.ui.status(_("requesting all changes\n"))
1408 1419
1409 1420 if not fetch:
1410 1421 self.ui.status(_("no changes found\n"))
1411 1422 return 0
1412 1423
1413 1424 if heads is None and remote.capable('changegroupsubset'):
1414 1425 heads = rheads
1415 1426
1416 1427 if heads is None:
1417 1428 cg = remote.changegroup(fetch, 'pull')
1418 1429 else:
1419 1430 if not remote.capable('changegroupsubset'):
1420 1431 raise util.Abort(_("Partial pull cannot be done because "
1421 1432 "other repository doesn't support "
1422 1433 "changegroupsubset."))
1423 1434 cg = remote.changegroupsubset(fetch, heads, 'pull')
1424 1435 return self.addchangegroup(cg, 'pull', remote.url())
1425 1436 finally:
1426 1437 lock.release()
1427 1438
1428 1439 def push(self, remote, force=False, revs=None):
1429 1440 # there are two ways to push to remote repo:
1430 1441 #
1431 1442 # addchangegroup assumes local user can lock remote
1432 1443 # repo (local filesystem, old ssh servers).
1433 1444 #
1434 1445 # unbundle assumes local user cannot lock remote repo (new ssh
1435 1446 # servers, http servers).
1436 1447
1437 1448 if remote.capable('unbundle'):
1438 1449 return self.push_unbundle(remote, force, revs)
1439 1450 return self.push_addchangegroup(remote, force, revs)
1440 1451
1441 1452 def prepush(self, remote, force, revs):
1442 1453 common = {}
1443 1454 remote_heads = remote.heads()
1444 1455 inc = self.findincoming(remote, common, remote_heads, force=force)
1445 1456
1446 1457 update, updated_heads = self.findoutgoing(remote, common, remote_heads)
1447 1458 if revs is not None:
1448 1459 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1449 1460 else:
1450 1461 bases, heads = update, self.changelog.heads()
1451 1462
1452 1463 def checkbranch(lheads, rheads, updatelh):
1453 1464 '''
1454 1465 check whether there are more local heads than remote heads on
1455 1466 a specific branch.
1456 1467
1457 1468 lheads: local branch heads
1458 1469 rheads: remote branch heads
1459 1470 updatelh: outgoing local branch heads
1460 1471 '''
1461 1472
1462 1473 warn = 0
1463 1474
1464 1475 if not revs and len(lheads) > len(rheads):
1465 1476 warn = 1
1466 1477 else:
1467 1478 updatelheads = [self.changelog.heads(x, lheads)
1468 1479 for x in updatelh]
1469 1480 newheads = set(sum(updatelheads, [])) & set(lheads)
1470 1481
1471 1482 if not newheads:
1472 1483 return True
1473 1484
1474 1485 for r in rheads:
1475 1486 if r in self.changelog.nodemap:
1476 1487 desc = self.changelog.heads(r, heads)
1477 1488 l = [h for h in heads if h in desc]
1478 1489 if not l:
1479 1490 newheads.add(r)
1480 1491 else:
1481 1492 newheads.add(r)
1482 1493 if len(newheads) > len(rheads):
1483 1494 warn = 1
1484 1495
1485 1496 if warn:
1486 1497 if not rheads: # new branch requires --force
1487 1498 self.ui.warn(_("abort: push creates new"
1488 1499 " remote branch '%s'!\n" %
1489 1500 self[updatelh[0]].branch()))
1490 1501 else:
1491 1502 self.ui.warn(_("abort: push creates new remote heads!\n"))
1492 1503
1493 1504 self.ui.status(_("(did you forget to merge?"
1494 1505 " use push -f to force)\n"))
1495 1506 return False
1496 1507 return True
1497 1508
1498 1509 if not bases:
1499 1510 self.ui.status(_("no changes found\n"))
1500 1511 return None, 1
1501 1512 elif not force:
1502 1513 # Check for each named branch if we're creating new remote heads.
1503 1514 # To be a remote head after push, node must be either:
1504 1515 # - unknown locally
1505 1516 # - a local outgoing head descended from update
1506 1517 # - a remote head that's known locally and not
1507 1518 # ancestral to an outgoing head
1508 1519 #
1509 1520 # New named branches cannot be created without --force.
1510 1521
1511 1522 if remote_heads != [nullid]:
1512 1523 if remote.capable('branchmap'):
1513 1524 localhds = {}
1514 1525 if not revs:
1515 1526 localhds = self.branchmap()
1516 1527 else:
1517 1528 for n in heads:
1518 1529 branch = self[n].branch()
1519 1530 if branch in localhds:
1520 1531 localhds[branch].append(n)
1521 1532 else:
1522 1533 localhds[branch] = [n]
1523 1534
1524 1535 remotehds = remote.branchmap()
1525 1536
1526 1537 for lh in localhds:
1527 1538 if lh in remotehds:
1528 1539 rheads = remotehds[lh]
1529 1540 else:
1530 1541 rheads = []
1531 1542 lheads = localhds[lh]
1532 1543 updatelh = [upd for upd in update
1533 1544 if self[upd].branch() == lh]
1534 1545 if not updatelh:
1535 1546 continue
1536 1547 if not checkbranch(lheads, rheads, updatelh):
1537 1548 return None, 0
1538 1549 else:
1539 1550 if not checkbranch(heads, remote_heads, update):
1540 1551 return None, 0
1541 1552
1542 1553 if inc:
1543 1554 self.ui.warn(_("note: unsynced remote changes!\n"))
1544 1555
1545 1556
1546 1557 if revs is None:
1547 1558 # use the fast path, no race possible on push
1548 1559 cg = self._changegroup(common.keys(), 'push')
1549 1560 else:
1550 1561 cg = self.changegroupsubset(update, revs, 'push')
1551 1562 return cg, remote_heads
1552 1563
1553 1564 def push_addchangegroup(self, remote, force, revs):
1554 1565 lock = remote.lock()
1555 1566 try:
1556 1567 ret = self.prepush(remote, force, revs)
1557 1568 if ret[0] is not None:
1558 1569 cg, remote_heads = ret
1559 1570 return remote.addchangegroup(cg, 'push', self.url())
1560 1571 return ret[1]
1561 1572 finally:
1562 1573 lock.release()
1563 1574
1564 1575 def push_unbundle(self, remote, force, revs):
1565 1576 # local repo finds heads on server, finds out what revs it
1566 1577 # must push. once revs transferred, if server finds it has
1567 1578 # different heads (someone else won commit/push race), server
1568 1579 # aborts.
1569 1580
1570 1581 ret = self.prepush(remote, force, revs)
1571 1582 if ret[0] is not None:
1572 1583 cg, remote_heads = ret
1573 1584 if force: remote_heads = ['force']
1574 1585 return remote.unbundle(cg, remote_heads, 'push')
1575 1586 return ret[1]
1576 1587
1577 1588 def changegroupinfo(self, nodes, source):
1578 1589 if self.ui.verbose or source == 'bundle':
1579 1590 self.ui.status(_("%d changesets found\n") % len(nodes))
1580 1591 if self.ui.debugflag:
1581 1592 self.ui.debug(_("list of changesets:\n"))
1582 1593 for node in nodes:
1583 1594 self.ui.debug("%s\n" % hex(node))
1584 1595
1585 1596 def changegroupsubset(self, bases, heads, source, extranodes=None):
1586 1597 """This function generates a changegroup consisting of all the nodes
1587 1598 that are descendants of any of the bases, and ancestors of any of
1588 1599 the heads.
1589 1600
1590 1601 It is fairly complex as determining which filenodes and which
1591 1602 manifest nodes need to be included for the changeset to be complete
1592 1603 is non-trivial.
1593 1604
1594 1605 Another wrinkle is doing the reverse, figuring out which changeset in
1595 1606 the changegroup a particular filenode or manifestnode belongs to.
1596 1607
1597 1608 The caller can specify some nodes that must be included in the
1598 1609 changegroup using the extranodes argument. It should be a dict
1599 1610 where the keys are the filenames (or 1 for the manifest), and the
1600 1611 values are lists of (node, linknode) tuples, where node is a wanted
1601 1612 node and linknode is the changelog node that should be transmitted as
1602 1613 the linkrev.
1603 1614 """
1604 1615
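For reference, the shape the docstring describes, with placeholder 20-byte node values (real callers pass revlog node hashes):

    # purely illustrative values
    fnode = lnode = mnode = '\x00' * 20
    extranodes = {
        'path/to/file.txt': [(fnode, lnode)],  # (wanted node, linknode) pairs
        1: [(mnode, lnode)],                   # manifest entries use the key 1
    }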
1605 1616 if extranodes is None:
1606 1617 # can we go through the fast path ?
1607 1618 heads.sort()
1608 1619 allheads = self.heads()
1609 1620 allheads.sort()
1610 1621 if heads == allheads:
1611 1622 common = []
1612 1623 # parents of bases are known from both sides
1613 1624 for n in bases:
1614 1625 for p in self.changelog.parents(n):
1615 1626 if p != nullid:
1616 1627 common.append(p)
1617 1628 return self._changegroup(common, source)
1618 1629
1619 1630 self.hook('preoutgoing', throw=True, source=source)
1620 1631
1621 1632 # Set up some initial variables
1622 1633 # Make it easy to refer to self.changelog
1623 1634 cl = self.changelog
1624 1635 # msng is short for missing - compute the list of changesets in this
1625 1636 # changegroup.
1626 1637 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1627 1638 self.changegroupinfo(msng_cl_lst, source)
1628 1639 # Some bases may turn out to be superfluous, and some heads may be
1629 1640 # too. nodesbetween will return the minimal set of bases and heads
1630 1641 # necessary to re-create the changegroup.
1631 1642
1632 1643 # Known heads are the list of heads that it is assumed the recipient
1633 1644 # of this changegroup will know about.
1634 1645 knownheads = set()
1635 1646 # We assume that all parents of bases are known heads.
1636 1647 for n in bases:
1637 1648 knownheads.update(cl.parents(n))
1638 1649 knownheads.discard(nullid)
1639 1650 knownheads = list(knownheads)
1640 1651 if knownheads:
1641 1652 # Now that we know what heads are known, we can compute which
1642 1653 # changesets are known. The recipient must know about all
1643 1654 # changesets required to reach the known heads from the null
1644 1655 # changeset.
1645 1656 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1646 1657 junk = None
1647 1658 # Transform the list into a set.
1648 1659 has_cl_set = set(has_cl_set)
1649 1660 else:
1650 1661 # If there were no known heads, the recipient cannot be assumed to
1651 1662 # know about any changesets.
1652 1663 has_cl_set = set()
1653 1664
1654 1665 # Make it easy to refer to self.manifest
1655 1666 mnfst = self.manifest
1656 1667 # We don't know which manifests are missing yet
1657 1668 msng_mnfst_set = {}
1658 1669 # Nor do we know which filenodes are missing.
1659 1670 msng_filenode_set = {}
1660 1671
1661 1672 junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
1662 1673 junk = None
1663 1674
1664 1675 # A changeset always belongs to itself, so the changenode lookup
1665 1676 # function for a changenode is identity.
1666 1677 def identity(x):
1667 1678 return x
1668 1679
1669 1680 # A function generating function. Sets up an environment for the
1670 1681 # inner function.
1671 1682 def cmp_by_rev_func(revlog):
1672 1683 # Compare two nodes by their revision number in the environment's
1673 1684 # revision history. Since the revision number both represents the
1674 1685 # most efficient order to read the nodes in, and represents a
1675 1686 # topological sorting of the nodes, this function is often useful.
1676 1687 def cmp_by_rev(a, b):
1677 1688 return cmp(revlog.rev(a), revlog.rev(b))
1678 1689 return cmp_by_rev
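# Illustrative use, mirroring the sort calls further below: ordering a
# node list by revision number, which is both the cheapest order to read
# the nodes in and a valid topological order:
#
#     nodes.sort(cmp_by_rev_func(revlog))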
1679 1690
1680 1691 # If we determine that a particular file or manifest node must be a
1681 1692 # node that the recipient of the changegroup will already have, we can
1682 1693 # also assume the recipient will have all the parents. This function
1683 1694 # prunes them from the set of missing nodes.
1684 1695 def prune_parents(revlog, hasset, msngset):
1685 1696 haslst = list(hasset)
1686 1697 haslst.sort(cmp_by_rev_func(revlog))
1687 1698 for node in haslst:
1688 1699 parentlst = [p for p in revlog.parents(node) if p != nullid]
1689 1700 while parentlst:
1690 1701 n = parentlst.pop()
1691 1702 if n not in hasset:
1692 1703 hasset.add(n)
1693 1704 p = [p for p in revlog.parents(n) if p != nullid]
1694 1705 parentlst.extend(p)
1695 1706 for n in hasset:
1696 1707 msngset.pop(n, None)
1697 1708
1698 1709 # This is a function generating function used to set up an environment
1699 1710 # for the inner function to execute in.
1700 1711 def manifest_and_file_collector(changedfileset):
1701 1712 # This is an information gathering function that gathers
1702 1713 # information from each changeset node that goes out as part of
1703 1714 # the changegroup. The information gathered is a list of which
1704 1715 # manifest nodes are potentially required (the recipient may
1705 1716 # already have them) and total list of all files which were
1706 1717 # changed in any changeset in the changegroup.
1707 1718 #
1708 1719 # We also remember, for each manifest, the first changenode we saw
1709 1720 # that referenced it, so we can later determine which changenode
1710 1721 # 'owns' the manifest.
1711 1722 def collect_manifests_and_files(clnode):
1712 1723 c = cl.read(clnode)
1713 1724 for f in c[3]:
1714 1725 # This is to make sure we only have one instance of each
1715 1726 # filename string for each filename.
1716 1727 changedfileset.setdefault(f, f)
1717 1728 msng_mnfst_set.setdefault(c[0], clnode)
1718 1729 return collect_manifests_and_files
1719 1730
1720 1731 # Figure out which manifest nodes (of the ones we think might be part
1721 1732 # of the changegroup) the recipient must know about and remove them
1722 1733 # from the changegroup.
1723 1734 def prune_manifests():
1724 1735 has_mnfst_set = set()
1725 1736 for n in msng_mnfst_set:
1726 1737 # If a 'missing' manifest thinks it belongs to a changenode
1727 1738 # the recipient is assumed to have, obviously the recipient
1728 1739 # must have that manifest.
1729 1740 linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
1730 1741 if linknode in has_cl_set:
1731 1742 has_mnfst_set.add(n)
1732 1743 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1733 1744
1734 1745 # Use the information collected in collect_manifests_and_files to say
1735 1746 # which changenode any manifestnode belongs to.
1736 1747 def lookup_manifest_link(mnfstnode):
1737 1748 return msng_mnfst_set[mnfstnode]
1738 1749
1739 1750 # A function generating function that sets up the initial environment
1740 1751 # for the inner function.
1741 1752 def filenode_collector(changedfiles):
1742 1753 next_rev = [0]
1743 1754 # This gathers information from each manifestnode included in the
1744 1755 # changegroup about which filenodes the manifest node references
1745 1756 # so we can include those in the changegroup too.
1746 1757 #
1747 1758 # It also remembers which changenode each filenode belongs to. It
1748 1759 # does this by assuming that a filenode belongs to the changenode
1749 1760 # that the first manifest referencing it belongs to.
1750 1761 def collect_msng_filenodes(mnfstnode):
1751 1762 r = mnfst.rev(mnfstnode)
1752 1763 if r == next_rev[0]:
1753 1764 # If this rev immediately follows the last one we looked at,
1754 1765 # reading just the delta is enough.
1755 1766 deltamf = mnfst.readdelta(mnfstnode)
1756 1767 # For each line in the delta
1757 1768 for f, fnode in deltamf.iteritems():
1758 1769 f = changedfiles.get(f, None)
1759 1770 # And if the file is in the list of files we care
1760 1771 # about.
1761 1772 if f is not None:
1762 1773 # Get the changenode this manifest belongs to
1763 1774 clnode = msng_mnfst_set[mnfstnode]
1764 1775 # Create the set of filenodes for the file if
1765 1776 # there isn't one already.
1766 1777 ndset = msng_filenode_set.setdefault(f, {})
1767 1778 # And set the filenode's changelog node to the
1768 1779 # manifest's if it hasn't been set already.
1769 1780 ndset.setdefault(fnode, clnode)
1770 1781 else:
1771 1782 # Otherwise we need a full manifest.
1772 1783 m = mnfst.read(mnfstnode)
1773 1784 # For every file we care about.
1774 1785 for f in changedfiles:
1775 1786 fnode = m.get(f, None)
1776 1787 # If it's in the manifest
1777 1788 if fnode is not None:
1778 1789 # See comments above.
1779 1790 clnode = msng_mnfst_set[mnfstnode]
1780 1791 ndset = msng_filenode_set.setdefault(f, {})
1781 1792 ndset.setdefault(fnode, clnode)
1782 1793 # Remember the revision we hope to see next.
1783 1794 next_rev[0] = r + 1
1784 1795 return collect_msng_filenodes
1785 1796
1786 1797 # We have a list of filenodes we think we need for a file; let's remove
1787 1798 # all those we know the recipient must have.
1788 1799 def prune_filenodes(f, filerevlog):
1789 1800 msngset = msng_filenode_set[f]
1790 1801 hasset = set()
1791 1802 # If a 'missing' filenode thinks it belongs to a changenode we
1792 1803 # assume the recipient must have, then the recipient must have
1793 1804 # that filenode.
1794 1805 for n in msngset:
1795 1806 clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
1796 1807 if clnode in has_cl_set:
1797 1808 hasset.add(n)
1798 1809 prune_parents(filerevlog, hasset, msngset)
1799 1810
1800 1811 # A function generating function that sets up a context for the
1801 1812 # inner function.
1802 1813 def lookup_filenode_link_func(fname):
1803 1814 msngset = msng_filenode_set[fname]
1804 1815 # Lookup the changenode the filenode belongs to.
1805 1816 def lookup_filenode_link(fnode):
1806 1817 return msngset[fnode]
1807 1818 return lookup_filenode_link
1808 1819
1809 1820 # Add the nodes that were explicitly requested.
1810 1821 def add_extra_nodes(name, nodes):
1811 1822 if not extranodes or name not in extranodes:
1812 1823 return
1813 1824
1814 1825 for node, linknode in extranodes[name]:
1815 1826 if node not in nodes:
1816 1827 nodes[node] = linknode
1817 1828
1818 1829 # Now that we have all these utility functions to help out and
1819 1830 # logically divide up the task, generate the group.
1820 1831 def gengroup():
1821 1832 # The set of changed files starts empty.
1822 1833 changedfiles = {}
1823 1834 # Create a changenode group generator that will call our functions
1824 1835 # back to lookup the owning changenode and collect information.
1825 1836 group = cl.group(msng_cl_lst, identity,
1826 1837 manifest_and_file_collector(changedfiles))
1827 1838 for chnk in group:
1828 1839 yield chnk
1829 1840
1830 1841 # The list of manifests has been collected by the generator
1831 1842 # calling our functions back.
1832 1843 prune_manifests()
1833 1844 add_extra_nodes(1, msng_mnfst_set)
1834 1845 msng_mnfst_lst = msng_mnfst_set.keys()
1835 1846 # Sort the manifestnodes by revision number.
1836 1847 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1837 1848 # Create a generator for the manifestnodes that calls our lookup
1838 1849 # and data collection functions back.
1839 1850 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1840 1851 filenode_collector(changedfiles))
1841 1852 for chnk in group:
1842 1853 yield chnk
1843 1854
1844 1855 # These are no longer needed, dereference and toss the memory for
1845 1856 # them.
1846 1857 msng_mnfst_lst = None
1847 1858 msng_mnfst_set.clear()
1848 1859
1849 1860 if extranodes:
1850 1861 for fname in extranodes:
1851 1862 if isinstance(fname, int):
1852 1863 continue
1853 1864 msng_filenode_set.setdefault(fname, {})
1854 1865 changedfiles[fname] = 1
1855 1866 # Go through all our files in order sorted by name.
1856 1867 for fname in sorted(changedfiles):
1857 1868 filerevlog = self.file(fname)
1858 1869 if not len(filerevlog):
1859 1870 raise util.Abort(_("empty or missing revlog for %s") % fname)
1860 1871 # Toss out the filenodes that the recipient isn't really
1861 1872 # missing.
1862 1873 if fname in msng_filenode_set:
1863 1874 prune_filenodes(fname, filerevlog)
1864 1875 add_extra_nodes(fname, msng_filenode_set[fname])
1865 1876 msng_filenode_lst = msng_filenode_set[fname].keys()
1866 1877 else:
1867 1878 msng_filenode_lst = []
1868 1879 # If any filenodes are left, generate the group for them,
1869 1880 # otherwise don't bother.
1870 1881 if msng_filenode_lst:
1871 1882 yield changegroup.chunkheader(len(fname))
1872 1883 yield fname
1873 1884 # Sort the filenodes by their revision number.
1874 1885 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1875 1886 # Create a group generator and only pass in a changenode
1876 1887 # lookup function as we need to collect no information
1877 1888 # from filenodes.
1878 1889 group = filerevlog.group(msng_filenode_lst,
1879 1890 lookup_filenode_link_func(fname))
1880 1891 for chnk in group:
1881 1892 yield chnk
1882 1893 if fname in msng_filenode_set:
1883 1894 # Don't need this anymore, toss it to free memory.
1884 1895 del msng_filenode_set[fname]
1885 1896 # Signal that no more groups are left.
1886 1897 yield changegroup.closechunk()
1887 1898
1888 1899 if msng_cl_lst:
1889 1900 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1890 1901
1891 1902 return util.chunkbuffer(gengroup())
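# A minimal consumer sketch (illustrative; repo, bases, heads and
# outfile are assumed names): the chunkbuffer returned above exposes
# read(), so the changegroup can be streamed out in fixed-size pieces:
#
#     cg = repo.changegroupsubset(bases, heads, 'bundle')
#     while True:
#         data = cg.read(4096)
#         if not data:
#             break
#         outfile.write(data)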
1892 1903
1893 1904 def changegroup(self, basenodes, source):
1894 1905 # to avoid a race we use changegroupsubset() (issue1320)
1895 1906 return self.changegroupsubset(basenodes, self.heads(), source)
1896 1907
1897 1908 def _changegroup(self, common, source):
1898 1909 """Generate a changegroup of all nodes that we have that a recipient
1899 1910 doesn't.
1900 1911
1901 1912 This is much easier than the previous function as we can assume that
1902 1913 the recipient has any changenode we aren't sending them.
1903 1914
1904 1915 common is the set of common nodes between remote and self"""
1905 1916
1906 1917 self.hook('preoutgoing', throw=True, source=source)
1907 1918
1908 1919 cl = self.changelog
1909 1920 nodes = cl.findmissing(common)
1910 1921 revset = set([cl.rev(n) for n in nodes])
1911 1922 self.changegroupinfo(nodes, source)
1912 1923
1913 1924 def identity(x):
1914 1925 return x
1915 1926
1916 1927 def gennodelst(log):
1917 1928 for r in log:
1918 1929 if log.linkrev(r) in revset:
1919 1930 yield log.node(r)
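# gennodelst walks a revlog in revision order and yields only the nodes
# whose linkrev falls within the outgoing revision set, i.e. the nodes
# introduced by the changesets being sent.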
1920 1931
1921 1932 def changed_file_collector(changedfileset):
1922 1933 def collect_changed_files(clnode):
1923 1934 c = cl.read(clnode)
1924 1935 changedfileset.update(c[3])
1925 1936 return collect_changed_files
1926 1937
1927 1938 def lookuprevlink_func(revlog):
1928 1939 def lookuprevlink(n):
1929 1940 return cl.node(revlog.linkrev(revlog.rev(n)))
1930 1941 return lookuprevlink
1931 1942
1932 1943 def gengroup():
1933 1944 # construct a list of all changed files
1934 1945 changedfiles = set()
1935 1946
1936 1947 for chnk in cl.group(nodes, identity,
1937 1948 changed_file_collector(changedfiles)):
1938 1949 yield chnk
1939 1950
1940 1951 mnfst = self.manifest
1941 1952 nodeiter = gennodelst(mnfst)
1942 1953 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1943 1954 yield chnk
1944 1955
1945 1956 for fname in sorted(changedfiles):
1946 1957 filerevlog = self.file(fname)
1947 1958 if not len(filerevlog):
1948 1959 raise util.Abort(_("empty or missing revlog for %s") % fname)
1949 1960 nodeiter = gennodelst(filerevlog)
1950 1961 nodeiter = list(nodeiter)
1951 1962 if nodeiter:
1952 1963 yield changegroup.chunkheader(len(fname))
1953 1964 yield fname
1954 1965 lookup = lookuprevlink_func(filerevlog)
1955 1966 for chnk in filerevlog.group(nodeiter, lookup):
1956 1967 yield chnk
1957 1968
1958 1969 yield changegroup.closechunk()
1959 1970
1960 1971 if nodes:
1961 1972 self.hook('outgoing', node=hex(nodes[0]), source=source)
1962 1973
1963 1974 return util.chunkbuffer(gengroup())
1964 1975
1965 1976 def addchangegroup(self, source, srctype, url, emptyok=False):
1966 1977 """add changegroup to repo.
1967 1978
1968 1979 return values:
1969 1980 - nothing changed or no source: 0
1970 1981 - more heads than before: 1+added heads (2..n)
1971 1982 - fewer heads than before: -1-removed heads (-2..-n)
1972 1983 - number of heads stays the same: 1
1973 1984 """
1974 1985 def csmap(x):
1975 1986 self.ui.debug(_("add changeset %s\n") % short(x))
1976 1987 return len(cl)
1977 1988
1978 1989 def revmap(x):
1979 1990 return cl.rev(x)
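# csmap supplies the linkrev for each incoming changeset: its own rev,
# i.e. the changelog's length at the moment it is added. revmap later
# resolves manifest and filelog linknodes against the extended changelog.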
1980 1991
1981 1992 if not source:
1982 1993 return 0
1983 1994
1984 1995 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1985 1996
1986 1997 changesets = files = revisions = 0
1987 1998
1988 1999 # write changelog data to temp files so concurrent readers will not see
1989 2000 # an inconsistent view
1990 2001 cl = self.changelog
1991 2002 cl.delayupdate()
1992 2003 oldheads = len(cl.heads())
1993 2004
1994 2005 tr = self.transaction()
1995 2006 try:
1996 2007 trp = weakref.proxy(tr)
1997 2008 # pull off the changeset group
1998 2009 self.ui.status(_("adding changesets\n"))
1999 2010 clstart = len(cl)
2000 2011 chunkiter = changegroup.chunkiter(source)
2001 2012 if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
2002 2013 raise util.Abort(_("received changelog group is empty"))
2003 2014 clend = len(cl)
2004 2015 changesets = clend - clstart
2005 2016
2006 2017 # pull off the manifest group
2007 2018 self.ui.status(_("adding manifests\n"))
2008 2019 chunkiter = changegroup.chunkiter(source)
2009 2020 # no need to check for empty manifest group here:
2010 2021 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2011 2022 # no new manifest will be created and the manifest group will
2012 2023 # be empty during the pull
2013 2024 self.manifest.addgroup(chunkiter, revmap, trp)
2014 2025
2015 2026 # process the files
2016 2027 self.ui.status(_("adding file changes\n"))
2017 2028 while True:
2018 2029 f = changegroup.getchunk(source)
2019 2030 if not f:
2020 2031 break
2021 2032 self.ui.debug(_("adding %s revisions\n") % f)
2022 2033 fl = self.file(f)
2023 2034 o = len(fl)
2024 2035 chunkiter = changegroup.chunkiter(source)
2025 2036 if fl.addgroup(chunkiter, revmap, trp) is None:
2026 2037 raise util.Abort(_("received file revlog group is empty"))
2027 2038 revisions += len(fl) - o
2028 2039 files += 1
2029 2040
2030 2041 newheads = len(cl.heads())
2031 2042 heads = ""
2032 2043 if oldheads and newheads != oldheads:
2033 2044 heads = _(" (%+d heads)") % (newheads - oldheads)
2034 2045
2035 2046 self.ui.status(_("added %d changesets"
2036 2047 " with %d changes to %d files%s\n")
2037 2048 % (changesets, revisions, files, heads))
2038 2049
2039 2050 if changesets > 0:
2040 2051 p = lambda: cl.writepending() and self.root or ""
2041 2052 self.hook('pretxnchangegroup', throw=True,
2042 2053 node=hex(cl.node(clstart)), source=srctype,
2043 2054 url=url, pending=p)
2044 2055
2045 2056 # make changelog see real files again
2046 2057 cl.finalize(trp)
2047 2058
2048 2059 tr.close()
2049 2060 finally:
2050 2061 del tr
2051 2062
2052 2063 if changesets > 0:
2053 2064 # forcefully update the on-disk branch cache
2054 2065 self.ui.debug(_("updating the branch cache\n"))
2055 2066 self.branchtags()
2056 2067 self.hook("changegroup", node=hex(cl.node(clstart)),
2057 2068 source=srctype, url=url)
2058 2069
2059 2070 for i in xrange(clstart, clend):
2060 2071 self.hook("incoming", node=hex(cl.node(i)),
2061 2072 source=srctype, url=url)
2062 2073
2063 2074 # never return 0 here:
2064 2075 if newheads < oldheads:
2065 2076 return newheads - oldheads - 1
2066 2077 else:
2067 2078 return newheads - oldheads + 1
2068 2079
2069 2080
2070 2081 def stream_in(self, remote):
2071 2082 fp = remote.stream_out()
2072 2083 l = fp.readline()
2073 2084 try:
2074 2085 resp = int(l)
2075 2086 except ValueError:
2076 2087 raise error.ResponseError(
2077 2088 _('Unexpected response from remote server:'), l)
2078 2089 if resp == 1:
2079 2090 raise util.Abort(_('operation forbidden by server'))
2080 2091 elif resp == 2:
2081 2092 raise util.Abort(_('locking the remote repository failed'))
2082 2093 elif resp != 0:
2083 2094 raise util.Abort(_('the server sent an unknown error code'))
2084 2095 self.ui.status(_('streaming all changes\n'))
2085 2096 l = fp.readline()
2086 2097 try:
2087 2098 total_files, total_bytes = map(int, l.split(' ', 1))
2088 2099 except (ValueError, TypeError):
2089 2100 raise error.ResponseError(
2090 2101 _('Unexpected response from remote server:'), l)
2091 2102 self.ui.status(_('%d files to transfer, %s of data\n') %
2092 2103 (total_files, util.bytecount(total_bytes)))
2093 2104 start = time.time()
2094 2105 for i in xrange(total_files):
2095 2106 # XXX doesn't support '\n' or '\r' in filenames
2096 2107 l = fp.readline()
2097 2108 try:
2098 2109 name, size = l.split('\0', 1)
2099 2110 size = int(size)
2100 2111 except (ValueError, TypeError):
2101 2112 raise error.ResponseError(
2102 2113 _('Unexpected response from remote server:'), l)
2103 2114 self.ui.debug(_('adding %s (%s)\n') % (name, util.bytecount(size)))
2104 2115 # for backwards compat, name was partially encoded
2105 2116 ofp = self.sopener(store.decodedir(name), 'w')
2106 2117 for chunk in util.filechunkiter(fp, limit=size):
2107 2118 ofp.write(chunk)
2108 2119 ofp.close()
2109 2120 elapsed = time.time() - start
2110 2121 if elapsed <= 0:
2111 2122 elapsed = 0.001
2112 2123 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2113 2124 (util.bytecount(total_bytes), elapsed,
2114 2125 util.bytecount(total_bytes / elapsed)))
2115 2126 self.invalidate()
2116 2127 return len(self.heads()) + 1
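# Wire format consumed above, reconstructed from the parsing logic (a
# descriptive sketch, not a normative spec):
#
#     <resp>\n                    resp: 0 = ok, 1 = forbidden, 2 = lock failed
#     <total_files> <total_bytes>\n
#     then, for each file:
#     <name>\0<size>\n            store-encoded name, size in bytes
#     <size bytes of raw revlog data>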
2117 2128
2118 2129 def clone(self, remote, heads=[], stream=False):
2119 2130 '''clone remote repository.
2120 2131
2121 2132 keyword arguments:
2122 2133 heads: list of revs to clone (forces use of pull)
2123 2134 stream: use streaming clone if possible'''
2124 2135
2125 2136 # now, all clients that can request uncompressed clones can
2126 2137 # read repo formats supported by all servers that can serve
2127 2138 # them.
2128 2139
2129 2140 # if revlog format changes, client will have to check version
2130 2141 # and format flags on "stream" capability, and use
2131 2142 # uncompressed only if compatible.
2132 2143
2133 2144 if stream and not heads and remote.capable('stream'):
2134 2145 return self.stream_in(remote)
2135 2146 return self.pull(remote, heads)
2136 2147
2137 2148 # used to avoid circular references so destructors work
2138 2149 def aftertrans(files):
2139 2150 renamefiles = [tuple(t) for t in files]
2140 2151 def a():
2141 2152 for src, dest in renamefiles:
2142 2153 util.rename(src, dest)
2143 2154 return a
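# Hedged usage sketch: elsewhere in this class the transaction is wired
# up roughly like the following (argument names assumed), so the journal
# files are renamed to their undo names only once the transaction closes:
#
#     tr = transaction.transaction(ui.warn, sopener, sjoin("journal"),
#                                  aftertrans(renames))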
2144 2155
2145 2156 def instance(ui, path, create):
2146 2157 return localrepository(ui, util.drop_scheme('file', path), create)
2147 2158
2148 2159 def islocal(path):
2149 2160 return True