commit: push repo lock down into _commitctx
Matt Mackall
r8405:7cbf8fcd default
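The change below narrows the locking in the commit path: commit() now takes
only the working-directory lock (wlock), commitctx() loses its own
lock/try/finally wrapper, and the store lock is acquired inside _commitctx()
itself. A minimal sketch of the resulting shape (bodies elided;
_buildworkingctx is a hypothetical stand-in for the inline body shown in the
diff):

    class localrepository(repo.repository):
        def commit(self, files=None, text="", user=None, date=None,
                   match=None, force=False, force_editor=False,
                   extra={}, empty_ok=False):
            wlock = self.wlock()        # working-dir lock only
            try:
                wctx = self._buildworkingctx()   # hypothetical helper
                return self._commitctx(wctx, force, force_editor,
                                       empty_ok, True)
            finally:
                wlock.release()

        def commitctx(self, ctx):
            # no locking here any more; _commitctx takes the store lock
            return self._commitctx(ctx, force=True, force_editor=False,
                                   empty_ok=True, working=False)

        def _commitctx(self, ctx, force=False, force_editor=False,
                       empty_ok=False, working=True):
            lock = self.lock()          # store lock pushed down here
            try:
                pass                    # check in files, write manifest
                                        # and changelog (see diff below)
            finally:
                lock.release()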
@@ -1,2136 +1,2133
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2, incorporated herein by reference.
7 7
8 8 from node import bin, hex, nullid, nullrev, short
9 9 from i18n import _
10 10 import repo, changegroup
11 11 import changelog, dirstate, filelog, manifest, context
12 12 import lock, transaction, store, encoding
13 13 import util, extensions, hook, error
14 14 import match as match_
15 15 import merge as merge_
16 16 from lock import release
17 17 import weakref, stat, errno, os, time, inspect
18 18 propertycache = util.propertycache
19 19
20 20 class localrepository(repo.repository):
21 21 capabilities = set(('lookup', 'changegroupsubset'))
22 22 supported = set('revlogv1 store fncache'.split())
23 23
24 24 def __init__(self, baseui, path=None, create=0):
25 25 repo.repository.__init__(self)
26 26 self.root = os.path.realpath(path)
27 27 self.path = os.path.join(self.root, ".hg")
28 28 self.origroot = path
29 29 self.opener = util.opener(self.path)
30 30 self.wopener = util.opener(self.root)
31 31
32 32 if not os.path.isdir(self.path):
33 33 if create:
34 34 if not os.path.exists(path):
35 35 os.mkdir(path)
36 36 os.mkdir(self.path)
37 37 requirements = ["revlogv1"]
38 38 if baseui.configbool('format', 'usestore', True):
39 39 os.mkdir(os.path.join(self.path, "store"))
40 40 requirements.append("store")
41 41 if baseui.configbool('format', 'usefncache', True):
42 42 requirements.append("fncache")
43 43 # create an invalid changelog
44 44 self.opener("00changelog.i", "a").write(
45 45 '\0\0\0\2' # represents revlogv2
46 46 ' dummy changelog to prevent using the old repo layout'
47 47 )
48 48 reqfile = self.opener("requires", "w")
49 49 for r in requirements:
50 50 reqfile.write("%s\n" % r)
51 51 reqfile.close()
52 52 else:
53 53 raise error.RepoError(_("repository %s not found") % path)
54 54 elif create:
55 55 raise error.RepoError(_("repository %s already exists") % path)
56 56 else:
57 57 # find requirements
58 58 requirements = set()
59 59 try:
60 60 requirements = set(self.opener("requires").read().splitlines())
61 61 except IOError, inst:
62 62 if inst.errno != errno.ENOENT:
63 63 raise
64 64 for r in requirements - self.supported:
65 65 raise error.RepoError(_("requirement '%s' not supported") % r)
66 66
67 67 self.store = store.store(requirements, self.path, util.opener)
68 68 self.spath = self.store.path
69 69 self.sopener = self.store.opener
70 70 self.sjoin = self.store.join
71 71 self.opener.createmode = self.store.createmode
72 72
73 73 self.baseui = baseui
74 74 self.ui = baseui.copy()
75 75 try:
76 76 self.ui.readconfig(self.join("hgrc"), self.root)
77 77 extensions.loadall(self.ui)
78 78 except IOError:
79 79 pass
80 80
81 81 self.tagscache = None
82 82 self._tagstypecache = None
83 83 self.branchcache = None
84 84 self._ubranchcache = None # UTF-8 version of branchcache
85 85 self._branchcachetip = None
86 86 self.nodetagscache = None
87 87 self.filterpats = {}
88 88 self._datafilters = {}
89 89 self._transref = self._lockref = self._wlockref = None
90 90
91 91 @propertycache
92 92 def changelog(self):
93 93 c = changelog.changelog(self.sopener)
94 94 if 'HG_PENDING' in os.environ:
95 95 p = os.environ['HG_PENDING']
96 96 if p.startswith(self.root):
97 97 c.readpending('00changelog.i.a')
98 98 self.sopener.defversion = c.version
99 99 return c
100 100
101 101 @propertycache
102 102 def manifest(self):
103 103 return manifest.manifest(self.sopener)
104 104
105 105 @propertycache
106 106 def dirstate(self):
107 107 return dirstate.dirstate(self.opener, self.ui, self.root)
108 108
109 109 def __getitem__(self, changeid):
110 110 if changeid == None:
111 111 return context.workingctx(self)
112 112 return context.changectx(self, changeid)
113 113
114 114 def __nonzero__(self):
115 115 return True
116 116
117 117 def __len__(self):
118 118 return len(self.changelog)
119 119
120 120 def __iter__(self):
121 121 for i in xrange(len(self)):
122 122 yield i
123 123
124 124 def url(self):
125 125 return 'file:' + self.root
126 126
127 127 def hook(self, name, throw=False, **args):
128 128 return hook.hook(self.ui, self, name, throw, **args)
129 129
130 130 tag_disallowed = ':\r\n'
131 131
132 132 def _tag(self, names, node, message, local, user, date, extra={}):
133 133 if isinstance(names, str):
134 134 allchars = names
135 135 names = (names,)
136 136 else:
137 137 allchars = ''.join(names)
138 138 for c in self.tag_disallowed:
139 139 if c in allchars:
140 140 raise util.Abort(_('%r cannot be used in a tag name') % c)
141 141
142 142 for name in names:
143 143 self.hook('pretag', throw=True, node=hex(node), tag=name,
144 144 local=local)
145 145
146 146 def writetags(fp, names, munge, prevtags):
147 147 fp.seek(0, 2)
148 148 if prevtags and prevtags[-1] != '\n':
149 149 fp.write('\n')
150 150 for name in names:
151 151 m = munge and munge(name) or name
152 152 if self._tagstypecache and name in self._tagstypecache:
153 153 old = self.tagscache.get(name, nullid)
154 154 fp.write('%s %s\n' % (hex(old), m))
155 155 fp.write('%s %s\n' % (hex(node), m))
156 156 fp.close()
157 157
158 158 prevtags = ''
159 159 if local:
160 160 try:
161 161 fp = self.opener('localtags', 'r+')
162 162 except IOError:
163 163 fp = self.opener('localtags', 'a')
164 164 else:
165 165 prevtags = fp.read()
166 166
167 167 # local tags are stored in the current charset
168 168 writetags(fp, names, None, prevtags)
169 169 for name in names:
170 170 self.hook('tag', node=hex(node), tag=name, local=local)
171 171 return
172 172
173 173 try:
174 174 fp = self.wfile('.hgtags', 'rb+')
175 175 except IOError:
176 176 fp = self.wfile('.hgtags', 'ab')
177 177 else:
178 178 prevtags = fp.read()
179 179
180 180 # committed tags are stored in UTF-8
181 181 writetags(fp, names, encoding.fromlocal, prevtags)
182 182
183 183 if '.hgtags' not in self.dirstate:
184 184 self.add(['.hgtags'])
185 185
186 186 tagnode = self.commit(['.hgtags'], message, user, date, extra=extra)
187 187
188 188 for name in names:
189 189 self.hook('tag', node=hex(node), tag=name, local=local)
190 190
191 191 return tagnode
192 192
193 193 def tag(self, names, node, message, local, user, date):
194 194 '''tag a revision with one or more symbolic names.
195 195
196 196 names is a list of strings or, when adding a single tag, names may be a
197 197 string.
198 198
199 199 if local is True, the tags are stored in a per-repository file.
200 200 otherwise, they are stored in the .hgtags file, and a new
201 201 changeset is committed with the change.
202 202
203 203 keyword arguments:
204 204
205 205 local: whether to store tags in non-version-controlled file
206 206 (default False)
207 207
208 208 message: commit message to use if committing
209 209
210 210 user: name of user to use if committing
211 211
212 212 date: date tuple to use if committing'''
213 213
214 214 for x in self.status()[:5]:
215 215 if '.hgtags' in x:
216 216 raise util.Abort(_('working copy of .hgtags is changed '
217 217 '(please commit .hgtags manually)'))
218 218
219 219 self.tags() # instantiate the cache
220 220 self._tag(names, node, message, local, user, date)
221 221
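    # Usage sketch (not part of this changeset): tag the first dirstate
    # parent. With local=True the tag is written to the uncommitted
    # 'localtags' file; with local=False a '.hgtags' change is committed.
    #
    #   node = repo.dirstate.parents()[0]
    #   repo.tag('v1.0', node, 'Added tag v1.0', False, None, None)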
222 222 def tags(self):
223 223 '''return a mapping of tag to node'''
224 224 if self.tagscache:
225 225 return self.tagscache
226 226
227 227 globaltags = {}
228 228 tagtypes = {}
229 229
230 230 def readtags(lines, fn, tagtype):
231 231 filetags = {}
232 232 count = 0
233 233
234 234 def warn(msg):
235 235 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
236 236
237 237 for l in lines:
238 238 count += 1
239 239 if not l:
240 240 continue
241 241 s = l.split(" ", 1)
242 242 if len(s) != 2:
243 243 warn(_("cannot parse entry"))
244 244 continue
245 245 node, key = s
246 246 key = encoding.tolocal(key.strip()) # stored in UTF-8
247 247 try:
248 248 bin_n = bin(node)
249 249 except TypeError:
250 250 warn(_("node '%s' is not well formed") % node)
251 251 continue
252 252 if bin_n not in self.changelog.nodemap:
253 253 warn(_("tag '%s' refers to unknown node") % key)
254 254 continue
255 255
256 256 h = []
257 257 if key in filetags:
258 258 n, h = filetags[key]
259 259 h.append(n)
260 260 filetags[key] = (bin_n, h)
261 261
262 262 for k, nh in filetags.iteritems():
263 263 if k not in globaltags:
264 264 globaltags[k] = nh
265 265 tagtypes[k] = tagtype
266 266 continue
267 267
268 268 # we prefer the global tag if:
269 269 # it supersedes us OR
270 270 # mutual supersedes and it has a higher rank
271 271 # otherwise we win because we're tip-most
272 272 an, ah = nh
273 273 bn, bh = globaltags[k]
274 274 if (bn != an and an in bh and
275 275 (bn not in ah or len(bh) > len(ah))):
276 276 an = bn
277 277 ah.extend([n for n in bh if n not in ah])
278 278 globaltags[k] = an, ah
279 279 tagtypes[k] = tagtype
280 280
281 281 # read the tags file from each head, ending with the tip
282 282 f = None
283 283 for rev, node, fnode in self._hgtagsnodes():
284 284 f = (f and f.filectx(fnode) or
285 285 self.filectx('.hgtags', fileid=fnode))
286 286 readtags(f.data().splitlines(), f, "global")
287 287
288 288 try:
289 289 data = encoding.fromlocal(self.opener("localtags").read())
290 290 # localtags are stored in the local character set
291 291 # while the internal tag table is stored in UTF-8
292 292 readtags(data.splitlines(), "localtags", "local")
293 293 except IOError:
294 294 pass
295 295
296 296 self.tagscache = {}
297 297 self._tagstypecache = {}
298 298 for k, nh in globaltags.iteritems():
299 299 n = nh[0]
300 300 if n != nullid:
301 301 self.tagscache[k] = n
302 302 self._tagstypecache[k] = tagtypes[k]
303 303 self.tagscache['tip'] = self.changelog.tip()
304 304 return self.tagscache
305 305
306 306 def tagtype(self, tagname):
307 307 '''
308 308 return the type of the given tag. result can be:
309 309
310 310 'local' : a local tag
311 311 'global' : a global tag
312 312 None : tag does not exist
313 313 '''
314 314
315 315 self.tags()
316 316
317 317 return self._tagstypecache.get(tagname)
318 318
319 319 def _hgtagsnodes(self):
320 320 last = {}
321 321 ret = []
322 322 for node in reversed(self.heads()):
323 323 c = self[node]
324 324 rev = c.rev()
325 325 try:
326 326 fnode = c.filenode('.hgtags')
327 327 except error.LookupError:
328 328 continue
329 329 ret.append((rev, node, fnode))
330 330 if fnode in last:
331 331 ret[last[fnode]] = None
332 332 last[fnode] = len(ret) - 1
333 333 return [item for item in ret if item]
334 334
335 335 def tagslist(self):
336 336 '''return a list of tags ordered by revision'''
337 337 l = []
338 338 for t, n in self.tags().iteritems():
339 339 try:
340 340 r = self.changelog.rev(n)
341 341 except:
342 342 r = -2 # sort to the beginning of the list if unknown
343 343 l.append((r, t, n))
344 344 return [(t, n) for r, t, n in sorted(l)]
345 345
346 346 def nodetags(self, node):
347 347 '''return the tags associated with a node'''
348 348 if not self.nodetagscache:
349 349 self.nodetagscache = {}
350 350 for t, n in self.tags().iteritems():
351 351 self.nodetagscache.setdefault(n, []).append(t)
352 352 return self.nodetagscache.get(node, [])
353 353
354 354 def _branchtags(self, partial, lrev):
355 355 # TODO: rename this function?
356 356 tiprev = len(self) - 1
357 357 if lrev != tiprev:
358 358 self._updatebranchcache(partial, lrev+1, tiprev+1)
359 359 self._writebranchcache(partial, self.changelog.tip(), tiprev)
360 360
361 361 return partial
362 362
363 363 def _branchheads(self):
364 364 tip = self.changelog.tip()
365 365 if self.branchcache is not None and self._branchcachetip == tip:
366 366 return self.branchcache
367 367
368 368 oldtip = self._branchcachetip
369 369 self._branchcachetip = tip
370 370 if self.branchcache is None:
371 371 self.branchcache = {} # avoid recursion in changectx
372 372 else:
373 373 self.branchcache.clear() # keep using the same dict
374 374 if oldtip is None or oldtip not in self.changelog.nodemap:
375 375 partial, last, lrev = self._readbranchcache()
376 376 else:
377 377 lrev = self.changelog.rev(oldtip)
378 378 partial = self._ubranchcache
379 379
380 380 self._branchtags(partial, lrev)
381 381 # this private cache holds all heads (not just tips)
382 382 self._ubranchcache = partial
383 383
384 384 # the branch cache is stored on disk as UTF-8, but in the local
385 385 # charset internally
386 386 for k, v in partial.iteritems():
387 387 self.branchcache[encoding.tolocal(k)] = v
388 388 return self.branchcache
389 389
390 390
391 391 def branchtags(self):
392 392 '''return a dict where branch names map to the tipmost head of
393 393 the branch; open heads come before closed'''
394 394 bt = {}
395 395 for bn, heads in self._branchheads().iteritems():
396 396 head = None
397 397 for i in range(len(heads)-1, -1, -1):
398 398 h = heads[i]
399 399 if 'close' not in self.changelog.read(h)[5]:
400 400 head = h
401 401 break
402 402 # no open heads were found
403 403 if head is None:
404 404 head = heads[-1]
405 405 bt[bn] = head
406 406 return bt
407 407
408 408
409 409 def _readbranchcache(self):
410 410 partial = {}
411 411 try:
412 412 f = self.opener("branchheads.cache")
413 413 lines = f.read().split('\n')
414 414 f.close()
415 415 except (IOError, OSError):
416 416 return {}, nullid, nullrev
417 417
418 418 try:
419 419 last, lrev = lines.pop(0).split(" ", 1)
420 420 last, lrev = bin(last), int(lrev)
421 421 if lrev >= len(self) or self[lrev].node() != last:
422 422 # invalidate the cache
423 423 raise ValueError('invalidating branch cache (tip differs)')
424 424 for l in lines:
425 425 if not l: continue
426 426 node, label = l.split(" ", 1)
427 427 partial.setdefault(label.strip(), []).append(bin(node))
428 428 except KeyboardInterrupt:
429 429 raise
430 430 except Exception, inst:
431 431 if self.ui.debugflag:
432 432 self.ui.warn(str(inst), '\n')
433 433 partial, last, lrev = {}, nullid, nullrev
434 434 return partial, last, lrev
435 435
436 436 def _writebranchcache(self, branches, tip, tiprev):
437 437 try:
438 438 f = self.opener("branchheads.cache", "w", atomictemp=True)
439 439 f.write("%s %s\n" % (hex(tip), tiprev))
440 440 for label, nodes in branches.iteritems():
441 441 for node in nodes:
442 442 f.write("%s %s\n" % (hex(node), label))
443 443 f.rename()
444 444 except (IOError, OSError):
445 445 pass
446 446
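    # Layout of the branchheads.cache file written above and parsed by
    # _readbranchcache, as a sketch:
    #
    #   <hex tip node> <tip rev>
    #   <hex head node> <branch label>
    #   <hex head node> <branch label>
    #
    # The first line lets the reader detect a stale cache (tip differs);
    # each following line maps one head to its branch name, with labels
    # stored in UTF-8 on disk.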
447 447 def _updatebranchcache(self, partial, start, end):
448 448 for r in xrange(start, end):
449 449 c = self[r]
450 450 b = c.branch()
451 451 bheads = partial.setdefault(b, [])
452 452 bheads.append(c.node())
453 453 for p in c.parents():
454 454 pn = p.node()
455 455 if pn in bheads:
456 456 bheads.remove(pn)
457 457
458 458 def lookup(self, key):
459 459 if isinstance(key, int):
460 460 return self.changelog.node(key)
461 461 elif key == '.':
462 462 return self.dirstate.parents()[0]
463 463 elif key == 'null':
464 464 return nullid
465 465 elif key == 'tip':
466 466 return self.changelog.tip()
467 467 n = self.changelog._match(key)
468 468 if n:
469 469 return n
470 470 if key in self.tags():
471 471 return self.tags()[key]
472 472 if key in self.branchtags():
473 473 return self.branchtags()[key]
474 474 n = self.changelog._partialmatch(key)
475 475 if n:
476 476 return n
477 477 try:
478 478 if len(key) == 20:
479 479 key = hex(key)
480 480 except:
481 481 pass
482 482 raise error.RepoError(_("unknown revision '%s'") % key)
483 483
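    # lookup() resolution order, for reference: integer revision, '.'
    # (first dirstate parent), 'null', 'tip', an exact changelog match,
    # tag name, branch name, and finally an unambiguous node prefix.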
484 484 def local(self):
485 485 return True
486 486
487 487 def join(self, f):
488 488 return os.path.join(self.path, f)
489 489
490 490 def wjoin(self, f):
491 491 return os.path.join(self.root, f)
492 492
493 493 def rjoin(self, f):
494 494 return os.path.join(self.root, util.pconvert(f))
495 495
496 496 def file(self, f):
497 497 if f[0] == '/':
498 498 f = f[1:]
499 499 return filelog.filelog(self.sopener, f)
500 500
501 501 def changectx(self, changeid):
502 502 return self[changeid]
503 503
504 504 def parents(self, changeid=None):
505 505 '''get list of changectxs for parents of changeid'''
506 506 return self[changeid].parents()
507 507
508 508 def filectx(self, path, changeid=None, fileid=None):
509 509 """changeid can be a changeset revision, node, or tag.
510 510 fileid can be a file revision or node."""
511 511 return context.filectx(self, path, changeid, fileid)
512 512
513 513 def getcwd(self):
514 514 return self.dirstate.getcwd()
515 515
516 516 def pathto(self, f, cwd=None):
517 517 return self.dirstate.pathto(f, cwd)
518 518
519 519 def wfile(self, f, mode='r'):
520 520 return self.wopener(f, mode)
521 521
522 522 def _link(self, f):
523 523 return os.path.islink(self.wjoin(f))
524 524
525 525 def _filter(self, filter, filename, data):
526 526 if filter not in self.filterpats:
527 527 l = []
528 528 for pat, cmd in self.ui.configitems(filter):
529 529 if cmd == '!':
530 530 continue
531 531 mf = util.matcher(self.root, "", [pat], [], [])[1]
532 532 fn = None
533 533 params = cmd
534 534 for name, filterfn in self._datafilters.iteritems():
535 535 if cmd.startswith(name):
536 536 fn = filterfn
537 537 params = cmd[len(name):].lstrip()
538 538 break
539 539 if not fn:
540 540 fn = lambda s, c, **kwargs: util.filter(s, c)
541 541 # Wrap old filters not supporting keyword arguments
542 542 if not inspect.getargspec(fn)[2]:
543 543 oldfn = fn
544 544 fn = lambda s, c, **kwargs: oldfn(s, c)
545 545 l.append((mf, fn, params))
546 546 self.filterpats[filter] = l
547 547
548 548 for mf, fn, cmd in self.filterpats[filter]:
549 549 if mf(filename):
550 550 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
551 551 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
552 552 break
553 553
554 554 return data
555 555
556 556 def adddatafilter(self, name, filter):
557 557 self._datafilters[name] = filter
558 558
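    # The encode/decode filters above are configured through hgrc
    # sections; a sketch along the lines of the hgrc documentation:
    #
    #   [encode]
    #   *.gz = pipe: gunzip
    #   [decode]
    #   *.gz = gzip
    #
    # Commands default to util.filter ("pipe:"), while filters registered
    # via adddatafilter() are chosen when the command starts with the
    # registered name.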
559 559 def wread(self, filename):
560 560 if self._link(filename):
561 561 data = os.readlink(self.wjoin(filename))
562 562 else:
563 563 data = self.wopener(filename, 'r').read()
564 564 return self._filter("encode", filename, data)
565 565
566 566 def wwrite(self, filename, data, flags):
567 567 data = self._filter("decode", filename, data)
568 568 try:
569 569 os.unlink(self.wjoin(filename))
570 570 except OSError:
571 571 pass
572 572 if 'l' in flags:
573 573 self.wopener.symlink(data, filename)
574 574 else:
575 575 self.wopener(filename, 'w').write(data)
576 576 if 'x' in flags:
577 577 util.set_flags(self.wjoin(filename), False, True)
578 578
579 579 def wwritedata(self, filename, data):
580 580 return self._filter("decode", filename, data)
581 581
582 582 def transaction(self):
583 583 tr = self._transref and self._transref() or None
584 584 if tr and tr.running():
585 585 return tr.nest()
586 586
587 587 # abort here if the journal already exists
588 588 if os.path.exists(self.sjoin("journal")):
589 589 raise error.RepoError(_("journal already exists - run hg recover"))
590 590
591 591 # save dirstate for rollback
592 592 try:
593 593 ds = self.opener("dirstate").read()
594 594 except IOError:
595 595 ds = ""
596 596 self.opener("journal.dirstate", "w").write(ds)
597 597 self.opener("journal.branch", "w").write(self.dirstate.branch())
598 598
599 599 renames = [(self.sjoin("journal"), self.sjoin("undo")),
600 600 (self.join("journal.dirstate"), self.join("undo.dirstate")),
601 601 (self.join("journal.branch"), self.join("undo.branch"))]
602 602 tr = transaction.transaction(self.ui.warn, self.sopener,
603 603 self.sjoin("journal"),
604 604 aftertrans(renames),
605 605 self.store.createmode)
606 606 self._transref = weakref.ref(tr)
607 607 return tr
608 608
609 609 def recover(self):
610 610 lock = self.lock()
611 611 try:
612 612 if os.path.exists(self.sjoin("journal")):
613 613 self.ui.status(_("rolling back interrupted transaction\n"))
614 614 transaction.rollback(self.sopener, self.sjoin("journal"), self.ui.warn)
615 615 self.invalidate()
616 616 return True
617 617 else:
618 618 self.ui.warn(_("no interrupted transaction available\n"))
619 619 return False
620 620 finally:
621 621 lock.release()
622 622
623 623 def rollback(self):
624 624 wlock = lock = None
625 625 try:
626 626 wlock = self.wlock()
627 627 lock = self.lock()
628 628 if os.path.exists(self.sjoin("undo")):
629 629 self.ui.status(_("rolling back last transaction\n"))
630 630 transaction.rollback(self.sopener, self.sjoin("undo"), self.ui.warn)
631 631 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
632 632 try:
633 633 branch = self.opener("undo.branch").read()
634 634 self.dirstate.setbranch(branch)
635 635 except IOError:
636 636 self.ui.warn(_("Named branch could not be reset, "
637 637 "current branch still is: %s\n")
638 638 % encoding.tolocal(self.dirstate.branch()))
639 639 self.invalidate()
640 640 self.dirstate.invalidate()
641 641 else:
642 642 self.ui.warn(_("no rollback information available\n"))
643 643 finally:
644 644 release(lock, wlock)
645 645
646 646 def invalidate(self):
647 647 for a in "changelog manifest".split():
648 648 if a in self.__dict__:
649 649 delattr(self, a)
650 650 self.tagscache = None
651 651 self._tagstypecache = None
652 652 self.nodetagscache = None
653 653 self.branchcache = None
654 654 self._ubranchcache = None
655 655 self._branchcachetip = None
656 656
657 657 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
658 658 try:
659 659 l = lock.lock(lockname, 0, releasefn, desc=desc)
660 660 except error.LockHeld, inst:
661 661 if not wait:
662 662 raise
663 663 self.ui.warn(_("waiting for lock on %s held by %r\n") %
664 664 (desc, inst.locker))
665 665 # default to 600 seconds timeout
666 666 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
667 667 releasefn, desc=desc)
668 668 if acquirefn:
669 669 acquirefn()
670 670 return l
671 671
672 672 def lock(self, wait=True):
673 673 l = self._lockref and self._lockref()
674 674 if l is not None and l.held:
675 675 l.lock()
676 676 return l
677 677
678 678 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
679 679 _('repository %s') % self.origroot)
680 680 self._lockref = weakref.ref(l)
681 681 return l
682 682
683 683 def wlock(self, wait=True):
684 684 l = self._wlockref and self._wlockref()
685 685 if l is not None and l.held:
686 686 l.lock()
687 687 return l
688 688
689 689 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
690 690 self.dirstate.invalidate, _('working directory of %s') %
691 691 self.origroot)
692 692 self._wlockref = weakref.ref(l)
693 693 return l
694 694
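    # Typical usage of the two locks (compare rollback above): take wlock
    # before lock and release in reverse order. A sketch:
    #
    #   wlock = lock = None
    #   try:
    #       wlock = repo.wlock()
    #       lock = repo.lock()
    #       # ... mutate store and dirstate ...
    #   finally:
    #       release(lock, wlock)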
695 695 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
696 696 """
697 697 commit an individual file as part of a larger transaction
698 698 """
699 699
700 700 fname = fctx.path()
701 701 text = fctx.data()
702 702 flog = self.file(fname)
703 703 fparent1 = manifest1.get(fname, nullid)
704 704 fparent2 = fparent2o = manifest2.get(fname, nullid)
705 705
706 706 meta = {}
707 707 copy = fctx.renamed()
708 708 if copy and copy[0] != fname:
709 709 # Mark the new revision of this file as a copy of another
710 710 # file. This copy data will effectively act as a parent
711 711 # of this new revision. If this is a merge, the first
712 712 # parent will be the nullid (meaning "look up the copy data")
713 713 # and the second one will be the other parent. For example:
714 714 #
715 715 # 0 --- 1 --- 3 rev1 changes file foo
716 716 # \ / rev2 renames foo to bar and changes it
717 717 # \- 2 -/ rev3 should have bar with all changes and
718 718 # should record that bar descends from
719 719 # bar in rev2 and foo in rev1
720 720 #
721 721 # this allows this merge to succeed:
722 722 #
723 723 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
724 724 # \ / merging rev3 and rev4 should use bar@rev2
725 725 # \- 2 --- 4 as the merge base
726 726 #
727 727
728 728 cfname = copy[0]
729 729 crev = manifest1.get(cfname)
730 730 newfparent = fparent2
731 731
732 732 if manifest2: # branch merge
733 733 if fparent2 == nullid or crev is None: # copied on remote side
734 734 if cfname in manifest2:
735 735 crev = manifest2[cfname]
736 736 newfparent = fparent1
737 737
738 738 # find source in nearest ancestor if we've lost track
739 739 if not crev:
740 740 self.ui.debug(_(" %s: searching for copy revision for %s\n") %
741 741 (fname, cfname))
742 742 for ancestor in self['.'].ancestors():
743 743 if cfname in ancestor:
744 744 crev = ancestor[cfname].filenode()
745 745 break
746 746
747 747 self.ui.debug(_(" %s: copy %s:%s\n") % (fname, cfname, hex(crev)))
748 748 meta["copy"] = cfname
749 749 meta["copyrev"] = hex(crev)
750 750 fparent1, fparent2 = nullid, newfparent
751 751 elif fparent2 != nullid:
752 752 # is one parent an ancestor of the other?
753 753 fparentancestor = flog.ancestor(fparent1, fparent2)
754 754 if fparentancestor == fparent1:
755 755 fparent1, fparent2 = fparent2, nullid
756 756 elif fparentancestor == fparent2:
757 757 fparent2 = nullid
758 758
759 759 # is the file changed?
760 760 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
761 761 changelist.append(fname)
762 762 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
763 763
764 764 # are just the flags changed during merge?
765 765 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
766 766 changelist.append(fname)
767 767
768 768 return fparent1
769 769
770 770 def commit(self, files=None, text="", user=None, date=None, match=None,
771 771 force=False, force_editor=False, extra={}, empty_ok=False):
772 772 wlock = lock = None
773 773 if extra.get("close"):
774 774 force = True
775 775 if files:
776 776 files = list(set(files))
777
778 wlock = self.wlock()
777 779 try:
778 wlock = self.wlock()
779 lock = self.lock()
780
781 780 p1, p2 = self.dirstate.parents()
782 781
783 782 if (not force and p2 != nullid and
784 783 (match and (match.files() or match.anypats()))):
785 784 raise util.Abort(_('cannot partially commit a merge '
786 785 '(do not specify files or patterns)'))
787 786
788 787 if files:
789 788 modified, removed = [], []
790 789 for f in files:
791 790 s = self.dirstate[f]
792 791 if s in 'nma':
793 792 modified.append(f)
794 793 elif s == 'r':
795 794 removed.append(f)
796 795 else:
797 796 self.ui.warn(_("%s not tracked!\n") % f)
798 797 changes = [modified, [], removed, [], []]
799 798 else:
800 799 changes = self.status(match=match)
801 800
802 801 if (not (changes[0] or changes[1] or changes[2])
803 802 and not force and p2 == nullid and
804 803 self[None].branch() == self['.'].branch()):
805 804 self.ui.status(_("nothing changed\n"))
806 805 return None
807 806
808 807 ms = merge_.mergestate(self)
809 808 for f in changes[0]:
810 809 if f in ms and ms[f] == 'u':
811 810 raise util.Abort(_("unresolved merge conflicts "
812 811 "(see hg resolve)"))
813 812 wctx = context.workingctx(self, (p1, p2), text, user, date,
814 813 extra, changes)
815 814 r = self._commitctx(wctx, force, force_editor, empty_ok, True)
816 815 ms.reset()
817 816 return r
818 817
819 818 finally:
820 release(lock, wlock)
819 wlock.release()
821 820
822 821 def commitctx(self, ctx):
823 822 """Add a new revision to current repository.
824 823
825 824 Revision information is passed in the context.memctx argument.
826 825 commitctx() does not touch the working directory.
827 826 """
828 lock = self.lock()
829 try:
830 return self._commitctx(ctx, force=True, force_editor=False,
831 empty_ok=True, working=False)
832 finally:
833 lock.release()
827 return self._commitctx(ctx, force=True, force_editor=False,
828 empty_ok=True, working=False)
834 829
835 830 def _commitctx(self, ctx, force=False, force_editor=False, empty_ok=False,
836 831 working=True):
832 lock = self.lock()
837 833 tr = None
838 834 valid = 0 # don't save the dirstate if this isn't set
839 835 try:
840 836 commit = sorted(ctx.modified() + ctx.added())
841 837 remove = ctx.removed()
842 838 extra = ctx.extra().copy()
843 839 branchname = extra['branch']
844 840 user = ctx.user()
845 841 text = ctx.description()
846 842
847 843 p1, p2 = [p.node() for p in ctx.parents()]
848 844 c1 = self.changelog.read(p1)
849 845 c2 = self.changelog.read(p2)
850 846 m1 = self.manifest.read(c1[0]).copy()
851 847 m2 = self.manifest.read(c2[0])
852 848
853 849 xp1 = hex(p1)
854 850 if p2 == nullid: xp2 = ''
855 851 else: xp2 = hex(p2)
856 852
857 853 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
858 854
859 855 tr = self.transaction()
860 856 trp = weakref.proxy(tr)
861 857
862 858 # check in files
863 859 new = {}
864 860 changed = []
865 861 linkrev = len(self)
866 862 for f in commit:
867 863 self.ui.note(f + "\n")
868 864 try:
869 865 fctx = ctx[f]
870 866 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
871 867 changed)
872 868 m1.set(f, fctx.flags())
873 869 if working:
874 870 self.dirstate.normal(f)
875 871
876 872 except (OSError, IOError):
877 873 if working:
878 874 self.ui.warn(_("trouble committing %s!\n") % f)
879 875 raise
880 876 else:
881 877 remove.append(f)
882 878
883 879 updated, added = [], []
884 880 for f in sorted(changed):
885 881 if f in m1 or f in m2:
886 882 updated.append(f)
887 883 else:
888 884 added.append(f)
889 885
890 886 # update manifest
891 887 m1.update(new)
892 888 removed = [f for f in sorted(remove) if f in m1 or f in m2]
893 889 removed1 = []
894 890
895 891 for f in removed:
896 892 if f in m1:
897 893 del m1[f]
898 894 removed1.append(f)
899 895 mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
900 896 (new, removed1))
901 897
902 898 # add changeset
903 899 if (not empty_ok and not text) or force_editor:
904 900 edittext = []
905 901 if text:
906 902 edittext.append(text)
907 903 edittext.append("")
908 904 edittext.append("") # Empty line between message and comments.
909 905 edittext.append(_("HG: Enter commit message."
910 906 " Lines beginning with 'HG:' are removed."))
911 907 edittext.append("HG: --")
912 908 edittext.append(_("HG: user: %s") % user)
913 909 if p2 != nullid:
914 910 edittext.append(_("HG: branch merge"))
915 911 if branchname:
916 912 edittext.append(_("HG: branch '%s'")
917 913 % encoding.tolocal(branchname))
918 914 edittext.extend([_("HG: added %s") % f for f in added])
919 915 edittext.extend([_("HG: changed %s") % f for f in updated])
920 916 edittext.extend([_("HG: removed %s") % f for f in removed])
921 917 if not added and not updated and not removed:
922 918 edittext.append(_("HG: no files changed"))
923 919 edittext.append("")
924 920 # run editor in the repository root
925 921 olddir = os.getcwd()
926 922 os.chdir(self.root)
927 923 text = self.ui.edit("\n".join(edittext), user)
928 924 os.chdir(olddir)
929 925
930 926 lines = [line.rstrip() for line in text.rstrip().splitlines()]
931 927 while lines and not lines[0]:
932 928 del lines[0]
933 929 if not lines and working:
934 930 raise util.Abort(_("empty commit message"))
935 931 text = '\n'.join(lines)
936 932
937 933 self.changelog.delayupdate()
938 934 n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
939 935 user, ctx.date(), extra)
940 936 p = lambda: self.changelog.writepending() and self.root or ""
941 937 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
942 938 parent2=xp2, pending=p)
943 939 self.changelog.finalize(trp)
944 940 tr.close()
945 941
946 942 if self.branchcache:
947 943 self.branchtags()
948 944
949 945 if working:
950 946 self.dirstate.setparents(n)
951 947 for f in removed:
952 948 self.dirstate.forget(f)
953 949 valid = 1 # our dirstate updates are complete
954 950
955 951 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
956 952 return n
957 953 finally:
958 954 if not valid: # don't save our updated dirstate
959 955 self.dirstate.invalidate()
960 956 del tr
957 lock.release()
961 958
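    # Note on the hook plumbing above: changelog.delayupdate() defers the
    # changelog write, and the pending=p callback publishes it to
    # 00changelog.i.a so pretxncommit hooks can see the not-yet-committed
    # changeset; hook processes locate it through the HG_PENDING
    # environment variable (checked in the changelog property near the
    # top of this file).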
962 959 def walk(self, match, node=None):
963 960 '''
964 961 walk recursively through the directory tree or a given
965 962 changeset, finding all files matched by the match
966 963 function
967 964 '''
968 965 return self[node].walk(match)
969 966
970 967 def status(self, node1='.', node2=None, match=None,
971 968 ignored=False, clean=False, unknown=False):
972 969 """return status of files between two nodes or node and working directory
973 970
974 971 If node1 is None, use the first dirstate parent instead.
975 972 If node2 is None, compare node1 with working directory.
976 973 """
977 974
978 975 def mfmatches(ctx):
979 976 mf = ctx.manifest().copy()
980 977 for fn in mf.keys():
981 978 if not match(fn):
982 979 del mf[fn]
983 980 return mf
984 981
985 982 if isinstance(node1, context.changectx):
986 983 ctx1 = node1
987 984 else:
988 985 ctx1 = self[node1]
989 986 if isinstance(node2, context.changectx):
990 987 ctx2 = node2
991 988 else:
992 989 ctx2 = self[node2]
993 990
994 991 working = ctx2.rev() is None
995 992 parentworking = working and ctx1 == self['.']
996 993 match = match or match_.always(self.root, self.getcwd())
997 994 listignored, listclean, listunknown = ignored, clean, unknown
998 995
999 996 # load earliest manifest first for caching reasons
1000 997 if not working and ctx2.rev() < ctx1.rev():
1001 998 ctx2.manifest()
1002 999
1003 1000 if not parentworking:
1004 1001 def bad(f, msg):
1005 1002 if f not in ctx1:
1006 1003 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1007 1004 return False
1008 1005 match.bad = bad
1009 1006
1010 1007 if working: # we need to scan the working dir
1011 1008 s = self.dirstate.status(match, listignored, listclean, listunknown)
1012 1009 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1013 1010
1014 1011 # check for any possibly clean files
1015 1012 if parentworking and cmp:
1016 1013 fixup = []
1017 1014 # do a full compare of any files that might have changed
1018 1015 for f in sorted(cmp):
1019 1016 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1020 1017 or ctx1[f].cmp(ctx2[f].data())):
1021 1018 modified.append(f)
1022 1019 else:
1023 1020 fixup.append(f)
1024 1021
1025 1022 if listclean:
1026 1023 clean += fixup
1027 1024
1028 1025 # update dirstate for files that are actually clean
1029 1026 if fixup:
1030 1027 wlock = None
1031 1028 try:
1032 1029 try:
1033 1030 # updating the dirstate is optional
1034 1031 # so we don't wait on the lock
1035 1032 wlock = self.wlock(False)
1036 1033 for f in fixup:
1037 1034 self.dirstate.normal(f)
1038 1035 except error.LockError:
1039 1036 pass
1040 1037 finally:
1041 1038 release(wlock)
1042 1039
1043 1040 if not parentworking:
1044 1041 mf1 = mfmatches(ctx1)
1045 1042 if working:
1046 1043 # we are comparing working dir against non-parent
1047 1044 # generate a pseudo-manifest for the working dir
1048 1045 mf2 = mfmatches(self['.'])
1049 1046 for f in cmp + modified + added:
1050 1047 mf2[f] = None
1051 1048 mf2.set(f, ctx2.flags(f))
1052 1049 for f in removed:
1053 1050 if f in mf2:
1054 1051 del mf2[f]
1055 1052 else:
1056 1053 # we are comparing two revisions
1057 1054 deleted, unknown, ignored = [], [], []
1058 1055 mf2 = mfmatches(ctx2)
1059 1056
1060 1057 modified, added, clean = [], [], []
1061 1058 for fn in mf2:
1062 1059 if fn in mf1:
1063 1060 if (mf1.flags(fn) != mf2.flags(fn) or
1064 1061 (mf1[fn] != mf2[fn] and
1065 1062 (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
1066 1063 modified.append(fn)
1067 1064 elif listclean:
1068 1065 clean.append(fn)
1069 1066 del mf1[fn]
1070 1067 else:
1071 1068 added.append(fn)
1072 1069 removed = mf1.keys()
1073 1070
1074 1071 r = modified, added, removed, deleted, unknown, ignored, clean
1075 1072 [l.sort() for l in r]
1076 1073 return r
1077 1074
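    # The status tuple above unpacks in a fixed order (a usage sketch):
    #
    #   modified, added, removed, deleted, unknown, ignored, clean = \
    #       repo.status(ignored=True, clean=True, unknown=True)
    #
    # The unknown/ignored/clean lists are only meaningful when the
    # corresponding flag is passed; otherwise they come back empty.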
1078 1075 def add(self, list):
1079 1076 wlock = self.wlock()
1080 1077 try:
1081 1078 rejected = []
1082 1079 for f in list:
1083 1080 p = self.wjoin(f)
1084 1081 try:
1085 1082 st = os.lstat(p)
1086 1083 except:
1087 1084 self.ui.warn(_("%s does not exist!\n") % f)
1088 1085 rejected.append(f)
1089 1086 continue
1090 1087 if st.st_size > 10000000:
1091 1088 self.ui.warn(_("%s: files over 10MB may cause memory and"
1092 1089 " performance problems\n"
1093 1090 "(use 'hg revert %s' to unadd the file)\n")
1094 1091 % (f, f))
1095 1092 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1096 1093 self.ui.warn(_("%s not added: only files and symlinks "
1097 1094 "supported currently\n") % f)
1098 1095 rejected.append(p)
1099 1096 elif self.dirstate[f] in 'amn':
1100 1097 self.ui.warn(_("%s already tracked!\n") % f)
1101 1098 elif self.dirstate[f] == 'r':
1102 1099 self.dirstate.normallookup(f)
1103 1100 else:
1104 1101 self.dirstate.add(f)
1105 1102 return rejected
1106 1103 finally:
1107 1104 wlock.release()
1108 1105
1109 1106 def forget(self, list):
1110 1107 wlock = self.wlock()
1111 1108 try:
1112 1109 for f in list:
1113 1110 if self.dirstate[f] != 'a':
1114 1111 self.ui.warn(_("%s not added!\n") % f)
1115 1112 else:
1116 1113 self.dirstate.forget(f)
1117 1114 finally:
1118 1115 wlock.release()
1119 1116
1120 1117 def remove(self, list, unlink=False):
1121 1118 wlock = None
1122 1119 try:
1123 1120 if unlink:
1124 1121 for f in list:
1125 1122 try:
1126 1123 util.unlink(self.wjoin(f))
1127 1124 except OSError, inst:
1128 1125 if inst.errno != errno.ENOENT:
1129 1126 raise
1130 1127 wlock = self.wlock()
1131 1128 for f in list:
1132 1129 if unlink and os.path.exists(self.wjoin(f)):
1133 1130 self.ui.warn(_("%s still exists!\n") % f)
1134 1131 elif self.dirstate[f] == 'a':
1135 1132 self.dirstate.forget(f)
1136 1133 elif f not in self.dirstate:
1137 1134 self.ui.warn(_("%s not tracked!\n") % f)
1138 1135 else:
1139 1136 self.dirstate.remove(f)
1140 1137 finally:
1141 1138 release(wlock)
1142 1139
1143 1140 def undelete(self, list):
1144 1141 manifests = [self.manifest.read(self.changelog.read(p)[0])
1145 1142 for p in self.dirstate.parents() if p != nullid]
1146 1143 wlock = self.wlock()
1147 1144 try:
1148 1145 for f in list:
1149 1146 if self.dirstate[f] != 'r':
1150 1147 self.ui.warn(_("%s not removed!\n") % f)
1151 1148 else:
1152 1149 m = f in manifests[0] and manifests[0] or manifests[1]
1153 1150 t = self.file(f).read(m[f])
1154 1151 self.wwrite(f, t, m.flags(f))
1155 1152 self.dirstate.normal(f)
1156 1153 finally:
1157 1154 wlock.release()
1158 1155
1159 1156 def copy(self, source, dest):
1160 1157 p = self.wjoin(dest)
1161 1158 if not (os.path.exists(p) or os.path.islink(p)):
1162 1159 self.ui.warn(_("%s does not exist!\n") % dest)
1163 1160 elif not (os.path.isfile(p) or os.path.islink(p)):
1164 1161 self.ui.warn(_("copy failed: %s is not a file or a "
1165 1162 "symbolic link\n") % dest)
1166 1163 else:
1167 1164 wlock = self.wlock()
1168 1165 try:
1169 1166 if self.dirstate[dest] in '?r':
1170 1167 self.dirstate.add(dest)
1171 1168 self.dirstate.copy(source, dest)
1172 1169 finally:
1173 1170 wlock.release()
1174 1171
1175 1172 def heads(self, start=None, closed=True):
1176 1173 heads = self.changelog.heads(start)
1177 1174 def display(head):
1178 1175 if closed:
1179 1176 return True
1180 1177 extras = self.changelog.read(head)[5]
1181 1178 return ('close' not in extras)
1182 1179 # sort the output in rev descending order
1183 1180 heads = [(-self.changelog.rev(h), h) for h in heads if display(h)]
1184 1181 return [n for (r, n) in sorted(heads)]
1185 1182
1186 1183 def branchheads(self, branch=None, start=None, closed=True):
1187 1184 if branch is None:
1188 1185 branch = self[None].branch()
1189 1186 branches = self._branchheads()
1190 1187 if branch not in branches:
1191 1188 return []
1192 1189 bheads = branches[branch]
1193 1190 # the cache returns heads ordered lowest to highest
1194 1191 bheads.reverse()
1195 1192 if start is not None:
1196 1193 # filter out the heads that cannot be reached from startrev
1197 1194 bheads = self.changelog.nodesbetween([start], bheads)[2]
1198 1195 if not closed:
1199 1196 bheads = [h for h in bheads if
1200 1197 ('close' not in self.changelog.read(h)[5])]
1201 1198 return bheads
1202 1199
1203 1200 def branches(self, nodes):
1204 1201 if not nodes:
1205 1202 nodes = [self.changelog.tip()]
1206 1203 b = []
1207 1204 for n in nodes:
1208 1205 t = n
1209 1206 while 1:
1210 1207 p = self.changelog.parents(n)
1211 1208 if p[1] != nullid or p[0] == nullid:
1212 1209 b.append((t, n, p[0], p[1]))
1213 1210 break
1214 1211 n = p[0]
1215 1212 return b
1216 1213
1217 1214 def between(self, pairs):
1218 1215 r = []
1219 1216
1220 1217 for top, bottom in pairs:
1221 1218 n, l, i = top, [], 0
1222 1219 f = 1
1223 1220
1224 1221 while n != bottom and n != nullid:
1225 1222 p = self.changelog.parents(n)[0]
1226 1223 if i == f:
1227 1224 l.append(n)
1228 1225 f = f * 2
1229 1226 n = p
1230 1227 i += 1
1231 1228
1232 1229 r.append(l)
1233 1230
1234 1231 return r
1235 1232
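    # between() samples each (top, bottom) range at exponentially growing
    # distances from top (1, 2, 4, 8, ... ancestors back), which is what
    # lets findcommonincoming below binary-search long linear stretches
    # of history in O(log n) wire requests. For a 10-deep linear range,
    # for example, it reports the nodes 1, 2, 4 and 8 steps below top.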
1236 1233 def findincoming(self, remote, base=None, heads=None, force=False):
1237 1234 """Return list of roots of the subsets of missing nodes from remote
1238 1235
1239 1236 If base dict is specified, assume that these nodes and their parents
1240 1237 exist on the remote side and that no child of a node of base exists
1241 1238 in both remote and self.
1242 1239 Furthermore base will be updated to include the nodes that exist
1243 1240 in both self and remote but none of whose children exist in both.
1244 1241 If a list of heads is specified, return only nodes which are heads
1245 1242 or ancestors of these heads.
1246 1243
1247 1244 All the ancestors of base are in self and in remote.
1248 1245 All the descendants of the list returned are missing in self.
1249 1246 (and so we know that the rest of the nodes are missing in remote, see
1250 1247 outgoing)
1251 1248 """
1252 1249 return self.findcommonincoming(remote, base, heads, force)[1]
1253 1250
1254 1251 def findcommonincoming(self, remote, base=None, heads=None, force=False):
1255 1252 """Return a tuple (common, missing roots, heads) used to identify
1256 1253 missing nodes from remote.
1257 1254
1258 1255 If base dict is specified, assume that these nodes and their parents
1259 1256 exist on the remote side and that no child of a node of base exists
1260 1257 in both remote and self.
1261 1258 Furthermore base will be updated to include the nodes that exist
1262 1259 in both self and remote but none of whose children exist in both.
1263 1260 If a list of heads is specified, return only nodes which are heads
1264 1261 or ancestors of these heads.
1265 1262
1266 1263 All the ancestors of base are in self and in remote.
1267 1264 """
1268 1265 m = self.changelog.nodemap
1269 1266 search = []
1270 1267 fetch = set()
1271 1268 seen = set()
1272 1269 seenbranch = set()
1273 1270 if base == None:
1274 1271 base = {}
1275 1272
1276 1273 if not heads:
1277 1274 heads = remote.heads()
1278 1275
1279 1276 if self.changelog.tip() == nullid:
1280 1277 base[nullid] = 1
1281 1278 if heads != [nullid]:
1282 1279 return [nullid], [nullid], list(heads)
1283 1280 return [nullid], [], []
1284 1281
1285 1282 # assume we're closer to the tip than the root
1286 1283 # and start by examining the heads
1287 1284 self.ui.status(_("searching for changes\n"))
1288 1285
1289 1286 unknown = []
1290 1287 for h in heads:
1291 1288 if h not in m:
1292 1289 unknown.append(h)
1293 1290 else:
1294 1291 base[h] = 1
1295 1292
1296 1293 heads = unknown
1297 1294 if not unknown:
1298 1295 return base.keys(), [], []
1299 1296
1300 1297 req = set(unknown)
1301 1298 reqcnt = 0
1302 1299
1303 1300 # search through remote branches
1304 1301 # a 'branch' here is a linear segment of history, with four parts:
1305 1302 # head, root, first parent, second parent
1306 1303 # (a branch always has two parents (or none) by definition)
1307 1304 unknown = remote.branches(unknown)
1308 1305 while unknown:
1309 1306 r = []
1310 1307 while unknown:
1311 1308 n = unknown.pop(0)
1312 1309 if n[0] in seen:
1313 1310 continue
1314 1311
1315 1312 self.ui.debug(_("examining %s:%s\n")
1316 1313 % (short(n[0]), short(n[1])))
1317 1314 if n[0] == nullid: # found the end of the branch
1318 1315 pass
1319 1316 elif n in seenbranch:
1320 1317 self.ui.debug(_("branch already found\n"))
1321 1318 continue
1322 1319 elif n[1] and n[1] in m: # do we know the base?
1323 1320 self.ui.debug(_("found incomplete branch %s:%s\n")
1324 1321 % (short(n[0]), short(n[1])))
1325 1322 search.append(n[0:2]) # schedule branch range for scanning
1326 1323 seenbranch.add(n)
1327 1324 else:
1328 1325 if n[1] not in seen and n[1] not in fetch:
1329 1326 if n[2] in m and n[3] in m:
1330 1327 self.ui.debug(_("found new changeset %s\n") %
1331 1328 short(n[1]))
1332 1329 fetch.add(n[1]) # earliest unknown
1333 1330 for p in n[2:4]:
1334 1331 if p in m:
1335 1332 base[p] = 1 # latest known
1336 1333
1337 1334 for p in n[2:4]:
1338 1335 if p not in req and p not in m:
1339 1336 r.append(p)
1340 1337 req.add(p)
1341 1338 seen.add(n[0])
1342 1339
1343 1340 if r:
1344 1341 reqcnt += 1
1345 1342 self.ui.debug(_("request %d: %s\n") %
1346 1343 (reqcnt, " ".join(map(short, r))))
1347 1344 for p in xrange(0, len(r), 10):
1348 1345 for b in remote.branches(r[p:p+10]):
1349 1346 self.ui.debug(_("received %s:%s\n") %
1350 1347 (short(b[0]), short(b[1])))
1351 1348 unknown.append(b)
1352 1349
1353 1350 # do binary search on the branches we found
1354 1351 while search:
1355 1352 newsearch = []
1356 1353 reqcnt += 1
1357 1354 for n, l in zip(search, remote.between(search)):
1358 1355 l.append(n[1])
1359 1356 p = n[0]
1360 1357 f = 1
1361 1358 for i in l:
1362 1359 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1363 1360 if i in m:
1364 1361 if f <= 2:
1365 1362 self.ui.debug(_("found new branch changeset %s\n") %
1366 1363 short(p))
1367 1364 fetch.add(p)
1368 1365 base[i] = 1
1369 1366 else:
1370 1367 self.ui.debug(_("narrowed branch search to %s:%s\n")
1371 1368 % (short(p), short(i)))
1372 1369 newsearch.append((p, i))
1373 1370 break
1374 1371 p, f = i, f * 2
1375 1372 search = newsearch
1376 1373
1377 1374 # sanity check our fetch list
1378 1375 for f in fetch:
1379 1376 if f in m:
1380 1377 raise error.RepoError(_("already have changeset ")
1381 1378 + short(f[:4]))
1382 1379
1383 1380 if base.keys() == [nullid]:
1384 1381 if force:
1385 1382 self.ui.warn(_("warning: repository is unrelated\n"))
1386 1383 else:
1387 1384 raise util.Abort(_("repository is unrelated"))
1388 1385
1389 1386 self.ui.debug(_("found new changesets starting at ") +
1390 1387 " ".join([short(f) for f in fetch]) + "\n")
1391 1388
1392 1389 self.ui.debug(_("%d total queries\n") % reqcnt)
1393 1390
1394 1391 return base.keys(), list(fetch), heads
1395 1392
1396 1393 def findoutgoing(self, remote, base=None, heads=None, force=False):
1397 1394 """Return list of nodes that are roots of subsets not in remote
1398 1395
1399 1396 If base dict is specified, assume that these nodes and their parents
1400 1397 exist on the remote side.
1401 1398 If a list of heads is specified, return only nodes which are heads
1402 1399 or ancestors of these heads, and return a second element which
1403 1400 contains all remote heads which get new children.
1404 1401 """
1405 1402 if base == None:
1406 1403 base = {}
1407 1404 self.findincoming(remote, base, heads, force=force)
1408 1405
1409 1406 self.ui.debug(_("common changesets up to ")
1410 1407 + " ".join(map(short, base.keys())) + "\n")
1411 1408
1412 1409 remain = set(self.changelog.nodemap)
1413 1410
1414 1411 # prune everything remote has from the tree
1415 1412 remain.remove(nullid)
1416 1413 remove = base.keys()
1417 1414 while remove:
1418 1415 n = remove.pop(0)
1419 1416 if n in remain:
1420 1417 remain.remove(n)
1421 1418 for p in self.changelog.parents(n):
1422 1419 remove.append(p)
1423 1420
1424 1421 # find every node whose parents have been pruned
1425 1422 subset = []
1426 1423 # find every remote head that will get new children
1427 1424 updated_heads = {}
1428 1425 for n in remain:
1429 1426 p1, p2 = self.changelog.parents(n)
1430 1427 if p1 not in remain and p2 not in remain:
1431 1428 subset.append(n)
1432 1429 if heads:
1433 1430 if p1 in heads:
1434 1431 updated_heads[p1] = True
1435 1432 if p2 in heads:
1436 1433 updated_heads[p2] = True
1437 1434
1438 1435 # this is the set of all roots we have to push
1439 1436 if heads:
1440 1437 return subset, updated_heads.keys()
1441 1438 else:
1442 1439 return subset
1443 1440
1444 1441 def pull(self, remote, heads=None, force=False):
1445 1442 lock = self.lock()
1446 1443 try:
1447 1444 common, fetch, rheads = self.findcommonincoming(remote, heads=heads,
1448 1445 force=force)
1449 1446 if fetch == [nullid]:
1450 1447 self.ui.status(_("requesting all changes\n"))
1451 1448
1452 1449 if not fetch:
1453 1450 self.ui.status(_("no changes found\n"))
1454 1451 return 0
1455 1452
1456 1453 if heads is None and remote.capable('changegroupsubset'):
1457 1454 heads = rheads
1458 1455
1459 1456 if heads is None:
1460 1457 cg = remote.changegroup(fetch, 'pull')
1461 1458 else:
1462 1459 if not remote.capable('changegroupsubset'):
1463 1460 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1464 1461 cg = remote.changegroupsubset(fetch, heads, 'pull')
1465 1462 return self.addchangegroup(cg, 'pull', remote.url())
1466 1463 finally:
1467 1464 lock.release()
1468 1465
1469 1466 def push(self, remote, force=False, revs=None):
1470 1467 # there are two ways to push to remote repo:
1471 1468 #
1472 1469 # addchangegroup assumes local user can lock remote
1473 1470 # repo (local filesystem, old ssh servers).
1474 1471 #
1475 1472 # unbundle assumes local user cannot lock remote repo (new ssh
1476 1473 # servers, http servers).
1477 1474
1478 1475 if remote.capable('unbundle'):
1479 1476 return self.push_unbundle(remote, force, revs)
1480 1477 return self.push_addchangegroup(remote, force, revs)
1481 1478
1482 1479 def prepush(self, remote, force, revs):
1483 1480 common = {}
1484 1481 remote_heads = remote.heads()
1485 1482 inc = self.findincoming(remote, common, remote_heads, force=force)
1486 1483
1487 1484 update, updated_heads = self.findoutgoing(remote, common, remote_heads)
1488 1485 if revs is not None:
1489 1486 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1490 1487 else:
1491 1488 bases, heads = update, self.changelog.heads()
1492 1489
1493 1490 if not bases:
1494 1491 self.ui.status(_("no changes found\n"))
1495 1492 return None, 1
1496 1493 elif not force:
1497 1494 # check if we're creating new remote heads
1498 1495 # to be a remote head after push, node must be either
1499 1496 # - unknown locally
1500 1497 # - a local outgoing head descended from update
1501 1498 # - a remote head that's known locally and not
1502 1499 # ancestral to an outgoing head
1503 1500
1504 1501 warn = 0
1505 1502
1506 1503 if remote_heads == [nullid]:
1507 1504 warn = 0
1508 1505 elif not revs and len(heads) > len(remote_heads):
1509 1506 warn = 1
1510 1507 else:
1511 1508 newheads = list(heads)
1512 1509 for r in remote_heads:
1513 1510 if r in self.changelog.nodemap:
1514 1511 desc = self.changelog.heads(r, heads)
1515 1512 l = [h for h in heads if h in desc]
1516 1513 if not l:
1517 1514 newheads.append(r)
1518 1515 else:
1519 1516 newheads.append(r)
1520 1517 if len(newheads) > len(remote_heads):
1521 1518 warn = 1
1522 1519
1523 1520 if warn:
1524 1521 self.ui.warn(_("abort: push creates new remote heads!\n"))
1525 1522 self.ui.status(_("(did you forget to merge?"
1526 1523 " use push -f to force)\n"))
1527 1524 return None, 0
1528 1525 elif inc:
1529 1526 self.ui.warn(_("note: unsynced remote changes!\n"))
1530 1527
1531 1528
1532 1529 if revs is None:
1533 1530 # use the fast path, no race possible on push
1534 1531 cg = self._changegroup(common.keys(), 'push')
1535 1532 else:
1536 1533 cg = self.changegroupsubset(update, revs, 'push')
1537 1534 return cg, remote_heads
1538 1535
1539 1536 def push_addchangegroup(self, remote, force, revs):
1540 1537 lock = remote.lock()
1541 1538 try:
1542 1539 ret = self.prepush(remote, force, revs)
1543 1540 if ret[0] is not None:
1544 1541 cg, remote_heads = ret
1545 1542 return remote.addchangegroup(cg, 'push', self.url())
1546 1543 return ret[1]
1547 1544 finally:
1548 1545 lock.release()
1549 1546
1550 1547 def push_unbundle(self, remote, force, revs):
1551 1548 # local repo finds heads on server, finds out what revs it
1552 1549 # must push. once revs transferred, if server finds it has
1553 1550 # different heads (someone else won commit/push race), server
1554 1551 # aborts.
1555 1552
1556 1553 ret = self.prepush(remote, force, revs)
1557 1554 if ret[0] is not None:
1558 1555 cg, remote_heads = ret
1559 1556 if force: remote_heads = ['force']
1560 1557 return remote.unbundle(cg, remote_heads, 'push')
1561 1558 return ret[1]
1562 1559
1563 1560 def changegroupinfo(self, nodes, source):
1564 1561 if self.ui.verbose or source == 'bundle':
1565 1562 self.ui.status(_("%d changesets found\n") % len(nodes))
1566 1563 if self.ui.debugflag:
1567 1564 self.ui.debug(_("list of changesets:\n"))
1568 1565 for node in nodes:
1569 1566 self.ui.debug("%s\n" % hex(node))
1570 1567
1571 1568 def changegroupsubset(self, bases, heads, source, extranodes=None):
1572 1569 """This function generates a changegroup consisting of all the nodes
1573 1570 that are descendants of any of the bases, and ancestors of any of
1574 1571 the heads.
1575 1572
1576 1573 It is fairly complex as determining which filenodes and which
1577 1574 manifest nodes need to be included for the changeset to be complete
1578 1575 is non-trivial.
1579 1576
1580 1577 Another wrinkle is doing the reverse, figuring out which changeset in
1581 1578 the changegroup a particular filenode or manifestnode belongs to.
1582 1579
1583 1580 The caller can specify some nodes that must be included in the
1584 1581 changegroup using the extranodes argument. It should be a dict
1585 1582 where the keys are the filenames (or 1 for the manifest), and the
1586 1583 values are lists of (node, linknode) tuples, where node is a wanted
1587 1584 node and linknode is the changelog node that should be transmitted as
1588 1585 the linkrev.
1589 1586 """
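    # Shape of the extranodes argument described above, as a sketch:
    #
    #   extranodes = {
    #       1: [(manifestnode, linknode)],        # 1 keys the manifest
    #       'foo/bar.c': [(filenode, linknode)],  # filenames key filelogs
    #   }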
1590 1587
1591 1588 if extranodes is None:
1592 1589 # can we go through the fast path ?
1593 1590 heads.sort()
1594 1591 allheads = self.heads()
1595 1592 allheads.sort()
1596 1593 if heads == allheads:
1597 1594 common = []
1598 1595 # parents of bases are known from both sides
1599 1596 for n in bases:
1600 1597 for p in self.changelog.parents(n):
1601 1598 if p != nullid:
1602 1599 common.append(p)
1603 1600 return self._changegroup(common, source)
1604 1601
1605 1602 self.hook('preoutgoing', throw=True, source=source)
1606 1603
1607 1604 # Set up some initial variables
1608 1605 # Make it easy to refer to self.changelog
1609 1606 cl = self.changelog
1610 1607 # msng is short for missing - compute the list of changesets in this
1611 1608 # changegroup.
1612 1609 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1613 1610 self.changegroupinfo(msng_cl_lst, source)
1614 1611 # Some bases may turn out to be superfluous, and some heads may be
1615 1612 # too. nodesbetween will return the minimal set of bases and heads
1616 1613 # necessary to re-create the changegroup.
1617 1614
1618 1615 # Known heads are the list of heads that it is assumed the recipient
1619 1616 # of this changegroup will know about.
1620 1617 knownheads = {}
1621 1618 # We assume that all parents of bases are known heads.
1622 1619 for n in bases:
1623 1620 for p in cl.parents(n):
1624 1621 if p != nullid:
1625 1622 knownheads[p] = 1
1626 1623 knownheads = knownheads.keys()
1627 1624 if knownheads:
1628 1625 # Now that we know what heads are known, we can compute which
1629 1626 # changesets are known. The recipient must know about all
1630 1627 # changesets required to reach the known heads from the null
1631 1628 # changeset.
1632 1629 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1633 1630 junk = None
1634 1631 # Transform the list into a set.
1635 1632 has_cl_set = set(has_cl_set)
1636 1633 else:
1637 1634 # If there were no known heads, the recipient cannot be assumed to
1638 1635 # know about any changesets.
1639 1636 has_cl_set = set()
1640 1637
1641 1638 # Make it easy to refer to self.manifest
1642 1639 mnfst = self.manifest
1643 1640 # We don't know which manifests are missing yet
1644 1641 msng_mnfst_set = {}
1645 1642 # Nor do we know which filenodes are missing.
1646 1643 msng_filenode_set = {}
1647 1644
1648 1645 junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
1649 1646 junk = None
1650 1647
1651 1648 # A changeset always belongs to itself, so the changenode lookup
1652 1649 # function for a changenode is identity.
1653 1650 def identity(x):
1654 1651 return x
1655 1652
1656 1653 # A function generating function. Sets up an environment for the
1657 1654 # inner function.
1658 1655 def cmp_by_rev_func(revlog):
1659 1656 # Compare two nodes by their revision number in the environment's
1660 1657 # revision history. Since the revision number both represents the
1661 1658 # most efficient order to read the nodes in, and represents a
1662 1659 # topological sorting of the nodes, this function is often useful.
1663 1660 def cmp_by_rev(a, b):
1664 1661 return cmp(revlog.rev(a), revlog.rev(b))
1665 1662 return cmp_by_rev
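# Usage sketch (nodelist is a hypothetical name): sorting a node list
# into ascending revision order, which is both the cheapest order to
# read the nodes in and a valid topological order:
#
#     nodelist.sort(cmp_by_rev_func(cl))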
1666 1663
1667 1664 # If we determine that a particular file or manifest node must be a
1668 1665 # node that the recipient of the changegroup will already have, we can
1669 1666 # also assume the recipient will have all the parents. This function
1670 1667 # prunes them from the set of missing nodes.
1671 1668 def prune_parents(revlog, hasset, msngset):
1672 1669 haslst = hasset.keys()
1673 1670 haslst.sort(cmp_by_rev_func(revlog))
1674 1671 for node in haslst:
1675 1672 parentlst = [p for p in revlog.parents(node) if p != nullid]
1676 1673 while parentlst:
1677 1674 n = parentlst.pop()
1678 1675 if n not in hasset:
1679 1676 hasset[n] = 1
1680 1677 p = [p for p in revlog.parents(n) if p != nullid]
1681 1678 parentlst.extend(p)
1682 1679 for n in hasset:
1683 1680 msngset.pop(n, None)
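# Worked example: if hasset starts as {N: 1} and N's ancestors are A1
# and A2, the loop above walks N -> A1 -> A2, adds both ancestors to
# hasset, and then pops all three from msngset, leaving only nodes the
# recipient truly lacks.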
1684 1681
1685 1682 # This is a function generating function used to set up an environment
1686 1683 # for the inner function to execute in.
1687 1684 def manifest_and_file_collector(changedfileset):
1688 1685 # This function gathers information from each changeset
1689 1686 # node that goes out as part of the changegroup. The
1690 1687 # information gathered is a list of which manifest nodes are
1691 1688 # potentially required (the recipient may already have them)
1692 1689 # and the total list of all files which were changed in any
1693 1690 # changeset in the changegroup.
1694 1691 #
1695 1692 # We also remember the first changenode we saw any manifest
1696 1693 # referenced by so we can later determine which changenode 'owns'
1697 1694 # the manifest.
1698 1695 def collect_manifests_and_files(clnode):
1699 1696 c = cl.read(clnode)
1700 1697 for f in c[3]:
1701 1698 # This is to make sure we only have one instance of each
1702 1699 # filename string for each filename.
1703 1700 changedfileset.setdefault(f, f)
1704 1701 msng_mnfst_set.setdefault(c[0], clnode)
1705 1702 return collect_manifests_and_files
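# Note on the tuple indices used above: cl.read(clnode) returns the
# parsed changelog entry, in which c[0] is the manifest node and c[3]
# is the list of files touched by the changeset.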
1706 1703
1707 1704 # Figure out which manifest nodes (of the ones we think might be part
1708 1705 # of the changegroup) the recipient must know about and remove them
1709 1706 # from the changegroup.
1710 1707 def prune_manifests():
1711 1708 has_mnfst_set = {}
1712 1709 for n in msng_mnfst_set:
1713 1710 # If a 'missing' manifest thinks it belongs to a changenode
1714 1711 # the recipient is assumed to have, obviously the recipient
1715 1712 # must have that manifest.
1716 1713 linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
1717 1714 if linknode in has_cl_set:
1718 1715 has_mnfst_set[n] = 1
1719 1716 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1720 1717
1721 1718 # Use the information collected in collect_manifests_and_files to say
1722 1719 # which changenode any manifestnode belongs to.
1723 1720 def lookup_manifest_link(mnfstnode):
1724 1721 return msng_mnfst_set[mnfstnode]
1725 1722
1726 1723 # A function generating function that sets up the initial environment
1727 1724 # for the inner function.
1728 1725 def filenode_collector(changedfiles):
1729 1726 next_rev = [0]
1730 1727 # This gathers information from each manifestnode included in the
1731 1728 # changegroup about which filenodes the manifest node references
1732 1729 # so we can include those in the changegroup too.
1733 1730 #
1734 1731 # It also remembers which changenode each filenode belongs to. It
1735 1732 # does this by assuming that a filenode belongs to the same
1736 1733 # changenode as the first manifest that references it.
1737 1734 def collect_msng_filenodes(mnfstnode):
1738 1735 r = mnfst.rev(mnfstnode)
1739 1736 if r == next_rev[0]:
1740 1737 # If this rev immediately follows the last one we looked
1741 1738 # at, reading just the delta is enough.
1742 1739 deltamf = mnfst.readdelta(mnfstnode)
1743 1740 # For each line in the delta
1744 1741 for f, fnode in deltamf.iteritems():
1745 1742 f = changedfiles.get(f, None)
1746 1743 # And if the file is in the list of files we care
1747 1744 # about.
1748 1745 if f is not None:
1749 1746 # Get the changenode this manifest belongs to
1750 1747 clnode = msng_mnfst_set[mnfstnode]
1751 1748 # Create the set of filenodes for the file if
1752 1749 # there isn't one already.
1753 1750 ndset = msng_filenode_set.setdefault(f, {})
1754 1751 # And set the filenode's changelog node to the
1755 1752 # manifest's if it hasn't been set already.
1756 1753 ndset.setdefault(fnode, clnode)
1757 1754 else:
1758 1755 # Otherwise we need a full manifest.
1759 1756 m = mnfst.read(mnfstnode)
1760 1757 # For every file we care about.
1761 1758 for f in changedfiles:
1762 1759 fnode = m.get(f, None)
1763 1760 # If it's in the manifest
1764 1761 if fnode is not None:
1765 1762 # See comments above.
1766 1763 clnode = msng_mnfst_set[mnfstnode]
1767 1764 ndset = msng_filenode_set.setdefault(f, {})
1768 1765 ndset.setdefault(fnode, clnode)
1769 1766 # Remember the revision we hope to see next.
1770 1767 next_rev[0] = r + 1
1771 1768 return collect_msng_filenodes
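# The two paths above trade work for generality: for consecutive revs,
# deltamf.iteritems() yields only the (filename, filenode) pairs that
# changed since the previous manifest; otherwise mnfst.read() decodes
# the full manifest and every changed file is probed with m.get().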
1772 1769
1773 1770 # We have a list of filenodes we think we need for a file; let's remove
1774 1771 # all those we know the recipient must have.
1775 1772 def prune_filenodes(f, filerevlog):
1776 1773 msngset = msng_filenode_set[f]
1777 1774 hasset = {}
1778 1775 # If a 'missing' filenode thinks it belongs to a changenode we
1779 1776 # assume the recipient must have, then the recipient must have
1780 1777 # that filenode.
1781 1778 for n in msngset:
1782 1779 clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
1783 1780 if clnode in has_cl_set:
1784 1781 hasset[n] = 1
1785 1782 prune_parents(filerevlog, hasset, msngset)
1786 1783
1787 1784 # A function generating function that sets up a context for the
1788 1785 # inner function.
1789 1786 def lookup_filenode_link_func(fname):
1790 1787 msngset = msng_filenode_set[fname]
1791 1788 # Lookup the changenode the filenode belongs to.
1792 1789 def lookup_filenode_link(fnode):
1793 1790 return msngset[fnode]
1794 1791 return lookup_filenode_link
1795 1792
1796 1793 # Add the nodes that were explicitly requested.
1797 1794 def add_extra_nodes(name, nodes):
1798 1795 if not extranodes or name not in extranodes:
1799 1796 return
1800 1797
1801 1798 for node, linknode in extranodes[name]:
1802 1799 if node not in nodes:
1803 1800 nodes[node] = linknode
1804 1801
1805 1802 # Now that we have all these utility functions to help out and
1806 1803 # logically divide up the task, generate the group.
1807 1804 def gengroup():
1808 1805 # The set of changed files starts empty.
1809 1806 changedfiles = {}
1810 1807 # Create a changenode group generator that will call our functions
1811 1808 # back to lookup the owning changenode and collect information.
1812 1809 group = cl.group(msng_cl_lst, identity,
1813 1810 manifest_and_file_collector(changedfiles))
1814 1811 for chnk in group:
1815 1812 yield chnk
1816 1813
1817 1814 # The list of manifests has been collected by the generator
1818 1815 # calling our functions back.
1819 1816 prune_manifests()
1820 1817 add_extra_nodes(1, msng_mnfst_set)
1821 1818 msng_mnfst_lst = msng_mnfst_set.keys()
1822 1819 # Sort the manifestnodes by revision number.
1823 1820 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1824 1821 # Create a generator for the manifestnodes that calls our lookup
1825 1822 # and data collection functions back.
1826 1823 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1827 1824 filenode_collector(changedfiles))
1828 1825 for chnk in group:
1829 1826 yield chnk
1830 1827
1831 1828 # These are no longer needed, dereference and toss the memory for
1832 1829 # them.
1833 1830 msng_mnfst_lst = None
1834 1831 msng_mnfst_set.clear()
1835 1832
1836 1833 if extranodes:
1837 1834 for fname in extranodes:
1838 1835 if isinstance(fname, int):
1839 1836 continue
1840 1837 msng_filenode_set.setdefault(fname, {})
1841 1838 changedfiles[fname] = 1
1842 1839 # Go through all our files in order sorted by name.
1843 1840 for fname in sorted(changedfiles):
1844 1841 filerevlog = self.file(fname)
1845 1842 if not len(filerevlog):
1846 1843 raise util.Abort(_("empty or missing revlog for %s") % fname)
1847 1844 # Toss out the filenodes that the recipient isn't really
1848 1845 # missing.
1849 1846 if fname in msng_filenode_set:
1850 1847 prune_filenodes(fname, filerevlog)
1851 1848 add_extra_nodes(fname, msng_filenode_set[fname])
1852 1849 msng_filenode_lst = msng_filenode_set[fname].keys()
1853 1850 else:
1854 1851 msng_filenode_lst = []
1855 1852 # If any filenodes are left, generate the group for them,
1856 1853 # otherwise don't bother.
1857 1854 if len(msng_filenode_lst) > 0:
1858 1855 yield changegroup.chunkheader(len(fname))
1859 1856 yield fname
1860 1857 # Sort the filenodes by their revision number.
1861 1858 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1862 1859 # Create a group generator and only pass in a changenode
1863 1860 # lookup function as we need to collect no information
1864 1861 # from filenodes.
1865 1862 group = filerevlog.group(msng_filenode_lst,
1866 1863 lookup_filenode_link_func(fname))
1867 1864 for chnk in group:
1868 1865 yield chnk
1869 1866 if fname in msng_filenode_set:
1870 1867 # Don't need this anymore, toss it to free memory.
1871 1868 del msng_filenode_set[fname]
1872 1869 # Signal that no more groups are left.
1873 1870 yield changegroup.closechunk()
1874 1871
1875 1872 if msng_cl_lst:
1876 1873 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1877 1874
1878 1875 return util.chunkbuffer(gengroup())
1879 1876
1880 1877 def changegroup(self, basenodes, source):
1881 1878 # to avoid a race we use changegroupsubset() (issue1320)
1882 1879 return self.changegroupsubset(basenodes, self.heads(), source)
1883 1880
1884 1881 def _changegroup(self, common, source):
1885 1882 """Generate a changegroup of all nodes that we have that a recipient
1886 1883 doesn't.
1887 1884
1888 1885 This is much easier than the previous function as we can assume that
1889 1886 the recipient has any changenode we aren't sending them.
1890 1887
1891 1888 common is the set of common nodes between remote and self"""
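# Sketch of the contract relied on below: cl.findmissing(common)
# returns the changelog nodes that are not ancestors of anything in
# common, i.e. exactly the changesets the recipient should be missing.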
1892 1889
1893 1890 self.hook('preoutgoing', throw=True, source=source)
1894 1891
1895 1892 cl = self.changelog
1896 1893 nodes = cl.findmissing(common)
1897 1894 revset = set([cl.rev(n) for n in nodes])
1898 1895 self.changegroupinfo(nodes, source)
1899 1896
1900 1897 def identity(x):
1901 1898 return x
1902 1899
1903 1900 def gennodelst(log):
1904 1901 for r in log:
1905 1902 if log.linkrev(r) in revset:
1906 1903 yield log.node(r)
1907 1904
1908 1905 def changed_file_collector(changedfileset):
1909 1906 def collect_changed_files(clnode):
1910 1907 c = cl.read(clnode)
1911 1908 for fname in c[3]:
1912 1909 changedfileset[fname] = 1
1913 1910 return collect_changed_files
1914 1911
1915 1912 def lookuprevlink_func(revlog):
1916 1913 def lookuprevlink(n):
1917 1914 return cl.node(revlog.linkrev(revlog.rev(n)))
1918 1915 return lookuprevlink
1919 1916
1920 1917 def gengroup():
1921 1918 # construct a list of all changed files
1922 1919 changedfiles = {}
1923 1920
1924 1921 for chnk in cl.group(nodes, identity,
1925 1922 changed_file_collector(changedfiles)):
1926 1923 yield chnk
1927 1924
1928 1925 mnfst = self.manifest
1929 1926 nodeiter = gennodelst(mnfst)
1930 1927 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1931 1928 yield chnk
1932 1929
1933 1930 for fname in sorted(changedfiles):
1934 1931 filerevlog = self.file(fname)
1935 1932 if not len(filerevlog):
1936 1933 raise util.Abort(_("empty or missing revlog for %s") % fname)
1937 1934 nodeiter = gennodelst(filerevlog)
1938 1935 nodeiter = list(nodeiter)
1939 1936 if nodeiter:
1940 1937 yield changegroup.chunkheader(len(fname))
1941 1938 yield fname
1942 1939 lookup = lookuprevlink_func(filerevlog)
1943 1940 for chnk in filerevlog.group(nodeiter, lookup):
1944 1941 yield chnk
1945 1942
1946 1943 yield changegroup.closechunk()
1947 1944
1948 1945 if nodes:
1949 1946 self.hook('outgoing', node=hex(nodes[0]), source=source)
1950 1947
1951 1948 return util.chunkbuffer(gengroup())
1952 1949
1953 1950 def addchangegroup(self, source, srctype, url, emptyok=False):
1954 1951 """add changegroup to repo.
1955 1952
1956 1953 return values:
1957 1954 - nothing changed or no source: 0
1958 1955 - more heads than before: 1+added heads (2..n)
1959 1956 - fewer heads than before: -1-removed heads (-2..-n)
1960 1957 - number of heads stays the same: 1
1961 1958 """
1962 1959 def csmap(x):
1963 1960 self.ui.debug(_("add changeset %s\n") % short(x))
1964 1961 return len(cl)
1965 1962
1966 1963 def revmap(x):
1967 1964 return cl.rev(x)
1968 1965
1969 1966 if not source:
1970 1967 return 0
1971 1968
1972 1969 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1973 1970
1974 1971 changesets = files = revisions = 0
1975 1972
1976 1973 # write changelog data to temp files so concurrent readers will not see
1977 1974 # an inconsistent view
1978 1975 cl = self.changelog
1979 1976 cl.delayupdate()
1980 1977 oldheads = len(cl.heads())
1981 1978
1982 1979 tr = self.transaction()
1983 1980 try:
1984 1981 trp = weakref.proxy(tr)
1985 1982 # pull off the changeset group
1986 1983 self.ui.status(_("adding changesets\n"))
1987 1984 clstart = len(cl)
1988 1985 chunkiter = changegroup.chunkiter(source)
1989 1986 if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
1990 1987 raise util.Abort(_("received changelog group is empty"))
1991 1988 clend = len(cl)
1992 1989 changesets = clend - clstart
1993 1990
1994 1991 # pull off the manifest group
1995 1992 self.ui.status(_("adding manifests\n"))
1996 1993 chunkiter = changegroup.chunkiter(source)
1997 1994 # no need to check for empty manifest group here:
1998 1995 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1999 1996 # no new manifest will be created and the manifest group will
2000 1997 # be empty during the pull
2001 1998 self.manifest.addgroup(chunkiter, revmap, trp)
2002 1999
2003 2000 # process the files
2004 2001 self.ui.status(_("adding file changes\n"))
2005 2002 while 1:
2006 2003 f = changegroup.getchunk(source)
2007 2004 if not f:
2008 2005 break
2009 2006 self.ui.debug(_("adding %s revisions\n") % f)
2010 2007 fl = self.file(f)
2011 2008 o = len(fl)
2012 2009 chunkiter = changegroup.chunkiter(source)
2013 2010 if fl.addgroup(chunkiter, revmap, trp) is None:
2014 2011 raise util.Abort(_("received file revlog group is empty"))
2015 2012 revisions += len(fl) - o
2016 2013 files += 1
2017 2014
2018 2015 newheads = len(cl.heads())
2019 2016 heads = ""
2020 2017 if oldheads and newheads != oldheads:
2021 2018 heads = _(" (%+d heads)") % (newheads - oldheads)
2022 2019
2023 2020 self.ui.status(_("added %d changesets"
2024 2021 " with %d changes to %d files%s\n")
2025 2022 % (changesets, revisions, files, heads))
2026 2023
2027 2024 if changesets > 0:
2028 2025 p = lambda: cl.writepending() and self.root or ""
2029 2026 self.hook('pretxnchangegroup', throw=True,
2030 2027 node=hex(cl.node(clstart)), source=srctype,
2031 2028 url=url, pending=p)
2032 2029
2033 2030 # make changelog see real files again
2034 2031 cl.finalize(trp)
2035 2032
2036 2033 tr.close()
2037 2034 finally:
2038 2035 del tr
2039 2036
2040 2037 if changesets > 0:
2041 2038 # forcefully update the on-disk branch cache
2042 2039 self.ui.debug(_("updating the branch cache\n"))
2043 2040 self.branchtags()
2044 2041 self.hook("changegroup", node=hex(cl.node(clstart)),
2045 2042 source=srctype, url=url)
2046 2043
2047 2044 for i in xrange(clstart, clend):
2048 2045 self.hook("incoming", node=hex(cl.node(i)),
2049 2046 source=srctype, url=url)
2050 2047
2051 2048 # never return 0 here:
2052 2049 if newheads < oldheads:
2053 2050 return newheads - oldheads - 1
2054 2051 else:
2055 2052 return newheads - oldheads + 1
2056 2053
2057 2054
2058 2055 def stream_in(self, remote):
2059 2056 fp = remote.stream_out()
2060 2057 l = fp.readline()
2061 2058 try:
2062 2059 resp = int(l)
2063 2060 except ValueError:
2064 2061 raise error.ResponseError(
2065 2062 _('Unexpected response from remote server:'), l)
2066 2063 if resp == 1:
2067 2064 raise util.Abort(_('operation forbidden by server'))
2068 2065 elif resp == 2:
2069 2066 raise util.Abort(_('locking the remote repository failed'))
2070 2067 elif resp != 0:
2071 2068 raise util.Abort(_('the server sent an unknown error code'))
2072 2069 self.ui.status(_('streaming all changes\n'))
2073 2070 l = fp.readline()
2074 2071 try:
2075 2072 total_files, total_bytes = map(int, l.split(' ', 1))
2076 2073 except (ValueError, TypeError):
2077 2074 raise error.ResponseError(
2078 2075 _('Unexpected response from remote server:'), l)
2079 2076 self.ui.status(_('%d files to transfer, %s of data\n') %
2080 2077 (total_files, util.bytecount(total_bytes)))
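# Wire format as parsed here: after the status code, the server sends
# '<total_files> <total_bytes>\n', then for each file a header line of
# the form '<name>\0<size>\n' followed by exactly <size> bytes of raw
# file data.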
2081 2078 start = time.time()
2082 2079 for i in xrange(total_files):
2083 2080 # XXX doesn't support '\n' or '\r' in filenames
2084 2081 l = fp.readline()
2085 2082 try:
2086 2083 name, size = l.split('\0', 1)
2087 2084 size = int(size)
2088 2085 except (ValueError, TypeError):
2089 2086 raise error.ResponseError(
2090 2087 _('Unexpected response from remote server:'), l)
2091 2088 self.ui.debug(_('adding %s (%s)\n') % (name, util.bytecount(size)))
2092 2089 ofp = self.sopener(name, 'w')
2093 2090 for chunk in util.filechunkiter(fp, limit=size):
2094 2091 ofp.write(chunk)
2095 2092 ofp.close()
2096 2093 elapsed = time.time() - start
2097 2094 if elapsed <= 0:
2098 2095 elapsed = 0.001
2099 2096 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2100 2097 (util.bytecount(total_bytes), elapsed,
2101 2098 util.bytecount(total_bytes / elapsed)))
2102 2099 self.invalidate()
2103 2100 return len(self.heads()) + 1
2104 2101
2105 2102 def clone(self, remote, heads=[], stream=False):
2106 2103 '''clone remote repository.
2107 2104
2108 2105 keyword arguments:
2109 2106 heads: list of revs to clone (forces use of pull)
2110 2107 stream: use streaming clone if possible'''
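# Usage sketch (hypothetical caller): self.clone(remotepeer,
# stream=True) attempts a streaming clone, and falls back to pull()
# when specific heads were requested or the remote does not advertise
# the 'stream' capability.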
2111 2108
2112 2109 # now, all clients that can request uncompressed clones can
2113 2110 # read repo formats supported by all servers that can serve
2114 2111 # them.
2115 2112
2116 2113 # if revlog format changes, client will have to check version
2117 2114 # and format flags on "stream" capability, and use
2118 2115 # uncompressed only if compatible.
2119 2116
2120 2117 if stream and not heads and remote.capable('stream'):
2121 2118 return self.stream_in(remote)
2122 2119 return self.pull(remote, heads)
2123 2120
2124 2121 # used to avoid circular references so destructors work
2125 2122 def aftertrans(files):
2126 2123 renamefiles = [tuple(t) for t in files]
2127 2124 def a():
2128 2125 for src, dest in renamefiles:
2129 2126 util.rename(src, dest)
2130 2127 return a
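# Usage sketch (the call shape below is an assumption, not taken from
# this file):
#
#     renames = [(journalpath, undopath)]
#     tr = transaction.transaction(ui.warn, opener, journalpath,
#                                  aftertrans(renames))
#
# Because aftertrans captures only plain tuples, the transaction holds
# no reference back to the repository, so destructors can run promptly.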
2131 2128
2132 2129 def instance(ui, path, create):
2133 2130 return localrepository(ui, util.drop_scheme('file', path), create)
2134 2131
2135 2132 def islocal(path):
2136 2133 return True