transaction: support multiple, separate transactions...
Henrik Stuart
r8072:ecf77954 default
@@ -1,2167 +1,2168 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import bin, hex, nullid, nullrev, short
9 9 from i18n import _
10 10 import repo, changegroup
11 11 import changelog, dirstate, filelog, manifest, context, weakref
12 12 import lock, transaction, stat, errno, ui, store, encoding
13 13 import os, time, util, extensions, hook, inspect, error
14 14 import match as match_
15 15 import merge as merge_
16 16
17 17 class localrepository(repo.repository):
18 18 capabilities = util.set(('lookup', 'changegroupsubset'))
19 19 supported = ('revlogv1', 'store', 'fncache')
20 20
21 21 def __init__(self, parentui, path=None, create=0):
22 22 repo.repository.__init__(self)
23 23 self.root = os.path.realpath(path)
24 24 self.path = os.path.join(self.root, ".hg")
25 25 self.origroot = path
26 26 self.opener = util.opener(self.path)
27 27 self.wopener = util.opener(self.root)
28 28
29 29 if not os.path.isdir(self.path):
30 30 if create:
31 31 if not os.path.exists(path):
32 32 os.mkdir(path)
33 33 os.mkdir(self.path)
34 34 requirements = ["revlogv1"]
35 35 if parentui.configbool('format', 'usestore', True):
36 36 os.mkdir(os.path.join(self.path, "store"))
37 37 requirements.append("store")
38 38 if parentui.configbool('format', 'usefncache', True):
39 39 requirements.append("fncache")
40 40 # create an invalid changelog
41 41 self.opener("00changelog.i", "a").write(
42 42 '\0\0\0\2' # represents revlogv2
43 43 ' dummy changelog to prevent using the old repo layout'
44 44 )
45 45 reqfile = self.opener("requires", "w")
46 46 for r in requirements:
47 47 reqfile.write("%s\n" % r)
48 48 reqfile.close()
49 49 else:
50 50 raise error.RepoError(_("repository %s not found") % path)
51 51 elif create:
52 52 raise error.RepoError(_("repository %s already exists") % path)
53 53 else:
54 54 # find requirements
55 55 requirements = []
56 56 try:
57 57 requirements = self.opener("requires").read().splitlines()
58 58 for r in requirements:
59 59 if r not in self.supported:
60 60 raise error.RepoError(_("requirement '%s' not supported") % r)
61 61 except IOError, inst:
62 62 if inst.errno != errno.ENOENT:
63 63 raise
64 64
65 65 self.store = store.store(requirements, self.path, util.opener)
66 66 self.spath = self.store.path
67 67 self.sopener = self.store.opener
68 68 self.sjoin = self.store.join
69 69 self.opener.createmode = self.store.createmode
70 70
71 71 self.ui = ui.ui(parentui=parentui)
72 72 try:
73 73 self.ui.readconfig(self.join("hgrc"), self.root)
74 74 extensions.loadall(self.ui)
75 75 except IOError:
76 76 pass
77 77
78 78 self.tagscache = None
79 79 self._tagstypecache = None
80 80 self.branchcache = None
81 81 self._ubranchcache = None # UTF-8 version of branchcache
82 82 self._branchcachetip = None
83 83 self.nodetagscache = None
84 84 self.filterpats = {}
85 85 self._datafilters = {}
86 86 self._transref = self._lockref = self._wlockref = None
87 87
88 88 def __getattr__(self, name):
89 89 if name == 'changelog':
90 90 self.changelog = changelog.changelog(self.sopener)
91 91 if 'HG_PENDING' in os.environ:
92 92 p = os.environ['HG_PENDING']
93 93 if p.startswith(self.root):
94 94 self.changelog.readpending('00changelog.i.a')
95 95 self.sopener.defversion = self.changelog.version
96 96 return self.changelog
97 97 if name == 'manifest':
98 98 self.changelog
99 99 self.manifest = manifest.manifest(self.sopener)
100 100 return self.manifest
101 101 if name == 'dirstate':
102 102 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
103 103 return self.dirstate
104 104 else:
105 105 raise AttributeError(name)
106 106
107 107 def __getitem__(self, changeid):
108 108 if changeid == None:
109 109 return context.workingctx(self)
110 110 return context.changectx(self, changeid)
111 111
112 112 def __nonzero__(self):
113 113 return True
114 114
115 115 def __len__(self):
116 116 return len(self.changelog)
117 117
118 118 def __iter__(self):
119 119 for i in xrange(len(self)):
120 120 yield i
121 121
122 122 def url(self):
123 123 return 'file:' + self.root
124 124
125 125 def hook(self, name, throw=False, **args):
126 126 return hook.hook(self.ui, self, name, throw, **args)
127 127
128 128 tag_disallowed = ':\r\n'
129 129
130 130 def _tag(self, names, node, message, local, user, date, parent=None,
131 131 extra={}):
132 132 use_dirstate = parent is None
133 133
134 134 if isinstance(names, str):
135 135 allchars = names
136 136 names = (names,)
137 137 else:
138 138 allchars = ''.join(names)
139 139 for c in self.tag_disallowed:
140 140 if c in allchars:
141 141 raise util.Abort(_('%r cannot be used in a tag name') % c)
142 142
143 143 for name in names:
144 144 self.hook('pretag', throw=True, node=hex(node), tag=name,
145 145 local=local)
146 146
147 147 def writetags(fp, names, munge, prevtags):
148 148 fp.seek(0, 2)
149 149 if prevtags and prevtags[-1] != '\n':
150 150 fp.write('\n')
151 151 for name in names:
152 152 m = munge and munge(name) or name
153 153 if self._tagstypecache and name in self._tagstypecache:
154 154 old = self.tagscache.get(name, nullid)
155 155 fp.write('%s %s\n' % (hex(old), m))
156 156 fp.write('%s %s\n' % (hex(node), m))
157 157 fp.close()
158 158
159 159 prevtags = ''
160 160 if local:
161 161 try:
162 162 fp = self.opener('localtags', 'r+')
163 163 except IOError:
164 164 fp = self.opener('localtags', 'a')
165 165 else:
166 166 prevtags = fp.read()
167 167
168 168 # local tags are stored in the current charset
169 169 writetags(fp, names, None, prevtags)
170 170 for name in names:
171 171 self.hook('tag', node=hex(node), tag=name, local=local)
172 172 return
173 173
174 174 if use_dirstate:
175 175 try:
176 176 fp = self.wfile('.hgtags', 'rb+')
177 177 except IOError:
178 178 fp = self.wfile('.hgtags', 'ab')
179 179 else:
180 180 prevtags = fp.read()
181 181 else:
182 182 try:
183 183 prevtags = self.filectx('.hgtags', parent).data()
184 184 except error.LookupError:
185 185 pass
186 186 fp = self.wfile('.hgtags', 'wb')
187 187 if prevtags:
188 188 fp.write(prevtags)
189 189
190 190 # committed tags are stored in UTF-8
191 191 writetags(fp, names, encoding.fromlocal, prevtags)
192 192
193 193 if use_dirstate and '.hgtags' not in self.dirstate:
194 194 self.add(['.hgtags'])
195 195
196 196 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
197 197 extra=extra)
198 198
199 199 for name in names:
200 200 self.hook('tag', node=hex(node), tag=name, local=local)
201 201
202 202 return tagnode
203 203
204 204 def tag(self, names, node, message, local, user, date):
205 205 '''tag a revision with one or more symbolic names.
206 206
207 207 names is a list of strings or, when adding a single tag, names may be a
208 208 string.
209 209
210 210 if local is True, the tags are stored in a per-repository file.
211 211 otherwise, they are stored in the .hgtags file, and a new
212 212 changeset is committed with the change.
213 213
214 214 keyword arguments:
215 215
216 216 local: whether to store tags in non-version-controlled file
217 217 (default False)
218 218
219 219 message: commit message to use if committing
220 220
221 221 user: name of user to use if committing
222 222
223 223 date: date tuple to use if committing'''
224 224
225 225 for x in self.status()[:5]:
226 226 if '.hgtags' in x:
227 227 raise util.Abort(_('working copy of .hgtags is changed '
228 228 '(please commit .hgtags manually)'))
229 229
230 230 self.tags() # instantiate the cache
231 231 self._tag(names, node, message, local, user, date)
232 232
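
[Editor's sketch, not part of the diff: a minimal usage example of the tag() API above, assuming `repo` is an open localrepository; the user string and tag names are illustrative.]

    node = repo.lookup('tip')
    # global tag: writes .hgtags and commits the change
    repo.tag('v1.0', node, 'Added tag v1.0', False,
             'Example User <user@example.com>', None)
    # local tag: recorded only in .hg/localtags, nothing is committed
    repo.tag(['nightly'], node, '', True,
             'Example User <user@example.com>', None)
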
233 233 def tags(self):
234 234 '''return a mapping of tag to node'''
235 235 if self.tagscache:
236 236 return self.tagscache
237 237
238 238 globaltags = {}
239 239 tagtypes = {}
240 240
241 241 def readtags(lines, fn, tagtype):
242 242 filetags = {}
243 243 count = 0
244 244
245 245 def warn(msg):
246 246 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
247 247
248 248 for l in lines:
249 249 count += 1
250 250 if not l:
251 251 continue
252 252 s = l.split(" ", 1)
253 253 if len(s) != 2:
254 254 warn(_("cannot parse entry"))
255 255 continue
256 256 node, key = s
257 257 key = encoding.tolocal(key.strip()) # stored in UTF-8
258 258 try:
259 259 bin_n = bin(node)
260 260 except TypeError:
261 261 warn(_("node '%s' is not well formed") % node)
262 262 continue
263 263 if bin_n not in self.changelog.nodemap:
264 264 warn(_("tag '%s' refers to unknown node") % key)
265 265 continue
266 266
267 267 h = []
268 268 if key in filetags:
269 269 n, h = filetags[key]
270 270 h.append(n)
271 271 filetags[key] = (bin_n, h)
272 272
273 273 for k, nh in filetags.iteritems():
274 274 if k not in globaltags:
275 275 globaltags[k] = nh
276 276 tagtypes[k] = tagtype
277 277 continue
278 278
279 279 # we prefer the global tag if:
280 280 # it supersedes us OR
281 281 # mutual supersedes and it has a higher rank
282 282 # otherwise we win because we're tip-most
283 283 an, ah = nh
284 284 bn, bh = globaltags[k]
285 285 if (bn != an and an in bh and
286 286 (bn not in ah or len(bh) > len(ah))):
287 287 an = bn
288 288 ah.extend([n for n in bh if n not in ah])
289 289 globaltags[k] = an, ah
290 290 tagtypes[k] = tagtype
291 291
292 292 # read the tags file from each head, ending with the tip
293 293 f = None
294 294 for rev, node, fnode in self._hgtagsnodes():
295 295 f = (f and f.filectx(fnode) or
296 296 self.filectx('.hgtags', fileid=fnode))
297 297 readtags(f.data().splitlines(), f, "global")
298 298
299 299 try:
300 300 data = encoding.fromlocal(self.opener("localtags").read())
301 301 # localtags are stored in the local character set
302 302 # while the internal tag table is stored in UTF-8
303 303 readtags(data.splitlines(), "localtags", "local")
304 304 except IOError:
305 305 pass
306 306
307 307 self.tagscache = {}
308 308 self._tagstypecache = {}
309 309 for k, nh in globaltags.iteritems():
310 310 n = nh[0]
311 311 if n != nullid:
312 312 self.tagscache[k] = n
313 313 self._tagstypecache[k] = tagtypes[k]
314 314 self.tagscache['tip'] = self.changelog.tip()
315 315 return self.tagscache
316 316
317 317 def tagtype(self, tagname):
318 318 '''
319 319 return the type of the given tag. result can be:
320 320
321 321 'local' : a local tag
322 322 'global' : a global tag
323 323 None : tag does not exist
324 324 '''
325 325
326 326 self.tags()
327 327
328 328 return self._tagstypecache.get(tagname)
329 329
330 330 def _hgtagsnodes(self):
331 331 heads = self.heads()
332 332 heads.reverse()
333 333 last = {}
334 334 ret = []
335 335 for node in heads:
336 336 c = self[node]
337 337 rev = c.rev()
338 338 try:
339 339 fnode = c.filenode('.hgtags')
340 340 except error.LookupError:
341 341 continue
342 342 ret.append((rev, node, fnode))
343 343 if fnode in last:
344 344 ret[last[fnode]] = None
345 345 last[fnode] = len(ret) - 1
346 346 return [item for item in ret if item]
347 347
348 348 def tagslist(self):
349 349 '''return a list of tags ordered by revision'''
350 350 l = []
351 351 for t, n in self.tags().iteritems():
352 352 try:
353 353 r = self.changelog.rev(n)
354 354 except:
355 355 r = -2 # sort to the beginning of the list if unknown
356 356 l.append((r, t, n))
357 357 return [(t, n) for r, t, n in util.sort(l)]
358 358
359 359 def nodetags(self, node):
360 360 '''return the tags associated with a node'''
361 361 if not self.nodetagscache:
362 362 self.nodetagscache = {}
363 363 for t, n in self.tags().iteritems():
364 364 self.nodetagscache.setdefault(n, []).append(t)
365 365 return self.nodetagscache.get(node, [])
366 366
367 367 def _branchtags(self, partial, lrev):
368 368 # TODO: rename this function?
369 369 tiprev = len(self) - 1
370 370 if lrev != tiprev:
371 371 self._updatebranchcache(partial, lrev+1, tiprev+1)
372 372 self._writebranchcache(partial, self.changelog.tip(), tiprev)
373 373
374 374 return partial
375 375
376 376 def _branchheads(self):
377 377 tip = self.changelog.tip()
378 378 if self.branchcache is not None and self._branchcachetip == tip:
379 379 return self.branchcache
380 380
381 381 oldtip = self._branchcachetip
382 382 self._branchcachetip = tip
383 383 if self.branchcache is None:
384 384 self.branchcache = {} # avoid recursion in changectx
385 385 else:
386 386 self.branchcache.clear() # keep using the same dict
387 387 if oldtip is None or oldtip not in self.changelog.nodemap:
388 388 partial, last, lrev = self._readbranchcache()
389 389 else:
390 390 lrev = self.changelog.rev(oldtip)
391 391 partial = self._ubranchcache
392 392
393 393 self._branchtags(partial, lrev)
394 394 # this private cache holds all heads (not just tips)
395 395 self._ubranchcache = partial
396 396
397 397 # the branch cache is stored on disk as UTF-8, but in the local
398 398 # charset internally
399 399 for k, v in partial.iteritems():
400 400 self.branchcache[encoding.tolocal(k)] = v
401 401 return self.branchcache
402 402
403 403
404 404 def branchtags(self):
405 405 '''return a dict where branch names map to the tipmost head of
406 406 the branch, open heads come before closed'''
407 407 bt = {}
408 408 for bn, heads in self._branchheads().iteritems():
409 409 head = None
410 410 for i in range(len(heads)-1, -1, -1):
411 411 h = heads[i]
412 412 if 'close' not in self.changelog.read(h)[5]:
413 413 head = h
414 414 break
415 415 # no open heads were found
416 416 if head is None:
417 417 head = heads[-1]
418 418 bt[bn] = head
419 419 return bt
420 420
421 421
422 422 def _readbranchcache(self):
423 423 partial = {}
424 424 try:
425 425 f = self.opener("branchheads.cache")
426 426 lines = f.read().split('\n')
427 427 f.close()
428 428 except (IOError, OSError):
429 429 return {}, nullid, nullrev
430 430
431 431 try:
432 432 last, lrev = lines.pop(0).split(" ", 1)
433 433 last, lrev = bin(last), int(lrev)
434 434 if lrev >= len(self) or self[lrev].node() != last:
435 435 # invalidate the cache
436 436 raise ValueError('invalidating branch cache (tip differs)')
437 437 for l in lines:
438 438 if not l: continue
439 439 node, label = l.split(" ", 1)
440 440 partial.setdefault(label.strip(), []).append(bin(node))
441 441 except KeyboardInterrupt:
442 442 raise
443 443 except Exception, inst:
444 444 if self.ui.debugflag:
445 445 self.ui.warn(str(inst), '\n')
446 446 partial, last, lrev = {}, nullid, nullrev
447 447 return partial, last, lrev
448 448
449 449 def _writebranchcache(self, branches, tip, tiprev):
450 450 try:
451 451 f = self.opener("branchheads.cache", "w", atomictemp=True)
452 452 f.write("%s %s\n" % (hex(tip), tiprev))
453 453 for label, nodes in branches.iteritems():
454 454 for node in nodes:
455 455 f.write("%s %s\n" % (hex(node), label))
456 456 f.rename()
457 457 except (IOError, OSError):
458 458 pass
459 459
460 460 def _updatebranchcache(self, partial, start, end):
461 461 for r in xrange(start, end):
462 462 c = self[r]
463 463 b = c.branch()
464 464 bheads = partial.setdefault(b, [])
465 465 bheads.append(c.node())
466 466 for p in c.parents():
467 467 pn = p.node()
468 468 if pn in bheads:
469 469 bheads.remove(pn)
470 470
471 471 def lookup(self, key):
472 472 if isinstance(key, int):
473 473 return self.changelog.node(key)
474 474 elif key == '.':
475 475 return self.dirstate.parents()[0]
476 476 elif key == 'null':
477 477 return nullid
478 478 elif key == 'tip':
479 479 return self.changelog.tip()
480 480 n = self.changelog._match(key)
481 481 if n:
482 482 return n
483 483 if key in self.tags():
484 484 return self.tags()[key]
485 485 if key in self.branchtags():
486 486 return self.branchtags()[key]
487 487 n = self.changelog._partialmatch(key)
488 488 if n:
489 489 return n
490 490 try:
491 491 if len(key) == 20:
492 492 key = hex(key)
493 493 except:
494 494 pass
495 495 raise error.RepoError(_("unknown revision '%s'") % key)
496 496
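
[Editor's sketch: lookup() tries the key forms in roughly the order coded above; the hash prefix and names below are placeholders.]

    repo.lookup(0)          # integer revision -> changelog.node(0)
    repo.lookup('.')        # first parent of the working directory
    repo.lookup('null')     # the null revision
    repo.lookup('tip')      # repository tip
    repo.lookup('v1.0')     # tag name, resolved via self.tags()
    repo.lookup('default')  # branch name, resolved via self.branchtags()
    repo.lookup('ecf77954') # unambiguous node prefix, via _partialmatch
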
497 497 def local(self):
498 498 return True
499 499
500 500 def join(self, f):
501 501 return os.path.join(self.path, f)
502 502
503 503 def wjoin(self, f):
504 504 return os.path.join(self.root, f)
505 505
506 506 def rjoin(self, f):
507 507 return os.path.join(self.root, util.pconvert(f))
508 508
509 509 def file(self, f):
510 510 if f[0] == '/':
511 511 f = f[1:]
512 512 return filelog.filelog(self.sopener, f)
513 513
514 514 def changectx(self, changeid):
515 515 return self[changeid]
516 516
517 517 def parents(self, changeid=None):
518 518 '''get list of changectxs for parents of changeid'''
519 519 return self[changeid].parents()
520 520
521 521 def filectx(self, path, changeid=None, fileid=None):
522 522 """changeid can be a changeset revision, node, or tag.
523 523 fileid can be a file revision or node."""
524 524 return context.filectx(self, path, changeid, fileid)
525 525
526 526 def getcwd(self):
527 527 return self.dirstate.getcwd()
528 528
529 529 def pathto(self, f, cwd=None):
530 530 return self.dirstate.pathto(f, cwd)
531 531
532 532 def wfile(self, f, mode='r'):
533 533 return self.wopener(f, mode)
534 534
535 535 def _link(self, f):
536 536 return os.path.islink(self.wjoin(f))
537 537
538 538 def _filter(self, filter, filename, data):
539 539 if filter not in self.filterpats:
540 540 l = []
541 541 for pat, cmd in self.ui.configitems(filter):
542 542 if cmd == '!':
543 543 continue
544 544 mf = util.matcher(self.root, "", [pat], [], [])[1]
545 545 fn = None
546 546 params = cmd
547 547 for name, filterfn in self._datafilters.iteritems():
548 548 if cmd.startswith(name):
549 549 fn = filterfn
550 550 params = cmd[len(name):].lstrip()
551 551 break
552 552 if not fn:
553 553 fn = lambda s, c, **kwargs: util.filter(s, c)
554 554 # Wrap old filters not supporting keyword arguments
555 555 if not inspect.getargspec(fn)[2]:
556 556 oldfn = fn
557 557 fn = lambda s, c, **kwargs: oldfn(s, c)
558 558 l.append((mf, fn, params))
559 559 self.filterpats[filter] = l
560 560
561 561 for mf, fn, cmd in self.filterpats[filter]:
562 562 if mf(filename):
563 563 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
564 564 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
565 565 break
566 566
567 567 return data
568 568
569 569 def adddatafilter(self, name, filter):
570 570 self._datafilters[name] = filter
571 571
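
[Editor's sketch: the _filter machinery above pairs hgrc patterns with registered data filters. A hedged example of wiring one up; the filter name, pattern, and function are invented for illustration.]

    # hgrc (illustrative):
    #   [encode]
    #   **.txt = upper:
    def upperfilter(s, cmd, ui=None, repo=None, filename=None, **kwargs):
        # `cmd` receives whatever follows the registered name in the hgrc value
        return s.upper()
    repo.adddatafilter('upper:', upperfilter)
    # wread() now routes matching files through upperfilter via _filter("encode", ...)
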
572 572 def wread(self, filename):
573 573 if self._link(filename):
574 574 data = os.readlink(self.wjoin(filename))
575 575 else:
576 576 data = self.wopener(filename, 'r').read()
577 577 return self._filter("encode", filename, data)
578 578
579 579 def wwrite(self, filename, data, flags):
580 580 data = self._filter("decode", filename, data)
581 581 try:
582 582 os.unlink(self.wjoin(filename))
583 583 except OSError:
584 584 pass
585 585 if 'l' in flags:
586 586 self.wopener.symlink(data, filename)
587 587 else:
588 588 self.wopener(filename, 'w').write(data)
589 589 if 'x' in flags:
590 590 util.set_flags(self.wjoin(filename), False, True)
591 591
592 592 def wwritedata(self, filename, data):
593 593 return self._filter("decode", filename, data)
594 594
595 595 def transaction(self):
596 if self._transref and self._transref():
597 return self._transref().nest()
596 tr = self._transref and self._transref() or None
597 if tr and tr.running():
598 return tr.nest()
598 599
599 600 # abort here if the journal already exists
600 601 if os.path.exists(self.sjoin("journal")):
601 602 raise error.RepoError(_("journal already exists - run hg recover"))
602 603
603 604 # save dirstate for rollback
604 605 try:
605 606 ds = self.opener("dirstate").read()
606 607 except IOError:
607 608 ds = ""
608 609 self.opener("journal.dirstate", "w").write(ds)
609 610 self.opener("journal.branch", "w").write(self.dirstate.branch())
610 611
611 612 renames = [(self.sjoin("journal"), self.sjoin("undo")),
612 613 (self.join("journal.dirstate"), self.join("undo.dirstate")),
613 614 (self.join("journal.branch"), self.join("undo.branch"))]
614 615 tr = transaction.transaction(self.ui.warn, self.sopener,
615 616 self.sjoin("journal"),
616 617 aftertrans(renames),
617 618 self.store.createmode)
618 619 self._transref = weakref.ref(tr)
619 620 return tr
620 621
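
[Editor's sketch: the rewritten transaction() above only nests into a transaction that is still running, so a dangling weakref to a finished transaction no longer blocks opening a fresh one. The pattern below assumes tr.running() reports whether the journal is still open, as the new code implies.]

    tr1 = repo.transaction()   # opens the journal; tr1.running() is True
    tr2 = repo.transaction()   # same journal: returns tr1.nest()
    tr2.close()
    tr1.close()                # outermost close commits the journal

    tr3 = repo.transaction()   # tr1 is finished, so this is a new, separate
                               # transaction rather than a stale nest()
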
621 622 def recover(self):
622 623 l = self.lock()
623 624 try:
624 625 if os.path.exists(self.sjoin("journal")):
625 626 self.ui.status(_("rolling back interrupted transaction\n"))
626 627 transaction.rollback(self.sopener, self.sjoin("journal"))
627 628 self.invalidate()
628 629 return True
629 630 else:
630 631 self.ui.warn(_("no interrupted transaction available\n"))
631 632 return False
632 633 finally:
633 634 del l
634 635
635 636 def rollback(self):
636 637 wlock = lock = None
637 638 try:
638 639 wlock = self.wlock()
639 640 lock = self.lock()
640 641 if os.path.exists(self.sjoin("undo")):
641 642 self.ui.status(_("rolling back last transaction\n"))
642 643 transaction.rollback(self.sopener, self.sjoin("undo"))
643 644 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
644 645 try:
645 646 branch = self.opener("undo.branch").read()
646 647 self.dirstate.setbranch(branch)
647 648 except IOError:
648 649 self.ui.warn(_("Named branch could not be reset, "
649 650 "current branch still is: %s\n")
650 651 % encoding.tolocal(self.dirstate.branch()))
651 652 self.invalidate()
652 653 self.dirstate.invalidate()
653 654 else:
654 655 self.ui.warn(_("no rollback information available\n"))
655 656 finally:
656 657 del lock, wlock
657 658
658 659 def invalidate(self):
659 660 for a in "changelog manifest".split():
660 661 if a in self.__dict__:
661 662 delattr(self, a)
662 663 self.tagscache = None
663 664 self._tagstypecache = None
664 665 self.nodetagscache = None
665 666 self.branchcache = None
666 667 self._ubranchcache = None
667 668 self._branchcachetip = None
668 669
669 670 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
670 671 try:
671 672 l = lock.lock(lockname, 0, releasefn, desc=desc)
672 673 except error.LockHeld, inst:
673 674 if not wait:
674 675 raise
675 676 self.ui.warn(_("waiting for lock on %s held by %r\n") %
676 677 (desc, inst.locker))
677 678 # default to 600 seconds timeout
678 679 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
679 680 releasefn, desc=desc)
680 681 if acquirefn:
681 682 acquirefn()
682 683 return l
683 684
684 685 def lock(self, wait=True):
685 686 if self._lockref and self._lockref():
686 687 return self._lockref()
687 688
688 689 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
689 690 _('repository %s') % self.origroot)
690 691 self._lockref = weakref.ref(l)
691 692 return l
692 693
693 694 def wlock(self, wait=True):
694 695 if self._wlockref and self._wlockref():
695 696 return self._wlockref()
696 697
697 698 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
698 699 self.dirstate.invalidate, _('working directory of %s') %
699 700 self.origroot)
700 701 self._wlockref = weakref.ref(l)
701 702 return l
702 703
703 704 def filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
704 705 """
705 706 commit an individual file as part of a larger transaction
706 707 """
707 708
708 709 fn = fctx.path()
709 710 t = fctx.data()
710 711 fl = self.file(fn)
711 712 fp1 = manifest1.get(fn, nullid)
712 713 fp2 = manifest2.get(fn, nullid)
713 714
714 715 meta = {}
715 716 cp = fctx.renamed()
716 717 if cp and cp[0] != fn:
717 718 # Mark the new revision of this file as a copy of another
718 719 # file. This copy data will effectively act as a parent
719 720 # of this new revision. If this is a merge, the first
720 721 # parent will be the nullid (meaning "look up the copy data")
721 722 # and the second one will be the other parent. For example:
722 723 #
723 724 # 0 --- 1 --- 3 rev1 changes file foo
724 725 # \ / rev2 renames foo to bar and changes it
725 726 # \- 2 -/ rev3 should have bar with all changes and
726 727 # should record that bar descends from
727 728 # bar in rev2 and foo in rev1
728 729 #
729 730 # this allows this merge to succeed:
730 731 #
731 732 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
732 733 # \ / merging rev3 and rev4 should use bar@rev2
733 734 # \- 2 --- 4 as the merge base
734 735 #
735 736
736 737 cf = cp[0]
737 738 cr = manifest1.get(cf)
738 739 nfp = fp2
739 740
740 741 if manifest2: # branch merge
741 742 if fp2 == nullid or cr is None: # copied on remote side
742 743 if cf in manifest2:
743 744 cr = manifest2[cf]
744 745 nfp = fp1
745 746
746 747 # find source in nearest ancestor if we've lost track
747 748 if not cr:
748 749 self.ui.debug(_(" %s: searching for copy revision for %s\n") %
749 750 (fn, cf))
750 751 for a in self['.'].ancestors():
751 752 if cf in a:
752 753 cr = a[cf].filenode()
753 754 break
754 755
755 756 self.ui.debug(_(" %s: copy %s:%s\n") % (fn, cf, hex(cr)))
756 757 meta["copy"] = cf
757 758 meta["copyrev"] = hex(cr)
758 759 fp1, fp2 = nullid, nfp
759 760 elif fp2 != nullid:
760 761 # is one parent an ancestor of the other?
761 762 fpa = fl.ancestor(fp1, fp2)
762 763 if fpa == fp1:
763 764 fp1, fp2 = fp2, nullid
764 765 elif fpa == fp2:
765 766 fp2 = nullid
766 767
767 768 # is the file unmodified from the parent? report existing entry
768 769 if fp2 == nullid and not fl.cmp(fp1, t) and not meta:
769 770 return fp1
770 771
771 772 changelist.append(fn)
772 773 return fl.add(t, meta, tr, linkrev, fp1, fp2)
773 774
774 775 def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}):
775 776 if p1 is None:
776 777 p1, p2 = self.dirstate.parents()
777 778 return self.commit(files=files, text=text, user=user, date=date,
778 779 p1=p1, p2=p2, extra=extra, empty_ok=True)
779 780
780 781 def commit(self, files=None, text="", user=None, date=None,
781 782 match=None, force=False, force_editor=False,
782 783 p1=None, p2=None, extra={}, empty_ok=False):
783 784 wlock = lock = None
784 785 if extra.get("close"):
785 786 force = True
786 787 if files:
787 788 files = util.unique(files)
788 789 try:
789 790 wlock = self.wlock()
790 791 lock = self.lock()
791 792 use_dirstate = (p1 is None) # not rawcommit
792 793
793 794 if use_dirstate:
794 795 p1, p2 = self.dirstate.parents()
795 796 update_dirstate = True
796 797
797 798 if (not force and p2 != nullid and
798 799 (match and (match.files() or match.anypats()))):
799 800 raise util.Abort(_('cannot partially commit a merge '
800 801 '(do not specify files or patterns)'))
801 802
802 803 if files:
803 804 modified, removed = [], []
804 805 for f in files:
805 806 s = self.dirstate[f]
806 807 if s in 'nma':
807 808 modified.append(f)
808 809 elif s == 'r':
809 810 removed.append(f)
810 811 else:
811 812 self.ui.warn(_("%s not tracked!\n") % f)
812 813 changes = [modified, [], removed, [], []]
813 814 else:
814 815 changes = self.status(match=match)
815 816 else:
816 817 p1, p2 = p1, p2 or nullid
817 818 update_dirstate = (self.dirstate.parents()[0] == p1)
818 819 changes = [files, [], [], [], []]
819 820
820 821 ms = merge_.mergestate(self)
821 822 for f in changes[0]:
822 823 if f in ms and ms[f] == 'u':
823 824 raise util.Abort(_("unresolved merge conflicts "
824 825 "(see hg resolve)"))
825 826 wctx = context.workingctx(self, (p1, p2), text, user, date,
826 827 extra, changes)
827 828 r = self._commitctx(wctx, force, force_editor, empty_ok,
828 829 use_dirstate, update_dirstate)
829 830 ms.reset()
830 831 return r
831 832
832 833 finally:
833 834 del lock, wlock
834 835
835 836 def commitctx(self, ctx):
836 837 """Add a new revision to current repository.
837 838
838 839 Revision information is passed in the context.memctx argument.
839 840 commitctx() does not touch the working directory.
840 841 """
841 842 wlock = lock = None
842 843 try:
843 844 wlock = self.wlock()
844 845 lock = self.lock()
845 846 return self._commitctx(ctx, force=True, force_editor=False,
846 847 empty_ok=True, use_dirstate=False,
847 848 update_dirstate=False)
848 849 finally:
849 850 del lock, wlock
850 851
851 852 def _commitctx(self, wctx, force=False, force_editor=False, empty_ok=False,
852 853 use_dirstate=True, update_dirstate=True):
853 854 tr = None
854 855 valid = 0 # don't save the dirstate if this isn't set
855 856 try:
856 857 commit = util.sort(wctx.modified() + wctx.added())
857 858 remove = wctx.removed()
858 859 extra = wctx.extra().copy()
859 860 branchname = extra['branch']
860 861 user = wctx.user()
861 862 text = wctx.description()
862 863
863 864 p1, p2 = [p.node() for p in wctx.parents()]
864 865 c1 = self.changelog.read(p1)
865 866 c2 = self.changelog.read(p2)
866 867 m1 = self.manifest.read(c1[0]).copy()
867 868 m2 = self.manifest.read(c2[0])
868 869
869 870 if use_dirstate:
870 871 oldname = c1[5].get("branch") # stored in UTF-8
871 872 if (not commit and not remove and not force and p2 == nullid
872 873 and branchname == oldname):
873 874 self.ui.status(_("nothing changed\n"))
874 875 return None
875 876
876 877 xp1 = hex(p1)
877 878 if p2 == nullid: xp2 = ''
878 879 else: xp2 = hex(p2)
879 880
880 881 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
881 882
882 883 tr = self.transaction()
883 884 trp = weakref.proxy(tr)
884 885
885 886 # check in files
886 887 new = {}
887 888 changed = []
888 889 linkrev = len(self)
889 890 for f in commit:
890 891 self.ui.note(f + "\n")
891 892 try:
892 893 fctx = wctx.filectx(f)
893 894 newflags = fctx.flags()
894 895 new[f] = self.filecommit(fctx, m1, m2, linkrev, trp, changed)
895 896 if ((not changed or changed[-1] != f) and
896 897 m2.get(f) != new[f]):
897 898 # mention the file in the changelog if some
898 899 # flag changed, even if there was no content
899 900 # change.
900 901 if m1.flags(f) != newflags:
901 902 changed.append(f)
902 903 m1.set(f, newflags)
903 904 if use_dirstate:
904 905 self.dirstate.normal(f)
905 906
906 907 except (OSError, IOError):
907 908 if use_dirstate:
908 909 self.ui.warn(_("trouble committing %s!\n") % f)
909 910 raise
910 911 else:
911 912 remove.append(f)
912 913
913 914 updated, added = [], []
914 915 for f in util.sort(changed):
915 916 if f in m1 or f in m2:
916 917 updated.append(f)
917 918 else:
918 919 added.append(f)
919 920
920 921 # update manifest
921 922 m1.update(new)
922 923 removed = [f for f in util.sort(remove) if f in m1 or f in m2]
923 924 removed1 = []
924 925
925 926 for f in removed:
926 927 if f in m1:
927 928 del m1[f]
928 929 removed1.append(f)
929 930 mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
930 931 (new, removed1))
931 932
932 933 # add changeset
933 934 if (not empty_ok and not text) or force_editor:
934 935 edittext = []
935 936 if text:
936 937 edittext.append(text)
937 938 edittext.append("")
938 939 edittext.append("") # Empty line between message and comments.
939 940 edittext.append(_("HG: Enter commit message."
940 941 " Lines beginning with 'HG:' are removed."))
941 942 edittext.append("HG: --")
942 943 edittext.append("HG: user: %s" % user)
943 944 if p2 != nullid:
944 945 edittext.append("HG: branch merge")
945 946 if branchname:
946 947 edittext.append("HG: branch '%s'"
947 948 % encoding.tolocal(branchname))
948 949 edittext.extend(["HG: added %s" % f for f in added])
949 950 edittext.extend(["HG: changed %s" % f for f in updated])
950 951 edittext.extend(["HG: removed %s" % f for f in removed])
951 952 if not added and not updated and not removed:
952 953 edittext.append("HG: no files changed")
953 954 edittext.append("")
954 955 # run editor in the repository root
955 956 olddir = os.getcwd()
956 957 os.chdir(self.root)
957 958 text = self.ui.edit("\n".join(edittext), user)
958 959 os.chdir(olddir)
959 960
960 961 lines = [line.rstrip() for line in text.rstrip().splitlines()]
961 962 while lines and not lines[0]:
962 963 del lines[0]
963 964 if not lines and use_dirstate:
964 965 raise util.Abort(_("empty commit message"))
965 966 text = '\n'.join(lines)
966 967
967 968 self.changelog.delayupdate()
968 969 n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
969 970 user, wctx.date(), extra)
970 971 p = lambda: self.changelog.writepending() and self.root or ""
971 972 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
972 973 parent2=xp2, pending=p)
973 974 self.changelog.finalize(trp)
974 975 tr.close()
975 976
976 977 if self.branchcache:
977 978 self.branchtags()
978 979
979 980 if use_dirstate or update_dirstate:
980 981 self.dirstate.setparents(n)
981 982 if use_dirstate:
982 983 for f in removed:
983 984 self.dirstate.forget(f)
984 985 valid = 1 # our dirstate updates are complete
985 986
986 987 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
987 988 return n
988 989 finally:
989 990 if not valid: # don't save our updated dirstate
990 991 self.dirstate.invalidate()
991 992 del tr
992 993
993 994 def walk(self, match, node=None):
994 995 '''
995 996 walk recursively through the directory tree or a given
996 997 changeset, finding all files matched by the match
997 998 function
998 999 '''
999 1000 return self[node].walk(match)
1000 1001
1001 1002 def status(self, node1='.', node2=None, match=None,
1002 1003 ignored=False, clean=False, unknown=False):
1003 1004 """return status of files between two nodes or node and working directory
1004 1005
1005 1006 If node1 is None, use the first dirstate parent instead.
1006 1007 If node2 is None, compare node1 with working directory.
1007 1008 """
1008 1009
1009 1010 def mfmatches(ctx):
1010 1011 mf = ctx.manifest().copy()
1011 1012 for fn in mf.keys():
1012 1013 if not match(fn):
1013 1014 del mf[fn]
1014 1015 return mf
1015 1016
1016 1017 if isinstance(node1, context.changectx):
1017 1018 ctx1 = node1
1018 1019 else:
1019 1020 ctx1 = self[node1]
1020 1021 if isinstance(node2, context.changectx):
1021 1022 ctx2 = node2
1022 1023 else:
1023 1024 ctx2 = self[node2]
1024 1025
1025 1026 working = ctx2.rev() is None
1026 1027 parentworking = working and ctx1 == self['.']
1027 1028 match = match or match_.always(self.root, self.getcwd())
1028 1029 listignored, listclean, listunknown = ignored, clean, unknown
1029 1030
1030 1031 # load earliest manifest first for caching reasons
1031 1032 if not working and ctx2.rev() < ctx1.rev():
1032 1033 ctx2.manifest()
1033 1034
1034 1035 if not parentworking:
1035 1036 def bad(f, msg):
1036 1037 if f not in ctx1:
1037 1038 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1038 1039 return False
1039 1040 match.bad = bad
1040 1041
1041 1042 if working: # we need to scan the working dir
1042 1043 s = self.dirstate.status(match, listignored, listclean, listunknown)
1043 1044 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1044 1045
1045 1046 # check for any possibly clean files
1046 1047 if parentworking and cmp:
1047 1048 fixup = []
1048 1049 # do a full compare of any files that might have changed
1049 1050 for f in cmp:
1050 1051 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1051 1052 or ctx1[f].cmp(ctx2[f].data())):
1052 1053 modified.append(f)
1053 1054 else:
1054 1055 fixup.append(f)
1055 1056
1056 1057 if listclean:
1057 1058 clean += fixup
1058 1059
1059 1060 # update dirstate for files that are actually clean
1060 1061 if fixup:
1061 1062 wlock = None
1062 1063 try:
1063 1064 try:
1064 1065 wlock = self.wlock(False)
1065 1066 for f in fixup:
1066 1067 self.dirstate.normal(f)
1067 1068 except error.LockError:
1068 1069 pass
1069 1070 finally:
1070 1071 del wlock
1071 1072
1072 1073 if not parentworking:
1073 1074 mf1 = mfmatches(ctx1)
1074 1075 if working:
1075 1076 # we are comparing working dir against non-parent
1076 1077 # generate a pseudo-manifest for the working dir
1077 1078 mf2 = mfmatches(self['.'])
1078 1079 for f in cmp + modified + added:
1079 1080 mf2[f] = None
1080 1081 mf2.set(f, ctx2.flags(f))
1081 1082 for f in removed:
1082 1083 if f in mf2:
1083 1084 del mf2[f]
1084 1085 else:
1085 1086 # we are comparing two revisions
1086 1087 deleted, unknown, ignored = [], [], []
1087 1088 mf2 = mfmatches(ctx2)
1088 1089
1089 1090 modified, added, clean = [], [], []
1090 1091 for fn in mf2:
1091 1092 if fn in mf1:
1092 1093 if (mf1.flags(fn) != mf2.flags(fn) or
1093 1094 (mf1[fn] != mf2[fn] and
1094 1095 (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
1095 1096 modified.append(fn)
1096 1097 elif listclean:
1097 1098 clean.append(fn)
1098 1099 del mf1[fn]
1099 1100 else:
1100 1101 added.append(fn)
1101 1102 removed = mf1.keys()
1102 1103
1103 1104 r = modified, added, removed, deleted, unknown, ignored, clean
1104 1105 [l.sort() for l in r]
1105 1106 return r
1106 1107
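
[Editor's sketch: status() returns seven sorted lists; a small consuming example. The ignored/clean/unknown flags are enabled explicitly since those lists are left empty by default.]

    modified, added, removed, deleted, unknown, ignored, clean = \
        repo.status(ignored=True, clean=True, unknown=True)
    for f in modified:
        print 'M %s' % f       # Python 2 print, matching this codebase
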
1107 1108 def add(self, list):
1108 1109 wlock = self.wlock()
1109 1110 try:
1110 1111 rejected = []
1111 1112 for f in list:
1112 1113 p = self.wjoin(f)
1113 1114 try:
1114 1115 st = os.lstat(p)
1115 1116 except:
1116 1117 self.ui.warn(_("%s does not exist!\n") % f)
1117 1118 rejected.append(f)
1118 1119 continue
1119 1120 if st.st_size > 10000000:
1120 1121 self.ui.warn(_("%s: files over 10MB may cause memory and"
1121 1122 " performance problems\n"
1122 1123 "(use 'hg revert %s' to unadd the file)\n")
1123 1124 % (f, f))
1124 1125 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1125 1126 self.ui.warn(_("%s not added: only files and symlinks "
1126 1127 "supported currently\n") % f)
1127 1128 rejected.append(p)
1128 1129 elif self.dirstate[f] in 'amn':
1129 1130 self.ui.warn(_("%s already tracked!\n") % f)
1130 1131 elif self.dirstate[f] == 'r':
1131 1132 self.dirstate.normallookup(f)
1132 1133 else:
1133 1134 self.dirstate.add(f)
1134 1135 return rejected
1135 1136 finally:
1136 1137 del wlock
1137 1138
1138 1139 def forget(self, list):
1139 1140 wlock = self.wlock()
1140 1141 try:
1141 1142 for f in list:
1142 1143 if self.dirstate[f] != 'a':
1143 1144 self.ui.warn(_("%s not added!\n") % f)
1144 1145 else:
1145 1146 self.dirstate.forget(f)
1146 1147 finally:
1147 1148 del wlock
1148 1149
1149 1150 def remove(self, list, unlink=False):
1150 1151 wlock = None
1151 1152 try:
1152 1153 if unlink:
1153 1154 for f in list:
1154 1155 try:
1155 1156 util.unlink(self.wjoin(f))
1156 1157 except OSError, inst:
1157 1158 if inst.errno != errno.ENOENT:
1158 1159 raise
1159 1160 wlock = self.wlock()
1160 1161 for f in list:
1161 1162 if unlink and os.path.exists(self.wjoin(f)):
1162 1163 self.ui.warn(_("%s still exists!\n") % f)
1163 1164 elif self.dirstate[f] == 'a':
1164 1165 self.dirstate.forget(f)
1165 1166 elif f not in self.dirstate:
1166 1167 self.ui.warn(_("%s not tracked!\n") % f)
1167 1168 else:
1168 1169 self.dirstate.remove(f)
1169 1170 finally:
1170 1171 del wlock
1171 1172
1172 1173 def undelete(self, list):
1173 1174 wlock = None
1174 1175 try:
1175 1176 manifests = [self.manifest.read(self.changelog.read(p)[0])
1176 1177 for p in self.dirstate.parents() if p != nullid]
1177 1178 wlock = self.wlock()
1178 1179 for f in list:
1179 1180 if self.dirstate[f] != 'r':
1180 1181 self.ui.warn(_("%s not removed!\n") % f)
1181 1182 else:
1182 1183 m = f in manifests[0] and manifests[0] or manifests[1]
1183 1184 t = self.file(f).read(m[f])
1184 1185 self.wwrite(f, t, m.flags(f))
1185 1186 self.dirstate.normal(f)
1186 1187 finally:
1187 1188 del wlock
1188 1189
1189 1190 def copy(self, source, dest):
1190 1191 wlock = None
1191 1192 try:
1192 1193 p = self.wjoin(dest)
1193 1194 if not (os.path.exists(p) or os.path.islink(p)):
1194 1195 self.ui.warn(_("%s does not exist!\n") % dest)
1195 1196 elif not (os.path.isfile(p) or os.path.islink(p)):
1196 1197 self.ui.warn(_("copy failed: %s is not a file or a "
1197 1198 "symbolic link\n") % dest)
1198 1199 else:
1199 1200 wlock = self.wlock()
1200 1201 if self.dirstate[dest] in '?r':
1201 1202 self.dirstate.add(dest)
1202 1203 self.dirstate.copy(source, dest)
1203 1204 finally:
1204 1205 del wlock
1205 1206
1206 1207 def heads(self, start=None, closed=True):
1207 1208 heads = self.changelog.heads(start)
1208 1209 def display(head):
1209 1210 if closed:
1210 1211 return True
1211 1212 extras = self.changelog.read(head)[5]
1212 1213 return ('close' not in extras)
1213 1214 # sort the output in rev descending order
1214 1215 heads = [(-self.changelog.rev(h), h) for h in heads if display(h)]
1215 1216 return [n for (r, n) in util.sort(heads)]
1216 1217
1217 1218 def branchheads(self, branch=None, start=None, closed=True):
1218 1219 if branch is None:
1219 1220 branch = self[None].branch()
1220 1221 branches = self._branchheads()
1221 1222 if branch not in branches:
1222 1223 return []
1223 1224 bheads = branches[branch]
1224 1225 # the cache returns heads ordered lowest to highest
1225 1226 bheads.reverse()
1226 1227 if start is not None:
1227 1228 # filter out the heads that cannot be reached from startrev
1228 1229 bheads = self.changelog.nodesbetween([start], bheads)[2]
1229 1230 if not closed:
1230 1231 bheads = [h for h in bheads if
1231 1232 ('close' not in self.changelog.read(h)[5])]
1232 1233 return bheads
1233 1234
1234 1235 def branches(self, nodes):
1235 1236 if not nodes:
1236 1237 nodes = [self.changelog.tip()]
1237 1238 b = []
1238 1239 for n in nodes:
1239 1240 t = n
1240 1241 while 1:
1241 1242 p = self.changelog.parents(n)
1242 1243 if p[1] != nullid or p[0] == nullid:
1243 1244 b.append((t, n, p[0], p[1]))
1244 1245 break
1245 1246 n = p[0]
1246 1247 return b
1247 1248
1248 1249 def between(self, pairs):
1249 1250 r = []
1250 1251
1251 1252 for top, bottom in pairs:
1252 1253 n, l, i = top, [], 0
1253 1254 f = 1
1254 1255
1255 1256 while n != bottom and n != nullid:
1256 1257 p = self.changelog.parents(n)[0]
1257 1258 if i == f:
1258 1259 l.append(n)
1259 1260 f = f * 2
1260 1261 n = p
1261 1262 i += 1
1262 1263
1263 1264 r.append(l)
1264 1265
1265 1266 return r
1266 1267
1267 1268 def findincoming(self, remote, base=None, heads=None, force=False):
1268 1269 """Return list of roots of the subsets of missing nodes from remote
1269 1270
1270 1271 If base dict is specified, assume that these nodes and their parents
1271 1272 exist on the remote side and that no child of a node of base exists
1272 1273 in both remote and self.
1273 1274 Furthermore base will be updated to include the nodes that exist
1274 1275 in self and remote but none of whose children exist in both.
1275 1276 If a list of heads is specified, return only nodes which are heads
1276 1277 or ancestors of these heads.
1277 1278
1278 1279 All the ancestors of base are in self and in remote.
1279 1280 All the descendants of the list returned are missing in self.
1280 1281 (and so we know that the rest of the nodes are missing in remote, see
1281 1282 outgoing)
1282 1283 """
1283 1284 return self.findcommonincoming(remote, base, heads, force)[1]
1284 1285
1285 1286 def findcommonincoming(self, remote, base=None, heads=None, force=False):
1286 1287 """Return a tuple (common, missing roots, heads) used to identify
1287 1288 missing nodes from remote.
1288 1289
1289 1290 If base dict is specified, assume that these nodes and their parents
1290 1291 exist on the remote side and that no child of a node of base exists
1291 1292 in both remote and self.
1292 1293 Furthermore base will be updated to include the nodes that exist
1293 1294 in self and remote but none of whose children exist in both.
1294 1295 If a list of heads is specified, return only nodes which are heads
1295 1296 or ancestors of these heads.
1296 1297
1297 1298 All the ancestors of base are in self and in remote.
1298 1299 """
1299 1300 m = self.changelog.nodemap
1300 1301 search = []
1301 1302 fetch = {}
1302 1303 seen = {}
1303 1304 seenbranch = {}
1304 1305 if base == None:
1305 1306 base = {}
1306 1307
1307 1308 if not heads:
1308 1309 heads = remote.heads()
1309 1310
1310 1311 if self.changelog.tip() == nullid:
1311 1312 base[nullid] = 1
1312 1313 if heads != [nullid]:
1313 1314 return [nullid], [nullid], list(heads)
1314 1315 return [nullid], [], []
1315 1316
1316 1317 # assume we're closer to the tip than the root
1317 1318 # and start by examining the heads
1318 1319 self.ui.status(_("searching for changes\n"))
1319 1320
1320 1321 unknown = []
1321 1322 for h in heads:
1322 1323 if h not in m:
1323 1324 unknown.append(h)
1324 1325 else:
1325 1326 base[h] = 1
1326 1327
1327 1328 heads = unknown
1328 1329 if not unknown:
1329 1330 return base.keys(), [], []
1330 1331
1331 1332 req = dict.fromkeys(unknown)
1332 1333 reqcnt = 0
1333 1334
1334 1335 # search through remote branches
1335 1336 # a 'branch' here is a linear segment of history, with four parts:
1336 1337 # head, root, first parent, second parent
1337 1338 # (a branch always has two parents (or none) by definition)
1338 1339 unknown = remote.branches(unknown)
1339 1340 while unknown:
1340 1341 r = []
1341 1342 while unknown:
1342 1343 n = unknown.pop(0)
1343 1344 if n[0] in seen:
1344 1345 continue
1345 1346
1346 1347 self.ui.debug(_("examining %s:%s\n")
1347 1348 % (short(n[0]), short(n[1])))
1348 1349 if n[0] == nullid: # found the end of the branch
1349 1350 pass
1350 1351 elif n in seenbranch:
1351 1352 self.ui.debug(_("branch already found\n"))
1352 1353 continue
1353 1354 elif n[1] and n[1] in m: # do we know the base?
1354 1355 self.ui.debug(_("found incomplete branch %s:%s\n")
1355 1356 % (short(n[0]), short(n[1])))
1356 1357 search.append(n[0:2]) # schedule branch range for scanning
1357 1358 seenbranch[n] = 1
1358 1359 else:
1359 1360 if n[1] not in seen and n[1] not in fetch:
1360 1361 if n[2] in m and n[3] in m:
1361 1362 self.ui.debug(_("found new changeset %s\n") %
1362 1363 short(n[1]))
1363 1364 fetch[n[1]] = 1 # earliest unknown
1364 1365 for p in n[2:4]:
1365 1366 if p in m:
1366 1367 base[p] = 1 # latest known
1367 1368
1368 1369 for p in n[2:4]:
1369 1370 if p not in req and p not in m:
1370 1371 r.append(p)
1371 1372 req[p] = 1
1372 1373 seen[n[0]] = 1
1373 1374
1374 1375 if r:
1375 1376 reqcnt += 1
1376 1377 self.ui.debug(_("request %d: %s\n") %
1377 1378 (reqcnt, " ".join(map(short, r))))
1378 1379 for p in xrange(0, len(r), 10):
1379 1380 for b in remote.branches(r[p:p+10]):
1380 1381 self.ui.debug(_("received %s:%s\n") %
1381 1382 (short(b[0]), short(b[1])))
1382 1383 unknown.append(b)
1383 1384
1384 1385 # do binary search on the branches we found
1385 1386 while search:
1386 1387 newsearch = []
1387 1388 reqcnt += 1
1388 1389 for n, l in zip(search, remote.between(search)):
1389 1390 l.append(n[1])
1390 1391 p = n[0]
1391 1392 f = 1
1392 1393 for i in l:
1393 1394 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1394 1395 if i in m:
1395 1396 if f <= 2:
1396 1397 self.ui.debug(_("found new branch changeset %s\n") %
1397 1398 short(p))
1398 1399 fetch[p] = 1
1399 1400 base[i] = 1
1400 1401 else:
1401 1402 self.ui.debug(_("narrowed branch search to %s:%s\n")
1402 1403 % (short(p), short(i)))
1403 1404 newsearch.append((p, i))
1404 1405 break
1405 1406 p, f = i, f * 2
1406 1407 search = newsearch
1407 1408
1408 1409 # sanity check our fetch list
1409 1410 for f in fetch.keys():
1410 1411 if f in m:
1411 1412 raise error.RepoError(_("already have changeset ")
1412 1413 + short(f[:4]))
1413 1414
1414 1415 if base.keys() == [nullid]:
1415 1416 if force:
1416 1417 self.ui.warn(_("warning: repository is unrelated\n"))
1417 1418 else:
1418 1419 raise util.Abort(_("repository is unrelated"))
1419 1420
1420 1421 self.ui.debug(_("found new changesets starting at ") +
1421 1422 " ".join([short(f) for f in fetch]) + "\n")
1422 1423
1423 1424 self.ui.debug(_("%d total queries\n") % reqcnt)
1424 1425
1425 1426 return base.keys(), fetch.keys(), heads
1426 1427
1427 1428 def findoutgoing(self, remote, base=None, heads=None, force=False):
1428 1429 """Return list of nodes that are roots of subsets not in remote
1429 1430
1430 1431 If base dict is specified, assume that these nodes and their parents
1431 1432 exist on the remote side.
1432 1433 If a list of heads is specified, return only nodes which are heads
1433 1434 or ancestors of these heads, and return a second element which
1434 1435 contains all remote heads which get new children.
1435 1436 """
1436 1437 if base == None:
1437 1438 base = {}
1438 1439 self.findincoming(remote, base, heads, force=force)
1439 1440
1440 1441 self.ui.debug(_("common changesets up to ")
1441 1442 + " ".join(map(short, base.keys())) + "\n")
1442 1443
1443 1444 remain = dict.fromkeys(self.changelog.nodemap)
1444 1445
1445 1446 # prune everything remote has from the tree
1446 1447 del remain[nullid]
1447 1448 remove = base.keys()
1448 1449 while remove:
1449 1450 n = remove.pop(0)
1450 1451 if n in remain:
1451 1452 del remain[n]
1452 1453 for p in self.changelog.parents(n):
1453 1454 remove.append(p)
1454 1455
1455 1456 # find every node whose parents have been pruned
1456 1457 subset = []
1457 1458 # find every remote head that will get new children
1458 1459 updated_heads = {}
1459 1460 for n in remain:
1460 1461 p1, p2 = self.changelog.parents(n)
1461 1462 if p1 not in remain and p2 not in remain:
1462 1463 subset.append(n)
1463 1464 if heads:
1464 1465 if p1 in heads:
1465 1466 updated_heads[p1] = True
1466 1467 if p2 in heads:
1467 1468 updated_heads[p2] = True
1468 1469
1469 1470 # this is the set of all roots we have to push
1470 1471 if heads:
1471 1472 return subset, updated_heads.keys()
1472 1473 else:
1473 1474 return subset
1474 1475
1475 1476 def pull(self, remote, heads=None, force=False):
1476 1477 lock = self.lock()
1477 1478 try:
1478 1479 common, fetch, rheads = self.findcommonincoming(remote, heads=heads,
1479 1480 force=force)
1480 1481 if fetch == [nullid]:
1481 1482 self.ui.status(_("requesting all changes\n"))
1482 1483
1483 1484 if not fetch:
1484 1485 self.ui.status(_("no changes found\n"))
1485 1486 return 0
1486 1487
1487 1488 if heads is None and remote.capable('changegroupsubset'):
1488 1489 heads = rheads
1489 1490
1490 1491 if heads is None:
1491 1492 cg = remote.changegroup(fetch, 'pull')
1492 1493 else:
1493 1494 if not remote.capable('changegroupsubset'):
1494 1495 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1495 1496 cg = remote.changegroupsubset(fetch, heads, 'pull')
1496 1497 return self.addchangegroup(cg, 'pull', remote.url())
1497 1498 finally:
1498 1499 del lock
1499 1500
1500 1501 def push(self, remote, force=False, revs=None):
1501 1502 # there are two ways to push to remote repo:
1502 1503 #
1503 1504 # addchangegroup assumes local user can lock remote
1504 1505 # repo (local filesystem, old ssh servers).
1505 1506 #
1506 1507 # unbundle assumes local user cannot lock remote repo (new ssh
1507 1508 # servers, http servers).
1508 1509
1509 1510 if remote.capable('unbundle'):
1510 1511 return self.push_unbundle(remote, force, revs)
1511 1512 return self.push_addchangegroup(remote, force, revs)
1512 1513
1513 1514 def prepush(self, remote, force, revs):
1514 1515 common = {}
1515 1516 remote_heads = remote.heads()
1516 1517 inc = self.findincoming(remote, common, remote_heads, force=force)
1517 1518
1518 1519 update, updated_heads = self.findoutgoing(remote, common, remote_heads)
1519 1520 if revs is not None:
1520 1521 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1521 1522 else:
1522 1523 bases, heads = update, self.changelog.heads()
1523 1524
1524 1525 if not bases:
1525 1526 self.ui.status(_("no changes found\n"))
1526 1527 return None, 1
1527 1528 elif not force:
1528 1529 # check if we're creating new remote heads
1529 1530 # to be a remote head after push, node must be either
1530 1531 # - unknown locally
1531 1532 # - a local outgoing head descended from update
1532 1533 # - a remote head that's known locally and not
1533 1534 # ancestral to an outgoing head
1534 1535
1535 1536 warn = 0
1536 1537
1537 1538 if remote_heads == [nullid]:
1538 1539 warn = 0
1539 1540 elif not revs and len(heads) > len(remote_heads):
1540 1541 warn = 1
1541 1542 else:
1542 1543 newheads = list(heads)
1543 1544 for r in remote_heads:
1544 1545 if r in self.changelog.nodemap:
1545 1546 desc = self.changelog.heads(r, heads)
1546 1547 l = [h for h in heads if h in desc]
1547 1548 if not l:
1548 1549 newheads.append(r)
1549 1550 else:
1550 1551 newheads.append(r)
1551 1552 if len(newheads) > len(remote_heads):
1552 1553 warn = 1
1553 1554
1554 1555 if warn:
1555 1556 self.ui.warn(_("abort: push creates new remote heads!\n"))
1556 1557 self.ui.status(_("(did you forget to merge?"
1557 1558 " use push -f to force)\n"))
1558 1559 return None, 0
1559 1560 elif inc:
1560 1561 self.ui.warn(_("note: unsynced remote changes!\n"))
1561 1562
1562 1563
1563 1564 if revs is None:
1564 1565 # use the fast path, no race possible on push
1565 1566 cg = self._changegroup(common.keys(), 'push')
1566 1567 else:
1567 1568 cg = self.changegroupsubset(update, revs, 'push')
1568 1569 return cg, remote_heads
1569 1570
1570 1571 def push_addchangegroup(self, remote, force, revs):
1571 1572 lock = remote.lock()
1572 1573 try:
1573 1574 ret = self.prepush(remote, force, revs)
1574 1575 if ret[0] is not None:
1575 1576 cg, remote_heads = ret
1576 1577 return remote.addchangegroup(cg, 'push', self.url())
1577 1578 return ret[1]
1578 1579 finally:
1579 1580 del lock
1580 1581
1581 1582 def push_unbundle(self, remote, force, revs):
1582 1583 # local repo finds heads on server, finds out what revs it
1583 1584 # must push. once revs transferred, if server finds it has
1584 1585 # different heads (someone else won commit/push race), server
1585 1586 # aborts.
1586 1587
1587 1588 ret = self.prepush(remote, force, revs)
1588 1589 if ret[0] is not None:
1589 1590 cg, remote_heads = ret
1590 1591 if force: remote_heads = ['force']
1591 1592 return remote.unbundle(cg, remote_heads, 'push')
1592 1593 return ret[1]
1593 1594
1594 1595 def changegroupinfo(self, nodes, source):
1595 1596 if self.ui.verbose or source == 'bundle':
1596 1597 self.ui.status(_("%d changesets found\n") % len(nodes))
1597 1598 if self.ui.debugflag:
1598 1599 self.ui.debug(_("list of changesets:\n"))
1599 1600 for node in nodes:
1600 1601 self.ui.debug("%s\n" % hex(node))
1601 1602
1602 1603 def changegroupsubset(self, bases, heads, source, extranodes=None):
1603 1604 """This function generates a changegroup consisting of all the nodes
1604 1605 that are descendants of any of the bases, and ancestors of any of
1605 1606 the heads.
1606 1607
1607 1608 It is fairly complex as determining which filenodes and which
1608 1609 manifest nodes need to be included for the changeset to be complete
1609 1610 is non-trivial.
1610 1611
1611 1612 Another wrinkle is doing the reverse, figuring out which changeset in
1612 1613 the changegroup a particular filenode or manifestnode belongs to.
1613 1614
1614 1615 The caller can specify some nodes that must be included in the
1615 1616 changegroup using the extranodes argument. It should be a dict
1616 1617 where the keys are the filenames (or 1 for the manifest), and the
1617 1618 values are lists of (node, linknode) tuples, where node is a wanted
1618 1619 node and linknode is the changelog node that should be transmitted as
1619 1620 the linkrev.
1620 1621 """
1621 1622
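
[Editor's sketch: a hypothetical illustration of the extranodes layout described in the docstring above; filenode, manifestnode, linknode, bases, and heads stand for binary nodes obtained elsewhere.]

    extranodes = {
        'foo/bar.txt': [(filenode, linknode)],      # filename -> wanted filenodes
        1:             [(manifestnode, linknode)],  # the key 1 means the manifest
    }
    cg = repo.changegroupsubset(bases, heads, 'push', extranodes=extranodes)
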
1622 1623 if extranodes is None:
1623 1624 # can we go through the fast path ?
1624 1625 heads.sort()
1625 1626 allheads = self.heads()
1626 1627 allheads.sort()
1627 1628 if heads == allheads:
1628 1629 common = []
1629 1630 # parents of bases are known from both sides
1630 1631 for n in bases:
1631 1632 for p in self.changelog.parents(n):
1632 1633 if p != nullid:
1633 1634 common.append(p)
1634 1635 return self._changegroup(common, source)
1635 1636
1636 1637 self.hook('preoutgoing', throw=True, source=source)
1637 1638
1638 1639 # Set up some initial variables
1639 1640 # Make it easy to refer to self.changelog
1640 1641 cl = self.changelog
1641 1642 # msng is short for missing - compute the list of changesets in this
1642 1643 # changegroup.
1643 1644 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1644 1645 self.changegroupinfo(msng_cl_lst, source)
1645 1646 # Some bases may turn out to be superfluous, and some heads may be
1646 1647 # too. nodesbetween will return the minimal set of bases and heads
1647 1648 # necessary to re-create the changegroup.
1648 1649
1649 1650 # Known heads are the list of heads that it is assumed the recipient
1650 1651 # of this changegroup will know about.
1651 1652 knownheads = {}
1652 1653 # We assume that all parents of bases are known heads.
1653 1654 for n in bases:
1654 1655 for p in cl.parents(n):
1655 1656 if p != nullid:
1656 1657 knownheads[p] = 1
1657 1658 knownheads = knownheads.keys()
1658 1659 if knownheads:
1659 1660 # Now that we know what heads are known, we can compute which
1660 1661 # changesets are known. The recipient must know about all
1661 1662 # changesets required to reach the known heads from the null
1662 1663 # changeset.
1663 1664 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1664 1665 junk = None
1665 1666 # Transform the list into an ersatz set.
1666 1667 has_cl_set = dict.fromkeys(has_cl_set)
1667 1668 else:
1668 1669 # If there were no known heads, the recipient cannot be assumed to
1669 1670 # know about any changesets.
1670 1671 has_cl_set = {}
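# A minimal sketch of what the membership test amounts to, assuming a
# changelog cl and a single known head kh:
#
#   ancs, junk, junk = cl.nodesbetween(None, [kh])
#   has_cl_set = dict.fromkeys(ancs)   # ersatz set: kh and all its
#   assert kh in has_cl_set            # ancestors are present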
1671 1672
1672 1673 # Make it easy to refer to self.manifest
1673 1674 mnfst = self.manifest
1674 1675 # We don't know which manifests are missing yet
1675 1676 msng_mnfst_set = {}
1676 1677 # Nor do we know which filenodes are missing.
1677 1678 msng_filenode_set = {}
1678 1679
1679 1680 junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
1680 1681 junk = None
1681 1682
1682 1683 # A changeset always belongs to itself, so the changenode lookup
1683 1684 # function for a changenode is identity.
1684 1685 def identity(x):
1685 1686 return x
1686 1687
1687 1688 # A function generating function. Sets up an environment for the
1688 1689 # inner function.
1689 1690 def cmp_by_rev_func(revlog):
1690 1691 # Compare two nodes by their revision number in the environment's
1691 1692 # revision history. Since the revision number gives both the most
1692 1693 # efficient order in which to read the nodes and a topological
1693 1694 # sorting of the nodes, this comparison function is often useful.
1694 1695 def cmp_by_rev(a, b):
1695 1696 return cmp(revlog.rev(a), revlog.rev(b))
1696 1697 return cmp_by_rev
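# For example (a sketch; the same pattern appears with the manifest
# below), a list of changelog nodes can be put into reading and
# topological order with:
#
#   nodes.sort(cmp_by_rev_func(self.changelog))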
1697 1698
1698 1699 # If we determine that a particular file or manifest node must be a
1699 1700 # node that the recipient of the changegroup will already have, we can
1700 1701 # also assume the recipient will have all the parents. This function
1701 1702 # prunes them from the set of missing nodes.
1702 1703 def prune_parents(revlog, hasset, msngset):
1703 1704 haslst = hasset.keys()
1704 1705 haslst.sort(cmp_by_rev_func(revlog))
1705 1706 for node in haslst:
1706 1707 parentlst = [p for p in revlog.parents(node) if p != nullid]
1707 1708 while parentlst:
1708 1709 n = parentlst.pop()
1709 1710 if n not in hasset:
1710 1711 hasset[n] = 1
1711 1712 p = [p for p in revlog.parents(n) if p != nullid]
1712 1713 parentlst.extend(p)
1713 1714 for n in hasset:
1714 1715 msngset.pop(n, None)
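# Worked example (hypothetical linear history A -> B -> C): if C is in
# hasset while B and C are in msngset, the walk above adds B and then A
# to hasset, and the final loop pops both B and C from msngset - the
# recipient is assumed to have them already.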
1715 1716
1716 1717 # This is a function generating function used to set up an environment
1717 1718 # for the inner function to execute in.
1718 1719 def manifest_and_file_collector(changedfileset):
1719 1720 # This is an information gathering function that collects
1720 1721 # information from each changeset node that goes out as part of
1721 1722 # the changegroup. The information gathered is a list of which
1722 1723 # manifest nodes are potentially required (the recipient may
1723 1724 # already have them) and the total list of all files which were
1724 1725 # changed in any changeset in the changegroup.
1725 1726 #
1726 1727 # We also remember the first changenode we saw any manifest
1727 1728 # referenced by so we can later determine which changenode 'owns'
1728 1729 # the manifest.
1729 1730 def collect_manifests_and_files(clnode):
1730 1731 c = cl.read(clnode)
1731 1732 for f in c[3]:
1732 1733 # This makes sure we keep only one instance of each
1733 1734 # filename string, however many changesets touch the file.
1734 1735 changedfileset.setdefault(f, f)
1735 1736 msng_mnfst_set.setdefault(c[0], clnode)
1736 1737 return collect_manifests_and_files
1737 1738
1738 1739 # Figure out which manifest nodes (of the ones we think might be part
1739 1740 # of the changegroup) the recipient must know about and remove them
1740 1741 # from the changegroup.
1741 1742 def prune_manifests():
1742 1743 has_mnfst_set = {}
1743 1744 for n in msng_mnfst_set:
1744 1745 # If a 'missing' manifest thinks it belongs to a changenode
1745 1746 # the recipient is assumed to have, obviously the recipient
1746 1747 # must have that manifest.
1747 1748 linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
1748 1749 if linknode in has_cl_set:
1749 1750 has_mnfst_set[n] = 1
1750 1751 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1751 1752
1752 1753 # Use the information collected in collect_manifests_and_files to say
1753 1754 # which changenode any manifestnode belongs to.
1754 1755 def lookup_manifest_link(mnfstnode):
1755 1756 return msng_mnfst_set[mnfstnode]
1756 1757
1757 1758 # A function generating function that sets up the initial environment
1758 1759 # for the inner function.
1759 1760 def filenode_collector(changedfiles):
1760 1761 next_rev = [0]
1761 1762 # This gathers information from each manifestnode included in the
1762 1763 # changegroup about which filenodes the manifest node references
1763 1764 # so we can include those in the changegroup too.
1764 1765 #
1765 1766 # It also remembers which changenode each filenode belongs to. It
1766 1767 # does this by assuming that a filenode belongs to the changenode
1767 1768 # that the first manifest referencing it belongs to.
1768 1769 def collect_msng_filenodes(mnfstnode):
1769 1770 r = mnfst.rev(mnfstnode)
1770 1771 if r == next_rev[0]:
1771 1772 # If this rev directly follows the last one we looked at,
1772 1773 # reading the delta is enough.
1773 1774 deltamf = mnfst.readdelta(mnfstnode)
1774 1775 # For each line in the delta
1775 1776 for f, fnode in deltamf.iteritems():
1776 1777 f = changedfiles.get(f, None)
1777 1778 # And if the file is in the list of files we care
1778 1779 # about.
1779 1780 if f is not None:
1780 1781 # Get the changenode this manifest belongs to
1781 1782 clnode = msng_mnfst_set[mnfstnode]
1782 1783 # Create the set of filenodes for the file if
1783 1784 # there isn't one already.
1784 1785 ndset = msng_filenode_set.setdefault(f, {})
1785 1786 # And set the filenode's changelog node to the
1786 1787 # manifest's if it hasn't been set already.
1787 1788 ndset.setdefault(fnode, clnode)
1788 1789 else:
1789 1790 # Otherwise we need a full manifest.
1790 1791 m = mnfst.read(mnfstnode)
1791 1792 # For every file we care about.
1792 1793 for f in changedfiles:
1793 1794 fnode = m.get(f, None)
1794 1795 # If it's in the manifest
1795 1796 if fnode is not None:
1796 1797 # See comments above.
1797 1798 clnode = msng_mnfst_set[mnfstnode]
1798 1799 ndset = msng_filenode_set.setdefault(f, {})
1799 1800 ndset.setdefault(fnode, clnode)
1800 1801 # Remember the revision we hope to see next.
1801 1802 next_rev[0] = r + 1
1802 1803 return collect_msng_filenodes
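# A sketch of how this collector is driven (see gengroup below): the
# manifest group generator calls it back once per emitted node,
#
#   mnfst.group(msng_mnfst_lst, lookup_manifest_link,
#               filenode_collector(changedfiles))
#
# so filenode gathering happens as a side effect of streaming the
# manifests in revision order, which is what makes the delta
# optimization above pay off.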
1803 1804
1804 1805 # We have a list of filenodes we think we need for a file; let's
1805 1806 # remove all those we know the recipient must have.
1806 1807 def prune_filenodes(f, filerevlog):
1807 1808 msngset = msng_filenode_set[f]
1808 1809 hasset = {}
1809 1810 # If a 'missing' filenode thinks it belongs to a changenode we
1810 1811 # assume the recipient must have, then the recipient must have
1811 1812 # that filenode.
1812 1813 for n in msngset:
1813 1814 clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
1814 1815 if clnode in has_cl_set:
1815 1816 hasset[n] = 1
1816 1817 prune_parents(filerevlog, hasset, msngset)
1817 1818
1818 1819 # A function generating function that sets up a context for the
1819 1820 # inner function.
1820 1821 def lookup_filenode_link_func(fname):
1821 1822 msngset = msng_filenode_set[fname]
1822 1823 # Lookup the changenode the filenode belongs to.
1823 1824 def lookup_filenode_link(fnode):
1824 1825 return msngset[fnode]
1825 1826 return lookup_filenode_link
1826 1827
1827 1828 # Add the nodes that were explicitly requested.
1828 1829 def add_extra_nodes(name, nodes):
1829 1830 if not extranodes or name not in extranodes:
1830 1831 return
1831 1832
1832 1833 for node, linknode in extranodes[name]:
1833 1834 if node not in nodes:
1834 1835 nodes[node] = linknode
1835 1836
1836 1837 # Now that we have all these utility functions to help out and
1837 1838 # logically divide up the task, generate the group.
1838 1839 def gengroup():
1839 1840 # The set of changed files starts empty.
1840 1841 changedfiles = {}
1841 1842 # Create a changenode group generator that will call our functions
1842 1843 # back to lookup the owning changenode and collect information.
1843 1844 group = cl.group(msng_cl_lst, identity,
1844 1845 manifest_and_file_collector(changedfiles))
1845 1846 for chnk in group:
1846 1847 yield chnk
1847 1848
1848 1849 # The list of manifests has been collected by the generator
1849 1850 # calling our functions back.
1850 1851 prune_manifests()
1851 1852 add_extra_nodes(1, msng_mnfst_set)
1852 1853 msng_mnfst_lst = msng_mnfst_set.keys()
1853 1854 # Sort the manifestnodes by revision number.
1854 1855 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1855 1856 # Create a generator for the manifestnodes that calls our lookup
1856 1857 # and data collection functions back.
1857 1858 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1858 1859 filenode_collector(changedfiles))
1859 1860 for chnk in group:
1860 1861 yield chnk
1861 1862
1862 1863 # These are no longer needed; dereference them and free the
1863 1864 # memory.
1864 1865 msng_mnfst_lst = None
1865 1866 msng_mnfst_set.clear()
1866 1867
1867 1868 if extranodes:
1868 1869 for fname in extranodes:
1869 1870 if isinstance(fname, int):
1870 1871 continue
1871 1872 msng_filenode_set.setdefault(fname, {})
1872 1873 changedfiles[fname] = 1
1873 1874 # Go through all our files in order sorted by name.
1874 1875 for fname in util.sort(changedfiles):
1875 1876 filerevlog = self.file(fname)
1876 1877 if not len(filerevlog):
1877 1878 raise util.Abort(_("empty or missing revlog for %s") % fname)
1878 1879 # Toss out the filenodes that the recipient isn't really
1879 1880 # missing.
1880 1881 if fname in msng_filenode_set:
1881 1882 prune_filenodes(fname, filerevlog)
1882 1883 add_extra_nodes(fname, msng_filenode_set[fname])
1883 1884 msng_filenode_lst = msng_filenode_set[fname].keys()
1884 1885 else:
1885 1886 msng_filenode_lst = []
1886 1887 # If any filenodes are left, generate the group for them,
1887 1888 # otherwise don't bother.
1888 1889 if len(msng_filenode_lst) > 0:
1889 1890 yield changegroup.chunkheader(len(fname))
1890 1891 yield fname
1891 1892 # Sort the filenodes by their revision number.
1892 1893 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1893 1894 # Create a group generator and only pass in a changenode
1894 1895 # lookup function, as we don't need to collect any
1895 1896 # information from filenodes.
1896 1897 group = filerevlog.group(msng_filenode_lst,
1897 1898 lookup_filenode_link_func(fname))
1898 1899 for chnk in group:
1899 1900 yield chnk
1900 1901 if fname in msng_filenode_set:
1901 1902 # Don't need this anymore, toss it to free memory.
1902 1903 del msng_filenode_set[fname]
1903 1904 # Signal that no more groups are left.
1904 1905 yield changegroup.closechunk()
1905 1906
1906 1907 if msng_cl_lst:
1907 1908 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1908 1909
1909 1910 return util.chunkbuffer(gengroup())
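# The stream produced by gengroup is laid out roughly as follows (a
# sketch reconstructed from the yields above; each group ends with an
# empty chunk):
#
#   changelog group
#   manifest group
#   for each changed file:
#       chunkheader(len(fname)), fname, filelog group
#   closechunk()   # empty chunk: no more file groups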
1910 1911
1911 1912 def changegroup(self, basenodes, source):
1912 1913 # to avoid a race we use changegroupsubset() (issue1320)
1913 1914 return self.changegroupsubset(basenodes, self.heads(), source)
1914 1915
1915 1916 def _changegroup(self, common, source):
1916 1917 """Generate a changegroup of all nodes that we have that a recipient
1917 1918 doesn't.
1918 1919
1919 1920 This is much easier than the previous function as we can assume that
1920 1921 the recipient has any changenode we aren't sending them.
1921 1922
1922 1923 common is the set of common nodes between remote and self"""
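# Sketch of the contract, with a hypothetical node P that the remote
# is known to have:
#
#   cg = self._changegroup([P], 'push')
#
# bundles exactly cl.findmissing([P]), i.e. every changeset that is
# not an ancestor of P.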
1923 1924
1924 1925 self.hook('preoutgoing', throw=True, source=source)
1925 1926
1926 1927 cl = self.changelog
1927 1928 nodes = cl.findmissing(common)
1928 1929 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1929 1930 self.changegroupinfo(nodes, source)
1930 1931
1931 1932 def identity(x):
1932 1933 return x
1933 1934
1934 1935 def gennodelst(log):
1935 1936 for r in log:
1936 1937 if log.linkrev(r) in revset:
1937 1938 yield log.node(r)
1938 1939
1939 1940 def changed_file_collector(changedfileset):
1940 1941 def collect_changed_files(clnode):
1941 1942 c = cl.read(clnode)
1942 1943 for fname in c[3]:
1943 1944 changedfileset[fname] = 1
1944 1945 return collect_changed_files
1945 1946
1946 1947 def lookuprevlink_func(revlog):
1947 1948 def lookuprevlink(n):
1948 1949 return cl.node(revlog.linkrev(revlog.rev(n)))
1949 1950 return lookuprevlink
1950 1951
1951 1952 def gengroup():
1952 1953 # construct a list of all changed files
1953 1954 changedfiles = {}
1954 1955
1955 1956 for chnk in cl.group(nodes, identity,
1956 1957 changed_file_collector(changedfiles)):
1957 1958 yield chnk
1958 1959
1959 1960 mnfst = self.manifest
1960 1961 nodeiter = gennodelst(mnfst)
1961 1962 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1962 1963 yield chnk
1963 1964
1964 1965 for fname in util.sort(changedfiles):
1965 1966 filerevlog = self.file(fname)
1966 1967 if not len(filerevlog):
1967 1968 raise util.Abort(_("empty or missing revlog for %s") % fname)
1968 1969 nodeiter = gennodelst(filerevlog)
1969 1970 nodeiter = list(nodeiter)
1970 1971 if nodeiter:
1971 1972 yield changegroup.chunkheader(len(fname))
1972 1973 yield fname
1973 1974 lookup = lookuprevlink_func(filerevlog)
1974 1975 for chnk in filerevlog.group(nodeiter, lookup):
1975 1976 yield chnk
1976 1977
1977 1978 yield changegroup.closechunk()
1978 1979
1979 1980 if nodes:
1980 1981 self.hook('outgoing', node=hex(nodes[0]), source=source)
1981 1982
1982 1983 return util.chunkbuffer(gengroup())
1983 1984
1984 1985 def addchangegroup(self, source, srctype, url, emptyok=False):
1985 1986 """add changegroup to repo.
1986 1987
1987 1988 return values:
1988 1989 - nothing changed or no source: 0
1989 1990 - more heads than before: 1+added heads (2..n)
1990 1991 - fewer heads than before: -1-removed heads (-2..-n)
1991 1992 - number of heads stays the same: 1
1992 1993 """
1993 1994 def csmap(x):
1994 1995 self.ui.debug(_("add changeset %s\n") % short(x))
1995 1996 return len(cl)
1996 1997
1997 1998 def revmap(x):
1998 1999 return cl.rev(x)
1999 2000
2000 2001 if not source:
2001 2002 return 0
2002 2003
2003 2004 self.hook('prechangegroup', throw=True, source=srctype, url=url)
2004 2005
2005 2006 changesets = files = revisions = 0
2006 2007
2007 2008 # write changelog data to temp files so concurrent readers will not see
2008 2009 # an inconsistent view
2009 2010 cl = self.changelog
2010 2011 cl.delayupdate()
2011 2012 oldheads = len(cl.heads())
2012 2013
2013 2014 tr = self.transaction()
2014 2015 try:
2015 2016 trp = weakref.proxy(tr)
2016 2017 # pull off the changeset group
2017 2018 self.ui.status(_("adding changesets\n"))
2018 2019 cor = len(cl) - 1
2019 2020 chunkiter = changegroup.chunkiter(source)
2020 2021 if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
2021 2022 raise util.Abort(_("received changelog group is empty"))
2022 2023 cnr = len(cl) - 1
2023 2024 changesets = cnr - cor
2024 2025
2025 2026 # pull off the manifest group
2026 2027 self.ui.status(_("adding manifests\n"))
2027 2028 chunkiter = changegroup.chunkiter(source)
2028 2029 # no need to check for empty manifest group here:
2029 2030 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2030 2031 # no new manifest will be created and the manifest group will
2031 2032 # be empty during the pull
2032 2033 self.manifest.addgroup(chunkiter, revmap, trp)
2033 2034
2034 2035 # process the files
2035 2036 self.ui.status(_("adding file changes\n"))
2036 2037 while 1:
2037 2038 f = changegroup.getchunk(source)
2038 2039 if not f:
2039 2040 break
2040 2041 self.ui.debug(_("adding %s revisions\n") % f)
2041 2042 fl = self.file(f)
2042 2043 o = len(fl)
2043 2044 chunkiter = changegroup.chunkiter(source)
2044 2045 if fl.addgroup(chunkiter, revmap, trp) is None:
2045 2046 raise util.Abort(_("received file revlog group is empty"))
2046 2047 revisions += len(fl) - o
2047 2048 files += 1
2048 2049
2049 2050 newheads = len(self.changelog.heads())
2050 2051 heads = ""
2051 2052 if oldheads and newheads != oldheads:
2052 2053 heads = _(" (%+d heads)") % (newheads - oldheads)
2053 2054
2054 2055 self.ui.status(_("added %d changesets"
2055 2056 " with %d changes to %d files%s\n")
2056 2057 % (changesets, revisions, files, heads))
2057 2058
2058 2059 if changesets > 0:
2059 2060 p = lambda: self.changelog.writepending() and self.root or ""
2060 2061 self.hook('pretxnchangegroup', throw=True,
2061 2062 node=hex(self.changelog.node(cor+1)), source=srctype,
2062 2063 url=url, pending=p)
2063 2064
2064 2065 # make changelog see real files again
2065 2066 cl.finalize(trp)
2066 2067
2067 2068 tr.close()
2068 2069 finally:
2069 2070 del tr
2070 2071
2071 2072 if changesets > 0:
2072 2073 # forcefully update the on-disk branch cache
2073 2074 self.ui.debug(_("updating the branch cache\n"))
2074 2075 self.branchtags()
2075 2076 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
2076 2077 source=srctype, url=url)
2077 2078
2078 2079 for i in xrange(cor + 1, cnr + 1):
2079 2080 self.hook("incoming", node=hex(self.changelog.node(i)),
2080 2081 source=srctype, url=url)
2081 2082
2082 2083 # never return 0 here:
2083 2084 if newheads < oldheads:
2084 2085 return newheads - oldheads - 1
2085 2086 else:
2086 2087 return newheads - oldheads + 1
2087 2088
2088 2089
2089 2090 def stream_in(self, remote):
2090 2091 fp = remote.stream_out()
2091 2092 l = fp.readline()
2092 2093 try:
2093 2094 resp = int(l)
2094 2095 except ValueError:
2095 2096 raise error.ResponseError(
2096 2097 _('Unexpected response from remote server:'), l)
2097 2098 if resp == 1:
2098 2099 raise util.Abort(_('operation forbidden by server'))
2099 2100 elif resp == 2:
2100 2101 raise util.Abort(_('locking the remote repository failed'))
2101 2102 elif resp != 0:
2102 2103 raise util.Abort(_('the server sent an unknown error code'))
2103 2104 self.ui.status(_('streaming all changes\n'))
2104 2105 l = fp.readline()
2105 2106 try:
2106 2107 total_files, total_bytes = map(int, l.split(' ', 1))
2107 2108 except (ValueError, TypeError):
2108 2109 raise error.ResponseError(
2109 2110 _('Unexpected response from remote server:'), l)
2110 2111 self.ui.status(_('%d files to transfer, %s of data\n') %
2111 2112 (total_files, util.bytecount(total_bytes)))
2112 2113 start = time.time()
2113 2114 for i in xrange(total_files):
2114 2115 # XXX doesn't support '\n' or '\r' in filenames
2115 2116 l = fp.readline()
2116 2117 try:
2117 2118 name, size = l.split('\0', 1)
2118 2119 size = int(size)
2119 2120 except (ValueError, TypeError):
2120 2121 raise error.ResponseError(
2121 2122 _('Unexpected response from remote server:'), l)
2122 2123 self.ui.debug(_('adding %s (%s)\n') % (name, util.bytecount(size)))
2123 2124 ofp = self.sopener(name, 'w')
2124 2125 for chunk in util.filechunkiter(fp, limit=size):
2125 2126 ofp.write(chunk)
2126 2127 ofp.close()
2127 2128 elapsed = time.time() - start
2128 2129 if elapsed <= 0:
2129 2130 elapsed = 0.001
2130 2131 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2131 2132 (util.bytecount(total_bytes), elapsed,
2132 2133 util.bytecount(total_bytes / elapsed)))
2133 2134 self.invalidate()
2134 2135 return len(self.heads()) + 1
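# The stream parsed above looks like this on the wire (a sketch
# reconstructed from the reads; values are illustrative):
#
#   0\n                      # status: 0 ok, 1 forbidden, 2 lock failed
#   2 4096\n                 # total files, total bytes
#   data/foo.i\x001024\n     # per file: name, NUL, size, then raw data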
2135 2136
2136 2137 def clone(self, remote, heads=[], stream=False):
2137 2138 '''clone remote repository.
2138 2139
2139 2140 keyword arguments:
2140 2141 heads: list of revs to clone (forces use of pull)
2141 2142 stream: use streaming clone if possible'''
2142 2143
2143 2144 # now, all clients that can request uncompressed clones can
2144 2145 # read repo formats supported by all servers that can serve
2145 2146 # them.
2146 2147
2147 2148 # if revlog format changes, client will have to check version
2148 2149 # and format flags on "stream" capability, and use
2149 2150 # uncompressed only if compatible.
2150 2151
2151 2152 if stream and not heads and remote.capable('stream'):
2152 2153 return self.stream_in(remote)
2153 2154 return self.pull(remote, heads)
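# Typical use (sketch): streaming is attempted only when no specific
# heads are requested and the server advertises the capability.
#
#   repo.clone(remote, stream=True)    # may stream, else falls back
#   repo.clone(remote, heads=[h])      # always goes through pull()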
2154 2155
2155 2156 # used to avoid circular references so destructors work
2156 2157 def aftertrans(files):
2157 2158 renamefiles = [tuple(t) for t in files]
2158 2159 def a():
2159 2160 for src, dest in renamefiles:
2160 2161 util.rename(src, dest)
2161 2162 return a
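# Sketch of the intended use (reconstructed; the exact transaction
# signature is not shown in this file): the callable returned by
# aftertrans becomes the transaction's post-close hook, so the journal
# files are renamed to undo files once the transaction commits.
#
#   renames = [(self.sjoin('journal'), self.sjoin('undo'))]
#   tr = transaction.transaction(self.ui.warn, self.sopener,
#                                self.sjoin('journal'),
#                                aftertrans(renames))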
2162 2163
2163 2164 def instance(ui, path, create):
2164 2165 return localrepository(ui, util.drop_scheme('file', path), create)
2165 2166
2166 2167 def islocal(path):
2167 2168 return True