##// END OF EJS Templates
localrepo: simplify requirements checking
Matt Mackall -
r6895:a6bb9493 default
parent child Browse files
Show More
@@ -1,2090 +1,2088
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import bin, hex, nullid, nullrev, short
9 9 from i18n import _
10 10 import repo, changegroup
11 11 import changelog, dirstate, filelog, manifest, context, weakref
12 12 import lock, transaction, stat, errno, ui, store
13 13 import os, revlog, time, util, extensions, hook, inspect
14 14 import match as match_
15 15 import merge as merge_
16 16
17 17 class localrepository(repo.repository):
18 18 capabilities = util.set(('lookup', 'changegroupsubset'))
19 19 supported = ('revlogv1', 'store')
20 20
    def __init__(self, parentui, path=None, create=0):
        """Open (or, with create=1, initialize) the repository at path.

        parentui: ui object whose configuration this repo's ui inherits.

        Raises repo.RepoError when the repository is missing (create=0),
        already exists (create=1), or lists a requirement this version
        does not support.
        """
        repo.repository.__init__(self)
        self.root = os.path.realpath(path)
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        self.opener = util.opener(self.path)
        self.wopener = util.opener(self.root)

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    os.mkdir(path)
                os.mkdir(self.path)
                requirements = ["revlogv1"]
                if parentui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                # create an invalid changelog
                self.opener("00changelog.i", "a").write(
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                )
                reqfile = self.opener("requires", "w")
                for r in requirements:
                    reqfile.write("%s\n" % r)
                reqfile.close()
            else:
                raise repo.RepoError(_("repository %s not found") % path)
        elif create:
            raise repo.RepoError(_("repository %s already exists") % path)
        else:
            # find requirements
            requirements = []
            try:
                requirements = self.opener("requires").read().splitlines()
                for r in requirements:
                    if r not in self.supported:
                        raise repo.RepoError(_("requirement '%s' not supported") % r)
            except IOError, inst:
                # a missing "requires" file means an old-style repository
                # with no requirements; any other I/O failure is fatal
                if inst.errno != errno.ENOENT:
                    raise

        self.store = store.store(requirements, self.path)
        self.spath = self.store.path
        self.sopener = self.store.opener
        self.sjoin = self.store.join
        self.opener.createmode = self.store.createmode

        self.ui = ui.ui(parentui=parentui)
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        # lazily-populated caches, reset by invalidate()
        self.tagscache = None
        self._tagstypecache = None
        self.branchcache = None
        self._ubranchcache = None # UTF-8 version of branchcache
        self._branchcachetip = None
        self.nodetagscache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None
87 85
    def __getattr__(self, name):
        """Lazily construct the expensive changelog/manifest/dirstate
        objects on first access and cache them on the instance (so
        __getattr__ is not consulted again for that name)."""
        if name == 'changelog':
            self.changelog = changelog.changelog(self.sopener)
            self.sopener.defversion = self.changelog.version
            return self.changelog
        if name == 'manifest':
            # touching self.changelog first ensures sopener.defversion is set
            self.changelog
            self.manifest = manifest.manifest(self.sopener)
            return self.manifest
        if name == 'dirstate':
            self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
            return self.dirstate
        else:
            raise AttributeError, name
102 100
103 101 def __getitem__(self, changeid):
104 102 if changeid == None:
105 103 return context.workingctx(self)
106 104 return context.changectx(self, changeid)
107 105
108 106 def __nonzero__(self):
109 107 return True
110 108
111 109 def __len__(self):
112 110 return len(self.changelog)
113 111
114 112 def __iter__(self):
115 113 for i in xrange(len(self)):
116 114 yield i
117 115
118 116 def url(self):
119 117 return 'file:' + self.root
120 118
121 119 def hook(self, name, throw=False, **args):
122 120 return hook.hook(self.ui, self, name, throw, **args)
123 121
    # characters that may never appear in a tag name
    tag_disallowed = ':\r\n'
125 123
    def _tag(self, names, node, message, local, user, date, parent=None,
             extra={}):
        """Tagging implementation: record names -> node in the tag store.

        names: a single tag name (str) or a sequence of names.
        local: True writes to .hg/localtags (current charset, no commit);
               False updates .hgtags (UTF-8) and commits the change.
        parent: commit against this revision instead of the dirstate.

        Returns the node of the tagging commit, or None for local tags.
        """
        use_dirstate = parent is None

        if isinstance(names, str):
            allchars = names
            names = (names,)
        else:
            allchars = ''.join(names)
        for c in self.tag_disallowed:
            if c in allchars:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)

        def writetags(fp, names, munge, prevtags):
            # append entries at EOF; if the tag already existed, first
            # re-record its old node so the new entry supersedes it
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if self._tagstypecache and name in self._tagstypecache:
                    old = self.tagscache.get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError, err:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        if use_dirstate:
            try:
                fp = self.wfile('.hgtags', 'rb+')
            except IOError, err:
                fp = self.wfile('.hgtags', 'ab')
            else:
                prevtags = fp.read()
        else:
            # start from the .hgtags content of the requested parent
            try:
                prevtags = self.filectx('.hgtags', parent).data()
            except revlog.LookupError:
                pass
            fp = self.wfile('.hgtags', 'wb')
            if prevtags:
                fp.write(prevtags)

        # committed tags are stored in UTF-8
        writetags(fp, names, util.fromlocal, prevtags)

        if use_dirstate and '.hgtags' not in self.dirstate:
            self.add(['.hgtags'])

        tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
                              extra=extra)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode
199 197
200 198 def tag(self, names, node, message, local, user, date):
201 199 '''tag a revision with one or more symbolic names.
202 200
203 201 names is a list of strings or, when adding a single tag, names may be a
204 202 string.
205 203
206 204 if local is True, the tags are stored in a per-repository file.
207 205 otherwise, they are stored in the .hgtags file, and a new
208 206 changeset is committed with the change.
209 207
210 208 keyword arguments:
211 209
212 210 local: whether to store tags in non-version-controlled file
213 211 (default False)
214 212
215 213 message: commit message to use if committing
216 214
217 215 user: name of user to use if committing
218 216
219 217 date: date tuple to use if committing'''
220 218
221 219 for x in self.status()[:5]:
222 220 if '.hgtags' in x:
223 221 raise util.Abort(_('working copy of .hgtags is changed '
224 222 '(please commit .hgtags manually)'))
225 223
226 224 self._tag(names, node, message, local, user, date)
227 225
    def tags(self):
        '''return a mapping of tag to node'''
        if self.tagscache:
            return self.tagscache

        globaltags = {}
        tagtypes = {}

        def readtags(lines, fn, tagtype):
            # parse "hexnode tagname" lines from fn and merge them into
            # globaltags/tagtypes, warning (not failing) on bad entries
            filetags = {}
            count = 0

            def warn(msg):
                self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))

            for l in lines:
                count += 1
                if not l:
                    continue
                s = l.split(" ", 1)
                if len(s) != 2:
                    warn(_("cannot parse entry"))
                    continue
                node, key = s
                key = util.tolocal(key.strip()) # stored in UTF-8
                try:
                    bin_n = bin(node)
                except TypeError:
                    warn(_("node '%s' is not well formed") % node)
                    continue
                if bin_n not in self.changelog.nodemap:
                    warn(_("tag '%s' refers to unknown node") % key)
                    continue

                # h accumulates nodes this tag previously pointed at
                # (superseded entries earlier in the same file)
                h = []
                if key in filetags:
                    n, h = filetags[key]
                    h.append(n)
                filetags[key] = (bin_n, h)

            for k, nh in filetags.items():
                if k not in globaltags:
                    globaltags[k] = nh
                    tagtypes[k] = tagtype
                    continue

                # we prefer the global tag if:
                #  it supercedes us OR
                #  mutual supercedes and it has a higher rank
                # otherwise we win because we're tip-most
                an, ah = nh
                bn, bh = globaltags[k]
                if (bn != an and an in bh and
                    (bn not in ah or len(bh) > len(ah))):
                    an = bn
                ah.extend([n for n in bh if n not in ah])
                globaltags[k] = an, ah
                tagtypes[k] = tagtype

        # read the tags file from each head, ending with the tip
        f = None
        for rev, node, fnode in self._hgtagsnodes():
            f = (f and f.filectx(fnode) or
                 self.filectx('.hgtags', fileid=fnode))
            readtags(f.data().splitlines(), f, "global")

        try:
            data = util.fromlocal(self.opener("localtags").read())
            # localtags are stored in the local character set
            # while the internal tag table is stored in UTF-8
            readtags(data.splitlines(), "localtags", "local")
        except IOError:
            pass

        self.tagscache = {}
        self._tagstypecache = {}
        for k,nh in globaltags.items():
            n = nh[0]
            # tags pointing at nullid mark deletion; keep the type anyway
            if n != nullid:
                self.tagscache[k] = n
            self._tagstypecache[k] = tagtypes[k]
        # 'tip' is always an implicit tag
        self.tagscache['tip'] = self.changelog.tip()
        return self.tagscache
311 309
312 310 def tagtype(self, tagname):
313 311 '''
314 312 return the type of the given tag. result can be:
315 313
316 314 'local' : a local tag
317 315 'global' : a global tag
318 316 None : tag does not exist
319 317 '''
320 318
321 319 self.tags()
322 320
323 321 return self._tagstypecache.get(tagname)
324 322
    def _hgtagsnodes(self):
        """Return (rev, node, .hgtags filenode) for each head, tip last.

        When a later head carries the same .hgtags filenode, the earlier
        entry is dropped, so each distinct .hgtags version appears once.
        """
        heads = self.heads()
        heads.reverse()
        last = {}   # filenode -> index of its latest entry in ret
        ret = []
        for node in heads:
            c = self[node]
            rev = c.rev()
            try:
                fnode = c.filenode('.hgtags')
            except revlog.LookupError:
                # head has no .hgtags file
                continue
            ret.append((rev, node, fnode))
            if fnode in last:
                # duplicate .hgtags content: void the earlier entry
                ret[last[fnode]] = None
            last[fnode] = len(ret) - 1
        return [item for item in ret if item]
342 340
343 341 def tagslist(self):
344 342 '''return a list of tags ordered by revision'''
345 343 l = []
346 344 for t, n in self.tags().items():
347 345 try:
348 346 r = self.changelog.rev(n)
349 347 except:
350 348 r = -2 # sort to the beginning of the list if unknown
351 349 l.append((r, t, n))
352 350 return [(t, n) for r, t, n in util.sort(l)]
353 351
354 352 def nodetags(self, node):
355 353 '''return the tags associated with a node'''
356 354 if not self.nodetagscache:
357 355 self.nodetagscache = {}
358 356 for t, n in self.tags().items():
359 357 self.nodetagscache.setdefault(n, []).append(t)
360 358 return self.nodetagscache.get(node, [])
361 359
362 360 def _branchtags(self, partial, lrev):
363 361 tiprev = len(self) - 1
364 362 if lrev != tiprev:
365 363 self._updatebranchcache(partial, lrev+1, tiprev+1)
366 364 self._writebranchcache(partial, self.changelog.tip(), tiprev)
367 365
368 366 return partial
369 367
    def branchtags(self):
        """Return a dict of branch name (local charset) -> head node.

        The result is cached in self.branchcache and refreshed whenever
        the changelog tip has moved since the last call.
        """
        tip = self.changelog.tip()
        if self.branchcache is not None and self._branchcachetip == tip:
            return self.branchcache

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if self.branchcache is None:
            self.branchcache = {} # avoid recursion in changectx
        else:
            self.branchcache.clear() # keep using the same dict
        if oldtip is None or oldtip not in self.changelog.nodemap:
            # no usable in-memory state: reload the on-disk cache
            partial, last, lrev = self._readbranchcache()
        else:
            # incremental update from the previously seen tip
            lrev = self.changelog.rev(oldtip)
            partial = self._ubranchcache

        self._branchtags(partial, lrev)

        # the branch cache is stored on disk as UTF-8, but in the local
        # charset internally
        for k, v in partial.items():
            self.branchcache[util.tolocal(k)] = v
        self._ubranchcache = partial
        return self.branchcache
395 393
    def _readbranchcache(self):
        """Read .hg/branch.cache; return (branchmap, tipnode, tiprev).

        Format: first line "<tip hex> <tip rev>", then one
        "<node hex> <branch>" line per branch. Returns an empty cache
        ({}, nullid, nullrev) when the file is missing, unparsable, or
        describes a tip we no longer have.
        """
        partial = {}
        try:
            f = self.opener("branch.cache")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            for l in lines:
                if not l: continue
                node, label = l.split(" ", 1)
                partial[label.strip()] = bin(node)
        except (KeyboardInterrupt, util.SignalInterrupt):
            # user interrupts must propagate, never be eaten as cache errors
            raise
        except Exception, inst:
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev
422 420
423 421 def _writebranchcache(self, branches, tip, tiprev):
424 422 try:
425 423 f = self.opener("branch.cache", "w", atomictemp=True)
426 424 f.write("%s %s\n" % (hex(tip), tiprev))
427 425 for label, node in branches.iteritems():
428 426 f.write("%s %s\n" % (hex(node), label))
429 427 f.rename()
430 428 except (IOError, OSError):
431 429 pass
432 430
433 431 def _updatebranchcache(self, partial, start, end):
434 432 for r in xrange(start, end):
435 433 c = self[r]
436 434 b = c.branch()
437 435 partial[b] = c.node()
438 436
439 437 def lookup(self, key):
440 438 if key == '.':
441 439 return self.dirstate.parents()[0]
442 440 elif key == 'null':
443 441 return nullid
444 442 n = self.changelog._match(key)
445 443 if n:
446 444 return n
447 445 if key in self.tags():
448 446 return self.tags()[key]
449 447 if key in self.branchtags():
450 448 return self.branchtags()[key]
451 449 n = self.changelog._partialmatch(key)
452 450 if n:
453 451 return n
454 452 try:
455 453 if len(key) == 20:
456 454 key = hex(key)
457 455 except:
458 456 pass
459 457 raise repo.RepoError(_("unknown revision '%s'") % key)
460 458
461 459 def local(self):
462 460 return True
463 461
464 462 def join(self, f):
465 463 return os.path.join(self.path, f)
466 464
467 465 def wjoin(self, f):
468 466 return os.path.join(self.root, f)
469 467
470 468 def rjoin(self, f):
471 469 return os.path.join(self.root, util.pconvert(f))
472 470
473 471 def file(self, f):
474 472 if f[0] == '/':
475 473 f = f[1:]
476 474 return filelog.filelog(self.sopener, f)
477 475
478 476 def changectx(self, changeid):
479 477 return self[changeid]
480 478
481 479 def parents(self, changeid=None):
482 480 '''get list of changectxs for parents of changeid'''
483 481 return self[changeid].parents()
484 482
485 483 def filectx(self, path, changeid=None, fileid=None):
486 484 """changeid can be a changeset revision, node, or tag.
487 485 fileid can be a file revision or node."""
488 486 return context.filectx(self, path, changeid, fileid)
489 487
490 488 def getcwd(self):
491 489 return self.dirstate.getcwd()
492 490
493 491 def pathto(self, f, cwd=None):
494 492 return self.dirstate.pathto(f, cwd)
495 493
496 494 def wfile(self, f, mode='r'):
497 495 return self.wopener(f, mode)
498 496
499 497 def _link(self, f):
500 498 return os.path.islink(self.wjoin(f))
501 499
    def _filter(self, filter, filename, data):
        """Run data through the first configured filter whose pattern
        matches filename; 'filter' names the config section
        ("encode" or "decode"). Returns the (possibly transformed) data.
        """
        if filter not in self.filterpats:
            # compile the (matcher, function, params) list once per section
            l = []
            for pat, cmd in self.ui.configitems(filter):
                mf = util.matcher(self.root, "", [pat], [], [])[1]
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        # a registered in-process filter takes precedence
                        # over shelling out
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    # fall back to piping the data through cmd
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l

        for mf, fn, cmd in self.filterpats[filter]:
            if mf(filename):
                self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data
530 528
531 529 def adddatafilter(self, name, filter):
532 530 self._datafilters[name] = filter
533 531
534 532 def wread(self, filename):
535 533 if self._link(filename):
536 534 data = os.readlink(self.wjoin(filename))
537 535 else:
538 536 data = self.wopener(filename, 'r').read()
539 537 return self._filter("encode", filename, data)
540 538
541 539 def wwrite(self, filename, data, flags):
542 540 data = self._filter("decode", filename, data)
543 541 try:
544 542 os.unlink(self.wjoin(filename))
545 543 except OSError:
546 544 pass
547 545 if 'l' in flags:
548 546 self.wopener.symlink(data, filename)
549 547 else:
550 548 self.wopener(filename, 'w').write(data)
551 549 if 'x' in flags:
552 550 util.set_flags(self.wjoin(filename), False, True)
553 551
554 552 def wwritedata(self, filename, data):
555 553 return self._filter("decode", filename, data)
556 554
    def transaction(self):
        """Open a store transaction, nesting inside one already running.

        Saves the dirstate and branch for rollback and refuses to start
        if a journal from an interrupted transaction exists.
        """
        if self._transref and self._transref():
            return self._transref().nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise repo.RepoError(_("journal already exists - run hg recover"))

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)
        self.opener("journal.branch", "w").write(self.dirstate.branch())

        # on close, journal.* files become the undo.* files for rollback
        renames = [(self.sjoin("journal"), self.sjoin("undo")),
                   (self.join("journal.dirstate"), self.join("undo.dirstate")),
                   (self.join("journal.branch"), self.join("undo.branch"))]
        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        # only a weak reference, so an abandoned transaction gets collected
        self._transref = weakref.ref(tr)
        return tr
582 580
583 581 def recover(self):
584 582 l = self.lock()
585 583 try:
586 584 if os.path.exists(self.sjoin("journal")):
587 585 self.ui.status(_("rolling back interrupted transaction\n"))
588 586 transaction.rollback(self.sopener, self.sjoin("journal"))
589 587 self.invalidate()
590 588 return True
591 589 else:
592 590 self.ui.warn(_("no interrupted transaction available\n"))
593 591 return False
594 592 finally:
595 593 del l
596 594
    def rollback(self):
        """Undo the last committed transaction, restoring the dirstate
        and named branch saved alongside it."""
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                self.ui.status(_("rolling back last transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("undo"))
                util.rename(self.join("undo.dirstate"), self.join("dirstate"))
                try:
                    branch = self.opener("undo.branch").read()
                    self.dirstate.setbranch(branch)
                except IOError:
                    # undo.branch missing: keep the current branch, warn
                    self.ui.warn(_("Named branch could not be reset, "
                                   "current branch still is: %s\n")
                                 % util.tolocal(self.dirstate.branch()))
                # drop caches made stale by rewinding history
                self.invalidate()
                self.dirstate.invalidate()
            else:
                self.ui.warn(_("no rollback information available\n"))
        finally:
            # dropping the references releases both locks
            del lock, wlock
619 617
620 618 def invalidate(self):
621 619 for a in "changelog manifest".split():
622 620 if a in self.__dict__:
623 621 delattr(self, a)
624 622 self.tagscache = None
625 623 self._tagstypecache = None
626 624 self.nodetagscache = None
627 625 self.branchcache = None
628 626 self._ubranchcache = None
629 627 self._branchcachetip = None
630 628
631 629 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
632 630 try:
633 631 l = lock.lock(lockname, 0, releasefn, desc=desc)
634 632 except lock.LockHeld, inst:
635 633 if not wait:
636 634 raise
637 635 self.ui.warn(_("waiting for lock on %s held by %r\n") %
638 636 (desc, inst.locker))
639 637 # default to 600 seconds timeout
640 638 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
641 639 releasefn, desc=desc)
642 640 if acquirefn:
643 641 acquirefn()
644 642 return l
645 643
646 644 def lock(self, wait=True):
647 645 if self._lockref and self._lockref():
648 646 return self._lockref()
649 647
650 648 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
651 649 _('repository %s') % self.origroot)
652 650 self._lockref = weakref.ref(l)
653 651 return l
654 652
655 653 def wlock(self, wait=True):
656 654 if self._wlockref and self._wlockref():
657 655 return self._wlockref()
658 656
659 657 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
660 658 self.dirstate.invalidate, _('working directory of %s') %
661 659 self.origroot)
662 660 self._wlockref = weakref.ref(l)
663 661 return l
664 662
    def filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction

        fctx: the file context to commit; manifest1/manifest2: parent
        manifests; tr: the open transaction. Appends the file name to
        changelist when a new filelog revision is created, and returns
        the (new or reused) file node.
        """

        fn = fctx.path()
        t = fctx.data()
        fl = self.file(fn)
        fp1 = manifest1.get(fn, nullid)
        fp2 = manifest2.get(fn, nullid)

        meta = {}
        cp = fctx.renamed()
        if cp and cp[0] != fn:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cf = cp[0]
            cr = manifest1.get(cf)
            nfp = fp2

            if manifest2: # branch merge
                if fp2 == nullid: # copied on remote side
                    if fp1 != nullid or cf in manifest2:
                        cr = manifest2[cf]
                        nfp = fp1

            # find source in nearest ancestor if we've lost track
            if not cr:
                self.ui.debug(_(" %s: searching for copy revision for %s\n") %
                              (fn, cf))
                for a in self['.'].ancestors():
                    if cf in a:
                        cr = a[cf].filenode()
                        break

            self.ui.debug(_(" %s: copy %s:%s\n") % (fn, cf, hex(cr)))
            meta["copy"] = cf
            meta["copyrev"] = hex(cr)
            fp1, fp2 = nullid, nfp
        elif fp2 != nullid:
            # is one parent an ancestor of the other?
            fpa = fl.ancestor(fp1, fp2)
            if fpa == fp1:
                fp1, fp2 = fp2, nullid
            elif fpa == fp2:
                fp2 = nullid

        # is the file unmodified from the parent? report existing entry
        if fp2 == nullid and not fl.cmp(fp1, t) and not meta:
            return fp1

        changelist.append(fn)
        return fl.add(t, meta, tr, linkrev, fp1, fp2)
735 733
736 734 def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}):
737 735 if p1 is None:
738 736 p1, p2 = self.dirstate.parents()
739 737 return self.commit(files=files, text=text, user=user, date=date,
740 738 p1=p1, p2=p2, extra=extra, empty_ok=True)
741 739
    def commit(self, files=None, text="", user=None, date=None,
               match=None, force=False, force_editor=False,
               p1=None, p2=None, extra={}, empty_ok=False):
        """Commit changes and return the new changeset node (None when
        nothing changed).  Passing p1 selects rawcommit mode, which
        takes the file list at face value instead of asking the dirstate.
        """
        wlock = lock = None
        if files:
            files = util.unique(files)
        try:
            wlock = self.wlock()
            lock = self.lock()
            use_dirstate = (p1 is None) # not rawcommit

            if use_dirstate:
                p1, p2 = self.dirstate.parents()
                update_dirstate = True

                # a merge may only be committed whole
                if (not force and p2 != nullid and
                    (match and (match.files() or match.anypats()))):
                    raise util.Abort(_('cannot partially commit a merge '
                                       '(do not specify files or patterns)'))

                if files:
                    # classify the explicitly listed files by dirstate state
                    modified, removed = [], []
                    for f in files:
                        s = self.dirstate[f]
                        if s in 'nma':
                            modified.append(f)
                        elif s == 'r':
                            removed.append(f)
                        else:
                            self.ui.warn(_("%s not tracked!\n") % f)
                    changes = [modified, [], removed, [], []]
                else:
                    changes = self.status(match=match)
            else:
                p1, p2 = p1, p2 or nullid
                update_dirstate = (self.dirstate.parents()[0] == p1)
                changes = [files, [], [], [], []]

            # refuse to commit files with unresolved merge conflicts
            ms = merge_.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg resolve)"))
            wctx = context.workingctx(self, (p1, p2), text, user, date,
                                      extra, changes)
            return self._commitctx(wctx, force, force_editor, empty_ok,
                                   use_dirstate, update_dirstate)
        finally:
            # dropping the references releases both locks
            del lock, wlock
791 789
    def commitctx(self, ctx):
        """Commit the given, fully prepared context to the repository
        without consulting or updating the working directory/dirstate."""
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            return self._commitctx(ctx, force=True, force_editor=False,
                                   empty_ok=True, use_dirstate=False,
                                   update_dirstate=False)
        finally:
            # dropping the references releases both locks
            del lock, wlock
802 800
    def _commitctx(self, wctx, force=False, force_editor=False, empty_ok=False,
                   use_dirstate=True, update_dirstate=True):
        """Write the changeset described by wctx: commit each changed
        file, then the manifest, then the changelog entry, running the
        precommit/pretxncommit/commit hooks along the way.

        Returns the new changeset node, or None when there is nothing
        to commit.
        """
        tr = None
        valid = 0 # don't save the dirstate if this isn't set
        try:
            commit = util.sort(wctx.modified() + wctx.added())
            remove = wctx.removed()
            extra = wctx.extra().copy()
            branchname = extra['branch']
            user = wctx.user()
            text = wctx.description()

            p1, p2 = [p.node() for p in wctx.parents()]
            c1 = self.changelog.read(p1)
            c2 = self.changelog.read(p2)
            m1 = self.manifest.read(c1[0]).copy()
            m2 = self.manifest.read(c2[0])

            if use_dirstate:
                oldname = c1[5].get("branch") # stored in UTF-8
                # an empty, non-forced, non-merge commit that doesn't
                # change the branch is a no-op
                if (not commit and not remove and not force and p2 == nullid
                    and branchname == oldname):
                    self.ui.status(_("nothing changed\n"))
                    return None

            xp1 = hex(p1)
            if p2 == nullid: xp2 = ''
            else: xp2 = hex(p2)

            self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

            tr = self.transaction()
            # hand callees a proxy so they cannot keep the transaction alive
            trp = weakref.proxy(tr)

            # check in files
            new = {}
            changed = []
            linkrev = len(self)
            for f in commit:
                self.ui.note(f + "\n")
                try:
                    fctx = wctx.filectx(f)
                    newflags = fctx.flags()
                    new[f] = self.filecommit(fctx, m1, m2, linkrev, trp, changed)
                    if ((not changed or changed[-1] != f) and
                        m2.get(f) != new[f]):
                        # mention the file in the changelog if some
                        # flag changed, even if there was no content
                        # change.
                        if m1.flags(f) != newflags:
                            changed.append(f)
                    m1.set(f, newflags)
                    if use_dirstate:
                        self.dirstate.normal(f)

                except (OSError, IOError):
                    if use_dirstate:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    else:
                        # rawcommit: treat an unreadable file as removed
                        remove.append(f)

            # update manifest
            m1.update(new)
            removed = []

            for f in util.sort(remove):
                if f in m1:
                    del m1[f]
                    removed.append(f)
                elif f in m2:
                    removed.append(f)
            mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
                                   (new, removed))

            # add changeset
            if (not empty_ok and not text) or force_editor:
                edittext = []
                if text:
                    edittext.append(text)
                edittext.append("")
                edittext.append(_("HG: Enter commit message."
                                  " Lines beginning with 'HG:' are removed."))
                edittext.append("HG: --")
                edittext.append("HG: user: %s" % user)
                if p2 != nullid:
                    edittext.append("HG: branch merge")
                if branchname:
                    edittext.append("HG: branch '%s'" % util.tolocal(branchname))
                edittext.extend(["HG: changed %s" % f for f in changed])
                edittext.extend(["HG: removed %s" % f for f in removed])
                if not changed and not remove:
                    edittext.append("HG: no files changed")
                edittext.append("")
                # run editor in the repository root
                olddir = os.getcwd()
                os.chdir(self.root)
                text = self.ui.edit("\n".join(edittext), user)
                os.chdir(olddir)

            # normalize the message: strip trailing space and leading blanks
            lines = [line.rstrip() for line in text.rstrip().splitlines()]
            while lines and not lines[0]:
                del lines[0]
            if not lines and use_dirstate:
                raise util.Abort(_("empty commit message"))
            text = '\n'.join(lines)

            n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
                                   user, wctx.date(), extra)
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            tr.close()

            # refresh the branch cache if one is loaded
            if self.branchcache:
                self.branchtags()

            if use_dirstate or update_dirstate:
                self.dirstate.setparents(n)
            if use_dirstate:
                for f in removed:
                    self.dirstate.forget(f)
            valid = 1 # our dirstate updates are complete

            self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
            return n
        finally:
            if not valid: # don't save our updated dirstate
                self.dirstate.invalidate()
            del tr
932 930
933 931 def walk(self, match, node=None):
934 932 '''
935 933 walk recursively through the directory tree or a given
936 934 changeset, finding all files matched by the match
937 935 function
938 936 '''
939 937 return self[node].walk(match)
940 938
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.

        Returns (modified, added, removed, deleted, unknown, ignored,
        clean), each a sorted list of file names.
        """

        def mfmatches(ctx):
            # ctx's manifest restricted to files the match function accepts
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        ctx1 = self[node1]
        ctx2 = self[node2]
        working = ctx2 == self[None]
        parentworking = working and ctx1 == self['.']
        match = match or match_.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        if working: # we need to scan the working dir
            s = self.dirstate.status(match, listignored, listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in cmp:
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f].data())):
                        modified.append(f)
                    else:
                        fixup.append(f)

                if listclean:
                    clean += fixup

                # update dirstate for files that are actually clean
                if fixup:
                    wlock = None
                    try:
                        try:
                            # best-effort only: don't block on the wlock
                            wlock = self.wlock(False)
                            for f in fixup:
                                self.dirstate.normal(f)
                        except lock.LockException:
                            pass
                    finally:
                        del wlock

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            # reclassify every file in mf2 against mf1
            modified, added, clean = [], [], []
            for fn in mf2:
                if fn in mf1:
                    if (mf1.flags(fn) != mf2.flags(fn) or
                        (mf1[fn] != mf2[fn] and
                         (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)
            # whatever is left in mf1 no longer exists in mf2
            removed = mf1.keys()

        r = modified, added, removed, deleted, unknown, ignored, clean
        [l.sort() for l in r]
        return r
1028 1026
1029 1027 def add(self, list):
1030 1028 wlock = self.wlock()
1031 1029 try:
1032 1030 rejected = []
1033 1031 for f in list:
1034 1032 p = self.wjoin(f)
1035 1033 try:
1036 1034 st = os.lstat(p)
1037 1035 except:
1038 1036 self.ui.warn(_("%s does not exist!\n") % f)
1039 1037 rejected.append(f)
1040 1038 continue
1041 1039 if st.st_size > 10000000:
1042 1040 self.ui.warn(_("%s: files over 10MB may cause memory and"
1043 1041 " performance problems\n"
1044 1042 "(use 'hg revert %s' to unadd the file)\n")
1045 1043 % (f, f))
1046 1044 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1047 1045 self.ui.warn(_("%s not added: only files and symlinks "
1048 1046 "supported currently\n") % f)
1049 1047 rejected.append(p)
1050 1048 elif self.dirstate[f] in 'amn':
1051 1049 self.ui.warn(_("%s already tracked!\n") % f)
1052 1050 elif self.dirstate[f] == 'r':
1053 1051 self.dirstate.normallookup(f)
1054 1052 else:
1055 1053 self.dirstate.add(f)
1056 1054 return rejected
1057 1055 finally:
1058 1056 del wlock
1059 1057
1060 1058 def forget(self, list):
1061 1059 wlock = self.wlock()
1062 1060 try:
1063 1061 for f in list:
1064 1062 if self.dirstate[f] != 'a':
1065 1063 self.ui.warn(_("%s not added!\n") % f)
1066 1064 else:
1067 1065 self.dirstate.forget(f)
1068 1066 finally:
1069 1067 del wlock
1070 1068
    def remove(self, list, unlink=False):
        """Schedule the files in *list* for removal from the dirstate.

        When unlink is true, also delete the files from the working
        directory first; files that still exist afterwards (e.g. were
        recreated) are warned about and left tracked.
        """
        wlock = None
        try:
            if unlink:
                # delete working copies before taking the wlock; a file
                # that is already gone is fine, any other error is fatal
                for f in list:
                    try:
                        util.unlink(self.wjoin(f))
                    except OSError, inst:
                        if inst.errno != errno.ENOENT:
                            raise
            wlock = self.wlock()
            for f in list:
                if unlink and os.path.exists(self.wjoin(f)):
                    self.ui.warn(_("%s still exists!\n") % f)
                elif self.dirstate[f] == 'a':
                    # never committed: forgetting undoes the add completely
                    self.dirstate.forget(f)
                elif f not in self.dirstate:
                    self.ui.warn(_("%s not tracked!\n") % f)
                else:
                    self.dirstate.remove(f)
        finally:
            del wlock
1093 1091
1094 1092 def undelete(self, list):
1095 1093 wlock = None
1096 1094 try:
1097 1095 manifests = [self.manifest.read(self.changelog.read(p)[0])
1098 1096 for p in self.dirstate.parents() if p != nullid]
1099 1097 wlock = self.wlock()
1100 1098 for f in list:
1101 1099 if self.dirstate[f] != 'r':
1102 1100 self.ui.warn("%s not removed!\n" % f)
1103 1101 else:
1104 1102 m = f in manifests[0] and manifests[0] or manifests[1]
1105 1103 t = self.file(f).read(m[f])
1106 1104 self.wwrite(f, t, m.flags(f))
1107 1105 self.dirstate.normal(f)
1108 1106 finally:
1109 1107 del wlock
1110 1108
1111 1109 def copy(self, source, dest):
1112 1110 wlock = None
1113 1111 try:
1114 1112 p = self.wjoin(dest)
1115 1113 if not (os.path.exists(p) or os.path.islink(p)):
1116 1114 self.ui.warn(_("%s does not exist!\n") % dest)
1117 1115 elif not (os.path.isfile(p) or os.path.islink(p)):
1118 1116 self.ui.warn(_("copy failed: %s is not a file or a "
1119 1117 "symbolic link\n") % dest)
1120 1118 else:
1121 1119 wlock = self.wlock()
1122 1120 if dest not in self.dirstate:
1123 1121 self.dirstate.add(dest)
1124 1122 self.dirstate.copy(source, dest)
1125 1123 finally:
1126 1124 del wlock
1127 1125
1128 1126 def heads(self, start=None):
1129 1127 heads = self.changelog.heads(start)
1130 1128 # sort the output in rev descending order
1131 1129 heads = [(-self.changelog.rev(h), h) for h in heads]
1132 1130 return [n for (r, n) in util.sort(heads)]
1133 1131
    def branchheads(self, branch=None, start=None):
        """Return the head nodes of the named branch (default: the
        working directory's branch), optionally restricted to heads
        reachable from *start*. Returns [] for an unknown branch."""
        if branch is None:
            branch = self[None].branch()
        branches = self.branchtags()
        if branch not in branches:
            return []
        # The basic algorithm is this:
        #
        # Start from the branch tip since there are no later revisions that can
        # possibly be in this branch, and the tip is a guaranteed head.
        #
        # Remember the tip's parents as the first ancestors, since these by
        # definition are not heads.
        #
        # Step backwards from the branch tip through all the revisions. We are
        # guaranteed by the rules of Mercurial that we will now be visiting the
        # nodes in reverse topological order (children before parents).
        #
        # If a revision is one of the ancestors of a head then we can toss it
        # out of the ancestors set (we've already found it and won't be
        # visiting it again) and put its parents in the ancestors set.
        #
        # Otherwise, if a revision is in the branch it's another head, since it
        # wasn't in the ancestor list of an existing head. So add it to the
        # head list, and add its parents to the ancestor list.
        #
        # If it is not in the branch ignore it.
        #
        # Once we have a list of heads, use nodesbetween to filter out all the
        # heads that cannot be reached from startrev. There may be a more
        # efficient way to do this as part of the previous algorithm.

        set = util.set
        heads = [self.changelog.rev(branches[branch])]
        # Don't care if ancestors contains nullrev or not.
        ancestors = set(self.changelog.parentrevs(heads[0]))
        for rev in xrange(heads[0] - 1, nullrev, -1):
            if rev in ancestors:
                ancestors.update(self.changelog.parentrevs(rev))
                ancestors.remove(rev)
            elif self[rev].branch() == branch:
                heads.append(rev)
                ancestors.update(self.changelog.parentrevs(rev))
        heads = [self.changelog.node(rev) for rev in heads]
        if start is not None:
            heads = self.changelog.nodesbetween([start], heads)[2]
        return heads
1181 1179
1182 1180 def branches(self, nodes):
1183 1181 if not nodes:
1184 1182 nodes = [self.changelog.tip()]
1185 1183 b = []
1186 1184 for n in nodes:
1187 1185 t = n
1188 1186 while 1:
1189 1187 p = self.changelog.parents(n)
1190 1188 if p[1] != nullid or p[0] == nullid:
1191 1189 b.append((t, n, p[0], p[1]))
1192 1190 break
1193 1191 n = p[0]
1194 1192 return b
1195 1193
1196 1194 def between(self, pairs):
1197 1195 r = []
1198 1196
1199 1197 for top, bottom in pairs:
1200 1198 n, l, i = top, [], 0
1201 1199 f = 1
1202 1200
1203 1201 while n != bottom:
1204 1202 p = self.changelog.parents(n)[0]
1205 1203 if i == f:
1206 1204 l.append(n)
1207 1205 f = f * 2
1208 1206 n = p
1209 1207 i += 1
1210 1208
1211 1209 r.append(l)
1212 1210
1213 1211 return r
1214 1212
    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return list of roots of the subsets of missing nodes from remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore base will be updated to include the nodes that exists
        in self and remote but no children exists in self and remote.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        All the descendants of the list returned are missing in self.
        (and so we know that the rest of the nodes are missing in remote, see
        outgoing)
        """
        m = self.changelog.nodemap
        search = []        # branch segments still to be binary-searched
        fetch = {}         # earliest unknown nodes (the result roots)
        seen = {}          # branch heads already examined
        seenbranch = {}    # branch tuples already scheduled for search
        if base == None:
            base = {}

        if not heads:
            heads = remote.heads()

        if self.changelog.tip() == nullid:
            # local repo is empty: everything the remote has is missing
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid]
            return []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        if not unknown:
            return []

        req = dict.fromkeys(unknown)   # nodes already requested from remote
        reqcnt = 0                     # round-trip counter (for debug output)

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n) # schedule branch range for scanning
                    seenbranch[n] = 1
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            # both parents known: the root itself is the
                            # earliest unknown node of this segment
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch[n[1]] = 1 # earliest unknown
                        for p in n[2:4]:
                            if p in m:
                                base[p] = 1 # latest known

                    # queue unknown parents for the next request batch
                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req[p] = 1
                seen[n[0]] = 1

            if r:
                # fetch branch info for queued parents, ten at a time
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                              (reqcnt, " ".join(map(short, r))))
                for p in xrange(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

        # do binary search on the branches we found
        while search:
            n = search.pop(0)
            reqcnt += 1
            l = remote.between([(n[0], n[1])])[0]
            l.append(n[1])
            p = n[0]
            f = 1
            for i in l:
                self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                if i in m:
                    if f <= 2:
                        # interval is down to one or two nodes: p is the
                        # earliest unknown node of this segment
                        self.ui.debug(_("found new branch changeset %s\n") %
                                      short(p))
                        fetch[p] = 1
                        base[i] = 1
                    else:
                        self.ui.debug(_("narrowed branch search to %s:%s\n")
                                      % (short(p), short(i)))
                        search.append((p, i))
                    break
                p, f = i, f * 2

        # sanity check our fetch list
        for f in fetch.keys():
            if f in m:
                raise repo.RepoError(_("already have changeset ") + short(f[:4]))

        if base.keys() == [nullid]:
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.debug(_("found new changesets starting at ") +
                      " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return fetch.keys()
1355 1353
1356 1354 def findoutgoing(self, remote, base=None, heads=None, force=False):
1357 1355 """Return list of nodes that are roots of subsets not in remote
1358 1356
1359 1357 If base dict is specified, assume that these nodes and their parents
1360 1358 exist on the remote side.
1361 1359 If a list of heads is specified, return only nodes which are heads
1362 1360 or ancestors of these heads, and return a second element which
1363 1361 contains all remote heads which get new children.
1364 1362 """
1365 1363 if base == None:
1366 1364 base = {}
1367 1365 self.findincoming(remote, base, heads, force=force)
1368 1366
1369 1367 self.ui.debug(_("common changesets up to ")
1370 1368 + " ".join(map(short, base.keys())) + "\n")
1371 1369
1372 1370 remain = dict.fromkeys(self.changelog.nodemap)
1373 1371
1374 1372 # prune everything remote has from the tree
1375 1373 del remain[nullid]
1376 1374 remove = base.keys()
1377 1375 while remove:
1378 1376 n = remove.pop(0)
1379 1377 if n in remain:
1380 1378 del remain[n]
1381 1379 for p in self.changelog.parents(n):
1382 1380 remove.append(p)
1383 1381
1384 1382 # find every node whose parents have been pruned
1385 1383 subset = []
1386 1384 # find every remote head that will get new children
1387 1385 updated_heads = {}
1388 1386 for n in remain:
1389 1387 p1, p2 = self.changelog.parents(n)
1390 1388 if p1 not in remain and p2 not in remain:
1391 1389 subset.append(n)
1392 1390 if heads:
1393 1391 if p1 in heads:
1394 1392 updated_heads[p1] = True
1395 1393 if p2 in heads:
1396 1394 updated_heads[p2] = True
1397 1395
1398 1396 # this is the set of all roots we have to push
1399 1397 if heads:
1400 1398 return subset, updated_heads.keys()
1401 1399 else:
1402 1400 return subset
1403 1401
1404 1402 def pull(self, remote, heads=None, force=False):
1405 1403 lock = self.lock()
1406 1404 try:
1407 1405 fetch = self.findincoming(remote, heads=heads, force=force)
1408 1406 if fetch == [nullid]:
1409 1407 self.ui.status(_("requesting all changes\n"))
1410 1408
1411 1409 if not fetch:
1412 1410 self.ui.status(_("no changes found\n"))
1413 1411 return 0
1414 1412
1415 1413 if heads is None:
1416 1414 cg = remote.changegroup(fetch, 'pull')
1417 1415 else:
1418 1416 if 'changegroupsubset' not in remote.capabilities:
1419 1417 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1420 1418 cg = remote.changegroupsubset(fetch, heads, 'pull')
1421 1419 return self.addchangegroup(cg, 'pull', remote.url())
1422 1420 finally:
1423 1421 del lock
1424 1422
1425 1423 def push(self, remote, force=False, revs=None):
1426 1424 # there are two ways to push to remote repo:
1427 1425 #
1428 1426 # addchangegroup assumes local user can lock remote
1429 1427 # repo (local filesystem, old ssh servers).
1430 1428 #
1431 1429 # unbundle assumes local user cannot lock remote repo (new ssh
1432 1430 # servers, http servers).
1433 1431
1434 1432 if remote.capable('unbundle'):
1435 1433 return self.push_unbundle(remote, force, revs)
1436 1434 return self.push_addchangegroup(remote, force, revs)
1437 1435
    def prepush(self, remote, force, revs):
        """Prepare a push to *remote*: compute the outgoing changegroup
        and check we are not creating new remote heads.

        Returns (changegroup, remote heads) on success, (None, 1) when
        there is nothing to push, and (None, 0) when the push was
        refused because it would create new remote heads.
        """
        base = {}
        remote_heads = remote.heads()
        inc = self.findincoming(remote, base, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, base, remote_heads)
        if revs is not None:
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # check if we're creating new remote heads
            # to be a remote head after push, node must be either
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head

            warn = 0

            if remote_heads == [nullid]:
                # empty remote repo: pushing cannot create extra heads
                warn = 0
            elif not revs and len(heads) > len(remote_heads):
                warn = 1
            else:
                newheads = list(heads)
                for r in remote_heads:
                    if r in self.changelog.nodemap:
                        # remote head known locally: it stays a head unless
                        # one of our outgoing heads descends from it
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            newheads.append(r)
                    else:
                        # unknown locally: it will still be a head remotely
                        newheads.append(r)
                if len(newheads) > len(remote_heads):
                    warn = 1

            if warn:
                self.ui.warn(_("abort: push creates new remote heads!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return None, 0
        elif inc:
            self.ui.warn(_("note: unsynced remote changes!\n"))


        if revs is None:
            cg = self.changegroup(update, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads
1493 1491
1494 1492 def push_addchangegroup(self, remote, force, revs):
1495 1493 lock = remote.lock()
1496 1494 try:
1497 1495 ret = self.prepush(remote, force, revs)
1498 1496 if ret[0] is not None:
1499 1497 cg, remote_heads = ret
1500 1498 return remote.addchangegroup(cg, 'push', self.url())
1501 1499 return ret[1]
1502 1500 finally:
1503 1501 del lock
1504 1502
1505 1503 def push_unbundle(self, remote, force, revs):
1506 1504 # local repo finds heads on server, finds out what revs it
1507 1505 # must push. once revs transferred, if server finds it has
1508 1506 # different heads (someone else won commit/push race), server
1509 1507 # aborts.
1510 1508
1511 1509 ret = self.prepush(remote, force, revs)
1512 1510 if ret[0] is not None:
1513 1511 cg, remote_heads = ret
1514 1512 if force: remote_heads = ['force']
1515 1513 return remote.unbundle(cg, remote_heads, 'push')
1516 1514 return ret[1]
1517 1515
1518 1516 def changegroupinfo(self, nodes, source):
1519 1517 if self.ui.verbose or source == 'bundle':
1520 1518 self.ui.status(_("%d changesets found\n") % len(nodes))
1521 1519 if self.ui.debugflag:
1522 1520 self.ui.debug(_("List of changesets:\n"))
1523 1521 for node in nodes:
1524 1522 self.ui.debug("%s\n" % hex(node))
1525 1523
    def changegroupsubset(self, bases, heads, source, extranodes=None):
        """This function generates a changegroup consisting of all the nodes
        that are descendents of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        The caller can specify some nodes that must be included in the
        changegroup using the extranodes argument.  It should be a dict
        where the keys are the filenames (or 1 for the manifest), and the
        values are lists of (node, linknode) tuples, where node is a wanted
        node and linknode is the changelog node that should be transmitted as
        the linkrev.

        Returns a util.chunkbuffer wrapping the generated chunk stream.
        """

        self.hook('preoutgoing', throw=True, source=source)

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        self.changegroupinfo(msng_cl_lst, source)
        # Some bases may turn out to be superfluous, and some heads may be
        # too.  nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = {}
        # We assume that all parents of bases are known heads.
        for n in bases:
            for p in cl.parents(n):
                if p != nullid:
                    knownheads[p] = 1
        knownheads = knownheads.keys()
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known.  The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into an ersatz set.
            has_cl_set = dict.fromkeys(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = {}

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function.  Sets up an environment for the
        # inner function.
        def cmp_by_rev_func(revlog):
            # Compare two nodes by their revision number in the environment's
            # revision history.  Since the revision number both represents the
            # most efficient order to read the nodes in, and represents a
            # topological sorting of the nodes, this function is often useful.
            def cmp_by_rev(a, b):
                return cmp(revlog.rev(a), revlog.rev(b))
            return cmp_by_rev

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents.  This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = hasset.keys()
            haslst.sort(cmp_by_rev_func(revlog))
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset[n] = 1
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)

        # This is a function generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup.  The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode we saw any manifest
            # referenced by so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = {}
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(n))
                if linknode in has_cl_set:
                    has_mnfst_set[n] = 1
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # for the inner function.
        def filenode_collector(changedfiles):
            # single-element list used as a mutable cell shared with the
            # closure below
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to.  It
            # does this by assuming that a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    deltamf = mnfst.readdelta(mnfstnode)
                    # For each line in the delta
                    for f, fnode in deltamf.items():
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes

        # We have a list of filenodes we think we need for a file, lets remove
        # all those we know the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = {}
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(n))
                if clnode in has_cl_set:
                    hasset[n] = 1
            prune_parents(filerevlog, hasset, msngset)

        # A function generator function that sets up a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Add the nodes that were explicitly requested.
        def add_extra_nodes(name, nodes):
            if not extranodes or name not in extranodes:
                return

            for node, linknode in extranodes[name]:
                if node not in nodes:
                    nodes[node] = linknode

        # Now that we have all these utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            add_extra_nodes(1, msng_mnfst_set)
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            if extranodes:
                for fname in extranodes:
                    if isinstance(fname, int):
                        continue
                    add_extra_nodes(fname,
                                    msng_filenode_set.setdefault(fname, {}))
                    changedfiles[fname] = 1
            # Go through all our files in order sorted by name.
            for fname in util.sort(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if fname in msng_filenode_set:
                    prune_filenodes(fname, filerevlog)
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if fname in msng_filenode_set:
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()

        if msng_cl_lst:
            self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())
1820 1818
    def changegroup(self, basenodes, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        basenodes: nodes the recipient already has; everything reachable
        beyond them is bundled.
        source: operation name passed through to the pre/post hooks.

        Returns a util.chunkbuffer wrapping a lazy generator, so the
        'outgoing' hook only fires once the stream is fully consumed.
        """

        # allow hooks to veto the operation before any work is done
        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        # every changeset not reachable from basenodes is outgoing
        nodes = cl.nodesbetween(basenodes, None)[0]
        # set of outgoing changelog revision numbers, used to filter the
        # manifest and filelog revisions that belong to this push
        revset = dict.fromkeys([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes, source)

        def identity(x):
            # changelog nodes are their own link nodes
            return x

        def gennodelst(log):
            # yield the nodes of `log` (a manifest or filelog revlog) whose
            # linked changeset is part of this changegroup; revlog order is
            # ascending revision order, as group() requires
            for r in log:
                n = log.node(r)
                if log.linkrev(n) in revset:
                    yield n

        def changed_file_collector(changedfileset):
            # side-effect callback for cl.group(): record every file touched
            # by each changeset we stream (c[3] is the files list)
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        def lookuprevlink_func(revlog):
            # map a manifest/filelog node back to its changelog node
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(n))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            # 1) changelog chunks (filling changedfiles as a side effect)
            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk

            # 2) manifest chunks
            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            # 3) one group per changed file, each preceded by its name
            for fname in util.sort(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                nodeiter = gennodelst(filerevlog)
                # materialize so an empty iterator emits no header at all
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            # end-of-stream marker
            yield changegroup.closechunk()

            # fires only after the consumer has drained the stream
            if nodes:
                self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())
1888 1886
    def addchangegroup(self, source, srctype, url, emptyok=False):
        """add changegroup to repo.

        source: chunk stream as produced by changegroup()/changegroupsubset()
        srctype: origin tag ('push', 'pull', ...) passed to hooks
        url: source location passed to hooks
        emptyok: if True, tolerate an empty changelog group

        return values:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - less heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            # lookup callback for cl.addgroup: every incoming changeset
            # links to the next changelog revision number
            self.ui.debug(_("add changeset %s\n") % short(x))
            return len(cl)

        def revmap(x):
            # map a changelog node to its (now known) revision number
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = len(cl.heads())

        tr = self.transaction()
        try:
            # weakref proxy so the transaction can be freed by `del tr`
            # in the finally clause even if a hook keeps a reference
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            cor = len(cl) - 1    # last revision before the pull
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
                raise util.Abort(_("received changelog group is empty"))
            cnr = len(cl) - 1    # last revision after the pull
            changesets = cnr - cor

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, trp)

            # process the files
            self.ui.status(_("adding file changes\n"))
            while 1:
                # stream layout: filename chunk, then that file's group;
                # an empty chunk terminates the file section
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug(_("adding %s revisions\n") % f)
                fl = self.file(f)
                o = len(fl)
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1

            # make changelog see real files again
            cl.finalize(trp)

            newheads = len(self.changelog.heads())
            heads = ""
            if oldheads and newheads != oldheads:
                heads = _(" (%+d heads)") % (newheads - oldheads)

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                             % (changesets, revisions, files, heads))

            if changesets > 0:
                # gives hooks a chance to abort before the txn is committed
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(self.changelog.node(cor+1)), source=srctype,
                          url=url)

            tr.close()
        finally:
            # dropping the last reference triggers the transaction's
            # destructor, which rolls back if tr.close() was not reached
            del tr

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug(_("updating the branch cache\n"))
            self.branchtags()
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                      source=srctype, url=url)

            # one 'incoming' hook invocation per new changeset
            for i in xrange(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1
1991 1989
1992 1990
1993 1991 def stream_in(self, remote):
1994 1992 fp = remote.stream_out()
1995 1993 l = fp.readline()
1996 1994 try:
1997 1995 resp = int(l)
1998 1996 except ValueError:
1999 1997 raise util.UnexpectedOutput(
2000 1998 _('Unexpected response from remote server:'), l)
2001 1999 if resp == 1:
2002 2000 raise util.Abort(_('operation forbidden by server'))
2003 2001 elif resp == 2:
2004 2002 raise util.Abort(_('locking the remote repository failed'))
2005 2003 elif resp != 0:
2006 2004 raise util.Abort(_('the server sent an unknown error code'))
2007 2005 self.ui.status(_('streaming all changes\n'))
2008 2006 l = fp.readline()
2009 2007 try:
2010 2008 total_files, total_bytes = map(int, l.split(' ', 1))
2011 2009 except (ValueError, TypeError):
2012 2010 raise util.UnexpectedOutput(
2013 2011 _('Unexpected response from remote server:'), l)
2014 2012 self.ui.status(_('%d files to transfer, %s of data\n') %
2015 2013 (total_files, util.bytecount(total_bytes)))
2016 2014 start = time.time()
2017 2015 for i in xrange(total_files):
2018 2016 # XXX doesn't support '\n' or '\r' in filenames
2019 2017 l = fp.readline()
2020 2018 try:
2021 2019 name, size = l.split('\0', 1)
2022 2020 size = int(size)
2023 2021 except ValueError, TypeError:
2024 2022 raise util.UnexpectedOutput(
2025 2023 _('Unexpected response from remote server:'), l)
2026 2024 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
2027 2025 ofp = self.sopener(name, 'w')
2028 2026 for chunk in util.filechunkiter(fp, limit=size):
2029 2027 ofp.write(chunk)
2030 2028 ofp.close()
2031 2029 elapsed = time.time() - start
2032 2030 if elapsed <= 0:
2033 2031 elapsed = 0.001
2034 2032 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2035 2033 (util.bytecount(total_bytes), elapsed,
2036 2034 util.bytecount(total_bytes / elapsed)))
2037 2035 self.invalidate()
2038 2036 return len(self.heads()) + 1
2039 2037
2040 2038 def clone(self, remote, heads=[], stream=False):
2041 2039 '''clone remote repository.
2042 2040
2043 2041 keyword arguments:
2044 2042 heads: list of revs to clone (forces use of pull)
2045 2043 stream: use streaming clone if possible'''
2046 2044
2047 2045 # now, all clients that can request uncompressed clones can
2048 2046 # read repo formats supported by all servers that can serve
2049 2047 # them.
2050 2048
2051 2049 # if revlog format changes, client will have to check version
2052 2050 # and format flags on "stream" capability, and use
2053 2051 # uncompressed only if compatible.
2054 2052
2055 2053 if stream and not heads and remote.capable('stream'):
2056 2054 return self.stream_in(remote)
2057 2055 return self.pull(remote, heads)
2058 2056
2059 2057 def storefiles(self):
2060 2058 '''get all *.i and *.d files in the store
2061 2059
2062 2060 Returns (list of (filename, size), total_bytes)'''
2063 2061
2064 2062 lock = None
2065 2063 try:
2066 2064 self.ui.debug('scanning\n')
2067 2065 entries = []
2068 2066 total_bytes = 0
2069 2067 # get consistent snapshot of repo, lock during scan
2070 2068 lock = self.lock()
2071 2069 for name, size in self.store.walk():
2072 2070 entries.append((name, size))
2073 2071 total_bytes += size
2074 2072 return entries, total_bytes
2075 2073 finally:
2076 2074 del lock
2077 2075
# used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callback that performs each (src, dest) rename in files.

    The pairs are copied into fresh tuples up front so the returned
    closure holds no reference back to the caller's objects."""
    renamefiles = [tuple(pair) for pair in files]
    def commit_renames():
        for source_path, target_path in renamefiles:
            util.rename(source_path, target_path)
    return commit_renames
2085 2083
def instance(ui, path, create):
    """Open (or create) the local repository at `path`.

    Strips an optional 'file:' scheme before handing the path to
    localrepository."""
    local_path = util.drop_scheme('file', path)
    return localrepository(ui, local_path, create)
2088 2086
def islocal(path):
    """Repositories handled by this module are always local."""
    return True
General Comments 0
You need to be logged in to leave comments. Login now