##// END OF EJS Templates
fix error spotted by pychecker
Benoit Boissinot -
r6411:34c51857 default
parent child Browse files
Show More
@@ -1,2138 +1,2138 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import bin, hex, nullid, nullrev, short
9 9 from i18n import _
10 10 import repo, changegroup
11 11 import changelog, dirstate, filelog, manifest, context, weakref
12 12 import lock, transaction, stat, errno, ui
13 13 import os, revlog, time, util, extensions, hook, inspect
14 14
class localrepository(repo.repository):
    # wire-protocol capabilities this repository advertises to clients
    capabilities = util.set(('lookup', 'changegroupsubset'))
    # on-disk format requirements this implementation knows how to read
    supported = ('revlogv1', 'store')
18 18
    def __init__(self, parentui, path=None, create=0):
        """Open (or, with create=1, initialize) the repository at path.

        parentui supplies configuration; a repo-local ui is derived from
        it after .hg/hgrc has been read.  Raises repo.RepoError when the
        repository is missing, already exists, or declares an unsupported
        format requirement.
        """
        repo.repository.__init__(self)
        self.root = os.path.realpath(path)
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        self.opener = util.opener(self.path)
        self.wopener = util.opener(self.root)

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    os.mkdir(path)
                os.mkdir(self.path)
                requirements = ["revlogv1"]
                if parentui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                # create an invalid changelog
                self.opener("00changelog.i", "a").write(
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                )
                reqfile = self.opener("requires", "w")
                for r in requirements:
                    reqfile.write("%s\n" % r)
                reqfile.close()
            else:
                raise repo.RepoError(_("repository %s not found") % path)
        elif create:
            raise repo.RepoError(_("repository %s already exists") % path)
        else:
            # find requirements
            try:
                requirements = self.opener("requires").read().splitlines()
            except IOError, inst:
                # a missing requires file means an old-style repo with
                # no special requirements
                if inst.errno != errno.ENOENT:
                    raise
                requirements = []
            # check them
            for r in requirements:
                if r not in self.supported:
                    raise repo.RepoError(_("requirement '%s' not supported") % r)

        # setup store
        if "store" in requirements:
            self.encodefn = util.encodefilename
            self.decodefn = util.decodefilename
            self.spath = os.path.join(self.path, "store")
        else:
            # old layout: store files live directly in .hg, unencoded
            self.encodefn = lambda x: x
            self.decodefn = lambda x: x
            self.spath = self.path

        try:
            # files in .hg/ will be created using this mode
            mode = os.stat(self.spath).st_mode
            # avoid some useless chmods
            if (0777 & ~util._umask) == (0777 & mode):
                mode = None
        except OSError:
            mode = None

        self._createmode = mode
        self.opener.createmode = mode
        sopener = util.opener(self.spath)
        sopener.createmode = mode
        self.sopener = util.encodedopener(sopener, self.encodefn)

        self.ui = ui.ui(parentui=parentui)
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            # no .hg/hgrc is fine
            pass

        # lazily-filled caches; see tags(), branchtags(), nodetags()
        self.tagscache = None
        self._tagstypecache = None
        self.branchcache = None
        self._ubranchcache = None # UTF-8 version of branchcache
        self._branchcachetip = None
        self.nodetagscache = None
        self.filterpats = {}
        self._datafilters = {}
        # weak references to the live transaction/locks, if any
        self._transref = self._lockref = self._wlockref = None
103 103
    def __getattr__(self, name):
        """Create the changelog, manifest and dirstate on first access.

        Only runs when the attribute is not yet set; the created object
        is stored on self, so subsequent accesses bypass this hook.
        """
        if name == 'changelog':
            self.changelog = changelog.changelog(self.sopener)
            # store format version is taken from the changelog
            self.sopener.defversion = self.changelog.version
            return self.changelog
        if name == 'manifest':
            # force the changelog to load first so sopener.defversion
            # is set before the manifest revlog is opened
            self.changelog
            self.manifest = manifest.manifest(self.sopener)
            return self.manifest
        if name == 'dirstate':
            self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
            return self.dirstate
        else:
            raise AttributeError, name
118 118
119 119 def url(self):
120 120 return 'file:' + self.root
121 121
    def hook(self, name, throw=False, **args):
        # run the named hook for this repo; with throw=True a failing
        # hook raises instead of just reporting
        return hook.hook(self.ui, self, name, throw, **args)

    # characters that may not appear anywhere in a tag name
    tag_disallowed = ':\r\n'
126 126
127 127 def _tag(self, names, node, message, local, user, date, parent=None,
128 128 extra={}):
129 129 use_dirstate = parent is None
130 130
131 131 if isinstance(names, str):
132 132 allchars = names
133 133 names = (names,)
134 134 else:
135 135 allchars = ''.join(names)
136 136 for c in self.tag_disallowed:
137 137 if c in allchars:
138 138 raise util.Abort(_('%r cannot be used in a tag name') % c)
139 139
140 140 for name in names:
141 141 self.hook('pretag', throw=True, node=hex(node), tag=name,
142 142 local=local)
143 143
144 144 def writetags(fp, names, munge, prevtags):
145 145 fp.seek(0, 2)
146 146 if prevtags and prevtags[-1] != '\n':
147 147 fp.write('\n')
148 148 for name in names:
149 149 fp.write('%s %s\n' % (hex(node), munge and munge(name) or name))
150 150 fp.close()
151 151
152 152 prevtags = ''
153 153 if local:
154 154 try:
155 155 fp = self.opener('localtags', 'r+')
156 156 except IOError, err:
157 157 fp = self.opener('localtags', 'a')
158 158 else:
159 159 prevtags = fp.read()
160 160
161 161 # local tags are stored in the current charset
162 162 writetags(fp, names, None, prevtags)
163 163 for name in names:
164 164 self.hook('tag', node=hex(node), tag=name, local=local)
165 165 return
166 166
167 167 if use_dirstate:
168 168 try:
169 169 fp = self.wfile('.hgtags', 'rb+')
170 170 except IOError, err:
171 171 fp = self.wfile('.hgtags', 'ab')
172 172 else:
173 173 prevtags = fp.read()
174 174 else:
175 175 try:
176 176 prevtags = self.filectx('.hgtags', parent).data()
177 177 except revlog.LookupError:
178 178 pass
179 179 fp = self.wfile('.hgtags', 'wb')
180 180 if prevtags:
181 181 fp.write(prevtags)
182 182
183 183 # committed tags are stored in UTF-8
184 184 writetags(fp, names, util.fromlocal, prevtags)
185 185
186 186 if use_dirstate and '.hgtags' not in self.dirstate:
187 187 self.add(['.hgtags'])
188 188
189 189 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
190 190 extra=extra)
191 191
192 192 for name in names:
193 193 self.hook('tag', node=hex(node), tag=name, local=local)
194 194
195 195 return tagnode
196 196
    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        # refuse to run when .hgtags shows up as modified, added,
        # removed, deleted or unknown: the commit below would sweep in
        # unrelated .hgtags changes
        for x in self.status()[:5]:
            if '.hgtags' in x:
                raise util.Abort(_('working copy of .hgtags is changed '
                                   '(please commit .hgtags manually)'))

        self._tag(names, node, message, local, user, date)
224 224
    def tags(self):
        '''return a mapping of tag to node'''
        # cached after the first call; invalidate() resets it
        if self.tagscache:
            return self.tagscache

        globaltags = {}
        tagtypes = {}

        def readtags(lines, fn, tagtype):
            # parse one tags file (fn is only used for warnings) and
            # merge its entries into globaltags/tagtypes
            filetags = {}
            count = 0

            def warn(msg):
                self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))

            for l in lines:
                count += 1
                if not l:
                    continue
                s = l.split(" ", 1)
                if len(s) != 2:
                    warn(_("cannot parse entry"))
                    continue
                node, key = s
                key = util.tolocal(key.strip()) # stored in UTF-8
                try:
                    bin_n = bin(node)
                except TypeError:
                    warn(_("node '%s' is not well formed") % node)
                    continue
                if bin_n not in self.changelog.nodemap:
                    warn(_("tag '%s' refers to unknown node") % key)
                    continue

                # later lines for the same tag win; superseded nodes are
                # remembered in h for the ranking below
                h = []
                if key in filetags:
                    n, h = filetags[key]
                    h.append(n)
                filetags[key] = (bin_n, h)

            for k, nh in filetags.items():
                if k not in globaltags:
                    globaltags[k] = nh
                    tagtypes[k] = tagtype
                    continue

                # we prefer the global tag if:
                #  it supercedes us OR
                #  mutual supercedes and it has a higher rank
                # otherwise we win because we're tip-most
                an, ah = nh
                bn, bh = globaltags[k]
                if (bn != an and an in bh and
                    (bn not in ah or len(bh) > len(ah))):
                    an = bn
                ah.extend([n for n in bh if n not in ah])
                globaltags[k] = an, ah
                tagtypes[k] = tagtype

        # read the tags file from each head, ending with the tip
        f = None
        for rev, node, fnode in self._hgtagsnodes():
            f = (f and f.filectx(fnode) or
                 self.filectx('.hgtags', fileid=fnode))
            readtags(f.data().splitlines(), f, "global")

        try:
            data = util.fromlocal(self.opener("localtags").read())
            # localtags are stored in the local character set
            # while the internal tag table is stored in UTF-8
            readtags(data.splitlines(), "localtags", "local")
        except IOError:
            pass

        self.tagscache = {}
        self._tagstypecache = {}
        for k,nh in globaltags.items():
            n = nh[0]
            # a tag pointing at nullid means "deleted tag"
            if n != nullid:
                self.tagscache[k] = n
            self._tagstypecache[k] = tagtypes[k]
        self.tagscache['tip'] = self.changelog.tip()

        return self.tagscache
309 309
310 310 def tagtype(self, tagname):
311 311 '''
312 312 return the type of the given tag. result can be:
313 313
314 314 'local' : a local tag
315 315 'global' : a global tag
316 316 None : tag does not exist
317 317 '''
318 318
319 319 self.tags()
320 320
321 321 return self._tagstypecache.get(tagname)
322 322
323 323 def _hgtagsnodes(self):
324 324 heads = self.heads()
325 325 heads.reverse()
326 326 last = {}
327 327 ret = []
328 328 for node in heads:
329 329 c = self.changectx(node)
330 330 rev = c.rev()
331 331 try:
332 332 fnode = c.filenode('.hgtags')
333 333 except revlog.LookupError:
334 334 continue
335 335 ret.append((rev, node, fnode))
336 336 if fnode in last:
337 337 ret[last[fnode]] = None
338 338 last[fnode] = len(ret) - 1
339 339 return [item for item in ret if item]
340 340
341 341 def tagslist(self):
342 342 '''return a list of tags ordered by revision'''
343 343 l = []
344 344 for t, n in self.tags().items():
345 345 try:
346 346 r = self.changelog.rev(n)
347 347 except:
348 348 r = -2 # sort to the beginning of the list if unknown
349 349 l.append((r, t, n))
350 350 l.sort()
351 351 return [(t, n) for r, t, n in l]
352 352
353 353 def nodetags(self, node):
354 354 '''return the tags associated with a node'''
355 355 if not self.nodetagscache:
356 356 self.nodetagscache = {}
357 357 for t, n in self.tags().items():
358 358 self.nodetagscache.setdefault(n, []).append(t)
359 359 return self.nodetagscache.get(node, [])
360 360
    def _branchtags(self, partial, lrev):
        # bring the partial branch cache (branch name -> node) up to
        # date from lrev+1 through the current tip, then persist it
        tiprev = self.changelog.count() - 1
        if lrev != tiprev:
            self._updatebranchcache(partial, lrev+1, tiprev+1)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial
368 368
    def branchtags(self):
        """Return a dict of branch name (local charset) -> node,
        rebuilding the cache when the repository tip has moved."""
        tip = self.changelog.tip()
        if self.branchcache is not None and self._branchcachetip == tip:
            return self.branchcache

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if self.branchcache is None:
            self.branchcache = {} # avoid recursion in changectx
        else:
            self.branchcache.clear() # keep using the same dict
        if oldtip is None or oldtip not in self.changelog.nodemap:
            # cold start (or the old tip was stripped): read from disk
            partial, last, lrev = self._readbranchcache()
        else:
            # incremental update from the previously cached tip
            lrev = self.changelog.rev(oldtip)
            partial = self._ubranchcache

        self._branchtags(partial, lrev)

        # the branch cache is stored on disk as UTF-8, but in the local
        # charset internally
        for k, v in partial.items():
            self.branchcache[util.tolocal(k)] = v
        self._ubranchcache = partial
        return self.branchcache
394 394
    def _readbranchcache(self):
        """Read .hg/branch.cache; return (branch->node dict, cached tip
        node, cached tip rev), or empty/null values when the file is
        missing, stale or corrupt."""
        partial = {}
        try:
            f = self.opener("branch.cache")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            # first line: "<hex tip node> <tip rev>" the cache was built at
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if not (lrev < self.changelog.count() and
                    self.changelog.node(lrev) == last): # sanity check
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            # remaining lines: "<hex node> <branch name>"
            for l in lines:
                if not l: continue
                node, label = l.split(" ", 1)
                partial[label.strip()] = bin(node)
        except (KeyboardInterrupt, util.SignalInterrupt):
            raise
        except Exception, inst:
            # a bad cache is not fatal; rebuild from scratch
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev
422 422
423 423 def _writebranchcache(self, branches, tip, tiprev):
424 424 try:
425 425 f = self.opener("branch.cache", "w", atomictemp=True)
426 426 f.write("%s %s\n" % (hex(tip), tiprev))
427 427 for label, node in branches.iteritems():
428 428 f.write("%s %s\n" % (hex(node), label))
429 429 f.rename()
430 430 except (IOError, OSError):
431 431 pass
432 432
433 433 def _updatebranchcache(self, partial, start, end):
434 434 for r in xrange(start, end):
435 435 c = self.changectx(r)
436 436 b = c.branch()
437 437 partial[b] = c.node()
438 438
    def lookup(self, key):
        """Resolve key ('.', 'null', node, tag, branch name or node
        prefix, tried in that order) to a changelog node; raise
        RepoError when nothing matches."""
        if key == '.':
            # '.' is the first parent of the working directory
            key, second = self.dirstate.parents()
            if key == nullid:
                raise repo.RepoError(_("no revision checked out"))
            if second != nullid:
                self.ui.warn(_("warning: working directory has two parents, "
                               "tag '.' uses the first\n"))
        elif key == 'null':
            return nullid
        n = self.changelog._match(key)
        if n:
            return n
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        n = self.changelog._partialmatch(key)
        if n:
            return n
        try:
            # show a binary node as hex in the error message
            # NOTE(review): bare except is deliberate best-effort here
            if len(key) == 20:
                key = hex(key)
        except:
            pass
        raise repo.RepoError(_("unknown revision '%s'") % key)
465 465
    def local(self):
        # this is a local (on-disk) repository, unlike e.g. an http peer
        return True

    def join(self, f):
        # path of f inside the .hg directory
        return os.path.join(self.path, f)

    def sjoin(self, f):
        # path of f inside the store, with filename encoding applied
        f = self.encodefn(f)
        return os.path.join(self.spath, f)

    def wjoin(self, f):
        # path of f inside the working directory
        return os.path.join(self.root, f)
478 478
    def file(self, f):
        """Return the filelog for tracked file f (a leading '/' is
        tolerated and stripped)."""
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid=None):
        # thin wrapper; interpretation of changeid (including None) is
        # handled by context.changectx
        return context.changectx(self, changeid)

    def workingctx(self):
        # context object for the working directory
        return context.workingctx(self)
489 489
    def parents(self, changeid=None):
        '''
        get list of changectxs for parents of changeid or working directory
        '''
        if changeid is None:
            pl = self.dirstate.parents()
        else:
            n = self.changelog.lookup(changeid)
            pl = self.changelog.parents(n)
        # a null second parent is omitted from the result
        if pl[1] == nullid:
            return [self.changectx(pl[0])]
        return [self.changectx(pl[0]), self.changectx(pl[1])]

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        # delegated to the dirstate
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        # delegated to the dirstate
        return self.dirstate.pathto(f, cwd)
513 513
    def wfile(self, f, mode='r'):
        # open file f relative to the working directory
        return self.wopener(f, mode)

    def _link(self, f):
        # is working-directory file f a symlink?
        return os.path.islink(self.wjoin(f))
519 519
520 520 def _filter(self, filter, filename, data):
521 521 if filter not in self.filterpats:
522 522 l = []
523 523 for pat, cmd in self.ui.configitems(filter):
524 524 mf = util.matcher(self.root, "", [pat], [], [])[1]
525 525 fn = None
526 526 params = cmd
527 527 for name, filterfn in self._datafilters.iteritems():
528 528 if cmd.startswith(name):
529 529 fn = filterfn
530 530 params = cmd[len(name):].lstrip()
531 531 break
532 532 if not fn:
533 533 fn = lambda s, c, **kwargs: util.filter(s, c)
534 534 # Wrap old filters not supporting keyword arguments
535 535 if not inspect.getargspec(fn)[2]:
536 536 oldfn = fn
537 537 fn = lambda s, c, **kwargs: oldfn(s, c)
538 538 l.append((mf, fn, params))
539 539 self.filterpats[filter] = l
540 540
541 541 for mf, fn, cmd in self.filterpats[filter]:
542 542 if mf(filename):
543 543 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
544 544 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
545 545 break
546 546
547 547 return data
548 548
    def adddatafilter(self, name, filter):
        # register a named data filter for use in encode/decode config
        self._datafilters[name] = filter

    def wread(self, filename):
        """Read filename from the working directory (the link target for
        symlinks) and apply the configured encode filters."""
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener(filename, 'r').read()
        return self._filter("encode", filename, data)

    def wwrite(self, filename, data, flags):
        """Apply decode filters to data, write it to filename in the
        working directory and set its flags."""
        data = self._filter("decode", filename, data)
        try:
            # best-effort removal of any existing file first
            os.unlink(self.wjoin(filename))
        except OSError:
            pass
        self.wopener(filename, 'w').write(data)
        util.set_flags(self.wjoin(filename), flags)

    def wwritedata(self, filename, data):
        # like wwrite, but return the decoded data instead of writing it
        return self._filter("decode", filename, data)
570 570
    def transaction(self):
        """Return a store transaction, nesting into the live one when a
        transaction is already running in this process."""
        if self._transref and self._transref():
            return self._transref().nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise repo.RepoError(_("journal already exists - run hg recover"))

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)
        self.opener("journal.branch", "w").write(self.dirstate.branch())

        # journal.* -> undo.* renames, handed to the transaction
        # machinery via aftertrans
        renames = [(self.sjoin("journal"), self.sjoin("undo")),
                   (self.join("journal.dirstate"), self.join("undo.dirstate")),
                   (self.join("journal.branch"), self.join("undo.branch"))]
        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self._createmode)
        # only keep a weak reference, so the transaction's lifetime is
        # controlled by the caller
        self._transref = weakref.ref(tr)
        return tr
596 596
    def recover(self):
        """Roll back an interrupted transaction; return True when one
        was found and rolled back, False otherwise."""
        l = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"))
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            # dropping our reference releases the lock
            del l
610 610
    def rollback(self):
        """Undo the last transaction, restoring the saved dirstate and
        branch as well."""
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                self.ui.status(_("rolling back last transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("undo"))
                util.rename(self.join("undo.dirstate"), self.join("dirstate"))
                try:
                    branch = self.opener("undo.branch").read()
                    self.dirstate.setbranch(branch)
                except IOError:
                    self.ui.warn(_("Named branch could not be reset, "
                                   "current branch still is: %s\n")
                                 % util.tolocal(self.dirstate.branch()))
                self.invalidate()
                self.dirstate.invalidate()
            else:
                self.ui.warn(_("no rollback information available\n"))
        finally:
            # dropping the references releases both locks
            del lock, wlock
633 633
634 634 def invalidate(self):
635 635 for a in "changelog manifest".split():
636 636 if a in self.__dict__:
637 637 delattr(self, a)
638 638 self.tagscache = None
639 639 self._tagstypecache = None
640 640 self.nodetagscache = None
641 641 self.branchcache = None
642 642 self._ubranchcache = None
643 643 self._branchcachetip = None
644 644
    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        """Acquire lockname; with wait=True retry with a timeout instead
        of failing immediately when the lock is held elsewhere."""
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except lock.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l
659 659
    def lock(self, wait=True):
        """Lock the store; reuses the live lock when this process
        already holds it."""
        if self._lockref and self._lockref():
            return self._lockref()

        # invalidate caches on acquisition: another process may have
        # changed the store while we were unlocked
        l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
                       _('repository %s') % self.origroot)
        # keep only a weak reference so the caller controls the lock's
        # lifetime
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        """Lock the working directory; reuses the live lock when this
        process already holds it."""
        if self._wlockref and self._wlockref():
            return self._wlockref()

        l = self._lock(self.join("wlock"), wait, self.dirstate.write,
                       self.dirstate.invalidate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l
678 678
    def filecommit(self, fn, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction

        Returns the (new or existing) filelog node for fn, appending fn
        to changelist when a new filelog revision is created.  manifest1
        and manifest2 are the parent manifests, used to determine the
        file's parents and to record copy/rename metadata.
        """

        t = self.wread(fn)
        fl = self.file(fn)
        fp1 = manifest1.get(fn, nullid)
        fp2 = manifest2.get(fn, nullid)

        meta = {}
        cp = self.dirstate.copied(fn)
        if cp:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #
            meta["copy"] = cp
            if not manifest2: # not a branch merge
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
                fp2 = nullid
            elif fp2 != nullid: # copied on remote side
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            elif fp1 != nullid: # copied on local side, reversed
                meta["copyrev"] = hex(manifest2.get(cp))
                fp2 = fp1
            elif cp in manifest2: # directory rename on local side
                meta["copyrev"] = hex(manifest2[cp])
            else: # directory rename on remote side
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            self.ui.debug(_(" %s: copy %s:%s\n") %
                          (fn, cp, meta["copyrev"]))
            fp1 = nullid
        elif fp2 != nullid:
            # is one parent an ancestor of the other?
            fpa = fl.ancestor(fp1, fp2)
            if fpa == fp1:
                fp1, fp2 = fp2, nullid
            elif fpa == fp2:
                fp2 = nullid

        # is the file unmodified from the parent? report existing entry
        if fp2 == nullid and not fl.cmp(fp1, t) and not meta:
            return fp1

        changelist.append(fn)
        return fl.add(t, meta, tr, linkrev, fp1, fp2)
740 740
    def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}):
        """Commit files with explicitly supplied parents, allowing empty
        commits (used for raw/imported commits).

        NOTE(review): extra={} is a mutable default; it is only read
        here (commit() copies it before modifying), so this is safe but
        fragile.
        """
        if p1 is None:
            p1, p2 = self.dirstate.parents()
        return self.commit(files=files, text=text, user=user, date=date,
                           p1=p1, p2=p2, extra=extra, empty_ok=True)
746 746
    def commit(self, files=None, text="", user=None, date=None,
               match=util.always, force=False, force_editor=False,
               p1=None, p2=None, extra={}, empty_ok=False):
        """Create a new changeset and return its node, or None when
        there is nothing to commit.

        With p1 unset, the commit is driven by the dirstate (normal
        path); with explicit parents (rawcommit path) the dirstate
        checks are bypassed.  files=None commits everything status()
        reports as changed.  An editor is launched when no message is
        supplied (unless empty_ok) or when force_editor is set.  extra
        is copied before being modified.
        """
        wlock = lock = tr = None
        valid = 0 # don't save the dirstate if this isn't set
        if files:
            files = util.unique(files)
        try:
            wlock = self.wlock()
            lock = self.lock()
            commit = []
            remove = []
            changed = []
            use_dirstate = (p1 is None) # not rawcommit
            extra = extra.copy()

            # decide which files to commit and which to remove
            if use_dirstate:
                if files:
                    for f in files:
                        s = self.dirstate[f]
                        if s in 'nma':
                            commit.append(f)
                        elif s == 'r':
                            remove.append(f)
                        else:
                            self.ui.warn(_("%s not tracked!\n") % f)
                else:
                    changes = self.status(match=match)[:5]
                    modified, added, removed, deleted, unknown = changes
                    commit = modified + added
                    remove = removed
            else:
                commit = files

            # determine the parents of the new changeset
            if use_dirstate:
                p1, p2 = self.dirstate.parents()
                update_dirstate = True

                if (not force and p2 != nullid and
                    (files or match != util.always)):
                    raise util.Abort(_('cannot partially commit a merge '
                                       '(do not specify files or patterns)'))
            else:
                # default a missing second parent to null
                p1, p2 = p1, p2 or nullid
                update_dirstate = (self.dirstate.parents()[0] == p1)

            c1 = self.changelog.read(p1)
            c2 = self.changelog.read(p2)
            m1 = self.manifest.read(c1[0]).copy()
            m2 = self.manifest.read(c2[0])

            if use_dirstate:
                branchname = self.workingctx().branch()
                try:
                    branchname = branchname.decode('UTF-8').encode('UTF-8')
                except UnicodeDecodeError:
                    raise util.Abort(_('branch name not in UTF-8!'))
            else:
                branchname = ""

            if use_dirstate:
                oldname = c1[5].get("branch") # stored in UTF-8
                # bail out early when there is nothing to record at all
                if (not commit and not remove and not force and p2 == nullid
                    and branchname == oldname):
                    self.ui.status(_("nothing changed\n"))
                    return None

            xp1 = hex(p1)
            if p2 == nullid: xp2 = ''
            else: xp2 = hex(p2)

            self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

            tr = self.transaction()
            # a weak proxy avoids keeping the transaction alive through
            # the revlogs it is passed to
            trp = weakref.proxy(tr)

            # check in files
            new = {}
            linkrev = self.changelog.count()
            commit.sort()
            is_exec = util.execfunc(self.root, m1.execf)
            is_link = util.linkfunc(self.root, m1.linkf)
            for f in commit:
                self.ui.note(f + "\n")
                try:
                    new[f] = self.filecommit(f, m1, m2, linkrev, trp, changed)
                    new_exec = is_exec(f)
                    new_link = is_link(f)
                    if ((not changed or changed[-1] != f) and
                        m2.get(f) != new[f]):
                        # mention the file in the changelog if some
                        # flag changed, even if there was no content
                        # change.
                        old_exec = m1.execf(f)
                        old_link = m1.linkf(f)
                        if old_exec != new_exec or old_link != new_link:
                            changed.append(f)
                    m1.set(f, new_exec, new_link)
                    if use_dirstate:
                        self.dirstate.normal(f)

                except (OSError, IOError):
                    # in the rawcommit case an unreadable file is simply
                    # treated as removed
                    if use_dirstate:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    else:
                        remove.append(f)

            # update manifest
            m1.update(new)
            remove.sort()
            removed = []

            for f in remove:
                if f in m1:
                    del m1[f]
                    removed.append(f)
                elif f in m2:
                    removed.append(f)
            mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
                                   (new, removed))

            # add changeset
            new = new.keys()
            new.sort()

            user = user or self.ui.username()
            if (not empty_ok and not text) or force_editor:
                # build the commit-message template and run the editor
                edittext = []
                if text:
                    edittext.append(text)
                edittext.append("")
                edittext.append(_("HG: Enter commit message."
                                  " Lines beginning with 'HG:' are removed."))
                edittext.append("HG: --")
                edittext.append("HG: user: %s" % user)
                if p2 != nullid:
                    edittext.append("HG: branch merge")
                if branchname:
                    edittext.append("HG: branch '%s'" % util.tolocal(branchname))
                edittext.extend(["HG: changed %s" % f for f in changed])
                edittext.extend(["HG: removed %s" % f for f in removed])
                if not changed and not remove:
                    edittext.append("HG: no files changed")
                edittext.append("")
                # run editor in the repository root
                olddir = os.getcwd()
                os.chdir(self.root)
                text = self.ui.edit("\n".join(edittext), user)
                os.chdir(olddir)

            if branchname:
                extra["branch"] = branchname

            # normalize the message: strip trailing whitespace and
            # leading blank lines
            lines = [line.rstrip() for line in text.rstrip().splitlines()]
            while lines and not lines[0]:
                del lines[0]
            if not lines and use_dirstate:
                raise util.Abort(_("empty commit message"))
            text = '\n'.join(lines)

            n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
                                   user, date, extra)
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            tr.close()

            if self.branchcache:
                self.branchtags()

            if use_dirstate or update_dirstate:
                self.dirstate.setparents(n)
                if use_dirstate:
                    for f in removed:
                        self.dirstate.forget(f)
                valid = 1 # our dirstate updates are complete

            self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
            return n
        finally:
            if not valid: # don't save our updated dirstate
                self.dirstate.invalidate()
            del tr, lock, wlock
930 930
    def walk(self, node=None, files=[], match=util.always, badmatch=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function

        results are yielded in a tuple (src, filename), where src
        is one of:
        'f' the file was found in the directory tree
        'm' the file was only in the dirstate and not in the tree
        'b' file was not found and matched badmatch

        NOTE(review): files=[] is a mutable default; it is only read
        here (and by dirstate.walk as far as this code shows), so this
        is safe but fragile.
        '''

        if node:
            fdict = dict.fromkeys(files)
            # for dirstate.walk, files=['.'] means "walk the whole tree".
            # follow that here, too
            fdict.pop('.', None)
            mdict = self.manifest.read(self.changelog.read(node)[0])
            mfiles = mdict.keys()
            mfiles.sort()
            for fn in mfiles:
                for ffn in fdict:
                    # match if the file is the exact name or a directory
                    # (the del-during-iteration is safe only because of
                    # the immediate break)
                    if ffn == fn or fn.startswith("%s/" % ffn):
                        del fdict[ffn]
                        break
                if match(fn):
                    yield 'm', fn
            # anything left in fdict was requested but not in the manifest
            ffiles = fdict.keys()
            ffiles.sort()
            for fn in ffiles:
                if badmatch and badmatch(fn):
                    if match(fn):
                        yield 'b', fn
                else:
                    self.ui.warn(_('%s: No such file in rev %s\n')
                                 % (self.pathto(fn), short(node)))
        else:
            for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
                yield src, fn
972 972
    def status(self, node1=None, node2=None, files=[], match=util.always,
               list_ignored=False, list_clean=False, list_unknown=True):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.

        Returns a 7-tuple of sorted file lists:
        (modified, added, removed, deleted, unknown, ignored, clean).
        ignored and clean are only populated when the corresponding
        list_* flag is set.
        """

        def fcmp(fn, getnode):
            # compare working-directory contents of fn against the stored
            # revision obtained via getnode; true means "differs"
            t1 = self.wread(fn)
            return self.file(fn).cmp(getnode(fn), t1)

        def mfmatches(node):
            # manifest of node, restricted to files accepted by match()
            change = self.changelog.read(node)
            mf = self.manifest.read(change[0]).copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        modified, added, removed, deleted, unknown = [], [], [], [], []
        ignored, clean = [], []

        # compareworking: node1 is the working directory's first parent
        # and node2 is the working directory itself
        compareworking = False
        if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
            compareworking = True

        if not compareworking:
            # read the manifest from node1 before the manifest from node2,
            # so that we'll hit the manifest cache if we're going through
            # all the revisions in parent->child order.
            mf1 = mfmatches(node1)

        # are we comparing the working directory?
        if not node2:
            (lookup, modified, added, removed, deleted, unknown,
             ignored, clean) = self.dirstate.status(files, match,
                                                    list_ignored, list_clean,
                                                    list_unknown)

            # are we comparing working dir against its parent?
            if compareworking:
                if lookup:
                    fixup = []
                    # do a full compare of any files that might have changed
                    ctx = self.changectx()
                    mexec = lambda f: 'x' in ctx.fileflags(f)
                    mlink = lambda f: 'l' in ctx.fileflags(f)
                    is_exec = util.execfunc(self.root, mexec)
                    is_link = util.linkfunc(self.root, mlink)
                    def flags(f):
                        return is_link(f) and 'l' or is_exec(f) and 'x' or ''
                    for f in lookup:
                        # a file is modified if flags or content changed;
                        # otherwise it only had a stale stat entry (fixup)
                        if (f not in ctx or flags(f) != ctx.fileflags(f)
                            or ctx[f].cmp(self.wread(f))):
                            modified.append(f)
                        else:
                            fixup.append(f)
                            if list_clean:
                                clean.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    wlock = None
                    try:
                        try:
                            # best-effort: skip the fixup if the repo is
                            # locked by someone else
                            wlock = self.wlock(False)
                        except lock.LockException:
                            pass
                        if wlock:
                            for f in fixup:
                                self.dirstate.normal(f)
                    finally:
                        del wlock
            else:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                # XXX: create it in dirstate.py ?
                mf2 = mfmatches(self.dirstate.parents()[0])
                is_exec = util.execfunc(self.root, mf2.execf)
                is_link = util.linkfunc(self.root, mf2.linkf)
                for f in lookup + modified + added:
                    mf2[f] = ""
                    mf2.set(f, is_exec(f), is_link(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]

        else:
            # we are comparing two revisions
            mf2 = mfmatches(node2)

        if not compareworking:
            # flush lists from dirstate before comparing manifests
            modified, added, clean = [], [], []

            # make sure to sort the files so we talk to the disk in a
            # reasonable order
            mf2keys = mf2.keys()
            mf2keys.sort()
            getnode = lambda fn: mf1.get(fn, nullid)
            for fn in mf2keys:
                if fn in mf1:
                    # present on both sides: modified if flags differ or
                    # (nodes differ and, for a working-dir pseudo-entry,
                    # the contents actually differ)
                    if (mf1.flags(fn) != mf2.flags(fn) or
                        (mf1[fn] != mf2[fn] and
                         (mf2[fn] != "" or fcmp(fn, getnode)))):
                        modified.append(fn)
                    elif list_clean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)

            # whatever is left in mf1 was not seen in mf2: removed
            removed = mf1.keys()

        # sort and return results:
        for l in modified, added, removed, deleted, unknown, ignored, clean:
            l.sort()
        return (modified, added, removed, deleted, unknown, ignored, clean)
1092 1092
1093 1093 def add(self, list):
1094 1094 wlock = self.wlock()
1095 1095 try:
1096 1096 rejected = []
1097 1097 for f in list:
1098 1098 p = self.wjoin(f)
1099 1099 try:
1100 1100 st = os.lstat(p)
1101 1101 except:
1102 1102 self.ui.warn(_("%s does not exist!\n") % f)
1103 1103 rejected.append(f)
1104 1104 continue
1105 1105 if st.st_size > 10000000:
1106 1106 self.ui.warn(_("%s: files over 10MB may cause memory and"
1107 1107 " performance problems\n"
1108 1108 "(use 'hg revert %s' to unadd the file)\n")
1109 1109 % (f, f))
1110 1110 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1111 1111 self.ui.warn(_("%s not added: only files and symlinks "
1112 1112 "supported currently\n") % f)
1113 1113 rejected.append(p)
1114 1114 elif self.dirstate[f] in 'amn':
1115 1115 self.ui.warn(_("%s already tracked!\n") % f)
1116 1116 elif self.dirstate[f] == 'r':
1117 1117 self.dirstate.normallookup(f)
1118 1118 else:
1119 1119 self.dirstate.add(f)
1120 1120 return rejected
1121 1121 finally:
1122 1122 del wlock
1123 1123
1124 1124 def forget(self, list):
1125 1125 wlock = self.wlock()
1126 1126 try:
1127 1127 for f in list:
1128 1128 if self.dirstate[f] != 'a':
1129 1129 self.ui.warn(_("%s not added!\n") % f)
1130 1130 else:
1131 1131 self.dirstate.forget(f)
1132 1132 finally:
1133 1133 del wlock
1134 1134
1135 1135 def remove(self, list, unlink=False):
1136 1136 wlock = None
1137 1137 try:
1138 1138 if unlink:
1139 1139 for f in list:
1140 1140 try:
1141 1141 util.unlink(self.wjoin(f))
1142 1142 except OSError, inst:
1143 1143 if inst.errno != errno.ENOENT:
1144 1144 raise
1145 1145 wlock = self.wlock()
1146 1146 for f in list:
1147 1147 if unlink and os.path.exists(self.wjoin(f)):
1148 1148 self.ui.warn(_("%s still exists!\n") % f)
1149 1149 elif self.dirstate[f] == 'a':
1150 1150 self.dirstate.forget(f)
1151 1151 elif f not in self.dirstate:
1152 1152 self.ui.warn(_("%s not tracked!\n") % f)
1153 1153 else:
1154 1154 self.dirstate.remove(f)
1155 1155 finally:
1156 1156 del wlock
1157 1157
1158 1158 def undelete(self, list):
1159 1159 wlock = None
1160 1160 try:
1161 1161 manifests = [self.manifest.read(self.changelog.read(p)[0])
1162 1162 for p in self.dirstate.parents() if p != nullid]
1163 1163 wlock = self.wlock()
1164 1164 for f in list:
1165 1165 if self.dirstate[f] != 'r':
1166 1166 self.ui.warn("%s not removed!\n" % f)
1167 1167 else:
1168 1168 m = f in manifests[0] and manifests[0] or manifests[1]
1169 1169 t = self.file(f).read(m[f])
1170 1170 self.wwrite(f, t, m.flags(f))
1171 1171 self.dirstate.normal(f)
1172 1172 finally:
1173 1173 del wlock
1174 1174
1175 1175 def copy(self, source, dest):
1176 1176 wlock = None
1177 1177 try:
1178 1178 p = self.wjoin(dest)
1179 1179 if not (os.path.exists(p) or os.path.islink(p)):
1180 1180 self.ui.warn(_("%s does not exist!\n") % dest)
1181 1181 elif not (os.path.isfile(p) or os.path.islink(p)):
1182 1182 self.ui.warn(_("copy failed: %s is not a file or a "
1183 1183 "symbolic link\n") % dest)
1184 1184 else:
1185 1185 wlock = self.wlock()
1186 1186 if dest not in self.dirstate:
1187 1187 self.dirstate.add(dest)
1188 1188 self.dirstate.copy(source, dest)
1189 1189 finally:
1190 1190 del wlock
1191 1191
1192 1192 def heads(self, start=None):
1193 1193 heads = self.changelog.heads(start)
1194 1194 # sort the output in rev descending order
1195 1195 heads = [(-self.changelog.rev(h), h) for h in heads]
1196 1196 heads.sort()
1197 1197 return [n for (r, n) in heads]
1198 1198
    def branchheads(self, branch, start=None):
        """Return the changelog nodes of all heads of the named branch.

        Returns [] for an unknown branch.  If start is given, the heads
        are filtered through nodesbetween so only those reachable from
        start remain.
        """
        branches = self.branchtags()
        if branch not in branches:
            return []
        # The basic algorithm is this:
        #
        # Start from the branch tip since there are no later revisions that can
        # possibly be in this branch, and the tip is a guaranteed head.
        #
        # Remember the tip's parents as the first ancestors, since these by
        # definition are not heads.
        #
        # Step backwards from the brach tip through all the revisions. We are
        # guaranteed by the rules of Mercurial that we will now be visiting the
        # nodes in reverse topological order (children before parents).
        #
        # If a revision is one of the ancestors of a head then we can toss it
        # out of the ancestors set (we've already found it and won't be
        # visiting it again) and put its parents in the ancestors set.
        #
        # Otherwise, if a revision is in the branch it's another head, since it
        # wasn't in the ancestor list of an existing head. So add it to the
        # head list, and add its parents to the ancestor list.
        #
        # If it is not in the branch ignore it.
        #
        # Once we have a list of heads, use nodesbetween to filter out all the
        # heads that cannot be reached from startrev. There may be a more
        # efficient way to do this as part of the previous algorithm.

        # NOTE(review): shadows the builtin 'set'; util.set presumably
        # exists for pre-2.4 Python compatibility — confirm
        set = util.set
        heads = [self.changelog.rev(branches[branch])]
        # Don't care if ancestors contains nullrev or not.
        ancestors = set(self.changelog.parentrevs(heads[0]))
        for rev in xrange(heads[0] - 1, nullrev, -1):
            if rev in ancestors:
                ancestors.update(self.changelog.parentrevs(rev))
                ancestors.remove(rev)
            elif self.changectx(rev).branch() == branch:
                heads.append(rev)
                ancestors.update(self.changelog.parentrevs(rev))
        # translate revision numbers back to nodes
        heads = [self.changelog.node(rev) for rev in heads]
        if start is not None:
            heads = self.changelog.nodesbetween([start], heads)[2]
        return heads
1244 1244
1245 1245 def branches(self, nodes):
1246 1246 if not nodes:
1247 1247 nodes = [self.changelog.tip()]
1248 1248 b = []
1249 1249 for n in nodes:
1250 1250 t = n
1251 1251 while 1:
1252 1252 p = self.changelog.parents(n)
1253 1253 if p[1] != nullid or p[0] == nullid:
1254 1254 b.append((t, n, p[0], p[1]))
1255 1255 break
1256 1256 n = p[0]
1257 1257 return b
1258 1258
1259 1259 def between(self, pairs):
1260 1260 r = []
1261 1261
1262 1262 for top, bottom in pairs:
1263 1263 n, l, i = top, [], 0
1264 1264 f = 1
1265 1265
1266 1266 while n != bottom:
1267 1267 p = self.changelog.parents(n)[0]
1268 1268 if i == f:
1269 1269 l.append(n)
1270 1270 f = f * 2
1271 1271 n = p
1272 1272 i += 1
1273 1273
1274 1274 r.append(l)
1275 1275
1276 1276 return r
1277 1277
1278 1278 def findincoming(self, remote, base=None, heads=None, force=False):
1279 1279 """Return list of roots of the subsets of missing nodes from remote
1280 1280
1281 1281 If base dict is specified, assume that these nodes and their parents
1282 1282 exist on the remote side and that no child of a node of base exists
1283 1283 in both remote and self.
1284 1284 Furthermore base will be updated to include the nodes that exists
1285 1285 in self and remote but no children exists in self and remote.
1286 1286 If a list of heads is specified, return only nodes which are heads
1287 1287 or ancestors of these heads.
1288 1288
1289 1289 All the ancestors of base are in self and in remote.
1290 1290 All the descendants of the list returned are missing in self.
1291 1291 (and so we know that the rest of the nodes are missing in remote, see
1292 1292 outgoing)
1293 1293 """
1294 1294 m = self.changelog.nodemap
1295 1295 search = []
1296 1296 fetch = {}
1297 1297 seen = {}
1298 1298 seenbranch = {}
1299 1299 if base == None:
1300 1300 base = {}
1301 1301
1302 1302 if not heads:
1303 1303 heads = remote.heads()
1304 1304
1305 1305 if self.changelog.tip() == nullid:
1306 1306 base[nullid] = 1
1307 1307 if heads != [nullid]:
1308 1308 return [nullid]
1309 1309 return []
1310 1310
1311 1311 # assume we're closer to the tip than the root
1312 1312 # and start by examining the heads
1313 1313 self.ui.status(_("searching for changes\n"))
1314 1314
1315 1315 unknown = []
1316 1316 for h in heads:
1317 1317 if h not in m:
1318 1318 unknown.append(h)
1319 1319 else:
1320 1320 base[h] = 1
1321 1321
1322 1322 if not unknown:
1323 1323 return []
1324 1324
1325 1325 req = dict.fromkeys(unknown)
1326 1326 reqcnt = 0
1327 1327
1328 1328 # search through remote branches
1329 1329 # a 'branch' here is a linear segment of history, with four parts:
1330 1330 # head, root, first parent, second parent
1331 1331 # (a branch always has two parents (or none) by definition)
1332 1332 unknown = remote.branches(unknown)
1333 1333 while unknown:
1334 1334 r = []
1335 1335 while unknown:
1336 1336 n = unknown.pop(0)
1337 1337 if n[0] in seen:
1338 1338 continue
1339 1339
1340 1340 self.ui.debug(_("examining %s:%s\n")
1341 1341 % (short(n[0]), short(n[1])))
1342 1342 if n[0] == nullid: # found the end of the branch
1343 1343 pass
1344 1344 elif n in seenbranch:
1345 1345 self.ui.debug(_("branch already found\n"))
1346 1346 continue
1347 1347 elif n[1] and n[1] in m: # do we know the base?
1348 1348 self.ui.debug(_("found incomplete branch %s:%s\n")
1349 1349 % (short(n[0]), short(n[1])))
1350 1350 search.append(n) # schedule branch range for scanning
1351 1351 seenbranch[n] = 1
1352 1352 else:
1353 1353 if n[1] not in seen and n[1] not in fetch:
1354 1354 if n[2] in m and n[3] in m:
1355 1355 self.ui.debug(_("found new changeset %s\n") %
1356 1356 short(n[1]))
1357 1357 fetch[n[1]] = 1 # earliest unknown
1358 1358 for p in n[2:4]:
1359 1359 if p in m:
1360 1360 base[p] = 1 # latest known
1361 1361
1362 1362 for p in n[2:4]:
1363 1363 if p not in req and p not in m:
1364 1364 r.append(p)
1365 1365 req[p] = 1
1366 1366 seen[n[0]] = 1
1367 1367
1368 1368 if r:
1369 1369 reqcnt += 1
1370 1370 self.ui.debug(_("request %d: %s\n") %
1371 1371 (reqcnt, " ".join(map(short, r))))
1372 1372 for p in xrange(0, len(r), 10):
1373 1373 for b in remote.branches(r[p:p+10]):
1374 1374 self.ui.debug(_("received %s:%s\n") %
1375 1375 (short(b[0]), short(b[1])))
1376 1376 unknown.append(b)
1377 1377
1378 1378 # do binary search on the branches we found
1379 1379 while search:
1380 1380 n = search.pop(0)
1381 1381 reqcnt += 1
1382 1382 l = remote.between([(n[0], n[1])])[0]
1383 1383 l.append(n[1])
1384 1384 p = n[0]
1385 1385 f = 1
1386 1386 for i in l:
1387 1387 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1388 1388 if i in m:
1389 1389 if f <= 2:
1390 1390 self.ui.debug(_("found new branch changeset %s\n") %
1391 1391 short(p))
1392 1392 fetch[p] = 1
1393 1393 base[i] = 1
1394 1394 else:
1395 1395 self.ui.debug(_("narrowed branch search to %s:%s\n")
1396 1396 % (short(p), short(i)))
1397 1397 search.append((p, i))
1398 1398 break
1399 1399 p, f = i, f * 2
1400 1400
1401 1401 # sanity check our fetch list
1402 1402 for f in fetch.keys():
1403 1403 if f in m:
1404 1404 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1405 1405
1406 1406 if base.keys() == [nullid]:
1407 1407 if force:
1408 1408 self.ui.warn(_("warning: repository is unrelated\n"))
1409 1409 else:
1410 1410 raise util.Abort(_("repository is unrelated"))
1411 1411
1412 1412 self.ui.debug(_("found new changesets starting at ") +
1413 1413 " ".join([short(f) for f in fetch]) + "\n")
1414 1414
1415 1415 self.ui.debug(_("%d total queries\n") % reqcnt)
1416 1416
1417 1417 return fetch.keys()
1418 1418
1419 1419 def findoutgoing(self, remote, base=None, heads=None, force=False):
1420 1420 """Return list of nodes that are roots of subsets not in remote
1421 1421
1422 1422 If base dict is specified, assume that these nodes and their parents
1423 1423 exist on the remote side.
1424 1424 If a list of heads is specified, return only nodes which are heads
1425 1425 or ancestors of these heads, and return a second element which
1426 1426 contains all remote heads which get new children.
1427 1427 """
1428 1428 if base == None:
1429 1429 base = {}
1430 1430 self.findincoming(remote, base, heads, force=force)
1431 1431
1432 1432 self.ui.debug(_("common changesets up to ")
1433 1433 + " ".join(map(short, base.keys())) + "\n")
1434 1434
1435 1435 remain = dict.fromkeys(self.changelog.nodemap)
1436 1436
1437 1437 # prune everything remote has from the tree
1438 1438 del remain[nullid]
1439 1439 remove = base.keys()
1440 1440 while remove:
1441 1441 n = remove.pop(0)
1442 1442 if n in remain:
1443 1443 del remain[n]
1444 1444 for p in self.changelog.parents(n):
1445 1445 remove.append(p)
1446 1446
1447 1447 # find every node whose parents have been pruned
1448 1448 subset = []
1449 1449 # find every remote head that will get new children
1450 1450 updated_heads = {}
1451 1451 for n in remain:
1452 1452 p1, p2 = self.changelog.parents(n)
1453 1453 if p1 not in remain and p2 not in remain:
1454 1454 subset.append(n)
1455 1455 if heads:
1456 1456 if p1 in heads:
1457 1457 updated_heads[p1] = True
1458 1458 if p2 in heads:
1459 1459 updated_heads[p2] = True
1460 1460
1461 1461 # this is the set of all roots we have to push
1462 1462 if heads:
1463 1463 return subset, updated_heads.keys()
1464 1464 else:
1465 1465 return subset
1466 1466
1467 1467 def pull(self, remote, heads=None, force=False):
1468 1468 lock = self.lock()
1469 1469 try:
1470 1470 fetch = self.findincoming(remote, heads=heads, force=force)
1471 1471 if fetch == [nullid]:
1472 1472 self.ui.status(_("requesting all changes\n"))
1473 1473
1474 1474 if not fetch:
1475 1475 self.ui.status(_("no changes found\n"))
1476 1476 return 0
1477 1477
1478 1478 if heads is None:
1479 1479 cg = remote.changegroup(fetch, 'pull')
1480 1480 else:
1481 1481 if 'changegroupsubset' not in remote.capabilities:
1482 1482 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1483 1483 cg = remote.changegroupsubset(fetch, heads, 'pull')
1484 1484 return self.addchangegroup(cg, 'pull', remote.url())
1485 1485 finally:
1486 1486 del lock
1487 1487
1488 1488 def push(self, remote, force=False, revs=None):
1489 1489 # there are two ways to push to remote repo:
1490 1490 #
1491 1491 # addchangegroup assumes local user can lock remote
1492 1492 # repo (local filesystem, old ssh servers).
1493 1493 #
1494 1494 # unbundle assumes local user cannot lock remote repo (new ssh
1495 1495 # servers, http servers).
1496 1496
1497 1497 if remote.capable('unbundle'):
1498 1498 return self.push_unbundle(remote, force, revs)
1499 1499 return self.push_addchangegroup(remote, force, revs)
1500 1500
    def prepush(self, remote, force, revs):
        """Compute the changegroup to push to remote.

        Returns (changegroup, remote_heads) on success, or (None, code)
        when nothing should be sent: code 1 means "no changes found",
        code 0 means the push was refused because it would create new
        remote heads without force.
        """
        base = {}
        remote_heads = remote.heads()
        # inc is non-empty if the remote has changes we don't;
        # base is filled with the common nodes as a side effect
        inc = self.findincoming(remote, base, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, base, remote_heads)
        if revs is not None:
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # check if we're creating new remote heads
            # to be a remote head after push, node must be either
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            # ancestral to an outgoing head

            warn = 0

            if remote_heads == [nullid]:
                # remote is empty: cannot create extra heads there
                warn = 0
            elif not revs and len(heads) > len(remote_heads):
                warn = 1
            else:
                newheads = list(heads)
                for r in remote_heads:
                    if r in self.changelog.nodemap:
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            # r has no descendant among our outgoing
                            # heads, so it stays a head after the push
                            newheads.append(r)
                    else:
                        # unknown remote head: still a head after push
                        newheads.append(r)
                if len(newheads) > len(remote_heads):
                    warn = 1

            if warn:
                self.ui.warn(_("abort: push creates new remote heads!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return None, 0
        elif inc:
            # forced push with unseen remote changes: warn but proceed
            self.ui.warn(_("note: unsynced remote changes!\n"))


        if revs is None:
            cg = self.changegroup(update, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads
1556 1556
1557 1557 def push_addchangegroup(self, remote, force, revs):
1558 1558 lock = remote.lock()
1559 1559 try:
1560 1560 ret = self.prepush(remote, force, revs)
1561 1561 if ret[0] is not None:
1562 1562 cg, remote_heads = ret
1563 1563 return remote.addchangegroup(cg, 'push', self.url())
1564 1564 return ret[1]
1565 1565 finally:
1566 1566 del lock
1567 1567
1568 1568 def push_unbundle(self, remote, force, revs):
1569 1569 # local repo finds heads on server, finds out what revs it
1570 1570 # must push. once revs transferred, if server finds it has
1571 1571 # different heads (someone else won commit/push race), server
1572 1572 # aborts.
1573 1573
1574 1574 ret = self.prepush(remote, force, revs)
1575 1575 if ret[0] is not None:
1576 1576 cg, remote_heads = ret
1577 1577 if force: remote_heads = ['force']
1578 1578 return remote.unbundle(cg, remote_heads, 'push')
1579 1579 return ret[1]
1580 1580
1581 1581 def changegroupinfo(self, nodes, source):
1582 1582 if self.ui.verbose or source == 'bundle':
1583 1583 self.ui.status(_("%d changesets found\n") % len(nodes))
1584 1584 if self.ui.debugflag:
1585 1585 self.ui.debug(_("List of changesets:\n"))
1586 1586 for node in nodes:
1587 1587 self.ui.debug("%s\n" % hex(node))
1588 1588
1589 1589 def changegroupsubset(self, bases, heads, source, extranodes=None):
1590 1590 """This function generates a changegroup consisting of all the nodes
1591 1591 that are descendents of any of the bases, and ancestors of any of
1592 1592 the heads.
1593 1593
1594 1594 It is fairly complex as determining which filenodes and which
1595 1595 manifest nodes need to be included for the changeset to be complete
1596 1596 is non-trivial.
1597 1597
1598 1598 Another wrinkle is doing the reverse, figuring out which changeset in
1599 1599 the changegroup a particular filenode or manifestnode belongs to.
1600 1600
1601 1601 The caller can specify some nodes that must be included in the
1602 1602 changegroup using the extranodes argument. It should be a dict
1603 1603 where the keys are the filenames (or 1 for the manifest), and the
1604 1604 values are lists of (node, linknode) tuples, where node is a wanted
1605 1605 node and linknode is the changelog node that should be transmitted as
1606 1606 the linkrev.
1607 1607 """
1608 1608
1609 1609 self.hook('preoutgoing', throw=True, source=source)
1610 1610
1611 1611 # Set up some initial variables
1612 1612 # Make it easy to refer to self.changelog
1613 1613 cl = self.changelog
1614 1614 # msng is short for missing - compute the list of changesets in this
1615 1615 # changegroup.
1616 1616 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1617 1617 self.changegroupinfo(msng_cl_lst, source)
1618 1618 # Some bases may turn out to be superfluous, and some heads may be
1619 1619 # too. nodesbetween will return the minimal set of bases and heads
1620 1620 # necessary to re-create the changegroup.
1621 1621
1622 1622 # Known heads are the list of heads that it is assumed the recipient
1623 1623 # of this changegroup will know about.
1624 1624 knownheads = {}
1625 1625 # We assume that all parents of bases are known heads.
1626 1626 for n in bases:
1627 1627 for p in cl.parents(n):
1628 1628 if p != nullid:
1629 1629 knownheads[p] = 1
1630 1630 knownheads = knownheads.keys()
1631 1631 if knownheads:
1632 1632 # Now that we know what heads are known, we can compute which
1633 1633 # changesets are known. The recipient must know about all
1634 1634 # changesets required to reach the known heads from the null
1635 1635 # changeset.
1636 1636 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1637 1637 junk = None
1638 1638 # Transform the list into an ersatz set.
1639 1639 has_cl_set = dict.fromkeys(has_cl_set)
1640 1640 else:
1641 1641 # If there were no known heads, the recipient cannot be assumed to
1642 1642 # know about any changesets.
1643 1643 has_cl_set = {}
1644 1644
1645 1645 # Make it easy to refer to self.manifest
1646 1646 mnfst = self.manifest
1647 1647 # We don't know which manifests are missing yet
1648 1648 msng_mnfst_set = {}
1649 1649 # Nor do we know which filenodes are missing.
1650 1650 msng_filenode_set = {}
1651 1651
1652 1652 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1653 1653 junk = None
1654 1654
1655 1655 # A changeset always belongs to itself, so the changenode lookup
1656 1656 # function for a changenode is identity.
1657 1657 def identity(x):
1658 1658 return x
1659 1659
1660 1660 # A function generating function. Sets up an environment for the
1661 1661 # inner function.
1662 1662 def cmp_by_rev_func(revlog):
1663 1663 # Compare two nodes by their revision number in the environment's
1664 1664 # revision history. Since the revision number both represents the
1665 1665 # most efficient order to read the nodes in, and represents a
1666 1666 # topological sorting of the nodes, this function is often useful.
1667 1667 def cmp_by_rev(a, b):
1668 1668 return cmp(revlog.rev(a), revlog.rev(b))
1669 1669 return cmp_by_rev
1670 1670
1671 1671 # If we determine that a particular file or manifest node must be a
1672 1672 # node that the recipient of the changegroup will already have, we can
1673 1673 # also assume the recipient will have all the parents. This function
1674 1674 # prunes them from the set of missing nodes.
1675 1675 def prune_parents(revlog, hasset, msngset):
1676 1676 haslst = hasset.keys()
1677 1677 haslst.sort(cmp_by_rev_func(revlog))
1678 1678 for node in haslst:
1679 1679 parentlst = [p for p in revlog.parents(node) if p != nullid]
1680 1680 while parentlst:
1681 1681 n = parentlst.pop()
1682 1682 if n not in hasset:
1683 1683 hasset[n] = 1
1684 1684 p = [p for p in revlog.parents(n) if p != nullid]
1685 1685 parentlst.extend(p)
1686 1686 for n in hasset:
1687 1687 msngset.pop(n, None)
1688 1688
1689 1689 # This is a function generating function used to set up an environment
1690 1690 # for the inner function to execute in.
1691 1691 def manifest_and_file_collector(changedfileset):
1692 1692 # This is an information gathering function that gathers
1693 1693 # information from each changeset node that goes out as part of
1694 1694 # the changegroup. The information gathered is a list of which
1695 1695 # manifest nodes are potentially required (the recipient may
1696 1696 # already have them) and total list of all files which were
1697 1697 # changed in any changeset in the changegroup.
1698 1698 #
1699 1699 # We also remember the first changenode we saw any manifest
1700 1700 # referenced by so we can later determine which changenode 'owns'
1701 1701 # the manifest.
1702 1702 def collect_manifests_and_files(clnode):
1703 1703 c = cl.read(clnode)
1704 1704 for f in c[3]:
1705 1705 # This is to make sure we only have one instance of each
1706 1706 # filename string for each filename.
1707 1707 changedfileset.setdefault(f, f)
1708 1708 msng_mnfst_set.setdefault(c[0], clnode)
1709 1709 return collect_manifests_and_files
1710 1710
1711 1711 # Figure out which manifest nodes (of the ones we think might be part
1712 1712 # of the changegroup) the recipient must know about and remove them
1713 1713 # from the changegroup.
1714 1714 def prune_manifests():
1715 1715 has_mnfst_set = {}
1716 1716 for n in msng_mnfst_set:
1717 1717 # If a 'missing' manifest thinks it belongs to a changenode
1718 1718 # the recipient is assumed to have, obviously the recipient
1719 1719 # must have that manifest.
1720 1720 linknode = cl.node(mnfst.linkrev(n))
1721 1721 if linknode in has_cl_set:
1722 1722 has_mnfst_set[n] = 1
1723 1723 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1724 1724
1725 1725 # Use the information collected in collect_manifests_and_files to say
1726 1726 # which changenode any manifestnode belongs to.
1727 1727 def lookup_manifest_link(mnfstnode):
1728 1728 return msng_mnfst_set[mnfstnode]
1729 1729
1730 1730 # A function generating function that sets up the initial environment
1731 1731 # the inner function.
1732 1732 def filenode_collector(changedfiles):
1733 1733 next_rev = [0]
1734 1734 # This gathers information from each manifestnode included in the
1735 1735 # changegroup about which filenodes the manifest node references
1736 1736 # so we can include those in the changegroup too.
1737 1737 #
1738 1738 # It also remembers which changenode each filenode belongs to. It
1739 1739 # does this by assuming the a filenode belongs to the changenode
1740 1740 # the first manifest that references it belongs to.
1741 1741 def collect_msng_filenodes(mnfstnode):
1742 1742 r = mnfst.rev(mnfstnode)
1743 1743 if r == next_rev[0]:
1744 1744 # If the last rev we looked at was the one just previous,
1745 1745 # we only need to see a diff.
1746 1746 deltamf = mnfst.readdelta(mnfstnode)
1747 1747 # For each line in the delta
1748 1748 for f, fnode in deltamf.items():
1749 1749 f = changedfiles.get(f, None)
1750 1750 # And if the file is in the list of files we care
1751 1751 # about.
1752 1752 if f is not None:
1753 1753 # Get the changenode this manifest belongs to
1754 1754 clnode = msng_mnfst_set[mnfstnode]
1755 1755 # Create the set of filenodes for the file if
1756 1756 # there isn't one already.
1757 1757 ndset = msng_filenode_set.setdefault(f, {})
1758 1758 # And set the filenode's changelog node to the
1759 1759 # manifest's if it hasn't been set already.
1760 1760 ndset.setdefault(fnode, clnode)
1761 1761 else:
1762 1762 # Otherwise we need a full manifest.
1763 1763 m = mnfst.read(mnfstnode)
1764 1764 # For every file in we care about.
1765 1765 for f in changedfiles:
1766 1766 fnode = m.get(f, None)
1767 1767 # If it's in the manifest
1768 1768 if fnode is not None:
1769 1769 # See comments above.
1770 1770 clnode = msng_mnfst_set[mnfstnode]
1771 1771 ndset = msng_filenode_set.setdefault(f, {})
1772 1772 ndset.setdefault(fnode, clnode)
1773 1773 # Remember the revision we hope to see next.
1774 1774 next_rev[0] = r + 1
1775 1775 return collect_msng_filenodes
1776 1776
1777 1777 # We have a list of filenodes we think we need for a file, lets remove
1778 1778 # all those we now the recipient must have.
1779 1779 def prune_filenodes(f, filerevlog):
1780 1780 msngset = msng_filenode_set[f]
1781 1781 hasset = {}
1782 1782 # If a 'missing' filenode thinks it belongs to a changenode we
1783 1783 # assume the recipient must have, then the recipient must have
1784 1784 # that filenode.
1785 1785 for n in msngset:
1786 1786 clnode = cl.node(filerevlog.linkrev(n))
1787 1787 if clnode in has_cl_set:
1788 1788 hasset[n] = 1
1789 1789 prune_parents(filerevlog, hasset, msngset)
1790 1790
1791 1791 # A function generator function that sets up the a context for the
1792 1792 # inner function.
1793 1793 def lookup_filenode_link_func(fname):
1794 1794 msngset = msng_filenode_set[fname]
1795 1795 # Lookup the changenode the filenode belongs to.
1796 1796 def lookup_filenode_link(fnode):
1797 1797 return msngset[fnode]
1798 1798 return lookup_filenode_link
1799 1799
1800 1800 # Add the nodes that were explicitly requested.
1801 1801 def add_extra_nodes(name, nodes):
1802 1802 if not extranodes or name not in extranodes:
1803 1803 return
1804 1804
1805 1805 for node, linknode in extranodes[name]:
1806 1806 if node not in nodes:
1807 1807 nodes[node] = linknode
1808 1808
1809 1809 # Now that we have all theses utility functions to help out and
1810 1810 # logically divide up the task, generate the group.
        # Now that we have all theses utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # Generator yielding the raw changegroup byte chunks:
            # changelog group, then manifest group, then one group per
            # changed file.  Relies on the collector callbacks above to
            # fill in the manifest/filenode sets as the changelog group
            # is consumed.
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            # extranodes uses the integer key 1 for manifest nodes (file
            # names are strings, so the keyspaces cannot collide).
            add_extra_nodes(1, msng_mnfst_set)
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            if extranodes:
                for fname in extranodes:
                    # skip the manifest key (1); only file names here
                    if isinstance(fname, int):
                        continue
                    add_extra_nodes(fname,
                                    msng_filenode_set.setdefault(fname, {}))
                    changedfiles[fname] = 1
            changedfiles = changedfiles.keys()
            changedfiles.sort()
            # Go through all our files in order sorted by name.
            for fname in changedfiles:
                filerevlog = self.file(fname)
                if filerevlog.count() == 0:
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if fname in msng_filenode_set:
                    prune_filenodes(fname, filerevlog)
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if fname in msng_filenode_set:
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()

        if msng_cl_lst:
            self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        # Lazily produced: nothing above runs until the buffer is read.
        return util.chunkbuffer(gengroup())
1885 1885
    def changegroup(self, basenodes, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        basenodes -- changelog nodes the recipient already has; everything
        reachable above them is bundled.
        source -- opaque origin tag passed to the preoutgoing/outgoing hooks.
        Returns a util.chunkbuffer over a lazy chunk generator.
        """

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        nodes = cl.nodesbetween(basenodes, None)[0]
        # set of outgoing changelog revision numbers, for fast membership
        revset = dict.fromkeys([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes, source)

        def identity(x):
            # changelog nodes are their own linkrev nodes
            return x

        def gennodelst(revlog):
            # Yield revlog's nodes (in revision order) whose linkrev falls
            # inside the outgoing revision set.
            for r in xrange(0, revlog.count()):
                n = revlog.node(r)
                if revlog.linkrev(n) in revset:
                    yield n

        def changed_file_collector(changedfileset):
            # Callback for cl.group(): record every file touched by each
            # outgoing changeset.
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        def lookuprevlink_func(revlog):
            # Map a node of `revlog` back to the changelog node that
            # introduced it.
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(n))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk
            changedfiles = changedfiles.keys()
            changedfiles.sort()

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in changedfiles:
                filerevlog = self.file(fname)
                if filerevlog.count() == 0:
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                nodeiter = gennodelst(filerevlog)
                # materialize so we can test emptiness before emitting the
                # file-name header chunk
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())
1955 1955
    def addchangegroup(self, source, srctype, url, emptyok=False):
        """add changegroup to repo.

        source -- stream of changegroup chunks
        srctype -- origin tag ('push', 'pull', ...) passed to the hooks
        url -- source URL passed to the hooks
        emptyok -- tolerate an empty changelog group

        return values:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - less heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            # per-changeset callback: log it and report the next revision
            # number to link against
            self.ui.debug(_("add changeset %s\n") % short(x))
            return cl.count()

        def revmap(x):
            # map a changelog node to its revision number (linkrev)
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = len(cl.heads())

        tr = self.transaction()
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            cor = cl.count() - 1
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, trp, 1) is None and not emptyok:
                raise util.Abort(_("received changelog group is empty"))
            cnr = cl.count() - 1
            changesets = cnr - cor

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, trp)

            # process the files
            self.ui.status(_("adding file changes\n"))
            while 1:
                f = changegroup.getchunk(source)
                if not f:
                    # empty chunk terminates the stream
                    break
                self.ui.debug(_("adding %s revisions\n") % f)
                fl = self.file(f)
                o = fl.count()
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += fl.count() - o
                files += 1

            # make changelog see real files again
            cl.finalize(trp)

            newheads = len(self.changelog.heads())
            heads = ""
            if oldheads and newheads != oldheads:
                heads = _(" (%+d heads)") % (newheads - oldheads)

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, heads))

            if changesets > 0:
                # pretxnchangegroup may veto the whole transaction by raising
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(self.changelog.node(cor+1)), source=srctype,
                          url=url)

            tr.close()
        finally:
            # break the reference so the transaction destructor runs
            del tr

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug(_("updating the branch cache\n"))
            self.branchtags()
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                      source=srctype, url=url)

            for i in xrange(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1
2058 2058
2059 2059
2060 2060 def stream_in(self, remote):
2061 2061 fp = remote.stream_out()
2062 2062 l = fp.readline()
2063 2063 try:
2064 2064 resp = int(l)
2065 2065 except ValueError:
2066 2066 raise util.UnexpectedOutput(
2067 2067 _('Unexpected response from remote server:'), l)
2068 2068 if resp == 1:
2069 2069 raise util.Abort(_('operation forbidden by server'))
2070 2070 elif resp == 2:
2071 2071 raise util.Abort(_('locking the remote repository failed'))
2072 2072 elif resp != 0:
2073 2073 raise util.Abort(_('the server sent an unknown error code'))
2074 2074 self.ui.status(_('streaming all changes\n'))
2075 2075 l = fp.readline()
2076 2076 try:
2077 2077 total_files, total_bytes = map(int, l.split(' ', 1))
2078 except ValueError, TypeError:
2078 except (ValueError, TypeError):
2079 2079 raise util.UnexpectedOutput(
2080 2080 _('Unexpected response from remote server:'), l)
2081 2081 self.ui.status(_('%d files to transfer, %s of data\n') %
2082 2082 (total_files, util.bytecount(total_bytes)))
2083 2083 start = time.time()
2084 2084 for i in xrange(total_files):
2085 2085 # XXX doesn't support '\n' or '\r' in filenames
2086 2086 l = fp.readline()
2087 2087 try:
2088 2088 name, size = l.split('\0', 1)
2089 2089 size = int(size)
2090 2090 except ValueError, TypeError:
2091 2091 raise util.UnexpectedOutput(
2092 2092 _('Unexpected response from remote server:'), l)
2093 2093 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
2094 2094 ofp = self.sopener(name, 'w')
2095 2095 for chunk in util.filechunkiter(fp, limit=size):
2096 2096 ofp.write(chunk)
2097 2097 ofp.close()
2098 2098 elapsed = time.time() - start
2099 2099 if elapsed <= 0:
2100 2100 elapsed = 0.001
2101 2101 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2102 2102 (util.bytecount(total_bytes), elapsed,
2103 2103 util.bytecount(total_bytes / elapsed)))
2104 2104 self.invalidate()
2105 2105 return len(self.heads()) + 1
2106 2106
2107 2107 def clone(self, remote, heads=[], stream=False):
2108 2108 '''clone remote repository.
2109 2109
2110 2110 keyword arguments:
2111 2111 heads: list of revs to clone (forces use of pull)
2112 2112 stream: use streaming clone if possible'''
2113 2113
2114 2114 # now, all clients that can request uncompressed clones can
2115 2115 # read repo formats supported by all servers that can serve
2116 2116 # them.
2117 2117
2118 2118 # if revlog format changes, client will have to check version
2119 2119 # and format flags on "stream" capability, and use
2120 2120 # uncompressed only if compatible.
2121 2121
2122 2122 if stream and not heads and remote.capable('stream'):
2123 2123 return self.stream_in(remote)
2124 2124 return self.pull(remote, heads)
2125 2125
# used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callback that renames each (src, dest) pair in *files*.

    The pairs are copied into plain tuples up front so the returned
    closure holds no reference back to the transaction's own state.
    """
    pending = [tuple(pair) for pair in files]

    def run_renames():
        for source, target in pending:
            util.rename(source, target)

    return run_renames
2133 2133
def instance(ui, path, create):
    """Instantiate a localrepository for *path*, dropping any file: scheme."""
    local_path = util.drop_scheme('file', path)
    return localrepository(ui, local_path, create)
2136 2136
def islocal(path):
    """Report whether *path* is local -- always true for this repo class."""
    return True
General Comments 0
You need to be logged in to leave comments. Login now