tag: without a checkout, base the tag changeset on tip instead of nullid...
Benoit Boissinot, r7009:3d54cf97 (branch: default)
@@ -0,0 +1,13 @@
1 #!/bin/sh
2
3 hg init
4 echo a > a
5 hg commit -Am "test" -d "1000000 0"
6
7 echo % issue 916
8 hg up 000000
9 hg parents
10 hg tag -r tip -d "1000000 0" "jglick"
11 echo % should it be removed ?
12 cat .hgtags
13 hg tip
@@ -0,0 +1,11 @@
1 adding a
2 % issue 916
3 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
4 % should it be removed ?
5 0acdaf8983679e0aac16e811534eb49d7ee1f2b4 jglick
6 changeset: 1:99b47705d075
7 tag: tip
8 user: test
9 date: Mon Jan 12 13:46:40 1970 +0000
10 summary: Added tag jglick for changeset 0acdaf898367
11
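
The heart of the fix, restated as a minimal sketch (illustrative only: repo stands for a localrepository instance, and names, node, message, local, user and date are the arguments of tag() shown in the localrepo.py diff below). With no checkout, the working directory's single parent is nullid, so the tag changeset is based on tip instead of being committed as a new root:

    parents = repo[None].parents()        # parents of the working directory
    parent = None                         # default: commit against the dirstate parent
    if len(parents) == 1 and parents[0].node() == nullid:
        # no checkout: base the tag changeset on tip (avoid creating a new head)
        parent = repo['tip'].node()
    repo._tag(names, node, message, local, user, date, parent=parent)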
@@ -1,2070 +1,2076 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import bin, hex, nullid, nullrev, short
9 9 from i18n import _
10 10 import repo, changegroup
11 11 import changelog, dirstate, filelog, manifest, context, weakref
12 12 import lock, transaction, stat, errno, ui, store
13 13 import os, revlog, time, util, extensions, hook, inspect
14 14 import match as match_
15 15 import merge as merge_
16 16
17 17 class localrepository(repo.repository):
18 18 capabilities = util.set(('lookup', 'changegroupsubset'))
19 19 supported = ('revlogv1', 'store')
20 20
21 21 def __init__(self, parentui, path=None, create=0):
22 22 repo.repository.__init__(self)
23 23 self.root = os.path.realpath(path)
24 24 self.path = os.path.join(self.root, ".hg")
25 25 self.origroot = path
26 26 self.opener = util.opener(self.path)
27 27 self.wopener = util.opener(self.root)
28 28
29 29 if not os.path.isdir(self.path):
30 30 if create:
31 31 if not os.path.exists(path):
32 32 os.mkdir(path)
33 33 os.mkdir(self.path)
34 34 requirements = ["revlogv1"]
35 35 if parentui.configbool('format', 'usestore', True):
36 36 os.mkdir(os.path.join(self.path, "store"))
37 37 requirements.append("store")
38 38 # create an invalid changelog
39 39 self.opener("00changelog.i", "a").write(
40 40 '\0\0\0\2' # represents revlogv2
41 41 ' dummy changelog to prevent using the old repo layout'
42 42 )
43 43 reqfile = self.opener("requires", "w")
44 44 for r in requirements:
45 45 reqfile.write("%s\n" % r)
46 46 reqfile.close()
47 47 else:
48 48 raise repo.RepoError(_("repository %s not found") % path)
49 49 elif create:
50 50 raise repo.RepoError(_("repository %s already exists") % path)
51 51 else:
52 52 # find requirements
53 53 requirements = []
54 54 try:
55 55 requirements = self.opener("requires").read().splitlines()
56 56 for r in requirements:
57 57 if r not in self.supported:
58 58 raise repo.RepoError(_("requirement '%s' not supported") % r)
59 59 except IOError, inst:
60 60 if inst.errno != errno.ENOENT:
61 61 raise
62 62
63 63 self.store = store.store(requirements, self.path, util.opener)
64 64 self.spath = self.store.path
65 65 self.sopener = self.store.opener
66 66 self.sjoin = self.store.join
67 67 self.opener.createmode = self.store.createmode
68 68
69 69 self.ui = ui.ui(parentui=parentui)
70 70 try:
71 71 self.ui.readconfig(self.join("hgrc"), self.root)
72 72 extensions.loadall(self.ui)
73 73 except IOError:
74 74 pass
75 75
76 76 self.tagscache = None
77 77 self._tagstypecache = None
78 78 self.branchcache = None
79 79 self._ubranchcache = None # UTF-8 version of branchcache
80 80 self._branchcachetip = None
81 81 self.nodetagscache = None
82 82 self.filterpats = {}
83 83 self._datafilters = {}
84 84 self._transref = self._lockref = self._wlockref = None
85 85
86 86 def __getattr__(self, name):
87 87 if name == 'changelog':
88 88 self.changelog = changelog.changelog(self.sopener)
89 89 self.sopener.defversion = self.changelog.version
90 90 return self.changelog
91 91 if name == 'manifest':
92 92 self.changelog
93 93 self.manifest = manifest.manifest(self.sopener)
94 94 return self.manifest
95 95 if name == 'dirstate':
96 96 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
97 97 return self.dirstate
98 98 else:
99 99 raise AttributeError(name)
100 100
101 101 def __getitem__(self, changeid):
102 102 if changeid == None:
103 103 return context.workingctx(self)
104 104 return context.changectx(self, changeid)
105 105
106 106 def __nonzero__(self):
107 107 return True
108 108
109 109 def __len__(self):
110 110 return len(self.changelog)
111 111
112 112 def __iter__(self):
113 113 for i in xrange(len(self)):
114 114 yield i
115 115
116 116 def url(self):
117 117 return 'file:' + self.root
118 118
119 119 def hook(self, name, throw=False, **args):
120 120 return hook.hook(self.ui, self, name, throw, **args)
121 121
122 122 tag_disallowed = ':\r\n'
123 123
124 124 def _tag(self, names, node, message, local, user, date, parent=None,
125 125 extra={}):
126 126 use_dirstate = parent is None
127 127
128 128 if isinstance(names, str):
129 129 allchars = names
130 130 names = (names,)
131 131 else:
132 132 allchars = ''.join(names)
133 133 for c in self.tag_disallowed:
134 134 if c in allchars:
135 135 raise util.Abort(_('%r cannot be used in a tag name') % c)
136 136
137 137 for name in names:
138 138 self.hook('pretag', throw=True, node=hex(node), tag=name,
139 139 local=local)
140 140
141 141 def writetags(fp, names, munge, prevtags):
142 142 fp.seek(0, 2)
143 143 if prevtags and prevtags[-1] != '\n':
144 144 fp.write('\n')
145 145 for name in names:
146 146 m = munge and munge(name) or name
147 147 if self._tagstypecache and name in self._tagstypecache:
148 148 old = self.tagscache.get(name, nullid)
149 149 fp.write('%s %s\n' % (hex(old), m))
150 150 fp.write('%s %s\n' % (hex(node), m))
151 151 fp.close()
152 152
153 153 prevtags = ''
154 154 if local:
155 155 try:
156 156 fp = self.opener('localtags', 'r+')
157 157 except IOError, err:
158 158 fp = self.opener('localtags', 'a')
159 159 else:
160 160 prevtags = fp.read()
161 161
162 162 # local tags are stored in the current charset
163 163 writetags(fp, names, None, prevtags)
164 164 for name in names:
165 165 self.hook('tag', node=hex(node), tag=name, local=local)
166 166 return
167 167
168 168 if use_dirstate:
169 169 try:
170 170 fp = self.wfile('.hgtags', 'rb+')
171 171 except IOError, err:
172 172 fp = self.wfile('.hgtags', 'ab')
173 173 else:
174 174 prevtags = fp.read()
175 175 else:
176 176 try:
177 177 prevtags = self.filectx('.hgtags', parent).data()
178 178 except revlog.LookupError:
179 179 pass
180 180 fp = self.wfile('.hgtags', 'wb')
181 181 if prevtags:
182 182 fp.write(prevtags)
183 183
184 184 # committed tags are stored in UTF-8
185 185 writetags(fp, names, util.fromlocal, prevtags)
186 186
187 187 if use_dirstate and '.hgtags' not in self.dirstate:
188 188 self.add(['.hgtags'])
189 189
190 190 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
191 191 extra=extra)
192 192
193 193 for name in names:
194 194 self.hook('tag', node=hex(node), tag=name, local=local)
195 195
196 196 return tagnode
197 197
198 198 def tag(self, names, node, message, local, user, date):
199 199 '''tag a revision with one or more symbolic names.
200 200
201 201 names is a list of strings or, when adding a single tag, names may be a
202 202 string.
203 203
204 204 if local is True, the tags are stored in a per-repository file.
205 205 otherwise, they are stored in the .hgtags file, and a new
206 206 changeset is committed with the change.
207 207
208 208 keyword arguments:
209 209
210 210 local: whether to store tags in non-version-controlled file
211 211 (default False)
212 212
213 213 message: commit message to use if committing
214 214
215 215 user: name of user to use if committing
216 216
217 217 date: date tuple to use if committing'''
218 218
219 219 for x in self.status()[:5]:
220 220 if '.hgtags' in x:
221 221 raise util.Abort(_('working copy of .hgtags is changed '
222 222 '(please commit .hgtags manually)'))
223 223
224 self._tag(names, node, message, local, user, date)
224 parents = self[None].parents()
225 parent = None
226 # use tip instead of the parent rev if there's no working copy
227 # (avoid creating a new head)
228 if len(parents) == 1 and parents[0].node() == nullid:
229 parent = self['tip'].node()
230 self._tag(names, node, message, local, user, date, parent=parent)
225 231
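
A usage sketch of the tag() API documented above (values are illustrative; passing date=None falls back to the current time when committing):

    # committed (non-local) tag on tip, as the test above exercises via 'hg tag'
    repo.tag('jglick', repo['tip'].node(), 'Added tag jglick',
             local=False, user='test', date=None)
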
226 232 def tags(self):
227 233 '''return a mapping of tag to node'''
228 234 if self.tagscache:
229 235 return self.tagscache
230 236
231 237 globaltags = {}
232 238 tagtypes = {}
233 239
234 240 def readtags(lines, fn, tagtype):
235 241 filetags = {}
236 242 count = 0
237 243
238 244 def warn(msg):
239 245 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
240 246
241 247 for l in lines:
242 248 count += 1
243 249 if not l:
244 250 continue
245 251 s = l.split(" ", 1)
246 252 if len(s) != 2:
247 253 warn(_("cannot parse entry"))
248 254 continue
249 255 node, key = s
250 256 key = util.tolocal(key.strip()) # stored in UTF-8
251 257 try:
252 258 bin_n = bin(node)
253 259 except TypeError:
254 260 warn(_("node '%s' is not well formed") % node)
255 261 continue
256 262 if bin_n not in self.changelog.nodemap:
257 263 warn(_("tag '%s' refers to unknown node") % key)
258 264 continue
259 265
260 266 h = []
261 267 if key in filetags:
262 268 n, h = filetags[key]
263 269 h.append(n)
264 270 filetags[key] = (bin_n, h)
265 271
266 272 for k, nh in filetags.items():
267 273 if k not in globaltags:
268 274 globaltags[k] = nh
269 275 tagtypes[k] = tagtype
270 276 continue
271 277
272 278 # we prefer the global tag if:
273 279 # it supersedes us OR
274 280 # mutual supersedes and it has a higher rank
275 281 # otherwise we win because we're tip-most
276 282 an, ah = nh
277 283 bn, bh = globaltags[k]
278 284 if (bn != an and an in bh and
279 285 (bn not in ah or len(bh) > len(ah))):
280 286 an = bn
281 287 ah.extend([n for n in bh if n not in ah])
282 288 globaltags[k] = an, ah
283 289 tagtypes[k] = tagtype
284 290
285 291 # read the tags file from each head, ending with the tip
286 292 f = None
287 293 for rev, node, fnode in self._hgtagsnodes():
288 294 f = (f and f.filectx(fnode) or
289 295 self.filectx('.hgtags', fileid=fnode))
290 296 readtags(f.data().splitlines(), f, "global")
291 297
292 298 try:
293 299 data = util.fromlocal(self.opener("localtags").read())
294 300 # localtags are stored in the local character set
295 301 # while the internal tag table is stored in UTF-8
296 302 readtags(data.splitlines(), "localtags", "local")
297 303 except IOError:
298 304 pass
299 305
300 306 self.tagscache = {}
301 307 self._tagstypecache = {}
302 308 for k,nh in globaltags.items():
303 309 n = nh[0]
304 310 if n != nullid:
305 311 self.tagscache[k] = n
306 312 self._tagstypecache[k] = tagtypes[k]
307 313 self.tagscache['tip'] = self.changelog.tip()
308 314 return self.tagscache
309 315
310 316 def tagtype(self, tagname):
311 317 '''
312 318 return the type of the given tag. result can be:
313 319
314 320 'local' : a local tag
315 321 'global' : a global tag
316 322 None : tag does not exist
317 323 '''
318 324
319 325 self.tags()
320 326
321 327 return self._tagstypecache.get(tagname)
322 328
323 329 def _hgtagsnodes(self):
324 330 heads = self.heads()
325 331 heads.reverse()
326 332 last = {}
327 333 ret = []
328 334 for node in heads:
329 335 c = self[node]
330 336 rev = c.rev()
331 337 try:
332 338 fnode = c.filenode('.hgtags')
333 339 except revlog.LookupError:
334 340 continue
335 341 ret.append((rev, node, fnode))
336 342 if fnode in last:
337 343 ret[last[fnode]] = None
338 344 last[fnode] = len(ret) - 1
339 345 return [item for item in ret if item]
340 346
341 347 def tagslist(self):
342 348 '''return a list of tags ordered by revision'''
343 349 l = []
344 350 for t, n in self.tags().items():
345 351 try:
346 352 r = self.changelog.rev(n)
347 353 except:
348 354 r = -2 # sort to the beginning of the list if unknown
349 355 l.append((r, t, n))
350 356 return [(t, n) for r, t, n in util.sort(l)]
351 357
352 358 def nodetags(self, node):
353 359 '''return the tags associated with a node'''
354 360 if not self.nodetagscache:
355 361 self.nodetagscache = {}
356 362 for t, n in self.tags().items():
357 363 self.nodetagscache.setdefault(n, []).append(t)
358 364 return self.nodetagscache.get(node, [])
359 365
360 366 def _branchtags(self, partial, lrev):
361 367 tiprev = len(self) - 1
362 368 if lrev != tiprev:
363 369 self._updatebranchcache(partial, lrev+1, tiprev+1)
364 370 self._writebranchcache(partial, self.changelog.tip(), tiprev)
365 371
366 372 return partial
367 373
368 374 def branchtags(self):
369 375 tip = self.changelog.tip()
370 376 if self.branchcache is not None and self._branchcachetip == tip:
371 377 return self.branchcache
372 378
373 379 oldtip = self._branchcachetip
374 380 self._branchcachetip = tip
375 381 if self.branchcache is None:
376 382 self.branchcache = {} # avoid recursion in changectx
377 383 else:
378 384 self.branchcache.clear() # keep using the same dict
379 385 if oldtip is None or oldtip not in self.changelog.nodemap:
380 386 partial, last, lrev = self._readbranchcache()
381 387 else:
382 388 lrev = self.changelog.rev(oldtip)
383 389 partial = self._ubranchcache
384 390
385 391 self._branchtags(partial, lrev)
386 392
387 393 # the branch cache is stored on disk as UTF-8, but in the local
388 394 # charset internally
389 395 for k, v in partial.items():
390 396 self.branchcache[util.tolocal(k)] = v
391 397 self._ubranchcache = partial
392 398 return self.branchcache
393 399
394 400 def _readbranchcache(self):
395 401 partial = {}
396 402 try:
397 403 f = self.opener("branch.cache")
398 404 lines = f.read().split('\n')
399 405 f.close()
400 406 except (IOError, OSError):
401 407 return {}, nullid, nullrev
402 408
403 409 try:
404 410 last, lrev = lines.pop(0).split(" ", 1)
405 411 last, lrev = bin(last), int(lrev)
406 412 if lrev >= len(self) or self[lrev].node() != last:
407 413 # invalidate the cache
408 414 raise ValueError('invalidating branch cache (tip differs)')
409 415 for l in lines:
410 416 if not l: continue
411 417 node, label = l.split(" ", 1)
412 418 partial[label.strip()] = bin(node)
413 419 except (KeyboardInterrupt, util.SignalInterrupt):
414 420 raise
415 421 except Exception, inst:
416 422 if self.ui.debugflag:
417 423 self.ui.warn(str(inst), '\n')
418 424 partial, last, lrev = {}, nullid, nullrev
419 425 return partial, last, lrev
420 426
421 427 def _writebranchcache(self, branches, tip, tiprev):
422 428 try:
423 429 f = self.opener("branch.cache", "w", atomictemp=True)
424 430 f.write("%s %s\n" % (hex(tip), tiprev))
425 431 for label, node in branches.iteritems():
426 432 f.write("%s %s\n" % (hex(node), label))
427 433 f.rename()
428 434 except (IOError, OSError):
429 435 pass
430 436
431 437 def _updatebranchcache(self, partial, start, end):
432 438 for r in xrange(start, end):
433 439 c = self[r]
434 440 b = c.branch()
435 441 partial[b] = c.node()
436 442
437 443 def lookup(self, key):
438 444 if key == '.':
439 445 return self.dirstate.parents()[0]
440 446 elif key == 'null':
441 447 return nullid
442 448 n = self.changelog._match(key)
443 449 if n:
444 450 return n
445 451 if key in self.tags():
446 452 return self.tags()[key]
447 453 if key in self.branchtags():
448 454 return self.branchtags()[key]
449 455 n = self.changelog._partialmatch(key)
450 456 if n:
451 457 return n
452 458 try:
453 459 if len(key) == 20:
454 460 key = hex(key)
455 461 except:
456 462 pass
457 463 raise repo.RepoError(_("unknown revision '%s'") % key)
458 464
459 465 def local(self):
460 466 return True
461 467
462 468 def join(self, f):
463 469 return os.path.join(self.path, f)
464 470
465 471 def wjoin(self, f):
466 472 return os.path.join(self.root, f)
467 473
468 474 def rjoin(self, f):
469 475 return os.path.join(self.root, util.pconvert(f))
470 476
471 477 def file(self, f):
472 478 if f[0] == '/':
473 479 f = f[1:]
474 480 return filelog.filelog(self.sopener, f)
475 481
476 482 def changectx(self, changeid):
477 483 return self[changeid]
478 484
479 485 def parents(self, changeid=None):
480 486 '''get list of changectxs for parents of changeid'''
481 487 return self[changeid].parents()
482 488
483 489 def filectx(self, path, changeid=None, fileid=None):
484 490 """changeid can be a changeset revision, node, or tag.
485 491 fileid can be a file revision or node."""
486 492 return context.filectx(self, path, changeid, fileid)
487 493
488 494 def getcwd(self):
489 495 return self.dirstate.getcwd()
490 496
491 497 def pathto(self, f, cwd=None):
492 498 return self.dirstate.pathto(f, cwd)
493 499
494 500 def wfile(self, f, mode='r'):
495 501 return self.wopener(f, mode)
496 502
497 503 def _link(self, f):
498 504 return os.path.islink(self.wjoin(f))
499 505
500 506 def _filter(self, filter, filename, data):
501 507 if filter not in self.filterpats:
502 508 l = []
503 509 for pat, cmd in self.ui.configitems(filter):
504 510 mf = util.matcher(self.root, "", [pat], [], [])[1]
505 511 fn = None
506 512 params = cmd
507 513 for name, filterfn in self._datafilters.iteritems():
508 514 if cmd.startswith(name):
509 515 fn = filterfn
510 516 params = cmd[len(name):].lstrip()
511 517 break
512 518 if not fn:
513 519 fn = lambda s, c, **kwargs: util.filter(s, c)
514 520 # Wrap old filters not supporting keyword arguments
515 521 if not inspect.getargspec(fn)[2]:
516 522 oldfn = fn
517 523 fn = lambda s, c, **kwargs: oldfn(s, c)
518 524 l.append((mf, fn, params))
519 525 self.filterpats[filter] = l
520 526
521 527 for mf, fn, cmd in self.filterpats[filter]:
522 528 if mf(filename):
523 529 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
524 530 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
525 531 break
526 532
527 533 return data
528 534
529 535 def adddatafilter(self, name, filter):
530 536 self._datafilters[name] = filter
531 537
532 538 def wread(self, filename):
533 539 if self._link(filename):
534 540 data = os.readlink(self.wjoin(filename))
535 541 else:
536 542 data = self.wopener(filename, 'r').read()
537 543 return self._filter("encode", filename, data)
538 544
539 545 def wwrite(self, filename, data, flags):
540 546 data = self._filter("decode", filename, data)
541 547 try:
542 548 os.unlink(self.wjoin(filename))
543 549 except OSError:
544 550 pass
545 551 if 'l' in flags:
546 552 self.wopener.symlink(data, filename)
547 553 else:
548 554 self.wopener(filename, 'w').write(data)
549 555 if 'x' in flags:
550 556 util.set_flags(self.wjoin(filename), False, True)
551 557
552 558 def wwritedata(self, filename, data):
553 559 return self._filter("decode", filename, data)
554 560
555 561 def transaction(self):
556 562 if self._transref and self._transref():
557 563 return self._transref().nest()
558 564
559 565 # abort here if the journal already exists
560 566 if os.path.exists(self.sjoin("journal")):
561 567 raise repo.RepoError(_("journal already exists - run hg recover"))
562 568
563 569 # save dirstate for rollback
564 570 try:
565 571 ds = self.opener("dirstate").read()
566 572 except IOError:
567 573 ds = ""
568 574 self.opener("journal.dirstate", "w").write(ds)
569 575 self.opener("journal.branch", "w").write(self.dirstate.branch())
570 576
571 577 renames = [(self.sjoin("journal"), self.sjoin("undo")),
572 578 (self.join("journal.dirstate"), self.join("undo.dirstate")),
573 579 (self.join("journal.branch"), self.join("undo.branch"))]
574 580 tr = transaction.transaction(self.ui.warn, self.sopener,
575 581 self.sjoin("journal"),
576 582 aftertrans(renames),
577 583 self.store.createmode)
578 584 self._transref = weakref.ref(tr)
579 585 return tr
580 586
581 587 def recover(self):
582 588 l = self.lock()
583 589 try:
584 590 if os.path.exists(self.sjoin("journal")):
585 591 self.ui.status(_("rolling back interrupted transaction\n"))
586 592 transaction.rollback(self.sopener, self.sjoin("journal"))
587 593 self.invalidate()
588 594 return True
589 595 else:
590 596 self.ui.warn(_("no interrupted transaction available\n"))
591 597 return False
592 598 finally:
593 599 del l
594 600
595 601 def rollback(self):
596 602 wlock = lock = None
597 603 try:
598 604 wlock = self.wlock()
599 605 lock = self.lock()
600 606 if os.path.exists(self.sjoin("undo")):
601 607 self.ui.status(_("rolling back last transaction\n"))
602 608 transaction.rollback(self.sopener, self.sjoin("undo"))
603 609 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
604 610 try:
605 611 branch = self.opener("undo.branch").read()
606 612 self.dirstate.setbranch(branch)
607 613 except IOError:
608 614 self.ui.warn(_("Named branch could not be reset, "
609 615 "current branch still is: %s\n")
610 616 % util.tolocal(self.dirstate.branch()))
611 617 self.invalidate()
612 618 self.dirstate.invalidate()
613 619 else:
614 620 self.ui.warn(_("no rollback information available\n"))
615 621 finally:
616 622 del lock, wlock
617 623
618 624 def invalidate(self):
619 625 for a in "changelog manifest".split():
620 626 if a in self.__dict__:
621 627 delattr(self, a)
622 628 self.tagscache = None
623 629 self._tagstypecache = None
624 630 self.nodetagscache = None
625 631 self.branchcache = None
626 632 self._ubranchcache = None
627 633 self._branchcachetip = None
628 634
629 635 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
630 636 try:
631 637 l = lock.lock(lockname, 0, releasefn, desc=desc)
632 638 except lock.LockHeld, inst:
633 639 if not wait:
634 640 raise
635 641 self.ui.warn(_("waiting for lock on %s held by %r\n") %
636 642 (desc, inst.locker))
637 643 # default to 600 seconds timeout
638 644 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
639 645 releasefn, desc=desc)
640 646 if acquirefn:
641 647 acquirefn()
642 648 return l
643 649
644 650 def lock(self, wait=True):
645 651 if self._lockref and self._lockref():
646 652 return self._lockref()
647 653
648 654 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
649 655 _('repository %s') % self.origroot)
650 656 self._lockref = weakref.ref(l)
651 657 return l
652 658
653 659 def wlock(self, wait=True):
654 660 if self._wlockref and self._wlockref():
655 661 return self._wlockref()
656 662
657 663 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
658 664 self.dirstate.invalidate, _('working directory of %s') %
659 665 self.origroot)
660 666 self._wlockref = weakref.ref(l)
661 667 return l
662 668
663 669 def filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
664 670 """
665 671 commit an individual file as part of a larger transaction
666 672 """
667 673
668 674 fn = fctx.path()
669 675 t = fctx.data()
670 676 fl = self.file(fn)
671 677 fp1 = manifest1.get(fn, nullid)
672 678 fp2 = manifest2.get(fn, nullid)
673 679
674 680 meta = {}
675 681 cp = fctx.renamed()
676 682 if cp and cp[0] != fn:
677 683 # Mark the new revision of this file as a copy of another
678 684 # file. This copy data will effectively act as a parent
679 685 # of this new revision. If this is a merge, the first
680 686 # parent will be the nullid (meaning "look up the copy data")
681 687 # and the second one will be the other parent. For example:
682 688 #
683 689 # 0 --- 1 --- 3 rev1 changes file foo
684 690 # \ / rev2 renames foo to bar and changes it
685 691 # \- 2 -/ rev3 should have bar with all changes and
686 692 # should record that bar descends from
687 693 # bar in rev2 and foo in rev1
688 694 #
689 695 # this allows this merge to succeed:
690 696 #
691 697 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
692 698 # \ / merging rev3 and rev4 should use bar@rev2
693 699 # \- 2 --- 4 as the merge base
694 700 #
695 701
696 702 cf = cp[0]
697 703 cr = manifest1.get(cf)
698 704 nfp = fp2
699 705
700 706 if manifest2: # branch merge
701 707 if fp2 == nullid: # copied on remote side
702 708 if fp1 != nullid or cf in manifest2:
703 709 cr = manifest2[cf]
704 710 nfp = fp1
705 711
706 712 # find source in nearest ancestor if we've lost track
707 713 if not cr:
708 714 self.ui.debug(_(" %s: searching for copy revision for %s\n") %
709 715 (fn, cf))
710 716 for a in self['.'].ancestors():
711 717 if cf in a:
712 718 cr = a[cf].filenode()
713 719 break
714 720
715 721 self.ui.debug(_(" %s: copy %s:%s\n") % (fn, cf, hex(cr)))
716 722 meta["copy"] = cf
717 723 meta["copyrev"] = hex(cr)
718 724 fp1, fp2 = nullid, nfp
719 725 elif fp2 != nullid:
720 726 # is one parent an ancestor of the other?
721 727 fpa = fl.ancestor(fp1, fp2)
722 728 if fpa == fp1:
723 729 fp1, fp2 = fp2, nullid
724 730 elif fpa == fp2:
725 731 fp2 = nullid
726 732
727 733 # is the file unmodified from the parent? report existing entry
728 734 if fp2 == nullid and not fl.cmp(fp1, t) and not meta:
729 735 return fp1
730 736
731 737 changelist.append(fn)
732 738 return fl.add(t, meta, tr, linkrev, fp1, fp2)
733 739
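
For the rename case walked through in the comment block above, the copy metadata recorded by filecommit takes roughly this shape (a sketch with illustrative values, not actual output):

    # after 'hg rename foo bar' is committed, bar's new filelog revision carries
    meta = {'copy': 'foo',          # the copy source path
            'copyrev': hex(cr)}     # cr: foo's filenode found in the parent manifest
    # and its filelog parents become (nullid, nfp), meaning 'look up the copy data'
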
734 740 def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}):
735 741 if p1 is None:
736 742 p1, p2 = self.dirstate.parents()
737 743 return self.commit(files=files, text=text, user=user, date=date,
738 744 p1=p1, p2=p2, extra=extra, empty_ok=True)
739 745
740 746 def commit(self, files=None, text="", user=None, date=None,
741 747 match=None, force=False, force_editor=False,
742 748 p1=None, p2=None, extra={}, empty_ok=False):
743 749 wlock = lock = None
744 750 if files:
745 751 files = util.unique(files)
746 752 try:
747 753 wlock = self.wlock()
748 754 lock = self.lock()
749 755 use_dirstate = (p1 is None) # not rawcommit
750 756
751 757 if use_dirstate:
752 758 p1, p2 = self.dirstate.parents()
753 759 update_dirstate = True
754 760
755 761 if (not force and p2 != nullid and
756 762 (match and (match.files() or match.anypats()))):
757 763 raise util.Abort(_('cannot partially commit a merge '
758 764 '(do not specify files or patterns)'))
759 765
760 766 if files:
761 767 modified, removed = [], []
762 768 for f in files:
763 769 s = self.dirstate[f]
764 770 if s in 'nma':
765 771 modified.append(f)
766 772 elif s == 'r':
767 773 removed.append(f)
768 774 else:
769 775 self.ui.warn(_("%s not tracked!\n") % f)
770 776 changes = [modified, [], removed, [], []]
771 777 else:
772 778 changes = self.status(match=match)
773 779 else:
774 780 p1, p2 = p1, p2 or nullid
775 781 update_dirstate = (self.dirstate.parents()[0] == p1)
776 782 changes = [files, [], [], [], []]
777 783
778 784 ms = merge_.mergestate(self)
779 785 for f in changes[0]:
780 786 if f in ms and ms[f] == 'u':
781 787 raise util.Abort(_("unresolved merge conflicts "
782 788 "(see hg resolve)"))
783 789 wctx = context.workingctx(self, (p1, p2), text, user, date,
784 790 extra, changes)
785 791 return self._commitctx(wctx, force, force_editor, empty_ok,
786 792 use_dirstate, update_dirstate)
787 793 finally:
788 794 del lock, wlock
789 795
790 796 def commitctx(self, ctx):
791 797 wlock = lock = None
792 798 try:
793 799 wlock = self.wlock()
794 800 lock = self.lock()
795 801 return self._commitctx(ctx, force=True, force_editor=False,
796 802 empty_ok=True, use_dirstate=False,
797 803 update_dirstate=False)
798 804 finally:
799 805 del lock, wlock
800 806
801 807 def _commitctx(self, wctx, force=False, force_editor=False, empty_ok=False,
802 808 use_dirstate=True, update_dirstate=True):
803 809 tr = None
804 810 valid = 0 # don't save the dirstate if this isn't set
805 811 try:
806 812 commit = util.sort(wctx.modified() + wctx.added())
807 813 remove = wctx.removed()
808 814 extra = wctx.extra().copy()
809 815 branchname = extra['branch']
810 816 user = wctx.user()
811 817 text = wctx.description()
812 818
813 819 p1, p2 = [p.node() for p in wctx.parents()]
814 820 c1 = self.changelog.read(p1)
815 821 c2 = self.changelog.read(p2)
816 822 m1 = self.manifest.read(c1[0]).copy()
817 823 m2 = self.manifest.read(c2[0])
818 824
819 825 if use_dirstate:
820 826 oldname = c1[5].get("branch") # stored in UTF-8
821 827 if (not commit and not remove and not force and p2 == nullid
822 828 and branchname == oldname):
823 829 self.ui.status(_("nothing changed\n"))
824 830 return None
825 831
826 832 xp1 = hex(p1)
827 833 if p2 == nullid: xp2 = ''
828 834 else: xp2 = hex(p2)
829 835
830 836 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
831 837
832 838 tr = self.transaction()
833 839 trp = weakref.proxy(tr)
834 840
835 841 # check in files
836 842 new = {}
837 843 changed = []
838 844 linkrev = len(self)
839 845 for f in commit:
840 846 self.ui.note(f + "\n")
841 847 try:
842 848 fctx = wctx.filectx(f)
843 849 newflags = fctx.flags()
844 850 new[f] = self.filecommit(fctx, m1, m2, linkrev, trp, changed)
845 851 if ((not changed or changed[-1] != f) and
846 852 m2.get(f) != new[f]):
847 853 # mention the file in the changelog if some
848 854 # flag changed, even if there was no content
849 855 # change.
850 856 if m1.flags(f) != newflags:
851 857 changed.append(f)
852 858 m1.set(f, newflags)
853 859 if use_dirstate:
854 860 self.dirstate.normal(f)
855 861
856 862 except (OSError, IOError):
857 863 if use_dirstate:
858 864 self.ui.warn(_("trouble committing %s!\n") % f)
859 865 raise
860 866 else:
861 867 remove.append(f)
862 868
863 869 # update manifest
864 870 m1.update(new)
865 871 removed = []
866 872
867 873 for f in util.sort(remove):
868 874 if f in m1:
869 875 del m1[f]
870 876 removed.append(f)
871 877 elif f in m2:
872 878 removed.append(f)
873 879 mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
874 880 (new, removed))
875 881
876 882 # add changeset
877 883 if (not empty_ok and not text) or force_editor:
878 884 edittext = []
879 885 if text:
880 886 edittext.append(text)
881 887 edittext.append("")
882 888 edittext.append("") # Empty line between message and comments.
883 889 edittext.append(_("HG: Enter commit message."
884 890 " Lines beginning with 'HG:' are removed."))
885 891 edittext.append("HG: --")
886 892 edittext.append("HG: user: %s" % user)
887 893 if p2 != nullid:
888 894 edittext.append("HG: branch merge")
889 895 if branchname:
890 896 edittext.append("HG: branch '%s'" % util.tolocal(branchname))
891 897 edittext.extend(["HG: changed %s" % f for f in changed])
892 898 edittext.extend(["HG: removed %s" % f for f in removed])
893 899 if not changed and not remove:
894 900 edittext.append("HG: no files changed")
895 901 edittext.append("")
896 902 # run editor in the repository root
897 903 olddir = os.getcwd()
898 904 os.chdir(self.root)
899 905 text = self.ui.edit("\n".join(edittext), user)
900 906 os.chdir(olddir)
901 907
902 908 lines = [line.rstrip() for line in text.rstrip().splitlines()]
903 909 while lines and not lines[0]:
904 910 del lines[0]
905 911 if not lines and use_dirstate:
906 912 raise util.Abort(_("empty commit message"))
907 913 text = '\n'.join(lines)
908 914
909 915 n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
910 916 user, wctx.date(), extra)
911 917 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
912 918 parent2=xp2)
913 919 tr.close()
914 920
915 921 if self.branchcache:
916 922 self.branchtags()
917 923
918 924 if use_dirstate or update_dirstate:
919 925 self.dirstate.setparents(n)
920 926 if use_dirstate:
921 927 for f in removed:
922 928 self.dirstate.forget(f)
923 929 valid = 1 # our dirstate updates are complete
924 930
925 931 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
926 932 return n
927 933 finally:
928 934 if not valid: # don't save our updated dirstate
929 935 self.dirstate.invalidate()
930 936 del tr
931 937
932 938 def walk(self, match, node=None):
933 939 '''
934 940 walk recursively through the directory tree or a given
935 941 changeset, finding all files matched by the match
936 942 function
937 943 '''
938 944 return self[node].walk(match)
939 945
940 946 def status(self, node1='.', node2=None, match=None,
941 947 ignored=False, clean=False, unknown=False):
942 948 """return status of files between two nodes or node and working directory
943 949
944 950 If node1 is None, use the first dirstate parent instead.
945 951 If node2 is None, compare node1 with working directory.
946 952 """
947 953
948 954 def mfmatches(ctx):
949 955 mf = ctx.manifest().copy()
950 956 for fn in mf.keys():
951 957 if not match(fn):
952 958 del mf[fn]
953 959 return mf
954 960
955 961 ctx1 = self[node1]
956 962 ctx2 = self[node2]
957 963 working = ctx2 == self[None]
958 964 parentworking = working and ctx1 == self['.']
959 965 match = match or match_.always(self.root, self.getcwd())
960 966 listignored, listclean, listunknown = ignored, clean, unknown
961 967
962 968 if working: # we need to scan the working dir
963 969 s = self.dirstate.status(match, listignored, listclean, listunknown)
964 970 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
965 971
966 972 # check for any possibly clean files
967 973 if parentworking and cmp:
968 974 fixup = []
969 975 # do a full compare of any files that might have changed
970 976 for f in cmp:
971 977 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
972 978 or ctx1[f].cmp(ctx2[f].data())):
973 979 modified.append(f)
974 980 else:
975 981 fixup.append(f)
976 982
977 983 if listclean:
978 984 clean += fixup
979 985
980 986 # update dirstate for files that are actually clean
981 987 if fixup:
982 988 wlock = None
983 989 try:
984 990 try:
985 991 wlock = self.wlock(False)
986 992 for f in fixup:
987 993 self.dirstate.normal(f)
988 994 except lock.LockException:
989 995 pass
990 996 finally:
991 997 del wlock
992 998
993 999 if not parentworking:
994 1000 mf1 = mfmatches(ctx1)
995 1001 if working:
996 1002 # we are comparing working dir against non-parent
997 1003 # generate a pseudo-manifest for the working dir
998 1004 mf2 = mfmatches(self['.'])
999 1005 for f in cmp + modified + added:
1000 1006 mf2[f] = None
1001 1007 mf2.set(f, ctx2.flags(f))
1002 1008 for f in removed:
1003 1009 if f in mf2:
1004 1010 del mf2[f]
1005 1011 else:
1006 1012 # we are comparing two revisions
1007 1013 deleted, unknown, ignored = [], [], []
1008 1014 mf2 = mfmatches(ctx2)
1009 1015
1010 1016 modified, added, clean = [], [], []
1011 1017 for fn in mf2:
1012 1018 if fn in mf1:
1013 1019 if (mf1.flags(fn) != mf2.flags(fn) or
1014 1020 (mf1[fn] != mf2[fn] and
1015 1021 (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
1016 1022 modified.append(fn)
1017 1023 elif listclean:
1018 1024 clean.append(fn)
1019 1025 del mf1[fn]
1020 1026 else:
1021 1027 added.append(fn)
1022 1028 removed = mf1.keys()
1023 1029
1024 1030 r = modified, added, removed, deleted, unknown, ignored, clean
1025 1031 [l.sort() for l in r]
1026 1032 return r
1027 1033
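
A usage sketch of status() as documented above (illustrative): with the default arguments it compares the working directory against its first parent, and the ignored, clean and unknown lists stay empty unless explicitly requested:

    modified, added, removed, deleted, unknown, ignored, clean = repo.status(
        ignored=True, clean=True, unknown=True)
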
1028 1034 def add(self, list):
1029 1035 wlock = self.wlock()
1030 1036 try:
1031 1037 rejected = []
1032 1038 for f in list:
1033 1039 p = self.wjoin(f)
1034 1040 try:
1035 1041 st = os.lstat(p)
1036 1042 except:
1037 1043 self.ui.warn(_("%s does not exist!\n") % f)
1038 1044 rejected.append(f)
1039 1045 continue
1040 1046 if st.st_size > 10000000:
1041 1047 self.ui.warn(_("%s: files over 10MB may cause memory and"
1042 1048 " performance problems\n"
1043 1049 "(use 'hg revert %s' to unadd the file)\n")
1044 1050 % (f, f))
1045 1051 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1046 1052 self.ui.warn(_("%s not added: only files and symlinks "
1047 1053 "supported currently\n") % f)
1048 1054 rejected.append(p)
1049 1055 elif self.dirstate[f] in 'amn':
1050 1056 self.ui.warn(_("%s already tracked!\n") % f)
1051 1057 elif self.dirstate[f] == 'r':
1052 1058 self.dirstate.normallookup(f)
1053 1059 else:
1054 1060 self.dirstate.add(f)
1055 1061 return rejected
1056 1062 finally:
1057 1063 del wlock
1058 1064
1059 1065 def forget(self, list):
1060 1066 wlock = self.wlock()
1061 1067 try:
1062 1068 for f in list:
1063 1069 if self.dirstate[f] != 'a':
1064 1070 self.ui.warn(_("%s not added!\n") % f)
1065 1071 else:
1066 1072 self.dirstate.forget(f)
1067 1073 finally:
1068 1074 del wlock
1069 1075
1070 1076 def remove(self, list, unlink=False):
1071 1077 wlock = None
1072 1078 try:
1073 1079 if unlink:
1074 1080 for f in list:
1075 1081 try:
1076 1082 util.unlink(self.wjoin(f))
1077 1083 except OSError, inst:
1078 1084 if inst.errno != errno.ENOENT:
1079 1085 raise
1080 1086 wlock = self.wlock()
1081 1087 for f in list:
1082 1088 if unlink and os.path.exists(self.wjoin(f)):
1083 1089 self.ui.warn(_("%s still exists!\n") % f)
1084 1090 elif self.dirstate[f] == 'a':
1085 1091 self.dirstate.forget(f)
1086 1092 elif f not in self.dirstate:
1087 1093 self.ui.warn(_("%s not tracked!\n") % f)
1088 1094 else:
1089 1095 self.dirstate.remove(f)
1090 1096 finally:
1091 1097 del wlock
1092 1098
1093 1099 def undelete(self, list):
1094 1100 wlock = None
1095 1101 try:
1096 1102 manifests = [self.manifest.read(self.changelog.read(p)[0])
1097 1103 for p in self.dirstate.parents() if p != nullid]
1098 1104 wlock = self.wlock()
1099 1105 for f in list:
1100 1106 if self.dirstate[f] != 'r':
1101 1107 self.ui.warn(_("%s not removed!\n") % f)
1102 1108 else:
1103 1109 m = f in manifests[0] and manifests[0] or manifests[1]
1104 1110 t = self.file(f).read(m[f])
1105 1111 self.wwrite(f, t, m.flags(f))
1106 1112 self.dirstate.normal(f)
1107 1113 finally:
1108 1114 del wlock
1109 1115
1110 1116 def copy(self, source, dest):
1111 1117 wlock = None
1112 1118 try:
1113 1119 p = self.wjoin(dest)
1114 1120 if not (os.path.exists(p) or os.path.islink(p)):
1115 1121 self.ui.warn(_("%s does not exist!\n") % dest)
1116 1122 elif not (os.path.isfile(p) or os.path.islink(p)):
1117 1123 self.ui.warn(_("copy failed: %s is not a file or a "
1118 1124 "symbolic link\n") % dest)
1119 1125 else:
1120 1126 wlock = self.wlock()
1121 1127 if dest not in self.dirstate:
1122 1128 self.dirstate.add(dest)
1123 1129 self.dirstate.copy(source, dest)
1124 1130 finally:
1125 1131 del wlock
1126 1132
1127 1133 def heads(self, start=None):
1128 1134 heads = self.changelog.heads(start)
1129 1135 # sort the output in rev descending order
1130 1136 heads = [(-self.changelog.rev(h), h) for h in heads]
1131 1137 return [n for (r, n) in util.sort(heads)]
1132 1138
1133 1139 def branchheads(self, branch=None, start=None):
1134 1140 if branch is None:
1135 1141 branch = self[None].branch()
1136 1142 branches = self.branchtags()
1137 1143 if branch not in branches:
1138 1144 return []
1139 1145 # The basic algorithm is this:
1140 1146 #
1141 1147 # Start from the branch tip since there are no later revisions that can
1142 1148 # possibly be in this branch, and the tip is a guaranteed head.
1143 1149 #
1144 1150 # Remember the tip's parents as the first ancestors, since these by
1145 1151 # definition are not heads.
1146 1152 #
1147 1153 # Step backwards from the brach tip through all the revisions. We are
1148 1154 # guaranteed by the rules of Mercurial that we will now be visiting the
1149 1155 # nodes in reverse topological order (children before parents).
1150 1156 #
1151 1157 # If a revision is one of the ancestors of a head then we can toss it
1152 1158 # out of the ancestors set (we've already found it and won't be
1153 1159 # visiting it again) and put its parents in the ancestors set.
1154 1160 #
1155 1161 # Otherwise, if a revision is in the branch it's another head, since it
1156 1162 # wasn't in the ancestor list of an existing head. So add it to the
1157 1163 # head list, and add its parents to the ancestor list.
1158 1164 #
1159 1165 # If it is not in the branch ignore it.
1160 1166 #
1161 1167 # Once we have a list of heads, use nodesbetween to filter out all the
1162 1168 # heads that cannot be reached from startrev. There may be a more
1163 1169 # efficient way to do this as part of the previous algorithm.
1164 1170
1165 1171 set = util.set
1166 1172 heads = [self.changelog.rev(branches[branch])]
1167 1173 # Don't care if ancestors contains nullrev or not.
1168 1174 ancestors = set(self.changelog.parentrevs(heads[0]))
1169 1175 for rev in xrange(heads[0] - 1, nullrev, -1):
1170 1176 if rev in ancestors:
1171 1177 ancestors.update(self.changelog.parentrevs(rev))
1172 1178 ancestors.remove(rev)
1173 1179 elif self[rev].branch() == branch:
1174 1180 heads.append(rev)
1175 1181 ancestors.update(self.changelog.parentrevs(rev))
1176 1182 heads = [self.changelog.node(rev) for rev in heads]
1177 1183 if start is not None:
1178 1184 heads = self.changelog.nodesbetween([start], heads)[2]
1179 1185 return heads
1180 1186
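
The reverse-topological scan described in the comment block above, restated as a self-contained sketch (assumed helpers: parentrevs(rev) returns the pair of parent revisions, branch_of(rev) returns a branch name, tiprev is the revision of the branch's tip, and -1 plays the role of nullrev):

    def branch_heads_sketch(tiprev, parentrevs, branch_of, branch):
        heads = [tiprev]                           # the branch tip is always a head
        ancestors = set(parentrevs(tiprev))        # revisions known not to be heads
        for rev in range(tiprev - 1, -1, -1):      # children before parents
            if rev in ancestors:
                ancestors.update(parentrevs(rev))  # replace it by its parents
                ancestors.remove(rev)
            elif branch_of(rev) == branch:
                heads.append(rev)                  # in-branch and not an ancestor
                ancestors.update(parentrevs(rev))
        return heads
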
1181 1187 def branches(self, nodes):
1182 1188 if not nodes:
1183 1189 nodes = [self.changelog.tip()]
1184 1190 b = []
1185 1191 for n in nodes:
1186 1192 t = n
1187 1193 while 1:
1188 1194 p = self.changelog.parents(n)
1189 1195 if p[1] != nullid or p[0] == nullid:
1190 1196 b.append((t, n, p[0], p[1]))
1191 1197 break
1192 1198 n = p[0]
1193 1199 return b
1194 1200
1195 1201 def between(self, pairs):
1196 1202 r = []
1197 1203
1198 1204 for top, bottom in pairs:
1199 1205 n, l, i = top, [], 0
1200 1206 f = 1
1201 1207
1202 1208 while n != bottom:
1203 1209 p = self.changelog.parents(n)[0]
1204 1210 if i == f:
1205 1211 l.append(n)
1206 1212 f = f * 2
1207 1213 n = p
1208 1214 i += 1
1209 1215
1210 1216 r.append(l)
1211 1217
1212 1218 return r
1213 1219
1214 1220 def findincoming(self, remote, base=None, heads=None, force=False):
1215 1221 """Return list of roots of the subsets of missing nodes from remote
1216 1222
1217 1223 If base dict is specified, assume that these nodes and their parents
1218 1224 exist on the remote side and that no child of a node of base exists
1219 1225 in both remote and self.
1220 1226 Furthermore base will be updated to include the nodes that exists
1221 1227 in self and remote but no children exists in self and remote.
1222 1228 If a list of heads is specified, return only nodes which are heads
1223 1229 or ancestors of these heads.
1224 1230
1225 1231 All the ancestors of base are in self and in remote.
1226 1232 All the descendants of the list returned are missing in self.
1227 1233 (and so we know that the rest of the nodes are missing in remote, see
1228 1234 outgoing)
1229 1235 """
1230 1236 m = self.changelog.nodemap
1231 1237 search = []
1232 1238 fetch = {}
1233 1239 seen = {}
1234 1240 seenbranch = {}
1235 1241 if base == None:
1236 1242 base = {}
1237 1243
1238 1244 if not heads:
1239 1245 heads = remote.heads()
1240 1246
1241 1247 if self.changelog.tip() == nullid:
1242 1248 base[nullid] = 1
1243 1249 if heads != [nullid]:
1244 1250 return [nullid]
1245 1251 return []
1246 1252
1247 1253 # assume we're closer to the tip than the root
1248 1254 # and start by examining the heads
1249 1255 self.ui.status(_("searching for changes\n"))
1250 1256
1251 1257 unknown = []
1252 1258 for h in heads:
1253 1259 if h not in m:
1254 1260 unknown.append(h)
1255 1261 else:
1256 1262 base[h] = 1
1257 1263
1258 1264 if not unknown:
1259 1265 return []
1260 1266
1261 1267 req = dict.fromkeys(unknown)
1262 1268 reqcnt = 0
1263 1269
1264 1270 # search through remote branches
1265 1271 # a 'branch' here is a linear segment of history, with four parts:
1266 1272 # head, root, first parent, second parent
1267 1273 # (a branch always has two parents (or none) by definition)
1268 1274 unknown = remote.branches(unknown)
1269 1275 while unknown:
1270 1276 r = []
1271 1277 while unknown:
1272 1278 n = unknown.pop(0)
1273 1279 if n[0] in seen:
1274 1280 continue
1275 1281
1276 1282 self.ui.debug(_("examining %s:%s\n")
1277 1283 % (short(n[0]), short(n[1])))
1278 1284 if n[0] == nullid: # found the end of the branch
1279 1285 pass
1280 1286 elif n in seenbranch:
1281 1287 self.ui.debug(_("branch already found\n"))
1282 1288 continue
1283 1289 elif n[1] and n[1] in m: # do we know the base?
1284 1290 self.ui.debug(_("found incomplete branch %s:%s\n")
1285 1291 % (short(n[0]), short(n[1])))
1286 1292 search.append(n) # schedule branch range for scanning
1287 1293 seenbranch[n] = 1
1288 1294 else:
1289 1295 if n[1] not in seen and n[1] not in fetch:
1290 1296 if n[2] in m and n[3] in m:
1291 1297 self.ui.debug(_("found new changeset %s\n") %
1292 1298 short(n[1]))
1293 1299 fetch[n[1]] = 1 # earliest unknown
1294 1300 for p in n[2:4]:
1295 1301 if p in m:
1296 1302 base[p] = 1 # latest known
1297 1303
1298 1304 for p in n[2:4]:
1299 1305 if p not in req and p not in m:
1300 1306 r.append(p)
1301 1307 req[p] = 1
1302 1308 seen[n[0]] = 1
1303 1309
1304 1310 if r:
1305 1311 reqcnt += 1
1306 1312 self.ui.debug(_("request %d: %s\n") %
1307 1313 (reqcnt, " ".join(map(short, r))))
1308 1314 for p in xrange(0, len(r), 10):
1309 1315 for b in remote.branches(r[p:p+10]):
1310 1316 self.ui.debug(_("received %s:%s\n") %
1311 1317 (short(b[0]), short(b[1])))
1312 1318 unknown.append(b)
1313 1319
1314 1320 # do binary search on the branches we found
1315 1321 while search:
1316 1322 n = search.pop(0)
1317 1323 reqcnt += 1
1318 1324 l = remote.between([(n[0], n[1])])[0]
1319 1325 l.append(n[1])
1320 1326 p = n[0]
1321 1327 f = 1
1322 1328 for i in l:
1323 1329 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1324 1330 if i in m:
1325 1331 if f <= 2:
1326 1332 self.ui.debug(_("found new branch changeset %s\n") %
1327 1333 short(p))
1328 1334 fetch[p] = 1
1329 1335 base[i] = 1
1330 1336 else:
1331 1337 self.ui.debug(_("narrowed branch search to %s:%s\n")
1332 1338 % (short(p), short(i)))
1333 1339 search.append((p, i))
1334 1340 break
1335 1341 p, f = i, f * 2
1336 1342
1337 1343 # sanity check our fetch list
1338 1344 for f in fetch.keys():
1339 1345 if f in m:
1340 1346 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1341 1347
1342 1348 if base.keys() == [nullid]:
1343 1349 if force:
1344 1350 self.ui.warn(_("warning: repository is unrelated\n"))
1345 1351 else:
1346 1352 raise util.Abort(_("repository is unrelated"))
1347 1353
1348 1354 self.ui.debug(_("found new changesets starting at ") +
1349 1355 " ".join([short(f) for f in fetch]) + "\n")
1350 1356
1351 1357 self.ui.debug(_("%d total queries\n") % reqcnt)
1352 1358
1353 1359 return fetch.keys()
1354 1360
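
A usage sketch of findincoming() as documented above (remote stands for a peer repository object; illustrative):

    base = {}                                    # filled with nodes common to both sides
    fetch = repo.findincoming(remote, base=base)
    # fetch lists the roots of the changesets missing locally; pull() below
    # requests a changegroup starting from exactly these roots
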
1355 1361 def findoutgoing(self, remote, base=None, heads=None, force=False):
1356 1362 """Return list of nodes that are roots of subsets not in remote
1357 1363
1358 1364 If base dict is specified, assume that these nodes and their parents
1359 1365 exist on the remote side.
1360 1366 If a list of heads is specified, return only nodes which are heads
1361 1367 or ancestors of these heads, and return a second element which
1362 1368 contains all remote heads which get new children.
1363 1369 """
1364 1370 if base == None:
1365 1371 base = {}
1366 1372 self.findincoming(remote, base, heads, force=force)
1367 1373
1368 1374 self.ui.debug(_("common changesets up to ")
1369 1375 + " ".join(map(short, base.keys())) + "\n")
1370 1376
1371 1377 remain = dict.fromkeys(self.changelog.nodemap)
1372 1378
1373 1379 # prune everything remote has from the tree
1374 1380 del remain[nullid]
1375 1381 remove = base.keys()
1376 1382 while remove:
1377 1383 n = remove.pop(0)
1378 1384 if n in remain:
1379 1385 del remain[n]
1380 1386 for p in self.changelog.parents(n):
1381 1387 remove.append(p)
1382 1388
1383 1389 # find every node whose parents have been pruned
1384 1390 subset = []
1385 1391 # find every remote head that will get new children
1386 1392 updated_heads = {}
1387 1393 for n in remain:
1388 1394 p1, p2 = self.changelog.parents(n)
1389 1395 if p1 not in remain and p2 not in remain:
1390 1396 subset.append(n)
1391 1397 if heads:
1392 1398 if p1 in heads:
1393 1399 updated_heads[p1] = True
1394 1400 if p2 in heads:
1395 1401 updated_heads[p2] = True
1396 1402
1397 1403 # this is the set of all roots we have to push
1398 1404 if heads:
1399 1405 return subset, updated_heads.keys()
1400 1406 else:
1401 1407 return subset
1402 1408
1403 1409 def pull(self, remote, heads=None, force=False):
1404 1410 lock = self.lock()
1405 1411 try:
1406 1412 fetch = self.findincoming(remote, heads=heads, force=force)
1407 1413 if fetch == [nullid]:
1408 1414 self.ui.status(_("requesting all changes\n"))
1409 1415
1410 1416 if not fetch:
1411 1417 self.ui.status(_("no changes found\n"))
1412 1418 return 0
1413 1419
1414 1420 if heads is None:
1415 1421 cg = remote.changegroup(fetch, 'pull')
1416 1422 else:
1417 1423 if 'changegroupsubset' not in remote.capabilities:
1418 1424 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1419 1425 cg = remote.changegroupsubset(fetch, heads, 'pull')
1420 1426 return self.addchangegroup(cg, 'pull', remote.url())
1421 1427 finally:
1422 1428 del lock
1423 1429
1424 1430 def push(self, remote, force=False, revs=None):
1425 1431 # there are two ways to push to remote repo:
1426 1432 #
1427 1433 # addchangegroup assumes local user can lock remote
1428 1434 # repo (local filesystem, old ssh servers).
1429 1435 #
1430 1436 # unbundle assumes local user cannot lock remote repo (new ssh
1431 1437 # servers, http servers).
1432 1438
1433 1439 if remote.capable('unbundle'):
1434 1440 return self.push_unbundle(remote, force, revs)
1435 1441 return self.push_addchangegroup(remote, force, revs)
1436 1442
1437 1443 def prepush(self, remote, force, revs):
1438 1444 base = {}
1439 1445 remote_heads = remote.heads()
1440 1446 inc = self.findincoming(remote, base, remote_heads, force=force)
1441 1447
1442 1448 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1443 1449 if revs is not None:
1444 1450 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1445 1451 else:
1446 1452 bases, heads = update, self.changelog.heads()
1447 1453
1448 1454 if not bases:
1449 1455 self.ui.status(_("no changes found\n"))
1450 1456 return None, 1
1451 1457 elif not force:
1452 1458 # check if we're creating new remote heads
1453 1459 # to be a remote head after push, node must be either
1454 1460 # - unknown locally
1455 1461 # - a local outgoing head descended from update
1456 1462 # - a remote head that's known locally and not
1457 1463 # ancestral to an outgoing head
1458 1464
1459 1465 warn = 0
1460 1466
1461 1467 if remote_heads == [nullid]:
1462 1468 warn = 0
1463 1469 elif not revs and len(heads) > len(remote_heads):
1464 1470 warn = 1
1465 1471 else:
1466 1472 newheads = list(heads)
1467 1473 for r in remote_heads:
1468 1474 if r in self.changelog.nodemap:
1469 1475 desc = self.changelog.heads(r, heads)
1470 1476 l = [h for h in heads if h in desc]
1471 1477 if not l:
1472 1478 newheads.append(r)
1473 1479 else:
1474 1480 newheads.append(r)
1475 1481 if len(newheads) > len(remote_heads):
1476 1482 warn = 1
1477 1483
1478 1484 if warn:
1479 1485 self.ui.warn(_("abort: push creates new remote heads!\n"))
1480 1486 self.ui.status(_("(did you forget to merge?"
1481 1487 " use push -f to force)\n"))
1482 1488 return None, 0
1483 1489 elif inc:
1484 1490 self.ui.warn(_("note: unsynced remote changes!\n"))
1485 1491
1486 1492
1487 1493 if revs is None:
1488 1494 cg = self.changegroup(update, 'push')
1489 1495 else:
1490 1496 cg = self.changegroupsubset(update, revs, 'push')
1491 1497 return cg, remote_heads
1492 1498
1493 1499 def push_addchangegroup(self, remote, force, revs):
1494 1500 lock = remote.lock()
1495 1501 try:
1496 1502 ret = self.prepush(remote, force, revs)
1497 1503 if ret[0] is not None:
1498 1504 cg, remote_heads = ret
1499 1505 return remote.addchangegroup(cg, 'push', self.url())
1500 1506 return ret[1]
1501 1507 finally:
1502 1508 del lock
1503 1509
1504 1510 def push_unbundle(self, remote, force, revs):
1505 1511 # local repo finds heads on server, finds out what revs it
1506 1512 # must push. once revs transferred, if server finds it has
1507 1513 # different heads (someone else won commit/push race), server
1508 1514 # aborts.
1509 1515
1510 1516 ret = self.prepush(remote, force, revs)
1511 1517 if ret[0] is not None:
1512 1518 cg, remote_heads = ret
1513 1519 if force: remote_heads = ['force']
1514 1520 return remote.unbundle(cg, remote_heads, 'push')
1515 1521 return ret[1]
1516 1522
1517 1523 def changegroupinfo(self, nodes, source):
1518 1524 if self.ui.verbose or source == 'bundle':
1519 1525 self.ui.status(_("%d changesets found\n") % len(nodes))
1520 1526 if self.ui.debugflag:
1521 1527 self.ui.debug(_("List of changesets:\n"))
1522 1528 for node in nodes:
1523 1529 self.ui.debug("%s\n" % hex(node))
1524 1530
1525 1531 def changegroupsubset(self, bases, heads, source, extranodes=None):
1526 1532 """This function generates a changegroup consisting of all the nodes
1527 1533 that are descendents of any of the bases, and ancestors of any of
1528 1534 the heads.
1529 1535
1530 1536 It is fairly complex as determining which filenodes and which
1531 1537 manifest nodes need to be included for the changeset to be complete
1532 1538 is non-trivial.
1533 1539
1534 1540 Another wrinkle is doing the reverse, figuring out which changeset in
1535 1541 the changegroup a particular filenode or manifestnode belongs to.
1536 1542
1537 1543 The caller can specify some nodes that must be included in the
1538 1544 changegroup using the extranodes argument. It should be a dict
1539 1545 where the keys are the filenames (or 1 for the manifest), and the
1540 1546 values are lists of (node, linknode) tuples, where node is a wanted
1541 1547 node and linknode is the changelog node that should be transmitted as
1542 1548 the linkrev.
1543 1549 """
1544 1550
1545 1551 self.hook('preoutgoing', throw=True, source=source)
1546 1552
1547 1553 # Set up some initial variables
1548 1554 # Make it easy to refer to self.changelog
1549 1555 cl = self.changelog
1550 1556 # msng is short for missing - compute the list of changesets in this
1551 1557 # changegroup.
1552 1558 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1553 1559 self.changegroupinfo(msng_cl_lst, source)
1554 1560 # Some bases may turn out to be superfluous, and some heads may be
1555 1561 # too. nodesbetween will return the minimal set of bases and heads
1556 1562 # necessary to re-create the changegroup.
1557 1563
1558 1564 # Known heads are the list of heads that it is assumed the recipient
1559 1565 # of this changegroup will know about.
1560 1566 knownheads = {}
1561 1567 # We assume that all parents of bases are known heads.
1562 1568 for n in bases:
1563 1569 for p in cl.parents(n):
1564 1570 if p != nullid:
1565 1571 knownheads[p] = 1
1566 1572 knownheads = knownheads.keys()
1567 1573 if knownheads:
1568 1574 # Now that we know what heads are known, we can compute which
1569 1575 # changesets are known. The recipient must know about all
1570 1576 # changesets required to reach the known heads from the null
1571 1577 # changeset.
1572 1578 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1573 1579 junk = None
1574 1580 # Transform the list into an ersatz set.
1575 1581 has_cl_set = dict.fromkeys(has_cl_set)
1576 1582 else:
1577 1583 # If there were no known heads, the recipient cannot be assumed to
1578 1584 # know about any changesets.
1579 1585 has_cl_set = {}
1580 1586
1581 1587 # Make it easy to refer to self.manifest
1582 1588 mnfst = self.manifest
1583 1589 # We don't know which manifests are missing yet
1584 1590 msng_mnfst_set = {}
1585 1591 # Nor do we know which filenodes are missing.
1586 1592 msng_filenode_set = {}
1587 1593
1588 1594 junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
1589 1595 junk = None
1590 1596
1591 1597 # A changeset always belongs to itself, so the changenode lookup
1592 1598 # function for a changenode is identity.
1593 1599 def identity(x):
1594 1600 return x
1595 1601
1596 1602 # A function generating function. Sets up an environment for the
1597 1603 # inner function.
1598 1604 def cmp_by_rev_func(revlog):
1599 1605 # Compare two nodes by their revision number in the environment's
1600 1606 # revision history. Since the revision number both represents the
1601 1607 # most efficient order to read the nodes in, and represents a
1602 1608 # topological sorting of the nodes, this function is often useful.
1603 1609 def cmp_by_rev(a, b):
1604 1610 return cmp(revlog.rev(a), revlog.rev(b))
1605 1611 return cmp_by_rev
1606 1612
1607 1613 # If we determine that a particular file or manifest node must be a
1608 1614 # node that the recipient of the changegroup will already have, we can
1609 1615 # also assume the recipient will have all the parents. This function
1610 1616 # prunes them from the set of missing nodes.
1611 1617 def prune_parents(revlog, hasset, msngset):
1612 1618 haslst = hasset.keys()
1613 1619 haslst.sort(cmp_by_rev_func(revlog))
1614 1620 for node in haslst:
1615 1621 parentlst = [p for p in revlog.parents(node) if p != nullid]
1616 1622 while parentlst:
1617 1623 n = parentlst.pop()
1618 1624 if n not in hasset:
1619 1625 hasset[n] = 1
1620 1626 p = [p for p in revlog.parents(n) if p != nullid]
1621 1627 parentlst.extend(p)
1622 1628 for n in hasset:
1623 1629 msngset.pop(n, None)
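The pruning can be exercised against a stub revlog. Everything below is invented for illustration (a linear history a <- b <- c) and assumes prune_parents, and the cmp_by_rev_func it uses, have been lifted out of this method:

    nullid = '\x00' * 20  # same value as mercurial.node.nullid

    class FakeRevlog(object):
        # Toy three-revision history: a <- b <- c.
        _parents = {'a': (nullid, nullid), 'b': ('a', nullid),
                    'c': ('b', nullid)}
        _revs = {'a': 0, 'b': 1, 'c': 2}
        def parents(self, node):
            return self._parents[node]
        def rev(self, node):
            return self._revs[node]

    rl = FakeRevlog()
    hasset = {'c': 1}                  # recipient already has 'c'...
    msngset = {'a': 'l1', 'b': 'l2'}   # ...so its ancestors are not missing
    prune_parents(rl, hasset, msngset)
    print msngset                      # prints {}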
1624 1630
1625 1631 # This is a function generating function used to set up an environment
1626 1632 # for the inner function to execute in.
1627 1633 def manifest_and_file_collector(changedfileset):
1628 1634 # This is an information gathering function that gathers
1629 1635 # information from each changeset node that goes out as part of
1630 1636 # the changegroup. The information gathered is a list of which
1631 1637 # manifest nodes are potentially required (the recipient may
1632 1638 # already have them) and total list of all files which were
1633 1639 # changed in any changeset in the changegroup.
1634 1640 #
1635 1641 # We also remember, for each manifest, the first changenode we
1636 1642 # saw that referenced it, so we can later determine which
1637 1643 # changenode 'owns' the manifest.
1638 1644 def collect_manifests_and_files(clnode):
1639 1645 c = cl.read(clnode)
1640 1646 for f in c[3]:
1641 1647 # This makes sure we keep only a single string instance
1642 1648 # per filename.
1643 1649 changedfileset.setdefault(f, f)
1644 1650 msng_mnfst_set.setdefault(c[0], clnode)
1645 1651 return collect_manifests_and_files
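For orientation, the tuple returned by cl.read() in this era of Mercurial lays out as follows; only fields 0 and 3 are used here:

    # c = cl.read(clnode)
    #   c[0]  manifest node          c[3]  list of changed files
    #   c[1]  committer              c[4]  commit message
    #   c[2]  (time, tz offset)      c[5]  extra dict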
1646 1652
1647 1653 # Figure out which manifest nodes (of the ones we think might be part
1648 1654 # of the changegroup) the recipient must know about and remove them
1649 1655 # from the changegroup.
1650 1656 def prune_manifests():
1651 1657 has_mnfst_set = {}
1652 1658 for n in msng_mnfst_set:
1653 1659 # If a 'missing' manifest thinks it belongs to a changenode
1654 1660 # the recipient is assumed to have, obviously the recipient
1655 1661 # must have that manifest.
1656 1662 linknode = cl.node(mnfst.linkrev(n))
1657 1663 if linknode in has_cl_set:
1658 1664 has_mnfst_set[n] = 1
1659 1665 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1660 1666
1661 1667 # Use the information collected in collect_manifests_and_files to say
1662 1668 # which changenode any manifestnode belongs to.
1663 1669 def lookup_manifest_link(mnfstnode):
1664 1670 return msng_mnfst_set[mnfstnode]
1665 1671
1666 1672 # A function generating function that sets up the initial environment
1667 1673 # for the inner function.
1668 1674 def filenode_collector(changedfiles):
1669 1675 next_rev = [0]
1670 1676 # This gathers information from each manifestnode included in the
1671 1677 # changegroup about which filenodes the manifest node references
1672 1678 # so we can include those in the changegroup too.
1673 1679 #
1674 1680 # It also remembers which changenode each filenode belongs to. It
1675 1681 # does this by assuming that a filenode belongs to the same
1676 1682 # changenode as the first manifest that references it.
1677 1683 def collect_msng_filenodes(mnfstnode):
1678 1684 r = mnfst.rev(mnfstnode)
1679 1685 if r == next_rev[0]:
1680 1686 # If the last rev we looked at was the one just previous,
1681 1687 # we only need to see a diff.
1682 1688 deltamf = mnfst.readdelta(mnfstnode)
1683 1689 # For each line in the delta
1684 1690 for f, fnode in deltamf.items():
1685 1691 f = changedfiles.get(f, None)
1686 1692 # And if the file is in the list of files we care
1687 1693 # about.
1688 1694 if f is not None:
1689 1695 # Get the changenode this manifest belongs to
1690 1696 clnode = msng_mnfst_set[mnfstnode]
1691 1697 # Create the set of filenodes for the file if
1692 1698 # there isn't one already.
1693 1699 ndset = msng_filenode_set.setdefault(f, {})
1694 1700 # And set the filenode's changelog node to the
1695 1701 # manifest's if it hasn't been set already.
1696 1702 ndset.setdefault(fnode, clnode)
1697 1703 else:
1698 1704 # Otherwise we need a full manifest.
1699 1705 m = mnfst.read(mnfstnode)
1700 1706 # For every file we care about.
1701 1707 for f in changedfiles:
1702 1708 fnode = m.get(f, None)
1703 1709 # If it's in the manifest
1704 1710 if fnode is not None:
1705 1711 # See comments above.
1706 1712 clnode = msng_mnfst_set[mnfstnode]
1707 1713 ndset = msng_filenode_set.setdefault(f, {})
1708 1714 ndset.setdefault(fnode, clnode)
1709 1715 # Remember the revision we hope to see next.
1710 1716 next_rev[0] = r + 1
1711 1717 return collect_msng_filenodes
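The next_rev bookkeeping exists so that manifests visited in strict revision order can be read as deltas against their predecessor; the same decision sketched outside the closure (nodes_in_rev_order is an invented iterable):

    expected = 0
    for node in nodes_in_rev_order:
        r = mnfst.rev(node)
        if r == expected:
            entries = mnfst.readdelta(node)  # cheap: only the changed entries
        else:
            entries = mnfst.read(node)       # full manifest reconstruction
        expected = r + 1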
1712 1718
1713 1719 # We have a list of filenodes we think we need for a file; let's remove
1714 1720 # all those we know the recipient must have.
1715 1721 def prune_filenodes(f, filerevlog):
1716 1722 msngset = msng_filenode_set[f]
1717 1723 hasset = {}
1718 1724 # If a 'missing' filenode thinks it belongs to a changenode we
1719 1725 # assume the recipient must have, then the recipient must have
1720 1726 # that filenode.
1721 1727 for n in msngset:
1722 1728 clnode = cl.node(filerevlog.linkrev(n))
1723 1729 if clnode in has_cl_set:
1724 1730 hasset[n] = 1
1725 1731 prune_parents(filerevlog, hasset, msngset)
1726 1732
1727 1733 # A function generating function that sets up a context for the
1728 1734 # inner function.
1729 1735 def lookup_filenode_link_func(fname):
1730 1736 msngset = msng_filenode_set[fname]
1731 1737 # Lookup the changenode the filenode belongs to.
1732 1738 def lookup_filenode_link(fnode):
1733 1739 return msngset[fnode]
1734 1740 return lookup_filenode_link
1735 1741
1736 1742 # Add the nodes that were explicitly requested.
1737 1743 def add_extra_nodes(name, nodes):
1738 1744 if not extranodes or name not in extranodes:
1739 1745 return
1740 1746
1741 1747 for node, linknode in extranodes[name]:
1742 1748 if node not in nodes:
1743 1749 nodes[node] = linknode
1744 1750
1745 1751 # Now that we have all these utility functions to help out and
1746 1752 # logically divide up the task, generate the group.
1747 1753 def gengroup():
1748 1754 # The set of changed files starts empty.
1749 1755 changedfiles = {}
1750 1756 # Create a changenode group generator that will call our functions
1751 1757 # back to lookup the owning changenode and collect information.
1752 1758 group = cl.group(msng_cl_lst, identity,
1753 1759 manifest_and_file_collector(changedfiles))
1754 1760 for chnk in group:
1755 1761 yield chnk
1756 1762
1757 1763 # The list of manifests has been collected by the generator
1758 1764 # calling our functions back.
1759 1765 prune_manifests()
1760 1766 add_extra_nodes(1, msng_mnfst_set)
1761 1767 msng_mnfst_lst = msng_mnfst_set.keys()
1762 1768 # Sort the manifestnodes by revision number.
1763 1769 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1764 1770 # Create a generator for the manifestnodes that calls our lookup
1765 1771 # and data collection functions back.
1766 1772 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1767 1773 filenode_collector(changedfiles))
1768 1774 for chnk in group:
1769 1775 yield chnk
1770 1776
1771 1777 # These are no longer needed, dereference and toss the memory for
1772 1778 # them.
1773 1779 msng_mnfst_lst = None
1774 1780 msng_mnfst_set.clear()
1775 1781
1776 1782 if extranodes:
1777 1783 for fname in extranodes:
1778 1784 if isinstance(fname, int):
1779 1785 continue
1780 1786 add_extra_nodes(fname,
1781 1787 msng_filenode_set.setdefault(fname, {}))
1782 1788 changedfiles[fname] = 1
1783 1789 # Go through all our files in order sorted by name.
1784 1790 for fname in util.sort(changedfiles):
1785 1791 filerevlog = self.file(fname)
1786 1792 if not len(filerevlog):
1787 1793 raise util.Abort(_("empty or missing revlog for %s") % fname)
1788 1794 # Toss out the filenodes that the recipient isn't really
1789 1795 # missing.
1790 1796 if fname in msng_filenode_set:
1791 1797 prune_filenodes(fname, filerevlog)
1792 1798 msng_filenode_lst = msng_filenode_set[fname].keys()
1793 1799 else:
1794 1800 msng_filenode_lst = []
1795 1801 # If any filenodes are left, generate the group for them,
1796 1802 # otherwise don't bother.
1797 1803 if len(msng_filenode_lst) > 0:
1798 1804 yield changegroup.chunkheader(len(fname))
1799 1805 yield fname
1800 1806 # Sort the filenodes by their revision #
1801 1807 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1802 1808 # Create a group generator and only pass in a changenode
1803 1809 # lookup function, since we don't need to collect any information
1804 1810 # from filenodes.
1805 1811 group = filerevlog.group(msng_filenode_lst,
1806 1812 lookup_filenode_link_func(fname))
1807 1813 for chnk in group:
1808 1814 yield chnk
1809 1815 if fname in msng_filenode_set:
1810 1816 # Don't need this anymore, toss it to free memory.
1811 1817 del msng_filenode_set[fname]
1812 1818 # Signal that no more groups are left.
1813 1819 yield changegroup.closechunk()
1814 1820
1815 1821 if msng_cl_lst:
1816 1822 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1817 1823
1818 1824 return util.chunkbuffer(gengroup())
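What util.chunkbuffer wraps here is the classic changegroup stream. A hedged summary of the framing, plus a minimal reader sketch (fp is any file-like object):

    # Framing, roughly: each chunk is a 4-byte big-endian length (which
    # counts the length field itself) followed by the payload; a length
    # of <= 4 ends the current group.
    #
    #   changelog chunks ... 0
    #   manifest chunks ... 0
    #   per changed file: chunk(filename), then filelog chunks ... 0
    #   0   <- final empty chunk from closechunk()

    import struct
    def readchunk(fp):
        l = struct.unpack(">l", fp.read(4))[0]
        if l <= 4:
            return ""        # empty chunk: end of group
        return fp.read(l - 4)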
1819 1825
1820 1826 def changegroup(self, basenodes, source):
1821 1827 """Generate a changegroup of all nodes that we have that a recipient
1822 1828 doesn't.
1823 1829
1824 1830 This is much easier than the previous function as we can assume that
1825 1831 the recipient has any changenode we aren't sending them."""
1826 1832
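A hypothetical round trip pairing this with addchangegroup below; repo and other stand for two localrepository instances, base for a node both sides already share, and the url is purely illustrative:

    cg = repo.changegroup([base], 'push')
    other.addchangegroup(cg, 'push', 'file:///tmp/src')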
1827 1833 self.hook('preoutgoing', throw=True, source=source)
1828 1834
1829 1835 cl = self.changelog
1830 1836 nodes = cl.nodesbetween(basenodes, None)[0]
1831 1837 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1832 1838 self.changegroupinfo(nodes, source)
1833 1839
1834 1840 def identity(x):
1835 1841 return x
1836 1842
1837 1843 def gennodelst(log):
1838 1844 for r in log:
1839 1845 n = log.node(r)
1840 1846 if log.linkrev(n) in revset:
1841 1847 yield n
1842 1848
1843 1849 def changed_file_collector(changedfileset):
1844 1850 def collect_changed_files(clnode):
1845 1851 c = cl.read(clnode)
1846 1852 for fname in c[3]:
1847 1853 changedfileset[fname] = 1
1848 1854 return collect_changed_files
1849 1855
1850 1856 def lookuprevlink_func(revlog):
1851 1857 def lookuprevlink(n):
1852 1858 return cl.node(revlog.linkrev(n))
1853 1859 return lookuprevlink
1854 1860
1855 1861 def gengroup():
1856 1862 # construct a list of all changed files
1857 1863 changedfiles = {}
1858 1864
1859 1865 for chnk in cl.group(nodes, identity,
1860 1866 changed_file_collector(changedfiles)):
1861 1867 yield chnk
1862 1868
1863 1869 mnfst = self.manifest
1864 1870 nodeiter = gennodelst(mnfst)
1865 1871 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1866 1872 yield chnk
1867 1873
1868 1874 for fname in util.sort(changedfiles):
1869 1875 filerevlog = self.file(fname)
1870 1876 if not len(filerevlog):
1871 1877 raise util.Abort(_("empty or missing revlog for %s") % fname)
1872 1878 nodeiter = gennodelst(filerevlog)
1873 1879 nodeiter = list(nodeiter)
1874 1880 if nodeiter:
1875 1881 yield changegroup.chunkheader(len(fname))
1876 1882 yield fname
1877 1883 lookup = lookuprevlink_func(filerevlog)
1878 1884 for chnk in filerevlog.group(nodeiter, lookup):
1879 1885 yield chnk
1880 1886
1881 1887 yield changegroup.closechunk()
1882 1888
1883 1889 if nodes:
1884 1890 self.hook('outgoing', node=hex(nodes[0]), source=source)
1885 1891
1886 1892 return util.chunkbuffer(gengroup())
1887 1893
1888 1894 def addchangegroup(self, source, srctype, url, emptyok=False):
1889 1895 """add changegroup to repo.
1890 1896
1891 1897 return values:
1892 1898 - nothing changed or no source: 0
1893 1899 - more heads than before: 1+added heads (2..n)
1894 1900 - fewer heads than before: -1-removed heads (-2..-n)
1895 1901 - number of heads stays the same: 1
1896 1902 """
1897 1903 def csmap(x):
1898 1904 self.ui.debug(_("add changeset %s\n") % short(x))
1899 1905 return len(cl)
1900 1906
1901 1907 def revmap(x):
1902 1908 return cl.rev(x)
1903 1909
1904 1910 if not source:
1905 1911 return 0
1906 1912
1907 1913 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1908 1914
1909 1915 changesets = files = revisions = 0
1910 1916
1911 1917 # write changelog data to temp files so concurrent readers will not see
1912 1918 # an inconsistent view
1913 1919 cl = self.changelog
1914 1920 cl.delayupdate()
1915 1921 oldheads = len(cl.heads())
1916 1922
1917 1923 tr = self.transaction()
1918 1924 try:
1919 1925 trp = weakref.proxy(tr)
1920 1926 # pull off the changeset group
1921 1927 self.ui.status(_("adding changesets\n"))
1922 1928 cor = len(cl) - 1
1923 1929 chunkiter = changegroup.chunkiter(source)
1924 1930 if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
1925 1931 raise util.Abort(_("received changelog group is empty"))
1926 1932 cnr = len(cl) - 1
1927 1933 changesets = cnr - cor
1928 1934
1929 1935 # pull off the manifest group
1930 1936 self.ui.status(_("adding manifests\n"))
1931 1937 chunkiter = changegroup.chunkiter(source)
1932 1938 # no need to check for empty manifest group here:
1933 1939 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1934 1940 # no new manifest will be created and the manifest group will
1935 1941 # be empty during the pull
1936 1942 self.manifest.addgroup(chunkiter, revmap, trp)
1937 1943
1938 1944 # process the files
1939 1945 self.ui.status(_("adding file changes\n"))
1940 1946 while 1:
1941 1947 f = changegroup.getchunk(source)
1942 1948 if not f:
1943 1949 break
1944 1950 self.ui.debug(_("adding %s revisions\n") % f)
1945 1951 fl = self.file(f)
1946 1952 o = len(fl)
1947 1953 chunkiter = changegroup.chunkiter(source)
1948 1954 if fl.addgroup(chunkiter, revmap, trp) is None:
1949 1955 raise util.Abort(_("received file revlog group is empty"))
1950 1956 revisions += len(fl) - o
1951 1957 files += 1
1952 1958
1953 1959 # make changelog see real files again
1954 1960 cl.finalize(trp)
1955 1961
1956 1962 newheads = len(self.changelog.heads())
1957 1963 heads = ""
1958 1964 if oldheads and newheads != oldheads:
1959 1965 heads = _(" (%+d heads)") % (newheads - oldheads)
1960 1966
1961 1967 self.ui.status(_("added %d changesets"
1962 1968 " with %d changes to %d files%s\n")
1963 1969 % (changesets, revisions, files, heads))
1964 1970
1965 1971 if changesets > 0:
1966 1972 self.hook('pretxnchangegroup', throw=True,
1967 1973 node=hex(self.changelog.node(cor+1)), source=srctype,
1968 1974 url=url)
1969 1975
1970 1976 tr.close()
1971 1977 finally:
1972 1978 del tr
1973 1979
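The weakref.proxy dance above deserves a note: callees are handed only a weak proxy, so the del tr in the finally drops the last strong reference, and if close() was never reached the transaction's destructor can roll the journal back. The idiom in miniature (do_work is invented):

    tr = self.transaction()
    try:
        trp = weakref.proxy(tr)
        do_work(trp)   # callees hold the proxy, never the transaction
        tr.close()     # commit
    finally:
        del tr         # uncommitted work is aborted by the destructor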
1974 1980 if changesets > 0:
1975 1981 # forcefully update the on-disk branch cache
1976 1982 self.ui.debug(_("updating the branch cache\n"))
1977 1983 self.branchtags()
1978 1984 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1979 1985 source=srctype, url=url)
1980 1986
1981 1987 for i in xrange(cor + 1, cnr + 1):
1982 1988 self.hook("incoming", node=hex(self.changelog.node(i)),
1983 1989 source=srctype, url=url)
1984 1990
1985 1991 # never return 0 here:
1986 1992 if newheads < oldheads:
1987 1993 return newheads - oldheads - 1
1988 1994 else:
1989 1995 return newheads - oldheads + 1
1990 1996
1991 1997
1992 1998 def stream_in(self, remote):
1993 1999 fp = remote.stream_out()
1994 2000 l = fp.readline()
1995 2001 try:
1996 2002 resp = int(l)
1997 2003 except ValueError:
1998 2004 raise util.UnexpectedOutput(
1999 2005 _('Unexpected response from remote server:'), l)
2000 2006 if resp == 1:
2001 2007 raise util.Abort(_('operation forbidden by server'))
2002 2008 elif resp == 2:
2003 2009 raise util.Abort(_('locking the remote repository failed'))
2004 2010 elif resp != 0:
2005 2011 raise util.Abort(_('the server sent an unknown error code'))
2006 2012 self.ui.status(_('streaming all changes\n'))
2007 2013 l = fp.readline()
2008 2014 try:
2009 2015 total_files, total_bytes = map(int, l.split(' ', 1))
2010 2016 except (ValueError, TypeError):
2011 2017 raise util.UnexpectedOutput(
2012 2018 _('Unexpected response from remote server:'), l)
2013 2019 self.ui.status(_('%d files to transfer, %s of data\n') %
2014 2020 (total_files, util.bytecount(total_bytes)))
2015 2021 start = time.time()
2016 2022 for i in xrange(total_files):
2017 2023 # XXX doesn't support '\n' or '\r' in filenames
2018 2024 l = fp.readline()
2019 2025 try:
2020 2026 name, size = l.split('\0', 1)
2021 2027 size = int(size)
2022 2028 except (ValueError, TypeError):
2023 2029 raise util.UnexpectedOutput(
2024 2030 _('Unexpected response from remote server:'), l)
2025 2031 self.ui.debug(_('adding %s (%s)\n') % (name, util.bytecount(size)))
2026 2032 ofp = self.sopener(name, 'w')
2027 2033 for chunk in util.filechunkiter(fp, limit=size):
2028 2034 ofp.write(chunk)
2029 2035 ofp.close()
2030 2036 elapsed = time.time() - start
2031 2037 if elapsed <= 0:
2032 2038 elapsed = 0.001
2033 2039 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2034 2040 (util.bytecount(total_bytes), elapsed,
2035 2041 util.bytecount(total_bytes / elapsed)))
2036 2042 self.invalidate()
2037 2043 return len(self.heads()) + 1
2038 2044
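For reference, the stream_out response this method consumes, reconstructed from the parsing above (a sketch, not a spec):

    # line 1: status code ("0" ok, "1" forbidden, "2" lock failed)
    # line 2: "<total_files> <total_bytes>"
    # then, for each file:
    #   "<store path>\0<size>\n" followed by exactly <size> raw bytes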
2039 2045 def clone(self, remote, heads=[], stream=False):
2040 2046 '''clone remote repository.
2041 2047
2042 2048 keyword arguments:
2043 2049 heads: list of revs to clone (forces use of pull)
2044 2050 stream: use streaming clone if possible'''
2045 2051
2046 2052 # now, all clients that can request uncompressed clones can
2047 2053 # read repo formats supported by all servers that can serve
2048 2054 # them.
2049 2055
2050 2056 # if revlog format changes, client will have to check version
2051 2057 # and format flags on "stream" capability, and use
2052 2058 # uncompressed only if compatible.
2053 2059
2054 2060 if stream and not heads and remote.capable('stream'):
2055 2061 return self.stream_in(remote)
2056 2062 return self.pull(remote, heads)
2057 2063
2058 2064 # used to avoid circular references so destructors work
2059 2065 def aftertrans(files):
2060 2066 renamefiles = [tuple(t) for t in files]
2061 2067 def a():
2062 2068 for src, dest in renamefiles:
2063 2069 util.rename(src, dest)
2064 2070 return a
2065 2071
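aftertrans closes over plain tuples rather than the repository, so a pending transaction never keeps the repo object alive; a hypothetical use, with the journal/undo names shown only for illustration:

    onclose = aftertrans([('journal', 'undo')])
    # later, once the transaction commits:
    onclose()   # performs util.rename('journal', 'undo')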
2066 2072 def instance(ui, path, create):
2067 2073 return localrepository(ui, util.drop_scheme('file', path), create)
2068 2074
2069 2075 def islocal(path):
2070 2076 return True