##// END OF EJS Templates
Add a features list to branches.cache to detect caches of old hg versions....
Thomas Arendsen Hein -
r4168:bbfe5a3f default
parent child Browse files
Show More
@@ -1,1990 +1,2008
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import *
9 9 from i18n import gettext as _
10 10 from demandload import *
11 11 import repo
12 12 demandload(globals(), "appendfile changegroup")
13 13 demandload(globals(), "changelog dirstate filelog manifest context")
14 14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
15 15 demandload(globals(), "os revlog time util")
16 16
class localrepository(repo.repository):
    # Wire-protocol capabilities advertised by this repository to peers.
    capabilities = ('lookup', 'changegroupsubset')
    # On-disk requirements (from .hg/requires) this code can handle.
    supported = ('revlogv1', 'store')
    # Feature tags expected in branches.cache; a cache file lacking any of
    # these (e.g. one written by an older hg) is discarded on read.
    branchcache_features = ('unnamed',)
20 21
    def __del__(self):
        # Drop the transaction handle on destruction to break the
        # repo <-> transaction reference cycle.
        self.transhandle = None
    def __init__(self, parentui, path=None, create=0):
        """Open (or, with create=1, initialize) the repository at path.

        If path is empty, search upward from the current directory for a
        .hg directory.  Raises repo.RepoError when no repository is found,
        when creating over an existing one, or when .hg/requires lists a
        requirement this version does not support.
        """
        repo.repository.__init__(self)
        if not path:
            # walk up the directory tree looking for .hg
            p = os.getcwd()
            while not os.path.isdir(os.path.join(p, ".hg")):
                oldp = p
                p = os.path.dirname(p)
                if p == oldp:
                    raise repo.RepoError(_("There is no Mercurial repository"
                                           " here (.hg not found)"))
            path = p

        self.path = os.path.join(path, ".hg")
        self.root = os.path.realpath(path)
        self.origroot = path
        self.opener = util.opener(self.path)
        self.wopener = util.opener(self.root)

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    os.mkdir(path)
                os.mkdir(self.path)
                os.mkdir(os.path.join(self.path, "store"))
                requirements = ("revlogv1", "store")
                reqfile = self.opener("requires", "w")
                for r in requirements:
                    reqfile.write("%s\n" % r)
                reqfile.close()
                # create an invalid changelog
                self.opener("00changelog.i", "a").write(
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                )
            else:
                raise repo.RepoError(_("repository %s not found") % path)
        elif create:
            raise repo.RepoError(_("repository %s already exists") % path)
        else:
            # find requirements
            try:
                requirements = self.opener("requires").read().splitlines()
            except IOError, inst:
                # a missing requires file means an old-style repo: no
                # requirements at all
                if inst.errno != errno.ENOENT:
                    raise
                requirements = []
            # check them
            for r in requirements:
                if r not in self.supported:
                    raise repo.RepoError(_("requirement '%s' not supported") % r)

        # setup store: with the "store" requirement, revlogs live under
        # .hg/store with encoded filenames; otherwise directly under .hg
        if "store" in requirements:
            self.encodefn = util.encodefilename
            self.decodefn = util.decodefilename
            self.spath = os.path.join(self.path, "store")
        else:
            self.encodefn = lambda x: x
            self.decodefn = lambda x: x
            self.spath = self.path
        self.sopener = util.encodedopener(util.opener(self.spath), self.encodefn)

        self.ui = ui.ui(parentui=parentui)
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
        except IOError:
            # no per-repo hgrc is fine
            pass

        # resolve the revlog format/flags from configuration
        v = self.ui.configrevlog()
        self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
        self.revlogv1 = self.revlogversion != revlog.REVLOGV0
        fl = v.get('flags', None)
        flags = 0
        if fl != None:
            for x in fl.split():
                flags |= revlog.flagstr(x)
        elif self.revlogv1:
            flags = revlog.REVLOG_DEFAULT_FLAGS

        v = self.revlogversion | flags
        self.manifest = manifest.manifest(self.sopener, v)
        self.changelog = changelog.changelog(self.sopener, v)

        fallback = self.ui.config('ui', 'fallbackencoding')
        if fallback:
            util._fallbackencoding = fallback

        # the changelog might not have the inline index flag
        # on. If the format of the changelog is the same as found in
        # .hgrc, apply any flags found in the .hgrc as well.
        # Otherwise, just version from the changelog
        v = self.changelog.version
        if v == self.revlogversion:
            v |= flags
        self.revlogversion = v

        # lazily-populated caches (see tags()/branchtags()/wread()/wwrite())
        self.tagscache = None
        self.branchcache = None
        self.nodetagscache = None
        self.encodepats = None
        self.decodepats = None
        self.transhandle = None

        self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
127 128
128 129 def url(self):
129 130 return 'file:' + self.root
130 131
    def hook(self, name, throw=False, **args):
        """Run all configured hooks named `name` (or `name.suffix`).

        Python hooks (``python:mod.func``) and shell hooks are both
        supported.  Extra keyword args are passed to python hooks directly
        and exported to shell hooks as HG_* environment variables.  Returns
        the OR of the hooks' failure statuses; with throw=True a failing
        hook raises util.Abort instead.
        """
        def callhook(hname, funcname):
            '''call python hook. hook is callable object, looked up as
            name in python module. if callable returns "true", hook
            fails, else passes. if hook raises exception, treated as
            hook failure. exception propagates if throw is "true".

            reason for "true" meaning "hook failed" is so that
            unmodified commands (e.g. mercurial.commands.update) can
            be run as hooks without wrappers to convert return values.'''

            self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
            d = funcname.rfind('.')
            if d == -1:
                raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
                                 % (hname, funcname))
            modname = funcname[:d]
            try:
                obj = __import__(modname)
            except ImportError:
                try:
                    # extensions are loaded with hgext_ prefix
                    obj = __import__("hgext_%s" % modname)
                except ImportError:
                    raise util.Abort(_('%s hook is invalid '
                                       '(import of "%s" failed)') %
                                     (hname, modname))
            # walk the dotted path down to the callable
            try:
                for p in funcname.split('.')[1:]:
                    obj = getattr(obj, p)
            except AttributeError, err:
                raise util.Abort(_('%s hook is invalid '
                                   '("%s" is not defined)') %
                                 (hname, funcname))
            if not callable(obj):
                raise util.Abort(_('%s hook is invalid '
                                   '("%s" is not callable)') %
                                 (hname, funcname))
            try:
                r = obj(ui=self.ui, repo=self, hooktype=name, **args)
            except (KeyboardInterrupt, util.SignalInterrupt):
                # never swallow user interrupts
                raise
            except Exception, exc:
                if isinstance(exc, util.Abort):
                    self.ui.warn(_('error: %s hook failed: %s\n') %
                                 (hname, exc.args[0]))
                else:
                    self.ui.warn(_('error: %s hook raised an exception: '
                                   '%s\n') % (hname, exc))
                if throw:
                    raise
                self.ui.print_exc()
                return True
            if r:
                if throw:
                    raise util.Abort(_('%s hook failed') % hname)
                self.ui.warn(_('warning: %s hook failed\n') % hname)
            return r

        def runhook(name, cmd):
            # shell hook: args become HG_* environment variables
            self.ui.note(_("running hook %s: %s\n") % (name, cmd))
            env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
            r = util.system(cmd, environ=env, cwd=self.root)
            if r:
                desc, r = util.explain_exit(r)
                if throw:
                    raise util.Abort(_('%s hook %s') % (name, desc))
                self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
            return r

        r = False
        # run matching hooks in sorted (deterministic) order
        hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
                 if hname.split(".", 1)[0] == name and cmd]
        hooks.sort()
        for hname, cmd in hooks:
            if cmd.startswith('python:'):
                r = callhook(hname, cmd[7:].strip()) or r
            else:
                r = runhook(hname, cmd) or r
        return r
211 212
    # characters that may never appear in a tag name
    tag_disallowed = ':\r\n'

    def tag(self, name, node, message, local, user, date):
        '''tag a revision with a symbolic name.

        if local is True, the tag is stored in a per-repository file.
        otherwise, it is stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tag in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        for c in self.tag_disallowed:
            if c in name:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)

        if local:
            # local tags are stored in the current charset
            self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
            self.hook('tag', node=hex(node), tag=name, local=local)
            return

        # refuse to commit a tag while .hgtags has uncommitted changes
        for x in self.status()[:5]:
            if '.hgtags' in x:
                raise util.Abort(_('working copy of .hgtags is changed '
                                   '(please commit .hgtags manually)'))

        # committed tags are stored in UTF-8
        line = '%s %s\n' % (hex(node), util.fromlocal(name))
        self.wfile('.hgtags', 'ab').write(line)
        if self.dirstate.state('.hgtags') == '?':
            self.add(['.hgtags'])

        self.commit(['.hgtags'], message, user, date)
        self.hook('tag', node=hex(node), tag=name, local=local)
257 258
258 259 def tags(self):
259 260 '''return a mapping of tag to node'''
260 261 if not self.tagscache:
261 262 self.tagscache = {}
262 263
263 264 def parsetag(line, context):
264 265 if not line:
265 266 return
266 267 s = l.split(" ", 1)
267 268 if len(s) != 2:
268 269 self.ui.warn(_("%s: cannot parse entry\n") % context)
269 270 return
270 271 node, key = s
271 272 key = util.tolocal(key.strip()) # stored in UTF-8
272 273 try:
273 274 bin_n = bin(node)
274 275 except TypeError:
275 276 self.ui.warn(_("%s: node '%s' is not well formed\n") %
276 277 (context, node))
277 278 return
278 279 if bin_n not in self.changelog.nodemap:
279 280 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
280 281 (context, key))
281 282 return
282 283 self.tagscache[key] = bin_n
283 284
284 285 # read the tags file from each head, ending with the tip,
285 286 # and add each tag found to the map, with "newer" ones
286 287 # taking precedence
287 288 f = None
288 289 for rev, node, fnode in self._hgtagsnodes():
289 290 f = (f and f.filectx(fnode) or
290 291 self.filectx('.hgtags', fileid=fnode))
291 292 count = 0
292 293 for l in f.data().splitlines():
293 294 count += 1
294 295 parsetag(l, _("%s, line %d") % (str(f), count))
295 296
296 297 try:
297 298 f = self.opener("localtags")
298 299 count = 0
299 300 for l in f:
300 301 # localtags are stored in the local character set
301 302 # while the internal tag table is stored in UTF-8
302 303 l = util.fromlocal(l)
303 304 count += 1
304 305 parsetag(l, _("localtags, line %d") % count)
305 306 except IOError:
306 307 pass
307 308
308 309 self.tagscache['tip'] = self.changelog.tip()
309 310
310 311 return self.tagscache
311 312
312 313 def _hgtagsnodes(self):
313 314 heads = self.heads()
314 315 heads.reverse()
315 316 last = {}
316 317 ret = []
317 318 for node in heads:
318 319 c = self.changectx(node)
319 320 rev = c.rev()
320 321 try:
321 322 fnode = c.filenode('.hgtags')
322 323 except repo.LookupError:
323 324 continue
324 325 ret.append((rev, node, fnode))
325 326 if fnode in last:
326 327 ret[last[fnode]] = None
327 328 last[fnode] = len(ret) - 1
328 329 return [item for item in ret if item]
329 330
330 331 def tagslist(self):
331 332 '''return a list of tags ordered by revision'''
332 333 l = []
333 334 for t, n in self.tags().items():
334 335 try:
335 336 r = self.changelog.rev(n)
336 337 except:
337 338 r = -2 # sort to the beginning of the list if unknown
338 339 l.append((r, t, n))
339 340 l.sort()
340 341 return [(t, n) for r, t, n in l]
341 342
342 343 def nodetags(self, node):
343 344 '''return the tags associated with a node'''
344 345 if not self.nodetagscache:
345 346 self.nodetagscache = {}
346 347 for t, n in self.tags().items():
347 348 self.nodetagscache.setdefault(n, []).append(t)
348 349 return self.nodetagscache.get(node, [])
349 350
350 351 def _branchtags(self):
351 352 partial, last, lrev = self._readbranchcache()
352 353
353 354 tiprev = self.changelog.count() - 1
354 355 if lrev != tiprev:
355 356 self._updatebranchcache(partial, lrev+1, tiprev+1)
356 357 self._writebranchcache(partial, self.changelog.tip(), tiprev)
357 358
358 359 return partial
359 360
360 361 def branchtags(self):
361 362 if self.branchcache is not None:
362 363 return self.branchcache
363 364
364 365 self.branchcache = {} # avoid recursion in changectx
365 366 partial = self._branchtags()
366 367
367 368 # the branch cache is stored on disk as UTF-8, but in the local
368 369 # charset internally
369 370 for k, v in partial.items():
370 371 self.branchcache[util.tolocal(k)] = v
371 372 return self.branchcache
372 373
    def _readbranchcache(self):
        """Read branches.cache from disk.

        Returns (partial, last, lrev) where partial maps branch name to
        node, and last/lrev are the tip node/rev the cache was valid for.
        Any parse problem, feature mismatch, or stale tip invalidates the
        whole cache: ({}, nullid, nullrev) is returned instead.
        """
        partial = {}
        try:
            f = self.opener("branches.cache")
            lines = f.read().split('\n')
            f.close()
            # first line must declare the cache's feature set; caches from
            # older hg versions (no such line) are rejected here
            features = lines.pop(0).strip()
            if not features.startswith('features: '):
                raise ValueError(_('branch cache: no features specified'))
            features = features.split(' ', 1)[1].split()
            missing_features = []
            for feature in self.branchcache_features:
                try:
                    features.remove(feature)
                except ValueError, inst:
                    missing_features.append(feature)
            if missing_features:
                raise ValueError(_('branch cache: missing features: %s')
                                 % ', '.join(missing_features))
            # leftover features mean a newer hg wrote this cache
            if features:
                raise ValueError(_('branch cache: unknown features: %s')
                                 % ', '.join(features))
            # second line: "tiphex tiprev" the cache was computed against
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if not (lrev < self.changelog.count() and
                    self.changelog.node(lrev) == last): # sanity check
                # invalidate the cache
                raise ValueError('Invalid branch cache: unknown tip')
            # remaining lines: "nodehex branchname"
            for l in lines:
                if not l: continue
                node, label = l.split(" ", 1)
                partial[label.strip()] = bin(node)
        except (KeyboardInterrupt, util.SignalInterrupt):
            raise
        except Exception, inst:
            # any other failure just means "no usable cache"
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev
396 413
397 414 def _writebranchcache(self, branches, tip, tiprev):
398 415 try:
399 416 f = self.opener("branches.cache", "w")
417 f.write(" features: %s\n" % ' '.join(self.branchcache_features))
400 418 f.write("%s %s\n" % (hex(tip), tiprev))
401 419 for label, node in branches.iteritems():
402 420 f.write("%s %s\n" % (hex(node), label))
403 421 except IOError:
404 422 pass
405 423
406 424 def _updatebranchcache(self, partial, start, end):
407 425 for r in xrange(start, end):
408 426 c = self.changectx(r)
409 427 b = c.branch()
410 428 partial[b] = c.node()
411 429
    def lookup(self, key):
        """Resolve a revision identifier to a binary node.

        Resolution order: '.' (working dir parent), 'null', exact
        rev/node match, tag name, branch name, then unambiguous node
        prefix.  Raises repo.RepoError if nothing matches.
        """
        if key == '.':
            key = self.dirstate.parents()[0]
            if key == nullid:
                raise repo.RepoError(_("no revision checked out"))
        elif key == 'null':
            return nullid
        n = self.changelog._match(key)
        if n:
            return n
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        n = self.changelog._partialmatch(key)
        if n:
            return n
        raise repo.RepoError(_("unknown revision '%s'") % key)
430 448
    def dev(self):
        # device number of .hg (lstat: do not follow a symlink)
        return os.lstat(self.path).st_dev

    def local(self):
        # this is a local (on-disk) repository, not a remote proxy
        return True

    def join(self, f):
        # path of f inside .hg
        return os.path.join(self.path, f)

    def sjoin(self, f):
        # path of f inside the store, with filename encoding applied
        f = self.encodefn(f)
        return os.path.join(self.spath, f)

    def wjoin(self, f):
        # path of f inside the working directory
        return os.path.join(self.root, f)
446 464
447 465 def file(self, f):
448 466 if f[0] == '/':
449 467 f = f[1:]
450 468 return filelog.filelog(self.sopener, f, self.revlogversion)
451 469
    def changectx(self, changeid=None):
        # context object for the given changeset (default: tip-side default
        # chosen by context.changectx)
        return context.changectx(self, changeid)

    def workingctx(self):
        # context object for the working directory
        return context.workingctx(self)
457 475
458 476 def parents(self, changeid=None):
459 477 '''
460 478 get list of changectxs for parents of changeid or working directory
461 479 '''
462 480 if changeid is None:
463 481 pl = self.dirstate.parents()
464 482 else:
465 483 n = self.changelog.lookup(changeid)
466 484 pl = self.changelog.parents(n)
467 485 if pl[1] == nullid:
468 486 return [self.changectx(pl[0])]
469 487 return [self.changectx(pl[0]), self.changectx(pl[1])]
470 488
    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        # current directory relative to the repo root (per dirstate)
        return self.dirstate.getcwd()

    def wfile(self, f, mode='r'):
        # open f in the working directory
        return self.wopener(f, mode)
481 499
482 500 def wread(self, filename):
483 501 if self.encodepats == None:
484 502 l = []
485 503 for pat, cmd in self.ui.configitems("encode"):
486 504 mf = util.matcher(self.root, "", [pat], [], [])[1]
487 505 l.append((mf, cmd))
488 506 self.encodepats = l
489 507
490 508 data = self.wopener(filename, 'r').read()
491 509
492 510 for mf, cmd in self.encodepats:
493 511 if mf(filename):
494 512 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
495 513 data = util.filter(data, cmd)
496 514 break
497 515
498 516 return data
499 517
500 518 def wwrite(self, filename, data, fd=None):
501 519 if self.decodepats == None:
502 520 l = []
503 521 for pat, cmd in self.ui.configitems("decode"):
504 522 mf = util.matcher(self.root, "", [pat], [], [])[1]
505 523 l.append((mf, cmd))
506 524 self.decodepats = l
507 525
508 526 for mf, cmd in self.decodepats:
509 527 if mf(filename):
510 528 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
511 529 data = util.filter(data, cmd)
512 530 break
513 531
514 532 if fd:
515 533 return fd.write(data)
516 534 return self.wopener(filename, 'w').write(data)
517 535
    def transaction(self):
        """Start (or nest into) a transaction on the store.

        If a transaction is already running, return a nested handle.
        Otherwise snapshot the dirstate for rollback, and create a
        journal-backed transaction whose journal files are renamed to
        undo files on successful close.
        """
        tr = self.transhandle
        if tr != None and tr.running():
            # nested transaction
            return tr.nest()

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            # no dirstate yet (fresh repo)
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)

        # on close, journal files become undo files for 'hg rollback'
        renames = [(self.sjoin("journal"), self.sjoin("undo")),
                   (self.join("journal.dirstate"), self.join("undo.dirstate"))]
        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames))
        self.transhandle = tr
        return tr
537 555
538 556 def recover(self):
539 557 l = self.lock()
540 558 if os.path.exists(self.sjoin("journal")):
541 559 self.ui.status(_("rolling back interrupted transaction\n"))
542 560 transaction.rollback(self.sopener, self.sjoin("journal"))
543 561 self.reload()
544 562 return True
545 563 else:
546 564 self.ui.warn(_("no interrupted transaction available\n"))
547 565 return False
548 566
    def rollback(self, wlock=None):
        """Undo the last transaction using the undo files, restoring the
        saved dirstate as well.  Takes the working-dir lock (unless one
        is passed in) and the store lock, in that order."""
        if not wlock:
            wlock = self.wlock()
        l = self.lock()
        if os.path.exists(self.sjoin("undo")):
            self.ui.status(_("rolling back last transaction\n"))
            transaction.rollback(self.sopener, self.sjoin("undo"))
            util.rename(self.join("undo.dirstate"), self.join("dirstate"))
            # refresh in-memory state to match the rolled-back store
            self.reload()
            self.wreload()
        else:
            self.ui.warn(_("no rollback information available\n"))
561 579
    def wreload(self):
        # re-read the dirstate from disk
        self.dirstate.read()

    def reload(self):
        # re-read changelog/manifest and drop tag caches derived from them
        self.changelog.load()
        self.manifest.load()
        self.tagscache = None
        self.nodetagscache = None
570 588
    def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
                desc=None):
        """Acquire the lock file `lockname`.

        With wait true, retries with a timeout (ui.timeout, default 600s)
        when the lock is already held; otherwise lock.LockHeld propagates.
        acquirefn, if given, runs after the lock is obtained (used to
        refresh state that another process may have changed).
        """
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except lock.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

    def lock(self, wait=1):
        # store lock; reload store state on acquisition
        return self.do_lock(self.sjoin("lock"), wait, acquirefn=self.reload,
                            desc=_('repository %s') % self.origroot)

    def wlock(self, wait=1):
        # working-dir lock; writes dirstate on release, reloads it on acquire
        return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
                            self.wreload,
                            desc=_('working directory of %s') % self.origroot)
595 613
    def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
        """
        commit an individual file as part of a larger transaction

        Returns the new filenode.  If the file is unchanged relative to
        its parent, the existing filenode is returned and nothing is
        written; otherwise fn is appended to changelist.
        """

        t = self.wread(fn)
        fl = self.file(fn)
        fp1 = manifest1.get(fn, nullid)
        fp2 = manifest2.get(fn, nullid)

        meta = {}
        cp = self.dirstate.copied(fn)
        if cp:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #
            meta["copy"] = cp
            if not manifest2: # not a branch merge
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
                fp2 = nullid
            elif fp2 != nullid: # copied on remote side
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            elif fp1 != nullid: # copied on local side, reversed
                meta["copyrev"] = hex(manifest2.get(cp))
                fp2 = fp1
            else: # directory rename
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            self.ui.debug(_(" %s: copy %s:%s\n") %
                          (fn, cp, meta["copyrev"]))
            fp1 = nullid
        elif fp2 != nullid:
            # is one parent an ancestor of the other?
            fpa = fl.ancestor(fp1, fp2)
            if fpa == fp1:
                fp1, fp2 = fp2, nullid
            elif fpa == fp2:
                fp2 = nullid

        # is the file unmodified from the parent? report existing entry
        if fp2 == nullid and not fl.cmp(fp1, t):
            return fp1

        changelist.append(fn)
        return fl.add(t, meta, transaction, linkrev, fp1, fp2)
655 673
    def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
        # commit with explicit parents; defaults to the dirstate parents
        # when p1 is not given
        if p1 is None:
            p1, p2 = self.dirstate.parents()
        return self.commit(files=files, text=text, user=user, date=date,
                           p1=p1, p2=p2, wlock=wlock)
661 679
    def commit(self, files=None, text="", user=None, date=None,
               match=util.always, force=False, lock=None, wlock=None,
               force_editor=False, p1=None, p2=None, extra={}):
        """Create a new changeset and return its node.

        With p1 unset, commits the dirstate-selected changes ("normal"
        commit); with p1 set, commits exactly `files` against the given
        parents (rawcommit path).  Returns None when there is nothing to
        commit or the user supplied an empty message.  Runs the
        precommit/pretxncommit/commit hooks; the whole write happens
        inside a transaction.
        """

        commit = []
        remove = []
        changed = []
        use_dirstate = (p1 is None) # not rawcommit
        extra = extra.copy()

        if use_dirstate:
            if files:
                # commit only the named files, classified by dirstate state
                for f in files:
                    s = self.dirstate.state(f)
                    if s in 'nmai':
                        commit.append(f)
                    elif s == 'r':
                        remove.append(f)
                    else:
                        self.ui.warn(_("%s not tracked!\n") % f)
            else:
                changes = self.status(match=match)[:5]
                modified, added, removed, deleted, unknown = changes
                commit = modified + added
                remove = removed
        else:
            commit = files

        if use_dirstate:
            p1, p2 = self.dirstate.parents()
            update_dirstate = True
        else:
            p1, p2 = p1, p2 or nullid
            # only move dirstate parents if we're committing on top of them
            update_dirstate = (self.dirstate.parents()[0] == p1)

        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0]).copy()
        m2 = self.manifest.read(c2[0])

        if use_dirstate:
            branchname = self.workingctx().branch()
            try:
                # round-trip to verify the name is valid UTF-8
                branchname = branchname.decode('UTF-8').encode('UTF-8')
            except UnicodeDecodeError:
                raise util.Abort(_('branch name not in UTF-8!'))
        else:
            branchname = ""

        if use_dirstate:
            oldname = c1[5].get("branch", "") # stored in UTF-8
            # bail out early when there is literally nothing to record
            if not commit and not remove and not force and p2 == nullid and \
                   branchname == oldname:
                self.ui.status(_("nothing changed\n"))
                return None

        xp1 = hex(p1)
        if p2 == nullid: xp2 = ''
        else: xp2 = hex(p2)

        self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

        if not wlock:
            wlock = self.wlock()
        if not lock:
            lock = self.lock()
        tr = self.transaction()

        # check in files
        new = {}
        linkrev = self.changelog.count()
        commit.sort()
        for f in commit:
            self.ui.note(f + "\n")
            try:
                new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
                m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
            except IOError:
                if use_dirstate:
                    self.ui.warn(_("trouble committing %s!\n") % f)
                    raise
                else:
                    # rawcommit: an unreadable file becomes a removal
                    remove.append(f)

        # update manifest
        m1.update(new)
        remove.sort()

        for f in remove:
            if f in m1:
                del m1[f]
        mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, remove))

        # add changeset
        new = new.keys()
        new.sort()

        user = user or self.ui.username()
        if not text or force_editor:
            # build a template message and hand it to the user's editor
            edittext = []
            if text:
                edittext.append(text)
            edittext.append("")
            edittext.append("HG: user: %s" % user)
            if p2 != nullid:
                edittext.append("HG: branch merge")
            edittext.extend(["HG: changed %s" % f for f in changed])
            edittext.extend(["HG: removed %s" % f for f in remove])
            if not changed and not remove:
                edittext.append("HG: no files changed")
            edittext.append("")
            # run editor in the repository root
            olddir = os.getcwd()
            os.chdir(self.root)
            text = self.ui.edit("\n".join(edittext), user)
            os.chdir(olddir)

        # normalize the message; an empty one aborts the commit
        lines = [line.rstrip() for line in text.rstrip().splitlines()]
        while lines and not lines[0]:
            del lines[0]
        if not lines:
            return None
        text = '\n'.join(lines)
        if branchname:
            extra["branch"] = branchname
        n = self.changelog.add(mn, changed + remove, text, tr, p1, p2,
                               user, date, extra)
        self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                  parent2=xp2)
        tr.close()

        if use_dirstate or update_dirstate:
            self.dirstate.setparents(n)
        if use_dirstate:
            self.dirstate.update(new, "n")
            self.dirstate.forget(remove)

        self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
        return n
801 819
    def walk(self, node=None, files=[], match=util.always, badmatch=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function

        results are yielded in a tuple (src, filename), where src
        is one of:
        'f' the file was found in the directory tree
        'm' the file was only in the dirstate and not in the tree
        'b' file was not found and matched badmatch
        '''

        if node:
            # walk the manifest of the given changeset
            fdict = dict.fromkeys(files)
            for fn in self.manifest.read(self.changelog.read(node)[0]):
                for ffn in fdict:
                    # match if the file is the exact name or a directory
                    if ffn == fn or fn.startswith("%s/" % ffn):
                        del fdict[ffn]
                        break
                if match(fn):
                    yield 'm', fn
            # anything left in fdict was requested but not in the manifest
            for fn in fdict:
                if badmatch and badmatch(fn):
                    if match(fn):
                        yield 'b', fn
                else:
                    self.ui.warn(_('%s: No such file in rev %s\n') % (
                        util.pathto(self.getcwd(), fn), short(node)))
        else:
            # no node: walk the working directory via the dirstate
            for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
                yield src, fn
835 853
    def status(self, node1=None, node2=None, files=[], match=util.always,
               wlock=None, list_ignored=False, list_clean=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.

        Returns (modified, added, removed, deleted, unknown, ignored,
        clean), each a sorted list of filenames; ignored/clean are only
        populated when the corresponding list_* flag is set.
        """

        def fcmp(fn, mf):
            # full-content compare of working file fn against manifest mf
            t1 = self.wread(fn)
            return self.file(fn).cmp(mf.get(fn, nullid), t1)

        def mfmatches(node):
            # manifest of node, restricted to files accepted by match
            change = self.changelog.read(node)
            mf = self.manifest.read(change[0]).copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        modified, added, removed, deleted, unknown = [], [], [], [], []
        ignored, clean = [], []

        compareworking = False
        if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
            compareworking = True

        if not compareworking:
            # read the manifest from node1 before the manifest from node2,
            # so that we'll hit the manifest cache if we're going through
            # all the revisions in parent->child order.
            mf1 = mfmatches(node1)

        # are we comparing the working directory?
        if not node2:
            # try for the wlock so lookup results can be cached back into
            # the dirstate; fall through without it if it's busy
            if not wlock:
                try:
                    wlock = self.wlock(wait=0)
                except lock.LockException:
                    wlock = None
            (lookup, modified, added, removed, deleted, unknown,
             ignored, clean) = self.dirstate.status(files, match,
                                                    list_ignored, list_clean)

            # are we comparing working dir against its parent?
            if compareworking:
                if lookup:
                    # do a full compare of any files that might have changed
                    mf2 = mfmatches(self.dirstate.parents()[0])
                    for f in lookup:
                        if fcmp(f, mf2):
                            modified.append(f)
                        else:
                            clean.append(f)
                            if wlock is not None:
                                # record the file as clean so later status
                                # calls can skip the content compare
                                self.dirstate.update([f], "n")
            else:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                # XXX: create it in dirstate.py ?
                mf2 = mfmatches(self.dirstate.parents()[0])
                for f in lookup + modified + added:
                    mf2[f] = ""
                    mf2.set(f, execf=util.is_exec(self.wjoin(f), mf2.execf(f)))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
        else:
            # we are comparing two revisions
            mf2 = mfmatches(node2)

        if not compareworking:
            # flush lists from dirstate before comparing manifests
            modified, added, clean = [], [], []

            # make sure to sort the files so we talk to the disk in a
            # reasonable order
            mf2keys = mf2.keys()
            mf2keys.sort()
            for fn in mf2keys:
                if mf1.has_key(fn):
                    if mf1.flags(fn) != mf2.flags(fn) or \
                       (mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1))):
                        modified.append(fn)
                    elif list_clean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)

            # whatever is left in mf1 exists only in node1: removed
            removed = mf1.keys()

        # sort and return results:
        for l in modified, added, removed, deleted, unknown, ignored, clean:
            l.sort()
        return (modified, added, removed, deleted, unknown, ignored, clean)
932 950
933 951 def add(self, list, wlock=None):
934 952 if not wlock:
935 953 wlock = self.wlock()
936 954 for f in list:
937 955 p = self.wjoin(f)
938 956 if not os.path.exists(p):
939 957 self.ui.warn(_("%s does not exist!\n") % f)
940 958 elif not os.path.isfile(p):
941 959 self.ui.warn(_("%s not added: only files supported currently\n")
942 960 % f)
943 961 elif self.dirstate.state(f) in 'an':
944 962 self.ui.warn(_("%s already tracked!\n") % f)
945 963 else:
946 964 self.dirstate.update([f], "a")
947 965
948 966 def forget(self, list, wlock=None):
949 967 if not wlock:
950 968 wlock = self.wlock()
951 969 for f in list:
952 970 if self.dirstate.state(f) not in 'ai':
953 971 self.ui.warn(_("%s not added!\n") % f)
954 972 else:
955 973 self.dirstate.forget([f])
956 974
    def remove(self, list, unlink=False, wlock=None):
        """Schedule files for removal from the dirstate ('r' state).

        If unlink is true, delete the working-directory copies first
        (an already-missing file is silently ignored).  A file that is
        still present on disk is not marked removed; a file that was
        only scheduled for add is simply forgotten instead.
        """
        if unlink:
            for f in list:
                try:
                    util.unlink(self.wjoin(f))
                except OSError, inst:
                    # already gone is fine; re-raise anything else
                    if inst.errno != errno.ENOENT:
                        raise
        if not wlock:
            wlock = self.wlock()
        for f in list:
            p = self.wjoin(f)
            if os.path.exists(p):
                self.ui.warn(_("%s still exists!\n") % f)
            elif self.dirstate.state(f) == 'a':
                # never committed: cancel the pending add instead
                self.dirstate.forget([f])
            elif f not in self.dirstate:
                self.ui.warn(_("%s not tracked!\n") % f)
            else:
                self.dirstate.update([f], "r")
977 995
    def undelete(self, list, wlock=None):
        """Restore files scheduled for removal ('r') from the first parent.

        Each file's contents are read back from the manifest of the
        dirstate's first parent, rewritten into the working directory,
        and the file is returned to the normal ('n') state.
        """
        p = self.dirstate.parents()[0]
        mn = self.changelog.read(p)[0]
        m = self.manifest.read(mn)
        if not wlock:
            wlock = self.wlock()
        for f in list:
            if self.dirstate.state(f) not in "r":
                self.ui.warn("%s not removed!\n" % f)
            else:
                t = self.file(f).read(m[f])
                self.wwrite(f, t)
                # restore the recorded exec bit along with the contents
                util.set_exec(self.wjoin(f), m.execf(f))
                self.dirstate.update([f], "n")
992 1010
993 1011 def copy(self, source, dest, wlock=None):
994 1012 p = self.wjoin(dest)
995 1013 if not os.path.exists(p):
996 1014 self.ui.warn(_("%s does not exist!\n") % dest)
997 1015 elif not os.path.isfile(p):
998 1016 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
999 1017 else:
1000 1018 if not wlock:
1001 1019 wlock = self.wlock()
1002 1020 if self.dirstate.state(dest) == '?':
1003 1021 self.dirstate.update([dest], "a")
1004 1022 self.dirstate.copy(source, dest)
1005 1023
1006 1024 def heads(self, start=None):
1007 1025 heads = self.changelog.heads(start)
1008 1026 # sort the output in rev descending order
1009 1027 heads = [(-self.changelog.rev(h), h) for h in heads]
1010 1028 heads.sort()
1011 1029 return [n for (r, n) in heads]
1012 1030
    # branchlookup returns a dict giving a list of branches for
    # each head. A branch is defined as the tag of a node or
    # the branch of the node's parents. If a node has multiple
    # branch tags, tags are eliminated if they are visible from other
    # branch tags.
    #
    # So, for this graph:  a->b->c->d->e
    #                       \         /
    #                        aa -----/
    # a has tag 2.6.12
    # d has tag 2.6.13
    # e would have branch tags for 2.6.12 and 2.6.13.  Because the node
    # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
    # from the list.
    #
    # It is possible that more than one head will have the same branch tag.
    # callers need to check the result for multiple heads under the same
    # branch tag if that is a problem for them (ie checkout of a specific
    # branch).
    #
    # passing in a specific branch will limit the depth of the search
    # through the parents.  It won't limit the branches returned in the
    # result though.
    def branchlookup(self, heads=None, branch=None):
        """Map each head to the list of tag names of its nearest tagged
        ancestors ("branch tags"), per the comment block above."""
        if not heads:
            heads = self.heads()
        headt = [ h for h in heads ]
        chlog = self.changelog
        branches = {}
        merges = []
        seenmerge = {}

        # traverse the tree once for each head, recording in the branches
        # dict which tags are visible from this head. The branches
        # dict also records which tags are visible from each tag
        # while we traverse.
        while headt or merges:
            if merges:
                # resume a pending second-parent traversal, inheriting
                # the tags already found on the path to the merge
                n, found = merges.pop()
                visit = [n]
            else:
                h = headt.pop()
                visit = [h]
                found = [h]
                seen = {}
            while visit:
                n = visit.pop()
                if n in seen:
                    continue
                pp = chlog.parents(n)
                tags = self.nodetags(n)
                if tags:
                    for x in tags:
                        # 'tip' is not a real branch tag
                        if x == 'tip':
                            continue
                        for f in found:
                            branches.setdefault(f, {})[n] = 1
                        branches.setdefault(n, {})[n] = 1
                        break
                    if n not in found:
                        found.append(n)
                    # stop descending once the requested branch is found
                    if branch in tags:
                        continue
                seen[n] = 1
                # queue the second parent of a merge for a later pass
                if pp[1] != nullid and n not in seenmerge:
                    merges.append((pp[1], [x for x in found]))
                    seenmerge[n] = 1
                if pp[0] != nullid:
                    visit.append(pp[0])
        # traverse the branches dict, eliminating branch tags from each
        # head that are visible from another branch tag for that head.
        out = {}
        viscache = {}
        for h in heads:
            def visible(node):
                # memoized set of all branch-tag nodes reachable from node
                if node in viscache:
                    return viscache[node]
                ret = {}
                visit = [node]
                while visit:
                    x = visit.pop()
                    if x in viscache:
                        ret.update(viscache[x])
                    elif x not in ret:
                        ret[x] = 1
                        if x in branches:
                            visit[len(visit):] = branches[x].keys()
                viscache[node] = ret
                return ret
            if h not in branches:
                continue
            # O(n^2), but somewhat limited.  This only searches the
            # tags visible from a specific head, not all the tags in the
            # whole repo.
            for b in branches[h]:
                vis = False
                for bb in branches[h].keys():
                    if b != bb:
                        if b in visible(bb):
                            vis = True
                            break
                if not vis:
                    l = out.setdefault(h, [])
                    l[len(l):] = self.nodetags(b)
        return out
1118 1136
    def branches(self, nodes):
        """For each node, follow first parents back to a branch point.

        Returns a list of (head, root, parent1, parent2) tuples where
        root is the first ancestor that is either a merge (second
        parent set) or a repository root (first parent null).  With no
        nodes given, starts from the changelog tip.
        """
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while 1:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b
1132 1150
1133 1151 def between(self, pairs):
1134 1152 r = []
1135 1153
1136 1154 for top, bottom in pairs:
1137 1155 n, l, i = top, [], 0
1138 1156 f = 1
1139 1157
1140 1158 while n != bottom:
1141 1159 p = self.changelog.parents(n)[0]
1142 1160 if i == f:
1143 1161 l.append(n)
1144 1162 f = f * 2
1145 1163 n = p
1146 1164 i += 1
1147 1165
1148 1166 r.append(l)
1149 1167
1150 1168 return r
1151 1169
    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return list of roots of the subsets of missing nodes from remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore base will be updated to include the nodes that exists
        in self and remote but no children exists in self and remote.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        All the descendants of the list returned are missing in self.
        (and so we know that the rest of the nodes are missing in remote, see
        outgoing)
        """
        m = self.changelog.nodemap
        search = []
        fetch = {}
        seen = {}
        seenbranch = {}
        if base == None:
            base = {}

        if not heads:
            heads = remote.heads()

        if self.changelog.tip() == nullid:
            # local repo is empty: everything the remote has is missing
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid]
            return []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        if not unknown:
            return []

        req = dict.fromkeys(unknown)
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n) # schedule branch range for scanning
                    seenbranch[n] = 1
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch[n[1]] = 1 # earliest unknown
                        for p in n[2:4]:
                            if p in m:
                                base[p] = 1 # latest known

                    # queue unknown parents for the next request batch
                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req[p] = 1
                seen[n[0]] = 1

            if r:
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                              (reqcnt, " ".join(map(short, r))))
                # ask the remote about parents in batches of ten
                for p in xrange(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

        # do binary search on the branches we found
        while search:
            n = search.pop(0)
            reqcnt += 1
            l = remote.between([(n[0], n[1])])[0]
            l.append(n[1])
            p = n[0]
            f = 1
            for i in l:
                self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                if i in m:
                    if f <= 2:
                        # gap of at most one node: boundary located
                        self.ui.debug(_("found new branch changeset %s\n") %
                                      short(p))
                        fetch[p] = 1
                        base[i] = 1
                    else:
                        self.ui.debug(_("narrowed branch search to %s:%s\n")
                                      % (short(p), short(i)))
                        search.append((p, i))
                    break
                p, f = i, f * 2

        # sanity check our fetch list
        for f in fetch.keys():
            if f in m:
                raise repo.RepoError(_("already have changeset ") + short(f[:4]))

        if base.keys() == [nullid]:
            # no common ancestor besides the null revision
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.debug(_("found new changesets starting at ") +
                      " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return fetch.keys()
1292 1310
    def findoutgoing(self, remote, base=None, heads=None, force=False):
        """Return list of nodes that are roots of subsets not in remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads, and return a second element which
        contains all remote heads which get new children.
        """
        if base == None:
            # no common-node info supplied: compute it ourselves
            base = {}
            self.findincoming(remote, base, heads, force=force)

        self.ui.debug(_("common changesets up to ")
                      + " ".join(map(short, base.keys())) + "\n")

        remain = dict.fromkeys(self.changelog.nodemap)

        # prune everything remote has from the tree
        del remain[nullid]
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                del remain[n]
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        # find every remote head that will get new children
        updated_heads = {}
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)
                if heads:
                    if p1 in heads:
                        updated_heads[p1] = True
                    if p2 in heads:
                        updated_heads[p2] = True

        # this is the set of all roots we have to push
        if heads:
            return subset, updated_heads.keys()
        else:
            return subset
1340 1358
    def pull(self, remote, heads=None, force=False, lock=None):
        """Pull changes from *remote* into this repository.

        Takes the repository lock unless the caller already holds one
        (and only releases it if taken here).  With *heads*, only pull
        ancestors of those heads, which requires the remote to support
        the 'changegroupsubset' capability.  Returns the result of
        addchangegroup, or 0 when there is nothing to pull.
        """
        mylock = False
        if not lock:
            lock = self.lock()
            mylock = True

        try:
            fetch = self.findincoming(remote, force=force)
            if fetch == [nullid]:
                self.ui.status(_("requesting all changes\n"))

            if not fetch:
                self.ui.status(_("no changes found\n"))
                return 0

            if heads is None:
                cg = remote.changegroup(fetch, 'pull')
            else:
                if 'changegroupsubset' not in remote.capabilities:
                    raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
                cg = remote.changegroupsubset(fetch, heads, 'pull')
            return self.addchangegroup(cg, 'pull', remote.url())
        finally:
            # only release a lock we acquired ourselves
            if mylock:
                lock.release()
1366 1384
1367 1385 def push(self, remote, force=False, revs=None):
1368 1386 # there are two ways to push to remote repo:
1369 1387 #
1370 1388 # addchangegroup assumes local user can lock remote
1371 1389 # repo (local filesystem, old ssh servers).
1372 1390 #
1373 1391 # unbundle assumes local user cannot lock remote repo (new ssh
1374 1392 # servers, http servers).
1375 1393
1376 1394 if remote.capable('unbundle'):
1377 1395 return self.push_unbundle(remote, force, revs)
1378 1396 return self.push_addchangegroup(remote, force, revs)
1379 1397
    def prepush(self, remote, force, revs):
        """Compute the changegroup to push to *remote*.

        Returns (changegroup, remote_heads) on success, or (None, 1)
        when there is nothing to push or the push would create new
        remote heads without *force*.
        """
        base = {}
        remote_heads = remote.heads()
        inc = self.findincoming(remote, base, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, base, remote_heads)
        if revs is not None:
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # check if we're creating new remote heads
            # to be a remote head after push, node must be either
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head

            warn = 0

            if remote_heads == [nullid]:
                # empty remote repo: anything goes
                warn = 0
            elif not revs and len(heads) > len(remote_heads):
                warn = 1
            else:
                newheads = list(heads)
                for r in remote_heads:
                    if r in self.changelog.nodemap:
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            # no outgoing head descends from r, so r
                            # stays a head after the push
                            newheads.append(r)
                    else:
                        # remote head unknown locally: it remains a head
                        newheads.append(r)
                if len(newheads) > len(remote_heads):
                    warn = 1

            if warn:
                self.ui.warn(_("abort: push creates new remote branches!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return None, 1
        elif inc:
            self.ui.warn(_("note: unsynced remote changes!\n"))


        if revs is None:
            cg = self.changegroup(update, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads
1435 1453
1436 1454 def push_addchangegroup(self, remote, force, revs):
1437 1455 lock = remote.lock()
1438 1456
1439 1457 ret = self.prepush(remote, force, revs)
1440 1458 if ret[0] is not None:
1441 1459 cg, remote_heads = ret
1442 1460 return remote.addchangegroup(cg, 'push', self.url())
1443 1461 return ret[1]
1444 1462
1445 1463 def push_unbundle(self, remote, force, revs):
1446 1464 # local repo finds heads on server, finds out what revs it
1447 1465 # must push. once revs transferred, if server finds it has
1448 1466 # different heads (someone else won commit/push race), server
1449 1467 # aborts.
1450 1468
1451 1469 ret = self.prepush(remote, force, revs)
1452 1470 if ret[0] is not None:
1453 1471 cg, remote_heads = ret
1454 1472 if force: remote_heads = ['force']
1455 1473 return remote.unbundle(cg, remote_heads, 'push')
1456 1474 return ret[1]
1457 1475
    def changegroupinfo(self, nodes):
        """Report how many changesets will be bundled; list each one at
        debug level."""
        self.ui.note(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug(_("List of changesets:\n"))
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))
1464 1482
    def changegroupsubset(self, bases, heads, source):
        """This function generates a changegroup consisting of all the nodes
        that are descendents of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to."""

        self.hook('preoutgoing', throw=True, source=source)

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        self.changegroupinfo(msng_cl_lst)
        # Some bases may turn out to be superfluous, and some heads may be
        # too.  nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = {}
        # We assume that all parents of bases are known heads.
        for n in bases:
            for p in cl.parents(n):
                if p != nullid:
                    knownheads[p] = 1
        knownheads = knownheads.keys()
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known.  The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into an ersatz set.
            has_cl_set = dict.fromkeys(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = {}

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function.  Sets up an environment for the
        # inner function.
        def cmp_by_rev_func(revlog):
            # Compare two nodes by their revision number in the environment's
            # revision history.  Since the revision number both represents the
            # most efficient order to read the nodes in, and represents a
            # topological sorting of the nodes, this function is often useful.
            def cmp_by_rev(a, b):
                return cmp(revlog.rev(a), revlog.rev(b))
            return cmp_by_rev

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents.  This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = hasset.keys()
            haslst.sort(cmp_by_rev_func(revlog))
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset[n] = 1
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)

        # This is a function generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup.  The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode we saw any manifest
            # referenced by so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = {}
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(n))
                if linknode in has_cl_set:
                    has_mnfst_set[n] = 1
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # for the inner function.
        def filenode_collector(changedfiles):
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to.  It
            # does this by assuming that a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    delta = mdiff.patchtext(mnfst.delta(mnfstnode))
                    # For each line in the delta
                    for dline in delta.splitlines():
                        # get the filename and filenode for that line
                        f, fnode = dline.split('\0')
                        fnode = bin(fnode[:40])
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes

        # We have a list of filenodes we think we need for a file, lets remove
        # all those we know the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = {}
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(n))
                if clnode in has_cl_set:
                    hasset[n] = 1
            prune_parents(filerevlog, hasset, msngset)

        # A function generator function that sets up a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Now that we have all these utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            changedfiles = changedfiles.keys()
            changedfiles.sort()
            # Go through all our files in order sorted by name.
            for fname in changedfiles:
                filerevlog = self.file(fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if msng_filenode_set.has_key(fname):
                    prune_filenodes(fname, filerevlog)
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.genchunk(fname)
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if msng_filenode_set.has_key(fname):
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()

            if msng_cl_lst:
                self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())
1736 1754
    def changegroup(self, basenodes, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them."""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        nodes = cl.nodesbetween(basenodes, None)[0]
        # set of changelog revs going out, used to filter each revlog
        revset = dict.fromkeys([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes)

        def identity(x):
            return x

        def gennodelst(revlog):
            # yield every node of revlog whose linked changeset goes out
            for r in xrange(0, revlog.count()):
                n = revlog.node(r)
                if revlog.linkrev(n) in revset:
                    yield n

        def changed_file_collector(changedfileset):
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        def lookuprevlink_func(revlog):
            # map a revlog node back to the changelog node it belongs to
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(n))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk
            changedfiles = changedfiles.keys()
            changedfiles.sort()

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in changedfiles:
                filerevlog = self.file(fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.genchunk(fname)
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            # empty filename chunk signals the end of the stream
            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())
1803 1821
    def addchangegroup(self, source, srctype, url):
        """add changegroup to repo.

        return values:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - less heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            # linkrev mapper for the changelog: an incoming changeset's
            # link is the revision number it is about to receive
            self.ui.debug(_("add changeset %s\n") % short(x))
            return cl.count()

        def revmap(x):
            # linkrev mapper for manifests/filelogs: changelog node -> rev
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        tr = self.transaction()

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = None
        try:
            # NOTE(review): self.sopener appears to be the store opener;
            # it is defined outside this view — confirm
            cl = appendfile.appendchangelog(self.sopener,
                                            self.changelog.version)

            oldheads = len(cl.heads())

            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            # cor/cnr: last changelog rev before/after the group is added
            cor = cl.count() - 1
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, tr, 1) is None:
                raise util.Abort(_("received changelog group is empty"))
            cnr = cl.count() - 1
            changesets = cnr - cor

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, tr)

            # process the files
            self.ui.status(_("adding file changes\n"))
            while 1:
                # an empty chunk marks the end of the file groups
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug(_("adding %s revisions\n") % f)
                fl = self.file(f)
                o = fl.count()
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, tr) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += fl.count() - o
                files += 1

            # flush the appendfile changelog so the data becomes visible
            cl.writedata()
        finally:
            if cl:
                cl.cleanup()

        # make changelog see real files again
        self.changelog = changelog.changelog(self.sopener,
                                             self.changelog.version)
        self.changelog.checkinlinesize(tr)

        newheads = len(self.changelog.heads())
        heads = ""
        if oldheads and newheads != oldheads:
            heads = _(" (%+d heads)") % (newheads - oldheads)

        self.ui.status(_("added %d changesets"
                         " with %d changes to %d files%s\n")
                       % (changesets, revisions, files, heads))

        if changesets > 0:
            # cor+1 is the first newly added changeset
            self.hook('pretxnchangegroup', throw=True,
                      node=hex(self.changelog.node(cor+1)), source=srctype,
                      url=url)

        tr.close()

        if changesets > 0:
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                      source=srctype, url=url)

            # one 'incoming' hook invocation per added changeset
            for i in xrange(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1
1910 1928
1911 1929
1912 1930 def stream_in(self, remote):
1913 1931 fp = remote.stream_out()
1914 1932 l = fp.readline()
1915 1933 try:
1916 1934 resp = int(l)
1917 1935 except ValueError:
1918 1936 raise util.UnexpectedOutput(
1919 1937 _('Unexpected response from remote server:'), l)
1920 1938 if resp == 1:
1921 1939 raise util.Abort(_('operation forbidden by server'))
1922 1940 elif resp == 2:
1923 1941 raise util.Abort(_('locking the remote repository failed'))
1924 1942 elif resp != 0:
1925 1943 raise util.Abort(_('the server sent an unknown error code'))
1926 1944 self.ui.status(_('streaming all changes\n'))
1927 1945 l = fp.readline()
1928 1946 try:
1929 1947 total_files, total_bytes = map(int, l.split(' ', 1))
1930 1948 except ValueError, TypeError:
1931 1949 raise util.UnexpectedOutput(
1932 1950 _('Unexpected response from remote server:'), l)
1933 1951 self.ui.status(_('%d files to transfer, %s of data\n') %
1934 1952 (total_files, util.bytecount(total_bytes)))
1935 1953 start = time.time()
1936 1954 for i in xrange(total_files):
1937 1955 # XXX doesn't support '\n' or '\r' in filenames
1938 1956 l = fp.readline()
1939 1957 try:
1940 1958 name, size = l.split('\0', 1)
1941 1959 size = int(size)
1942 1960 except ValueError, TypeError:
1943 1961 raise util.UnexpectedOutput(
1944 1962 _('Unexpected response from remote server:'), l)
1945 1963 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1946 1964 ofp = self.sopener(name, 'w')
1947 1965 for chunk in util.filechunkiter(fp, limit=size):
1948 1966 ofp.write(chunk)
1949 1967 ofp.close()
1950 1968 elapsed = time.time() - start
1951 1969 if elapsed <= 0:
1952 1970 elapsed = 0.001
1953 1971 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1954 1972 (util.bytecount(total_bytes), elapsed,
1955 1973 util.bytecount(total_bytes / elapsed)))
1956 1974 self.reload()
1957 1975 return len(self.heads()) + 1
1958 1976
1959 1977 def clone(self, remote, heads=[], stream=False):
1960 1978 '''clone remote repository.
1961 1979
1962 1980 keyword arguments:
1963 1981 heads: list of revs to clone (forces use of pull)
1964 1982 stream: use streaming clone if possible'''
1965 1983
1966 1984 # now, all clients that can request uncompressed clones can
1967 1985 # read repo formats supported by all servers that can serve
1968 1986 # them.
1969 1987
1970 1988 # if revlog format changes, client will have to check version
1971 1989 # and format flags on "stream" capability, and use
1972 1990 # uncompressed only if compatible.
1973 1991
1974 1992 if stream and not heads and remote.capable('stream'):
1975 1993 return self.stream_in(remote)
1976 1994 return self.pull(remote, heads)
1977 1995
# used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callback renaming each (src, dest) pair in files.

    A plain closure (rather than a bound method) keeps the transaction
    from holding a reference cycle back into the repository.
    """
    pairs = [tuple(entry) for entry in files]
    def renamer():
        for source, target in pairs:
            util.rename(source, target)
    return renamer
1985 2003
def instance(ui, path, create):
    """Open (or create) the local repository at path."""
    # strip a leading 'file:' scheme before handing off to the class
    repopath = util.drop_scheme('file', path)
    return localrepository(ui, repopath, create)
1988 2006
def islocal(path):
    """Report that repositories of this module are always local."""
    return True
@@ -1,53 +1,59
1 1 # mq patch on an empty repo
2 2 tip: 0
3 3 No .hg/branches.cache
4 4 tip: 0
5 5 No .hg/branches.cache
6 6
7 7 # some regular revisions
8 8 Patch queue now empty
9 9 tip: 1
10 features: unnamed
10 11 3f910abad313ff802d3a23a7529433872df9b3ae 1
11 12 3f910abad313ff802d3a23a7529433872df9b3ae bar
12 13 9539f35bdc80732cc9a3f84e46508f1ed1ec8cff foo
13 14
14 15 # add some mq patches
15 16 applying p1
16 17 Now at: p1
17 18 tip: 2
19 features: unnamed
18 20 3f910abad313ff802d3a23a7529433872df9b3ae 1
19 21 3f910abad313ff802d3a23a7529433872df9b3ae bar
20 22 9539f35bdc80732cc9a3f84e46508f1ed1ec8cff foo
21 23 tip: 3
24 features: unnamed
22 25 3f910abad313ff802d3a23a7529433872df9b3ae 1
23 26 3f910abad313ff802d3a23a7529433872df9b3ae bar
24 27 9539f35bdc80732cc9a3f84e46508f1ed1ec8cff foo
25 28 branch foo: 3
26 29 branch bar: 2
27 30
28 31 # removing the cache
29 32 tip: 3
33 features: unnamed
30 34 3f910abad313ff802d3a23a7529433872df9b3ae 1
31 35 3f910abad313ff802d3a23a7529433872df9b3ae bar
32 36 9539f35bdc80732cc9a3f84e46508f1ed1ec8cff foo
33 37 branch foo: 3
34 38 branch bar: 2
35 39
36 40 # importing rev 1 (the cache now ends in one of the patches)
37 41 tip: 3
42 features: unnamed
38 43 3f910abad313ff802d3a23a7529433872df9b3ae 1
39 44 3f910abad313ff802d3a23a7529433872df9b3ae bar
40 45 9539f35bdc80732cc9a3f84e46508f1ed1ec8cff foo
41 46 branch foo: 3
42 47 branch bar: 2
43 48 qbase: 1
44 49
45 50 # detect an invalid cache
46 51 Patch queue now empty
47 52 applying p0
48 53 applying p1
49 54 applying p2
50 55 Now at: p2
51 56 tip: 3
57 features: unnamed
52 58 9539f35bdc80732cc9a3f84e46508f1ed1ec8cff 0
53 59 9539f35bdc80732cc9a3f84e46508f1ed1ec8cff foo
@@ -1,80 +1,81
1 1 foo
2 2 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
3 3 foo
4 4 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
5 5 (branch merge, don't forget to commit)
6 6 foo
7 7 changeset: 5:5f8fb06e083e
8 8 branch: foo
9 9 tag: tip
10 10 parent: 4:4909a3732169
11 11 parent: 3:bf1bc2f45e83
12 12 user: test
13 13 date: Mon Jan 12 13:46:40 1970 +0000
14 14 summary: merge
15 15
16 16 changeset: 4:4909a3732169
17 17 branch: foo
18 18 parent: 1:b699b1cec9c2
19 19 user: test
20 20 date: Mon Jan 12 13:46:40 1970 +0000
21 21 summary: modify a branch
22 22
23 23 changeset: 3:bf1bc2f45e83
24 24 user: test
25 25 date: Mon Jan 12 13:46:40 1970 +0000
26 26 summary: clear branch name
27 27
28 28 changeset: 2:67ec16bde7f1
29 29 branch: bar
30 30 user: test
31 31 date: Mon Jan 12 13:46:40 1970 +0000
32 32 summary: change branch name
33 33
34 34 changeset: 1:b699b1cec9c2
35 35 branch: foo
36 36 user: test
37 37 date: Mon Jan 12 13:46:40 1970 +0000
38 38 summary: add branch name
39 39
40 40 changeset: 0:be8523e69bf8
41 41 user: test
42 42 date: Mon Jan 12 13:46:40 1970 +0000
43 43 summary: initial
44 44
45 45 foo 5:5f8fb06e083e
46 46 3:bf1bc2f45e83
47 47 bar 2:67ec16bde7f1
48 48 foo
49 49
50 50 bar
51 51 % test for invalid branch cache
52 52 rolling back last transaction
53 53 changeset: 4:4909a3732169
54 54 branch: foo
55 55 tag: tip
56 56 parent: 1:b699b1cec9c2
57 57 user: test
58 58 date: Mon Jan 12 13:46:40 1970 +0000
59 59 summary: modify a branch
60 60
61 61 Invalid branch cache: unknown tip
62 62 changeset: 4:4909a3732169c0c20011c4f4b8fdff4e3d89b23f
63 63 branch: foo
64 64 tag: tip
65 65 parent: 1:b699b1cec9c2966b3700de4fef0dc123cd754c31
66 66 parent: -1:0000000000000000000000000000000000000000
67 67 manifest: 4:d01b250baaa05909152f7ae07d7a649deea0df9a
68 68 user: test
69 69 date: Mon Jan 12 13:46:40 1970 +0000
70 70 files: a
71 71 extra: branch=foo
72 72 description:
73 73 modify a branch
74 74
75 75
76 76 4:4909a3732169
77 features: unnamed
77 78 4909a3732169c0c20011c4f4b8fdff4e3d89b23f 4
78 79 bf1bc2f45e834c75404d0ddab57d53beab56e2f8
79 80 4909a3732169c0c20011c4f4b8fdff4e3d89b23f foo
80 81 67ec16bde7f1575d523313b9bca000f6a6f12dca bar
General Comments 0
You need to be logged in to leave comments. Login now