Refactor tags code to prepare for improving the algorithm
Matt Mackall
r4210:caff9204 default
@@ -1,1990 +1,1991 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import *
9 9 from i18n import gettext as _
10 10 from demandload import *
11 11 import repo
12 12 demandload(globals(), "appendfile changegroup")
13 13 demandload(globals(), "changelog dirstate filelog manifest context")
14 14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
15 15 demandload(globals(), "os revlog time util")
16 16
17 17 class localrepository(repo.repository):
18 18 capabilities = ('lookup', 'changegroupsubset')
19 19 supported = ('revlogv1', 'store')
20 20
21 21 def __del__(self):
22 22 self.transhandle = None
23 23 def __init__(self, parentui, path=None, create=0):
24 24 repo.repository.__init__(self)
25 25 if not path:
26 26 p = os.getcwd()
27 27 while not os.path.isdir(os.path.join(p, ".hg")):
28 28 oldp = p
29 29 p = os.path.dirname(p)
30 30 if p == oldp:
31 31 raise repo.RepoError(_("There is no Mercurial repository"
32 32 " here (.hg not found)"))
33 33 path = p
34 34
35 35 self.root = os.path.realpath(path)
36 36 self.path = os.path.join(self.root, ".hg")
37 37 self.origroot = path
38 38 self.opener = util.opener(self.path)
39 39 self.wopener = util.opener(self.root)
40 40
41 41 if not os.path.isdir(self.path):
42 42 if create:
43 43 if not os.path.exists(path):
44 44 os.mkdir(path)
45 45 os.mkdir(self.path)
46 46 os.mkdir(os.path.join(self.path, "store"))
47 47 requirements = ("revlogv1", "store")
48 48 reqfile = self.opener("requires", "w")
49 49 for r in requirements:
50 50 reqfile.write("%s\n" % r)
51 51 reqfile.close()
52 52 # create an invalid changelog
53 53 self.opener("00changelog.i", "a").write(
54 54 '\0\0\0\2' # represents revlogv2
55 55 ' dummy changelog to prevent using the old repo layout'
56 56 )
57 57 else:
58 58 raise repo.RepoError(_("repository %s not found") % path)
59 59 elif create:
60 60 raise repo.RepoError(_("repository %s already exists") % path)
61 61 else:
62 62 # find requirements
63 63 try:
64 64 requirements = self.opener("requires").read().splitlines()
65 65 except IOError, inst:
66 66 if inst.errno != errno.ENOENT:
67 67 raise
68 68 requirements = []
69 69 # check them
70 70 for r in requirements:
71 71 if r not in self.supported:
72 72 raise repo.RepoError(_("requirement '%s' not supported") % r)
73 73
74 74 # setup store
75 75 if "store" in requirements:
76 76 self.encodefn = util.encodefilename
77 77 self.decodefn = util.decodefilename
78 78 self.spath = os.path.join(self.path, "store")
79 79 else:
80 80 self.encodefn = lambda x: x
81 81 self.decodefn = lambda x: x
82 82 self.spath = self.path
83 83 self.sopener = util.encodedopener(util.opener(self.spath), self.encodefn)
84 84
85 85 self.ui = ui.ui(parentui=parentui)
86 86 try:
87 87 self.ui.readconfig(self.join("hgrc"), self.root)
88 88 except IOError:
89 89 pass
90 90
91 91 v = self.ui.configrevlog()
92 92 self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
93 93 self.revlogv1 = self.revlogversion != revlog.REVLOGV0
94 94 fl = v.get('flags', None)
95 95 flags = 0
96 96 if fl != None:
97 97 for x in fl.split():
98 98 flags |= revlog.flagstr(x)
99 99 elif self.revlogv1:
100 100 flags = revlog.REVLOG_DEFAULT_FLAGS
101 101
102 102 v = self.revlogversion | flags
103 103 self.manifest = manifest.manifest(self.sopener, v)
104 104 self.changelog = changelog.changelog(self.sopener, v)
105 105
106 106 fallback = self.ui.config('ui', 'fallbackencoding')
107 107 if fallback:
108 108 util._fallbackencoding = fallback
109 109
110 110 # the changelog might not have the inline index flag
111 111 # on. If the format of the changelog is the same as found in
112 112 # .hgrc, apply any flags found in the .hgrc as well.
113 113 # Otherwise, just use the version from the changelog.
114 114 v = self.changelog.version
115 115 if v == self.revlogversion:
116 116 v |= flags
117 117 self.revlogversion = v
118 118
119 119 self.tagscache = None
120 120 self.branchcache = None
121 121 self.nodetagscache = None
122 122 self.encodepats = None
123 123 self.decodepats = None
124 124 self.transhandle = None
125 125
126 126 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
127 127
128 128 def url(self):
129 129 return 'file:' + self.root
130 130
131 131 def hook(self, name, throw=False, **args):
132 132 def callhook(hname, funcname):
133 133 '''call python hook. hook is callable object, looked up as
134 134 name in python module. if callable returns "true", hook
135 135 fails, else passes. if hook raises exception, treated as
136 136 hook failure. exception propagates if throw is "true".
137 137
138 138 reason for "true" meaning "hook failed" is so that
139 139 unmodified commands (e.g. mercurial.commands.update) can
140 140 be run as hooks without wrappers to convert return values.'''
141 141
142 142 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
143 143 d = funcname.rfind('.')
144 144 if d == -1:
145 145 raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
146 146 % (hname, funcname))
147 147 modname = funcname[:d]
148 148 try:
149 149 obj = __import__(modname)
150 150 except ImportError:
151 151 try:
152 152 # extensions are loaded with hgext_ prefix
153 153 obj = __import__("hgext_%s" % modname)
154 154 except ImportError:
155 155 raise util.Abort(_('%s hook is invalid '
156 156 '(import of "%s" failed)') %
157 157 (hname, modname))
158 158 try:
159 159 for p in funcname.split('.')[1:]:
160 160 obj = getattr(obj, p)
161 161 except AttributeError, err:
162 162 raise util.Abort(_('%s hook is invalid '
163 163 '("%s" is not defined)') %
164 164 (hname, funcname))
165 165 if not callable(obj):
166 166 raise util.Abort(_('%s hook is invalid '
167 167 '("%s" is not callable)') %
168 168 (hname, funcname))
169 169 try:
170 170 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
171 171 except (KeyboardInterrupt, util.SignalInterrupt):
172 172 raise
173 173 except Exception, exc:
174 174 if isinstance(exc, util.Abort):
175 175 self.ui.warn(_('error: %s hook failed: %s\n') %
176 176 (hname, exc.args[0]))
177 177 else:
178 178 self.ui.warn(_('error: %s hook raised an exception: '
179 179 '%s\n') % (hname, exc))
180 180 if throw:
181 181 raise
182 182 self.ui.print_exc()
183 183 return True
184 184 if r:
185 185 if throw:
186 186 raise util.Abort(_('%s hook failed') % hname)
187 187 self.ui.warn(_('warning: %s hook failed\n') % hname)
188 188 return r
189 189
190 190 def runhook(name, cmd):
191 191 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
192 192 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
193 193 r = util.system(cmd, environ=env, cwd=self.root)
194 194 if r:
195 195 desc, r = util.explain_exit(r)
196 196 if throw:
197 197 raise util.Abort(_('%s hook %s') % (name, desc))
198 198 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
199 199 return r
200 200
201 201 r = False
202 202 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
203 203 if hname.split(".", 1)[0] == name and cmd]
204 204 hooks.sort()
205 205 for hname, cmd in hooks:
206 206 if cmd.startswith('python:'):
207 207 r = callhook(hname, cmd[7:].strip()) or r
208 208 else:
209 209 r = runhook(hname, cmd) or r
210 210 return r
211 211
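As an aside, a hook dispatched through the 'python:' prefix above is imported as module.function and called with ui, repo, hooktype plus the keyword arguments passed to hook(); a truthy return value counts as failure. A minimal standalone sketch of such a hook (the module name myhooks and the merge check are illustrative assumptions, not part of this change):

    # myhooks.py -- hypothetical module, enabled in hgrc as:
    #   [hooks]
    #   pretxncommit.nomerges = python:myhooks.forbid_merges
    def forbid_merges(ui, repo, hooktype, **kwargs):
        # commit() passes parent2='' for non-merge changesets (see xp2 below)
        if kwargs.get('parent2'):
            ui.warn('merge changesets are not allowed\n')
            return True  # truthy return value == hook failure
        return False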
212 212 tag_disallowed = ':\r\n'
213 213
214 214 def tag(self, name, node, message, local, user, date):
215 215 '''tag a revision with a symbolic name.
216 216
217 217 if local is True, the tag is stored in a per-repository file.
218 218 otherwise, it is stored in the .hgtags file, and a new
219 219 changeset is committed with the change.
220 220
221 221 keyword arguments:
222 222
223 223 local: whether to store tag in non-version-controlled file
224 224 (default False)
225 225
226 226 message: commit message to use if committing
227 227
228 228 user: name of user to use if committing
229 229
230 230 date: date tuple to use if committing'''
231 231
232 232 for c in self.tag_disallowed:
233 233 if c in name:
234 234 raise util.Abort(_('%r cannot be used in a tag name') % c)
235 235
236 236 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
237 237
238 238 if local:
239 239 # local tags are stored in the current charset
240 240 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
241 241 self.hook('tag', node=hex(node), tag=name, local=local)
242 242 return
243 243
244 244 for x in self.status()[:5]:
245 245 if '.hgtags' in x:
246 246 raise util.Abort(_('working copy of .hgtags is changed '
247 247 '(please commit .hgtags manually)'))
248 248
249 249 # committed tags are stored in UTF-8
250 250 line = '%s %s\n' % (hex(node), util.fromlocal(name))
251 251 self.wfile('.hgtags', 'ab').write(line)
252 252 if self.dirstate.state('.hgtags') == '?':
253 253 self.add(['.hgtags'])
254 254
255 255 self.commit(['.hgtags'], message, user, date)
256 256 self.hook('tag', node=hex(node), tag=name, local=local)
257 257
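For reference, both tag stores written above share one line format, '<hex node> <name>' (.hgtags in UTF-8, localtags in the local charset), and names may not contain the characters in tag_disallowed. A standalone sketch of that validation and formatting (hex_node stands in for hex(node)):

    TAG_DISALLOWED = ':\r\n'

    def make_tag_line(hex_node, name):
        for c in TAG_DISALLOWED:
            if c in name:
                raise ValueError('%r cannot be used in a tag name' % c)
        return '%s %s\n' % (hex_node, name)

    # make_tag_line('0' * 40, 'release-1.0') -> '000...0 release-1.0\n'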
258 258 def tags(self):
259 259 '''return a mapping of tag to node'''
260 if not self.tagscache:
261 self.tagscache = {}
260 if self.tagscache:
261 return self.tagscache
262
263 self.tagscache = {}
262 264
263 def parsetag(line, context):
264 if not line:
265 return
265 def readtags(lines, fn):
266 filetags = {}
267 count = 0
268
269 def warn(msg):
270 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
271
272 for l in lines:
273 count += 1
274 if not l:
275 continue
266 276 s = l.split(" ", 1)
267 277 if len(s) != 2:
268 self.ui.warn(_("%s: cannot parse entry\n") % context)
269 return
278 warn(_("cannot parse entry"))
279 continue
270 280 node, key = s
271 281 key = util.tolocal(key.strip()) # stored in UTF-8
272 282 try:
273 283 bin_n = bin(node)
274 284 except TypeError:
275 self.ui.warn(_("%s: node '%s' is not well formed\n") %
276 (context, node))
277 return
285 warn(_("node '%s' is not well formed") % node)
286 continue
278 287 if bin_n not in self.changelog.nodemap:
279 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
280 (context, key))
281 return
288 warn(_("tag '%s' refers to unknown node") % key)
289 continue
282 290 self.tagscache[key] = bin_n
283 291
284 # read the tags file from each head, ending with the tip,
285 # and add each tag found to the map, with "newer" ones
286 # taking precedence
287 f = None
288 for rev, node, fnode in self._hgtagsnodes():
289 f = (f and f.filectx(fnode) or
290 self.filectx('.hgtags', fileid=fnode))
291 count = 0
292 for l in f.data().splitlines():
293 count += 1
294 parsetag(l, _("%s, line %d") % (str(f), count))
292 # read the tags file from each head, ending with the tip,
293 # and add each tag found to the map, with "newer" ones
294 # taking precedence
295 f = None
296 for rev, node, fnode in self._hgtagsnodes():
297 f = (f and f.filectx(fnode) or
298 self.filectx('.hgtags', fileid=fnode))
299 readtags(f.data().splitlines(), f)
295 300
296 try:
297 f = self.opener("localtags")
298 count = 0
299 for l in f:
300 # localtags are stored in the local character set
301 # while the internal tag table is stored in UTF-8
302 l = util.fromlocal(l)
303 count += 1
304 parsetag(l, _("localtags, line %d") % count)
305 except IOError:
306 pass
301 try:
302 data = util.fromlocal(self.opener("localtags").read())
303 # localtags are stored in the local character set
304 # while the internal tag table is stored in UTF-8
305 readtags(data.splitlines(), "localtags")
306 except IOError:
307 pass
307 308
308 self.tagscache['tip'] = self.changelog.tip()
309 self.tagscache['tip'] = self.changelog.tip()
309 310
310 311 return self.tagscache
311 312
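This is the heart of the refactor: the old per-line parsetag() becomes readtags(lines, fn), which parses an entire tags file, warns with file-and-line context, and skips bad entries with continue instead of abandoning the scan. A standalone sketch with the repository dependencies stubbed out (known_nodes stands in for self.changelog.nodemap, and warnings are collected in a list rather than sent to self.ui.warn):

    import binascii

    def read_tags(lines, fn, known_nodes, cache, warnings):
        count = 0
        for l in lines:
            count += 1
            if not l:
                continue
            s = l.split(" ", 1)
            if len(s) != 2:
                warnings.append("%s, line %s: cannot parse entry" % (fn, count))
                continue
            node, key = s
            key = key.strip()
            try:
                bin_n = binascii.unhexlify(node)  # bin() in revlog terms
            except (TypeError, ValueError):
                warnings.append("%s, line %s: node '%s' is not well formed"
                                % (fn, count, node))
                continue
            if bin_n not in known_nodes:
                warnings.append("%s, line %s: tag '%s' refers to unknown node"
                                % (fn, count, key))
                continue
            cache[key] = bin_n

Because cache entries are simply overwritten, files read later win, which is why the heads are read in order ending with the tip and localtags come last.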
312 313 def _hgtagsnodes(self):
313 314 heads = self.heads()
314 315 heads.reverse()
315 316 last = {}
316 317 ret = []
317 318 for node in heads:
318 319 c = self.changectx(node)
319 320 rev = c.rev()
320 321 try:
321 322 fnode = c.filenode('.hgtags')
322 323 except repo.LookupError:
323 324 continue
324 325 ret.append((rev, node, fnode))
325 326 if fnode in last:
326 327 ret[last[fnode]] = None
327 328 last[fnode] = len(ret) - 1
328 329 return [item for item in ret if item]
329 330
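_hgtagsnodes() above visits heads in ascending revision order and keeps only the last occurrence of each distinct .hgtags filenode, so a tags file shared by several heads is read once. A standalone sketch of that de-duplication over (rev, node, fnode) tuples:

    def dedup_last(entries):
        last = {}
        ret = []
        for rev, node, fnode in entries:
            ret.append((rev, node, fnode))
            if fnode in last:
                ret[last[fnode]] = None  # drop the earlier duplicate
            last[fnode] = len(ret) - 1
        return [item for item in ret if item]

    # two heads sharing fnode 'f1': only the later entry survives
    assert dedup_last([(1, 'n1', 'f1'), (2, 'n2', 'f1')]) == [(2, 'n2', 'f1')]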
330 331 def tagslist(self):
331 332 '''return a list of tags ordered by revision'''
332 333 l = []
333 334 for t, n in self.tags().items():
334 335 try:
335 336 r = self.changelog.rev(n)
336 337 except:
337 338 r = -2 # sort to the beginning of the list if unknown
338 339 l.append((r, t, n))
339 340 l.sort()
340 341 return [(t, n) for r, t, n in l]
341 342
342 343 def nodetags(self, node):
343 344 '''return the tags associated with a node'''
344 345 if not self.nodetagscache:
345 346 self.nodetagscache = {}
346 347 for t, n in self.tags().items():
347 348 self.nodetagscache.setdefault(n, []).append(t)
348 349 return self.nodetagscache.get(node, [])
349 350
350 351 def _branchtags(self):
351 352 partial, last, lrev = self._readbranchcache()
352 353
353 354 tiprev = self.changelog.count() - 1
354 355 if lrev != tiprev:
355 356 self._updatebranchcache(partial, lrev+1, tiprev+1)
356 357 self._writebranchcache(partial, self.changelog.tip(), tiprev)
357 358
358 359 return partial
359 360
360 361 def branchtags(self):
361 362 if self.branchcache is not None:
362 363 return self.branchcache
363 364
364 365 self.branchcache = {} # avoid recursion in changectx
365 366 partial = self._branchtags()
366 367
367 368 # the branch cache is stored on disk as UTF-8, but in the local
368 369 # charset internally
369 370 for k, v in partial.items():
370 371 self.branchcache[util.tolocal(k)] = v
371 372 return self.branchcache
372 373
373 374 def _readbranchcache(self):
374 375 partial = {}
375 376 try:
376 377 f = self.opener("branch.cache")
377 378 lines = f.read().split('\n')
378 379 f.close()
379 380 last, lrev = lines.pop(0).split(" ", 1)
380 381 last, lrev = bin(last), int(lrev)
381 382 if not (lrev < self.changelog.count() and
382 383 self.changelog.node(lrev) == last): # sanity check
383 384 # invalidate the cache
384 385 raise ValueError('Invalid branch cache: unknown tip')
385 386 for l in lines:
386 387 if not l: continue
387 388 node, label = l.split(" ", 1)
388 389 partial[label.strip()] = bin(node)
389 390 except (KeyboardInterrupt, util.SignalInterrupt):
390 391 raise
391 392 except Exception, inst:
392 393 if self.ui.debugflag:
393 394 self.ui.warn(str(inst), '\n')
394 395 partial, last, lrev = {}, nullid, nullrev
395 396 return partial, last, lrev
396 397
397 398 def _writebranchcache(self, branches, tip, tiprev):
398 399 try:
399 400 f = self.opener("branch.cache", "w")
400 401 f.write("%s %s\n" % (hex(tip), tiprev))
401 402 for label, node in branches.iteritems():
402 403 f.write("%s %s\n" % (hex(node), label))
403 404 except IOError:
404 405 pass
405 406
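For reference, the branch.cache file written above is line-oriented: the first line records the cached tip as '<hex node> <rev>' (the sanity check _readbranchcache() uses to detect a stale cache), and every further line is one '<hex node> <branch label>' entry. A standalone sketch of the writer, with hex strings standing in for hex(node):

    def format_branch_cache(tip_hex, tiprev, branches):
        lines = ["%s %s" % (tip_hex, tiprev)]
        for label, node_hex in branches.items():
            lines.append("%s %s" % (node_hex, label))
        return "\n".join(lines) + "\n"

    # format_branch_cache('ab' * 20, 41, {'default': 'cd' * 20})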
406 407 def _updatebranchcache(self, partial, start, end):
407 408 for r in xrange(start, end):
408 409 c = self.changectx(r)
409 410 b = c.branch()
410 411 partial[b] = c.node()
411 412
412 413 def lookup(self, key):
413 414 if key == '.':
414 415 key = self.dirstate.parents()[0]
415 416 if key == nullid:
416 417 raise repo.RepoError(_("no revision checked out"))
417 418 elif key == 'null':
418 419 return nullid
419 420 n = self.changelog._match(key)
420 421 if n:
421 422 return n
422 423 if key in self.tags():
423 424 return self.tags()[key]
424 425 if key in self.branchtags():
425 426 return self.branchtags()[key]
426 427 n = self.changelog._partialmatch(key)
427 428 if n:
428 429 return n
429 430 raise repo.RepoError(_("unknown revision '%s'") % key)
430 431
431 432 def dev(self):
432 433 return os.lstat(self.path).st_dev
433 434
434 435 def local(self):
435 436 return True
436 437
437 438 def join(self, f):
438 439 return os.path.join(self.path, f)
439 440
440 441 def sjoin(self, f):
441 442 f = self.encodefn(f)
442 443 return os.path.join(self.spath, f)
443 444
444 445 def wjoin(self, f):
445 446 return os.path.join(self.root, f)
446 447
447 448 def file(self, f):
448 449 if f[0] == '/':
449 450 f = f[1:]
450 451 return filelog.filelog(self.sopener, f, self.revlogversion)
451 452
452 453 def changectx(self, changeid=None):
453 454 return context.changectx(self, changeid)
454 455
455 456 def workingctx(self):
456 457 return context.workingctx(self)
457 458
458 459 def parents(self, changeid=None):
459 460 '''
460 461 get list of changectxs for parents of changeid or working directory
461 462 '''
462 463 if changeid is None:
463 464 pl = self.dirstate.parents()
464 465 else:
465 466 n = self.changelog.lookup(changeid)
466 467 pl = self.changelog.parents(n)
467 468 if pl[1] == nullid:
468 469 return [self.changectx(pl[0])]
469 470 return [self.changectx(pl[0]), self.changectx(pl[1])]
470 471
471 472 def filectx(self, path, changeid=None, fileid=None):
472 473 """changeid can be a changeset revision, node, or tag.
473 474 fileid can be a file revision or node."""
474 475 return context.filectx(self, path, changeid, fileid)
475 476
476 477 def getcwd(self):
477 478 return self.dirstate.getcwd()
478 479
479 480 def wfile(self, f, mode='r'):
480 481 return self.wopener(f, mode)
481 482
482 483 def wread(self, filename):
483 484 if self.encodepats == None:
484 485 l = []
485 486 for pat, cmd in self.ui.configitems("encode"):
486 487 mf = util.matcher(self.root, "", [pat], [], [])[1]
487 488 l.append((mf, cmd))
488 489 self.encodepats = l
489 490
490 491 data = self.wopener(filename, 'r').read()
491 492
492 493 for mf, cmd in self.encodepats:
493 494 if mf(filename):
494 495 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
495 496 data = util.filter(data, cmd)
496 497 break
497 498
498 499 return data
499 500
500 501 def wwrite(self, filename, data, fd=None):
501 502 if self.decodepats == None:
502 503 l = []
503 504 for pat, cmd in self.ui.configitems("decode"):
504 505 mf = util.matcher(self.root, "", [pat], [], [])[1]
505 506 l.append((mf, cmd))
506 507 self.decodepats = l
507 508
508 509 for mf, cmd in self.decodepats:
509 510 if mf(filename):
510 511 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
511 512 data = util.filter(data, cmd)
512 513 break
513 514
514 515 if fd:
515 516 return fd.write(data)
516 517 return self.wopener(filename, 'w').write(data)
517 518
518 519 def transaction(self):
519 520 tr = self.transhandle
520 521 if tr != None and tr.running():
521 522 return tr.nest()
522 523
523 524 # save dirstate for rollback
524 525 try:
525 526 ds = self.opener("dirstate").read()
526 527 except IOError:
527 528 ds = ""
528 529 self.opener("journal.dirstate", "w").write(ds)
529 530
530 531 renames = [(self.sjoin("journal"), self.sjoin("undo")),
531 532 (self.join("journal.dirstate"), self.join("undo.dirstate"))]
532 533 tr = transaction.transaction(self.ui.warn, self.sopener,
533 534 self.sjoin("journal"),
534 535 aftertrans(renames))
535 536 self.transhandle = tr
536 537 return tr
537 538
538 539 def recover(self):
539 540 l = self.lock()
540 541 if os.path.exists(self.sjoin("journal")):
541 542 self.ui.status(_("rolling back interrupted transaction\n"))
542 543 transaction.rollback(self.sopener, self.sjoin("journal"))
543 544 self.reload()
544 545 return True
545 546 else:
546 547 self.ui.warn(_("no interrupted transaction available\n"))
547 548 return False
548 549
549 550 def rollback(self, wlock=None):
550 551 if not wlock:
551 552 wlock = self.wlock()
552 553 l = self.lock()
553 554 if os.path.exists(self.sjoin("undo")):
554 555 self.ui.status(_("rolling back last transaction\n"))
555 556 transaction.rollback(self.sopener, self.sjoin("undo"))
556 557 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
557 558 self.reload()
558 559 self.wreload()
559 560 else:
560 561 self.ui.warn(_("no rollback information available\n"))
561 562
562 563 def wreload(self):
563 564 self.dirstate.read()
564 565
565 566 def reload(self):
566 567 self.changelog.load()
567 568 self.manifest.load()
568 569 self.tagscache = None
569 570 self.nodetagscache = None
570 571
571 572 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
572 573 desc=None):
573 574 try:
574 575 l = lock.lock(lockname, 0, releasefn, desc=desc)
575 576 except lock.LockHeld, inst:
576 577 if not wait:
577 578 raise
578 579 self.ui.warn(_("waiting for lock on %s held by %r\n") %
579 580 (desc, inst.locker))
580 581 # default to 600 seconds timeout
581 582 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
582 583 releasefn, desc=desc)
583 584 if acquirefn:
584 585 acquirefn()
585 586 return l
586 587
587 588 def lock(self, wait=1):
588 589 return self.do_lock(self.sjoin("lock"), wait, acquirefn=self.reload,
589 590 desc=_('repository %s') % self.origroot)
590 591
591 592 def wlock(self, wait=1):
592 593 return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
593 594 self.wreload,
594 595 desc=_('working directory of %s') % self.origroot)
595 596
596 597 def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
597 598 """
598 599 commit an individual file as part of a larger transaction
599 600 """
600 601
601 602 t = self.wread(fn)
602 603 fl = self.file(fn)
603 604 fp1 = manifest1.get(fn, nullid)
604 605 fp2 = manifest2.get(fn, nullid)
605 606
606 607 meta = {}
607 608 cp = self.dirstate.copied(fn)
608 609 if cp:
609 610 # Mark the new revision of this file as a copy of another
610 611 # file. This copy data will effectively act as a parent
611 612 # of this new revision. If this is a merge, the first
612 613 # parent will be the nullid (meaning "look up the copy data")
613 614 # and the second one will be the other parent. For example:
614 615 #
615 616 # 0 --- 1 --- 3 rev1 changes file foo
616 617 # \ / rev2 renames foo to bar and changes it
617 618 # \- 2 -/ rev3 should have bar with all changes and
618 619 # should record that bar descends from
619 620 # bar in rev2 and foo in rev1
620 621 #
621 622 # this allows this merge to succeed:
622 623 #
623 624 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
624 625 # \ / merging rev3 and rev4 should use bar@rev2
625 626 # \- 2 --- 4 as the merge base
626 627 #
627 628 meta["copy"] = cp
628 629 if not manifest2: # not a branch merge
629 630 meta["copyrev"] = hex(manifest1.get(cp, nullid))
630 631 fp2 = nullid
631 632 elif fp2 != nullid: # copied on remote side
632 633 meta["copyrev"] = hex(manifest1.get(cp, nullid))
633 634 elif fp1 != nullid: # copied on local side, reversed
634 635 meta["copyrev"] = hex(manifest2.get(cp))
635 636 fp2 = fp1
636 637 else: # directory rename
637 638 meta["copyrev"] = hex(manifest1.get(cp, nullid))
638 639 self.ui.debug(_(" %s: copy %s:%s\n") %
639 640 (fn, cp, meta["copyrev"]))
640 641 fp1 = nullid
641 642 elif fp2 != nullid:
642 643 # is one parent an ancestor of the other?
643 644 fpa = fl.ancestor(fp1, fp2)
644 645 if fpa == fp1:
645 646 fp1, fp2 = fp2, nullid
646 647 elif fpa == fp2:
647 648 fp2 = nullid
648 649
649 650 # is the file unmodified from the parent? report existing entry
650 651 if fp2 == nullid and not fl.cmp(fp1, t):
651 652 return fp1
652 653
653 654 changelist.append(fn)
654 655 return fl.add(t, meta, transaction, linkrev, fp1, fp2)
655 656
656 657 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
657 658 if p1 is None:
658 659 p1, p2 = self.dirstate.parents()
659 660 return self.commit(files=files, text=text, user=user, date=date,
660 661 p1=p1, p2=p2, wlock=wlock)
661 662
662 663 def commit(self, files=None, text="", user=None, date=None,
663 664 match=util.always, force=False, lock=None, wlock=None,
664 665 force_editor=False, p1=None, p2=None, extra={}):
665 666
666 667 commit = []
667 668 remove = []
668 669 changed = []
669 670 use_dirstate = (p1 is None) # not rawcommit
670 671 extra = extra.copy()
671 672
672 673 if use_dirstate:
673 674 if files:
674 675 for f in files:
675 676 s = self.dirstate.state(f)
676 677 if s in 'nmai':
677 678 commit.append(f)
678 679 elif s == 'r':
679 680 remove.append(f)
680 681 else:
681 682 self.ui.warn(_("%s not tracked!\n") % f)
682 683 else:
683 684 changes = self.status(match=match)[:5]
684 685 modified, added, removed, deleted, unknown = changes
685 686 commit = modified + added
686 687 remove = removed
687 688 else:
688 689 commit = files
689 690
690 691 if use_dirstate:
691 692 p1, p2 = self.dirstate.parents()
692 693 update_dirstate = True
693 694 else:
694 695 p1, p2 = p1, p2 or nullid
695 696 update_dirstate = (self.dirstate.parents()[0] == p1)
696 697
697 698 c1 = self.changelog.read(p1)
698 699 c2 = self.changelog.read(p2)
699 700 m1 = self.manifest.read(c1[0]).copy()
700 701 m2 = self.manifest.read(c2[0])
701 702
702 703 if use_dirstate:
703 704 branchname = self.workingctx().branch()
704 705 try:
705 706 branchname = branchname.decode('UTF-8').encode('UTF-8')
706 707 except UnicodeDecodeError:
707 708 raise util.Abort(_('branch name not in UTF-8!'))
708 709 else:
709 710 branchname = ""
710 711
711 712 if use_dirstate:
712 713 oldname = c1[5].get("branch") # stored in UTF-8
713 714 if not commit and not remove and not force and p2 == nullid and \
714 715 branchname == oldname:
715 716 self.ui.status(_("nothing changed\n"))
716 717 return None
717 718
718 719 xp1 = hex(p1)
719 720 if p2 == nullid: xp2 = ''
720 721 else: xp2 = hex(p2)
721 722
722 723 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
723 724
724 725 if not wlock:
725 726 wlock = self.wlock()
726 727 if not lock:
727 728 lock = self.lock()
728 729 tr = self.transaction()
729 730
730 731 # check in files
731 732 new = {}
732 733 linkrev = self.changelog.count()
733 734 commit.sort()
734 735 for f in commit:
735 736 self.ui.note(f + "\n")
736 737 try:
737 738 new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
738 739 m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
739 740 except IOError:
740 741 if use_dirstate:
741 742 self.ui.warn(_("trouble committing %s!\n") % f)
742 743 raise
743 744 else:
744 745 remove.append(f)
745 746
746 747 # update manifest
747 748 m1.update(new)
748 749 remove.sort()
749 750
750 751 for f in remove:
751 752 if f in m1:
752 753 del m1[f]
753 754 mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, remove))
754 755
755 756 # add changeset
756 757 new = new.keys()
757 758 new.sort()
758 759
759 760 user = user or self.ui.username()
760 761 if not text or force_editor:
761 762 edittext = []
762 763 if text:
763 764 edittext.append(text)
764 765 edittext.append("")
765 766 edittext.append("HG: user: %s" % user)
766 767 if p2 != nullid:
767 768 edittext.append("HG: branch merge")
768 769 edittext.extend(["HG: changed %s" % f for f in changed])
769 770 edittext.extend(["HG: removed %s" % f for f in remove])
770 771 if not changed and not remove:
771 772 edittext.append("HG: no files changed")
772 773 edittext.append("")
773 774 # run editor in the repository root
774 775 olddir = os.getcwd()
775 776 os.chdir(self.root)
776 777 text = self.ui.edit("\n".join(edittext), user)
777 778 os.chdir(olddir)
778 779
779 780 lines = [line.rstrip() for line in text.rstrip().splitlines()]
780 781 while lines and not lines[0]:
781 782 del lines[0]
782 783 if not lines:
783 784 return None
784 785 text = '\n'.join(lines)
785 786 if branchname:
786 787 extra["branch"] = branchname
787 788 n = self.changelog.add(mn, changed + remove, text, tr, p1, p2,
788 789 user, date, extra)
789 790 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
790 791 parent2=xp2)
791 792 tr.close()
792 793
793 794 if use_dirstate or update_dirstate:
794 795 self.dirstate.setparents(n)
795 796 if use_dirstate:
796 797 self.dirstate.update(new, "n")
797 798 self.dirstate.forget(remove)
798 799
799 800 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
800 801 return n
801 802
802 803 def walk(self, node=None, files=[], match=util.always, badmatch=None):
803 804 '''
804 805 walk recursively through the directory tree or a given
805 806 changeset, finding all files matched by the match
806 807 function
807 808
808 809 results are yielded in a tuple (src, filename), where src
809 810 is one of:
810 811 'f' the file was found in the directory tree
811 812 'm' the file was only in the dirstate and not in the tree
812 813 'b' file was not found and matched badmatch
813 814 '''
814 815
815 816 if node:
816 817 fdict = dict.fromkeys(files)
817 818 for fn in self.manifest.read(self.changelog.read(node)[0]):
818 819 for ffn in fdict:
819 820 # match if the file is the exact name or a directory
820 821 if ffn == fn or fn.startswith("%s/" % ffn):
821 822 del fdict[ffn]
822 823 break
823 824 if match(fn):
824 825 yield 'm', fn
825 826 for fn in fdict:
826 827 if badmatch and badmatch(fn):
827 828 if match(fn):
828 829 yield 'b', fn
829 830 else:
830 831 self.ui.warn(_('%s: No such file in rev %s\n') % (
831 832 util.pathto(self.getcwd(), fn), short(node)))
832 833 else:
833 834 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
834 835 yield src, fn
835 836
836 837 def status(self, node1=None, node2=None, files=[], match=util.always,
837 838 wlock=None, list_ignored=False, list_clean=False):
838 839 """return status of files between two nodes, or between a node and the working directory
839 840
840 841 If node1 is None, use the first dirstate parent instead.
841 842 If node2 is None, compare node1 with working directory.
842 843 """
843 844
844 845 def fcmp(fn, mf):
845 846 t1 = self.wread(fn)
846 847 return self.file(fn).cmp(mf.get(fn, nullid), t1)
847 848
848 849 def mfmatches(node):
849 850 change = self.changelog.read(node)
850 851 mf = self.manifest.read(change[0]).copy()
851 852 for fn in mf.keys():
852 853 if not match(fn):
853 854 del mf[fn]
854 855 return mf
855 856
856 857 modified, added, removed, deleted, unknown = [], [], [], [], []
857 858 ignored, clean = [], []
858 859
859 860 compareworking = False
860 861 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
861 862 compareworking = True
862 863
863 864 if not compareworking:
864 865 # read the manifest from node1 before the manifest from node2,
865 866 # so that we'll hit the manifest cache if we're going through
866 867 # all the revisions in parent->child order.
867 868 mf1 = mfmatches(node1)
868 869
869 870 # are we comparing the working directory?
870 871 if not node2:
871 872 if not wlock:
872 873 try:
873 874 wlock = self.wlock(wait=0)
874 875 except lock.LockException:
875 876 wlock = None
876 877 (lookup, modified, added, removed, deleted, unknown,
877 878 ignored, clean) = self.dirstate.status(files, match,
878 879 list_ignored, list_clean)
879 880
880 881 # are we comparing working dir against its parent?
881 882 if compareworking:
882 883 if lookup:
883 884 # do a full compare of any files that might have changed
884 885 mf2 = mfmatches(self.dirstate.parents()[0])
885 886 for f in lookup:
886 887 if fcmp(f, mf2):
887 888 modified.append(f)
888 889 else:
889 890 clean.append(f)
890 891 if wlock is not None:
891 892 self.dirstate.update([f], "n")
892 893 else:
893 894 # we are comparing working dir against non-parent
894 895 # generate a pseudo-manifest for the working dir
895 896 # XXX: create it in dirstate.py ?
896 897 mf2 = mfmatches(self.dirstate.parents()[0])
897 898 for f in lookup + modified + added:
898 899 mf2[f] = ""
899 900 mf2.set(f, execf=util.is_exec(self.wjoin(f), mf2.execf(f)))
900 901 for f in removed:
901 902 if f in mf2:
902 903 del mf2[f]
903 904 else:
904 905 # we are comparing two revisions
905 906 mf2 = mfmatches(node2)
906 907
907 908 if not compareworking:
908 909 # flush lists from dirstate before comparing manifests
909 910 modified, added, clean = [], [], []
910 911
911 912 # make sure to sort the files so we talk to the disk in a
912 913 # reasonable order
913 914 mf2keys = mf2.keys()
914 915 mf2keys.sort()
915 916 for fn in mf2keys:
916 917 if mf1.has_key(fn):
917 918 if mf1.flags(fn) != mf2.flags(fn) or \
918 919 (mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1))):
919 920 modified.append(fn)
920 921 elif list_clean:
921 922 clean.append(fn)
922 923 del mf1[fn]
923 924 else:
924 925 added.append(fn)
925 926
926 927 removed = mf1.keys()
927 928
928 929 # sort and return results:
929 930 for l in modified, added, removed, deleted, unknown, ignored, clean:
930 931 l.sort()
931 932 return (modified, added, removed, deleted, unknown, ignored, clean)
932 933
933 934 def add(self, list, wlock=None):
934 935 if not wlock:
935 936 wlock = self.wlock()
936 937 for f in list:
937 938 p = self.wjoin(f)
938 939 if not os.path.exists(p):
939 940 self.ui.warn(_("%s does not exist!\n") % f)
940 941 elif not os.path.isfile(p):
941 942 self.ui.warn(_("%s not added: only files supported currently\n")
942 943 % f)
943 944 elif self.dirstate.state(f) in 'an':
944 945 self.ui.warn(_("%s already tracked!\n") % f)
945 946 else:
946 947 self.dirstate.update([f], "a")
947 948
948 949 def forget(self, list, wlock=None):
949 950 if not wlock:
950 951 wlock = self.wlock()
951 952 for f in list:
952 953 if self.dirstate.state(f) not in 'ai':
953 954 self.ui.warn(_("%s not added!\n") % f)
954 955 else:
955 956 self.dirstate.forget([f])
956 957
957 958 def remove(self, list, unlink=False, wlock=None):
958 959 if unlink:
959 960 for f in list:
960 961 try:
961 962 util.unlink(self.wjoin(f))
962 963 except OSError, inst:
963 964 if inst.errno != errno.ENOENT:
964 965 raise
965 966 if not wlock:
966 967 wlock = self.wlock()
967 968 for f in list:
968 969 p = self.wjoin(f)
969 970 if os.path.exists(p):
970 971 self.ui.warn(_("%s still exists!\n") % f)
971 972 elif self.dirstate.state(f) == 'a':
972 973 self.dirstate.forget([f])
973 974 elif f not in self.dirstate:
974 975 self.ui.warn(_("%s not tracked!\n") % f)
975 976 else:
976 977 self.dirstate.update([f], "r")
977 978
978 979 def undelete(self, list, wlock=None):
979 980 p = self.dirstate.parents()[0]
980 981 mn = self.changelog.read(p)[0]
981 982 m = self.manifest.read(mn)
982 983 if not wlock:
983 984 wlock = self.wlock()
984 985 for f in list:
985 986 if self.dirstate.state(f) not in "r":
986 987 self.ui.warn("%s not removed!\n" % f)
987 988 else:
988 989 t = self.file(f).read(m[f])
989 990 self.wwrite(f, t)
990 991 util.set_exec(self.wjoin(f), m.execf(f))
991 992 self.dirstate.update([f], "n")
992 993
993 994 def copy(self, source, dest, wlock=None):
994 995 p = self.wjoin(dest)
995 996 if not os.path.exists(p):
996 997 self.ui.warn(_("%s does not exist!\n") % dest)
997 998 elif not os.path.isfile(p):
998 999 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
999 1000 else:
1000 1001 if not wlock:
1001 1002 wlock = self.wlock()
1002 1003 if self.dirstate.state(dest) == '?':
1003 1004 self.dirstate.update([dest], "a")
1004 1005 self.dirstate.copy(source, dest)
1005 1006
1006 1007 def heads(self, start=None):
1007 1008 heads = self.changelog.heads(start)
1008 1009 # sort the output in rev descending order
1009 1010 heads = [(-self.changelog.rev(h), h) for h in heads]
1010 1011 heads.sort()
1011 1012 return [n for (r, n) in heads]
1012 1013
1013 1014 # branchlookup returns a dict giving a list of branches for
1014 1015 # each head. A branch is defined as the tag of a node or
1015 1016 # the branch of the node's parents. If a node has multiple
1016 1017 # branch tags, tags are eliminated if they are visible from other
1017 1018 # branch tags.
1018 1019 #
1019 1020 # So, for this graph: a->b->c->d->e
1020 1021 # \ /
1021 1022 # aa -----/
1022 1023 # a has tag 2.6.12
1023 1024 # d has tag 2.6.13
1024 1025 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
1025 1026 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
1026 1027 # from the list.
1027 1028 #
1028 1029 # It is possible that more than one head will have the same branch tag.
1029 1030 # callers need to check the result for multiple heads under the same
1030 1031 # branch tag if that is a problem for them (ie checkout of a specific
1031 1032 # branch).
1032 1033 #
1033 1034 # passing in a specific branch will limit the depth of the search
1034 1035 # through the parents. It won't limit the branches returned in the
1035 1036 # result though.
1036 1037 def branchlookup(self, heads=None, branch=None):
1037 1038 if not heads:
1038 1039 heads = self.heads()
1039 1040 headt = [ h for h in heads ]
1040 1041 chlog = self.changelog
1041 1042 branches = {}
1042 1043 merges = []
1043 1044 seenmerge = {}
1044 1045
1045 1046 # traverse the tree once for each head, recording in the branches
1046 1047 # dict which tags are visible from this head. The branches
1047 1048 # dict also records which tags are visible from each tag
1048 1049 # while we traverse.
1049 1050 while headt or merges:
1050 1051 if merges:
1051 1052 n, found = merges.pop()
1052 1053 visit = [n]
1053 1054 else:
1054 1055 h = headt.pop()
1055 1056 visit = [h]
1056 1057 found = [h]
1057 1058 seen = {}
1058 1059 while visit:
1059 1060 n = visit.pop()
1060 1061 if n in seen:
1061 1062 continue
1062 1063 pp = chlog.parents(n)
1063 1064 tags = self.nodetags(n)
1064 1065 if tags:
1065 1066 for x in tags:
1066 1067 if x == 'tip':
1067 1068 continue
1068 1069 for f in found:
1069 1070 branches.setdefault(f, {})[n] = 1
1070 1071 branches.setdefault(n, {})[n] = 1
1071 1072 break
1072 1073 if n not in found:
1073 1074 found.append(n)
1074 1075 if branch in tags:
1075 1076 continue
1076 1077 seen[n] = 1
1077 1078 if pp[1] != nullid and n not in seenmerge:
1078 1079 merges.append((pp[1], [x for x in found]))
1079 1080 seenmerge[n] = 1
1080 1081 if pp[0] != nullid:
1081 1082 visit.append(pp[0])
1082 1083 # traverse the branches dict, eliminating branch tags from each
1083 1084 # head that are visible from another branch tag for that head.
1084 1085 out = {}
1085 1086 viscache = {}
1086 1087 for h in heads:
1087 1088 def visible(node):
1088 1089 if node in viscache:
1089 1090 return viscache[node]
1090 1091 ret = {}
1091 1092 visit = [node]
1092 1093 while visit:
1093 1094 x = visit.pop()
1094 1095 if x in viscache:
1095 1096 ret.update(viscache[x])
1096 1097 elif x not in ret:
1097 1098 ret[x] = 1
1098 1099 if x in branches:
1099 1100 visit[len(visit):] = branches[x].keys()
1100 1101 viscache[node] = ret
1101 1102 return ret
1102 1103 if h not in branches:
1103 1104 continue
1104 1105 # O(n^2), but somewhat limited. This only searches the
1105 1106 # tags visible from a specific head, not all the tags in the
1106 1107 # whole repo.
1107 1108 for b in branches[h]:
1108 1109 vis = False
1109 1110 for bb in branches[h].keys():
1110 1111 if b != bb:
1111 1112 if b in visible(bb):
1112 1113 vis = True
1113 1114 break
1114 1115 if not vis:
1115 1116 l = out.setdefault(h, [])
1116 1117 l[len(l):] = self.nodetags(b)
1117 1118 return out
1118 1119
1119 1120 def branches(self, nodes):
1120 1121 if not nodes:
1121 1122 nodes = [self.changelog.tip()]
1122 1123 b = []
1123 1124 for n in nodes:
1124 1125 t = n
1125 1126 while 1:
1126 1127 p = self.changelog.parents(n)
1127 1128 if p[1] != nullid or p[0] == nullid:
1128 1129 b.append((t, n, p[0], p[1]))
1129 1130 break
1130 1131 n = p[0]
1131 1132 return b
1132 1133
1133 1134 def between(self, pairs):
1134 1135 r = []
1135 1136
1136 1137 for top, bottom in pairs:
1137 1138 n, l, i = top, [], 0
1138 1139 f = 1
1139 1140
1140 1141 while n != bottom:
1141 1142 p = self.changelog.parents(n)[0]
1142 1143 if i == f:
1143 1144 l.append(n)
1144 1145 f = f * 2
1145 1146 n = p
1146 1147 i += 1
1147 1148
1148 1149 r.append(l)
1149 1150
1150 1151 return r
1151 1152
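between() above walks first parents from top toward bottom and keeps each node whose distance from top is a power of two; findincoming() later feeds these samples into its binary search over a branch (the "narrowing" debug messages). A standalone sketch, with first_parent as a dict standing in for self.changelog.parents(n)[0]:

    def sample_between(top, bottom, first_parent):
        n, l, i, f = top, [], 0, 1
        while n != bottom:
            if i == f:  # keep nodes at distance 1, 2, 4, 8, ...
                l.append(n)
                f *= 2
            n = first_parent[n]
            i += 1
        return l

    # linear chain 9 -> 8 -> ... -> 0: samples at distances 1, 2, 4, 8
    chain = dict((k, k - 1) for k in range(1, 10))
    assert sample_between(9, 0, chain) == [8, 7, 5, 1]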
1152 1153 def findincoming(self, remote, base=None, heads=None, force=False):
1153 1154 """Return list of roots of the subsets of missing nodes from remote
1154 1155
1155 1156 If base dict is specified, assume that these nodes and their parents
1156 1157 exist on the remote side and that no child of a node of base exists
1157 1158 in both remote and self.
1158 1159 Furthermore, base will be updated to include the nodes that exist in
1159 1160 both self and remote but have no children existing in both self and remote.
1160 1161 If a list of heads is specified, return only nodes which are heads
1161 1162 or ancestors of these heads.
1162 1163
1163 1164 All the ancestors of base are in self and in remote.
1164 1165 All the descendants of the list returned are missing in self.
1165 1166 (and so we know that the rest of the nodes are missing in remote, see
1166 1167 outgoing)
1167 1168 """
1168 1169 m = self.changelog.nodemap
1169 1170 search = []
1170 1171 fetch = {}
1171 1172 seen = {}
1172 1173 seenbranch = {}
1173 1174 if base == None:
1174 1175 base = {}
1175 1176
1176 1177 if not heads:
1177 1178 heads = remote.heads()
1178 1179
1179 1180 if self.changelog.tip() == nullid:
1180 1181 base[nullid] = 1
1181 1182 if heads != [nullid]:
1182 1183 return [nullid]
1183 1184 return []
1184 1185
1185 1186 # assume we're closer to the tip than the root
1186 1187 # and start by examining the heads
1187 1188 self.ui.status(_("searching for changes\n"))
1188 1189
1189 1190 unknown = []
1190 1191 for h in heads:
1191 1192 if h not in m:
1192 1193 unknown.append(h)
1193 1194 else:
1194 1195 base[h] = 1
1195 1196
1196 1197 if not unknown:
1197 1198 return []
1198 1199
1199 1200 req = dict.fromkeys(unknown)
1200 1201 reqcnt = 0
1201 1202
1202 1203 # search through remote branches
1203 1204 # a 'branch' here is a linear segment of history, with four parts:
1204 1205 # head, root, first parent, second parent
1205 1206 # (a branch always has two parents (or none) by definition)
1206 1207 unknown = remote.branches(unknown)
1207 1208 while unknown:
1208 1209 r = []
1209 1210 while unknown:
1210 1211 n = unknown.pop(0)
1211 1212 if n[0] in seen:
1212 1213 continue
1213 1214
1214 1215 self.ui.debug(_("examining %s:%s\n")
1215 1216 % (short(n[0]), short(n[1])))
1216 1217 if n[0] == nullid: # found the end of the branch
1217 1218 pass
1218 1219 elif n in seenbranch:
1219 1220 self.ui.debug(_("branch already found\n"))
1220 1221 continue
1221 1222 elif n[1] and n[1] in m: # do we know the base?
1222 1223 self.ui.debug(_("found incomplete branch %s:%s\n")
1223 1224 % (short(n[0]), short(n[1])))
1224 1225 search.append(n) # schedule branch range for scanning
1225 1226 seenbranch[n] = 1
1226 1227 else:
1227 1228 if n[1] not in seen and n[1] not in fetch:
1228 1229 if n[2] in m and n[3] in m:
1229 1230 self.ui.debug(_("found new changeset %s\n") %
1230 1231 short(n[1]))
1231 1232 fetch[n[1]] = 1 # earliest unknown
1232 1233 for p in n[2:4]:
1233 1234 if p in m:
1234 1235 base[p] = 1 # latest known
1235 1236
1236 1237 for p in n[2:4]:
1237 1238 if p not in req and p not in m:
1238 1239 r.append(p)
1239 1240 req[p] = 1
1240 1241 seen[n[0]] = 1
1241 1242
1242 1243 if r:
1243 1244 reqcnt += 1
1244 1245 self.ui.debug(_("request %d: %s\n") %
1245 1246 (reqcnt, " ".join(map(short, r))))
1246 1247 for p in xrange(0, len(r), 10):
1247 1248 for b in remote.branches(r[p:p+10]):
1248 1249 self.ui.debug(_("received %s:%s\n") %
1249 1250 (short(b[0]), short(b[1])))
1250 1251 unknown.append(b)
1251 1252
1252 1253 # do binary search on the branches we found
1253 1254 while search:
1254 1255 n = search.pop(0)
1255 1256 reqcnt += 1
1256 1257 l = remote.between([(n[0], n[1])])[0]
1257 1258 l.append(n[1])
1258 1259 p = n[0]
1259 1260 f = 1
1260 1261 for i in l:
1261 1262 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1262 1263 if i in m:
1263 1264 if f <= 2:
1264 1265 self.ui.debug(_("found new branch changeset %s\n") %
1265 1266 short(p))
1266 1267 fetch[p] = 1
1267 1268 base[i] = 1
1268 1269 else:
1269 1270 self.ui.debug(_("narrowed branch search to %s:%s\n")
1270 1271 % (short(p), short(i)))
1271 1272 search.append((p, i))
1272 1273 break
1273 1274 p, f = i, f * 2
1274 1275
1275 1276 # sanity check our fetch list
1276 1277 for f in fetch.keys():
1277 1278 if f in m:
1278 1279 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1279 1280
1280 1281 if base.keys() == [nullid]:
1281 1282 if force:
1282 1283 self.ui.warn(_("warning: repository is unrelated\n"))
1283 1284 else:
1284 1285 raise util.Abort(_("repository is unrelated"))
1285 1286
1286 1287 self.ui.debug(_("found new changesets starting at ") +
1287 1288 " ".join([short(f) for f in fetch]) + "\n")
1288 1289
1289 1290 self.ui.debug(_("%d total queries\n") % reqcnt)
1290 1291
1291 1292 return fetch.keys()
1292 1293
1293 1294 def findoutgoing(self, remote, base=None, heads=None, force=False):
1294 1295 """Return list of nodes that are roots of subsets not in remote
1295 1296
1296 1297 If base dict is specified, assume that these nodes and their parents
1297 1298 exist on the remote side.
1298 1299 If a list of heads is specified, return only nodes which are heads
1299 1300 or ancestors of these heads, and return a second element which
1300 1301 contains all remote heads which get new children.
1301 1302 """
1302 1303 if base == None:
1303 1304 base = {}
1304 1305 self.findincoming(remote, base, heads, force=force)
1305 1306
1306 1307 self.ui.debug(_("common changesets up to ")
1307 1308 + " ".join(map(short, base.keys())) + "\n")
1308 1309
1309 1310 remain = dict.fromkeys(self.changelog.nodemap)
1310 1311
1311 1312 # prune everything remote has from the tree
1312 1313 del remain[nullid]
1313 1314 remove = base.keys()
1314 1315 while remove:
1315 1316 n = remove.pop(0)
1316 1317 if n in remain:
1317 1318 del remain[n]
1318 1319 for p in self.changelog.parents(n):
1319 1320 remove.append(p)
1320 1321
1321 1322 # find every node whose parents have been pruned
1322 1323 subset = []
1323 1324 # find every remote head that will get new children
1324 1325 updated_heads = {}
1325 1326 for n in remain:
1326 1327 p1, p2 = self.changelog.parents(n)
1327 1328 if p1 not in remain and p2 not in remain:
1328 1329 subset.append(n)
1329 1330 if heads:
1330 1331 if p1 in heads:
1331 1332 updated_heads[p1] = True
1332 1333 if p2 in heads:
1333 1334 updated_heads[p2] = True
1334 1335
1335 1336 # this is the set of all roots we have to push
1336 1337 if heads:
1337 1338 return subset, updated_heads.keys()
1338 1339 else:
1339 1340 return subset
1340 1341
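The pruning loop in findoutgoing() above starts from the common bases and walks parent links, deleting everything it reaches from remain; whatever survives is not an ancestor of any base and is therefore missing on the remote. A standalone sketch, with parents as a dict from node to its parent list (nullid entries omitted):

    def prune_known(all_nodes, bases, parents):
        remain = dict.fromkeys(all_nodes)
        remove = list(bases)
        while remove:
            n = remove.pop(0)
            if n in remain:
                del remain[n]
                remove.extend(parents.get(n, []))
        return remain

    # chain a <- b <- c with common base b: only c remains to push
    assert sorted(prune_known('abc', ['b'], {'b': ['a'], 'c': ['b']})) == ['c']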
1341 1342 def pull(self, remote, heads=None, force=False, lock=None):
1342 1343 mylock = False
1343 1344 if not lock:
1344 1345 lock = self.lock()
1345 1346 mylock = True
1346 1347
1347 1348 try:
1348 1349 fetch = self.findincoming(remote, force=force)
1349 1350 if fetch == [nullid]:
1350 1351 self.ui.status(_("requesting all changes\n"))
1351 1352
1352 1353 if not fetch:
1353 1354 self.ui.status(_("no changes found\n"))
1354 1355 return 0
1355 1356
1356 1357 if heads is None:
1357 1358 cg = remote.changegroup(fetch, 'pull')
1358 1359 else:
1359 1360 if 'changegroupsubset' not in remote.capabilities:
1360 1361 raise util.Abort(_("Partial pull cannot be done because the other repository doesn't support changegroupsubset."))
1361 1362 cg = remote.changegroupsubset(fetch, heads, 'pull')
1362 1363 return self.addchangegroup(cg, 'pull', remote.url())
1363 1364 finally:
1364 1365 if mylock:
1365 1366 lock.release()
1366 1367
1367 1368 def push(self, remote, force=False, revs=None):
1368 1369 # there are two ways to push to remote repo:
1369 1370 #
1370 1371 # addchangegroup assumes local user can lock remote
1371 1372 # repo (local filesystem, old ssh servers).
1372 1373 #
1373 1374 # unbundle assumes local user cannot lock remote repo (new ssh
1374 1375 # servers, http servers).
1375 1376
1376 1377 if remote.capable('unbundle'):
1377 1378 return self.push_unbundle(remote, force, revs)
1378 1379 return self.push_addchangegroup(remote, force, revs)
1379 1380
1380 1381 def prepush(self, remote, force, revs):
1381 1382 base = {}
1382 1383 remote_heads = remote.heads()
1383 1384 inc = self.findincoming(remote, base, remote_heads, force=force)
1384 1385
1385 1386 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1386 1387 if revs is not None:
1387 1388 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1388 1389 else:
1389 1390 bases, heads = update, self.changelog.heads()
1390 1391
1391 1392 if not bases:
1392 1393 self.ui.status(_("no changes found\n"))
1393 1394 return None, 1
1394 1395 elif not force:
1395 1396 # check if we're creating new remote heads
1396 1397 # to be a remote head after push, node must be either
1397 1398 # - unknown locally
1398 1399 # - a local outgoing head descended from update
1399 1400 # - a remote head that's known locally and not
1400 1401 # ancestral to an outgoing head
1401 1402
1402 1403 warn = 0
1403 1404
1404 1405 if remote_heads == [nullid]:
1405 1406 warn = 0
1406 1407 elif not revs and len(heads) > len(remote_heads):
1407 1408 warn = 1
1408 1409 else:
1409 1410 newheads = list(heads)
1410 1411 for r in remote_heads:
1411 1412 if r in self.changelog.nodemap:
1412 1413 desc = self.changelog.heads(r, heads)
1413 1414 l = [h for h in heads if h in desc]
1414 1415 if not l:
1415 1416 newheads.append(r)
1416 1417 else:
1417 1418 newheads.append(r)
1418 1419 if len(newheads) > len(remote_heads):
1419 1420 warn = 1
1420 1421
1421 1422 if warn:
1422 1423 self.ui.warn(_("abort: push creates new remote branches!\n"))
1423 1424 self.ui.status(_("(did you forget to merge?"
1424 1425 " use push -f to force)\n"))
1425 1426 return None, 1
1426 1427 elif inc:
1427 1428 self.ui.warn(_("note: unsynced remote changes!\n"))
1428 1429
1429 1430
1430 1431 if revs is None:
1431 1432 cg = self.changegroup(update, 'push')
1432 1433 else:
1433 1434 cg = self.changegroupsubset(update, revs, 'push')
1434 1435 return cg, remote_heads
1435 1436
1436 1437 def push_addchangegroup(self, remote, force, revs):
1437 1438 lock = remote.lock()
1438 1439
1439 1440 ret = self.prepush(remote, force, revs)
1440 1441 if ret[0] is not None:
1441 1442 cg, remote_heads = ret
1442 1443 return remote.addchangegroup(cg, 'push', self.url())
1443 1444 return ret[1]
1444 1445
1445 1446 def push_unbundle(self, remote, force, revs):
1446 1447 # local repo finds heads on server, finds out what revs it
1447 1448 # must push. once revs transferred, if server finds it has
1448 1449 # different heads (someone else won commit/push race), server
1449 1450 # aborts.
1450 1451
1451 1452 ret = self.prepush(remote, force, revs)
1452 1453 if ret[0] is not None:
1453 1454 cg, remote_heads = ret
1454 1455 if force: remote_heads = ['force']
1455 1456 return remote.unbundle(cg, remote_heads, 'push')
1456 1457 return ret[1]
1457 1458
1458 1459 def changegroupinfo(self, nodes):
1459 1460 self.ui.note(_("%d changesets found\n") % len(nodes))
1460 1461 if self.ui.debugflag:
1461 1462 self.ui.debug(_("List of changesets:\n"))
1462 1463 for node in nodes:
1463 1464 self.ui.debug("%s\n" % hex(node))
1464 1465
1465 1466 def changegroupsubset(self, bases, heads, source):
1466 1467 """This function generates a changegroup consisting of all the nodes
1467 1468 that are descendants of any of the bases, and ancestors of any of
1468 1469 the heads.
1469 1470
1470 1471 It is fairly complex as determining which filenodes and which
1471 1472 manifest nodes need to be included for the changeset to be complete
1472 1473 is non-trivial.
1473 1474
1474 1475 Another wrinkle is doing the reverse, figuring out which changeset in
1475 1476 the changegroup a particular filenode or manifestnode belongs to."""
1476 1477
1477 1478 self.hook('preoutgoing', throw=True, source=source)
1478 1479
1479 1480 # Set up some initial variables
1480 1481 # Make it easy to refer to self.changelog
1481 1482 cl = self.changelog
1482 1483 # msng is short for missing - compute the list of changesets in this
1483 1484 # changegroup.
1484 1485 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1485 1486 self.changegroupinfo(msng_cl_lst)
1486 1487 # Some bases may turn out to be superfluous, and some heads may be
1487 1488 # too. nodesbetween will return the minimal set of bases and heads
1488 1489 # necessary to re-create the changegroup.
1489 1490
1490 1491 # Known heads are the list of heads that it is assumed the recipient
1491 1492 # of this changegroup will know about.
1492 1493 knownheads = {}
1493 1494 # We assume that all parents of bases are known heads.
1494 1495 for n in bases:
1495 1496 for p in cl.parents(n):
1496 1497 if p != nullid:
1497 1498 knownheads[p] = 1
1498 1499 knownheads = knownheads.keys()
1499 1500 if knownheads:
1500 1501 # Now that we know what heads are known, we can compute which
1501 1502 # changesets are known. The recipient must know about all
1502 1503 # changesets required to reach the known heads from the null
1503 1504 # changeset.
1504 1505 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1505 1506 junk = None
1506 1507 # Transform the list into an ersatz set.
1507 1508 has_cl_set = dict.fromkeys(has_cl_set)
1508 1509 else:
1509 1510 # If there were no known heads, the recipient cannot be assumed to
1510 1511 # know about any changesets.
1511 1512 has_cl_set = {}
1512 1513
1513 1514 # Make it easy to refer to self.manifest
1514 1515 mnfst = self.manifest
1515 1516 # We don't know which manifests are missing yet
1516 1517 msng_mnfst_set = {}
1517 1518 # Nor do we know which filenodes are missing.
1518 1519 msng_filenode_set = {}
1519 1520
1520 1521 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1521 1522 junk = None
1522 1523
1523 1524 # A changeset always belongs to itself, so the changenode lookup
1524 1525 # function for a changenode is identity.
1525 1526 def identity(x):
1526 1527 return x
1527 1528
1528 1529 # A function generating function. Sets up an environment for the
1529 1530 # inner function.
1530 1531 def cmp_by_rev_func(revlog):
1531 1532 # Compare two nodes by their revision number in the environment's
1532 1533 # revision history. Since the revision number both represents the
1533 1534 # most efficient order to read the nodes in, and represents a
1534 1535 # topological sorting of the nodes, this function is often useful.
1535 1536 def cmp_by_rev(a, b):
1536 1537 return cmp(revlog.rev(a), revlog.rev(b))
1537 1538 return cmp_by_rev
1538 1539
1539 1540 # If we determine that a particular file or manifest node must be a
1540 1541 # node that the recipient of the changegroup will already have, we can
1541 1542 # also assume the recipient will have all the parents. This function
1542 1543 # prunes them from the set of missing nodes.
1543 1544 def prune_parents(revlog, hasset, msngset):
1544 1545 haslst = hasset.keys()
1545 1546 haslst.sort(cmp_by_rev_func(revlog))
1546 1547 for node in haslst:
1547 1548 parentlst = [p for p in revlog.parents(node) if p != nullid]
1548 1549 while parentlst:
1549 1550 n = parentlst.pop()
1550 1551 if n not in hasset:
1551 1552 hasset[n] = 1
1552 1553 p = [p for p in revlog.parents(n) if p != nullid]
1553 1554 parentlst.extend(p)
1554 1555 for n in hasset:
1555 1556 msngset.pop(n, None)
1556 1557
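prune_parents() above encodes the rule that once a node is known to exist on the receiving side, all of its ancestors must exist there too, so they can all be dropped from the missing set. A standalone sketch (parents maps node -> parent list with nullid omitted; the revision-order sort in the original is just an efficiency measure):

    def prune_parents_sketch(parents, hasset, msngset):
        for node in list(hasset):
            stack = list(parents.get(node, []))
            while stack:
                n = stack.pop()
                if n not in hasset:
                    hasset[n] = 1
                    stack.extend(parents.get(n, []))
        for n in hasset:
            msngset.pop(n, None)

    # b is known to exist and descends from a: both leave the missing set
    has, msng = {'b': 1}, {'a': 1, 'b': 1, 'c': 1}
    prune_parents_sketch({'b': ['a']}, has, msng)
    assert msng == {'c': 1}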
1557 1558 # This is a function generating function used to set up an environment
1558 1559 # for the inner function to execute in.
1559 1560 def manifest_and_file_collector(changedfileset):
1560 1561 # This is an information gathering function that gathers
1561 1562 # information from each changeset node that goes out as part of
1562 1563 # the changegroup. The information gathered is a list of which
1563 1564 # manifest nodes are potentially required (the recipient may
1564 1565 # already have them) and total list of all files which were
1565 1566 # changed in any changeset in the changegroup.
1566 1567 #
1567 1568 # We also remember the first changenode we saw any manifest
1568 1569 # referenced by so we can later determine which changenode 'owns'
1569 1570 # the manifest.
1570 1571 def collect_manifests_and_files(clnode):
1571 1572 c = cl.read(clnode)
1572 1573 for f in c[3]:
1573 1574 # This is to make sure we only have one instance of each
1574 1575 # filename string for each filename.
1575 1576 changedfileset.setdefault(f, f)
1576 1577 msng_mnfst_set.setdefault(c[0], clnode)
1577 1578 return collect_manifests_and_files
1578 1579
1579 1580 # Figure out which manifest nodes (of the ones we think might be part
1580 1581 # of the changegroup) the recipient must know about and remove them
1581 1582 # from the changegroup.
1582 1583 def prune_manifests():
1583 1584 has_mnfst_set = {}
1584 1585 for n in msng_mnfst_set:
1585 1586 # If a 'missing' manifest thinks it belongs to a changenode
1586 1587 # the recipient is assumed to have, obviously the recipient
1587 1588 # must have that manifest.
1588 1589 linknode = cl.node(mnfst.linkrev(n))
1589 1590 if linknode in has_cl_set:
1590 1591 has_mnfst_set[n] = 1
1591 1592 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1592 1593
1593 1594 # Use the information collected in collect_manifests_and_files to say
1594 1595 # which changenode any manifestnode belongs to.
1595 1596 def lookup_manifest_link(mnfstnode):
1596 1597 return msng_mnfst_set[mnfstnode]
1597 1598
1598 1599 # A function generating function that sets up the initial environment
1599 1600 # for the inner function.
1600 1601 def filenode_collector(changedfiles):
1601 1602 next_rev = [0]
1602 1603 # This gathers information from each manifestnode included in the
1603 1604 # changegroup about which filenodes the manifest node references
1604 1605 # so we can include those in the changegroup too.
1605 1606 #
1606 1607 # It also remembers which changenode each filenode belongs to. It
1607 1608 # does this by assuming that a filenode belongs to the changenode
1608 1609 # that the first manifest referencing it belongs to.
1609 1610 def collect_msng_filenodes(mnfstnode):
1610 1611 r = mnfst.rev(mnfstnode)
1611 1612 if r == next_rev[0]:
1612 1613 # If the last rev we looked at was the one just previous,
1613 1614 # we only need to see a diff.
1614 1615 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1615 1616 # For each line in the delta
1616 1617 for dline in delta.splitlines():
1617 1618 # get the filename and filenode for that line
1618 1619 f, fnode = dline.split('\0')
1619 1620 fnode = bin(fnode[:40])
1620 1621 f = changedfiles.get(f, None)
1621 1622 # And if the file is in the list of files we care
1622 1623 # about.
1623 1624 if f is not None:
1624 1625 # Get the changenode this manifest belongs to
1625 1626 clnode = msng_mnfst_set[mnfstnode]
1626 1627 # Create the set of filenodes for the file if
1627 1628 # there isn't one already.
1628 1629 ndset = msng_filenode_set.setdefault(f, {})
1629 1630 # And set the filenode's changelog node to the
1630 1631 # manifest's if it hasn't been set already.
1631 1632 ndset.setdefault(fnode, clnode)
1632 1633 else:
1633 1634 # Otherwise we need a full manifest.
1634 1635 m = mnfst.read(mnfstnode)
1635 1636 # For every file we care about.
1636 1637 for f in changedfiles:
1637 1638 fnode = m.get(f, None)
1638 1639 # If it's in the manifest
1639 1640 if fnode is not None:
1640 1641 # See comments above.
1641 1642 clnode = msng_mnfst_set[mnfstnode]
1642 1643 ndset = msng_filenode_set.setdefault(f, {})
1643 1644 ndset.setdefault(fnode, clnode)
1644 1645 # Remember the revision we hope to see next.
1645 1646 next_rev[0] = r + 1
1646 1647 return collect_msng_filenodes
1647 1648
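# Sketch of the manifest line format parsed above (reconstructed from the
# parsing code, assuming the v1 text manifest): each line is a filename,
# a NUL byte, the 40-character hex filenode, and optionally trailing flag
# characters such as 'x' - which is why only fnode[:40] is converted.
#
#   line = 'src/main.c\x00' + 'ab' * 20 + 'x'
#   f, fnode = line.split('\0')
#   raw = bin(fnode[:40])  # 20-byte binary node; the 'x' flag is ignored
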
1648 1649 # We have a list of filenodes we think we need for a file; let's remove
1649 1650 # all those we know the recipient must have.
1650 1651 def prune_filenodes(f, filerevlog):
1651 1652 msngset = msng_filenode_set[f]
1652 1653 hasset = {}
1653 1654 # If a 'missing' filenode thinks it belongs to a changenode we
1654 1655 # assume the recipient must have, then the recipient must have
1655 1656 # that filenode.
1656 1657 for n in msngset:
1657 1658 clnode = cl.node(filerevlog.linkrev(n))
1658 1659 if clnode in has_cl_set:
1659 1660 hasset[n] = 1
1660 1661 prune_parents(filerevlog, hasset, msngset)
1661 1662
1662 1663 # A function generating function that sets up a context for the
1663 1664 # inner function.
1664 1665 def lookup_filenode_link_func(fname):
1665 1666 msngset = msng_filenode_set[fname]
1666 1667 # Lookup the changenode the filenode belongs to.
1667 1668 def lookup_filenode_link(fnode):
1668 1669 return msngset[fnode]
1669 1670 return lookup_filenode_link
1670 1671
1671 1672 # Now that we have all these utility functions to help out and
1672 1673 # logically divide up the task, generate the group.
1673 1674 def gengroup():
1674 1675 # The set of changed files starts empty.
1675 1676 changedfiles = {}
1676 1677 # Create a changenode group generator that will call our functions
1677 1678 # back to lookup the owning changenode and collect information.
1678 1679 group = cl.group(msng_cl_lst, identity,
1679 1680 manifest_and_file_collector(changedfiles))
1680 1681 for chnk in group:
1681 1682 yield chnk
1682 1683
1683 1684 # The list of manifests has been collected by the generator
1684 1685 # calling our functions back.
1685 1686 prune_manifests()
1686 1687 msng_mnfst_lst = msng_mnfst_set.keys()
1687 1688 # Sort the manifestnodes by revision number.
1688 1689 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1689 1690 # Create a generator for the manifestnodes that calls our lookup
1690 1691 # and data collection functions back.
1691 1692 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1692 1693 filenode_collector(changedfiles))
1693 1694 for chnk in group:
1694 1695 yield chnk
1695 1696
1696 1697 # These are no longer needed, dereference and toss the memory for
1697 1698 # them.
1698 1699 msng_mnfst_lst = None
1699 1700 msng_mnfst_set.clear()
1700 1701
1701 1702 changedfiles = changedfiles.keys()
1702 1703 changedfiles.sort()
1703 1704 # Go through all our files, sorted by name.
1704 1705 for fname in changedfiles:
1705 1706 filerevlog = self.file(fname)
1706 1707 # Toss out the filenodes that the recipient isn't really
1707 1708 # missing.
1708 1709 if msng_filenode_set.has_key(fname):
1709 1710 prune_filenodes(fname, filerevlog)
1710 1711 msng_filenode_lst = msng_filenode_set[fname].keys()
1711 1712 else:
1712 1713 msng_filenode_lst = []
1713 1714 # If any filenodes are left, generate the group for them,
1714 1715 # otherwise don't bother.
1715 1716 if len(msng_filenode_lst) > 0:
1716 1717 yield changegroup.genchunk(fname)
1717 1718 # Sort the filenodes by their revision #
1718 1719 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1719 1720 # Create a group generator and only pass in a changenode
1720 1721 # lookup function as we need to collect no information
1721 1722 # from filenodes.
1722 1723 group = filerevlog.group(msng_filenode_lst,
1723 1724 lookup_filenode_link_func(fname))
1724 1725 for chnk in group:
1725 1726 yield chnk
1726 1727 if msng_filenode_set.has_key(fname):
1727 1728 # Don't need this anymore, toss it to free memory.
1728 1729 del msng_filenode_set[fname]
1729 1730 # Signal that no more groups are left.
1730 1731 yield changegroup.closechunk()
1731 1732
1732 1733 if msng_cl_lst:
1733 1734 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1734 1735
1735 1736 return util.chunkbuffer(gengroup())
1736 1737
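# For orientation (a reconstruction from the code above, not original
# commentary), the byte stream that gengroup() yields looks roughly like:
#
#   changelog chunks ...... zero-length chunk (end of group)
#   manifest chunks ....... zero-length chunk
#   for each changed file:
#       chunk(filename)
#       filelog chunks .... zero-length chunk
#   zero-length chunk (no more files)
#
# Every chunk is length-prefixed by changegroup.genchunk(), and
# changegroup.closechunk() emits the zero-length terminator.
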
1737 1738 def changegroup(self, basenodes, source):
1738 1739 """Generate a changegroup of all nodes that we have that a recipient
1739 1740 doesn't.
1740 1741
1741 1742 This is much easier than the previous function as we can assume that
1742 1743 the recipient has any changenode we aren't sending them."""
1743 1744
1744 1745 self.hook('preoutgoing', throw=True, source=source)
1745 1746
1746 1747 cl = self.changelog
1747 1748 nodes = cl.nodesbetween(basenodes, None)[0]
1748 1749 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1749 1750 self.changegroupinfo(nodes)
1750 1751
1751 1752 def identity(x):
1752 1753 return x
1753 1754
1754 1755 def gennodelst(revlog):
1755 1756 for r in xrange(0, revlog.count()):
1756 1757 n = revlog.node(r)
1757 1758 if revlog.linkrev(n) in revset:
1758 1759 yield n
1759 1760
1760 1761 def changed_file_collector(changedfileset):
1761 1762 def collect_changed_files(clnode):
1762 1763 c = cl.read(clnode)
1763 1764 for fname in c[3]:
1764 1765 changedfileset[fname] = 1
1765 1766 return collect_changed_files
1766 1767
1767 1768 def lookuprevlink_func(revlog):
1768 1769 def lookuprevlink(n):
1769 1770 return cl.node(revlog.linkrev(n))
1770 1771 return lookuprevlink
1771 1772
1772 1773 def gengroup():
1773 1774 # construct a list of all changed files
1774 1775 changedfiles = {}
1775 1776
1776 1777 for chnk in cl.group(nodes, identity,
1777 1778 changed_file_collector(changedfiles)):
1778 1779 yield chnk
1779 1780 changedfiles = changedfiles.keys()
1780 1781 changedfiles.sort()
1781 1782
1782 1783 mnfst = self.manifest
1783 1784 nodeiter = gennodelst(mnfst)
1784 1785 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1785 1786 yield chnk
1786 1787
1787 1788 for fname in changedfiles:
1788 1789 filerevlog = self.file(fname)
1789 1790 nodeiter = gennodelst(filerevlog)
1790 1791 nodeiter = list(nodeiter)
1791 1792 if nodeiter:
1792 1793 yield changegroup.genchunk(fname)
1793 1794 lookup = lookuprevlink_func(filerevlog)
1794 1795 for chnk in filerevlog.group(nodeiter, lookup):
1795 1796 yield chnk
1796 1797
1797 1798 yield changegroup.closechunk()
1798 1799
1799 1800 if nodes:
1800 1801 self.hook('outgoing', node=hex(nodes[0]), source=source)
1801 1802
1802 1803 return util.chunkbuffer(gengroup())
1803 1804
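# Usage sketch (hypothetical caller, for illustration only):
#
#   cg = repo.changegroup(remote.heads(), 'push')
#   # ... ship cg's chunks to the remote, which applies them via
#   # its own addchangegroup(cg, 'push', url) ...
#
# changegroup() may assume the recipient already has every changeset we
# are not sending; changegroupsubset() above cannot, and so must prune
# nodes the recipient provably has.
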
1804 1805 def addchangegroup(self, source, srctype, url):
1805 1806 """add changegroup to repo.
1806 1807
1807 1808 return values:
1808 1809 - nothing changed or no source: 0
1809 1810 - more heads than before: 1+added heads (2..n)
1810 1811 - fewer heads than before: -1-removed heads (-2..-n)
1811 1812 - number of heads stays the same: 1
1812 1813 """
1813 1814 def csmap(x):
1814 1815 self.ui.debug(_("add changeset %s\n") % short(x))
1815 1816 return cl.count()
1816 1817
1817 1818 def revmap(x):
1818 1819 return cl.rev(x)
1819 1820
1820 1821 if not source:
1821 1822 return 0
1822 1823
1823 1824 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1824 1825
1825 1826 changesets = files = revisions = 0
1826 1827
1827 1828 tr = self.transaction()
1828 1829
1829 1830 # write changelog data to temp files so concurrent readers will not see
1830 1831 # an inconsistent view
1831 1832 cl = None
1832 1833 try:
1833 1834 cl = appendfile.appendchangelog(self.sopener,
1834 1835 self.changelog.version)
1835 1836
1836 1837 oldheads = len(cl.heads())
1837 1838
1838 1839 # pull off the changeset group
1839 1840 self.ui.status(_("adding changesets\n"))
1840 1841 cor = cl.count() - 1
1841 1842 chunkiter = changegroup.chunkiter(source)
1842 1843 if cl.addgroup(chunkiter, csmap, tr, 1) is None:
1843 1844 raise util.Abort(_("received changelog group is empty"))
1844 1845 cnr = cl.count() - 1
1845 1846 changesets = cnr - cor
1846 1847
1847 1848 # pull off the manifest group
1848 1849 self.ui.status(_("adding manifests\n"))
1849 1850 chunkiter = changegroup.chunkiter(source)
1850 1851 # no need to check for empty manifest group here:
1851 1852 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1852 1853 # no new manifest will be created and the manifest group will
1853 1854 # be empty during the pull
1854 1855 self.manifest.addgroup(chunkiter, revmap, tr)
1855 1856
1856 1857 # process the files
1857 1858 self.ui.status(_("adding file changes\n"))
1858 1859 while 1:
1859 1860 f = changegroup.getchunk(source)
1860 1861 if not f:
1861 1862 break
1862 1863 self.ui.debug(_("adding %s revisions\n") % f)
1863 1864 fl = self.file(f)
1864 1865 o = fl.count()
1865 1866 chunkiter = changegroup.chunkiter(source)
1866 1867 if fl.addgroup(chunkiter, revmap, tr) is None:
1867 1868 raise util.Abort(_("received file revlog group is empty"))
1868 1869 revisions += fl.count() - o
1869 1870 files += 1
1870 1871
1871 1872 cl.writedata()
1872 1873 finally:
1873 1874 if cl:
1874 1875 cl.cleanup()
1875 1876
1876 1877 # make changelog see real files again
1877 1878 self.changelog = changelog.changelog(self.sopener,
1878 1879 self.changelog.version)
1879 1880 self.changelog.checkinlinesize(tr)
1880 1881
1881 1882 newheads = len(self.changelog.heads())
1882 1883 heads = ""
1883 1884 if oldheads and newheads != oldheads:
1884 1885 heads = _(" (%+d heads)") % (newheads - oldheads)
1885 1886
1886 1887 self.ui.status(_("added %d changesets"
1887 1888 " with %d changes to %d files%s\n")
1888 1889 % (changesets, revisions, files, heads))
1889 1890
1890 1891 if changesets > 0:
1891 1892 self.hook('pretxnchangegroup', throw=True,
1892 1893 node=hex(self.changelog.node(cor+1)), source=srctype,
1893 1894 url=url)
1894 1895
1895 1896 tr.close()
1896 1897
1897 1898 if changesets > 0:
1898 1899 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1899 1900 source=srctype, url=url)
1900 1901
1901 1902 for i in xrange(cor + 1, cnr + 1):
1902 1903 self.hook("incoming", node=hex(self.changelog.node(i)),
1903 1904 source=srctype, url=url)
1904 1905
1905 1906 # never return 0 here:
1906 1907 if newheads < oldheads:
1907 1908 return newheads - oldheads - 1
1908 1909 else:
1909 1910 return newheads - oldheads + 1
1910 1911
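# Illustrative reading of addchangegroup()'s return value (sketch only):
#
#   ret = repo.addchangegroup(fp, 'pull', url)
#   if ret == 0:
#       pass              # nothing changed, or no source
#   elif ret == 1:
#       pass              # same number of heads as before
#   elif ret > 1:
#       pass              # ret - 1 new heads; a merge may be needed
#   else:                 # ret <= -2
#       pass              # -ret - 1 heads were removed
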
1911 1912
1912 1913 def stream_in(self, remote):
1913 1914 fp = remote.stream_out()
1914 1915 l = fp.readline()
1915 1916 try:
1916 1917 resp = int(l)
1917 1918 except ValueError:
1918 1919 raise util.UnexpectedOutput(
1919 1920 _('Unexpected response from remote server:'), l)
1920 1921 if resp == 1:
1921 1922 raise util.Abort(_('operation forbidden by server'))
1922 1923 elif resp == 2:
1923 1924 raise util.Abort(_('locking the remote repository failed'))
1924 1925 elif resp != 0:
1925 1926 raise util.Abort(_('the server sent an unknown error code'))
1926 1927 self.ui.status(_('streaming all changes\n'))
1927 1928 l = fp.readline()
1928 1929 try:
1929 1930 total_files, total_bytes = map(int, l.split(' ', 1))
1930 1931 except (ValueError, TypeError):
1931 1932 raise util.UnexpectedOutput(
1932 1933 _('Unexpected response from remote server:'), l)
1933 1934 self.ui.status(_('%d files to transfer, %s of data\n') %
1934 1935 (total_files, util.bytecount(total_bytes)))
1935 1936 start = time.time()
1936 1937 for i in xrange(total_files):
1937 1938 # XXX doesn't support '\n' or '\r' in filenames
1938 1939 l = fp.readline()
1939 1940 try:
1940 1941 name, size = l.split('\0', 1)
1941 1942 size = int(size)
1942 1943 except (ValueError, TypeError):
1943 1944 raise util.UnexpectedOutput(
1944 1945 _('Unexpected response from remote server:'), l)
1945 1946 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1946 1947 ofp = self.sopener(name, 'w')
1947 1948 for chunk in util.filechunkiter(fp, limit=size):
1948 1949 ofp.write(chunk)
1949 1950 ofp.close()
1950 1951 elapsed = time.time() - start
1951 1952 if elapsed <= 0:
1952 1953 elapsed = 0.001
1953 1954 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1954 1955 (util.bytecount(total_bytes), elapsed,
1955 1956 util.bytecount(total_bytes / elapsed)))
1956 1957 self.reload()
1957 1958 return len(self.heads()) + 1
1958 1959
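# Sketch of the stream_out wire format consumed above (reconstructed from
# the parsing code; the file names and sizes are made-up examples):
#
#   '0\n'                        # response code: 0 = OK
#   '2 8192\n'                   # total files, total bytes
#   'data/foo.i\x004096\n'       # NUL-separated file name and size ...
#   <4096 bytes of raw store data>
#   'data/bar.i\x004096\n'
#   <4096 bytes of raw store data>
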
1959 1960 def clone(self, remote, heads=[], stream=False):
1960 1961 '''clone remote repository.
1961 1962
1962 1963 keyword arguments:
1963 1964 heads: list of revs to clone (forces use of pull)
1964 1965 stream: use streaming clone if possible'''
1965 1966
1966 1967 # now, all clients that can request uncompressed clones can
1967 1968 # read repo formats supported by all servers that can serve
1968 1969 # them.
1969 1970
1970 1971 # if revlog format changes, client will have to check version
1971 1972 # and format flags on "stream" capability, and use
1972 1973 # uncompressed only if compatible.
1973 1974
1974 1975 if stream and not heads and remote.capable('stream'):
1975 1976 return self.stream_in(remote)
1976 1977 return self.pull(remote, heads)
1977 1978
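# Usage sketch (hypothetical):
#
#   dest = localrepository(ui, path, create=1)
#   dest.clone(remote, stream=True)  # falls back to pull() when the
#                                    # server lacks the 'stream' capability
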
1978 1979 # used to avoid circular references so destructors work
1979 1980 def aftertrans(files):
1980 1981 renamefiles = [tuple(t) for t in files]
1981 1982 def a():
1982 1983 for src, dest in renamefiles:
1983 1984 util.rename(src, dest)
1984 1985 return a
1985 1986
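# Usage sketch for aftertrans (illustrative; this exact call site is
# hypothetical, though it mirrors how the repository sets up its
# transaction elsewhere):
#
#   renames = [(journalpath, undopath)]
#   tr = transaction.transaction(ui.warn, opener, journalpath,
#                                aftertrans(renames))
#
# Because aftertrans() closes over a plain list of tuples rather than the
# repository itself, the transaction keeps no reference back to the repo
# and destructors can run promptly - hence the comment above.
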
1986 1987 def instance(ui, path, create):
1987 1988 return localrepository(ui, util.drop_scheme('file', path), create)
1988 1989
1989 1990 def islocal(path):
1990 1991 return True