##// END OF EJS Templates
Minor tags optimization
Matt Mackall -
r3456:3464f5e7 default
parent child Browse files
Show More
@@ -1,1817 +1,1819 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import *
9 9 from i18n import gettext as _
10 10 from demandload import *
11 11 import repo
12 12 demandload(globals(), "appendfile changegroup")
13 13 demandload(globals(), "changelog dirstate filelog manifest context")
14 14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
15 15 demandload(globals(), "os revlog time util")
16 16
17 17 class localrepository(repo.repository):
18 18 capabilities = ('lookup', 'changegroupsubset')
19 19
    def __del__(self):
        # Drop the reference to any open transaction so it can be
        # garbage-collected when the repository object goes away.
        self.transhandle = None
    def __init__(self, parentui, path=None, create=0):
        """Open (or, with create=1, initialize) the repository at path.

        If path is None, search upward from the current directory for
        a .hg directory.  Raises repo.RepoError if no repository is
        found, if create is requested on an existing repository, or if
        the repository is missing and create was not requested.
        """
        repo.repository.__init__(self)
        if not path:
            # walk up the directory tree looking for a .hg directory
            p = os.getcwd()
            while not os.path.isdir(os.path.join(p, ".hg")):
                oldp = p
                p = os.path.dirname(p)
                if p == oldp:
                    raise repo.RepoError(_("There is no Mercurial repository"
                                           " here (.hg not found)"))
            path = p
        self.path = os.path.join(path, ".hg")

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    os.mkdir(path)
                os.mkdir(self.path)
                os.mkdir(self.join("data"))
            else:
                raise repo.RepoError(_("repository %s not found") % path)
        elif create:
            raise repo.RepoError(_("repository %s already exists") % path)

        self.root = os.path.abspath(path)
        self.origroot = path
        self.ui = ui.ui(parentui=parentui)
        # opener/wopener open files relative to .hg and the working dir
        self.opener = util.opener(self.path)
        self.wopener = util.opener(self.root)

        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
        except IOError:
            # a missing hgrc is fine
            pass

        # determine the revlog version and flags from the [revlog]
        # config section
        v = self.ui.configrevlog()
        self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
        self.revlogv1 = self.revlogversion != revlog.REVLOGV0
        fl = v.get('flags', None)
        flags = 0
        if fl != None:
            for x in fl.split():
                flags |= revlog.flagstr(x)
        elif self.revlogv1:
            flags = revlog.REVLOG_DEFAULT_FLAGS

        v = self.revlogversion | flags
        self.manifest = manifest.manifest(self.opener, v)
        self.changelog = changelog.changelog(self.opener, v)

        # the changelog might not have the inline index flag
        # on. If the format of the changelog is the same as found in
        # .hgrc, apply any flags found in the .hgrc as well.
        # Otherwise, just version from the changelog
        v = self.changelog.version
        if v == self.revlogversion:
            v |= flags
        self.revlogversion = v

        # lazily-populated caches; None means "not computed yet"
        self.tagscache = None
        self.branchcache = None
        self.nodetagscache = None
        self.encodepats = None
        self.decodepats = None
        self.transhandle = None

        self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
89 89
90 90 def url(self):
91 91 return 'file:' + self.root
92 92
    def hook(self, name, throw=False, **args):
        """Run all configured hooks whose name matches `name`.

        Keyword args are exposed to shell hooks as HG_* environment
        variables and passed through to Python hooks.  If throw is
        True, a failing hook raises util.Abort; otherwise failures are
        only warned about.  Returns the last hook's truthy result.
        """
        def callhook(hname, funcname):
            '''call python hook. hook is callable object, looked up as
            name in python module. if callable returns "true", hook
            fails, else passes. if hook raises exception, treated as
            hook failure. exception propagates if throw is "true".

            reason for "true" meaning "hook failed" is so that
            unmodified commands (e.g. mercurial.commands.update) can
            be run as hooks without wrappers to convert return values.'''

            self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
            d = funcname.rfind('.')
            if d == -1:
                raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
                                 % (hname, funcname))
            modname = funcname[:d]
            try:
                obj = __import__(modname)
            except ImportError:
                try:
                    # extensions are loaded with hgext_ prefix
                    obj = __import__("hgext_%s" % modname)
                except ImportError:
                    raise util.Abort(_('%s hook is invalid '
                                       '(import of "%s" failed)') %
                                     (hname, modname))
            try:
                # walk the dotted path down to the callable
                for p in funcname.split('.')[1:]:
                    obj = getattr(obj, p)
            except AttributeError, err:
                raise util.Abort(_('%s hook is invalid '
                                   '("%s" is not defined)') %
                                 (hname, funcname))
            if not callable(obj):
                raise util.Abort(_('%s hook is invalid '
                                   '("%s" is not callable)') %
                                 (hname, funcname))
            try:
                r = obj(ui=self.ui, repo=self, hooktype=name, **args)
            except (KeyboardInterrupt, util.SignalInterrupt):
                raise
            except Exception, exc:
                if isinstance(exc, util.Abort):
                    self.ui.warn(_('error: %s hook failed: %s\n') %
                                 (hname, exc.args[0]))
                else:
                    self.ui.warn(_('error: %s hook raised an exception: '
                                   '%s\n') % (hname, exc))
                if throw:
                    raise
                self.ui.print_exc()
                return True
            if r:
                if throw:
                    raise util.Abort(_('%s hook failed') % hname)
                self.ui.warn(_('warning: %s hook failed\n') % hname)
            return r

        def runhook(name, cmd):
            # run a shell hook, passing args via HG_* env variables
            self.ui.note(_("running hook %s: %s\n") % (name, cmd))
            env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
            r = util.system(cmd, environ=env, cwd=self.root)
            if r:
                desc, r = util.explain_exit(r)
                if throw:
                    raise util.Abort(_('%s hook %s') % (name, desc))
                self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
            return r

        r = False
        # "name" and "name.suffix" entries both match hook "name"
        hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
                 if hname.split(".", 1)[0] == name and cmd]
        hooks.sort()
        for hname, cmd in hooks:
            if cmd.startswith('python:'):
                r = callhook(hname, cmd[7:].strip()) or r
            else:
                r = runhook(hname, cmd) or r
        return r
173 173
    tag_disallowed = ':\r\n'    # characters that may not appear in a tag name
175 175
    def tag(self, name, node, message, local, user, date):
        '''tag a revision with a symbolic name.

        if local is True, the tag is stored in a per-repository file.
        otherwise, it is stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tag in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        for c in self.tag_disallowed:
            if c in name:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)

        if local:
            # local tags never touch the changelog: append and return
            self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
            self.hook('tag', node=hex(node), tag=name, local=local)
            return

        # refuse to commit over uncommitted .hgtags changes
        for x in self.status()[:5]:
            if '.hgtags' in x:
                raise util.Abort(_('working copy of .hgtags is changed '
                                   '(please commit .hgtags manually)'))

        self.wfile('.hgtags', 'ab').write('%s %s\n' % (hex(node), name))
        if self.dirstate.state('.hgtags') == '?':
            self.add(['.hgtags'])

        self.commit(['.hgtags'], message, user, date)
        self.hook('tag', node=hex(node), tag=name, local=local)
216 216
217 217 def tags(self):
218 218 '''return a mapping of tag to node'''
219 219 if not self.tagscache:
220 220 self.tagscache = {}
221 221
222 222 def parsetag(line, context):
223 223 if not line:
224 224 return
225 225 s = l.split(" ", 1)
226 226 if len(s) != 2:
227 227 self.ui.warn(_("%s: cannot parse entry\n") % context)
228 228 return
229 229 node, key = s
230 230 key = key.strip()
231 231 try:
232 232 bin_n = bin(node)
233 233 except TypeError:
234 234 self.ui.warn(_("%s: node '%s' is not well formed\n") %
235 235 (context, node))
236 236 return
237 237 if bin_n not in self.changelog.nodemap:
238 238 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
239 239 (context, key))
240 240 return
241 241 self.tagscache[key] = bin_n
242 242
243 243 # read the tags file from each head, ending with the tip,
244 244 # and add each tag found to the map, with "newer" ones
245 245 # taking precedence
246 246 heads = self.heads()
247 247 heads.reverse()
248 fl = self.file(".hgtags")
248 seen = {}
249 249 for node in heads:
250 250 f = self.filectx('.hgtags', node)
251 if not f: continue
251 if not f or f.filerev() in seen: continue
252 seen[f.filerev()] = 1
252 253 count = 0
253 254 for l in f.data().splitlines():
254 255 count += 1
255 256 parsetag(l, _("%s, line %d") % (str(f), count))
257
256 258 try:
257 259 f = self.opener("localtags")
258 260 count = 0
259 261 for l in f:
260 262 count += 1
261 263 parsetag(l, _("localtags, line %d") % count)
262 264 except IOError:
263 265 pass
264 266
265 267 self.tagscache['tip'] = self.changelog.tip()
266 268
267 269 return self.tagscache
268 270
269 271 def tagslist(self):
270 272 '''return a list of tags ordered by revision'''
271 273 l = []
272 274 for t, n in self.tags().items():
273 275 try:
274 276 r = self.changelog.rev(n)
275 277 except:
276 278 r = -2 # sort to the beginning of the list if unknown
277 279 l.append((r, t, n))
278 280 l.sort()
279 281 return [(t, n) for r, t, n in l]
280 282
281 283 def nodetags(self, node):
282 284 '''return the tags associated with a node'''
283 285 if not self.nodetagscache:
284 286 self.nodetagscache = {}
285 287 for t, n in self.tags().items():
286 288 self.nodetagscache.setdefault(n, []).append(t)
287 289 return self.nodetagscache.get(node, [])
288 290
    def branchtags(self):
        """Return a {branchname: node} map, reading the on-disk
        branches.cache file and scanning any revisions it does not
        cover; the result is memoized in self.branchcache."""
        if self.branchcache != None:
            return self.branchcache

        self.branchcache = {} # avoid recursion in changectx

        try:
            f = self.opener("branches.cache")
            # first line is "<hex-tipnode> <tiprev>" for validation
            last, lrev = f.readline().rstrip().split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if (lrev < self.changelog.count() and
                self.changelog.node(lrev) == last): # sanity check
                for l in f:
                    node, label = l.rstrip().split(" ", 1)
                    self.branchcache[label] = bin(node)
            else: # invalidate the cache
                last, lrev = nullid, -1
            f.close()
        except IOError:
            # missing/unreadable cache: rebuild from scratch
            last, lrev = nullid, -1

        tip = self.changelog.count() - 1
        if lrev != tip:
            # scan only the revisions not covered by the cache
            for r in xrange(lrev + 1, tip + 1):
                c = self.changectx(r)
                b = c.branch()
                if b:
                    self.branchcache[b] = c.node()
            self._writebranchcache()

        return self.branchcache
320 322
321 323 def _writebranchcache(self):
322 324 try:
323 325 f = self.opener("branches.cache", "w")
324 326 t = self.changelog.tip()
325 327 f.write("%s %s\n" % (hex(t), self.changelog.count() - 1))
326 328 for label, node in self.branchcache.iteritems():
327 329 f.write("%s %s\n" % (hex(node), label))
328 330 except IOError:
329 331 pass
330 332
    def lookup(self, key):
        """Map a revision identifier to a changelog node.

        Tries, in order: '.' (first dirstate parent), an exact
        changelog match, a tag, a branch name, and finally an
        unambiguous node-prefix match.  Raises repo.RepoError if
        nothing matches.
        """
        if key == '.':
            key = self.dirstate.parents()[0]
            if key == nullid:
                raise repo.RepoError(_("no revision checked out"))
        n = self.changelog._match(key)
        if n:
            return n
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        n = self.changelog._partialmatch(key)
        if n:
            return n
        raise repo.RepoError(_("unknown revision '%s'") % key)
347 349
348 350 def dev(self):
349 351 return os.lstat(self.path).st_dev
350 352
    def local(self):
        # this repository class is always locally accessible
        return True
353 355
    def join(self, f):
        """Join f to the repository's .hg directory path."""
        return os.path.join(self.path, f)
356 358
    def wjoin(self, f):
        """Join f to the working directory root path."""
        return os.path.join(self.root, f)
359 361
360 362 def file(self, f):
361 363 if f[0] == '/':
362 364 f = f[1:]
363 365 return filelog.filelog(self.opener, f, self.revlogversion)
364 366
    def changectx(self, changeid=None):
        """Return a changectx for changeid (default: current)."""
        return context.changectx(self, changeid)
367 369
    def workingctx(self):
        """Return a context object for the working directory."""
        return context.workingctx(self)
370 372
371 373 def parents(self, changeid=None):
372 374 '''
373 375 get list of changectxs for parents of changeid or working directory
374 376 '''
375 377 if changeid is None:
376 378 pl = self.dirstate.parents()
377 379 else:
378 380 n = self.changelog.lookup(changeid)
379 381 pl = self.changelog.parents(n)
380 382 if pl[1] == nullid:
381 383 return [self.changectx(pl[0])]
382 384 return [self.changectx(pl[0]), self.changectx(pl[1])]
383 385
    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)
388 390
    def getcwd(self):
        """Return the current directory relative to the repo root."""
        return self.dirstate.getcwd()
391 393
    def wfile(self, f, mode='r'):
        """Open file f relative to the working directory."""
        return self.wopener(f, mode)
394 396
    def wread(self, filename):
        """Read a working-directory file, applying the first matching
        [encode] filter configured in hgrc."""
        if self.encodepats == None:
            # build the (matcher, command) list lazily on first use
            l = []
            for pat, cmd in self.ui.configitems("encode"):
                mf = util.matcher(self.root, "", [pat], [], [])[1]
                l.append((mf, cmd))
            self.encodepats = l

        data = self.wopener(filename, 'r').read()

        for mf, cmd in self.encodepats:
            if mf(filename):
                self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
                data = util.filter(data, cmd)
                break

        return data
412 414
    def wwrite(self, filename, data, fd=None):
        """Write data to a working-directory file, applying the first
        matching [decode] filter; writes to fd if given, otherwise
        opens the file by name."""
        if self.decodepats == None:
            # build the (matcher, command) list lazily on first use
            l = []
            for pat, cmd in self.ui.configitems("decode"):
                mf = util.matcher(self.root, "", [pat], [], [])[1]
                l.append((mf, cmd))
            self.decodepats = l

        for mf, cmd in self.decodepats:
            if mf(filename):
                self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
                data = util.filter(data, cmd)
                break

        if fd:
            return fd.write(data)
        return self.wopener(filename, 'w').write(data)
430 432
    def transaction(self):
        """Return a transaction object; nests inside a running one.

        The current dirstate is journaled first so rollback can
        restore it.
        """
        tr = self.transhandle
        if tr != None and tr.running():
            # nest within the already-running transaction
            return tr.nest()

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)

        tr = transaction.transaction(self.ui.warn, self.opener,
                                     self.join("journal"),
                                     aftertrans(self.path))
        self.transhandle = tr
        return tr
448 450
    def recover(self):
        """Roll back an interrupted transaction if a journal exists.

        Returns True when a transaction was rolled back, False (with a
        warning) otherwise.
        """
        l = self.lock()
        if os.path.exists(self.join("journal")):
            self.ui.status(_("rolling back interrupted transaction\n"))
            transaction.rollback(self.opener, self.join("journal"))
            self.reload()
            return True
        else:
            self.ui.warn(_("no interrupted transaction available\n"))
            return False
459 461
    def rollback(self, wlock=None):
        """Undo the last committed transaction (via the undo journal)
        and restore the saved dirstate."""
        if not wlock:
            wlock = self.wlock()
        l = self.lock()
        if os.path.exists(self.join("undo")):
            self.ui.status(_("rolling back last transaction\n"))
            transaction.rollback(self.opener, self.join("undo"))
            util.rename(self.join("undo.dirstate"), self.join("dirstate"))
            self.reload()
            self.wreload()
        else:
            self.ui.warn(_("no rollback information available\n"))
472 474
    def wreload(self):
        # re-read the dirstate from disk
        self.dirstate.read()
475 477
    def reload(self):
        """Re-read changelog and manifest, invalidating tag caches."""
        self.changelog.load()
        self.manifest.load()
        self.tagscache = None
        self.nodetagscache = None
481 483
    def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
                desc=None):
        """Acquire the named lock file.

        If the lock is held and wait is true, retry with the
        configured (or 600-second default) timeout instead of raising
        LockHeld immediately.  Calls acquirefn once the lock is held.
        """
        try:
            l = lock.lock(self.join(lockname), 0, releasefn, desc=desc)
        except lock.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %s\n") %
                         (desc, inst.args[0]))
            # default to 600 seconds timeout
            l = lock.lock(self.join(lockname),
                          int(self.ui.config("ui", "timeout") or 600),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l
498 500
    def lock(self, wait=1):
        """Acquire the repository (store) lock."""
        return self.do_lock("lock", wait, acquirefn=self.reload,
                            desc=_('repository %s') % self.origroot)
502 504
    def wlock(self, wait=1):
        """Acquire the working-directory lock; the dirstate is written
        out when the lock is released."""
        return self.do_lock("wlock", wait, self.dirstate.write,
                            self.wreload,
                            desc=_('working directory of %s') % self.origroot)
507 509
    def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
        """
        commit an individual file as part of a larger transaction

        Returns the new filelog node (or the existing one if the file
        is unchanged).  Appends fn to changelist when a new revision
        is actually added.
        """

        t = self.wread(fn)
        fl = self.file(fn)
        fp1 = manifest1.get(fn, nullid)
        fp2 = manifest2.get(fn, nullid)

        meta = {}
        cp = self.dirstate.copied(fn)
        if cp:
            # record copy metadata; the source revision depends on
            # which side of a merge the copy happened on
            meta["copy"] = cp
            if not manifest2: # not a branch merge
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
                fp2 = nullid
            elif fp2 != nullid: # copied on remote side
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            else: # copied on local side, reversed
                meta["copyrev"] = hex(manifest2.get(cp))
                fp2 = nullid
            self.ui.debug(_(" %s: copy %s:%s\n") %
                          (fn, cp, meta["copyrev"]))
            fp1 = nullid
        elif fp2 != nullid:
            # is one parent an ancestor of the other?
            fpa = fl.ancestor(fp1, fp2)
            if fpa == fp1:
                fp1, fp2 = fp2, nullid
            elif fpa == fp2:
                fp2 = nullid

        # is the file unmodified from the parent? report existing entry
        if fp2 == nullid and not fl.cmp(fp1, t):
            return fp1

        changelist.append(fn)
        return fl.add(t, meta, transaction, linkrev, fp1, fp2)
547 549
    def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
        """Commit the given files with explicitly supplied parents,
        bypassing the usual working-directory status checks.

        The dirstate parents are only updated when p1 matches the
        current first dirstate parent.
        """
        orig_parent = self.dirstate.parents()[0] or nullid
        p1 = p1 or self.dirstate.parents()[0] or nullid
        p2 = p2 or self.dirstate.parents()[1] or nullid
        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0]).copy()
        m2 = self.manifest.read(c2[0])
        changed = []
        removed = []

        if orig_parent == p1:
            update_dirstate = 1
        else:
            update_dirstate = 0

        if not wlock:
            wlock = self.wlock()
        l = self.lock()
        tr = self.transaction()
        linkrev = self.changelog.count()
        for f in files:
            try:
                m1[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
                m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
            except IOError:
                # file is missing from the working dir: treat as removed
                try:
                    del m1[f]
                    if update_dirstate:
                        self.dirstate.forget([f])
                    removed.append(f)
                except:
                    # deleted from p2?
                    pass

        mnode = self.manifest.add(m1, tr, linkrev, c1[0], c2[0])
        user = user or self.ui.username()
        n = self.changelog.add(mnode, changed + removed, text,
                               tr, p1, p2, user, date)
        tr.close()
        if update_dirstate:
            self.dirstate.setparents(n, nullid)
590 592
    def commit(self, files=None, text="", user=None, date=None,
               match=util.always, force=False, lock=None, wlock=None,
               force_editor=False):
        """Commit changes in the working directory; return the new
        changeset node, or None if nothing was committed.

        If files is given, only those files are considered; otherwise
        all modified/added/removed files matching `match` are used.
        An editor is invoked when text is empty or force_editor is
        True.
        """
        commit = []
        remove = []
        changed = []

        if files:
            for f in files:
                s = self.dirstate.state(f)
                if s in 'nmai':
                    commit.append(f)
                elif s == 'r':
                    remove.append(f)
                else:
                    self.ui.warn(_("%s not tracked!\n") % f)
        else:
            modified, added, removed, deleted, unknown = self.status(match=match)[:5]
            commit = modified + added
            remove = removed

        p1, p2 = self.dirstate.parents()
        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0]).copy()
        m2 = self.manifest.read(c2[0])

        branchname = self.workingctx().branch()
        oldname = c1[5].get("branch", "")

        # a branch-name change alone is enough to commit
        if not commit and not remove and not force and p2 == nullid and \
           branchname == oldname:
            self.ui.status(_("nothing changed\n"))
            return None

        xp1 = hex(p1)
        if p2 == nullid: xp2 = ''
        else: xp2 = hex(p2)

        self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

        if not wlock:
            wlock = self.wlock()
        if not lock:
            lock = self.lock()
        tr = self.transaction()

        # check in files
        new = {}
        linkrev = self.changelog.count()
        commit.sort()
        for f in commit:
            self.ui.note(f + "\n")
            try:
                new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
                m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
            except IOError:
                self.ui.warn(_("trouble committing %s!\n") % f)
                raise

        # update manifest
        m1.update(new)
        for f in remove:
            if f in m1:
                del m1[f]
        mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, remove))

        # add changeset
        new = new.keys()
        new.sort()

        user = user or self.ui.username()
        if not text or force_editor:
            edittext = []
            if text:
                edittext.append(text)
            edittext.append("")
            if p2 != nullid:
                edittext.append("HG: branch merge")
            edittext.extend(["HG: changed %s" % f for f in changed])
            edittext.extend(["HG: removed %s" % f for f in remove])
            if not changed and not remove:
                edittext.append("HG: no files changed")
            edittext.append("")
            # run editor in the repository root
            olddir = os.getcwd()
            os.chdir(self.root)
            text = self.ui.edit("\n".join(edittext), user)
            os.chdir(olddir)

        # strip leading blank lines; an empty message aborts the commit
        lines = [line.rstrip() for line in text.rstrip().splitlines()]
        while lines and not lines[0]:
            del lines[0]
        if not lines:
            return None
        text = '\n'.join(lines)
        extra = {}
        if branchname:
            extra["branch"] = branchname
        n = self.changelog.add(mn, changed + remove, text, tr, p1, p2,
                               user, date, extra)
        self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                  parent2=xp2)
        tr.close()

        self.dirstate.setparents(n)
        self.dirstate.update(new, "n")
        self.dirstate.forget(remove)

        self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
        return n
702 704
    def walk(self, node=None, files=[], match=util.always, badmatch=None):
        """Yield (src, filename) pairs for matching files.

        With a node, walk that revision's manifest (src 'm', or 'b'
        for badmatch hits); otherwise delegate to dirstate.walk.
        NOTE(review): mutable default for `files` -- safe here only
        because it is never mutated.
        """
        if node:
            fdict = dict.fromkeys(files)
            for fn in self.manifest.read(self.changelog.read(node)[0]):
                for ffn in fdict:
                    # match if the file is the exact name or a directory
                    if ffn == fn or fn.startswith("%s/" % ffn):
                        del fdict[ffn]
                        break
                if match(fn):
                    yield 'm', fn
            # anything left in fdict was not found in the manifest
            for fn in fdict:
                if badmatch and badmatch(fn):
                    if match(fn):
                        yield 'b', fn
                else:
                    self.ui.warn(_('%s: No such file in rev %s\n') % (
                        util.pathto(self.getcwd(), fn), short(node)))
        else:
            for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
                yield src, fn
724 726
    def status(self, node1=None, node2=None, files=[], match=util.always,
               wlock=None, list_ignored=False, list_clean=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.

        Returns a 7-tuple of sorted lists:
        (modified, added, removed, deleted, unknown, ignored, clean).
        """

        def fcmp(fn, mf):
            # true if the working copy of fn differs from its manifest entry
            t1 = self.wread(fn)
            return self.file(fn).cmp(mf.get(fn, nullid), t1)

        def mfmatches(node):
            # manifest of `node`, restricted to files accepted by `match`
            change = self.changelog.read(node)
            mf = self.manifest.read(change[0]).copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        modified, added, removed, deleted, unknown = [], [], [], [], []
        ignored, clean = [], []

        compareworking = False
        if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
            compareworking = True

        if not compareworking:
            # read the manifest from node1 before the manifest from node2,
            # so that we'll hit the manifest cache if we're going through
            # all the revisions in parent->child order.
            mf1 = mfmatches(node1)

        # are we comparing the working directory?
        if not node2:
            if not wlock:
                try:
                    wlock = self.wlock(wait=0)
                except lock.LockException:
                    # best effort: without the lock we just skip the
                    # dirstate update for verified-clean files below
                    wlock = None
            (lookup, modified, added, removed, deleted, unknown,
             ignored, clean) = self.dirstate.status(files, match,
                                                    list_ignored, list_clean)

            # are we comparing working dir against its parent?
            if compareworking:
                if lookup:
                    # do a full compare of any files that might have changed
                    mf2 = mfmatches(self.dirstate.parents()[0])
                    for f in lookup:
                        if fcmp(f, mf2):
                            modified.append(f)
                        else:
                            clean.append(f)
                            if wlock is not None:
                                self.dirstate.update([f], "n")
            else:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                # XXX: create it in dirstate.py ?
                mf2 = mfmatches(self.dirstate.parents()[0])
                for f in lookup + modified + added:
                    mf2[f] = ""
                    mf2.set(f, execf=util.is_exec(self.wjoin(f), mf2.execf(f)))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
        else:
            # we are comparing two revisions
            mf2 = mfmatches(node2)

        if not compareworking:
            # flush lists from dirstate before comparing manifests
            modified, added, clean = [], [], []

            # make sure to sort the files so we talk to the disk in a
            # reasonable order
            mf2keys = mf2.keys()
            mf2keys.sort()
            for fn in mf2keys:
                if mf1.has_key(fn):
                    if mf1.flags(fn) != mf2.flags(fn) or \
                       (mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1))):
                        modified.append(fn)
                    elif list_clean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)

            # whatever is left in mf1 existed in node1 but not node2
            removed = mf1.keys()

        # sort and return results:
        for l in modified, added, removed, deleted, unknown, ignored, clean:
            l.sort()
        return (modified, added, removed, deleted, unknown, ignored, clean)
821 823
    def add(self, list, wlock=None):
        """Schedule the given files for addition at the next commit;
        warns (instead of raising) on missing, non-regular, or
        already-tracked files."""
        if not wlock:
            wlock = self.wlock()
        for f in list:
            p = self.wjoin(f)
            if not os.path.exists(p):
                self.ui.warn(_("%s does not exist!\n") % f)
            elif not os.path.isfile(p):
                self.ui.warn(_("%s not added: only files supported currently\n")
                             % f)
            elif self.dirstate.state(f) in 'an':
                self.ui.warn(_("%s already tracked!\n") % f)
            else:
                self.dirstate.update([f], "a")
836 838
    def forget(self, list, wlock=None):
        """Undo a pending add for the given files (dirstate only)."""
        if not wlock:
            wlock = self.wlock()
        for f in list:
            if self.dirstate.state(f) not in 'ai':
                self.ui.warn(_("%s not added!\n") % f)
            else:
                self.dirstate.forget([f])
845 847
    def remove(self, list, unlink=False, wlock=None):
        """Schedule files for removal; with unlink, also delete them
        from the working directory first."""
        if unlink:
            for f in list:
                try:
                    util.unlink(self.wjoin(f))
                except OSError, inst:
                    # already gone is fine; anything else propagates
                    if inst.errno != errno.ENOENT:
                        raise
        if not wlock:
            wlock = self.wlock()
        for f in list:
            p = self.wjoin(f)
            if os.path.exists(p):
                self.ui.warn(_("%s still exists!\n") % f)
            elif self.dirstate.state(f) == 'a':
                # added but never committed: just forget it
                self.dirstate.forget([f])
            elif f not in self.dirstate:
                self.ui.warn(_("%s not tracked!\n") % f)
            else:
                self.dirstate.update([f], "r")
866 868
    def undelete(self, list, wlock=None):
        """Restore files scheduled for removal from the first parent's
        manifest and mark them normal again."""
        p = self.dirstate.parents()[0]
        mn = self.changelog.read(p)[0]
        m = self.manifest.read(mn)
        if not wlock:
            wlock = self.wlock()
        for f in list:
            if self.dirstate.state(f) not in "r":
                self.ui.warn("%s not removed!\n" % f)
            else:
                t = self.file(f).read(m[f])
                self.wwrite(f, t)
                util.set_exec(self.wjoin(f), m.execf(f))
                self.dirstate.update([f], "n")
881 883
    def copy(self, source, dest, wlock=None):
        """Record in the dirstate that dest is a copy of source; dest
        must already exist as a regular file."""
        p = self.wjoin(dest)
        if not os.path.exists(p):
            self.ui.warn(_("%s does not exist!\n") % dest)
        elif not os.path.isfile(p):
            self.ui.warn(_("copy failed: %s is not a file\n") % dest)
        else:
            if not wlock:
                wlock = self.wlock()
            if self.dirstate.state(dest) == '?':
                self.dirstate.update([dest], "a")
            self.dirstate.copy(source, dest)
894 896
895 897 def heads(self, start=None):
896 898 heads = self.changelog.heads(start)
897 899 # sort the output in rev descending order
898 900 heads = [(-self.changelog.rev(h), h) for h in heads]
899 901 heads.sort()
900 902 return [n for (r, n) in heads]
901 903
902 904 # branchlookup returns a dict giving a list of branches for
903 905 # each head. A branch is defined as the tag of a node or
904 906 # the branch of the node's parents. If a node has multiple
905 907 # branch tags, tags are eliminated if they are visible from other
906 908 # branch tags.
907 909 #
908 910 # So, for this graph: a->b->c->d->e
909 911 # \ /
910 912 # aa -----/
911 913 # a has tag 2.6.12
912 914 # d has tag 2.6.13
913 915 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
914 916 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
915 917 # from the list.
916 918 #
917 919 # It is possible that more than one head will have the same branch tag.
918 920 # callers need to check the result for multiple heads under the same
919 921 # branch tag if that is a problem for them (ie checkout of a specific
920 922 # branch).
921 923 #
922 924 # passing in a specific branch will limit the depth of the search
923 925 # through the parents. It won't limit the branches returned in the
924 926 # result though.
    def branchlookup(self, heads=None, branch=None):
        """Return a dict mapping each head to its list of branch tags
        (see the long comment above this method for the semantics)."""
        if not heads:
            heads = self.heads()
        headt = [ h for h in heads ]
        chlog = self.changelog
        branches = {}
        merges = []
        seenmerge = {}

        # traverse the tree once for each head, recording in the branches
        # dict which tags are visible from this head. The branches
        # dict also records which tags are visible from each tag
        # while we traverse.
        while headt or merges:
            if merges:
                # resume traversal from a previously-queued merge parent,
                # carrying its accumulated `found` tag list
                n, found = merges.pop()
                visit = [n]
            else:
                h = headt.pop()
                visit = [h]
                found = [h]
                seen = {}
            while visit:
                n = visit.pop()
                if n in seen:
                    continue
                pp = chlog.parents(n)
                tags = self.nodetags(n)
                if tags:
                    for x in tags:
                        if x == 'tip':
                            continue
                        # record this tagged node as visible from every
                        # node found so far (and from itself)
                        for f in found:
                            branches.setdefault(f, {})[n] = 1
                        branches.setdefault(n, {})[n] = 1
                        break
                    if n not in found:
                        found.append(n)
                    if branch in tags:
                        # depth limit: stop below the requested branch
                        continue
                seen[n] = 1
                if pp[1] != nullid and n not in seenmerge:
                    merges.append((pp[1], [x for x in found]))
                    seenmerge[n] = 1
                if pp[0] != nullid:
                    visit.append(pp[0])
        # traverse the branches dict, eliminating branch tags from each
        # head that are visible from another branch tag for that head.
        out = {}
        viscache = {}
        for h in heads:
            def visible(node):
                # set of nodes reachable from `node` via the branches
                # dict, memoized across heads in viscache
                if node in viscache:
                    return viscache[node]
                ret = {}
                visit = [node]
                while visit:
                    x = visit.pop()
                    if x in viscache:
                        ret.update(viscache[x])
                    elif x not in ret:
                        ret[x] = 1
                        if x in branches:
                            visit[len(visit):] = branches[x].keys()
                viscache[node] = ret
                return ret
            if h not in branches:
                continue
            # O(n^2), but somewhat limited. This only searches the
            # tags visible from a specific head, not all the tags in the
            # whole repo.
            for b in branches[h]:
                vis = False
                for bb in branches[h].keys():
                    if b != bb:
                        if b in visible(bb):
                            vis = True
                            break
                if not vis:
                    l = out.setdefault(h, [])
                    l[len(l):] = self.nodetags(b)
        return out
1007 1009
1008 1010 def branches(self, nodes):
1009 1011 if not nodes:
1010 1012 nodes = [self.changelog.tip()]
1011 1013 b = []
1012 1014 for n in nodes:
1013 1015 t = n
1014 1016 while 1:
1015 1017 p = self.changelog.parents(n)
1016 1018 if p[1] != nullid or p[0] == nullid:
1017 1019 b.append((t, n, p[0], p[1]))
1018 1020 break
1019 1021 n = p[0]
1020 1022 return b
1021 1023
1022 1024 def between(self, pairs):
1023 1025 r = []
1024 1026
1025 1027 for top, bottom in pairs:
1026 1028 n, l, i = top, [], 0
1027 1029 f = 1
1028 1030
1029 1031 while n != bottom:
1030 1032 p = self.changelog.parents(n)[0]
1031 1033 if i == f:
1032 1034 l.append(n)
1033 1035 f = f * 2
1034 1036 n = p
1035 1037 i += 1
1036 1038
1037 1039 r.append(l)
1038 1040
1039 1041 return r
1040 1042
    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return list of roots of the subsets of missing nodes from remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore base will be updated to include the nodes that exists
        in self and remote but no children exists in self and remote.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        All the descendants of the list returned are missing in self.
        (and so we know that the rest of the nodes are missing in remote, see
        outgoing)
        """
        # nodemap supports 'in' tests: is a node known locally?
        m = self.changelog.nodemap
        search = []         # branch ranges scheduled for binary search
        fetch = {}          # earliest-unknown nodes found so far
        seen = {}
        seenbranch = {}
        if base == None:
            base = {}

        if not heads:
            heads = remote.heads()

        if self.changelog.tip() == nullid:
            # local repo is empty: everything remote has is missing
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid]
            return []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        if not unknown:
            return []

        # req tracks nodes already requested from the remote, so each is
        # asked about at most once
        req = dict.fromkeys(unknown)
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n) # schedule branch range for scanning
                    seenbranch[n] = 1
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch[n[1]] = 1 # earliest unknown
                            for p in n[2:4]:
                                if p in m:
                                    base[p] = 1 # latest known

                    # queue unknown parents for the next request batch
                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req[p] = 1
                    seen[n[0]] = 1

            if r:
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                            (reqcnt, " ".join(map(short, r))))
                # ask in batches of 10 to bound request size
                for p in range(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

        # do binary search on the branches we found
        while search:
            n = search.pop(0)
            reqcnt += 1
            l = remote.between([(n[0], n[1])])[0]
            l.append(n[1])
            p = n[0]
            f = 1
            for i in l:
                self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                if i in m:
                    if f <= 2:
                        # gap is one or two nodes: p is the earliest unknown
                        self.ui.debug(_("found new branch changeset %s\n") %
                                          short(p))
                        fetch[p] = 1
                        base[i] = 1
                    else:
                        # narrow to the sub-range (p, i) and rescan
                        self.ui.debug(_("narrowed branch search to %s:%s\n")
                                      % (short(p), short(i)))
                        search.append((p, i))
                        break
                p, f = i, f * 2

        # sanity check our fetch list
        for f in fetch.keys():
            if f in m:
                raise repo.RepoError(_("already have changeset ") + short(f[:4]))

        if base.keys() == [nullid]:
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.debug(_("found new changesets starting at ") +
                      " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return fetch.keys()
1181 1183
    def findoutgoing(self, remote, base=None, heads=None, force=False):
        """Return list of nodes that are roots of subsets not in remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads, and return a second element which
        contains all remote heads which get new children.
        """
        if base == None:
            # let findincoming populate base with the common nodes
            base = {}
            self.findincoming(remote, base, heads, force=force)

        self.ui.debug(_("common changesets up to ")
                      + " ".join(map(short, base.keys())) + "\n")

        # start from every local node, then prune away what remote has
        remain = dict.fromkeys(self.changelog.nodemap)

        # prune everything remote has from the tree
        del remain[nullid]
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                del remain[n]
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        # find every remote head that will get new children
        updated_heads = {}
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)
            if heads:
                if p1 in heads:
                    updated_heads[p1] = True
                if p2 in heads:
                    updated_heads[p2] = True

        # this is the set of all roots we have to push
        if heads:
            return subset, updated_heads.keys()
        else:
            return subset
1229 1231
1230 1232 def pull(self, remote, heads=None, force=False, lock=None):
1231 1233 mylock = False
1232 1234 if not lock:
1233 1235 lock = self.lock()
1234 1236 mylock = True
1235 1237
1236 1238 try:
1237 1239 fetch = self.findincoming(remote, force=force)
1238 1240 if fetch == [nullid]:
1239 1241 self.ui.status(_("requesting all changes\n"))
1240 1242
1241 1243 if not fetch:
1242 1244 self.ui.status(_("no changes found\n"))
1243 1245 return 0
1244 1246
1245 1247 if heads is None:
1246 1248 cg = remote.changegroup(fetch, 'pull')
1247 1249 else:
1248 1250 if 'changegroupsubset' not in remote.capabilities:
1249 1251 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1250 1252 cg = remote.changegroupsubset(fetch, heads, 'pull')
1251 1253 return self.addchangegroup(cg, 'pull', remote.url())
1252 1254 finally:
1253 1255 if mylock:
1254 1256 lock.release()
1255 1257
1256 1258 def push(self, remote, force=False, revs=None):
1257 1259 # there are two ways to push to remote repo:
1258 1260 #
1259 1261 # addchangegroup assumes local user can lock remote
1260 1262 # repo (local filesystem, old ssh servers).
1261 1263 #
1262 1264 # unbundle assumes local user cannot lock remote repo (new ssh
1263 1265 # servers, http servers).
1264 1266
1265 1267 if remote.capable('unbundle'):
1266 1268 return self.push_unbundle(remote, force, revs)
1267 1269 return self.push_addchangegroup(remote, force, revs)
1268 1270
    def prepush(self, remote, force, revs):
        """Compute the changegroup to push to remote.

        Returns (changegroup, remote_heads) on success, or (None, 1)
        when there is nothing to push or the push must be refused
        (unsynced remote changes, or it would create new remote heads
        and force is not set).
        """
        base = {}
        remote_heads = remote.heads()
        inc = self.findincoming(remote, base, remote_heads, force=force)
        if not force and inc:
            # remote has changesets we don't; pushing would race
            self.ui.warn(_("abort: unsynced remote changes!\n"))
            self.ui.status(_("(did you forget to sync?"
                             " use push -f to force)\n"))
            return None, 1

        update, updated_heads = self.findoutgoing(remote, base, remote_heads)
        if revs is not None:
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # FIXME we don't properly detect creation of new heads
            # in the push -r case, assume the user knows what he's doing
            if not revs and len(remote_heads) < len(heads) \
                   and remote_heads != [nullid]:
                self.ui.warn(_("abort: push creates new remote branches!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return None, 1

        if revs is None:
            cg = self.changegroup(update, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads
1303 1305
1304 1306 def push_addchangegroup(self, remote, force, revs):
1305 1307 lock = remote.lock()
1306 1308
1307 1309 ret = self.prepush(remote, force, revs)
1308 1310 if ret[0] is not None:
1309 1311 cg, remote_heads = ret
1310 1312 return remote.addchangegroup(cg, 'push', self.url())
1311 1313 return ret[1]
1312 1314
1313 1315 def push_unbundle(self, remote, force, revs):
1314 1316 # local repo finds heads on server, finds out what revs it
1315 1317 # must push. once revs transferred, if server finds it has
1316 1318 # different heads (someone else won commit/push race), server
1317 1319 # aborts.
1318 1320
1319 1321 ret = self.prepush(remote, force, revs)
1320 1322 if ret[0] is not None:
1321 1323 cg, remote_heads = ret
1322 1324 if force: remote_heads = ['force']
1323 1325 return remote.unbundle(cg, remote_heads, 'push')
1324 1326 return ret[1]
1325 1327
    def changegroupsubset(self, bases, heads, source):
        """This function generates a changegroup consisting of all the nodes
        that are descendents of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        bases and heads are lists of changelog nodes; source is passed to
        the 'preoutgoing'/'outgoing' hooks.  Returns a chunkbuffer yielding
        the changegroup stream (changelog, then manifests, then one group
        per changed file)."""

        self.hook('preoutgoing', throw=True, source=source)

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        # Some bases may turn out to be superfluous, and some heads may be
        # too.  nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = {}
        # We assume that all parents of bases are known heads.
        for n in bases:
            for p in cl.parents(n):
                if p != nullid:
                    knownheads[p] = 1
        knownheads = knownheads.keys()
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known.  The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into an ersatz set.
            has_cl_set = dict.fromkeys(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = {}

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function.  Sets up an environment for the
        # inner function.
        def cmp_by_rev_func(revlog):
            # Compare two nodes by their revision number in the environment's
            # revision history.  Since the revision number both represents the
            # most efficient order to read the nodes in, and represents a
            # topological sorting of the nodes, this function is often useful.
            def cmp_by_rev(a, b):
                return cmp(revlog.rev(a), revlog.rev(b))
            return cmp_by_rev

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents.  This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = hasset.keys()
            haslst.sort(cmp_by_rev_func(revlog))
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset[n] = 1
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)

        # This is a function generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup.  The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode we saw any manifest
            # referenced by so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = {}
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(n))
                if linknode in has_cl_set:
                    has_mnfst_set[n] = 1
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # the inner function.
        def filenode_collector(changedfiles):
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to.  It
            # does this by assuming the a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    delta = mdiff.patchtext(mnfst.delta(mnfstnode))
                    # For each line in the delta
                    for dline in delta.splitlines():
                        # get the filename and filenode for that line
                        f, fnode = dline.split('\0')
                        fnode = bin(fnode[:40])
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file in we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes

        # We have a list of filenodes we think we need for a file, lets remove
        # all those we now the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = {}
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(n))
                if clnode in has_cl_set:
                    hasset[n] = 1
            prune_parents(filerevlog, hasset, msngset)

        # A function generator function that sets up the a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Now that we have all theses utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            changedfiles = changedfiles.keys()
            changedfiles.sort()
            # Go through all our files in order sorted by name.
            for fname in changedfiles:
                filerevlog = self.file(fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if msng_filenode_set.has_key(fname):
                    prune_filenodes(fname, filerevlog)
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.genchunk(fname)
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if msng_filenode_set.has_key(fname):
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()

            if msng_cl_lst:
                self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())
1596 1598
    def changegroup(self, basenodes, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        basenodes is a list of changelog nodes the recipient already has;
        source is passed to the 'preoutgoing'/'outgoing' hooks.  Returns a
        chunkbuffer yielding the changegroup stream."""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        nodes = cl.nodesbetween(basenodes, None)[0]
        # revision numbers of the outgoing changesets, for fast membership
        revset = dict.fromkeys([cl.rev(n) for n in nodes])

        # a changeset 'owns' itself, so its lookup is identity
        def identity(x):
            return x

        def gennodelst(revlog):
            # yield the nodes of this revlog that are linked to an
            # outgoing changeset, in revision order
            for r in xrange(0, revlog.count()):
                n = revlog.node(r)
                if revlog.linkrev(n) in revset:
                    yield n

        def changed_file_collector(changedfileset):
            # callback: record every file touched by an outgoing changeset
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        def lookuprevlink_func(revlog):
            # callback: map a revlog node back to its owning changenode
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(n))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            # changelog group first...
            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk
            changedfiles = changedfiles.keys()
            changedfiles.sort()

            # ...then the manifest group...
            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            # ...then one group per changed file, in sorted name order
            for fname in changedfiles:
                filerevlog = self.file(fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.genchunk(fname)
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            yield changegroup.closechunk()

            if nodes:
                self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())
1662 1664
    def addchangegroup(self, source, srctype, url):
        """add changegroup to repo.
        returns number of heads modified or added + 1.

        source is a changegroup stream; srctype ('push', 'pull', ...) and
        url are passed through to the hooks."""

        # callback while adding changesets: report progress, return the
        # future changeset count as the linkrev for the incoming node
        def csmap(x):
            self.ui.debug(_("add changeset %s\n") % short(x))
            return cl.count()

        # callback while adding manifests/files: map changelog node -> rev
        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        tr = self.transaction()

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = None
        try:
            cl = appendfile.appendchangelog(self.opener, self.changelog.version)

            oldheads = len(cl.heads())

            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            cor = cl.count() - 1
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, tr, 1) is None:
                raise util.Abort(_("received changelog group is empty"))
            cnr = cl.count() - 1
            changesets = cnr - cor

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, tr)

            # process the files
            self.ui.status(_("adding file changes\n"))
            while 1:
                f = changegroup.getchunk(source)
                if not f:
                    # empty chunk marks the end of the file groups
                    break
                self.ui.debug(_("adding %s revisions\n") % f)
                fl = self.file(f)
                o = fl.count()
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, tr) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += fl.count() - o
                files += 1

            cl.writedata()
        finally:
            if cl:
                cl.cleanup()

        # make changelog see real files again
        self.changelog = changelog.changelog(self.opener, self.changelog.version)
        self.changelog.checkinlinesize(tr)

        newheads = len(self.changelog.heads())
        heads = ""
        if oldheads and newheads != oldheads:
            heads = _(" (%+d heads)") % (newheads - oldheads)

        self.ui.status(_("added %d changesets"
                         " with %d changes to %d files%s\n")
                         % (changesets, revisions, files, heads))

        if changesets > 0:
            # pretxnchangegroup may still veto the transaction (throw=True)
            self.hook('pretxnchangegroup', throw=True,
                      node=hex(self.changelog.node(cor+1)), source=srctype,
                      url=url)

        tr.close()

        if changesets > 0:
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                      source=srctype, url=url)

            # one 'incoming' hook call per added changeset
            for i in range(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)),
                          source=srctype, url=url)

        return newheads - oldheads + 1
1758 1760
1759 1761
    def stream_in(self, remote):
        """Clone by copying raw repository files streamed from remote.

        Protocol: one status line (0 = OK), one "<files> <bytes>" line,
        then for each file a "<name>\\0<size>" line followed by exactly
        size bytes of content.  Returns number of heads + 1, matching
        addchangegroup's convention."""
        fp = remote.stream_out()
        resp = int(fp.readline())
        if resp != 0:
            raise util.Abort(_('operation forbidden by server'))
        self.ui.status(_('streaming all changes\n'))
        total_files, total_bytes = map(int, fp.readline().split(' ', 1))
        self.ui.status(_('%d files to transfer, %s of data\n') %
                       (total_files, util.bytecount(total_bytes)))
        start = time.time()
        for i in xrange(total_files):
            name, size = fp.readline().split('\0', 1)
            size = int(size)
            self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
            ofp = self.opener(name, 'w')
            for chunk in util.filechunkiter(fp, limit=size):
                ofp.write(chunk)
            ofp.close()
        elapsed = time.time() - start
        self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(total_bytes), elapsed,
                        util.bytecount(total_bytes / elapsed)))
        # pick up the newly-written changelog/manifest from disk
        self.reload()
        return len(self.heads()) + 1
1784 1786
1785 1787 def clone(self, remote, heads=[], stream=False):
1786 1788 '''clone remote repository.
1787 1789
1788 1790 keyword arguments:
1789 1791 heads: list of revs to clone (forces use of pull)
1790 1792 stream: use streaming clone if possible'''
1791 1793
1792 1794 # now, all clients that can request uncompressed clones can
1793 1795 # read repo formats supported by all servers that can serve
1794 1796 # them.
1795 1797
1796 1798 # if revlog format changes, client will have to check version
1797 1799 # and format flags on "stream" capability, and use
1798 1800 # uncompressed only if compatible.
1799 1801
1800 1802 if stream and not heads and remote.capable('stream'):
1801 1803 return self.stream_in(remote)
1802 1804 return self.pull(remote, heads)
1803 1805
# used to avoid circular references so destructors work
def aftertrans(base):
    """Return a callback that renames the journal files under 'base'
    to their undo names once a transaction completes."""
    journaldir = base
    def a():
        util.rename(os.path.join(journaldir, "journal"),
                    os.path.join(journaldir, "undo"))
        util.rename(os.path.join(journaldir, "journal.dirstate"),
                    os.path.join(journaldir, "undo.dirstate"))
    return a
1812 1814
def instance(ui, path, create):
    # Repository factory entry point: strip a leading "file:" scheme
    # and open (or create) a local repository at that path.
    return localrepository(ui, util.drop_scheme('file', path), create)
1815 1817
def islocal(path):
    # This module only ever handles repositories on local disk.
    return True
General Comments 0
You need to be logged in to leave comments. Login now