Remove manifest.readflags
Matt Mackall
r2841:e3fb4223 default
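The two hunks below drop the duplicated manifest argument from both calls
to manifest.add(): with readflags gone, exec flags live on the manifest
dict itself (see manifestdict.set()/execf() in the code), so the second
copy of m1, apparently left over from the separate flags mapping, is
redundant. A sketch of the changed call sites:

    mn = self.manifest.add(m1, m1, tr, linkrev, c1[0], c2[0])  # before
    mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0])      # after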
@@ -1,1758 +1,1758 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import *
9 9 from i18n import gettext as _
10 10 from demandload import *
11 11 import repo
12 12 demandload(globals(), "appendfile changegroup")
13 13 demandload(globals(), "changelog dirstate filelog manifest context")
14 14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
15 15 demandload(globals(), "os revlog time util")
16 16
17 17 class localrepository(repo.repository):
18 18 capabilities = ()
19 19
20 20 def __del__(self):
21 21 self.transhandle = None
22 22 def __init__(self, parentui, path=None, create=0):
23 23 repo.repository.__init__(self)
24 24 if not path:
25 25 p = os.getcwd()
26 26 while not os.path.isdir(os.path.join(p, ".hg")):
27 27 oldp = p
28 28 p = os.path.dirname(p)
29 29 if p == oldp:
30 30 raise repo.RepoError(_("no repo found"))
31 31 path = p
32 32 self.path = os.path.join(path, ".hg")
33 33
34 34 if not create and not os.path.isdir(self.path):
35 35 raise repo.RepoError(_("repository %s not found") % path)
36 36
37 37 self.root = os.path.abspath(path)
38 38 self.origroot = path
39 39 self.ui = ui.ui(parentui=parentui)
40 40 self.opener = util.opener(self.path)
41 41 self.wopener = util.opener(self.root)
42 42
43 43 try:
44 44 self.ui.readconfig(self.join("hgrc"), self.root)
45 45 except IOError:
46 46 pass
47 47
48 48 v = self.ui.revlogopts
49 49 self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
50 50 self.revlogv1 = self.revlogversion != revlog.REVLOGV0
51 51 fl = v.get('flags', None)
52 52 flags = 0
53 53 if fl != None:
54 54 for x in fl.split():
55 55 flags |= revlog.flagstr(x)
56 56 elif self.revlogv1:
57 57 flags = revlog.REVLOG_DEFAULT_FLAGS
58 58
59 59 v = self.revlogversion | flags
60 60 self.manifest = manifest.manifest(self.opener, v)
61 61 self.changelog = changelog.changelog(self.opener, v)
62 62
63 63 # the changelog might not have the inline index flag
64 64 # on. If the format of the changelog is the same as found in
65 65 # .hgrc, apply any flags found in the .hgrc as well.
66 66 # Otherwise, just use the version from the changelog
67 67 v = self.changelog.version
68 68 if v == self.revlogversion:
69 69 v |= flags
70 70 self.revlogversion = v
71 71
72 72 self.tagscache = None
73 73 self.nodetagscache = None
74 74 self.encodepats = None
75 75 self.decodepats = None
76 76 self.transhandle = None
77 77
78 78 if create:
79 79 if not os.path.exists(path):
80 80 os.mkdir(path)
81 81 os.mkdir(self.path)
82 82 os.mkdir(self.join("data"))
83 83
84 84 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
85 85
86 86 def url(self):
87 87 return 'file:' + self.root
88 88
89 89 def hook(self, name, throw=False, **args):
90 90 def callhook(hname, funcname):
91 91 '''call python hook. hook is callable object, looked up as
92 92 name in python module. if callable returns "true", hook
93 93 fails, else passes. if hook raises exception, treated as
94 94 hook failure. exception propagates if throw is "true".
95 95
96 96 reason for "true" meaning "hook failed" is so that
97 97 unmodified commands (e.g. mercurial.commands.update) can
98 98 be run as hooks without wrappers to convert return values.'''
99 99
100 100 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
101 101 d = funcname.rfind('.')
102 102 if d == -1:
103 103 raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
104 104 % (hname, funcname))
105 105 modname = funcname[:d]
106 106 try:
107 107 obj = __import__(modname)
108 108 except ImportError:
109 109 try:
110 110 # extensions are loaded with hgext_ prefix
111 111 obj = __import__("hgext_%s" % modname)
112 112 except ImportError:
113 113 raise util.Abort(_('%s hook is invalid '
114 114 '(import of "%s" failed)') %
115 115 (hname, modname))
116 116 try:
117 117 for p in funcname.split('.')[1:]:
118 118 obj = getattr(obj, p)
119 119 except AttributeError, err:
120 120 raise util.Abort(_('%s hook is invalid '
121 121 '("%s" is not defined)') %
122 122 (hname, funcname))
123 123 if not callable(obj):
124 124 raise util.Abort(_('%s hook is invalid '
125 125 '("%s" is not callable)') %
126 126 (hname, funcname))
127 127 try:
128 128 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
129 129 except (KeyboardInterrupt, util.SignalInterrupt):
130 130 raise
131 131 except Exception, exc:
132 132 if isinstance(exc, util.Abort):
133 133 self.ui.warn(_('error: %s hook failed: %s\n') %
134 134 (hname, exc.args[0] % exc.args[1:]))
135 135 else:
136 136 self.ui.warn(_('error: %s hook raised an exception: '
137 137 '%s\n') % (hname, exc))
138 138 if throw:
139 139 raise
140 140 self.ui.print_exc()
141 141 return True
142 142 if r:
143 143 if throw:
144 144 raise util.Abort(_('%s hook failed') % hname)
145 145 self.ui.warn(_('warning: %s hook failed\n') % hname)
146 146 return r
147 147
148 148 def runhook(name, cmd):
149 149 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
150 150 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
151 151 r = util.system(cmd, environ=env, cwd=self.root)
152 152 if r:
153 153 desc, r = util.explain_exit(r)
154 154 if throw:
155 155 raise util.Abort(_('%s hook %s') % (name, desc))
156 156 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
157 157 return r
158 158
159 159 r = False
160 160 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
161 161 if hname.split(".", 1)[0] == name and cmd]
162 162 hooks.sort()
163 163 for hname, cmd in hooks:
164 164 if cmd.startswith('python:'):
165 165 r = callhook(hname, cmd[7:].strip()) or r
166 166 else:
167 167 r = runhook(hname, cmd) or r
168 168 return r
169 169
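    # Illustrative sketch (not part of this module): a python hook as
    # dispatched by callhook() above. Assuming an hgrc entry such as
    # "pretag.check = python:myhooks.checktag" under [hooks] (module and
    # function names hypothetical), the hook receives ui, repo, hooktype
    # and the keyword arguments passed to self.hook(); a truthy return
    # value means failure, per the callhook docstring:
    #
    #   def checktag(ui, repo, hooktype, node, tag, local):
    #       if tag.startswith('tmp-'):
    #           ui.warn('tag name %s is reserved\n' % tag)
    #           return True   # fail the pretag hook
    #       return False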
170 170 tag_disallowed = ':\r\n'
171 171
172 172 def tag(self, name, node, local=False, message=None, user=None, date=None):
173 173 '''tag a revision with a symbolic name.
174 174
175 175 if local is True, the tag is stored in a per-repository file.
176 176 otherwise, it is stored in the .hgtags file, and a new
177 177 changeset is committed with the change.
178 178
179 179 keyword arguments:
180 180
181 181 local: whether to store tag in non-version-controlled file
182 182 (default False)
183 183
184 184 message: commit message to use if committing
185 185
186 186 user: name of user to use if committing
187 187
188 188 date: date tuple to use if committing'''
189 189
190 190 for c in self.tag_disallowed:
191 191 if c in name:
192 192 raise util.Abort(_('%r cannot be used in a tag name') % c)
193 193
194 194 self.hook('pretag', throw=True, node=node, tag=name, local=local)
195 195
196 196 if local:
197 197 self.opener('localtags', 'a').write('%s %s\n' % (node, name))
198 198 self.hook('tag', node=node, tag=name, local=local)
199 199 return
200 200
201 201 for x in self.changes():
202 202 if '.hgtags' in x:
203 203 raise util.Abort(_('working copy of .hgtags is changed '
204 204 '(please commit .hgtags manually)'))
205 205
206 206 self.wfile('.hgtags', 'ab').write('%s %s\n' % (node, name))
207 207 if self.dirstate.state('.hgtags') == '?':
208 208 self.add(['.hgtags'])
209 209
210 210 if not message:
211 211 message = _('Added tag %s for changeset %s') % (name, node)
212 212
213 213 self.commit(['.hgtags'], message, user, date)
214 214 self.hook('tag', node=node, tag=name, local=local)
215 215
216 216 def tags(self):
217 217 '''return a mapping of tag to node'''
218 218 if not self.tagscache:
219 219 self.tagscache = {}
220 220
221 221 def parsetag(line, context):
222 222 if not line:
223 223 return
224 224 s = line.split(" ", 1)
225 225 if len(s) != 2:
226 226 self.ui.warn(_("%s: cannot parse entry\n") % context)
227 227 return
228 228 node, key = s
229 229 key = key.strip()
230 230 try:
231 231 bin_n = bin(node)
232 232 except TypeError:
233 233 self.ui.warn(_("%s: node '%s' is not well formed\n") %
234 234 (context, node))
235 235 return
236 236 if bin_n not in self.changelog.nodemap:
237 237 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
238 238 (context, key))
239 239 return
240 240 self.tagscache[key] = bin_n
241 241
242 242 # read the tags file from each head, ending with the tip,
243 243 # and add each tag found to the map, with "newer" ones
244 244 # taking precedence
245 245 heads = self.heads()
246 246 heads.reverse()
247 247 fl = self.file(".hgtags")
248 248 for node in heads:
249 249 change = self.changelog.read(node)
250 250 rev = self.changelog.rev(node)
251 251 fn, ff = self.manifest.find(change[0], '.hgtags')
252 252 if fn is None: continue
253 253 count = 0
254 254 for l in fl.read(fn).splitlines():
255 255 count += 1
256 256 parsetag(l, _(".hgtags (rev %d:%s), line %d") %
257 257 (rev, short(node), count))
258 258 try:
259 259 f = self.opener("localtags")
260 260 count = 0
261 261 for l in f:
262 262 count += 1
263 263 parsetag(l, _("localtags, line %d") % count)
264 264 except IOError:
265 265 pass
266 266
267 267 self.tagscache['tip'] = self.changelog.tip()
268 268
269 269 return self.tagscache
270 270
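    # Illustrative note: parsetag() above expects one "<hex node> <tag>"
    # pair per line in both .hgtags and localtags, e.g. (node value
    # hypothetical):
    #
    #   1e0b8e3bbc29f4a33e5a54154b5c1c1a2cf7e9c1 v1.0
    #
    # Heads are read tip-last and later entries overwrite earlier ones,
    # so newer tag definitions win, as the comment above describes.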
271 271 def tagslist(self):
272 272 '''return a list of tags ordered by revision'''
273 273 l = []
274 274 for t, n in self.tags().items():
275 275 try:
276 276 r = self.changelog.rev(n)
277 277 except:
278 278 r = -2 # sort to the beginning of the list if unknown
279 279 l.append((r, t, n))
280 280 l.sort()
281 281 return [(t, n) for r, t, n in l]
282 282
283 283 def nodetags(self, node):
284 284 '''return the tags associated with a node'''
285 285 if not self.nodetagscache:
286 286 self.nodetagscache = {}
287 287 for t, n in self.tags().items():
288 288 self.nodetagscache.setdefault(n, []).append(t)
289 289 return self.nodetagscache.get(node, [])
290 290
291 291 def lookup(self, key):
292 292 try:
293 293 return self.tags()[key]
294 294 except KeyError:
295 295 if key == '.':
296 296 key = self.dirstate.parents()[0]
297 297 if key == nullid:
298 298 raise repo.RepoError(_("no revision checked out"))
299 299 try:
300 300 return self.changelog.lookup(key)
301 301 except:
302 302 raise repo.RepoError(_("unknown revision '%s'") % key)
303 303
304 304 def dev(self):
305 305 return os.lstat(self.path).st_dev
306 306
307 307 def local(self):
308 308 return True
309 309
310 310 def join(self, f):
311 311 return os.path.join(self.path, f)
312 312
313 313 def wjoin(self, f):
314 314 return os.path.join(self.root, f)
315 315
316 316 def file(self, f):
317 317 if f[0] == '/':
318 318 f = f[1:]
319 319 return filelog.filelog(self.opener, f, self.revlogversion)
320 320
321 321 def changectx(self, changeid):
322 322 return context.changectx(self, changeid)
323 323
324 324 def filectx(self, path, changeid=None, fileid=None):
325 325 """changeid can be a changeset revision, node, or tag.
326 326 fileid can be a file revision or node."""
327 327 return context.filectx(self, path, changeid, fileid)
328 328
329 329 def getcwd(self):
330 330 return self.dirstate.getcwd()
331 331
332 332 def wfile(self, f, mode='r'):
333 333 return self.wopener(f, mode)
334 334
335 335 def wread(self, filename):
336 336 if self.encodepats == None:
337 337 l = []
338 338 for pat, cmd in self.ui.configitems("encode"):
339 339 mf = util.matcher(self.root, "", [pat], [], [])[1]
340 340 l.append((mf, cmd))
341 341 self.encodepats = l
342 342
343 343 data = self.wopener(filename, 'r').read()
344 344
345 345 for mf, cmd in self.encodepats:
346 346 if mf(filename):
347 347 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
348 348 data = util.filter(data, cmd)
349 349 break
350 350
351 351 return data
352 352
353 353 def wwrite(self, filename, data, fd=None):
354 354 if self.decodepats == None:
355 355 l = []
356 356 for pat, cmd in self.ui.configitems("decode"):
357 357 mf = util.matcher(self.root, "", [pat], [], [])[1]
358 358 l.append((mf, cmd))
359 359 self.decodepats = l
360 360
361 361 for mf, cmd in self.decodepats:
362 362 if mf(filename):
363 363 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
364 364 data = util.filter(data, cmd)
365 365 break
366 366
367 367 if fd:
368 368 return fd.write(data)
369 369 return self.wopener(filename, 'w').write(data)
370 370
371 371 def transaction(self):
372 372 tr = self.transhandle
373 373 if tr != None and tr.running():
374 374 return tr.nest()
375 375
376 376 # save dirstate for rollback
377 377 try:
378 378 ds = self.opener("dirstate").read()
379 379 except IOError:
380 380 ds = ""
381 381 self.opener("journal.dirstate", "w").write(ds)
382 382
383 383 tr = transaction.transaction(self.ui.warn, self.opener,
384 384 self.join("journal"),
385 385 aftertrans(self.path))
386 386 self.transhandle = tr
387 387 return tr
388 388
389 389 def recover(self):
390 390 l = self.lock()
391 391 if os.path.exists(self.join("journal")):
392 392 self.ui.status(_("rolling back interrupted transaction\n"))
393 393 transaction.rollback(self.opener, self.join("journal"))
394 394 self.reload()
395 395 return True
396 396 else:
397 397 self.ui.warn(_("no interrupted transaction available\n"))
398 398 return False
399 399
400 400 def rollback(self, wlock=None):
401 401 if not wlock:
402 402 wlock = self.wlock()
403 403 l = self.lock()
404 404 if os.path.exists(self.join("undo")):
405 405 self.ui.status(_("rolling back last transaction\n"))
406 406 transaction.rollback(self.opener, self.join("undo"))
407 407 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
408 408 self.reload()
409 409 self.wreload()
410 410 else:
411 411 self.ui.warn(_("no rollback information available\n"))
412 412
413 413 def wreload(self):
414 414 self.dirstate.read()
415 415
416 416 def reload(self):
417 417 self.changelog.load()
418 418 self.manifest.load()
419 419 self.tagscache = None
420 420 self.nodetagscache = None
421 421
422 422 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
423 423 desc=None):
424 424 try:
425 425 l = lock.lock(self.join(lockname), 0, releasefn, desc=desc)
426 426 except lock.LockHeld, inst:
427 427 if not wait:
428 428 raise
429 429 self.ui.warn(_("waiting for lock on %s held by %s\n") %
430 430 (desc, inst.args[0]))
431 431 # default to 600 seconds timeout
432 432 l = lock.lock(self.join(lockname),
433 433 int(self.ui.config("ui", "timeout") or 600),
434 434 releasefn, desc=desc)
435 435 if acquirefn:
436 436 acquirefn()
437 437 return l
438 438
439 439 def lock(self, wait=1):
440 440 return self.do_lock("lock", wait, acquirefn=self.reload,
441 441 desc=_('repository %s') % self.origroot)
442 442
443 443 def wlock(self, wait=1):
444 444 return self.do_lock("wlock", wait, self.dirstate.write,
445 445 self.wreload,
446 446 desc=_('working directory of %s') % self.origroot)
447 447
448 448 def checkfilemerge(self, filename, text, filelog, manifest1, manifest2):
449 449 "determine whether a new filenode is needed"
450 450 fp1 = manifest1.get(filename, nullid)
451 451 fp2 = manifest2.get(filename, nullid)
452 452
453 453 if fp2 != nullid:
454 454 # is one parent an ancestor of the other?
455 455 fpa = filelog.ancestor(fp1, fp2)
456 456 if fpa == fp1:
457 457 fp1, fp2 = fp2, nullid
458 458 elif fpa == fp2:
459 459 fp2 = nullid
460 460
461 461 # is the file unmodified from the parent? report existing entry
462 462 if fp2 == nullid and text == filelog.read(fp1):
463 463 return (fp1, None, None)
464 464
465 465 return (None, fp1, fp2)
466 466
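    # Illustrative summary of checkfilemerge(): when one parent filenode
    # is an ancestor of the other, the merge is really linear, so the
    # descendant is kept as fp1 and fp2 collapses to nullid; if the text
    # then matches filelog.read(fp1), (fp1, None, None) tells the caller
    # to reuse the existing filenode instead of adding a new one.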
467 467 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
468 468 orig_parent = self.dirstate.parents()[0] or nullid
469 469 p1 = p1 or self.dirstate.parents()[0] or nullid
470 470 p2 = p2 or self.dirstate.parents()[1] or nullid
471 471 c1 = self.changelog.read(p1)
472 472 c2 = self.changelog.read(p2)
473 473 m1 = self.manifest.read(c1[0]).copy()
474 474 m2 = self.manifest.read(c2[0])
475 475 changed = []
476 476
477 477 if orig_parent == p1:
478 478 update_dirstate = 1
479 479 else:
480 480 update_dirstate = 0
481 481
482 482 if not wlock:
483 483 wlock = self.wlock()
484 484 l = self.lock()
485 485 tr = self.transaction()
486 486 linkrev = self.changelog.count()
487 487 for f in files:
488 488 try:
489 489 t = self.wread(f)
490 490 m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
491 491 r = self.file(f)
492 492
493 493 (entry, fp1, fp2) = self.checkfilemerge(f, t, r, m1, m2)
494 494 if entry:
495 495 m1[f] = entry
496 496 continue
497 497
498 498 m1[f] = r.add(t, {}, tr, linkrev, fp1, fp2)
499 499 changed.append(f)
500 500 if update_dirstate:
501 501 self.dirstate.update([f], "n")
502 502 except IOError:
503 503 try:
504 504 del m1[f]
506 506 if update_dirstate:
507 507 self.dirstate.forget([f])
508 508 except:
509 509 # deleted from p2?
510 510 pass
511 511
512 mnode = self.manifest.add(m1, m1, tr, linkrev, c1[0], c2[0])
512 mnode = self.manifest.add(m1, tr, linkrev, c1[0], c2[0])
513 513 user = user or self.ui.username()
514 514 n = self.changelog.add(mnode, changed, text, tr, p1, p2, user, date)
515 515 tr.close()
516 516 if update_dirstate:
517 517 self.dirstate.setparents(n, nullid)
518 518
519 519 def commit(self, files=None, text="", user=None, date=None,
520 520 match=util.always, force=False, lock=None, wlock=None,
521 521 force_editor=False):
522 522 commit = []
523 523 remove = []
524 524 changed = []
525 525
526 526 if files:
527 527 for f in files:
528 528 s = self.dirstate.state(f)
529 529 if s in 'nmai':
530 530 commit.append(f)
531 531 elif s == 'r':
532 532 remove.append(f)
533 533 else:
534 534 self.ui.warn(_("%s not tracked!\n") % f)
535 535 else:
536 536 modified, added, removed, deleted, unknown = self.changes(match=match)
537 537 commit = modified + added
538 538 remove = removed
539 539
540 540 p1, p2 = self.dirstate.parents()
541 541 c1 = self.changelog.read(p1)
542 542 c2 = self.changelog.read(p2)
543 543 m1 = self.manifest.read(c1[0]).copy()
544 544 m2 = self.manifest.read(c2[0])
545 545
546 546 if not commit and not remove and not force and p2 == nullid:
547 547 self.ui.status(_("nothing changed\n"))
548 548 return None
549 549
550 550 xp1 = hex(p1)
551 551 if p2 == nullid: xp2 = ''
552 552 else: xp2 = hex(p2)
553 553
554 554 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
555 555
556 556 if not wlock:
557 557 wlock = self.wlock()
558 558 if not lock:
559 559 lock = self.lock()
560 560 tr = self.transaction()
561 561
562 562 # check in files
563 563 new = {}
564 564 linkrev = self.changelog.count()
565 565 commit.sort()
566 566 for f in commit:
567 567 self.ui.note(f + "\n")
568 568 try:
569 569 m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
570 570 t = self.wread(f)
571 571 except IOError:
572 572 self.ui.warn(_("trouble committing %s!\n") % f)
573 573 raise
574 574
575 575 r = self.file(f)
576 576
577 577 meta = {}
578 578 cp = self.dirstate.copied(f)
579 579 if cp:
580 580 meta["copy"] = cp
581 581 meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid)))
582 582 self.ui.debug(_(" %s: copy %s:%s\n") % (f, cp, meta["copyrev"]))
583 583 fp1, fp2 = nullid, nullid
584 584 else:
585 585 entry, fp1, fp2 = self.checkfilemerge(f, t, r, m1, m2)
586 586 if entry:
587 587 new[f] = entry
588 588 continue
589 589
590 590 new[f] = r.add(t, meta, tr, linkrev, fp1, fp2)
591 591 # remember what we've added so that we can later calculate
592 592 # the files to pull from a set of changesets
593 593 changed.append(f)
594 594
595 595 # update manifest
596 596 m1.update(new)
597 597 for f in remove:
598 598 if f in m1:
599 599 del m1[f]
600 mn = self.manifest.add(m1, m1, tr, linkrev, c1[0], c2[0],
600 mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0],
601 601 (new, remove))
602 602
603 603 # add changeset
604 604 new = new.keys()
605 605 new.sort()
606 606
607 607 user = user or self.ui.username()
608 608 if not text or force_editor:
609 609 edittext = []
610 610 if text:
611 611 edittext.append(text)
612 612 edittext.append("")
613 613 if p2 != nullid:
614 614 edittext.append("HG: branch merge")
615 615 edittext.extend(["HG: changed %s" % f for f in changed])
616 616 edittext.extend(["HG: removed %s" % f for f in remove])
617 617 if not changed and not remove:
618 618 edittext.append("HG: no files changed")
619 619 edittext.append("")
620 620 # run editor in the repository root
621 621 olddir = os.getcwd()
622 622 os.chdir(self.root)
623 623 text = self.ui.edit("\n".join(edittext), user)
624 624 os.chdir(olddir)
625 625
626 626 lines = [line.rstrip() for line in text.rstrip().splitlines()]
627 627 while lines and not lines[0]:
628 628 del lines[0]
629 629 if not lines:
630 630 return None
631 631 text = '\n'.join(lines)
632 632 n = self.changelog.add(mn, changed + remove, text, tr, p1, p2, user, date)
633 633 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
634 634 parent2=xp2)
635 635 tr.close()
636 636
637 637 self.dirstate.setparents(n)
638 638 self.dirstate.update(new, "n")
639 639 self.dirstate.forget(remove)
640 640
641 641 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
642 642 return n
643 643
644 644 def walk(self, node=None, files=[], match=util.always, badmatch=None):
645 645 if node:
646 646 fdict = dict.fromkeys(files)
647 647 for fn in self.manifest.read(self.changelog.read(node)[0]):
648 648 fdict.pop(fn, None)
649 649 if match(fn):
650 650 yield 'm', fn
651 651 for fn in fdict:
652 652 if badmatch and badmatch(fn):
653 653 if match(fn):
654 654 yield 'b', fn
655 655 else:
656 656 self.ui.warn(_('%s: No such file in rev %s\n') % (
657 657 util.pathto(self.getcwd(), fn), short(node)))
658 658 else:
659 659 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
660 660 yield src, fn
661 661
662 662 def status(self, node1=None, node2=None, files=[], match=util.always,
663 663 wlock=None, list_ignored=False, list_clean=False):
664 664 """return status of files between two nodes or node and working directory
665 665
666 666 If node1 is None, use the first dirstate parent instead.
667 667 If node2 is None, compare node1 with working directory.
668 668 """
669 669
670 670 def fcmp(fn, mf):
671 671 t1 = self.wread(fn)
672 672 t2 = self.file(fn).read(mf.get(fn, nullid))
673 673 return cmp(t1, t2)
674 674
675 675 def mfmatches(node):
676 676 change = self.changelog.read(node)
677 677 mf = dict(self.manifest.read(change[0]))
678 678 for fn in mf.keys():
679 679 if not match(fn):
680 680 del mf[fn]
681 681 return mf
682 682
683 683 modified, added, removed, deleted, unknown = [], [], [], [], []
684 684 ignored, clean = [], []
685 685
686 686 compareworking = False
687 687 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
688 688 compareworking = True
689 689
690 690 if not compareworking:
691 691 # read the manifest from node1 before the manifest from node2,
692 692 # so that we'll hit the manifest cache if we're going through
693 693 # all the revisions in parent->child order.
694 694 mf1 = mfmatches(node1)
695 695
696 696 # are we comparing the working directory?
697 697 if not node2:
698 698 if not wlock:
699 699 try:
700 700 wlock = self.wlock(wait=0)
701 701 except lock.LockException:
702 702 wlock = None
703 703 (lookup, modified, added, removed, deleted, unknown,
704 704 ignored, clean) = self.dirstate.status(files, match,
705 705 list_ignored, list_clean)
706 706
707 707 # are we comparing working dir against its parent?
708 708 if compareworking:
709 709 if lookup:
710 710 # do a full compare of any files that might have changed
711 711 mf2 = mfmatches(self.dirstate.parents()[0])
712 712 for f in lookup:
713 713 if fcmp(f, mf2):
714 714 modified.append(f)
715 715 elif wlock is not None:
716 716 self.dirstate.update([f], "n")
717 717 else:
718 718 # we are comparing working dir against non-parent
719 719 # generate a pseudo-manifest for the working dir
720 720 mf2 = mfmatches(self.dirstate.parents()[0])
721 721 for f in lookup + modified + added:
722 722 mf2[f] = ""
723 723 for f in removed:
724 724 if f in mf2:
725 725 del mf2[f]
726 726 else:
727 727 # we are comparing two revisions
728 728 mf2 = mfmatches(node2)
729 729
730 730 if not compareworking:
731 731 # flush lists from dirstate before comparing manifests
732 732 modified, added, clean = [], [], []
733 733
734 734 # make sure to sort the files so we talk to the disk in a
735 735 # reasonable order
736 736 mf2keys = mf2.keys()
737 737 mf2keys.sort()
738 738 for fn in mf2keys:
739 739 if mf1.has_key(fn):
740 740 if mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1)):
741 741 modified.append(fn)
742 742 elif list_clean:
743 743 clean.append(fn)
744 744 del mf1[fn]
745 745 else:
746 746 added.append(fn)
747 747
748 748 removed = mf1.keys()
749 749
750 750 # sort and return results:
751 751 for l in modified, added, removed, deleted, unknown, ignored, clean:
752 752 l.sort()
753 753 return (modified, added, removed, deleted, unknown, ignored, clean)
754 754
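    # Illustrative usage sketch: the status() result unpacks as
    #
    #   (modified, added, removed, deleted, unknown,
    #    ignored, clean) = repo.status(list_ignored=True, list_clean=True)
    #
    # where ignored and clean stay empty unless the matching list_* flag
    # is set; changes() below trims those trailing lists for old callers.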
755 755 def changes(self, node1=None, node2=None, files=[], match=util.always,
756 756 wlock=None, list_ignored=False, list_clean=False):
757 757 '''DEPRECATED - use status instead'''
758 758 marduit = self.status(node1, node2, files, match, wlock,
759 759 list_ignored, list_clean)
760 760 if list_ignored:
761 761 return marduit[:-1]
762 762 else:
763 763 return marduit[:-2]
764 764
765 765 def add(self, list, wlock=None):
766 766 if not wlock:
767 767 wlock = self.wlock()
768 768 for f in list:
769 769 p = self.wjoin(f)
770 770 if not os.path.exists(p):
771 771 self.ui.warn(_("%s does not exist!\n") % f)
772 772 elif not os.path.isfile(p):
773 773 self.ui.warn(_("%s not added: only files supported currently\n")
774 774 % f)
775 775 elif self.dirstate.state(f) in 'an':
776 776 self.ui.warn(_("%s already tracked!\n") % f)
777 777 else:
778 778 self.dirstate.update([f], "a")
779 779
780 780 def forget(self, list, wlock=None):
781 781 if not wlock:
782 782 wlock = self.wlock()
783 783 for f in list:
784 784 if self.dirstate.state(f) not in 'ai':
785 785 self.ui.warn(_("%s not added!\n") % f)
786 786 else:
787 787 self.dirstate.forget([f])
788 788
789 789 def remove(self, list, unlink=False, wlock=None):
790 790 if unlink:
791 791 for f in list:
792 792 try:
793 793 util.unlink(self.wjoin(f))
794 794 except OSError, inst:
795 795 if inst.errno != errno.ENOENT:
796 796 raise
797 797 if not wlock:
798 798 wlock = self.wlock()
799 799 for f in list:
800 800 p = self.wjoin(f)
801 801 if os.path.exists(p):
802 802 self.ui.warn(_("%s still exists!\n") % f)
803 803 elif self.dirstate.state(f) == 'a':
804 804 self.dirstate.forget([f])
805 805 elif f not in self.dirstate:
806 806 self.ui.warn(_("%s not tracked!\n") % f)
807 807 else:
808 808 self.dirstate.update([f], "r")
809 809
810 810 def undelete(self, list, wlock=None):
811 811 p = self.dirstate.parents()[0]
812 812 mn = self.changelog.read(p)[0]
813 813 m = self.manifest.read(mn)
814 814 if not wlock:
815 815 wlock = self.wlock()
816 816 for f in list:
817 817 if self.dirstate.state(f) not in "r":
818 818 self.ui.warn("%s not removed!\n" % f)
819 819 else:
820 820 t = self.file(f).read(m[f])
821 821 self.wwrite(f, t)
822 822 util.set_exec(self.wjoin(f), m.execf(f))
823 823 self.dirstate.update([f], "n")
824 824
825 825 def copy(self, source, dest, wlock=None):
826 826 p = self.wjoin(dest)
827 827 if not os.path.exists(p):
828 828 self.ui.warn(_("%s does not exist!\n") % dest)
829 829 elif not os.path.isfile(p):
830 830 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
831 831 else:
832 832 if not wlock:
833 833 wlock = self.wlock()
834 834 if self.dirstate.state(dest) == '?':
835 835 self.dirstate.update([dest], "a")
836 836 self.dirstate.copy(source, dest)
837 837
838 838 def heads(self, start=None):
839 839 heads = self.changelog.heads(start)
840 840 # sort the output in rev descending order
841 841 heads = [(-self.changelog.rev(h), h) for h in heads]
842 842 heads.sort()
843 843 return [n for (r, n) in heads]
844 844
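    # Illustrative note on the sort above: negating the revision number
    # lets a plain ascending sort return heads newest-first, e.g. heads
    # at revs 3, 9 and 5 become (-9, h), (-5, h), (-3, h) once sorted.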
845 845 # branchlookup returns a dict giving a list of branches for
846 846 # each head. A branch is defined as the tag of a node or
847 847 # the branch of the node's parents. If a node has multiple
848 848 # branch tags, tags are eliminated if they are visible from other
849 849 # branch tags.
850 850 #
851 851 # So, for this graph: a->b->c->d->e
852 852 # \ /
853 853 # aa -----/
854 854 # a has tag 2.6.12
855 855 # d has tag 2.6.13
856 856 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
857 857 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
858 858 # from the list.
859 859 #
860 860 # It is possible that more than one head will have the same branch tag.
861 861 # callers need to check the result for multiple heads under the same
862 862 # branch tag if that is a problem for them (ie checkout of a specific
863 863 # branch).
864 864 #
865 865 # passing in a specific branch will limit the depth of the search
866 866 # through the parents. It won't limit the branches returned in the
867 867 # result though.
868 868 def branchlookup(self, heads=None, branch=None):
869 869 if not heads:
870 870 heads = self.heads()
871 871 headt = [ h for h in heads ]
872 872 chlog = self.changelog
873 873 branches = {}
874 874 merges = []
875 875 seenmerge = {}
876 876
877 877 # traverse the tree once for each head, recording in the branches
878 878 # dict which tags are visible from this head. The branches
879 879 # dict also records which tags are visible from each tag
880 880 # while we traverse.
881 881 while headt or merges:
882 882 if merges:
883 883 n, found = merges.pop()
884 884 visit = [n]
885 885 else:
886 886 h = headt.pop()
887 887 visit = [h]
888 888 found = [h]
889 889 seen = {}
890 890 while visit:
891 891 n = visit.pop()
892 892 if n in seen:
893 893 continue
894 894 pp = chlog.parents(n)
895 895 tags = self.nodetags(n)
896 896 if tags:
897 897 for x in tags:
898 898 if x == 'tip':
899 899 continue
900 900 for f in found:
901 901 branches.setdefault(f, {})[n] = 1
902 902 branches.setdefault(n, {})[n] = 1
903 903 break
904 904 if n not in found:
905 905 found.append(n)
906 906 if branch in tags:
907 907 continue
908 908 seen[n] = 1
909 909 if pp[1] != nullid and n not in seenmerge:
910 910 merges.append((pp[1], [x for x in found]))
911 911 seenmerge[n] = 1
912 912 if pp[0] != nullid:
913 913 visit.append(pp[0])
914 914 # traverse the branches dict, eliminating branch tags from each
915 915 # head that are visible from another branch tag for that head.
916 916 out = {}
917 917 viscache = {}
918 918 for h in heads:
919 919 def visible(node):
920 920 if node in viscache:
921 921 return viscache[node]
922 922 ret = {}
923 923 visit = [node]
924 924 while visit:
925 925 x = visit.pop()
926 926 if x in viscache:
927 927 ret.update(viscache[x])
928 928 elif x not in ret:
929 929 ret[x] = 1
930 930 if x in branches:
931 931 visit[len(visit):] = branches[x].keys()
932 932 viscache[node] = ret
933 933 return ret
934 934 if h not in branches:
935 935 continue
936 936 # O(n^2), but somewhat limited. This only searches the
937 937 # tags visible from a specific head, not all the tags in the
938 938 # whole repo.
939 939 for b in branches[h]:
940 940 vis = False
941 941 for bb in branches[h].keys():
942 942 if b != bb:
943 943 if b in visible(bb):
944 944 vis = True
945 945 break
946 946 if not vis:
947 947 l = out.setdefault(h, [])
948 948 l[len(l):] = self.nodetags(b)
949 949 return out
950 950
951 951 def branches(self, nodes):
952 952 if not nodes:
953 953 nodes = [self.changelog.tip()]
954 954 b = []
955 955 for n in nodes:
956 956 t = n
957 957 while 1:
958 958 p = self.changelog.parents(n)
959 959 if p[1] != nullid or p[0] == nullid:
960 960 b.append((t, n, p[0], p[1]))
961 961 break
962 962 n = p[0]
963 963 return b
964 964
965 965 def between(self, pairs):
966 966 r = []
967 967
968 968 for top, bottom in pairs:
969 969 n, l, i = top, [], 0
970 970 f = 1
971 971
972 972 while n != bottom:
973 973 p = self.changelog.parents(n)[0]
974 974 if i == f:
975 975 l.append(n)
976 976 f = f * 2
977 977 n = p
978 978 i += 1
979 979
980 980 r.append(l)
981 981
982 982 return r
983 983
984 984 def findincoming(self, remote, base=None, heads=None, force=False):
985 985 """Return list of roots of the subsets of missing nodes from remote
986 986
987 987 If base dict is specified, assume that these nodes and their parents
988 988 exist on the remote side and that no child of a node of base exists
989 989 in both remote and self.
990 990 Furthermore base will be updated to include the nodes that exist
991 991 in self and remote but whose children do not exist in self and remote.
992 992 If a list of heads is specified, return only nodes which are heads
993 993 or ancestors of these heads.
994 994
995 995 All the ancestors of base are in self and in remote.
996 996 All the descendants of the list returned are missing in self.
997 997 (and so we know that the rest of the nodes are missing in remote, see
998 998 outgoing)
999 999 """
1000 1000 m = self.changelog.nodemap
1001 1001 search = []
1002 1002 fetch = {}
1003 1003 seen = {}
1004 1004 seenbranch = {}
1005 1005 if base == None:
1006 1006 base = {}
1007 1007
1008 1008 if not heads:
1009 1009 heads = remote.heads()
1010 1010
1011 1011 if self.changelog.tip() == nullid:
1012 1012 base[nullid] = 1
1013 1013 if heads != [nullid]:
1014 1014 return [nullid]
1015 1015 return []
1016 1016
1017 1017 # assume we're closer to the tip than the root
1018 1018 # and start by examining the heads
1019 1019 self.ui.status(_("searching for changes\n"))
1020 1020
1021 1021 unknown = []
1022 1022 for h in heads:
1023 1023 if h not in m:
1024 1024 unknown.append(h)
1025 1025 else:
1026 1026 base[h] = 1
1027 1027
1028 1028 if not unknown:
1029 1029 return []
1030 1030
1031 1031 req = dict.fromkeys(unknown)
1032 1032 reqcnt = 0
1033 1033
1034 1034 # search through remote branches
1035 1035 # a 'branch' here is a linear segment of history, with four parts:
1036 1036 # head, root, first parent, second parent
1037 1037 # (a branch always has two parents (or none) by definition)
1038 1038 unknown = remote.branches(unknown)
1039 1039 while unknown:
1040 1040 r = []
1041 1041 while unknown:
1042 1042 n = unknown.pop(0)
1043 1043 if n[0] in seen:
1044 1044 continue
1045 1045
1046 1046 self.ui.debug(_("examining %s:%s\n")
1047 1047 % (short(n[0]), short(n[1])))
1048 1048 if n[0] == nullid: # found the end of the branch
1049 1049 pass
1050 1050 elif n in seenbranch:
1051 1051 self.ui.debug(_("branch already found\n"))
1052 1052 continue
1053 1053 elif n[1] and n[1] in m: # do we know the base?
1054 1054 self.ui.debug(_("found incomplete branch %s:%s\n")
1055 1055 % (short(n[0]), short(n[1])))
1056 1056 search.append(n) # schedule branch range for scanning
1057 1057 seenbranch[n] = 1
1058 1058 else:
1059 1059 if n[1] not in seen and n[1] not in fetch:
1060 1060 if n[2] in m and n[3] in m:
1061 1061 self.ui.debug(_("found new changeset %s\n") %
1062 1062 short(n[1]))
1063 1063 fetch[n[1]] = 1 # earliest unknown
1064 1064 for p in n[2:4]:
1065 1065 if p in m:
1066 1066 base[p] = 1 # latest known
1067 1067
1068 1068 for p in n[2:4]:
1069 1069 if p not in req and p not in m:
1070 1070 r.append(p)
1071 1071 req[p] = 1
1072 1072 seen[n[0]] = 1
1073 1073
1074 1074 if r:
1075 1075 reqcnt += 1
1076 1076 self.ui.debug(_("request %d: %s\n") %
1077 1077 (reqcnt, " ".join(map(short, r))))
1078 1078 for p in range(0, len(r), 10):
1079 1079 for b in remote.branches(r[p:p+10]):
1080 1080 self.ui.debug(_("received %s:%s\n") %
1081 1081 (short(b[0]), short(b[1])))
1082 1082 unknown.append(b)
1083 1083
1084 1084 # do binary search on the branches we found
1085 1085 while search:
1086 1086 n = search.pop(0)
1087 1087 reqcnt += 1
1088 1088 l = remote.between([(n[0], n[1])])[0]
1089 1089 l.append(n[1])
1090 1090 p = n[0]
1091 1091 f = 1
1092 1092 for i in l:
1093 1093 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1094 1094 if i in m:
1095 1095 if f <= 2:
1096 1096 self.ui.debug(_("found new branch changeset %s\n") %
1097 1097 short(p))
1098 1098 fetch[p] = 1
1099 1099 base[i] = 1
1100 1100 else:
1101 1101 self.ui.debug(_("narrowed branch search to %s:%s\n")
1102 1102 % (short(p), short(i)))
1103 1103 search.append((p, i))
1104 1104 break
1105 1105 p, f = i, f * 2
1106 1106
1107 1107 # sanity check our fetch list
1108 1108 for f in fetch.keys():
1109 1109 if f in m:
1110 1110 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1111 1111
1112 1112 if base.keys() == [nullid]:
1113 1113 if force:
1114 1114 self.ui.warn(_("warning: repository is unrelated\n"))
1115 1115 else:
1116 1116 raise util.Abort(_("repository is unrelated"))
1117 1117
1118 1118 self.ui.note(_("found new changesets starting at ") +
1119 1119 " ".join([short(f) for f in fetch]) + "\n")
1120 1120
1121 1121 self.ui.debug(_("%d total queries\n") % reqcnt)
1122 1122
1123 1123 return fetch.keys()
1124 1124
1125 1125 def findoutgoing(self, remote, base=None, heads=None, force=False):
1126 1126 """Return list of nodes that are roots of subsets not in remote
1127 1127
1128 1128 If base dict is specified, assume that these nodes and their parents
1129 1129 exist on the remote side.
1130 1130 If a list of heads is specified, return only nodes which are heads
1131 1131 or ancestors of these heads, and return a second element which
1132 1132 contains all remote heads which get new children.
1133 1133 """
1134 1134 if base == None:
1135 1135 base = {}
1136 1136 self.findincoming(remote, base, heads, force=force)
1137 1137
1138 1138 self.ui.debug(_("common changesets up to ")
1139 1139 + " ".join(map(short, base.keys())) + "\n")
1140 1140
1141 1141 remain = dict.fromkeys(self.changelog.nodemap)
1142 1142
1143 1143 # prune everything remote has from the tree
1144 1144 del remain[nullid]
1145 1145 remove = base.keys()
1146 1146 while remove:
1147 1147 n = remove.pop(0)
1148 1148 if n in remain:
1149 1149 del remain[n]
1150 1150 for p in self.changelog.parents(n):
1151 1151 remove.append(p)
1152 1152
1153 1153 # find every node whose parents have been pruned
1154 1154 subset = []
1155 1155 # find every remote head that will get new children
1156 1156 updated_heads = {}
1157 1157 for n in remain:
1158 1158 p1, p2 = self.changelog.parents(n)
1159 1159 if p1 not in remain and p2 not in remain:
1160 1160 subset.append(n)
1161 1161 if heads:
1162 1162 if p1 in heads:
1163 1163 updated_heads[p1] = True
1164 1164 if p2 in heads:
1165 1165 updated_heads[p2] = True
1166 1166
1167 1167 # this is the set of all roots we have to push
1168 1168 if heads:
1169 1169 return subset, updated_heads.keys()
1170 1170 else:
1171 1171 return subset
1172 1172
1173 1173 def pull(self, remote, heads=None, force=False, lock=None):
1174 1174 mylock = False
1175 1175 if not lock:
1176 1176 lock = self.lock()
1177 1177 mylock = True
1178 1178
1179 1179 try:
1180 1180 fetch = self.findincoming(remote, force=force)
1181 1181 if fetch == [nullid]:
1182 1182 self.ui.status(_("requesting all changes\n"))
1183 1183
1184 1184 if not fetch:
1185 1185 self.ui.status(_("no changes found\n"))
1186 1186 return 0
1187 1187
1188 1188 if heads is None:
1189 1189 cg = remote.changegroup(fetch, 'pull')
1190 1190 else:
1191 1191 cg = remote.changegroupsubset(fetch, heads, 'pull')
1192 1192 return self.addchangegroup(cg, 'pull', remote.url())
1193 1193 finally:
1194 1194 if mylock:
1195 1195 lock.release()
1196 1196
1197 1197 def push(self, remote, force=False, revs=None):
1198 1198 # there are two ways to push to remote repo:
1199 1199 #
1200 1200 # addchangegroup assumes local user can lock remote
1201 1201 # repo (local filesystem, old ssh servers).
1202 1202 #
1203 1203 # unbundle assumes local user cannot lock remote repo (new ssh
1204 1204 # servers, http servers).
1205 1205
1206 1206 if remote.capable('unbundle'):
1207 1207 return self.push_unbundle(remote, force, revs)
1208 1208 return self.push_addchangegroup(remote, force, revs)
1209 1209
1210 1210 def prepush(self, remote, force, revs):
1211 1211 base = {}
1212 1212 remote_heads = remote.heads()
1213 1213 inc = self.findincoming(remote, base, remote_heads, force=force)
1214 1214 if not force and inc:
1215 1215 self.ui.warn(_("abort: unsynced remote changes!\n"))
1216 1216 self.ui.status(_("(did you forget to sync?"
1217 1217 " use push -f to force)\n"))
1218 1218 return None, 1
1219 1219
1220 1220 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1221 1221 if revs is not None:
1222 1222 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1223 1223 else:
1224 1224 bases, heads = update, self.changelog.heads()
1225 1225
1226 1226 if not bases:
1227 1227 self.ui.status(_("no changes found\n"))
1228 1228 return None, 1
1229 1229 elif not force:
1230 1230 # FIXME we don't properly detect creation of new heads
1231 1231 # in the push -r case, assume the user knows what he's doing
1232 1232 if not revs and len(remote_heads) < len(heads) \
1233 1233 and remote_heads != [nullid]:
1234 1234 self.ui.warn(_("abort: push creates new remote branches!\n"))
1235 1235 self.ui.status(_("(did you forget to merge?"
1236 1236 " use push -f to force)\n"))
1237 1237 return None, 1
1238 1238
1239 1239 if revs is None:
1240 1240 cg = self.changegroup(update, 'push')
1241 1241 else:
1242 1242 cg = self.changegroupsubset(update, revs, 'push')
1243 1243 return cg, remote_heads
1244 1244
1245 1245 def push_addchangegroup(self, remote, force, revs):
1246 1246 lock = remote.lock()
1247 1247
1248 1248 ret = self.prepush(remote, force, revs)
1249 1249 if ret[0] is not None:
1250 1250 cg, remote_heads = ret
1251 1251 return remote.addchangegroup(cg, 'push', self.url())
1252 1252 return ret[1]
1253 1253
1254 1254 def push_unbundle(self, remote, force, revs):
1255 1255 # local repo finds heads on server, finds out what revs it
1256 1256 # must push. once revs transferred, if server finds it has
1257 1257 # different heads (someone else won commit/push race), server
1258 1258 # aborts.
1259 1259
1260 1260 ret = self.prepush(remote, force, revs)
1261 1261 if ret[0] is not None:
1262 1262 cg, remote_heads = ret
1263 1263 if force: remote_heads = ['force']
1264 1264 return remote.unbundle(cg, remote_heads, 'push')
1265 1265 return ret[1]
1266 1266
1267 1267 def changegroupsubset(self, bases, heads, source):
1268 1268 """This function generates a changegroup consisting of all the nodes
1269 1269 that are descendants of any of the bases, and ancestors of any of
1270 1270 the heads.
1271 1271
1272 1272 It is fairly complex as determining which filenodes and which
1273 1273 manifest nodes need to be included for the changeset to be complete
1274 1274 is non-trivial.
1275 1275
1276 1276 Another wrinkle is doing the reverse, figuring out which changeset in
1277 1277 the changegroup a particular filenode or manifestnode belongs to."""
1278 1278
1279 1279 self.hook('preoutgoing', throw=True, source=source)
1280 1280
1281 1281 # Set up some initial variables
1282 1282 # Make it easy to refer to self.changelog
1283 1283 cl = self.changelog
1284 1284 # msng is short for missing - compute the list of changesets in this
1285 1285 # changegroup.
1286 1286 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1287 1287 # Some bases may turn out to be superfluous, and some heads may be
1288 1288 # too. nodesbetween will return the minimal set of bases and heads
1289 1289 # necessary to re-create the changegroup.
1290 1290
1291 1291 # Known heads are the list of heads that it is assumed the recipient
1292 1292 # of this changegroup will know about.
1293 1293 knownheads = {}
1294 1294 # We assume that all parents of bases are known heads.
1295 1295 for n in bases:
1296 1296 for p in cl.parents(n):
1297 1297 if p != nullid:
1298 1298 knownheads[p] = 1
1299 1299 knownheads = knownheads.keys()
1300 1300 if knownheads:
1301 1301 # Now that we know what heads are known, we can compute which
1302 1302 # changesets are known. The recipient must know about all
1303 1303 # changesets required to reach the known heads from the null
1304 1304 # changeset.
1305 1305 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1306 1306 junk = None
1307 1307 # Transform the list into an ersatz set.
1308 1308 has_cl_set = dict.fromkeys(has_cl_set)
1309 1309 else:
1310 1310 # If there were no known heads, the recipient cannot be assumed to
1311 1311 # know about any changesets.
1312 1312 has_cl_set = {}
1313 1313
1314 1314 # Make it easy to refer to self.manifest
1315 1315 mnfst = self.manifest
1316 1316 # We don't know which manifests are missing yet
1317 1317 msng_mnfst_set = {}
1318 1318 # Nor do we know which filenodes are missing.
1319 1319 msng_filenode_set = {}
1320 1320
1321 1321 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1322 1322 junk = None
1323 1323
1324 1324 # A changeset always belongs to itself, so the changenode lookup
1325 1325 # function for a changenode is identity.
1326 1326 def identity(x):
1327 1327 return x
1328 1328
1329 1329 # A function generating function. Sets up an environment for the
1330 1330 # inner function.
1331 1331 def cmp_by_rev_func(revlog):
1332 1332 # Compare two nodes by their revision number in the environment's
1333 1333 # revision history. Since the revision number both represents the
1334 1334 # most efficient order to read the nodes in, and represents a
1335 1335 # topological sorting of the nodes, this function is often useful.
1336 1336 def cmp_by_rev(a, b):
1337 1337 return cmp(revlog.rev(a), revlog.rev(b))
1338 1338 return cmp_by_rev
1339 1339
1340 1340 # If we determine that a particular file or manifest node must be a
1341 1341 # node that the recipient of the changegroup will already have, we can
1342 1342 # also assume the recipient will have all the parents. This function
1343 1343 # prunes them from the set of missing nodes.
1344 1344 def prune_parents(revlog, hasset, msngset):
1345 1345 haslst = hasset.keys()
1346 1346 haslst.sort(cmp_by_rev_func(revlog))
1347 1347 for node in haslst:
1348 1348 parentlst = [p for p in revlog.parents(node) if p != nullid]
1349 1349 while parentlst:
1350 1350 n = parentlst.pop()
1351 1351 if n not in hasset:
1352 1352 hasset[n] = 1
1353 1353 p = [p for p in revlog.parents(n) if p != nullid]
1354 1354 parentlst.extend(p)
1355 1355 for n in hasset:
1356 1356 msngset.pop(n, None)
1357 1357
1358 1358 # This is a function generating function used to set up an environment
1359 1359 # for the inner function to execute in.
1360 1360 def manifest_and_file_collector(changedfileset):
1361 1361 # This is an information gathering function that gathers
1362 1362 # information from each changeset node that goes out as part of
1363 1363 # the changegroup. The information gathered is a list of which
1364 1364 # manifest nodes are potentially required (the recipient may
1365 1365 # already have them) and total list of all files which were
1366 1366 # changed in any changeset in the changegroup.
1367 1367 #
1368 1368 # We also remember the first changenode we saw any manifest
1369 1369 # referenced by so we can later determine which changenode 'owns'
1370 1370 # the manifest.
1371 1371 def collect_manifests_and_files(clnode):
1372 1372 c = cl.read(clnode)
1373 1373 for f in c[3]:
1374 1374 # This is to make sure we only have one instance of each
1375 1375 # filename string for each filename.
1376 1376 changedfileset.setdefault(f, f)
1377 1377 msng_mnfst_set.setdefault(c[0], clnode)
1378 1378 return collect_manifests_and_files
1379 1379
1380 1380 # Figure out which manifest nodes (of the ones we think might be part
1381 1381 # of the changegroup) the recipient must know about and remove them
1382 1382 # from the changegroup.
1383 1383 def prune_manifests():
1384 1384 has_mnfst_set = {}
1385 1385 for n in msng_mnfst_set:
1386 1386 # If a 'missing' manifest thinks it belongs to a changenode
1387 1387 # the recipient is assumed to have, obviously the recipient
1388 1388 # must have that manifest.
1389 1389 linknode = cl.node(mnfst.linkrev(n))
1390 1390 if linknode in has_cl_set:
1391 1391 has_mnfst_set[n] = 1
1392 1392 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1393 1393
1394 1394 # Use the information collected in collect_manifests_and_files to say
1395 1395 # which changenode any manifestnode belongs to.
1396 1396 def lookup_manifest_link(mnfstnode):
1397 1397 return msng_mnfst_set[mnfstnode]
1398 1398
1399 1399 # A function generating function that sets up the initial environment
1400 1400 # for the inner function.
1401 1401 def filenode_collector(changedfiles):
1402 1402 next_rev = [0]
1403 1403 # This gathers information from each manifestnode included in the
1404 1404 # changegroup about which filenodes the manifest node references
1405 1405 # so we can include those in the changegroup too.
1406 1406 #
1407 1407 # It also remembers which changenode each filenode belongs to. It
1408 1408 # does this by assuming that a filenode belongs to the changenode
1409 1409 # that the first manifest referencing it belongs to.
1410 1410 def collect_msng_filenodes(mnfstnode):
1411 1411 r = mnfst.rev(mnfstnode)
1412 1412 if r == next_rev[0]:
1413 1413 # If the last rev we looked at was the one just previous,
1414 1414 # we only need to see a diff.
1415 1415 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1416 1416 # For each line in the delta
1417 1417 for dline in delta.splitlines():
1418 1418 # get the filename and filenode for that line
1419 1419 f, fnode = dline.split('\0')
1420 1420 fnode = bin(fnode[:40])
1421 1421 f = changedfiles.get(f, None)
1422 1422 # And if the file is in the list of files we care
1423 1423 # about.
1424 1424 if f is not None:
1425 1425 # Get the changenode this manifest belongs to
1426 1426 clnode = msng_mnfst_set[mnfstnode]
1427 1427 # Create the set of filenodes for the file if
1428 1428 # there isn't one already.
1429 1429 ndset = msng_filenode_set.setdefault(f, {})
1430 1430 # And set the filenode's changelog node to the
1431 1431 # manifest's if it hasn't been set already.
1432 1432 ndset.setdefault(fnode, clnode)
1433 1433 else:
1434 1434 # Otherwise we need a full manifest.
1435 1435 m = mnfst.read(mnfstnode)
1436 1436 # For every file we care about.
1437 1437 for f in changedfiles:
1438 1438 fnode = m.get(f, None)
1439 1439 # If it's in the manifest
1440 1440 if fnode is not None:
1441 1441 # See comments above.
1442 1442 clnode = msng_mnfst_set[mnfstnode]
1443 1443 ndset = msng_filenode_set.setdefault(f, {})
1444 1444 ndset.setdefault(fnode, clnode)
1445 1445 # Remember the revision we hope to see next.
1446 1446 next_rev[0] = r + 1
1447 1447 return collect_msng_filenodes
1448 1448
1449 1449 # We have a list of filenodes we think we need for a file, let's remove
1450 1450 # all those we know the recipient must have.
1451 1451 def prune_filenodes(f, filerevlog):
1452 1452 msngset = msng_filenode_set[f]
1453 1453 hasset = {}
1454 1454 # If a 'missing' filenode thinks it belongs to a changenode we
1455 1455 # assume the recipient must have, then the recipient must have
1456 1456 # that filenode.
1457 1457 for n in msngset:
1458 1458 clnode = cl.node(filerevlog.linkrev(n))
1459 1459 if clnode in has_cl_set:
1460 1460 hasset[n] = 1
1461 1461 prune_parents(filerevlog, hasset, msngset)
1462 1462
1463 1463 # A function generating function that sets up a context for the
1464 1464 # inner function.
1465 1465 def lookup_filenode_link_func(fname):
1466 1466 msngset = msng_filenode_set[fname]
1467 1467 # Lookup the changenode the filenode belongs to.
1468 1468 def lookup_filenode_link(fnode):
1469 1469 return msngset[fnode]
1470 1470 return lookup_filenode_link
1471 1471
1472 1472 # Now that we have all these utility functions to help out and
1473 1473 # logically divide up the task, generate the group.
1474 1474 def gengroup():
1475 1475 # The set of changed files starts empty.
1476 1476 changedfiles = {}
1477 1477 # Create a changenode group generator that will call our functions
1478 1478 # back to lookup the owning changenode and collect information.
1479 1479 group = cl.group(msng_cl_lst, identity,
1480 1480 manifest_and_file_collector(changedfiles))
1481 1481 for chnk in group:
1482 1482 yield chnk
1483 1483
1484 1484 # The list of manifests has been collected by the generator
1485 1485 # calling our functions back.
1486 1486 prune_manifests()
1487 1487 msng_mnfst_lst = msng_mnfst_set.keys()
1488 1488 # Sort the manifestnodes by revision number.
1489 1489 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1490 1490 # Create a generator for the manifestnodes that calls our lookup
1491 1491 # and data collection functions back.
1492 1492 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1493 1493 filenode_collector(changedfiles))
1494 1494 for chnk in group:
1495 1495 yield chnk
1496 1496
1497 1497 # These are no longer needed, dereference and toss the memory for
1498 1498 # them.
1499 1499 msng_mnfst_lst = None
1500 1500 msng_mnfst_set.clear()
1501 1501
1502 1502 changedfiles = changedfiles.keys()
1503 1503 changedfiles.sort()
1504 1504 # Go through all our files in order sorted by name.
1505 1505 for fname in changedfiles:
1506 1506 filerevlog = self.file(fname)
1507 1507 # Toss out the filenodes that the recipient isn't really
1508 1508 # missing.
1509 1509 if msng_filenode_set.has_key(fname):
1510 1510 prune_filenodes(fname, filerevlog)
1511 1511 msng_filenode_lst = msng_filenode_set[fname].keys()
1512 1512 else:
1513 1513 msng_filenode_lst = []
1514 1514 # If any filenodes are left, generate the group for them,
1515 1515 # otherwise don't bother.
1516 1516 if len(msng_filenode_lst) > 0:
1517 1517 yield changegroup.genchunk(fname)
1518 1518 # Sort the filenodes by their revision #
1519 1519 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1520 1520 # Create a group generator and only pass in a changenode
1521 1521 # lookup function as we need to collect no information
1522 1522 # from filenodes.
1523 1523 group = filerevlog.group(msng_filenode_lst,
1524 1524 lookup_filenode_link_func(fname))
1525 1525 for chnk in group:
1526 1526 yield chnk
1527 1527 if msng_filenode_set.has_key(fname):
1528 1528 # Don't need this anymore, toss it to free memory.
1529 1529 del msng_filenode_set[fname]
1530 1530 # Signal that no more groups are left.
1531 1531 yield changegroup.closechunk()
1532 1532
1533 1533 if msng_cl_lst:
1534 1534 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1535 1535
1536 1536 return util.chunkbuffer(gengroup())
1537 1537
1538 1538 def changegroup(self, basenodes, source):
1539 1539 """Generate a changegroup of all nodes that we have that a recipient
1540 1540 doesn't.
1541 1541
1542 1542 This is much easier than the previous function as we can assume that
1543 1543 the recipient has any changenode we aren't sending them."""
1544 1544
1545 1545 self.hook('preoutgoing', throw=True, source=source)
1546 1546
1547 1547 cl = self.changelog
1548 1548 nodes = cl.nodesbetween(basenodes, None)[0]
1549 1549 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1550 1550
1551 1551 def identity(x):
1552 1552 return x
1553 1553
1554 1554 def gennodelst(revlog):
1555 1555 for r in xrange(0, revlog.count()):
1556 1556 n = revlog.node(r)
1557 1557 if revlog.linkrev(n) in revset:
1558 1558 yield n
1559 1559
1560 1560 def changed_file_collector(changedfileset):
1561 1561 def collect_changed_files(clnode):
1562 1562 c = cl.read(clnode)
1563 1563 for fname in c[3]:
1564 1564 changedfileset[fname] = 1
1565 1565 return collect_changed_files
1566 1566
1567 1567 def lookuprevlink_func(revlog):
1568 1568 def lookuprevlink(n):
1569 1569 return cl.node(revlog.linkrev(n))
1570 1570 return lookuprevlink
1571 1571
1572 1572 def gengroup():
1573 1573 # construct a list of all changed files
1574 1574 changedfiles = {}
1575 1575
1576 1576 for chnk in cl.group(nodes, identity,
1577 1577 changed_file_collector(changedfiles)):
1578 1578 yield chnk
1579 1579 changedfiles = changedfiles.keys()
1580 1580 changedfiles.sort()
1581 1581
1582 1582 mnfst = self.manifest
1583 1583 nodeiter = gennodelst(mnfst)
1584 1584 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1585 1585 yield chnk
1586 1586
1587 1587 for fname in changedfiles:
1588 1588 filerevlog = self.file(fname)
1589 1589 nodeiter = gennodelst(filerevlog)
1590 1590 nodeiter = list(nodeiter)
1591 1591 if nodeiter:
1592 1592 yield changegroup.genchunk(fname)
1593 1593 lookup = lookuprevlink_func(filerevlog)
1594 1594 for chnk in filerevlog.group(nodeiter, lookup):
1595 1595 yield chnk
1596 1596
1597 1597 yield changegroup.closechunk()
1598 1598
1599 1599 if nodes:
1600 1600 self.hook('outgoing', node=hex(nodes[0]), source=source)
1601 1601
1602 1602 return util.chunkbuffer(gengroup())
1603 1603
1604 1604 def addchangegroup(self, source, srctype, url):
1605 1605 """add changegroup to repo.
1606 1606 returns (new heads - old heads) + 1, or 0 if there was no source."""
1607 1607
1608 1608 def csmap(x):
1609 1609 self.ui.debug(_("add changeset %s\n") % short(x))
1610 1610 return cl.count()
1611 1611
1612 1612 def revmap(x):
1613 1613 return cl.rev(x)
1614 1614
1615 1615 if not source:
1616 1616 return 0
1617 1617
1618 1618 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1619 1619
1620 1620 changesets = files = revisions = 0
1621 1621
1622 1622 tr = self.transaction()
1623 1623
1624 1624 # write changelog data to temp files so concurrent readers will not see
1625 1625 # an inconsistent view
1626 1626 cl = None
1627 1627 try:
1628 1628 cl = appendfile.appendchangelog(self.opener, self.changelog.version)
1629 1629
1630 1630 oldheads = len(cl.heads())
1631 1631
1632 1632 # pull off the changeset group
1633 1633 self.ui.status(_("adding changesets\n"))
1634 1634 cor = cl.count() - 1
1635 1635 chunkiter = changegroup.chunkiter(source)
1636 1636 if cl.addgroup(chunkiter, csmap, tr, 1) is None:
1637 1637 raise util.Abort(_("received changelog group is empty"))
1638 1638 cnr = cl.count() - 1
1639 1639 changesets = cnr - cor
1640 1640
1641 1641 # pull off the manifest group
1642 1642 self.ui.status(_("adding manifests\n"))
1643 1643 chunkiter = changegroup.chunkiter(source)
1644 1644 # no need to check for an empty manifest group here: if two
1645 1645 # changesets (e.g. the same merge committed independently in
1646 1646 # 3 and 4 from heads 1 and 2) produce identical manifests,
1647 1647 # no new manifest node is created and the group may be empty
1648 1648 self.manifest.addgroup(chunkiter, revmap, tr)
1649 1649
1650 1650 # process the files
1651 1651 self.ui.status(_("adding file changes\n"))
1652 1652 while 1:
1653 1653 f = changegroup.getchunk(source)
1654 1654 if not f:
1655 1655 break
1656 1656 self.ui.debug(_("adding %s revisions\n") % f)
1657 1657 fl = self.file(f)
1658 1658 o = fl.count()
1659 1659 chunkiter = changegroup.chunkiter(source)
1660 1660 if fl.addgroup(chunkiter, revmap, tr) is None:
1661 1661 raise util.Abort(_("received file revlog group is empty"))
1662 1662 revisions += fl.count() - o
1663 1663 files += 1
1664 1664
1665 1665 cl.writedata()
1666 1666 finally:
1667 1667 if cl:
1668 1668 cl.cleanup()
1669 1669
1670 1670 # make changelog see real files again
1671 1671 self.changelog = changelog.changelog(self.opener, self.changelog.version)
1672 1672 self.changelog.checkinlinesize(tr)
1673 1673
1674 1674 newheads = len(self.changelog.heads())
1675 1675 heads = ""
1676 1676 if oldheads and newheads != oldheads:
1677 1677 heads = _(" (%+d heads)") % (newheads - oldheads)
1678 1678
1679 1679 self.ui.status(_("added %d changesets"
1680 1680 " with %d changes to %d files%s\n")
1681 1681 % (changesets, revisions, files, heads))
1682 1682
1683 1683 if changesets > 0:
1684 1684 self.hook('pretxnchangegroup', throw=True,
1685 1685 node=hex(self.changelog.node(cor+1)), source=srctype,
1686 1686 url=url)
1687 1687
1688 1688 tr.close()
1689 1689
1690 1690 if changesets > 0:
1691 1691 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1692 1692 source=srctype, url=url)
1693 1693
1694 1694 for i in range(cor + 1, cnr + 1):
1695 1695 self.hook("incoming", node=hex(self.changelog.node(i)),
1696 1696 source=srctype, url=url)
1697 1697
1698 1698 return newheads - oldheads + 1
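
The +1 offset lets callers distinguish the early return for a missing source (0) from a successful pull that added no new heads (1). A hypothetical caller-side sketch:

    def describe(ret):
        # interprets addchangegroup's return value
        if ret == 0:
            return "no changes found"
        return "%+d heads" % (ret - 1)

    print(describe(0))   # no changes found
    print(describe(1))   # +0 heads: changesets added, head count unchanged
    print(describe(3))   # +2 heads
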
1699 1699
1700 1700
1701 1701 def stream_in(self, remote):
1702 1702 fp = remote.stream_out()
1703 1703 resp = int(fp.readline())
1704 1704 if resp != 0:
1705 1705 raise util.Abort(_('operation forbidden by server'))
1706 1706 self.ui.status(_('streaming all changes\n'))
1707 1707 total_files, total_bytes = map(int, fp.readline().split(' ', 1))
1708 1708 self.ui.status(_('%d files to transfer, %s of data\n') %
1709 1709 (total_files, util.bytecount(total_bytes)))
1710 1710 start = time.time()
1711 1711 for i in xrange(total_files):
1712 1712 name, size = fp.readline().split('\0', 1)
1713 1713 size = int(size)
1714 1714 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1715 1715 ofp = self.opener(name, 'w')
1716 1716 for chunk in util.filechunkiter(fp, limit=size):
1717 1717 ofp.write(chunk)
1718 1718 ofp.close()
1719 1719 elapsed = time.time() - start
1720 1720 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1721 1721 (util.bytecount(total_bytes), elapsed,
1722 1722 util.bytecount(total_bytes / elapsed)))
1723 1723 self.reload()
1724 1724 return len(self.heads()) + 1
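
The stream format read above is: a status line ('0' means permitted), a 'total_files total_bytes' line, then for each file a 'name\0size' header followed by exactly size bytes of raw revlog data. A standalone restatement, with binary I/O assumed and error handling simplified:

    def read_stream(fp, write_file):
        # fp: binary file-like object positioned at the status line
        if int(fp.readline()) != 0:
            raise IOError("operation forbidden by server")
        # the second field is the total byte count, informational only
        total_files = int(fp.readline().split(b" ", 1)[0])
        for _ in range(total_files):
            name, size = fp.readline().split(b"\0", 1)
            remaining = int(size)
            parts = []
            while remaining > 0:
                part = fp.read(min(remaining, 65536))
                if not part:
                    raise IOError("premature end of stream")
                parts.append(part)
                remaining -= len(part)
            write_file(name, b"".join(parts))
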
1725 1725
1726 1726 def clone(self, remote, heads=[], stream=False):
1727 1727 '''clone remote repository.
1728 1728
1729 1729 keyword arguments:
1730 1730 heads: list of revs to clone (forces use of pull)
1731 1731 stream: use streaming clone if possible'''
1732 1732
1733 1733 # at present, every client that can request an uncompressed
1734 1734 # clone can also read every repo format that a capable server
1735 1735 # might send
1736 1736
1737 1737 # if revlog format changes, client will have to check version
1738 1738 # and format flags on "stream" capability, and use
1739 1739 # uncompressed only if compatible.
1740 1740
1741 1741 if stream and not heads and remote.capable('stream'):
1742 1742 return self.stream_in(remote)
1743 1743 return self.pull(remote, heads)
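
The choice above reduces to a small predicate; restated for clarity, using the capability string as it appears in the code (with the comment's caveat that future format flags may refine the check):

    def clone_strategy(stream_requested, heads, capabilities):
        # streaming ships raw revlog files, so it cannot honor a
        # head subset and needs the server to advertise support
        if stream_requested and not heads and "stream" in capabilities:
            return "stream_in"
        return "pull"
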
1744 1744
1745 1745 # used to avoid circular references so destructors work
1746 1746 def aftertrans(base):
1747 1747 p = base
1748 1748 def a():
1749 1749 util.rename(os.path.join(p, "journal"), os.path.join(p, "undo"))
1750 1750 util.rename(os.path.join(p, "journal.dirstate"),
1751 1751 os.path.join(p, "undo.dirstate"))
1752 1752 return a
1753 1753
1754 1754 def instance(ui, path, create):
1755 1755 return localrepository(ui, util.drop_scheme('file', path), create)
1756 1756
1757 1757 def islocal(path):
1758 1758 return True
@@ -1,202 +1,199 b''
1 1 # manifest.py - manifest revision class for mercurial
2 2 #
3 3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from revlog import *
9 9 from i18n import gettext as _
10 10 from demandload import *
11 11 demandload(globals(), "array bisect struct")
12 12
13 13 class manifestdict(dict):
14 14 def __init__(self, mapping={}, flags={}):
15 15 dict.__init__(self, mapping)
16 16 self._flags = flags
17 17 def flags(self, f):
18 18 return self._flags.get(f, "")
19 19 def execf(self, f):
20 20 "test for executable in manifest flags"
21 21 return "x" in self.flags(f)
22 22 def linkf(self, f):
23 23 "test for symlink in manifest flags"
24 24 return "l" in self.flags(f)
25 25 def rawset(self, f, entry):
26 26 self[f] = bin(entry[:40])
27 27 fl = entry[40:-1]
28 28 if fl: self._flags[f] = fl
29 29 def set(self, f, execf=False, linkf=False):
30 30 if execf: self._flags[f] = "x"
31 31 if linkf: self._flags[f] = "l"
32 32 def copy(self):
33 33 return manifestdict(dict.copy(self), dict.copy(self._flags))
34 34
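
A brief usage sketch of manifestdict; the 40-character hex node is a placeholder and the import assumes this module loads standalone:

    from manifest import manifestdict

    m = manifestdict()
    m.rawset("bin/run", "a" * 40 + "x\n")   # raw tail: hex node + flags + newline
    print(m.execf("bin/run"))               # True
    print(m.linkf("bin/run"))               # False
    m.set("lib/sym", linkf=True)
    print(m.flags("lib/sym"))               # "l"
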
35 35 class manifest(revlog):
36 36 def __init__(self, opener, defversion=REVLOGV0):
37 37 self.mapcache = None
38 38 self.listcache = None
39 39 revlog.__init__(self, opener, "00manifest.i", "00manifest.d",
40 40 defversion)
41 41
42 42 def read(self, node):
43 43 if node == nullid: return manifestdict() # don't upset local cache
44 44 if self.mapcache and self.mapcache[0] == node:
45 45 return self.mapcache[1]
46 46 text = self.revision(node)
47 47 self.listcache = array.array('c', text)
48 48 lines = text.splitlines(1)
49 49 mapping = manifestdict()
50 50 for l in lines:
51 51 (f, n) = l.split('\0')
52 52 mapping.rawset(f, n)
53 53 self.mapcache = (node, mapping)
54 54 return mapping
55 55
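
The text decoded by read() is just sorted lines of '<filename>\0<40 hex chars><flags>\n'. A parser sketch independent of the revlog machinery:

    def parse_manifest_text(text):
        nodes, flags = {}, {}
        for line in text.splitlines():
            f, rest = line.split("\0")
            nodes[f] = rest[:40]          # hex-encoded filenode
            if rest[40:]:
                flags[f] = rest[40:]      # "x" and/or "l"
        return nodes, flags

    sample = "a.txt\0" + "0" * 40 + "\n" + "bin/run\0" + "1" * 40 + "x\n"
    print(parse_manifest_text(sample))
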
56 def readflags(self, node):
57 return self.read(node)
58
59 56 def diff(self, a, b):
60 57 return mdiff.textdiff(str(a), str(b))
61 58
62 59 def _search(self, m, s, lo=0, hi=None):
63 60 '''return a tuple (start, end) that says where to find s within m.
64 61
65 62 If the string is found m[start:end] are the line containing
66 63 that string. If start == end the string was not found and
67 64 they indicate the proper sorted insertion point. This was
68 65 taken from bisect_left, and modified to find line start/end as
69 66 it goes along.
70 67
71 68 m should be a buffer or a string
72 69 s is a string'''
73 70 def advance(i, c):
74 71 while i < lenm and m[i] != c:
75 72 i += 1
76 73 return i
77 74 lenm = len(m)
78 75 if not hi:
79 76 hi = lenm
80 77 while lo < hi:
81 78 mid = (lo + hi) // 2
82 79 start = mid
83 80 while start > 0 and m[start-1] != '\n':
84 81 start -= 1
85 82 end = advance(start, '\0')
86 83 if m[start:end] < s:
87 84 # we know that after the null there are 40 bytes of sha1
88 85 # this translates to the bisect lo = mid + 1
89 86 lo = advance(end + 40, '\n') + 1
90 87 else:
91 88 # this translates to the bisect hi = mid
92 89 hi = start
93 90 end = advance(lo, '\0')
94 91 found = m[lo:end]
95 92 if cmp(s, found) == 0:
96 93 # we know that after the null there are 40 bytes of sha1
97 94 end = advance(end + 40, '\n')
98 95 return (lo, end+1)
99 96 else:
100 97 return (lo, lo)
101 98
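
The invariant _search maintains (start == end means "not found, insert here") can be prototyped with the standard bisect module over a list of lines; the real method bisects the flat buffer directly to avoid materializing that list:

    import bisect

    def search_lines(lines, fname):
        # lines: sorted "name\0<40 hex>\n" records, as in a manifest
        keys = [l.split("\0")[0] for l in lines]
        i = bisect.bisect_left(keys, fname)
        if i < len(keys) and keys[i] == fname:
            return i, i + 1   # found: the record lives at index i
        return i, i           # not found: start == end, insertion point
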
102 99 def find(self, node, f):
103 100 '''look up entry for a single file efficiently.
104 101 return (node, execflag) pair if found, (None, None) if not.'''
105 102 if self.mapcache and node == self.mapcache[0]:
106 103 return self.mapcache[1].get(f), self.mapcache[1].flags(f)
107 104 text = self.revision(node)
108 105 start, end = self._search(text, f)
109 106 if start == end:
110 107 return None, None
111 108 l = text[start:end]
112 109 f, n = l.split('\0')
113 110 return bin(n[:40]), n[40:-1] == 'x'
114 111
115 def add(self, map, flags, transaction, link, p1=None, p2=None,
112 def add(self, map, transaction, link, p1=None, p2=None,
116 113 changed=None):
117 114 # apply the changes collected during the bisect loop to our addlist
118 115 # return a delta suitable for addrevision
119 116 def addlistdelta(addlist, x):
120 117 # start from the bottom up
121 118 # so changes to the offsets don't mess things up.
122 119 i = len(x)
123 120 while i > 0:
124 121 i -= 1
125 122 start = x[i][0]
126 123 end = x[i][1]
127 124 if x[i][2]:
128 125 addlist[start:end] = array.array('c', x[i][2])
129 126 else:
130 127 del addlist[start:end]
131 128 return "".join([struct.pack(">lll", d[0], d[1], len(d[2])) + d[2] \
132 129 for d in x ])
133 130
134 131 # if we're using the listcache, make sure it is valid and
135 132 # parented by the same node we're diffing against
136 133 if not changed or not self.listcache or not p1 or \
137 134 self.mapcache[0] != p1:
138 135 files = map.keys()
139 136 files.sort()
140 137
141 138 # if this is changed to support newlines in filenames,
142 139 # be sure to check the templates/ dir again (especially *-raw.tmpl)
143 text = ["%s\000%s%s\n" % (f, hex(map[f]), flags.flags(f)) for f in files]
140 text = ["%s\000%s%s\n" % (f, hex(map[f]), map.flags(f)) for f in files]
144 141 self.listcache = array.array('c', "".join(text))
145 142 cachedelta = None
146 143 else:
147 144 addlist = self.listcache
148 145
149 146 # combine the changed lists into one list for sorting
150 147 work = [[x, 0] for x in changed[0]]
151 148 work[len(work):] = [[x, 1] for x in changed[1]]
152 149 work.sort()
153 150
154 151 delta = []
155 152 dstart = None
156 153 dend = None
157 154 dline = [""]
158 155 start = 0
159 156 # zero copy representation of addlist as a buffer
160 157 addbuf = buffer(addlist)
161 158
162 159 # start with a readonly loop that finds the offset of
163 160 # each line and creates the deltas
164 161 for w in work:
165 162 f = w[0]
166 163 # start/end will either be the bounds of the item or the insert point
167 164 start, end = self._search(addbuf, f, start)
168 165 if w[1] == 0:
169 l = "%s\000%s%s\n" % (f, hex(map[f]), flags.flags(f))
166 l = "%s\000%s%s\n" % (f, hex(map[f]), map.flags(f))
170 167 else:
171 168 l = ""
172 169 if start == end and w[1] == 1:
173 170 # item we want to delete was not found, error out
174 171 raise AssertionError(
175 172 _("failed to remove %s from manifest\n") % f)
176 173 if dstart != None and dstart <= start and dend >= start:
177 174 if dend < end:
178 175 dend = end
179 176 if l:
180 177 dline.append(l)
181 178 else:
182 179 if dstart != None:
183 180 delta.append([dstart, dend, "".join(dline)])
184 181 dstart = start
185 182 dend = end
186 183 dline = [l]
187 184
188 185 if dstart != None:
189 186 delta.append([dstart, dend, "".join(dline)])
190 187 # apply the delta to the addlist, and get a delta for addrevision
191 188 cachedelta = addlistdelta(addlist, delta)
192 189
193 190 # the delta is only valid if we've been processing the tip revision
194 191 if self.mapcache[0] != self.tip():
195 192 cachedelta = None
196 193 self.listcache = addlist
197 194
198 195 n = self.addrevision(buffer(self.listcache), transaction, link, p1, \
199 196 p2, cachedelta)
200 197 self.mapcache = (n, map)
201 198
202 199 return n
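
The cachedelta built by addlistdelta is a concatenation of regions, each a '>lll' header (start, end, replacement length) followed by the replacement bytes, per the struct.pack call above. A minimal applier under that layout; the revlog's own delta machinery is the authority:

    import struct

    def apply_delta(base, delta):
        # base and delta are bytes; regions are sorted and
        # non-overlapping with respect to base offsets
        out, pos, last = [], 0, 0
        while pos < len(delta):
            start, end, length = struct.unpack(">lll", delta[pos:pos + 12])
            pos += 12
            out.append(base[last:start])         # unchanged prefix
            out.append(delta[pos:pos + length])  # replacement bytes
            pos += length
            last = end
        out.append(base[last:])
        return b"".join(out)
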