# Scraped changeset-view header (EJS/RhodeCode diff page), kept for provenance:
#   changeset r1882:c0320567, Vadim Gelfer
#   "merge util.esystem and util.system."
#   diff hunk: @@ -1,1887 +1,1887
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 import struct, os, util
9 9 import filelog, manifest, changelog, dirstate, repo
10 10 from node import *
11 11 from i18n import gettext as _
12 12 from demandload import *
13 13 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
14 14
15 15 class localrepository(object):
16 16 def __del__(self):
17 17 self.transhandle = None
18 18 def __init__(self, parentui, path=None, create=0):
19 19 if not path:
20 20 p = os.getcwd()
21 21 while not os.path.isdir(os.path.join(p, ".hg")):
22 22 oldp = p
23 23 p = os.path.dirname(p)
24 24 if p == oldp:
25 25 raise repo.RepoError(_("no repo found"))
26 26 path = p
27 27 self.path = os.path.join(path, ".hg")
28 28
29 29 if not create and not os.path.isdir(self.path):
30 30 raise repo.RepoError(_("repository %s not found") % path)
31 31
32 32 self.root = os.path.abspath(path)
33 33 self.ui = ui.ui(parentui=parentui)
34 34 self.opener = util.opener(self.path)
35 35 self.wopener = util.opener(self.root)
36 36 self.manifest = manifest.manifest(self.opener)
37 37 self.changelog = changelog.changelog(self.opener)
38 38 self.tagscache = None
39 39 self.nodetagscache = None
40 40 self.encodepats = None
41 41 self.decodepats = None
42 42 self.transhandle = None
43 43
44 44 if create:
45 45 os.mkdir(self.path)
46 46 os.mkdir(self.join("data"))
47 47
48 48 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
49 49 try:
50 50 self.ui.readconfig(self.join("hgrc"))
51 51 except IOError:
52 52 pass
53 53
54 54 def hook(self, name, throw=False, **args):
55 55 def runhook(name, cmd):
56 56 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
57 57 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
58 r = util.esystem(cmd, environ=env, cwd=self.root)
58 r = util.system(cmd, environ=env, cwd=self.root)
59 59 if r:
60 60 desc, r = util.explain_exit(r)
61 61 if throw:
62 62 raise util.Abort(_('%s hook %s') % (name, desc))
63 63 self.ui.warn(_('error: %s hook %s\n') % (name, desc))
64 64 return False
65 65 return True
66 66
67 67 r = True
68 68 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
69 69 if hname.split(".", 1)[0] == name and cmd]
70 70 hooks.sort()
71 71 for hname, cmd in hooks:
72 72 r = runhook(hname, cmd) and r
73 73 return r
74 74
75 75 def tags(self):
76 76 '''return a mapping of tag to node'''
77 77 if not self.tagscache:
78 78 self.tagscache = {}
79 79 def addtag(self, k, n):
80 80 try:
81 81 bin_n = bin(n)
82 82 except TypeError:
83 83 bin_n = ''
84 84 self.tagscache[k.strip()] = bin_n
85 85
86 86 try:
87 87 # read each head of the tags file, ending with the tip
88 88 # and add each tag found to the map, with "newer" ones
89 89 # taking precedence
90 90 fl = self.file(".hgtags")
91 91 h = fl.heads()
92 92 h.reverse()
93 93 for r in h:
94 94 for l in fl.read(r).splitlines():
95 95 if l:
96 96 n, k = l.split(" ", 1)
97 97 addtag(self, k, n)
98 98 except KeyError:
99 99 pass
100 100
101 101 try:
102 102 f = self.opener("localtags")
103 103 for l in f:
104 104 n, k = l.split(" ", 1)
105 105 addtag(self, k, n)
106 106 except IOError:
107 107 pass
108 108
109 109 self.tagscache['tip'] = self.changelog.tip()
110 110
111 111 return self.tagscache
112 112
113 113 def tagslist(self):
114 114 '''return a list of tags ordered by revision'''
115 115 l = []
116 116 for t, n in self.tags().items():
117 117 try:
118 118 r = self.changelog.rev(n)
119 119 except:
120 120 r = -2 # sort to the beginning of the list if unknown
121 121 l.append((r, t, n))
122 122 l.sort()
123 123 return [(t, n) for r, t, n in l]
124 124
125 125 def nodetags(self, node):
126 126 '''return the tags associated with a node'''
127 127 if not self.nodetagscache:
128 128 self.nodetagscache = {}
129 129 for t, n in self.tags().items():
130 130 self.nodetagscache.setdefault(n, []).append(t)
131 131 return self.nodetagscache.get(node, [])
132 132
133 133 def lookup(self, key):
134 134 try:
135 135 return self.tags()[key]
136 136 except KeyError:
137 137 try:
138 138 return self.changelog.lookup(key)
139 139 except:
140 140 raise repo.RepoError(_("unknown revision '%s'") % key)
141 141
142 142 def dev(self):
143 143 return os.stat(self.path).st_dev
144 144
145 145 def local(self):
146 146 return True
147 147
148 148 def join(self, f):
149 149 return os.path.join(self.path, f)
150 150
151 151 def wjoin(self, f):
152 152 return os.path.join(self.root, f)
153 153
154 154 def file(self, f):
155 155 if f[0] == '/':
156 156 f = f[1:]
157 157 return filelog.filelog(self.opener, f)
158 158
159 159 def getcwd(self):
160 160 return self.dirstate.getcwd()
161 161
162 162 def wfile(self, f, mode='r'):
163 163 return self.wopener(f, mode)
164 164
165 165 def wread(self, filename):
166 166 if self.encodepats == None:
167 167 l = []
168 168 for pat, cmd in self.ui.configitems("encode"):
169 169 mf = util.matcher("", "/", [pat], [], [])[1]
170 170 l.append((mf, cmd))
171 171 self.encodepats = l
172 172
173 173 data = self.wopener(filename, 'r').read()
174 174
175 175 for mf, cmd in self.encodepats:
176 176 if mf(filename):
177 177 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
178 178 data = util.filter(data, cmd)
179 179 break
180 180
181 181 return data
182 182
183 183 def wwrite(self, filename, data, fd=None):
184 184 if self.decodepats == None:
185 185 l = []
186 186 for pat, cmd in self.ui.configitems("decode"):
187 187 mf = util.matcher("", "/", [pat], [], [])[1]
188 188 l.append((mf, cmd))
189 189 self.decodepats = l
190 190
191 191 for mf, cmd in self.decodepats:
192 192 if mf(filename):
193 193 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
194 194 data = util.filter(data, cmd)
195 195 break
196 196
197 197 if fd:
198 198 return fd.write(data)
199 199 return self.wopener(filename, 'w').write(data)
200 200
201 201 def transaction(self):
202 202 tr = self.transhandle
203 203 if tr != None and tr.running():
204 204 return tr.nest()
205 205
206 206 # save dirstate for undo
207 207 try:
208 208 ds = self.opener("dirstate").read()
209 209 except IOError:
210 210 ds = ""
211 211 self.opener("journal.dirstate", "w").write(ds)
212 212
213 213 tr = transaction.transaction(self.ui.warn, self.opener,
214 214 self.join("journal"),
215 215 aftertrans(self.path))
216 216 self.transhandle = tr
217 217 return tr
218 218
219 219 def recover(self):
220 220 l = self.lock()
221 221 if os.path.exists(self.join("journal")):
222 222 self.ui.status(_("rolling back interrupted transaction\n"))
223 223 transaction.rollback(self.opener, self.join("journal"))
224 224 self.reload()
225 225 return True
226 226 else:
227 227 self.ui.warn(_("no interrupted transaction available\n"))
228 228 return False
229 229
230 230 def undo(self, wlock=None):
231 231 if not wlock:
232 232 wlock = self.wlock()
233 233 l = self.lock()
234 234 if os.path.exists(self.join("undo")):
235 235 self.ui.status(_("rolling back last transaction\n"))
236 236 transaction.rollback(self.opener, self.join("undo"))
237 237 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
238 238 self.reload()
239 239 self.wreload()
240 240 else:
241 241 self.ui.warn(_("no undo information available\n"))
242 242
243 243 def wreload(self):
244 244 self.dirstate.read()
245 245
246 246 def reload(self):
247 247 self.changelog.load()
248 248 self.manifest.load()
249 249 self.tagscache = None
250 250 self.nodetagscache = None
251 251
252 252 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None):
253 253 try:
254 254 l = lock.lock(self.join(lockname), 0, releasefn)
255 255 except lock.LockHeld, inst:
256 256 if not wait:
257 257 raise inst
258 258 self.ui.warn(_("waiting for lock held by %s\n") % inst.args[0])
259 259 try:
260 260 # default to 600 seconds timeout
261 261 l = lock.lock(self.join(lockname),
262 262 int(self.ui.config("ui", "timeout") or 600),
263 263 releasefn)
264 264 except lock.LockHeld, inst:
265 265 raise util.Abort(_("timeout while waiting for "
266 266 "lock held by %s") % inst.args[0])
267 267 if acquirefn:
268 268 acquirefn()
269 269 return l
270 270
271 271 def lock(self, wait=1):
272 272 return self.do_lock("lock", wait, acquirefn=self.reload)
273 273
274 274 def wlock(self, wait=1):
275 275 return self.do_lock("wlock", wait,
276 276 self.dirstate.write,
277 277 self.wreload)
278 278
279 279 def checkfilemerge(self, filename, text, filelog, manifest1, manifest2):
280 280 "determine whether a new filenode is needed"
281 281 fp1 = manifest1.get(filename, nullid)
282 282 fp2 = manifest2.get(filename, nullid)
283 283
284 284 if fp2 != nullid:
285 285 # is one parent an ancestor of the other?
286 286 fpa = filelog.ancestor(fp1, fp2)
287 287 if fpa == fp1:
288 288 fp1, fp2 = fp2, nullid
289 289 elif fpa == fp2:
290 290 fp2 = nullid
291 291
292 292 # is the file unmodified from the parent? report existing entry
293 293 if fp2 == nullid and text == filelog.read(fp1):
294 294 return (fp1, None, None)
295 295
296 296 return (None, fp1, fp2)
297 297
298 298 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
299 299 orig_parent = self.dirstate.parents()[0] or nullid
300 300 p1 = p1 or self.dirstate.parents()[0] or nullid
301 301 p2 = p2 or self.dirstate.parents()[1] or nullid
302 302 c1 = self.changelog.read(p1)
303 303 c2 = self.changelog.read(p2)
304 304 m1 = self.manifest.read(c1[0])
305 305 mf1 = self.manifest.readflags(c1[0])
306 306 m2 = self.manifest.read(c2[0])
307 307 changed = []
308 308
309 309 if orig_parent == p1:
310 310 update_dirstate = 1
311 311 else:
312 312 update_dirstate = 0
313 313
314 314 if not wlock:
315 315 wlock = self.wlock()
316 316 l = self.lock()
317 317 tr = self.transaction()
318 318 mm = m1.copy()
319 319 mfm = mf1.copy()
320 320 linkrev = self.changelog.count()
321 321 for f in files:
322 322 try:
323 323 t = self.wread(f)
324 324 tm = util.is_exec(self.wjoin(f), mfm.get(f, False))
325 325 r = self.file(f)
326 326 mfm[f] = tm
327 327
328 328 (entry, fp1, fp2) = self.checkfilemerge(f, t, r, m1, m2)
329 329 if entry:
330 330 mm[f] = entry
331 331 continue
332 332
333 333 mm[f] = r.add(t, {}, tr, linkrev, fp1, fp2)
334 334 changed.append(f)
335 335 if update_dirstate:
336 336 self.dirstate.update([f], "n")
337 337 except IOError:
338 338 try:
339 339 del mm[f]
340 340 del mfm[f]
341 341 if update_dirstate:
342 342 self.dirstate.forget([f])
343 343 except:
344 344 # deleted from p2?
345 345 pass
346 346
347 347 mnode = self.manifest.add(mm, mfm, tr, linkrev, c1[0], c2[0])
348 348 user = user or self.ui.username()
349 349 n = self.changelog.add(mnode, changed, text, tr, p1, p2, user, date)
350 350 tr.close()
351 351 if update_dirstate:
352 352 self.dirstate.setparents(n, nullid)
353 353
354 354 def commit(self, files=None, text="", user=None, date=None,
355 355 match=util.always, force=False, lock=None, wlock=None):
356 356 commit = []
357 357 remove = []
358 358 changed = []
359 359
360 360 if files:
361 361 for f in files:
362 362 s = self.dirstate.state(f)
363 363 if s in 'nmai':
364 364 commit.append(f)
365 365 elif s == 'r':
366 366 remove.append(f)
367 367 else:
368 368 self.ui.warn(_("%s not tracked!\n") % f)
369 369 else:
370 370 modified, added, removed, deleted, unknown = self.changes(match=match)
371 371 commit = modified + added
372 372 remove = removed
373 373
374 374 p1, p2 = self.dirstate.parents()
375 375 c1 = self.changelog.read(p1)
376 376 c2 = self.changelog.read(p2)
377 377 m1 = self.manifest.read(c1[0])
378 378 mf1 = self.manifest.readflags(c1[0])
379 379 m2 = self.manifest.read(c2[0])
380 380
381 381 if not commit and not remove and not force and p2 == nullid:
382 382 self.ui.status(_("nothing changed\n"))
383 383 return None
384 384
385 385 xp1 = hex(p1)
386 386 if p2 == nullid: xp2 = ''
387 387 else: xp2 = hex(p2)
388 388
389 389 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
390 390
391 391 if not wlock:
392 392 wlock = self.wlock()
393 393 if not lock:
394 394 lock = self.lock()
395 395 tr = self.transaction()
396 396
397 397 # check in files
398 398 new = {}
399 399 linkrev = self.changelog.count()
400 400 commit.sort()
401 401 for f in commit:
402 402 self.ui.note(f + "\n")
403 403 try:
404 404 mf1[f] = util.is_exec(self.wjoin(f), mf1.get(f, False))
405 405 t = self.wread(f)
406 406 except IOError:
407 407 self.ui.warn(_("trouble committing %s!\n") % f)
408 408 raise
409 409
410 410 r = self.file(f)
411 411
412 412 meta = {}
413 413 cp = self.dirstate.copied(f)
414 414 if cp:
415 415 meta["copy"] = cp
416 416 meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid)))
417 417 self.ui.debug(_(" %s: copy %s:%s\n") % (f, cp, meta["copyrev"]))
418 418 fp1, fp2 = nullid, nullid
419 419 else:
420 420 entry, fp1, fp2 = self.checkfilemerge(f, t, r, m1, m2)
421 421 if entry:
422 422 new[f] = entry
423 423 continue
424 424
425 425 new[f] = r.add(t, meta, tr, linkrev, fp1, fp2)
426 426 # remember what we've added so that we can later calculate
427 427 # the files to pull from a set of changesets
428 428 changed.append(f)
429 429
430 430 # update manifest
431 431 m1 = m1.copy()
432 432 m1.update(new)
433 433 for f in remove:
434 434 if f in m1:
435 435 del m1[f]
436 436 mn = self.manifest.add(m1, mf1, tr, linkrev, c1[0], c2[0],
437 437 (new, remove))
438 438
439 439 # add changeset
440 440 new = new.keys()
441 441 new.sort()
442 442
443 443 if not text:
444 444 edittext = [""]
445 445 if p2 != nullid:
446 446 edittext.append("HG: branch merge")
447 447 edittext.extend(["HG: changed %s" % f for f in changed])
448 448 edittext.extend(["HG: removed %s" % f for f in remove])
449 449 if not changed and not remove:
450 450 edittext.append("HG: no files changed")
451 451 edittext.append("")
452 452 # run editor in the repository root
453 453 olddir = os.getcwd()
454 454 os.chdir(self.root)
455 455 edittext = self.ui.edit("\n".join(edittext))
456 456 os.chdir(olddir)
457 457 if not edittext.rstrip():
458 458 return None
459 459 text = edittext
460 460
461 461 user = user or self.ui.username()
462 462 n = self.changelog.add(mn, changed + remove, text, tr, p1, p2, user, date)
463 463 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
464 464 parent2=xp2)
465 465 tr.close()
466 466
467 467 self.dirstate.setparents(n)
468 468 self.dirstate.update(new, "n")
469 469 self.dirstate.forget(remove)
470 470
471 471 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
472 472 return n
473 473
474 474 def walk(self, node=None, files=[], match=util.always):
475 475 if node:
476 476 fdict = dict.fromkeys(files)
477 477 for fn in self.manifest.read(self.changelog.read(node)[0]):
478 478 fdict.pop(fn, None)
479 479 if match(fn):
480 480 yield 'm', fn
481 481 for fn in fdict:
482 482 self.ui.warn(_('%s: No such file in rev %s\n') % (
483 483 util.pathto(self.getcwd(), fn), short(node)))
484 484 else:
485 485 for src, fn in self.dirstate.walk(files, match):
486 486 yield src, fn
487 487
488 488 def changes(self, node1=None, node2=None, files=[], match=util.always,
489 489 wlock=None):
490 490 """return changes between two nodes or node and working directory
491 491
492 492 If node1 is None, use the first dirstate parent instead.
493 493 If node2 is None, compare node1 with working directory.
494 494 """
495 495
496 496 def fcmp(fn, mf):
497 497 t1 = self.wread(fn)
498 498 t2 = self.file(fn).read(mf.get(fn, nullid))
499 499 return cmp(t1, t2)
500 500
501 501 def mfmatches(node):
502 502 change = self.changelog.read(node)
503 503 mf = dict(self.manifest.read(change[0]))
504 504 for fn in mf.keys():
505 505 if not match(fn):
506 506 del mf[fn]
507 507 return mf
508 508
509 509 if node1:
510 510 # read the manifest from node1 before the manifest from node2,
511 511 # so that we'll hit the manifest cache if we're going through
512 512 # all the revisions in parent->child order.
513 513 mf1 = mfmatches(node1)
514 514
515 515 # are we comparing the working directory?
516 516 if not node2:
517 517 if not wlock:
518 518 try:
519 519 wlock = self.wlock(wait=0)
520 520 except lock.LockException:
521 521 wlock = None
522 522 lookup, modified, added, removed, deleted, unknown = (
523 523 self.dirstate.changes(files, match))
524 524
525 525 # are we comparing working dir against its parent?
526 526 if not node1:
527 527 if lookup:
528 528 # do a full compare of any files that might have changed
529 529 mf2 = mfmatches(self.dirstate.parents()[0])
530 530 for f in lookup:
531 531 if fcmp(f, mf2):
532 532 modified.append(f)
533 533 elif wlock is not None:
534 534 self.dirstate.update([f], "n")
535 535 else:
536 536 # we are comparing working dir against non-parent
537 537 # generate a pseudo-manifest for the working dir
538 538 mf2 = mfmatches(self.dirstate.parents()[0])
539 539 for f in lookup + modified + added:
540 540 mf2[f] = ""
541 541 for f in removed:
542 542 if f in mf2:
543 543 del mf2[f]
544 544 else:
545 545 # we are comparing two revisions
546 546 deleted, unknown = [], []
547 547 mf2 = mfmatches(node2)
548 548
549 549 if node1:
550 550 # flush lists from dirstate before comparing manifests
551 551 modified, added = [], []
552 552
553 553 for fn in mf2:
554 554 if mf1.has_key(fn):
555 555 if mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1)):
556 556 modified.append(fn)
557 557 del mf1[fn]
558 558 else:
559 559 added.append(fn)
560 560
561 561 removed = mf1.keys()
562 562
563 563 # sort and return results:
564 564 for l in modified, added, removed, deleted, unknown:
565 565 l.sort()
566 566 return (modified, added, removed, deleted, unknown)
567 567
568 568 def add(self, list, wlock=None):
569 569 if not wlock:
570 570 wlock = self.wlock()
571 571 for f in list:
572 572 p = self.wjoin(f)
573 573 if not os.path.exists(p):
574 574 self.ui.warn(_("%s does not exist!\n") % f)
575 575 elif not os.path.isfile(p):
576 576 self.ui.warn(_("%s not added: only files supported currently\n")
577 577 % f)
578 578 elif self.dirstate.state(f) in 'an':
579 579 self.ui.warn(_("%s already tracked!\n") % f)
580 580 else:
581 581 self.dirstate.update([f], "a")
582 582
583 583 def forget(self, list, wlock=None):
584 584 if not wlock:
585 585 wlock = self.wlock()
586 586 for f in list:
587 587 if self.dirstate.state(f) not in 'ai':
588 588 self.ui.warn(_("%s not added!\n") % f)
589 589 else:
590 590 self.dirstate.forget([f])
591 591
592 592 def remove(self, list, unlink=False, wlock=None):
593 593 if unlink:
594 594 for f in list:
595 595 try:
596 596 util.unlink(self.wjoin(f))
597 597 except OSError, inst:
598 598 if inst.errno != errno.ENOENT:
599 599 raise
600 600 if not wlock:
601 601 wlock = self.wlock()
602 602 for f in list:
603 603 p = self.wjoin(f)
604 604 if os.path.exists(p):
605 605 self.ui.warn(_("%s still exists!\n") % f)
606 606 elif self.dirstate.state(f) == 'a':
607 607 self.dirstate.forget([f])
608 608 elif f not in self.dirstate:
609 609 self.ui.warn(_("%s not tracked!\n") % f)
610 610 else:
611 611 self.dirstate.update([f], "r")
612 612
613 613 def undelete(self, list, wlock=None):
614 614 p = self.dirstate.parents()[0]
615 615 mn = self.changelog.read(p)[0]
616 616 mf = self.manifest.readflags(mn)
617 617 m = self.manifest.read(mn)
618 618 if not wlock:
619 619 wlock = self.wlock()
620 620 for f in list:
621 621 if self.dirstate.state(f) not in "r":
622 622 self.ui.warn("%s not removed!\n" % f)
623 623 else:
624 624 t = self.file(f).read(m[f])
625 625 self.wwrite(f, t)
626 626 util.set_exec(self.wjoin(f), mf[f])
627 627 self.dirstate.update([f], "n")
628 628
629 629 def copy(self, source, dest, wlock=None):
630 630 p = self.wjoin(dest)
631 631 if not os.path.exists(p):
632 632 self.ui.warn(_("%s does not exist!\n") % dest)
633 633 elif not os.path.isfile(p):
634 634 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
635 635 else:
636 636 if not wlock:
637 637 wlock = self.wlock()
638 638 if self.dirstate.state(dest) == '?':
639 639 self.dirstate.update([dest], "a")
640 640 self.dirstate.copy(source, dest)
641 641
642 642 def heads(self, start=None):
643 643 heads = self.changelog.heads(start)
644 644 # sort the output in rev descending order
645 645 heads = [(-self.changelog.rev(h), h) for h in heads]
646 646 heads.sort()
647 647 return [n for (r, n) in heads]
648 648
649 649 # branchlookup returns a dict giving a list of branches for
650 650 # each head. A branch is defined as the tag of a node or
651 651 # the branch of the node's parents. If a node has multiple
652 652 # branch tags, tags are eliminated if they are visible from other
653 653 # branch tags.
654 654 #
655 655 # So, for this graph: a->b->c->d->e
656 656 # \ /
657 657 # aa -----/
658 658 # a has tag 2.6.12
659 659 # d has tag 2.6.13
660 660 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
661 661 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
662 662 # from the list.
663 663 #
664 664 # It is possible that more than one head will have the same branch tag.
665 665 # callers need to check the result for multiple heads under the same
666 666 # branch tag if that is a problem for them (ie checkout of a specific
667 667 # branch).
668 668 #
669 669 # passing in a specific branch will limit the depth of the search
670 670 # through the parents. It won't limit the branches returned in the
671 671 # result though.
672 672 def branchlookup(self, heads=None, branch=None):
673 673 if not heads:
674 674 heads = self.heads()
675 675 headt = [ h for h in heads ]
676 676 chlog = self.changelog
677 677 branches = {}
678 678 merges = []
679 679 seenmerge = {}
680 680
681 681 # traverse the tree once for each head, recording in the branches
682 682 # dict which tags are visible from this head. The branches
683 683 # dict also records which tags are visible from each tag
684 684 # while we traverse.
685 685 while headt or merges:
686 686 if merges:
687 687 n, found = merges.pop()
688 688 visit = [n]
689 689 else:
690 690 h = headt.pop()
691 691 visit = [h]
692 692 found = [h]
693 693 seen = {}
694 694 while visit:
695 695 n = visit.pop()
696 696 if n in seen:
697 697 continue
698 698 pp = chlog.parents(n)
699 699 tags = self.nodetags(n)
700 700 if tags:
701 701 for x in tags:
702 702 if x == 'tip':
703 703 continue
704 704 for f in found:
705 705 branches.setdefault(f, {})[n] = 1
706 706 branches.setdefault(n, {})[n] = 1
707 707 break
708 708 if n not in found:
709 709 found.append(n)
710 710 if branch in tags:
711 711 continue
712 712 seen[n] = 1
713 713 if pp[1] != nullid and n not in seenmerge:
714 714 merges.append((pp[1], [x for x in found]))
715 715 seenmerge[n] = 1
716 716 if pp[0] != nullid:
717 717 visit.append(pp[0])
718 718 # traverse the branches dict, eliminating branch tags from each
719 719 # head that are visible from another branch tag for that head.
720 720 out = {}
721 721 viscache = {}
722 722 for h in heads:
723 723 def visible(node):
724 724 if node in viscache:
725 725 return viscache[node]
726 726 ret = {}
727 727 visit = [node]
728 728 while visit:
729 729 x = visit.pop()
730 730 if x in viscache:
731 731 ret.update(viscache[x])
732 732 elif x not in ret:
733 733 ret[x] = 1
734 734 if x in branches:
735 735 visit[len(visit):] = branches[x].keys()
736 736 viscache[node] = ret
737 737 return ret
738 738 if h not in branches:
739 739 continue
740 740 # O(n^2), but somewhat limited. This only searches the
741 741 # tags visible from a specific head, not all the tags in the
742 742 # whole repo.
743 743 for b in branches[h]:
744 744 vis = False
745 745 for bb in branches[h].keys():
746 746 if b != bb:
747 747 if b in visible(bb):
748 748 vis = True
749 749 break
750 750 if not vis:
751 751 l = out.setdefault(h, [])
752 752 l[len(l):] = self.nodetags(b)
753 753 return out
754 754
755 755 def branches(self, nodes):
756 756 if not nodes:
757 757 nodes = [self.changelog.tip()]
758 758 b = []
759 759 for n in nodes:
760 760 t = n
761 761 while n:
762 762 p = self.changelog.parents(n)
763 763 if p[1] != nullid or p[0] == nullid:
764 764 b.append((t, n, p[0], p[1]))
765 765 break
766 766 n = p[0]
767 767 return b
768 768
769 769 def between(self, pairs):
770 770 r = []
771 771
772 772 for top, bottom in pairs:
773 773 n, l, i = top, [], 0
774 774 f = 1
775 775
776 776 while n != bottom:
777 777 p = self.changelog.parents(n)[0]
778 778 if i == f:
779 779 l.append(n)
780 780 f = f * 2
781 781 n = p
782 782 i += 1
783 783
784 784 r.append(l)
785 785
786 786 return r
787 787
788 788 def findincoming(self, remote, base=None, heads=None):
789 789 m = self.changelog.nodemap
790 790 search = []
791 791 fetch = {}
792 792 seen = {}
793 793 seenbranch = {}
794 794 if base == None:
795 795 base = {}
796 796
797 797 # assume we're closer to the tip than the root
798 798 # and start by examining the heads
799 799 self.ui.status(_("searching for changes\n"))
800 800
801 801 if not heads:
802 802 heads = remote.heads()
803 803
804 804 unknown = []
805 805 for h in heads:
806 806 if h not in m:
807 807 unknown.append(h)
808 808 else:
809 809 base[h] = 1
810 810
811 811 if not unknown:
812 812 return None
813 813
814 814 rep = {}
815 815 reqcnt = 0
816 816
817 817 # search through remote branches
818 818 # a 'branch' here is a linear segment of history, with four parts:
819 819 # head, root, first parent, second parent
820 820 # (a branch always has two parents (or none) by definition)
821 821 unknown = remote.branches(unknown)
822 822 while unknown:
823 823 r = []
824 824 while unknown:
825 825 n = unknown.pop(0)
826 826 if n[0] in seen:
827 827 continue
828 828
829 829 self.ui.debug(_("examining %s:%s\n")
830 830 % (short(n[0]), short(n[1])))
831 831 if n[0] == nullid:
832 832 break
833 833 if n in seenbranch:
834 834 self.ui.debug(_("branch already found\n"))
835 835 continue
836 836 if n[1] and n[1] in m: # do we know the base?
837 837 self.ui.debug(_("found incomplete branch %s:%s\n")
838 838 % (short(n[0]), short(n[1])))
839 839 search.append(n) # schedule branch range for scanning
840 840 seenbranch[n] = 1
841 841 else:
842 842 if n[1] not in seen and n[1] not in fetch:
843 843 if n[2] in m and n[3] in m:
844 844 self.ui.debug(_("found new changeset %s\n") %
845 845 short(n[1]))
846 846 fetch[n[1]] = 1 # earliest unknown
847 847 base[n[2]] = 1 # latest known
848 848 continue
849 849
850 850 for a in n[2:4]:
851 851 if a not in rep:
852 852 r.append(a)
853 853 rep[a] = 1
854 854
855 855 seen[n[0]] = 1
856 856
857 857 if r:
858 858 reqcnt += 1
859 859 self.ui.debug(_("request %d: %s\n") %
860 860 (reqcnt, " ".join(map(short, r))))
861 861 for p in range(0, len(r), 10):
862 862 for b in remote.branches(r[p:p+10]):
863 863 self.ui.debug(_("received %s:%s\n") %
864 864 (short(b[0]), short(b[1])))
865 865 if b[0] in m:
866 866 self.ui.debug(_("found base node %s\n")
867 867 % short(b[0]))
868 868 base[b[0]] = 1
869 869 elif b[0] not in seen:
870 870 unknown.append(b)
871 871
872 872 # do binary search on the branches we found
873 873 while search:
874 874 n = search.pop(0)
875 875 reqcnt += 1
876 876 l = remote.between([(n[0], n[1])])[0]
877 877 l.append(n[1])
878 878 p = n[0]
879 879 f = 1
880 880 for i in l:
881 881 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
882 882 if i in m:
883 883 if f <= 2:
884 884 self.ui.debug(_("found new branch changeset %s\n") %
885 885 short(p))
886 886 fetch[p] = 1
887 887 base[i] = 1
888 888 else:
889 889 self.ui.debug(_("narrowed branch search to %s:%s\n")
890 890 % (short(p), short(i)))
891 891 search.append((p, i))
892 892 break
893 893 p, f = i, f * 2
894 894
895 895 # sanity check our fetch list
896 896 for f in fetch.keys():
897 897 if f in m:
898 898 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
899 899
900 900 if base.keys() == [nullid]:
901 901 self.ui.warn(_("warning: pulling from an unrelated repository!\n"))
902 902
903 903 self.ui.note(_("found new changesets starting at ") +
904 904 " ".join([short(f) for f in fetch]) + "\n")
905 905
906 906 self.ui.debug(_("%d total queries\n") % reqcnt)
907 907
908 908 return fetch.keys()
909 909
910 910 def findoutgoing(self, remote, base=None, heads=None):
911 911 if base == None:
912 912 base = {}
913 913 self.findincoming(remote, base, heads)
914 914
915 915 self.ui.debug(_("common changesets up to ")
916 916 + " ".join(map(short, base.keys())) + "\n")
917 917
918 918 remain = dict.fromkeys(self.changelog.nodemap)
919 919
920 920 # prune everything remote has from the tree
921 921 del remain[nullid]
922 922 remove = base.keys()
923 923 while remove:
924 924 n = remove.pop(0)
925 925 if n in remain:
926 926 del remain[n]
927 927 for p in self.changelog.parents(n):
928 928 remove.append(p)
929 929
930 930 # find every node whose parents have been pruned
931 931 subset = []
932 932 for n in remain:
933 933 p1, p2 = self.changelog.parents(n)
934 934 if p1 not in remain and p2 not in remain:
935 935 subset.append(n)
936 936
937 937 # this is the set of all roots we have to push
938 938 return subset
939 939
940 940 def pull(self, remote, heads=None):
941 941 l = self.lock()
942 942
943 943 # if we have an empty repo, fetch everything
944 944 if self.changelog.tip() == nullid:
945 945 self.ui.status(_("requesting all changes\n"))
946 946 fetch = [nullid]
947 947 else:
948 948 fetch = self.findincoming(remote)
949 949
950 950 if not fetch:
951 951 self.ui.status(_("no changes found\n"))
952 952 return 1
953 953
954 954 if heads is None:
955 955 cg = remote.changegroup(fetch, 'pull')
956 956 else:
957 957 cg = remote.changegroupsubset(fetch, heads, 'pull')
958 958 return self.addchangegroup(cg)
959 959
960 960 def push(self, remote, force=False, revs=None):
961 961 lock = remote.lock()
962 962
963 963 base = {}
964 964 heads = remote.heads()
965 965 inc = self.findincoming(remote, base, heads)
966 966 if not force and inc:
967 967 self.ui.warn(_("abort: unsynced remote changes!\n"))
968 968 self.ui.status(_("(did you forget to sync? use push -f to force)\n"))
969 969 return 1
970 970
971 971 update = self.findoutgoing(remote, base)
972 972 if revs is not None:
973 973 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
974 974 else:
975 975 bases, heads = update, self.changelog.heads()
976 976
977 977 if not bases:
978 978 self.ui.status(_("no changes found\n"))
979 979 return 1
980 980 elif not force:
981 981 if len(bases) < len(heads):
982 982 self.ui.warn(_("abort: push creates new remote branches!\n"))
983 983 self.ui.status(_("(did you forget to merge?"
984 984 " use push -f to force)\n"))
985 985 return 1
986 986
987 987 if revs is None:
988 988 cg = self.changegroup(update, 'push')
989 989 else:
990 990 cg = self.changegroupsubset(update, revs, 'push')
991 991 return remote.addchangegroup(cg)
992 992
993 993 def changegroupsubset(self, bases, heads, source):
994 994 """This function generates a changegroup consisting of all the nodes
995 995 that are descendents of any of the bases, and ancestors of any of
996 996 the heads.
997 997
998 998 It is fairly complex as determining which filenodes and which
999 999 manifest nodes need to be included for the changeset to be complete
1000 1000 is non-trivial.
1001 1001
1002 1002 Another wrinkle is doing the reverse, figuring out which changeset in
1003 1003 the changegroup a particular filenode or manifestnode belongs to."""
1004 1004
1005 1005 self.hook('preoutgoing', throw=True, source=source)
1006 1006
1007 1007 # Set up some initial variables
1008 1008 # Make it easy to refer to self.changelog
1009 1009 cl = self.changelog
1010 1010 # msng is short for missing - compute the list of changesets in this
1011 1011 # changegroup.
1012 1012 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1013 1013 # Some bases may turn out to be superfluous, and some heads may be
1014 1014 # too. nodesbetween will return the minimal set of bases and heads
1015 1015 # necessary to re-create the changegroup.
1016 1016
1017 1017 # Known heads are the list of heads that it is assumed the recipient
1018 1018 # of this changegroup will know about.
1019 1019 knownheads = {}
1020 1020 # We assume that all parents of bases are known heads.
1021 1021 for n in bases:
1022 1022 for p in cl.parents(n):
1023 1023 if p != nullid:
1024 1024 knownheads[p] = 1
1025 1025 knownheads = knownheads.keys()
1026 1026 if knownheads:
1027 1027 # Now that we know what heads are known, we can compute which
1028 1028 # changesets are known. The recipient must know about all
1029 1029 # changesets required to reach the known heads from the null
1030 1030 # changeset.
1031 1031 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1032 1032 junk = None
1033 1033 # Transform the list into an ersatz set.
1034 1034 has_cl_set = dict.fromkeys(has_cl_set)
1035 1035 else:
1036 1036 # If there were no known heads, the recipient cannot be assumed to
1037 1037 # know about any changesets.
1038 1038 has_cl_set = {}
1039 1039
1040 1040 # Make it easy to refer to self.manifest
1041 1041 mnfst = self.manifest
1042 1042 # We don't know which manifests are missing yet
1043 1043 msng_mnfst_set = {}
1044 1044 # Nor do we know which filenodes are missing.
1045 1045 msng_filenode_set = {}
1046 1046
1047 1047 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1048 1048 junk = None
1049 1049
1050 1050 # A changeset always belongs to itself, so the changenode lookup
1051 1051 # function for a changenode is identity.
1052 1052 def identity(x):
1053 1053 return x
1054 1054
1055 1055 # A function generating function. Sets up an environment for the
1056 1056 # inner function.
1057 1057 def cmp_by_rev_func(revlog):
1058 1058 # Compare two nodes by their revision number in the environment's
1059 1059 # revision history. Since the revision number both represents the
1060 1060 # most efficient order to read the nodes in, and represents a
1061 1061 # topological sorting of the nodes, this function is often useful.
1062 1062 def cmp_by_rev(a, b):
1063 1063 return cmp(revlog.rev(a), revlog.rev(b))
1064 1064 return cmp_by_rev
1065 1065
1066 1066 # If we determine that a particular file or manifest node must be a
1067 1067 # node that the recipient of the changegroup will already have, we can
1068 1068 # also assume the recipient will have all the parents. This function
1069 1069 # prunes them from the set of missing nodes.
1070 1070 def prune_parents(revlog, hasset, msngset):
1071 1071 haslst = hasset.keys()
1072 1072 haslst.sort(cmp_by_rev_func(revlog))
1073 1073 for node in haslst:
1074 1074 parentlst = [p for p in revlog.parents(node) if p != nullid]
1075 1075 while parentlst:
1076 1076 n = parentlst.pop()
1077 1077 if n not in hasset:
1078 1078 hasset[n] = 1
1079 1079 p = [p for p in revlog.parents(n) if p != nullid]
1080 1080 parentlst.extend(p)
1081 1081 for n in hasset:
1082 1082 msngset.pop(n, None)
1083 1083
1084 1084 # This is a function generating function used to set up an environment
1085 1085 # for the inner function to execute in.
1086 1086 def manifest_and_file_collector(changedfileset):
1087 1087 # This is an information gathering function that gathers
1088 1088 # information from each changeset node that goes out as part of
1089 1089 # the changegroup. The information gathered is a list of which
1090 1090 # manifest nodes are potentially required (the recipient may
1091 1091 # already have them) and total list of all files which were
1092 1092 # changed in any changeset in the changegroup.
1093 1093 #
1094 1094 # We also remember the first changenode we saw any manifest
1095 1095 # referenced by so we can later determine which changenode 'owns'
1096 1096 # the manifest.
1097 1097 def collect_manifests_and_files(clnode):
1098 1098 c = cl.read(clnode)
1099 1099 for f in c[3]:
1100 1100 # This is to make sure we only have one instance of each
1101 1101 # filename string for each filename.
1102 1102 changedfileset.setdefault(f, f)
1103 1103 msng_mnfst_set.setdefault(c[0], clnode)
1104 1104 return collect_manifests_and_files
1105 1105
1106 1106 # Figure out which manifest nodes (of the ones we think might be part
1107 1107 # of the changegroup) the recipient must know about and remove them
1108 1108 # from the changegroup.
1109 1109 def prune_manifests():
1110 1110 has_mnfst_set = {}
1111 1111 for n in msng_mnfst_set:
1112 1112 # If a 'missing' manifest thinks it belongs to a changenode
1113 1113 # the recipient is assumed to have, obviously the recipient
1114 1114 # must have that manifest.
1115 1115 linknode = cl.node(mnfst.linkrev(n))
1116 1116 if linknode in has_cl_set:
1117 1117 has_mnfst_set[n] = 1
1118 1118 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1119 1119
1120 1120 # Use the information collected in collect_manifests_and_files to say
1121 1121 # which changenode any manifestnode belongs to.
1122 1122 def lookup_manifest_link(mnfstnode):
1123 1123 return msng_mnfst_set[mnfstnode]
1124 1124
1125 1125 # A function generating function that sets up the initial environment
1126 1126 # the inner function.
1127 1127 def filenode_collector(changedfiles):
1128 1128 next_rev = [0]
1129 1129 # This gathers information from each manifestnode included in the
1130 1130 # changegroup about which filenodes the manifest node references
1131 1131 # so we can include those in the changegroup too.
1132 1132 #
1133 1133 # It also remembers which changenode each filenode belongs to. It
1134 1134 # does this by assuming the a filenode belongs to the changenode
1135 1135 # the first manifest that references it belongs to.
1136 1136 def collect_msng_filenodes(mnfstnode):
1137 1137 r = mnfst.rev(mnfstnode)
1138 1138 if r == next_rev[0]:
1139 1139 # If the last rev we looked at was the one just previous,
1140 1140 # we only need to see a diff.
1141 1141 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1142 1142 # For each line in the delta
1143 1143 for dline in delta.splitlines():
1144 1144 # get the filename and filenode for that line
1145 1145 f, fnode = dline.split('\0')
1146 1146 fnode = bin(fnode[:40])
1147 1147 f = changedfiles.get(f, None)
1148 1148 # And if the file is in the list of files we care
1149 1149 # about.
1150 1150 if f is not None:
1151 1151 # Get the changenode this manifest belongs to
1152 1152 clnode = msng_mnfst_set[mnfstnode]
1153 1153 # Create the set of filenodes for the file if
1154 1154 # there isn't one already.
1155 1155 ndset = msng_filenode_set.setdefault(f, {})
1156 1156 # And set the filenode's changelog node to the
1157 1157 # manifest's if it hasn't been set already.
1158 1158 ndset.setdefault(fnode, clnode)
1159 1159 else:
1160 1160 # Otherwise we need a full manifest.
1161 1161 m = mnfst.read(mnfstnode)
1162 1162 # For every file in we care about.
1163 1163 for f in changedfiles:
1164 1164 fnode = m.get(f, None)
1165 1165 # If it's in the manifest
1166 1166 if fnode is not None:
1167 1167 # See comments above.
1168 1168 clnode = msng_mnfst_set[mnfstnode]
1169 1169 ndset = msng_filenode_set.setdefault(f, {})
1170 1170 ndset.setdefault(fnode, clnode)
1171 1171 # Remember the revision we hope to see next.
1172 1172 next_rev[0] = r + 1
1173 1173 return collect_msng_filenodes
1174 1174
1175 1175 # We have a list of filenodes we think we need for a file, lets remove
1176 1176 # all those we now the recipient must have.
1177 1177 def prune_filenodes(f, filerevlog):
1178 1178 msngset = msng_filenode_set[f]
1179 1179 hasset = {}
1180 1180 # If a 'missing' filenode thinks it belongs to a changenode we
1181 1181 # assume the recipient must have, then the recipient must have
1182 1182 # that filenode.
1183 1183 for n in msngset:
1184 1184 clnode = cl.node(filerevlog.linkrev(n))
1185 1185 if clnode in has_cl_set:
1186 1186 hasset[n] = 1
1187 1187 prune_parents(filerevlog, hasset, msngset)
1188 1188
1189 1189 # A function generator function that sets up the a context for the
1190 1190 # inner function.
1191 1191 def lookup_filenode_link_func(fname):
1192 1192 msngset = msng_filenode_set[fname]
1193 1193 # Lookup the changenode the filenode belongs to.
1194 1194 def lookup_filenode_link(fnode):
1195 1195 return msngset[fnode]
1196 1196 return lookup_filenode_link
1197 1197
1198 1198 # Now that we have all theses utility functions to help out and
1199 1199 # logically divide up the task, generate the group.
1200 1200 def gengroup():
1201 1201 # The set of changed files starts empty.
1202 1202 changedfiles = {}
1203 1203 # Create a changenode group generator that will call our functions
1204 1204 # back to lookup the owning changenode and collect information.
1205 1205 group = cl.group(msng_cl_lst, identity,
1206 1206 manifest_and_file_collector(changedfiles))
1207 1207 for chnk in group:
1208 1208 yield chnk
1209 1209
1210 1210 # The list of manifests has been collected by the generator
1211 1211 # calling our functions back.
1212 1212 prune_manifests()
1213 1213 msng_mnfst_lst = msng_mnfst_set.keys()
1214 1214 # Sort the manifestnodes by revision number.
1215 1215 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1216 1216 # Create a generator for the manifestnodes that calls our lookup
1217 1217 # and data collection functions back.
1218 1218 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1219 1219 filenode_collector(changedfiles))
1220 1220 for chnk in group:
1221 1221 yield chnk
1222 1222
1223 1223 # These are no longer needed, dereference and toss the memory for
1224 1224 # them.
1225 1225 msng_mnfst_lst = None
1226 1226 msng_mnfst_set.clear()
1227 1227
1228 1228 changedfiles = changedfiles.keys()
1229 1229 changedfiles.sort()
1230 1230 # Go through all our files in order sorted by name.
1231 1231 for fname in changedfiles:
1232 1232 filerevlog = self.file(fname)
1233 1233 # Toss out the filenodes that the recipient isn't really
1234 1234 # missing.
1235 1235 if msng_filenode_set.has_key(fname):
1236 1236 prune_filenodes(fname, filerevlog)
1237 1237 msng_filenode_lst = msng_filenode_set[fname].keys()
1238 1238 else:
1239 1239 msng_filenode_lst = []
1240 1240 # If any filenodes are left, generate the group for them,
1241 1241 # otherwise don't bother.
1242 1242 if len(msng_filenode_lst) > 0:
1243 1243 yield struct.pack(">l", len(fname) + 4) + fname
1244 1244 # Sort the filenodes by their revision #
1245 1245 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1246 1246 # Create a group generator and only pass in a changenode
1247 1247 # lookup function as we need to collect no information
1248 1248 # from filenodes.
1249 1249 group = filerevlog.group(msng_filenode_lst,
1250 1250 lookup_filenode_link_func(fname))
1251 1251 for chnk in group:
1252 1252 yield chnk
1253 1253 if msng_filenode_set.has_key(fname):
1254 1254 # Don't need this anymore, toss it to free memory.
1255 1255 del msng_filenode_set[fname]
1256 1256 # Signal that no more groups are left.
1257 1257 yield struct.pack(">l", 0)
1258 1258
1259 1259 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1260 1260
1261 1261 return util.chunkbuffer(gengroup())
1262 1262
1263 1263 def changegroup(self, basenodes, source):
1264 1264 """Generate a changegroup of all nodes that we have that a recipient
1265 1265 doesn't.
1266 1266
1267 1267 This is much easier than the previous function as we can assume that
1268 1268 the recipient has any changenode we aren't sending them."""
1269 1269
1270 1270 self.hook('preoutgoing', throw=True, source=source)
1271 1271
1272 1272 cl = self.changelog
1273 1273 nodes = cl.nodesbetween(basenodes, None)[0]
1274 1274 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1275 1275
1276 1276 def identity(x):
1277 1277 return x
1278 1278
1279 1279 def gennodelst(revlog):
1280 1280 for r in xrange(0, revlog.count()):
1281 1281 n = revlog.node(r)
1282 1282 if revlog.linkrev(n) in revset:
1283 1283 yield n
1284 1284
1285 1285 def changed_file_collector(changedfileset):
1286 1286 def collect_changed_files(clnode):
1287 1287 c = cl.read(clnode)
1288 1288 for fname in c[3]:
1289 1289 changedfileset[fname] = 1
1290 1290 return collect_changed_files
1291 1291
1292 1292 def lookuprevlink_func(revlog):
1293 1293 def lookuprevlink(n):
1294 1294 return cl.node(revlog.linkrev(n))
1295 1295 return lookuprevlink
1296 1296
1297 1297 def gengroup():
1298 1298 # construct a list of all changed files
1299 1299 changedfiles = {}
1300 1300
1301 1301 for chnk in cl.group(nodes, identity,
1302 1302 changed_file_collector(changedfiles)):
1303 1303 yield chnk
1304 1304 changedfiles = changedfiles.keys()
1305 1305 changedfiles.sort()
1306 1306
1307 1307 mnfst = self.manifest
1308 1308 nodeiter = gennodelst(mnfst)
1309 1309 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1310 1310 yield chnk
1311 1311
1312 1312 for fname in changedfiles:
1313 1313 filerevlog = self.file(fname)
1314 1314 nodeiter = gennodelst(filerevlog)
1315 1315 nodeiter = list(nodeiter)
1316 1316 if nodeiter:
1317 1317 yield struct.pack(">l", len(fname) + 4) + fname
1318 1318 lookup = lookuprevlink_func(filerevlog)
1319 1319 for chnk in filerevlog.group(nodeiter, lookup):
1320 1320 yield chnk
1321 1321
1322 1322 yield struct.pack(">l", 0)
1323 1323 self.hook('outgoing', node=hex(nodes[0]), source=source)
1324 1324
1325 1325 return util.chunkbuffer(gengroup())
1326 1326
1327 1327 def addchangegroup(self, source):
1328 1328
1329 1329 def getchunk():
1330 1330 d = source.read(4)
1331 1331 if not d:
1332 1332 return ""
1333 1333 l = struct.unpack(">l", d)[0]
1334 1334 if l <= 4:
1335 1335 return ""
1336 1336 d = source.read(l - 4)
1337 1337 if len(d) < l - 4:
1338 1338 raise repo.RepoError(_("premature EOF reading chunk"
1339 1339 " (got %d bytes, expected %d)")
1340 1340 % (len(d), l - 4))
1341 1341 return d
1342 1342
1343 1343 def getgroup():
1344 1344 while 1:
1345 1345 c = getchunk()
1346 1346 if not c:
1347 1347 break
1348 1348 yield c
1349 1349
1350 1350 def csmap(x):
1351 1351 self.ui.debug(_("add changeset %s\n") % short(x))
1352 1352 return self.changelog.count()
1353 1353
1354 1354 def revmap(x):
1355 1355 return self.changelog.rev(x)
1356 1356
1357 1357 if not source:
1358 1358 return
1359 1359
1360 1360 self.hook('prechangegroup', throw=True)
1361 1361
1362 1362 changesets = files = revisions = 0
1363 1363
1364 1364 tr = self.transaction()
1365 1365
1366 1366 oldheads = len(self.changelog.heads())
1367 1367
1368 1368 # pull off the changeset group
1369 1369 self.ui.status(_("adding changesets\n"))
1370 1370 co = self.changelog.tip()
1371 1371 cn = self.changelog.addgroup(getgroup(), csmap, tr, 1) # unique
1372 1372 cnr, cor = map(self.changelog.rev, (cn, co))
1373 1373 if cn == nullid:
1374 1374 cnr = cor
1375 1375 changesets = cnr - cor
1376 1376
1377 1377 # pull off the manifest group
1378 1378 self.ui.status(_("adding manifests\n"))
1379 1379 mm = self.manifest.tip()
1380 1380 mo = self.manifest.addgroup(getgroup(), revmap, tr)
1381 1381
1382 1382 # process the files
1383 1383 self.ui.status(_("adding file changes\n"))
1384 1384 while 1:
1385 1385 f = getchunk()
1386 1386 if not f:
1387 1387 break
1388 1388 self.ui.debug(_("adding %s revisions\n") % f)
1389 1389 fl = self.file(f)
1390 1390 o = fl.count()
1391 1391 n = fl.addgroup(getgroup(), revmap, tr)
1392 1392 revisions += fl.count() - o
1393 1393 files += 1
1394 1394
1395 1395 newheads = len(self.changelog.heads())
1396 1396 heads = ""
1397 1397 if oldheads and newheads > oldheads:
1398 1398 heads = _(" (+%d heads)") % (newheads - oldheads)
1399 1399
1400 1400 self.ui.status(_("added %d changesets"
1401 1401 " with %d changes to %d files%s\n")
1402 1402 % (changesets, revisions, files, heads))
1403 1403
1404 1404 self.hook('pretxnchangegroup', throw=True,
1405 1405 node=hex(self.changelog.node(cor+1)))
1406 1406
1407 1407 tr.close()
1408 1408
1409 1409 if changesets > 0:
1410 1410 self.hook("changegroup", node=hex(self.changelog.node(cor+1)))
1411 1411
1412 1412 for i in range(cor + 1, cnr + 1):
1413 1413 self.hook("incoming", node=hex(self.changelog.node(i)))
1414 1414
1415 1415 def update(self, node, allow=False, force=False, choose=None,
1416 1416 moddirstate=True, forcemerge=False, wlock=None):
1417 1417 pl = self.dirstate.parents()
1418 1418 if not force and pl[1] != nullid:
1419 1419 self.ui.warn(_("aborting: outstanding uncommitted merges\n"))
1420 1420 return 1
1421 1421
1422 1422 err = False
1423 1423
1424 1424 p1, p2 = pl[0], node
1425 1425 pa = self.changelog.ancestor(p1, p2)
1426 1426 m1n = self.changelog.read(p1)[0]
1427 1427 m2n = self.changelog.read(p2)[0]
1428 1428 man = self.manifest.ancestor(m1n, m2n)
1429 1429 m1 = self.manifest.read(m1n)
1430 1430 mf1 = self.manifest.readflags(m1n)
1431 1431 m2 = self.manifest.read(m2n).copy()
1432 1432 mf2 = self.manifest.readflags(m2n)
1433 1433 ma = self.manifest.read(man)
1434 1434 mfa = self.manifest.readflags(man)
1435 1435
1436 1436 modified, added, removed, deleted, unknown = self.changes()
1437 1437
1438 1438 # is this a jump, or a merge? i.e. is there a linear path
1439 1439 # from p1 to p2?
1440 1440 linear_path = (pa == p1 or pa == p2)
1441 1441
1442 1442 if allow and linear_path:
1443 1443 raise util.Abort(_("there is nothing to merge, "
1444 1444 "just use 'hg update'"))
1445 1445 if allow and not forcemerge:
1446 1446 if modified or added or removed:
1447 1447 raise util.Abort(_("outstanding uncommited changes"))
1448 1448 if not forcemerge and not force:
1449 1449 for f in unknown:
1450 1450 if f in m2:
1451 1451 t1 = self.wread(f)
1452 1452 t2 = self.file(f).read(m2[f])
1453 1453 if cmp(t1, t2) != 0:
1454 1454 raise util.Abort(_("'%s' already exists in the working"
1455 1455 " dir and differs from remote") % f)
1456 1456
1457 1457 # resolve the manifest to determine which files
1458 1458 # we care about merging
1459 1459 self.ui.note(_("resolving manifests\n"))
1460 1460 self.ui.debug(_(" force %s allow %s moddirstate %s linear %s\n") %
1461 1461 (force, allow, moddirstate, linear_path))
1462 1462 self.ui.debug(_(" ancestor %s local %s remote %s\n") %
1463 1463 (short(man), short(m1n), short(m2n)))
1464 1464
1465 1465 merge = {}
1466 1466 get = {}
1467 1467 remove = []
1468 1468
1469 1469 # construct a working dir manifest
1470 1470 mw = m1.copy()
1471 1471 mfw = mf1.copy()
1472 1472 umap = dict.fromkeys(unknown)
1473 1473
1474 1474 for f in added + modified + unknown:
1475 1475 mw[f] = ""
1476 1476 mfw[f] = util.is_exec(self.wjoin(f), mfw.get(f, False))
1477 1477
1478 1478 if moddirstate and not wlock:
1479 1479 wlock = self.wlock()
1480 1480
1481 1481 for f in deleted + removed:
1482 1482 if f in mw:
1483 1483 del mw[f]
1484 1484
1485 1485 # If we're jumping between revisions (as opposed to merging),
1486 1486 # and if neither the working directory nor the target rev has
1487 1487 # the file, then we need to remove it from the dirstate, to
1488 1488 # prevent the dirstate from listing the file when it is no
1489 1489 # longer in the manifest.
1490 1490 if moddirstate and linear_path and f not in m2:
1491 1491 self.dirstate.forget((f,))
1492 1492
1493 1493 # Compare manifests
1494 1494 for f, n in mw.iteritems():
1495 1495 if choose and not choose(f):
1496 1496 continue
1497 1497 if f in m2:
1498 1498 s = 0
1499 1499
1500 1500 # is the wfile new since m1, and match m2?
1501 1501 if f not in m1:
1502 1502 t1 = self.wread(f)
1503 1503 t2 = self.file(f).read(m2[f])
1504 1504 if cmp(t1, t2) == 0:
1505 1505 n = m2[f]
1506 1506 del t1, t2
1507 1507
1508 1508 # are files different?
1509 1509 if n != m2[f]:
1510 1510 a = ma.get(f, nullid)
1511 1511 # are both different from the ancestor?
1512 1512 if n != a and m2[f] != a:
1513 1513 self.ui.debug(_(" %s versions differ, resolve\n") % f)
1514 1514 # merge executable bits
1515 1515 # "if we changed or they changed, change in merge"
1516 1516 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1517 1517 mode = ((a^b) | (a^c)) ^ a
1518 1518 merge[f] = (m1.get(f, nullid), m2[f], mode)
1519 1519 s = 1
1520 1520 # are we clobbering?
1521 1521 # is remote's version newer?
1522 1522 # or are we going back in time?
1523 1523 elif force or m2[f] != a or (p2 == pa and mw[f] == m1[f]):
1524 1524 self.ui.debug(_(" remote %s is newer, get\n") % f)
1525 1525 get[f] = m2[f]
1526 1526 s = 1
1527 1527 elif f in umap:
1528 1528 # this unknown file is the same as the checkout
1529 1529 get[f] = m2[f]
1530 1530
1531 1531 if not s and mfw[f] != mf2[f]:
1532 1532 if force:
1533 1533 self.ui.debug(_(" updating permissions for %s\n") % f)
1534 1534 util.set_exec(self.wjoin(f), mf2[f])
1535 1535 else:
1536 1536 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1537 1537 mode = ((a^b) | (a^c)) ^ a
1538 1538 if mode != b:
1539 1539 self.ui.debug(_(" updating permissions for %s\n")
1540 1540 % f)
1541 1541 util.set_exec(self.wjoin(f), mode)
1542 1542 del m2[f]
1543 1543 elif f in ma:
1544 1544 if n != ma[f]:
1545 1545 r = _("d")
1546 1546 if not force and (linear_path or allow):
1547 1547 r = self.ui.prompt(
1548 1548 (_(" local changed %s which remote deleted\n") % f) +
1549 1549 _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
1550 1550 if r == _("d"):
1551 1551 remove.append(f)
1552 1552 else:
1553 1553 self.ui.debug(_("other deleted %s\n") % f)
1554 1554 remove.append(f) # other deleted it
1555 1555 else:
1556 1556 # file is created on branch or in working directory
1557 1557 if force and f not in umap:
1558 1558 self.ui.debug(_("remote deleted %s, clobbering\n") % f)
1559 1559 remove.append(f)
1560 1560 elif n == m1.get(f, nullid): # same as parent
1561 1561 if p2 == pa: # going backwards?
1562 1562 self.ui.debug(_("remote deleted %s\n") % f)
1563 1563 remove.append(f)
1564 1564 else:
1565 1565 self.ui.debug(_("local modified %s, keeping\n") % f)
1566 1566 else:
1567 1567 self.ui.debug(_("working dir created %s, keeping\n") % f)
1568 1568
1569 1569 for f, n in m2.iteritems():
1570 1570 if choose and not choose(f):
1571 1571 continue
1572 1572 if f[0] == "/":
1573 1573 continue
1574 1574 if f in ma and n != ma[f]:
1575 1575 r = _("k")
1576 1576 if not force and (linear_path or allow):
1577 1577 r = self.ui.prompt(
1578 1578 (_("remote changed %s which local deleted\n") % f) +
1579 1579 _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
1580 1580 if r == _("k"):
1581 1581 get[f] = n
1582 1582 elif f not in ma:
1583 1583 self.ui.debug(_("remote created %s\n") % f)
1584 1584 get[f] = n
1585 1585 else:
1586 1586 if force or p2 == pa: # going backwards?
1587 1587 self.ui.debug(_("local deleted %s, recreating\n") % f)
1588 1588 get[f] = n
1589 1589 else:
1590 1590 self.ui.debug(_("local deleted %s\n") % f)
1591 1591
1592 1592 del mw, m1, m2, ma
1593 1593
1594 1594 if force:
1595 1595 for f in merge:
1596 1596 get[f] = merge[f][1]
1597 1597 merge = {}
1598 1598
1599 1599 if linear_path or force:
1600 1600 # we don't need to do any magic, just jump to the new rev
1601 1601 branch_merge = False
1602 1602 p1, p2 = p2, nullid
1603 1603 else:
1604 1604 if not allow:
1605 1605 self.ui.status(_("this update spans a branch"
1606 1606 " affecting the following files:\n"))
1607 1607 fl = merge.keys() + get.keys()
1608 1608 fl.sort()
1609 1609 for f in fl:
1610 1610 cf = ""
1611 1611 if f in merge:
1612 1612 cf = _(" (resolve)")
1613 1613 self.ui.status(" %s%s\n" % (f, cf))
1614 1614 self.ui.warn(_("aborting update spanning branches!\n"))
1615 1615 self.ui.status(_("(use update -m to merge across branches"
1616 1616 " or -C to lose changes)\n"))
1617 1617 return 1
1618 1618 branch_merge = True
1619 1619
1620 1620 # get the files we don't need to change
1621 1621 files = get.keys()
1622 1622 files.sort()
1623 1623 for f in files:
1624 1624 if f[0] == "/":
1625 1625 continue
1626 1626 self.ui.note(_("getting %s\n") % f)
1627 1627 t = self.file(f).read(get[f])
1628 1628 self.wwrite(f, t)
1629 1629 util.set_exec(self.wjoin(f), mf2[f])
1630 1630 if moddirstate:
1631 1631 if branch_merge:
1632 1632 self.dirstate.update([f], 'n', st_mtime=-1)
1633 1633 else:
1634 1634 self.dirstate.update([f], 'n')
1635 1635
1636 1636 # merge the tricky bits
1637 1637 files = merge.keys()
1638 1638 files.sort()
1639 1639 for f in files:
1640 1640 self.ui.status(_("merging %s\n") % f)
1641 1641 my, other, flag = merge[f]
1642 1642 ret = self.merge3(f, my, other)
1643 1643 if ret:
1644 1644 err = True
1645 1645 util.set_exec(self.wjoin(f), flag)
1646 1646 if moddirstate:
1647 1647 if branch_merge:
1648 1648 # We've done a branch merge, mark this file as merged
1649 1649 # so that we properly record the merger later
1650 1650 self.dirstate.update([f], 'm')
1651 1651 else:
1652 1652 # We've update-merged a locally modified file, so
1653 1653 # we set the dirstate to emulate a normal checkout
1654 1654 # of that file some time in the past. Thus our
1655 1655 # merge will appear as a normal local file
1656 1656 # modification.
1657 1657 f_len = len(self.file(f).read(other))
1658 1658 self.dirstate.update([f], 'n', st_size=f_len, st_mtime=-1)
1659 1659
1660 1660 remove.sort()
1661 1661 for f in remove:
1662 1662 self.ui.note(_("removing %s\n") % f)
1663 1663 util.audit_path(f)
1664 1664 try:
1665 1665 util.unlink(self.wjoin(f))
1666 1666 except OSError, inst:
1667 1667 if inst.errno != errno.ENOENT:
1668 1668 self.ui.warn(_("update failed to remove %s: %s!\n") %
1669 1669 (f, inst.strerror))
1670 1670 if moddirstate:
1671 1671 if branch_merge:
1672 1672 self.dirstate.update(remove, 'r')
1673 1673 else:
1674 1674 self.dirstate.forget(remove)
1675 1675
1676 1676 if moddirstate:
1677 1677 self.dirstate.setparents(p1, p2)
1678 1678 return err
1679 1679
1680 1680 def merge3(self, fn, my, other):
1681 1681 """perform a 3-way merge in the working directory"""
1682 1682
1683 1683 def temp(prefix, node):
1684 1684 pre = "%s~%s." % (os.path.basename(fn), prefix)
1685 1685 (fd, name) = tempfile.mkstemp("", pre)
1686 1686 f = os.fdopen(fd, "wb")
1687 1687 self.wwrite(fn, fl.read(node), f)
1688 1688 f.close()
1689 1689 return name
1690 1690
1691 1691 fl = self.file(fn)
1692 1692 base = fl.ancestor(my, other)
1693 1693 a = self.wjoin(fn)
1694 1694 b = temp("base", base)
1695 1695 c = temp("other", other)
1696 1696
1697 1697 self.ui.note(_("resolving %s\n") % fn)
1698 1698 self.ui.debug(_("file %s: my %s other %s ancestor %s\n") %
1699 1699 (fn, short(my), short(other), short(base)))
1700 1700
1701 1701 cmd = (os.environ.get("HGMERGE") or self.ui.config("ui", "merge")
1702 1702 or "hgmerge")
1703 1703 r = os.system('%s "%s" "%s" "%s"' % (cmd, a, b, c))
1704 1704 if r:
1705 1705 self.ui.warn(_("merging %s failed!\n") % fn)
1706 1706
1707 1707 os.unlink(b)
1708 1708 os.unlink(c)
1709 1709 return r
1710 1710
1711 1711 def verify(self):
1712 1712 filelinkrevs = {}
1713 1713 filenodes = {}
1714 1714 changesets = revisions = files = 0
1715 1715 errors = [0]
1716 1716 neededmanifests = {}
1717 1717
1718 1718 def err(msg):
1719 1719 self.ui.warn(msg + "\n")
1720 1720 errors[0] += 1
1721 1721
1722 1722 def checksize(obj, name):
1723 1723 d = obj.checksize()
1724 1724 if d[0]:
1725 1725 err(_("%s data length off by %d bytes") % (name, d[0]))
1726 1726 if d[1]:
1727 1727 err(_("%s index contains %d extra bytes") % (name, d[1]))
1728 1728
1729 1729 seen = {}
1730 1730 self.ui.status(_("checking changesets\n"))
1731 1731 checksize(self.changelog, "changelog")
1732 1732
1733 1733 for i in range(self.changelog.count()):
1734 1734 changesets += 1
1735 1735 n = self.changelog.node(i)
1736 1736 l = self.changelog.linkrev(n)
1737 1737 if l != i:
1738 1738 err(_("incorrect link (%d) for changeset revision %d") %(l, i))
1739 1739 if n in seen:
1740 1740 err(_("duplicate changeset at revision %d") % i)
1741 1741 seen[n] = 1
1742 1742
1743 1743 for p in self.changelog.parents(n):
1744 1744 if p not in self.changelog.nodemap:
1745 1745 err(_("changeset %s has unknown parent %s") %
1746 1746 (short(n), short(p)))
1747 1747 try:
1748 1748 changes = self.changelog.read(n)
1749 1749 except KeyboardInterrupt:
1750 1750 self.ui.warn(_("interrupted"))
1751 1751 raise
1752 1752 except Exception, inst:
1753 1753 err(_("unpacking changeset %s: %s") % (short(n), inst))
1754 1754 continue
1755 1755
1756 1756 neededmanifests[changes[0]] = n
1757 1757
1758 1758 for f in changes[3]:
1759 1759 filelinkrevs.setdefault(f, []).append(i)
1760 1760
1761 1761 seen = {}
1762 1762 self.ui.status(_("checking manifests\n"))
1763 1763 checksize(self.manifest, "manifest")
1764 1764
1765 1765 for i in range(self.manifest.count()):
1766 1766 n = self.manifest.node(i)
1767 1767 l = self.manifest.linkrev(n)
1768 1768
1769 1769 if l < 0 or l >= self.changelog.count():
1770 1770 err(_("bad manifest link (%d) at revision %d") % (l, i))
1771 1771
1772 1772 if n in neededmanifests:
1773 1773 del neededmanifests[n]
1774 1774
1775 1775 if n in seen:
1776 1776 err(_("duplicate manifest at revision %d") % i)
1777 1777
1778 1778 seen[n] = 1
1779 1779
1780 1780 for p in self.manifest.parents(n):
1781 1781 if p not in self.manifest.nodemap:
1782 1782 err(_("manifest %s has unknown parent %s") %
1783 1783 (short(n), short(p)))
1784 1784
1785 1785 try:
1786 1786 delta = mdiff.patchtext(self.manifest.delta(n))
1787 1787 except KeyboardInterrupt:
1788 1788 self.ui.warn(_("interrupted"))
1789 1789 raise
1790 1790 except Exception, inst:
1791 1791 err(_("unpacking manifest %s: %s") % (short(n), inst))
1792 1792 continue
1793 1793
1794 1794 try:
1795 1795 ff = [ l.split('\0') for l in delta.splitlines() ]
1796 1796 for f, fn in ff:
1797 1797 filenodes.setdefault(f, {})[bin(fn[:40])] = 1
1798 1798 except (ValueError, TypeError), inst:
1799 1799 err(_("broken delta in manifest %s: %s") % (short(n), inst))
1800 1800
1801 1801 self.ui.status(_("crosschecking files in changesets and manifests\n"))
1802 1802
1803 1803 for m, c in neededmanifests.items():
1804 1804 err(_("Changeset %s refers to unknown manifest %s") %
1805 1805 (short(m), short(c)))
1806 1806 del neededmanifests
1807 1807
1808 1808 for f in filenodes:
1809 1809 if f not in filelinkrevs:
1810 1810 err(_("file %s in manifest but not in changesets") % f)
1811 1811
1812 1812 for f in filelinkrevs:
1813 1813 if f not in filenodes:
1814 1814 err(_("file %s in changeset but not in manifest") % f)
1815 1815
1816 1816 self.ui.status(_("checking files\n"))
1817 1817 ff = filenodes.keys()
1818 1818 ff.sort()
1819 1819 for f in ff:
1820 1820 if f == "/dev/null":
1821 1821 continue
1822 1822 files += 1
1823 1823 if not f:
1824 1824 err(_("file without name in manifest %s") % short(n))
1825 1825 continue
1826 1826 fl = self.file(f)
1827 1827 checksize(fl, f)
1828 1828
1829 1829 nodes = {nullid: 1}
1830 1830 seen = {}
1831 1831 for i in range(fl.count()):
1832 1832 revisions += 1
1833 1833 n = fl.node(i)
1834 1834
1835 1835 if n in seen:
1836 1836 err(_("%s: duplicate revision %d") % (f, i))
1837 1837 if n not in filenodes[f]:
1838 1838 err(_("%s: %d:%s not in manifests") % (f, i, short(n)))
1839 1839 else:
1840 1840 del filenodes[f][n]
1841 1841
1842 1842 flr = fl.linkrev(n)
1843 1843 if flr not in filelinkrevs.get(f, []):
1844 1844 err(_("%s:%s points to unexpected changeset %d")
1845 1845 % (f, short(n), flr))
1846 1846 else:
1847 1847 filelinkrevs[f].remove(flr)
1848 1848
1849 1849 # verify contents
1850 1850 try:
1851 1851 t = fl.read(n)
1852 1852 except KeyboardInterrupt:
1853 1853 self.ui.warn(_("interrupted"))
1854 1854 raise
1855 1855 except Exception, inst:
1856 1856 err(_("unpacking file %s %s: %s") % (f, short(n), inst))
1857 1857
1858 1858 # verify parents
1859 1859 (p1, p2) = fl.parents(n)
1860 1860 if p1 not in nodes:
1861 1861 err(_("file %s:%s unknown parent 1 %s") %
1862 1862 (f, short(n), short(p1)))
1863 1863 if p2 not in nodes:
1864 1864 err(_("file %s:%s unknown parent 2 %s") %
1865 1865 (f, short(n), short(p1)))
1866 1866 nodes[n] = 1
1867 1867
1868 1868 # cross-check
1869 1869 for node in filenodes[f]:
1870 1870 err(_("node %s in manifests not in %s") % (hex(node), f))
1871 1871
1872 1872 self.ui.status(_("%d files, %d changesets, %d total revisions\n") %
1873 1873 (files, changesets, revisions))
1874 1874
1875 1875 if errors[0]:
1876 1876 self.ui.warn(_("%d integrity errors encountered!\n") % errors[0])
1877 1877 return 1
1878 1878
# used to avoid circular references so destructors work
def aftertrans(base):
    """Return a callback that, when invoked, renames the journal files
    under *base* to the corresponding undo files (kept as a closure
    rather than a repository method to avoid circular references)."""
    p = base
    def a():
        for old, new in (("journal", "undo"),
                         ("journal.dirstate", "undo.dirstate")):
            util.rename(os.path.join(p, old), os.path.join(p, new))
    return a
1887 1887
@@ -1,219 +1,221
1 1 # ui.py - user interface bits for mercurial
2 2 #
3 3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 import ConfigParser
9 9 from i18n import gettext as _
10 10 from demandload import *
11 11 demandload(globals(), "os re socket sys util")
12 12
13 13 class ui(object):
14 14 def __init__(self, verbose=False, debug=False, quiet=False,
15 15 interactive=True, parentui=None):
16 16 self.overlay = {}
17 17 if parentui is None:
18 18 # this is the parent of all ui children
19 19 self.parentui = None
20 20 self.cdata = ConfigParser.SafeConfigParser()
21 21 self.readconfig(util.rcpath)
22 22
23 23 self.quiet = self.configbool("ui", "quiet")
24 24 self.verbose = self.configbool("ui", "verbose")
25 25 self.debugflag = self.configbool("ui", "debug")
26 26 self.interactive = self.configbool("ui", "interactive", True)
27 27
28 28 self.updateopts(verbose, debug, quiet, interactive)
29 29 self.diffcache = None
30 30 else:
31 31 # parentui may point to an ui object which is already a child
32 32 self.parentui = parentui.parentui or parentui
33 33 parent_cdata = self.parentui.cdata
34 34 self.cdata = ConfigParser.SafeConfigParser(parent_cdata.defaults())
35 35 # make interpolation work
36 36 for section in parent_cdata.sections():
37 37 self.cdata.add_section(section)
38 38 for name, value in parent_cdata.items(section, raw=True):
39 39 self.cdata.set(section, name, value)
40 40
41 41 def __getattr__(self, key):
42 42 return getattr(self.parentui, key)
43 43
44 44 def updateopts(self, verbose=False, debug=False, quiet=False,
45 45 interactive=True):
46 46 self.quiet = (self.quiet or quiet) and not verbose and not debug
47 47 self.verbose = (self.verbose or verbose) or debug
48 48 self.debugflag = (self.debugflag or debug)
49 49 self.interactive = (self.interactive and interactive)
50 50
51 51 def readconfig(self, fn):
52 52 if isinstance(fn, basestring):
53 53 fn = [fn]
54 54 for f in fn:
55 55 try:
56 56 self.cdata.read(f)
57 57 except ConfigParser.ParsingError, inst:
58 58 raise util.Abort(_("Failed to parse %s\n%s") % (f, inst))
59 59
60 60 def setconfig(self, section, name, val):
61 61 self.overlay[(section, name)] = val
62 62
63 63 def config(self, section, name, default=None):
64 64 if self.overlay.has_key((section, name)):
65 65 return self.overlay[(section, name)]
66 66 if self.cdata.has_option(section, name):
67 67 try:
68 68 return self.cdata.get(section, name)
69 69 except ConfigParser.InterpolationError, inst:
70 70 raise util.Abort(_("Error in configuration:\n%s") % inst)
71 71 if self.parentui is None:
72 72 return default
73 73 else:
74 74 return self.parentui.config(section, name, default)
75 75
76 76 def configbool(self, section, name, default=False):
77 77 if self.overlay.has_key((section, name)):
78 78 return self.overlay[(section, name)]
79 79 if self.cdata.has_option(section, name):
80 80 try:
81 81 return self.cdata.getboolean(section, name)
82 82 except ConfigParser.InterpolationError, inst:
83 83 raise util.Abort(_("Error in configuration:\n%s") % inst)
84 84 if self.parentui is None:
85 85 return default
86 86 else:
87 87 return self.parentui.configbool(section, name, default)
88 88
89 89 def configitems(self, section):
90 90 items = {}
91 91 if self.parentui is not None:
92 92 items = dict(self.parentui.configitems(section))
93 93 if self.cdata.has_section(section):
94 94 try:
95 95 items.update(dict(self.cdata.items(section)))
96 96 except ConfigParser.InterpolationError, inst:
97 97 raise util.Abort(_("Error in configuration:\n%s") % inst)
98 98 x = items.items()
99 99 x.sort()
100 100 return x
101 101
102 102 def walkconfig(self, seen=None):
103 103 if seen is None:
104 104 seen = {}
105 105 for (section, name), value in self.overlay.iteritems():
106 106 yield section, name, value
107 107 seen[section, name] = 1
108 108 for section in self.cdata.sections():
109 109 for name, value in self.cdata.items(section):
110 110 if (section, name) in seen: continue
111 111 yield section, name, value.replace('\n', '\\n')
112 112 seen[section, name] = 1
113 113 if self.parentui is not None:
114 114 for parent in self.parentui.walkconfig(seen):
115 115 yield parent
116 116
117 117 def extensions(self):
118 118 return self.configitems("extensions")
119 119
120 120 def diffopts(self):
121 121 if self.diffcache:
122 122 return self.diffcache
123 123 ret = { 'showfunc' : True, 'ignorews' : False}
124 124 for x in self.configitems("diff"):
125 125 k = x[0].lower()
126 126 v = x[1]
127 127 if v:
128 128 v = v.lower()
129 129 if v == 'true':
130 130 value = True
131 131 else:
132 132 value = False
133 133 ret[k] = value
134 134 self.diffcache = ret
135 135 return ret
136 136
137 137 def username(self):
138 138 return (os.environ.get("HGUSER") or
139 139 self.config("ui", "username") or
140 140 os.environ.get("EMAIL") or
141 141 (os.environ.get("LOGNAME",
142 142 os.environ.get("USERNAME", "unknown"))
143 143 + '@' + socket.getfqdn()))
144 144
145 145 def shortuser(self, user):
146 146 """Return a short representation of a user name or email address."""
147 147 if not self.verbose:
148 148 f = user.find('@')
149 149 if f >= 0:
150 150 user = user[:f]
151 151 f = user.find('<')
152 152 if f >= 0:
153 153 user = user[f+1:]
154 154 return user
155 155
156 156 def expandpath(self, loc, root=""):
157 157 paths = {}
158 158 for name, path in self.configitems("paths"):
159 159 m = path.find("://")
160 160 if m == -1:
161 161 path = os.path.join(root, path)
162 162 paths[name] = path
163 163
164 164 return paths.get(loc, loc)
165 165
166 166 def write(self, *args):
167 167 for a in args:
168 168 sys.stdout.write(str(a))
169 169
170 170 def write_err(self, *args):
171 171 if not sys.stdout.closed: sys.stdout.flush()
172 172 for a in args:
173 173 sys.stderr.write(str(a))
174 174
175 175 def flush(self):
176 176 try:
177 177 sys.stdout.flush()
178 178 finally:
179 179 sys.stderr.flush()
180 180
181 181 def readline(self):
182 182 return sys.stdin.readline()[:-1]
183 183 def prompt(self, msg, pat, default="y"):
184 184 if not self.interactive: return default
185 185 while 1:
186 186 self.write(msg, " ")
187 187 r = self.readline()
188 188 if re.match(pat, r):
189 189 return r
190 190 else:
191 191 self.write(_("unrecognized response\n"))
192 192 def status(self, *msg):
193 193 if not self.quiet: self.write(*msg)
194 194 def warn(self, *msg):
195 195 self.write_err(*msg)
196 196 def note(self, *msg):
197 197 if self.verbose: self.write(*msg)
198 198 def debug(self, *msg):
199 199 if self.debugflag: self.write(*msg)
200 200 def edit(self, text):
201 201 import tempfile
202 202 (fd, name) = tempfile.mkstemp("hg")
203 203 f = os.fdopen(fd, "w")
204 204 f.write(text)
205 205 f.close()
206 206
207 207 editor = (os.environ.get("HGEDITOR") or
208 208 self.config("ui", "editor") or
209 209 os.environ.get("EDITOR", "vi"))
210 210
211 211 os.environ["HGUSER"] = self.username()
212 util.system("%s \"%s\"" % (editor, name), errprefix=_("edit failed"))
212 util.system("%s \"%s\"" % (editor, name),
213 environ={'HGUSER': self.username()},
214 onerr=util.Abort, errprefix=_("edit failed"))
213 215
214 216 t = open(name).read()
215 217 t = re.sub("(?m)^HG:.*\n", "", t)
216 218
217 219 os.unlink(name)
218 220
219 221 return t
@@ -1,756 +1,760
1 1 """
2 2 util.py - Mercurial utility functions and platform specfic implementations
3 3
4 4 Copyright 2005 K. Thananchayan <thananck@yahoo.com>
5 5
6 6 This software may be used and distributed according to the terms
7 7 of the GNU General Public License, incorporated herein by reference.
8 8
9 9 This contains helper routines that are independent of the SCM core and hide
10 10 platform-specific details from the core.
11 11 """
12 12
13 13 import os, errno
14 14 from i18n import gettext as _
15 15 from demandload import *
16 16 demandload(globals(), "cStringIO errno popen2 re shutil sys tempfile")
17 17 demandload(globals(), "threading time")
18 18
19 19 def pipefilter(s, cmd):
20 20 '''filter string S through command CMD, returning its output'''
21 21 (pout, pin) = popen2.popen2(cmd, -1, 'b')
22 22 def writer():
23 23 pin.write(s)
24 24 pin.close()
25 25
26 26 # we should use select instead on UNIX, but this will work on most
27 27 # systems, including Windows
28 28 w = threading.Thread(target=writer)
29 29 w.start()
30 30 f = pout.read()
31 31 pout.close()
32 32 w.join()
33 33 return f
34 34
35 35 def tempfilter(s, cmd):
36 36 '''filter string S through a pair of temporary files with CMD.
37 37 CMD is used as a template to create the real command to be run,
38 38 with the strings INFILE and OUTFILE replaced by the real names of
39 39 the temporary files generated.'''
40 40 inname, outname = None, None
41 41 try:
42 42 infd, inname = tempfile.mkstemp(prefix='hgfin')
43 43 fp = os.fdopen(infd, 'wb')
44 44 fp.write(s)
45 45 fp.close()
46 46 outfd, outname = tempfile.mkstemp(prefix='hgfout')
47 47 os.close(outfd)
48 48 cmd = cmd.replace('INFILE', inname)
49 49 cmd = cmd.replace('OUTFILE', outname)
50 50 code = os.system(cmd)
51 51 if code: raise Abort(_("command '%s' failed: %s") %
52 52 (cmd, explain_exit(code)))
53 53 return open(outname, 'rb').read()
54 54 finally:
55 55 try:
56 56 if inname: os.unlink(inname)
57 57 except: pass
58 58 try:
59 59 if outname: os.unlink(outname)
60 60 except: pass
61 61
62 62 filtertable = {
63 63 'tempfile:': tempfilter,
64 64 'pipe:': pipefilter,
65 65 }
66 66
67 67 def filter(s, cmd):
68 68 "filter a string through a command that transforms its input to its output"
69 69 for name, fn in filtertable.iteritems():
70 70 if cmd.startswith(name):
71 71 return fn(s, cmd[len(name):].lstrip())
72 72 return pipefilter(s, cmd)
73 73
74 74 def patch(strip, patchname, ui):
75 75 """apply the patch <patchname> to the working directory.
76 76 a list of patched files is returned"""
77 77 fp = os.popen('patch -p%d < "%s"' % (strip, patchname))
78 78 files = {}
79 79 for line in fp:
80 80 line = line.rstrip()
81 81 ui.status("%s\n" % line)
82 82 if line.startswith('patching file '):
83 83 pf = parse_patch_output(line)
84 84 files.setdefault(pf, 1)
85 85 code = fp.close()
86 86 if code:
87 87 raise Abort(_("patch command failed: %s") % explain_exit(code)[0])
88 88 return files.keys()
89 89
90 90 def binary(s):
91 91 """return true if a string is binary data using diff's heuristic"""
92 92 if s and '\0' in s[:4096]:
93 93 return True
94 94 return False
95 95
96 96 def unique(g):
97 97 """return the uniq elements of iterable g"""
98 98 seen = {}
99 99 for f in g:
100 100 if f not in seen:
101 101 seen[f] = 1
102 102 yield f
103 103
104 104 class Abort(Exception):
105 105 """Raised if a command needs to print an error and exit."""
106 106
107 107 def always(fn): return True
108 108 def never(fn): return False
109 109
110 110 def patkind(name, dflt_pat='glob'):
111 111 """Split a string into an optional pattern kind prefix and the
112 112 actual pattern."""
113 113 for prefix in 're', 'glob', 'path', 'relglob', 'relpath', 'relre':
114 114 if name.startswith(prefix + ':'): return name.split(':', 1)
115 115 return dflt_pat, name
116 116
117 117 def globre(pat, head='^', tail='$'):
118 118 "convert a glob pattern into a regexp"
119 119 i, n = 0, len(pat)
120 120 res = ''
121 121 group = False
122 122 def peek(): return i < n and pat[i]
123 123 while i < n:
124 124 c = pat[i]
125 125 i = i+1
126 126 if c == '*':
127 127 if peek() == '*':
128 128 i += 1
129 129 res += '.*'
130 130 else:
131 131 res += '[^/]*'
132 132 elif c == '?':
133 133 res += '.'
134 134 elif c == '[':
135 135 j = i
136 136 if j < n and pat[j] in '!]':
137 137 j += 1
138 138 while j < n and pat[j] != ']':
139 139 j += 1
140 140 if j >= n:
141 141 res += '\\['
142 142 else:
143 143 stuff = pat[i:j].replace('\\','\\\\')
144 144 i = j + 1
145 145 if stuff[0] == '!':
146 146 stuff = '^' + stuff[1:]
147 147 elif stuff[0] == '^':
148 148 stuff = '\\' + stuff
149 149 res = '%s[%s]' % (res, stuff)
150 150 elif c == '{':
151 151 group = True
152 152 res += '(?:'
153 153 elif c == '}' and group:
154 154 res += ')'
155 155 group = False
156 156 elif c == ',' and group:
157 157 res += '|'
158 158 else:
159 159 res += re.escape(c)
160 160 return head + res + tail
161 161
162 162 _globchars = {'[': 1, '{': 1, '*': 1, '?': 1}
163 163
164 164 def pathto(n1, n2):
165 165 '''return the relative path from one place to another.
166 166 this returns a path in the form used by the local filesystem, not hg.'''
167 167 if not n1: return localpath(n2)
168 168 a, b = n1.split('/'), n2.split('/')
169 169 a.reverse()
170 170 b.reverse()
171 171 while a and b and a[-1] == b[-1]:
172 172 a.pop()
173 173 b.pop()
174 174 b.reverse()
175 175 return os.sep.join((['..'] * len(a)) + b)
176 176
177 177 def canonpath(root, cwd, myname):
178 178 """return the canonical path of myname, given cwd and root"""
179 179 if root == os.sep:
180 180 rootsep = os.sep
181 181 else:
182 182 rootsep = root + os.sep
183 183 name = myname
184 184 if not name.startswith(os.sep):
185 185 name = os.path.join(root, cwd, name)
186 186 name = os.path.normpath(name)
187 187 if name.startswith(rootsep):
188 188 return pconvert(name[len(rootsep):])
189 189 elif name == root:
190 190 return ''
191 191 else:
192 192 raise Abort('%s not under root' % myname)
193 193
194 194 def matcher(canonroot, cwd='', names=['.'], inc=[], exc=[], head='', src=None):
195 195 return _matcher(canonroot, cwd, names, inc, exc, head, 'glob', src)
196 196
197 197 def cmdmatcher(canonroot, cwd='', names=['.'], inc=[], exc=[], head='', src=None):
198 198 if os.name == 'nt':
199 199 dflt_pat = 'glob'
200 200 else:
201 201 dflt_pat = 'relpath'
202 202 return _matcher(canonroot, cwd, names, inc, exc, head, dflt_pat, src)
203 203
204 204 def _matcher(canonroot, cwd, names, inc, exc, head, dflt_pat, src):
205 205 """build a function to match a set of file patterns
206 206
207 207 arguments:
208 208 canonroot - the canonical root of the tree you're matching against
209 209 cwd - the current working directory, if relevant
210 210 names - patterns to find
211 211 inc - patterns to include
212 212 exc - patterns to exclude
213 213 head - a regex to prepend to patterns to control whether a match is rooted
214 214
215 215 a pattern is one of:
216 216 'glob:<rooted glob>'
217 217 're:<rooted regexp>'
218 218 'path:<rooted path>'
219 219 'relglob:<relative glob>'
220 220 'relpath:<relative path>'
221 221 'relre:<relative regexp>'
222 222 '<rooted path or regexp>'
223 223
224 224 returns:
225 225 a 3-tuple containing
226 226 - list of explicit non-pattern names passed in
227 227 - a bool match(filename) function
228 228 - a bool indicating if any patterns were passed in
229 229
230 230 todo:
231 231 make head regex a rooted bool
232 232 """
233 233
234 234 def contains_glob(name):
235 235 for c in name:
236 236 if c in _globchars: return True
237 237 return False
238 238
239 239 def regex(kind, name, tail):
240 240 '''convert a pattern into a regular expression'''
241 241 if kind == 're':
242 242 return name
243 243 elif kind == 'path':
244 244 return '^' + re.escape(name) + '(?:/|$)'
245 245 elif kind == 'relglob':
246 246 return head + globre(name, '(?:|.*/)', tail)
247 247 elif kind == 'relpath':
248 248 return head + re.escape(name) + tail
249 249 elif kind == 'relre':
250 250 if name.startswith('^'):
251 251 return name
252 252 return '.*' + name
253 253 return head + globre(name, '', tail)
254 254
255 255 def matchfn(pats, tail):
256 256 """build a matching function from a set of patterns"""
257 257 if not pats:
258 258 return
259 259 matches = []
260 260 for k, p in pats:
261 261 try:
262 262 pat = '(?:%s)' % regex(k, p, tail)
263 263 matches.append(re.compile(pat).match)
264 264 except re.error:
265 265 if src: raise Abort("%s: invalid pattern (%s): %s" % (src, k, p))
266 266 else: raise Abort("invalid pattern (%s): %s" % (k, p))
267 267
268 268 def buildfn(text):
269 269 for m in matches:
270 270 r = m(text)
271 271 if r:
272 272 return r
273 273
274 274 return buildfn
275 275
276 276 def globprefix(pat):
277 277 '''return the non-glob prefix of a path, e.g. foo/* -> foo'''
278 278 root = []
279 279 for p in pat.split(os.sep):
280 280 if contains_glob(p): break
281 281 root.append(p)
282 282 return '/'.join(root)
283 283
284 284 pats = []
285 285 files = []
286 286 roots = []
287 287 for kind, name in [patkind(p, dflt_pat) for p in names]:
288 288 if kind in ('glob', 'relpath'):
289 289 name = canonpath(canonroot, cwd, name)
290 290 if name == '':
291 291 kind, name = 'glob', '**'
292 292 if kind in ('glob', 'path', 're'):
293 293 pats.append((kind, name))
294 294 if kind == 'glob':
295 295 root = globprefix(name)
296 296 if root: roots.append(root)
297 297 elif kind == 'relpath':
298 298 files.append((kind, name))
299 299 roots.append(name)
300 300
301 301 patmatch = matchfn(pats, '$') or always
302 302 filematch = matchfn(files, '(?:/|$)') or always
303 303 incmatch = always
304 304 if inc:
305 305 incmatch = matchfn(map(patkind, inc), '(?:/|$)')
306 306 excmatch = lambda fn: False
307 307 if exc:
308 308 excmatch = matchfn(map(patkind, exc), '(?:/|$)')
309 309
310 310 return (roots,
311 311 lambda fn: (incmatch(fn) and not excmatch(fn) and
312 312 (fn.endswith('/') or
313 313 (not pats and not files) or
314 314 (pats and patmatch(fn)) or
315 315 (files and filematch(fn)))),
316 316 (inc or exc or (pats and pats != [('glob', '**')])) and True)
317 317
318 def system(cmd, errprefix=None):
319 """execute a shell command that must succeed"""
320 rc = os.system(cmd)
321 if rc:
322 errmsg = "%s %s" % (os.path.basename(cmd.split(None, 1)[0]),
323 explain_exit(rc)[0])
324 if errprefix:
325 errmsg = "%s: %s" % (errprefix, errmsg)
326 raise Abort(errmsg)
318 def system(cmd, environ={}, cwd=None, onerr=None, errprefix=None):
319 '''enhanced shell command execution.
320 run with environment maybe modified, maybe in different dir.
327 321
328 def esystem(cmd, environ={}, cwd=None):
329 '''enhanced shell command execution.
330 run with environment maybe modified, maybe in different dir.'''
322 if command fails and onerr is None, return status. if ui object,
323 print error message and return status, else raise onerr object as
324 exception.'''
331 325 oldenv = {}
332 326 for k in environ:
333 327 oldenv[k] = os.environ.get(k)
334 328 if cwd is not None:
335 329 oldcwd = os.getcwd()
336 330 try:
337 331 for k, v in environ.iteritems():
338 332 os.environ[k] = str(v)
339 333 if cwd is not None and oldcwd != cwd:
340 334 os.chdir(cwd)
341 return os.system(cmd)
335 rc = os.system(cmd)
336 if rc and onerr:
337 errmsg = '%s %s' % (os.path.basename(cmd.split(None, 1)[0]),
338 explain_exit(rc)[0])
339 if errprefix:
340 errmsg = '%s: %s' % (errprefix, errmsg)
341 try:
342 onerr.warn(errmsg + '\n')
343 except AttributeError:
344 raise onerr(errmsg)
345 return rc
342 346 finally:
343 347 for k, v in oldenv.iteritems():
344 348 if v is None:
345 349 del os.environ[k]
346 350 else:
347 351 os.environ[k] = v
348 352 if cwd is not None and oldcwd != cwd:
349 353 os.chdir(oldcwd)
350 354
351 355 def rename(src, dst):
352 356 """forcibly rename a file"""
353 357 try:
354 358 os.rename(src, dst)
355 359 except:
356 360 os.unlink(dst)
357 361 os.rename(src, dst)
358 362
359 363 def unlink(f):
360 364 """unlink and remove the directory if it is empty"""
361 365 os.unlink(f)
362 366 # try removing directories that might now be empty
363 367 try: os.removedirs(os.path.dirname(f))
364 368 except: pass
365 369
366 370 def copyfiles(src, dst, hardlink=None):
367 371 """Copy a directory tree using hardlinks if possible"""
368 372
369 373 if hardlink is None:
370 374 hardlink = (os.stat(src).st_dev ==
371 375 os.stat(os.path.dirname(dst)).st_dev)
372 376
373 377 if os.path.isdir(src):
374 378 os.mkdir(dst)
375 379 for name in os.listdir(src):
376 380 srcname = os.path.join(src, name)
377 381 dstname = os.path.join(dst, name)
378 382 copyfiles(srcname, dstname, hardlink)
379 383 else:
380 384 if hardlink:
381 385 try:
382 386 os_link(src, dst)
383 387 except:
384 388 hardlink = False
385 389 shutil.copy(src, dst)
386 390 else:
387 391 shutil.copy(src, dst)
388 392
389 393 def audit_path(path):
390 394 """Abort if path contains dangerous components"""
391 395 parts = os.path.normcase(path).split(os.sep)
392 396 if (os.path.splitdrive(path)[0] or parts[0] in ('.hg', '')
393 397 or os.pardir in parts):
394 398 raise Abort(_("path contains illegal component: %s\n") % path)
395 399
396 400 def opener(base, audit=True):
397 401 """
398 402 return a function that opens files relative to base
399 403
400 404 this function is used to hide the details of COW semantics and
401 405 remote file access from higher level code.
402 406 """
403 407 p = base
404 408 audit_p = audit
405 409
406 410 def mktempcopy(name):
407 411 d, fn = os.path.split(name)
408 412 fd, temp = tempfile.mkstemp(prefix=fn, dir=d)
409 413 fp = os.fdopen(fd, "wb")
410 414 try:
411 415 fp.write(file(name, "rb").read())
412 416 except:
413 417 try: os.unlink(temp)
414 418 except: pass
415 419 raise
416 420 fp.close()
417 421 st = os.lstat(name)
418 422 os.chmod(temp, st.st_mode)
419 423 return temp
420 424
421 425 class atomicfile(file):
422 426 """the file will only be copied on close"""
423 427 def __init__(self, name, mode, atomic=False):
424 428 self.__name = name
425 429 self.temp = mktempcopy(name)
426 430 file.__init__(self, self.temp, mode)
427 431 def close(self):
428 432 if not self.closed:
429 433 file.close(self)
430 434 rename(self.temp, self.__name)
431 435 def __del__(self):
432 436 self.close()
433 437
434 438 def o(path, mode="r", text=False, atomic=False):
435 439 if audit_p:
436 440 audit_path(path)
437 441 f = os.path.join(p, path)
438 442
439 443 if not text:
440 444 mode += "b" # for that other OS
441 445
442 446 if mode[0] != "r":
443 447 try:
444 448 nlink = nlinks(f)
445 449 except OSError:
446 450 d = os.path.dirname(f)
447 451 if not os.path.isdir(d):
448 452 os.makedirs(d)
449 453 else:
450 454 if atomic:
451 455 return atomicfile(f, mode)
452 456 if nlink > 1:
453 457 rename(mktempcopy(f), f)
454 458 return file(f, mode)
455 459
456 460 return o
457 461
458 462 def _makelock_file(info, pathname):
459 463 ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
460 464 os.write(ld, info)
461 465 os.close(ld)
462 466
463 467 def _readlock_file(pathname):
464 468 return file(pathname).read()
465 469
466 470 def nlinks(pathname):
467 471 """Return number of hardlinks for the given file."""
468 472 return os.stat(pathname).st_nlink
469 473
470 474 if hasattr(os, 'link'):
471 475 os_link = os.link
472 476 else:
473 477 def os_link(src, dst):
474 478 raise OSError(0, _("Hardlinks not supported"))
475 479
476 480 # Platform specific variants
477 481 if os.name == 'nt':
478 482 demandload(globals(), "msvcrt")
479 483 nulldev = 'NUL:'
480 484
481 485 class winstdout:
482 486 '''stdout on windows misbehaves if sent through a pipe'''
483 487
484 488 def __init__(self, fp):
485 489 self.fp = fp
486 490
487 491 def __getattr__(self, key):
488 492 return getattr(self.fp, key)
489 493
490 494 def close(self):
491 495 try:
492 496 self.fp.close()
493 497 except: pass
494 498
495 499 def write(self, s):
496 500 try:
497 501 return self.fp.write(s)
498 502 except IOError, inst:
499 503 if inst.errno != 0: raise
500 504 self.close()
501 505 raise IOError(errno.EPIPE, 'Broken pipe')
502 506
503 507 sys.stdout = winstdout(sys.stdout)
504 508
505 509 try:
506 510 import win32api, win32process
507 511 filename = win32process.GetModuleFileNameEx(win32api.GetCurrentProcess(), 0)
508 512 systemrc = os.path.join(os.path.dirname(filename), 'mercurial.ini')
509 513
510 514 except ImportError:
511 515 systemrc = r'c:\mercurial\mercurial.ini'
512 516 pass
513 517
514 518 rcpath = (systemrc,
515 519 os.path.join(os.path.expanduser('~'), 'mercurial.ini'))
516 520
517 521 def parse_patch_output(output_line):
518 522 """parses the output produced by patch and returns the file name"""
519 523 pf = output_line[14:]
520 524 if pf[0] == '`':
521 525 pf = pf[1:-1] # Remove the quotes
522 526 return pf
523 527
524 528 try: # ActivePython can create hard links using win32file module
525 529 import win32api, win32con, win32file
526 530
527 531 def os_link(src, dst): # NB will only succeed on NTFS
528 532 win32file.CreateHardLink(dst, src)
529 533
530 534 def nlinks(pathname):
531 535 """Return number of hardlinks for the given file."""
532 536 try:
533 537 fh = win32file.CreateFile(pathname,
534 538 win32file.GENERIC_READ, win32file.FILE_SHARE_READ,
535 539 None, win32file.OPEN_EXISTING, 0, None)
536 540 res = win32file.GetFileInformationByHandle(fh)
537 541 fh.Close()
538 542 return res[7]
539 543 except:
540 544 return os.stat(pathname).st_nlink
541 545
542 546 def testpid(pid):
543 547 '''return False if pid is dead, True if running or not known'''
544 548 try:
545 549 win32api.OpenProcess(win32con.PROCESS_QUERY_INFORMATION,
546 550 False, pid)
547 551 except:
548 552 return True
549 553
550 554 except ImportError:
551 555 def testpid(pid):
552 556 '''return False if pid dead, True if running or not known'''
553 557 return True
554 558
555 559 def is_exec(f, last):
556 560 return last
557 561
558 562 def set_exec(f, mode):
559 563 pass
560 564
561 565 def set_binary(fd):
562 566 msvcrt.setmode(fd.fileno(), os.O_BINARY)
563 567
564 568 def pconvert(path):
565 569 return path.replace("\\", "/")
566 570
567 571 def localpath(path):
568 572 return path.replace('/', '\\')
569 573
570 574 def normpath(path):
571 575 return pconvert(os.path.normpath(path))
572 576
573 577 makelock = _makelock_file
574 578 readlock = _readlock_file
575 579
576 580 def explain_exit(code):
577 581 return _("exited with status %d") % code, code
578 582
579 583 else:
580 584 nulldev = '/dev/null'
581 585
582 586 def rcfiles(path):
583 587 rcs = [os.path.join(path, 'hgrc')]
584 588 rcdir = os.path.join(path, 'hgrc.d')
585 589 try:
586 590 rcs.extend([os.path.join(rcdir, f) for f in os.listdir(rcdir)
587 591 if f.endswith(".rc")])
588 592 except OSError, inst: pass
589 593 return rcs
590 594 rcpath = []
591 595 if len(sys.argv) > 0:
592 596 rcpath.extend(rcfiles(os.path.dirname(sys.argv[0]) + '/../etc/mercurial'))
593 597 rcpath.extend(rcfiles('/etc/mercurial'))
594 598 rcpath.append(os.path.expanduser('~/.hgrc'))
595 599 rcpath = [os.path.normpath(f) for f in rcpath]
596 600
597 601 def parse_patch_output(output_line):
598 602 """parses the output produced by patch and returns the file name"""
599 603 pf = output_line[14:]
600 604 if pf.startswith("'") and pf.endswith("'") and pf.find(" ") >= 0:
601 605 pf = pf[1:-1] # Remove the quotes
602 606 return pf
603 607
604 608 def is_exec(f, last):
605 609 """check whether a file is executable"""
606 610 return (os.stat(f).st_mode & 0100 != 0)
607 611
608 612 def set_exec(f, mode):
609 613 s = os.stat(f).st_mode
610 614 if (s & 0100 != 0) == mode:
611 615 return
612 616 if mode:
613 617 # Turn on +x for every +r bit when making a file executable
614 618 # and obey umask.
615 619 umask = os.umask(0)
616 620 os.umask(umask)
617 621 os.chmod(f, s | (s & 0444) >> 2 & ~umask)
618 622 else:
619 623 os.chmod(f, s & 0666)
620 624
621 625 def set_binary(fd):
622 626 pass
623 627
624 628 def pconvert(path):
625 629 return path
626 630
627 631 def localpath(path):
628 632 return path
629 633
630 634 normpath = os.path.normpath
631 635
632 636 def makelock(info, pathname):
633 637 try:
634 638 os.symlink(info, pathname)
635 639 except OSError, why:
636 640 if why.errno == errno.EEXIST:
637 641 raise
638 642 else:
639 643 _makelock_file(info, pathname)
640 644
641 645 def readlock(pathname):
642 646 try:
643 647 return os.readlink(pathname)
644 648 except OSError, why:
645 649 if why.errno == errno.EINVAL:
646 650 return _readlock_file(pathname)
647 651 else:
648 652 raise
649 653
650 654 def testpid(pid):
651 655 '''return False if pid dead, True if running or not sure'''
652 656 try:
653 657 os.kill(pid, 0)
654 658 return True
655 659 except OSError, inst:
656 660 return inst.errno != errno.ESRCH
657 661
658 662 def explain_exit(code):
659 663 """return a 2-tuple (desc, code) describing a process's status"""
660 664 if os.WIFEXITED(code):
661 665 val = os.WEXITSTATUS(code)
662 666 return _("exited with status %d") % val, val
663 667 elif os.WIFSIGNALED(code):
664 668 val = os.WTERMSIG(code)
665 669 return _("killed by signal %d") % val, val
666 670 elif os.WIFSTOPPED(code):
667 671 val = os.WSTOPSIG(code)
668 672 return _("stopped by signal %d") % val, val
669 673 raise ValueError(_("invalid exit code"))
670 674
671 675 class chunkbuffer(object):
672 676 """Allow arbitrary sized chunks of data to be efficiently read from an
673 677 iterator over chunks of arbitrary size."""
674 678
675 679 def __init__(self, in_iter, targetsize = 2**16):
676 680 """in_iter is the iterator that's iterating over the input chunks.
677 681 targetsize is how big a buffer to try to maintain."""
678 682 self.in_iter = iter(in_iter)
679 683 self.buf = ''
680 684 self.targetsize = int(targetsize)
681 685 if self.targetsize <= 0:
682 686 raise ValueError(_("targetsize must be greater than 0, was %d") %
683 687 targetsize)
684 688 self.iterempty = False
685 689
686 690 def fillbuf(self):
687 691 """Ignore target size; read every chunk from iterator until empty."""
688 692 if not self.iterempty:
689 693 collector = cStringIO.StringIO()
690 694 collector.write(self.buf)
691 695 for ch in self.in_iter:
692 696 collector.write(ch)
693 697 self.buf = collector.getvalue()
694 698 self.iterempty = True
695 699
696 700 def read(self, l):
697 701 """Read L bytes of data from the iterator of chunks of data.
698 702 Returns less than L bytes if the iterator runs dry."""
699 703 if l > len(self.buf) and not self.iterempty:
700 704 # Clamp to a multiple of self.targetsize
701 705 targetsize = self.targetsize * ((l // self.targetsize) + 1)
702 706 collector = cStringIO.StringIO()
703 707 collector.write(self.buf)
704 708 collected = len(self.buf)
705 709 for chunk in self.in_iter:
706 710 collector.write(chunk)
707 711 collected += len(chunk)
708 712 if collected >= targetsize:
709 713 break
710 714 if collected < targetsize:
711 715 self.iterempty = True
712 716 self.buf = collector.getvalue()
713 717 s, self.buf = self.buf[:l], buffer(self.buf, l)
714 718 return s
715 719
716 720 def filechunkiter(f, size = 65536):
717 721 """Create a generator that produces all the data in the file size
718 722 (default 65536) bytes at a time. Chunks may be less than size
719 723 bytes if the chunk is the last chunk in the file, or the file is a
720 724 socket or some other type of file that sometimes reads less data
721 725 than is requested."""
722 726 s = f.read(size)
723 727 while len(s) > 0:
724 728 yield s
725 729 s = f.read(size)
726 730
727 731 def makedate():
728 732 lt = time.localtime()
729 733 if lt[8] == 1 and time.daylight:
730 734 tz = time.altzone
731 735 else:
732 736 tz = time.timezone
733 737 return time.mktime(lt), tz
734 738
735 739 def datestr(date=None, format='%c'):
736 740 """represent a (unixtime, offset) tuple as a localized time.
737 741 unixtime is seconds since the epoch, and offset is the time zone's
738 742 number of seconds away from UTC."""
739 743 t, tz = date or makedate()
740 744 return ("%s %+03d%02d" %
741 745 (time.strftime(format, time.gmtime(float(t) - tz)),
742 746 -tz / 3600,
743 747 ((-tz % 3600) / 60)))
744 748
745 749 def walkrepos(path):
746 750 '''yield every hg repository under path, recursively.'''
747 751 def errhandler(err):
748 752 if err.filename == path:
749 753 raise err
750 754
751 755 for root, dirs, files in os.walk(path, onerror=errhandler):
752 756 for d in dirs:
753 757 if d == '.hg':
754 758 yield root
755 759 dirs[:] = []
756 760 break
General Comments 0
You need to be logged in to leave comments. Login now