##// END OF EJS Templates
make readconfig take a filename instead of a file pointer as argument...
Benoit Boissinot -
r1473:7d66ce98 default
parent child Browse files
Show More
@@ -0,0 +1,7
#!/bin/sh
# Regression test: an unparsable .hg/hgrc should yield a readable error,
# not a traceback, when running a normal command.
mkdir t
cd t
hg init
echo "invalid" > .hg/hgrc
# normalize the absolute path in the error output so it is host-independent
hg status 2>&1 |sed -e "s:/.*\(/t/.*\):...\1:"
@@ -1,1023 +1,1023
1 1 # hgweb.py - web interface to a mercurial repository
2 2 #
3 3 # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
4 4 # Copyright 2005 Matt Mackall <mpm@selenic.com>
5 5 #
6 6 # This software may be used and distributed according to the terms
7 7 # of the GNU General Public License, incorporated herein by reference.
8 8
9 9 import os, cgi, sys
10 10 from demandload import demandload
11 11 demandload(globals(), "mdiff time re socket zlib errno ui hg ConfigParser")
12 12 demandload(globals(), "zipfile tempfile StringIO tarfile BaseHTTPServer util")
13 13 demandload(globals(), "mimetypes")
14 14 from node import *
15 15 from i18n import gettext as _
16 16
def templatepath():
    """Locate the template directory shipped next to this module.

    Returns the first existing candidate directory, or None if neither
    "templates" nor "../templates" exists beside the module.
    """
    here = os.path.dirname(__file__)
    for candidate in ("templates", "../templates"):
        full = os.path.join(here, candidate)
        if os.path.isdir(full):
            return full
22 22
def age(x):
    """Render the date tuple *x* (seconds-since-epoch first) as a rough,
    human-readable age such as "2 hours" or "3 days"."""
    def fmt(unit, count):
        # naive pluralization with a trailing "s" is fine for these units
        if count == 1:
            return "%d %s" % (count, unit)
        return "%d %ss" % (count, unit)

    elapsed = max(1, int(time.time() - x[0]))

    # largest unit first; the trailing 1-second scale guarantees a match
    for unit, seconds in (("year", 3600 * 24 * 365),
                          ("month", 3600 * 24 * 30),
                          ("week", 3600 * 24 * 7),
                          ("day", 3600 * 24),
                          ("hour", 3600),
                          ("minute", 60),
                          ("second", 1)):
        count = elapsed / seconds
        if count >= 2 or seconds == 1:
            return fmt(unit, count)
49 49
def nl2br(text):
    """Insert an HTML <br/> before every newline in *text*."""
    return "<br/>\n".join(text.split('\n'))
52 52
def obfuscate(text):
    """Encode every character of *text* as an HTML numeric entity
    (used to hide e.g. email addresses from scrapers)."""
    parts = []
    for ch in text:
        parts.append('&#%d;' % ord(ch))
    return ''.join(parts)
55 55
def up(p):
    """Return the parent directory of path *p*, normalized to have a
    leading slash and a trailing slash ("/" for top-level paths)."""
    if p[0] != "/":
        p = "/" + p
    if p[-1] == "/":
        p = p[:-1]
    parent = os.path.dirname(p)
    if parent == "/":
        return "/"
    return parent + "/"
65 65
def get_mtime(repo_path):
    """Modification time of the repository at *repo_path*: the changelog
    file if it exists, otherwise the .hg directory itself."""
    hg_path = os.path.join(repo_path, ".hg")
    cl_path = os.path.join(hg_path, "00changelog.i")
    if os.path.exists(cl_path):
        return os.stat(cl_path).st_mtime
    return os.stat(hg_path).st_mtime
73 73
class hgrequest:
    """CGI-style request wrapper: input/output streams, an environment
    mapping, and the parsed form (name -> list of values)."""
    def __init__(self, inp=None, out=None, env=None):
        self.inp = inp or sys.stdin
        self.out = out or sys.stdout
        self.env = env or os.environ
        # parse query string / POST body into {name: [values]}
        self.form = cgi.parse(self.inp, self.env, keep_blank_values=1)

    def write(self, *things):
        """Write strings, or (recursively) iterables of strings, to the
        client; a client disconnect (ECONNRESET) is silently ignored."""
        for thing in things:
            if hasattr(thing, "__iter__"):
                for part in thing:
                    self.write(part)
            else:
                try:
                    self.out.write(str(thing))
                except socket.error, inst:
                    # only swallow "connection reset by peer"
                    if inst[0] != errno.ECONNRESET:
                        raise

    def header(self, headers=[('Content-type','text/html')]):
        """Emit HTTP response headers followed by the blank separator line."""
        for header in headers:
            self.out.write("%s: %s\r\n" % header)
        self.out.write("\r\n")

    def httphdr(self, type, file="", size=0):
        """Send headers for a response of MIME *type*; when *file* is given
        the response is served as an attachment, with Content-length when
        *size* is known."""

        headers = [('Content-type', type)]
        if file:
            headers.append(('Content-disposition', 'attachment; filename=%s' % file))
        if size > 0:
            headers.append(('Content-length', str(size)))
        self.header(headers)
106 106
class templater:
    """Minimal #key# substitution template engine.

    The map file binds template names either to inline strings
    (name = "text") or to template files (name = relpath), resolved
    relative to the map file's directory and loaded lazily.
    """
    def __init__(self, mapfile, filters={}, defaults={}):
        self.cache = {}    # template name -> template text
        self.map = {}      # template name -> template file path
        self.base = os.path.dirname(mapfile)
        self.filters = filters
        self.defaults = defaults

        for l in file(mapfile):
            m = re.match(r'(\S+)\s*=\s*"(.*)"$', l)
            if m:
                # quoted value: inline template text
                self.cache[m.group(1)] = m.group(2)
            else:
                m = re.match(r'(\S+)\s*=\s*(\S+)', l)
                if m:
                    # bare value: path to a template file
                    self.map[m.group(1)] = os.path.join(self.base, m.group(2))
                else:
                    raise LookupError(_("unknown map entry '%s'") % l)

    def __call__(self, t, **map):
        """Expand template *t*, layering *map* over the defaults."""
        m = self.defaults.copy()
        m.update(map)
        try:
            tmpl = self.cache[t]
        except KeyError:
            # load the template file on first use and cache the text
            tmpl = self.cache[t] = file(self.map[t]).read()
        return self.template(tmpl, self.filters, **m)

    def template(self, tmpl, filters={}, **map):
        """Generator yielding the expansion of *tmpl*.

        #key# substitutes map[key] (calling it with **map if callable);
        #key%name# expands template 'name' once per item of the value;
        #key|f1|f2# pipes the value through the named filters.
        """
        while tmpl:
            m = re.search(r"#([a-zA-Z0-9]+)((%[a-zA-Z0-9]+)*)((\|[a-zA-Z0-9]+)*)#", tmpl)
            if m:
                # literal text before the marker
                yield tmpl[:m.start(0)]
                v = map.get(m.group(1), "")
                v = callable(v) and v(**map) or v

                format = m.group(2)   # "%name" repeat directive, if any
                fl = m.group(4)       # "|filter" chain, if any

                if format:
                    q = v.__iter__
                    for i in q():
                        # each item is a mapping overlaid on the current one
                        lm = map.copy()
                        lm.update(i)
                        yield self(format[1:], **lm)

                    v = ""

                elif fl:
                    for f in fl.split("|")[1:]:
                        v = filters[f](v)

                yield v
                tmpl = tmpl[m.end(0):]
            else:
                # no more markers: emit the remainder and stop
                yield tmpl
                return
164 164
# Filters available to every template; each value is a unary callable
# applied via the #key|filter# template syntax.
common_filters = {
    "escape": cgi.escape,                  # HTML-escape &, <, >
    "strip": lambda x: x.strip(),
    "rstrip": lambda x: x.rstrip(),
    "age": age,                            # human-readable elapsed time
    "date": lambda x: util.datestr(x),
    "addbreaks": nl2br,                    # newline -> <br/>
    "obfuscate": obfuscate,                # entity-encode (email addresses)
    "short": (lambda x: x[:12]),           # abbreviated hex node
    "firstline": (lambda x: x.splitlines(1)[0]),
    "permissions": (lambda x: x and "-rwxr-xr-x" or "-rw-r--r--"),
    "rfc822date": lambda x: util.datestr(x, "%a, %d %b %Y %H:%M:%S"),
}
178 178
179 179 class hgweb:
    def __init__(self, repo, name=None):
        """Wrap *repo* for web serving; *repo* may be a repository object
        or a filesystem path string."""
        if type(repo) == type(""):
            # a path was given: open the repository ourselves
            self.repo = hg.repository(ui.ui(), repo)
        else:
            self.repo = repo

        self.mtime = -1            # sentinel so refresh() reloads on first use
        self.reponame = name
        self.archives = 'zip', 'gz', 'bz2'
189 189
    def refresh(self):
        """Reopen the repository and reread [web] settings if the on-disk
        repository changed since the last request."""
        mtime = get_mtime(self.repo.root)
        if mtime != self.mtime:
            self.mtime = mtime
            self.repo = hg.repository(self.repo.ui, self.repo.root)
            self.maxchanges = int(self.repo.ui.config("web", "maxchanges", 10))
            self.maxfiles = int(self.repo.ui.config("web", "maxfiles", 10))
            self.allowpull = self.repo.ui.configbool("web", "allowpull", True)
198 198
199 199 def listfiles(self, files, mf):
200 200 for f in files[:self.maxfiles]:
201 201 yield self.t("filenodelink", node=hex(mf[f]), file=f)
202 202 if len(files) > self.maxfiles:
203 203 yield self.t("fileellipses")
204 204
205 205 def listfilediffs(self, files, changeset):
206 206 for f in files[:self.maxfiles]:
207 207 yield self.t("filedifflink", node=hex(changeset), file=f)
208 208 if len(files) > self.maxfiles:
209 209 yield self.t("fileellipses")
210 210
    def parents(self, node, parents=[], rev=None, hide=False, **args):
        """Yield one dict per non-null parent of *node*; extra **args are
        copied into each dict.  *rev* maps a node to its revision number.
        With hide=True, a single "trivial" parent (the immediately
        preceding revision) is suppressed entirely."""
        if not rev:
            rev = lambda x: ""
        parents = [p for p in parents if p != nullid]
        # NOTE(review): with the default rev (returns ""), hide=True would
        # compute "" - 1 below and raise TypeError; callers that pass
        # hide=True appear to always supply a real rev -- confirm.
        if hide and len(parents) == 1 and rev(parents[0]) == rev(node) - 1:
            return
        for p in parents:
            yield dict(node=hex(p), rev=rev(p), **args)
219 219
    def showtag(self, t1, node=nullid, **args):
        """Expand template *t1* once for every tag attached to *node*."""
        for t in self.repo.nodetags(node):
            yield self.t(t1, tag=t, **args)
223 223
    def diff(self, node1, node2, files):
        """Yield rendered unified-diff blocks between changesets *node1*
        and *node2*, optionally restricted to the given *files*."""
        def filterfiles(list, files):
            # keep exact matches plus anything under a listed directory
            l = [x for x in list if x in files]

            for f in files:
                if f[-1] != os.sep:
                    f += os.sep
                l += [x for x in list if x.startswith(f)]
            return l

        parity = [0]   # one-element list so the closure below can mutate it
        def diffblock(diff, f, fn):
            yield self.t("diffblock",
                         lines=prettyprintlines(diff),
                         parity=parity[0],
                         file=f,
                         filenode=hex(fn or nullid))
            parity[0] = 1 - parity[0]

        def prettyprintlines(diff):
            # dispatch each diff line to a template by its leading character
            for l in diff.splitlines(1):
                if l.startswith('+'):
                    yield self.t("difflineplus", line=l)
                elif l.startswith('-'):
                    yield self.t("difflineminus", line=l)
                elif l.startswith('@'):
                    yield self.t("difflineat", line=l)
                else:
                    yield self.t("diffline", line=l)

        r = self.repo
        cl = r.changelog
        mf = r.manifest
        change1 = cl.read(node1)
        change2 = cl.read(node2)
        mmap1 = mf.read(change1[0])
        mmap2 = mf.read(change2[0])
        date1 = util.datestr(change1[2])
        date2 = util.datestr(change2[2])

        # c = changed, a = added, d = deleted (u is unused here)
        c, a, d, u = r.changes(node1, node2)
        if files:
            c, a, d = map(lambda x: filterfiles(x, files), (c, a, d))

        # NOTE(review): the third diffblock argument is the file *content*
        # (to/tn), which gets hexlified as if it were a node -- confirm.
        for f in c:
            to = r.file(f).read(mmap1[f])
            tn = r.file(f).read(mmap2[f])
            yield diffblock(mdiff.unidiff(to, date1, tn, date2, f), f, tn)
        for f in a:
            to = None
            tn = r.file(f).read(mmap2[f])
            yield diffblock(mdiff.unidiff(to, date1, tn, date2, f), f, tn)
        for f in d:
            to = r.file(f).read(mmap1[f])
            tn = None
            yield diffblock(mdiff.unidiff(to, date1, tn, date2, f), f, tn)
280 280
    def changelog(self, pos):
        """Render the changelog page centered on revision *pos*."""
        def changenav(**map):
            # navigation offsets 1, 3, 10, 30, 100, ... in both directions
            def seq(factor=1):
                yield 1 * factor
                yield 3 * factor
                #yield 5 * factor
                for f in seq(factor * 10):
                    yield f

            l = []
            for f in seq():
                if f < self.maxchanges / 2:
                    continue
                if f > count:
                    break
                r = "%d" % f
                if pos + f < count:
                    l.append(("+" + r, pos + f))
                if pos - f >= 0:
                    l.insert(0, ("-" + r, pos - f))

            yield {"rev": 0, "label": "(0)"}

            for label, rev in l:
                yield {"label": label, "rev": rev}

            yield {"label": "tip", "rev": "tip"}

        def changelist(**map):
            parity = (start - end) & 1
            cl = self.repo.changelog
            l = [] # build a list in forward order for efficiency
            for i in range(start, end):
                n = cl.node(i)
                changes = cl.read(n)
                hn = hex(n)

                # prepend so the final list is newest-first
                l.insert(0, {"parity": parity,
                             "author": changes[1],
                             "parent": self.parents(n, cl.parents(n), cl.rev,
                                                    hide=True),
                             "changelogtag": self.showtag("changelogtag",n),
                             "manifest": hex(changes[0]),
                             "desc": changes[4],
                             "date": changes[2],
                             "files": self.listfilediffs(changes[3], n),
                             "rev": i,
                             "node": hn})
                parity = 1 - parity

            for e in l:
                yield e

        cl = self.repo.changelog
        mf = cl.read(cl.tip())[0]
        count = cl.count()
        # clamp the [start, end) window of maxchanges revisions around pos
        start = max(0, pos - self.maxchanges + 1)
        end = min(count, start + self.maxchanges)
        pos = end - 1

        yield self.t('changelog',
                     changenav=changenav,
                     manifest=hex(mf),
                     rev=pos, changesets=count, entries=changelist)
345 345
    def search(self, query):
        """Render changesets whose author, description, or file list
        contains every whitespace-separated word of *query*
        (case-insensitive), newest first, up to maxchanges results."""

        def changelist(**map):
            cl = self.repo.changelog
            count = 0
            qw = query.lower().split()

            def revgen():
                # walk revisions newest-first in batches of 100
                for i in range(cl.count() - 1, 0, -100):
                    l = []
                    for j in range(max(0, i - 100), i):
                        n = cl.node(j)
                        changes = cl.read(n)
                        l.append((n, j, changes))
                    l.reverse()
                    for e in l:
                        yield e

            for n, i, changes in revgen():
                # every query word must match author, desc, or a filename
                miss = 0
                for q in qw:
                    if not (q in changes[1].lower() or
                            q in changes[4].lower() or
                            q in " ".join(changes[3][:20]).lower()):
                        miss = 1
                        break
                if miss:
                    continue

                count += 1
                hn = hex(n)

                yield self.t('searchentry',
                             parity=count & 1,
                             author=changes[1],
                             parent=self.parents(n, cl.parents(n), cl.rev),
                             changelogtag=self.showtag("changelogtag",n),
                             manifest=hex(changes[0]),
                             desc=changes[4],
                             date=changes[2],
                             files=self.listfilediffs(changes[3], n),
                             rev=i,
                             node=hn)

                if count >= self.maxchanges:
                    break

        cl = self.repo.changelog
        mf = cl.read(cl.tip())[0]

        yield self.t('search',
                     query=query,
                     manifest=hex(mf),
                     entries=changelist)
400 400
    def changeset(self, nodeid):
        """Render the changeset page for *nodeid*: metadata, file links,
        the diff against the first parent, and enabled archive links."""
        cl = self.repo.changelog
        n = self.repo.lookup(nodeid)
        nodeid = hex(n)
        changes = cl.read(n)
        p1 = cl.parents(n)[0]

        files = []
        mf = self.repo.manifest.read(changes[0])
        for f in changes[3]:
            files.append(self.t("filenodelink",
                                filenode=hex(mf.get(f, nullid)), file=f))

        def diff(**map):
            yield self.diff(p1, n, None)

        def archivelist():
            # only archive types explicitly enabled in [web] config
            for i in self.archives:
                if self.repo.ui.configbool("web", "allow" + i, False):
                    yield {"type" : i, "node" : nodeid}

        yield self.t('changeset',
                     diff=diff,
                     rev=cl.rev(n),
                     node=nodeid,
                     parent=self.parents(n, cl.parents(n), cl.rev),
                     changesettag=self.showtag("changesettag",n),
                     manifest=hex(changes[0]),
                     author=changes[1],
                     desc=changes[4],
                     date=changes[2],
                     files=files,
                     archives=archivelist())
434 434
    def filelog(self, f, filenode):
        """Render the full revision history of file *f*, newest first."""
        cl = self.repo.changelog
        fl = self.repo.file(f)
        filenode = hex(fl.lookup(filenode))
        count = fl.count()

        def entries(**map):
            l = []
            parity = (count - 1) & 1

            for i in range(count):
                n = fl.node(i)
                lr = fl.linkrev(n)
                cn = cl.node(lr)
                cs = cl.read(cl.node(lr))

                # prepend so the final list is newest-first
                l.insert(0, {"parity": parity,
                             "filenode": hex(n),
                             "filerev": i,
                             "file": f,
                             "node": hex(cn),
                             "author": cs[1],
                             "date": cs[2],
                             "parent": self.parents(n, fl.parents(n),
                                                    fl.rev, file=f),
                             "desc": cs[4]})
                parity = 1 - parity

            for e in l:
                yield e

        yield self.t("filelog", file=f, filenode=filenode, entries=entries)
467 467
    def filerevision(self, f, node):
        """Render the contents of file *f* at *node* with line numbers;
        binary files are replaced by a "(binary:TYPE)" placeholder."""
        fl = self.repo.file(f)
        n = fl.lookup(node)
        node = hex(n)
        text = fl.read(n)
        changerev = fl.linkrev(n)
        cl = self.repo.changelog
        cn = cl.node(changerev)
        cs = cl.read(cn)
        mfn = cs[0]

        mt = mimetypes.guess_type(f)[0]
        rawtext = text            # unmodified content for the "raw" view
        if util.binary(text):
            text = "(binary:%s)" % mt

        def lines():
            # splitlines(1) keeps line endings attached to each line
            for l, t in enumerate(text.splitlines(1)):
                yield {"line": t,
                       "linenumber": "% 6d" % (l + 1),
                       "parity": l & 1}

        yield self.t("filerevision",
                     file=f,
                     filenode=node,
                     path=up(f),
                     text=lines(),
                     raw=rawtext,
                     mimetype=mt,
                     rev=changerev,
                     node=hex(cn),
                     manifest=hex(mfn),
                     author=cs[1],
                     date=cs[2],
                     parent=self.parents(n, fl.parents(n), fl.rev, file=f),
                     permissions=self.repo.manifest.readflags(mfn)[f])
504 504
    def fileannotate(self, f, node):
        """Render per-line annotation (blame) of file *f* at *node*."""
        bcache = {}   # revision -> short author name
        ncache = {}   # revision -> changelog node
        fl = self.repo.file(f)
        n = fl.lookup(node)
        node = hex(n)
        changerev = fl.linkrev(n)

        cl = self.repo.changelog
        cn = cl.node(changerev)
        cs = cl.read(cn)
        mfn = cs[0]

        def annotate(**map):
            parity = 1
            last = None
            for r, l in fl.annotate(n):
                try:
                    cnode = ncache[r]
                except KeyError:
                    cnode = ncache[r] = self.repo.changelog.node(r)

                try:
                    name = bcache[r]
                except KeyError:
                    cl = self.repo.changelog.read(cnode)
                    bcache[r] = name = self.repo.ui.shortuser(cl[1])

                # flip row shading whenever the originating changeset changes
                if last != cnode:
                    parity = 1 - parity
                    last = cnode

                yield {"parity": parity,
                       "node": hex(cnode),
                       "rev": r,
                       "author": name,
                       "file": f,
                       "line": l}

        yield self.t("fileannotate",
                     file=f,
                     filenode=node,
                     annotate=annotate,
                     path=up(f),
                     rev=changerev,
                     node=hex(cn),
                     manifest=hex(mfn),
                     author=cs[1],
                     date=cs[2],
                     parent=self.parents(n, fl.parents(n), fl.rev, file=f),
                     permissions=self.repo.manifest.readflags(mfn)[f])
556 556
    def manifest(self, mnode, path):
        """Render a directory listing of manifest *mnode* under *path*."""
        man = self.repo.manifest
        mn = man.lookup(mnode)
        mnode = hex(mn)
        mf = man.read(mn)
        rev = man.rev(mn)
        node = self.repo.changelog.node(rev)
        mff = man.readflags(mn)

        # short name -> (full path, filenode); filenode None marks a subdir
        files = {}

        p = path[1:]        # manifest paths have no leading slash
        l = len(p)

        for f,n in mf.items():
            if f[:l] != p:
                continue
            remain = f[l:]
            if "/" in remain:
                short = remain[:remain.find("/") + 1] # bleah
                files[short] = (f, None)
            else:
                short = os.path.basename(remain)
                files[short] = (f, n)

        def filelist(**map):
            # plain files only, sorted by short name
            parity = 0
            fl = files.keys()
            fl.sort()
            for f in fl:
                full, fnode = files[f]
                if not fnode:
                    continue

                yield {"file": full,
                       "manifest": mnode,
                       "filenode": hex(fnode),
                       "parity": parity,
                       "basename": f,
                       "permissions": mff[full]}
                parity = 1 - parity

        def dirlist(**map):
            # subdirectories only, sorted by short name
            parity = 0
            fl = files.keys()
            fl.sort()
            for f in fl:
                full, fnode = files[f]
                if fnode:
                    continue

                yield {"parity": parity,
                       "path": os.path.join(path, f),
                       "manifest": mnode,
                       "basename": f[:-1]}
                parity = 1 - parity

        yield self.t("manifest",
                     manifest=mnode,
                     rev=rev,
                     node=hex(node),
                     path=path,
                     up=up(path),
                     fentries=filelist,
                     dentries=dirlist)
622 622
    def tags(self):
        """Render the tags page, most recently added tags first."""
        cl = self.repo.changelog
        mf = cl.read(cl.tip())[0]

        i = self.repo.tagslist()
        i.reverse()

        def entries(**map):
            parity = 0
            for k,n in i:
                yield {"parity": parity,
                       "tag": k,
                       "node": hex(n)}
                parity = 1 - parity

        yield self.t("tags",
                     manifest=hex(mf),
                     entries=entries)
641 641
    def filediff(self, file, changeset):
        """Render the diff of a single *file* as changed by *changeset*
        (against the changeset's first parent)."""
        cl = self.repo.changelog
        n = self.repo.lookup(changeset)
        changeset = hex(n)
        p1 = cl.parents(n)[0]
        cs = cl.read(n)
        mf = self.repo.manifest.read(cs[0])

        def diff(**map):
            yield self.diff(p1, n, file)

        yield self.t("filediff",
                     file=file,
                     filenode=hex(mf.get(file, nullid)),
                     node=changeset,
                     rev=self.repo.changelog.rev(n),
                     parent=self.parents(n, cl.parents(n), cl.rev),
                     diff=diff)
660 660
    def archive(self, req, cnode, type):
        """Stream an archive of changeset *cnode* to the client; *type* is
        'zip', 'gz' (tar.gz), or 'bz2' (tar.bz2)."""
        cs = self.repo.changelog.read(cnode)
        mnode = cs[0]
        mf = self.repo.manifest.read(mnode)
        rev = self.repo.manifest.rev(mnode)
        # sanitize the repo name for use in the archive filename
        reponame = re.sub(r"\W+", "-", self.reponame)
        name = "%s-%s/" % (reponame, short(cnode))

        files = mf.keys()
        files.sort()

        if type == 'zip':
            # zipfile needs a seekable file: build in a temp file, then send
            tmp = tempfile.mkstemp()[1]
            try:
                zf = zipfile.ZipFile(tmp, "w", zipfile.ZIP_DEFLATED)

                for f in files:
                    zf.writestr(name + f, self.repo.file(f).read(mf[f]))
                zf.close()

                f = open(tmp, 'r')
                req.httphdr('application/zip', name[:-1] + '.zip',
                            os.path.getsize(tmp))
                req.write(f.read())
                f.close()
            finally:
                os.unlink(tmp)

        else:
            # tar formats can be streamed directly to the response
            tf = tarfile.TarFile.open(mode='w|' + type, fileobj=req.out)
            mff = self.repo.manifest.readflags(mnode)
            mtime = int(time.time())

            if type == "gz":
                encoding = "gzip"
            else:
                encoding = "x-bzip2"
            req.header([('Content-type', 'application/x-tar'),
                        ('Content-disposition', 'attachment; filename=%s%s%s' %
                         (name[:-1], '.tar.', type)),
                        ('Content-encoding', encoding)])
            for fname in files:
                rcont = self.repo.file(fname).read(mf[fname])
                finfo = tarfile.TarInfo(name + fname)
                finfo.mtime = mtime
                finfo.size = len(rcont)
                # executable bit from the manifest flags
                finfo.mode = mff[fname] and 0755 or 0644
                tf.addfile(finfo, StringIO.StringIO(rcont))
            tf.close()
710 710
    # add tags to things
    # tags -> list of changesets corresponding to tags
    # find tag, changeset, file

    def run(self, req=hgrequest()):
        """Handle one web request: refresh the repo, expand URL shortcuts,
        load the template map, and dispatch on the 'cmd' form value.

        NOTE(review): the default hgrequest() is constructed once at
        definition time and shared across calls -- confirm intended.
        """
        def header(**map):
            yield self.t("header", **map)

        def footer(**map):
            yield self.t("footer", **map)

        def expand_form(form):
            # short-hand URL parameters, e.g. ?cs=NODE -> cmd=changeset&node=NODE
            shortcuts = {
                'cl': [('cmd', ['changelog']), ('rev', None)],
                'cs': [('cmd', ['changeset']), ('node', None)],
                'f': [('cmd', ['file']), ('filenode', None)],
                'fl': [('cmd', ['filelog']), ('filenode', None)],
                'fd': [('cmd', ['filediff']), ('node', None)],
                'fa': [('cmd', ['annotate']), ('filenode', None)],
                'mf': [('cmd', ['manifest']), ('manifest', None)],
                'ca': [('cmd', ['archive']), ('node', None)],
                'tags': [('cmd', ['tags'])],
                'tip': [('cmd', ['changeset']), ('node', ['tip'])],
            }

            for k in shortcuts.iterkeys():
                if form.has_key(k):
                    for name, value in shortcuts[k]:
                        if value is None:
                            # None means: reuse the shortcut's own value
                            value = form[k]
                        form[name] = value
                    del form[k]

        self.refresh()

        expand_form(req.form)

        # choose the template map file, honoring an optional style override
        t = self.repo.ui.config("web", "templates", templatepath())
        m = os.path.join(t, "map")
        style = self.repo.ui.config("web", "style", "")
        if req.form.has_key('style'):
            style = req.form['style'][0]
        if style:
            b = os.path.basename("map-" + style)
            p = os.path.join(t, b)
            if os.path.isfile(p):
                m = p

        # reconstruct this repository's base URL for the templates
        port = req.env["SERVER_PORT"]
        port = port != "80" and (":" + port) or ""
        uri = req.env["REQUEST_URI"]
        if "?" in uri:
            uri = uri.split("?")[0]
        url = "http://%s%s%s" % (req.env["SERVER_NAME"], port, uri)
        if not self.reponame:
            self.reponame = (self.repo.ui.config("web", "name")
                             or uri.strip('/') or self.repo.root)

        self.t = templater(m, common_filters,
                           {"url": url,
                            "repo": self.reponame,
                            "header": header,
                            "footer": footer,
                            })

        if not req.form.has_key('cmd'):
            req.form['cmd'] = [self.t.cache['default'],]

        if req.form['cmd'][0] == 'changelog':
            c = self.repo.changelog.count() - 1
            hi = c
            if req.form.has_key('rev'):
                hi = req.form['rev'][0]
                try:
                    hi = self.repo.changelog.rev(self.repo.lookup(hi))
                except hg.RepoError:
                    # not a recognizable revision: treat as a search query
                    req.write(self.search(hi))
                    return

            req.write(self.changelog(hi))

        elif req.form['cmd'][0] == 'changeset':
            req.write(self.changeset(req.form['node'][0]))

        elif req.form['cmd'][0] == 'manifest':
            req.write(self.manifest(req.form['manifest'][0], req.form['path'][0]))

        elif req.form['cmd'][0] == 'tags':
            req.write(self.tags())

        elif req.form['cmd'][0] == 'filediff':
            req.write(self.filediff(req.form['file'][0], req.form['node'][0]))

        elif req.form['cmd'][0] == 'file':
            req.write(self.filerevision(req.form['file'][0], req.form['filenode'][0]))

        elif req.form['cmd'][0] == 'annotate':
            req.write(self.fileannotate(req.form['file'][0], req.form['filenode'][0]))

        elif req.form['cmd'][0] == 'filelog':
            req.write(self.filelog(req.form['file'][0], req.form['filenode'][0]))

        elif req.form['cmd'][0] == 'heads':
            # wire protocol: list repository heads
            req.httphdr("application/mercurial-0.1")
            h = self.repo.heads()
            req.write(" ".join(map(hex, h)) + "\n")

        elif req.form['cmd'][0] == 'branches':
            # wire protocol: branch information for the given nodes
            req.httphdr("application/mercurial-0.1")
            nodes = []
            if req.form.has_key('nodes'):
                nodes = map(bin, req.form['nodes'][0].split(" "))
            for b in self.repo.branches(nodes):
                req.write(" ".join(map(hex, b)) + "\n")

        elif req.form['cmd'][0] == 'between':
            # wire protocol: nodes between the given pairs
            req.httphdr("application/mercurial-0.1")
            nodes = []
            if req.form.has_key('pairs'):
                pairs = [map(bin, p.split("-"))
                         for p in req.form['pairs'][0].split(" ")]
            for b in self.repo.between(pairs):
                req.write(" ".join(map(hex, b)) + "\n")

        elif req.form['cmd'][0] == 'changegroup':
            # wire protocol: stream a zlib-compressed changegroup
            req.httphdr("application/mercurial-0.1")
            nodes = []
            if not self.allowpull:
                return

            if req.form.has_key('roots'):
                nodes = map(bin, req.form['roots'][0].split(" "))

            z = zlib.compressobj()
            f = self.repo.changegroup(nodes)
            while 1:
                chunk = f.read(4096)
                if not chunk:
                    break
                req.write(z.compress(chunk))

            req.write(z.flush())

        elif req.form['cmd'][0] == 'archive':
            changeset = self.repo.lookup(req.form['node'][0])
            type = req.form['type'][0]
            # only serve archive types enabled in the configuration
            if (type in self.archives and
                self.repo.ui.configbool("web", "allow" + type, False)):
                self.archive(req, changeset, type)
                return

            req.write(self.t("error"))

        else:
            req.write(self.t("error"))
866 866
def create_server(repo):
    """Build an HTTPServer (IPv4 or IPv6) that serves *repo* through the
    hgweb request handler; settings come from the [web] config section."""

    def openlog(opt, default):
        # "-" (or empty) means: log to the provided default stream
        if opt and opt != '-':
            return open(opt, 'w')
        return default

    address = repo.ui.config("web", "address", "")
    port = int(repo.ui.config("web", "port", 8000))
    use_ipv6 = repo.ui.configbool("web", "ipv6")
    accesslog = openlog(repo.ui.config("web", "accesslog", "-"), sys.stdout)
    errorlog = openlog(repo.ui.config("web", "errorlog", "-"), sys.stderr)

    class IPv6HTTPServer(BaseHTTPServer.HTTPServer):
        address_family = getattr(socket, 'AF_INET6', None)

        def __init__(self, *args, **kwargs):
            if self.address_family is None:
                raise hg.RepoError(_('IPv6 not available on this system'))
            BaseHTTPServer.HTTPServer.__init__(self, *args, **kwargs)

    class hgwebhandler(BaseHTTPServer.BaseHTTPRequestHandler):
        def log_error(self, format, *args):
            # Apache-style error log line
            errorlog.write("%s - - [%s] %s\n" % (self.address_string(),
                                                 self.log_date_time_string(),
                                                 format % args))

        def log_message(self, format, *args):
            # Apache-style access log line
            accesslog.write("%s - - [%s] %s\n" % (self.address_string(),
                                                  self.log_date_time_string(),
                                                  format % args))

        def do_POST(self):
            try:
                self.do_hgweb()
            except socket.error, inst:
                # a broken pipe just means the client disconnected
                if inst[0] != errno.EPIPE:
                    raise

        def do_GET(self):
            self.do_POST()

        def do_hgweb(self):
            """Translate the raw HTTP request into a CGI-like environment
            and hand it to the hgweb instance."""
            query = ""
            p = self.path.find("?")
            # NOTE(review): find() returns -1 when there is no "?", which
            # makes this branch take the whole path as the query string --
            # looks like it should be "if p >= 0", confirm.
            if p:
                query = self.path[p + 1:]
                query = query.replace('+', ' ')

            env = {}
            env['GATEWAY_INTERFACE'] = 'CGI/1.1'
            env['REQUEST_METHOD'] = self.command
            env['SERVER_NAME'] = self.server.server_name
            env['SERVER_PORT'] = str(self.server.server_port)
            env['REQUEST_URI'] = "/"
            if query:
                env['QUERY_STRING'] = query
            host = self.address_string()
            if host != self.client_address[0]:
                env['REMOTE_HOST'] = host
            env['REMOTE_ADDR'] = self.client_address[0]

            if self.headers.typeheader is None:
                env['CONTENT_TYPE'] = self.headers.type
            else:
                env['CONTENT_TYPE'] = self.headers.typeheader
            length = self.headers.getheader('content-length')
            if length:
                env['CONTENT_LENGTH'] = length
            # fold continuation lines of the Accept header
            accept = []
            for line in self.headers.getallmatchingheaders('accept'):
                if line[:1] in "\t\n\r ":
                    accept.append(line.strip())
                else:
                    accept = accept + line[7:].split(',')
            env['HTTP_ACCEPT'] = ','.join(accept)

            req = hgrequest(self.rfile, self.wfile, env)
            self.send_response(200, "Script output follows")
            hg.run(req)

    # NOTE(review): this local name shadows the module-level 'hg' import
    # inside this function (and is what do_hgweb's closure sees) -- confirm
    # intended.
    hg = hgweb(repo)
    if use_ipv6:
        return IPv6HTTPServer((address, port), hgwebhandler)
    else:
        return BaseHTTPServer.HTTPServer((address, port), hgwebhandler)
953 953
def server(path, name, templates, address, port, use_ipv6=False,
           accesslog=sys.stdout, errorlog=sys.stderr):
    """Create the hgweb HTTP server and serve requests forever.

    NOTE(review): create_server() above takes a single repository argument
    and reads its settings from repo.ui, but this call still passes the old
    8-argument signature -- invoking this function would raise TypeError.
    Confirm whether any caller still uses it.
    """
    httpd = create_server(path, name, templates, address, port, use_ipv6,
                          accesslog, errorlog)
    httpd.serve_forever()
959 959
# This is a stopgap
class hgwebdir:
    """Serve an index of multiple repositories and dispatch per-repository
    requests to hgweb.  *config* may be a list of (name, path) pairs, a
    dict of name -> path, or the path of a config file with a [paths]
    section."""
    def __init__(self, config):
        def cleannames(items):
            # strip leading/trailing slashes from the virtual names
            return [(name.strip('/'), path) for name, path in items]

        if type(config) == type([]):
            self.repos = cleannames(config)
        elif type(config) == type({}):
            self.repos = cleannames(config.items())
            self.repos.sort()
        else:
            cp = ConfigParser.SafeConfigParser()
            cp.read(config)
            self.repos = cleannames(cp.items("paths"))
            self.repos.sort()

    def run(self, req=hgrequest()):
        """Handle one request: either render the index page or hand off to
        the hgweb instance named by PATH_INFO.

        NOTE(review): the default hgrequest() is built once at definition
        time and shared between calls -- confirm intended.
        """
        def header(**map):
            yield tmpl("header", **map)

        def footer(**map):
            yield tmpl("footer", **map)

        m = os.path.join(templatepath(), "map")
        tmpl = templater(m, common_filters,
                         {"header": header, "footer": footer})

        def entries(**map):
            # one index row per repository, with metadata from its hgrc
            parity = 0
            for name, path in self.repos:
                u = ui.ui()
                try:
                    u.readconfig(os.path.join(path, '.hg', 'hgrc'))
                except IOError:
                    pass
                get = u.config

                url = ('/'.join([req.env["REQUEST_URI"].split('?')[0], name])
                       .replace("//", "/"))

                # update time with local timezone
                d = (get_mtime(path), util.makedate()[1])

                yield dict(contact=(get("ui", "username") or # preferred
                                    get("web", "contact") or # deprecated
                                    get("web", "author", "unknown")), # also
                           name=get("web", "name", name),
                           url=url,
                           parity=parity,
                           shortdesc=get("web", "description", "unknown"),
                           lastupdate=d)

                parity = 1 - parity

        virtual = req.env.get("PATH_INFO", "").strip('/')
        if virtual:
            real = dict(self.repos).get(virtual)
            if real:
                hgweb(real).run(req)
            else:
                req.write(tmpl("notfound", repo=virtual))
        else:
            req.write(tmpl("index", entries=entries))
@@ -1,1736 +1,1736
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 import struct, os, util
9 9 import filelog, manifest, changelog, dirstate, repo
10 10 from node import *
11 11 from i18n import gettext as _
12 12 from demandload import *
13 13 demandload(globals(), "re lock transaction tempfile stat mdiff errno")
14 14
15 15 class localrepository:
16 16 def __init__(self, ui, path=None, create=0):
17 17 if not path:
18 18 p = os.getcwd()
19 19 while not os.path.isdir(os.path.join(p, ".hg")):
20 20 oldp = p
21 21 p = os.path.dirname(p)
22 22 if p == oldp: raise repo.RepoError(_("no repo found"))
23 23 path = p
24 24 self.path = os.path.join(path, ".hg")
25 25
26 26 if not create and not os.path.isdir(self.path):
27 27 raise repo.RepoError(_("repository %s not found") % self.path)
28 28
29 29 self.root = os.path.abspath(path)
30 30 self.ui = ui
31 31 self.opener = util.opener(self.path)
32 32 self.wopener = util.opener(self.root)
33 33 self.manifest = manifest.manifest(self.opener)
34 34 self.changelog = changelog.changelog(self.opener)
35 35 self.tagscache = None
36 36 self.nodetagscache = None
37 37 self.encodepats = None
38 38 self.decodepats = None
39 39
40 40 if create:
41 41 os.mkdir(self.path)
42 42 os.mkdir(self.join("data"))
43 43
44 44 self.dirstate = dirstate.dirstate(self.opener, ui, self.root)
45 45 try:
46 self.ui.readconfig(self.opener("hgrc"))
46 self.ui.readconfig(os.path.join(self.path, "hgrc"))
47 47 except IOError: pass
48 48
    def hook(self, name, **args):
        """Run the shell hook *name* from the [hooks] config section.

        Keyword arguments are exported to the hook as upper-cased
        environment variables and restored afterwards.  Returns True if
        no hook is configured or it exited with status 0, False (after
        warning) otherwise.
        """
        s = self.ui.config("hooks", name)
        if s:
            self.ui.note(_("running hook %s: %s\n") % (name, s))
            old = {}
            for k, v in args.items():
                k = k.upper()
                # remember the previous value (None == was unset)
                old[k] = os.environ.get(k, None)
                os.environ[k] = v

            # Hooks run in the repository root
            olddir = os.getcwd()
            os.chdir(self.root)
            r = os.system(s)
            os.chdir(olddir)

            # restore the environment exactly as it was before the hook
            for k, v in old.items():
                if v != None:
                    os.environ[k] = v
                else:
                    del os.environ[k]

            if r:
                self.ui.warn(_("abort: %s hook failed with status %d!\n") %
                             (name, r))
                return False
        return True
    def tags(self):
        '''return a mapping of tag to node'''
        if not self.tagscache:
            self.tagscache = {}
            def addtag(self, k, n):
                # record tag k -> binary node; an unparsable hash maps to ''
                try:
                    bin_n = bin(n)
                except TypeError:
                    bin_n = ''
                self.tagscache[k.strip()] = bin_n

            try:
                # read each head of the tags file, ending with the tip
                # and add each tag found to the map, with "newer" ones
                # taking precedence
                fl = self.file(".hgtags")
                h = fl.heads()
                h.reverse()
                for r in h:
                    for l in fl.read(r).splitlines():
                        if l:
                            n, k = l.split(" ", 1)
                            addtag(self, k, n)
            except KeyError:
                # no .hgtags file in this repository
                pass

            try:
                # local (uncommitted) tags override the committed ones
                f = self.opener("localtags")
                for l in f:
                    n, k = l.split(" ", 1)
                    addtag(self, k, n)
            except IOError:
                pass

            # 'tip' is implicit and always present
            self.tagscache['tip'] = self.changelog.tip()

        return self.tagscache
115 115 def tagslist(self):
116 116 '''return a list of tags ordered by revision'''
117 117 l = []
118 118 for t, n in self.tags().items():
119 119 try:
120 120 r = self.changelog.rev(n)
121 121 except:
122 122 r = -2 # sort to the beginning of the list if unknown
123 123 l.append((r,t,n))
124 124 l.sort()
125 125 return [(t,n) for r,t,n in l]
126 126
127 127 def nodetags(self, node):
128 128 '''return the tags associated with a node'''
129 129 if not self.nodetagscache:
130 130 self.nodetagscache = {}
131 131 for t,n in self.tags().items():
132 132 self.nodetagscache.setdefault(n,[]).append(t)
133 133 return self.nodetagscache.get(node, [])
134 134
135 135 def lookup(self, key):
136 136 try:
137 137 return self.tags()[key]
138 138 except KeyError:
139 139 try:
140 140 return self.changelog.lookup(key)
141 141 except:
142 142 raise repo.RepoError(_("unknown revision '%s'") % key)
143 143
144 144 def dev(self):
145 145 return os.stat(self.path).st_dev
146 146
147 147 def local(self):
148 148 return True
149 149
150 150 def join(self, f):
151 151 return os.path.join(self.path, f)
152 152
153 153 def wjoin(self, f):
154 154 return os.path.join(self.root, f)
155 155
156 156 def file(self, f):
157 157 if f[0] == '/': f = f[1:]
158 158 return filelog.filelog(self.opener, f)
159 159
160 160 def getcwd(self):
161 161 return self.dirstate.getcwd()
162 162
163 163 def wfile(self, f, mode='r'):
164 164 return self.wopener(f, mode)
165 165
166 166 def wread(self, filename):
167 167 if self.encodepats == None:
168 168 l = []
169 169 for pat, cmd in self.ui.configitems("encode"):
170 170 mf = util.matcher("", "/", [pat], [], [])[1]
171 171 l.append((mf, cmd))
172 172 self.encodepats = l
173 173
174 174 data = self.wopener(filename, 'r').read()
175 175
176 176 for mf, cmd in self.encodepats:
177 177 if mf(filename):
178 178 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
179 179 data = util.filter(data, cmd)
180 180 break
181 181
182 182 return data
183 183
184 184 def wwrite(self, filename, data, fd=None):
185 185 if self.decodepats == None:
186 186 l = []
187 187 for pat, cmd in self.ui.configitems("decode"):
188 188 mf = util.matcher("", "/", [pat], [], [])[1]
189 189 l.append((mf, cmd))
190 190 self.decodepats = l
191 191
192 192 for mf, cmd in self.decodepats:
193 193 if mf(filename):
194 194 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
195 195 data = util.filter(data, cmd)
196 196 break
197 197
198 198 if fd:
199 199 return fd.write(data)
200 200 return self.wopener(filename, 'w').write(data)
201 201
    def transaction(self):
        """Open a write transaction, snapshotting the dirstate so the
        whole operation can be undone later."""
        # save dirstate for undo
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            # brand-new repository: no dirstate yet
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)

        def after():
            # on successful close, the journal becomes the undo data
            util.rename(self.join("journal"), self.join("undo"))
            util.rename(self.join("journal.dirstate"),
                        self.join("undo.dirstate"))

        return transaction.transaction(self.ui.warn, self.opener,
                                       self.join("journal"), after)
218 218 def recover(self):
219 219 lock = self.lock()
220 220 if os.path.exists(self.join("journal")):
221 221 self.ui.status(_("rolling back interrupted transaction\n"))
222 222 return transaction.rollback(self.opener, self.join("journal"))
223 223 else:
224 224 self.ui.warn(_("no interrupted transaction available\n"))
225 225
    def undo(self):
        """Roll back the last completed transaction, restoring the
        dirstate snapshot taken when it was opened."""
        lock = self.lock()
        if os.path.exists(self.join("undo")):
            self.ui.status(_("rolling back last transaction\n"))
            transaction.rollback(self.opener, self.join("undo"))
            # drop the live dirstate before its file is replaced, then
            # re-read it from the restored snapshot
            self.dirstate = None
            util.rename(self.join("undo.dirstate"), self.join("dirstate"))
            self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
        else:
            self.ui.warn(_("no undo information available\n"))
237 237 def lock(self, wait=1):
238 238 try:
239 239 return lock.lock(self.join("lock"), 0)
240 240 except lock.LockHeld, inst:
241 241 if wait:
242 242 self.ui.warn(_("waiting for lock held by %s\n") % inst.args[0])
243 243 return lock.lock(self.join("lock"), wait)
244 244 raise inst
245 245
    def rawcommit(self, files, text, user, date, p1=None, p2=None):
        """Commit *files* with the given metadata and explicit parents,
        bypassing the usual working-directory bookkeeping.

        The dirstate is only updated when p1 is the current working
        directory parent.  A file that cannot be read from the working
        directory is treated as removed.
        """
        orig_parent = self.dirstate.parents()[0] or nullid
        p1 = p1 or self.dirstate.parents()[0] or nullid
        p2 = p2 or self.dirstate.parents()[1] or nullid
        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0])
        mf1 = self.manifest.readflags(c1[0])
        m2 = self.manifest.read(c2[0])
        changed = []

        # only touch the dirstate when committing on top of the
        # current working-directory parent
        if orig_parent == p1:
            update_dirstate = 1
        else:
            update_dirstate = 0

        tr = self.transaction()
        mm = m1.copy()
        mfm = mf1.copy()
        linkrev = self.changelog.count()
        for f in files:
            try:
                t = self.wread(f)
                tm = util.is_exec(self.wjoin(f), mfm.get(f, False))
                r = self.file(f)
                mfm[f] = tm

                fp1 = m1.get(f, nullid)
                fp2 = m2.get(f, nullid)

                # is the same revision on two branches of a merge?
                if fp2 == fp1:
                    fp2 = nullid

                if fp2 != nullid:
                    # is one parent an ancestor of the other?
                    fpa = r.ancestor(fp1, fp2)
                    if fpa == fp1:
                        fp1, fp2 = fp2, nullid
                    elif fpa == fp2:
                        fp2 = nullid

                # is the file unmodified from the parent?
                if t == r.read(fp1):
                    # record the proper existing parent in manifest
                    # no need to add a revision
                    mm[f] = fp1
                    continue

                mm[f] = r.add(t, {}, tr, linkrev, fp1, fp2)
                changed.append(f)
                if update_dirstate:
                    self.dirstate.update([f], "n")
            except IOError:
                # unreadable in the working dir: record as a removal
                try:
                    del mm[f]
                    del mfm[f]
                    if update_dirstate:
                        self.dirstate.forget([f])
                except:
                    # deleted from p2?
                    pass

        mnode = self.manifest.add(mm, mfm, tr, linkrev, c1[0], c2[0])
        user = user or self.ui.username()
        n = self.changelog.add(mnode, changed, text, tr, p1, p2, user, date)
        tr.close()
        if update_dirstate:
            self.dirstate.setparents(n, nullid)
    def commit(self, files = None, text = "", user = None, date = None,
               match = util.always, force=False):
        """Commit working-directory changes as a new changeset.

        files  -- explicit list of files to commit; when None, commit
                  everything changed/added/removed per self.changes()
        text   -- commit message; if empty, the user is prompted via
                  ui.edit() and an all-whitespace message aborts
        force  -- commit even when nothing changed
        Returns the new changeset node, or None if nothing was
        committed (nothing changed, hook vetoed, or empty message).
        """
        commit = []
        remove = []
        changed = []

        if files:
            for f in files:
                s = self.dirstate.state(f)
                if s in 'nmai':
                    commit.append(f)
                elif s == 'r':
                    remove.append(f)
                else:
                    self.ui.warn(_("%s not tracked!\n") % f)
        else:
            (c, a, d, u) = self.changes(match=match)
            commit = c + a
            remove = d

        p1, p2 = self.dirstate.parents()
        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0])
        mf1 = self.manifest.readflags(c1[0])
        m2 = self.manifest.read(c2[0])

        # an uncommitted merge (p2 set) must be committable even with
        # no file changes
        if not commit and not remove and not force and p2 == nullid:
            self.ui.status(_("nothing changed\n"))
            return None

        if not self.hook("precommit"):
            return None

        lock = self.lock()
        tr = self.transaction()

        # check in files
        new = {}
        linkrev = self.changelog.count()
        commit.sort()
        for f in commit:
            self.ui.note(f + "\n")
            try:
                mf1[f] = util.is_exec(self.wjoin(f), mf1.get(f, False))
                t = self.wread(f)
            except IOError:
                self.ui.warn(_("trouble committing %s!\n") % f)
                raise

            r = self.file(f)

            meta = {}
            cp = self.dirstate.copied(f)
            if cp:
                # record copy source and its revision in the file metadata
                meta["copy"] = cp
                meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid)))
                self.ui.debug(_(" %s: copy %s:%s\n") % (f, cp, meta["copyrev"]))
                fp1, fp2 = nullid, nullid
            else:
                fp1 = m1.get(f, nullid)
                fp2 = m2.get(f, nullid)

                # is the same revision on two branches of a merge?
                if fp2 == fp1:
                    fp2 = nullid

                if fp2 != nullid:
                    # is one parent an ancestor of the other?
                    fpa = r.ancestor(fp1, fp2)
                    if fpa == fp1:
                        fp1, fp2 = fp2, nullid
                    elif fpa == fp2:
                        fp2 = nullid

                # is the file unmodified from the parent?
                if not meta and t == r.read(fp1):
                    # record the proper existing parent in manifest
                    # no need to add a revision
                    new[f] = fp1
                    continue

            new[f] = r.add(t, meta, tr, linkrev, fp1, fp2)
            # remember what we've added so that we can later calculate
            # the files to pull from a set of changesets
            changed.append(f)

        # update manifest
        m1.update(new)
        for f in remove:
            if f in m1:
                del m1[f]
        mn = self.manifest.add(m1, mf1, tr, linkrev, c1[0], c2[0],
                               (new, remove))

        # add changeset
        new = new.keys()
        new.sort()

        if not text:
            # build an editor template listing the pending changes
            edittext = ""
            if p2 != nullid:
                edittext += "HG: branch merge\n"
            edittext += "\n" + "HG: manifest hash %s\n" % hex(mn)
            edittext += "".join(["HG: changed %s\n" % f for f in changed])
            edittext += "".join(["HG: removed %s\n" % f for f in remove])
            if not changed and not remove:
                edittext += "HG: no files changed\n"
            edittext = self.ui.edit(edittext)
            if not edittext.rstrip():
                return None
            text = edittext

        user = user or self.ui.username()
        n = self.changelog.add(mn, changed, text, tr, p1, p2, user, date)
        tr.close()

        self.dirstate.setparents(n)
        self.dirstate.update(new, "n")
        self.dirstate.forget(remove)

        if not self.hook("commit", node=hex(n)):
            return None
        return n
441 441 def walk(self, node=None, files=[], match=util.always):
442 442 if node:
443 443 for fn in self.manifest.read(self.changelog.read(node)[0]):
444 444 if match(fn): yield 'm', fn
445 445 else:
446 446 for src, fn in self.dirstate.walk(files, match):
447 447 yield src, fn
448 448
    def changes(self, node1 = None, node2 = None, files = [],
                match = util.always):
        """Return (changed, added, deleted, unknown) file lists between
        node1 and node2, each defaulting to the working directory.

        Each returned list is sorted.  'unknown' is only populated when
        the working directory is one side of the comparison.
        """
        mf2, u = None, []

        def fcmp(fn, mf):
            # compare working-dir contents against the manifest revision
            t1 = self.wread(fn)
            t2 = self.file(fn).read(mf.get(fn, nullid))
            return cmp(t1, t2)

        def mfmatches(node):
            # manifest of *node* restricted to files accepted by match
            mf = dict(self.manifest.read(node))
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        # are we comparing the working directory?
        if not node2:
            l, c, a, d, u = self.dirstate.changes(files, match)

            # are we comparing working dir against its parent?
            if not node1:
                if l:
                    # do a full compare of any files that might have changed
                    change = self.changelog.read(self.dirstate.parents()[0])
                    mf2 = mfmatches(change[0])
                    for f in l:
                        if fcmp(f, mf2):
                            c.append(f)

                for l in c, a, d, u:
                    l.sort()

                return (c, a, d, u)

        # are we comparing working dir against non-tip?
        # generate a pseudo-manifest for the working dir
        if not node2:
            if not mf2:
                change = self.changelog.read(self.dirstate.parents()[0])
                mf2 = mfmatches(change[0])
            for f in a + c + l:
                mf2[f] = ""
            for f in d:
                if f in mf2: del mf2[f]
        else:
            change = self.changelog.read(node2)
            mf2 = mfmatches(change[0])

        # flush lists from dirstate before comparing manifests
        c, a = [], []

        change = self.changelog.read(node1)
        mf1 = mfmatches(change[0])

        for fn in mf2:
            if mf1.has_key(fn):
                if mf1[fn] != mf2[fn]:
                    # "" marks a working-dir pseudo-entry: always compare
                    if mf2[fn] != "" or fcmp(fn, mf1):
                        c.append(fn)
                del mf1[fn]
            else:
                a.append(fn)

        # whatever remains in mf1 was deleted on the node2 side
        d = mf1.keys()

        for l in c, a, d, u:
            l.sort()

        return (c, a, d, u)
520 520 def add(self, list):
521 521 for f in list:
522 522 p = self.wjoin(f)
523 523 if not os.path.exists(p):
524 524 self.ui.warn(_("%s does not exist!\n") % f)
525 525 elif not os.path.isfile(p):
526 526 self.ui.warn(_("%s not added: only files supported currently\n") % f)
527 527 elif self.dirstate.state(f) in 'an':
528 528 self.ui.warn(_("%s already tracked!\n") % f)
529 529 else:
530 530 self.dirstate.update([f], "a")
531 531
532 532 def forget(self, list):
533 533 for f in list:
534 534 if self.dirstate.state(f) not in 'ai':
535 535 self.ui.warn(_("%s not added!\n") % f)
536 536 else:
537 537 self.dirstate.forget([f])
538 538
539 539 def remove(self, list, unlink=False):
540 540 if unlink:
541 541 for f in list:
542 542 try:
543 543 util.unlink(self.wjoin(f))
544 544 except OSError, inst:
545 545 if inst.errno != errno.ENOENT: raise
546 546 for f in list:
547 547 p = self.wjoin(f)
548 548 if os.path.exists(p):
549 549 self.ui.warn(_("%s still exists!\n") % f)
550 550 elif self.dirstate.state(f) == 'a':
551 551 self.ui.warn(_("%s never committed!\n") % f)
552 552 self.dirstate.forget([f])
553 553 elif f not in self.dirstate:
554 554 self.ui.warn(_("%s not tracked!\n") % f)
555 555 else:
556 556 self.dirstate.update([f], "r")
557 557
558 558 def undelete(self, list):
559 559 p = self.dirstate.parents()[0]
560 560 mn = self.changelog.read(p)[0]
561 561 mf = self.manifest.readflags(mn)
562 562 m = self.manifest.read(mn)
563 563 for f in list:
564 564 if self.dirstate.state(f) not in "r":
565 565 self.ui.warn("%s not removed!\n" % f)
566 566 else:
567 567 t = self.file(f).read(m[f])
568 568 try:
569 569 self.wwrite(f, t)
570 570 except IOError, e:
571 571 if e.errno != errno.ENOENT:
572 572 raise
573 573 os.makedirs(os.path.dirname(self.wjoin(f)))
574 574 self.wwrite(f, t)
575 575 util.set_exec(self.wjoin(f), mf[f])
576 576 self.dirstate.update([f], "n")
577 577
578 578 def copy(self, source, dest):
579 579 p = self.wjoin(dest)
580 580 if not os.path.exists(p):
581 581 self.ui.warn(_("%s does not exist!\n") % dest)
582 582 elif not os.path.isfile(p):
583 583 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
584 584 else:
585 585 if self.dirstate.state(dest) == '?':
586 586 self.dirstate.update([dest], "a")
587 587 self.dirstate.copy(source, dest)
588 588
589 589 def heads(self):
590 590 return self.changelog.heads()
591 591
592 592 # branchlookup returns a dict giving a list of branches for
593 593 # each head. A branch is defined as the tag of a node or
594 594 # the branch of the node's parents. If a node has multiple
595 595 # branch tags, tags are eliminated if they are visible from other
596 596 # branch tags.
597 597 #
598 598 # So, for this graph: a->b->c->d->e
599 599 # \ /
600 600 # aa -----/
601 601 # a has tag 2.6.12
602 602 # d has tag 2.6.13
603 603 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
604 604 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
605 605 # from the list.
606 606 #
607 607 # It is possible that more than one head will have the same branch tag.
608 608 # callers need to check the result for multiple heads under the same
609 609 # branch tag if that is a problem for them (ie checkout of a specific
610 610 # branch).
611 611 #
612 612 # passing in a specific branch will limit the depth of the search
613 613 # through the parents. It won't limit the branches returned in the
614 614 # result though.
    def branchlookup(self, heads=None, branch=None):
        """Return a dict mapping each head to its list of branch tags,
        per the algorithm described in the comment block above."""
        if not heads:
            heads = self.heads()
        headt = [ h for h in heads ]
        chlog = self.changelog
        branches = {}
        merges = []
        seenmerge = {}

        # traverse the tree once for each head, recording in the branches
        # dict which tags are visible from this head. The branches
        # dict also records which tags are visible from each tag
        # while we traverse.
        while headt or merges:
            if merges:
                # resume a previously queued second-parent branch,
                # inheriting the tags found so far
                n, found = merges.pop()
                visit = [n]
            else:
                h = headt.pop()
                visit = [h]
                found = [h]
                seen = {}
            while visit:
                n = visit.pop()
                if n in seen:
                    continue
                pp = chlog.parents(n)
                tags = self.nodetags(n)
                if tags:
                    for x in tags:
                        if x == 'tip':
                            continue
                        for f in found:
                            branches.setdefault(f, {})[n] = 1
                        branches.setdefault(n, {})[n] = 1
                        break
                    if n not in found:
                        found.append(n)
                    if branch in tags:
                        # searched branch reached: stop descending here
                        continue
                seen[n] = 1
                if pp[1] != nullid and n not in seenmerge:
                    # queue the merge's second parent for a later pass
                    merges.append((pp[1], [x for x in found]))
                    seenmerge[n] = 1
                if pp[0] != nullid:
                    visit.append(pp[0])
        # traverse the branches dict, eliminating branch tags from each
        # head that are visible from another branch tag for that head.
        out = {}
        viscache = {}
        for h in heads:
            def visible(node):
                # memoized set of tag nodes reachable from *node*
                if node in viscache:
                    return viscache[node]
                ret = {}
                visit = [node]
                while visit:
                    x = visit.pop()
                    if x in viscache:
                        ret.update(viscache[x])
                    elif x not in ret:
                        ret[x] = 1
                        if x in branches:
                            visit[len(visit):] = branches[x].keys()
                viscache[node] = ret
                return ret
            if h not in branches:
                continue
            # O(n^2), but somewhat limited. This only searches the
            # tags visible from a specific head, not all the tags in the
            # whole repo.
            for b in branches[h]:
                vis = False
                for bb in branches[h].keys():
                    if b != bb:
                        if b in visible(bb):
                            vis = True
                            break
                if not vis:
                    l = out.setdefault(h, [])
                    l[len(l):] = self.nodetags(b)
        return out
698 698 def branches(self, nodes):
699 699 if not nodes: nodes = [self.changelog.tip()]
700 700 b = []
701 701 for n in nodes:
702 702 t = n
703 703 while n:
704 704 p = self.changelog.parents(n)
705 705 if p[1] != nullid or p[0] == nullid:
706 706 b.append((t, n, p[0], p[1]))
707 707 break
708 708 n = p[0]
709 709 return b
710 710
711 711 def between(self, pairs):
712 712 r = []
713 713
714 714 for top, bottom in pairs:
715 715 n, l, i = top, [], 0
716 716 f = 1
717 717
718 718 while n != bottom:
719 719 p = self.changelog.parents(n)[0]
720 720 if i == f:
721 721 l.append(n)
722 722 f = f * 2
723 723 n = p
724 724 i += 1
725 725
726 726 r.append(l)
727 727
728 728 return r
729 729
    def findincoming(self, remote, base=None, heads=None):
        """Return the list of earliest changeset nodes present in
        *remote* but missing locally, or None if nothing is missing.

        base  -- dict filled in-place with known common nodes
        heads -- remote heads to start from (fetched if not given)
        """
        m = self.changelog.nodemap
        search = []
        fetch = {}
        seen = {}
        seenbranch = {}
        if base == None:
            base = {}

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        if not heads:
            heads = remote.heads()

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        if not unknown:
            # every remote head is known locally: nothing incoming
            return None

        rep = {}
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n") % (short(n[0]), short(n[1])))
                if n[0] == nullid:
                    break
                if n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                if n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n) # schedule branch range for scanning
                    seenbranch[n] = 1
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch[n[1]] = 1 # earliest unknown
                            base[n[2]] = 1 # latest known
                            continue

                    # queue this branch's parents for the next request
                    for a in n[2:4]:
                        if a not in rep:
                            r.append(a)
                            rep[a] = 1

                seen[n[0]] = 1

            if r:
                # batch parent lookups ten at a time
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                            (reqcnt, " ".join(map(short, r))))
                for p in range(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        if b[0] in m:
                            self.ui.debug(_("found base node %s\n") % short(b[0]))
                            base[b[0]] = 1
                        elif b[0] not in seen:
                            unknown.append(b)

        # do binary search on the branches we found
        while search:
            n = search.pop(0)
            reqcnt += 1
            l = remote.between([(n[0], n[1])])[0]
            l.append(n[1])
            p = n[0]
            f = 1
            for i in l:
                self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                if i in m:
                    if f <= 2:
                        self.ui.debug(_("found new branch changeset %s\n") %
                                          short(p))
                        fetch[p] = 1
                        base[i] = 1
                    else:
                        self.ui.debug(_("narrowed branch search to %s:%s\n")
                                      % (short(p), short(i)))
                        search.append((p, i))
                    break
                p, f = i, f * 2

        # sanity check our fetch list
        for f in fetch.keys():
            if f in m:
                raise repo.RepoError(_("already have changeset ") + short(f[:4]))

        if base.keys() == [nullid]:
            self.ui.warn(_("warning: pulling from an unrelated repository!\n"))

        self.ui.note(_("found new changesets starting at ") +
                     " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return fetch.keys()
    def findoutgoing(self, remote, base=None, heads=None):
        """Return the root nodes of the set of changesets missing from
        *remote*.  If *base* is given it is taken as the precomputed
        common-node dict; otherwise findincoming computes it."""
        if base == None:
            base = {}
            self.findincoming(remote, base, heads)

        self.ui.debug(_("common changesets up to ")
                      + " ".join(map(short, base.keys())) + "\n")

        remain = dict.fromkeys(self.changelog.nodemap)

        # prune everything remote has from the tree
        del remain[nullid]
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                del remain[n]
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)

        # this is the set of all roots we have to push
        return subset
880 880 def pull(self, remote, heads = None):
881 881 lock = self.lock()
882 882
883 883 # if we have an empty repo, fetch everything
884 884 if self.changelog.tip() == nullid:
885 885 self.ui.status(_("requesting all changes\n"))
886 886 fetch = [nullid]
887 887 else:
888 888 fetch = self.findincoming(remote)
889 889
890 890 if not fetch:
891 891 self.ui.status(_("no changes found\n"))
892 892 return 1
893 893
894 894 if heads is None:
895 895 cg = remote.changegroup(fetch)
896 896 else:
897 897 cg = remote.changegroupsubset(fetch, heads)
898 898 return self.addchangegroup(cg)
899 899
900 900 def push(self, remote, force=False):
901 901 lock = remote.lock()
902 902
903 903 base = {}
904 904 heads = remote.heads()
905 905 inc = self.findincoming(remote, base, heads)
906 906 if not force and inc:
907 907 self.ui.warn(_("abort: unsynced remote changes!\n"))
908 908 self.ui.status(_("(did you forget to sync? use push -f to force)\n"))
909 909 return 1
910 910
911 911 update = self.findoutgoing(remote, base)
912 912 if not update:
913 913 self.ui.status(_("no changes found\n"))
914 914 return 1
915 915 elif not force:
916 916 if len(heads) < len(self.changelog.heads()):
917 917 self.ui.warn(_("abort: push creates new remote branches!\n"))
918 918 self.ui.status(_("(did you forget to merge?"
919 919 " use push -f to force)\n"))
920 920 return 1
921 921
922 922 cg = self.changegroup(update)
923 923 return remote.addchangegroup(cg)
924 924
925 925 def changegroupsubset(self, bases, heads):
926 926 """This function generates a changegroup consisting of all the nodes
927 927 that are descendents of any of the bases, and ancestors of any of
928 928 the heads.
929 929
930 930 It is fairly complex as determining which filenodes and which
931 931 manifest nodes need to be included for the changeset to be complete
932 932 is non-trivial.
933 933
934 934 Another wrinkle is doing the reverse, figuring out which changeset in
935 935 the changegroup a particular filenode or manifestnode belongs to."""
936 936
937 937 # Set up some initial variables
938 938 # Make it easy to refer to self.changelog
939 939 cl = self.changelog
940 940 # msng is short for missing - compute the list of changesets in this
941 941 # changegroup.
942 942 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
943 943 # Some bases may turn out to be superfluous, and some heads may be
944 944 # too. nodesbetween will return the minimal set of bases and heads
945 945 # necessary to re-create the changegroup.
946 946
947 947 # Known heads are the list of heads that it is assumed the recipient
948 948 # of this changegroup will know about.
949 949 knownheads = {}
950 950 # We assume that all parents of bases are known heads.
951 951 for n in bases:
952 952 for p in cl.parents(n):
953 953 if p != nullid:
954 954 knownheads[p] = 1
955 955 knownheads = knownheads.keys()
956 956 if knownheads:
957 957 # Now that we know what heads are known, we can compute which
958 958 # changesets are known. The recipient must know about all
959 959 # changesets required to reach the known heads from the null
960 960 # changeset.
961 961 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
962 962 junk = None
963 963 # Transform the list into an ersatz set.
964 964 has_cl_set = dict.fromkeys(has_cl_set)
965 965 else:
966 966 # If there were no known heads, the recipient cannot be assumed to
967 967 # know about any changesets.
968 968 has_cl_set = {}
969 969
970 970 # Make it easy to refer to self.manifest
971 971 mnfst = self.manifest
972 972 # We don't know which manifests are missing yet
973 973 msng_mnfst_set = {}
974 974 # Nor do we know which filenodes are missing.
975 975 msng_filenode_set = {}
976 976
977 977 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
978 978 junk = None
979 979
980 980 # A changeset always belongs to itself, so the changenode lookup
981 981 # function for a changenode is identity.
982 982 def identity(x):
983 983 return x
984 984
985 985 # A function generating function. Sets up an environment for the
986 986 # inner function.
987 987 def cmp_by_rev_func(revlog):
988 988 # Compare two nodes by their revision number in the environment's
989 989 # revision history. Since the revision number both represents the
990 990 # most efficient order to read the nodes in, and represents a
991 991 # topological sorting of the nodes, this function is often useful.
992 992 def cmp_by_rev(a, b):
993 993 return cmp(revlog.rev(a), revlog.rev(b))
994 994 return cmp_by_rev
995 995
996 996 # If we determine that a particular file or manifest node must be a
997 997 # node that the recipient of the changegroup will already have, we can
998 998 # also assume the recipient will have all the parents. This function
999 999 # prunes them from the set of missing nodes.
1000 1000 def prune_parents(revlog, hasset, msngset):
1001 1001 haslst = hasset.keys()
1002 1002 haslst.sort(cmp_by_rev_func(revlog))
1003 1003 for node in haslst:
1004 1004 parentlst = [p for p in revlog.parents(node) if p != nullid]
1005 1005 while parentlst:
1006 1006 n = parentlst.pop()
1007 1007 if n not in hasset:
1008 1008 hasset[n] = 1
1009 1009 p = [p for p in revlog.parents(n) if p != nullid]
1010 1010 parentlst.extend(p)
1011 1011 for n in hasset:
1012 1012 msngset.pop(n, None)
1013 1013
1014 1014 # This is a function generating function used to set up an environment
1015 1015 # for the inner function to execute in.
1016 1016 def manifest_and_file_collector(changedfileset):
1017 1017 # This is an information gathering function that gathers
1018 1018 # information from each changeset node that goes out as part of
1019 1019 # the changegroup. The information gathered is a list of which
1020 1020 # manifest nodes are potentially required (the recipient may
1021 1021 # already have them) and total list of all files which were
1022 1022 # changed in any changeset in the changegroup.
1023 1023 #
1024 1024 # We also remember the first changenode we saw any manifest
1025 1025 # referenced by so we can later determine which changenode 'owns'
1026 1026 # the manifest.
1027 1027 def collect_manifests_and_files(clnode):
1028 1028 c = cl.read(clnode)
1029 1029 for f in c[3]:
1030 1030 # This is to make sure we only have one instance of each
1031 1031 # filename string for each filename.
1032 1032 changedfileset.setdefault(f, f)
1033 1033 msng_mnfst_set.setdefault(c[0], clnode)
1034 1034 return collect_manifests_and_files
1035 1035
1036 1036 # Figure out which manifest nodes (of the ones we think might be part
1037 1037 # of the changegroup) the recipient must know about and remove them
1038 1038 # from the changegroup.
1039 1039 def prune_manifests():
1040 1040 has_mnfst_set = {}
1041 1041 for n in msng_mnfst_set:
1042 1042 # If a 'missing' manifest thinks it belongs to a changenode
1043 1043 # the recipient is assumed to have, obviously the recipient
1044 1044 # must have that manifest.
1045 1045 linknode = cl.node(mnfst.linkrev(n))
1046 1046 if linknode in has_cl_set:
1047 1047 has_mnfst_set[n] = 1
1048 1048 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1049 1049
1050 1050 # Use the information collected in collect_manifests_and_files to say
1051 1051 # which changenode any manifestnode belongs to.
1052 1052 def lookup_manifest_link(mnfstnode):
1053 1053 return msng_mnfst_set[mnfstnode]
1054 1054
1055 1055 # A function generating function that sets up the initial environment
1056 1056 # the inner function.
1057 1057 def filenode_collector(changedfiles):
1058 1058 next_rev = [0]
1059 1059 # This gathers information from each manifestnode included in the
1060 1060 # changegroup about which filenodes the manifest node references
1061 1061 # so we can include those in the changegroup too.
1062 1062 #
1063 1063 # It also remembers which changenode each filenode belongs to. It
1064 1064 # does this by assuming the a filenode belongs to the changenode
1065 1065 # the first manifest that references it belongs to.
1066 1066 def collect_msng_filenodes(mnfstnode):
1067 1067 r = mnfst.rev(mnfstnode)
1068 1068 if r == next_rev[0]:
1069 1069 # If the last rev we looked at was the one just previous,
1070 1070 # we only need to see a diff.
1071 1071 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1072 1072 # For each line in the delta
1073 1073 for dline in delta.splitlines():
1074 1074 # get the filename and filenode for that line
1075 1075 f, fnode = dline.split('\0')
1076 1076 fnode = bin(fnode[:40])
1077 1077 f = changedfiles.get(f, None)
1078 1078 # And if the file is in the list of files we care
1079 1079 # about.
1080 1080 if f is not None:
1081 1081 # Get the changenode this manifest belongs to
1082 1082 clnode = msng_mnfst_set[mnfstnode]
1083 1083 # Create the set of filenodes for the file if
1084 1084 # there isn't one already.
1085 1085 ndset = msng_filenode_set.setdefault(f, {})
1086 1086 # And set the filenode's changelog node to the
1087 1087 # manifest's if it hasn't been set already.
1088 1088 ndset.setdefault(fnode, clnode)
1089 1089 else:
1090 1090 # Otherwise we need a full manifest.
1091 1091 m = mnfst.read(mnfstnode)
1092 1092 # For every file in we care about.
1093 1093 for f in changedfiles:
1094 1094 fnode = m.get(f, None)
1095 1095 # If it's in the manifest
1096 1096 if fnode is not None:
1097 1097 # See comments above.
1098 1098 clnode = msng_mnfst_set[mnfstnode]
1099 1099 ndset = msng_filenode_set.setdefault(f, {})
1100 1100 ndset.setdefault(fnode, clnode)
1101 1101 # Remember the revision we hope to see next.
1102 1102 next_rev[0] = r + 1
1103 1103 return collect_msng_filenodes
1104 1104
1105 1105 # We have a list of filenodes we think we need for a file, lets remove
1106 1106 # all those we now the recipient must have.
1107 1107 def prune_filenodes(f, filerevlog):
1108 1108 msngset = msng_filenode_set[f]
1109 1109 hasset = {}
1110 1110 # If a 'missing' filenode thinks it belongs to a changenode we
1111 1111 # assume the recipient must have, then the recipient must have
1112 1112 # that filenode.
1113 1113 for n in msngset:
1114 1114 clnode = cl.node(filerevlog.linkrev(n))
1115 1115 if clnode in has_cl_set:
1116 1116 hasset[n] = 1
1117 1117 prune_parents(filerevlog, hasset, msngset)
1118 1118
1119 1119 # A function generator function that sets up the a context for the
1120 1120 # inner function.
1121 1121 def lookup_filenode_link_func(fname):
1122 1122 msngset = msng_filenode_set[fname]
1123 1123 # Lookup the changenode the filenode belongs to.
1124 1124 def lookup_filenode_link(fnode):
1125 1125 return msngset[fnode]
1126 1126 return lookup_filenode_link
1127 1127
1128 1128 # Now that we have all theses utility functions to help out and
1129 1129 # logically divide up the task, generate the group.
1130 1130 def gengroup():
1131 1131 # The set of changed files starts empty.
1132 1132 changedfiles = {}
1133 1133 # Create a changenode group generator that will call our functions
1134 1134 # back to lookup the owning changenode and collect information.
1135 1135 group = cl.group(msng_cl_lst, identity,
1136 1136 manifest_and_file_collector(changedfiles))
1137 1137 for chnk in group:
1138 1138 yield chnk
1139 1139
1140 1140 # The list of manifests has been collected by the generator
1141 1141 # calling our functions back.
1142 1142 prune_manifests()
1143 1143 msng_mnfst_lst = msng_mnfst_set.keys()
1144 1144 # Sort the manifestnodes by revision number.
1145 1145 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1146 1146 # Create a generator for the manifestnodes that calls our lookup
1147 1147 # and data collection functions back.
1148 1148 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1149 1149 filenode_collector(changedfiles))
1150 1150 for chnk in group:
1151 1151 yield chnk
1152 1152
1153 1153 # These are no longer needed, dereference and toss the memory for
1154 1154 # them.
1155 1155 msng_mnfst_lst = None
1156 1156 msng_mnfst_set.clear()
1157 1157
1158 1158 changedfiles = changedfiles.keys()
1159 1159 changedfiles.sort()
1160 1160 # Go through all our files in order sorted by name.
1161 1161 for fname in changedfiles:
1162 1162 filerevlog = self.file(fname)
1163 1163 # Toss out the filenodes that the recipient isn't really
1164 1164 # missing.
1165 1165 prune_filenodes(fname, filerevlog)
1166 1166 msng_filenode_lst = msng_filenode_set[fname].keys()
1167 1167 # If any filenodes are left, generate the group for them,
1168 1168 # otherwise don't bother.
1169 1169 if len(msng_filenode_lst) > 0:
1170 1170 yield struct.pack(">l", len(fname) + 4) + fname
1171 1171 # Sort the filenodes by their revision #
1172 1172 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1173 1173 # Create a group generator and only pass in a changenode
1174 1174 # lookup function as we need to collect no information
1175 1175 # from filenodes.
1176 1176 group = filerevlog.group(msng_filenode_lst,
1177 1177 lookup_filenode_link_func(fname))
1178 1178 for chnk in group:
1179 1179 yield chnk
1180 1180 # Don't need this anymore, toss it to free memory.
1181 1181 del msng_filenode_set[fname]
1182 1182 # Signal that no more groups are left.
1183 1183 yield struct.pack(">l", 0)
1184 1184
1185 1185 return util.chunkbuffer(gengroup())
1186 1186
    def changegroup(self, basenodes):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them."""
        cl = self.changelog
        # every changeset descending from basenodes is part of the group
        nodes = cl.nodesbetween(basenodes, None)[0]
        # changelog revision numbers being sent, as a dict for O(1) lookup
        revset = dict.fromkeys([cl.rev(n) for n in nodes])

        # a changeset always belongs to itself, so the changenode lookup
        # function for the changelog group is the identity
        def identity(x):
            return x

        # yield the nodes of revlog (in revision order) whose linked
        # changeset is part of the outgoing revset
        def gennodelst(revlog):
            for r in xrange(0, revlog.count()):
                n = revlog.node(r)
                if revlog.linkrev(n) in revset:
                    yield n

        # closure factory: records every file touched by each changeset we
        # serialize, so we know which filelogs to send afterwards
        def changed_file_collector(changedfileset):
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        # closure factory: maps a manifest/file node back to the changeset
        # node it was introduced by, via its linkrev
        def lookuprevlink_func(revlog):
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(n))
            return lookuprevlink

        # stream the group: changelog chunks, then manifest chunks, then one
        # length-prefixed section per changed file, ending with a 0 chunk
        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk
            changedfiles = changedfiles.keys()
            changedfiles.sort()

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in changedfiles:
                filerevlog = self.file(fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                # skip the file section entirely when no nodes are needed
                if nodeiter:
                    # a file section starts with its length-prefixed name
                    yield struct.pack(">l", len(fname) + 4) + fname
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            # a zero-length chunk signals the end of the changegroup
            yield struct.pack(">l", 0)

        return util.chunkbuffer(gengroup())
1246 1246
    def addchangegroup(self, source):
        """Apply a changegroup read from source (a file-like object).

        Adds the changesets, manifests and file revisions it contains to
        the repository inside a single transaction, then runs the
        'changegroup' and 'commit' hooks.  Returns 1 on hook failure,
        None otherwise."""

        # read one length-prefixed chunk; "" means end of the current group
        def getchunk():
            d = source.read(4)
            if not d: return ""
            l = struct.unpack(">l", d)[0]
            # the 4-byte length prefix counts itself, so <= 4 is a terminator
            if l <= 4: return ""
            d = source.read(l - 4)
            if len(d) < l - 4:
                raise repo.RepoError(_("premature EOF reading chunk"
                                       " (got %d bytes, expected %d)")
                                     % (len(d), l - 4))
            return d

        # iterate the chunks of a single group up to its terminator
        def getgroup():
            while 1:
                c = getchunk()
                if not c: break
                yield c

        # linkrev callback for changelog chunks: new changesets always link
        # to the revision about to be added
        def csmap(x):
            self.ui.debug(_("add changeset %s\n") % short(x))
            return self.changelog.count()

        # linkrev callback for manifest/file chunks: link back to the
        # already-added changeset
        def revmap(x):
            return self.changelog.rev(x)

        if not source: return
        changesets = files = revisions = 0

        tr = self.transaction()

        # remember head count so we can report any new heads afterwards
        oldheads = len(self.changelog.heads())

        # pull off the changeset group
        self.ui.status(_("adding changesets\n"))
        co = self.changelog.tip()
        cn = self.changelog.addgroup(getgroup(), csmap, tr, 1) # unique
        cnr, cor = map(self.changelog.rev, (cn, co))
        if cn == nullid:
            cnr = cor
        changesets = cnr - cor

        # pull off the manifest group
        self.ui.status(_("adding manifests\n"))
        mm = self.manifest.tip()
        mo = self.manifest.addgroup(getgroup(), revmap, tr)

        # process the files: alternating length-prefixed filename chunks and
        # revision groups, until the empty terminator chunk
        self.ui.status(_("adding file changes\n"))
        while 1:
            f = getchunk()
            if not f: break
            self.ui.debug(_("adding %s revisions\n") % f)
            fl = self.file(f)
            o = fl.count()
            n = fl.addgroup(getgroup(), revmap, tr)
            revisions += fl.count() - o
            files += 1

        newheads = len(self.changelog.heads())
        heads = ""
        if oldheads and newheads > oldheads:
            heads = _(" (+%d heads)") % (newheads - oldheads)

        self.ui.status(_("added %d changesets"
                         " with %d changes to %d files%s\n")
                       % (changesets, revisions, files, heads))

        tr.close()

        if changesets > 0:
            # the changegroup hook can veto nothing at this point (the
            # transaction is already closed) but its failure is reported
            if not self.hook("changegroup",
                              node=hex(self.changelog.node(cor+1))):
                self.ui.warn(_("abort: changegroup hook returned failure!\n"))
                return 1

            # run the commit hook once for every new changeset
            for i in range(cor + 1, cnr + 1):
                self.hook("commit", node=hex(self.changelog.node(i)))

        return
1328 1328
    def update(self, node, allow=False, force=False, choose=None,
               moddirstate=True):
        """Update the working directory to changeset node.

        allow  -- permit an update that spans branches (a merge)
        force  -- ignore outstanding changes/merges and clobber files
        choose -- optional predicate; files for which it returns false
                  are left untouched
        moddirstate -- if False, leave the dirstate unmodified

        Returns 1 on failure, None on success."""
        pl = self.dirstate.parents()
        # refuse to update over an uncommitted merge unless forced
        if not force and pl[1] != nullid:
            self.ui.warn(_("aborting: outstanding uncommitted merges\n"))
            return 1

        p1, p2 = pl[0], node
        pa = self.changelog.ancestor(p1, p2)
        # manifests (and their exec-flag tables) for local (1), remote (2)
        # and the common ancestor (a)
        m1n = self.changelog.read(p1)[0]
        m2n = self.changelog.read(p2)[0]
        man = self.manifest.ancestor(m1n, m2n)
        m1 = self.manifest.read(m1n)
        mf1 = self.manifest.readflags(m1n)
        m2 = self.manifest.read(m2n)
        mf2 = self.manifest.readflags(m2n)
        ma = self.manifest.read(man)
        mfa = self.manifest.readflags(man)

        # (changed, added, deleted, unknown) files in the working dir
        (c, a, d, u) = self.changes()

        # is this a jump, or a merge?  i.e. is there a linear path
        # from p1 to p2?
        linear_path = (pa == p1 or pa == p2)

        # resolve the manifest to determine which files
        # we care about merging
        self.ui.note(_("resolving manifests\n"))
        self.ui.debug(_(" force %s allow %s moddirstate %s linear %s\n") %
                      (force, allow, moddirstate, linear_path))
        self.ui.debug(_(" ancestor %s local %s remote %s\n") %
                      (short(man), short(m1n), short(m2n)))

        merge = {}   # files needing a 3-way merge: f -> (my, other, flag)
        get = {}     # files to fetch from the remote revision: f -> node
        remove = []  # files to delete from the working directory

        # construct a working dir manifest
        mw = m1.copy()
        mfw = mf1.copy()
        umap = dict.fromkeys(u)

        # added/changed/unknown files exist in the working dir but have no
        # known node yet; pick up their exec bit from the filesystem
        for f in a + c + u:
            mw[f] = ""
            mfw[f] = util.is_exec(self.wjoin(f), mfw.get(f, False))

        for f in d:
            if f in mw: del mw[f]

            # If we're jumping between revisions (as opposed to merging),
            # and if neither the working directory nor the target rev has
            # the file, then we need to remove it from the dirstate, to
            # prevent the dirstate from listing the file when it is no
            # longer in the manifest.
            if moddirstate and linear_path and f not in m2:
                self.dirstate.forget((f,))

        # Compare manifests
        for f, n in mw.iteritems():
            if choose and not choose(f): continue
            if f in m2:
                s = 0

                # is the wfile new since m1, and match m2?
                if f not in m1:
                    t1 = self.wread(f)
                    t2 = self.file(f).read(m2[f])
                    if cmp(t1, t2) == 0:
                        n = m2[f]
                    del t1, t2

                # are files different?
                if n != m2[f]:
                    a = ma.get(f, nullid)
                    # are both different from the ancestor?
                    if n != a and m2[f] != a:
                        self.ui.debug(_(" %s versions differ, resolve\n") % f)
                        # merge executable bits
                        # "if we changed or they changed, change in merge"
                        a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
                        mode = ((a^b) | (a^c)) ^ a
                        merge[f] = (m1.get(f, nullid), m2[f], mode)
                        s = 1
                    # are we clobbering?
                    # is remote's version newer?
                    # or are we going back in time?
                    elif force or m2[f] != a or (p2 == pa and mw[f] == m1[f]):
                        self.ui.debug(_(" remote %s is newer, get\n") % f)
                        get[f] = m2[f]
                        s = 1
                elif f in umap:
                    # this unknown file is the same as the checkout
                    get[f] = m2[f]

                # content agreed, but the exec bit may still differ
                if not s and mfw[f] != mf2[f]:
                    if force:
                        self.ui.debug(_(" updating permissions for %s\n") % f)
                        util.set_exec(self.wjoin(f), mf2[f])
                    else:
                        # same two-changed/one-changed rule as for contents
                        a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
                        mode = ((a^b) | (a^c)) ^ a
                        if mode != b:
                            self.ui.debug(_(" updating permissions for %s\n") % f)
                            util.set_exec(self.wjoin(f), mode)
                # handled; anything left in m2 afterwards is remote-only
                del m2[f]
            elif f in ma:
                if n != ma[f]:
                    # we changed it locally, remote deleted it: ask
                    r = _("d")
                    if not force and (linear_path or allow):
                        r = self.ui.prompt(
                            (_(" local changed %s which remote deleted\n") % f) +
                            _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
                    if r == _("d"):
                        remove.append(f)
                else:
                    self.ui.debug(_("other deleted %s\n") % f)
                    remove.append(f) # other deleted it
            else:
                # file is created on branch or in working directory
                if force and f not in umap:
                    self.ui.debug(_("remote deleted %s, clobbering\n") % f)
                    remove.append(f)
                elif n == m1.get(f, nullid): # same as parent
                    if p2 == pa: # going backwards?
                        self.ui.debug(_("remote deleted %s\n") % f)
                        remove.append(f)
                    else:
                        self.ui.debug(_("local modified %s, keeping\n") % f)
                else:
                    self.ui.debug(_("working dir created %s, keeping\n") % f)

        # whatever survived in m2 exists only on the remote side
        for f, n in m2.iteritems():
            if choose and not choose(f): continue
            if f[0] == "/": continue
            if f in ma and n != ma[f]:
                # remote changed a file we deleted locally: ask
                r = _("k")
                if not force and (linear_path or allow):
                    r = self.ui.prompt(
                        (_("remote changed %s which local deleted\n") % f) +
                        _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
                if r == _("k"): get[f] = n
            elif f not in ma:
                self.ui.debug(_("remote created %s\n") % f)
                get[f] = n
            else:
                if force or p2 == pa: # going backwards?
                    self.ui.debug(_("local deleted %s, recreating\n") % f)
                    get[f] = n
                else:
                    self.ui.debug(_("local deleted %s\n") % f)

        del mw, m1, m2, ma

        # with force, every would-be merge becomes a plain fetch of the
        # remote version
        if force:
            for f in merge:
                get[f] = merge[f][1]
            merge = {}

        if linear_path or force:
            # we don't need to do any magic, just jump to the new rev
            branch_merge = False
            p1, p2 = p2, nullid
        else:
            if not allow:
                self.ui.status(_("this update spans a branch"
                                 " affecting the following files:\n"))
                fl = merge.keys() + get.keys()
                fl.sort()
                for f in fl:
                    cf = ""
                    if f in merge: cf = _(" (resolve)")
                    self.ui.status(" %s%s\n" % (f, cf))
                self.ui.warn(_("aborting update spanning branches!\n"))
                self.ui.status(_("(use update -m to merge across branches"
                                 " or -C to lose changes)\n"))
                return 1
            branch_merge = True

        if moddirstate:
            self.dirstate.setparents(p1, p2)

        # get the files we don't need to change
        files = get.keys()
        files.sort()
        for f in files:
            if f[0] == "/": continue
            self.ui.note(_("getting %s\n") % f)
            t = self.file(f).read(get[f])
            try:
                self.wwrite(f, t)
            except IOError, e:
                if e.errno != errno.ENOENT:
                    raise
                # missing parent directory: create it and retry once
                os.makedirs(os.path.dirname(self.wjoin(f)))
                self.wwrite(f, t)
            util.set_exec(self.wjoin(f), mf2[f])
            if moddirstate:
                if branch_merge:
                    # mtime -1 forces a future status() to re-inspect it
                    self.dirstate.update([f], 'n', st_mtime=-1)
                else:
                    self.dirstate.update([f], 'n')

        # merge the tricky bits
        files = merge.keys()
        files.sort()
        for f in files:
            self.ui.status(_("merging %s\n") % f)
            my, other, flag = merge[f]
            self.merge3(f, my, other)
            util.set_exec(self.wjoin(f), flag)
            if moddirstate:
                if branch_merge:
                    # We've done a branch merge, mark this file as merged
                    # so that we properly record the merger later
                    self.dirstate.update([f], 'm')
                else:
                    # We've update-merged a locally modified file, so
                    # we set the dirstate to emulate a normal checkout
                    # of that file some time in the past. Thus our
                    # merge will appear as a normal local file
                    # modification.
                    f_len = len(self.file(f).read(other))
                    self.dirstate.update([f], 'n', st_size=f_len, st_mtime=-1)

        remove.sort()
        for f in remove:
            self.ui.note(_("removing %s\n") % f)
            try:
                util.unlink(self.wjoin(f))
            except OSError, inst:
                # a file already gone is fine; anything else is reported
                if inst.errno != errno.ENOENT:
                    self.ui.warn(_("update failed to remove %s: %s!\n") %
                                 (f, inst.strerror))
        if moddirstate:
            if branch_merge:
                self.dirstate.update(remove, 'r')
            else:
                self.dirstate.forget(remove)
1567 1567
1568 1568 def merge3(self, fn, my, other):
1569 1569 """perform a 3-way merge in the working directory"""
1570 1570
1571 1571 def temp(prefix, node):
1572 1572 pre = "%s~%s." % (os.path.basename(fn), prefix)
1573 1573 (fd, name) = tempfile.mkstemp("", pre)
1574 1574 f = os.fdopen(fd, "wb")
1575 1575 self.wwrite(fn, fl.read(node), f)
1576 1576 f.close()
1577 1577 return name
1578 1578
1579 1579 fl = self.file(fn)
1580 1580 base = fl.ancestor(my, other)
1581 1581 a = self.wjoin(fn)
1582 1582 b = temp("base", base)
1583 1583 c = temp("other", other)
1584 1584
1585 1585 self.ui.note(_("resolving %s\n") % fn)
1586 1586 self.ui.debug(_("file %s: my %s other %s ancestor %s\n") %
1587 1587 (fn, short(my), short(other), short(base)))
1588 1588
1589 1589 cmd = (os.environ.get("HGMERGE") or self.ui.config("ui", "merge")
1590 1590 or "hgmerge")
1591 1591 r = os.system('%s "%s" "%s" "%s"' % (cmd, a, b, c))
1592 1592 if r:
1593 1593 self.ui.warn(_("merging %s failed!\n") % fn)
1594 1594
1595 1595 os.unlink(b)
1596 1596 os.unlink(c)
1597 1597
1598 1598 def verify(self):
1599 1599 filelinkrevs = {}
1600 1600 filenodes = {}
1601 1601 changesets = revisions = files = 0
1602 1602 errors = [0]
1603 1603 neededmanifests = {}
1604 1604
1605 1605 def err(msg):
1606 1606 self.ui.warn(msg + "\n")
1607 1607 errors[0] += 1
1608 1608
1609 1609 seen = {}
1610 1610 self.ui.status(_("checking changesets\n"))
1611 1611 for i in range(self.changelog.count()):
1612 1612 changesets += 1
1613 1613 n = self.changelog.node(i)
1614 1614 l = self.changelog.linkrev(n)
1615 1615 if l != i:
1616 1616 err(_("incorrect link (%d) for changeset revision %d") %(l, i))
1617 1617 if n in seen:
1618 1618 err(_("duplicate changeset at revision %d") % i)
1619 1619 seen[n] = 1
1620 1620
1621 1621 for p in self.changelog.parents(n):
1622 1622 if p not in self.changelog.nodemap:
1623 1623 err(_("changeset %s has unknown parent %s") %
1624 1624 (short(n), short(p)))
1625 1625 try:
1626 1626 changes = self.changelog.read(n)
1627 1627 except Exception, inst:
1628 1628 err(_("unpacking changeset %s: %s") % (short(n), inst))
1629 1629
1630 1630 neededmanifests[changes[0]] = n
1631 1631
1632 1632 for f in changes[3]:
1633 1633 filelinkrevs.setdefault(f, []).append(i)
1634 1634
1635 1635 seen = {}
1636 1636 self.ui.status(_("checking manifests\n"))
1637 1637 for i in range(self.manifest.count()):
1638 1638 n = self.manifest.node(i)
1639 1639 l = self.manifest.linkrev(n)
1640 1640
1641 1641 if l < 0 or l >= self.changelog.count():
1642 1642 err(_("bad manifest link (%d) at revision %d") % (l, i))
1643 1643
1644 1644 if n in neededmanifests:
1645 1645 del neededmanifests[n]
1646 1646
1647 1647 if n in seen:
1648 1648 err(_("duplicate manifest at revision %d") % i)
1649 1649
1650 1650 seen[n] = 1
1651 1651
1652 1652 for p in self.manifest.parents(n):
1653 1653 if p not in self.manifest.nodemap:
1654 1654 err(_("manifest %s has unknown parent %s") %
1655 1655 (short(n), short(p)))
1656 1656
1657 1657 try:
1658 1658 delta = mdiff.patchtext(self.manifest.delta(n))
1659 1659 except KeyboardInterrupt:
1660 1660 self.ui.warn(_("interrupted"))
1661 1661 raise
1662 1662 except Exception, inst:
1663 1663 err(_("unpacking manifest %s: %s") % (short(n), inst))
1664 1664
1665 1665 ff = [ l.split('\0') for l in delta.splitlines() ]
1666 1666 for f, fn in ff:
1667 1667 filenodes.setdefault(f, {})[bin(fn[:40])] = 1
1668 1668
1669 1669 self.ui.status(_("crosschecking files in changesets and manifests\n"))
1670 1670
1671 1671 for m,c in neededmanifests.items():
1672 1672 err(_("Changeset %s refers to unknown manifest %s") %
1673 1673 (short(m), short(c)))
1674 1674 del neededmanifests
1675 1675
1676 1676 for f in filenodes:
1677 1677 if f not in filelinkrevs:
1678 1678 err(_("file %s in manifest but not in changesets") % f)
1679 1679
1680 1680 for f in filelinkrevs:
1681 1681 if f not in filenodes:
1682 1682 err(_("file %s in changeset but not in manifest") % f)
1683 1683
1684 1684 self.ui.status(_("checking files\n"))
1685 1685 ff = filenodes.keys()
1686 1686 ff.sort()
1687 1687 for f in ff:
1688 1688 if f == "/dev/null": continue
1689 1689 files += 1
1690 1690 fl = self.file(f)
1691 1691 nodes = { nullid: 1 }
1692 1692 seen = {}
1693 1693 for i in range(fl.count()):
1694 1694 revisions += 1
1695 1695 n = fl.node(i)
1696 1696
1697 1697 if n in seen:
1698 1698 err(_("%s: duplicate revision %d") % (f, i))
1699 1699 if n not in filenodes[f]:
1700 1700 err(_("%s: %d:%s not in manifests") % (f, i, short(n)))
1701 1701 else:
1702 1702 del filenodes[f][n]
1703 1703
1704 1704 flr = fl.linkrev(n)
1705 1705 if flr not in filelinkrevs[f]:
1706 1706 err(_("%s:%s points to unexpected changeset %d")
1707 1707 % (f, short(n), flr))
1708 1708 else:
1709 1709 filelinkrevs[f].remove(flr)
1710 1710
1711 1711 # verify contents
1712 1712 try:
1713 1713 t = fl.read(n)
1714 1714 except Exception, inst:
1715 1715 err(_("unpacking file %s %s: %s") % (f, short(n), inst))
1716 1716
1717 1717 # verify parents
1718 1718 (p1, p2) = fl.parents(n)
1719 1719 if p1 not in nodes:
1720 1720 err(_("file %s:%s unknown parent 1 %s") %
1721 1721 (f, short(n), short(p1)))
1722 1722 if p2 not in nodes:
1723 1723 err(_("file %s:%s unknown parent 2 %s") %
1724 1724 (f, short(n), short(p1)))
1725 1725 nodes[n] = 1
1726 1726
1727 1727 # cross-check
1728 1728 for node in filenodes[f]:
1729 1729 err(_("node %s in manifests not in %s") % (hex(node), f))
1730 1730
1731 1731 self.ui.status(_("%d files, %d changesets, %d total revisions\n") %
1732 1732 (files, changesets, revisions))
1733 1733
1734 1734 if errors[0]:
1735 1735 self.ui.warn(_("%d integrity errors encountered!\n") % errors[0])
1736 1736 return 1
@@ -1,149 +1,152
1 1 # ui.py - user interface bits for mercurial
2 2 #
3 3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 import os, ConfigParser
9 9 from i18n import gettext as _
10 10 from demandload import *
11 11 demandload(globals(), "re socket sys util")
12 12
13 13 class ui:
14 14 def __init__(self, verbose=False, debug=False, quiet=False,
15 15 interactive=True):
16 16 self.overlay = {}
17 17 self.cdata = ConfigParser.SafeConfigParser()
18 self.cdata.read(util.rcpath)
18 self.readconfig(util.rcpath)
19 19
20 20 self.quiet = self.configbool("ui", "quiet")
21 21 self.verbose = self.configbool("ui", "verbose")
22 22 self.debugflag = self.configbool("ui", "debug")
23 23 self.interactive = self.configbool("ui", "interactive", True)
24 24
25 25 self.updateopts(verbose, debug, quiet, interactive)
26 26
27 27 def updateopts(self, verbose=False, debug=False, quiet=False,
28 28 interactive=True):
29 29 self.quiet = (self.quiet or quiet) and not verbose and not debug
30 30 self.verbose = (self.verbose or verbose) or debug
31 31 self.debugflag = (self.debugflag or debug)
32 32 self.interactive = (self.interactive and interactive)
33 33
34 def readconfig(self, fp):
35 self.cdata.readfp(fp)
34 def readconfig(self, fn):
35 try:
36 self.cdata.read(fn)
37 except ConfigParser.ParsingError, inst:
38 raise util.Abort(_("Failed to parse %s\n%s") % (fn, inst))
36 39
37 40 def setconfig(self, section, name, val):
38 41 self.overlay[(section, name)] = val
39 42
40 43 def config(self, section, name, default=None):
41 44 if self.overlay.has_key((section, name)):
42 45 return self.overlay[(section, name)]
43 46 if self.cdata.has_option(section, name):
44 47 return self.cdata.get(section, name)
45 48 return default
46 49
47 50 def configbool(self, section, name, default=False):
48 51 if self.overlay.has_key((section, name)):
49 52 return self.overlay[(section, name)]
50 53 if self.cdata.has_option(section, name):
51 54 return self.cdata.getboolean(section, name)
52 55 return default
53 56
54 57 def configitems(self, section):
55 58 if self.cdata.has_section(section):
56 59 return self.cdata.items(section)
57 60 return []
58 61
59 62 def walkconfig(self):
60 63 seen = {}
61 64 for (section, name), value in self.overlay.iteritems():
62 65 yield section, name, value
63 66 seen[section, name] = 1
64 67 for section in self.cdata.sections():
65 68 for name, value in self.cdata.items(section):
66 69 if (section, name) in seen: continue
67 70 yield section, name, value.replace('\n', '\\n')
68 71 seen[section, name] = 1
69 72
70 73 def extensions(self):
71 74 return self.configitems("extensions")
72 75
73 76 def username(self):
74 77 return (os.environ.get("HGUSER") or
75 78 self.config("ui", "username") or
76 79 os.environ.get("EMAIL") or
77 80 (os.environ.get("LOGNAME",
78 81 os.environ.get("USERNAME", "unknown"))
79 82 + '@' + socket.getfqdn()))
80 83
81 84 def shortuser(self, user):
82 85 """Return a short representation of a user name or email address."""
83 86 if not self.verbose:
84 87 f = user.find('@')
85 88 if f >= 0:
86 89 user = user[:f]
87 90 f = user.find('<')
88 91 if f >= 0:
89 92 user = user[f+1:]
90 93 return user
91 94
92 95 def expandpath(self, loc, root=""):
93 96 paths = {}
94 97 for name, path in self.configitems("paths"):
95 98 m = path.find("://")
96 99 if m == -1:
97 100 path = os.path.join(root, path)
98 101 paths[name] = path
99 102
100 103 return paths.get(loc, loc)
101 104
102 105 def write(self, *args):
103 106 for a in args:
104 107 sys.stdout.write(str(a))
105 108
106 109 def write_err(self, *args):
107 110 sys.stdout.flush()
108 111 for a in args:
109 112 sys.stderr.write(str(a))
110 113
111 114 def readline(self):
112 115 return sys.stdin.readline()[:-1]
113 116 def prompt(self, msg, pat, default="y"):
114 117 if not self.interactive: return default
115 118 while 1:
116 119 self.write(msg, " ")
117 120 r = self.readline()
118 121 if re.match(pat, r):
119 122 return r
120 123 else:
121 124 self.write(_("unrecognized response\n"))
122 125 def status(self, *msg):
123 126 if not self.quiet: self.write(*msg)
124 127 def warn(self, *msg):
125 128 self.write_err(*msg)
126 129 def note(self, *msg):
127 130 if self.verbose: self.write(*msg)
128 131 def debug(self, *msg):
129 132 if self.debugflag: self.write(*msg)
130 133 def edit(self, text):
131 134 import tempfile
132 135 (fd, name) = tempfile.mkstemp("hg")
133 136 f = os.fdopen(fd, "w")
134 137 f.write(text)
135 138 f.close()
136 139
137 140 editor = (os.environ.get("HGEDITOR") or
138 141 self.config("ui", "editor") or
139 142 os.environ.get("EDITOR", "vi"))
140 143
141 144 os.environ["HGUSER"] = self.username()
142 145 util.system("%s %s" % (editor, name), errprefix=_("edit failed"))
143 146
144 147 t = open(name).read()
145 148 t = re.sub("(?m)^HG:.*\n", "", t)
146 149
147 150 os.unlink(name)
148 151
149 152 return t
General Comments 0
You need to be logged in to leave comments. Login now