##// END OF EJS Templates
misc: adding missing file close() calls...
Matt Mackall -
r15407:ee112eb6 stable
parent child Browse files
Show More
@@ -1,104 +1,105
1 1 # ignore.py - ignored file handling for mercurial
2 2 #
3 3 # Copyright 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from i18n import _
9 9 import util, match
10 10 import re
11 11
12 12 _commentre = None
13 13
def ignorepats(lines):
    '''parse lines (iterable) of .hgignore text, returning a tuple of
    (patterns, parse errors). These patterns should be given to compile()
    to be validated and converted into a match function.'''
    # map of "syntax: X" names to the internal pattern prefix they select
    syntaxes = {'re': 'relre:', 'regexp': 'relre:', 'glob': 'relglob:'}
    # default until the first "syntax:" line: unrooted regular expressions
    syntax = 'relre:'
    patterns = []
    warnings = []

    for line in lines:
        if "#" in line:
            global _commentre
            if not _commentre:
                # group 1 captures everything up to (and including) an even
                # run of backslashes, so "\\#" is treated as escaped
                _commentre = re.compile(r'((^|[^\\])(\\\\)*)#.*')
            # remove comments prefixed by an even number of escapes
            line = _commentre.sub(r'\1', line)
            # fixup properly escaped comments that survived the above
            line = line.replace("\\#", "#")
        line = line.rstrip()
        if not line:
            continue

        if line.startswith('syntax:'):
            # switch the default prefix for all following lines
            s = line[7:].strip()
            try:
                syntax = syntaxes[s]
            except KeyError:
                warnings.append(_("ignoring invalid syntax '%s'") % s)
            continue
        # default: apply the currently selected syntax prefix
        pat = syntax + line
        for s, rels in syntaxes.iteritems():
            if line.startswith(rels):
                # already carries an internal prefix; use it verbatim
                pat = line
                break
            elif line.startswith(s+':'):
                # explicit per-line "name:pattern" override
                pat = rels + line[len(s)+1:]
                break
        patterns.append(pat)

    return patterns, warnings
54 54
55 55 def ignore(root, files, warn):
56 56 '''return matcher covering patterns in 'files'.
57 57
58 58 the files parsed for patterns include:
59 59 .hgignore in the repository root
60 60 any additional files specified in the [ui] section of ~/.hgrc
61 61
62 62 trailing white space is dropped.
63 63 the escape character is backslash.
64 64 comments start with #.
65 65 empty lines are skipped.
66 66
67 67 lines can be of the following formats:
68 68
69 69 syntax: regexp # defaults following lines to non-rooted regexps
70 70 syntax: glob # defaults following lines to non-rooted globs
71 71 re:pattern # non-rooted regular expression
72 72 glob:pattern # non-rooted glob
73 73 pattern # pattern of the current default type'''
74 74
75 75 pats = {}
76 76 for f in files:
77 77 try:
78 78 pats[f] = []
79 79 fp = open(f)
80 80 pats[f], warnings = ignorepats(fp)
81 fp.close()
81 82 for warning in warnings:
82 83 warn("%s: %s\n" % (f, warning))
83 84 except IOError, inst:
84 85 if f != files[0]:
85 86 warn(_("skipping unreadable ignore file '%s': %s\n") %
86 87 (f, inst.strerror))
87 88
88 89 allpats = []
89 90 for patlist in pats.values():
90 91 allpats.extend(patlist)
91 92 if not allpats:
92 93 return util.never
93 94
94 95 try:
95 96 ignorefunc = match.match(root, '', [], allpats)
96 97 except util.Abort:
97 98 # Re-raise an exception where the src is the right file
98 99 for f, patlist in pats.iteritems():
99 100 try:
100 101 match.match(root, '', [], patlist)
101 102 except util.Abort, inst:
102 103 raise util.Abort('%s: %s' % (f, inst[0]))
103 104
104 105 return ignorefunc
@@ -1,1279 +1,1280
1 1 # revlog.py - storage back-end for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 """Storage back-end for Mercurial.
9 9
10 10 This provides efficient delta storage with O(1) retrieve and append
11 11 and O(changes) merge between branches.
12 12 """
13 13
14 14 # import stuff from node for others to import from revlog
15 15 from node import bin, hex, nullid, nullrev
16 16 from i18n import _
17 17 import ancestor, mdiff, parsers, error, util, dagutil
18 18 import struct, zlib, errno
19 19
# local aliases of frequently used library functions (avoids repeated
# attribute lookups in hot paths)
_pack = struct.pack
_unpack = struct.unpack
_compress = zlib.compress
_decompress = zlib.decompress
_sha = util.sha1

# revlog header flags
REVLOGV0 = 0
REVLOGNG = 1
REVLOGNGINLINEDATA = (1 << 16)  # revision data stored inside the index file
REVLOGGENERALDELTA = (1 << 17)  # base field names the delta parent directly
REVLOG_DEFAULT_FLAGS = REVLOGNGINLINEDATA
REVLOG_DEFAULT_FORMAT = REVLOGNG
REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
REVLOGNG_FLAGS = REVLOGNGINLINEDATA | REVLOGGENERALDELTA  # all known NG flags

# revlog index flags
REVIDX_KNOWN_FLAGS = 0

# max size of revlog with inline data
_maxinline = 131072
_chunksize = 1048576  # cap on the in-memory chunk cache (see _addchunk)

# re-export the exception types callers catch
RevlogError = error.RevlogError
LookupError = error.LookupError
45 45
def getoffset(q):
    """Extract the data-file offset packed in the high bits of *q*."""
    return int(q >> 16)

def gettype(q):
    """Extract the 16-bit flags/type field from the low bits of *q*."""
    return int(q & 0xFFFF)

def offset_type(offset, type):
    """Pack *offset* and *type* into the single integer stored in the
    first index field (offset in the high bits, type in the low 16)."""
    packed = long(offset) << 16
    return long(packed | type)
54 54
# precomputed hash of the null node, cloned below for speed
nullhash = _sha(nullid)

def hash(text, p1, p2):
    """generate a hash from the given text and its parent hashes

    This hash combines both the current file contents and its history
    in a manner that makes it easy to distinguish nodes with the same
    content in the revision graph.
    """
    if p2 == nullid:
        # by convention a single null parent is always p2; copying the
        # precomputed hash is faster than hashing nullid again
        s = nullhash.copy()
        s.update(p1)
    else:
        # order the parents canonically so hash(t, a, b) == hash(t, b, a)
        lo, hi = sorted([p1, p2])
        s = _sha(lo)
        s.update(hi)
    s.update(text)
    return s.digest()
77 77
def compress(text):
    """ generate a possibly-compressed representation of text """
    if not text:
        return ("", text)
    l = len(text)
    bin = None
    if l < 44:
        # tiny texts never win from compression; store them verbatim
        pass
    elif l > 1000000:
        # zlib makes an internal copy, thus doubling memory usage for
        # large files, so lets do this in pieces
        z = zlib.compressobj()
        pieces = []
        pos = 0
        while pos < l:
            end = pos + 2**20
            pieces.append(z.compress(text[pos:end]))
            pos = end
        pieces.append(z.flush())
        if sum(map(len, pieces)) < l:
            bin = "".join(pieces)
    else:
        bin = _compress(text)
    if bin is None or len(bin) > l:
        # compression did not pay off: store uncompressed with a 'u'
        # marker, unless the text already starts with NUL (which can
        # never begin compressed data)
        if text[0] == '\0':
            return ("", text)
        return ('u', text)
    return ("", bin)
106 106
def decompress(bin):
    """ decompress the given input """
    if not bin:
        return bin
    marker = bin[0]
    if marker == '\0':
        # NUL marks data stored verbatim (see compress())
        return bin
    if marker == 'x':
        # 'x' is the first byte of a zlib stream
        return _decompress(bin)
    if marker == 'u':
        # explicit "uncompressed" marker; strip it
        return bin[1:]
    raise RevlogError(_("unknown compression type %r") % marker)
119 119
# v0 entry: offset, compressed length, base rev, linkrev (4 longs)
# followed by parent-1 node, parent-2 node and nodeid (3 x 20 bytes)
indexformatv0 = ">4l20s20s20s"
v0shaoffset = 56  # byte offset of the nodeid inside a v0 entry (4*4 + 2*20)
122 122
class revlogoldio(object):
    """Parser/serializer for the original (version 0) index format."""

    def __init__(self):
        # fixed byte size of one v0 index entry
        self.size = struct.calcsize(indexformatv0)

    def parseindex(self, data, inline):
        # v0 entries are rewritten into the revlogv1 in-memory tuple
        # layout so the rest of the code handles a single format
        s = self.size
        index = []
        nodemap = {nullid: nullrev}
        n = off = 0
        l = len(data)
        while off + s <= l:
            cur = data[off:off + s]
            off += s
            e = _unpack(indexformatv0, cur)
            # transform to revlogv1 format
            e2 = (offset_type(e[0], 0), e[1], -1, e[2], e[3],
                  nodemap.get(e[4], nullrev), nodemap.get(e[5], nullrev), e[6])
            index.append(e2)
            nodemap[e[6]] = n
            n += 1

        # add the magic null revision at -1
        index.append((0, 0, 0, -1, -1, -1, -1, nullid))

        return index, nodemap, None

    def packentry(self, entry, node, version, rev):
        # v0 has no room for per-revision flags
        if gettype(entry[0]):
            raise RevlogError(_("index entry flags need RevlogNG"))
        e2 = (getoffset(entry[0]), entry[1], entry[3], entry[4],
              node(entry[5]), node(entry[6]), entry[7])
        return _pack(indexformatv0, *e2)
155 155
# index ng:
#  6 bytes: offset
#  2 bytes: flags
#  4 bytes: compressed length
#  4 bytes: uncompressed length
#  4 bytes: base rev
#  4 bytes: link rev
#  4 bytes: parent 1 rev
#  4 bytes: parent 2 rev
# 32 bytes: nodeid
indexformatng = ">Qiiiiii20s12x"
ngshaoffset = 32  # byte offset of the nodeid in an NG entry (8 + 6*4)
versionformat = ">I"  # the version header stored in the first 4 bytes
169 169
class revlogio(object):
    """Parser/serializer for the RevlogNG index format."""

    def __init__(self):
        # fixed byte size of one NG index entry
        self.size = struct.calcsize(indexformatng)

    def parseindex(self, data, inline):
        # delegate the heavy lifting to the C implementation
        index, cache = parsers.parse_index2(data, inline)
        return index, None, cache

    def packentry(self, entry, node, version, rev):
        packed = _pack(indexformatng, *entry)
        if rev == 0:
            # the first 4 bytes of entry 0 double as the revlog version
            # header; overwrite them
            packed = _pack(versionformat, version) + packed[4:]
        return packed
184 184
185 185 class revlog(object):
186 186 """
187 187 the underlying revision storage object
188 188
189 189 A revlog consists of two parts, an index and the revision data.
190 190
191 191 The index is a file with a fixed record size containing
192 192 information on each revision, including its nodeid (hash), the
193 193 nodeids of its parents, the position and offset of its data within
194 194 the data file, and the revision it's based on. Finally, each entry
195 195 contains a linkrev entry that can serve as a pointer to external
196 196 data.
197 197
198 198 The revision data itself is a linear collection of data chunks.
199 199 Each chunk represents a revision and is usually represented as a
200 200 delta against the previous chunk. To bound lookup time, runs of
201 201 deltas are limited to about 2 times the length of the original
202 202 version data. This makes retrieval of a version proportional to
203 203 its size, or O(1) relative to the number of revisions.
204 204
205 205 Both pieces of the revlog are written to in an append-only
206 206 fashion, which means we never need to rewrite a file to insert or
207 207 remove data, and can use some simple techniques to avoid the need
208 208 for locking while reading.
209 209 """
210 210 def __init__(self, opener, indexfile):
211 211 """
212 212 create a revlog object
213 213
214 214 opener is a function that abstracts the file opening operation
215 215 and can be used to implement COW semantics or the like.
216 216 """
217 217 self.indexfile = indexfile
218 218 self.datafile = indexfile[:-2] + ".d"
219 219 self.opener = opener
220 220 self._cache = None
221 221 self._basecache = (0, 0)
222 222 self._chunkcache = (0, '')
223 223 self.index = []
224 224 self._pcache = {}
225 225 self._nodecache = {nullid: nullrev}
226 226 self._nodepos = None
227 227
228 228 v = REVLOG_DEFAULT_VERSION
229 229 opts = getattr(opener, 'options', None)
230 230 if opts is not None:
231 231 if 'revlogv1' in opts:
232 232 if 'generaldelta' in opts:
233 233 v |= REVLOGGENERALDELTA
234 234 else:
235 235 v = 0
236 236
237 237 i = ''
238 238 self._initempty = True
239 239 try:
240 240 f = self.opener(self.indexfile)
241 241 i = f.read()
242 242 f.close()
243 243 if len(i) > 0:
244 244 v = struct.unpack(versionformat, i[:4])[0]
245 245 self._initempty = False
246 246 except IOError, inst:
247 247 if inst.errno != errno.ENOENT:
248 248 raise
249 249
250 250 self.version = v
251 251 self._inline = v & REVLOGNGINLINEDATA
252 252 self._generaldelta = v & REVLOGGENERALDELTA
253 253 flags = v & ~0xFFFF
254 254 fmt = v & 0xFFFF
255 255 if fmt == REVLOGV0 and flags:
256 256 raise RevlogError(_("index %s unknown flags %#04x for format v0")
257 257 % (self.indexfile, flags >> 16))
258 258 elif fmt == REVLOGNG and flags & ~REVLOGNG_FLAGS:
259 259 raise RevlogError(_("index %s unknown flags %#04x for revlogng")
260 260 % (self.indexfile, flags >> 16))
261 261 elif fmt > REVLOGNG:
262 262 raise RevlogError(_("index %s unknown format %d")
263 263 % (self.indexfile, fmt))
264 264
265 265 self._io = revlogio()
266 266 if self.version == REVLOGV0:
267 267 self._io = revlogoldio()
268 268 try:
269 269 d = self._io.parseindex(i, self._inline)
270 270 except (ValueError, IndexError):
271 271 raise RevlogError(_("index %s is corrupted") % (self.indexfile))
272 272 self.index, nodemap, self._chunkcache = d
273 273 if nodemap is not None:
274 274 self.nodemap = self._nodecache = nodemap
275 275 if not self._chunkcache:
276 276 self._chunkclear()
277 277
278 278 def tip(self):
279 279 return self.node(len(self.index) - 2)
280 280 def __len__(self):
281 281 return len(self.index) - 1
282 282 def __iter__(self):
283 283 for i in xrange(len(self)):
284 284 yield i
285 285
    @util.propertycache
    def nodemap(self):
        # Looking up the first node populates self._nodecache as a side
        # effect of rev(); after that the cache itself is the complete
        # node -> rev map.
        self.rev(self.node(0))
        return self._nodecache
290 290
    def rev(self, node):
        """Return the revision number of *node*, raising LookupError if
        it is not in this revlog."""
        try:
            return self._nodecache[node]
        except KeyError:
            # Cache miss: scan the index backwards from where the last
            # scan stopped (_nodepos), filling the cache as we go so
            # repeated lookups amortize to one pass over the index.
            n = self._nodecache
            i = self.index
            p = self._nodepos
            if p is None:
                # start from the last real revision (the index ends with
                # a sentinel null entry)
                p = len(i) - 2
            for r in xrange(p, -1, -1):
                v = i[r][7]
                n[v] = r
                if v == node:
                    self._nodepos = r - 1
                    return r
            raise LookupError(node, self.indexfile, _('no node'))
307 307
    # Index entries are 8-tuples (see the "index ng" layout above):
    #   0: packed offset/flags, 1: compressed length, 2: uncompressed
    #   length, 3: base rev, 4: link rev, 5/6: parent revs, 7: nodeid
    def node(self, rev):
        return self.index[rev][7]
    def linkrev(self, rev):
        return self.index[rev][4]
    def parents(self, node):
        i = self.index
        d = i[self.rev(node)]
        return i[d[5]][7], i[d[6]][7] # map revisions to nodes inline
    def parentrevs(self, rev):
        return self.index[rev][5:7]
    def start(self, rev):
        # the high bits of field 0 hold the data-file offset
        return int(self.index[rev][0] >> 16)
    def end(self, rev):
        return self.start(rev) + self.length(rev)
    def length(self, rev):
        return self.index[rev][1]
    def chainbase(self, rev):
        # follow base pointers until a revision is its own base (a full
        # snapshot, the start of the delta chain)
        index = self.index
        base = index[rev][3]
        while base != rev:
            rev = base
            base = index[rev][3]
        return base
    def flags(self, rev):
        # per-revision flags live in the low 16 bits of field 0
        return self.index[rev][0] & 0xFFFF
    def rawsize(self, rev):
        """return the length of the uncompressed text for a given revision"""
        l = self.index[rev][2]
        if l >= 0:
            return l

        # stored size is unknown (-1): reconstruct the text and measure
        t = self.revision(self.node(rev))
        return len(t)
    size = rawsize
341 341 size = rawsize
342 342
343 343 def reachable(self, node, stop=None):
344 344 """return the set of all nodes ancestral to a given node, including
345 345 the node itself, stopping when stop is matched"""
346 346 reachable = set((node,))
347 347 visit = [node]
348 348 if stop:
349 349 stopn = self.rev(stop)
350 350 else:
351 351 stopn = 0
352 352 while visit:
353 353 n = visit.pop(0)
354 354 if n == stop:
355 355 continue
356 356 if n == nullid:
357 357 continue
358 358 for p in self.parents(n):
359 359 if self.rev(p) < stopn:
360 360 continue
361 361 if p not in reachable:
362 362 reachable.add(p)
363 363 visit.append(p)
364 364 return reachable
365 365
366 366 def ancestors(self, *revs):
367 367 """Generate the ancestors of 'revs' in reverse topological order.
368 368
369 369 Yield a sequence of revision numbers starting with the parents
370 370 of each revision in revs, i.e., each revision is *not* considered
371 371 an ancestor of itself. Results are in breadth-first order:
372 372 parents of each rev in revs, then parents of those, etc. Result
373 373 does not include the null revision."""
374 374 visit = list(revs)
375 375 seen = set([nullrev])
376 376 while visit:
377 377 for parent in self.parentrevs(visit.pop(0)):
378 378 if parent not in seen:
379 379 visit.append(parent)
380 380 seen.add(parent)
381 381 yield parent
382 382
383 383 def descendants(self, *revs):
384 384 """Generate the descendants of 'revs' in revision order.
385 385
386 386 Yield a sequence of revision numbers starting with a child of
387 387 some rev in revs, i.e., each revision is *not* considered a
388 388 descendant of itself. Results are ordered by revision number (a
389 389 topological sort)."""
390 390 first = min(revs)
391 391 if first == nullrev:
392 392 for i in self:
393 393 yield i
394 394 return
395 395
396 396 seen = set(revs)
397 397 for i in xrange(first + 1, len(self)):
398 398 for x in self.parentrevs(i):
399 399 if x != nullrev and x in seen:
400 400 seen.add(i)
401 401 yield i
402 402 break
403 403
    def findcommonmissing(self, common=None, heads=None):
        """Return a tuple of the ancestors of common and the ancestors of heads
        that are not ancestors of common.

        More specifically, the second element is a list of nodes N such that
        every N satisfies the following constraints:

          1. N is an ancestor of some node in 'heads'
          2. N is not an ancestor of any node in 'common'

        The list is sorted by revision number, meaning it is
        topologically sorted.

        'heads' and 'common' are both lists of node IDs.  If heads is
        not supplied, uses all of the revlog's heads.  If common is not
        supplied, uses nullid."""
        if common is None:
            common = [nullid]
        if heads is None:
            heads = self.heads()

        # work in revision numbers from here on
        common = [self.rev(n) for n in common]
        heads = [self.rev(n) for n in heads]

        # we want the ancestors, but inclusive
        has = set(self.ancestors(*common))
        has.add(nullrev)
        has.update(common)

        # take all ancestors from heads that aren't in has
        missing = set()
        visit = [r for r in heads if r not in has]
        while visit:
            r = visit.pop(0)
            if r in missing:
                continue
            else:
                missing.add(r)
                for p in self.parentrevs(r):
                    if p not in has:
                        visit.append(p)
        # sorting by rev number yields a topological order
        missing = list(missing)
        missing.sort()
        return has, [self.node(r) for r in missing]
448 448
449 449 def findmissing(self, common=None, heads=None):
450 450 """Return the ancestors of heads that are not ancestors of common.
451 451
452 452 More specifically, return a list of nodes N such that every N
453 453 satisfies the following constraints:
454 454
455 455 1. N is an ancestor of some node in 'heads'
456 456 2. N is not an ancestor of any node in 'common'
457 457
458 458 The list is sorted by revision number, meaning it is
459 459 topologically sorted.
460 460
461 461 'heads' and 'common' are both lists of node IDs. If heads is
462 462 not supplied, uses all of the revlog's heads. If common is not
463 463 supplied, uses nullid."""
464 464 _common, missing = self.findcommonmissing(common, heads)
465 465 return missing
466 466
    def nodesbetween(self, roots=None, heads=None):
        """Return a topological path from 'roots' to 'heads'.

        Return a tuple (nodes, outroots, outheads) where 'nodes' is a
        topologically sorted list of all nodes N that satisfy both of
        these constraints:

          1. N is a descendant of some node in 'roots'
          2. N is an ancestor of some node in 'heads'

        Every node is considered to be both a descendant and an ancestor
        of itself, so every reachable node in 'roots' and 'heads' will be
        included in 'nodes'.

        'outroots' is the list of reachable nodes in 'roots', i.e., the
        subset of 'roots' that is returned in 'nodes'.  Likewise,
        'outheads' is the subset of 'heads' that is also in 'nodes'.

        'roots' and 'heads' are both lists of node IDs.  If 'roots' is
        unspecified, uses nullid as the only root.  If 'heads' is
        unspecified, uses list of all of the revlog's heads."""
        nonodes = ([], [], [])
        if roots is not None:
            roots = list(roots)
            if not roots:
                return nonodes
            lowestrev = min([self.rev(n) for n in roots])
        else:
            roots = [nullid] # Everybody's a descendant of nullid
            lowestrev = nullrev
        if (lowestrev == nullrev) and (heads is None):
            # We want _all_ the nodes!
            return ([self.node(r) for r in self], [nullid], list(self.heads()))
        if heads is None:
            # All nodes are ancestors, so the latest ancestor is the last
            # node.
            highestrev = len(self) - 1
            # Set ancestors to None to signal that every node is an ancestor.
            ancestors = None
            # Set heads to an empty dictionary for later discovery of heads
            heads = {}
        else:
            heads = list(heads)
            if not heads:
                return nonodes
            ancestors = set()
            # Turn heads into a dictionary so we can remove 'fake' heads.
            # Also, later we will be using it to filter out the heads we can't
            # find from roots.
            heads = dict.fromkeys(heads, False)
            # Start at the top and keep marking parents until we're done.
            nodestotag = set(heads)
            # Remember where the top was so we can use it as a limit later.
            highestrev = max([self.rev(n) for n in nodestotag])
            while nodestotag:
                # grab a node to tag
                n = nodestotag.pop()
                # Never tag nullid
                if n == nullid:
                    continue
                # A node's revision number represents its place in a
                # topologically sorted list of nodes.
                r = self.rev(n)
                if r >= lowestrev:
                    if n not in ancestors:
                        # If we are possibly a descendant of one of the roots
                        # and we haven't already been marked as an ancestor
                        ancestors.add(n) # Mark as ancestor
                        # Add non-nullid parents to list of nodes to tag.
                        nodestotag.update([p for p in self.parents(n) if
                                           p != nullid])
                    elif n in heads: # We've seen it before, is it a fake head?
                        # So it is, real heads should not be the ancestors of
                        # any other heads.
                        heads.pop(n)
            if not ancestors:
                return nonodes
        # Now that we have our set of ancestors, we want to remove any
        # roots that are not ancestors.

        # If one of the roots was nullid, everything is included anyway.
        if lowestrev > nullrev:
            # But, since we weren't, let's recompute the lowest rev to not
            # include roots that aren't ancestors.

            # Filter out roots that aren't ancestors of heads
            roots = [n for n in roots if n in ancestors]
            # Recompute the lowest revision
            if roots:
                lowestrev = min([self.rev(n) for n in roots])
            else:
                # No more roots?  Return empty list
                return nonodes
        else:
            # We are descending from nullid, and don't need to care about
            # any other roots.
            lowestrev = nullrev
            roots = [nullid]
        # Transform our roots list into a set.
        descendants = set(roots)
        # Also, keep the original roots so we can filter out roots that aren't
        # 'real' roots (i.e. are descended from other roots).
        roots = descendants.copy()
        # Our topologically sorted list of output nodes.
        orderedout = []
        # Don't start at nullid since we don't want nullid in our output list,
        # and if nullid shows up in descendants, empty parents will look like
        # they're descendants.
        # Walk revisions in numeric (i.e. topological) order.
        for r in xrange(max(lowestrev, 0), highestrev + 1):
            n = self.node(r)
            isdescendant = False
            if lowestrev == nullrev:  # Everybody is a descendant of nullid
                isdescendant = True
            elif n in descendants:
                # n is already a descendant
                isdescendant = True
                # This check only needs to be done here because all the roots
                # will start being marked as descendants before the loop.
                if n in roots:
                    # If n was a root, check if it's a 'real' root.
                    p = tuple(self.parents(n))
                    # If any of its parents are descendants, it's not a root.
                    if (p[0] in descendants) or (p[1] in descendants):
                        roots.remove(n)
            else:
                p = tuple(self.parents(n))
                # A node is a descendant if either of its parents are
                # descendants.  (We seeded the dependents list with the roots
                # up there, remember?)
                if (p[0] in descendants) or (p[1] in descendants):
                    descendants.add(n)
                    isdescendant = True
            if isdescendant and ((ancestors is None) or (n in ancestors)):
                # Only include nodes that are both descendants and ancestors.
                orderedout.append(n)
                if (ancestors is not None) and (n in heads):
                    # We're trying to figure out which heads are reachable
                    # from roots.
                    # Mark this head as having been reached
                    heads[n] = True
                elif ancestors is None:
                    # Otherwise, we're trying to discover the heads.
                    # Assume this is a head because if it isn't, the next step
                    # will eventually remove it.
                    heads[n] = True
                    # But, obviously its parents aren't.
                    for p in self.parents(n):
                        heads.pop(p, None)
        heads = [n for n, flag in heads.iteritems() if flag]
        roots = list(roots)
        assert orderedout
        assert roots
        assert heads
        return (orderedout, roots, heads)
621 621
622 622 def headrevs(self):
623 623 count = len(self)
624 624 if not count:
625 625 return [nullrev]
626 626 ishead = [1] * (count + 1)
627 627 index = self.index
628 628 for r in xrange(count):
629 629 e = index[r]
630 630 ishead[e[5]] = ishead[e[6]] = 0
631 631 return [r for r in xrange(count) if ishead[r]]
632 632
    def heads(self, start=None, stop=None):
        """return the list of all nodes that have no children

        if start is specified, only heads that are descendants of
        start will be returned
        if stop is specified, it will consider all the revs from stop
        as if they had no children
        """
        if start is None and stop is None:
            # common case: use the simpler whole-index computation
            if not len(self):
                return [nullid]
            return [self.node(r) for r in self.headrevs()]

        if start is None:
            start = nullid
        if stop is None:
            stop = []
        stoprevs = set([self.rev(n) for n in stop])
        startrev = self.rev(start)
        reachable = set((startrev,))
        heads = set((startrev,))

        parentrevs = self.parentrevs
        # forward pass: a revision reachable from start becomes a head
        # candidate and unseats its parent
        for r in xrange(startrev + 1, len(self)):
            for p in parentrevs(r):
                if p in reachable:
                    if r not in stoprevs:
                        reachable.add(r)
                        heads.add(r)
                    if p in heads and p not in stoprevs:
                        heads.remove(p)

        return [self.node(r) for r in heads]
666 666
667 667 def children(self, node):
668 668 """find the children of a given node"""
669 669 c = []
670 670 p = self.rev(node)
671 671 for r in range(p + 1, len(self)):
672 672 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
673 673 if prevs:
674 674 for pr in prevs:
675 675 if pr == p:
676 676 c.append(self.node(r))
677 677 elif p == nullrev:
678 678 c.append(self.node(r))
679 679 return c
680 680
681 681 def descendant(self, start, end):
682 682 if start == nullrev:
683 683 return True
684 684 for i in self.descendants(start):
685 685 if i == end:
686 686 return True
687 687 elif i > end:
688 688 break
689 689 return False
690 690
    def ancestor(self, a, b):
        """calculate the least common ancestor of nodes a and b"""

        # fast path, check if it is a descendant
        a, b = self.rev(a), self.rev(b)
        start, end = sorted((a, b))
        if self.descendant(start, end):
            # one revision descends from the other: the older one is the
            # common ancestor
            return self.node(start)

        def parents(rev):
            # adapter for ancestor.ancestor: parent revs without nullrev
            return [p for p in self.parentrevs(rev) if p != nullrev]

        c = ancestor.ancestor(a, b, parents)
        if c is None:
            # no common ancestor besides the null revision
            return nullid

        return self.node(c)
708 708
    def _match(self, id):
        """Resolve *id* (an int rev, str(rev), binary node or full hex
        node) to a binary node, or return None when nothing matches."""
        if isinstance(id, (long, int)):
            # rev
            return self.node(id)
        if len(id) == 20:
            # possibly a binary node
            # odds of a binary node being all hex in ASCII are 1 in 10**25
            try:
                node = id
                self.rev(node) # quick search the index
                return node
            except LookupError:
                pass # may be partial hex id
        try:
            # str(rev)
            rev = int(id)
            if str(rev) != id:
                # reject strings int() accepts but that aren't canonical,
                # e.g. '01' or ' 1'
                raise ValueError
            if rev < 0:
                # negative revisions count back from the tip
                rev = len(self) + rev
            if rev < 0 or rev >= len(self):
                raise ValueError
            return self.node(rev)
        except (ValueError, OverflowError):
            pass
        if len(id) == 40:
            try:
                # a full hex nodeid?
                node = bin(id)
                self.rev(node)
                return node
            except (TypeError, LookupError):
                pass
742 742
    def _partialmatch(self, id):
        """Resolve a hex nodeid prefix *id* to a binary node.

        Returns None when nothing matches; raises LookupError when more
        than one node matches.  Successful lookups are cached."""
        if id in self._pcache:
            return self._pcache[id]

        if len(id) < 40:
            try:
                # hex(node)[:...]
                l = len(id) // 2 # grab an even number of digits
                prefix = bin(id[:l * 2])
                # first filter by the binary prefix, then re-check the full
                # (possibly odd-length) hex prefix on the survivors
                nl = [e[7] for e in self.index if e[7].startswith(prefix)]
                nl = [n for n in nl if hex(n).startswith(id)]
                if len(nl) > 0:
                    if len(nl) == 1:
                        self._pcache[id] = nl[0]
                        return nl[0]
                    raise LookupError(id, self.indexfile,
                                      _('ambiguous identifier'))
                return None
            except TypeError:
                # bin() rejects non-hex input
                pass
763 763
764 764 def lookup(self, id):
765 765 """locate a node based on:
766 766 - revision number or str(revision number)
767 767 - nodeid or subset of hex nodeid
768 768 """
769 769 n = self._match(id)
770 770 if n is not None:
771 771 return n
772 772 n = self._partialmatch(id)
773 773 if n:
774 774 return n
775 775
776 776 raise LookupError(id, self.indexfile, _('no match found'))
777 777
778 778 def cmp(self, node, text):
779 779 """compare text with a given file revision
780 780
781 781 returns True if text is different than what is stored.
782 782 """
783 783 p1, p2 = self.parents(node)
784 784 return hash(text, p1, p2) != node
785 785
786 786 def _addchunk(self, offset, data):
787 787 o, d = self._chunkcache
788 788 # try to add to existing cache
789 789 if o + len(d) == offset and len(d) + len(data) < _chunksize:
790 790 self._chunkcache = o, d + data
791 791 else:
792 792 self._chunkcache = offset, data
793 793
794 794 def _loadchunk(self, offset, length):
795 795 if self._inline:
796 796 df = self.opener(self.indexfile)
797 797 else:
798 798 df = self.opener(self.datafile)
799 799
800 800 readahead = max(65536, length)
801 801 df.seek(offset)
802 802 d = df.read(readahead)
803 df.close()
803 804 self._addchunk(offset, d)
804 805 if readahead > length:
805 806 return d[:length]
806 807 return d
807 808
808 809 def _getchunk(self, offset, length):
809 810 o, d = self._chunkcache
810 811 l = len(d)
811 812
812 813 # is it in the cache?
813 814 cachestart = offset - o
814 815 cacheend = cachestart + length
815 816 if cachestart >= 0 and cacheend <= l:
816 817 if cachestart == 0 and cacheend == l:
817 818 return d # avoid a copy
818 819 return d[cachestart:cacheend]
819 820
820 821 return self._loadchunk(offset, length)
821 822
822 823 def _chunkraw(self, startrev, endrev):
823 824 start = self.start(startrev)
824 825 length = self.end(endrev) - start
825 826 if self._inline:
826 827 start += (startrev + 1) * self._io.size
827 828 return self._getchunk(start, length)
828 829
    def _chunk(self, rev):
        # decompressed data chunk of a single revision
        return decompress(self._chunkraw(rev, rev))

    def _chunkbase(self, rev):
        # chunk of the base revision of a delta chain
        return self._chunk(rev)

    def _chunkclear(self):
        # reset the chunk cache to empty
        self._chunkcache = (0, '')
837 838
838 839 def deltaparent(self, rev):
839 840 """return deltaparent of the given revision"""
840 841 base = self.index[rev][3]
841 842 if base == rev:
842 843 return nullrev
843 844 elif self._generaldelta:
844 845 return base
845 846 else:
846 847 return rev - 1
847 848
def revdiff(self, rev1, rev2):
    """return or calculate a delta between two revisions

    Reuses the stored delta when rev2 is stored as a delta against
    rev1; otherwise computes a fresh textdiff of the two fulltexts.
    """
    if rev1 != nullrev and self.deltaparent(rev2) == rev1:
        return self._chunk(rev2)

    old = self.revision(self.node(rev1))
    new = self.revision(self.node(rev2))
    return mdiff.textdiff(old, new)
855 856
def revision(self, node):
    """return an uncompressed revision of a given node

    Resolves the delta chain for the node (reusing the one-entry
    fulltext cache where possible), reads the needed chunks, applies
    the deltas in order and verifies the result against the node hash.
    """
    cachedrev = None
    if node == nullid:
        return ""
    if self._cache:
        if self._cache[0] == node:
            return self._cache[2]
        # remember the cached rev: the delta chain may stop there
        cachedrev = self._cache[1]

    # look up what we need to read
    text = None
    rev = self.rev(node)

    # check rev flags
    if self.flags(rev) & ~REVIDX_KNOWN_FLAGS:
        raise RevlogError(_('incompatible revision flag %x') %
                          (self.flags(rev) & ~REVIDX_KNOWN_FLAGS))

    # build delta chain
    chain = []
    index = self.index # for performance
    generaldelta = self._generaldelta
    iterrev = rev
    e = index[iterrev]
    # walk back until a full snapshot (its base is itself) or the
    # cached revision is reached
    while iterrev != e[3] and iterrev != cachedrev:
        chain.append(iterrev)
        if generaldelta:
            iterrev = e[3]
        else:
            iterrev -= 1
        e = index[iterrev]
    chain.reverse()
    base = iterrev

    if iterrev == cachedrev:
        # cache hit
        text = self._cache[2]

    # drop cache to save memory
    self._cache = None

    # warm the chunk cache with one contiguous read covering the chain
    self._chunkraw(base, rev)
    if text is None:
        text = self._chunkbase(base)

    bins = [self._chunk(r) for r in chain]
    text = mdiff.patches(text, bins)

    text = self._checkhash(text, node, rev)

    self._cache = (node, rev, text)
    return text
909 910
def _checkhash(self, text, node, rev):
    """Verify that *text* hashes to *node* given its parents; raise
    RevlogError naming *rev* on mismatch, else return *text*."""
    p1, p2 = self.parents(node)
    if hash(text, p1, p2) != node:
        raise RevlogError(_("integrity check failed on %s:%d")
                          % (self.indexfile, rev))
    return text
916 917
def checkinlinesize(self, tr, fp=None):
    """Migrate an inline revlog to separate index/data files once the
    stored data outgrows _maxinline.

    tr is the current transaction; fp, if given, is an open handle on
    the index file and is flushed and closed before the conversion.
    No-op for non-inline or still-small revlogs.
    """
    if not self._inline or (self.start(-2) + self.length(-2)) < _maxinline:
        return

    trinfo = tr.find(self.indexfile)
    if trinfo is None:
        raise RevlogError(_("%s not found in the transaction")
                          % self.indexfile)

    trindex = trinfo[2]
    dataoff = self.start(trindex)

    tr.add(self.datafile, dataoff)

    if fp:
        fp.flush()
        fp.close()

    # copy every revision payload into the new standalone data file
    df = self.opener(self.datafile, 'w')
    try:
        for r in self:
            df.write(self._chunkraw(r, r))
    finally:
        df.close()

    # rewrite the index without the inline flag (and without the data)
    fp = self.opener(self.indexfile, 'w', atomictemp=True)
    self.version &= ~(REVLOGNGINLINEDATA)
    self._inline = False
    for i in self:
        e = self._io.packentry(self.index[i], self.node, self.version, i)
        fp.write(e)

    # if we don't call close, the temp file will never replace the
    # real index
    fp.close()

    tr.replace(self.indexfile, trindex * self._io.size)
    # cached chunks were computed against the inline layout
    self._chunkclear()
955 956
def addrevision(self, text, transaction, link, p1, p2, cachedelta=None):
    """add a revision to the log

    text - the revision data to add
    transaction - the transaction object used for rollback
    link - the linkrev data to add
    p1, p2 - the parent nodeids of the revision
    cachedelta - an optional precomputed delta

    Returns the node; if the node already exists this is a no-op.
    """
    node = hash(text, p1, p2)
    if node in self.nodemap:
        return node

    dfh = None
    if not self._inline:
        dfh = self.opener(self.datafile, "a")
    ifh = self.opener(self.indexfile, "a+")
    try:
        return self._addrevision(node, text, transaction, link, p1, p2,
                                 cachedelta, ifh, dfh)
    finally:
        # always release the file handles, even if _addrevision raises
        if dfh:
            dfh.close()
        ifh.close()
980 981
def _addrevision(self, node, text, transaction, link, p1, p2,
                 cachedelta, ifh, dfh):
    """internal function to add revisions to the log

    see addrevision for argument descriptions.
    invariants:
    - text is optional (can be None); if not set, cachedelta must be set.
      if both are set, they must correspond to eachother.
    """
    btext = [text]
    def buildtext():
        # lazily materialize the fulltext (from text, or by patching
        # cachedelta onto its base) and memoize it in btext[0]
        if btext[0] is not None:
            return btext[0]
        # flush any pending writes here so we can read it in revision
        if dfh:
            dfh.flush()
        ifh.flush()
        basetext = self.revision(self.node(cachedelta[0]))
        btext[0] = mdiff.patch(basetext, cachedelta[1])
        chk = hash(btext[0], p1, p2)
        if chk != node:
            raise RevlogError(_("consistency error in delta"))
        return btext[0]

    def builddelta(rev):
        # build a delta against rev; returns (dist, l, data, base,
        # chainbase) where dist feeds the full-version heuristic below
        # can we use the cached delta?
        if cachedelta and cachedelta[0] == rev:
            delta = cachedelta[1]
        else:
            t = buildtext()
            ptext = self.revision(self.node(rev))
            delta = mdiff.textdiff(ptext, t)
        data = compress(delta)
        l = len(data[1]) + len(data[0])
        if basecache[0] == rev:
            chainbase = basecache[1]
        else:
            chainbase = self.chainbase(rev)
        dist = l + offset - self.start(chainbase)
        if self._generaldelta:
            base = rev
        else:
            base = chainbase
        return dist, l, data, base, chainbase

    curr = len(self)
    prev = curr - 1
    base = chainbase = curr
    offset = self.end(prev)
    flags = 0
    d = None
    basecache = self._basecache
    p1r, p2r = self.rev(p1), self.rev(p2)

    # should we try to build a delta?
    if prev != nullrev:
        if self._generaldelta:
            # prefer a parent as delta base when it is recent enough
            if p1r >= basecache[1]:
                d = builddelta(p1r)
            elif p2r >= basecache[1]:
                d = builddelta(p2r)
            else:
                d = builddelta(prev)
        else:
            d = builddelta(prev)
        dist, l, data, base, chainbase = d

    # full versions are inserted when the needed deltas
    # become comparable to the uncompressed text
    if text is None:
        textlen = mdiff.patchedsize(self.rawsize(cachedelta[0]),
                                    cachedelta[1])
    else:
        textlen = len(text)
    if d is None or dist > textlen * 2:
        text = buildtext()
        data = compress(text)
        l = len(data[1]) + len(data[0])
        base = chainbase = curr

    e = (offset_type(offset, flags), l, textlen,
         base, link, p1r, p2r, node)
    # insert before the trailing sentinel entry of the index
    self.index.insert(-1, e)
    self.nodemap[node] = curr

    entry = self._io.packentry(e, self.node, self.version, curr)
    if not self._inline:
        transaction.add(self.datafile, offset)
        transaction.add(self.indexfile, curr * len(entry))
        if data[0]:
            dfh.write(data[0])
        dfh.write(data[1])
        dfh.flush()
        ifh.write(entry)
    else:
        offset += curr * self._io.size
        transaction.add(self.indexfile, offset, curr)
        ifh.write(entry)
        ifh.write(data[0])
        ifh.write(data[1])
        # inline data may now exceed the threshold; maybe split files
        self.checkinlinesize(transaction, ifh)

    if type(text) == str: # only accept immutable objects
        self._cache = (node, curr, text)
    self._basecache = (curr, chainbase)
    return node
1087 1088
def group(self, nodelist, bundler, reorder=None):
    """Calculate a delta group, yielding a sequence of changegroup chunks
    (strings).

    Given a list of changeset revs, return a set of deltas and
    metadata corresponding to nodes. The first delta is
    first parent(nodelist[0]) -> nodelist[0], the receiver is
    guaranteed to have this parent as it has all history before
    these changesets. In the case firstparent is nullrev the
    changegroup starts with a full revision.
    """

    # if we don't have any revisions touched by these changesets, bail
    if len(nodelist) == 0:
        yield bundler.close()
        return

    # for generaldelta revlogs, we linearize the revs; this will both be
    # much quicker and generate a much smaller bundle
    if (self._generaldelta and reorder is not False) or reorder:
        dag = dagutil.revlogdag(self)
        revs = set(self.rev(n) for n in nodelist)
        revs = dag.linearize(revs)
    else:
        revs = sorted([self.rev(n) for n in nodelist])

    # add the parent of the first rev; the receiver already has it, so
    # the first delta can be based on it
    p = self.parentrevs(revs[0])[0]
    revs.insert(0, p)

    # build deltas
    for r in xrange(len(revs) - 1):
        prev, curr = revs[r], revs[r + 1]
        for c in bundler.revchunk(self, curr, prev):
            yield c

    yield bundler.close()
1125 1126
def addgroup(self, bundle, linkmapper, transaction):
    """
    add a delta group

    given a set of deltas, add them to the revision log. the
    first delta is against its parent, which should be in our
    log, the rest are against the previous delta.

    Returns the node of the last revision added (None if the bundle
    contained no new chunks).
    """

    # track the base of the current delta log
    node = None

    r = len(self)
    end = 0
    if r:
        end = self.end(r - 1)
    ifh = self.opener(self.indexfile, "a+")
    isize = r * self._io.size
    if self._inline:
        transaction.add(self.indexfile, end + isize, r)
        dfh = None
    else:
        transaction.add(self.indexfile, isize, r)
        transaction.add(self.datafile, end)
        dfh = self.opener(self.datafile, "a")

    try:
        # loop through our set of deltas
        chain = None
        while True:
            chunkdata = bundle.deltachunk(chain)
            if not chunkdata:
                break
            node = chunkdata['node']
            p1 = chunkdata['p1']
            p2 = chunkdata['p2']
            cs = chunkdata['cs']
            deltabase = chunkdata['deltabase']
            delta = chunkdata['delta']

            link = linkmapper(cs)
            if node in self.nodemap:
                # this can happen if two branches make the same change
                chain = node
                continue

            for p in (p1, p2):
                if not p in self.nodemap:
                    raise LookupError(p, self.indexfile,
                                      _('unknown parent'))

            if deltabase not in self.nodemap:
                raise LookupError(deltabase, self.indexfile,
                                  _('unknown delta base'))

            baserev = self.rev(deltabase)
            chain = self._addrevision(node, None, transaction, link,
                                      p1, p2, (baserev, delta), ifh, dfh)
            if not dfh and not self._inline:
                # addrevision switched from inline to conventional
                # reopen the index
                ifh.close()
                dfh = self.opener(self.datafile, "a")
                ifh = self.opener(self.indexfile, "a")
    finally:
        # close handles even when a chunk raises (e.g. LookupError)
        if dfh:
            dfh.close()
        ifh.close()

    return node
1196 1197
def strip(self, minlink, transaction):
    """truncate the revlog on the first revision with a linkrev >= minlink

    This function is called when we're stripping revision minlink and
    its descendants from the repository.

    We have to remove all revisions with linkrev >= minlink, because
    the equivalent changelog revisions will be renumbered after the
    strip.

    So we truncate the revlog on the first of these revisions, and
    trust that the caller has saved the revisions that shouldn't be
    removed and that it'll readd them after this truncation.
    """
    if len(self) == 0:
        return

    # find the first revision to remove
    for rev in self:
        if self.index[rev][4] >= minlink:
            break
    else:
        return

    # first truncate the files on disk
    end = self.start(rev)
    if not self._inline:
        transaction.add(self.datafile, end)
        end = rev * self._io.size
    else:
        end += rev * self._io.size

    transaction.add(self.indexfile, end)

    # then reset internal state in memory to forget those revisions
    self._cache = None
    self._chunkclear()
    for x in xrange(rev, len(self)):
        del self.nodemap[self.node(x)]

    # -1 preserves the trailing sentinel entry of the index
    del self.index[rev:-1]
1237 1238
def checksize(self):
    """Compare on-disk file sizes against what the index implies.

    Returns (dd, di): extra bytes in the data file and in the index
    file respectively; 0 means the size matches, and values may be
    negative when a file is shorter than expected.  Missing files are
    treated as matching (ENOENT is ignored).
    """
    expected = 0
    if len(self):
        expected = max(0, self.end(len(self) - 1))

    try:
        f = self.opener(self.datafile)
        f.seek(0, 2)
        actual = f.tell()
        f.close()
        dd = actual - expected
    except IOError, inst:
        if inst.errno != errno.ENOENT:
            raise
        dd = 0

    try:
        f = self.opener(self.indexfile)
        f.seek(0, 2)
        actual = f.tell()
        f.close()
        s = self._io.size
        i = max(0, actual // s)
        di = actual - (i * s)
        if self._inline:
            # inline revlogs interleave data with index entries
            databytes = 0
            for r in self:
                databytes += max(0, self.length(r))
            dd = 0
            di = actual - len(self) * s - databytes
    except IOError, inst:
        if inst.errno != errno.ENOENT:
            raise
        di = 0

    return (dd, di)
1274 1275
def files(self):
    """Return the list of on-disk files backing this revlog."""
    paths = [self.indexfile]
    if not self._inline:
        paths.append(self.datafile)
    return paths
@@ -1,733 +1,734
1 1 # ui.py - user interface bits for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from i18n import _
9 9 import errno, getpass, os, socket, sys, tempfile, traceback
10 10 import config, scmutil, util, error
11 11
12 12 class ui(object):
def __init__(self, src=None):
    """Create a ui.

    With *src*, copy its streams, configs, trust sets and environment
    (used by copy()); otherwise start from the standard streams, the
    process environment, and read the global rc files as trusted.
    """
    self._buffers = []
    self.quiet = self.verbose = self.debugflag = self.tracebackflag = False
    self._reportuntrusted = True
    self._ocfg = config.config() # overlay
    self._tcfg = config.config() # trusted
    self._ucfg = config.config() # untrusted
    self._trustusers = set()
    self._trustgroups = set()

    if src:
        self.fout = src.fout
        self.ferr = src.ferr
        self.fin = src.fin

        self._tcfg = src._tcfg.copy()
        self._ucfg = src._ucfg.copy()
        self._ocfg = src._ocfg.copy()
        self._trustusers = src._trustusers.copy()
        self._trustgroups = src._trustgroups.copy()
        self.environ = src.environ
        # recompute derived state (paths, verbosity, trust)
        self.fixconfig()
    else:
        self.fout = sys.stdout
        self.ferr = sys.stderr
        self.fin = sys.stdin

        # shared read-only environment
        self.environ = os.environ
        # we always trust global config files
        for f in scmutil.rcpath():
            self.readconfig(f, trust=True)
45 45
def copy(self):
    """Return a new ui of the same (sub)class mirroring this one."""
    cls = self.__class__
    return cls(self)
48 48
def _trusted(self, fp, f):
    """Return True if config file *f* (opened as *fp*) may be trusted.

    A file is trusted when owned by the current user, or when its
    owner/group appears in trusted.users/trusted.groups ('*' trusts
    everyone).  Otherwise a warning may be emitted and False returned.
    """
    st = util.fstat(fp)
    if util.isowner(st):
        return True

    tusers, tgroups = self._trustusers, self._trustgroups
    if '*' in tusers or '*' in tgroups:
        return True

    user = util.username(st.st_uid)
    group = util.groupname(st.st_gid)
    if user in tusers or group in tgroups or user == util.username():
        return True

    if self._reportuntrusted:
        self.warn(_('Not trusting file %s from untrusted '
                    'user %s, group %s\n') % (f, user, group))
    return False
67 67
68 68 def readconfig(self, filename, root=None, trust=False,
69 69 sections=None, remap=None):
70 70 try:
71 71 fp = open(filename)
72 72 except IOError:
73 73 if not sections: # ignore unless we were looking for something
74 74 return
75 75 raise
76 76
77 77 cfg = config.config()
78 78 trusted = sections or trust or self._trusted(fp, filename)
79 79
80 80 try:
81 81 cfg.read(filename, fp, sections=sections, remap=remap)
82 fp.close()
82 83 except error.ConfigError, inst:
83 84 if trusted:
84 85 raise
85 86 self.warn(_("Ignored: %s\n") % str(inst))
86 87
87 88 if self.plain():
88 89 for k in ('debug', 'fallbackencoding', 'quiet', 'slash',
89 90 'logtemplate', 'style',
90 91 'traceback', 'verbose'):
91 92 if k in cfg['ui']:
92 93 del cfg['ui'][k]
93 94 for k, v in cfg.items('defaults'):
94 95 del cfg['defaults'][k]
95 96 # Don't remove aliases from the configuration if in the exceptionlist
96 97 if self.plain('alias'):
97 98 for k, v in cfg.items('alias'):
98 99 del cfg['alias'][k]
99 100
100 101 if trusted:
101 102 self._tcfg.update(cfg)
102 103 self._tcfg.update(self._ocfg)
103 104 self._ucfg.update(cfg)
104 105 self._ucfg.update(self._ocfg)
105 106
106 107 if root is None:
107 108 root = os.path.expanduser('~')
108 109 self.fixconfig(root=root)
109 110
def fixconfig(self, root=None, section=None):
    """Recompute state derived from the configs.

    Normalizes [paths] entries, refreshes verbosity/debug/traceback
    flags, and updates the trust sets.  With *section*, only that
    section's derived state is refreshed.
    """
    if section in (None, 'paths'):
        # expand vars and ~
        # translate paths relative to root (or home) into absolute paths
        root = root or os.getcwd()
        for c in self._tcfg, self._ucfg, self._ocfg:
            for n, p in c.items('paths'):
                if not p:
                    continue
                if '%%' in p:
                    self.warn(_("(deprecated '%%' in path %s=%s from %s)\n")
                              % (n, p, self.configsource('paths', n)))
                    p = p.replace('%%', '%')
                p = util.expandpath(p)
                if not util.hasscheme(p) and not os.path.isabs(p):
                    p = os.path.normpath(os.path.join(root, p))
                c.set("paths", n, p)

    if section in (None, 'ui'):
        # update ui options
        self.debugflag = self.configbool('ui', 'debug')
        self.verbose = self.debugflag or self.configbool('ui', 'verbose')
        self.quiet = not self.debugflag and self.configbool('ui', 'quiet')
        # debug implies verbose; quiet and verbose cancel out
        if self.verbose and self.quiet:
            self.quiet = self.verbose = False
        self._reportuntrusted = self.debugflag or self.configbool("ui",
            "report_untrusted", True)
        self.tracebackflag = self.configbool('ui', 'traceback', False)

    if section in (None, 'trusted'):
        # update trust information
        self._trustusers.update(self.configlist('trusted', 'users'))
        self._trustgroups.update(self.configlist('trusted', 'groups'))
143 144
def setconfig(self, section, name, value, overlay=True):
    """Set section.name = value in the trusted and untrusted configs
    (and, unless *overlay* is False, in the overlay config), then
    refresh state derived from *section*."""
    targets = [self._tcfg, self._ucfg]
    if overlay:
        targets.insert(0, self._ocfg)
    for cfg in targets:
        cfg.set(section, name, value)
    self.fixconfig(section=section)
150 151
def _data(self, untrusted):
    # Select the untrusted or trusted config store.  NOTE(review):
    # the pre-ternary and/or idiom falls back to _tcfg if _ucfg is
    # falsy — kept as-is to preserve that corner case.
    return untrusted and self._ucfg or self._tcfg
153 154
def configsource(self, section, name, untrusted=False):
    """Return where the option was defined, or 'none' when unknown."""
    origin = self._data(untrusted).source(section, name)
    return origin or 'none'
156 157
def config(self, section, name, default=None, untrusted=False):
    """Return the config value for section.name, or *default*.

    *name* may be a list of alternative names; the first alternative
    with a value wins.  In debug mode, warn when the untrusted config
    would have yielded a different value.
    """
    if isinstance(name, list):
        alternates = name
    else:
        alternates = [name]

    for n in alternates:
        # bug fix: look up each alternative name (n) — the original
        # looked up 'name', which may be the whole list, so the
        # alternates mechanism never worked
        value = self._data(untrusted).get(section, n, None)
        if value is not None:
            name = n
            break
    else:
        value = default

    if self.debugflag and not untrusted and self._reportuntrusted:
        uvalue = self._ucfg.get(section, name)
        if uvalue is not None and uvalue != value:
            self.debug("ignoring untrusted configuration option "
                       "%s.%s = %s\n" % (section, name, uvalue))
    return value
177 178
def configpath(self, section, name, default=None, untrusted=False):
    'get a path config item, expanded relative to repo root or config file'
    value = self.config(section, name, default, untrusted)
    if value is None:
        return None
    if not os.path.isabs(value) or "://" not in value:
        # resolve relative to the file that defined the option
        src = self.configsource(section, name, untrusted)
        if ':' in src:
            base = os.path.dirname(src.rsplit(':')[0])
            value = os.path.join(base, os.path.expanduser(value))
    return value
189 190
def configbool(self, section, name, default=False, untrusted=False):
    """parse a configuration element as a boolean

    >>> u = ui(); s = 'foo'
    >>> u.setconfig(s, 'true', 'yes')
    >>> u.configbool(s, 'true')
    True
    >>> u.setconfig(s, 'false', 'no')
    >>> u.configbool(s, 'false')
    False
    >>> u.configbool(s, 'unknown')
    False
    >>> u.configbool(s, 'unknown', True)
    True
    >>> u.setconfig(s, 'invalid', 'somevalue')
    >>> u.configbool(s, 'invalid')
    Traceback (most recent call last):
        ...
    ConfigError: foo.invalid is not a boolean ('somevalue')
    """

    raw = self.config(section, name, None, untrusted)
    if raw is None:
        return default
    if isinstance(raw, bool):
        return raw
    parsed = util.parsebool(raw)
    if parsed is None:
        raise error.ConfigError(_("%s.%s is not a boolean ('%s')")
                                % (section, name, raw))
    return parsed
221 222
def configint(self, section, name, default=None, untrusted=False):
    """parse a configuration element as an integer

    >>> u = ui(); s = 'foo'
    >>> u.setconfig(s, 'int1', '42')
    >>> u.configint(s, 'int1')
    42
    >>> u.setconfig(s, 'int2', '-42')
    >>> u.configint(s, 'int2')
    -42
    >>> u.configint(s, 'unknown', 7)
    7
    >>> u.setconfig(s, 'invalid', 'somevalue')
    >>> u.configint(s, 'invalid')
    Traceback (most recent call last):
        ...
    ConfigError: foo.invalid is not an integer ('somevalue')
    """

    raw = self.config(section, name, None, untrusted)
    if raw is None:
        return default
    try:
        return int(raw)
    except ValueError:
        raise error.ConfigError(_("%s.%s is not an integer ('%s')")
                                % (section, name, raw))
249 250
def configlist(self, section, name, default=None, untrusted=False):
    """parse a configuration element as a list of comma/space separated
    strings

    >>> u = ui(); s = 'foo'
    >>> u.setconfig(s, 'list1', 'this,is "a small" ,test')
    >>> u.configlist(s, 'list1')
    ['this', 'is', 'a small', 'test']
    """

    # state-machine parser: each _parse_* function consumes some input
    # and returns (next_state_or_None, parts_so_far, new_offset)

    def _parse_plain(parts, s, offset):
        # unquoted text; commas/whitespace separate items and a '"' at
        # the start of an item switches to quoted mode
        whitespace = False
        while offset < len(s) and (s[offset].isspace() or s[offset] == ','):
            whitespace = True
            offset += 1
        if offset >= len(s):
            return None, parts, offset
        if whitespace:
            parts.append('')
        if s[offset] == '"' and not parts[-1]:
            return _parse_quote, parts, offset + 1
        elif s[offset] == '"' and parts[-1][-1] == '\\':
            # escaped quote inside an item: keep the quote literally
            parts[-1] = parts[-1][:-1] + s[offset]
            return _parse_plain, parts, offset + 1
        parts[-1] += s[offset]
        return _parse_plain, parts, offset + 1

    def _parse_quote(parts, s, offset):
        # inside a double-quoted item (the opening quote was consumed)
        if offset < len(s) and s[offset] == '"': # ""
            parts.append('')
            offset += 1
            while offset < len(s) and (s[offset].isspace() or
                    s[offset] == ','):
                offset += 1
            return _parse_plain, parts, offset

        while offset < len(s) and s[offset] != '"':
            if (s[offset] == '\\' and offset + 1 < len(s)
                    and s[offset + 1] == '"'):
                offset += 1
                parts[-1] += '"'
            else:
                parts[-1] += s[offset]
            offset += 1

        if offset >= len(s):
            # unterminated quote: reparse the item as plain text with a
            # literal leading '"'
            real_parts = _configlist(parts[-1])
            if not real_parts:
                parts[-1] = '"'
            else:
                real_parts[0] = '"' + real_parts[0]
                parts = parts[:-1]
                parts.extend(real_parts)
            return None, parts, offset

        offset += 1
        while offset < len(s) and s[offset] in [' ', ',']:
            offset += 1

        if offset < len(s):
            if offset + 1 == len(s) and s[offset] == '"':
                # trailing lone quote attaches to the current item
                parts[-1] += '"'
                offset += 1
            else:
                parts.append('')
        else:
            return None, parts, offset

        return _parse_plain, parts, offset

    def _configlist(s):
        # drive the state machine over one raw config string
        s = s.rstrip(' ,')
        if not s:
            return []
        parser, parts, offset = _parse_plain, [''], 0
        while parser:
            parser, parts, offset = parser(parts, s, offset)
        return parts

    result = self.config(section, name, untrusted=untrusted)
    if result is None:
        result = default or []
    if isinstance(result, basestring):
        result = _configlist(result.lstrip(' ,\n'))
        if result is None:
            result = default or []
    return result
337 338
def has_section(self, section, untrusted=False):
    '''tell whether section exists in config.'''
    data = self._data(untrusted)
    return section in data
341 342
def configitems(self, section, untrusted=False):
    """Return the (name, value) pairs of *section*; in debug mode,
    report options whose trusted and untrusted values differ."""
    items = self._data(untrusted).items(section)
    if self.debugflag and not untrusted and self._reportuntrusted:
        for key, uvalue in self._ucfg.items(section):
            if self._tcfg.get(section, key) != uvalue:
                self.debug("ignoring untrusted configuration option "
                           "%s.%s = %s\n" % (section, key, uvalue))
    return items
350 351
def walkconfig(self, untrusted=False):
    """Yield (section, name, value) for every configuration entry."""
    cfg = self._data(untrusted)
    for section in cfg.sections():
        for key, value in self.configitems(section, untrusted):
            yield section, key, value
356 357
def plain(self, feature=None):
    '''is plain mode active?

    Plain mode means that all configuration variables which affect
    the behavior and output of Mercurial should be ignored, and the
    output kept stable for scripts.  It is triggered only by the
    HGPLAIN / HGPLAINEXCEPT environment variables.

    Returns False if HGPLAIN is not set or *feature* is listed in
    HGPLAINEXCEPT, True otherwise.
    '''
    plainset = 'HGPLAIN' in os.environ
    exceptset = 'HGPLAINEXCEPT' in os.environ
    if not plainset and not exceptset:
        return False
    exceptions = os.environ.get('HGPLAINEXCEPT', '').strip().split(',')
    if feature and exceptions:
        return feature not in exceptions
    return True
378 379
def username(self):
    """Return default username to be used in commits.

    Searched in this order: $HGUSER, [ui] section of hgrcs, $EMAIL
    and stop searching if one of these is set.
    If not found and ui.askusername is True, ask the user, else use
    ($LOGNAME or $USER or $LNAME or $USERNAME) + "@full.hostname".

    Raises util.Abort when nothing can be determined or the result
    contains a newline.
    """
    user = os.environ.get("HGUSER")
    if user is None:
        user = self.config("ui", "username")
        if user is not None:
            user = os.path.expandvars(user)
    if user is None:
        user = os.environ.get("EMAIL")
    if user is None and self.configbool("ui", "askusername"):
        user = self.prompt(_("enter a commit username:"), default=None)
    if user is None and not self.interactive():
        try:
            user = '%s@%s' % (util.getuser(), socket.getfqdn())
            self.warn(_("No username found, using '%s' instead\n") % user)
        except KeyError:
            pass
    if not user:
        raise util.Abort(_('no username supplied (see "hg help config")'))
    # a newline would corrupt the changelog entry format
    if "\n" in user:
        raise util.Abort(_("username %s contains a newline\n") % repr(user))
    return user
407 408
def shortuser(self, user):
    """Return a short representation of a user name or email address."""
    if self.verbose:
        return user
    return util.shortuser(user)
413 414
def expandpath(self, loc, default=None):
    """Return repository location relative to cwd or from [paths]"""
    # URLs and existing local repositories are taken verbatim
    if util.hasscheme(loc):
        return loc
    if os.path.isdir(os.path.join(loc, '.hg')):
        return loc

    path = self.config('paths', loc)
    if not path and default is not None:
        path = self.config('paths', default)
    return path or loc
423 424
def pushbuffer(self):
    """Start capturing output into a fresh buffer (see popbuffer)."""
    self._buffers.append([])
426 427
def popbuffer(self, labeled=False):
    '''pop the last buffer and return the buffered output

    If labeled is True, any labels associated with buffered
    output will be handled. By default, this has no effect
    on the output returned, but extensions and GUI tools may
    handle this argument and return styled output. If output
    is being buffered so it can be captured and parsed or
    processed, labeled should not be set to True.
    '''
    chunks = self._buffers.pop()
    return "".join(chunks)
438 439
def write(self, *args, **opts):
    '''write args to output

    By default this writes each argument, stringified, to the current
    buffer (if pushbuffer is active) or to stdout.  Extensions and GUI
    tools may override this method, write_err(), popbuffer(), and
    label() to style output.

    The optional keyword argument "label" is a space-separated string
    of label names of the form "topic.type" (e.g. "ui.debug",
    "status.modified") describing the output for styling purposes.
    '''
    if self._buffers:
        self._buffers[-1].extend(str(a) for a in args)
    else:
        for a in args:
            self.fout.write(str(a))
461 462
def write_err(self, *args, **opts):
    """Write *args* to the error stream.

    stdout is flushed first so error output appears in order; EPIPE
    and EIO errors are swallowed (e.g. the consumer went away), other
    IOErrors propagate.
    """
    try:
        if not getattr(self.fout, 'closed', False):
            self.fout.flush()
        for a in args:
            self.ferr.write(str(a))
        # stderr may be buffered under win32 when redirected to files,
        # including stdout.
        if not getattr(self.ferr, 'closed', False):
            self.ferr.flush()
    except IOError, inst:
        if inst.errno not in (errno.EPIPE, errno.EIO):
            raise
475 476
def flush(self):
    """Flush stdout, then stderr, ignoring stream errors.

    IOError (e.g. broken pipe) and ValueError (operation on a closed
    stream) are swallowed so flushing is always best-effort.  The
    original used bare ``except:`` clauses, which also silently
    swallowed KeyboardInterrupt/SystemExit; only stream-related
    errors are ignored now.
    """
    try:
        self.fout.flush()
    except (IOError, ValueError):
        pass
    try:
        self.ferr.flush()
    except (IOError, ValueError):
        pass
481 482
def interactive(self):
    '''is interactive input allowed?

    An interactive session is a session where input can be reasonably
    read from `sys.stdin'. If this function returns false, any attempt
    to read from stdin should fail with an error, unless a sensible
    default has been specified.

    Interactiveness is triggered by the value of the `ui.interactive'
    configuration variable or - if it is unset - when `sys.stdin'
    points to a terminal device.

    This function refers to input only; for output, see `ui.formatted()'.
    '''
    configured = self.configbool("ui", "interactive", None)
    if configured is not None:
        return configured
    # some environments replace stdin without implementing isatty;
    # usually those are non-interactive
    return util.isatty(self.fin)
503 504
def termwidth(self):
    '''how wide is the terminal in columns?

    Honors a numeric $COLUMNS override, otherwise asks the platform.
    '''
    if 'COLUMNS' in os.environ:
        try:
            return int(os.environ['COLUMNS'])
        except ValueError:
            pass  # non-numeric COLUMNS: fall through to detection
    return util.termwidth()
513 514
514 515 def formatted(self):
515 516 '''should formatted output be used?
516 517
517 518 It is often desirable to format the output to suite the output medium.
518 519 Examples of this are truncating long lines or colorizing messages.
519 520 However, this is not often not desirable when piping output into other
520 521 utilities, e.g. `grep'.
521 522
522 523 Formatted output is triggered by the value of the `ui.formatted'
523 524 configuration variable or - if it is unset - when `sys.stdout' points
524 525 to a terminal device. Please note that `ui.formatted' should be
525 526 considered an implementation detail; it is not intended for use outside
526 527 Mercurial or its extensions.
527 528
528 529 This function refers to output only; for input, see `ui.interactive()'.
529 530 This function always returns false when in plain mode, see `ui.plain()'.
530 531 '''
531 532 if self.plain():
532 533 return False
533 534
534 535 i = self.configbool("ui", "formatted", None)
535 536 if i is None:
536 537 # some environments replace stdout without implementing isatty
537 538 # usually those are non-interactive
538 539 return util.isatty(self.fout)
539 540
540 541 return i
541 542
542 543 def _readline(self, prompt=''):
543 544 if util.isatty(self.fin):
544 545 try:
545 546 # magically add command line editing support, where
546 547 # available
547 548 import readline
548 549 # force demandimport to really load the module
549 550 readline.read_history_file
550 551 # windows sometimes raises something other than ImportError
551 552 except Exception:
552 553 pass
553 554
554 555 # call write() so output goes through subclassed implementation
555 556 # e.g. color extension on Windows
556 557 self.write(prompt)
557 558
558 559 # instead of trying to emulate raw_input, swap (self.fin,
559 560 # self.fout) with (sys.stdin, sys.stdout)
560 561 oldin = sys.stdin
561 562 oldout = sys.stdout
562 563 sys.stdin = self.fin
563 564 sys.stdout = self.fout
564 565 line = raw_input(' ')
565 566 sys.stdin = oldin
566 567 sys.stdout = oldout
567 568
568 569 # When stdin is in binary mode on Windows, it can cause
569 570 # raw_input() to emit an extra trailing carriage return
570 571 if os.linesep == '\r\n' and line and line[-1] == '\r':
571 572 line = line[:-1]
572 573 return line
573 574
574 575 def prompt(self, msg, default="y"):
575 576 """Prompt user with msg, read response.
576 577 If ui is not interactive, the default is returned.
577 578 """
578 579 if not self.interactive():
579 580 self.write(msg, ' ', default, "\n")
580 581 return default
581 582 try:
582 583 r = self._readline(self.label(msg, 'ui.prompt'))
583 584 if not r:
584 585 return default
585 586 return r
586 587 except EOFError:
587 588 raise util.Abort(_('response expected'))
588 589
589 590 def promptchoice(self, msg, choices, default=0):
590 591 """Prompt user with msg, read response, and ensure it matches
591 592 one of the provided choices. The index of the choice is returned.
592 593 choices is a sequence of acceptable responses with the format:
593 594 ('&None', 'E&xec', 'Sym&link') Responses are case insensitive.
594 595 If ui is not interactive, the default is returned.
595 596 """
596 597 resps = [s[s.index('&')+1].lower() for s in choices]
597 598 while True:
598 599 r = self.prompt(msg, resps[default])
599 600 if r.lower() in resps:
600 601 return resps.index(r.lower())
601 602 self.write(_("unrecognized response\n"))
602 603
603 604 def getpass(self, prompt=None, default=None):
604 605 if not self.interactive():
605 606 return default
606 607 try:
607 608 return getpass.getpass(prompt or _('password: '))
608 609 except EOFError:
609 610 raise util.Abort(_('response expected'))
610 611 def status(self, *msg, **opts):
611 612 '''write status message to output (if ui.quiet is False)
612 613
613 614 This adds an output label of "ui.status".
614 615 '''
615 616 if not self.quiet:
616 617 opts['label'] = opts.get('label', '') + ' ui.status'
617 618 self.write(*msg, **opts)
618 619 def warn(self, *msg, **opts):
619 620 '''write warning message to output (stderr)
620 621
621 622 This adds an output label of "ui.warning".
622 623 '''
623 624 opts['label'] = opts.get('label', '') + ' ui.warning'
624 625 self.write_err(*msg, **opts)
625 626 def note(self, *msg, **opts):
626 627 '''write note to output (if ui.verbose is True)
627 628
628 629 This adds an output label of "ui.note".
629 630 '''
630 631 if self.verbose:
631 632 opts['label'] = opts.get('label', '') + ' ui.note'
632 633 self.write(*msg, **opts)
633 634 def debug(self, *msg, **opts):
634 635 '''write debug message to output (if ui.debugflag is True)
635 636
636 637 This adds an output label of "ui.debug".
637 638 '''
638 639 if self.debugflag:
639 640 opts['label'] = opts.get('label', '') + ' ui.debug'
640 641 self.write(*msg, **opts)
641 642 def edit(self, text, user):
642 643 (fd, name) = tempfile.mkstemp(prefix="hg-editor-", suffix=".txt",
643 644 text=True)
644 645 try:
645 646 f = os.fdopen(fd, "w")
646 647 f.write(text)
647 648 f.close()
648 649
649 650 editor = self.geteditor()
650 651
651 652 util.system("%s \"%s\"" % (editor, name),
652 653 environ={'HGUSER': user},
653 654 onerr=util.Abort, errprefix=_("edit failed"),
654 655 out=self.fout)
655 656
656 657 f = open(name)
657 658 t = f.read()
658 659 f.close()
659 660 finally:
660 661 os.unlink(name)
661 662
662 663 return t
663 664
664 665 def traceback(self, exc=None):
665 666 '''print exception traceback if traceback printing enabled.
666 667 only to call in exception handler. returns true if traceback
667 668 printed.'''
668 669 if self.tracebackflag:
669 670 if exc:
670 671 traceback.print_exception(exc[0], exc[1], exc[2], file=self.ferr)
671 672 else:
672 673 traceback.print_exc(file=self.ferr)
673 674 return self.tracebackflag
674 675
675 676 def geteditor(self):
676 677 '''return editor to use'''
677 678 return (os.environ.get("HGEDITOR") or
678 679 self.config("ui", "editor") or
679 680 os.environ.get("VISUAL") or
680 681 os.environ.get("EDITOR", "vi"))
681 682
682 683 def progress(self, topic, pos, item="", unit="", total=None):
683 684 '''show a progress message
684 685
685 686 With stock hg, this is simply a debug message that is hidden
686 687 by default, but with extensions or GUI tools it may be
687 688 visible. 'topic' is the current operation, 'item' is a
688 689 non-numeric marker of the current position (ie the currently
689 690 in-process file), 'pos' is the current numeric position (ie
690 691 revision, bytes, etc.), unit is a corresponding unit label,
691 692 and total is the highest expected pos.
692 693
693 694 Multiple nested topics may be active at a time.
694 695
695 696 All topics should be marked closed by setting pos to None at
696 697 termination.
697 698 '''
698 699
699 700 if pos is None or not self.debugflag:
700 701 return
701 702
702 703 if unit:
703 704 unit = ' ' + unit
704 705 if item:
705 706 item = ' ' + item
706 707
707 708 if total:
708 709 pct = 100.0 * pos / total
709 710 self.debug('%s:%s %s/%s%s (%4.2f%%)\n'
710 711 % (topic, item, pos, total, unit, pct))
711 712 else:
712 713 self.debug('%s:%s %s%s\n' % (topic, item, pos, unit))
713 714
    def log(self, service, message):
        '''hook for logging facility extensions

        service should be a readily-identifiable subsystem, which will
        allow filtering.
        message should be a newline-terminated string to log.
        '''
        # base implementation does nothing; logging extensions are
        # expected to override or wrap this method
        pass
722 723
723 724 def label(self, msg, label):
724 725 '''style msg based on supplied label
725 726
726 727 Like ui.write(), this just returns msg unchanged, but extensions
727 728 and GUI tools can override it to allow styling output without
728 729 writing it.
729 730
730 731 ui.write(s, 'label') is equivalent to
731 732 ui.write(ui.label(s, 'label')).
732 733 '''
733 734 return msg
General Comments 0
You need to be logged in to leave comments. Login now