util, minirst: do not crash with COLUMNS=0
Martin Geisler
r9417:4c3fb451 default
minirst.py
@@ -1,343 +1,345 @@
# minirst.py - minimal reStructuredText parser
#
# Copyright 2009 Matt Mackall <mpm@selenic.com> and others
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2, incorporated herein by reference.

"""simplified reStructuredText parser.

This parser knows just enough about reStructuredText to parse the
Mercurial docstrings.

It cheats in a major way: nested blocks are not really nested. They
are just indented blocks that look like they are nested. This relies
on the user to keep the right indentation for the blocks.

It only supports a small subset of reStructuredText:

- paragraphs

- definition lists (must use ' ' to indent definitions)

- lists (items must start with '-')

- field lists (colons cannot be escaped)

- literal blocks

- option lists (supports only long options without arguments)

- inline markup is not recognized at all.
"""

import re, sys, textwrap

def findblocks(text):
    """Find continuous blocks of lines in text.

    Returns a list of dictionaries representing the blocks. Each block
    has an 'indent' field and a 'lines' field.
    """
    blocks = [[]]
    lines = text.splitlines()
    for line in lines:
        if line.strip():
            blocks[-1].append(line)
        elif blocks[-1]:
            blocks.append([])
    if not blocks[-1]:
        del blocks[-1]

    for i, block in enumerate(blocks):
        indent = min((len(l) - len(l.lstrip())) for l in block)
        blocks[i] = dict(indent=indent, lines=[l[indent:] for l in block])
    return blocks

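# Illustrative sketch (not part of minirst.py): findblocks() splits the text
# on blank lines and records each chunk's common indentation, e.g.
#
#   >>> blocks = findblocks("some text\n\n    indented\n    block\n")
#   >>> [(b['indent'], b['lines']) for b in blocks]
#   [(0, ['some text']), (4, ['indented', 'block'])]
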
def findliteralblocks(blocks):
    """Finds literal blocks and adds a 'type' field to the blocks.

    Literal blocks are given the type 'literal', all other blocks are
    given the type 'paragraph'.
    """
    i = 0
    while i < len(blocks):
        # Searching for a block that looks like this:
        #
        # +------------------------------+
        # | paragraph                    |
        # | (ends with "::")             |
        # +------------------------------+
        #    +---------------------------+
        #    | indented literal block    |
        #    +---------------------------+
        blocks[i]['type'] = 'paragraph'
        if blocks[i]['lines'][-1].endswith('::') and i+1 < len(blocks):
            indent = blocks[i]['indent']
            adjustment = blocks[i+1]['indent'] - indent

            if blocks[i]['lines'] == ['::']:
                # Expanded form: remove block
                del blocks[i]
                i -= 1
            elif blocks[i]['lines'][-1].endswith(' ::'):
                # Partially minimized form: remove space and both
                # colons.
                blocks[i]['lines'][-1] = blocks[i]['lines'][-1][:-3]
            else:
                # Fully minimized form: remove just one colon.
                blocks[i]['lines'][-1] = blocks[i]['lines'][-1][:-1]

            # List items are formatted with a hanging indent. We must
            # correct for this here while we still have the original
            # information on the indentation of the subsequent literal
            # blocks available.
            if blocks[i]['lines'][0].startswith('- '):
                indent += 2
                adjustment -= 2

            # Mark the following indented blocks.
            while i+1 < len(blocks) and blocks[i+1]['indent'] > indent:
                blocks[i+1]['type'] = 'literal'
                blocks[i+1]['indent'] -= adjustment
                i += 1
        i += 1
    return blocks

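# Illustrative sketch (not part of minirst.py): a paragraph ending in "::"
# marks the following, more indented blocks as literal. Given the code
# above, input such as
#
#   Example::
#
#       verbatim text
#
# becomes a 'paragraph' block with the trailing colon stripped ("Example:")
# followed by a 'literal' block containing "verbatim text".
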
def findsections(blocks):
    """Finds sections.

    The blocks must have a 'type' field, i.e., they should have been
    run through findliteralblocks first.
    """
    for block in blocks:
        # Searching for a block that looks like this:
        #
        # +------------------------------+
        # | Section title                |
        # | -------------                |
        # +------------------------------+
        if (block['type'] == 'paragraph' and
            len(block['lines']) == 2 and
            block['lines'][1] == '-' * len(block['lines'][0])):
            block['type'] = 'section'
    return blocks

def findbulletlists(blocks):
    """Finds bullet lists.

    The blocks must have a 'type' field, i.e., they should have been
    run through findliteralblocks first.
    """
    i = 0
    while i < len(blocks):
        # Searching for a paragraph that looks like this:
        #
        # +------+-----------------------+
        # | "- " | list item             |
        # +------| (body elements)+      |
        #        +-----------------------+
        if (blocks[i]['type'] == 'paragraph' and
            blocks[i]['lines'][0].startswith('- ')):
            items = []
            for line in blocks[i]['lines']:
                if line.startswith('- '):
                    items.append(dict(type='bullet', lines=[],
                                      indent=blocks[i]['indent']))
                    line = line[2:]
                items[-1]['lines'].append(line)
            blocks[i:i+1] = items
            i += len(items) - 1
        i += 1
    return blocks

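# Illustrative sketch (not part of minirst.py): findbulletlists() splits one
# paragraph block into one 'bullet' block per "- " item, so
#
#   - foo
#   - bar
#
# becomes two blocks of type 'bullet' with lines ['foo'] and ['bar'], each
# keeping the indent of the original paragraph.
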
_optionre = re.compile(r'^(--[a-z-]+)((?:[ =][a-zA-Z][\w-]*)? +)(.*)$')
def findoptionlists(blocks):
    """Finds option lists.

    The blocks must have a 'type' field, i.e., they should have been
    run through findliteralblocks first.
    """
    i = 0
    while i < len(blocks):
        # Searching for a paragraph that looks like this:
        #
        # +----------------------------+-------------+
        # | "--" option " "            | description |
        # +-------+--------------------+             |
        #         | (body elements)+                 |
        #         +----------------------------------+
        if (blocks[i]['type'] == 'paragraph' and
            _optionre.match(blocks[i]['lines'][0])):
            options = []
            for line in blocks[i]['lines']:
                m = _optionre.match(line)
                if m:
                    option, arg, rest = m.groups()
                    width = len(option) + len(arg)
                    options.append(dict(type='option', lines=[],
                                        indent=blocks[i]['indent'],
                                        width=width))
                options[-1]['lines'].append(line)
            blocks[i:i+1] = options
            i += len(options) - 1
        i += 1
    return blocks

_fieldre = re.compile(r':(?![: ])([^:]*)(?<! ):( +)(.*)')
def findfieldlists(blocks):
    """Finds field lists.

    The blocks must have a 'type' field, i.e., they should have been
    run through findliteralblocks first.
    """
    i = 0
    while i < len(blocks):
        # Searching for a paragraph that looks like this:
        #
        #
        # +--------------------+----------------------+
        # | ":" field name ":" | field body           |
        # +-------+------------+                      |
        #         | (body elements)+                  |
        #         +-----------------------------------+
        if (blocks[i]['type'] == 'paragraph' and
            _fieldre.match(blocks[i]['lines'][0])):
            indent = blocks[i]['indent']
            fields = []
            for line in blocks[i]['lines']:
                m = _fieldre.match(line)
                if m:
                    key, spaces, rest = m.groups()
                    width = 2 + len(key) + len(spaces)
                    fields.append(dict(type='field', lines=[],
                                       indent=indent, width=width))
                    # Turn ":foo: bar" into "foo bar".
                    line = '%s %s%s' % (key, spaces, rest)
                fields[-1]['lines'].append(line)
            blocks[i:i+1] = fields
            i += len(fields) - 1
        i += 1
    return blocks

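# Illustrative sketch (not part of minirst.py): for a field line such as
# ":config: Set a config option", _fieldre captures the key and the spacing,
# the colons are dropped from the stored line, and 'width' records how far
# the field body should be indented when the block is wrapped later.
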
def finddefinitionlists(blocks):
    """Finds definition lists.

    The blocks must have a 'type' field, i.e., they should have been
    run through findliteralblocks first.
    """
    i = 0
    while i < len(blocks):
        # Searching for a paragraph that looks like this:
        #
        # +----------------------------+
        # | term                       |
        # +--+-------------------------+--+
        #    | definition                 |
        #    | (body elements)+           |
        #    +----------------------------+
        if (blocks[i]['type'] == 'paragraph' and
            len(blocks[i]['lines']) > 1 and
            not blocks[i]['lines'][0].startswith(' ') and
            blocks[i]['lines'][1].startswith(' ')):
            definitions = []
            for line in blocks[i]['lines']:
                if not line.startswith(' '):
                    definitions.append(dict(type='definition', lines=[],
                                            indent=blocks[i]['indent']))
                definitions[-1]['lines'].append(line)
                definitions[-1]['hang'] = len(line) - len(line.lstrip())
            blocks[i:i+1] = definitions
            i += len(definitions) - 1
        i += 1
    return blocks

def addmargins(blocks):
    """Adds empty blocks for vertical spacing.

    This groups bullets, options, and definitions together with no vertical
    space between them, and adds an empty block between all other blocks.
    """
    i = 1
    while i < len(blocks):
        if (blocks[i]['type'] == blocks[i-1]['type'] and
            blocks[i]['type'] in ('bullet', 'option', 'field', 'definition')):
            i += 1
        else:
            blocks.insert(i, dict(lines=[''], indent=0, type='margin'))
            i += 2
    return blocks

def formatblock(block, width):
    """Format a block according to width."""
+    if width <= 0:
+        width = 78
    indent = ' ' * block['indent']
    if block['type'] == 'margin':
        return ''
    elif block['type'] == 'literal':
        indent += ' '
        return indent + ('\n' + indent).join(block['lines'])
    elif block['type'] == 'section':
        return indent + ('\n' + indent).join(block['lines'])
    elif block['type'] == 'definition':
        term = indent + block['lines'][0]
        defindent = indent + block['hang'] * ' '
        text = ' '.join(map(str.strip, block['lines'][1:]))
        return "%s\n%s" % (term, textwrap.fill(text, width=width,
                                               initial_indent=defindent,
                                               subsequent_indent=defindent))
    else:
        initindent = subindent = indent
        text = ' '.join(map(str.strip, block['lines']))
        if block['type'] == 'bullet':
            initindent = indent + '- '
            subindent = indent + ' '
        elif block['type'] in ('option', 'field'):
            subindent = indent + block['width'] * ' '

        return textwrap.fill(text, width=width,
                             initial_indent=initindent,
                             subsequent_indent=subindent)

def format(text, width):
    """Parse and format the text according to width."""
    blocks = findblocks(text)
    blocks = findliteralblocks(blocks)
    blocks = findsections(blocks)
    blocks = findbulletlists(blocks)
    blocks = findoptionlists(blocks)
    blocks = findfieldlists(blocks)
    blocks = finddefinitionlists(blocks)
    blocks = addmargins(blocks)
    return '\n'.join(formatblock(b, width) for b in blocks)

if __name__ == "__main__":
    from pprint import pprint

    def debug(func, blocks):
        blocks = func(blocks)
        print "*** after %s:" % func.__name__
        pprint(blocks)
        print
        return blocks

    text = open(sys.argv[1]).read()
    blocks = debug(findblocks, text)
    blocks = debug(findliteralblocks, blocks)
    blocks = debug(findsections, blocks)
    blocks = debug(findbulletlists, blocks)
    blocks = debug(findoptionlists, blocks)
    blocks = debug(findfieldlists, blocks)
    blocks = debug(finddefinitionlists, blocks)
    blocks = debug(addmargins, blocks)
    print '\n'.join(formatblock(b, 30) for b in blocks)
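
The two added lines above are the visible half of the fix. A minimal sketch of
what they buy from a caller's point of view (standalone example, not part of
the diff; assumes minirst.py is importable as minirst):

    import minirst

    sample = ("This paragraph is long enough that it needs to be wrapped "
              "once a real terminal width is known.")

    # With COLUMNS=0 the caller ends up passing width=0 down to minirst.
    # formatblock() now falls back to 78 columns instead of letting
    # textwrap raise "invalid width".
    print minirst.format(sample, 0)    # wrapped at 78 columns
    print minirst.format(sample, 30)   # wrapped at 30 columns
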
util.py
@@ -1,1283 +1,1286 @@
# util.py - Mercurial utility functions and platform specific implementations
#
# Copyright 2005 K. Thananchayan <thananck@yahoo.com>
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2, incorporated herein by reference.

"""Mercurial utility functions and platform specific implementations.

This contains helper routines that are independent of the SCM core and
hide platform-specific details from the core.
"""

from i18n import _
import error, osutil
import cStringIO, errno, re, shutil, sys, tempfile, traceback
import os, stat, time, calendar, random, textwrap
import imp

# Python compatibility

def sha1(s):
    return _fastsha1(s)

def _fastsha1(s):
    # This function will import sha1 from hashlib or sha (whichever is
    # available) and overwrite itself with it on the first call.
    # Subsequent calls will go directly to the imported function.
    try:
        from hashlib import sha1 as _sha1
    except ImportError:
        from sha import sha as _sha1
    global _fastsha1, sha1
    _fastsha1 = sha1 = _sha1
    return _sha1(s)

import subprocess
closefds = os.name == 'posix'
def popen2(cmd):
    # Setting bufsize to -1 lets the system decide the buffer size.
    # The default for bufsize is 0, meaning unbuffered. This leads to
    # poor performance on Mac OS X: http://bugs.python.org/issue4194
    p = subprocess.Popen(cmd, shell=True, bufsize=-1,
                         close_fds=closefds,
                         stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    return p.stdin, p.stdout
def popen3(cmd):
    p = subprocess.Popen(cmd, shell=True, bufsize=-1,
                         close_fds=closefds,
                         stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)
    return p.stdin, p.stdout, p.stderr

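# Illustrative sketch (not part of util.py): popen2()/popen3() are thin
# wrappers around subprocess.Popen that only hand back the pipe objects, e.g.
#
#   >>> fin, fout = popen2('sort')
#   >>> fin.write('b\na\n'); fin.close()
#   >>> fout.read()
#   'a\nb\n'
#
# (assumes a POSIX-style 'sort' command on PATH)
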
def version():
    """Return version information if available."""
    try:
        import __version__
        return __version__.version
    except ImportError:
        return 'unknown'

# used by parsedate
defaultdateformats = (
    '%Y-%m-%d %H:%M:%S',
    '%Y-%m-%d %I:%M:%S%p',
    '%Y-%m-%d %H:%M',
    '%Y-%m-%d %I:%M%p',
    '%Y-%m-%d',
    '%m-%d',
    '%m/%d',
    '%m/%d/%y',
    '%m/%d/%Y',
    '%a %b %d %H:%M:%S %Y',
    '%a %b %d %I:%M:%S%p %Y',
    '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
    '%b %d %H:%M:%S %Y',
    '%b %d %I:%M:%S%p %Y',
    '%b %d %H:%M:%S',
    '%b %d %I:%M:%S%p',
    '%b %d %H:%M',
    '%b %d %I:%M%p',
    '%b %d %Y',
    '%b %d',
    '%H:%M:%S',
    '%I:%M:%S%p',
    '%H:%M',
    '%I:%M%p',
    )

extendeddateformats = defaultdateformats + (
    "%Y",
    "%Y-%m",
    "%b",
    "%b %Y",
    )

def cachefunc(func):
    '''cache the result of function calls'''
    # XXX doesn't handle keywords args
    cache = {}
    if func.func_code.co_argcount == 1:
        # we gain a small amount of time because
        # we don't need to pack/unpack the list
        def f(arg):
            if arg not in cache:
                cache[arg] = func(arg)
            return cache[arg]
    else:
        def f(*args):
            if args not in cache:
                cache[args] = func(*args)
            return cache[args]

    return f

def lrucachefunc(func):
    '''cache most recent results of function calls'''
    cache = {}
    order = []
    if func.func_code.co_argcount == 1:
        def f(arg):
            if arg not in cache:
                if len(cache) > 20:
                    del cache[order.pop(0)]
                cache[arg] = func(arg)
            else:
                order.remove(arg)
            order.append(arg)
            return cache[arg]
    else:
        def f(*args):
            if args not in cache:
                if len(cache) > 20:
                    del cache[order.pop(0)]
                cache[args] = func(*args)
            else:
                order.remove(args)
            order.append(args)
            return cache[args]

    return f

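# Illustrative sketch (not part of util.py): both helpers are used by
# wrapping an existing function by hand, e.g.
#
#   >>> def square(x):
#   ...     return x * x
#   >>> square = lrucachefunc(square)
#   >>> square(4)      # computed
#   16
#   >>> square(4)      # served from the small LRU cache (about 20 entries)
#   16
#
# cachefunc() behaves the same but never evicts entries.
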
class propertycache(object):
    def __init__(self, func):
        self.func = func
        self.name = func.__name__
    def __get__(self, obj, type=None):
        result = self.func(obj)
        setattr(obj, self.name, result)
        return result

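# Illustrative sketch (not part of util.py): propertycache computes an
# attribute once and then replaces itself with the plain value on the
# instance, e.g.
#
#   >>> class repo(object):
#   ...     @propertycache
#   ...     def expensive(self):
#   ...         print 'computing'
#   ...         return 42
#   >>> r = repo()
#   >>> r.expensive
#   computing
#   42
#   >>> r.expensive      # second access hits the instance attribute
#   42
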
def pipefilter(s, cmd):
    '''filter string S through command CMD, returning its output'''
    p = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                         stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    pout, perr = p.communicate(s)
    return pout

def tempfilter(s, cmd):
    '''filter string S through a pair of temporary files with CMD.
    CMD is used as a template to create the real command to be run,
    with the strings INFILE and OUTFILE replaced by the real names of
    the temporary files generated.'''
    inname, outname = None, None
    try:
        infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
        fp = os.fdopen(infd, 'wb')
        fp.write(s)
        fp.close()
        outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
        os.close(outfd)
        cmd = cmd.replace('INFILE', inname)
        cmd = cmd.replace('OUTFILE', outname)
        code = os.system(cmd)
        if sys.platform == 'OpenVMS' and code & 1:
            code = 0
        if code: raise Abort(_("command '%s' failed: %s") %
                             (cmd, explain_exit(code)))
        return open(outname, 'rb').read()
    finally:
        try:
            if inname: os.unlink(inname)
        except: pass
        try:
            if outname: os.unlink(outname)
        except: pass

filtertable = {
    'tempfile:': tempfilter,
    'pipe:': pipefilter,
    }

def filter(s, cmd):
    "filter a string through a command that transforms its input to its output"
    for name, fn in filtertable.iteritems():
        if cmd.startswith(name):
            return fn(s, cmd[len(name):].lstrip())
    return pipefilter(s, cmd)

def binary(s):
    """return true if a string is binary data"""
    return bool(s and '\0' in s)

def increasingchunks(source, min=1024, max=65536):
    '''return no less than min bytes per chunk while data remains,
    doubling min after each chunk until it reaches max'''
    def log2(x):
        if not x:
            return 0
        i = 0
        while x:
            x >>= 1
            i += 1
        return i - 1

    buf = []
    blen = 0
    for chunk in source:
        buf.append(chunk)
        blen += len(chunk)
        if blen >= min:
            if min < max:
                min = min << 1
                nmin = 1 << log2(blen)
                if nmin > min:
                    min = nmin
                if min > max:
                    min = max
            yield ''.join(buf)
            blen = 0
            buf = []
    if buf:
        yield ''.join(buf)

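# Illustrative sketch (not part of util.py): increasingchunks() regroups an
# iterable of strings into progressively larger chunks, which keeps write()
# calls cheap for large streams, e.g.
#
#   >>> [len(c) for c in increasingchunks(['x' * 100] * 10, min=200, max=800)]
#   [200, 400, 400]
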
Abort = error.Abort

def always(fn): return True
def never(fn): return False

def pathto(root, n1, n2):
    '''return the relative path from one place to another.
    root should use os.sep to separate directories
    n1 should use os.sep to separate directories
    n2 should use "/" to separate directories
    returns an os.sep-separated path.

    If n1 is a relative path, it's assumed it's
    relative to root.
    n2 should always be relative to root.
    '''
    if not n1: return localpath(n2)
    if os.path.isabs(n1):
        if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
            return os.path.join(root, localpath(n2))
        n2 = '/'.join((pconvert(root), n2))
    a, b = splitpath(n1), n2.split('/')
    a.reverse()
    b.reverse()
    while a and b and a[-1] == b[-1]:
        a.pop()
        b.pop()
    b.reverse()
    return os.sep.join((['..'] * len(a)) + b) or '.'

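# Illustrative sketch (not part of util.py): pathto() answers "how do I get
# from directory n1 to file n2, both relative to root?". On a POSIX box:
#
#   >>> pathto('/repo', 'a/b', 'a/c/f.txt')
#   '../c/f.txt'
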
def canonpath(root, cwd, myname):
    """return the canonical path of myname, given cwd and root"""
    if endswithsep(root):
        rootsep = root
    else:
        rootsep = root + os.sep
    name = myname
    if not os.path.isabs(name):
        name = os.path.join(root, cwd, name)
    name = os.path.normpath(name)
    audit_path = path_auditor(root)
    if name != rootsep and name.startswith(rootsep):
        name = name[len(rootsep):]
        audit_path(name)
        return pconvert(name)
    elif name == root:
        return ''
    else:
        # Determine whether `name' is in the hierarchy at or beneath `root',
        # by iterating name=dirname(name) until that causes no change (can't
        # check name == '/', because that doesn't work on windows). For each
        # `name', compare dev/inode numbers. If they match, the list `rel'
        # holds the reversed list of components making up the relative file
        # name we want.
        root_st = os.stat(root)
        rel = []
        while True:
            try:
                name_st = os.stat(name)
            except OSError:
                break
            if samestat(name_st, root_st):
                if not rel:
                    # name was actually the same as root (maybe a symlink)
                    return ''
                rel.reverse()
                name = os.path.join(*rel)
                audit_path(name)
                return pconvert(name)
            dirname, basename = os.path.split(name)
            rel.append(basename)
            if dirname == name:
                break
            name = dirname

        raise Abort('%s not under root' % myname)

_hgexecutable = None

def main_is_frozen():
    """return True if we are a frozen executable.

    The code supports py2exe (most common, Windows only) and tools/freeze
    (portable, not much used).
    """
    return (hasattr(sys, "frozen") or # new py2exe
            hasattr(sys, "importers") or # old py2exe
            imp.is_frozen("__main__")) # tools/freeze

def hgexecutable():
    """return location of the 'hg' executable.

    Defaults to $HG or 'hg' in the search path.
    """
    if _hgexecutable is None:
        hg = os.environ.get('HG')
        if hg:
            set_hgexecutable(hg)
        elif main_is_frozen():
            set_hgexecutable(sys.executable)
        else:
            set_hgexecutable(find_exe('hg') or 'hg')
    return _hgexecutable

def set_hgexecutable(path):
    """set location of the 'hg' executable"""
    global _hgexecutable
    _hgexecutable = path

def system(cmd, environ={}, cwd=None, onerr=None, errprefix=None):
    '''enhanced shell command execution.
    run with environment maybe modified, maybe in different dir.

    if command fails and onerr is None, return status. if ui object,
    print error message and return status, else raise onerr object as
    exception.'''
    def py2shell(val):
        'convert python object into string that is useful to shell'
        if val is None or val is False:
            return '0'
        if val is True:
            return '1'
        return str(val)
    oldenv = {}
    for k in environ:
        oldenv[k] = os.environ.get(k)
    if cwd is not None:
        oldcwd = os.getcwd()
    origcmd = cmd
    if os.name == 'nt':
        cmd = '"%s"' % cmd
    try:
        for k, v in environ.iteritems():
            os.environ[k] = py2shell(v)
        os.environ['HG'] = hgexecutable()
        if cwd is not None and oldcwd != cwd:
            os.chdir(cwd)
        rc = os.system(cmd)
        if sys.platform == 'OpenVMS' and rc & 1:
            rc = 0
        if rc and onerr:
            errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
                                explain_exit(rc)[0])
            if errprefix:
                errmsg = '%s: %s' % (errprefix, errmsg)
            try:
                onerr.warn(errmsg + '\n')
            except AttributeError:
                raise onerr(errmsg)
        return rc
    finally:
        for k, v in oldenv.iteritems():
            if v is None:
                del os.environ[k]
            else:
                os.environ[k] = v
        if cwd is not None and oldcwd != cwd:
            os.chdir(oldcwd)

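# Illustrative sketch (not part of util.py): system() runs a shell command
# with a temporarily extended environment, e.g. on a POSIX shell
#
#   >>> system('echo building > /dev/null', environ={'HG_PHASE': 'build'})
#   0
#
# Passing onerr=Abort (or a ui object) turns a non-zero exit status into an
# exception (or a warning) instead of a bare return code.
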
def checksignature(func):
    '''wrap a function with code to check for calling errors'''
    def check(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except TypeError:
            if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
                raise error.SignatureError
            raise

    return check

# os.path.lexists is not available on python2.3
def lexists(filename):
    "test whether a file with this name exists. does not follow symlinks"
    try:
        os.lstat(filename)
    except:
        return False
    return True

def rename(src, dst):
    """forcibly rename a file"""
    try:
        os.rename(src, dst)
    except OSError, err: # FIXME: check err (EEXIST ?)

        # On windows, rename to existing file is not allowed, so we
        # must delete destination first. But if a file is open, unlink
        # schedules it for delete but does not delete it. Rename
        # happens immediately even for open files, so we rename
        # destination to a temporary name, then delete that. Then
        # rename is safe to do.
        # The temporary name is chosen at random to avoid the situation
        # where a file is left lying around from a previous aborted run.
        # The usual race condition this introduces can't be avoided as
        # we need the name to rename into, and not the file itself. Due
        # to the nature of the operation however, any races will at worst
        # lead to the rename failing and the current operation aborting.

        def tempname(prefix):
            for tries in xrange(10):
                temp = '%s-%08x' % (prefix, random.randint(0, 0xffffffff))
                if not os.path.exists(temp):
                    return temp
            raise IOError, (errno.EEXIST, "No usable temporary filename found")

        temp = tempname(dst)
        os.rename(dst, temp)
        os.unlink(temp)
        os.rename(src, dst)

def unlink(f):
    """unlink and remove the directory if it is empty"""
    os.unlink(f)
    # try removing directories that might now be empty
    try:
        os.removedirs(os.path.dirname(f))
    except OSError:
        pass

def copyfile(src, dest):
    "copy a file, preserving mode and atime/mtime"
    if os.path.islink(src):
        try:
            os.unlink(dest)
        except:
            pass
        os.symlink(os.readlink(src), dest)
    else:
        try:
            shutil.copyfile(src, dest)
            shutil.copystat(src, dest)
        except shutil.Error, inst:
            raise Abort(str(inst))

def copyfiles(src, dst, hardlink=None):
    """Copy a directory tree using hardlinks if possible"""

    if hardlink is None:
        hardlink = (os.stat(src).st_dev ==
                    os.stat(os.path.dirname(dst)).st_dev)

    if os.path.isdir(src):
        os.mkdir(dst)
        for name, kind in osutil.listdir(src):
            srcname = os.path.join(src, name)
            dstname = os.path.join(dst, name)
            copyfiles(srcname, dstname, hardlink)
    else:
        if hardlink:
            try:
                os_link(src, dst)
            except (IOError, OSError):
                hardlink = False
                shutil.copy(src, dst)
        else:
            shutil.copy(src, dst)

class path_auditor(object):
    '''ensure that a filesystem path contains no banned components.
    the following properties of a path are checked:

    - under top-level .hg
    - starts at the root of a windows drive
    - contains ".."
    - traverses a symlink (e.g. a/symlink_here/b)
    - inside a nested repository'''

    def __init__(self, root):
        self.audited = set()
        self.auditeddir = set()
        self.root = root

    def __call__(self, path):
        if path in self.audited:
            return
        normpath = os.path.normcase(path)
        parts = splitpath(normpath)
        if (os.path.splitdrive(path)[0]
            or parts[0].lower() in ('.hg', '.hg.', '')
            or os.pardir in parts):
            raise Abort(_("path contains illegal component: %s") % path)
        if '.hg' in path.lower():
            lparts = [p.lower() for p in parts]
            for p in '.hg', '.hg.':
                if p in lparts[1:]:
                    pos = lparts.index(p)
                    base = os.path.join(*parts[:pos])
                    raise Abort(_('path %r is inside repo %r') % (path, base))
        def check(prefix):
            curpath = os.path.join(self.root, prefix)
            try:
                st = os.lstat(curpath)
            except OSError, err:
                # EINVAL can be raised as invalid path syntax under win32.
                # They must be ignored for patterns can be checked too.
                if err.errno not in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL):
                    raise
            else:
                if stat.S_ISLNK(st.st_mode):
                    raise Abort(_('path %r traverses symbolic link %r') %
                                (path, prefix))
                elif (stat.S_ISDIR(st.st_mode) and
                      os.path.isdir(os.path.join(curpath, '.hg'))):
                    raise Abort(_('path %r is inside repo %r') %
                                (path, prefix))
        parts.pop()
        prefixes = []
        while parts:
            prefix = os.sep.join(parts)
            if prefix in self.auditeddir:
                break
            check(prefix)
            prefixes.append(prefix)
            parts.pop()

        self.audited.add(path)
        # only add prefixes to the cache after checking everything: we don't
        # want to add "foo/bar/baz" before checking if there's a "foo/.hg"
        self.auditeddir.update(prefixes)

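# Illustrative sketch (not part of util.py): a path_auditor instance is
# called with repository-relative paths and raises Abort for anything
# suspicious, e.g.
#
#   >>> audit = path_auditor('/repo')
#   >>> audit('normal/file.txt')          # fine, remembered in self.audited
#   >>> audit('.hg/hgrc')                 # raises Abort: illegal component
#   >>> audit('../outside')               # raises Abort: contains ".."
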
def nlinks(pathname):
    """Return number of hardlinks for the given file."""
    return os.lstat(pathname).st_nlink

if hasattr(os, 'link'):
    os_link = os.link
else:
    def os_link(src, dst):
        raise OSError(0, _("Hardlinks not supported"))

def lookup_reg(key, name=None, scope=None):
    return None

if os.name == 'nt':
    from windows import *
else:
    from posix import *

def makelock(info, pathname):
    try:
        return os.symlink(info, pathname)
    except OSError, why:
        if why.errno == errno.EEXIST:
            raise
    except AttributeError: # no symlink in os
        pass

    ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
    os.write(ld, info)
    os.close(ld)

def readlock(pathname):
    try:
        return os.readlink(pathname)
    except OSError, why:
        if why.errno not in (errno.EINVAL, errno.ENOSYS):
            raise
    except AttributeError: # no symlink in os
        pass
    return posixfile(pathname).read()

def fstat(fp):
    '''stat file object that may not have fileno method.'''
    try:
        return os.fstat(fp.fileno())
    except AttributeError:
        return os.stat(fp.name)

# File system features

def checkcase(path):
    """
    Check whether the given path is on a case-sensitive filesystem

    Requires a path (like /foo/.hg) ending with a foldable final
    directory component.
    """
    s1 = os.stat(path)
    d, b = os.path.split(path)
    p2 = os.path.join(d, b.upper())
    if path == p2:
        p2 = os.path.join(d, b.lower())
    try:
        s2 = os.stat(p2)
        if s2 == s1:
            return False
        return True
    except:
        return True

628 _fspathcache = {}
628 _fspathcache = {}
629 def fspath(name, root):
629 def fspath(name, root):
630 '''Get name in the case stored in the filesystem
630 '''Get name in the case stored in the filesystem
631
631
632 The name is either relative to root, or it is an absolute path starting
632 The name is either relative to root, or it is an absolute path starting
633 with root. Note that this function is unnecessary, and should not be
633 with root. Note that this function is unnecessary, and should not be
634 called, for case-sensitive filesystems (simply because it's expensive).
634 called, for case-sensitive filesystems (simply because it's expensive).
635 '''
635 '''
636 # If name is absolute, make it relative
636 # If name is absolute, make it relative
637 if name.lower().startswith(root.lower()):
637 if name.lower().startswith(root.lower()):
638 l = len(root)
638 l = len(root)
639 if name[l] == os.sep or name[l] == os.altsep:
639 if name[l] == os.sep or name[l] == os.altsep:
640 l = l + 1
640 l = l + 1
641 name = name[l:]
641 name = name[l:]
642
642
643 if not os.path.exists(os.path.join(root, name)):
643 if not os.path.exists(os.path.join(root, name)):
644 return None
644 return None
645
645
646 seps = os.sep
646 seps = os.sep
647 if os.altsep:
647 if os.altsep:
648 seps = seps + os.altsep
648 seps = seps + os.altsep
649 # Protect backslashes. This gets silly very quickly.
649 # Protect backslashes. This gets silly very quickly.
650 seps = seps.replace('\\','\\\\')
650 seps = seps.replace('\\','\\\\')
651 pattern = re.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
651 pattern = re.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
652 dir = os.path.normcase(os.path.normpath(root))
652 dir = os.path.normcase(os.path.normpath(root))
653 result = []
653 result = []
654 for part, sep in pattern.findall(name):
654 for part, sep in pattern.findall(name):
655 if sep:
655 if sep:
656 result.append(sep)
656 result.append(sep)
657 continue
657 continue
658
658
659 if dir not in _fspathcache:
659 if dir not in _fspathcache:
660 _fspathcache[dir] = os.listdir(dir)
660 _fspathcache[dir] = os.listdir(dir)
661 contents = _fspathcache[dir]
661 contents = _fspathcache[dir]
662
662
663 lpart = part.lower()
663 lpart = part.lower()
664 lenp = len(part)
664 lenp = len(part)
665 for n in contents:
665 for n in contents:
666 if lenp == len(n) and n.lower() == lpart:
666 if lenp == len(n) and n.lower() == lpart:
667 result.append(n)
667 result.append(n)
668 break
668 break
669 else:
669 else:
670 # Cannot happen, as the file exists!
670 # Cannot happen, as the file exists!
671 result.append(part)
671 result.append(part)
672 dir = os.path.join(dir, lpart)
672 dir = os.path.join(dir, lpart)
673
673
674 return ''.join(result)
674 return ''.join(result)
675
675
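# Editorial sketch (hypothetical helper, not in the original file): on a
# case-insensitive filesystem, fspath() recovers the spelling stored on disk.
# The file names are invented for illustration.
def _fspath_sketch(root):
    # If the tree really contains 'Foo/Bar.txt', querying in the wrong case
    # still yields the stored spelling; unknown names return None.
    return fspath('foo/bar.txt', root)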
676 def checkexec(path):
676 def checkexec(path):
677 """
677 """
678 Check whether the given path is on a filesystem with UNIX-like exec flags
678 Check whether the given path is on a filesystem with UNIX-like exec flags
679
679
680 Requires a directory (like /foo/.hg)
680 Requires a directory (like /foo/.hg)
681 """
681 """
682
682
683 # VFAT on some Linux versions can flip the mode, but the change does not
683 # VFAT on some Linux versions can flip the mode, but the change does not
684 # persist across a FS remount. Frequently we can detect it if files are created
684 # persist across a FS remount. Frequently we can detect it if files are created
685 # with exec bit on.
685 # with exec bit on.
686
686
687 try:
687 try:
688 EXECFLAGS = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
688 EXECFLAGS = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
689 fh, fn = tempfile.mkstemp("", "", path)
689 fh, fn = tempfile.mkstemp("", "", path)
690 try:
690 try:
691 os.close(fh)
691 os.close(fh)
692 m = os.stat(fn).st_mode & 0777
692 m = os.stat(fn).st_mode & 0777
693 new_file_has_exec = m & EXECFLAGS
693 new_file_has_exec = m & EXECFLAGS
694 os.chmod(fn, m ^ EXECFLAGS)
694 os.chmod(fn, m ^ EXECFLAGS)
695 exec_flags_cannot_flip = ((os.stat(fn).st_mode & 0777) == m)
695 exec_flags_cannot_flip = ((os.stat(fn).st_mode & 0777) == m)
696 finally:
696 finally:
697 os.unlink(fn)
697 os.unlink(fn)
698 except (IOError, OSError):
698 except (IOError, OSError):
699 # we don't care, the user probably won't be able to commit anyway
699 # we don't care, the user probably won't be able to commit anyway
700 return False
700 return False
701 return not (new_file_has_exec or exec_flags_cannot_flip)
701 return not (new_file_has_exec or exec_flags_cannot_flip)
702
702
703 def checklink(path):
703 def checklink(path):
704 """check whether the given path is on a symlink-capable filesystem"""
704 """check whether the given path is on a symlink-capable filesystem"""
705 # mktemp is not racy because symlink creation will fail if the
705 # mktemp is not racy because symlink creation will fail if the
706 # file already exists
706 # file already exists
707 name = tempfile.mktemp(dir=path)
707 name = tempfile.mktemp(dir=path)
708 try:
708 try:
709 os.symlink(".", name)
709 os.symlink(".", name)
710 os.unlink(name)
710 os.unlink(name)
711 return True
711 return True
712 except (OSError, AttributeError):
712 except (OSError, AttributeError):
713 return False
713 return False
714
714
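# Editorial sketch (hypothetical helper, not in the original file): both
# probes above are usually run against the repository's .hg directory to
# decide whether exec bits and symlinks can be tracked faithfully.
def _fscaps_sketch(repopath):
    hgdir = os.path.join(repopath, '.hg')
    return {'exec': checkexec(hgdir), 'symlink': checklink(hgdir)}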
715 def needbinarypatch():
715 def needbinarypatch():
716 """return True if patches should be applied in binary mode by default."""
716 """return True if patches should be applied in binary mode by default."""
717 return os.name == 'nt'
717 return os.name == 'nt'
718
718
719 def endswithsep(path):
719 def endswithsep(path):
720 '''Check path ends with os.sep or os.altsep.'''
720 '''Check path ends with os.sep or os.altsep.'''
721 return path.endswith(os.sep) or os.altsep and path.endswith(os.altsep)
721 return path.endswith(os.sep) or os.altsep and path.endswith(os.altsep)
722
722
723 def splitpath(path):
723 def splitpath(path):
724 '''Split path by os.sep.
724 '''Split path by os.sep.
725 Note that this function does not use os.altsep because it is
725 Note that this function does not use os.altsep because it is
726 an alternative to a simple "xxx.split(os.sep)".
726 an alternative to a simple "xxx.split(os.sep)".
727 It is recommended to use os.path.normpath() before calling this
727 It is recommended to use os.path.normpath() before calling this
728 function if needed.'''
728 function if needed.'''
729 return path.split(os.sep)
729 return path.split(os.sep)
730
730
731 def gui():
731 def gui():
732 '''Are we running in a GUI?'''
732 '''Are we running in a GUI?'''
733 return os.name == "nt" or os.name == "mac" or os.environ.get("DISPLAY")
733 return os.name == "nt" or os.name == "mac" or os.environ.get("DISPLAY")
734
734
735 def mktempcopy(name, emptyok=False, createmode=None):
735 def mktempcopy(name, emptyok=False, createmode=None):
736 """Create a temporary file with the same contents from name
736 """Create a temporary file with the same contents from name
737
737
738 The permission bits are copied from the original file.
738 The permission bits are copied from the original file.
739
739
740 If the temporary file is going to be truncated immediately, you
740 If the temporary file is going to be truncated immediately, you
741 can use emptyok=True as an optimization.
741 can use emptyok=True as an optimization.
742
742
743 Returns the name of the temporary file.
743 Returns the name of the temporary file.
744 """
744 """
745 d, fn = os.path.split(name)
745 d, fn = os.path.split(name)
746 fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
746 fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
747 os.close(fd)
747 os.close(fd)
748 # Temporary files are created with mode 0600, which is usually not
748 # Temporary files are created with mode 0600, which is usually not
749 # what we want. If the original file already exists, just copy
749 # what we want. If the original file already exists, just copy
750 # its mode. Otherwise, manually obey umask.
750 # its mode. Otherwise, manually obey umask.
751 try:
751 try:
752 st_mode = os.lstat(name).st_mode & 0777
752 st_mode = os.lstat(name).st_mode & 0777
753 except OSError, inst:
753 except OSError, inst:
754 if inst.errno != errno.ENOENT:
754 if inst.errno != errno.ENOENT:
755 raise
755 raise
756 st_mode = createmode
756 st_mode = createmode
757 if st_mode is None:
757 if st_mode is None:
758 st_mode = ~umask
758 st_mode = ~umask
759 st_mode &= 0666
759 st_mode &= 0666
760 os.chmod(temp, st_mode)
760 os.chmod(temp, st_mode)
761 if emptyok:
761 if emptyok:
762 return temp
762 return temp
763 try:
763 try:
764 try:
764 try:
765 ifp = posixfile(name, "rb")
765 ifp = posixfile(name, "rb")
766 except IOError, inst:
766 except IOError, inst:
767 if inst.errno == errno.ENOENT:
767 if inst.errno == errno.ENOENT:
768 return temp
768 return temp
769 if not getattr(inst, 'filename', None):
769 if not getattr(inst, 'filename', None):
770 inst.filename = name
770 inst.filename = name
771 raise
771 raise
772 ofp = posixfile(temp, "wb")
772 ofp = posixfile(temp, "wb")
773 for chunk in filechunkiter(ifp):
773 for chunk in filechunkiter(ifp):
774 ofp.write(chunk)
774 ofp.write(chunk)
775 ifp.close()
775 ifp.close()
776 ofp.close()
776 ofp.close()
777 except:
777 except:
778 try: os.unlink(temp)
778 try: os.unlink(temp)
779 except: pass
779 except: pass
780 raise
780 raise
781 return temp
781 return temp
782
782
783 class atomictempfile(object):
783 class atomictempfile(object):
784 """file-like object that atomically updates a file
784 """file-like object that atomically updates a file
785
785
786 All writes will be redirected to a temporary copy of the original
786 All writes will be redirected to a temporary copy of the original
787 file. When rename is called, the copy is renamed to the original
787 file. When rename is called, the copy is renamed to the original
788 name, making the changes visible.
788 name, making the changes visible.
789 """
789 """
790 def __init__(self, name, mode, createmode):
790 def __init__(self, name, mode, createmode):
791 self.__name = name
791 self.__name = name
792 self._fp = None
792 self._fp = None
793 self.temp = mktempcopy(name, emptyok=('w' in mode),
793 self.temp = mktempcopy(name, emptyok=('w' in mode),
794 createmode=createmode)
794 createmode=createmode)
795 self._fp = posixfile(self.temp, mode)
795 self._fp = posixfile(self.temp, mode)
796
796
797 def __getattr__(self, name):
797 def __getattr__(self, name):
798 return getattr(self._fp, name)
798 return getattr(self._fp, name)
799
799
800 def rename(self):
800 def rename(self):
801 if not self._fp.closed:
801 if not self._fp.closed:
802 self._fp.close()
802 self._fp.close()
803 rename(self.temp, localpath(self.__name))
803 rename(self.temp, localpath(self.__name))
804
804
805 def __del__(self):
805 def __del__(self):
806 if not self._fp:
806 if not self._fp:
807 return
807 return
808 if not self._fp.closed:
808 if not self._fp.closed:
809 try:
809 try:
810 os.unlink(self.temp)
810 os.unlink(self.temp)
811 except: pass
811 except: pass
812 self._fp.close()
812 self._fp.close()
813
813
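# Editorial sketch (hypothetical helper, not in the original file): the
# atomic-update pattern. Readers of 'name' keep seeing the old contents until
# rename() is called; dropping the object without rename() discards the copy.
def _atomicwrite_sketch(name, data):
    f = atomictempfile(name, 'wb', None)
    f.write(data)      # goes to the temporary copy, not to 'name'
    f.rename()         # atomically replaces 'name' with the new contents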
814 def makedirs(name, mode=None):
814 def makedirs(name, mode=None):
815 """recursive directory creation with parent mode inheritance"""
815 """recursive directory creation with parent mode inheritance"""
816 try:
816 try:
817 os.mkdir(name)
817 os.mkdir(name)
818 if mode is not None:
818 if mode is not None:
819 os.chmod(name, mode)
819 os.chmod(name, mode)
820 return
820 return
821 except OSError, err:
821 except OSError, err:
822 if err.errno == errno.EEXIST:
822 if err.errno == errno.EEXIST:
823 return
823 return
824 if err.errno != errno.ENOENT:
824 if err.errno != errno.ENOENT:
825 raise
825 raise
826 parent = os.path.abspath(os.path.dirname(name))
826 parent = os.path.abspath(os.path.dirname(name))
827 makedirs(parent, mode)
827 makedirs(parent, mode)
828 makedirs(name, mode)
828 makedirs(name, mode)
829
829
830 class opener(object):
830 class opener(object):
831 """Open files relative to a base directory
831 """Open files relative to a base directory
832
832
833 This class is used to hide the details of COW semantics and
833 This class is used to hide the details of COW semantics and
834 remote file access from higher level code.
834 remote file access from higher level code.
835 """
835 """
836 def __init__(self, base, audit=True):
836 def __init__(self, base, audit=True):
837 self.base = base
837 self.base = base
838 if audit:
838 if audit:
839 self.audit_path = path_auditor(base)
839 self.audit_path = path_auditor(base)
840 else:
840 else:
841 self.audit_path = always
841 self.audit_path = always
842 self.createmode = None
842 self.createmode = None
843
843
844 @propertycache
844 @propertycache
845 def _can_symlink(self):
845 def _can_symlink(self):
846 return checklink(self.base)
846 return checklink(self.base)
847
847
848 def _fixfilemode(self, name):
848 def _fixfilemode(self, name):
849 if self.createmode is None:
849 if self.createmode is None:
850 return
850 return
851 os.chmod(name, self.createmode & 0666)
851 os.chmod(name, self.createmode & 0666)
852
852
853 def __call__(self, path, mode="r", text=False, atomictemp=False):
853 def __call__(self, path, mode="r", text=False, atomictemp=False):
854 self.audit_path(path)
854 self.audit_path(path)
855 f = os.path.join(self.base, path)
855 f = os.path.join(self.base, path)
856
856
857 if not text and "b" not in mode:
857 if not text and "b" not in mode:
858 mode += "b" # for that other OS
858 mode += "b" # for that other OS
859
859
860 nlink = -1
860 nlink = -1
861 if mode not in ("r", "rb"):
861 if mode not in ("r", "rb"):
862 try:
862 try:
863 nlink = nlinks(f)
863 nlink = nlinks(f)
864 except OSError:
864 except OSError:
865 nlink = 0
865 nlink = 0
866 d = os.path.dirname(f)
866 d = os.path.dirname(f)
867 if not os.path.isdir(d):
867 if not os.path.isdir(d):
868 makedirs(d, self.createmode)
868 makedirs(d, self.createmode)
869 if atomictemp:
869 if atomictemp:
870 return atomictempfile(f, mode, self.createmode)
870 return atomictempfile(f, mode, self.createmode)
871 if nlink > 1:
871 if nlink > 1:
872 rename(mktempcopy(f), f)
872 rename(mktempcopy(f), f)
873 fp = posixfile(f, mode)
873 fp = posixfile(f, mode)
874 if nlink == 0:
874 if nlink == 0:
875 self._fixfilemode(f)
875 self._fixfilemode(f)
876 return fp
876 return fp
877
877
878 def symlink(self, src, dst):
878 def symlink(self, src, dst):
879 self.audit_path(dst)
879 self.audit_path(dst)
880 linkname = os.path.join(self.base, dst)
880 linkname = os.path.join(self.base, dst)
881 try:
881 try:
882 os.unlink(linkname)
882 os.unlink(linkname)
883 except OSError:
883 except OSError:
884 pass
884 pass
885
885
886 dirname = os.path.dirname(linkname)
886 dirname = os.path.dirname(linkname)
887 if not os.path.exists(dirname):
887 if not os.path.exists(dirname):
888 makedirs(dirname, self.createmode)
888 makedirs(dirname, self.createmode)
889
889
890 if self._can_symlink:
890 if self._can_symlink:
891 try:
891 try:
892 os.symlink(src, linkname)
892 os.symlink(src, linkname)
893 except OSError, err:
893 except OSError, err:
894 raise OSError(err.errno, _('could not symlink to %r: %s') %
894 raise OSError(err.errno, _('could not symlink to %r: %s') %
895 (src, err.strerror), linkname)
895 (src, err.strerror), linkname)
896 else:
896 else:
897 f = self(dst, "w")
897 f = self(dst, "w")
898 f.write(src)
898 f.write(src)
899 f.close()
899 f.close()
900 self._fixfilemode(dst)
900 self._fixfilemode(dst)
901
901
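# Editorial sketch (hypothetical helper, not in the original file): typical
# opener usage; paths are audited and missing parent directories are created
# on demand when writing. The file name below is invented.
def _opener_sketch(repopath):
    op = opener(os.path.join(repopath, '.hg'))
    f = op('store/example.i', 'ab')    # path is interpreted relative to base
    f.close()
    return op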
902 class chunkbuffer(object):
902 class chunkbuffer(object):
903 """Allow arbitrary sized chunks of data to be efficiently read from an
903 """Allow arbitrary sized chunks of data to be efficiently read from an
904 iterator over chunks of arbitrary size."""
904 iterator over chunks of arbitrary size."""
905
905
906 def __init__(self, in_iter):
906 def __init__(self, in_iter):
907 """in_iter is the iterator that's iterating over the input chunks.
907 """in_iter is the iterator that's iterating over the input chunks.
908 targetsize is how big a buffer to try to maintain."""
908 targetsize is how big a buffer to try to maintain."""
909 self.iter = iter(in_iter)
909 self.iter = iter(in_iter)
910 self.buf = ''
910 self.buf = ''
911 self.targetsize = 2**16
911 self.targetsize = 2**16
912
912
913 def read(self, l):
913 def read(self, l):
914 """Read L bytes of data from the iterator of chunks of data.
914 """Read L bytes of data from the iterator of chunks of data.
915 Returns less than L bytes if the iterator runs dry."""
915 Returns less than L bytes if the iterator runs dry."""
916 if l > len(self.buf) and self.iter:
916 if l > len(self.buf) and self.iter:
917 # Clamp to a multiple of self.targetsize
917 # Clamp to a multiple of self.targetsize
918 targetsize = max(l, self.targetsize)
918 targetsize = max(l, self.targetsize)
919 collector = cStringIO.StringIO()
919 collector = cStringIO.StringIO()
920 collector.write(self.buf)
920 collector.write(self.buf)
921 collected = len(self.buf)
921 collected = len(self.buf)
922 for chunk in self.iter:
922 for chunk in self.iter:
923 collector.write(chunk)
923 collector.write(chunk)
924 collected += len(chunk)
924 collected += len(chunk)
925 if collected >= targetsize:
925 if collected >= targetsize:
926 break
926 break
927 if collected < targetsize:
927 if collected < targetsize:
928 self.iter = False
928 self.iter = False
929 self.buf = collector.getvalue()
929 self.buf = collector.getvalue()
930 if len(self.buf) == l:
930 if len(self.buf) == l:
931 s, self.buf = str(self.buf), ''
931 s, self.buf = str(self.buf), ''
932 else:
932 else:
933 s, self.buf = self.buf[:l], buffer(self.buf, l)
933 s, self.buf = self.buf[:l], buffer(self.buf, l)
934 return s
934 return s
935
935
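# Editorial sketch (hypothetical helper, not in the original file):
# chunkbuffer turns unevenly sized chunks into reads of the size callers ask
# for, which is how bundle/changegroup streams are consumed.
def _chunkbuffer_sketch():
    buf = chunkbuffer(iter(['abc', 'defgh', 'ij']))
    return buf.read(4), buf.read(4), buf.read(4)   # ('abcd', 'efgh', 'ij')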
936 def filechunkiter(f, size=65536, limit=None):
936 def filechunkiter(f, size=65536, limit=None):
937 """Create a generator that produces the data in the file size
937 """Create a generator that produces the data in the file size
938 (default 65536) bytes at a time, up to optional limit (default is
938 (default 65536) bytes at a time, up to optional limit (default is
939 to read all data). Chunks may be less than size bytes if the
939 to read all data). Chunks may be less than size bytes if the
940 chunk is the last chunk in the file, or the file is a socket or
940 chunk is the last chunk in the file, or the file is a socket or
941 some other type of file that sometimes reads less data than is
941 some other type of file that sometimes reads less data than is
942 requested."""
942 requested."""
943 assert size >= 0
943 assert size >= 0
944 assert limit is None or limit >= 0
944 assert limit is None or limit >= 0
945 while True:
945 while True:
946 if limit is None: nbytes = size
946 if limit is None: nbytes = size
947 else: nbytes = min(limit, size)
947 else: nbytes = min(limit, size)
948 s = nbytes and f.read(nbytes)
948 s = nbytes and f.read(nbytes)
949 if not s: break
949 if not s: break
950 if limit: limit -= len(s)
950 if limit: limit -= len(s)
951 yield s
951 yield s
952
952
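# Editorial sketch (hypothetical helper, not in the original file): streaming
# a file in bounded chunks; with limit set, at most that many bytes are read
# in total.
def _filechunk_sketch(path):
    fp = posixfile(path, 'rb')
    total = 0
    for chunk in filechunkiter(fp, size=8192, limit=1 << 20):
        total += len(chunk)            # never exceeds the 1 MB limit
    fp.close()
    return total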
953 def makedate():
953 def makedate():
954 lt = time.localtime()
954 lt = time.localtime()
955 if lt[8] == 1 and time.daylight:
955 if lt[8] == 1 and time.daylight:
956 tz = time.altzone
956 tz = time.altzone
957 else:
957 else:
958 tz = time.timezone
958 tz = time.timezone
959 return time.mktime(lt), tz
959 return time.mktime(lt), tz
960
960
961 def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
961 def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
962 """represent a (unixtime, offset) tuple as a localized time.
962 """represent a (unixtime, offset) tuple as a localized time.
963 unixtime is seconds since the epoch, and offset is the time zone's
963 unixtime is seconds since the epoch, and offset is the time zone's
964 number of seconds away from UTC. "%1" and "%2" in the format expand
964 number of seconds away from UTC. "%1" and "%2" in the format expand
965 to the signed hours and minutes of the time zone offset."""
965 to the signed hours and minutes of the time zone offset."""
966 t, tz = date or makedate()
966 t, tz = date or makedate()
967 if "%1" in format or "%2" in format:
967 if "%1" in format or "%2" in format:
968 sign = (tz > 0) and "-" or "+"
968 sign = (tz > 0) and "-" or "+"
969 minutes = abs(tz) // 60
969 minutes = abs(tz) // 60
970 format = format.replace("%1", "%c%02d" % (sign, minutes // 60))
970 format = format.replace("%1", "%c%02d" % (sign, minutes // 60))
971 format = format.replace("%2", "%02d" % (minutes % 60))
971 format = format.replace("%2", "%02d" % (minutes % 60))
972 s = time.strftime(format, time.gmtime(float(t) - tz))
972 s = time.strftime(format, time.gmtime(float(t) - tz))
973 return s
973 return s
974
974
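# Editorial sketch (hypothetical helper, not in the original file): "%1" and
# "%2" expand to the signed hours and the minutes of the stored offset, so the
# epoch with a zero offset renders as shown in the comment.
def _datestr_sketch():
    return datestr((0, 0), format='%Y-%m-%d %H:%M %1%2')  # '1970-01-01 00:00 +0000'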
975 def shortdate(date=None):
975 def shortdate(date=None):
976 """turn (timestamp, tzoff) tuple into iso 8631 date."""
976 """turn (timestamp, tzoff) tuple into iso 8631 date."""
977 return datestr(date, format='%Y-%m-%d')
977 return datestr(date, format='%Y-%m-%d')
978
978
979 def strdate(string, format, defaults=[]):
979 def strdate(string, format, defaults=[]):
980 """parse a localized time string and return a (unixtime, offset) tuple.
980 """parse a localized time string and return a (unixtime, offset) tuple.
981 if the string cannot be parsed, ValueError is raised."""
981 if the string cannot be parsed, ValueError is raised."""
982 def timezone(string):
982 def timezone(string):
983 tz = string.split()[-1]
983 tz = string.split()[-1]
984 if tz[0] in "+-" and len(tz) == 5 and tz[1:].isdigit():
984 if tz[0] in "+-" and len(tz) == 5 and tz[1:].isdigit():
985 sign = (tz[0] == "+") and 1 or -1
985 sign = (tz[0] == "+") and 1 or -1
986 hours = int(tz[1:3])
986 hours = int(tz[1:3])
987 minutes = int(tz[3:5])
987 minutes = int(tz[3:5])
988 return -sign * (hours * 60 + minutes) * 60
988 return -sign * (hours * 60 + minutes) * 60
989 if tz == "GMT" or tz == "UTC":
989 if tz == "GMT" or tz == "UTC":
990 return 0
990 return 0
991 return None
991 return None
992
992
993 # NOTE: unixtime = localunixtime + offset
993 # NOTE: unixtime = localunixtime + offset
994 offset, date = timezone(string), string
994 offset, date = timezone(string), string
995 if offset is not None:
995 if offset is not None:
996 date = " ".join(string.split()[:-1])
996 date = " ".join(string.split()[:-1])
997
997
998 # add missing elements from defaults
998 # add missing elements from defaults
999 for part in defaults:
999 for part in defaults:
1000 found = [True for p in part if ("%"+p) in format]
1000 found = [True for p in part if ("%"+p) in format]
1001 if not found:
1001 if not found:
1002 date += "@" + defaults[part]
1002 date += "@" + defaults[part]
1003 format += "@%" + part[0]
1003 format += "@%" + part[0]
1004
1004
1005 timetuple = time.strptime(date, format)
1005 timetuple = time.strptime(date, format)
1006 localunixtime = int(calendar.timegm(timetuple))
1006 localunixtime = int(calendar.timegm(timetuple))
1007 if offset is None:
1007 if offset is None:
1008 # local timezone
1008 # local timezone
1009 unixtime = int(time.mktime(timetuple))
1009 unixtime = int(time.mktime(timetuple))
1010 offset = unixtime - localunixtime
1010 offset = unixtime - localunixtime
1011 else:
1011 else:
1012 unixtime = localunixtime + offset
1012 unixtime = localunixtime + offset
1013 return unixtime, offset
1013 return unixtime, offset
1014
1014
1015 def parsedate(date, formats=None, defaults=None):
1015 def parsedate(date, formats=None, defaults=None):
1016 """parse a localized date/time string and return a (unixtime, offset) tuple.
1016 """parse a localized date/time string and return a (unixtime, offset) tuple.
1017
1017
1018 The date may be a "unixtime offset" string or in one of the specified
1018 The date may be a "unixtime offset" string or in one of the specified
1019 formats. If the date already is a (unixtime, offset) tuple, it is returned.
1019 formats. If the date already is a (unixtime, offset) tuple, it is returned.
1020 """
1020 """
1021 if not date:
1021 if not date:
1022 return 0, 0
1022 return 0, 0
1023 if isinstance(date, tuple) and len(date) == 2:
1023 if isinstance(date, tuple) and len(date) == 2:
1024 return date
1024 return date
1025 if not formats:
1025 if not formats:
1026 formats = defaultdateformats
1026 formats = defaultdateformats
1027 date = date.strip()
1027 date = date.strip()
1028 try:
1028 try:
1029 when, offset = map(int, date.split(' '))
1029 when, offset = map(int, date.split(' '))
1030 except ValueError:
1030 except ValueError:
1031 # fill out defaults
1031 # fill out defaults
1032 if not defaults:
1032 if not defaults:
1033 defaults = {}
1033 defaults = {}
1034 now = makedate()
1034 now = makedate()
1035 for part in "d mb yY HI M S".split():
1035 for part in "d mb yY HI M S".split():
1036 if part not in defaults:
1036 if part not in defaults:
1037 if part[0] in "HMS":
1037 if part[0] in "HMS":
1038 defaults[part] = "00"
1038 defaults[part] = "00"
1039 else:
1039 else:
1040 defaults[part] = datestr(now, "%" + part[0])
1040 defaults[part] = datestr(now, "%" + part[0])
1041
1041
1042 for format in formats:
1042 for format in formats:
1043 try:
1043 try:
1044 when, offset = strdate(date, format, defaults)
1044 when, offset = strdate(date, format, defaults)
1045 except (ValueError, OverflowError):
1045 except (ValueError, OverflowError):
1046 pass
1046 pass
1047 else:
1047 else:
1048 break
1048 break
1049 else:
1049 else:
1050 raise Abort(_('invalid date: %r ') % date)
1050 raise Abort(_('invalid date: %r ') % date)
1051 # validate explicit (probably user-specified) date and
1051 # validate explicit (probably user-specified) date and
1052 # time zone offset. values must fit in signed 32 bits for
1052 # time zone offset. values must fit in signed 32 bits for
1053 # current 32-bit linux runtimes. timezones go from UTC-12
1053 # current 32-bit linux runtimes. timezones go from UTC-12
1054 # to UTC+14
1054 # to UTC+14
1055 if abs(when) > 0x7fffffff:
1055 if abs(when) > 0x7fffffff:
1056 raise Abort(_('date exceeds 32 bits: %d') % when)
1056 raise Abort(_('date exceeds 32 bits: %d') % when)
1057 if offset < -50400 or offset > 43200:
1057 if offset < -50400 or offset > 43200:
1058 raise Abort(_('impossible time zone offset: %d') % offset)
1058 raise Abort(_('impossible time zone offset: %d') % offset)
1059 return when, offset
1059 return when, offset
1060
1060
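# Editorial sketch (hypothetical helper, not in the original file): the
# accepted spellings. A "unixtime offset" string is parsed directly, tuples
# pass through unchanged, and an empty date means the epoch.
def _parsedate_sketch():
    assert parsedate('0 0') == (0, 0)
    assert parsedate((12345, -3600)) == (12345, -3600)
    assert parsedate('') == (0, 0)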
1061 def matchdate(date):
1061 def matchdate(date):
1062 """Return a function that matches a given date match specifier
1062 """Return a function that matches a given date match specifier
1063
1063
1064 Formats include:
1064 Formats include:
1065
1065
1066 '{date}' match a given date to the accuracy provided
1066 '{date}' match a given date to the accuracy provided
1067
1067
1068 '<{date}' on or before a given date
1068 '<{date}' on or before a given date
1069
1069
1070 '>{date}' on or after a given date
1070 '>{date}' on or after a given date
1071
1071
1072 """
1072 """
1073
1073
1074 def lower(date):
1074 def lower(date):
1075 d = dict(mb="1", d="1")
1075 d = dict(mb="1", d="1")
1076 return parsedate(date, extendeddateformats, d)[0]
1076 return parsedate(date, extendeddateformats, d)[0]
1077
1077
1078 def upper(date):
1078 def upper(date):
1079 d = dict(mb="12", HI="23", M="59", S="59")
1079 d = dict(mb="12", HI="23", M="59", S="59")
1080 for days in "31 30 29".split():
1080 for days in "31 30 29".split():
1081 try:
1081 try:
1082 d["d"] = days
1082 d["d"] = days
1083 return parsedate(date, extendeddateformats, d)[0]
1083 return parsedate(date, extendeddateformats, d)[0]
1084 except:
1084 except:
1085 pass
1085 pass
1086 d["d"] = "28"
1086 d["d"] = "28"
1087 return parsedate(date, extendeddateformats, d)[0]
1087 return parsedate(date, extendeddateformats, d)[0]
1088
1088
1089 date = date.strip()
1089 date = date.strip()
1090 if date[0] == "<":
1090 if date[0] == "<":
1091 when = upper(date[1:])
1091 when = upper(date[1:])
1092 return lambda x: x <= when
1092 return lambda x: x <= when
1093 elif date[0] == ">":
1093 elif date[0] == ">":
1094 when = lower(date[1:])
1094 when = lower(date[1:])
1095 return lambda x: x >= when
1095 return lambda x: x >= when
1096 elif date[0] == "-":
1096 elif date[0] == "-":
1097 try:
1097 try:
1098 days = int(date[1:])
1098 days = int(date[1:])
1099 except ValueError:
1099 except ValueError:
1100 raise Abort(_("invalid day spec: %s") % date[1:])
1100 raise Abort(_("invalid day spec: %s") % date[1:])
1101 when = makedate()[0] - days * 3600 * 24
1101 when = makedate()[0] - days * 3600 * 24
1102 return lambda x: x >= when
1102 return lambda x: x >= when
1103 elif " to " in date:
1103 elif " to " in date:
1104 a, b = date.split(" to ")
1104 a, b = date.split(" to ")
1105 start, stop = lower(a), upper(b)
1105 start, stop = lower(a), upper(b)
1106 return lambda x: x >= start and x <= stop
1106 return lambda x: x >= start and x <= stop
1107 else:
1107 else:
1108 start, stop = lower(date), upper(date)
1108 start, stop = lower(date), upper(date)
1109 return lambda x: x >= start and x <= stop
1109 return lambda x: x >= start and x <= stop
1110
1110
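# Editorial sketch (hypothetical helper, not in the original file): the
# '-<days>' form builds a cutoff relative to now, so a current timestamp
# always satisfies the returned predicate.
def _matchdate_sketch():
    recent = matchdate('-30')          # timestamps from the last ~30 days
    return recent(makedate()[0])       # True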
1111 def shortuser(user):
1111 def shortuser(user):
1112 """Return a short representation of a user name or email address."""
1112 """Return a short representation of a user name or email address."""
1113 f = user.find('@')
1113 f = user.find('@')
1114 if f >= 0:
1114 if f >= 0:
1115 user = user[:f]
1115 user = user[:f]
1116 f = user.find('<')
1116 f = user.find('<')
1117 if f >= 0:
1117 if f >= 0:
1118 user = user[f+1:]
1118 user = user[f+1:]
1119 f = user.find(' ')
1119 f = user.find(' ')
1120 if f >= 0:
1120 if f >= 0:
1121 user = user[:f]
1121 user = user[:f]
1122 f = user.find('.')
1122 f = user.find('.')
1123 if f >= 0:
1123 if f >= 0:
1124 user = user[:f]
1124 user = user[:f]
1125 return user
1125 return user
1126
1126
1127 def email(author):
1127 def email(author):
1128 '''get email of author.'''
1128 '''get email of author.'''
1129 r = author.find('>')
1129 r = author.find('>')
1130 if r == -1: r = None
1130 if r == -1: r = None
1131 return author[author.find('<')+1:r]
1131 return author[author.find('<')+1:r]
1132
1132
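# Editorial sketch (hypothetical helper, not in the original file): how the
# two helpers above reduce a full "Name <address>" author string; the address
# is a made-up example.
def _author_sketch():
    author = 'John Doe <john.doe@example.com>'
    assert email(author) == 'john.doe@example.com'
    assert shortuser(author) == 'john'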
1133 def ellipsis(text, maxlength=400):
1133 def ellipsis(text, maxlength=400):
1134 """Trim string to at most maxlength (default: 400) characters."""
1134 """Trim string to at most maxlength (default: 400) characters."""
1135 if len(text) <= maxlength:
1135 if len(text) <= maxlength:
1136 return text
1136 return text
1137 else:
1137 else:
1138 return "%s..." % (text[:maxlength-3])
1138 return "%s..." % (text[:maxlength-3])
1139
1139
1140 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
1140 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
1141 '''yield every hg repository under path, recursively.'''
1141 '''yield every hg repository under path, recursively.'''
1142 def errhandler(err):
1142 def errhandler(err):
1143 if err.filename == path:
1143 if err.filename == path:
1144 raise err
1144 raise err
1145 if followsym and hasattr(os.path, 'samestat'):
1145 if followsym and hasattr(os.path, 'samestat'):
1146 def _add_dir_if_not_there(dirlst, dirname):
1146 def _add_dir_if_not_there(dirlst, dirname):
1147 match = False
1147 match = False
1148 samestat = os.path.samestat
1148 samestat = os.path.samestat
1149 dirstat = os.stat(dirname)
1149 dirstat = os.stat(dirname)
1150 for lstdirstat in dirlst:
1150 for lstdirstat in dirlst:
1151 if samestat(dirstat, lstdirstat):
1151 if samestat(dirstat, lstdirstat):
1152 match = True
1152 match = True
1153 break
1153 break
1154 if not match:
1154 if not match:
1155 dirlst.append(dirstat)
1155 dirlst.append(dirstat)
1156 return not match
1156 return not match
1157 else:
1157 else:
1158 followsym = False
1158 followsym = False
1159
1159
1160 if (seen_dirs is None) and followsym:
1160 if (seen_dirs is None) and followsym:
1161 seen_dirs = []
1161 seen_dirs = []
1162 _add_dir_if_not_there(seen_dirs, path)
1162 _add_dir_if_not_there(seen_dirs, path)
1163 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
1163 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
1164 if '.hg' in dirs:
1164 if '.hg' in dirs:
1165 yield root # found a repository
1165 yield root # found a repository
1166 qroot = os.path.join(root, '.hg', 'patches')
1166 qroot = os.path.join(root, '.hg', 'patches')
1167 if os.path.isdir(os.path.join(qroot, '.hg')):
1167 if os.path.isdir(os.path.join(qroot, '.hg')):
1168 yield qroot # we have a patch queue repo here
1168 yield qroot # we have a patch queue repo here
1169 if recurse:
1169 if recurse:
1170 # avoid recursing inside the .hg directory
1170 # avoid recursing inside the .hg directory
1171 dirs.remove('.hg')
1171 dirs.remove('.hg')
1172 else:
1172 else:
1173 dirs[:] = [] # don't descend further
1173 dirs[:] = [] # don't descend further
1174 elif followsym:
1174 elif followsym:
1175 newdirs = []
1175 newdirs = []
1176 for d in dirs:
1176 for d in dirs:
1177 fname = os.path.join(root, d)
1177 fname = os.path.join(root, d)
1178 if _add_dir_if_not_there(seen_dirs, fname):
1178 if _add_dir_if_not_there(seen_dirs, fname):
1179 if os.path.islink(fname):
1179 if os.path.islink(fname):
1180 for hgname in walkrepos(fname, True, seen_dirs):
1180 for hgname in walkrepos(fname, True, seen_dirs):
1181 yield hgname
1181 yield hgname
1182 else:
1182 else:
1183 newdirs.append(d)
1183 newdirs.append(d)
1184 dirs[:] = newdirs
1184 dirs[:] = newdirs
1185
1185
1186 _rcpath = None
1186 _rcpath = None
1187
1187
1188 def os_rcpath():
1188 def os_rcpath():
1189 '''return default os-specific hgrc search path'''
1189 '''return default os-specific hgrc search path'''
1190 path = system_rcpath()
1190 path = system_rcpath()
1191 path.extend(user_rcpath())
1191 path.extend(user_rcpath())
1192 path = [os.path.normpath(f) for f in path]
1192 path = [os.path.normpath(f) for f in path]
1193 return path
1193 return path
1194
1194
1195 def rcpath():
1195 def rcpath():
1196 '''return hgrc search path. if env var HGRCPATH is set, use it.
1196 '''return hgrc search path. if env var HGRCPATH is set, use it.
1197 for each item in path, if directory, use files ending in .rc,
1197 for each item in path, if directory, use files ending in .rc,
1198 else use item.
1198 else use item.
1199 make HGRCPATH empty to only look in .hg/hgrc of current repo.
1199 make HGRCPATH empty to only look in .hg/hgrc of current repo.
1200 if no HGRCPATH, use default os-specific path.'''
1200 if no HGRCPATH, use default os-specific path.'''
1201 global _rcpath
1201 global _rcpath
1202 if _rcpath is None:
1202 if _rcpath is None:
1203 if 'HGRCPATH' in os.environ:
1203 if 'HGRCPATH' in os.environ:
1204 _rcpath = []
1204 _rcpath = []
1205 for p in os.environ['HGRCPATH'].split(os.pathsep):
1205 for p in os.environ['HGRCPATH'].split(os.pathsep):
1206 if not p: continue
1206 if not p: continue
1207 if os.path.isdir(p):
1207 if os.path.isdir(p):
1208 for f, kind in osutil.listdir(p):
1208 for f, kind in osutil.listdir(p):
1209 if f.endswith('.rc'):
1209 if f.endswith('.rc'):
1210 _rcpath.append(os.path.join(p, f))
1210 _rcpath.append(os.path.join(p, f))
1211 else:
1211 else:
1212 _rcpath.append(p)
1212 _rcpath.append(p)
1213 else:
1213 else:
1214 _rcpath = os_rcpath()
1214 _rcpath = os_rcpath()
1215 return _rcpath
1215 return _rcpath
1216
1216
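# Editorial sketch (hypothetical helper, not in the original file): HGRCPATH
# overrides the OS default search path. Note that the result is cached in
# _rcpath for the lifetime of the process, so set the variable before the
# first call.
def _rcpath_sketch():
    os.environ['HGRCPATH'] = os.path.expanduser('~/.hgrc')
    return rcpath()    # directories expand to their *.rc files, files pass through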
1217 def bytecount(nbytes):
1217 def bytecount(nbytes):
1218 '''return byte count formatted as readable string, with units'''
1218 '''return byte count formatted as readable string, with units'''
1219
1219
1220 units = (
1220 units = (
1221 (100, 1<<30, _('%.0f GB')),
1221 (100, 1<<30, _('%.0f GB')),
1222 (10, 1<<30, _('%.1f GB')),
1222 (10, 1<<30, _('%.1f GB')),
1223 (1, 1<<30, _('%.2f GB')),
1223 (1, 1<<30, _('%.2f GB')),
1224 (100, 1<<20, _('%.0f MB')),
1224 (100, 1<<20, _('%.0f MB')),
1225 (10, 1<<20, _('%.1f MB')),
1225 (10, 1<<20, _('%.1f MB')),
1226 (1, 1<<20, _('%.2f MB')),
1226 (1, 1<<20, _('%.2f MB')),
1227 (100, 1<<10, _('%.0f KB')),
1227 (100, 1<<10, _('%.0f KB')),
1228 (10, 1<<10, _('%.1f KB')),
1228 (10, 1<<10, _('%.1f KB')),
1229 (1, 1<<10, _('%.2f KB')),
1229 (1, 1<<10, _('%.2f KB')),
1230 (1, 1, _('%.0f bytes')),
1230 (1, 1, _('%.0f bytes')),
1231 )
1231 )
1232
1232
1233 for multiplier, divisor, format in units:
1233 for multiplier, divisor, format in units:
1234 if nbytes >= divisor * multiplier:
1234 if nbytes >= divisor * multiplier:
1235 return format % (nbytes / float(divisor))
1235 return format % (nbytes / float(divisor))
1236 return units[-1][2] % nbytes
1236 return units[-1][2] % nbytes
1237
1237
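# Editorial sketch (hypothetical helper, not in the original file): the first
# matching (multiplier, divisor) row picks the precision, giving at most three
# significant digits. The expected strings assume no translation is active.
def _bytecount_sketch():
    assert bytecount(0) == '0 bytes'
    assert bytecount(12345) == '12.1 KB'
    assert bytecount(1 << 30) == '1.00 GB'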
1238 def drop_scheme(scheme, path):
1238 def drop_scheme(scheme, path):
1239 sc = scheme + ':'
1239 sc = scheme + ':'
1240 if path.startswith(sc):
1240 if path.startswith(sc):
1241 path = path[len(sc):]
1241 path = path[len(sc):]
1242 if path.startswith('//'):
1242 if path.startswith('//'):
1243 path = path[2:]
1243 path = path[2:]
1244 return path
1244 return path
1245
1245
1246 def uirepr(s):
1246 def uirepr(s):
1247 # Avoid double backslash in Windows path repr()
1247 # Avoid double backslash in Windows path repr()
1248 return repr(s).replace('\\\\', '\\')
1248 return repr(s).replace('\\\\', '\\')
1249
1249
1250 def termwidth():
1250 def termwidth():
1251 if 'COLUMNS' in os.environ:
1251 if 'COLUMNS' in os.environ:
1252 try:
1252 try:
1253 return int(os.environ['COLUMNS'])
1253 return int(os.environ['COLUMNS'])
1254 except ValueError:
1254 except ValueError:
1255 pass
1255 pass
1256 try:
1256 try:
1257 import termios, array, fcntl
1257 import termios, array, fcntl
1258 for dev in (sys.stdout, sys.stdin):
1258 for dev in (sys.stdout, sys.stdin):
1259 try:
1259 try:
1260 try:
1260 try:
1261 fd = dev.fileno()
1261 fd = dev.fileno()
1262 except AttributeError:
1262 except AttributeError:
1263 continue
1263 continue
1264 if not os.isatty(fd):
1264 if not os.isatty(fd):
1265 continue
1265 continue
1266 arri = fcntl.ioctl(fd, termios.TIOCGWINSZ, '\0' * 8)
1266 arri = fcntl.ioctl(fd, termios.TIOCGWINSZ, '\0' * 8)
1267 return array.array('h', arri)[1]
1267 return array.array('h', arri)[1]
1268 except ValueError:
1268 except ValueError:
1269 pass
1269 pass
1270 except ImportError:
1270 except ImportError:
1271 pass
1271 pass
1272 return 80
1272 return 80
1273
1273
1274 def wrap(line, hangindent, width=None):
1274 def wrap(line, hangindent, width=None):
1275 if width is None:
1275 if width is None:
1276 width = termwidth() - 2
1276 width = termwidth() - 2
1277 if width <= hangindent:
1278 # adjust for weird terminal size
1279 width = max(78, hangindent + 1)
1277 padding = '\n' + ' ' * hangindent
1280 padding = '\n' + ' ' * hangindent
1278 return padding.join(textwrap.wrap(line, width=width - hangindent))
1281 return padding.join(textwrap.wrap(line, width=width - hangindent))
1279
1282
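# Editorial sketch (hypothetical helper, not in the original file): the clamp
# added above is the point of this changeset. textwrap.wrap() raises
# ValueError for non-positive widths, so COLUMNS=0 (termwidth() == 0, hence
# width == -2) used to crash; with the guard, wrapping falls back to a sane
# 78-column layout.
def _wrap_sketch():
    os.environ['COLUMNS'] = '0'
    return wrap('a line that is comfortably long enough to need wrapping', 4)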
1280 def iterlines(iterator):
1283 def iterlines(iterator):
1281 for chunk in iterator:
1284 for chunk in iterator:
1282 for line in chunk.splitlines():
1285 for line in chunk.splitlines():
1283 yield line
1286 yield line