util, minirst: do not crash with COLUMNS=0
Martin Geisler
r9417:4c3fb451 default
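Background for this change: termwidth() returns int(os.environ['COLUMNS']) verbatim, so COLUMNS=0 makes it return 0, and textwrap refuses any width <= 0 with ValueError, which made the formatting code crash. The two hunks below clamp the width before it ever reaches textwrap. A minimal standalone sketch of the failure and of the clamping idea (clamp_width is a hypothetical helper for illustration, not part of the patch):

    import textwrap

    def clamp_width(width, fallback=78):
        # textwrap raises ValueError("invalid width ...") for width <= 0,
        # so fall back to a sane default instead of crashing.
        if width <= 0:
            return fallback
        return width

    text = "This parser knows just enough about reStructuredText to parse docstrings."

    try:
        textwrap.fill(text, width=0 - 2)      # COLUMNS=0 makes termwidth() - 2 negative
    except ValueError as err:
        print("unclamped width fails: %s" % err)

    print(textwrap.fill(text, width=clamp_width(0 - 2)))  # clamped: wraps at 78 columns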
mercurial/minirst.py
@@ -1,343 +1,345 @@
1 1 # minirst.py - minimal reStructuredText parser
2 2 #
3 3 # Copyright 2009 Matt Mackall <mpm@selenic.com> and others
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2, incorporated herein by reference.
7 7
8 8 """simplified reStructuredText parser.
9 9
10 10 This parser knows just enough about reStructuredText to parse the
11 11 Mercurial docstrings.
12 12
13 13 It cheats in a major way: nested blocks are not really nested. They
14 14 are just indented blocks that look like they are nested. This relies
15 15 on the user to keep the right indentation for the blocks.
16 16
17 17 It only supports a small subset of reStructuredText:
18 18
19 19 - paragraphs
20 20
21 21 - definition lists (must use ' ' to indent definitions)
22 22
23 23 - lists (items must start with '-')
24 24
25 25 - field lists (colons cannot be escaped)
26 26
27 27 - literal blocks
28 28
29 29 - option lists (supports only long options without arguments)
30 30
31 31 - inline markup is not recognized at all.
32 32 """
33 33
34 34 import re, sys, textwrap
35 35
36 36
37 37 def findblocks(text):
38 38 """Find continuous blocks of lines in text.
39 39
40 40 Returns a list of dictionaries representing the blocks. Each block
41 41 has an 'indent' field and a 'lines' field.
42 42 """
43 43 blocks = [[]]
44 44 lines = text.splitlines()
45 45 for line in lines:
46 46 if line.strip():
47 47 blocks[-1].append(line)
48 48 elif blocks[-1]:
49 49 blocks.append([])
50 50 if not blocks[-1]:
51 51 del blocks[-1]
52 52
53 53 for i, block in enumerate(blocks):
54 54 indent = min((len(l) - len(l.lstrip())) for l in block)
55 55 blocks[i] = dict(indent=indent, lines=[l[indent:] for l in block])
56 56 return blocks
57 57
58 58
59 59 def findliteralblocks(blocks):
60 60 """Finds literal blocks and adds a 'type' field to the blocks.
61 61
62 62 Literal blocks are given the type 'literal', all other blocks are
63 63 given the type 'paragraph'.
64 64 """
65 65 i = 0
66 66 while i < len(blocks):
67 67 # Searching for a block that looks like this:
68 68 #
69 69 # +------------------------------+
70 70 # | paragraph |
71 71 # | (ends with "::") |
72 72 # +------------------------------+
73 73 # +---------------------------+
74 74 # | indented literal block |
75 75 # +---------------------------+
76 76 blocks[i]['type'] = 'paragraph'
77 77 if blocks[i]['lines'][-1].endswith('::') and i+1 < len(blocks):
78 78 indent = blocks[i]['indent']
79 79 adjustment = blocks[i+1]['indent'] - indent
80 80
81 81 if blocks[i]['lines'] == ['::']:
82 82 # Expanded form: remove block
83 83 del blocks[i]
84 84 i -= 1
85 85 elif blocks[i]['lines'][-1].endswith(' ::'):
86 86 # Partially minimized form: remove space and both
87 87 # colons.
88 88 blocks[i]['lines'][-1] = blocks[i]['lines'][-1][:-3]
89 89 else:
90 90 # Fully minimized form: remove just one colon.
91 91 blocks[i]['lines'][-1] = blocks[i]['lines'][-1][:-1]
92 92
93 93 # List items are formatted with a hanging indent. We must
94 94 # correct for this here while we still have the original
95 95 # information on the indentation of the subsequent literal
96 96 # blocks available.
97 97 if blocks[i]['lines'][0].startswith('- '):
98 98 indent += 2
99 99 adjustment -= 2
100 100
101 101 # Mark the following indented blocks.
102 102 while i+1 < len(blocks) and blocks[i+1]['indent'] > indent:
103 103 blocks[i+1]['type'] = 'literal'
104 104 blocks[i+1]['indent'] -= adjustment
105 105 i += 1
106 106 i += 1
107 107 return blocks
108 108
109 109
110 110 def findsections(blocks):
111 111 """Finds sections.
112 112
113 113 The blocks must have a 'type' field, i.e., they should have been
114 114 run through findliteralblocks first.
115 115 """
116 116 for block in blocks:
117 117 # Searching for a block that looks like this:
118 118 #
119 119 # +------------------------------+
120 120 # | Section title |
121 121 # | ------------- |
122 122 # +------------------------------+
123 123 if (block['type'] == 'paragraph' and
124 124 len(block['lines']) == 2 and
125 125 block['lines'][1] == '-' * len(block['lines'][0])):
126 126 block['type'] = 'section'
127 127 return blocks
128 128
129 129
130 130 def findbulletlists(blocks):
131 131 """Finds bullet lists.
132 132
133 133 The blocks must have a 'type' field, i.e., they should have been
134 134 run through findliteralblocks first.
135 135 """
136 136 i = 0
137 137 while i < len(blocks):
138 138 # Searching for a paragraph that looks like this:
139 139 #
140 140 # +------+-----------------------+
141 141 # | "- " | list item |
142 142 # +------| (body elements)+ |
143 143 # +-----------------------+
144 144 if (blocks[i]['type'] == 'paragraph' and
145 145 blocks[i]['lines'][0].startswith('- ')):
146 146 items = []
147 147 for line in blocks[i]['lines']:
148 148 if line.startswith('- '):
149 149 items.append(dict(type='bullet', lines=[],
150 150 indent=blocks[i]['indent']))
151 151 line = line[2:]
152 152 items[-1]['lines'].append(line)
153 153 blocks[i:i+1] = items
154 154 i += len(items) - 1
155 155 i += 1
156 156 return blocks
157 157
158 158
159 159 _optionre = re.compile(r'^(--[a-z-]+)((?:[ =][a-zA-Z][\w-]*)? +)(.*)$')
160 160 def findoptionlists(blocks):
161 161 """Finds option lists.
162 162
163 163 The blocks must have a 'type' field, i.e., they should have been
164 164 run through findliteralblocks first.
165 165 """
166 166 i = 0
167 167 while i < len(blocks):
168 168 # Searching for a paragraph that looks like this:
169 169 #
170 170 # +----------------------------+-------------+
171 171 # | "--" option " " | description |
172 172 # +-------+--------------------+ |
173 173 # | (body elements)+ |
174 174 # +----------------------------------+
175 175 if (blocks[i]['type'] == 'paragraph' and
176 176 _optionre.match(blocks[i]['lines'][0])):
177 177 options = []
178 178 for line in blocks[i]['lines']:
179 179 m = _optionre.match(line)
180 180 if m:
181 181 option, arg, rest = m.groups()
182 182 width = len(option) + len(arg)
183 183 options.append(dict(type='option', lines=[],
184 184 indent=blocks[i]['indent'],
185 185 width=width))
186 186 options[-1]['lines'].append(line)
187 187 blocks[i:i+1] = options
188 188 i += len(options) - 1
189 189 i += 1
190 190 return blocks
191 191
192 192
193 193 _fieldre = re.compile(r':(?![: ])([^:]*)(?<! ):( +)(.*)')
194 194 def findfieldlists(blocks):
195 195 """Finds field lists.
196 196
197 197 The blocks must have a 'type' field, i.e., they should have been
198 198 run through findliteralblocks first.
199 199 """
200 200 i = 0
201 201 while i < len(blocks):
202 202 # Searching for a paragraph that looks like this:
203 203 #
204 204 #
205 205 # +--------------------+----------------------+
206 206 # | ":" field name ":" | field body |
207 207 # +-------+------------+ |
208 208 # | (body elements)+ |
209 209 # +-----------------------------------+
210 210 if (blocks[i]['type'] == 'paragraph' and
211 211 _fieldre.match(blocks[i]['lines'][0])):
212 212 indent = blocks[i]['indent']
213 213 fields = []
214 214 for line in blocks[i]['lines']:
215 215 m = _fieldre.match(line)
216 216 if m:
217 217 key, spaces, rest = m.groups()
218 218 width = 2 + len(key) + len(spaces)
219 219 fields.append(dict(type='field', lines=[],
220 220 indent=indent, width=width))
221 221 # Turn ":foo: bar" into "foo bar".
222 222 line = '%s %s%s' % (key, spaces, rest)
223 223 fields[-1]['lines'].append(line)
224 224 blocks[i:i+1] = fields
225 225 i += len(fields) - 1
226 226 i += 1
227 227 return blocks
228 228
229 229
230 230 def finddefinitionlists(blocks):
231 231 """Finds definition lists.
232 232
233 233 The blocks must have a 'type' field, i.e., they should have been
234 234 run through findliteralblocks first.
235 235 """
236 236 i = 0
237 237 while i < len(blocks):
238 238 # Searching for a paragraph that looks like this:
239 239 #
240 240 # +----------------------------+
241 241 # | term |
242 242 # +--+-------------------------+--+
243 243 # | definition |
244 244 # | (body elements)+ |
245 245 # +----------------------------+
246 246 if (blocks[i]['type'] == 'paragraph' and
247 247 len(blocks[i]['lines']) > 1 and
248 248 not blocks[i]['lines'][0].startswith(' ') and
249 249 blocks[i]['lines'][1].startswith(' ')):
250 250 definitions = []
251 251 for line in blocks[i]['lines']:
252 252 if not line.startswith(' '):
253 253 definitions.append(dict(type='definition', lines=[],
254 254 indent=blocks[i]['indent']))
255 255 definitions[-1]['lines'].append(line)
256 256 definitions[-1]['hang'] = len(line) - len(line.lstrip())
257 257 blocks[i:i+1] = definitions
258 258 i += len(definitions) - 1
259 259 i += 1
260 260 return blocks
261 261
262 262
263 263 def addmargins(blocks):
264 264 """Adds empty blocks for vertical spacing.
265 265
266 266 This groups bullets, options, and definitions together with no vertical
267 267 space between them, and adds an empty block between all other blocks.
268 268 """
269 269 i = 1
270 270 while i < len(blocks):
271 271 if (blocks[i]['type'] == blocks[i-1]['type'] and
272 272 blocks[i]['type'] in ('bullet', 'option', 'field', 'definition')):
273 273 i += 1
274 274 else:
275 275 blocks.insert(i, dict(lines=[''], indent=0, type='margin'))
276 276 i += 2
277 277 return blocks
278 278
279 279
280 280 def formatblock(block, width):
281 281 """Format a block according to width."""
282 if width <= 0:
283 width = 78
282 284 indent = ' ' * block['indent']
283 285 if block['type'] == 'margin':
284 286 return ''
285 287 elif block['type'] == 'literal':
286 288 indent += ' '
287 289 return indent + ('\n' + indent).join(block['lines'])
288 290 elif block['type'] == 'section':
289 291 return indent + ('\n' + indent).join(block['lines'])
290 292 elif block['type'] == 'definition':
291 293 term = indent + block['lines'][0]
292 294 defindent = indent + block['hang'] * ' '
293 295 text = ' '.join(map(str.strip, block['lines'][1:]))
294 296 return "%s\n%s" % (term, textwrap.fill(text, width=width,
295 297 initial_indent=defindent,
296 298 subsequent_indent=defindent))
297 299 else:
298 300 initindent = subindent = indent
299 301 text = ' '.join(map(str.strip, block['lines']))
300 302 if block['type'] == 'bullet':
301 303 initindent = indent + '- '
302 304 subindent = indent + ' '
303 305 elif block['type'] in ('option', 'field'):
304 306 subindent = indent + block['width'] * ' '
305 307
306 308 return textwrap.fill(text, width=width,
307 309 initial_indent=initindent,
308 310 subsequent_indent=subindent)
309 311
310 312
311 313 def format(text, width):
312 314 """Parse and format the text according to width."""
313 315 blocks = findblocks(text)
314 316 blocks = findliteralblocks(blocks)
315 317 blocks = findsections(blocks)
316 318 blocks = findbulletlists(blocks)
317 319 blocks = findoptionlists(blocks)
318 320 blocks = findfieldlists(blocks)
319 321 blocks = finddefinitionlists(blocks)
320 322 blocks = addmargins(blocks)
321 323 return '\n'.join(formatblock(b, width) for b in blocks)
322 324
323 325
324 326 if __name__ == "__main__":
325 327 from pprint import pprint
326 328
327 329 def debug(func, blocks):
328 330 blocks = func(blocks)
329 331 print "*** after %s:" % func.__name__
330 332 pprint(blocks)
331 333 print
332 334 return blocks
333 335
334 336 text = open(sys.argv[1]).read()
335 337 blocks = debug(findblocks, text)
336 338 blocks = debug(findliteralblocks, blocks)
337 339 blocks = debug(findsections, blocks)
338 340 blocks = debug(findbulletlists, blocks)
339 341 blocks = debug(findoptionlists, blocks)
340 342 blocks = debug(findfieldlists, blocks)
341 343 blocks = debug(finddefinitionlists, blocks)
342 344 blocks = debug(addmargins, blocks)
343 345 print '\n'.join(formatblock(b, 30) for b in blocks)
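With the guard added to formatblock() above, format() falls back to a 78-column width whenever the caller passes a non-positive width. A rough usage sketch, assuming minirst is importable from a Mercurial checkout of this vintage (the sample input text is invented):

    # Run with a Mercurial source tree on the path; the input text is made up.
    from mercurial import minirst

    text = "\n".join([
        "Options",
        "-------",
        "",
        "--verbose    print extra output",
        "",
        "- first bullet item",
        "- second bullet item",
    ])

    print(minirst.format(text, 60))   # normal case: wrap at 60 columns
    print(minirst.format(text, 0))    # width 0 now falls back to 78 instead of crashing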
mercurial/util.py
@@ -1,1283 +1,1286 @@
1 1 # util.py - Mercurial utility functions and platform-specific implementations
2 2 #
3 3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2, incorporated herein by reference.
9 9
10 10 """Mercurial utility functions and platform specfic implementations.
11 11
12 12 This contains helper routines that are independent of the SCM core and
13 13 hide platform-specific details from the core.
14 14 """
15 15
16 16 from i18n import _
17 17 import error, osutil
18 18 import cStringIO, errno, re, shutil, sys, tempfile, traceback
19 19 import os, stat, time, calendar, random, textwrap
20 20 import imp
21 21
22 22 # Python compatibility
23 23
24 24 def sha1(s):
25 25 return _fastsha1(s)
26 26
27 27 def _fastsha1(s):
28 28 # This function will import sha1 from hashlib or sha (whichever is
29 29 # available) and overwrite itself with it on the first call.
30 30 # Subsequent calls will go directly to the imported function.
31 31 try:
32 32 from hashlib import sha1 as _sha1
33 33 except ImportError:
34 34 from sha import sha as _sha1
35 35 global _fastsha1, sha1
36 36 _fastsha1 = sha1 = _sha1
37 37 return _sha1(s)
38 38
39 39 import subprocess
40 40 closefds = os.name == 'posix'
41 41 def popen2(cmd):
42 42 # Setting bufsize to -1 lets the system decide the buffer size.
43 43 # The default for bufsize is 0, meaning unbuffered. This leads to
44 44 # poor performance on Mac OS X: http://bugs.python.org/issue4194
45 45 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
46 46 close_fds=closefds,
47 47 stdin=subprocess.PIPE, stdout=subprocess.PIPE)
48 48 return p.stdin, p.stdout
49 49 def popen3(cmd):
50 50 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
51 51 close_fds=closefds,
52 52 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
53 53 stderr=subprocess.PIPE)
54 54 return p.stdin, p.stdout, p.stderr
55 55
56 56 def version():
57 57 """Return version information if available."""
58 58 try:
59 59 import __version__
60 60 return __version__.version
61 61 except ImportError:
62 62 return 'unknown'
63 63
64 64 # used by parsedate
65 65 defaultdateformats = (
66 66 '%Y-%m-%d %H:%M:%S',
67 67 '%Y-%m-%d %I:%M:%S%p',
68 68 '%Y-%m-%d %H:%M',
69 69 '%Y-%m-%d %I:%M%p',
70 70 '%Y-%m-%d',
71 71 '%m-%d',
72 72 '%m/%d',
73 73 '%m/%d/%y',
74 74 '%m/%d/%Y',
75 75 '%a %b %d %H:%M:%S %Y',
76 76 '%a %b %d %I:%M:%S%p %Y',
77 77 '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
78 78 '%b %d %H:%M:%S %Y',
79 79 '%b %d %I:%M:%S%p %Y',
80 80 '%b %d %H:%M:%S',
81 81 '%b %d %I:%M:%S%p',
82 82 '%b %d %H:%M',
83 83 '%b %d %I:%M%p',
84 84 '%b %d %Y',
85 85 '%b %d',
86 86 '%H:%M:%S',
87 87 '%I:%M:%S%p',
88 88 '%H:%M',
89 89 '%I:%M%p',
90 90 )
91 91
92 92 extendeddateformats = defaultdateformats + (
93 93 "%Y",
94 94 "%Y-%m",
95 95 "%b",
96 96 "%b %Y",
97 97 )
98 98
99 99 def cachefunc(func):
100 100 '''cache the result of function calls'''
101 101 # XXX doesn't handle keywords args
102 102 cache = {}
103 103 if func.func_code.co_argcount == 1:
104 104 # we gain a small amount of time because
105 105 # we don't need to pack/unpack the list
106 106 def f(arg):
107 107 if arg not in cache:
108 108 cache[arg] = func(arg)
109 109 return cache[arg]
110 110 else:
111 111 def f(*args):
112 112 if args not in cache:
113 113 cache[args] = func(*args)
114 114 return cache[args]
115 115
116 116 return f
117 117
118 118 def lrucachefunc(func):
119 119 '''cache most recent results of function calls'''
120 120 cache = {}
121 121 order = []
122 122 if func.func_code.co_argcount == 1:
123 123 def f(arg):
124 124 if arg not in cache:
125 125 if len(cache) > 20:
126 126 del cache[order.pop(0)]
127 127 cache[arg] = func(arg)
128 128 else:
129 129 order.remove(arg)
130 130 order.append(arg)
131 131 return cache[arg]
132 132 else:
133 133 def f(*args):
134 134 if args not in cache:
135 135 if len(cache) > 20:
136 136 del cache[order.pop(0)]
137 137 cache[args] = func(*args)
138 138 else:
139 139 order.remove(args)
140 140 order.append(args)
141 141 return cache[args]
142 142
143 143 return f
144 144
145 145 class propertycache(object):
146 146 def __init__(self, func):
147 147 self.func = func
148 148 self.name = func.__name__
149 149 def __get__(self, obj, type=None):
150 150 result = self.func(obj)
151 151 setattr(obj, self.name, result)
152 152 return result
153 153
154 154 def pipefilter(s, cmd):
155 155 '''filter string S through command CMD, returning its output'''
156 156 p = subprocess.Popen(cmd, shell=True, close_fds=closefds,
157 157 stdin=subprocess.PIPE, stdout=subprocess.PIPE)
158 158 pout, perr = p.communicate(s)
159 159 return pout
160 160
161 161 def tempfilter(s, cmd):
162 162 '''filter string S through a pair of temporary files with CMD.
163 163 CMD is used as a template to create the real command to be run,
164 164 with the strings INFILE and OUTFILE replaced by the real names of
165 165 the temporary files generated.'''
166 166 inname, outname = None, None
167 167 try:
168 168 infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
169 169 fp = os.fdopen(infd, 'wb')
170 170 fp.write(s)
171 171 fp.close()
172 172 outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
173 173 os.close(outfd)
174 174 cmd = cmd.replace('INFILE', inname)
175 175 cmd = cmd.replace('OUTFILE', outname)
176 176 code = os.system(cmd)
177 177 if sys.platform == 'OpenVMS' and code & 1:
178 178 code = 0
179 179 if code: raise Abort(_("command '%s' failed: %s") %
180 180 (cmd, explain_exit(code)))
181 181 return open(outname, 'rb').read()
182 182 finally:
183 183 try:
184 184 if inname: os.unlink(inname)
185 185 except: pass
186 186 try:
187 187 if outname: os.unlink(outname)
188 188 except: pass
189 189
190 190 filtertable = {
191 191 'tempfile:': tempfilter,
192 192 'pipe:': pipefilter,
193 193 }
194 194
195 195 def filter(s, cmd):
196 196 "filter a string through a command that transforms its input to its output"
197 197 for name, fn in filtertable.iteritems():
198 198 if cmd.startswith(name):
199 199 return fn(s, cmd[len(name):].lstrip())
200 200 return pipefilter(s, cmd)
201 201
202 202 def binary(s):
203 203 """return true if a string is binary data"""
204 204 return bool(s and '\0' in s)
205 205
206 206 def increasingchunks(source, min=1024, max=65536):
207 207 '''return no less than min bytes per chunk while data remains,
208 208 doubling min after each chunk until it reaches max'''
209 209 def log2(x):
210 210 if not x:
211 211 return 0
212 212 i = 0
213 213 while x:
214 214 x >>= 1
215 215 i += 1
216 216 return i - 1
217 217
218 218 buf = []
219 219 blen = 0
220 220 for chunk in source:
221 221 buf.append(chunk)
222 222 blen += len(chunk)
223 223 if blen >= min:
224 224 if min < max:
225 225 min = min << 1
226 226 nmin = 1 << log2(blen)
227 227 if nmin > min:
228 228 min = nmin
229 229 if min > max:
230 230 min = max
231 231 yield ''.join(buf)
232 232 blen = 0
233 233 buf = []
234 234 if buf:
235 235 yield ''.join(buf)
236 236
237 237 Abort = error.Abort
238 238
239 239 def always(fn): return True
240 240 def never(fn): return False
241 241
242 242 def pathto(root, n1, n2):
243 243 '''return the relative path from one place to another.
244 244 root should use os.sep to separate directories
245 245 n1 should use os.sep to separate directories
246 246 n2 should use "/" to separate directories
247 247 returns an os.sep-separated path.
248 248
249 249 If n1 is a relative path, it's assumed it's
250 250 relative to root.
251 251 n2 should always be relative to root.
252 252 '''
253 253 if not n1: return localpath(n2)
254 254 if os.path.isabs(n1):
255 255 if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
256 256 return os.path.join(root, localpath(n2))
257 257 n2 = '/'.join((pconvert(root), n2))
258 258 a, b = splitpath(n1), n2.split('/')
259 259 a.reverse()
260 260 b.reverse()
261 261 while a and b and a[-1] == b[-1]:
262 262 a.pop()
263 263 b.pop()
264 264 b.reverse()
265 265 return os.sep.join((['..'] * len(a)) + b) or '.'
266 266
267 267 def canonpath(root, cwd, myname):
268 268 """return the canonical path of myname, given cwd and root"""
269 269 if endswithsep(root):
270 270 rootsep = root
271 271 else:
272 272 rootsep = root + os.sep
273 273 name = myname
274 274 if not os.path.isabs(name):
275 275 name = os.path.join(root, cwd, name)
276 276 name = os.path.normpath(name)
277 277 audit_path = path_auditor(root)
278 278 if name != rootsep and name.startswith(rootsep):
279 279 name = name[len(rootsep):]
280 280 audit_path(name)
281 281 return pconvert(name)
282 282 elif name == root:
283 283 return ''
284 284 else:
285 285 # Determine whether `name' is in the hierarchy at or beneath `root',
286 286 # by iterating name=dirname(name) until that causes no change (can't
287 287 # check name == '/', because that doesn't work on windows). For each
288 288 # `name', compare dev/inode numbers. If they match, the list `rel'
289 289 # holds the reversed list of components making up the relative file
290 290 # name we want.
291 291 root_st = os.stat(root)
292 292 rel = []
293 293 while True:
294 294 try:
295 295 name_st = os.stat(name)
296 296 except OSError:
297 297 break
298 298 if samestat(name_st, root_st):
299 299 if not rel:
300 300 # name was actually the same as root (maybe a symlink)
301 301 return ''
302 302 rel.reverse()
303 303 name = os.path.join(*rel)
304 304 audit_path(name)
305 305 return pconvert(name)
306 306 dirname, basename = os.path.split(name)
307 307 rel.append(basename)
308 308 if dirname == name:
309 309 break
310 310 name = dirname
311 311
312 312 raise Abort('%s not under root' % myname)
313 313
314 314 _hgexecutable = None
315 315
316 316 def main_is_frozen():
317 317 """return True if we are a frozen executable.
318 318
319 319 The code supports py2exe (most common, Windows only) and tools/freeze
320 320 (portable, not much used).
321 321 """
322 322 return (hasattr(sys, "frozen") or # new py2exe
323 323 hasattr(sys, "importers") or # old py2exe
324 324 imp.is_frozen("__main__")) # tools/freeze
325 325
326 326 def hgexecutable():
327 327 """return location of the 'hg' executable.
328 328
329 329 Defaults to $HG or 'hg' in the search path.
330 330 """
331 331 if _hgexecutable is None:
332 332 hg = os.environ.get('HG')
333 333 if hg:
334 334 set_hgexecutable(hg)
335 335 elif main_is_frozen():
336 336 set_hgexecutable(sys.executable)
337 337 else:
338 338 set_hgexecutable(find_exe('hg') or 'hg')
339 339 return _hgexecutable
340 340
341 341 def set_hgexecutable(path):
342 342 """set location of the 'hg' executable"""
343 343 global _hgexecutable
344 344 _hgexecutable = path
345 345
346 346 def system(cmd, environ={}, cwd=None, onerr=None, errprefix=None):
347 347 '''enhanced shell command execution.
348 348 run with environment maybe modified, maybe in different dir.
349 349
350 350 if command fails and onerr is None, return status. if ui object,
351 351 print error message and return status, else raise onerr object as
352 352 exception.'''
353 353 def py2shell(val):
354 354 'convert python object into string that is useful to shell'
355 355 if val is None or val is False:
356 356 return '0'
357 357 if val is True:
358 358 return '1'
359 359 return str(val)
360 360 oldenv = {}
361 361 for k in environ:
362 362 oldenv[k] = os.environ.get(k)
363 363 if cwd is not None:
364 364 oldcwd = os.getcwd()
365 365 origcmd = cmd
366 366 if os.name == 'nt':
367 367 cmd = '"%s"' % cmd
368 368 try:
369 369 for k, v in environ.iteritems():
370 370 os.environ[k] = py2shell(v)
371 371 os.environ['HG'] = hgexecutable()
372 372 if cwd is not None and oldcwd != cwd:
373 373 os.chdir(cwd)
374 374 rc = os.system(cmd)
375 375 if sys.platform == 'OpenVMS' and rc & 1:
376 376 rc = 0
377 377 if rc and onerr:
378 378 errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
379 379 explain_exit(rc)[0])
380 380 if errprefix:
381 381 errmsg = '%s: %s' % (errprefix, errmsg)
382 382 try:
383 383 onerr.warn(errmsg + '\n')
384 384 except AttributeError:
385 385 raise onerr(errmsg)
386 386 return rc
387 387 finally:
388 388 for k, v in oldenv.iteritems():
389 389 if v is None:
390 390 del os.environ[k]
391 391 else:
392 392 os.environ[k] = v
393 393 if cwd is not None and oldcwd != cwd:
394 394 os.chdir(oldcwd)
395 395
396 396 def checksignature(func):
397 397 '''wrap a function with code to check for calling errors'''
398 398 def check(*args, **kwargs):
399 399 try:
400 400 return func(*args, **kwargs)
401 401 except TypeError:
402 402 if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
403 403 raise error.SignatureError
404 404 raise
405 405
406 406 return check
407 407
408 408 # os.path.lexists is not available on python2.3
409 409 def lexists(filename):
410 410 "test whether a file with this name exists. does not follow symlinks"
411 411 try:
412 412 os.lstat(filename)
413 413 except:
414 414 return False
415 415 return True
416 416
417 417 def rename(src, dst):
418 418 """forcibly rename a file"""
419 419 try:
420 420 os.rename(src, dst)
421 421 except OSError, err: # FIXME: check err (EEXIST ?)
422 422
423 423 # On windows, rename to existing file is not allowed, so we
424 424 # must delete destination first. But if a file is open, unlink
425 425 # schedules it for delete but does not delete it. Rename
426 426 # happens immediately even for open files, so we rename
427 427 # destination to a temporary name, then delete that. Then
428 428 # rename is safe to do.
429 429 # The temporary name is chosen at random to avoid the situation
430 430 # where a file is left lying around from a previous aborted run.
431 431 # The usual race condition this introduces can't be avoided as
432 432 # we need the name to rename into, and not the file itself. Due
433 433 # to the nature of the operation however, any races will at worst
434 434 # lead to the rename failing and the current operation aborting.
435 435
436 436 def tempname(prefix):
437 437 for tries in xrange(10):
438 438 temp = '%s-%08x' % (prefix, random.randint(0, 0xffffffff))
439 439 if not os.path.exists(temp):
440 440 return temp
441 441 raise IOError, (errno.EEXIST, "No usable temporary filename found")
442 442
443 443 temp = tempname(dst)
444 444 os.rename(dst, temp)
445 445 os.unlink(temp)
446 446 os.rename(src, dst)
447 447
448 448 def unlink(f):
449 449 """unlink and remove the directory if it is empty"""
450 450 os.unlink(f)
451 451 # try removing directories that might now be empty
452 452 try:
453 453 os.removedirs(os.path.dirname(f))
454 454 except OSError:
455 455 pass
456 456
457 457 def copyfile(src, dest):
458 458 "copy a file, preserving mode and atime/mtime"
459 459 if os.path.islink(src):
460 460 try:
461 461 os.unlink(dest)
462 462 except:
463 463 pass
464 464 os.symlink(os.readlink(src), dest)
465 465 else:
466 466 try:
467 467 shutil.copyfile(src, dest)
468 468 shutil.copystat(src, dest)
469 469 except shutil.Error, inst:
470 470 raise Abort(str(inst))
471 471
472 472 def copyfiles(src, dst, hardlink=None):
473 473 """Copy a directory tree using hardlinks if possible"""
474 474
475 475 if hardlink is None:
476 476 hardlink = (os.stat(src).st_dev ==
477 477 os.stat(os.path.dirname(dst)).st_dev)
478 478
479 479 if os.path.isdir(src):
480 480 os.mkdir(dst)
481 481 for name, kind in osutil.listdir(src):
482 482 srcname = os.path.join(src, name)
483 483 dstname = os.path.join(dst, name)
484 484 copyfiles(srcname, dstname, hardlink)
485 485 else:
486 486 if hardlink:
487 487 try:
488 488 os_link(src, dst)
489 489 except (IOError, OSError):
490 490 hardlink = False
491 491 shutil.copy(src, dst)
492 492 else:
493 493 shutil.copy(src, dst)
494 494
495 495 class path_auditor(object):
496 496 '''ensure that a filesystem path contains no banned components.
497 497 the following properties of a path are checked:
498 498
499 499 - under top-level .hg
500 500 - starts at the root of a windows drive
501 501 - contains ".."
502 502 - traverses a symlink (e.g. a/symlink_here/b)
503 503 - inside a nested repository'''
504 504
505 505 def __init__(self, root):
506 506 self.audited = set()
507 507 self.auditeddir = set()
508 508 self.root = root
509 509
510 510 def __call__(self, path):
511 511 if path in self.audited:
512 512 return
513 513 normpath = os.path.normcase(path)
514 514 parts = splitpath(normpath)
515 515 if (os.path.splitdrive(path)[0]
516 516 or parts[0].lower() in ('.hg', '.hg.', '')
517 517 or os.pardir in parts):
518 518 raise Abort(_("path contains illegal component: %s") % path)
519 519 if '.hg' in path.lower():
520 520 lparts = [p.lower() for p in parts]
521 521 for p in '.hg', '.hg.':
522 522 if p in lparts[1:]:
523 523 pos = lparts.index(p)
524 524 base = os.path.join(*parts[:pos])
525 525 raise Abort(_('path %r is inside repo %r') % (path, base))
526 526 def check(prefix):
527 527 curpath = os.path.join(self.root, prefix)
528 528 try:
529 529 st = os.lstat(curpath)
530 530 except OSError, err:
531 531 # EINVAL can be raised as invalid path syntax under win32.
532 532 # They must be ignored so that patterns can be checked too.
533 533 if err.errno not in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL):
534 534 raise
535 535 else:
536 536 if stat.S_ISLNK(st.st_mode):
537 537 raise Abort(_('path %r traverses symbolic link %r') %
538 538 (path, prefix))
539 539 elif (stat.S_ISDIR(st.st_mode) and
540 540 os.path.isdir(os.path.join(curpath, '.hg'))):
541 541 raise Abort(_('path %r is inside repo %r') %
542 542 (path, prefix))
543 543 parts.pop()
544 544 prefixes = []
545 545 while parts:
546 546 prefix = os.sep.join(parts)
547 547 if prefix in self.auditeddir:
548 548 break
549 549 check(prefix)
550 550 prefixes.append(prefix)
551 551 parts.pop()
552 552
553 553 self.audited.add(path)
554 554 # only add prefixes to the cache after checking everything: we don't
555 555 # want to add "foo/bar/baz" before checking if there's a "foo/.hg"
556 556 self.auditeddir.update(prefixes)
557 557
558 558 def nlinks(pathname):
559 559 """Return number of hardlinks for the given file."""
560 560 return os.lstat(pathname).st_nlink
561 561
562 562 if hasattr(os, 'link'):
563 563 os_link = os.link
564 564 else:
565 565 def os_link(src, dst):
566 566 raise OSError(0, _("Hardlinks not supported"))
567 567
568 568 def lookup_reg(key, name=None, scope=None):
569 569 return None
570 570
571 571 if os.name == 'nt':
572 572 from windows import *
573 573 else:
574 574 from posix import *
575 575
576 576 def makelock(info, pathname):
577 577 try:
578 578 return os.symlink(info, pathname)
579 579 except OSError, why:
580 580 if why.errno == errno.EEXIST:
581 581 raise
582 582 except AttributeError: # no symlink in os
583 583 pass
584 584
585 585 ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
586 586 os.write(ld, info)
587 587 os.close(ld)
588 588
589 589 def readlock(pathname):
590 590 try:
591 591 return os.readlink(pathname)
592 592 except OSError, why:
593 593 if why.errno not in (errno.EINVAL, errno.ENOSYS):
594 594 raise
595 595 except AttributeError: # no symlink in os
596 596 pass
597 597 return posixfile(pathname).read()
598 598
599 599 def fstat(fp):
600 600 '''stat file object that may not have fileno method.'''
601 601 try:
602 602 return os.fstat(fp.fileno())
603 603 except AttributeError:
604 604 return os.stat(fp.name)
605 605
606 606 # File system features
607 607
608 608 def checkcase(path):
609 609 """
610 610 Check whether the given path is on a case-sensitive filesystem
611 611
612 612 Requires a path (like /foo/.hg) ending with a foldable final
613 613 directory component.
614 614 """
615 615 s1 = os.stat(path)
616 616 d, b = os.path.split(path)
617 617 p2 = os.path.join(d, b.upper())
618 618 if path == p2:
619 619 p2 = os.path.join(d, b.lower())
620 620 try:
621 621 s2 = os.stat(p2)
622 622 if s2 == s1:
623 623 return False
624 624 return True
625 625 except:
626 626 return True
627 627
628 628 _fspathcache = {}
629 629 def fspath(name, root):
630 630 '''Get name in the case stored in the filesystem
631 631
632 632 The name is either relative to root, or it is an absolute path starting
633 633 with root. Note that this function is unnecessary, and should not be
634 634 called, for case-sensitive filesystems (simply because it's expensive).
635 635 '''
636 636 # If name is absolute, make it relative
637 637 if name.lower().startswith(root.lower()):
638 638 l = len(root)
639 639 if name[l] == os.sep or name[l] == os.altsep:
640 640 l = l + 1
641 641 name = name[l:]
642 642
643 643 if not os.path.exists(os.path.join(root, name)):
644 644 return None
645 645
646 646 seps = os.sep
647 647 if os.altsep:
648 648 seps = seps + os.altsep
649 649 # Protect backslashes. This gets silly very quickly.
650 650 seps.replace('\\','\\\\')
651 651 pattern = re.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
652 652 dir = os.path.normcase(os.path.normpath(root))
653 653 result = []
654 654 for part, sep in pattern.findall(name):
655 655 if sep:
656 656 result.append(sep)
657 657 continue
658 658
659 659 if dir not in _fspathcache:
660 660 _fspathcache[dir] = os.listdir(dir)
661 661 contents = _fspathcache[dir]
662 662
663 663 lpart = part.lower()
664 664 lenp = len(part)
665 665 for n in contents:
666 666 if lenp == len(n) and n.lower() == lpart:
667 667 result.append(n)
668 668 break
669 669 else:
670 670 # Cannot happen, as the file exists!
671 671 result.append(part)
672 672 dir = os.path.join(dir, lpart)
673 673
674 674 return ''.join(result)
675 675
676 676 def checkexec(path):
677 677 """
678 678 Check whether the given path is on a filesystem with UNIX-like exec flags
679 679
680 680 Requires a directory (like /foo/.hg)
681 681 """
682 682
683 683 # VFAT on some Linux versions can flip mode but it doesn't persist
684 684 # across a FS remount. Frequently we can detect it if files are created
685 685 # with exec bit on.
686 686
687 687 try:
688 688 EXECFLAGS = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
689 689 fh, fn = tempfile.mkstemp("", "", path)
690 690 try:
691 691 os.close(fh)
692 692 m = os.stat(fn).st_mode & 0777
693 693 new_file_has_exec = m & EXECFLAGS
694 694 os.chmod(fn, m ^ EXECFLAGS)
695 695 exec_flags_cannot_flip = ((os.stat(fn).st_mode & 0777) == m)
696 696 finally:
697 697 os.unlink(fn)
698 698 except (IOError, OSError):
699 699 # we don't care, the user probably won't be able to commit anyway
700 700 return False
701 701 return not (new_file_has_exec or exec_flags_cannot_flip)
702 702
703 703 def checklink(path):
704 704 """check whether the given path is on a symlink-capable filesystem"""
705 705 # mktemp is not racy because symlink creation will fail if the
706 706 # file already exists
707 707 name = tempfile.mktemp(dir=path)
708 708 try:
709 709 os.symlink(".", name)
710 710 os.unlink(name)
711 711 return True
712 712 except (OSError, AttributeError):
713 713 return False
714 714
715 715 def needbinarypatch():
716 716 """return True if patches should be applied in binary mode by default."""
717 717 return os.name == 'nt'
718 718
719 719 def endswithsep(path):
720 720 '''Check path ends with os.sep or os.altsep.'''
721 721 return path.endswith(os.sep) or os.altsep and path.endswith(os.altsep)
722 722
723 723 def splitpath(path):
724 724 '''Split path by os.sep.
725 725 Note that this function does not use os.altsep because this is
726 726 an alternative of simple "xxx.split(os.sep)".
727 727 It is recommended to use os.path.normpath() before using this
728 728 function if needed.'''
729 729 return path.split(os.sep)
730 730
731 731 def gui():
732 732 '''Are we running in a GUI?'''
733 733 return os.name == "nt" or os.name == "mac" or os.environ.get("DISPLAY")
734 734
735 735 def mktempcopy(name, emptyok=False, createmode=None):
736 736 """Create a temporary file with the same contents from name
737 737
738 738 The permission bits are copied from the original file.
739 739
740 740 If the temporary file is going to be truncated immediately, you
741 741 can use emptyok=True as an optimization.
742 742
743 743 Returns the name of the temporary file.
744 744 """
745 745 d, fn = os.path.split(name)
746 746 fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
747 747 os.close(fd)
748 748 # Temporary files are created with mode 0600, which is usually not
749 749 # what we want. If the original file already exists, just copy
750 750 # its mode. Otherwise, manually obey umask.
751 751 try:
752 752 st_mode = os.lstat(name).st_mode & 0777
753 753 except OSError, inst:
754 754 if inst.errno != errno.ENOENT:
755 755 raise
756 756 st_mode = createmode
757 757 if st_mode is None:
758 758 st_mode = ~umask
759 759 st_mode &= 0666
760 760 os.chmod(temp, st_mode)
761 761 if emptyok:
762 762 return temp
763 763 try:
764 764 try:
765 765 ifp = posixfile(name, "rb")
766 766 except IOError, inst:
767 767 if inst.errno == errno.ENOENT:
768 768 return temp
769 769 if not getattr(inst, 'filename', None):
770 770 inst.filename = name
771 771 raise
772 772 ofp = posixfile(temp, "wb")
773 773 for chunk in filechunkiter(ifp):
774 774 ofp.write(chunk)
775 775 ifp.close()
776 776 ofp.close()
777 777 except:
778 778 try: os.unlink(temp)
779 779 except: pass
780 780 raise
781 781 return temp
782 782
783 783 class atomictempfile(object):
784 784 """file-like object that atomically updates a file
785 785
786 786 All writes will be redirected to a temporary copy of the original
787 787 file. When rename is called, the copy is renamed to the original
788 788 name, making the changes visible.
789 789 """
790 790 def __init__(self, name, mode, createmode):
791 791 self.__name = name
792 792 self._fp = None
793 793 self.temp = mktempcopy(name, emptyok=('w' in mode),
794 794 createmode=createmode)
795 795 self._fp = posixfile(self.temp, mode)
796 796
797 797 def __getattr__(self, name):
798 798 return getattr(self._fp, name)
799 799
800 800 def rename(self):
801 801 if not self._fp.closed:
802 802 self._fp.close()
803 803 rename(self.temp, localpath(self.__name))
804 804
805 805 def __del__(self):
806 806 if not self._fp:
807 807 return
808 808 if not self._fp.closed:
809 809 try:
810 810 os.unlink(self.temp)
811 811 except: pass
812 812 self._fp.close()
813 813
814 814 def makedirs(name, mode=None):
815 815 """recursive directory creation with parent mode inheritance"""
816 816 try:
817 817 os.mkdir(name)
818 818 if mode is not None:
819 819 os.chmod(name, mode)
820 820 return
821 821 except OSError, err:
822 822 if err.errno == errno.EEXIST:
823 823 return
824 824 if err.errno != errno.ENOENT:
825 825 raise
826 826 parent = os.path.abspath(os.path.dirname(name))
827 827 makedirs(parent, mode)
828 828 makedirs(name, mode)
829 829
830 830 class opener(object):
831 831 """Open files relative to a base directory
832 832
833 833 This class is used to hide the details of COW semantics and
834 834 remote file access from higher level code.
835 835 """
836 836 def __init__(self, base, audit=True):
837 837 self.base = base
838 838 if audit:
839 839 self.audit_path = path_auditor(base)
840 840 else:
841 841 self.audit_path = always
842 842 self.createmode = None
843 843
844 844 @propertycache
845 845 def _can_symlink(self):
846 846 return checklink(self.base)
847 847
848 848 def _fixfilemode(self, name):
849 849 if self.createmode is None:
850 850 return
851 851 os.chmod(name, self.createmode & 0666)
852 852
853 853 def __call__(self, path, mode="r", text=False, atomictemp=False):
854 854 self.audit_path(path)
855 855 f = os.path.join(self.base, path)
856 856
857 857 if not text and "b" not in mode:
858 858 mode += "b" # for that other OS
859 859
860 860 nlink = -1
861 861 if mode not in ("r", "rb"):
862 862 try:
863 863 nlink = nlinks(f)
864 864 except OSError:
865 865 nlink = 0
866 866 d = os.path.dirname(f)
867 867 if not os.path.isdir(d):
868 868 makedirs(d, self.createmode)
869 869 if atomictemp:
870 870 return atomictempfile(f, mode, self.createmode)
871 871 if nlink > 1:
872 872 rename(mktempcopy(f), f)
873 873 fp = posixfile(f, mode)
874 874 if nlink == 0:
875 875 self._fixfilemode(f)
876 876 return fp
877 877
878 878 def symlink(self, src, dst):
879 879 self.audit_path(dst)
880 880 linkname = os.path.join(self.base, dst)
881 881 try:
882 882 os.unlink(linkname)
883 883 except OSError:
884 884 pass
885 885
886 886 dirname = os.path.dirname(linkname)
887 887 if not os.path.exists(dirname):
888 888 makedirs(dirname, self.createmode)
889 889
890 890 if self._can_symlink:
891 891 try:
892 892 os.symlink(src, linkname)
893 893 except OSError, err:
894 894 raise OSError(err.errno, _('could not symlink to %r: %s') %
895 895 (src, err.strerror), linkname)
896 896 else:
897 897 f = self(dst, "w")
898 898 f.write(src)
899 899 f.close()
900 900 self._fixfilemode(dst)
901 901
902 902 class chunkbuffer(object):
903 903 """Allow arbitrary sized chunks of data to be efficiently read from an
904 904 iterator over chunks of arbitrary size."""
905 905
906 906 def __init__(self, in_iter):
907 907 """in_iter is the iterator that's iterating over the input chunks.
908 908 targetsize is how big a buffer to try to maintain."""
909 909 self.iter = iter(in_iter)
910 910 self.buf = ''
911 911 self.targetsize = 2**16
912 912
913 913 def read(self, l):
914 914 """Read L bytes of data from the iterator of chunks of data.
915 915 Returns less than L bytes if the iterator runs dry."""
916 916 if l > len(self.buf) and self.iter:
917 917 # Clamp to a multiple of self.targetsize
918 918 targetsize = max(l, self.targetsize)
919 919 collector = cStringIO.StringIO()
920 920 collector.write(self.buf)
921 921 collected = len(self.buf)
922 922 for chunk in self.iter:
923 923 collector.write(chunk)
924 924 collected += len(chunk)
925 925 if collected >= targetsize:
926 926 break
927 927 if collected < targetsize:
928 928 self.iter = False
929 929 self.buf = collector.getvalue()
930 930 if len(self.buf) == l:
931 931 s, self.buf = str(self.buf), ''
932 932 else:
933 933 s, self.buf = self.buf[:l], buffer(self.buf, l)
934 934 return s
935 935
936 936 def filechunkiter(f, size=65536, limit=None):
937 937 """Create a generator that produces the data in the file size
938 938 (default 65536) bytes at a time, up to optional limit (default is
939 939 to read all data). Chunks may be less than size bytes if the
940 940 chunk is the last chunk in the file, or the file is a socket or
941 941 some other type of file that sometimes reads less data than is
942 942 requested."""
943 943 assert size >= 0
944 944 assert limit is None or limit >= 0
945 945 while True:
946 946 if limit is None: nbytes = size
947 947 else: nbytes = min(limit, size)
948 948 s = nbytes and f.read(nbytes)
949 949 if not s: break
950 950 if limit: limit -= len(s)
951 951 yield s
952 952
953 953 def makedate():
954 954 lt = time.localtime()
955 955 if lt[8] == 1 and time.daylight:
956 956 tz = time.altzone
957 957 else:
958 958 tz = time.timezone
959 959 return time.mktime(lt), tz
960 960
961 961 def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
962 962 """represent a (unixtime, offset) tuple as a localized time.
963 963 unixtime is seconds since the epoch, and offset is the time zone's
964 964 number of seconds away from UTC. if timezone is false, do not
965 965 append time zone to string."""
966 966 t, tz = date or makedate()
967 967 if "%1" in format or "%2" in format:
968 968 sign = (tz > 0) and "-" or "+"
969 969 minutes = abs(tz) // 60
970 970 format = format.replace("%1", "%c%02d" % (sign, minutes // 60))
971 971 format = format.replace("%2", "%02d" % (minutes % 60))
972 972 s = time.strftime(format, time.gmtime(float(t) - tz))
973 973 return s
974 974
975 975 def shortdate(date=None):
976 976 """turn (timestamp, tzoff) tuple into iso 8631 date."""
977 977 return datestr(date, format='%Y-%m-%d')
978 978
979 979 def strdate(string, format, defaults=[]):
980 980 """parse a localized time string and return a (unixtime, offset) tuple.
981 981 if the string cannot be parsed, ValueError is raised."""
982 982 def timezone(string):
983 983 tz = string.split()[-1]
984 984 if tz[0] in "+-" and len(tz) == 5 and tz[1:].isdigit():
985 985 sign = (tz[0] == "+") and 1 or -1
986 986 hours = int(tz[1:3])
987 987 minutes = int(tz[3:5])
988 988 return -sign * (hours * 60 + minutes) * 60
989 989 if tz == "GMT" or tz == "UTC":
990 990 return 0
991 991 return None
992 992
993 993 # NOTE: unixtime = localunixtime + offset
994 994 offset, date = timezone(string), string
995 995 if offset != None:
996 996 date = " ".join(string.split()[:-1])
997 997
998 998 # add missing elements from defaults
999 999 for part in defaults:
1000 1000 found = [True for p in part if ("%"+p) in format]
1001 1001 if not found:
1002 1002 date += "@" + defaults[part]
1003 1003 format += "@%" + part[0]
1004 1004
1005 1005 timetuple = time.strptime(date, format)
1006 1006 localunixtime = int(calendar.timegm(timetuple))
1007 1007 if offset is None:
1008 1008 # local timezone
1009 1009 unixtime = int(time.mktime(timetuple))
1010 1010 offset = unixtime - localunixtime
1011 1011 else:
1012 1012 unixtime = localunixtime + offset
1013 1013 return unixtime, offset
1014 1014
1015 1015 def parsedate(date, formats=None, defaults=None):
1016 1016 """parse a localized date/time string and return a (unixtime, offset) tuple.
1017 1017
1018 1018 The date may be a "unixtime offset" string or in one of the specified
1019 1019 formats. If the date already is a (unixtime, offset) tuple, it is returned.
1020 1020 """
1021 1021 if not date:
1022 1022 return 0, 0
1023 1023 if isinstance(date, tuple) and len(date) == 2:
1024 1024 return date
1025 1025 if not formats:
1026 1026 formats = defaultdateformats
1027 1027 date = date.strip()
1028 1028 try:
1029 1029 when, offset = map(int, date.split(' '))
1030 1030 except ValueError:
1031 1031 # fill out defaults
1032 1032 if not defaults:
1033 1033 defaults = {}
1034 1034 now = makedate()
1035 1035 for part in "d mb yY HI M S".split():
1036 1036 if part not in defaults:
1037 1037 if part[0] in "HMS":
1038 1038 defaults[part] = "00"
1039 1039 else:
1040 1040 defaults[part] = datestr(now, "%" + part[0])
1041 1041
1042 1042 for format in formats:
1043 1043 try:
1044 1044 when, offset = strdate(date, format, defaults)
1045 1045 except (ValueError, OverflowError):
1046 1046 pass
1047 1047 else:
1048 1048 break
1049 1049 else:
1050 1050 raise Abort(_('invalid date: %r ') % date)
1051 1051 # validate explicit (probably user-specified) date and
1052 1052 # time zone offset. values must fit in signed 32 bits for
1053 1053 # current 32-bit linux runtimes. timezones go from UTC-12
1054 1054 # to UTC+14
1055 1055 if abs(when) > 0x7fffffff:
1056 1056 raise Abort(_('date exceeds 32 bits: %d') % when)
1057 1057 if offset < -50400 or offset > 43200:
1058 1058 raise Abort(_('impossible time zone offset: %d') % offset)
1059 1059 return when, offset
1060 1060
1061 1061 def matchdate(date):
1062 1062 """Return a function that matches a given date match specifier
1063 1063
1064 1064 Formats include:
1065 1065
1066 1066 '{date}' match a given date to the accuracy provided
1067 1067
1068 1068 '<{date}' on or before a given date
1069 1069
1070 1070 '>{date}' on or after a given date
1071 1071
1072 1072 """
1073 1073
1074 1074 def lower(date):
1075 1075 d = dict(mb="1", d="1")
1076 1076 return parsedate(date, extendeddateformats, d)[0]
1077 1077
1078 1078 def upper(date):
1079 1079 d = dict(mb="12", HI="23", M="59", S="59")
1080 1080 for days in "31 30 29".split():
1081 1081 try:
1082 1082 d["d"] = days
1083 1083 return parsedate(date, extendeddateformats, d)[0]
1084 1084 except:
1085 1085 pass
1086 1086 d["d"] = "28"
1087 1087 return parsedate(date, extendeddateformats, d)[0]
1088 1088
1089 1089 date = date.strip()
1090 1090 if date[0] == "<":
1091 1091 when = upper(date[1:])
1092 1092 return lambda x: x <= when
1093 1093 elif date[0] == ">":
1094 1094 when = lower(date[1:])
1095 1095 return lambda x: x >= when
1096 1096 elif date[0] == "-":
1097 1097 try:
1098 1098 days = int(date[1:])
1099 1099 except ValueError:
1100 1100 raise Abort(_("invalid day spec: %s") % date[1:])
1101 1101 when = makedate()[0] - days * 3600 * 24
1102 1102 return lambda x: x >= when
1103 1103 elif " to " in date:
1104 1104 a, b = date.split(" to ")
1105 1105 start, stop = lower(a), upper(b)
1106 1106 return lambda x: x >= start and x <= stop
1107 1107 else:
1108 1108 start, stop = lower(date), upper(date)
1109 1109 return lambda x: x >= start and x <= stop
1110 1110
1111 1111 def shortuser(user):
1112 1112 """Return a short representation of a user name or email address."""
1113 1113 f = user.find('@')
1114 1114 if f >= 0:
1115 1115 user = user[:f]
1116 1116 f = user.find('<')
1117 1117 if f >= 0:
1118 1118 user = user[f+1:]
1119 1119 f = user.find(' ')
1120 1120 if f >= 0:
1121 1121 user = user[:f]
1122 1122 f = user.find('.')
1123 1123 if f >= 0:
1124 1124 user = user[:f]
1125 1125 return user
1126 1126
1127 1127 def email(author):
1128 1128 '''get email of author.'''
1129 1129 r = author.find('>')
1130 1130 if r == -1: r = None
1131 1131 return author[author.find('<')+1:r]
1132 1132
1133 1133 def ellipsis(text, maxlength=400):
1134 1134 """Trim string to at most maxlength (default: 400) characters."""
1135 1135 if len(text) <= maxlength:
1136 1136 return text
1137 1137 else:
1138 1138 return "%s..." % (text[:maxlength-3])
1139 1139
1140 1140 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
1141 1141 '''yield every hg repository under path, recursively.'''
1142 1142 def errhandler(err):
1143 1143 if err.filename == path:
1144 1144 raise err
1145 1145 if followsym and hasattr(os.path, 'samestat'):
1146 1146 def _add_dir_if_not_there(dirlst, dirname):
1147 1147 match = False
1148 1148 samestat = os.path.samestat
1149 1149 dirstat = os.stat(dirname)
1150 1150 for lstdirstat in dirlst:
1151 1151 if samestat(dirstat, lstdirstat):
1152 1152 match = True
1153 1153 break
1154 1154 if not match:
1155 1155 dirlst.append(dirstat)
1156 1156 return not match
1157 1157 else:
1158 1158 followsym = False
1159 1159
1160 1160 if (seen_dirs is None) and followsym:
1161 1161 seen_dirs = []
1162 1162 _add_dir_if_not_there(seen_dirs, path)
1163 1163 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
1164 1164 if '.hg' in dirs:
1165 1165 yield root # found a repository
1166 1166 qroot = os.path.join(root, '.hg', 'patches')
1167 1167 if os.path.isdir(os.path.join(qroot, '.hg')):
1168 1168 yield qroot # we have a patch queue repo here
1169 1169 if recurse:
1170 1170 # avoid recursing inside the .hg directory
1171 1171 dirs.remove('.hg')
1172 1172 else:
1173 1173 dirs[:] = [] # don't descend further
1174 1174 elif followsym:
1175 1175 newdirs = []
1176 1176 for d in dirs:
1177 1177 fname = os.path.join(root, d)
1178 1178 if _add_dir_if_not_there(seen_dirs, fname):
1179 1179 if os.path.islink(fname):
1180 1180 for hgname in walkrepos(fname, True, seen_dirs):
1181 1181 yield hgname
1182 1182 else:
1183 1183 newdirs.append(d)
1184 1184 dirs[:] = newdirs
1185 1185
1186 1186 _rcpath = None
1187 1187
1188 1188 def os_rcpath():
1189 1189 '''return default os-specific hgrc search path'''
1190 1190 path = system_rcpath()
1191 1191 path.extend(user_rcpath())
1192 1192 path = [os.path.normpath(f) for f in path]
1193 1193 return path
1194 1194
1195 1195 def rcpath():
1196 1196 '''return hgrc search path. if env var HGRCPATH is set, use it.
1197 1197 for each item in path, if directory, use files ending in .rc,
1198 1198 else use item.
1199 1199 make HGRCPATH empty to only look in .hg/hgrc of current repo.
1200 1200 if no HGRCPATH, use default os-specific path.'''
1201 1201 global _rcpath
1202 1202 if _rcpath is None:
1203 1203 if 'HGRCPATH' in os.environ:
1204 1204 _rcpath = []
1205 1205 for p in os.environ['HGRCPATH'].split(os.pathsep):
1206 1206 if not p: continue
1207 1207 if os.path.isdir(p):
1208 1208 for f, kind in osutil.listdir(p):
1209 1209 if f.endswith('.rc'):
1210 1210 _rcpath.append(os.path.join(p, f))
1211 1211 else:
1212 1212 _rcpath.append(p)
1213 1213 else:
1214 1214 _rcpath = os_rcpath()
1215 1215 return _rcpath
1216 1216
1217 1217 def bytecount(nbytes):
1218 1218 '''return byte count formatted as readable string, with units'''
1219 1219
1220 1220 units = (
1221 1221 (100, 1<<30, _('%.0f GB')),
1222 1222 (10, 1<<30, _('%.1f GB')),
1223 1223 (1, 1<<30, _('%.2f GB')),
1224 1224 (100, 1<<20, _('%.0f MB')),
1225 1225 (10, 1<<20, _('%.1f MB')),
1226 1226 (1, 1<<20, _('%.2f MB')),
1227 1227 (100, 1<<10, _('%.0f KB')),
1228 1228 (10, 1<<10, _('%.1f KB')),
1229 1229 (1, 1<<10, _('%.2f KB')),
1230 1230 (1, 1, _('%.0f bytes')),
1231 1231 )
1232 1232
1233 1233 for multiplier, divisor, format in units:
1234 1234 if nbytes >= divisor * multiplier:
1235 1235 return format % (nbytes / float(divisor))
1236 1236 return units[-1][2] % nbytes
1237 1237
1238 1238 def drop_scheme(scheme, path):
1239 1239 sc = scheme + ':'
1240 1240 if path.startswith(sc):
1241 1241 path = path[len(sc):]
1242 1242 if path.startswith('//'):
1243 1243 path = path[2:]
1244 1244 return path
1245 1245
1246 1246 def uirepr(s):
1247 1247 # Avoid double backslash in Windows path repr()
1248 1248 return repr(s).replace('\\\\', '\\')
1249 1249
1250 1250 def termwidth():
1251 1251 if 'COLUMNS' in os.environ:
1252 1252 try:
1253 1253 return int(os.environ['COLUMNS'])
1254 1254 except ValueError:
1255 1255 pass
1256 1256 try:
1257 1257 import termios, array, fcntl
1258 1258 for dev in (sys.stdout, sys.stdin):
1259 1259 try:
1260 1260 try:
1261 1261 fd = dev.fileno()
1262 1262 except AttributeError:
1263 1263 continue
1264 1264 if not os.isatty(fd):
1265 1265 continue
1266 1266 arri = fcntl.ioctl(fd, termios.TIOCGWINSZ, '\0' * 8)
1267 1267 return array.array('h', arri)[1]
1268 1268 except ValueError:
1269 1269 pass
1270 1270 except ImportError:
1271 1271 pass
1272 1272 return 80
1273 1273
1274 1274 def wrap(line, hangindent, width=None):
1275 1275 if width is None:
1276 1276 width = termwidth() - 2
1277 if width <= hangindent:
1278 # adjust for weird terminal size
1279 width = max(78, hangindent + 1)
1277 1280 padding = '\n' + ' ' * hangindent
1278 1281 return padding.join(textwrap.wrap(line, width=width - hangindent))
1279 1282
1280 1283 def iterlines(iterator):
1281 1284 for chunk in iterator:
1282 1285 for line in chunk.splitlines():
1283 1286 yield line
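The util.wrap() hunk is the companion guard: with COLUMNS=0, termwidth() returns 0 and the old code handed textwrap a negative width. A small standalone sketch of the clamped behaviour (wrap_with_hang is a hypothetical rename of wrap() with the width made an explicit parameter):

    import textwrap

    def wrap_with_hang(line, hangindent, width):
        # Mirrors the guard added above: if the usable width is not larger
        # than the hanging indent (e.g. COLUMNS=0 makes termwidth() - 2
        # negative), widen it so textwrap never sees width <= 0.
        if width <= hangindent:
            width = max(78, hangindent + 1)
        padding = '\n' + ' ' * hangindent
        return padding.join(textwrap.wrap(line, width=width - hangindent))

    line = "list of strings that should be wrapped with a hanging indent"
    print(wrap_with_hang(line, 4, 30))      # ordinary terminal width
    print(wrap_with_hang(line, 4, 0 - 2))   # COLUMNS=0 case: clamped to 78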