opener: always reset flags on 'w'rite...
Adrian Buehlmann
r13112:039a964d default
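The diff below touches patchfile.writelines() in mercurial/patch.py: since the opener now resets permission flags whenever a file is opened with 'w', writelines() has to capture the existing file's mode itself (os.lstat, tolerating ENOENT for files that do not exist yet) and restore it with os.chmod after the rewrite. The following sketch shows that pattern in isolation; it is a simplified stand-in, not Mercurial code, and the helper name rewrite_preserving_mode is invented for the example.

import errno
import os

def rewrite_preserving_mode(path, data):
    # Remember the permission bits of an existing file, if there is one.
    mode = None
    try:
        mode = os.lstat(path).st_mode & 0777
    except OSError, e:
        if e.errno != errno.ENOENT:
            raise
    # Opening with 'w' may reset flags such as the exec bit ...
    fp = open(path, 'w')
    try:
        fp.write(data)
    finally:
        fp.close()
    # ... so put the saved permissions back afterwards.
    if mode is not None:
        os.chmod(path, mode)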
@@ -1,1614 +1,1622 b''
1 # patch.py - patch file parsing routines
1 # patch.py - patch file parsing routines
2 #
2 #
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 import cStringIO, email.Parser, os, re
9 import cStringIO, email.Parser, os, errno, re
10 import tempfile, zlib
10 import tempfile, zlib
11
11
12 from i18n import _
12 from i18n import _
13 from node import hex, nullid, short
13 from node import hex, nullid, short
14 import base85, mdiff, util, diffhelpers, copies, encoding
14 import base85, mdiff, util, diffhelpers, copies, encoding
15
15
16 gitre = re.compile('diff --git a/(.*) b/(.*)')
16 gitre = re.compile('diff --git a/(.*) b/(.*)')
17
17
18 class PatchError(Exception):
18 class PatchError(Exception):
19 pass
19 pass
20
20
21 # helper functions
21 # helper functions
22
22
23 def copyfile(src, dst, basedir):
23 def copyfile(src, dst, basedir):
24 abssrc, absdst = [util.canonpath(basedir, basedir, x) for x in [src, dst]]
24 abssrc, absdst = [util.canonpath(basedir, basedir, x) for x in [src, dst]]
25 if os.path.lexists(absdst):
25 if os.path.lexists(absdst):
26 raise util.Abort(_("cannot create %s: destination already exists") %
26 raise util.Abort(_("cannot create %s: destination already exists") %
27 dst)
27 dst)
28
28
29 dstdir = os.path.dirname(absdst)
29 dstdir = os.path.dirname(absdst)
30 if dstdir and not os.path.isdir(dstdir):
30 if dstdir and not os.path.isdir(dstdir):
31 try:
31 try:
32 os.makedirs(dstdir)
32 os.makedirs(dstdir)
33 except IOError:
33 except IOError:
34 raise util.Abort(
34 raise util.Abort(
35 _("cannot create %s: unable to create destination directory")
35 _("cannot create %s: unable to create destination directory")
36 % dst)
36 % dst)
37
37
38 util.copyfile(abssrc, absdst)
38 util.copyfile(abssrc, absdst)
39
39
40 # public functions
40 # public functions
41
41
42 def split(stream):
42 def split(stream):
43 '''return an iterator of individual patches from a stream'''
43 '''return an iterator of individual patches from a stream'''
44 def isheader(line, inheader):
44 def isheader(line, inheader):
45 if inheader and line[0] in (' ', '\t'):
45 if inheader and line[0] in (' ', '\t'):
46 # continuation
46 # continuation
47 return True
47 return True
48 if line[0] in (' ', '-', '+'):
48 if line[0] in (' ', '-', '+'):
49 # diff line - don't check for header pattern in there
49 # diff line - don't check for header pattern in there
50 return False
50 return False
51 l = line.split(': ', 1)
51 l = line.split(': ', 1)
52 return len(l) == 2 and ' ' not in l[0]
52 return len(l) == 2 and ' ' not in l[0]
53
53
54 def chunk(lines):
54 def chunk(lines):
55 return cStringIO.StringIO(''.join(lines))
55 return cStringIO.StringIO(''.join(lines))
56
56
57 def hgsplit(stream, cur):
57 def hgsplit(stream, cur):
58 inheader = True
58 inheader = True
59
59
60 for line in stream:
60 for line in stream:
61 if not line.strip():
61 if not line.strip():
62 inheader = False
62 inheader = False
63 if not inheader and line.startswith('# HG changeset patch'):
63 if not inheader and line.startswith('# HG changeset patch'):
64 yield chunk(cur)
64 yield chunk(cur)
65 cur = []
65 cur = []
66 inheader = True
66 inheader = True
67
67
68 cur.append(line)
68 cur.append(line)
69
69
70 if cur:
70 if cur:
71 yield chunk(cur)
71 yield chunk(cur)
72
72
73 def mboxsplit(stream, cur):
73 def mboxsplit(stream, cur):
74 for line in stream:
74 for line in stream:
75 if line.startswith('From '):
75 if line.startswith('From '):
76 for c in split(chunk(cur[1:])):
76 for c in split(chunk(cur[1:])):
77 yield c
77 yield c
78 cur = []
78 cur = []
79
79
80 cur.append(line)
80 cur.append(line)
81
81
82 if cur:
82 if cur:
83 for c in split(chunk(cur[1:])):
83 for c in split(chunk(cur[1:])):
84 yield c
84 yield c
85
85
86 def mimesplit(stream, cur):
86 def mimesplit(stream, cur):
87 def msgfp(m):
87 def msgfp(m):
88 fp = cStringIO.StringIO()
88 fp = cStringIO.StringIO()
89 g = email.Generator.Generator(fp, mangle_from_=False)
89 g = email.Generator.Generator(fp, mangle_from_=False)
90 g.flatten(m)
90 g.flatten(m)
91 fp.seek(0)
91 fp.seek(0)
92 return fp
92 return fp
93
93
94 for line in stream:
94 for line in stream:
95 cur.append(line)
95 cur.append(line)
96 c = chunk(cur)
96 c = chunk(cur)
97
97
98 m = email.Parser.Parser().parse(c)
98 m = email.Parser.Parser().parse(c)
99 if not m.is_multipart():
99 if not m.is_multipart():
100 yield msgfp(m)
100 yield msgfp(m)
101 else:
101 else:
102 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
102 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
103 for part in m.walk():
103 for part in m.walk():
104 ct = part.get_content_type()
104 ct = part.get_content_type()
105 if ct not in ok_types:
105 if ct not in ok_types:
106 continue
106 continue
107 yield msgfp(part)
107 yield msgfp(part)
108
108
109 def headersplit(stream, cur):
109 def headersplit(stream, cur):
110 inheader = False
110 inheader = False
111
111
112 for line in stream:
112 for line in stream:
113 if not inheader and isheader(line, inheader):
113 if not inheader and isheader(line, inheader):
114 yield chunk(cur)
114 yield chunk(cur)
115 cur = []
115 cur = []
116 inheader = True
116 inheader = True
117 if inheader and not isheader(line, inheader):
117 if inheader and not isheader(line, inheader):
118 inheader = False
118 inheader = False
119
119
120 cur.append(line)
120 cur.append(line)
121
121
122 if cur:
122 if cur:
123 yield chunk(cur)
123 yield chunk(cur)
124
124
125 def remainder(cur):
125 def remainder(cur):
126 yield chunk(cur)
126 yield chunk(cur)
127
127
128 class fiter(object):
128 class fiter(object):
129 def __init__(self, fp):
129 def __init__(self, fp):
130 self.fp = fp
130 self.fp = fp
131
131
132 def __iter__(self):
132 def __iter__(self):
133 return self
133 return self
134
134
135 def next(self):
135 def next(self):
136 l = self.fp.readline()
136 l = self.fp.readline()
137 if not l:
137 if not l:
138 raise StopIteration
138 raise StopIteration
139 return l
139 return l
140
140
141 inheader = False
141 inheader = False
142 cur = []
142 cur = []
143
143
144 mimeheaders = ['content-type']
144 mimeheaders = ['content-type']
145
145
146 if not hasattr(stream, 'next'):
146 if not hasattr(stream, 'next'):
147 # http responses, for example, have readline but not next
147 # http responses, for example, have readline but not next
148 stream = fiter(stream)
148 stream = fiter(stream)
149
149
150 for line in stream:
150 for line in stream:
151 cur.append(line)
151 cur.append(line)
152 if line.startswith('# HG changeset patch'):
152 if line.startswith('# HG changeset patch'):
153 return hgsplit(stream, cur)
153 return hgsplit(stream, cur)
154 elif line.startswith('From '):
154 elif line.startswith('From '):
155 return mboxsplit(stream, cur)
155 return mboxsplit(stream, cur)
156 elif isheader(line, inheader):
156 elif isheader(line, inheader):
157 inheader = True
157 inheader = True
158 if line.split(':', 1)[0].lower() in mimeheaders:
158 if line.split(':', 1)[0].lower() in mimeheaders:
159 # let email parser handle this
159 # let email parser handle this
160 return mimesplit(stream, cur)
160 return mimesplit(stream, cur)
161 elif line.startswith('--- ') and inheader:
161 elif line.startswith('--- ') and inheader:
162 # No evil headers seen by diff start, split by hand
162 # No evil headers seen by diff start, split by hand
163 return headersplit(stream, cur)
163 return headersplit(stream, cur)
164 # Not enough info, keep reading
164 # Not enough info, keep reading
165
165
166 # if we are here, we have a very plain patch
166 # if we are here, we have a very plain patch
167 return remainder(cur)
167 return remainder(cur)
168
168
169 def extract(ui, fileobj):
169 def extract(ui, fileobj):
170 '''extract patch from data read from fileobj.
170 '''extract patch from data read from fileobj.
171
171
172 patch can be a normal patch or contained in an email message.
172 patch can be a normal patch or contained in an email message.
173
173
174 return tuple (filename, message, user, date, branch, node, p1, p2).
174 return tuple (filename, message, user, date, branch, node, p1, p2).
175 Any item in the returned tuple can be None. If filename is None,
175 Any item in the returned tuple can be None. If filename is None,
176 fileobj did not contain a patch. Caller must unlink filename when done.'''
176 fileobj did not contain a patch. Caller must unlink filename when done.'''
177
177
178 # attempt to detect the start of a patch
178 # attempt to detect the start of a patch
179 # (this heuristic is borrowed from quilt)
179 # (this heuristic is borrowed from quilt)
180 diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
180 diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
181 r'retrieving revision [0-9]+(\.[0-9]+)*$|'
181 r'retrieving revision [0-9]+(\.[0-9]+)*$|'
182 r'---[ \t].*?^\+\+\+[ \t]|'
182 r'---[ \t].*?^\+\+\+[ \t]|'
183 r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)
183 r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)
184
184
185 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
185 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
186 tmpfp = os.fdopen(fd, 'w')
186 tmpfp = os.fdopen(fd, 'w')
187 try:
187 try:
188 msg = email.Parser.Parser().parse(fileobj)
188 msg = email.Parser.Parser().parse(fileobj)
189
189
190 subject = msg['Subject']
190 subject = msg['Subject']
191 user = msg['From']
191 user = msg['From']
192 if not subject and not user:
192 if not subject and not user:
193 # Not an email, restore parsed headers if any
193 # Not an email, restore parsed headers if any
194 subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
194 subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
195
195
196 gitsendmail = 'git-send-email' in msg.get('X-Mailer', '')
196 gitsendmail = 'git-send-email' in msg.get('X-Mailer', '')
197 # should try to parse msg['Date']
197 # should try to parse msg['Date']
198 date = None
198 date = None
199 nodeid = None
199 nodeid = None
200 branch = None
200 branch = None
201 parents = []
201 parents = []
202
202
203 if subject:
203 if subject:
204 if subject.startswith('[PATCH'):
204 if subject.startswith('[PATCH'):
205 pend = subject.find(']')
205 pend = subject.find(']')
206 if pend >= 0:
206 if pend >= 0:
207 subject = subject[pend + 1:].lstrip()
207 subject = subject[pend + 1:].lstrip()
208 subject = subject.replace('\n\t', ' ')
208 subject = subject.replace('\n\t', ' ')
209 ui.debug('Subject: %s\n' % subject)
209 ui.debug('Subject: %s\n' % subject)
210 if user:
210 if user:
211 ui.debug('From: %s\n' % user)
211 ui.debug('From: %s\n' % user)
212 diffs_seen = 0
212 diffs_seen = 0
213 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
213 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
214 message = ''
214 message = ''
215 for part in msg.walk():
215 for part in msg.walk():
216 content_type = part.get_content_type()
216 content_type = part.get_content_type()
217 ui.debug('Content-Type: %s\n' % content_type)
217 ui.debug('Content-Type: %s\n' % content_type)
218 if content_type not in ok_types:
218 if content_type not in ok_types:
219 continue
219 continue
220 payload = part.get_payload(decode=True)
220 payload = part.get_payload(decode=True)
221 m = diffre.search(payload)
221 m = diffre.search(payload)
222 if m:
222 if m:
223 hgpatch = False
223 hgpatch = False
224 hgpatchheader = False
224 hgpatchheader = False
225 ignoretext = False
225 ignoretext = False
226
226
227 ui.debug('found patch at byte %d\n' % m.start(0))
227 ui.debug('found patch at byte %d\n' % m.start(0))
228 diffs_seen += 1
228 diffs_seen += 1
229 cfp = cStringIO.StringIO()
229 cfp = cStringIO.StringIO()
230 for line in payload[:m.start(0)].splitlines():
230 for line in payload[:m.start(0)].splitlines():
231 if line.startswith('# HG changeset patch') and not hgpatch:
231 if line.startswith('# HG changeset patch') and not hgpatch:
232 ui.debug('patch generated by hg export\n')
232 ui.debug('patch generated by hg export\n')
233 hgpatch = True
233 hgpatch = True
234 hgpatchheader = True
234 hgpatchheader = True
235 # drop earlier commit message content
235 # drop earlier commit message content
236 cfp.seek(0)
236 cfp.seek(0)
237 cfp.truncate()
237 cfp.truncate()
238 subject = None
238 subject = None
239 elif hgpatchheader:
239 elif hgpatchheader:
240 if line.startswith('# User '):
240 if line.startswith('# User '):
241 user = line[7:]
241 user = line[7:]
242 ui.debug('From: %s\n' % user)
242 ui.debug('From: %s\n' % user)
243 elif line.startswith("# Date "):
243 elif line.startswith("# Date "):
244 date = line[7:]
244 date = line[7:]
245 elif line.startswith("# Branch "):
245 elif line.startswith("# Branch "):
246 branch = line[9:]
246 branch = line[9:]
247 elif line.startswith("# Node ID "):
247 elif line.startswith("# Node ID "):
248 nodeid = line[10:]
248 nodeid = line[10:]
249 elif line.startswith("# Parent "):
249 elif line.startswith("# Parent "):
250 parents.append(line[10:])
250 parents.append(line[10:])
251 elif not line.startswith("# "):
251 elif not line.startswith("# "):
252 hgpatchheader = False
252 hgpatchheader = False
253 elif line == '---' and gitsendmail:
253 elif line == '---' and gitsendmail:
254 ignoretext = True
254 ignoretext = True
255 if not hgpatchheader and not ignoretext:
255 if not hgpatchheader and not ignoretext:
256 cfp.write(line)
256 cfp.write(line)
257 cfp.write('\n')
257 cfp.write('\n')
258 message = cfp.getvalue()
258 message = cfp.getvalue()
259 if tmpfp:
259 if tmpfp:
260 tmpfp.write(payload)
260 tmpfp.write(payload)
261 if not payload.endswith('\n'):
261 if not payload.endswith('\n'):
262 tmpfp.write('\n')
262 tmpfp.write('\n')
263 elif not diffs_seen and message and content_type == 'text/plain':
263 elif not diffs_seen and message and content_type == 'text/plain':
264 message += '\n' + payload
264 message += '\n' + payload
265 except:
265 except:
266 tmpfp.close()
266 tmpfp.close()
267 os.unlink(tmpname)
267 os.unlink(tmpname)
268 raise
268 raise
269
269
270 if subject and not message.startswith(subject):
270 if subject and not message.startswith(subject):
271 message = '%s\n%s' % (subject, message)
271 message = '%s\n%s' % (subject, message)
272 tmpfp.close()
272 tmpfp.close()
273 if not diffs_seen:
273 if not diffs_seen:
274 os.unlink(tmpname)
274 os.unlink(tmpname)
275 return None, message, user, date, branch, None, None, None
275 return None, message, user, date, branch, None, None, None
276 p1 = parents and parents.pop(0) or None
276 p1 = parents and parents.pop(0) or None
277 p2 = parents and parents.pop(0) or None
277 p2 = parents and parents.pop(0) or None
278 return tmpname, message, user, date, branch, nodeid, p1, p2
278 return tmpname, message, user, date, branch, nodeid, p1, p2
279
279
280 class patchmeta(object):
280 class patchmeta(object):
281 """Patched file metadata
281 """Patched file metadata
282
282
283 'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
283 'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
284 or COPY. 'path' is patched file path. 'oldpath' is set to the
284 or COPY. 'path' is patched file path. 'oldpath' is set to the
285 origin file when 'op' is either COPY or RENAME, None otherwise. If
285 origin file when 'op' is either COPY or RENAME, None otherwise. If
286 file mode is changed, 'mode' is a tuple (islink, isexec) where
286 file mode is changed, 'mode' is a tuple (islink, isexec) where
287 'islink' is True if the file is a symlink and 'isexec' is True if
287 'islink' is True if the file is a symlink and 'isexec' is True if
288 the file is executable. Otherwise, 'mode' is None.
288 the file is executable. Otherwise, 'mode' is None.
289 """
289 """
290 def __init__(self, path):
290 def __init__(self, path):
291 self.path = path
291 self.path = path
292 self.oldpath = None
292 self.oldpath = None
293 self.mode = None
293 self.mode = None
294 self.op = 'MODIFY'
294 self.op = 'MODIFY'
295 self.binary = False
295 self.binary = False
296
296
297 def setmode(self, mode):
297 def setmode(self, mode):
298 islink = mode & 020000
298 islink = mode & 020000
299 isexec = mode & 0100
299 isexec = mode & 0100
300 self.mode = (islink, isexec)
300 self.mode = (islink, isexec)
301
301
302 def __repr__(self):
302 def __repr__(self):
303 return "<patchmeta %s %r>" % (self.op, self.path)
303 return "<patchmeta %s %r>" % (self.op, self.path)
304
304
305 def readgitpatch(lr):
305 def readgitpatch(lr):
306 """extract git-style metadata about patches from <patchname>"""
306 """extract git-style metadata about patches from <patchname>"""
307
307
308 # Filter patch for git information
308 # Filter patch for git information
309 gp = None
309 gp = None
310 gitpatches = []
310 gitpatches = []
311 for line in lr:
311 for line in lr:
312 line = line.rstrip(' \r\n')
312 line = line.rstrip(' \r\n')
313 if line.startswith('diff --git'):
313 if line.startswith('diff --git'):
314 m = gitre.match(line)
314 m = gitre.match(line)
315 if m:
315 if m:
316 if gp:
316 if gp:
317 gitpatches.append(gp)
317 gitpatches.append(gp)
318 dst = m.group(2)
318 dst = m.group(2)
319 gp = patchmeta(dst)
319 gp = patchmeta(dst)
320 elif gp:
320 elif gp:
321 if line.startswith('--- '):
321 if line.startswith('--- '):
322 gitpatches.append(gp)
322 gitpatches.append(gp)
323 gp = None
323 gp = None
324 continue
324 continue
325 if line.startswith('rename from '):
325 if line.startswith('rename from '):
326 gp.op = 'RENAME'
326 gp.op = 'RENAME'
327 gp.oldpath = line[12:]
327 gp.oldpath = line[12:]
328 elif line.startswith('rename to '):
328 elif line.startswith('rename to '):
329 gp.path = line[10:]
329 gp.path = line[10:]
330 elif line.startswith('copy from '):
330 elif line.startswith('copy from '):
331 gp.op = 'COPY'
331 gp.op = 'COPY'
332 gp.oldpath = line[10:]
332 gp.oldpath = line[10:]
333 elif line.startswith('copy to '):
333 elif line.startswith('copy to '):
334 gp.path = line[8:]
334 gp.path = line[8:]
335 elif line.startswith('deleted file'):
335 elif line.startswith('deleted file'):
336 gp.op = 'DELETE'
336 gp.op = 'DELETE'
337 elif line.startswith('new file mode '):
337 elif line.startswith('new file mode '):
338 gp.op = 'ADD'
338 gp.op = 'ADD'
339 gp.setmode(int(line[-6:], 8))
339 gp.setmode(int(line[-6:], 8))
340 elif line.startswith('new mode '):
340 elif line.startswith('new mode '):
341 gp.setmode(int(line[-6:], 8))
341 gp.setmode(int(line[-6:], 8))
342 elif line.startswith('GIT binary patch'):
342 elif line.startswith('GIT binary patch'):
343 gp.binary = True
343 gp.binary = True
344 if gp:
344 if gp:
345 gitpatches.append(gp)
345 gitpatches.append(gp)
346
346
347 return gitpatches
347 return gitpatches
348
348
349 class linereader(object):
349 class linereader(object):
350 # simple class to allow pushing lines back into the input stream
350 # simple class to allow pushing lines back into the input stream
351 def __init__(self, fp, textmode=False):
351 def __init__(self, fp, textmode=False):
352 self.fp = fp
352 self.fp = fp
353 self.buf = []
353 self.buf = []
354 self.textmode = textmode
354 self.textmode = textmode
355 self.eol = None
355 self.eol = None
356
356
357 def push(self, line):
357 def push(self, line):
358 if line is not None:
358 if line is not None:
359 self.buf.append(line)
359 self.buf.append(line)
360
360
361 def readline(self):
361 def readline(self):
362 if self.buf:
362 if self.buf:
363 l = self.buf[0]
363 l = self.buf[0]
364 del self.buf[0]
364 del self.buf[0]
365 return l
365 return l
366 l = self.fp.readline()
366 l = self.fp.readline()
367 if not self.eol:
367 if not self.eol:
368 if l.endswith('\r\n'):
368 if l.endswith('\r\n'):
369 self.eol = '\r\n'
369 self.eol = '\r\n'
370 elif l.endswith('\n'):
370 elif l.endswith('\n'):
371 self.eol = '\n'
371 self.eol = '\n'
372 if self.textmode and l.endswith('\r\n'):
372 if self.textmode and l.endswith('\r\n'):
373 l = l[:-2] + '\n'
373 l = l[:-2] + '\n'
374 return l
374 return l
375
375
376 def __iter__(self):
376 def __iter__(self):
377 while 1:
377 while 1:
378 l = self.readline()
378 l = self.readline()
379 if not l:
379 if not l:
380 break
380 break
381 yield l
381 yield l
382
382
383 # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
383 # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
384 unidesc = re.compile('@@ -(\d+)(,(\d+))? \+(\d+)(,(\d+))? @@')
384 unidesc = re.compile('@@ -(\d+)(,(\d+))? \+(\d+)(,(\d+))? @@')
385 contextdesc = re.compile('(---|\*\*\*) (\d+)(,(\d+))? (---|\*\*\*)')
385 contextdesc = re.compile('(---|\*\*\*) (\d+)(,(\d+))? (---|\*\*\*)')
386 eolmodes = ['strict', 'crlf', 'lf', 'auto']
386 eolmodes = ['strict', 'crlf', 'lf', 'auto']
387
387
388 class patchfile(object):
388 class patchfile(object):
389 def __init__(self, ui, fname, opener, missing=False, eolmode='strict'):
389 def __init__(self, ui, fname, opener, missing=False, eolmode='strict'):
390 self.fname = fname
390 self.fname = fname
391 self.eolmode = eolmode
391 self.eolmode = eolmode
392 self.eol = None
392 self.eol = None
393 self.opener = opener
393 self.opener = opener
394 self.ui = ui
394 self.ui = ui
395 self.lines = []
395 self.lines = []
396 self.exists = False
396 self.exists = False
397 self.missing = missing
397 self.missing = missing
398 if not missing:
398 if not missing:
399 try:
399 try:
400 self.lines = self.readlines(fname)
400 self.lines = self.readlines(fname)
401 self.exists = True
401 self.exists = True
402 except IOError:
402 except IOError:
403 pass
403 pass
404 else:
404 else:
405 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
405 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
406
406
407 self.hash = {}
407 self.hash = {}
408 self.dirty = 0
408 self.dirty = 0
409 self.offset = 0
409 self.offset = 0
410 self.skew = 0
410 self.skew = 0
411 self.rej = []
411 self.rej = []
412 self.fileprinted = False
412 self.fileprinted = False
413 self.printfile(False)
413 self.printfile(False)
414 self.hunks = 0
414 self.hunks = 0
415
415
416 def readlines(self, fname):
416 def readlines(self, fname):
417 if os.path.islink(fname):
417 if os.path.islink(fname):
418 return [os.readlink(fname)]
418 return [os.readlink(fname)]
419 fp = self.opener(fname, 'r')
419 fp = self.opener(fname, 'r')
420 try:
420 try:
421 lr = linereader(fp, self.eolmode != 'strict')
421 lr = linereader(fp, self.eolmode != 'strict')
422 lines = list(lr)
422 lines = list(lr)
423 self.eol = lr.eol
423 self.eol = lr.eol
424 return lines
424 return lines
425 finally:
425 finally:
426 fp.close()
426 fp.close()
427
427
428 def writelines(self, fname, lines):
428 def writelines(self, fname, lines):
429 # Ensure supplied data ends in fname, being a regular file or
429 # Ensure supplied data ends in fname, being a regular file or
430 # a symlink. cmdutil.updatedir will -too magically- take care
430 # a symlink. cmdutil.updatedir will -too magically- take care
431 # of setting it to the proper type afterwards.
431 # of setting it to the proper type afterwards.
432 st_mode = None
432 islink = os.path.islink(fname)
433 islink = os.path.islink(fname)
433 if islink:
434 if islink:
434 fp = cStringIO.StringIO()
435 fp = cStringIO.StringIO()
435 else:
436 else:
437 try:
438 st_mode = os.lstat(fname).st_mode & 0777
439 except OSError, e:
440 if e.errno != errno.ENOENT:
441 raise
436 fp = self.opener(fname, 'w')
442 fp = self.opener(fname, 'w')
437 try:
443 try:
438 if self.eolmode == 'auto':
444 if self.eolmode == 'auto':
439 eol = self.eol
445 eol = self.eol
440 elif self.eolmode == 'crlf':
446 elif self.eolmode == 'crlf':
441 eol = '\r\n'
447 eol = '\r\n'
442 else:
448 else:
443 eol = '\n'
449 eol = '\n'
444
450
445 if self.eolmode != 'strict' and eol and eol != '\n':
451 if self.eolmode != 'strict' and eol and eol != '\n':
446 for l in lines:
452 for l in lines:
447 if l and l[-1] == '\n':
453 if l and l[-1] == '\n':
448 l = l[:-1] + eol
454 l = l[:-1] + eol
449 fp.write(l)
455 fp.write(l)
450 else:
456 else:
451 fp.writelines(lines)
457 fp.writelines(lines)
452 if islink:
458 if islink:
453 self.opener.symlink(fp.getvalue(), fname)
459 self.opener.symlink(fp.getvalue(), fname)
460 if st_mode is not None:
461 os.chmod(fname, st_mode)
454 finally:
462 finally:
455 fp.close()
463 fp.close()
456
464
457 def unlink(self, fname):
465 def unlink(self, fname):
458 os.unlink(fname)
466 os.unlink(fname)
459
467
460 def printfile(self, warn):
468 def printfile(self, warn):
461 if self.fileprinted:
469 if self.fileprinted:
462 return
470 return
463 if warn or self.ui.verbose:
471 if warn or self.ui.verbose:
464 self.fileprinted = True
472 self.fileprinted = True
465 s = _("patching file %s\n") % self.fname
473 s = _("patching file %s\n") % self.fname
466 if warn:
474 if warn:
467 self.ui.warn(s)
475 self.ui.warn(s)
468 else:
476 else:
469 self.ui.note(s)
477 self.ui.note(s)
470
478
471
479
472 def findlines(self, l, linenum):
480 def findlines(self, l, linenum):
473 # looks through the hash and finds candidate lines. The
481 # looks through the hash and finds candidate lines. The
474 # result is a list of line numbers sorted based on distance
482 # result is a list of line numbers sorted based on distance
475 # from linenum
483 # from linenum
476
484
477 cand = self.hash.get(l, [])
485 cand = self.hash.get(l, [])
478 if len(cand) > 1:
486 if len(cand) > 1:
479 # resort our list of potentials forward then back.
487 # resort our list of potentials forward then back.
480 cand.sort(key=lambda x: abs(x - linenum))
488 cand.sort(key=lambda x: abs(x - linenum))
481 return cand
489 return cand
482
490
483 def hashlines(self):
491 def hashlines(self):
484 self.hash = {}
492 self.hash = {}
485 for x, s in enumerate(self.lines):
493 for x, s in enumerate(self.lines):
486 self.hash.setdefault(s, []).append(x)
494 self.hash.setdefault(s, []).append(x)
487
495
488 def makerejlines(self, fname):
496 def makerejlines(self, fname):
489 base = os.path.basename(fname)
497 base = os.path.basename(fname)
490 yield "--- %s\n+++ %s\n" % (base, base)
498 yield "--- %s\n+++ %s\n" % (base, base)
491 for x in self.rej:
499 for x in self.rej:
492 for l in x.hunk:
500 for l in x.hunk:
493 yield l
501 yield l
494 if l[-1] != '\n':
502 if l[-1] != '\n':
495 yield "\n\ No newline at end of file\n"
503 yield "\n\ No newline at end of file\n"
496
504
497 def write_rej(self):
505 def write_rej(self):
498 # our rejects are a little different from patch(1). This always
506 # our rejects are a little different from patch(1). This always
499 # creates rejects in the same form as the original patch. A file
507 # creates rejects in the same form as the original patch. A file
500 # header is inserted so that you can run the reject through patch again
508 # header is inserted so that you can run the reject through patch again
501 # without having to type the filename.
509 # without having to type the filename.
502
510
503 if not self.rej:
511 if not self.rej:
504 return
512 return
505
513
506 fname = self.fname + ".rej"
514 fname = self.fname + ".rej"
507 self.ui.warn(
515 self.ui.warn(
508 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
516 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
509 (len(self.rej), self.hunks, fname))
517 (len(self.rej), self.hunks, fname))
510
518
511 fp = self.opener(fname, 'w')
519 fp = self.opener(fname, 'w')
512 fp.writelines(self.makerejlines(self.fname))
520 fp.writelines(self.makerejlines(self.fname))
513 fp.close()
521 fp.close()
514
522
515 def apply(self, h):
523 def apply(self, h):
516 if not h.complete():
524 if not h.complete():
517 raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
525 raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
518 (h.number, h.desc, len(h.a), h.lena, len(h.b),
526 (h.number, h.desc, len(h.a), h.lena, len(h.b),
519 h.lenb))
527 h.lenb))
520
528
521 self.hunks += 1
529 self.hunks += 1
522
530
523 if self.missing:
531 if self.missing:
524 self.rej.append(h)
532 self.rej.append(h)
525 return -1
533 return -1
526
534
527 if self.exists and h.createfile():
535 if self.exists and h.createfile():
528 self.ui.warn(_("file %s already exists\n") % self.fname)
536 self.ui.warn(_("file %s already exists\n") % self.fname)
529 self.rej.append(h)
537 self.rej.append(h)
530 return -1
538 return -1
531
539
532 if isinstance(h, binhunk):
540 if isinstance(h, binhunk):
533 if h.rmfile():
541 if h.rmfile():
534 self.unlink(self.fname)
542 self.unlink(self.fname)
535 else:
543 else:
536 self.lines[:] = h.new()
544 self.lines[:] = h.new()
537 self.offset += len(h.new())
545 self.offset += len(h.new())
538 self.dirty = 1
546 self.dirty = 1
539 return 0
547 return 0
540
548
541 horig = h
549 horig = h
542 if (self.eolmode in ('crlf', 'lf')
550 if (self.eolmode in ('crlf', 'lf')
543 or self.eolmode == 'auto' and self.eol):
551 or self.eolmode == 'auto' and self.eol):
544 # If new eols are going to be normalized, then normalize
552 # If new eols are going to be normalized, then normalize
545 # hunk data before patching. Otherwise, preserve input
553 # hunk data before patching. Otherwise, preserve input
546 # line-endings.
554 # line-endings.
547 h = h.getnormalized()
555 h = h.getnormalized()
548
556
549 # fast case first, no offsets, no fuzz
557 # fast case first, no offsets, no fuzz
550 old = h.old()
558 old = h.old()
551 # patch starts counting at 1 unless we are adding the file
559 # patch starts counting at 1 unless we are adding the file
552 if h.starta == 0:
560 if h.starta == 0:
553 start = 0
561 start = 0
554 else:
562 else:
555 start = h.starta + self.offset - 1
563 start = h.starta + self.offset - 1
556 orig_start = start
564 orig_start = start
557 # if there's skew we want to emit the "(offset %d lines)" even
565 # if there's skew we want to emit the "(offset %d lines)" even
558 # when the hunk cleanly applies at start + skew, so skip the
566 # when the hunk cleanly applies at start + skew, so skip the
559 # fast case code
567 # fast case code
560 if self.skew == 0 and diffhelpers.testhunk(old, self.lines, start) == 0:
568 if self.skew == 0 and diffhelpers.testhunk(old, self.lines, start) == 0:
561 if h.rmfile():
569 if h.rmfile():
562 self.unlink(self.fname)
570 self.unlink(self.fname)
563 else:
571 else:
564 self.lines[start : start + h.lena] = h.new()
572 self.lines[start : start + h.lena] = h.new()
565 self.offset += h.lenb - h.lena
573 self.offset += h.lenb - h.lena
566 self.dirty = 1
574 self.dirty = 1
567 return 0
575 return 0
568
576
569 # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
577 # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
570 self.hashlines()
578 self.hashlines()
571 if h.hunk[-1][0] != ' ':
579 if h.hunk[-1][0] != ' ':
572 # if the hunk tried to put something at the bottom of the file
580 # if the hunk tried to put something at the bottom of the file
573 # override the start line and use eof here
581 # override the start line and use eof here
574 search_start = len(self.lines)
582 search_start = len(self.lines)
575 else:
583 else:
576 search_start = orig_start + self.skew
584 search_start = orig_start + self.skew
577
585
578 for fuzzlen in xrange(3):
586 for fuzzlen in xrange(3):
579 for toponly in [True, False]:
587 for toponly in [True, False]:
580 old = h.old(fuzzlen, toponly)
588 old = h.old(fuzzlen, toponly)
581
589
582 cand = self.findlines(old[0][1:], search_start)
590 cand = self.findlines(old[0][1:], search_start)
583 for l in cand:
591 for l in cand:
584 if diffhelpers.testhunk(old, self.lines, l) == 0:
592 if diffhelpers.testhunk(old, self.lines, l) == 0:
585 newlines = h.new(fuzzlen, toponly)
593 newlines = h.new(fuzzlen, toponly)
586 self.lines[l : l + len(old)] = newlines
594 self.lines[l : l + len(old)] = newlines
587 self.offset += len(newlines) - len(old)
595 self.offset += len(newlines) - len(old)
588 self.skew = l - orig_start
596 self.skew = l - orig_start
589 self.dirty = 1
597 self.dirty = 1
590 offset = l - orig_start - fuzzlen
598 offset = l - orig_start - fuzzlen
591 if fuzzlen:
599 if fuzzlen:
592 msg = _("Hunk #%d succeeded at %d "
600 msg = _("Hunk #%d succeeded at %d "
593 "with fuzz %d "
601 "with fuzz %d "
594 "(offset %d lines).\n")
602 "(offset %d lines).\n")
595 self.printfile(True)
603 self.printfile(True)
596 self.ui.warn(msg %
604 self.ui.warn(msg %
597 (h.number, l + 1, fuzzlen, offset))
605 (h.number, l + 1, fuzzlen, offset))
598 else:
606 else:
599 msg = _("Hunk #%d succeeded at %d "
607 msg = _("Hunk #%d succeeded at %d "
600 "(offset %d lines).\n")
608 "(offset %d lines).\n")
601 self.ui.note(msg % (h.number, l + 1, offset))
609 self.ui.note(msg % (h.number, l + 1, offset))
602 return fuzzlen
610 return fuzzlen
603 self.printfile(True)
611 self.printfile(True)
604 self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
612 self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
605 self.rej.append(horig)
613 self.rej.append(horig)
606 return -1
614 return -1
607
615
608 class hunk(object):
616 class hunk(object):
609 def __init__(self, desc, num, lr, context, create=False, remove=False):
617 def __init__(self, desc, num, lr, context, create=False, remove=False):
610 self.number = num
618 self.number = num
611 self.desc = desc
619 self.desc = desc
612 self.hunk = [desc]
620 self.hunk = [desc]
613 self.a = []
621 self.a = []
614 self.b = []
622 self.b = []
615 self.starta = self.lena = None
623 self.starta = self.lena = None
616 self.startb = self.lenb = None
624 self.startb = self.lenb = None
617 if lr is not None:
625 if lr is not None:
618 if context:
626 if context:
619 self.read_context_hunk(lr)
627 self.read_context_hunk(lr)
620 else:
628 else:
621 self.read_unified_hunk(lr)
629 self.read_unified_hunk(lr)
622 self.create = create
630 self.create = create
623 self.remove = remove and not create
631 self.remove = remove and not create
624
632
625 def getnormalized(self):
633 def getnormalized(self):
626 """Return a copy with line endings normalized to LF."""
634 """Return a copy with line endings normalized to LF."""
627
635
628 def normalize(lines):
636 def normalize(lines):
629 nlines = []
637 nlines = []
630 for line in lines:
638 for line in lines:
631 if line.endswith('\r\n'):
639 if line.endswith('\r\n'):
632 line = line[:-2] + '\n'
640 line = line[:-2] + '\n'
633 nlines.append(line)
641 nlines.append(line)
634 return nlines
642 return nlines
635
643
636 # Dummy object, it is rebuilt manually
644 # Dummy object, it is rebuilt manually
637 nh = hunk(self.desc, self.number, None, None, False, False)
645 nh = hunk(self.desc, self.number, None, None, False, False)
638 nh.number = self.number
646 nh.number = self.number
639 nh.desc = self.desc
647 nh.desc = self.desc
640 nh.hunk = self.hunk
648 nh.hunk = self.hunk
641 nh.a = normalize(self.a)
649 nh.a = normalize(self.a)
642 nh.b = normalize(self.b)
650 nh.b = normalize(self.b)
643 nh.starta = self.starta
651 nh.starta = self.starta
644 nh.startb = self.startb
652 nh.startb = self.startb
645 nh.lena = self.lena
653 nh.lena = self.lena
646 nh.lenb = self.lenb
654 nh.lenb = self.lenb
647 nh.create = self.create
655 nh.create = self.create
648 nh.remove = self.remove
656 nh.remove = self.remove
649 return nh
657 return nh
650
658
651 def read_unified_hunk(self, lr):
659 def read_unified_hunk(self, lr):
652 m = unidesc.match(self.desc)
660 m = unidesc.match(self.desc)
653 if not m:
661 if not m:
654 raise PatchError(_("bad hunk #%d") % self.number)
662 raise PatchError(_("bad hunk #%d") % self.number)
655 self.starta, foo, self.lena, self.startb, foo2, self.lenb = m.groups()
663 self.starta, foo, self.lena, self.startb, foo2, self.lenb = m.groups()
656 if self.lena is None:
664 if self.lena is None:
657 self.lena = 1
665 self.lena = 1
658 else:
666 else:
659 self.lena = int(self.lena)
667 self.lena = int(self.lena)
660 if self.lenb is None:
668 if self.lenb is None:
661 self.lenb = 1
669 self.lenb = 1
662 else:
670 else:
663 self.lenb = int(self.lenb)
671 self.lenb = int(self.lenb)
664 self.starta = int(self.starta)
672 self.starta = int(self.starta)
665 self.startb = int(self.startb)
673 self.startb = int(self.startb)
666 diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a, self.b)
674 diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a, self.b)
667 # if we hit eof before finishing out the hunk, the last line will
675 # if we hit eof before finishing out the hunk, the last line will
668 # be zero length. Lets try to fix it up.
676 # be zero length. Lets try to fix it up.
669 while len(self.hunk[-1]) == 0:
677 while len(self.hunk[-1]) == 0:
670 del self.hunk[-1]
678 del self.hunk[-1]
671 del self.a[-1]
679 del self.a[-1]
672 del self.b[-1]
680 del self.b[-1]
673 self.lena -= 1
681 self.lena -= 1
674 self.lenb -= 1
682 self.lenb -= 1
675
683
676 def read_context_hunk(self, lr):
684 def read_context_hunk(self, lr):
677 self.desc = lr.readline()
685 self.desc = lr.readline()
678 m = contextdesc.match(self.desc)
686 m = contextdesc.match(self.desc)
679 if not m:
687 if not m:
680 raise PatchError(_("bad hunk #%d") % self.number)
688 raise PatchError(_("bad hunk #%d") % self.number)
681 foo, self.starta, foo2, aend, foo3 = m.groups()
689 foo, self.starta, foo2, aend, foo3 = m.groups()
682 self.starta = int(self.starta)
690 self.starta = int(self.starta)
683 if aend is None:
691 if aend is None:
684 aend = self.starta
692 aend = self.starta
685 self.lena = int(aend) - self.starta
693 self.lena = int(aend) - self.starta
686 if self.starta:
694 if self.starta:
687 self.lena += 1
695 self.lena += 1
688 for x in xrange(self.lena):
696 for x in xrange(self.lena):
689 l = lr.readline()
697 l = lr.readline()
690 if l.startswith('---'):
698 if l.startswith('---'):
691 # lines addition, old block is empty
699 # lines addition, old block is empty
692 lr.push(l)
700 lr.push(l)
693 break
701 break
694 s = l[2:]
702 s = l[2:]
695 if l.startswith('- ') or l.startswith('! '):
703 if l.startswith('- ') or l.startswith('! '):
696 u = '-' + s
704 u = '-' + s
697 elif l.startswith(' '):
705 elif l.startswith(' '):
698 u = ' ' + s
706 u = ' ' + s
699 else:
707 else:
700 raise PatchError(_("bad hunk #%d old text line %d") %
708 raise PatchError(_("bad hunk #%d old text line %d") %
701 (self.number, x))
709 (self.number, x))
702 self.a.append(u)
710 self.a.append(u)
703 self.hunk.append(u)
711 self.hunk.append(u)
704
712
705 l = lr.readline()
713 l = lr.readline()
706 if l.startswith('\ '):
714 if l.startswith('\ '):
707 s = self.a[-1][:-1]
715 s = self.a[-1][:-1]
708 self.a[-1] = s
716 self.a[-1] = s
709 self.hunk[-1] = s
717 self.hunk[-1] = s
710 l = lr.readline()
718 l = lr.readline()
711 m = contextdesc.match(l)
719 m = contextdesc.match(l)
712 if not m:
720 if not m:
713 raise PatchError(_("bad hunk #%d") % self.number)
721 raise PatchError(_("bad hunk #%d") % self.number)
714 foo, self.startb, foo2, bend, foo3 = m.groups()
722 foo, self.startb, foo2, bend, foo3 = m.groups()
715 self.startb = int(self.startb)
723 self.startb = int(self.startb)
716 if bend is None:
724 if bend is None:
717 bend = self.startb
725 bend = self.startb
718 self.lenb = int(bend) - self.startb
726 self.lenb = int(bend) - self.startb
719 if self.startb:
727 if self.startb:
720 self.lenb += 1
728 self.lenb += 1
721 hunki = 1
729 hunki = 1
722 for x in xrange(self.lenb):
730 for x in xrange(self.lenb):
723 l = lr.readline()
731 l = lr.readline()
724 if l.startswith('\ '):
732 if l.startswith('\ '):
725 # XXX: the only way to hit this is with an invalid line range.
733 # XXX: the only way to hit this is with an invalid line range.
726 # The no-eol marker is not counted in the line range, but I
734 # The no-eol marker is not counted in the line range, but I
727 # guess there are diff(1) out there which behave differently.
735 # guess there are diff(1) out there which behave differently.
728 s = self.b[-1][:-1]
736 s = self.b[-1][:-1]
729 self.b[-1] = s
737 self.b[-1] = s
730 self.hunk[hunki - 1] = s
738 self.hunk[hunki - 1] = s
731 continue
739 continue
732 if not l:
740 if not l:
733 # line deletions, new block is empty and we hit EOF
741 # line deletions, new block is empty and we hit EOF
734 lr.push(l)
742 lr.push(l)
735 break
743 break
736 s = l[2:]
744 s = l[2:]
737 if l.startswith('+ ') or l.startswith('! '):
745 if l.startswith('+ ') or l.startswith('! '):
738 u = '+' + s
746 u = '+' + s
739 elif l.startswith(' '):
747 elif l.startswith(' '):
740 u = ' ' + s
748 u = ' ' + s
741 elif len(self.b) == 0:
749 elif len(self.b) == 0:
742 # line deletions, new block is empty
750 # line deletions, new block is empty
743 lr.push(l)
751 lr.push(l)
744 break
752 break
745 else:
753 else:
746 raise PatchError(_("bad hunk #%d old text line %d") %
754 raise PatchError(_("bad hunk #%d old text line %d") %
747 (self.number, x))
755 (self.number, x))
748 self.b.append(s)
756 self.b.append(s)
749 while True:
757 while True:
750 if hunki >= len(self.hunk):
758 if hunki >= len(self.hunk):
751 h = ""
759 h = ""
752 else:
760 else:
753 h = self.hunk[hunki]
761 h = self.hunk[hunki]
754 hunki += 1
762 hunki += 1
755 if h == u:
763 if h == u:
756 break
764 break
757 elif h.startswith('-'):
765 elif h.startswith('-'):
758 continue
766 continue
759 else:
767 else:
760 self.hunk.insert(hunki - 1, u)
768 self.hunk.insert(hunki - 1, u)
761 break
769 break
762
770
763 if not self.a:
771 if not self.a:
764 # this happens when lines were only added to the hunk
772 # this happens when lines were only added to the hunk
765 for x in self.hunk:
773 for x in self.hunk:
766 if x.startswith('-') or x.startswith(' '):
774 if x.startswith('-') or x.startswith(' '):
767 self.a.append(x)
775 self.a.append(x)
768 if not self.b:
776 if not self.b:
769 # this happens when lines were only deleted from the hunk
777 # this happens when lines were only deleted from the hunk
770 for x in self.hunk:
778 for x in self.hunk:
771 if x.startswith('+') or x.startswith(' '):
779 if x.startswith('+') or x.startswith(' '):
772 self.b.append(x[1:])
780 self.b.append(x[1:])
773 # @@ -start,len +start,len @@
781 # @@ -start,len +start,len @@
774 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
782 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
775 self.startb, self.lenb)
783 self.startb, self.lenb)
776 self.hunk[0] = self.desc
784 self.hunk[0] = self.desc
777
785
778 def fix_newline(self):
786 def fix_newline(self):
779 diffhelpers.fix_newline(self.hunk, self.a, self.b)
787 diffhelpers.fix_newline(self.hunk, self.a, self.b)
780
788
781 def complete(self):
789 def complete(self):
782 return len(self.a) == self.lena and len(self.b) == self.lenb
790 return len(self.a) == self.lena and len(self.b) == self.lenb
783
791
784 def createfile(self):
792 def createfile(self):
785 return self.starta == 0 and self.lena == 0 and self.create
793 return self.starta == 0 and self.lena == 0 and self.create
786
794
787 def rmfile(self):
795 def rmfile(self):
788 return self.startb == 0 and self.lenb == 0 and self.remove
796 return self.startb == 0 and self.lenb == 0 and self.remove
789
797
790 def fuzzit(self, l, fuzz, toponly):
798 def fuzzit(self, l, fuzz, toponly):
791 # this removes context lines from the top and bottom of list 'l'. It
799 # this removes context lines from the top and bottom of list 'l'. It
792 # checks the hunk to make sure only context lines are removed, and then
800 # checks the hunk to make sure only context lines are removed, and then
793 # returns a new shortened list of lines.
801 # returns a new shortened list of lines.
794 fuzz = min(fuzz, len(l)-1)
802 fuzz = min(fuzz, len(l)-1)
795 if fuzz:
803 if fuzz:
796 top = 0
804 top = 0
797 bot = 0
805 bot = 0
798 hlen = len(self.hunk)
806 hlen = len(self.hunk)
799 for x in xrange(hlen - 1):
807 for x in xrange(hlen - 1):
800 # the hunk starts with the @@ line, so use x+1
808 # the hunk starts with the @@ line, so use x+1
801 if self.hunk[x + 1][0] == ' ':
809 if self.hunk[x + 1][0] == ' ':
802 top += 1
810 top += 1
803 else:
811 else:
804 break
812 break
805 if not toponly:
813 if not toponly:
806 for x in xrange(hlen - 1):
814 for x in xrange(hlen - 1):
807 if self.hunk[hlen - bot - 1][0] == ' ':
815 if self.hunk[hlen - bot - 1][0] == ' ':
808 bot += 1
816 bot += 1
809 else:
817 else:
810 break
818 break
811
819
812 # top and bot now count context in the hunk
820 # top and bot now count context in the hunk
813 # adjust them if either one is short
821 # adjust them if either one is short
814 context = max(top, bot, 3)
822 context = max(top, bot, 3)
815 if bot < context:
823 if bot < context:
816 bot = max(0, fuzz - (context - bot))
824 bot = max(0, fuzz - (context - bot))
817 else:
825 else:
818 bot = min(fuzz, bot)
826 bot = min(fuzz, bot)
819 if top < context:
827 if top < context:
820 top = max(0, fuzz - (context - top))
828 top = max(0, fuzz - (context - top))
821 else:
829 else:
822 top = min(fuzz, top)
830 top = min(fuzz, top)
823
831
824 return l[top:len(l)-bot]
832 return l[top:len(l)-bot]
825 return l
833 return l
826
834
827 def old(self, fuzz=0, toponly=False):
835 def old(self, fuzz=0, toponly=False):
828 return self.fuzzit(self.a, fuzz, toponly)
836 return self.fuzzit(self.a, fuzz, toponly)
829
837
830 def new(self, fuzz=0, toponly=False):
838 def new(self, fuzz=0, toponly=False):
831 return self.fuzzit(self.b, fuzz, toponly)
839 return self.fuzzit(self.b, fuzz, toponly)
832
840
833 class binhunk:
841 class binhunk:
834 'A binary patch file. Only understands literals so far.'
842 'A binary patch file. Only understands literals so far.'
835 def __init__(self, gitpatch):
843 def __init__(self, gitpatch):
836 self.gitpatch = gitpatch
844 self.gitpatch = gitpatch
837 self.text = None
845 self.text = None
838 self.hunk = ['GIT binary patch\n']
846 self.hunk = ['GIT binary patch\n']
839
847
840 def createfile(self):
848 def createfile(self):
841 return self.gitpatch.op in ('ADD', 'RENAME', 'COPY')
849 return self.gitpatch.op in ('ADD', 'RENAME', 'COPY')
842
850
843 def rmfile(self):
851 def rmfile(self):
844 return self.gitpatch.op == 'DELETE'
852 return self.gitpatch.op == 'DELETE'
845
853
846 def complete(self):
854 def complete(self):
847 return self.text is not None
855 return self.text is not None
848
856
849 def new(self):
857 def new(self):
850 return [self.text]
858 return [self.text]
851
859
852 def extract(self, lr):
860 def extract(self, lr):
853 line = lr.readline()
861 line = lr.readline()
854 self.hunk.append(line)
862 self.hunk.append(line)
855 while line and not line.startswith('literal '):
863 while line and not line.startswith('literal '):
856 line = lr.readline()
864 line = lr.readline()
857 self.hunk.append(line)
865 self.hunk.append(line)
858 if not line:
866 if not line:
859 raise PatchError(_('could not extract binary patch'))
867 raise PatchError(_('could not extract binary patch'))
860 size = int(line[8:].rstrip())
868 size = int(line[8:].rstrip())
861 dec = []
869 dec = []
862 line = lr.readline()
870 line = lr.readline()
863 self.hunk.append(line)
871 self.hunk.append(line)
864 while len(line) > 1:
872 while len(line) > 1:
865 l = line[0]
873 l = line[0]
866 if l <= 'Z' and l >= 'A':
874 if l <= 'Z' and l >= 'A':
867 l = ord(l) - ord('A') + 1
875 l = ord(l) - ord('A') + 1
868 else:
876 else:
869 l = ord(l) - ord('a') + 27
877 l = ord(l) - ord('a') + 27
870 dec.append(base85.b85decode(line[1:-1])[:l])
878 dec.append(base85.b85decode(line[1:-1])[:l])
871 line = lr.readline()
879 line = lr.readline()
872 self.hunk.append(line)
880 self.hunk.append(line)
873 text = zlib.decompress(''.join(dec))
881 text = zlib.decompress(''.join(dec))
874 if len(text) != size:
882 if len(text) != size:
875 raise PatchError(_('binary patch is %d bytes, not %d') %
883 raise PatchError(_('binary patch is %d bytes, not %d') %
876 len(text), size)
884 len(text), size)
877 self.text = text
885 self.text = text
878
886
879 def parsefilename(str):
887 def parsefilename(str):
880 # --- filename \t|space stuff
888 # --- filename \t|space stuff
881 s = str[4:].rstrip('\r\n')
889 s = str[4:].rstrip('\r\n')
882 i = s.find('\t')
890 i = s.find('\t')
883 if i < 0:
891 if i < 0:
884 i = s.find(' ')
892 i = s.find(' ')
885 if i < 0:
893 if i < 0:
886 return s
894 return s
887 return s[:i]
895 return s[:i]
888
896
889 def pathstrip(path, strip):
897 def pathstrip(path, strip):
890 pathlen = len(path)
898 pathlen = len(path)
891 i = 0
899 i = 0
892 if strip == 0:
900 if strip == 0:
893 return '', path.rstrip()
901 return '', path.rstrip()
894 count = strip
902 count = strip
895 while count > 0:
903 while count > 0:
896 i = path.find('/', i)
904 i = path.find('/', i)
897 if i == -1:
905 if i == -1:
898 raise PatchError(_("unable to strip away %d of %d dirs from %s") %
906 raise PatchError(_("unable to strip away %d of %d dirs from %s") %
899 (count, strip, path))
907 (count, strip, path))
900 i += 1
908 i += 1
901 # consume '//' in the path
909 # consume '//' in the path
902 while i < pathlen - 1 and path[i] == '/':
910 while i < pathlen - 1 and path[i] == '/':
903 i += 1
911 i += 1
904 count -= 1
912 count -= 1
905 return path[:i].lstrip(), path[i:].rstrip()
913 return path[:i].lstrip(), path[i:].rstrip()
906
914
907 def selectfile(afile_orig, bfile_orig, hunk, strip):
915 def selectfile(afile_orig, bfile_orig, hunk, strip):
908 nulla = afile_orig == "/dev/null"
916 nulla = afile_orig == "/dev/null"
909 nullb = bfile_orig == "/dev/null"
917 nullb = bfile_orig == "/dev/null"
910 abase, afile = pathstrip(afile_orig, strip)
918 abase, afile = pathstrip(afile_orig, strip)
911 gooda = not nulla and os.path.lexists(afile)
919 gooda = not nulla and os.path.lexists(afile)
912 bbase, bfile = pathstrip(bfile_orig, strip)
920 bbase, bfile = pathstrip(bfile_orig, strip)
913 if afile == bfile:
921 if afile == bfile:
914 goodb = gooda
922 goodb = gooda
915 else:
923 else:
916 goodb = not nullb and os.path.lexists(bfile)
924 goodb = not nullb and os.path.lexists(bfile)
917 createfunc = hunk.createfile
925 createfunc = hunk.createfile
918 missing = not goodb and not gooda and not createfunc()
926 missing = not goodb and not gooda and not createfunc()
919
927
920 # some diff programs apparently produce patches where the afile is
928 # some diff programs apparently produce patches where the afile is
921 # not /dev/null, but afile starts with bfile
929 # not /dev/null, but afile starts with bfile
922 abasedir = afile[:afile.rfind('/') + 1]
930 abasedir = afile[:afile.rfind('/') + 1]
923 bbasedir = bfile[:bfile.rfind('/') + 1]
931 bbasedir = bfile[:bfile.rfind('/') + 1]
924 if missing and abasedir == bbasedir and afile.startswith(bfile):
932 if missing and abasedir == bbasedir and afile.startswith(bfile):
925 # this isn't very pretty
933 # this isn't very pretty
926 hunk.create = True
934 hunk.create = True
927 if createfunc():
935 if createfunc():
928 missing = False
936 missing = False
929 else:
937 else:
930 hunk.create = False
938 hunk.create = False
931
939
932 # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
940 # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
933 # diff is between a file and its backup. In this case, the original
941 # diff is between a file and its backup. In this case, the original
934 # file should be patched (see original mpatch code).
942 # file should be patched (see original mpatch code).
935 isbackup = (abase == bbase and bfile.startswith(afile))
943 isbackup = (abase == bbase and bfile.startswith(afile))
936 fname = None
944 fname = None
937 if not missing:
945 if not missing:
938 if gooda and goodb:
946 if gooda and goodb:
939 fname = isbackup and afile or bfile
947 fname = isbackup and afile or bfile
940 elif gooda:
948 elif gooda:
941 fname = afile
949 fname = afile
942
950
943 if not fname:
951 if not fname:
944 if not nullb:
952 if not nullb:
945 fname = isbackup and afile or bfile
953 fname = isbackup and afile or bfile
946 elif not nulla:
954 elif not nulla:
947 fname = afile
955 fname = afile
948 else:
956 else:
949 raise PatchError(_("undefined source and destination files"))
957 raise PatchError(_("undefined source and destination files"))
950
958
951 return fname, missing
959 return fname, missing
952
960
953 def scangitpatch(lr, firstline):
961 def scangitpatch(lr, firstline):
954 """
962 """
955 Git patches can emit:
963 Git patches can emit:
956 - rename a to b
964 - rename a to b
957 - change b
965 - change b
958 - copy a to c
966 - copy a to c
959 - change c
967 - change c
960
968
961 We cannot apply this sequence as-is, the renamed 'a' could not be
969 We cannot apply this sequence as-is, the renamed 'a' could not be
962 found for it would have been renamed already. And we cannot copy
970 found for it would have been renamed already. And we cannot copy
963 from 'b' instead because 'b' would have been changed already. So
971 from 'b' instead because 'b' would have been changed already. So
964 we scan the git patch for copy and rename commands so we can
972 we scan the git patch for copy and rename commands so we can
965 perform the copies ahead of time.
973 perform the copies ahead of time.
966 """
974 """
967 pos = 0
975 pos = 0
968 try:
976 try:
969 pos = lr.fp.tell()
977 pos = lr.fp.tell()
970 fp = lr.fp
978 fp = lr.fp
971 except IOError:
979 except IOError:
972 fp = cStringIO.StringIO(lr.fp.read())
980 fp = cStringIO.StringIO(lr.fp.read())
973 gitlr = linereader(fp, lr.textmode)
981 gitlr = linereader(fp, lr.textmode)
974 gitlr.push(firstline)
982 gitlr.push(firstline)
975 gitpatches = readgitpatch(gitlr)
983 gitpatches = readgitpatch(gitlr)
976 fp.seek(pos)
984 fp.seek(pos)
977 return gitpatches
985 return gitpatches
978
986
979 def iterhunks(ui, fp):
987 def iterhunks(ui, fp):
980 """Read a patch and yield the following events:
988 """Read a patch and yield the following events:
981 - ("file", afile, bfile, firsthunk): select a new target file.
989 - ("file", afile, bfile, firsthunk): select a new target file.
982 - ("hunk", hunk): a new hunk is ready to be applied, follows a
990 - ("hunk", hunk): a new hunk is ready to be applied, follows a
983 "file" event.
991 "file" event.
984 - ("git", gitchanges): current diff is in git format, gitchanges
992 - ("git", gitchanges): current diff is in git format, gitchanges
985 maps filenames to gitpatch records. Unique event.
993 maps filenames to gitpatch records. Unique event.
986 """
994 """
987 changed = {}
995 changed = {}
988 current_hunk = None
996 current_hunk = None
989 afile = ""
997 afile = ""
990 bfile = ""
998 bfile = ""
991 state = None
999 state = None
992 hunknum = 0
1000 hunknum = 0
993 emitfile = False
1001 emitfile = False
994 git = False
1002 git = False
995
1003
996 # our states
1004 # our states
997 BFILE = 1
1005 BFILE = 1
998 context = None
1006 context = None
999 lr = linereader(fp)
1007 lr = linereader(fp)
1000
1008
1001 while True:
1009 while True:
1002 newfile = newgitfile = False
1010 newfile = newgitfile = False
1003 x = lr.readline()
1011 x = lr.readline()
1004 if not x:
1012 if not x:
1005 break
1013 break
1006 if current_hunk:
1014 if current_hunk:
1007 if x.startswith('\ '):
1015 if x.startswith('\ '):
1008 current_hunk.fix_newline()
1016 current_hunk.fix_newline()
1009 yield 'hunk', current_hunk
1017 yield 'hunk', current_hunk
1010 current_hunk = None
1018 current_hunk = None
1011 if (state == BFILE and ((not context and x[0] == '@') or
1019 if (state == BFILE and ((not context and x[0] == '@') or
1012 ((context is not False) and x.startswith('***************')))):
1020 ((context is not False) and x.startswith('***************')))):
1013 if context is None and x.startswith('***************'):
1021 if context is None and x.startswith('***************'):
1014 context = True
1022 context = True
1015 gpatch = changed.get(bfile)
1023 gpatch = changed.get(bfile)
1016 create = afile == '/dev/null' or gpatch and gpatch.op == 'ADD'
1024 create = afile == '/dev/null' or gpatch and gpatch.op == 'ADD'
1017 remove = bfile == '/dev/null' or gpatch and gpatch.op == 'DELETE'
1025 remove = bfile == '/dev/null' or gpatch and gpatch.op == 'DELETE'
1018 current_hunk = hunk(x, hunknum + 1, lr, context, create, remove)
1026 current_hunk = hunk(x, hunknum + 1, lr, context, create, remove)
1019 hunknum += 1
1027 hunknum += 1
1020 if emitfile:
1028 if emitfile:
1021 emitfile = False
1029 emitfile = False
1022 yield 'file', (afile, bfile, current_hunk)
1030 yield 'file', (afile, bfile, current_hunk)
1023 elif state == BFILE and x.startswith('GIT binary patch'):
1031 elif state == BFILE and x.startswith('GIT binary patch'):
1024 current_hunk = binhunk(changed[bfile])
1032 current_hunk = binhunk(changed[bfile])
1025 hunknum += 1
1033 hunknum += 1
1026 if emitfile:
1034 if emitfile:
1027 emitfile = False
1035 emitfile = False
1028 yield 'file', ('a/' + afile, 'b/' + bfile, current_hunk)
1036 yield 'file', ('a/' + afile, 'b/' + bfile, current_hunk)
1029 current_hunk.extract(lr)
1037 current_hunk.extract(lr)
1030 elif x.startswith('diff --git'):
1038 elif x.startswith('diff --git'):
1031 # check for git diff, scanning the whole patch file if needed
1039 # check for git diff, scanning the whole patch file if needed
1032 m = gitre.match(x)
1040 m = gitre.match(x)
1033 if m:
1041 if m:
1034 afile, bfile = m.group(1, 2)
1042 afile, bfile = m.group(1, 2)
1035 if not git:
1043 if not git:
1036 git = True
1044 git = True
1037 gitpatches = scangitpatch(lr, x)
1045 gitpatches = scangitpatch(lr, x)
1038 yield 'git', gitpatches
1046 yield 'git', gitpatches
1039 for gp in gitpatches:
1047 for gp in gitpatches:
1040 changed[gp.path] = gp
1048 changed[gp.path] = gp
1041 # else error?
1049 # else error?
1042 # copy/rename + modify should modify target, not source
1050 # copy/rename + modify should modify target, not source
1043 gp = changed.get(bfile)
1051 gp = changed.get(bfile)
1044 if gp and (gp.op in ('COPY', 'DELETE', 'RENAME', 'ADD')
1052 if gp and (gp.op in ('COPY', 'DELETE', 'RENAME', 'ADD')
1045 or gp.mode):
1053 or gp.mode):
1046 afile = bfile
1054 afile = bfile
1047 newgitfile = True
1055 newgitfile = True
1048 elif x.startswith('---'):
1056 elif x.startswith('---'):
1049 # check for a unified diff
1057 # check for a unified diff
1050 l2 = lr.readline()
1058 l2 = lr.readline()
1051 if not l2.startswith('+++'):
1059 if not l2.startswith('+++'):
1052 lr.push(l2)
1060 lr.push(l2)
1053 continue
1061 continue
1054 newfile = True
1062 newfile = True
1055 context = False
1063 context = False
1056 afile = parsefilename(x)
1064 afile = parsefilename(x)
1057 bfile = parsefilename(l2)
1065 bfile = parsefilename(l2)
1058 elif x.startswith('***'):
1066 elif x.startswith('***'):
1059 # check for a context diff
1067 # check for a context diff
1060 l2 = lr.readline()
1068 l2 = lr.readline()
1061 if not l2.startswith('---'):
1069 if not l2.startswith('---'):
1062 lr.push(l2)
1070 lr.push(l2)
1063 continue
1071 continue
1064 l3 = lr.readline()
1072 l3 = lr.readline()
1065 lr.push(l3)
1073 lr.push(l3)
1066 if not l3.startswith("***************"):
1074 if not l3.startswith("***************"):
1067 lr.push(l2)
1075 lr.push(l2)
1068 continue
1076 continue
1069 newfile = True
1077 newfile = True
1070 context = True
1078 context = True
1071 afile = parsefilename(x)
1079 afile = parsefilename(x)
1072 bfile = parsefilename(l2)
1080 bfile = parsefilename(l2)
1073
1081
1074 if newgitfile or newfile:
1082 if newgitfile or newfile:
1075 emitfile = True
1083 emitfile = True
1076 state = BFILE
1084 state = BFILE
1077 hunknum = 0
1085 hunknum = 0
1078 if current_hunk:
1086 if current_hunk:
1079 if current_hunk.complete():
1087 if current_hunk.complete():
1080 yield 'hunk', current_hunk
1088 yield 'hunk', current_hunk
1081 else:
1089 else:
1082 raise PatchError(_("malformed patch %s %s") % (afile,
1090 raise PatchError(_("malformed patch %s %s") % (afile,
1083 current_hunk.desc))
1091 current_hunk.desc))
1084
1092
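A minimal sketch of consuming this event stream, assuming the mercurial package is importable and a patch file is at hand; the ui object construction and the file name are illustrative only:

    from mercurial import ui as uimod
    from mercurial import patch as patchmod

    u = uimod.ui()
    hunkcount = 0
    fp = open('example.patch', 'rb')            # illustrative patch file
    try:
        for state, values in patchmod.iterhunks(u, fp):
            if state == 'file':
                afile, bfile, firsthunk = values
                u.write('file %s -> %s\n' % (afile, bfile))
            elif state == 'hunk':
                hunkcount += 1                  # values is the hunk object
            elif state == 'git':
                for gp in values:               # gitpatch records, as above
                    u.write('git %s %s\n' % (gp.op, gp.path))
    finally:
        fp.close()
    u.write('%d hunks\n' % hunkcount)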
1085 def applydiff(ui, fp, changed, strip=1, eolmode='strict'):
1093 def applydiff(ui, fp, changed, strip=1, eolmode='strict'):
1086 """Reads a patch from fp and tries to apply it.
1094 """Reads a patch from fp and tries to apply it.
1087
1095
1088 The dict 'changed' is filled in with all of the filenames changed
1096 The dict 'changed' is filled in with all of the filenames changed
1089 by the patch. Returns 0 for a clean patch, -1 if any rejects were
1097 by the patch. Returns 0 for a clean patch, -1 if any rejects were
1090 found and 1 if there was any fuzz.
1098 found and 1 if there was any fuzz.
1091
1099
1092 If 'eolmode' is 'strict', the patch content and patched file are
1100 If 'eolmode' is 'strict', the patch content and patched file are
1093 read in binary mode. Otherwise, line endings are ignored when
1101 read in binary mode. Otherwise, line endings are ignored when
1094 patching then normalized according to 'eolmode'.
1102 patching then normalized according to 'eolmode'.
1095
1103
1096 Callers probably want to call 'cmdutil.updatedir' after this to
1104 Callers probably want to call 'cmdutil.updatedir' after this to
1097 apply certain categories of changes not done by this function.
1105 apply certain categories of changes not done by this function.
1098 """
1106 """
1099 return _applydiff(ui, fp, patchfile, copyfile, changed, strip=strip,
1107 return _applydiff(ui, fp, patchfile, copyfile, changed, strip=strip,
1100 eolmode=eolmode)
1108 eolmode=eolmode)
1101
1109
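A short sketch of interpreting the return value documented above, assuming 'u' is a mercurial ui object and 'fp' an open patch stream supplied by the caller, with the current directory already set to the repository root:

    changed = {}
    ret = applydiff(u, fp, changed, strip=1, eolmode='strict')
    if ret == -1:
        u.warn('some hunks were rejected\n')
    elif ret == 1:
        u.status('patch applied, but with fuzz\n')
    # 'changed' now maps each patched filename to its git patch record or None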
1102 def _applydiff(ui, fp, patcher, copyfn, changed, strip=1, eolmode='strict'):
1110 def _applydiff(ui, fp, patcher, copyfn, changed, strip=1, eolmode='strict'):
1103 rejects = 0
1111 rejects = 0
1104 err = 0
1112 err = 0
1105 current_file = None
1113 current_file = None
1106 cwd = os.getcwd()
1114 cwd = os.getcwd()
1107 opener = util.opener(cwd)
1115 opener = util.opener(cwd)
1108
1116
1109 def closefile():
1117 def closefile():
1110 if not current_file:
1118 if not current_file:
1111 return 0
1119 return 0
1112 if current_file.dirty:
1120 if current_file.dirty:
1113 current_file.writelines(current_file.fname, current_file.lines)
1121 current_file.writelines(current_file.fname, current_file.lines)
1114 current_file.write_rej()
1122 current_file.write_rej()
1115 return len(current_file.rej)
1123 return len(current_file.rej)
1116
1124
1117 for state, values in iterhunks(ui, fp):
1125 for state, values in iterhunks(ui, fp):
1118 if state == 'hunk':
1126 if state == 'hunk':
1119 if not current_file:
1127 if not current_file:
1120 continue
1128 continue
1121 ret = current_file.apply(values)
1129 ret = current_file.apply(values)
1122 if ret >= 0:
1130 if ret >= 0:
1123 changed.setdefault(current_file.fname, None)
1131 changed.setdefault(current_file.fname, None)
1124 if ret > 0:
1132 if ret > 0:
1125 err = 1
1133 err = 1
1126 elif state == 'file':
1134 elif state == 'file':
1127 rejects += closefile()
1135 rejects += closefile()
1128 afile, bfile, first_hunk = values
1136 afile, bfile, first_hunk = values
1129 try:
1137 try:
1130 current_file, missing = selectfile(afile, bfile,
1138 current_file, missing = selectfile(afile, bfile,
1131 first_hunk, strip)
1139 first_hunk, strip)
1132 current_file = patcher(ui, current_file, opener,
1140 current_file = patcher(ui, current_file, opener,
1133 missing=missing, eolmode=eolmode)
1141 missing=missing, eolmode=eolmode)
1134 except PatchError, err:
1142 except PatchError, err:
1135 ui.warn(str(err) + '\n')
1143 ui.warn(str(err) + '\n')
1136 current_file = None
1144 current_file = None
1137 rejects += 1
1145 rejects += 1
1138 continue
1146 continue
1139 elif state == 'git':
1147 elif state == 'git':
1140 for gp in values:
1148 for gp in values:
1141 gp.path = pathstrip(gp.path, strip - 1)[1]
1149 gp.path = pathstrip(gp.path, strip - 1)[1]
1142 if gp.oldpath:
1150 if gp.oldpath:
1143 gp.oldpath = pathstrip(gp.oldpath, strip - 1)[1]
1151 gp.oldpath = pathstrip(gp.oldpath, strip - 1)[1]
1144 # Binary patches really overwrite target files, copying them
1152 # Binary patches really overwrite target files, copying them
1145                 # will just make it fail with "target file exists"
1153                 # will just make it fail with "target file exists"
1146 if gp.op in ('COPY', 'RENAME') and not gp.binary:
1154 if gp.op in ('COPY', 'RENAME') and not gp.binary:
1147 copyfn(gp.oldpath, gp.path, cwd)
1155 copyfn(gp.oldpath, gp.path, cwd)
1148 changed[gp.path] = gp
1156 changed[gp.path] = gp
1149 else:
1157 else:
1150 raise util.Abort(_('unsupported parser state: %s') % state)
1158 raise util.Abort(_('unsupported parser state: %s') % state)
1151
1159
1152 rejects += closefile()
1160 rejects += closefile()
1153
1161
1154 if rejects:
1162 if rejects:
1155 return -1
1163 return -1
1156 return err
1164 return err
1157
1165
1158 def externalpatch(patcher, patchname, ui, strip, cwd, files):
1166 def externalpatch(patcher, patchname, ui, strip, cwd, files):
1159 """use <patcher> to apply <patchname> to the working directory.
1167 """use <patcher> to apply <patchname> to the working directory.
1160 returns whether patch was applied with fuzz factor."""
1168 returns whether patch was applied with fuzz factor."""
1161
1169
1162 fuzz = False
1170 fuzz = False
1163 args = []
1171 args = []
1164 if cwd:
1172 if cwd:
1165 args.append('-d %s' % util.shellquote(cwd))
1173 args.append('-d %s' % util.shellquote(cwd))
1166 fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
1174 fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
1167 util.shellquote(patchname)))
1175 util.shellquote(patchname)))
1168
1176
1169 for line in fp:
1177 for line in fp:
1170 line = line.rstrip()
1178 line = line.rstrip()
1171 ui.note(line + '\n')
1179 ui.note(line + '\n')
1172 if line.startswith('patching file '):
1180 if line.startswith('patching file '):
1173 pf = util.parse_patch_output(line)
1181 pf = util.parse_patch_output(line)
1174 printed_file = False
1182 printed_file = False
1175 files.setdefault(pf, None)
1183 files.setdefault(pf, None)
1176 elif line.find('with fuzz') >= 0:
1184 elif line.find('with fuzz') >= 0:
1177 fuzz = True
1185 fuzz = True
1178 if not printed_file:
1186 if not printed_file:
1179 ui.warn(pf + '\n')
1187 ui.warn(pf + '\n')
1180 printed_file = True
1188 printed_file = True
1181 ui.warn(line + '\n')
1189 ui.warn(line + '\n')
1182 elif line.find('saving rejects to file') >= 0:
1190 elif line.find('saving rejects to file') >= 0:
1183 ui.warn(line + '\n')
1191 ui.warn(line + '\n')
1184 elif line.find('FAILED') >= 0:
1192 elif line.find('FAILED') >= 0:
1185 if not printed_file:
1193 if not printed_file:
1186 ui.warn(pf + '\n')
1194 ui.warn(pf + '\n')
1187 printed_file = True
1195 printed_file = True
1188 ui.warn(line + '\n')
1196 ui.warn(line + '\n')
1189 code = fp.close()
1197 code = fp.close()
1190 if code:
1198 if code:
1191 raise PatchError(_("patch command failed: %s") %
1199 raise PatchError(_("patch command failed: %s") %
1192 util.explain_exit(code)[0])
1200 util.explain_exit(code)[0])
1193 return fuzz
1201 return fuzz
1194
1202
1195 def internalpatch(patchobj, ui, strip, cwd, files=None, eolmode='strict'):
1203 def internalpatch(patchobj, ui, strip, cwd, files=None, eolmode='strict'):
1196 """use builtin patch to apply <patchobj> to the working directory.
1204 """use builtin patch to apply <patchobj> to the working directory.
1197 returns whether patch was applied with fuzz factor."""
1205 returns whether patch was applied with fuzz factor."""
1198
1206
1199 if files is None:
1207 if files is None:
1200 files = {}
1208 files = {}
1201 if eolmode is None:
1209 if eolmode is None:
1202 eolmode = ui.config('patch', 'eol', 'strict')
1210 eolmode = ui.config('patch', 'eol', 'strict')
1203 if eolmode.lower() not in eolmodes:
1211 if eolmode.lower() not in eolmodes:
1204 raise util.Abort(_('unsupported line endings type: %s') % eolmode)
1212 raise util.Abort(_('unsupported line endings type: %s') % eolmode)
1205 eolmode = eolmode.lower()
1213 eolmode = eolmode.lower()
1206
1214
1207 try:
1215 try:
1208 fp = open(patchobj, 'rb')
1216 fp = open(patchobj, 'rb')
1209 except TypeError:
1217 except TypeError:
1210 fp = patchobj
1218 fp = patchobj
1211 if cwd:
1219 if cwd:
1212 curdir = os.getcwd()
1220 curdir = os.getcwd()
1213 os.chdir(cwd)
1221 os.chdir(cwd)
1214 try:
1222 try:
1215 ret = applydiff(ui, fp, files, strip=strip, eolmode=eolmode)
1223 ret = applydiff(ui, fp, files, strip=strip, eolmode=eolmode)
1216 finally:
1224 finally:
1217 if cwd:
1225 if cwd:
1218 os.chdir(curdir)
1226 os.chdir(curdir)
1219 if fp != patchobj:
1227 if fp != patchobj:
1220 fp.close()
1228 fp.close()
1221 if ret < 0:
1229 if ret < 0:
1222 raise PatchError(_('patch failed to apply'))
1230 raise PatchError(_('patch failed to apply'))
1223 return ret > 0
1231 return ret > 0
1224
1232
1225 def patch(patchname, ui, strip=1, cwd=None, files=None, eolmode='strict'):
1233 def patch(patchname, ui, strip=1, cwd=None, files=None, eolmode='strict'):
1226 """Apply <patchname> to the working directory.
1234 """Apply <patchname> to the working directory.
1227
1235
1228 'eolmode' specifies how end of lines should be handled. It can be:
1236 'eolmode' specifies how end of lines should be handled. It can be:
1229 - 'strict': inputs are read in binary mode, EOLs are preserved
1237 - 'strict': inputs are read in binary mode, EOLs are preserved
1230 - 'crlf': EOLs are ignored when patching and reset to CRLF
1238 - 'crlf': EOLs are ignored when patching and reset to CRLF
1231 - 'lf': EOLs are ignored when patching and reset to LF
1239 - 'lf': EOLs are ignored when patching and reset to LF
1232 - None: get it from user settings, default to 'strict'
1240 - None: get it from user settings, default to 'strict'
1233 'eolmode' is ignored when using an external patcher program.
1241 'eolmode' is ignored when using an external patcher program.
1234
1242
1235 Returns whether patch was applied with fuzz factor.
1243 Returns whether patch was applied with fuzz factor.
1236 """
1244 """
1237 patcher = ui.config('ui', 'patch')
1245 patcher = ui.config('ui', 'patch')
1238 if files is None:
1246 if files is None:
1239 files = {}
1247 files = {}
1240 try:
1248 try:
1241 if patcher:
1249 if patcher:
1242 return externalpatch(patcher, patchname, ui, strip, cwd, files)
1250 return externalpatch(patcher, patchname, ui, strip, cwd, files)
1243 return internalpatch(patchname, ui, strip, cwd, files, eolmode)
1251 return internalpatch(patchname, ui, strip, cwd, files, eolmode)
1244 except PatchError, err:
1252 except PatchError, err:
1245 raise util.Abort(str(err))
1253 raise util.Abort(str(err))
1246
1254
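An illustrative call to the entry point above, assuming the mercurial package is importable; the patch file name and repository path are placeholders:

    from mercurial import ui as uimod, patch as patchmod

    u = uimod.ui()
    fuzz = patchmod.patch('fix-typo.diff', u, strip=1,
                          cwd='/path/to/repo', eolmode='strict')
    if fuzz:
        u.warn('patch applied, but with fuzz\n')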
1247 def b85diff(to, tn):
1255 def b85diff(to, tn):
1248 '''print base85-encoded binary diff'''
1256 '''print base85-encoded binary diff'''
1249 def gitindex(text):
1257 def gitindex(text):
1250 if not text:
1258 if not text:
1251 return hex(nullid)
1259 return hex(nullid)
1252 l = len(text)
1260 l = len(text)
1253 s = util.sha1('blob %d\0' % l)
1261 s = util.sha1('blob %d\0' % l)
1254 s.update(text)
1262 s.update(text)
1255 return s.hexdigest()
1263 return s.hexdigest()
1256
1264
1257 def fmtline(line):
1265 def fmtline(line):
1258 l = len(line)
1266 l = len(line)
1259 if l <= 26:
1267 if l <= 26:
1260 l = chr(ord('A') + l - 1)
1268 l = chr(ord('A') + l - 1)
1261 else:
1269 else:
1262 l = chr(l - 26 + ord('a') - 1)
1270 l = chr(l - 26 + ord('a') - 1)
1263 return '%c%s\n' % (l, base85.b85encode(line, True))
1271 return '%c%s\n' % (l, base85.b85encode(line, True))
1264
1272
1265 def chunk(text, csize=52):
1273 def chunk(text, csize=52):
1266 l = len(text)
1274 l = len(text)
1267 i = 0
1275 i = 0
1268 while i < l:
1276 while i < l:
1269 yield text[i:i + csize]
1277 yield text[i:i + csize]
1270 i += csize
1278 i += csize
1271
1279
1272 tohash = gitindex(to)
1280 tohash = gitindex(to)
1273 tnhash = gitindex(tn)
1281 tnhash = gitindex(tn)
1274 if tohash == tnhash:
1282 if tohash == tnhash:
1275 return ""
1283 return ""
1276
1284
1277 # TODO: deltas
1285 # TODO: deltas
1278 ret = ['index %s..%s\nGIT binary patch\nliteral %s\n' %
1286 ret = ['index %s..%s\nGIT binary patch\nliteral %s\n' %
1279 (tohash, tnhash, len(tn))]
1287 (tohash, tnhash, len(tn))]
1280 for l in chunk(zlib.compress(tn)):
1288 for l in chunk(zlib.compress(tn)):
1281 ret.append(fmtline(l))
1289 ret.append(fmtline(l))
1282 ret.append('\n')
1290 ret.append('\n')
1283 return ''.join(ret)
1291 return ''.join(ret)
1284
1292
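The single-character prefix written by fmtline() encodes the pre-base85 chunk length: 1-26 map to 'A'-'Z' and 27-52 to 'a'-'z' (52 being the chunk size used above). A standalone check of that mapping:

    def lenchar(l):
        if l <= 26:
            return chr(ord('A') + l - 1)
        return chr(l - 26 + ord('a') - 1)

    assert lenchar(1) == 'A'
    assert lenchar(26) == 'Z'
    assert lenchar(27) == 'a'
    assert lenchar(52) == 'z'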
1285 class GitDiffRequired(Exception):
1293 class GitDiffRequired(Exception):
1286 pass
1294 pass
1287
1295
1288 def diffopts(ui, opts=None, untrusted=False):
1296 def diffopts(ui, opts=None, untrusted=False):
1289 def get(key, name=None, getter=ui.configbool):
1297 def get(key, name=None, getter=ui.configbool):
1290 return ((opts and opts.get(key)) or
1298 return ((opts and opts.get(key)) or
1291 getter('diff', name or key, None, untrusted=untrusted))
1299 getter('diff', name or key, None, untrusted=untrusted))
1292 return mdiff.diffopts(
1300 return mdiff.diffopts(
1293 text=opts and opts.get('text'),
1301 text=opts and opts.get('text'),
1294 git=get('git'),
1302 git=get('git'),
1295 nodates=get('nodates'),
1303 nodates=get('nodates'),
1296 showfunc=get('show_function', 'showfunc'),
1304 showfunc=get('show_function', 'showfunc'),
1297 ignorews=get('ignore_all_space', 'ignorews'),
1305 ignorews=get('ignore_all_space', 'ignorews'),
1298 ignorewsamount=get('ignore_space_change', 'ignorewsamount'),
1306 ignorewsamount=get('ignore_space_change', 'ignorewsamount'),
1299 ignoreblanklines=get('ignore_blank_lines', 'ignoreblanklines'),
1307 ignoreblanklines=get('ignore_blank_lines', 'ignoreblanklines'),
1300 context=get('unified', getter=ui.config))
1308 context=get('unified', getter=ui.config))
1301
1309
1302 def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None,
1310 def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None,
1303 losedatafn=None, prefix=''):
1311 losedatafn=None, prefix=''):
1304 '''yields diff of changes to files between two nodes, or node and
1312 '''yields diff of changes to files between two nodes, or node and
1305 working directory.
1313 working directory.
1306
1314
1307 if node1 is None, use first dirstate parent instead.
1315 if node1 is None, use first dirstate parent instead.
1308 if node2 is None, compare node1 with working directory.
1316 if node2 is None, compare node1 with working directory.
1309
1317
1310 losedatafn(**kwarg) is a callable run when opts.upgrade=True and
1318 losedatafn(**kwarg) is a callable run when opts.upgrade=True and
1311 every time some change cannot be represented with the current
1319 every time some change cannot be represented with the current
1312 patch format. Return False to upgrade to git patch format, True to
1320 patch format. Return False to upgrade to git patch format, True to
1313 accept the loss or raise an exception to abort the diff. It is
1321 accept the loss or raise an exception to abort the diff. It is
1314 called with the name of current file being diffed as 'fn'. If set
1322 called with the name of current file being diffed as 'fn'. If set
1315 to None, patches will always be upgraded to git format when
1323 to None, patches will always be upgraded to git format when
1316 necessary.
1324 necessary.
1317
1325
1318 prefix is a filename prefix that is prepended to all filenames on
1326 prefix is a filename prefix that is prepended to all filenames on
1319 display (used for subrepos).
1327 display (used for subrepos).
1320 '''
1328 '''
1321
1329
1322 if opts is None:
1330 if opts is None:
1323 opts = mdiff.defaultopts
1331 opts = mdiff.defaultopts
1324
1332
1325 if not node1 and not node2:
1333 if not node1 and not node2:
1326 node1 = repo.dirstate.parents()[0]
1334 node1 = repo.dirstate.parents()[0]
1327
1335
1328 def lrugetfilectx():
1336 def lrugetfilectx():
1329 cache = {}
1337 cache = {}
1330 order = []
1338 order = []
1331 def getfilectx(f, ctx):
1339 def getfilectx(f, ctx):
1332 fctx = ctx.filectx(f, filelog=cache.get(f))
1340 fctx = ctx.filectx(f, filelog=cache.get(f))
1333 if f not in cache:
1341 if f not in cache:
1334 if len(cache) > 20:
1342 if len(cache) > 20:
1335 del cache[order.pop(0)]
1343 del cache[order.pop(0)]
1336 cache[f] = fctx.filelog()
1344 cache[f] = fctx.filelog()
1337 else:
1345 else:
1338 order.remove(f)
1346 order.remove(f)
1339 order.append(f)
1347 order.append(f)
1340 return fctx
1348 return fctx
1341 return getfilectx
1349 return getfilectx
1342 getfilectx = lrugetfilectx()
1350 getfilectx = lrugetfilectx()
1343
1351
1344 ctx1 = repo[node1]
1352 ctx1 = repo[node1]
1345 ctx2 = repo[node2]
1353 ctx2 = repo[node2]
1346
1354
1347 if not changes:
1355 if not changes:
1348 changes = repo.status(ctx1, ctx2, match=match)
1356 changes = repo.status(ctx1, ctx2, match=match)
1349 modified, added, removed = changes[:3]
1357 modified, added, removed = changes[:3]
1350
1358
1351 if not modified and not added and not removed:
1359 if not modified and not added and not removed:
1352 return []
1360 return []
1353
1361
1354 revs = None
1362 revs = None
1355 if not repo.ui.quiet:
1363 if not repo.ui.quiet:
1356 hexfunc = repo.ui.debugflag and hex or short
1364 hexfunc = repo.ui.debugflag and hex or short
1357 revs = [hexfunc(node) for node in [node1, node2] if node]
1365 revs = [hexfunc(node) for node in [node1, node2] if node]
1358
1366
1359 copy = {}
1367 copy = {}
1360 if opts.git or opts.upgrade:
1368 if opts.git or opts.upgrade:
1361 copy = copies.copies(repo, ctx1, ctx2, repo[nullid])[0]
1369 copy = copies.copies(repo, ctx1, ctx2, repo[nullid])[0]
1362
1370
1363 difffn = lambda opts, losedata: trydiff(repo, revs, ctx1, ctx2,
1371 difffn = lambda opts, losedata: trydiff(repo, revs, ctx1, ctx2,
1364 modified, added, removed, copy, getfilectx, opts, losedata, prefix)
1372 modified, added, removed, copy, getfilectx, opts, losedata, prefix)
1365 if opts.upgrade and not opts.git:
1373 if opts.upgrade and not opts.git:
1366 try:
1374 try:
1367 def losedata(fn):
1375 def losedata(fn):
1368 if not losedatafn or not losedatafn(fn=fn):
1376 if not losedatafn or not losedatafn(fn=fn):
1369 raise GitDiffRequired()
1377 raise GitDiffRequired()
1370 # Buffer the whole output until we are sure it can be generated
1378 # Buffer the whole output until we are sure it can be generated
1371 return list(difffn(opts.copy(git=False), losedata))
1379 return list(difffn(opts.copy(git=False), losedata))
1372 except GitDiffRequired:
1380 except GitDiffRequired:
1373 return difffn(opts.copy(git=True), None)
1381 return difffn(opts.copy(git=True), None)
1374 else:
1382 else:
1375 return difffn(opts, None)
1383 return difffn(opts, None)
1376
1384
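A hedged sketch of driving this generator from the outside, assuming the mercurial package is importable; the repository path is a placeholder:

    from mercurial import hg, ui as uimod, patch as patchmod

    u = uimod.ui()
    repo = hg.repository(u, '/path/to/repo')       # illustrative path
    for chunk in patchmod.diff(repo, opts=patchmod.diffopts(u, {'git': True})):
        u.write(chunk)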
1377 def difflabel(func, *args, **kw):
1385 def difflabel(func, *args, **kw):
1378 '''yields 2-tuples of (output, label) based on the output of func()'''
1386 '''yields 2-tuples of (output, label) based on the output of func()'''
1379 prefixes = [('diff', 'diff.diffline'),
1387 prefixes = [('diff', 'diff.diffline'),
1380 ('copy', 'diff.extended'),
1388 ('copy', 'diff.extended'),
1381 ('rename', 'diff.extended'),
1389 ('rename', 'diff.extended'),
1382 ('old', 'diff.extended'),
1390 ('old', 'diff.extended'),
1383 ('new', 'diff.extended'),
1391 ('new', 'diff.extended'),
1384 ('deleted', 'diff.extended'),
1392 ('deleted', 'diff.extended'),
1385 ('---', 'diff.file_a'),
1393 ('---', 'diff.file_a'),
1386 ('+++', 'diff.file_b'),
1394 ('+++', 'diff.file_b'),
1387 ('@@', 'diff.hunk'),
1395 ('@@', 'diff.hunk'),
1388 ('-', 'diff.deleted'),
1396 ('-', 'diff.deleted'),
1389 ('+', 'diff.inserted')]
1397 ('+', 'diff.inserted')]
1390
1398
1391 for chunk in func(*args, **kw):
1399 for chunk in func(*args, **kw):
1392 lines = chunk.split('\n')
1400 lines = chunk.split('\n')
1393 for i, line in enumerate(lines):
1401 for i, line in enumerate(lines):
1394 if i != 0:
1402 if i != 0:
1395 yield ('\n', '')
1403 yield ('\n', '')
1396 stripline = line
1404 stripline = line
1397 if line and line[0] in '+-':
1405 if line and line[0] in '+-':
1398 # highlight trailing whitespace, but only in changed lines
1406 # highlight trailing whitespace, but only in changed lines
1399 stripline = line.rstrip()
1407 stripline = line.rstrip()
1400 for prefix, label in prefixes:
1408 for prefix, label in prefixes:
1401 if stripline.startswith(prefix):
1409 if stripline.startswith(prefix):
1402 yield (stripline, label)
1410 yield (stripline, label)
1403 break
1411 break
1404 else:
1412 else:
1405 yield (line, '')
1413 yield (line, '')
1406 if line != stripline:
1414 if line != stripline:
1407 yield (line[len(stripline):], 'diff.trailingwhitespace')
1415 yield (line[len(stripline):], 'diff.trailingwhitespace')
1408
1416
1409 def diffui(*args, **kw):
1417 def diffui(*args, **kw):
1410 '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
1418 '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
1411 return difflabel(diff, *args, **kw)
1419 return difflabel(diff, *args, **kw)
1412
1420
1413
1421
1414 def _addmodehdr(header, omode, nmode):
1422 def _addmodehdr(header, omode, nmode):
1415 if omode != nmode:
1423 if omode != nmode:
1416 header.append('old mode %s\n' % omode)
1424 header.append('old mode %s\n' % omode)
1417 header.append('new mode %s\n' % nmode)
1425 header.append('new mode %s\n' % nmode)
1418
1426
1419 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
1427 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
1420 copy, getfilectx, opts, losedatafn, prefix):
1428 copy, getfilectx, opts, losedatafn, prefix):
1421
1429
1422 def join(f):
1430 def join(f):
1423 return os.path.join(prefix, f)
1431 return os.path.join(prefix, f)
1424
1432
1425 date1 = util.datestr(ctx1.date())
1433 date1 = util.datestr(ctx1.date())
1426 man1 = ctx1.manifest()
1434 man1 = ctx1.manifest()
1427
1435
1428 gone = set()
1436 gone = set()
1429 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
1437 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
1430
1438
1431 copyto = dict([(v, k) for k, v in copy.items()])
1439 copyto = dict([(v, k) for k, v in copy.items()])
1432
1440
1433 if opts.git:
1441 if opts.git:
1434 revs = None
1442 revs = None
1435
1443
1436 for f in sorted(modified + added + removed):
1444 for f in sorted(modified + added + removed):
1437 to = None
1445 to = None
1438 tn = None
1446 tn = None
1439 dodiff = True
1447 dodiff = True
1440 header = []
1448 header = []
1441 if f in man1:
1449 if f in man1:
1442 to = getfilectx(f, ctx1).data()
1450 to = getfilectx(f, ctx1).data()
1443 if f not in removed:
1451 if f not in removed:
1444 tn = getfilectx(f, ctx2).data()
1452 tn = getfilectx(f, ctx2).data()
1445 a, b = f, f
1453 a, b = f, f
1446 if opts.git or losedatafn:
1454 if opts.git or losedatafn:
1447 if f in added:
1455 if f in added:
1448 mode = gitmode[ctx2.flags(f)]
1456 mode = gitmode[ctx2.flags(f)]
1449 if f in copy or f in copyto:
1457 if f in copy or f in copyto:
1450 if opts.git:
1458 if opts.git:
1451 if f in copy:
1459 if f in copy:
1452 a = copy[f]
1460 a = copy[f]
1453 else:
1461 else:
1454 a = copyto[f]
1462 a = copyto[f]
1455 omode = gitmode[man1.flags(a)]
1463 omode = gitmode[man1.flags(a)]
1456 _addmodehdr(header, omode, mode)
1464 _addmodehdr(header, omode, mode)
1457 if a in removed and a not in gone:
1465 if a in removed and a not in gone:
1458 op = 'rename'
1466 op = 'rename'
1459 gone.add(a)
1467 gone.add(a)
1460 else:
1468 else:
1461 op = 'copy'
1469 op = 'copy'
1462 header.append('%s from %s\n' % (op, join(a)))
1470 header.append('%s from %s\n' % (op, join(a)))
1463 header.append('%s to %s\n' % (op, join(f)))
1471 header.append('%s to %s\n' % (op, join(f)))
1464 to = getfilectx(a, ctx1).data()
1472 to = getfilectx(a, ctx1).data()
1465 else:
1473 else:
1466 losedatafn(f)
1474 losedatafn(f)
1467 else:
1475 else:
1468 if opts.git:
1476 if opts.git:
1469 header.append('new file mode %s\n' % mode)
1477 header.append('new file mode %s\n' % mode)
1470 elif ctx2.flags(f):
1478 elif ctx2.flags(f):
1471 losedatafn(f)
1479 losedatafn(f)
1472 # In theory, if tn was copied or renamed we should check
1480 # In theory, if tn was copied or renamed we should check
1473 # if the source is binary too but the copy record already
1481 # if the source is binary too but the copy record already
1474 # forces git mode.
1482 # forces git mode.
1475 if util.binary(tn):
1483 if util.binary(tn):
1476 if opts.git:
1484 if opts.git:
1477 dodiff = 'binary'
1485 dodiff = 'binary'
1478 else:
1486 else:
1479 losedatafn(f)
1487 losedatafn(f)
1480 if not opts.git and not tn:
1488 if not opts.git and not tn:
1481 # regular diffs cannot represent new empty file
1489 # regular diffs cannot represent new empty file
1482 losedatafn(f)
1490 losedatafn(f)
1483 elif f in removed:
1491 elif f in removed:
1484 if opts.git:
1492 if opts.git:
1485 # have we already reported a copy above?
1493 # have we already reported a copy above?
1486 if ((f in copy and copy[f] in added
1494 if ((f in copy and copy[f] in added
1487 and copyto[copy[f]] == f) or
1495 and copyto[copy[f]] == f) or
1488 (f in copyto and copyto[f] in added
1496 (f in copyto and copyto[f] in added
1489 and copy[copyto[f]] == f)):
1497 and copy[copyto[f]] == f)):
1490 dodiff = False
1498 dodiff = False
1491 else:
1499 else:
1492 header.append('deleted file mode %s\n' %
1500 header.append('deleted file mode %s\n' %
1493 gitmode[man1.flags(f)])
1501 gitmode[man1.flags(f)])
1494 elif not to or util.binary(to):
1502 elif not to or util.binary(to):
1495 # regular diffs cannot represent empty file deletion
1503 # regular diffs cannot represent empty file deletion
1496 losedatafn(f)
1504 losedatafn(f)
1497 else:
1505 else:
1498 oflag = man1.flags(f)
1506 oflag = man1.flags(f)
1499 nflag = ctx2.flags(f)
1507 nflag = ctx2.flags(f)
1500 binary = util.binary(to) or util.binary(tn)
1508 binary = util.binary(to) or util.binary(tn)
1501 if opts.git:
1509 if opts.git:
1502 _addmodehdr(header, gitmode[oflag], gitmode[nflag])
1510 _addmodehdr(header, gitmode[oflag], gitmode[nflag])
1503 if binary:
1511 if binary:
1504 dodiff = 'binary'
1512 dodiff = 'binary'
1505 elif binary or nflag != oflag:
1513 elif binary or nflag != oflag:
1506 losedatafn(f)
1514 losedatafn(f)
1507 if opts.git:
1515 if opts.git:
1508 header.insert(0, mdiff.diffline(revs, join(a), join(b), opts))
1516 header.insert(0, mdiff.diffline(revs, join(a), join(b), opts))
1509
1517
1510 if dodiff:
1518 if dodiff:
1511 if dodiff == 'binary':
1519 if dodiff == 'binary':
1512 text = b85diff(to, tn)
1520 text = b85diff(to, tn)
1513 else:
1521 else:
1514 text = mdiff.unidiff(to, date1,
1522 text = mdiff.unidiff(to, date1,
1515 # ctx2 date may be dynamic
1523 # ctx2 date may be dynamic
1516 tn, util.datestr(ctx2.date()),
1524 tn, util.datestr(ctx2.date()),
1517 join(a), join(b), revs, opts=opts)
1525 join(a), join(b), revs, opts=opts)
1518 if header and (text or len(header) > 1):
1526 if header and (text or len(header) > 1):
1519 yield ''.join(header)
1527 yield ''.join(header)
1520 if text:
1528 if text:
1521 yield text
1529 yield text
1522
1530
1523 def diffstatdata(lines):
1531 def diffstatdata(lines):
1524 filename, adds, removes = None, 0, 0
1532 filename, adds, removes = None, 0, 0
1525 for line in lines:
1533 for line in lines:
1526 if line.startswith('diff'):
1534 if line.startswith('diff'):
1527 if filename:
1535 if filename:
1528 isbinary = adds == 0 and removes == 0
1536 isbinary = adds == 0 and removes == 0
1529 yield (filename, adds, removes, isbinary)
1537 yield (filename, adds, removes, isbinary)
1530 # set numbers to 0 anyway when starting new file
1538 # set numbers to 0 anyway when starting new file
1531 adds, removes = 0, 0
1539 adds, removes = 0, 0
1532 if line.startswith('diff --git'):
1540 if line.startswith('diff --git'):
1533 filename = gitre.search(line).group(1)
1541 filename = gitre.search(line).group(1)
1534 else:
1542 else:
1535 # format: "diff -r ... -r ... filename"
1543 # format: "diff -r ... -r ... filename"
1536 filename = line.split(None, 5)[-1]
1544 filename = line.split(None, 5)[-1]
1537 elif line.startswith('+') and not line.startswith('+++'):
1545 elif line.startswith('+') and not line.startswith('+++'):
1538 adds += 1
1546 adds += 1
1539 elif line.startswith('-') and not line.startswith('---'):
1547 elif line.startswith('-') and not line.startswith('---'):
1540 removes += 1
1548 removes += 1
1541 if filename:
1549 if filename:
1542 isbinary = adds == 0 and removes == 0
1550 isbinary = adds == 0 and removes == 0
1543 yield (filename, adds, removes, isbinary)
1551 yield (filename, adds, removes, isbinary)
1544
1552
1545 def diffstat(lines, width=80, git=False):
1553 def diffstat(lines, width=80, git=False):
1546 output = []
1554 output = []
1547 stats = list(diffstatdata(lines))
1555 stats = list(diffstatdata(lines))
1548
1556
1549 maxtotal, maxname = 0, 0
1557 maxtotal, maxname = 0, 0
1550 totaladds, totalremoves = 0, 0
1558 totaladds, totalremoves = 0, 0
1551 hasbinary = False
1559 hasbinary = False
1552
1560
1553 sized = [(filename, adds, removes, isbinary, encoding.colwidth(filename))
1561 sized = [(filename, adds, removes, isbinary, encoding.colwidth(filename))
1554 for filename, adds, removes, isbinary in stats]
1562 for filename, adds, removes, isbinary in stats]
1555
1563
1556 for filename, adds, removes, isbinary, namewidth in sized:
1564 for filename, adds, removes, isbinary, namewidth in sized:
1557 totaladds += adds
1565 totaladds += adds
1558 totalremoves += removes
1566 totalremoves += removes
1559 maxname = max(maxname, namewidth)
1567 maxname = max(maxname, namewidth)
1560 maxtotal = max(maxtotal, adds + removes)
1568 maxtotal = max(maxtotal, adds + removes)
1561 if isbinary:
1569 if isbinary:
1562 hasbinary = True
1570 hasbinary = True
1563
1571
1564 countwidth = len(str(maxtotal))
1572 countwidth = len(str(maxtotal))
1565 if hasbinary and countwidth < 3:
1573 if hasbinary and countwidth < 3:
1566 countwidth = 3
1574 countwidth = 3
1567 graphwidth = width - countwidth - maxname - 6
1575 graphwidth = width - countwidth - maxname - 6
1568 if graphwidth < 10:
1576 if graphwidth < 10:
1569 graphwidth = 10
1577 graphwidth = 10
1570
1578
1571 def scale(i):
1579 def scale(i):
1572 if maxtotal <= graphwidth:
1580 if maxtotal <= graphwidth:
1573 return i
1581 return i
1574 # If diffstat runs out of room it doesn't print anything,
1582 # If diffstat runs out of room it doesn't print anything,
1575 # which isn't very useful, so always print at least one + or -
1583 # which isn't very useful, so always print at least one + or -
1576 # if there were at least some changes.
1584 # if there were at least some changes.
1577 return max(i * graphwidth // maxtotal, int(bool(i)))
1585 return max(i * graphwidth // maxtotal, int(bool(i)))
1578
1586
1579 for filename, adds, removes, isbinary, namewidth in sized:
1587 for filename, adds, removes, isbinary, namewidth in sized:
1580 if git and isbinary:
1588 if git and isbinary:
1581 count = 'Bin'
1589 count = 'Bin'
1582 else:
1590 else:
1583 count = adds + removes
1591 count = adds + removes
1584 pluses = '+' * scale(adds)
1592 pluses = '+' * scale(adds)
1585 minuses = '-' * scale(removes)
1593 minuses = '-' * scale(removes)
1586 output.append(' %s%s | %*s %s%s\n' %
1594 output.append(' %s%s | %*s %s%s\n' %
1587 (filename, ' ' * (maxname - namewidth),
1595 (filename, ' ' * (maxname - namewidth),
1588 countwidth, count,
1596 countwidth, count,
1589 pluses, minuses))
1597 pluses, minuses))
1590
1598
1591 if stats:
1599 if stats:
1592 output.append(_(' %d files changed, %d insertions(+), %d deletions(-)\n')
1600 output.append(_(' %d files changed, %d insertions(+), %d deletions(-)\n')
1593 % (len(stats), totaladds, totalremoves))
1601 % (len(stats), totaladds, totalremoves))
1594
1602
1595 return ''.join(output)
1603 return ''.join(output)
1596
1604
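A small usage sketch, assuming the mercurial package is importable; the diff text is made up for illustration:

    from mercurial import patch as patchmod

    lines = ['diff --git a/foo.py b/foo.py',
             '--- a/foo.py',
             '+++ b/foo.py',
             '@@ -1,2 +1,3 @@',
             ' unchanged',
             '-removed',
             '+added one',
             '+added two']
    print patchmod.diffstat(lines, width=60),
    # roughly:  foo.py |  3 ++-
    #           1 files changed, 2 insertions(+), 1 deletions(-)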
1597 def diffstatui(*args, **kw):
1605 def diffstatui(*args, **kw):
1598 '''like diffstat(), but yields 2-tuples of (output, label) for
1606 '''like diffstat(), but yields 2-tuples of (output, label) for
1599 ui.write()
1607 ui.write()
1600 '''
1608 '''
1601
1609
1602 for line in diffstat(*args, **kw).splitlines():
1610 for line in diffstat(*args, **kw).splitlines():
1603 if line and line[-1] in '+-':
1611 if line and line[-1] in '+-':
1604 name, graph = line.rsplit(' ', 1)
1612 name, graph = line.rsplit(' ', 1)
1605 yield (name + ' ', '')
1613 yield (name + ' ', '')
1606 m = re.search(r'\++', graph)
1614 m = re.search(r'\++', graph)
1607 if m:
1615 if m:
1608 yield (m.group(0), 'diffstat.inserted')
1616 yield (m.group(0), 'diffstat.inserted')
1609 m = re.search(r'-+', graph)
1617 m = re.search(r'-+', graph)
1610 if m:
1618 if m:
1611 yield (m.group(0), 'diffstat.deleted')
1619 yield (m.group(0), 'diffstat.deleted')
1612 else:
1620 else:
1613 yield (line, '')
1621 yield (line, '')
1614 yield ('\n', '')
1622 yield ('\n', '')
@@ -1,1498 +1,1493 b''
1 # util.py - Mercurial utility functions and platform specific implementations
1 # util.py - Mercurial utility functions and platform specific implementations
2 #
2 #
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 #
6 #
7 # This software may be used and distributed according to the terms of the
7 # This software may be used and distributed according to the terms of the
8 # GNU General Public License version 2 or any later version.
8 # GNU General Public License version 2 or any later version.
9
9
10 """Mercurial utility functions and platform specfic implementations.
10 """Mercurial utility functions and platform specfic implementations.
11
11
12 This contains helper routines that are independent of the SCM core and
12 This contains helper routines that are independent of the SCM core and
13 hide platform-specific details from the core.
13 hide platform-specific details from the core.
14 """
14 """
15
15
16 from i18n import _
16 from i18n import _
17 import error, osutil, encoding
17 import error, osutil, encoding
18 import errno, re, shutil, sys, tempfile, traceback
18 import errno, re, shutil, sys, tempfile, traceback
19 import os, stat, time, calendar, textwrap, unicodedata, signal
19 import os, stat, time, calendar, textwrap, unicodedata, signal
20 import imp, socket
20 import imp, socket
21
21
22 # Python compatibility
22 # Python compatibility
23
23
24 def sha1(s):
24 def sha1(s):
25 return _fastsha1(s)
25 return _fastsha1(s)
26
26
27 def _fastsha1(s):
27 def _fastsha1(s):
28 # This function will import sha1 from hashlib or sha (whichever is
28 # This function will import sha1 from hashlib or sha (whichever is
29 # available) and overwrite itself with it on the first call.
29 # available) and overwrite itself with it on the first call.
30 # Subsequent calls will go directly to the imported function.
30 # Subsequent calls will go directly to the imported function.
31 if sys.version_info >= (2, 5):
31 if sys.version_info >= (2, 5):
32 from hashlib import sha1 as _sha1
32 from hashlib import sha1 as _sha1
33 else:
33 else:
34 from sha import sha as _sha1
34 from sha import sha as _sha1
35 global _fastsha1, sha1
35 global _fastsha1, sha1
36 _fastsha1 = sha1 = _sha1
36 _fastsha1 = sha1 = _sha1
37 return _sha1(s)
37 return _sha1(s)
38
38
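The same rebind-on-first-call idea in isolation, with a made-up function standing in for the expensive import:

    def _slowgreet(name):
        # ...one-time expensive setup would happen here...
        def _fastgreet(name):
            return 'hello ' + name
        global _slowgreet
        _slowgreet = _fastgreet       # rebind so later calls skip the setup
        return _fastgreet(name)

    assert _slowgreet('world') == 'hello world'   # first call, pays the setup
    assert _slowgreet('again') == 'hello again'   # later calls go straight through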
39 import __builtin__
39 import __builtin__
40
40
41 if sys.version_info[0] < 3:
41 if sys.version_info[0] < 3:
42 def fakebuffer(sliceable, offset=0):
42 def fakebuffer(sliceable, offset=0):
43 return sliceable[offset:]
43 return sliceable[offset:]
44 else:
44 else:
45 def fakebuffer(sliceable, offset=0):
45 def fakebuffer(sliceable, offset=0):
46 return memoryview(sliceable)[offset:]
46 return memoryview(sliceable)[offset:]
47 try:
47 try:
48 buffer
48 buffer
49 except NameError:
49 except NameError:
50 __builtin__.buffer = fakebuffer
50 __builtin__.buffer = fakebuffer
51
51
52 import subprocess
52 import subprocess
53 closefds = os.name == 'posix'
53 closefds = os.name == 'posix'
54
54
55 def popen2(cmd, env=None, newlines=False):
55 def popen2(cmd, env=None, newlines=False):
56 # Setting bufsize to -1 lets the system decide the buffer size.
56 # Setting bufsize to -1 lets the system decide the buffer size.
57 # The default for bufsize is 0, meaning unbuffered. This leads to
57 # The default for bufsize is 0, meaning unbuffered. This leads to
58 # poor performance on Mac OS X: http://bugs.python.org/issue4194
58 # poor performance on Mac OS X: http://bugs.python.org/issue4194
59 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
59 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
60 close_fds=closefds,
60 close_fds=closefds,
61 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
61 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
62 universal_newlines=newlines,
62 universal_newlines=newlines,
63 env=env)
63 env=env)
64 return p.stdin, p.stdout
64 return p.stdin, p.stdout
65
65
66 def popen3(cmd, env=None, newlines=False):
66 def popen3(cmd, env=None, newlines=False):
67 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
67 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
68 close_fds=closefds,
68 close_fds=closefds,
69 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
69 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
70 stderr=subprocess.PIPE,
70 stderr=subprocess.PIPE,
71 universal_newlines=newlines,
71 universal_newlines=newlines,
72 env=env)
72 env=env)
73 return p.stdin, p.stdout, p.stderr
73 return p.stdin, p.stdout, p.stderr
74
74
75 def version():
75 def version():
76 """Return version information if available."""
76 """Return version information if available."""
77 try:
77 try:
78 import __version__
78 import __version__
79 return __version__.version
79 return __version__.version
80 except ImportError:
80 except ImportError:
81 return 'unknown'
81 return 'unknown'
82
82
83 # used by parsedate
83 # used by parsedate
84 defaultdateformats = (
84 defaultdateformats = (
85 '%Y-%m-%d %H:%M:%S',
85 '%Y-%m-%d %H:%M:%S',
86 '%Y-%m-%d %I:%M:%S%p',
86 '%Y-%m-%d %I:%M:%S%p',
87 '%Y-%m-%d %H:%M',
87 '%Y-%m-%d %H:%M',
88 '%Y-%m-%d %I:%M%p',
88 '%Y-%m-%d %I:%M%p',
89 '%Y-%m-%d',
89 '%Y-%m-%d',
90 '%m-%d',
90 '%m-%d',
91 '%m/%d',
91 '%m/%d',
92 '%m/%d/%y',
92 '%m/%d/%y',
93 '%m/%d/%Y',
93 '%m/%d/%Y',
94 '%a %b %d %H:%M:%S %Y',
94 '%a %b %d %H:%M:%S %Y',
95 '%a %b %d %I:%M:%S%p %Y',
95 '%a %b %d %I:%M:%S%p %Y',
96 '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
96 '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
97 '%b %d %H:%M:%S %Y',
97 '%b %d %H:%M:%S %Y',
98 '%b %d %I:%M:%S%p %Y',
98 '%b %d %I:%M:%S%p %Y',
99 '%b %d %H:%M:%S',
99 '%b %d %H:%M:%S',
100 '%b %d %I:%M:%S%p',
100 '%b %d %I:%M:%S%p',
101 '%b %d %H:%M',
101 '%b %d %H:%M',
102 '%b %d %I:%M%p',
102 '%b %d %I:%M%p',
103 '%b %d %Y',
103 '%b %d %Y',
104 '%b %d',
104 '%b %d',
105 '%H:%M:%S',
105 '%H:%M:%S',
106 '%I:%M:%S%p',
106 '%I:%M:%S%p',
107 '%H:%M',
107 '%H:%M',
108 '%I:%M%p',
108 '%I:%M%p',
109 )
109 )
110
110
111 extendeddateformats = defaultdateformats + (
111 extendeddateformats = defaultdateformats + (
112 "%Y",
112 "%Y",
113 "%Y-%m",
113 "%Y-%m",
114 "%b",
114 "%b",
115 "%b %Y",
115 "%b %Y",
116 )
116 )
117
117
118 def cachefunc(func):
118 def cachefunc(func):
119 '''cache the result of function calls'''
119 '''cache the result of function calls'''
120     # XXX doesn't handle keyword args
120     # XXX doesn't handle keyword args
121 cache = {}
121 cache = {}
122 if func.func_code.co_argcount == 1:
122 if func.func_code.co_argcount == 1:
123 # we gain a small amount of time because
123 # we gain a small amount of time because
124 # we don't need to pack/unpack the list
124 # we don't need to pack/unpack the list
125 def f(arg):
125 def f(arg):
126 if arg not in cache:
126 if arg not in cache:
127 cache[arg] = func(arg)
127 cache[arg] = func(arg)
128 return cache[arg]
128 return cache[arg]
129 else:
129 else:
130 def f(*args):
130 def f(*args):
131 if args not in cache:
131 if args not in cache:
132 cache[args] = func(*args)
132 cache[args] = func(*args)
133 return cache[args]
133 return cache[args]
134
134
135 return f
135 return f
136
136
137 def lrucachefunc(func):
137 def lrucachefunc(func):
138 '''cache most recent results of function calls'''
138 '''cache most recent results of function calls'''
139 cache = {}
139 cache = {}
140 order = []
140 order = []
141 if func.func_code.co_argcount == 1:
141 if func.func_code.co_argcount == 1:
142 def f(arg):
142 def f(arg):
143 if arg not in cache:
143 if arg not in cache:
144 if len(cache) > 20:
144 if len(cache) > 20:
145 del cache[order.pop(0)]
145 del cache[order.pop(0)]
146 cache[arg] = func(arg)
146 cache[arg] = func(arg)
147 else:
147 else:
148 order.remove(arg)
148 order.remove(arg)
149 order.append(arg)
149 order.append(arg)
150 return cache[arg]
150 return cache[arg]
151 else:
151 else:
152 def f(*args):
152 def f(*args):
153 if args not in cache:
153 if args not in cache:
154 if len(cache) > 20:
154 if len(cache) > 20:
155 del cache[order.pop(0)]
155 del cache[order.pop(0)]
156 cache[args] = func(*args)
156 cache[args] = func(*args)
157 else:
157 else:
158 order.remove(args)
158 order.remove(args)
159 order.append(args)
159 order.append(args)
160 return cache[args]
160 return cache[args]
161
161
162 return f
162 return f
163
163
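Usage sketch for the helper above; the wrapped function is illustrative:

    def _cheapstat(path):
        return os.stat(path)          # stands in for any costly one-argument call

    _cheapstat = lrucachefunc(_cheapstat)
    st = _cheapstat('.')              # computed and cached
    st = _cheapstat('.')              # served from the 20-entry cache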
164 class propertycache(object):
164 class propertycache(object):
165 def __init__(self, func):
165 def __init__(self, func):
166 self.func = func
166 self.func = func
167 self.name = func.__name__
167 self.name = func.__name__
168 def __get__(self, obj, type=None):
168 def __get__(self, obj, type=None):
169 result = self.func(obj)
169 result = self.func(obj)
170 setattr(obj, self.name, result)
170 setattr(obj, self.name, result)
171 return result
171 return result
172
172
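A usage sketch for propertycache with a made-up class: the decorated method runs once per instance, after which the stored result shadows the descriptor as a plain attribute:

    class _demo(object):
        @propertycache
        def expensive(self):
            return sum(xrange(100000))     # computed on first access only

    d = _demo()
    first = d.expensive                    # runs the method and caches the value
    second = d.expensive                   # plain attribute lookup from here on
    assert first == second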
173 def pipefilter(s, cmd):
173 def pipefilter(s, cmd):
174 '''filter string S through command CMD, returning its output'''
174 '''filter string S through command CMD, returning its output'''
175 p = subprocess.Popen(cmd, shell=True, close_fds=closefds,
175 p = subprocess.Popen(cmd, shell=True, close_fds=closefds,
176 stdin=subprocess.PIPE, stdout=subprocess.PIPE)
176 stdin=subprocess.PIPE, stdout=subprocess.PIPE)
177 pout, perr = p.communicate(s)
177 pout, perr = p.communicate(s)
178 return pout
178 return pout
179
179
180 def tempfilter(s, cmd):
180 def tempfilter(s, cmd):
181 '''filter string S through a pair of temporary files with CMD.
181 '''filter string S through a pair of temporary files with CMD.
182 CMD is used as a template to create the real command to be run,
182 CMD is used as a template to create the real command to be run,
183 with the strings INFILE and OUTFILE replaced by the real names of
183 with the strings INFILE and OUTFILE replaced by the real names of
184 the temporary files generated.'''
184 the temporary files generated.'''
185 inname, outname = None, None
185 inname, outname = None, None
186 try:
186 try:
187 infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
187 infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
188 fp = os.fdopen(infd, 'wb')
188 fp = os.fdopen(infd, 'wb')
189 fp.write(s)
189 fp.write(s)
190 fp.close()
190 fp.close()
191 outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
191 outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
192 os.close(outfd)
192 os.close(outfd)
193 cmd = cmd.replace('INFILE', inname)
193 cmd = cmd.replace('INFILE', inname)
194 cmd = cmd.replace('OUTFILE', outname)
194 cmd = cmd.replace('OUTFILE', outname)
195 code = os.system(cmd)
195 code = os.system(cmd)
196 if sys.platform == 'OpenVMS' and code & 1:
196 if sys.platform == 'OpenVMS' and code & 1:
197 code = 0
197 code = 0
198 if code:
198 if code:
199 raise Abort(_("command '%s' failed: %s") %
199 raise Abort(_("command '%s' failed: %s") %
200 (cmd, explain_exit(code)))
200 (cmd, explain_exit(code)))
201 return open(outname, 'rb').read()
201 return open(outname, 'rb').read()
202 finally:
202 finally:
203 try:
203 try:
204 if inname:
204 if inname:
205 os.unlink(inname)
205 os.unlink(inname)
206 except:
206 except:
207 pass
207 pass
208 try:
208 try:
209 if outname:
209 if outname:
210 os.unlink(outname)
210 os.unlink(outname)
211 except:
211 except:
212 pass
212 pass
213
213
214 filtertable = {
214 filtertable = {
215 'tempfile:': tempfilter,
215 'tempfile:': tempfilter,
216 'pipe:': pipefilter,
216 'pipe:': pipefilter,
217 }
217 }
218
218
219 def filter(s, cmd):
219 def filter(s, cmd):
220 "filter a string through a command that transforms its input to its output"
220 "filter a string through a command that transforms its input to its output"
221 for name, fn in filtertable.iteritems():
221 for name, fn in filtertable.iteritems():
222 if cmd.startswith(name):
222 if cmd.startswith(name):
223 return fn(s, cmd[len(name):].lstrip())
223 return fn(s, cmd[len(name):].lstrip())
224 return pipefilter(s, cmd)
224 return pipefilter(s, cmd)
225
225
226 def binary(s):
226 def binary(s):
227 """return true if a string is binary data"""
227 """return true if a string is binary data"""
228 return bool(s and '\0' in s)
228 return bool(s and '\0' in s)
229
229
230 def increasingchunks(source, min=1024, max=65536):
230 def increasingchunks(source, min=1024, max=65536):
231 '''return no less than min bytes per chunk while data remains,
231 '''return no less than min bytes per chunk while data remains,
232 doubling min after each chunk until it reaches max'''
232 doubling min after each chunk until it reaches max'''
233 def log2(x):
233 def log2(x):
234 if not x:
234 if not x:
235 return 0
235 return 0
236 i = 0
236 i = 0
237 while x:
237 while x:
238 x >>= 1
238 x >>= 1
239 i += 1
239 i += 1
240 return i - 1
240 return i - 1
241
241
242 buf = []
242 buf = []
243 blen = 0
243 blen = 0
244 for chunk in source:
244 for chunk in source:
245 buf.append(chunk)
245 buf.append(chunk)
246 blen += len(chunk)
246 blen += len(chunk)
247 if blen >= min:
247 if blen >= min:
248 if min < max:
248 if min < max:
249 min = min << 1
249 min = min << 1
250 nmin = 1 << log2(blen)
250 nmin = 1 << log2(blen)
251 if nmin > min:
251 if nmin > min:
252 min = nmin
252 min = nmin
253 if min > max:
253 if min > max:
254 min = max
254 min = max
255 yield ''.join(buf)
255 yield ''.join(buf)
256 blen = 0
256 blen = 0
257 buf = []
257 buf = []
258 if buf:
258 if buf:
259 yield ''.join(buf)
259 yield ''.join(buf)
260
260
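For example, feeding it a stream of 1 KB pieces yields joined chunks of roughly 1 KB, 2 KB, 4 KB and so on, capped at 64 KB, with whatever remains emitted at the end:

    pieces = ('x' * 1024 for _ in xrange(200))
    sizes = [len(c) for c in increasingchunks(pieces)]
    # sizes run 1024, 2048, 4096, ... up to 65536, then a final smaller tail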
261 Abort = error.Abort
261 Abort = error.Abort
262
262
263 def always(fn):
263 def always(fn):
264 return True
264 return True
265
265
266 def never(fn):
266 def never(fn):
267 return False
267 return False
268
268
269 def pathto(root, n1, n2):
269 def pathto(root, n1, n2):
270 '''return the relative path from one place to another.
270 '''return the relative path from one place to another.
271 root should use os.sep to separate directories
271 root should use os.sep to separate directories
272 n1 should use os.sep to separate directories
272 n1 should use os.sep to separate directories
273 n2 should use "/" to separate directories
273 n2 should use "/" to separate directories
274 returns an os.sep-separated path.
274 returns an os.sep-separated path.
275
275
276 If n1 is a relative path, it's assumed it's
276 If n1 is a relative path, it's assumed it's
277 relative to root.
277 relative to root.
278 n2 should always be relative to root.
278 n2 should always be relative to root.
279 '''
279 '''
280 if not n1:
280 if not n1:
281 return localpath(n2)
281 return localpath(n2)
282 if os.path.isabs(n1):
282 if os.path.isabs(n1):
283 if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
283 if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
284 return os.path.join(root, localpath(n2))
284 return os.path.join(root, localpath(n2))
285 n2 = '/'.join((pconvert(root), n2))
285 n2 = '/'.join((pconvert(root), n2))
286 a, b = splitpath(n1), n2.split('/')
286 a, b = splitpath(n1), n2.split('/')
287 a.reverse()
287 a.reverse()
288 b.reverse()
288 b.reverse()
289 while a and b and a[-1] == b[-1]:
289 while a and b and a[-1] == b[-1]:
290 a.pop()
290 a.pop()
291 b.pop()
291 b.pop()
292 b.reverse()
292 b.reverse()
293 return os.sep.join((['..'] * len(a)) + b) or '.'
293 return os.sep.join((['..'] * len(a)) + b) or '.'
294
294
295 def canonpath(root, cwd, myname, auditor=None):
295 def canonpath(root, cwd, myname, auditor=None):
296 """return the canonical path of myname, given cwd and root"""
296 """return the canonical path of myname, given cwd and root"""
297 if endswithsep(root):
297 if endswithsep(root):
298 rootsep = root
298 rootsep = root
299 else:
299 else:
300 rootsep = root + os.sep
300 rootsep = root + os.sep
301 name = myname
301 name = myname
302 if not os.path.isabs(name):
302 if not os.path.isabs(name):
303 name = os.path.join(root, cwd, name)
303 name = os.path.join(root, cwd, name)
304 name = os.path.normpath(name)
304 name = os.path.normpath(name)
305 if auditor is None:
305 if auditor is None:
306 auditor = path_auditor(root)
306 auditor = path_auditor(root)
307 if name != rootsep and name.startswith(rootsep):
307 if name != rootsep and name.startswith(rootsep):
308 name = name[len(rootsep):]
308 name = name[len(rootsep):]
309 auditor(name)
309 auditor(name)
310 return pconvert(name)
310 return pconvert(name)
311 elif name == root:
311 elif name == root:
312 return ''
312 return ''
313 else:
313 else:
314 # Determine whether `name' is in the hierarchy at or beneath `root',
314 # Determine whether `name' is in the hierarchy at or beneath `root',
315 # by iterating name=dirname(name) until that causes no change (can't
315 # by iterating name=dirname(name) until that causes no change (can't
316 # check name == '/', because that doesn't work on windows). For each
316 # check name == '/', because that doesn't work on windows). For each
317 # `name', compare dev/inode numbers. If they match, the list `rel'
317 # `name', compare dev/inode numbers. If they match, the list `rel'
318 # holds the reversed list of components making up the relative file
318 # holds the reversed list of components making up the relative file
319 # name we want.
319 # name we want.
320 root_st = os.stat(root)
320 root_st = os.stat(root)
321 rel = []
321 rel = []
322 while True:
322 while True:
323 try:
323 try:
324 name_st = os.stat(name)
324 name_st = os.stat(name)
325 except OSError:
325 except OSError:
326 break
326 break
327 if samestat(name_st, root_st):
327 if samestat(name_st, root_st):
328 if not rel:
328 if not rel:
329 # name was actually the same as root (maybe a symlink)
329 # name was actually the same as root (maybe a symlink)
330 return ''
330 return ''
331 rel.reverse()
331 rel.reverse()
332 name = os.path.join(*rel)
332 name = os.path.join(*rel)
333 auditor(name)
333 auditor(name)
334 return pconvert(name)
334 return pconvert(name)
335 dirname, basename = os.path.split(name)
335 dirname, basename = os.path.split(name)
336 rel.append(basename)
336 rel.append(basename)
337 if dirname == name:
337 if dirname == name:
338 break
338 break
339 name = dirname
339 name = dirname
340
340
341 raise Abort('%s not under root' % myname)
341 raise Abort('%s not under root' % myname)
342
342
343 _hgexecutable = None
343 _hgexecutable = None
344
344
345 def main_is_frozen():
345 def main_is_frozen():
346 """return True if we are a frozen executable.
346 """return True if we are a frozen executable.
347
347
348 The code supports py2exe (most common, Windows only) and tools/freeze
348 The code supports py2exe (most common, Windows only) and tools/freeze
349 (portable, not much used).
349 (portable, not much used).
350 """
350 """
351 return (hasattr(sys, "frozen") or # new py2exe
351 return (hasattr(sys, "frozen") or # new py2exe
352 hasattr(sys, "importers") or # old py2exe
352 hasattr(sys, "importers") or # old py2exe
353 imp.is_frozen("__main__")) # tools/freeze
353 imp.is_frozen("__main__")) # tools/freeze
354
354
355 def hgexecutable():
355 def hgexecutable():
356 """return location of the 'hg' executable.
356 """return location of the 'hg' executable.
357
357
358 Defaults to $HG or 'hg' in the search path.
358 Defaults to $HG or 'hg' in the search path.
359 """
359 """
360 if _hgexecutable is None:
360 if _hgexecutable is None:
361 hg = os.environ.get('HG')
361 hg = os.environ.get('HG')
362 if hg:
362 if hg:
363 set_hgexecutable(hg)
363 set_hgexecutable(hg)
364 elif main_is_frozen():
364 elif main_is_frozen():
365 set_hgexecutable(sys.executable)
365 set_hgexecutable(sys.executable)
366 else:
366 else:
367 exe = find_exe('hg') or os.path.basename(sys.argv[0])
367 exe = find_exe('hg') or os.path.basename(sys.argv[0])
368 set_hgexecutable(exe)
368 set_hgexecutable(exe)
369 return _hgexecutable
369 return _hgexecutable
370
370
371 def set_hgexecutable(path):
371 def set_hgexecutable(path):
372 """set location of the 'hg' executable"""
372 """set location of the 'hg' executable"""
373 global _hgexecutable
373 global _hgexecutable
374 _hgexecutable = path
374 _hgexecutable = path
375
375
376 def system(cmd, environ={}, cwd=None, onerr=None, errprefix=None, out=None):
376 def system(cmd, environ={}, cwd=None, onerr=None, errprefix=None, out=None):
377 '''enhanced shell command execution.
377 '''enhanced shell command execution.
378 run with environment maybe modified, maybe in different dir.
378 run with environment maybe modified, maybe in different dir.
379
379
380 if the command fails and onerr is None, return the status. if onerr is a
380 if the command fails and onerr is None, return the status. if onerr is a
381 ui object, print an error message and return the status, else raise onerr as an
381 ui object, print an error message and return the status, else raise onerr as an
382 exception.
382 exception.
383
383
384 if out is specified, it is assumed to be a file-like object that has a
384 if out is specified, it is assumed to be a file-like object that has a
385 write() method. stdout and stderr will be redirected to out.'''
385 write() method. stdout and stderr will be redirected to out.'''
386 def py2shell(val):
386 def py2shell(val):
387 'convert python object into string that is useful to shell'
387 'convert python object into string that is useful to shell'
388 if val is None or val is False:
388 if val is None or val is False:
389 return '0'
389 return '0'
390 if val is True:
390 if val is True:
391 return '1'
391 return '1'
392 return str(val)
392 return str(val)
393 origcmd = cmd
393 origcmd = cmd
394 if os.name == 'nt':
394 if os.name == 'nt':
395 cmd = '"%s"' % cmd
395 cmd = '"%s"' % cmd
396 env = dict(os.environ)
396 env = dict(os.environ)
397 env.update((k, py2shell(v)) for k, v in environ.iteritems())
397 env.update((k, py2shell(v)) for k, v in environ.iteritems())
398 env['HG'] = hgexecutable()
398 env['HG'] = hgexecutable()
399 if out is None:
399 if out is None:
400 rc = subprocess.call(cmd, shell=True, close_fds=closefds,
400 rc = subprocess.call(cmd, shell=True, close_fds=closefds,
401 env=env, cwd=cwd)
401 env=env, cwd=cwd)
402 else:
402 else:
403 proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
403 proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
404 env=env, cwd=cwd, stdout=subprocess.PIPE,
404 env=env, cwd=cwd, stdout=subprocess.PIPE,
405 stderr=subprocess.STDOUT)
405 stderr=subprocess.STDOUT)
406 for line in proc.stdout:
406 for line in proc.stdout:
407 out.write(line)
407 out.write(line)
408 proc.wait()
408 proc.wait()
409 rc = proc.returncode
409 rc = proc.returncode
410 if sys.platform == 'OpenVMS' and rc & 1:
410 if sys.platform == 'OpenVMS' and rc & 1:
411 rc = 0
411 rc = 0
412 if rc and onerr:
412 if rc and onerr:
413 errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
413 errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
414 explain_exit(rc)[0])
414 explain_exit(rc)[0])
415 if errprefix:
415 if errprefix:
416 errmsg = '%s: %s' % (errprefix, errmsg)
416 errmsg = '%s: %s' % (errprefix, errmsg)
417 try:
417 try:
418 onerr.warn(errmsg + '\n')
418 onerr.warn(errmsg + '\n')
419 except AttributeError:
419 except AttributeError:
420 raise onerr(errmsg)
420 raise onerr(errmsg)
421 return rc
421 return rc
422
422
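A minimal usage sketch of system() above (assuming this module is importable as mercurial.util; the command, environment variable and output buffer are illustrative placeholders). Note that environ values pass through py2shell, so True becomes '1':

    import cStringIO
    from mercurial import util

    buf = cStringIO.StringIO()
    # run a shell command with one extra environment variable,
    # redirecting stdout and stderr into buf instead of the terminal
    rc = util.system('ls -l', environ={'MYFLAG': True}, out=buf)
    print rc, buf.getvalue()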
423 def checksignature(func):
423 def checksignature(func):
424 '''wrap a function with code to check for calling errors'''
424 '''wrap a function with code to check for calling errors'''
425 def check(*args, **kwargs):
425 def check(*args, **kwargs):
426 try:
426 try:
427 return func(*args, **kwargs)
427 return func(*args, **kwargs)
428 except TypeError:
428 except TypeError:
429 if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
429 if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
430 raise error.SignatureError
430 raise error.SignatureError
431 raise
431 raise
432
432
433 return check
433 return check
434
434
435 def unlink(f):
435 def unlink(f):
436 """unlink and remove the directory if it is empty"""
436 """unlink and remove the directory if it is empty"""
437 os.unlink(f)
437 os.unlink(f)
438 # try removing directories that might now be empty
438 # try removing directories that might now be empty
439 try:
439 try:
440 os.removedirs(os.path.dirname(f))
440 os.removedirs(os.path.dirname(f))
441 except OSError:
441 except OSError:
442 pass
442 pass
443
443
444 def copyfile(src, dest):
444 def copyfile(src, dest):
445 "copy a file, preserving mode and atime/mtime"
445 "copy a file, preserving mode and atime/mtime"
446 if os.path.islink(src):
446 if os.path.islink(src):
447 try:
447 try:
448 os.unlink(dest)
448 os.unlink(dest)
449 except:
449 except:
450 pass
450 pass
451 os.symlink(os.readlink(src), dest)
451 os.symlink(os.readlink(src), dest)
452 else:
452 else:
453 try:
453 try:
454 shutil.copyfile(src, dest)
454 shutil.copyfile(src, dest)
455 shutil.copymode(src, dest)
455 shutil.copymode(src, dest)
456 except shutil.Error, inst:
456 except shutil.Error, inst:
457 raise Abort(str(inst))
457 raise Abort(str(inst))
458
458
459 def copyfiles(src, dst, hardlink=None):
459 def copyfiles(src, dst, hardlink=None):
460 """Copy a directory tree using hardlinks if possible"""
460 """Copy a directory tree using hardlinks if possible"""
461
461
462 if hardlink is None:
462 if hardlink is None:
463 hardlink = (os.stat(src).st_dev ==
463 hardlink = (os.stat(src).st_dev ==
464 os.stat(os.path.dirname(dst)).st_dev)
464 os.stat(os.path.dirname(dst)).st_dev)
465
465
466 num = 0
466 num = 0
467 if os.path.isdir(src):
467 if os.path.isdir(src):
468 os.mkdir(dst)
468 os.mkdir(dst)
469 for name, kind in osutil.listdir(src):
469 for name, kind in osutil.listdir(src):
470 srcname = os.path.join(src, name)
470 srcname = os.path.join(src, name)
471 dstname = os.path.join(dst, name)
471 dstname = os.path.join(dst, name)
472 hardlink, n = copyfiles(srcname, dstname, hardlink)
472 hardlink, n = copyfiles(srcname, dstname, hardlink)
473 num += n
473 num += n
474 else:
474 else:
475 if hardlink:
475 if hardlink:
476 try:
476 try:
477 os_link(src, dst)
477 os_link(src, dst)
478 except (IOError, OSError):
478 except (IOError, OSError):
479 hardlink = False
479 hardlink = False
480 shutil.copy(src, dst)
480 shutil.copy(src, dst)
481 else:
481 else:
482 shutil.copy(src, dst)
482 shutil.copy(src, dst)
483 num += 1
483 num += 1
484
484
485 return hardlink, num
485 return hardlink, num
486
486
487 class path_auditor(object):
487 class path_auditor(object):
488 '''ensure that a filesystem path contains no banned components.
488 '''ensure that a filesystem path contains no banned components.
489 the following properties of a path are checked:
489 the following properties of a path are checked:
490
490
491 - under top-level .hg
491 - under top-level .hg
492 - starts at the root of a windows drive
492 - starts at the root of a windows drive
493 - contains ".."
493 - contains ".."
494 - traverses a symlink (e.g. a/symlink_here/b)
494 - traverses a symlink (e.g. a/symlink_here/b)
495 - inside a nested repository (a callback can be used to approve
495 - inside a nested repository (a callback can be used to approve
496 some nested repositories, e.g., subrepositories)
496 some nested repositories, e.g., subrepositories)
497 '''
497 '''
498
498
499 def __init__(self, root, callback=None):
499 def __init__(self, root, callback=None):
500 self.audited = set()
500 self.audited = set()
501 self.auditeddir = set()
501 self.auditeddir = set()
502 self.root = root
502 self.root = root
503 self.callback = callback
503 self.callback = callback
504
504
505 def __call__(self, path):
505 def __call__(self, path):
506 if path in self.audited:
506 if path in self.audited:
507 return
507 return
508 normpath = os.path.normcase(path)
508 normpath = os.path.normcase(path)
509 parts = splitpath(normpath)
509 parts = splitpath(normpath)
510 if (os.path.splitdrive(path)[0]
510 if (os.path.splitdrive(path)[0]
511 or parts[0].lower() in ('.hg', '.hg.', '')
511 or parts[0].lower() in ('.hg', '.hg.', '')
512 or os.pardir in parts):
512 or os.pardir in parts):
513 raise Abort(_("path contains illegal component: %s") % path)
513 raise Abort(_("path contains illegal component: %s") % path)
514 if '.hg' in path.lower():
514 if '.hg' in path.lower():
515 lparts = [p.lower() for p in parts]
515 lparts = [p.lower() for p in parts]
516 for p in '.hg', '.hg.':
516 for p in '.hg', '.hg.':
517 if p in lparts[1:]:
517 if p in lparts[1:]:
518 pos = lparts.index(p)
518 pos = lparts.index(p)
519 base = os.path.join(*parts[:pos])
519 base = os.path.join(*parts[:pos])
520 raise Abort(_('path %r is inside repo %r') % (path, base))
520 raise Abort(_('path %r is inside repo %r') % (path, base))
521 def check(prefix):
521 def check(prefix):
522 curpath = os.path.join(self.root, prefix)
522 curpath = os.path.join(self.root, prefix)
523 try:
523 try:
524 st = os.lstat(curpath)
524 st = os.lstat(curpath)
525 except OSError, err:
525 except OSError, err:
526 # EINVAL can be raised as invalid path syntax under win32.
526 # EINVAL can be raised as invalid path syntax under win32.
527 # They must be ignored so that patterns can be checked too.
527 # They must be ignored so that patterns can be checked too.
528 if err.errno not in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL):
528 if err.errno not in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL):
529 raise
529 raise
530 else:
530 else:
531 if stat.S_ISLNK(st.st_mode):
531 if stat.S_ISLNK(st.st_mode):
532 raise Abort(_('path %r traverses symbolic link %r') %
532 raise Abort(_('path %r traverses symbolic link %r') %
533 (path, prefix))
533 (path, prefix))
534 elif (stat.S_ISDIR(st.st_mode) and
534 elif (stat.S_ISDIR(st.st_mode) and
535 os.path.isdir(os.path.join(curpath, '.hg'))):
535 os.path.isdir(os.path.join(curpath, '.hg'))):
536 if not self.callback or not self.callback(curpath):
536 if not self.callback or not self.callback(curpath):
537 raise Abort(_('path %r is inside repo %r') %
537 raise Abort(_('path %r is inside repo %r') %
538 (path, prefix))
538 (path, prefix))
539 parts.pop()
539 parts.pop()
540 prefixes = []
540 prefixes = []
541 while parts:
541 while parts:
542 prefix = os.sep.join(parts)
542 prefix = os.sep.join(parts)
543 if prefix in self.auditeddir:
543 if prefix in self.auditeddir:
544 break
544 break
545 check(prefix)
545 check(prefix)
546 prefixes.append(prefix)
546 prefixes.append(prefix)
547 parts.pop()
547 parts.pop()
548
548
549 self.audited.add(path)
549 self.audited.add(path)
550 # only add prefixes to the cache after checking everything: we don't
550 # only add prefixes to the cache after checking everything: we don't
551 # want to add "foo/bar/baz" before checking if there's a "foo/.hg"
551 # want to add "foo/bar/baz" before checking if there's a "foo/.hg"
552 self.auditeddir.update(prefixes)
552 self.auditeddir.update(prefixes)
553
553
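A sketch of how the auditor above rejects banned paths (the repository root is a placeholder; the exception raised is mercurial's Abort, as in the code above):

    from mercurial import util

    auditor = util.path_auditor('/path/to/repo')
    auditor('foo/bar.txt')      # accepted: nothing banned in the path
    try:
        auditor('.hg/hgrc')     # rejected: under the top-level .hg
    except Exception, inst:
        print inst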
554 def nlinks(pathname):
554 def nlinks(pathname):
555 """Return number of hardlinks for the given file."""
555 """Return number of hardlinks for the given file."""
556 return os.lstat(pathname).st_nlink
556 return os.lstat(pathname).st_nlink
557
557
558 if hasattr(os, 'link'):
558 if hasattr(os, 'link'):
559 os_link = os.link
559 os_link = os.link
560 else:
560 else:
561 def os_link(src, dst):
561 def os_link(src, dst):
562 raise OSError(0, _("Hardlinks not supported"))
562 raise OSError(0, _("Hardlinks not supported"))
563
563
564 def lookup_reg(key, name=None, scope=None):
564 def lookup_reg(key, name=None, scope=None):
565 return None
565 return None
566
566
567 def hidewindow():
567 def hidewindow():
568 """Hide current shell window.
568 """Hide current shell window.
569
569
570 Used to hide the window opened when starting an asynchronous
570 Used to hide the window opened when starting an asynchronous
571 child process under Windows; unneeded on other systems.
571 child process under Windows; unneeded on other systems.
572 """
572 """
573 pass
573 pass
574
574
575 if os.name == 'nt':
575 if os.name == 'nt':
576 from windows import *
576 from windows import *
577 else:
577 else:
578 from posix import *
578 from posix import *
579
579
580 def makelock(info, pathname):
580 def makelock(info, pathname):
581 try:
581 try:
582 return os.symlink(info, pathname)
582 return os.symlink(info, pathname)
583 except OSError, why:
583 except OSError, why:
584 if why.errno == errno.EEXIST:
584 if why.errno == errno.EEXIST:
585 raise
585 raise
586 except AttributeError: # no symlink in os
586 except AttributeError: # no symlink in os
587 pass
587 pass
588
588
589 ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
589 ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
590 os.write(ld, info)
590 os.write(ld, info)
591 os.close(ld)
591 os.close(ld)
592
592
593 def readlock(pathname):
593 def readlock(pathname):
594 try:
594 try:
595 return os.readlink(pathname)
595 return os.readlink(pathname)
596 except OSError, why:
596 except OSError, why:
597 if why.errno not in (errno.EINVAL, errno.ENOSYS):
597 if why.errno not in (errno.EINVAL, errno.ENOSYS):
598 raise
598 raise
599 except AttributeError: # no symlink in os
599 except AttributeError: # no symlink in os
600 pass
600 pass
601 return posixfile(pathname).read()
601 return posixfile(pathname).read()
602
602
603 def fstat(fp):
603 def fstat(fp):
604 '''stat file object that may not have fileno method.'''
604 '''stat file object that may not have fileno method.'''
605 try:
605 try:
606 return os.fstat(fp.fileno())
606 return os.fstat(fp.fileno())
607 except AttributeError:
607 except AttributeError:
608 return os.stat(fp.name)
608 return os.stat(fp.name)
609
609
610 # File system features
610 # File system features
611
611
612 def checkcase(path):
612 def checkcase(path):
613 """
613 """
614 Check whether the given path is on a case-sensitive filesystem
614 Check whether the given path is on a case-sensitive filesystem
615
615
616 Requires a path (like /foo/.hg) ending with a foldable final
616 Requires a path (like /foo/.hg) ending with a foldable final
617 directory component.
617 directory component.
618 """
618 """
619 s1 = os.stat(path)
619 s1 = os.stat(path)
620 d, b = os.path.split(path)
620 d, b = os.path.split(path)
621 p2 = os.path.join(d, b.upper())
621 p2 = os.path.join(d, b.upper())
622 if path == p2:
622 if path == p2:
623 p2 = os.path.join(d, b.lower())
623 p2 = os.path.join(d, b.lower())
624 try:
624 try:
625 s2 = os.stat(p2)
625 s2 = os.stat(p2)
626 if s2 == s1:
626 if s2 == s1:
627 return False
627 return False
628 return True
628 return True
629 except:
629 except:
630 return True
630 return True
631
631
632 _fspathcache = {}
632 _fspathcache = {}
633 def fspath(name, root):
633 def fspath(name, root):
634 '''Get name in the case stored in the filesystem
634 '''Get name in the case stored in the filesystem
635
635
636 The name is either relative to root, or it is an absolute path starting
636 The name is either relative to root, or it is an absolute path starting
637 with root. Note that this function is unnecessary, and should not be
637 with root. Note that this function is unnecessary, and should not be
638 called, for case-sensitive filesystems (simply because it's expensive).
638 called, for case-sensitive filesystems (simply because it's expensive).
639 '''
639 '''
640 # If name is absolute, make it relative
640 # If name is absolute, make it relative
641 if name.lower().startswith(root.lower()):
641 if name.lower().startswith(root.lower()):
642 l = len(root)
642 l = len(root)
643 if name[l] == os.sep or name[l] == os.altsep:
643 if name[l] == os.sep or name[l] == os.altsep:
644 l = l + 1
644 l = l + 1
645 name = name[l:]
645 name = name[l:]
646
646
647 if not os.path.lexists(os.path.join(root, name)):
647 if not os.path.lexists(os.path.join(root, name)):
648 return None
648 return None
649
649
650 seps = os.sep
650 seps = os.sep
651 if os.altsep:
651 if os.altsep:
652 seps = seps + os.altsep
652 seps = seps + os.altsep
653 # Protect backslashes. This gets silly very quickly.
653 # Protect backslashes. This gets silly very quickly.
654 seps = seps.replace('\\','\\\\')
654 seps = seps.replace('\\','\\\\')
655 pattern = re.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
655 pattern = re.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
656 dir = os.path.normcase(os.path.normpath(root))
656 dir = os.path.normcase(os.path.normpath(root))
657 result = []
657 result = []
658 for part, sep in pattern.findall(name):
658 for part, sep in pattern.findall(name):
659 if sep:
659 if sep:
660 result.append(sep)
660 result.append(sep)
661 continue
661 continue
662
662
663 if dir not in _fspathcache:
663 if dir not in _fspathcache:
664 _fspathcache[dir] = os.listdir(dir)
664 _fspathcache[dir] = os.listdir(dir)
665 contents = _fspathcache[dir]
665 contents = _fspathcache[dir]
666
666
667 lpart = part.lower()
667 lpart = part.lower()
668 lenp = len(part)
668 lenp = len(part)
669 for n in contents:
669 for n in contents:
670 if lenp == len(n) and n.lower() == lpart:
670 if lenp == len(n) and n.lower() == lpart:
671 result.append(n)
671 result.append(n)
672 break
672 break
673 else:
673 else:
674 # Cannot happen, as the file exists!
674 # Cannot happen, as the file exists!
675 result.append(part)
675 result.append(part)
676 dir = os.path.join(dir, lpart)
676 dir = os.path.join(dir, lpart)
677
677
678 return ''.join(result)
678 return ''.join(result)
679
679
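A sketch of fspath() on a case-insensitive filesystem (paths are placeholders); it returns the spelling stored on disk, or None if the name does not exist under root:

    from mercurial import util

    # asking for 'readme.TXT' yields e.g. 'README.txt' as stored on disk
    stored = util.fspath('readme.TXT', '/path/to/repo')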
680 def checkexec(path):
680 def checkexec(path):
681 """
681 """
682 Check whether the given path is on a filesystem with UNIX-like exec flags
682 Check whether the given path is on a filesystem with UNIX-like exec flags
683
683
684 Requires a directory (like /foo/.hg)
684 Requires a directory (like /foo/.hg)
685 """
685 """
686
686
687 # VFAT on some Linux versions can flip mode but it doesn't persist
687 # VFAT on some Linux versions can flip mode but it doesn't persist
688 # a FS remount. Frequently we can detect it if files are created
688 # a FS remount. Frequently we can detect it if files are created
689 # with exec bit on.
689 # with exec bit on.
690
690
691 try:
691 try:
692 EXECFLAGS = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
692 EXECFLAGS = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
693 fh, fn = tempfile.mkstemp(dir=path, prefix='hg-checkexec-')
693 fh, fn = tempfile.mkstemp(dir=path, prefix='hg-checkexec-')
694 try:
694 try:
695 os.close(fh)
695 os.close(fh)
696 m = os.stat(fn).st_mode & 0777
696 m = os.stat(fn).st_mode & 0777
697 new_file_has_exec = m & EXECFLAGS
697 new_file_has_exec = m & EXECFLAGS
698 os.chmod(fn, m ^ EXECFLAGS)
698 os.chmod(fn, m ^ EXECFLAGS)
699 exec_flags_cannot_flip = ((os.stat(fn).st_mode & 0777) == m)
699 exec_flags_cannot_flip = ((os.stat(fn).st_mode & 0777) == m)
700 finally:
700 finally:
701 os.unlink(fn)
701 os.unlink(fn)
702 except (IOError, OSError):
702 except (IOError, OSError):
703 # we don't care, the user probably won't be able to commit anyway
703 # we don't care, the user probably won't be able to commit anyway
704 return False
704 return False
705 return not (new_file_has_exec or exec_flags_cannot_flip)
705 return not (new_file_has_exec or exec_flags_cannot_flip)
706
706
707 def checklink(path):
707 def checklink(path):
708 """check whether the given path is on a symlink-capable filesystem"""
708 """check whether the given path is on a symlink-capable filesystem"""
709 # mktemp is not racy because symlink creation will fail if the
709 # mktemp is not racy because symlink creation will fail if the
710 # file already exists
710 # file already exists
711 name = tempfile.mktemp(dir=path, prefix='hg-checklink-')
711 name = tempfile.mktemp(dir=path, prefix='hg-checklink-')
712 try:
712 try:
713 os.symlink(".", name)
713 os.symlink(".", name)
714 os.unlink(name)
714 os.unlink(name)
715 return True
715 return True
716 except (OSError, AttributeError):
716 except (OSError, AttributeError):
717 return False
717 return False
718
718
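Both probes above expect an existing directory in which they can create their temporary test files, typically the repository's .hg directory; a hedged sketch (the path is a placeholder):

    from mercurial import util

    supports_exec = util.checkexec('/path/to/repo/.hg')
    supports_links = util.checklink('/path/to/repo/.hg')
    print supports_exec, supports_links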
719 def checknlink(testfile):
719 def checknlink(testfile):
720 '''check whether hardlink count reporting works properly'''
720 '''check whether hardlink count reporting works properly'''
721 f = testfile + ".hgtmp"
721 f = testfile + ".hgtmp"
722
722
723 try:
723 try:
724 os_link(testfile, f)
724 os_link(testfile, f)
725 except OSError:
725 except OSError:
726 return False
726 return False
727
727
728 try:
728 try:
729 # nlinks() may behave differently for files on Windows shares if
729 # nlinks() may behave differently for files on Windows shares if
730 # the file is open.
730 # the file is open.
731 fd = open(f)
731 fd = open(f)
732 return nlinks(f) > 1
732 return nlinks(f) > 1
733 finally:
733 finally:
734 fd.close()
734 fd.close()
735 os.unlink(f)
735 os.unlink(f)
736
736
737 return False
737 return False
738
738
739 def endswithsep(path):
739 def endswithsep(path):
740 '''Check path ends with os.sep or os.altsep.'''
740 '''Check path ends with os.sep or os.altsep.'''
741 return path.endswith(os.sep) or os.altsep and path.endswith(os.altsep)
741 return path.endswith(os.sep) or os.altsep and path.endswith(os.altsep)
742
742
743 def splitpath(path):
743 def splitpath(path):
744 '''Split path by os.sep.
744 '''Split path by os.sep.
745 Note that this function does not use os.altsep because it is
745 Note that this function does not use os.altsep because it is
746 meant as a simple alternative to "xxx.split(os.sep)".
746 meant as a simple alternative to "xxx.split(os.sep)".
747 It is recommended to use os.path.normpath() before using this
747 It is recommended to use os.path.normpath() before using this
748 function if needed.'''
748 function if needed.'''
749 return path.split(os.sep)
749 return path.split(os.sep)
750
750
751 def gui():
751 def gui():
752 '''Are we running in a GUI?'''
752 '''Are we running in a GUI?'''
753 return os.name == "nt" or os.name == "mac" or os.environ.get("DISPLAY")
753 return os.name == "nt" or os.name == "mac" or os.environ.get("DISPLAY")
754
754
755 def mktempcopy(name, emptyok=False, createmode=None):
755 def mktempcopy(name, emptyok=False, createmode=None):
756 """Create a temporary file with the same contents from name
756 """Create a temporary file with the same contents from name
757
757
758 The permission bits are copied from the original file.
758 The permission bits are copied from the original file.
759
759
760 If the temporary file is going to be truncated immediately, you
760 If the temporary file is going to be truncated immediately, you
761 can use emptyok=True as an optimization.
761 can use emptyok=True as an optimization.
762
762
763 Returns the name of the temporary file.
763 Returns the name of the temporary file.
764 """
764 """
765 d, fn = os.path.split(name)
765 d, fn = os.path.split(name)
766 fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
766 fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
767 os.close(fd)
767 os.close(fd)
768 # Temporary files are created with mode 0600, which is usually not
768 # Temporary files are created with mode 0600, which is usually not
769 # what we want. If the original file already exists, just copy
769 # what we want. If the original file already exists, just copy
770 # its mode. Otherwise, manually obey umask.
770 # its mode. Otherwise, manually obey umask.
771 try:
771 try:
772 st_mode = os.lstat(name).st_mode & 0777
772 st_mode = os.lstat(name).st_mode & 0777
773 except OSError, inst:
773 except OSError, inst:
774 if inst.errno != errno.ENOENT:
774 if inst.errno != errno.ENOENT:
775 raise
775 raise
776 st_mode = createmode
776 st_mode = createmode
777 if st_mode is None:
777 if st_mode is None:
778 st_mode = ~umask
778 st_mode = ~umask
779 st_mode &= 0666
779 st_mode &= 0666
780 os.chmod(temp, st_mode)
780 os.chmod(temp, st_mode)
781 if emptyok:
781 if emptyok:
782 return temp
782 return temp
783 try:
783 try:
784 try:
784 try:
785 ifp = posixfile(name, "rb")
785 ifp = posixfile(name, "rb")
786 except IOError, inst:
786 except IOError, inst:
787 if inst.errno == errno.ENOENT:
787 if inst.errno == errno.ENOENT:
788 return temp
788 return temp
789 if not getattr(inst, 'filename', None):
789 if not getattr(inst, 'filename', None):
790 inst.filename = name
790 inst.filename = name
791 raise
791 raise
792 ofp = posixfile(temp, "wb")
792 ofp = posixfile(temp, "wb")
793 for chunk in filechunkiter(ifp):
793 for chunk in filechunkiter(ifp):
794 ofp.write(chunk)
794 ofp.write(chunk)
795 ifp.close()
795 ifp.close()
796 ofp.close()
796 ofp.close()
797 except:
797 except:
798 try: os.unlink(temp)
798 try: os.unlink(temp)
799 except: pass
799 except: pass
800 raise
800 raise
801 return temp
801 return temp
802
802
803 class atomictempfile(object):
803 class atomictempfile(object):
804 """file-like object that atomically updates a file
804 """file-like object that atomically updates a file
805
805
806 All writes will be redirected to a temporary copy of the original
806 All writes will be redirected to a temporary copy of the original
807 file. When rename is called, the copy is renamed to the original
807 file. When rename is called, the copy is renamed to the original
808 name, making the changes visible.
808 name, making the changes visible.
809 """
809 """
810 def __init__(self, name, mode='w+b', createmode=None):
810 def __init__(self, name, mode='w+b', createmode=None):
811 self.__name = name
811 self.__name = name
812 self._fp = None
812 self._fp = None
813 self.temp = mktempcopy(name, emptyok=('w' in mode),
813 self.temp = mktempcopy(name, emptyok=('w' in mode),
814 createmode=createmode)
814 createmode=createmode)
815 self._fp = posixfile(self.temp, mode)
815 self._fp = posixfile(self.temp, mode)
816
816
817 def __getattr__(self, name):
817 def __getattr__(self, name):
818 return getattr(self._fp, name)
818 return getattr(self._fp, name)
819
819
820 def rename(self):
820 def rename(self):
821 if not self._fp.closed:
821 if not self._fp.closed:
822 self._fp.close()
822 self._fp.close()
823 rename(self.temp, localpath(self.__name))
823 rename(self.temp, localpath(self.__name))
824
824
825 def close(self):
825 def close(self):
826 if not self._fp:
826 if not self._fp:
827 return
827 return
828 if not self._fp.closed:
828 if not self._fp.closed:
829 try:
829 try:
830 os.unlink(self.temp)
830 os.unlink(self.temp)
831 except: pass
831 except: pass
832 self._fp.close()
832 self._fp.close()
833
833
834 def __del__(self):
834 def __del__(self):
835 self.close()
835 self.close()
836
836
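A sketch of the atomictempfile contract (the file name is a placeholder): writes go to a temporary copy, and only rename() publishes them; closing or discarding the object without rename() drops the changes.

    from mercurial import util

    f = util.atomictempfile('/path/to/repo/.hg/somefile')
    f.write('new contents\n')
    f.rename()    # atomically replaces the original file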
837 def makedirs(name, mode=None):
837 def makedirs(name, mode=None):
838 """recursive directory creation with parent mode inheritance"""
838 """recursive directory creation with parent mode inheritance"""
839 parent = os.path.abspath(os.path.dirname(name))
839 parent = os.path.abspath(os.path.dirname(name))
840 try:
840 try:
841 os.mkdir(name)
841 os.mkdir(name)
842 if mode is not None:
842 if mode is not None:
843 os.chmod(name, mode)
843 os.chmod(name, mode)
844 return
844 return
845 except OSError, err:
845 except OSError, err:
846 if err.errno == errno.EEXIST:
846 if err.errno == errno.EEXIST:
847 return
847 return
848 if not name or parent == name or err.errno != errno.ENOENT:
848 if not name or parent == name or err.errno != errno.ENOENT:
849 raise
849 raise
850 makedirs(parent, mode)
850 makedirs(parent, mode)
851 makedirs(name, mode)
851 makedirs(name, mode)
852
852
853 class opener(object):
853 class opener(object):
854 """Open files relative to a base directory
854 """Open files relative to a base directory
855
855
856 This class is used to hide the details of COW semantics and
856 This class is used to hide the details of COW semantics and
857 remote file access from higher level code.
857 remote file access from higher level code.
858 """
858 """
859 def __init__(self, base, audit=True):
859 def __init__(self, base, audit=True):
860 self.base = base
860 self.base = base
861 if audit:
861 if audit:
862 self.auditor = path_auditor(base)
862 self.auditor = path_auditor(base)
863 else:
863 else:
864 self.auditor = always
864 self.auditor = always
865 self.createmode = None
865 self.createmode = None
866 self._trustnlink = None
866 self._trustnlink = None
867
867
868 @propertycache
868 @propertycache
869 def _can_symlink(self):
869 def _can_symlink(self):
870 return checklink(self.base)
870 return checklink(self.base)
871
871
872 def _fixfilemode(self, name):
872 def _fixfilemode(self, name):
873 if self.createmode is None:
873 if self.createmode is None:
874 return
874 return
875 os.chmod(name, self.createmode & 0666)
875 os.chmod(name, self.createmode & 0666)
876
876
877 def __call__(self, path, mode="r", text=False, atomictemp=False):
877 def __call__(self, path, mode="r", text=False, atomictemp=False):
878 self.auditor(path)
878 self.auditor(path)
879 f = os.path.join(self.base, path)
879 f = os.path.join(self.base, path)
880
880
881 if not text and "b" not in mode:
881 if not text and "b" not in mode:
882 mode += "b" # for that other OS
882 mode += "b" # for that other OS
883
883
884 nlink = -1
884 nlink = -1
885 st_mode = None
886 dirname, basename = os.path.split(f)
885 dirname, basename = os.path.split(f)
887 # If basename is empty, then the path is malformed because it points
886 # If basename is empty, then the path is malformed because it points
888 # to a directory. Let the posixfile() call below raise IOError.
887 # to a directory. Let the posixfile() call below raise IOError.
889 if basename and mode not in ('r', 'rb'):
888 if basename and mode not in ('r', 'rb'):
890 if atomictemp:
889 if atomictemp:
891 if not os.path.isdir(dirname):
890 if not os.path.isdir(dirname):
892 makedirs(dirname, self.createmode)
891 makedirs(dirname, self.createmode)
893 return atomictempfile(f, mode, self.createmode)
892 return atomictempfile(f, mode, self.createmode)
894 try:
893 try:
895 if 'w' in mode:
894 if 'w' in mode:
896 st_mode = os.lstat(f).st_mode & 0777
897 os.unlink(f)
895 os.unlink(f)
898 nlink = 0
896 nlink = 0
899 else:
897 else:
900 # nlinks() may behave differently for files on Windows
898 # nlinks() may behave differently for files on Windows
901 # shares if the file is open.
899 # shares if the file is open.
902 fd = open(f)
900 fd = open(f)
903 nlink = nlinks(f)
901 nlink = nlinks(f)
904 fd.close()
902 fd.close()
905 except (OSError, IOError):
903 except (OSError, IOError):
906 nlink = 0
904 nlink = 0
907 if not os.path.isdir(dirname):
905 if not os.path.isdir(dirname):
908 makedirs(dirname, self.createmode)
906 makedirs(dirname, self.createmode)
909 if nlink > 0:
907 if nlink > 0:
910 if self._trustnlink is None:
908 if self._trustnlink is None:
911 self._trustnlink = nlink > 1 or checknlink(f)
909 self._trustnlink = nlink > 1 or checknlink(f)
912 if nlink > 1 or not self._trustnlink:
910 if nlink > 1 or not self._trustnlink:
913 rename(mktempcopy(f), f)
911 rename(mktempcopy(f), f)
914 fp = posixfile(f, mode)
912 fp = posixfile(f, mode)
915 if nlink == 0:
913 if nlink == 0:
916 if st_mode is None:
914 self._fixfilemode(f)
917 self._fixfilemode(f)
918 else:
919 os.chmod(f, st_mode)
920 return fp
915 return fp
921
916
922 def symlink(self, src, dst):
917 def symlink(self, src, dst):
923 self.auditor(dst)
918 self.auditor(dst)
924 linkname = os.path.join(self.base, dst)
919 linkname = os.path.join(self.base, dst)
925 try:
920 try:
926 os.unlink(linkname)
921 os.unlink(linkname)
927 except OSError:
922 except OSError:
928 pass
923 pass
929
924
930 dirname = os.path.dirname(linkname)
925 dirname = os.path.dirname(linkname)
931 if not os.path.exists(dirname):
926 if not os.path.exists(dirname):
932 makedirs(dirname, self.createmode)
927 makedirs(dirname, self.createmode)
933
928
934 if self._can_symlink:
929 if self._can_symlink:
935 try:
930 try:
936 os.symlink(src, linkname)
931 os.symlink(src, linkname)
937 except OSError, err:
932 except OSError, err:
938 raise OSError(err.errno, _('could not symlink to %r: %s') %
933 raise OSError(err.errno, _('could not symlink to %r: %s') %
939 (src, err.strerror), linkname)
934 (src, err.strerror), linkname)
940 else:
935 else:
941 f = self(dst, "w")
936 f = self(dst, "w")
942 f.write(src)
937 f.write(src)
943 f.close()
938 f.close()
944 self._fixfilemode(dst)
939 self._fixfilemode(dst)
945
940
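A sketch of opener usage (the base directory and file names are placeholders): paths are audited relative to the base, missing parent directories are created on write, and, per the change shown above, the original file's mode is captured before the unlink on 'w' and restored afterwards.

    from mercurial import util

    op = util.opener('/path/to/repo/.hg')
    f = op('store/somefile', 'w')    # audited, parent directories created
    f.write('data')
    f.close()
    print op('store/somefile', 'r').read()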
946 class chunkbuffer(object):
941 class chunkbuffer(object):
947 """Allow arbitrary sized chunks of data to be efficiently read from an
942 """Allow arbitrary sized chunks of data to be efficiently read from an
948 iterator over chunks of arbitrary size."""
943 iterator over chunks of arbitrary size."""
949
944
950 def __init__(self, in_iter):
945 def __init__(self, in_iter):
951 """in_iter is the iterator that's iterating over the input chunks.
946 """in_iter is the iterator that's iterating over the input chunks.
952 targetsize is how big a buffer to try to maintain."""
947 targetsize is how big a buffer to try to maintain."""
953 def splitbig(chunks):
948 def splitbig(chunks):
954 for chunk in chunks:
949 for chunk in chunks:
955 if len(chunk) > 2**20:
950 if len(chunk) > 2**20:
956 pos = 0
951 pos = 0
957 while pos < len(chunk):
952 while pos < len(chunk):
958 end = pos + 2 ** 18
953 end = pos + 2 ** 18
959 yield chunk[pos:end]
954 yield chunk[pos:end]
960 pos = end
955 pos = end
961 else:
956 else:
962 yield chunk
957 yield chunk
963 self.iter = splitbig(in_iter)
958 self.iter = splitbig(in_iter)
964 self._queue = []
959 self._queue = []
965
960
966 def read(self, l):
961 def read(self, l):
967 """Read L bytes of data from the iterator of chunks of data.
962 """Read L bytes of data from the iterator of chunks of data.
968 Returns less than L bytes if the iterator runs dry."""
963 Returns less than L bytes if the iterator runs dry."""
969 left = l
964 left = l
970 buf = ''
965 buf = ''
971 queue = self._queue
966 queue = self._queue
972 while left > 0:
967 while left > 0:
973 # refill the queue
968 # refill the queue
974 if not queue:
969 if not queue:
975 target = 2**18
970 target = 2**18
976 for chunk in self.iter:
971 for chunk in self.iter:
977 queue.append(chunk)
972 queue.append(chunk)
978 target -= len(chunk)
973 target -= len(chunk)
979 if target <= 0:
974 if target <= 0:
980 break
975 break
981 if not queue:
976 if not queue:
982 break
977 break
983
978
984 chunk = queue.pop(0)
979 chunk = queue.pop(0)
985 left -= len(chunk)
980 left -= len(chunk)
986 if left < 0:
981 if left < 0:
987 queue.insert(0, chunk[left:])
982 queue.insert(0, chunk[left:])
988 buf += chunk[:left]
983 buf += chunk[:left]
989 else:
984 else:
990 buf += chunk
985 buf += chunk
991
986
992 return buf
987 return buf
993
988
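A small worked example of chunkbuffer (values chosen for illustration): it re-chunks an iterator of arbitrarily sized strings into reads of the requested length, returning short only once the iterator runs dry.

    from mercurial import util

    cb = util.chunkbuffer(iter(['ab', 'cdef', 'g']))
    print cb.read(3)    # -> 'abc'
    print cb.read(10)   # -> 'defg' (iterator exhausted, short read)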
994 def filechunkiter(f, size=65536, limit=None):
989 def filechunkiter(f, size=65536, limit=None):
995 """Create a generator that produces the data in the file size
990 """Create a generator that produces the data in the file size
996 (default 65536) bytes at a time, up to optional limit (default is
991 (default 65536) bytes at a time, up to optional limit (default is
997 to read all data). Chunks may be less than size bytes if the
992 to read all data). Chunks may be less than size bytes if the
998 chunk is the last chunk in the file, or the file is a socket or
993 chunk is the last chunk in the file, or the file is a socket or
999 some other type of file that sometimes reads less data than is
994 some other type of file that sometimes reads less data than is
1000 requested."""
995 requested."""
1001 assert size >= 0
996 assert size >= 0
1002 assert limit is None or limit >= 0
997 assert limit is None or limit >= 0
1003 while True:
998 while True:
1004 if limit is None:
999 if limit is None:
1005 nbytes = size
1000 nbytes = size
1006 else:
1001 else:
1007 nbytes = min(limit, size)
1002 nbytes = min(limit, size)
1008 s = nbytes and f.read(nbytes)
1003 s = nbytes and f.read(nbytes)
1009 if not s:
1004 if not s:
1010 break
1005 break
1011 if limit:
1006 if limit:
1012 limit -= len(s)
1007 limit -= len(s)
1013 yield s
1008 yield s
1014
1009
1015 def makedate():
1010 def makedate():
1016 lt = time.localtime()
1011 lt = time.localtime()
1017 if lt[8] == 1 and time.daylight:
1012 if lt[8] == 1 and time.daylight:
1018 tz = time.altzone
1013 tz = time.altzone
1019 else:
1014 else:
1020 tz = time.timezone
1015 tz = time.timezone
1021 t = time.mktime(lt)
1016 t = time.mktime(lt)
1022 if t < 0:
1017 if t < 0:
1023 hint = _("check your clock")
1018 hint = _("check your clock")
1024 raise Abort(_("negative timestamp: %d") % t, hint=hint)
1019 raise Abort(_("negative timestamp: %d") % t, hint=hint)
1025 return t, tz
1020 return t, tz
1026
1021
1027 def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
1022 def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
1028 """represent a (unixtime, offset) tuple as a localized time.
1023 """represent a (unixtime, offset) tuple as a localized time.
1029 unixtime is seconds since the epoch, and offset is the time zone's
1024 unixtime is seconds since the epoch, and offset is the time zone's
1030 number of seconds away from UTC. if timezone is false, do not
1025 number of seconds away from UTC. if timezone is false, do not
1031 append time zone to string."""
1026 append time zone to string."""
1032 t, tz = date or makedate()
1027 t, tz = date or makedate()
1033 if t < 0:
1028 if t < 0:
1034 t = 0 # time.gmtime(lt) fails on Windows for lt < -43200
1029 t = 0 # time.gmtime(lt) fails on Windows for lt < -43200
1035 tz = 0
1030 tz = 0
1036 if "%1" in format or "%2" in format:
1031 if "%1" in format or "%2" in format:
1037 sign = (tz > 0) and "-" or "+"
1032 sign = (tz > 0) and "-" or "+"
1038 minutes = abs(tz) // 60
1033 minutes = abs(tz) // 60
1039 format = format.replace("%1", "%c%02d" % (sign, minutes // 60))
1034 format = format.replace("%1", "%c%02d" % (sign, minutes // 60))
1040 format = format.replace("%2", "%02d" % (minutes % 60))
1035 format = format.replace("%2", "%02d" % (minutes % 60))
1041 s = time.strftime(format, time.gmtime(float(t) - tz))
1036 s = time.strftime(format, time.gmtime(float(t) - tz))
1042 return s
1037 return s
1043
1038
1044 def shortdate(date=None):
1039 def shortdate(date=None):
1045 """turn (timestamp, tzoff) tuple into iso 8631 date."""
1040 """turn (timestamp, tzoff) tuple into iso 8631 date."""
1046 return datestr(date, format='%Y-%m-%d')
1041 return datestr(date, format='%Y-%m-%d')
1047
1042
1048 def strdate(string, format, defaults=[]):
1043 def strdate(string, format, defaults=[]):
1049 """parse a localized time string and return a (unixtime, offset) tuple.
1044 """parse a localized time string and return a (unixtime, offset) tuple.
1050 if the string cannot be parsed, ValueError is raised."""
1045 if the string cannot be parsed, ValueError is raised."""
1051 def timezone(string):
1046 def timezone(string):
1052 tz = string.split()[-1]
1047 tz = string.split()[-1]
1053 if tz[0] in "+-" and len(tz) == 5 and tz[1:].isdigit():
1048 if tz[0] in "+-" and len(tz) == 5 and tz[1:].isdigit():
1054 sign = (tz[0] == "+") and 1 or -1
1049 sign = (tz[0] == "+") and 1 or -1
1055 hours = int(tz[1:3])
1050 hours = int(tz[1:3])
1056 minutes = int(tz[3:5])
1051 minutes = int(tz[3:5])
1057 return -sign * (hours * 60 + minutes) * 60
1052 return -sign * (hours * 60 + minutes) * 60
1058 if tz == "GMT" or tz == "UTC":
1053 if tz == "GMT" or tz == "UTC":
1059 return 0
1054 return 0
1060 return None
1055 return None
1061
1056
1062 # NOTE: unixtime = localunixtime + offset
1057 # NOTE: unixtime = localunixtime + offset
1063 offset, date = timezone(string), string
1058 offset, date = timezone(string), string
1064 if offset is not None:
1059 if offset is not None:
1065 date = " ".join(string.split()[:-1])
1060 date = " ".join(string.split()[:-1])
1066
1061
1067 # add missing elements from defaults
1062 # add missing elements from defaults
1068 for part in defaults:
1063 for part in defaults:
1069 found = [True for p in part if ("%"+p) in format]
1064 found = [True for p in part if ("%"+p) in format]
1070 if not found:
1065 if not found:
1071 date += "@" + defaults[part]
1066 date += "@" + defaults[part]
1072 format += "@%" + part[0]
1067 format += "@%" + part[0]
1073
1068
1074 timetuple = time.strptime(date, format)
1069 timetuple = time.strptime(date, format)
1075 localunixtime = int(calendar.timegm(timetuple))
1070 localunixtime = int(calendar.timegm(timetuple))
1076 if offset is None:
1071 if offset is None:
1077 # local timezone
1072 # local timezone
1078 unixtime = int(time.mktime(timetuple))
1073 unixtime = int(time.mktime(timetuple))
1079 offset = unixtime - localunixtime
1074 offset = unixtime - localunixtime
1080 else:
1075 else:
1081 unixtime = localunixtime + offset
1076 unixtime = localunixtime + offset
1082 return unixtime, offset
1077 return unixtime, offset
1083
1078
1084 def parsedate(date, formats=None, defaults=None):
1079 def parsedate(date, formats=None, defaults=None):
1085 """parse a localized date/time string and return a (unixtime, offset) tuple.
1080 """parse a localized date/time string and return a (unixtime, offset) tuple.
1086
1081
1087 The date may be a "unixtime offset" string or in one of the specified
1082 The date may be a "unixtime offset" string or in one of the specified
1088 formats. If the date already is a (unixtime, offset) tuple, it is returned.
1083 formats. If the date already is a (unixtime, offset) tuple, it is returned.
1089 """
1084 """
1090 if not date:
1085 if not date:
1091 return 0, 0
1086 return 0, 0
1092 if isinstance(date, tuple) and len(date) == 2:
1087 if isinstance(date, tuple) and len(date) == 2:
1093 return date
1088 return date
1094 if not formats:
1089 if not formats:
1095 formats = defaultdateformats
1090 formats = defaultdateformats
1096 date = date.strip()
1091 date = date.strip()
1097 try:
1092 try:
1098 when, offset = map(int, date.split(' '))
1093 when, offset = map(int, date.split(' '))
1099 except ValueError:
1094 except ValueError:
1100 # fill out defaults
1095 # fill out defaults
1101 if not defaults:
1096 if not defaults:
1102 defaults = {}
1097 defaults = {}
1103 now = makedate()
1098 now = makedate()
1104 for part in "d mb yY HI M S".split():
1099 for part in "d mb yY HI M S".split():
1105 if part not in defaults:
1100 if part not in defaults:
1106 if part[0] in "HMS":
1101 if part[0] in "HMS":
1107 defaults[part] = "00"
1102 defaults[part] = "00"
1108 else:
1103 else:
1109 defaults[part] = datestr(now, "%" + part[0])
1104 defaults[part] = datestr(now, "%" + part[0])
1110
1105
1111 for format in formats:
1106 for format in formats:
1112 try:
1107 try:
1113 when, offset = strdate(date, format, defaults)
1108 when, offset = strdate(date, format, defaults)
1114 except (ValueError, OverflowError):
1109 except (ValueError, OverflowError):
1115 pass
1110 pass
1116 else:
1111 else:
1117 break
1112 break
1118 else:
1113 else:
1119 raise Abort(_('invalid date: %r') % date)
1114 raise Abort(_('invalid date: %r') % date)
1120 # validate explicit (probably user-specified) date and
1115 # validate explicit (probably user-specified) date and
1121 # time zone offset. values must fit in signed 32 bits for
1116 # time zone offset. values must fit in signed 32 bits for
1122 # current 32-bit linux runtimes. timezones go from UTC-12
1117 # current 32-bit linux runtimes. timezones go from UTC-12
1123 # to UTC+14
1118 # to UTC+14
1124 if abs(when) > 0x7fffffff:
1119 if abs(when) > 0x7fffffff:
1125 raise Abort(_('date exceeds 32 bits: %d') % when)
1120 raise Abort(_('date exceeds 32 bits: %d') % when)
1126 if when < 0:
1121 if when < 0:
1127 raise Abort(_('negative date value: %d') % when)
1122 raise Abort(_('negative date value: %d') % when)
1128 if offset < -50400 or offset > 43200:
1123 if offset < -50400 or offset > 43200:
1129 raise Abort(_('impossible time zone offset: %d') % offset)
1124 raise Abort(_('impossible time zone offset: %d') % offset)
1130 return when, offset
1125 return when, offset
1131
1126
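Two hedged examples of parsedate() (the date strings are illustrative; the second relies on the module's default date formats):

    from mercurial import util

    print util.parsedate('0 0')    # already a "unixtime offset" string -> (0, 0)
    when, offset = util.parsedate('2010-12-01 13:00 +0100')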
1132 def matchdate(date):
1127 def matchdate(date):
1133 """Return a function that matches a given date match specifier
1128 """Return a function that matches a given date match specifier
1134
1129
1135 Formats include:
1130 Formats include:
1136
1131
1137 '{date}' match a given date to the accuracy provided
1132 '{date}' match a given date to the accuracy provided
1138
1133
1139 '<{date}' on or before a given date
1134 '<{date}' on or before a given date
1140
1135
1141 '>{date}' on or after a given date
1136 '>{date}' on or after a given date
1142
1137
1143 """
1138 """
1144
1139
1145 def lower(date):
1140 def lower(date):
1146 d = dict(mb="1", d="1")
1141 d = dict(mb="1", d="1")
1147 return parsedate(date, extendeddateformats, d)[0]
1142 return parsedate(date, extendeddateformats, d)[0]
1148
1143
1149 def upper(date):
1144 def upper(date):
1150 d = dict(mb="12", HI="23", M="59", S="59")
1145 d = dict(mb="12", HI="23", M="59", S="59")
1151 for days in "31 30 29".split():
1146 for days in "31 30 29".split():
1152 try:
1147 try:
1153 d["d"] = days
1148 d["d"] = days
1154 return parsedate(date, extendeddateformats, d)[0]
1149 return parsedate(date, extendeddateformats, d)[0]
1155 except:
1150 except:
1156 pass
1151 pass
1157 d["d"] = "28"
1152 d["d"] = "28"
1158 return parsedate(date, extendeddateformats, d)[0]
1153 return parsedate(date, extendeddateformats, d)[0]
1159
1154
1160 date = date.strip()
1155 date = date.strip()
1161 if date[0] == "<":
1156 if date[0] == "<":
1162 when = upper(date[1:])
1157 when = upper(date[1:])
1163 return lambda x: x <= when
1158 return lambda x: x <= when
1164 elif date[0] == ">":
1159 elif date[0] == ">":
1165 when = lower(date[1:])
1160 when = lower(date[1:])
1166 return lambda x: x >= when
1161 return lambda x: x >= when
1167 elif date[0] == "-":
1162 elif date[0] == "-":
1168 try:
1163 try:
1169 days = int(date[1:])
1164 days = int(date[1:])
1170 except ValueError:
1165 except ValueError:
1171 raise Abort(_("invalid day spec: %s") % date[1:])
1166 raise Abort(_("invalid day spec: %s") % date[1:])
1172 when = makedate()[0] - days * 3600 * 24
1167 when = makedate()[0] - days * 3600 * 24
1173 return lambda x: x >= when
1168 return lambda x: x >= when
1174 elif " to " in date:
1169 elif " to " in date:
1175 a, b = date.split(" to ")
1170 a, b = date.split(" to ")
1176 start, stop = lower(a), upper(b)
1171 start, stop = lower(a), upper(b)
1177 return lambda x: x >= start and x <= stop
1172 return lambda x: x >= start and x <= stop
1178 else:
1173 else:
1179 start, stop = lower(date), upper(date)
1174 start, stop = lower(date), upper(date)
1180 return lambda x: x >= start and x <= stop
1175 return lambda x: x >= start and x <= stop
1181
1176
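A sketch of the matcher returned by matchdate() (specs follow the formats listed in its docstring):

    from mercurial import util

    recent = util.matchdate('-30')            # within the last 30 days
    print recent(util.makedate()[0])          # -> True
    before = util.matchdate('<2010-12-01')    # on or before that date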
1182 def shortuser(user):
1177 def shortuser(user):
1183 """Return a short representation of a user name or email address."""
1178 """Return a short representation of a user name or email address."""
1184 f = user.find('@')
1179 f = user.find('@')
1185 if f >= 0:
1180 if f >= 0:
1186 user = user[:f]
1181 user = user[:f]
1187 f = user.find('<')
1182 f = user.find('<')
1188 if f >= 0:
1183 if f >= 0:
1189 user = user[f + 1:]
1184 user = user[f + 1:]
1190 f = user.find(' ')
1185 f = user.find(' ')
1191 if f >= 0:
1186 if f >= 0:
1192 user = user[:f]
1187 user = user[:f]
1193 f = user.find('.')
1188 f = user.find('.')
1194 if f >= 0:
1189 if f >= 0:
1195 user = user[:f]
1190 user = user[:f]
1196 return user
1191 return user
1197
1192
1198 def email(author):
1193 def email(author):
1199 '''get email of author.'''
1194 '''get email of author.'''
1200 r = author.find('>')
1195 r = author.find('>')
1201 if r == -1:
1196 if r == -1:
1202 r = None
1197 r = None
1203 return author[author.find('<') + 1:r]
1198 return author[author.find('<') + 1:r]
1204
1199
1205 def ellipsis(text, maxlength=400):
1200 def ellipsis(text, maxlength=400):
1206 """Trim string to at most maxlength (default: 400) characters."""
1201 """Trim string to at most maxlength (default: 400) characters."""
1207 if len(text) <= maxlength:
1202 if len(text) <= maxlength:
1208 return text
1203 return text
1209 else:
1204 else:
1210 return "%s..." % (text[:maxlength - 3])
1205 return "%s..." % (text[:maxlength - 3])
1211
1206
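Worked examples for the three small string helpers above (the address is a placeholder):

    from mercurial import util

    print util.shortuser('Jane Doe <jane.doe@example.com>')   # -> 'jane'
    print util.email('Jane Doe <jane.doe@example.com>')       # -> 'jane.doe@example.com'
    print util.ellipsis('abcdefghij', 8)                      # -> 'abcde...'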
1212 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
1207 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
1213 '''yield every hg repository under path, recursively.'''
1208 '''yield every hg repository under path, recursively.'''
1214 def errhandler(err):
1209 def errhandler(err):
1215 if err.filename == path:
1210 if err.filename == path:
1216 raise err
1211 raise err
1217 if followsym and hasattr(os.path, 'samestat'):
1212 if followsym and hasattr(os.path, 'samestat'):
1218 def _add_dir_if_not_there(dirlst, dirname):
1213 def _add_dir_if_not_there(dirlst, dirname):
1219 match = False
1214 match = False
1220 samestat = os.path.samestat
1215 samestat = os.path.samestat
1221 dirstat = os.stat(dirname)
1216 dirstat = os.stat(dirname)
1222 for lstdirstat in dirlst:
1217 for lstdirstat in dirlst:
1223 if samestat(dirstat, lstdirstat):
1218 if samestat(dirstat, lstdirstat):
1224 match = True
1219 match = True
1225 break
1220 break
1226 if not match:
1221 if not match:
1227 dirlst.append(dirstat)
1222 dirlst.append(dirstat)
1228 return not match
1223 return not match
1229 else:
1224 else:
1230 followsym = False
1225 followsym = False
1231
1226
1232 if (seen_dirs is None) and followsym:
1227 if (seen_dirs is None) and followsym:
1233 seen_dirs = []
1228 seen_dirs = []
1234 _add_dir_if_not_there(seen_dirs, path)
1229 _add_dir_if_not_there(seen_dirs, path)
1235 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
1230 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
1236 dirs.sort()
1231 dirs.sort()
1237 if '.hg' in dirs:
1232 if '.hg' in dirs:
1238 yield root # found a repository
1233 yield root # found a repository
1239 qroot = os.path.join(root, '.hg', 'patches')
1234 qroot = os.path.join(root, '.hg', 'patches')
1240 if os.path.isdir(os.path.join(qroot, '.hg')):
1235 if os.path.isdir(os.path.join(qroot, '.hg')):
1241 yield qroot # we have a patch queue repo here
1236 yield qroot # we have a patch queue repo here
1242 if recurse:
1237 if recurse:
1243 # avoid recursing inside the .hg directory
1238 # avoid recursing inside the .hg directory
1244 dirs.remove('.hg')
1239 dirs.remove('.hg')
1245 else:
1240 else:
1246 dirs[:] = [] # don't descend further
1241 dirs[:] = [] # don't descend further
1247 elif followsym:
1242 elif followsym:
1248 newdirs = []
1243 newdirs = []
1249 for d in dirs:
1244 for d in dirs:
1250 fname = os.path.join(root, d)
1245 fname = os.path.join(root, d)
1251 if _add_dir_if_not_there(seen_dirs, fname):
1246 if _add_dir_if_not_there(seen_dirs, fname):
1252 if os.path.islink(fname):
1247 if os.path.islink(fname):
1253 for hgname in walkrepos(fname, True, seen_dirs):
1248 for hgname in walkrepos(fname, True, seen_dirs):
1254 yield hgname
1249 yield hgname
1255 else:
1250 else:
1256 newdirs.append(d)
1251 newdirs.append(d)
1257 dirs[:] = newdirs
1252 dirs[:] = newdirs
1258
1253
1259 _rcpath = None
1254 _rcpath = None
1260
1255
1261 def os_rcpath():
1256 def os_rcpath():
1262 '''return default os-specific hgrc search path'''
1257 '''return default os-specific hgrc search path'''
1263 path = system_rcpath()
1258 path = system_rcpath()
1264 path.extend(user_rcpath())
1259 path.extend(user_rcpath())
1265 path = [os.path.normpath(f) for f in path]
1260 path = [os.path.normpath(f) for f in path]
1266 return path
1261 return path
1267
1262
1268 def rcpath():
1263 def rcpath():
1269 '''return hgrc search path. if env var HGRCPATH is set, use it.
1264 '''return hgrc search path. if env var HGRCPATH is set, use it.
1270 for each item in path, if directory, use files ending in .rc,
1265 for each item in path, if directory, use files ending in .rc,
1271 else use item.
1266 else use item.
1272 make HGRCPATH empty to only look in .hg/hgrc of current repo.
1267 make HGRCPATH empty to only look in .hg/hgrc of current repo.
1273 if no HGRCPATH, use default os-specific path.'''
1268 if no HGRCPATH, use default os-specific path.'''
1274 global _rcpath
1269 global _rcpath
1275 if _rcpath is None:
1270 if _rcpath is None:
1276 if 'HGRCPATH' in os.environ:
1271 if 'HGRCPATH' in os.environ:
1277 _rcpath = []
1272 _rcpath = []
1278 for p in os.environ['HGRCPATH'].split(os.pathsep):
1273 for p in os.environ['HGRCPATH'].split(os.pathsep):
1279 if not p:
1274 if not p:
1280 continue
1275 continue
1281 p = expandpath(p)
1276 p = expandpath(p)
1282 if os.path.isdir(p):
1277 if os.path.isdir(p):
1283 for f, kind in osutil.listdir(p):
1278 for f, kind in osutil.listdir(p):
1284 if f.endswith('.rc'):
1279 if f.endswith('.rc'):
1285 _rcpath.append(os.path.join(p, f))
1280 _rcpath.append(os.path.join(p, f))
1286 else:
1281 else:
1287 _rcpath.append(p)
1282 _rcpath.append(p)
1288 else:
1283 else:
1289 _rcpath = os_rcpath()
1284 _rcpath = os_rcpath()
1290 return _rcpath
1285 return _rcpath
1291
1286
1292 def bytecount(nbytes):
1287 def bytecount(nbytes):
1293 '''return byte count formatted as readable string, with units'''
1288 '''return byte count formatted as readable string, with units'''
1294
1289
1295 units = (
1290 units = (
1296 (100, 1 << 30, _('%.0f GB')),
1291 (100, 1 << 30, _('%.0f GB')),
1297 (10, 1 << 30, _('%.1f GB')),
1292 (10, 1 << 30, _('%.1f GB')),
1298 (1, 1 << 30, _('%.2f GB')),
1293 (1, 1 << 30, _('%.2f GB')),
1299 (100, 1 << 20, _('%.0f MB')),
1294 (100, 1 << 20, _('%.0f MB')),
1300 (10, 1 << 20, _('%.1f MB')),
1295 (10, 1 << 20, _('%.1f MB')),
1301 (1, 1 << 20, _('%.2f MB')),
1296 (1, 1 << 20, _('%.2f MB')),
1302 (100, 1 << 10, _('%.0f KB')),
1297 (100, 1 << 10, _('%.0f KB')),
1303 (10, 1 << 10, _('%.1f KB')),
1298 (10, 1 << 10, _('%.1f KB')),
1304 (1, 1 << 10, _('%.2f KB')),
1299 (1, 1 << 10, _('%.2f KB')),
1305 (1, 1, _('%.0f bytes')),
1300 (1, 1, _('%.0f bytes')),
1306 )
1301 )
1307
1302
1308 for multiplier, divisor, format in units:
1303 for multiplier, divisor, format in units:
1309 if nbytes >= divisor * multiplier:
1304 if nbytes >= divisor * multiplier:
1310 return format % (nbytes / float(divisor))
1305 return format % (nbytes / float(divisor))
1311 return units[-1][2] % nbytes
1306 return units[-1][2] % nbytes
1312
1307
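A few worked values for bytecount() showing how the precision scales with magnitude:

    from mercurial import util

    print util.bytecount(0)             # -> '0 bytes'
    print util.bytecount(12345)         # -> '12.1 KB'
    print util.bytecount(1234567890)    # -> '1.15 GB'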
1313 def drop_scheme(scheme, path):
1308 def drop_scheme(scheme, path):
1314 sc = scheme + ':'
1309 sc = scheme + ':'
1315 if path.startswith(sc):
1310 if path.startswith(sc):
1316 path = path[len(sc):]
1311 path = path[len(sc):]
1317 if path.startswith('//'):
1312 if path.startswith('//'):
1318 if scheme == 'file':
1313 if scheme == 'file':
1319 i = path.find('/', 2)
1314 i = path.find('/', 2)
1320 if i == -1:
1315 if i == -1:
1321 return ''
1316 return ''
1322 # On Windows, absolute paths are rooted at the current drive
1317 # On Windows, absolute paths are rooted at the current drive
1323 # root. On POSIX they are rooted at the file system root.
1318 # root. On POSIX they are rooted at the file system root.
1324 if os.name == 'nt':
1319 if os.name == 'nt':
1325 droot = os.path.splitdrive(os.getcwd())[0] + '/'
1320 droot = os.path.splitdrive(os.getcwd())[0] + '/'
1326 path = os.path.join(droot, path[i + 1:])
1321 path = os.path.join(droot, path[i + 1:])
1327 else:
1322 else:
1328 path = path[i:]
1323 path = path[i:]
1329 else:
1324 else:
1330 path = path[2:]
1325 path = path[2:]
1331 return path
1326 return path
1332
1327
1333 def uirepr(s):
1328 def uirepr(s):
1334 # Avoid double backslash in Windows path repr()
1329 # Avoid double backslash in Windows path repr()
1335 return repr(s).replace('\\\\', '\\')
1330 return repr(s).replace('\\\\', '\\')
1336
1331
#### naming convention of below implementation follows 'textwrap' module

class MBTextWrapper(textwrap.TextWrapper):
    """
    Extend TextWrapper for double-width characters.

    Some Asian characters use two terminal columns instead of one.
    A good example of this behavior can be seen with u'\u65e5\u672c',
    the two Japanese characters for "Japan":
    len() returns 2, but when printed to a terminal, they eat 4 columns.

    (Note that this has nothing to do whatsoever with unicode
    representation, or encoding of the underlying string)
    """
    def __init__(self, **kwargs):
        textwrap.TextWrapper.__init__(self, **kwargs)

    def _cutdown(self, str, space_left):
        l = 0
        ucstr = unicode(str, encoding.encoding)
        colwidth = unicodedata.east_asian_width
        for i in xrange(len(ucstr)):
            l += colwidth(ucstr[i]) in 'WFA' and 2 or 1
            if space_left < l:
                return (ucstr[:i].encode(encoding.encoding),
                        ucstr[i:].encode(encoding.encoding))
        return str, ''

    # ----------------------------------------
    # overriding of base class

    def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
        space_left = max(width - cur_len, 1)

        if self.break_long_words:
            cut, res = self._cutdown(reversed_chunks[-1], space_left)
            cur_line.append(cut)
            reversed_chunks[-1] = res
        elif not cur_line:
            cur_line.append(reversed_chunks.pop())

#### naming convention of above implementation follows 'textwrap' module

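As a hedged illustration of the counting rule used in _cutdown() above (the colwidth_of() helper is hypothetical, not part of util.py): characters whose Unicode east-asian-width class is 'W', 'F' or 'A' occupy two terminal columns.

    def colwidth_of(ucstr):
        # same rule as _cutdown(): wide/fullwidth/ambiguous chars count double
        return sum(unicodedata.east_asian_width(c) in 'WFA' and 2 or 1
                   for c in ucstr)

    assert len(u'\u65e5\u672c') == 2          # two characters...
    assert colwidth_of(u'\u65e5\u672c') == 4  # ...but four terminal columns
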
def wrap(line, width, initindent='', hangindent=''):
    maxindent = max(len(hangindent), len(initindent))
    if width <= maxindent:
        # adjust for weird terminal size
        width = max(78, maxindent + 1)
    wrapper = MBTextWrapper(width=width,
                            initial_indent=initindent,
                            subsequent_indent=hangindent)
    return wrapper.fill(line)

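A small usage sketch of wrap(); the text and indent strings below are made up:

    text = 'show changed files in the working directory and their status'
    print wrap(text, width=40,
               initindent=' -s, --status  ',
               hangindent='               ')
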
def iterlines(iterator):
    for chunk in iterator:
        for line in chunk.splitlines():
            yield line

def expandpath(path):
    return os.path.expanduser(os.path.expandvars(path))

def hgcmd():
    """Return the command used to execute current hg

    This is different from hgexecutable() because on Windows we want
    to avoid things opening new shell windows like batch files, so we
    get either the python call or current executable.
    """
    if main_is_frozen():
        return [sys.executable]
    return gethgcmd()

def rundetached(args, condfn):
    """Execute the argument list in a detached process.

    condfn is a callable which is called repeatedly and should return
    True once the child process is known to have started successfully.
    At this point, the child process PID is returned. If the child
    process fails to start or finishes before condfn() evaluates to
    True, return -1.
    """
    # Windows case is easier because the child process is either
    # successfully starting and validating the condition or exiting
    # on failure. We just poll on its PID. On Unix, if the child
    # process fails to start, it will be left in a zombie state until
    # the parent waits on it, which we cannot do since we expect a long
    # running process on success. Instead we listen for SIGCHLD telling
    # us our child process terminated.
    terminated = set()
    def handler(signum, frame):
        terminated.add(os.wait())
    prevhandler = None
    if hasattr(signal, 'SIGCHLD'):
        prevhandler = signal.signal(signal.SIGCHLD, handler)
    try:
        pid = spawndetached(args)
        while not condfn():
            if ((pid in terminated or not testpid(pid))
                and not condfn()):
                return -1
            time.sleep(0.1)
        return pid
    finally:
        if prevhandler is not None:
            signal.signal(signal.SIGCHLD, prevhandler)

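A hedged sketch of a caller-side use of rundetached(); the command line and pid file path are illustrative assumptions, not taken from this module:

    pidfile = '/tmp/hgserve.pid'                        # hypothetical path
    args = ['hg', 'serve', '-d', '--pid-file', pidfile]
    # treat the child as started once its pid file appears
    pid = rundetached(args, lambda: os.path.exists(pidfile))
    if pid < 0:
        raise Abort(_('child process failed to start'))
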
try:
    any, all = any, all
except NameError:
    def any(iterable):
        for i in iterable:
            if i:
                return True
        return False

    def all(iterable):
        for i in iterable:
            if not i:
                return False
        return True

def interpolate(prefix, mapping, s, fn=None):
    """Return the result of interpolating items in the mapping into string s.

    prefix is a single character string, or a two character string with
    a backslash as the first character if the prefix needs to be escaped in
    a regular expression.

    fn is an optional function that will be applied to the replacement text
    just before replacement.
    """
    fn = fn or (lambda s: s)
    r = re.compile(r'%s(%s)' % (prefix, '|'.join(mapping.keys())))
    return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)

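A usage sketch of interpolate() with a made-up mapping; note the prefix ends up inside a regular expression, so '$' has to be escaped as described in the docstring:

    mapping = {'user': 'alice', 'repo': 'hg'}
    s = interpolate(r'\$', mapping, 'pushed by $user to $repo')
    assert s == 'pushed by alice to hg'
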
def getport(port):
    """Return the port for a given network service.

    If port is an integer, it's returned as is. If it's a string, it's
    looked up using socket.getservbyname(). If there's no matching
    service, util.Abort is raised.
    """
    try:
        return int(port)
    except ValueError:
        pass

    try:
        return socket.getservbyname(port)
    except socket.error:
        raise Abort(_("no port number associated with service '%s'") % port)

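Usage sketch for getport() (illustrative; the name lookup depends on the local services database):

    assert getport(8000) == 8000      # integers pass straight through
    assert getport('8000') == 8000    # numeric strings are converted
    port = getport('http')            # usually 80, via socket.getservbyname()
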
_booleans = {'1': True, 'yes': True, 'true': True, 'on': True, 'always': True,
             '0': False, 'no': False, 'false': False, 'off': False,
             'never': False}

def parsebool(s):
    """Parse s into a boolean.

    If s is not a valid boolean, returns None.
    """
    return _booleans.get(s.lower(), None)
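
A short sketch of parsebool() behaviour (illustrative):

    assert parsebool('yes') is True
    assert parsebool('Off') is False     # lookup is case-insensitive
    assert parsebool('maybe') is None    # unrecognized values yield None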