##// END OF EJS Templates
cleanup: "raise SomeException()" -> "raise SomeException"
Brodie Rao -
r16687:e34106fa default
parent child Browse files
Show More
@@ -1,47 +1,47 b''
1 #!/usr/bin/env python
1 #!/usr/bin/env python
2 #
2 #
3 # Dumps output generated by Mercurial's command server in a formatted style to a
3 # Dumps output generated by Mercurial's command server in a formatted style to a
4 # given file or stderr if '-' is specified. Output is also written in its raw
4 # given file or stderr if '-' is specified. Output is also written in its raw
5 # format to stdout.
5 # format to stdout.
6 #
6 #
7 # $ ./hg serve --cmds pipe | ./contrib/debugcmdserver.py -
7 # $ ./hg serve --cmds pipe | ./contrib/debugcmdserver.py -
8 # o, 52 -> 'capabilities: getencoding runcommand\nencoding: UTF-8'
8 # o, 52 -> 'capabilities: getencoding runcommand\nencoding: UTF-8'
9
9
10 import sys, struct
10 import sys, struct
11
11
12 if len(sys.argv) != 2:
12 if len(sys.argv) != 2:
13 print 'usage: debugcmdserver.py FILE'
13 print 'usage: debugcmdserver.py FILE'
14 sys.exit(1)
14 sys.exit(1)
15
15
16 outputfmt = '>cI'
16 outputfmt = '>cI'
17 outputfmtsize = struct.calcsize(outputfmt)
17 outputfmtsize = struct.calcsize(outputfmt)
18
18
19 if sys.argv[1] == '-':
19 if sys.argv[1] == '-':
20 log = sys.stderr
20 log = sys.stderr
21 else:
21 else:
22 log = open(sys.argv[1], 'a')
22 log = open(sys.argv[1], 'a')
23
23
24 def read(size):
24 def read(size):
25 data = sys.stdin.read(size)
25 data = sys.stdin.read(size)
26 if not data:
26 if not data:
27 raise EOFError()
27 raise EOFError
28 sys.stdout.write(data)
28 sys.stdout.write(data)
29 sys.stdout.flush()
29 sys.stdout.flush()
30 return data
30 return data
31
31
32 try:
32 try:
33 while True:
33 while True:
34 header = read(outputfmtsize)
34 header = read(outputfmtsize)
35 channel, length = struct.unpack(outputfmt, header)
35 channel, length = struct.unpack(outputfmt, header)
36 log.write('%s, %-4d' % (channel, length))
36 log.write('%s, %-4d' % (channel, length))
37 if channel in 'IL':
37 if channel in 'IL':
38 log.write(' -> waiting for input\n')
38 log.write(' -> waiting for input\n')
39 else:
39 else:
40 data = read(length)
40 data = read(length)
41 log.write(' -> %r\n' % data)
41 log.write(' -> %r\n' % data)
42 log.flush()
42 log.flush()
43 except EOFError:
43 except EOFError:
44 pass
44 pass
45 finally:
45 finally:
46 if log != sys.stderr:
46 if log != sys.stderr:
47 log.close()
47 log.close()
@@ -1,445 +1,445 b''
1 # common.py - common code for the convert extension
1 # common.py - common code for the convert extension
2 #
2 #
3 # Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
3 # Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 import base64, errno
8 import base64, errno
9 import os
9 import os
10 import cPickle as pickle
10 import cPickle as pickle
11 from mercurial import util
11 from mercurial import util
12 from mercurial.i18n import _
12 from mercurial.i18n import _
13
13
14 propertycache = util.propertycache
14 propertycache = util.propertycache
15
15
16 def encodeargs(args):
16 def encodeargs(args):
17 def encodearg(s):
17 def encodearg(s):
18 lines = base64.encodestring(s)
18 lines = base64.encodestring(s)
19 lines = [l.splitlines()[0] for l in lines]
19 lines = [l.splitlines()[0] for l in lines]
20 return ''.join(lines)
20 return ''.join(lines)
21
21
22 s = pickle.dumps(args)
22 s = pickle.dumps(args)
23 return encodearg(s)
23 return encodearg(s)
24
24
25 def decodeargs(s):
25 def decodeargs(s):
26 s = base64.decodestring(s)
26 s = base64.decodestring(s)
27 return pickle.loads(s)
27 return pickle.loads(s)
28
28
29 class MissingTool(Exception):
29 class MissingTool(Exception):
30 pass
30 pass
31
31
32 def checktool(exe, name=None, abort=True):
32 def checktool(exe, name=None, abort=True):
33 name = name or exe
33 name = name or exe
34 if not util.findexe(exe):
34 if not util.findexe(exe):
35 exc = abort and util.Abort or MissingTool
35 exc = abort and util.Abort or MissingTool
36 raise exc(_('cannot find required "%s" tool') % name)
36 raise exc(_('cannot find required "%s" tool') % name)
37
37
38 class NoRepo(Exception):
38 class NoRepo(Exception):
39 pass
39 pass
40
40
41 SKIPREV = 'SKIP'
41 SKIPREV = 'SKIP'
42
42
43 class commit(object):
43 class commit(object):
44 def __init__(self, author, date, desc, parents, branch=None, rev=None,
44 def __init__(self, author, date, desc, parents, branch=None, rev=None,
45 extra={}, sortkey=None):
45 extra={}, sortkey=None):
46 self.author = author or 'unknown'
46 self.author = author or 'unknown'
47 self.date = date or '0 0'
47 self.date = date or '0 0'
48 self.desc = desc
48 self.desc = desc
49 self.parents = parents
49 self.parents = parents
50 self.branch = branch
50 self.branch = branch
51 self.rev = rev
51 self.rev = rev
52 self.extra = extra
52 self.extra = extra
53 self.sortkey = sortkey
53 self.sortkey = sortkey
54
54
55 class converter_source(object):
55 class converter_source(object):
56 """Conversion source interface"""
56 """Conversion source interface"""
57
57
58 def __init__(self, ui, path=None, rev=None):
58 def __init__(self, ui, path=None, rev=None):
59 """Initialize conversion source (or raise NoRepo("message")
59 """Initialize conversion source (or raise NoRepo("message")
60 exception if path is not a valid repository)"""
60 exception if path is not a valid repository)"""
61 self.ui = ui
61 self.ui = ui
62 self.path = path
62 self.path = path
63 self.rev = rev
63 self.rev = rev
64
64
65 self.encoding = 'utf-8'
65 self.encoding = 'utf-8'
66
66
67 def before(self):
67 def before(self):
68 pass
68 pass
69
69
70 def after(self):
70 def after(self):
71 pass
71 pass
72
72
73 def setrevmap(self, revmap):
73 def setrevmap(self, revmap):
74 """set the map of already-converted revisions"""
74 """set the map of already-converted revisions"""
75 pass
75 pass
76
76
77 def getheads(self):
77 def getheads(self):
78 """Return a list of this repository's heads"""
78 """Return a list of this repository's heads"""
79 raise NotImplementedError()
79 raise NotImplementedError
80
80
81 def getfile(self, name, rev):
81 def getfile(self, name, rev):
82 """Return a pair (data, mode) where data is the file content
82 """Return a pair (data, mode) where data is the file content
83 as a string and mode one of '', 'x' or 'l'. rev is the
83 as a string and mode one of '', 'x' or 'l'. rev is the
84 identifier returned by a previous call to getchanges(). Raise
84 identifier returned by a previous call to getchanges(). Raise
85 IOError to indicate that name was deleted in rev.
85 IOError to indicate that name was deleted in rev.
86 """
86 """
87 raise NotImplementedError()
87 raise NotImplementedError
88
88
89 def getchanges(self, version):
89 def getchanges(self, version):
90 """Returns a tuple of (files, copies).
90 """Returns a tuple of (files, copies).
91
91
92 files is a sorted list of (filename, id) tuples for all files
92 files is a sorted list of (filename, id) tuples for all files
93 changed between version and its first parent returned by
93 changed between version and its first parent returned by
94 getcommit(). id is the source revision id of the file.
94 getcommit(). id is the source revision id of the file.
95
95
96 copies is a dictionary of dest: source
96 copies is a dictionary of dest: source
97 """
97 """
98 raise NotImplementedError()
98 raise NotImplementedError
99
99
100 def getcommit(self, version):
100 def getcommit(self, version):
101 """Return the commit object for version"""
101 """Return the commit object for version"""
102 raise NotImplementedError()
102 raise NotImplementedError
103
103
104 def gettags(self):
104 def gettags(self):
105 """Return the tags as a dictionary of name: revision
105 """Return the tags as a dictionary of name: revision
106
106
107 Tag names must be UTF-8 strings.
107 Tag names must be UTF-8 strings.
108 """
108 """
109 raise NotImplementedError()
109 raise NotImplementedError
110
110
111 def recode(self, s, encoding=None):
111 def recode(self, s, encoding=None):
112 if not encoding:
112 if not encoding:
113 encoding = self.encoding or 'utf-8'
113 encoding = self.encoding or 'utf-8'
114
114
115 if isinstance(s, unicode):
115 if isinstance(s, unicode):
116 return s.encode("utf-8")
116 return s.encode("utf-8")
117 try:
117 try:
118 return s.decode(encoding).encode("utf-8")
118 return s.decode(encoding).encode("utf-8")
119 except:
119 except:
120 try:
120 try:
121 return s.decode("latin-1").encode("utf-8")
121 return s.decode("latin-1").encode("utf-8")
122 except:
122 except:
123 return s.decode(encoding, "replace").encode("utf-8")
123 return s.decode(encoding, "replace").encode("utf-8")
124
124
125 def getchangedfiles(self, rev, i):
125 def getchangedfiles(self, rev, i):
126 """Return the files changed by rev compared to parent[i].
126 """Return the files changed by rev compared to parent[i].
127
127
128 i is an index selecting one of the parents of rev. The return
128 i is an index selecting one of the parents of rev. The return
129 value should be the list of files that are different in rev and
129 value should be the list of files that are different in rev and
130 this parent.
130 this parent.
131
131
132 If rev has no parents, i is None.
132 If rev has no parents, i is None.
133
133
134 This function is only needed to support --filemap
134 This function is only needed to support --filemap
135 """
135 """
136 raise NotImplementedError()
136 raise NotImplementedError
137
137
138 def converted(self, rev, sinkrev):
138 def converted(self, rev, sinkrev):
139 '''Notify the source that a revision has been converted.'''
139 '''Notify the source that a revision has been converted.'''
140 pass
140 pass
141
141
142 def hasnativeorder(self):
142 def hasnativeorder(self):
143 """Return true if this source has a meaningful, native revision
143 """Return true if this source has a meaningful, native revision
144 order. For instance, Mercurial revisions are stored sequentially
144 order. For instance, Mercurial revisions are stored sequentially
145 while there is no such global ordering with Darcs.
145 while there is no such global ordering with Darcs.
146 """
146 """
147 return False
147 return False
148
148
149 def lookuprev(self, rev):
149 def lookuprev(self, rev):
150 """If rev is a meaningful revision reference in source, return
150 """If rev is a meaningful revision reference in source, return
151 the referenced identifier in the same format used by getcommit().
151 the referenced identifier in the same format used by getcommit().
152 return None otherwise.
152 return None otherwise.
153 """
153 """
154 return None
154 return None
155
155
156 def getbookmarks(self):
156 def getbookmarks(self):
157 """Return the bookmarks as a dictionary of name: revision
157 """Return the bookmarks as a dictionary of name: revision
158
158
159 Bookmark names are to be UTF-8 strings.
159 Bookmark names are to be UTF-8 strings.
160 """
160 """
161 return {}
161 return {}
162
162
163 class converter_sink(object):
163 class converter_sink(object):
164 """Conversion sink (target) interface"""
164 """Conversion sink (target) interface"""
165
165
166 def __init__(self, ui, path):
166 def __init__(self, ui, path):
167 """Initialize conversion sink (or raise NoRepo("message")
167 """Initialize conversion sink (or raise NoRepo("message")
168 exception if path is not a valid repository)
168 exception if path is not a valid repository)
169
169
170 created is a list of paths to remove if a fatal error occurs
170 created is a list of paths to remove if a fatal error occurs
171 later"""
171 later"""
172 self.ui = ui
172 self.ui = ui
173 self.path = path
173 self.path = path
174 self.created = []
174 self.created = []
175
175
176 def getheads(self):
176 def getheads(self):
177 """Return a list of this repository's heads"""
177 """Return a list of this repository's heads"""
178 raise NotImplementedError()
178 raise NotImplementedError
179
179
180 def revmapfile(self):
180 def revmapfile(self):
181 """Path to a file that will contain lines
181 """Path to a file that will contain lines
182 source_rev_id sink_rev_id
182 source_rev_id sink_rev_id
183 mapping equivalent revision identifiers for each system."""
183 mapping equivalent revision identifiers for each system."""
184 raise NotImplementedError()
184 raise NotImplementedError
185
185
186 def authorfile(self):
186 def authorfile(self):
187 """Path to a file that will contain lines
187 """Path to a file that will contain lines
188 srcauthor=dstauthor
188 srcauthor=dstauthor
189 mapping equivalent author identifiers for each system."""
189 mapping equivalent author identifiers for each system."""
190 return None
190 return None
191
191
192 def putcommit(self, files, copies, parents, commit, source, revmap):
192 def putcommit(self, files, copies, parents, commit, source, revmap):
193 """Create a revision with all changed files listed in 'files'
193 """Create a revision with all changed files listed in 'files'
194 and having listed parents. 'commit' is a commit object
194 and having listed parents. 'commit' is a commit object
195 containing at a minimum the author, date, and message for this
195 containing at a minimum the author, date, and message for this
196 changeset. 'files' is a list of (path, version) tuples,
196 changeset. 'files' is a list of (path, version) tuples,
197 'copies' is a dictionary mapping destinations to sources,
197 'copies' is a dictionary mapping destinations to sources,
198 'source' is the source repository, and 'revmap' is a mapfile
198 'source' is the source repository, and 'revmap' is a mapfile
199 of source revisions to converted revisions. Only getfile() and
199 of source revisions to converted revisions. Only getfile() and
200 lookuprev() should be called on 'source'.
200 lookuprev() should be called on 'source'.
201
201
202 Note that the sink repository is not told to update itself to
202 Note that the sink repository is not told to update itself to
203 a particular revision (or even what that revision would be)
203 a particular revision (or even what that revision would be)
204 before it receives the file data.
204 before it receives the file data.
205 """
205 """
206 raise NotImplementedError()
206 raise NotImplementedError
207
207
208 def puttags(self, tags):
208 def puttags(self, tags):
209 """Put tags into sink.
209 """Put tags into sink.
210
210
211 tags: {tagname: sink_rev_id, ...} where tagname is a UTF-8 string.
211 tags: {tagname: sink_rev_id, ...} where tagname is a UTF-8 string.
212 Return a pair (tag_revision, tag_parent_revision), or (None, None)
212 Return a pair (tag_revision, tag_parent_revision), or (None, None)
213 if nothing was changed.
213 if nothing was changed.
214 """
214 """
215 raise NotImplementedError()
215 raise NotImplementedError
216
216
217 def setbranch(self, branch, pbranches):
217 def setbranch(self, branch, pbranches):
218 """Set the current branch name. Called before the first putcommit
218 """Set the current branch name. Called before the first putcommit
219 on the branch.
219 on the branch.
220 branch: branch name for subsequent commits
220 branch: branch name for subsequent commits
221 pbranches: (converted parent revision, parent branch) tuples"""
221 pbranches: (converted parent revision, parent branch) tuples"""
222 pass
222 pass
223
223
224 def setfilemapmode(self, active):
224 def setfilemapmode(self, active):
225 """Tell the destination that we're using a filemap
225 """Tell the destination that we're using a filemap
226
226
227 Some converter_sources (svn in particular) can claim that a file
227 Some converter_sources (svn in particular) can claim that a file
228 was changed in a revision, even if there was no change. This method
228 was changed in a revision, even if there was no change. This method
229 tells the destination that we're using a filemap and that it should
229 tells the destination that we're using a filemap and that it should
230 filter empty revisions.
230 filter empty revisions.
231 """
231 """
232 pass
232 pass
233
233
234 def before(self):
234 def before(self):
235 pass
235 pass
236
236
237 def after(self):
237 def after(self):
238 pass
238 pass
239
239
240 def putbookmarks(self, bookmarks):
240 def putbookmarks(self, bookmarks):
241 """Put bookmarks into sink.
241 """Put bookmarks into sink.
242
242
243 bookmarks: {bookmarkname: sink_rev_id, ...}
243 bookmarks: {bookmarkname: sink_rev_id, ...}
244 where bookmarkname is a UTF-8 string.
244 where bookmarkname is a UTF-8 string.
245 """
245 """
246 pass
246 pass
247
247
248 def hascommit(self, rev):
248 def hascommit(self, rev):
249 """Return True if the sink contains rev"""
249 """Return True if the sink contains rev"""
250 raise NotImplementedError()
250 raise NotImplementedError
251
251
252 class commandline(object):
252 class commandline(object):
253 def __init__(self, ui, command):
253 def __init__(self, ui, command):
254 self.ui = ui
254 self.ui = ui
255 self.command = command
255 self.command = command
256
256
257 def prerun(self):
257 def prerun(self):
258 pass
258 pass
259
259
260 def postrun(self):
260 def postrun(self):
261 pass
261 pass
262
262
263 def _cmdline(self, cmd, closestdin, *args, **kwargs):
263 def _cmdline(self, cmd, closestdin, *args, **kwargs):
264 cmdline = [self.command, cmd] + list(args)
264 cmdline = [self.command, cmd] + list(args)
265 for k, v in kwargs.iteritems():
265 for k, v in kwargs.iteritems():
266 if len(k) == 1:
266 if len(k) == 1:
267 cmdline.append('-' + k)
267 cmdline.append('-' + k)
268 else:
268 else:
269 cmdline.append('--' + k.replace('_', '-'))
269 cmdline.append('--' + k.replace('_', '-'))
270 try:
270 try:
271 if len(k) == 1:
271 if len(k) == 1:
272 cmdline.append('' + v)
272 cmdline.append('' + v)
273 else:
273 else:
274 cmdline[-1] += '=' + v
274 cmdline[-1] += '=' + v
275 except TypeError:
275 except TypeError:
276 pass
276 pass
277 cmdline = [util.shellquote(arg) for arg in cmdline]
277 cmdline = [util.shellquote(arg) for arg in cmdline]
278 if not self.ui.debugflag:
278 if not self.ui.debugflag:
279 cmdline += ['2>', util.nulldev]
279 cmdline += ['2>', util.nulldev]
280 if closestdin:
280 if closestdin:
281 cmdline += ['<', util.nulldev]
281 cmdline += ['<', util.nulldev]
282 cmdline = ' '.join(cmdline)
282 cmdline = ' '.join(cmdline)
283 return cmdline
283 return cmdline
284
284
285 def _run(self, cmd, *args, **kwargs):
285 def _run(self, cmd, *args, **kwargs):
286 return self._dorun(util.popen, cmd, True, *args, **kwargs)
286 return self._dorun(util.popen, cmd, True, *args, **kwargs)
287
287
288 def _run2(self, cmd, *args, **kwargs):
288 def _run2(self, cmd, *args, **kwargs):
289 return self._dorun(util.popen2, cmd, False, *args, **kwargs)
289 return self._dorun(util.popen2, cmd, False, *args, **kwargs)
290
290
291 def _dorun(self, openfunc, cmd, closestdin, *args, **kwargs):
291 def _dorun(self, openfunc, cmd, closestdin, *args, **kwargs):
292 cmdline = self._cmdline(cmd, closestdin, *args, **kwargs)
292 cmdline = self._cmdline(cmd, closestdin, *args, **kwargs)
293 self.ui.debug('running: %s\n' % (cmdline,))
293 self.ui.debug('running: %s\n' % (cmdline,))
294 self.prerun()
294 self.prerun()
295 try:
295 try:
296 return openfunc(cmdline)
296 return openfunc(cmdline)
297 finally:
297 finally:
298 self.postrun()
298 self.postrun()
299
299
300 def run(self, cmd, *args, **kwargs):
300 def run(self, cmd, *args, **kwargs):
301 fp = self._run(cmd, *args, **kwargs)
301 fp = self._run(cmd, *args, **kwargs)
302 output = fp.read()
302 output = fp.read()
303 self.ui.debug(output)
303 self.ui.debug(output)
304 return output, fp.close()
304 return output, fp.close()
305
305
306 def runlines(self, cmd, *args, **kwargs):
306 def runlines(self, cmd, *args, **kwargs):
307 fp = self._run(cmd, *args, **kwargs)
307 fp = self._run(cmd, *args, **kwargs)
308 output = fp.readlines()
308 output = fp.readlines()
309 self.ui.debug(''.join(output))
309 self.ui.debug(''.join(output))
310 return output, fp.close()
310 return output, fp.close()
311
311
312 def checkexit(self, status, output=''):
312 def checkexit(self, status, output=''):
313 if status:
313 if status:
314 if output:
314 if output:
315 self.ui.warn(_('%s error:\n') % self.command)
315 self.ui.warn(_('%s error:\n') % self.command)
316 self.ui.warn(output)
316 self.ui.warn(output)
317 msg = util.explainexit(status)[0]
317 msg = util.explainexit(status)[0]
318 raise util.Abort('%s %s' % (self.command, msg))
318 raise util.Abort('%s %s' % (self.command, msg))
319
319
320 def run0(self, cmd, *args, **kwargs):
320 def run0(self, cmd, *args, **kwargs):
321 output, status = self.run(cmd, *args, **kwargs)
321 output, status = self.run(cmd, *args, **kwargs)
322 self.checkexit(status, output)
322 self.checkexit(status, output)
323 return output
323 return output
324
324
325 def runlines0(self, cmd, *args, **kwargs):
325 def runlines0(self, cmd, *args, **kwargs):
326 output, status = self.runlines(cmd, *args, **kwargs)
326 output, status = self.runlines(cmd, *args, **kwargs)
327 self.checkexit(status, ''.join(output))
327 self.checkexit(status, ''.join(output))
328 return output
328 return output
329
329
330 @propertycache
330 @propertycache
331 def argmax(self):
331 def argmax(self):
332 # POSIX requires at least 4096 bytes for ARG_MAX
332 # POSIX requires at least 4096 bytes for ARG_MAX
333 argmax = 4096
333 argmax = 4096
334 try:
334 try:
335 argmax = os.sysconf("SC_ARG_MAX")
335 argmax = os.sysconf("SC_ARG_MAX")
336 except:
336 except:
337 pass
337 pass
338
338
339 # Windows shells impose their own limits on command line length,
339 # Windows shells impose their own limits on command line length,
340 # down to 2047 bytes for cmd.exe under Windows NT/2k and 2500 bytes
340 # down to 2047 bytes for cmd.exe under Windows NT/2k and 2500 bytes
341 # for older 4nt.exe. See http://support.microsoft.com/kb/830473 for
341 # for older 4nt.exe. See http://support.microsoft.com/kb/830473 for
342 # details about cmd.exe limitations.
342 # details about cmd.exe limitations.
343
343
344 # Since ARG_MAX is for command line _and_ environment, lower our limit
344 # Since ARG_MAX is for command line _and_ environment, lower our limit
345 # (and make happy Windows shells while doing this).
345 # (and make happy Windows shells while doing this).
346 return argmax // 2 - 1
346 return argmax // 2 - 1
347
347
348 def limit_arglist(self, arglist, cmd, closestdin, *args, **kwargs):
348 def limit_arglist(self, arglist, cmd, closestdin, *args, **kwargs):
349 cmdlen = len(self._cmdline(cmd, closestdin, *args, **kwargs))
349 cmdlen = len(self._cmdline(cmd, closestdin, *args, **kwargs))
350 limit = self.argmax - cmdlen
350 limit = self.argmax - cmdlen
351 bytes = 0
351 bytes = 0
352 fl = []
352 fl = []
353 for fn in arglist:
353 for fn in arglist:
354 b = len(fn) + 3
354 b = len(fn) + 3
355 if bytes + b < limit or len(fl) == 0:
355 if bytes + b < limit or len(fl) == 0:
356 fl.append(fn)
356 fl.append(fn)
357 bytes += b
357 bytes += b
358 else:
358 else:
359 yield fl
359 yield fl
360 fl = [fn]
360 fl = [fn]
361 bytes = b
361 bytes = b
362 if fl:
362 if fl:
363 yield fl
363 yield fl
364
364
365 def xargs(self, arglist, cmd, *args, **kwargs):
365 def xargs(self, arglist, cmd, *args, **kwargs):
366 for l in self.limit_arglist(arglist, cmd, True, *args, **kwargs):
366 for l in self.limit_arglist(arglist, cmd, True, *args, **kwargs):
367 self.run0(cmd, *(list(args) + l), **kwargs)
367 self.run0(cmd, *(list(args) + l), **kwargs)
368
368
369 class mapfile(dict):
369 class mapfile(dict):
370 def __init__(self, ui, path):
370 def __init__(self, ui, path):
371 super(mapfile, self).__init__()
371 super(mapfile, self).__init__()
372 self.ui = ui
372 self.ui = ui
373 self.path = path
373 self.path = path
374 self.fp = None
374 self.fp = None
375 self.order = []
375 self.order = []
376 self._read()
376 self._read()
377
377
378 def _read(self):
378 def _read(self):
379 if not self.path:
379 if not self.path:
380 return
380 return
381 try:
381 try:
382 fp = open(self.path, 'r')
382 fp = open(self.path, 'r')
383 except IOError, err:
383 except IOError, err:
384 if err.errno != errno.ENOENT:
384 if err.errno != errno.ENOENT:
385 raise
385 raise
386 return
386 return
387 for i, line in enumerate(fp):
387 for i, line in enumerate(fp):
388 line = line.splitlines()[0].rstrip()
388 line = line.splitlines()[0].rstrip()
389 if not line:
389 if not line:
390 # Ignore blank lines
390 # Ignore blank lines
391 continue
391 continue
392 try:
392 try:
393 key, value = line.rsplit(' ', 1)
393 key, value = line.rsplit(' ', 1)
394 except ValueError:
394 except ValueError:
395 raise util.Abort(
395 raise util.Abort(
396 _('syntax error in %s(%d): key/value pair expected')
396 _('syntax error in %s(%d): key/value pair expected')
397 % (self.path, i + 1))
397 % (self.path, i + 1))
398 if key not in self:
398 if key not in self:
399 self.order.append(key)
399 self.order.append(key)
400 super(mapfile, self).__setitem__(key, value)
400 super(mapfile, self).__setitem__(key, value)
401 fp.close()
401 fp.close()
402
402
403 def __setitem__(self, key, value):
403 def __setitem__(self, key, value):
404 if self.fp is None:
404 if self.fp is None:
405 try:
405 try:
406 self.fp = open(self.path, 'a')
406 self.fp = open(self.path, 'a')
407 except IOError, err:
407 except IOError, err:
408 raise util.Abort(_('could not open map file %r: %s') %
408 raise util.Abort(_('could not open map file %r: %s') %
409 (self.path, err.strerror))
409 (self.path, err.strerror))
410 self.fp.write('%s %s\n' % (key, value))
410 self.fp.write('%s %s\n' % (key, value))
411 self.fp.flush()
411 self.fp.flush()
412 super(mapfile, self).__setitem__(key, value)
412 super(mapfile, self).__setitem__(key, value)
413
413
414 def close(self):
414 def close(self):
415 if self.fp:
415 if self.fp:
416 self.fp.close()
416 self.fp.close()
417 self.fp = None
417 self.fp = None
418
418
419 def parsesplicemap(path):
419 def parsesplicemap(path):
420 """Parse a splicemap, return a child/parents dictionary."""
420 """Parse a splicemap, return a child/parents dictionary."""
421 if not path:
421 if not path:
422 return {}
422 return {}
423 m = {}
423 m = {}
424 try:
424 try:
425 fp = open(path, 'r')
425 fp = open(path, 'r')
426 for i, line in enumerate(fp):
426 for i, line in enumerate(fp):
427 line = line.splitlines()[0].rstrip()
427 line = line.splitlines()[0].rstrip()
428 if not line:
428 if not line:
429 # Ignore blank lines
429 # Ignore blank lines
430 continue
430 continue
431 try:
431 try:
432 child, parents = line.split(' ', 1)
432 child, parents = line.split(' ', 1)
433 parents = parents.replace(',', ' ').split()
433 parents = parents.replace(',', ' ').split()
434 except ValueError:
434 except ValueError:
435 raise util.Abort(_('syntax error in %s(%d): child parent1'
435 raise util.Abort(_('syntax error in %s(%d): child parent1'
436 '[,parent2] expected') % (path, i + 1))
436 '[,parent2] expected') % (path, i + 1))
437 pp = []
437 pp = []
438 for p in parents:
438 for p in parents:
439 if p not in pp:
439 if p not in pp:
440 pp.append(p)
440 pp.append(p)
441 m[child] = pp
441 m[child] = pp
442 except IOError, e:
442 except IOError, e:
443 if e.errno != errno.ENOENT:
443 if e.errno != errno.ENOENT:
444 raise
444 raise
445 return m
445 return m
@@ -1,217 +1,217 b''
1 # git.py - git support for the convert extension
1 # git.py - git support for the convert extension
2 #
2 #
3 # Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
3 # Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 import os
8 import os
9 from mercurial import util
9 from mercurial import util
10 from mercurial.node import hex, nullid
10 from mercurial.node import hex, nullid
11 from mercurial.i18n import _
11 from mercurial.i18n import _
12
12
13 from common import NoRepo, commit, converter_source, checktool
13 from common import NoRepo, commit, converter_source, checktool
14
14
class convert_git(converter_source):
    """Read history from a local git repository by shelling out to the
    command-line git tools."""
    # Windows does not support GIT_DIR= construct while other systems
    # cannot remove environment variable. Just assume none have
    # both issues.
    if util.safehasattr(os, 'unsetenv'):
        def gitopen(self, s, noerr=False):
            """Run git command s with GIT_DIR pointing at self.path and
            return its stdout stream.  With noerr, stderr is captured
            separately (and dropped) instead of leaking to the user."""
            prevgitdir = os.environ.get('GIT_DIR')
            os.environ['GIT_DIR'] = self.path
            try:
                if noerr:
                    (stdin, stdout, stderr) = util.popen3(s)
                    return stdout
                else:
                    return util.popen(s, 'rb')
            finally:
                # Restore the caller's environment whatever happens.
                if prevgitdir is None:
                    del os.environ['GIT_DIR']
                else:
                    os.environ['GIT_DIR'] = prevgitdir
    else:
        def gitopen(self, s, noerr=False):
            # No unsetenv: pass GIT_DIR on the command line instead.
            if noerr:
                (sin, so, se) = util.popen3('GIT_DIR=%s %s' % (self.path, s))
                return so
            else:
                return util.popen('GIT_DIR=%s %s' % (self.path, s), 'rb')

    def gitread(self, s):
        """Run git command s, returning (output, exit status)."""
        fh = self.gitopen(s)
        data = fh.read()
        return data, fh.close()

    def __init__(self, ui, path, rev=None):
        super(convert_git, self).__init__(ui, path, rev=rev)

        if os.path.isdir(path + "/.git"):
            path += "/.git"
        if not os.path.exists(path + "/objects"):
            raise NoRepo(_("%s does not look like a Git repository") % path)

        checktool('git', 'git')

        self.path = path

    def getheads(self):
        """Return the hex ids of the heads to convert: every branch and
        remote head, or only self.rev when one was requested."""
        if not self.rev:
            heads, ret = self.gitread('git rev-parse --branches --remotes')
            heads = heads.splitlines()
        else:
            heads, ret = self.gitread("git rev-parse --verify %s" % self.rev)
            heads = [heads[:-1]]
        if ret:
            raise util.Abort(_('cannot retrieve git heads'))
        return heads

    def catfile(self, rev, type):
        """Return the raw contents of the git object rev of the given
        type.  The null id is reported as IOError (converter protocol
        for a missing file)."""
        if rev == hex(nullid):
            raise IOError
        data, ret = self.gitread("git cat-file %s %s" % (type, rev))
        if ret:
            raise util.Abort(_('cannot read %r object at %s') % (type, rev))
        return data

    def getfile(self, name, rev):
        data = self.catfile(rev, "blob")
        # modecache is populated by the preceding getchanges() call.
        mode = self.modecache[(name, rev)]
        return data, mode

    def getchanges(self, version):
        """Return ([(file, blobid)...], {}) for version, recording each
        file's mode in self.modecache for later getfile() calls."""
        self.modecache = {}
        fh = self.gitopen("git diff-tree -z --root -m -r %s" % version)
        changes = []
        seen = set()
        entry = None
        # NUL-separated output alternates metadata entries and paths.
        for l in fh.read().split('\x00'):
            if not entry:
                if not l.startswith(':'):
                    continue
                entry = l
                continue
            f = l
            if f not in seen:
                seen.add(f)
                entry = entry.split()
                h = entry[3]
                if entry[1] == '160000':
                    raise util.Abort('git submodules are not supported!')
                p = (entry[1] == "100755")
                s = (entry[1] == "120000")
                self.modecache[(f, h)] = (p and "x") or (s and "l") or ""
                changes.append((f, h))
            entry = None
        if fh.close():
            raise util.Abort(_('cannot read changes in %s') % version)
        return (changes, {})

    def getcommit(self, version):
        """Build a commit object from the raw git commit at version."""
        c = self.catfile(version, "commit") # read the commit hash
        end = c.find("\n\n")
        message = c[end + 2:]
        message = self.recode(message)
        l = c[:end].splitlines()
        parents = []
        author = committer = None
        for e in l[1:]:
            n, v = e.split(" ", 1)
            if n == "author":
                p = v.split()
                tm, tz = p[-2:]
                author = " ".join(p[:-2])
                if author[0] == "<": author = author[1:-1]
                author = self.recode(author)
            if n == "committer":
                p = v.split()
                tm, tz = p[-2:]
                committer = " ".join(p[:-2])
                if committer[0] == "<": committer = committer[1:-1]
                committer = self.recode(committer)
            if n == "parent":
                parents.append(v)

        if committer and committer != author:
            message += "\ncommitter: %s\n" % committer
        # Convert a "+HHMM" style offset into seconds west of UTC.
        tzs, tzh, tzm = tz[-5:-4] + "1", tz[-4:-2], tz[-2:]
        tz = -int(tzs) * (int(tzh) * 3600 + int(tzm))
        date = tm + " " + str(tz)

        c = commit(parents=parents, date=date, author=author, desc=message,
                   rev=version)
        return c

    def gettags(self):
        """Return {tagname: node}, resolving annotated tags to the
        commit they point at."""
        tags = {}
        alltags = {}
        fh = self.gitopen('git ls-remote --tags "%s"' % self.path)
        prefix = 'refs/tags/'

        # Build complete list of tags, both annotated and bare ones
        for line in fh:
            line = line.strip()
            node, tag = line.split(None, 1)
            if not tag.startswith(prefix):
                continue
            alltags[tag[len(prefix):]] = node
        if fh.close():
            raise util.Abort(_('cannot read tags from %s') % self.path)

        # Filter out tag objects for annotated tag refs
        for tag in alltags:
            if tag.endswith('^{}'):
                tags[tag[:-3]] = alltags[tag]
            else:
                if tag + '^{}' in alltags:
                    continue
                else:
                    tags[tag] = alltags[tag]

        return tags

    def getchangedfiles(self, version, i):
        """Files changed by version relative to its i-th parent (all
        parents when i is None)."""
        changes = []
        if i is None:
            fh = self.gitopen("git diff-tree --root -m -r %s" % version)
            for l in fh:
                if "\t" not in l:
                    continue
                m, f = l[:-1].split("\t")
                changes.append(f)
        else:
            fh = self.gitopen('git diff-tree --name-only --root -r %s '
                              '"%s^%s" --' % (version, version, i + 1))
            changes = [f.rstrip('\n') for f in fh]
        if fh.close():
            raise util.Abort(_('cannot read changes in %s') % version)

        return changes

    def getbookmarks(self):
        """Return {name: rev} built from local and origin heads."""
        bookmarks = {}

        # Interesting references in git are prefixed
        prefix = 'refs/heads/'
        prefixlen = len(prefix)

        # factor two commands
        gitcmd = { 'remote/': 'git ls-remote --heads origin',
                   '': 'git show-ref'}

        # Origin heads
        for reftype in gitcmd:
            try:
                fh = self.gitopen(gitcmd[reftype], noerr=True)
                for line in fh:
                    line = line.strip()
                    rev, name = line.split(None, 1)
                    if not name.startswith(prefix):
                        continue
                    name = '%s%s' % (reftype, name[prefixlen:])
                    bookmarks[name] = rev
            except Exception:
                # Best effort: either command may fail (no origin, old
                # git); was a bare except, narrowed so ^C still works.
                pass

        return bookmarks
@@ -1,395 +1,395 b''
1 # hg.py - hg backend for convert extension
1 # hg.py - hg backend for convert extension
2 #
2 #
3 # Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
3 # Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 # Notes for hg->hg conversion:
8 # Notes for hg->hg conversion:
9 #
9 #
10 # * Old versions of Mercurial didn't trim the whitespace from the ends
10 # * Old versions of Mercurial didn't trim the whitespace from the ends
11 # of commit messages, but new versions do. Changesets created by
11 # of commit messages, but new versions do. Changesets created by
12 # those older versions, then converted, may thus have different
12 # those older versions, then converted, may thus have different
13 # hashes for changesets that are otherwise identical.
13 # hashes for changesets that are otherwise identical.
14 #
14 #
15 # * Using "--config convert.hg.saverev=true" will make the source
15 # * Using "--config convert.hg.saverev=true" will make the source
16 # identifier to be stored in the converted revision. This will cause
16 # identifier to be stored in the converted revision. This will cause
17 # the converted revision to have a different identity than the
17 # the converted revision to have a different identity than the
18 # source.
18 # source.
19
19
20
20
21 import os, time, cStringIO
21 import os, time, cStringIO
22 from mercurial.i18n import _
22 from mercurial.i18n import _
23 from mercurial.node import bin, hex, nullid
23 from mercurial.node import bin, hex, nullid
24 from mercurial import hg, util, context, bookmarks, error
24 from mercurial import hg, util, context, bookmarks, error
25
25
26 from common import NoRepo, commit, converter_source, converter_sink
26 from common import NoRepo, commit, converter_source, converter_sink
27
27
class mercurial_sink(converter_sink):
    """Write converted history into a (possibly new) local Mercurial
    repository."""
    def __init__(self, ui, path):
        converter_sink.__init__(self, ui, path)
        self.branchnames = ui.configbool('convert', 'hg.usebranchnames', True)
        self.clonebranches = ui.configbool('convert', 'hg.clonebranches', False)
        self.tagsbranch = ui.config('convert', 'hg.tagsbranch', 'default')
        self.lastbranch = None
        if os.path.isdir(path) and len(os.listdir(path)) > 0:
            try:
                self.repo = hg.repository(self.ui, path)
                if not self.repo.local():
                    raise NoRepo(_('%s is not a local Mercurial repository')
                                 % path)
            except error.RepoError as err:
                ui.traceback()
                raise NoRepo(err.args[0])
        else:
            try:
                ui.status(_('initializing destination %s repository\n') % path)
                self.repo = hg.repository(self.ui, path, create=True)
                if not self.repo.local():
                    raise NoRepo(_('%s is not a local Mercurial repository')
                                 % path)
                self.created.append(path)
            except error.RepoError:
                ui.traceback()
                raise NoRepo(_("could not create hg repository %s as sink")
                             % path)
        self.lock = None
        self.wlock = None
        self.filemapmode = False

    def before(self):
        self.ui.debug('run hg sink pre-conversion action\n')
        self.wlock = self.repo.wlock()
        self.lock = self.repo.lock()

    def after(self):
        self.ui.debug('run hg sink post-conversion action\n')
        if self.lock:
            self.lock.release()
        if self.wlock:
            self.wlock.release()

    def revmapfile(self):
        return self.repo.join("shamap")

    def authorfile(self):
        return self.repo.join("authormap")

    def getheads(self):
        h = self.repo.changelog.heads()
        return [hex(x) for x in h]

    def setbranch(self, branch, pbranches):
        """With clonebranches, switch self.repo to the per-branch clone
        for branch, creating it and pulling missing parents on demand."""
        if not self.clonebranches:
            return

        setbranch = (branch != self.lastbranch)
        self.lastbranch = branch
        if not branch:
            branch = 'default'
        pbranches = [(b[0], b[1] and b[1] or 'default') for b in pbranches]
        pbranch = pbranches and pbranches[0][1] or 'default'

        branchpath = os.path.join(self.path, branch)
        if setbranch:
            self.after()
            try:
                self.repo = hg.repository(self.ui, branchpath)
            except Exception:
                # Clone does not exist yet; was a bare except, narrowed
                # so KeyboardInterrupt is not swallowed.
                self.repo = hg.repository(self.ui, branchpath, create=True)
            self.before()

        # pbranches may bring revisions from other branches (merge parents)
        # Make sure we have them, or pull them.
        missings = {}
        for b in pbranches:
            try:
                self.repo.lookup(b[0])
            except Exception:
                # Unknown revision in this clone: remember where to
                # pull it from (was a bare except).
                missings.setdefault(b[1], []).append(b[0])

        if missings:
            self.after()
            for pbranch, heads in missings.iteritems():
                pbranchpath = os.path.join(self.path, pbranch)
                prepo = hg.peer(self.ui, {}, pbranchpath)
                self.ui.note(_('pulling from %s into %s\n') % (pbranch, branch))
                self.repo.pull(prepo, [prepo.lookup(h) for h in heads])
            self.before()

    def _rewritetags(self, source, revmap, data):
        """Rewrite a .hgtags blob, mapping source revisions through
        revmap and dropping entries that were not converted."""
        fp = cStringIO.StringIO()
        for line in data.splitlines():
            s = line.split(' ', 1)
            if len(s) != 2:
                continue
            revid = revmap.get(source.lookuprev(s[0]))
            if not revid:
                continue
            fp.write('%s %s\n' % (revid, s[1]))
        return fp.getvalue()

    def putcommit(self, files, copies, parents, commit, source, revmap):
        """Create one (or, for octopus merges, several) changesets from
        commit and return the resulting node."""
        files = dict(files)
        def getfilectx(repo, memctx, f):
            v = files[f]
            data, mode = source.getfile(f, v)
            if f == '.hgtags':
                data = self._rewritetags(source, revmap, data)
            return context.memfilectx(f, data, 'l' in mode, 'x' in mode,
                                      copies.get(f))

        pl = []
        for p in parents:
            if p not in pl:
                pl.append(p)
        parents = pl
        nparents = len(parents)
        if self.filemapmode and nparents == 1:
            m1node = self.repo.changelog.read(bin(parents[0]))[0]
            parent = parents[0]

        if len(parents) < 2:
            parents.append(nullid)
        if len(parents) < 2:
            parents.append(nullid)
        p2 = parents.pop(0)

        text = commit.desc
        extra = commit.extra.copy()
        if self.branchnames and commit.branch:
            extra['branch'] = commit.branch
        if commit.rev:
            extra['convert_revision'] = commit.rev

        # More than two parents: commit pairwise, chaining fixup
        # changesets (octopus merge emulation).
        while parents:
            p1 = p2
            p2 = parents.pop(0)
            ctx = context.memctx(self.repo, (p1, p2), text, files.keys(),
                                 getfilectx, commit.author, commit.date, extra)
            self.repo.commitctx(ctx)
            text = "(octopus merge fixup)\n"
            p2 = hex(self.repo.changelog.tip())

        if self.filemapmode and nparents == 1:
            man = self.repo.manifest
            mnode = self.repo.changelog.read(bin(p2))[0]
            closed = 'close' in commit.extra
            if not closed and not man.cmp(m1node, man.revision(mnode)):
                self.ui.status(_("filtering out empty revision\n"))
                self.repo.rollback(force=True)
                return parent
        return p2

    def puttags(self, tags):
        """Write tags into a .hgtags commit on self.tagsbranch; return
        (tip, tagparent) or (None, None) when nothing changed."""
        try:
            parentctx = self.repo[self.tagsbranch]
            tagparent = parentctx.node()
        except error.RepoError:
            parentctx = None
            tagparent = nullid

        try:
            oldlines = sorted(parentctx['.hgtags'].data().splitlines(True))
        except Exception:
            # No parent or no .hgtags yet; was a bare except, narrowed
            # so ^C is not swallowed.
            oldlines = []

        newlines = sorted([("%s %s\n" % (tags[tag], tag)) for tag in tags])
        if newlines == oldlines:
            return None, None
        data = "".join(newlines)
        def getfilectx(repo, memctx, f):
            return context.memfilectx(f, data, False, False, None)

        self.ui.status(_("updating tags\n"))
        date = "%s 0" % int(time.mktime(time.gmtime()))
        extra = {'branch': self.tagsbranch}
        ctx = context.memctx(self.repo, (tagparent, None), "update tags",
                             [".hgtags"], getfilectx, "convert-repo", date,
                             extra)
        self.repo.commitctx(ctx)
        return hex(self.repo.changelog.tip()), hex(tagparent)

    def setfilemapmode(self, active):
        self.filemapmode = active

    def putbookmarks(self, updatedbookmark):
        if not len(updatedbookmark):
            return

        self.ui.status(_("updating bookmarks\n"))
        for bookmark in updatedbookmark:
            self.repo._bookmarks[bookmark] = bin(updatedbookmark[bookmark])
        bookmarks.write(self.repo)

    def hascommit(self, rev):
        if rev not in self.repo and self.clonebranches:
            raise util.Abort(_('revision %s not found in destination '
                               'repository (lookups with clonebranches=true '
                               'are not implemented)') % rev)
        return rev in self.repo
232
232
233 class mercurial_source(converter_source):
233 class mercurial_source(converter_source):
234 def __init__(self, ui, path, rev=None):
234 def __init__(self, ui, path, rev=None):
235 converter_source.__init__(self, ui, path, rev)
235 converter_source.__init__(self, ui, path, rev)
236 self.ignoreerrors = ui.configbool('convert', 'hg.ignoreerrors', False)
236 self.ignoreerrors = ui.configbool('convert', 'hg.ignoreerrors', False)
237 self.ignored = set()
237 self.ignored = set()
238 self.saverev = ui.configbool('convert', 'hg.saverev', False)
238 self.saverev = ui.configbool('convert', 'hg.saverev', False)
239 try:
239 try:
240 self.repo = hg.repository(self.ui, path)
240 self.repo = hg.repository(self.ui, path)
241 # try to provoke an exception if this isn't really a hg
241 # try to provoke an exception if this isn't really a hg
242 # repo, but some other bogus compatible-looking url
242 # repo, but some other bogus compatible-looking url
243 if not self.repo.local():
243 if not self.repo.local():
244 raise error.RepoError()
244 raise error.RepoError
245 except error.RepoError:
245 except error.RepoError:
246 ui.traceback()
246 ui.traceback()
247 raise NoRepo(_("%s is not a local Mercurial repository") % path)
247 raise NoRepo(_("%s is not a local Mercurial repository") % path)
248 self.lastrev = None
248 self.lastrev = None
249 self.lastctx = None
249 self.lastctx = None
250 self._changescache = None
250 self._changescache = None
251 self.convertfp = None
251 self.convertfp = None
252 # Restrict converted revisions to startrev descendants
252 # Restrict converted revisions to startrev descendants
253 startnode = ui.config('convert', 'hg.startrev')
253 startnode = ui.config('convert', 'hg.startrev')
254 if startnode is not None:
254 if startnode is not None:
255 try:
255 try:
256 startnode = self.repo.lookup(startnode)
256 startnode = self.repo.lookup(startnode)
257 except error.RepoError:
257 except error.RepoError:
258 raise util.Abort(_('%s is not a valid start revision')
258 raise util.Abort(_('%s is not a valid start revision')
259 % startnode)
259 % startnode)
260 startrev = self.repo.changelog.rev(startnode)
260 startrev = self.repo.changelog.rev(startnode)
261 children = {startnode: 1}
261 children = {startnode: 1}
262 for rev in self.repo.changelog.descendants(startrev):
262 for rev in self.repo.changelog.descendants(startrev):
263 children[self.repo.changelog.node(rev)] = 1
263 children[self.repo.changelog.node(rev)] = 1
264 self.keep = children.__contains__
264 self.keep = children.__contains__
265 else:
265 else:
266 self.keep = util.always
266 self.keep = util.always
267
267
268 def changectx(self, rev):
268 def changectx(self, rev):
269 if self.lastrev != rev:
269 if self.lastrev != rev:
270 self.lastctx = self.repo[rev]
270 self.lastctx = self.repo[rev]
271 self.lastrev = rev
271 self.lastrev = rev
272 return self.lastctx
272 return self.lastctx
273
273
274 def parents(self, ctx):
274 def parents(self, ctx):
275 return [p for p in ctx.parents() if p and self.keep(p.node())]
275 return [p for p in ctx.parents() if p and self.keep(p.node())]
276
276
277 def getheads(self):
277 def getheads(self):
278 if self.rev:
278 if self.rev:
279 heads = [self.repo[self.rev].node()]
279 heads = [self.repo[self.rev].node()]
280 else:
280 else:
281 heads = self.repo.heads()
281 heads = self.repo.heads()
282 return [hex(h) for h in heads if self.keep(h)]
282 return [hex(h) for h in heads if self.keep(h)]
283
283
284 def getfile(self, name, rev):
284 def getfile(self, name, rev):
285 try:
285 try:
286 fctx = self.changectx(rev)[name]
286 fctx = self.changectx(rev)[name]
287 return fctx.data(), fctx.flags()
287 return fctx.data(), fctx.flags()
288 except error.LookupError, err:
288 except error.LookupError, err:
289 raise IOError(err)
289 raise IOError(err)
290
290
291 def getchanges(self, rev):
291 def getchanges(self, rev):
292 ctx = self.changectx(rev)
292 ctx = self.changectx(rev)
293 parents = self.parents(ctx)
293 parents = self.parents(ctx)
294 if not parents:
294 if not parents:
295 files = sorted(ctx.manifest())
295 files = sorted(ctx.manifest())
296 # getcopies() is not needed for roots, but it is a simple way to
296 # getcopies() is not needed for roots, but it is a simple way to
297 # detect missing revlogs and abort on errors or populate
297 # detect missing revlogs and abort on errors or populate
298 # self.ignored
298 # self.ignored
299 self.getcopies(ctx, parents, files)
299 self.getcopies(ctx, parents, files)
300 return [(f, rev) for f in files if f not in self.ignored], {}
300 return [(f, rev) for f in files if f not in self.ignored], {}
301 if self._changescache and self._changescache[0] == rev:
301 if self._changescache and self._changescache[0] == rev:
302 m, a, r = self._changescache[1]
302 m, a, r = self._changescache[1]
303 else:
303 else:
304 m, a, r = self.repo.status(parents[0].node(), ctx.node())[:3]
304 m, a, r = self.repo.status(parents[0].node(), ctx.node())[:3]
305 # getcopies() detects missing revlogs early, run it before
305 # getcopies() detects missing revlogs early, run it before
306 # filtering the changes.
306 # filtering the changes.
307 copies = self.getcopies(ctx, parents, m + a)
307 copies = self.getcopies(ctx, parents, m + a)
308 changes = [(name, rev) for name in m + a + r
308 changes = [(name, rev) for name in m + a + r
309 if name not in self.ignored]
309 if name not in self.ignored]
310 return sorted(changes), copies
310 return sorted(changes), copies
311
311
312 def getcopies(self, ctx, parents, files):
312 def getcopies(self, ctx, parents, files):
313 copies = {}
313 copies = {}
314 for name in files:
314 for name in files:
315 if name in self.ignored:
315 if name in self.ignored:
316 continue
316 continue
317 try:
317 try:
318 copysource, copynode = ctx.filectx(name).renamed()
318 copysource, copynode = ctx.filectx(name).renamed()
319 if copysource in self.ignored or not self.keep(copynode):
319 if copysource in self.ignored or not self.keep(copynode):
320 continue
320 continue
321 # Ignore copy sources not in parent revisions
321 # Ignore copy sources not in parent revisions
322 found = False
322 found = False
323 for p in parents:
323 for p in parents:
324 if copysource in p:
324 if copysource in p:
325 found = True
325 found = True
326 break
326 break
327 if not found:
327 if not found:
328 continue
328 continue
329 copies[name] = copysource
329 copies[name] = copysource
330 except TypeError:
330 except TypeError:
331 pass
331 pass
332 except error.LookupError, e:
332 except error.LookupError, e:
333 if not self.ignoreerrors:
333 if not self.ignoreerrors:
334 raise
334 raise
335 self.ignored.add(name)
335 self.ignored.add(name)
336 self.ui.warn(_('ignoring: %s\n') % e)
336 self.ui.warn(_('ignoring: %s\n') % e)
337 return copies
337 return copies
338
338
339 def getcommit(self, rev):
339 def getcommit(self, rev):
340 ctx = self.changectx(rev)
340 ctx = self.changectx(rev)
341 parents = [p.hex() for p in self.parents(ctx)]
341 parents = [p.hex() for p in self.parents(ctx)]
342 if self.saverev:
342 if self.saverev:
343 crev = rev
343 crev = rev
344 else:
344 else:
345 crev = None
345 crev = None
346 return commit(author=ctx.user(),
346 return commit(author=ctx.user(),
347 date=util.datestr(ctx.date(), '%Y-%m-%d %H:%M:%S %1%2'),
347 date=util.datestr(ctx.date(), '%Y-%m-%d %H:%M:%S %1%2'),
348 desc=ctx.description(), rev=crev, parents=parents,
348 desc=ctx.description(), rev=crev, parents=parents,
349 branch=ctx.branch(), extra=ctx.extra(),
349 branch=ctx.branch(), extra=ctx.extra(),
350 sortkey=ctx.rev())
350 sortkey=ctx.rev())
351
351
352 def gettags(self):
352 def gettags(self):
353 tags = [t for t in self.repo.tagslist() if t[0] != 'tip']
353 tags = [t for t in self.repo.tagslist() if t[0] != 'tip']
354 return dict([(name, hex(node)) for name, node in tags
354 return dict([(name, hex(node)) for name, node in tags
355 if self.keep(node)])
355 if self.keep(node)])
356
356
357 def getchangedfiles(self, rev, i):
357 def getchangedfiles(self, rev, i):
358 ctx = self.changectx(rev)
358 ctx = self.changectx(rev)
359 parents = self.parents(ctx)
359 parents = self.parents(ctx)
360 if not parents and i is None:
360 if not parents and i is None:
361 i = 0
361 i = 0
362 changes = [], ctx.manifest().keys(), []
362 changes = [], ctx.manifest().keys(), []
363 else:
363 else:
364 i = i or 0
364 i = i or 0
365 changes = self.repo.status(parents[i].node(), ctx.node())[:3]
365 changes = self.repo.status(parents[i].node(), ctx.node())[:3]
366 changes = [[f for f in l if f not in self.ignored] for l in changes]
366 changes = [[f for f in l if f not in self.ignored] for l in changes]
367
367
368 if i == 0:
368 if i == 0:
369 self._changescache = (rev, changes)
369 self._changescache = (rev, changes)
370
370
371 return changes[0] + changes[1] + changes[2]
371 return changes[0] + changes[1] + changes[2]
372
372
373 def converted(self, rev, destrev):
373 def converted(self, rev, destrev):
374 if self.convertfp is None:
374 if self.convertfp is None:
375 self.convertfp = open(self.repo.join('shamap'), 'a')
375 self.convertfp = open(self.repo.join('shamap'), 'a')
376 self.convertfp.write('%s %s\n' % (destrev, rev))
376 self.convertfp.write('%s %s\n' % (destrev, rev))
377 self.convertfp.flush()
377 self.convertfp.flush()
378
378
379 def before(self):
379 def before(self):
380 self.ui.debug('run hg source pre-conversion action\n')
380 self.ui.debug('run hg source pre-conversion action\n')
381
381
382 def after(self):
382 def after(self):
383 self.ui.debug('run hg source post-conversion action\n')
383 self.ui.debug('run hg source post-conversion action\n')
384
384
385 def hasnativeorder(self):
385 def hasnativeorder(self):
386 return True
386 return True
387
387
388 def lookuprev(self, rev):
388 def lookuprev(self, rev):
389 try:
389 try:
390 return hex(self.repo.lookup(rev))
390 return hex(self.repo.lookup(rev))
391 except error.RepoError:
391 except error.RepoError:
392 return None
392 return None
393
393
394 def getbookmarks(self):
394 def getbookmarks(self):
395 return bookmarks.listbookmarks(self.repo)
395 return bookmarks.listbookmarks(self.repo)
@@ -1,360 +1,360 b''
1 # monotone.py - monotone support for the convert extension
1 # monotone.py - monotone support for the convert extension
2 #
2 #
3 # Copyright 2008, 2009 Mikkel Fahnoe Jorgensen <mikkel@dvide.com> and
3 # Copyright 2008, 2009 Mikkel Fahnoe Jorgensen <mikkel@dvide.com> and
4 # others
4 # others
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 import os, re
9 import os, re
10 from mercurial import util
10 from mercurial import util
11 from common import NoRepo, commit, converter_source, checktool
11 from common import NoRepo, commit, converter_source, checktool
12 from common import commandline
12 from common import commandline
13 from mercurial.i18n import _
13 from mercurial.i18n import _
14
14
15 class monotone_source(converter_source, commandline):
15 class monotone_source(converter_source, commandline):
16 def __init__(self, ui, path=None, rev=None):
16 def __init__(self, ui, path=None, rev=None):
17 converter_source.__init__(self, ui, path, rev)
17 converter_source.__init__(self, ui, path, rev)
18 commandline.__init__(self, ui, 'mtn')
18 commandline.__init__(self, ui, 'mtn')
19
19
20 self.ui = ui
20 self.ui = ui
21 self.path = path
21 self.path = path
22 self.automatestdio = False
22 self.automatestdio = False
23 self.rev = rev
23 self.rev = rev
24
24
25 norepo = NoRepo(_("%s does not look like a monotone repository")
25 norepo = NoRepo(_("%s does not look like a monotone repository")
26 % path)
26 % path)
27 if not os.path.exists(os.path.join(path, '_MTN')):
27 if not os.path.exists(os.path.join(path, '_MTN')):
28 # Could be a monotone repository (SQLite db file)
28 # Could be a monotone repository (SQLite db file)
29 try:
29 try:
30 f = file(path, 'rb')
30 f = file(path, 'rb')
31 header = f.read(16)
31 header = f.read(16)
32 f.close()
32 f.close()
33 except:
33 except:
34 header = ''
34 header = ''
35 if header != 'SQLite format 3\x00':
35 if header != 'SQLite format 3\x00':
36 raise norepo
36 raise norepo
37
37
38 # regular expressions for parsing monotone output
38 # regular expressions for parsing monotone output
39 space = r'\s*'
39 space = r'\s*'
40 name = r'\s+"((?:\\"|[^"])*)"\s*'
40 name = r'\s+"((?:\\"|[^"])*)"\s*'
41 value = name
41 value = name
42 revision = r'\s+\[(\w+)\]\s*'
42 revision = r'\s+\[(\w+)\]\s*'
43 lines = r'(?:.|\n)+'
43 lines = r'(?:.|\n)+'
44
44
45 self.dir_re = re.compile(space + "dir" + name)
45 self.dir_re = re.compile(space + "dir" + name)
46 self.file_re = re.compile(space + "file" + name +
46 self.file_re = re.compile(space + "file" + name +
47 "content" + revision)
47 "content" + revision)
48 self.add_file_re = re.compile(space + "add_file" + name +
48 self.add_file_re = re.compile(space + "add_file" + name +
49 "content" + revision)
49 "content" + revision)
50 self.patch_re = re.compile(space + "patch" + name +
50 self.patch_re = re.compile(space + "patch" + name +
51 "from" + revision + "to" + revision)
51 "from" + revision + "to" + revision)
52 self.rename_re = re.compile(space + "rename" + name + "to" + name)
52 self.rename_re = re.compile(space + "rename" + name + "to" + name)
53 self.delete_re = re.compile(space + "delete" + name)
53 self.delete_re = re.compile(space + "delete" + name)
54 self.tag_re = re.compile(space + "tag" + name + "revision" +
54 self.tag_re = re.compile(space + "tag" + name + "revision" +
55 revision)
55 revision)
56 self.cert_re = re.compile(lines + space + "name" + name +
56 self.cert_re = re.compile(lines + space + "name" + name +
57 "value" + value)
57 "value" + value)
58
58
59 attr = space + "file" + lines + space + "attr" + space
59 attr = space + "file" + lines + space + "attr" + space
60 self.attr_execute_re = re.compile(attr + '"mtn:execute"' +
60 self.attr_execute_re = re.compile(attr + '"mtn:execute"' +
61 space + '"true"')
61 space + '"true"')
62
62
63 # cached data
63 # cached data
64 self.manifest_rev = None
64 self.manifest_rev = None
65 self.manifest = None
65 self.manifest = None
66 self.files = None
66 self.files = None
67 self.dirs = None
67 self.dirs = None
68
68
69 checktool('mtn', abort=False)
69 checktool('mtn', abort=False)
70
70
71 def mtnrun(self, *args, **kwargs):
71 def mtnrun(self, *args, **kwargs):
72 if self.automatestdio:
72 if self.automatestdio:
73 return self.mtnrunstdio(*args, **kwargs)
73 return self.mtnrunstdio(*args, **kwargs)
74 else:
74 else:
75 return self.mtnrunsingle(*args, **kwargs)
75 return self.mtnrunsingle(*args, **kwargs)
76
76
77 def mtnrunsingle(self, *args, **kwargs):
77 def mtnrunsingle(self, *args, **kwargs):
78 kwargs['d'] = self.path
78 kwargs['d'] = self.path
79 return self.run0('automate', *args, **kwargs)
79 return self.run0('automate', *args, **kwargs)
80
80
81 def mtnrunstdio(self, *args, **kwargs):
81 def mtnrunstdio(self, *args, **kwargs):
82 # Prepare the command in automate stdio format
82 # Prepare the command in automate stdio format
83 command = []
83 command = []
84 for k, v in kwargs.iteritems():
84 for k, v in kwargs.iteritems():
85 command.append("%s:%s" % (len(k), k))
85 command.append("%s:%s" % (len(k), k))
86 if v:
86 if v:
87 command.append("%s:%s" % (len(v), v))
87 command.append("%s:%s" % (len(v), v))
88 if command:
88 if command:
89 command.insert(0, 'o')
89 command.insert(0, 'o')
90 command.append('e')
90 command.append('e')
91
91
92 command.append('l')
92 command.append('l')
93 for arg in args:
93 for arg in args:
94 command += "%s:%s" % (len(arg), arg)
94 command += "%s:%s" % (len(arg), arg)
95 command.append('e')
95 command.append('e')
96 command = ''.join(command)
96 command = ''.join(command)
97
97
98 self.ui.debug("mtn: sending '%s'\n" % command)
98 self.ui.debug("mtn: sending '%s'\n" % command)
99 self.mtnwritefp.write(command)
99 self.mtnwritefp.write(command)
100 self.mtnwritefp.flush()
100 self.mtnwritefp.flush()
101
101
102 return self.mtnstdioreadcommandoutput(command)
102 return self.mtnstdioreadcommandoutput(command)
103
103
104 def mtnstdioreadpacket(self):
104 def mtnstdioreadpacket(self):
105 read = None
105 read = None
106 commandnbr = ''
106 commandnbr = ''
107 while read != ':':
107 while read != ':':
108 read = self.mtnreadfp.read(1)
108 read = self.mtnreadfp.read(1)
109 if not read:
109 if not read:
110 raise util.Abort(_('bad mtn packet - no end of commandnbr'))
110 raise util.Abort(_('bad mtn packet - no end of commandnbr'))
111 commandnbr += read
111 commandnbr += read
112 commandnbr = commandnbr[:-1]
112 commandnbr = commandnbr[:-1]
113
113
114 stream = self.mtnreadfp.read(1)
114 stream = self.mtnreadfp.read(1)
115 if stream not in 'mewptl':
115 if stream not in 'mewptl':
116 raise util.Abort(_('bad mtn packet - bad stream type %s') % stream)
116 raise util.Abort(_('bad mtn packet - bad stream type %s') % stream)
117
117
118 read = self.mtnreadfp.read(1)
118 read = self.mtnreadfp.read(1)
119 if read != ':':
119 if read != ':':
120 raise util.Abort(_('bad mtn packet - no divider before size'))
120 raise util.Abort(_('bad mtn packet - no divider before size'))
121
121
122 read = None
122 read = None
123 lengthstr = ''
123 lengthstr = ''
124 while read != ':':
124 while read != ':':
125 read = self.mtnreadfp.read(1)
125 read = self.mtnreadfp.read(1)
126 if not read:
126 if not read:
127 raise util.Abort(_('bad mtn packet - no end of packet size'))
127 raise util.Abort(_('bad mtn packet - no end of packet size'))
128 lengthstr += read
128 lengthstr += read
129 try:
129 try:
130 length = long(lengthstr[:-1])
130 length = long(lengthstr[:-1])
131 except TypeError:
131 except TypeError:
132 raise util.Abort(_('bad mtn packet - bad packet size %s')
132 raise util.Abort(_('bad mtn packet - bad packet size %s')
133 % lengthstr)
133 % lengthstr)
134
134
135 read = self.mtnreadfp.read(length)
135 read = self.mtnreadfp.read(length)
136 if len(read) != length:
136 if len(read) != length:
137 raise util.Abort(_("bad mtn packet - unable to read full packet "
137 raise util.Abort(_("bad mtn packet - unable to read full packet "
138 "read %s of %s") % (len(read), length))
138 "read %s of %s") % (len(read), length))
139
139
140 return (commandnbr, stream, length, read)
140 return (commandnbr, stream, length, read)
141
141
142 def mtnstdioreadcommandoutput(self, command):
142 def mtnstdioreadcommandoutput(self, command):
143 retval = []
143 retval = []
144 while True:
144 while True:
145 commandnbr, stream, length, output = self.mtnstdioreadpacket()
145 commandnbr, stream, length, output = self.mtnstdioreadpacket()
146 self.ui.debug('mtn: read packet %s:%s:%s\n' %
146 self.ui.debug('mtn: read packet %s:%s:%s\n' %
147 (commandnbr, stream, length))
147 (commandnbr, stream, length))
148
148
149 if stream == 'l':
149 if stream == 'l':
150 # End of command
150 # End of command
151 if output != '0':
151 if output != '0':
152 raise util.Abort(_("mtn command '%s' returned %s") %
152 raise util.Abort(_("mtn command '%s' returned %s") %
153 (command, output))
153 (command, output))
154 break
154 break
155 elif stream in 'ew':
155 elif stream in 'ew':
156 # Error, warning output
156 # Error, warning output
157 self.ui.warn(_('%s error:\n') % self.command)
157 self.ui.warn(_('%s error:\n') % self.command)
158 self.ui.warn(output)
158 self.ui.warn(output)
159 elif stream == 'p':
159 elif stream == 'p':
160 # Progress messages
160 # Progress messages
161 self.ui.debug('mtn: ' + output)
161 self.ui.debug('mtn: ' + output)
162 elif stream == 'm':
162 elif stream == 'm':
163 # Main stream - command output
163 # Main stream - command output
164 retval.append(output)
164 retval.append(output)
165
165
166 return ''.join(retval)
166 return ''.join(retval)
167
167
168 def mtnloadmanifest(self, rev):
168 def mtnloadmanifest(self, rev):
169 if self.manifest_rev == rev:
169 if self.manifest_rev == rev:
170 return
170 return
171 self.manifest = self.mtnrun("get_manifest_of", rev).split("\n\n")
171 self.manifest = self.mtnrun("get_manifest_of", rev).split("\n\n")
172 self.manifest_rev = rev
172 self.manifest_rev = rev
173 self.files = {}
173 self.files = {}
174 self.dirs = {}
174 self.dirs = {}
175
175
176 for e in self.manifest:
176 for e in self.manifest:
177 m = self.file_re.match(e)
177 m = self.file_re.match(e)
178 if m:
178 if m:
179 attr = ""
179 attr = ""
180 name = m.group(1)
180 name = m.group(1)
181 node = m.group(2)
181 node = m.group(2)
182 if self.attr_execute_re.match(e):
182 if self.attr_execute_re.match(e):
183 attr += "x"
183 attr += "x"
184 self.files[name] = (node, attr)
184 self.files[name] = (node, attr)
185 m = self.dir_re.match(e)
185 m = self.dir_re.match(e)
186 if m:
186 if m:
187 self.dirs[m.group(1)] = True
187 self.dirs[m.group(1)] = True
188
188
189 def mtnisfile(self, name, rev):
189 def mtnisfile(self, name, rev):
190 # a non-file could be a directory or a deleted or renamed file
190 # a non-file could be a directory or a deleted or renamed file
191 self.mtnloadmanifest(rev)
191 self.mtnloadmanifest(rev)
192 return name in self.files
192 return name in self.files
193
193
194 def mtnisdir(self, name, rev):
194 def mtnisdir(self, name, rev):
195 self.mtnloadmanifest(rev)
195 self.mtnloadmanifest(rev)
196 return name in self.dirs
196 return name in self.dirs
197
197
198 def mtngetcerts(self, rev):
198 def mtngetcerts(self, rev):
199 certs = {"author":"<missing>", "date":"<missing>",
199 certs = {"author":"<missing>", "date":"<missing>",
200 "changelog":"<missing>", "branch":"<missing>"}
200 "changelog":"<missing>", "branch":"<missing>"}
201 certlist = self.mtnrun("certs", rev)
201 certlist = self.mtnrun("certs", rev)
202 # mtn < 0.45:
202 # mtn < 0.45:
203 # key "test@selenic.com"
203 # key "test@selenic.com"
204 # mtn >= 0.45:
204 # mtn >= 0.45:
205 # key [ff58a7ffb771907c4ff68995eada1c4da068d328]
205 # key [ff58a7ffb771907c4ff68995eada1c4da068d328]
206 certlist = re.split('\n\n key ["\[]', certlist)
206 certlist = re.split('\n\n key ["\[]', certlist)
207 for e in certlist:
207 for e in certlist:
208 m = self.cert_re.match(e)
208 m = self.cert_re.match(e)
209 if m:
209 if m:
210 name, value = m.groups()
210 name, value = m.groups()
211 value = value.replace(r'\"', '"')
211 value = value.replace(r'\"', '"')
212 value = value.replace(r'\\', '\\')
212 value = value.replace(r'\\', '\\')
213 certs[name] = value
213 certs[name] = value
214 # Monotone may have subsecond dates: 2005-02-05T09:39:12.364306
214 # Monotone may have subsecond dates: 2005-02-05T09:39:12.364306
215 # and all times are stored in UTC
215 # and all times are stored in UTC
216 certs["date"] = certs["date"].split('.')[0] + " UTC"
216 certs["date"] = certs["date"].split('.')[0] + " UTC"
217 return certs
217 return certs
218
218
219 # implement the converter_source interface:
219 # implement the converter_source interface:
220
220
221 def getheads(self):
221 def getheads(self):
222 if not self.rev:
222 if not self.rev:
223 return self.mtnrun("leaves").splitlines()
223 return self.mtnrun("leaves").splitlines()
224 else:
224 else:
225 return [self.rev]
225 return [self.rev]
226
226
227 def getchanges(self, rev):
227 def getchanges(self, rev):
228 #revision = self.mtncmd("get_revision %s" % rev).split("\n\n")
228 #revision = self.mtncmd("get_revision %s" % rev).split("\n\n")
229 revision = self.mtnrun("get_revision", rev).split("\n\n")
229 revision = self.mtnrun("get_revision", rev).split("\n\n")
230 files = {}
230 files = {}
231 ignoremove = {}
231 ignoremove = {}
232 renameddirs = []
232 renameddirs = []
233 copies = {}
233 copies = {}
234 for e in revision:
234 for e in revision:
235 m = self.add_file_re.match(e)
235 m = self.add_file_re.match(e)
236 if m:
236 if m:
237 files[m.group(1)] = rev
237 files[m.group(1)] = rev
238 ignoremove[m.group(1)] = rev
238 ignoremove[m.group(1)] = rev
239 m = self.patch_re.match(e)
239 m = self.patch_re.match(e)
240 if m:
240 if m:
241 files[m.group(1)] = rev
241 files[m.group(1)] = rev
242 # Delete/rename is handled later when the convert engine
242 # Delete/rename is handled later when the convert engine
243 # discovers an IOError exception from getfile,
243 # discovers an IOError exception from getfile,
244 # but only if we add the "from" file to the list of changes.
244 # but only if we add the "from" file to the list of changes.
245 m = self.delete_re.match(e)
245 m = self.delete_re.match(e)
246 if m:
246 if m:
247 files[m.group(1)] = rev
247 files[m.group(1)] = rev
248 m = self.rename_re.match(e)
248 m = self.rename_re.match(e)
249 if m:
249 if m:
250 toname = m.group(2)
250 toname = m.group(2)
251 fromname = m.group(1)
251 fromname = m.group(1)
252 if self.mtnisfile(toname, rev):
252 if self.mtnisfile(toname, rev):
253 ignoremove[toname] = 1
253 ignoremove[toname] = 1
254 copies[toname] = fromname
254 copies[toname] = fromname
255 files[toname] = rev
255 files[toname] = rev
256 files[fromname] = rev
256 files[fromname] = rev
257 elif self.mtnisdir(toname, rev):
257 elif self.mtnisdir(toname, rev):
258 renameddirs.append((fromname, toname))
258 renameddirs.append((fromname, toname))
259
259
260 # Directory renames can be handled only once we have recorded
260 # Directory renames can be handled only once we have recorded
261 # all new files
261 # all new files
262 for fromdir, todir in renameddirs:
262 for fromdir, todir in renameddirs:
263 renamed = {}
263 renamed = {}
264 for tofile in self.files:
264 for tofile in self.files:
265 if tofile in ignoremove:
265 if tofile in ignoremove:
266 continue
266 continue
267 if tofile.startswith(todir + '/'):
267 if tofile.startswith(todir + '/'):
268 renamed[tofile] = fromdir + tofile[len(todir):]
268 renamed[tofile] = fromdir + tofile[len(todir):]
269 # Avoid chained moves like:
269 # Avoid chained moves like:
270 # d1(/a) => d3/d1(/a)
270 # d1(/a) => d3/d1(/a)
271 # d2 => d3
271 # d2 => d3
272 ignoremove[tofile] = 1
272 ignoremove[tofile] = 1
273 for tofile, fromfile in renamed.items():
273 for tofile, fromfile in renamed.items():
274 self.ui.debug (_("copying file in renamed directory "
274 self.ui.debug (_("copying file in renamed directory "
275 "from '%s' to '%s'")
275 "from '%s' to '%s'")
276 % (fromfile, tofile), '\n')
276 % (fromfile, tofile), '\n')
277 files[tofile] = rev
277 files[tofile] = rev
278 copies[tofile] = fromfile
278 copies[tofile] = fromfile
279 for fromfile in renamed.values():
279 for fromfile in renamed.values():
280 files[fromfile] = rev
280 files[fromfile] = rev
281
281
282 return (files.items(), copies)
282 return (files.items(), copies)
283
283
284 def getfile(self, name, rev):
284 def getfile(self, name, rev):
285 if not self.mtnisfile(name, rev):
285 if not self.mtnisfile(name, rev):
286 raise IOError() # file was deleted or renamed
286 raise IOError # file was deleted or renamed
287 try:
287 try:
288 data = self.mtnrun("get_file_of", name, r=rev)
288 data = self.mtnrun("get_file_of", name, r=rev)
289 except:
289 except:
290 raise IOError() # file was deleted or renamed
290 raise IOError # file was deleted or renamed
291 self.mtnloadmanifest(rev)
291 self.mtnloadmanifest(rev)
292 node, attr = self.files.get(name, (None, ""))
292 node, attr = self.files.get(name, (None, ""))
293 return data, attr
293 return data, attr
294
294
295 def getcommit(self, rev):
295 def getcommit(self, rev):
296 extra = {}
296 extra = {}
297 certs = self.mtngetcerts(rev)
297 certs = self.mtngetcerts(rev)
298 if certs.get('suspend') == certs["branch"]:
298 if certs.get('suspend') == certs["branch"]:
299 extra['close'] = '1'
299 extra['close'] = '1'
300 return commit(
300 return commit(
301 author=certs["author"],
301 author=certs["author"],
302 date=util.datestr(util.strdate(certs["date"], "%Y-%m-%dT%H:%M:%S")),
302 date=util.datestr(util.strdate(certs["date"], "%Y-%m-%dT%H:%M:%S")),
303 desc=certs["changelog"],
303 desc=certs["changelog"],
304 rev=rev,
304 rev=rev,
305 parents=self.mtnrun("parents", rev).splitlines(),
305 parents=self.mtnrun("parents", rev).splitlines(),
306 branch=certs["branch"],
306 branch=certs["branch"],
307 extra=extra)
307 extra=extra)
308
308
309 def gettags(self):
309 def gettags(self):
310 tags = {}
310 tags = {}
311 for e in self.mtnrun("tags").split("\n\n"):
311 for e in self.mtnrun("tags").split("\n\n"):
312 m = self.tag_re.match(e)
312 m = self.tag_re.match(e)
313 if m:
313 if m:
314 tags[m.group(1)] = m.group(2)
314 tags[m.group(1)] = m.group(2)
315 return tags
315 return tags
316
316
317 def getchangedfiles(self, rev, i):
317 def getchangedfiles(self, rev, i):
318 # This function is only needed to support --filemap
318 # This function is only needed to support --filemap
319 # ... and we don't support that
319 # ... and we don't support that
320 raise NotImplementedError()
320 raise NotImplementedError
321
321
322 def before(self):
322 def before(self):
323 # Check if we have a new enough version to use automate stdio
323 # Check if we have a new enough version to use automate stdio
324 version = 0.0
324 version = 0.0
325 try:
325 try:
326 versionstr = self.mtnrunsingle("interface_version")
326 versionstr = self.mtnrunsingle("interface_version")
327 version = float(versionstr)
327 version = float(versionstr)
328 except Exception:
328 except Exception:
329 raise util.Abort(_("unable to determine mtn automate interface "
329 raise util.Abort(_("unable to determine mtn automate interface "
330 "version"))
330 "version"))
331
331
332 if version >= 12.0:
332 if version >= 12.0:
333 self.automatestdio = True
333 self.automatestdio = True
334 self.ui.debug("mtn automate version %s - using automate stdio\n" %
334 self.ui.debug("mtn automate version %s - using automate stdio\n" %
335 version)
335 version)
336
336
337 # launch the long-running automate stdio process
337 # launch the long-running automate stdio process
338 self.mtnwritefp, self.mtnreadfp = self._run2('automate', 'stdio',
338 self.mtnwritefp, self.mtnreadfp = self._run2('automate', 'stdio',
339 '-d', self.path)
339 '-d', self.path)
340 # read the headers
340 # read the headers
341 read = self.mtnreadfp.readline()
341 read = self.mtnreadfp.readline()
342 if read != 'format-version: 2\n':
342 if read != 'format-version: 2\n':
343 raise util.Abort(_('mtn automate stdio header unexpected: %s')
343 raise util.Abort(_('mtn automate stdio header unexpected: %s')
344 % read)
344 % read)
345 while read != '\n':
345 while read != '\n':
346 read = self.mtnreadfp.readline()
346 read = self.mtnreadfp.readline()
347 if not read:
347 if not read:
348 raise util.Abort(_("failed to reach end of mtn automate "
348 raise util.Abort(_("failed to reach end of mtn automate "
349 "stdio headers"))
349 "stdio headers"))
350 else:
350 else:
351 self.ui.debug("mtn automate version %s - not using automate stdio "
351 self.ui.debug("mtn automate version %s - not using automate stdio "
352 "(automate >= 12.0 - mtn >= 0.46 is needed)\n" % version)
352 "(automate >= 12.0 - mtn >= 0.46 is needed)\n" % version)
353
353
354 def after(self):
354 def after(self):
355 if self.automatestdio:
355 if self.automatestdio:
356 self.mtnwritefp.close()
356 self.mtnwritefp.close()
357 self.mtnwritefp = None
357 self.mtnwritefp = None
358 self.mtnreadfp.close()
358 self.mtnreadfp.close()
359 self.mtnreadfp = None
359 self.mtnreadfp = None
360
360
@@ -1,1252 +1,1252 b''
1 # Subversion 1.4/1.5 Python API backend
1 # Subversion 1.4/1.5 Python API backend
2 #
2 #
3 # Copyright(C) 2007 Daniel Holth et al
3 # Copyright(C) 2007 Daniel Holth et al
4
4
5 import os, re, sys, tempfile, urllib, urllib2, xml.dom.minidom
5 import os, re, sys, tempfile, urllib, urllib2, xml.dom.minidom
6 import cPickle as pickle
6 import cPickle as pickle
7
7
8 from mercurial import strutil, scmutil, util, encoding
8 from mercurial import strutil, scmutil, util, encoding
9 from mercurial.i18n import _
9 from mercurial.i18n import _
10
10
11 propertycache = util.propertycache
11 propertycache = util.propertycache
12
12
13 # Subversion stuff. Works best with very recent Python SVN bindings
13 # Subversion stuff. Works best with very recent Python SVN bindings
14 # e.g. SVN 1.5 or backports. Thanks to the bzr folks for enhancing
14 # e.g. SVN 1.5 or backports. Thanks to the bzr folks for enhancing
15 # these bindings.
15 # these bindings.
16
16
17 from cStringIO import StringIO
17 from cStringIO import StringIO
18
18
19 from common import NoRepo, MissingTool, commit, encodeargs, decodeargs
19 from common import NoRepo, MissingTool, commit, encodeargs, decodeargs
20 from common import commandline, converter_source, converter_sink, mapfile
20 from common import commandline, converter_source, converter_sink, mapfile
21
21
22 try:
22 try:
23 from svn.core import SubversionException, Pool
23 from svn.core import SubversionException, Pool
24 import svn
24 import svn
25 import svn.client
25 import svn.client
26 import svn.core
26 import svn.core
27 import svn.ra
27 import svn.ra
28 import svn.delta
28 import svn.delta
29 import transport
29 import transport
30 import warnings
30 import warnings
31 warnings.filterwarnings('ignore',
31 warnings.filterwarnings('ignore',
32 module='svn.core',
32 module='svn.core',
33 category=DeprecationWarning)
33 category=DeprecationWarning)
34
34
35 except ImportError:
35 except ImportError:
36 svn = None
36 svn = None
37
37
class SvnPathNotFound(Exception):
    """Raised when a path cannot be found in the Subversion repository."""
    pass
40
40
def revsplit(rev):
    """Parse a revision string and return (uuid, path, revnum)."""
    base, num = rev.rsplit('@', 1)
    head, sep, tail = base.partition('/')
    # Module path keeps its leading slash; empty when there is no path part.
    module = sep + tail if sep else ''
    # The uuid carries a 4-character scheme prefix ('svn:'); strip it.
    return head[4:], module, int(num)
49
49
def quote(s):
    """URL-encode *s* the way Subversion does, for splicing into svn URLs.

    As of svn 1.7, many svn calls expect "canonical" paths.  In theory
    we should call svn.core.*canonicalize() on all paths before passing
    them to the API; instead, we assume the base url is canonical and
    copy the behaviour of svn's URL encoding function so we can extend
    it safely with new components.  The "safe" characters were taken
    from the "svn_uri__char_validity" table in libsvn_subr/path.c.
    """
    safe = "!$&'()*+,-./:=@_~"
    return urllib.quote(s, safe)
59
59
def geturl(path):
    """Return a canonical Subversion URL for *path* (a URL or local dir)."""
    try:
        return svn.client.url_from_path(svn.core.svn_path_canonicalize(path))
    except SubversionException:
        # svn.client.url_from_path() fails with local repositories
        pass
    if os.path.isdir(path):
        abspath = os.path.normpath(os.path.abspath(path))
        if os.name == 'nt':
            abspath = '/' + util.normpath(abspath)
        # The module URL is later compared with the repository URL
        # returned by the svn API, which is UTF-8.
        path = 'file://%s' % quote(encoding.tolocal(abspath))
    return svn.core.svn_path_canonicalize(path)
75
75
def optrev(number):
    """Wrap an integer revision *number* in an svn_opt_revision_t."""
    rev = svn.core.svn_opt_revision_t()
    rev.kind = svn.core.svn_opt_revision_number
    rev.value.number = number
    return rev
81
81
class changedpath(object):
    """Detached copy of an svn changed-path entry.

    Snapshots the fields we care about so the object survives the
    svn-managed memory pool it came from (needed before pickling).
    """
    def __init__(self, p):
        for name in ('copyfrom_path', 'copyfrom_rev', 'action'):
            setattr(self, name, getattr(p, name))
87
87
88 def get_log_child(fp, url, paths, start, end, limit=0,
88 def get_log_child(fp, url, paths, start, end, limit=0,
89 discover_changed_paths=True, strict_node_history=False):
89 discover_changed_paths=True, strict_node_history=False):
90 protocol = -1
90 protocol = -1
91 def receiver(orig_paths, revnum, author, date, message, pool):
91 def receiver(orig_paths, revnum, author, date, message, pool):
92 if orig_paths is not None:
92 if orig_paths is not None:
93 for k, v in orig_paths.iteritems():
93 for k, v in orig_paths.iteritems():
94 orig_paths[k] = changedpath(v)
94 orig_paths[k] = changedpath(v)
95 pickle.dump((orig_paths, revnum, author, date, message),
95 pickle.dump((orig_paths, revnum, author, date, message),
96 fp, protocol)
96 fp, protocol)
97
97
98 try:
98 try:
99 # Use an ra of our own so that our parent can consume
99 # Use an ra of our own so that our parent can consume
100 # our results without confusing the server.
100 # our results without confusing the server.
101 t = transport.SvnRaTransport(url=url)
101 t = transport.SvnRaTransport(url=url)
102 svn.ra.get_log(t.ra, paths, start, end, limit,
102 svn.ra.get_log(t.ra, paths, start, end, limit,
103 discover_changed_paths,
103 discover_changed_paths,
104 strict_node_history,
104 strict_node_history,
105 receiver)
105 receiver)
106 except IOError:
106 except IOError:
107 # Caller may interrupt the iteration
107 # Caller may interrupt the iteration
108 pickle.dump(None, fp, protocol)
108 pickle.dump(None, fp, protocol)
109 except Exception, inst:
109 except Exception, inst:
110 pickle.dump(str(inst), fp, protocol)
110 pickle.dump(str(inst), fp, protocol)
111 else:
111 else:
112 pickle.dump(None, fp, protocol)
112 pickle.dump(None, fp, protocol)
113 fp.close()
113 fp.close()
114 # With large history, cleanup process goes crazy and suddenly
114 # With large history, cleanup process goes crazy and suddenly
115 # consumes *huge* amount of memory. The output file being closed,
115 # consumes *huge* amount of memory. The output file being closed,
116 # there is no need for clean termination.
116 # there is no need for clean termination.
117 os._exit(0)
117 os._exit(0)
118
118
def debugsvnlog(ui, **opts):
    """Fetch SVN log in a subprocess and channel them back to parent to
    avoid memory collection issues.
    """
    # Pickled data must travel over binary-mode pipes (matters on Windows).
    for stream in (sys.stdin, sys.stdout):
        util.setbinary(stream)
    args = decodeargs(sys.stdin.read())
    get_log_child(sys.stdout, *args)
127
127
class logstream(object):
    """Interruptible revision log iterator.

    Wraps the stdout pipe of a 'debugsvnlog' child process and yields
    (orig_paths, revnum, author, date, message) tuples until the child
    sends a None terminator or an error string.
    """
    def __init__(self, stdout):
        self._stdout = stdout

    def __iter__(self):
        while True:
            try:
                entry = pickle.load(self._stdout)
            except EOFError:
                raise util.Abort(_('Mercurial failed to run itself, check'
                                   ' hg executable is in PATH'))
            try:
                orig_paths, revnum, author, date, message = entry
            except (TypeError, ValueError):
                # Narrowed from a bare "except:" so KeyboardInterrupt and
                # SystemExit still propagate.  The child only sends None
                # (clean end of stream, TypeError on unpack), an error
                # string, or a proper 5-tuple.
                if entry is None:
                    break
                raise util.Abort(_("log stream exception '%s'") % entry)
            yield entry

    def close(self):
        if self._stdout:
            self._stdout.close()
            self._stdout = None
152
152
153
153
154 # Check to see if the given path is a local Subversion repo. Verify this by
154 # Check to see if the given path is a local Subversion repo. Verify this by
155 # looking for several svn-specific files and directories in the given
155 # looking for several svn-specific files and directories in the given
156 # directory.
156 # directory.
def filecheck(ui, path, proto):
    """Check whether *path* is a local Subversion repository root.

    Verified by looking for several svn-specific files and directories
    inside the given directory.
    """
    return all(os.path.exists(os.path.join(path, name))
               for name in ('locks', 'hooks', 'format', 'db'))
162
162
163 # Check to see if a given path is the root of an svn repo over http. We verify
163 # Check to see if a given path is the root of an svn repo over http. We verify
164 # this by requesting a version-controlled URL we know can't exist and looking
164 # this by requesting a version-controlled URL we know can't exist and looking
165 # for the svn-specific "not found" XML.
165 # for the svn-specific "not found" XML.
166 def httpcheck(ui, path, proto):
166 def httpcheck(ui, path, proto):
167 try:
167 try:
168 opener = urllib2.build_opener()
168 opener = urllib2.build_opener()
169 rsp = opener.open('%s://%s/!svn/ver/0/.svn' % (proto, path))
169 rsp = opener.open('%s://%s/!svn/ver/0/.svn' % (proto, path))
170 data = rsp.read()
170 data = rsp.read()
171 except urllib2.HTTPError, inst:
171 except urllib2.HTTPError, inst:
172 if inst.code != 404:
172 if inst.code != 404:
173 # Except for 404 we cannot know for sure this is not an svn repo
173 # Except for 404 we cannot know for sure this is not an svn repo
174 ui.warn(_('svn: cannot probe remote repository, assume it could '
174 ui.warn(_('svn: cannot probe remote repository, assume it could '
175 'be a subversion repository. Use --source-type if you '
175 'be a subversion repository. Use --source-type if you '
176 'know better.\n'))
176 'know better.\n'))
177 return True
177 return True
178 data = inst.fp.read()
178 data = inst.fp.read()
179 except:
179 except:
180 # Could be urllib2.URLError if the URL is invalid or anything else.
180 # Could be urllib2.URLError if the URL is invalid or anything else.
181 return False
181 return False
182 return '<m:human-readable errcode="160013">' in data
182 return '<m:human-readable errcode="160013">' in data
183
183
# Map URL schemes to their repository probe function.
protomap = {
    'http': httpcheck,
    'https': httpcheck,
    'file': filecheck,
}
def issvnurl(ui, url):
    """Tell whether *url* points into a Subversion repository.

    Walks up the path one component at a time, probing each prefix
    with the scheme-appropriate checker from protomap.
    """
    try:
        proto, path = url.split('://', 1)
        if proto == 'file':
            path = urllib.url2pathname(path)
    except ValueError:
        # No scheme: treat the argument as a local filesystem path.
        proto = 'file'
        path = os.path.abspath(url)
    if proto == 'file':
        path = util.pconvert(path)
    probe = protomap.get(proto, lambda *args: False)
    while '/' in path:
        if probe(ui, path, proto):
            return True
        path = path.rsplit('/', 1)[0]
    return False
204
204
205 # SVN conversion code stolen from bzr-svn and tailor
205 # SVN conversion code stolen from bzr-svn and tailor
206 #
206 #
207 # Subversion looks like a versioned filesystem, branches structures
207 # Subversion looks like a versioned filesystem, branches structures
208 # are defined by conventions and not enforced by the tool. First,
208 # are defined by conventions and not enforced by the tool. First,
209 # we define the potential branches (modules) as "trunk" and "branches"
209 # we define the potential branches (modules) as "trunk" and "branches"
210 # children directories. Revisions are then identified by their
210 # children directories. Revisions are then identified by their
211 # module and revision number (and a repository identifier).
211 # module and revision number (and a repository identifier).
212 #
212 #
213 # The revision graph is really a tree (or a forest). By default, a
213 # The revision graph is really a tree (or a forest). By default, a
214 # revision parent is the previous revision in the same module. If the
214 # revision parent is the previous revision in the same module. If the
215 # module directory is copied/moved from another module then the
215 # module directory is copied/moved from another module then the
216 # revision is the module root and its parent the source revision in
216 # revision is the module root and its parent the source revision in
217 # the parent module. A revision has at most one parent.
217 # the parent module. A revision has at most one parent.
218 #
218 #
219 class svn_source(converter_source):
219 class svn_source(converter_source):
220 def __init__(self, ui, url, rev=None):
220 def __init__(self, ui, url, rev=None):
221 super(svn_source, self).__init__(ui, url, rev=rev)
221 super(svn_source, self).__init__(ui, url, rev=rev)
222
222
223 if not (url.startswith('svn://') or url.startswith('svn+ssh://') or
223 if not (url.startswith('svn://') or url.startswith('svn+ssh://') or
224 (os.path.exists(url) and
224 (os.path.exists(url) and
225 os.path.exists(os.path.join(url, '.svn'))) or
225 os.path.exists(os.path.join(url, '.svn'))) or
226 issvnurl(ui, url)):
226 issvnurl(ui, url)):
227 raise NoRepo(_("%s does not look like a Subversion repository")
227 raise NoRepo(_("%s does not look like a Subversion repository")
228 % url)
228 % url)
229 if svn is None:
229 if svn is None:
230 raise MissingTool(_('Could not load Subversion python bindings'))
230 raise MissingTool(_('Could not load Subversion python bindings'))
231
231
232 try:
232 try:
233 version = svn.core.SVN_VER_MAJOR, svn.core.SVN_VER_MINOR
233 version = svn.core.SVN_VER_MAJOR, svn.core.SVN_VER_MINOR
234 if version < (1, 4):
234 if version < (1, 4):
235 raise MissingTool(_('Subversion python bindings %d.%d found, '
235 raise MissingTool(_('Subversion python bindings %d.%d found, '
236 '1.4 or later required') % version)
236 '1.4 or later required') % version)
237 except AttributeError:
237 except AttributeError:
238 raise MissingTool(_('Subversion python bindings are too old, 1.4 '
238 raise MissingTool(_('Subversion python bindings are too old, 1.4 '
239 'or later required'))
239 'or later required'))
240
240
241 self.lastrevs = {}
241 self.lastrevs = {}
242
242
243 latest = None
243 latest = None
244 try:
244 try:
245 # Support file://path@rev syntax. Useful e.g. to convert
245 # Support file://path@rev syntax. Useful e.g. to convert
246 # deleted branches.
246 # deleted branches.
247 at = url.rfind('@')
247 at = url.rfind('@')
248 if at >= 0:
248 if at >= 0:
249 latest = int(url[at + 1:])
249 latest = int(url[at + 1:])
250 url = url[:at]
250 url = url[:at]
251 except ValueError:
251 except ValueError:
252 pass
252 pass
253 self.url = geturl(url)
253 self.url = geturl(url)
254 self.encoding = 'UTF-8' # Subversion is always nominal UTF-8
254 self.encoding = 'UTF-8' # Subversion is always nominal UTF-8
255 try:
255 try:
256 self.transport = transport.SvnRaTransport(url=self.url)
256 self.transport = transport.SvnRaTransport(url=self.url)
257 self.ra = self.transport.ra
257 self.ra = self.transport.ra
258 self.ctx = self.transport.client
258 self.ctx = self.transport.client
259 self.baseurl = svn.ra.get_repos_root(self.ra)
259 self.baseurl = svn.ra.get_repos_root(self.ra)
260 # Module is either empty or a repository path starting with
260 # Module is either empty or a repository path starting with
261 # a slash and not ending with a slash.
261 # a slash and not ending with a slash.
262 self.module = urllib.unquote(self.url[len(self.baseurl):])
262 self.module = urllib.unquote(self.url[len(self.baseurl):])
263 self.prevmodule = None
263 self.prevmodule = None
264 self.rootmodule = self.module
264 self.rootmodule = self.module
265 self.commits = {}
265 self.commits = {}
266 self.paths = {}
266 self.paths = {}
267 self.uuid = svn.ra.get_uuid(self.ra)
267 self.uuid = svn.ra.get_uuid(self.ra)
268 except SubversionException:
268 except SubversionException:
269 ui.traceback()
269 ui.traceback()
270 raise NoRepo(_("%s does not look like a Subversion repository")
270 raise NoRepo(_("%s does not look like a Subversion repository")
271 % self.url)
271 % self.url)
272
272
273 if rev:
273 if rev:
274 try:
274 try:
275 latest = int(rev)
275 latest = int(rev)
276 except ValueError:
276 except ValueError:
277 raise util.Abort(_('svn: revision %s is not an integer') % rev)
277 raise util.Abort(_('svn: revision %s is not an integer') % rev)
278
278
279 self.trunkname = self.ui.config('convert', 'svn.trunk',
279 self.trunkname = self.ui.config('convert', 'svn.trunk',
280 'trunk').strip('/')
280 'trunk').strip('/')
281 self.startrev = self.ui.config('convert', 'svn.startrev', default=0)
281 self.startrev = self.ui.config('convert', 'svn.startrev', default=0)
282 try:
282 try:
283 self.startrev = int(self.startrev)
283 self.startrev = int(self.startrev)
284 if self.startrev < 0:
284 if self.startrev < 0:
285 self.startrev = 0
285 self.startrev = 0
286 except ValueError:
286 except ValueError:
287 raise util.Abort(_('svn: start revision %s is not an integer')
287 raise util.Abort(_('svn: start revision %s is not an integer')
288 % self.startrev)
288 % self.startrev)
289
289
290 try:
290 try:
291 self.head = self.latest(self.module, latest)
291 self.head = self.latest(self.module, latest)
292 except SvnPathNotFound:
292 except SvnPathNotFound:
293 self.head = None
293 self.head = None
294 if not self.head:
294 if not self.head:
295 raise util.Abort(_('no revision found in module %s')
295 raise util.Abort(_('no revision found in module %s')
296 % self.module)
296 % self.module)
297 self.last_changed = self.revnum(self.head)
297 self.last_changed = self.revnum(self.head)
298
298
299 self._changescache = None
299 self._changescache = None
300
300
301 if os.path.exists(os.path.join(url, '.svn/entries')):
301 if os.path.exists(os.path.join(url, '.svn/entries')):
302 self.wc = url
302 self.wc = url
303 else:
303 else:
304 self.wc = None
304 self.wc = None
305 self.convertfp = None
305 self.convertfp = None
306
306
307 def setrevmap(self, revmap):
307 def setrevmap(self, revmap):
308 lastrevs = {}
308 lastrevs = {}
309 for revid in revmap.iterkeys():
309 for revid in revmap.iterkeys():
310 uuid, module, revnum = revsplit(revid)
310 uuid, module, revnum = revsplit(revid)
311 lastrevnum = lastrevs.setdefault(module, revnum)
311 lastrevnum = lastrevs.setdefault(module, revnum)
312 if revnum > lastrevnum:
312 if revnum > lastrevnum:
313 lastrevs[module] = revnum
313 lastrevs[module] = revnum
314 self.lastrevs = lastrevs
314 self.lastrevs = lastrevs
315
315
316 def exists(self, path, optrev):
316 def exists(self, path, optrev):
317 try:
317 try:
318 svn.client.ls(self.url.rstrip('/') + '/' + quote(path),
318 svn.client.ls(self.url.rstrip('/') + '/' + quote(path),
319 optrev, False, self.ctx)
319 optrev, False, self.ctx)
320 return True
320 return True
321 except SubversionException:
321 except SubversionException:
322 return False
322 return False
323
323
324 def getheads(self):
324 def getheads(self):
325
325
326 def isdir(path, revnum):
326 def isdir(path, revnum):
327 kind = self._checkpath(path, revnum)
327 kind = self._checkpath(path, revnum)
328 return kind == svn.core.svn_node_dir
328 return kind == svn.core.svn_node_dir
329
329
330 def getcfgpath(name, rev):
330 def getcfgpath(name, rev):
331 cfgpath = self.ui.config('convert', 'svn.' + name)
331 cfgpath = self.ui.config('convert', 'svn.' + name)
332 if cfgpath is not None and cfgpath.strip() == '':
332 if cfgpath is not None and cfgpath.strip() == '':
333 return None
333 return None
334 path = (cfgpath or name).strip('/')
334 path = (cfgpath or name).strip('/')
335 if not self.exists(path, rev):
335 if not self.exists(path, rev):
336 if self.module.endswith(path) and name == 'trunk':
336 if self.module.endswith(path) and name == 'trunk':
337 # we are converting from inside this directory
337 # we are converting from inside this directory
338 return None
338 return None
339 if cfgpath:
339 if cfgpath:
340 raise util.Abort(_('expected %s to be at %r, but not found')
340 raise util.Abort(_('expected %s to be at %r, but not found')
341 % (name, path))
341 % (name, path))
342 return None
342 return None
343 self.ui.note(_('found %s at %r\n') % (name, path))
343 self.ui.note(_('found %s at %r\n') % (name, path))
344 return path
344 return path
345
345
346 rev = optrev(self.last_changed)
346 rev = optrev(self.last_changed)
347 oldmodule = ''
347 oldmodule = ''
348 trunk = getcfgpath('trunk', rev)
348 trunk = getcfgpath('trunk', rev)
349 self.tags = getcfgpath('tags', rev)
349 self.tags = getcfgpath('tags', rev)
350 branches = getcfgpath('branches', rev)
350 branches = getcfgpath('branches', rev)
351
351
352 # If the project has a trunk or branches, we will extract heads
352 # If the project has a trunk or branches, we will extract heads
353 # from them. We keep the project root otherwise.
353 # from them. We keep the project root otherwise.
354 if trunk:
354 if trunk:
355 oldmodule = self.module or ''
355 oldmodule = self.module or ''
356 self.module += '/' + trunk
356 self.module += '/' + trunk
357 self.head = self.latest(self.module, self.last_changed)
357 self.head = self.latest(self.module, self.last_changed)
358 if not self.head:
358 if not self.head:
359 raise util.Abort(_('no revision found in module %s')
359 raise util.Abort(_('no revision found in module %s')
360 % self.module)
360 % self.module)
361
361
362 # First head in the list is the module's head
362 # First head in the list is the module's head
363 self.heads = [self.head]
363 self.heads = [self.head]
364 if self.tags is not None:
364 if self.tags is not None:
365 self.tags = '%s/%s' % (oldmodule , (self.tags or 'tags'))
365 self.tags = '%s/%s' % (oldmodule , (self.tags or 'tags'))
366
366
367 # Check if branches bring a few more heads to the list
367 # Check if branches bring a few more heads to the list
368 if branches:
368 if branches:
369 rpath = self.url.strip('/')
369 rpath = self.url.strip('/')
370 branchnames = svn.client.ls(rpath + '/' + quote(branches),
370 branchnames = svn.client.ls(rpath + '/' + quote(branches),
371 rev, False, self.ctx)
371 rev, False, self.ctx)
372 for branch in branchnames.keys():
372 for branch in branchnames.keys():
373 module = '%s/%s/%s' % (oldmodule, branches, branch)
373 module = '%s/%s/%s' % (oldmodule, branches, branch)
374 if not isdir(module, self.last_changed):
374 if not isdir(module, self.last_changed):
375 continue
375 continue
376 brevid = self.latest(module, self.last_changed)
376 brevid = self.latest(module, self.last_changed)
377 if not brevid:
377 if not brevid:
378 self.ui.note(_('ignoring empty branch %s\n') % branch)
378 self.ui.note(_('ignoring empty branch %s\n') % branch)
379 continue
379 continue
380 self.ui.note(_('found branch %s at %d\n') %
380 self.ui.note(_('found branch %s at %d\n') %
381 (branch, self.revnum(brevid)))
381 (branch, self.revnum(brevid)))
382 self.heads.append(brevid)
382 self.heads.append(brevid)
383
383
384 if self.startrev and self.heads:
384 if self.startrev and self.heads:
385 if len(self.heads) > 1:
385 if len(self.heads) > 1:
386 raise util.Abort(_('svn: start revision is not supported '
386 raise util.Abort(_('svn: start revision is not supported '
387 'with more than one branch'))
387 'with more than one branch'))
388 revnum = self.revnum(self.heads[0])
388 revnum = self.revnum(self.heads[0])
389 if revnum < self.startrev:
389 if revnum < self.startrev:
390 raise util.Abort(
390 raise util.Abort(
391 _('svn: no revision found after start revision %d')
391 _('svn: no revision found after start revision %d')
392 % self.startrev)
392 % self.startrev)
393
393
394 return self.heads
394 return self.heads
395
395
396 def getchanges(self, rev):
396 def getchanges(self, rev):
397 if self._changescache and self._changescache[0] == rev:
397 if self._changescache and self._changescache[0] == rev:
398 return self._changescache[1]
398 return self._changescache[1]
399 self._changescache = None
399 self._changescache = None
400 (paths, parents) = self.paths[rev]
400 (paths, parents) = self.paths[rev]
401 if parents:
401 if parents:
402 files, self.removed, copies = self.expandpaths(rev, paths, parents)
402 files, self.removed, copies = self.expandpaths(rev, paths, parents)
403 else:
403 else:
404 # Perform a full checkout on roots
404 # Perform a full checkout on roots
405 uuid, module, revnum = revsplit(rev)
405 uuid, module, revnum = revsplit(rev)
406 entries = svn.client.ls(self.baseurl + quote(module),
406 entries = svn.client.ls(self.baseurl + quote(module),
407 optrev(revnum), True, self.ctx)
407 optrev(revnum), True, self.ctx)
408 files = [n for n, e in entries.iteritems()
408 files = [n for n, e in entries.iteritems()
409 if e.kind == svn.core.svn_node_file]
409 if e.kind == svn.core.svn_node_file]
410 copies = {}
410 copies = {}
411 self.removed = set()
411 self.removed = set()
412
412
413 files.sort()
413 files.sort()
414 files = zip(files, [rev] * len(files))
414 files = zip(files, [rev] * len(files))
415
415
416 # caller caches the result, so free it here to release memory
416 # caller caches the result, so free it here to release memory
417 del self.paths[rev]
417 del self.paths[rev]
418 return (files, copies)
418 return (files, copies)
419
419
420 def getchangedfiles(self, rev, i):
420 def getchangedfiles(self, rev, i):
421 changes = self.getchanges(rev)
421 changes = self.getchanges(rev)
422 self._changescache = (rev, changes)
422 self._changescache = (rev, changes)
423 return [f[0] for f in changes[0]]
423 return [f[0] for f in changes[0]]
424
424
425 def getcommit(self, rev):
425 def getcommit(self, rev):
426 if rev not in self.commits:
426 if rev not in self.commits:
427 uuid, module, revnum = revsplit(rev)
427 uuid, module, revnum = revsplit(rev)
428 self.module = module
428 self.module = module
429 self.reparent(module)
429 self.reparent(module)
430 # We assume that:
430 # We assume that:
431 # - requests for revisions after "stop" come from the
431 # - requests for revisions after "stop" come from the
432 # revision graph backward traversal. Cache all of them
432 # revision graph backward traversal. Cache all of them
433 # down to stop, they will be used eventually.
433 # down to stop, they will be used eventually.
434 # - requests for revisions before "stop" come to get
434 # - requests for revisions before "stop" come to get
435 # isolated branches parents. Just fetch what is needed.
435 # isolated branches parents. Just fetch what is needed.
436 stop = self.lastrevs.get(module, 0)
436 stop = self.lastrevs.get(module, 0)
437 if revnum < stop:
437 if revnum < stop:
438 stop = revnum + 1
438 stop = revnum + 1
439 self._fetch_revisions(revnum, stop)
439 self._fetch_revisions(revnum, stop)
440 if rev not in self.commits:
440 if rev not in self.commits:
441 raise util.Abort(_('svn: revision %s not found') % revnum)
441 raise util.Abort(_('svn: revision %s not found') % revnum)
442 commit = self.commits[rev]
442 commit = self.commits[rev]
443 # caller caches the result, so free it here to release memory
443 # caller caches the result, so free it here to release memory
444 del self.commits[rev]
444 del self.commits[rev]
445 return commit
445 return commit
446
446
447 def gettags(self):
447 def gettags(self):
448 tags = {}
448 tags = {}
449 if self.tags is None:
449 if self.tags is None:
450 return tags
450 return tags
451
451
452 # svn tags are just a convention, project branches left in a
452 # svn tags are just a convention, project branches left in a
453 # 'tags' directory. There is no other relationship than
453 # 'tags' directory. There is no other relationship than
454 # ancestry, which is expensive to discover and makes them hard
454 # ancestry, which is expensive to discover and makes them hard
455 # to update incrementally. Worse, past revisions may be
455 # to update incrementally. Worse, past revisions may be
456 # referenced by tags far away in the future, requiring a deep
456 # referenced by tags far away in the future, requiring a deep
457 # history traversal on every calculation. Current code
457 # history traversal on every calculation. Current code
458 # performs a single backward traversal, tracking moves within
458 # performs a single backward traversal, tracking moves within
459 # the tags directory (tag renaming) and recording a new tag
459 # the tags directory (tag renaming) and recording a new tag
460 # everytime a project is copied from outside the tags
460 # everytime a project is copied from outside the tags
461 # directory. It also lists deleted tags, this behaviour may
461 # directory. It also lists deleted tags, this behaviour may
462 # change in the future.
462 # change in the future.
463 pendings = []
463 pendings = []
464 tagspath = self.tags
464 tagspath = self.tags
465 start = svn.ra.get_latest_revnum(self.ra)
465 start = svn.ra.get_latest_revnum(self.ra)
466 stream = self._getlog([self.tags], start, self.startrev)
466 stream = self._getlog([self.tags], start, self.startrev)
467 try:
467 try:
468 for entry in stream:
468 for entry in stream:
469 origpaths, revnum, author, date, message = entry
469 origpaths, revnum, author, date, message = entry
470 copies = [(e.copyfrom_path, e.copyfrom_rev, p) for p, e
470 copies = [(e.copyfrom_path, e.copyfrom_rev, p) for p, e
471 in origpaths.iteritems() if e.copyfrom_path]
471 in origpaths.iteritems() if e.copyfrom_path]
472 # Apply moves/copies from more specific to general
472 # Apply moves/copies from more specific to general
473 copies.sort(reverse=True)
473 copies.sort(reverse=True)
474
474
475 srctagspath = tagspath
475 srctagspath = tagspath
476 if copies and copies[-1][2] == tagspath:
476 if copies and copies[-1][2] == tagspath:
477 # Track tags directory moves
477 # Track tags directory moves
478 srctagspath = copies.pop()[0]
478 srctagspath = copies.pop()[0]
479
479
480 for source, sourcerev, dest in copies:
480 for source, sourcerev, dest in copies:
481 if not dest.startswith(tagspath + '/'):
481 if not dest.startswith(tagspath + '/'):
482 continue
482 continue
483 for tag in pendings:
483 for tag in pendings:
484 if tag[0].startswith(dest):
484 if tag[0].startswith(dest):
485 tagpath = source + tag[0][len(dest):]
485 tagpath = source + tag[0][len(dest):]
486 tag[:2] = [tagpath, sourcerev]
486 tag[:2] = [tagpath, sourcerev]
487 break
487 break
488 else:
488 else:
489 pendings.append([source, sourcerev, dest])
489 pendings.append([source, sourcerev, dest])
490
490
491 # Filter out tags with children coming from different
491 # Filter out tags with children coming from different
492 # parts of the repository like:
492 # parts of the repository like:
493 # /tags/tag.1 (from /trunk:10)
493 # /tags/tag.1 (from /trunk:10)
494 # /tags/tag.1/foo (from /branches/foo:12)
494 # /tags/tag.1/foo (from /branches/foo:12)
495 # Here/tags/tag.1 discarded as well as its children.
495 # Here/tags/tag.1 discarded as well as its children.
496 # It happens with tools like cvs2svn. Such tags cannot
496 # It happens with tools like cvs2svn. Such tags cannot
497 # be represented in mercurial.
497 # be represented in mercurial.
498 addeds = dict((p, e.copyfrom_path) for p, e
498 addeds = dict((p, e.copyfrom_path) for p, e
499 in origpaths.iteritems()
499 in origpaths.iteritems()
500 if e.action == 'A' and e.copyfrom_path)
500 if e.action == 'A' and e.copyfrom_path)
501 badroots = set()
501 badroots = set()
502 for destroot in addeds:
502 for destroot in addeds:
503 for source, sourcerev, dest in pendings:
503 for source, sourcerev, dest in pendings:
504 if (not dest.startswith(destroot + '/')
504 if (not dest.startswith(destroot + '/')
505 or source.startswith(addeds[destroot] + '/')):
505 or source.startswith(addeds[destroot] + '/')):
506 continue
506 continue
507 badroots.add(destroot)
507 badroots.add(destroot)
508 break
508 break
509
509
510 for badroot in badroots:
510 for badroot in badroots:
511 pendings = [p for p in pendings if p[2] != badroot
511 pendings = [p for p in pendings if p[2] != badroot
512 and not p[2].startswith(badroot + '/')]
512 and not p[2].startswith(badroot + '/')]
513
513
514 # Tell tag renamings from tag creations
514 # Tell tag renamings from tag creations
515 renamings = []
515 renamings = []
516 for source, sourcerev, dest in pendings:
516 for source, sourcerev, dest in pendings:
517 tagname = dest.split('/')[-1]
517 tagname = dest.split('/')[-1]
518 if source.startswith(srctagspath):
518 if source.startswith(srctagspath):
519 renamings.append([source, sourcerev, tagname])
519 renamings.append([source, sourcerev, tagname])
520 continue
520 continue
521 if tagname in tags:
521 if tagname in tags:
522 # Keep the latest tag value
522 # Keep the latest tag value
523 continue
523 continue
524 # From revision may be fake, get one with changes
524 # From revision may be fake, get one with changes
525 try:
525 try:
526 tagid = self.latest(source, sourcerev)
526 tagid = self.latest(source, sourcerev)
527 if tagid and tagname not in tags:
527 if tagid and tagname not in tags:
528 tags[tagname] = tagid
528 tags[tagname] = tagid
529 except SvnPathNotFound:
529 except SvnPathNotFound:
530 # It happens when we are following directories
530 # It happens when we are following directories
531 # we assumed were copied with their parents
531 # we assumed were copied with their parents
532 # but were really created in the tag
532 # but were really created in the tag
533 # directory.
533 # directory.
534 pass
534 pass
535 pendings = renamings
535 pendings = renamings
536 tagspath = srctagspath
536 tagspath = srctagspath
537 finally:
537 finally:
538 stream.close()
538 stream.close()
539 return tags
539 return tags
540
540
541 def converted(self, rev, destrev):
541 def converted(self, rev, destrev):
542 if not self.wc:
542 if not self.wc:
543 return
543 return
544 if self.convertfp is None:
544 if self.convertfp is None:
545 self.convertfp = open(os.path.join(self.wc, '.svn', 'hg-shamap'),
545 self.convertfp = open(os.path.join(self.wc, '.svn', 'hg-shamap'),
546 'a')
546 'a')
547 self.convertfp.write('%s %d\n' % (destrev, self.revnum(rev)))
547 self.convertfp.write('%s %d\n' % (destrev, self.revnum(rev)))
548 self.convertfp.flush()
548 self.convertfp.flush()
549
549
550 def revid(self, revnum, module=None):
550 def revid(self, revnum, module=None):
551 return 'svn:%s%s@%s' % (self.uuid, module or self.module, revnum)
551 return 'svn:%s%s@%s' % (self.uuid, module or self.module, revnum)
552
552
553 def revnum(self, rev):
553 def revnum(self, rev):
554 return int(rev.split('@')[-1])
554 return int(rev.split('@')[-1])
555
555
    def latest(self, path, stop=None):
        """Find the latest revid affecting path, up to stop revision
        number. If stop is None, default to repository latest
        revision. It may return a revision in a different module,
        since a branch may be moved without a change being
        reported. Return None if computed module does not belong to
        rootmodule subtree.

        Raises SvnPathNotFound when the path cannot be stat'ed at or
        before stop.
        """
        def findchanges(path, start, stop=None):
            # Walk the log backward from start, following copyfrom
            # renames so `path` always names the branch location valid
            # at the revision being examined. With stop=None, return the
            # first revision that has any changed paths; otherwise walk
            # down to (and past) stop.
            stream = self._getlog([path], start, stop or 1)
            try:
                for entry in stream:
                    paths, revnum, author, date, message = entry
                    if stop is None and paths:
                        # We do not know the latest changed revision,
                        # keep the first one with changed paths.
                        break
                    if revnum <= stop:
                        break

                    for p in paths:
                        if (not path.startswith(p) or
                            not paths[p].copyfrom_path):
                            continue
                        # A prefix of `path` was copied from elsewhere:
                        # rewrite path onto the copy source and keep
                        # scanning from there.
                        newpath = paths[p].copyfrom_path + path[len(p):]
                        self.ui.debug("branch renamed from %s to %s at %d\n" %
                                      (path, newpath, revnum))
                        path = newpath
                        break
                # `paths` is the changed-path dict of the last examined
                # entry; empty means the scan found no real change.
                if not paths:
                    revnum = None
                return revnum, path
            finally:
                stream.close()

        if not path.startswith(self.rootmodule):
            # Requests on foreign branches may be forbidden at server level
            self.ui.debug('ignoring foreign branch %r\n' % path)
            return None

        if stop is None:
            stop = svn.ra.get_latest_revnum(self.ra)
        try:
            # stat() must run relative to the repository root; restore
            # the previous reparent target afterwards.
            prevmodule = self.reparent('')
            dirent = svn.ra.stat(self.ra, path.strip('/'), stop)
            self.reparent(prevmodule)
        except SubversionException:
            dirent = None
        if not dirent:
            raise SvnPathNotFound(_('%s not found up to revision %d')
                                  % (path, stop))

        # stat() gives us the previous revision on this line of
        # development, but it might be in *another module*. Fetch the
        # log and detect renames down to the latest revision.
        revnum, realpath = findchanges(path, stop, dirent.created_rev)
        if revnum is None:
            # Tools like svnsync can create empty revision, when
            # synchronizing only a subtree for instance. These empty
            # revisions created_rev still have their original values
            # despite all changes having disappeared and can be
            # returned by ra.stat(), at least when stating the root
            # module. In that case, do not trust created_rev and scan
            # the whole history.
            revnum, realpath = findchanges(path, stop)
            if revnum is None:
                self.ui.debug('ignoring empty branch %r\n' % realpath)
                return None

        if not realpath.startswith(self.rootmodule):
            self.ui.debug('ignoring foreign branch %r\n' % realpath)
            return None
        return self.revid(revnum, realpath)
629
629
630 def reparent(self, module):
630 def reparent(self, module):
631 """Reparent the svn transport and return the previous parent."""
631 """Reparent the svn transport and return the previous parent."""
632 if self.prevmodule == module:
632 if self.prevmodule == module:
633 return module
633 return module
634 svnurl = self.baseurl + quote(module)
634 svnurl = self.baseurl + quote(module)
635 prevmodule = self.prevmodule
635 prevmodule = self.prevmodule
636 if prevmodule is None:
636 if prevmodule is None:
637 prevmodule = ''
637 prevmodule = ''
638 self.ui.debug("reparent to %s\n" % svnurl)
638 self.ui.debug("reparent to %s\n" % svnurl)
639 svn.ra.reparent(self.ra, svnurl)
639 svn.ra.reparent(self.ra, svnurl)
640 self.prevmodule = module
640 self.prevmodule = module
641 return prevmodule
641 return prevmodule
642
642
    def expandpaths(self, rev, paths, parents):
        """Expand the changed-path log entries of revision rev into
        converter-level change information.

        paths is a sequence of (path, entry) pairs from the svn log;
        parents lists already-known parent revids (only parents[0] is
        consulted). Returns (changed_files, removed_set, copies) where
        changed_files is a list that also includes the removed files,
        removed_set holds deleted paths, and copies maps destination
        path -> copy source path (all recoded).
        """
        changed, removed = set(), set()
        copies = {}

        # Reparent onto the module of rev if it moved.
        new_module, revnum = revsplit(rev)[1:]
        if new_module != self.module:
            self.module = new_module
            self.reparent(self.module)

        for i, (path, ent) in enumerate(paths):
            self.ui.progress(_('scanning paths'), i, item=path,
                             total=len(paths))
            entrypath = self.getrelpath(path)

            kind = self._checkpath(entrypath, revnum)
            if kind == svn.core.svn_node_file:
                changed.add(self.recode(entrypath))
                if not ent.copyfrom_path or not parents:
                    continue
                # Copy sources not in parent revisions cannot be
                # represented, ignore their origin for now
                pmodule, prevnum = revsplit(parents[0])[1:]
                if ent.copyfrom_rev < prevnum:
                    continue
                copyfrom_path = self.getrelpath(ent.copyfrom_path, pmodule)
                if not copyfrom_path:
                    continue
                self.ui.debug("copied to %s from %s@%s\n" %
                              (entrypath, copyfrom_path, ent.copyfrom_rev))
                copies[self.recode(entrypath)] = self.recode(copyfrom_path)
            elif kind == 0: # gone, but had better be a deleted *file*
                self.ui.debug("gone from %s\n" % ent.copyfrom_rev)
                # NOTE(review): parents[0] is read without an emptiness
                # check in this branch — presumably deletions never occur
                # in a rootless revision; confirm with callers.
                pmodule, prevnum = revsplit(parents[0])[1:]
                parentpath = pmodule + "/" + entrypath
                fromkind = self._checkpath(entrypath, prevnum, pmodule)

                if fromkind == svn.core.svn_node_file:
                    removed.add(self.recode(entrypath))
                elif fromkind == svn.core.svn_node_dir:
                    # A whole directory disappeared: enumerate what it
                    # contained in the parent revision and remove each file.
                    oroot = parentpath.strip('/')
                    nroot = path.strip('/')
                    children = self._iterfiles(oroot, prevnum)
                    for childpath in children:
                        childpath = childpath.replace(oroot, nroot)
                        childpath = self.getrelpath("/" + childpath, pmodule)
                        if childpath:
                            removed.add(self.recode(childpath))
                else:
                    self.ui.debug('unknown path in revision %d: %s\n' % \
                                  (revnum, path))
            elif kind == svn.core.svn_node_dir:
                if ent.action == 'M':
                    # If the directory just had a prop change,
                    # then we shouldn't need to look for its children.
                    continue
                if ent.action == 'R' and parents:
                    # If a directory is replacing a file, mark the previous
                    # file as deleted
                    pmodule, prevnum = revsplit(parents[0])[1:]
                    pkind = self._checkpath(entrypath, prevnum, pmodule)
                    if pkind == svn.core.svn_node_file:
                        removed.add(self.recode(entrypath))
                    elif pkind == svn.core.svn_node_dir:
                        # We do not know what files were kept or removed,
                        # mark them all as changed.
                        for childpath in self._iterfiles(pmodule, prevnum):
                            childpath = self.getrelpath("/" + childpath)
                            if childpath:
                                changed.add(self.recode(childpath))

                # Every file below the added/changed directory is changed.
                for childpath in self._iterfiles(path, revnum):
                    childpath = self.getrelpath("/" + childpath)
                    if childpath:
                        changed.add(self.recode(childpath))

                # Handle directory copies
                if not ent.copyfrom_path or not parents:
                    continue
                # Copy sources not in parent revisions cannot be
                # represented, ignore their origin for now
                pmodule, prevnum = revsplit(parents[0])[1:]
                if ent.copyfrom_rev < prevnum:
                    continue
                copyfrompath = self.getrelpath(ent.copyfrom_path, pmodule)
                if not copyfrompath:
                    continue
                self.ui.debug("mark %s came from %s:%d\n"
                              % (path, copyfrompath, ent.copyfrom_rev))
                # Record a per-file copy for every file in the copied tree.
                children = self._iterfiles(ent.copyfrom_path, ent.copyfrom_rev)
                for childpath in children:
                    childpath = self.getrelpath("/" + childpath, pmodule)
                    if not childpath:
                        continue
                    copytopath = path + childpath[len(copyfrompath):]
                    copytopath = self.getrelpath(copytopath)
                    copies[self.recode(copytopath)] = self.recode(childpath)

        self.ui.progress(_('scanning paths'), None)
        # Removed files are reported as changed too so the sink revisits them.
        changed.update(removed)
        return (list(changed), removed, copies)
743
743
    def _fetch_revisions(self, from_revnum, to_revnum):
        """Fetch the svn log between from_revnum and to_revnum and
        populate self.commits/self.paths with parsed commit objects.

        The log is walked from the higher revision number down to the
        lower one; arguments are swapped if given in ascending order.
        Aborts when the branch has no revision to_revnum.
        """
        if from_revnum < to_revnum:
            from_revnum, to_revnum = to_revnum, from_revnum

        # Most recently parsed commit; its parents list is patched once
        # the next (older) revision on the branch is discovered.
        self.child_cset = None

        def parselogentry(orig_paths, revnum, author, date, message):
            """Return the parsed commit object or None, and True if
            the revision is a branch root.
            """
            self.ui.debug("parsing revision %d (%d changes)\n" %
                          (revnum, len(orig_paths)))

            branched = False
            rev = self.revid(revnum)
            # branch log might return entries for a parent we already have

            if rev in self.commits or revnum < to_revnum:
                return None, branched

            parents = []
            # check whether this revision is the start of a branch or part
            # of a branch renaming
            orig_paths = sorted(orig_paths.iteritems())
            root_paths = [(p, e) for p, e in orig_paths
                          if self.module.startswith(p)]
            if root_paths:
                # Longest matching prefix of self.module wins (the list
                # is sorted, so the last entry is the most specific).
                path, ent = root_paths[-1]
                if ent.copyfrom_path:
                    branched = True
                    newpath = ent.copyfrom_path + self.module[len(path):]
                    # ent.copyfrom_rev may not be the actual last revision
                    previd = self.latest(newpath, ent.copyfrom_rev)
                    if previd is not None:
                        prevmodule, prevnum = revsplit(previd)[1:]
                        if prevnum >= self.startrev:
                            parents = [previd]
                            self.ui.note(
                                _('found parent of branch %s at %d: %s\n') %
                                (self.module, prevnum, prevmodule))
                else:
                    self.ui.debug("no copyfrom path, don't know what to do.\n")

            paths = []
            # filter out unrelated paths
            for path, ent in orig_paths:
                if self.getrelpath(path) is None:
                    continue
                paths.append((path, ent))

            # Example SVN datetime. Includes microseconds.
            # ISO-8601 conformant
            # '2007-01-04T17:35:00.902377Z'
            date = util.parsedate(date[:19] + " UTC", ["%Y-%m-%dT%H:%M:%S"])

            log = message and self.recode(message) or ''
            author = author and self.recode(author) or ''
            try:
                branch = self.module.split("/")[-1]
                if branch == self.trunkname:
                    branch = None
            except IndexError:
                branch = None

            cset = commit(author=author,
                          date=util.datestr(date, '%Y-%m-%d %H:%M:%S %1%2'),
                          desc=log,
                          parents=parents,
                          branch=branch,
                          rev=rev)

            self.commits[rev] = cset
            # The parents list is *shared* among self.paths and the
            # commit object. Both will be updated below.
            self.paths[rev] = (paths, cset.parents)
            if self.child_cset and not self.child_cset.parents:
                # Link the previously parsed (younger) commit to this one.
                self.child_cset.parents[:] = [rev]
            self.child_cset = cset
            return cset, branched

        self.ui.note(_('fetching revision log for "%s" from %d to %d\n') %
                     (self.module, from_revnum, to_revnum))

        try:
            firstcset = None
            lastonbranch = False
            stream = self._getlog([self.module], from_revnum, to_revnum)
            try:
                for entry in stream:
                    paths, revnum, author, date, message = entry
                    if revnum < self.startrev:
                        lastonbranch = True
                        break
                    if not paths:
                        self.ui.debug('revision %d has no entries\n' % revnum)
                        # If we ever leave the loop on an empty
                        # revision, do not try to get a parent branch
                        lastonbranch = lastonbranch or revnum == 0
                        continue
                    cset, lastonbranch = parselogentry(paths, revnum, author,
                                                       date, message)
                    if cset:
                        firstcset = cset
                    if lastonbranch:
                        break
            finally:
                stream.close()

            if not lastonbranch and firstcset and not firstcset.parents:
                # The first revision of the sequence (the last fetched one)
                # has invalid parents if not a branch root. Find the parent
                # revision now, if any.
                try:
                    firstrevnum = self.revnum(firstcset.rev)
                    if firstrevnum > 1:
                        latest = self.latest(self.module, firstrevnum - 1)
                        if latest:
                            firstcset.parents.append(latest)
                except SvnPathNotFound:
                    pass
        except SubversionException, (inst, num):
            if num == svn.core.SVN_ERR_FS_NO_SUCH_REVISION:
                raise util.Abort(_('svn: branch has no revision %s')
                                 % to_revnum)
            raise
869
869
    def getfile(self, file, rev):
        """Return (data, mode) for file at revision rev.

        mode is 'x' for executable files, 'l' for symlinks (with the
        svn "link " prefix stripped from data) and '' otherwise.
        Raises IOError when the file was removed or does not exist at
        that revision.
        """
        # TODO: ra.get_file transmits the whole file instead of diffs.
        if file in self.removed:
            raise IOError
        mode = ''
        try:
            # Reparent onto the module of rev if it differs.
            new_module, revnum = revsplit(rev)[1:]
            if self.module != new_module:
                self.module = new_module
                self.reparent(self.module)
            io = StringIO()
            info = svn.ra.get_file(self.ra, file, revnum, io)
            data = io.getvalue()
            # ra.get_files() seems to keep a reference on the input buffer
            # preventing collection. Release it explicitly.
            io.close()
            if isinstance(info, list):
                info = info[-1]
            # Derive the mode from the returned svn properties; svn:special
            # (symlink) takes precedence over svn:executable.
            mode = ("svn:executable" in info) and 'x' or ''
            mode = ("svn:special" in info) and 'l' or mode
        except SubversionException, e:
            notfound = (svn.core.SVN_ERR_FS_NOT_FOUND,
                        svn.core.SVN_ERR_RA_DAV_PATH_NOT_FOUND)
            if e.apr_err in notfound: # File not found
                raise IOError
            raise
        if mode == 'l':
            # svn stores symlink targets as "link <target>".
            link_prefix = "link "
            if data.startswith(link_prefix):
                data = data[len(link_prefix):]
        return data, mode
901
901
902 def _iterfiles(self, path, revnum):
902 def _iterfiles(self, path, revnum):
903 """Enumerate all files in path at revnum, recursively."""
903 """Enumerate all files in path at revnum, recursively."""
904 path = path.strip('/')
904 path = path.strip('/')
905 pool = Pool()
905 pool = Pool()
906 rpath = '/'.join([self.baseurl, quote(path)]).strip('/')
906 rpath = '/'.join([self.baseurl, quote(path)]).strip('/')
907 entries = svn.client.ls(rpath, optrev(revnum), True, self.ctx, pool)
907 entries = svn.client.ls(rpath, optrev(revnum), True, self.ctx, pool)
908 if path:
908 if path:
909 path += '/'
909 path += '/'
910 return ((path + p) for p, e in entries.iteritems()
910 return ((path + p) for p, e in entries.iteritems()
911 if e.kind == svn.core.svn_node_file)
911 if e.kind == svn.core.svn_node_file)
912
912
913 def getrelpath(self, path, module=None):
913 def getrelpath(self, path, module=None):
914 if module is None:
914 if module is None:
915 module = self.module
915 module = self.module
916 # Given the repository url of this wc, say
916 # Given the repository url of this wc, say
917 # "http://server/plone/CMFPlone/branches/Plone-2_0-branch"
917 # "http://server/plone/CMFPlone/branches/Plone-2_0-branch"
918 # extract the "entry" portion (a relative path) from what
918 # extract the "entry" portion (a relative path) from what
919 # svn log --xml says, ie
919 # svn log --xml says, ie
920 # "/CMFPlone/branches/Plone-2_0-branch/tests/PloneTestCase.py"
920 # "/CMFPlone/branches/Plone-2_0-branch/tests/PloneTestCase.py"
921 # that is to say "tests/PloneTestCase.py"
921 # that is to say "tests/PloneTestCase.py"
922 if path.startswith(module):
922 if path.startswith(module):
923 relative = path.rstrip('/')[len(module):]
923 relative = path.rstrip('/')[len(module):]
924 if relative.startswith('/'):
924 if relative.startswith('/'):
925 return relative[1:]
925 return relative[1:]
926 elif relative == '':
926 elif relative == '':
927 return relative
927 return relative
928
928
929 # The path is outside our tracked tree...
929 # The path is outside our tracked tree...
930 self.ui.debug('%r is not under %r, ignoring\n' % (path, module))
930 self.ui.debug('%r is not under %r, ignoring\n' % (path, module))
931 return None
931 return None
932
932
933 def _checkpath(self, path, revnum, module=None):
933 def _checkpath(self, path, revnum, module=None):
934 if module is not None:
934 if module is not None:
935 prevmodule = self.reparent('')
935 prevmodule = self.reparent('')
936 path = module + '/' + path
936 path = module + '/' + path
937 try:
937 try:
938 # ra.check_path does not like leading slashes very much, it leads
938 # ra.check_path does not like leading slashes very much, it leads
939 # to PROPFIND subversion errors
939 # to PROPFIND subversion errors
940 return svn.ra.check_path(self.ra, path.strip('/'), revnum)
940 return svn.ra.check_path(self.ra, path.strip('/'), revnum)
941 finally:
941 finally:
942 if module is not None:
942 if module is not None:
943 self.reparent(prevmodule)
943 self.reparent(prevmodule)
944
944
945 def _getlog(self, paths, start, end, limit=0, discover_changed_paths=True,
945 def _getlog(self, paths, start, end, limit=0, discover_changed_paths=True,
946 strict_node_history=False):
946 strict_node_history=False):
947 # Normalize path names, svn >= 1.5 only wants paths relative to
947 # Normalize path names, svn >= 1.5 only wants paths relative to
948 # supplied URL
948 # supplied URL
949 relpaths = []
949 relpaths = []
950 for p in paths:
950 for p in paths:
951 if not p.startswith('/'):
951 if not p.startswith('/'):
952 p = self.module + '/' + p
952 p = self.module + '/' + p
953 relpaths.append(p.strip('/'))
953 relpaths.append(p.strip('/'))
954 args = [self.baseurl, relpaths, start, end, limit,
954 args = [self.baseurl, relpaths, start, end, limit,
955 discover_changed_paths, strict_node_history]
955 discover_changed_paths, strict_node_history]
956 arg = encodeargs(args)
956 arg = encodeargs(args)
957 hgexe = util.hgexecutable()
957 hgexe = util.hgexecutable()
958 cmd = '%s debugsvnlog' % util.shellquote(hgexe)
958 cmd = '%s debugsvnlog' % util.shellquote(hgexe)
959 stdin, stdout = util.popen2(util.quotecommand(cmd))
959 stdin, stdout = util.popen2(util.quotecommand(cmd))
960 stdin.write(arg)
960 stdin.write(arg)
961 try:
961 try:
962 stdin.close()
962 stdin.close()
963 except IOError:
963 except IOError:
964 raise util.Abort(_('Mercurial failed to run itself, check'
964 raise util.Abort(_('Mercurial failed to run itself, check'
965 ' hg executable is in PATH'))
965 ' hg executable is in PATH'))
966 return logstream(stdout)
966 return logstream(stdout)
967
967
968 pre_revprop_change = '''#!/bin/sh
968 pre_revprop_change = '''#!/bin/sh
969
969
970 REPOS="$1"
970 REPOS="$1"
971 REV="$2"
971 REV="$2"
972 USER="$3"
972 USER="$3"
973 PROPNAME="$4"
973 PROPNAME="$4"
974 ACTION="$5"
974 ACTION="$5"
975
975
976 if [ "$ACTION" = "M" -a "$PROPNAME" = "svn:log" ]; then exit 0; fi
976 if [ "$ACTION" = "M" -a "$PROPNAME" = "svn:log" ]; then exit 0; fi
977 if [ "$ACTION" = "A" -a "$PROPNAME" = "hg:convert-branch" ]; then exit 0; fi
977 if [ "$ACTION" = "A" -a "$PROPNAME" = "hg:convert-branch" ]; then exit 0; fi
978 if [ "$ACTION" = "A" -a "$PROPNAME" = "hg:convert-rev" ]; then exit 0; fi
978 if [ "$ACTION" = "A" -a "$PROPNAME" = "hg:convert-rev" ]; then exit 0; fi
979
979
980 echo "Changing prohibited revision property" >&2
980 echo "Changing prohibited revision property" >&2
981 exit 1
981 exit 1
982 '''
982 '''
983
983
984 class svn_sink(converter_sink, commandline):
984 class svn_sink(converter_sink, commandline):
985 commit_re = re.compile(r'Committed revision (\d+).', re.M)
985 commit_re = re.compile(r'Committed revision (\d+).', re.M)
986 uuid_re = re.compile(r'Repository UUID:\s*(\S+)', re.M)
986 uuid_re = re.compile(r'Repository UUID:\s*(\S+)', re.M)
987
987
988 def prerun(self):
988 def prerun(self):
989 if self.wc:
989 if self.wc:
990 os.chdir(self.wc)
990 os.chdir(self.wc)
991
991
992 def postrun(self):
992 def postrun(self):
993 if self.wc:
993 if self.wc:
994 os.chdir(self.cwd)
994 os.chdir(self.cwd)
995
995
996 def join(self, name):
996 def join(self, name):
997 return os.path.join(self.wc, '.svn', name)
997 return os.path.join(self.wc, '.svn', name)
998
998
999 def revmapfile(self):
999 def revmapfile(self):
1000 return self.join('hg-shamap')
1000 return self.join('hg-shamap')
1001
1001
1002 def authorfile(self):
1002 def authorfile(self):
1003 return self.join('hg-authormap')
1003 return self.join('hg-authormap')
1004
1004
1005 def __init__(self, ui, path):
1005 def __init__(self, ui, path):
1006
1006
1007 converter_sink.__init__(self, ui, path)
1007 converter_sink.__init__(self, ui, path)
1008 commandline.__init__(self, ui, 'svn')
1008 commandline.__init__(self, ui, 'svn')
1009 self.delete = []
1009 self.delete = []
1010 self.setexec = []
1010 self.setexec = []
1011 self.delexec = []
1011 self.delexec = []
1012 self.copies = []
1012 self.copies = []
1013 self.wc = None
1013 self.wc = None
1014 self.cwd = os.getcwd()
1014 self.cwd = os.getcwd()
1015
1015
1016 path = os.path.realpath(path)
1016 path = os.path.realpath(path)
1017
1017
1018 created = False
1018 created = False
1019 if os.path.isfile(os.path.join(path, '.svn', 'entries')):
1019 if os.path.isfile(os.path.join(path, '.svn', 'entries')):
1020 self.wc = path
1020 self.wc = path
1021 self.run0('update')
1021 self.run0('update')
1022 else:
1022 else:
1023 wcpath = os.path.join(os.getcwd(), os.path.basename(path) + '-wc')
1023 wcpath = os.path.join(os.getcwd(), os.path.basename(path) + '-wc')
1024
1024
1025 if os.path.isdir(os.path.dirname(path)):
1025 if os.path.isdir(os.path.dirname(path)):
1026 if not os.path.exists(os.path.join(path, 'db', 'fs-type')):
1026 if not os.path.exists(os.path.join(path, 'db', 'fs-type')):
1027 ui.status(_('initializing svn repository %r\n') %
1027 ui.status(_('initializing svn repository %r\n') %
1028 os.path.basename(path))
1028 os.path.basename(path))
1029 commandline(ui, 'svnadmin').run0('create', path)
1029 commandline(ui, 'svnadmin').run0('create', path)
1030 created = path
1030 created = path
1031 path = util.normpath(path)
1031 path = util.normpath(path)
1032 if not path.startswith('/'):
1032 if not path.startswith('/'):
1033 path = '/' + path
1033 path = '/' + path
1034 path = 'file://' + path
1034 path = 'file://' + path
1035
1035
1036 ui.status(_('initializing svn working copy %r\n')
1036 ui.status(_('initializing svn working copy %r\n')
1037 % os.path.basename(wcpath))
1037 % os.path.basename(wcpath))
1038 self.run0('checkout', path, wcpath)
1038 self.run0('checkout', path, wcpath)
1039
1039
1040 self.wc = wcpath
1040 self.wc = wcpath
1041 self.opener = scmutil.opener(self.wc)
1041 self.opener = scmutil.opener(self.wc)
1042 self.wopener = scmutil.opener(self.wc)
1042 self.wopener = scmutil.opener(self.wc)
1043 self.childmap = mapfile(ui, self.join('hg-childmap'))
1043 self.childmap = mapfile(ui, self.join('hg-childmap'))
1044 self.is_exec = util.checkexec(self.wc) and util.isexec or None
1044 self.is_exec = util.checkexec(self.wc) and util.isexec or None
1045
1045
1046 if created:
1046 if created:
1047 hook = os.path.join(created, 'hooks', 'pre-revprop-change')
1047 hook = os.path.join(created, 'hooks', 'pre-revprop-change')
1048 fp = open(hook, 'w')
1048 fp = open(hook, 'w')
1049 fp.write(pre_revprop_change)
1049 fp.write(pre_revprop_change)
1050 fp.close()
1050 fp.close()
1051 util.setflags(hook, False, True)
1051 util.setflags(hook, False, True)
1052
1052
1053 output = self.run0('info')
1053 output = self.run0('info')
1054 self.uuid = self.uuid_re.search(output).group(1).strip()
1054 self.uuid = self.uuid_re.search(output).group(1).strip()
1055
1055
1056 def wjoin(self, *names):
1056 def wjoin(self, *names):
1057 return os.path.join(self.wc, *names)
1057 return os.path.join(self.wc, *names)
1058
1058
1059 @propertycache
1059 @propertycache
1060 def manifest(self):
1060 def manifest(self):
1061 # As of svn 1.7, the "add" command fails when receiving
1061 # As of svn 1.7, the "add" command fails when receiving
1062 # already tracked entries, so we have to track and filter them
1062 # already tracked entries, so we have to track and filter them
1063 # ourselves.
1063 # ourselves.
1064 m = set()
1064 m = set()
1065 output = self.run0('ls', recursive=True, xml=True)
1065 output = self.run0('ls', recursive=True, xml=True)
1066 doc = xml.dom.minidom.parseString(output)
1066 doc = xml.dom.minidom.parseString(output)
1067 for e in doc.getElementsByTagName('entry'):
1067 for e in doc.getElementsByTagName('entry'):
1068 for n in e.childNodes:
1068 for n in e.childNodes:
1069 if n.nodeType != n.ELEMENT_NODE or n.tagName != 'name':
1069 if n.nodeType != n.ELEMENT_NODE or n.tagName != 'name':
1070 continue
1070 continue
1071 name = ''.join(c.data for c in n.childNodes
1071 name = ''.join(c.data for c in n.childNodes
1072 if c.nodeType == c.TEXT_NODE)
1072 if c.nodeType == c.TEXT_NODE)
1073 # Entries are compared with names coming from
1073 # Entries are compared with names coming from
1074 # mercurial, so bytes with undefined encoding. Our
1074 # mercurial, so bytes with undefined encoding. Our
1075 # best bet is to assume they are in local
1075 # best bet is to assume they are in local
1076 # encoding. They will be passed to command line calls
1076 # encoding. They will be passed to command line calls
1077 # later anyway, so they better be.
1077 # later anyway, so they better be.
1078 m.add(encoding.tolocal(name.encode('utf-8')))
1078 m.add(encoding.tolocal(name.encode('utf-8')))
1079 break
1079 break
1080 return m
1080 return m
1081
1081
1082 def putfile(self, filename, flags, data):
1082 def putfile(self, filename, flags, data):
1083 if 'l' in flags:
1083 if 'l' in flags:
1084 self.wopener.symlink(data, filename)
1084 self.wopener.symlink(data, filename)
1085 else:
1085 else:
1086 try:
1086 try:
1087 if os.path.islink(self.wjoin(filename)):
1087 if os.path.islink(self.wjoin(filename)):
1088 os.unlink(filename)
1088 os.unlink(filename)
1089 except OSError:
1089 except OSError:
1090 pass
1090 pass
1091 self.wopener.write(filename, data)
1091 self.wopener.write(filename, data)
1092
1092
1093 if self.is_exec:
1093 if self.is_exec:
1094 was_exec = self.is_exec(self.wjoin(filename))
1094 was_exec = self.is_exec(self.wjoin(filename))
1095 else:
1095 else:
1096 # On filesystems not supporting execute-bit, there is no way
1096 # On filesystems not supporting execute-bit, there is no way
1097 # to know if it is set but asking subversion. Setting it
1097 # to know if it is set but asking subversion. Setting it
1098 # systematically is just as expensive and much simpler.
1098 # systematically is just as expensive and much simpler.
1099 was_exec = 'x' not in flags
1099 was_exec = 'x' not in flags
1100
1100
1101 util.setflags(self.wjoin(filename), False, 'x' in flags)
1101 util.setflags(self.wjoin(filename), False, 'x' in flags)
1102 if was_exec:
1102 if was_exec:
1103 if 'x' not in flags:
1103 if 'x' not in flags:
1104 self.delexec.append(filename)
1104 self.delexec.append(filename)
1105 else:
1105 else:
1106 if 'x' in flags:
1106 if 'x' in flags:
1107 self.setexec.append(filename)
1107 self.setexec.append(filename)
1108
1108
1109 def _copyfile(self, source, dest):
1109 def _copyfile(self, source, dest):
1110 # SVN's copy command pukes if the destination file exists, but
1110 # SVN's copy command pukes if the destination file exists, but
1111 # our copyfile method expects to record a copy that has
1111 # our copyfile method expects to record a copy that has
1112 # already occurred. Cross the semantic gap.
1112 # already occurred. Cross the semantic gap.
1113 wdest = self.wjoin(dest)
1113 wdest = self.wjoin(dest)
1114 exists = os.path.lexists(wdest)
1114 exists = os.path.lexists(wdest)
1115 if exists:
1115 if exists:
1116 fd, tempname = tempfile.mkstemp(
1116 fd, tempname = tempfile.mkstemp(
1117 prefix='hg-copy-', dir=os.path.dirname(wdest))
1117 prefix='hg-copy-', dir=os.path.dirname(wdest))
1118 os.close(fd)
1118 os.close(fd)
1119 os.unlink(tempname)
1119 os.unlink(tempname)
1120 os.rename(wdest, tempname)
1120 os.rename(wdest, tempname)
1121 try:
1121 try:
1122 self.run0('copy', source, dest)
1122 self.run0('copy', source, dest)
1123 finally:
1123 finally:
1124 self.manifest.add(dest)
1124 self.manifest.add(dest)
1125 if exists:
1125 if exists:
1126 try:
1126 try:
1127 os.unlink(wdest)
1127 os.unlink(wdest)
1128 except OSError:
1128 except OSError:
1129 pass
1129 pass
1130 os.rename(tempname, wdest)
1130 os.rename(tempname, wdest)
1131
1131
1132 def dirs_of(self, files):
1132 def dirs_of(self, files):
1133 dirs = set()
1133 dirs = set()
1134 for f in files:
1134 for f in files:
1135 if os.path.isdir(self.wjoin(f)):
1135 if os.path.isdir(self.wjoin(f)):
1136 dirs.add(f)
1136 dirs.add(f)
1137 for i in strutil.rfindall(f, '/'):
1137 for i in strutil.rfindall(f, '/'):
1138 dirs.add(f[:i])
1138 dirs.add(f[:i])
1139 return dirs
1139 return dirs
1140
1140
1141 def add_dirs(self, files):
1141 def add_dirs(self, files):
1142 add_dirs = [d for d in sorted(self.dirs_of(files))
1142 add_dirs = [d for d in sorted(self.dirs_of(files))
1143 if d not in self.manifest]
1143 if d not in self.manifest]
1144 if add_dirs:
1144 if add_dirs:
1145 self.manifest.update(add_dirs)
1145 self.manifest.update(add_dirs)
1146 self.xargs(add_dirs, 'add', non_recursive=True, quiet=True)
1146 self.xargs(add_dirs, 'add', non_recursive=True, quiet=True)
1147 return add_dirs
1147 return add_dirs
1148
1148
1149 def add_files(self, files):
1149 def add_files(self, files):
1150 files = [f for f in files if f not in self.manifest]
1150 files = [f for f in files if f not in self.manifest]
1151 if files:
1151 if files:
1152 self.manifest.update(files)
1152 self.manifest.update(files)
1153 self.xargs(files, 'add', quiet=True)
1153 self.xargs(files, 'add', quiet=True)
1154 return files
1154 return files
1155
1155
1156 def tidy_dirs(self, names):
1156 def tidy_dirs(self, names):
1157 deleted = []
1157 deleted = []
1158 for d in sorted(self.dirs_of(names), reverse=True):
1158 for d in sorted(self.dirs_of(names), reverse=True):
1159 wd = self.wjoin(d)
1159 wd = self.wjoin(d)
1160 if os.listdir(wd) == '.svn':
1160 if os.listdir(wd) == '.svn':
1161 self.run0('delete', d)
1161 self.run0('delete', d)
1162 self.manifest.remove(d)
1162 self.manifest.remove(d)
1163 deleted.append(d)
1163 deleted.append(d)
1164 return deleted
1164 return deleted
1165
1165
1166 def addchild(self, parent, child):
1166 def addchild(self, parent, child):
1167 self.childmap[parent] = child
1167 self.childmap[parent] = child
1168
1168
1169 def revid(self, rev):
1169 def revid(self, rev):
1170 return u"svn:%s@%s" % (self.uuid, rev)
1170 return u"svn:%s@%s" % (self.uuid, rev)
1171
1171
1172 def putcommit(self, files, copies, parents, commit, source, revmap):
1172 def putcommit(self, files, copies, parents, commit, source, revmap):
1173 for parent in parents:
1173 for parent in parents:
1174 try:
1174 try:
1175 return self.revid(self.childmap[parent])
1175 return self.revid(self.childmap[parent])
1176 except KeyError:
1176 except KeyError:
1177 pass
1177 pass
1178
1178
1179 # Apply changes to working copy
1179 # Apply changes to working copy
1180 for f, v in files:
1180 for f, v in files:
1181 try:
1181 try:
1182 data, mode = source.getfile(f, v)
1182 data, mode = source.getfile(f, v)
1183 except IOError:
1183 except IOError:
1184 self.delete.append(f)
1184 self.delete.append(f)
1185 else:
1185 else:
1186 self.putfile(f, mode, data)
1186 self.putfile(f, mode, data)
1187 if f in copies:
1187 if f in copies:
1188 self.copies.append([copies[f], f])
1188 self.copies.append([copies[f], f])
1189 files = [f[0] for f in files]
1189 files = [f[0] for f in files]
1190
1190
1191 entries = set(self.delete)
1191 entries = set(self.delete)
1192 files = frozenset(files)
1192 files = frozenset(files)
1193 entries.update(self.add_dirs(files.difference(entries)))
1193 entries.update(self.add_dirs(files.difference(entries)))
1194 if self.copies:
1194 if self.copies:
1195 for s, d in self.copies:
1195 for s, d in self.copies:
1196 self._copyfile(s, d)
1196 self._copyfile(s, d)
1197 self.copies = []
1197 self.copies = []
1198 if self.delete:
1198 if self.delete:
1199 self.xargs(self.delete, 'delete')
1199 self.xargs(self.delete, 'delete')
1200 for f in self.delete:
1200 for f in self.delete:
1201 self.manifest.remove(f)
1201 self.manifest.remove(f)
1202 self.delete = []
1202 self.delete = []
1203 entries.update(self.add_files(files.difference(entries)))
1203 entries.update(self.add_files(files.difference(entries)))
1204 entries.update(self.tidy_dirs(entries))
1204 entries.update(self.tidy_dirs(entries))
1205 if self.delexec:
1205 if self.delexec:
1206 self.xargs(self.delexec, 'propdel', 'svn:executable')
1206 self.xargs(self.delexec, 'propdel', 'svn:executable')
1207 self.delexec = []
1207 self.delexec = []
1208 if self.setexec:
1208 if self.setexec:
1209 self.xargs(self.setexec, 'propset', 'svn:executable', '*')
1209 self.xargs(self.setexec, 'propset', 'svn:executable', '*')
1210 self.setexec = []
1210 self.setexec = []
1211
1211
1212 fd, messagefile = tempfile.mkstemp(prefix='hg-convert-')
1212 fd, messagefile = tempfile.mkstemp(prefix='hg-convert-')
1213 fp = os.fdopen(fd, 'w')
1213 fp = os.fdopen(fd, 'w')
1214 fp.write(commit.desc)
1214 fp.write(commit.desc)
1215 fp.close()
1215 fp.close()
1216 try:
1216 try:
1217 output = self.run0('commit',
1217 output = self.run0('commit',
1218 username=util.shortuser(commit.author),
1218 username=util.shortuser(commit.author),
1219 file=messagefile,
1219 file=messagefile,
1220 encoding='utf-8')
1220 encoding='utf-8')
1221 try:
1221 try:
1222 rev = self.commit_re.search(output).group(1)
1222 rev = self.commit_re.search(output).group(1)
1223 except AttributeError:
1223 except AttributeError:
1224 if not files:
1224 if not files:
1225 return parents[0]
1225 return parents[0]
1226 self.ui.warn(_('unexpected svn output:\n'))
1226 self.ui.warn(_('unexpected svn output:\n'))
1227 self.ui.warn(output)
1227 self.ui.warn(output)
1228 raise util.Abort(_('unable to cope with svn output'))
1228 raise util.Abort(_('unable to cope with svn output'))
1229 if commit.rev:
1229 if commit.rev:
1230 self.run('propset', 'hg:convert-rev', commit.rev,
1230 self.run('propset', 'hg:convert-rev', commit.rev,
1231 revprop=True, revision=rev)
1231 revprop=True, revision=rev)
1232 if commit.branch and commit.branch != 'default':
1232 if commit.branch and commit.branch != 'default':
1233 self.run('propset', 'hg:convert-branch', commit.branch,
1233 self.run('propset', 'hg:convert-branch', commit.branch,
1234 revprop=True, revision=rev)
1234 revprop=True, revision=rev)
1235 for parent in parents:
1235 for parent in parents:
1236 self.addchild(parent, rev)
1236 self.addchild(parent, rev)
1237 return self.revid(rev)
1237 return self.revid(rev)
1238 finally:
1238 finally:
1239 os.unlink(messagefile)
1239 os.unlink(messagefile)
1240
1240
1241 def puttags(self, tags):
1241 def puttags(self, tags):
1242 self.ui.warn(_('writing Subversion tags is not yet implemented\n'))
1242 self.ui.warn(_('writing Subversion tags is not yet implemented\n'))
1243 return None, None
1243 return None, None
1244
1244
1245 def hascommit(self, rev):
1245 def hascommit(self, rev):
1246 # This is not correct as one can convert to an existing subversion
1246 # This is not correct as one can convert to an existing subversion
1247 # repository and childmap would not list all revisions. Too bad.
1247 # repository and childmap would not list all revisions. Too bad.
1248 if rev in self.childmap:
1248 if rev in self.childmap:
1249 return True
1249 return True
1250 raise util.Abort(_('splice map revision %s not found in subversion '
1250 raise util.Abort(_('splice map revision %s not found in subversion '
1251 'child map (revision lookups are not implemented)')
1251 'child map (revision lookups are not implemented)')
1252 % rev)
1252 % rev)
@@ -1,517 +1,517 b''
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''High-level command function for lfconvert, plus the cmdtable.'''
9 '''High-level command function for lfconvert, plus the cmdtable.'''
10
10
11 import os
11 import os
12 import shutil
12 import shutil
13
13
14 from mercurial import util, match as match_, hg, node, context, error, cmdutil
14 from mercurial import util, match as match_, hg, node, context, error, cmdutil
15 from mercurial.i18n import _
15 from mercurial.i18n import _
16
16
17 import lfutil
17 import lfutil
18 import basestore
18 import basestore
19
19
20 # -- Commands ----------------------------------------------------------
20 # -- Commands ----------------------------------------------------------
21
21
22 def lfconvert(ui, src, dest, *pats, **opts):
22 def lfconvert(ui, src, dest, *pats, **opts):
23 '''convert a normal repository to a largefiles repository
23 '''convert a normal repository to a largefiles repository
24
24
25 Convert repository SOURCE to a new repository DEST, identical to
25 Convert repository SOURCE to a new repository DEST, identical to
26 SOURCE except that certain files will be converted as largefiles:
26 SOURCE except that certain files will be converted as largefiles:
27 specifically, any file that matches any PATTERN *or* whose size is
27 specifically, any file that matches any PATTERN *or* whose size is
28 above the minimum size threshold is converted as a largefile. The
28 above the minimum size threshold is converted as a largefile. The
29 size used to determine whether or not to track a file as a
29 size used to determine whether or not to track a file as a
30 largefile is the size of the first version of the file. The
30 largefile is the size of the first version of the file. The
31 minimum size can be specified either with --size or in
31 minimum size can be specified either with --size or in
32 configuration as ``largefiles.size``.
32 configuration as ``largefiles.size``.
33
33
34 After running this command you will need to make sure that
34 After running this command you will need to make sure that
35 largefiles is enabled anywhere you intend to push the new
35 largefiles is enabled anywhere you intend to push the new
36 repository.
36 repository.
37
37
38 Use --to-normal to convert largefiles back to normal files; after
38 Use --to-normal to convert largefiles back to normal files; after
39 this, the DEST repository can be used without largefiles at all.'''
39 this, the DEST repository can be used without largefiles at all.'''
40
40
41 if opts['to_normal']:
41 if opts['to_normal']:
42 tolfile = False
42 tolfile = False
43 else:
43 else:
44 tolfile = True
44 tolfile = True
45 size = lfutil.getminsize(ui, True, opts.get('size'), default=None)
45 size = lfutil.getminsize(ui, True, opts.get('size'), default=None)
46
46
47 if not hg.islocal(src):
47 if not hg.islocal(src):
48 raise util.Abort(_('%s is not a local Mercurial repo') % src)
48 raise util.Abort(_('%s is not a local Mercurial repo') % src)
49 if not hg.islocal(dest):
49 if not hg.islocal(dest):
50 raise util.Abort(_('%s is not a local Mercurial repo') % dest)
50 raise util.Abort(_('%s is not a local Mercurial repo') % dest)
51
51
52 rsrc = hg.repository(ui, src)
52 rsrc = hg.repository(ui, src)
53 ui.status(_('initializing destination %s\n') % dest)
53 ui.status(_('initializing destination %s\n') % dest)
54 rdst = hg.repository(ui, dest, create=True)
54 rdst = hg.repository(ui, dest, create=True)
55
55
56 success = False
56 success = False
57 try:
57 try:
58 # Lock destination to prevent modification while it is converted to.
58 # Lock destination to prevent modification while it is converted to.
59 # Don't need to lock src because we are just reading from its history
59 # Don't need to lock src because we are just reading from its history
60 # which can't change.
60 # which can't change.
61 dstlock = rdst.lock()
61 dstlock = rdst.lock()
62
62
63 # Get a list of all changesets in the source. The easy way to do this
63 # Get a list of all changesets in the source. The easy way to do this
64 # is to simply walk the changelog, using changelog.nodesbewteen().
64 # is to simply walk the changelog, using changelog.nodesbewteen().
65 # Take a look at mercurial/revlog.py:639 for more details.
65 # Take a look at mercurial/revlog.py:639 for more details.
66 # Use a generator instead of a list to decrease memory usage
66 # Use a generator instead of a list to decrease memory usage
67 ctxs = (rsrc[ctx] for ctx in rsrc.changelog.nodesbetween(None,
67 ctxs = (rsrc[ctx] for ctx in rsrc.changelog.nodesbetween(None,
68 rsrc.heads())[0])
68 rsrc.heads())[0])
69 revmap = {node.nullid: node.nullid}
69 revmap = {node.nullid: node.nullid}
70 if tolfile:
70 if tolfile:
71 lfiles = set()
71 lfiles = set()
72 normalfiles = set()
72 normalfiles = set()
73 if not pats:
73 if not pats:
74 pats = ui.configlist(lfutil.longname, 'patterns', default=[])
74 pats = ui.configlist(lfutil.longname, 'patterns', default=[])
75 if pats:
75 if pats:
76 matcher = match_.match(rsrc.root, '', list(pats))
76 matcher = match_.match(rsrc.root, '', list(pats))
77 else:
77 else:
78 matcher = None
78 matcher = None
79
79
80 lfiletohash = {}
80 lfiletohash = {}
81 for ctx in ctxs:
81 for ctx in ctxs:
82 ui.progress(_('converting revisions'), ctx.rev(),
82 ui.progress(_('converting revisions'), ctx.rev(),
83 unit=_('revision'), total=rsrc['tip'].rev())
83 unit=_('revision'), total=rsrc['tip'].rev())
84 _lfconvert_addchangeset(rsrc, rdst, ctx, revmap,
84 _lfconvert_addchangeset(rsrc, rdst, ctx, revmap,
85 lfiles, normalfiles, matcher, size, lfiletohash)
85 lfiles, normalfiles, matcher, size, lfiletohash)
86 ui.progress(_('converting revisions'), None)
86 ui.progress(_('converting revisions'), None)
87
87
88 if os.path.exists(rdst.wjoin(lfutil.shortname)):
88 if os.path.exists(rdst.wjoin(lfutil.shortname)):
89 shutil.rmtree(rdst.wjoin(lfutil.shortname))
89 shutil.rmtree(rdst.wjoin(lfutil.shortname))
90
90
91 for f in lfiletohash.keys():
91 for f in lfiletohash.keys():
92 if os.path.isfile(rdst.wjoin(f)):
92 if os.path.isfile(rdst.wjoin(f)):
93 os.unlink(rdst.wjoin(f))
93 os.unlink(rdst.wjoin(f))
94 try:
94 try:
95 os.removedirs(os.path.dirname(rdst.wjoin(f)))
95 os.removedirs(os.path.dirname(rdst.wjoin(f)))
96 except OSError:
96 except OSError:
97 pass
97 pass
98
98
99 # If there were any files converted to largefiles, add largefiles
99 # If there were any files converted to largefiles, add largefiles
100 # to the destination repository's requirements.
100 # to the destination repository's requirements.
101 if lfiles:
101 if lfiles:
102 rdst.requirements.add('largefiles')
102 rdst.requirements.add('largefiles')
103 rdst._writerequirements()
103 rdst._writerequirements()
104 else:
104 else:
105 for ctx in ctxs:
105 for ctx in ctxs:
106 ui.progress(_('converting revisions'), ctx.rev(),
106 ui.progress(_('converting revisions'), ctx.rev(),
107 unit=_('revision'), total=rsrc['tip'].rev())
107 unit=_('revision'), total=rsrc['tip'].rev())
108 _addchangeset(ui, rsrc, rdst, ctx, revmap)
108 _addchangeset(ui, rsrc, rdst, ctx, revmap)
109
109
110 ui.progress(_('converting revisions'), None)
110 ui.progress(_('converting revisions'), None)
111 success = True
111 success = True
112 finally:
112 finally:
113 if not success:
113 if not success:
114 # we failed, remove the new directory
114 # we failed, remove the new directory
115 shutil.rmtree(rdst.root)
115 shutil.rmtree(rdst.root)
116 dstlock.release()
116 dstlock.release()
117
117
118 def _addchangeset(ui, rsrc, rdst, ctx, revmap):
118 def _addchangeset(ui, rsrc, rdst, ctx, revmap):
119 # Convert src parents to dst parents
119 # Convert src parents to dst parents
120 parents = _convertparents(ctx, revmap)
120 parents = _convertparents(ctx, revmap)
121
121
122 # Generate list of changed files
122 # Generate list of changed files
123 files = _getchangedfiles(ctx, parents)
123 files = _getchangedfiles(ctx, parents)
124
124
125 def getfilectx(repo, memctx, f):
125 def getfilectx(repo, memctx, f):
126 if lfutil.standin(f) in files:
126 if lfutil.standin(f) in files:
127 # if the file isn't in the manifest then it was removed
127 # if the file isn't in the manifest then it was removed
128 # or renamed, raise IOError to indicate this
128 # or renamed, raise IOError to indicate this
129 try:
129 try:
130 fctx = ctx.filectx(lfutil.standin(f))
130 fctx = ctx.filectx(lfutil.standin(f))
131 except error.LookupError:
131 except error.LookupError:
132 raise IOError()
132 raise IOError
133 renamed = fctx.renamed()
133 renamed = fctx.renamed()
134 if renamed:
134 if renamed:
135 renamed = lfutil.splitstandin(renamed[0])
135 renamed = lfutil.splitstandin(renamed[0])
136
136
137 hash = fctx.data().strip()
137 hash = fctx.data().strip()
138 path = lfutil.findfile(rsrc, hash)
138 path = lfutil.findfile(rsrc, hash)
139 ### TODO: What if the file is not cached?
139 ### TODO: What if the file is not cached?
140 data = ''
140 data = ''
141 fd = None
141 fd = None
142 try:
142 try:
143 fd = open(path, 'rb')
143 fd = open(path, 'rb')
144 data = fd.read()
144 data = fd.read()
145 finally:
145 finally:
146 if fd:
146 if fd:
147 fd.close()
147 fd.close()
148 return context.memfilectx(f, data, 'l' in fctx.flags(),
148 return context.memfilectx(f, data, 'l' in fctx.flags(),
149 'x' in fctx.flags(), renamed)
149 'x' in fctx.flags(), renamed)
150 else:
150 else:
151 return _getnormalcontext(repo.ui, ctx, f, revmap)
151 return _getnormalcontext(repo.ui, ctx, f, revmap)
152
152
153 dstfiles = []
153 dstfiles = []
154 for file in files:
154 for file in files:
155 if lfutil.isstandin(file):
155 if lfutil.isstandin(file):
156 dstfiles.append(lfutil.splitstandin(file))
156 dstfiles.append(lfutil.splitstandin(file))
157 else:
157 else:
158 dstfiles.append(file)
158 dstfiles.append(file)
159 # Commit
159 # Commit
160 _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap)
160 _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap)
161
161
162 def _lfconvert_addchangeset(rsrc, rdst, ctx, revmap, lfiles, normalfiles,
162 def _lfconvert_addchangeset(rsrc, rdst, ctx, revmap, lfiles, normalfiles,
163 matcher, size, lfiletohash):
163 matcher, size, lfiletohash):
164 # Convert src parents to dst parents
164 # Convert src parents to dst parents
165 parents = _convertparents(ctx, revmap)
165 parents = _convertparents(ctx, revmap)
166
166
167 # Generate list of changed files
167 # Generate list of changed files
168 files = _getchangedfiles(ctx, parents)
168 files = _getchangedfiles(ctx, parents)
169
169
170 dstfiles = []
170 dstfiles = []
171 for f in files:
171 for f in files:
172 if f not in lfiles and f not in normalfiles:
172 if f not in lfiles and f not in normalfiles:
173 islfile = _islfile(f, ctx, matcher, size)
173 islfile = _islfile(f, ctx, matcher, size)
174 # If this file was renamed or copied then copy
174 # If this file was renamed or copied then copy
175 # the lfileness of its predecessor
175 # the lfileness of its predecessor
176 if f in ctx.manifest():
176 if f in ctx.manifest():
177 fctx = ctx.filectx(f)
177 fctx = ctx.filectx(f)
178 renamed = fctx.renamed()
178 renamed = fctx.renamed()
179 renamedlfile = renamed and renamed[0] in lfiles
179 renamedlfile = renamed and renamed[0] in lfiles
180 islfile |= renamedlfile
180 islfile |= renamedlfile
181 if 'l' in fctx.flags():
181 if 'l' in fctx.flags():
182 if renamedlfile:
182 if renamedlfile:
183 raise util.Abort(
183 raise util.Abort(
184 _('renamed/copied largefile %s becomes symlink')
184 _('renamed/copied largefile %s becomes symlink')
185 % f)
185 % f)
186 islfile = False
186 islfile = False
187 if islfile:
187 if islfile:
188 lfiles.add(f)
188 lfiles.add(f)
189 else:
189 else:
190 normalfiles.add(f)
190 normalfiles.add(f)
191
191
192 if f in lfiles:
192 if f in lfiles:
193 dstfiles.append(lfutil.standin(f))
193 dstfiles.append(lfutil.standin(f))
194 # largefile in manifest if it has not been removed/renamed
194 # largefile in manifest if it has not been removed/renamed
195 if f in ctx.manifest():
195 if f in ctx.manifest():
196 fctx = ctx.filectx(f)
196 fctx = ctx.filectx(f)
197 if 'l' in fctx.flags():
197 if 'l' in fctx.flags():
198 renamed = fctx.renamed()
198 renamed = fctx.renamed()
199 if renamed and renamed[0] in lfiles:
199 if renamed and renamed[0] in lfiles:
200 raise util.Abort(_('largefile %s becomes symlink') % f)
200 raise util.Abort(_('largefile %s becomes symlink') % f)
201
201
202 # largefile was modified, update standins
202 # largefile was modified, update standins
203 fullpath = rdst.wjoin(f)
203 fullpath = rdst.wjoin(f)
204 util.makedirs(os.path.dirname(fullpath))
204 util.makedirs(os.path.dirname(fullpath))
205 m = util.sha1('')
205 m = util.sha1('')
206 m.update(ctx[f].data())
206 m.update(ctx[f].data())
207 hash = m.hexdigest()
207 hash = m.hexdigest()
208 if f not in lfiletohash or lfiletohash[f] != hash:
208 if f not in lfiletohash or lfiletohash[f] != hash:
209 try:
209 try:
210 fd = open(fullpath, 'wb')
210 fd = open(fullpath, 'wb')
211 fd.write(ctx[f].data())
211 fd.write(ctx[f].data())
212 finally:
212 finally:
213 if fd:
213 if fd:
214 fd.close()
214 fd.close()
215 executable = 'x' in ctx[f].flags()
215 executable = 'x' in ctx[f].flags()
216 os.chmod(fullpath, lfutil.getmode(executable))
216 os.chmod(fullpath, lfutil.getmode(executable))
217 lfutil.writestandin(rdst, lfutil.standin(f), hash,
217 lfutil.writestandin(rdst, lfutil.standin(f), hash,
218 executable)
218 executable)
219 lfiletohash[f] = hash
219 lfiletohash[f] = hash
220 else:
220 else:
221 # normal file
221 # normal file
222 dstfiles.append(f)
222 dstfiles.append(f)
223
223
    def getfilectx(repo, memctx, f):
        '''memctx callback: return the file context for f in the converted
        changeset.

        Closes over ctx (source changeset), lfiletohash (largefile ->
        hash map) and revmap from the enclosing conversion loop. For
        standin files the committed content is the largefile's hash;
        normal files are delegated to _getnormalcontext. Raises IOError
        when the file is absent from the source manifest, which memctx
        interprets as "removed/renamed".'''
        if lfutil.isstandin(f):
            # if the file isn't in the manifest then it was removed
            # or renamed, raise IOError to indicate this
            srcfname = lfutil.splitstandin(f)
            try:
                fctx = ctx.filectx(srcfname)
            except error.LookupError:
                raise IOError
            renamed = fctx.renamed()
            if renamed:
                # standin is always a largefile because largefile-ness
                # doesn't change after rename or copy
                renamed = lfutil.standin(renamed[0])

            # standin content is the hash plus a trailing newline; flags
            # come from the original largefile
            return context.memfilectx(f, lfiletohash[srcfname] + '\n', 'l' in
                fctx.flags(), 'x' in fctx.flags(), renamed)
        else:
            return _getnormalcontext(repo.ui, ctx, f, revmap)
243
243
244 # Commit
244 # Commit
245 _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap)
245 _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap)
246
246
def _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap):
    '''Commit one converted changeset into rdst and record the mapping.

    Builds an in-memory changeset mirroring ctx's metadata (description,
    user, date, extras), commits it, makes the new node the working
    directory parent, and maps ctx's source node to the destination tip
    in revmap.'''
    memctx = context.memctx(rdst, parents, ctx.description(), dstfiles,
                            getfilectx, ctx.user(), ctx.date(), ctx.extra())
    newnode = rdst.commitctx(memctx)
    rdst.setparents(newnode)
    revmap[ctx.node()] = rdst.changelog.tip()
253
253
254 # Generate list of changed files
254 # Generate list of changed files
def _getchangedfiles(ctx, parents):
    '''Return the set of files touched by changeset ctx.

    For merges (neither parent is the null revision) ctx.files() is
    unreliable, so the manifest is also compared against both parent
    manifests directly.'''
    changed = set(ctx.files())
    if node.nullid not in parents:
        mc = ctx.manifest()
        mp1 = ctx.parents()[0].manifest()
        mp2 = ctx.parents()[1].manifest()
        # files present in a parent but gone from the merge result
        changed.update((set(mp1) | set(mp2)) - set(mc))
        # files whose content differs from either parent's version
        for fname in mc:
            if mc[fname] != mp1.get(fname, None) or \
               mc[fname] != mp2.get(fname, None):
                changed.add(fname)
    return changed
266
266
267 # Convert src parents to dst parents
267 # Convert src parents to dst parents
def _convertparents(ctx, revmap):
    '''Map ctx's source-repository parents to their destination nodes.

    Looks each parent up in revmap and pads the result with null nodes
    so exactly two parents are always returned.'''
    parents = [revmap[p.node()] for p in ctx.parents()]
    while len(parents) < 2:
        parents.append(node.nullid)
    return parents
275
275
276 # Get memfilectx for a normal file
276 # Get memfilectx for a normal file
def _getnormalcontext(ui, ctx, f, revmap):
    '''Build a memfilectx for a normal (non-largefile) file f of ctx.

    Raises IOError when f is not in ctx's manifest, which tells memctx
    the file was removed or renamed. The content of .hgtags is rewritten
    so its recorded nodes point at converted changesets.'''
    try:
        fctx = ctx.filectx(f)
    except error.LookupError:
        raise IOError
    renamed = fctx.renamed()
    if renamed:
        renamed = renamed[0]

    data = fctx.data()
    if f == '.hgtags':
        data = _converttags(ui, revmap, data)
    islink = 'l' in fctx.flags()
    isexec = 'x' in fctx.flags()
    return context.memfilectx(f, data, islink, isexec, renamed)
291
291
292 # Remap tag data using a revision map
292 # Remap tag data using a revision map
def _converttags(ui, revmap, data):
    '''Rewrite .hgtags content, mapping every recorded node through revmap.

    Lines that cannot be parsed, contain a malformed node id, or whose
    node has no entry in revmap are dropped with a warning on ui.
    Returns the rewritten tag file content as a string.'''
    newdata = []
    for line in data.splitlines():
        try:
            id, name = line.split(' ', 1)
        except ValueError:
            # bug fix: apply % interpolation *after* the gettext lookup;
            # with the argument inside _() the catalog key never matches
            # and the message is never translated
            ui.warn(_('skipping incorrectly formatted tag %s\n') % line)
            continue
        try:
            newid = node.bin(id)
        except TypeError:
            # same fix as above: interpolate outside _()
            ui.warn(_('skipping incorrectly formatted id %s\n') % id)
            continue
        try:
            newdata.append('%s %s\n' % (node.hex(revmap[newid]),
                                        name))
        except KeyError:
            ui.warn(_('no mapping for id %s\n') % id)
            continue
    return ''.join(newdata)
315
315
316 def _islfile(file, ctx, matcher, size):
316 def _islfile(file, ctx, matcher, size):
317 '''Return true if file should be considered a largefile, i.e.
317 '''Return true if file should be considered a largefile, i.e.
318 matcher matches it or it is larger than size.'''
318 matcher matches it or it is larger than size.'''
319 # never store special .hg* files as largefiles
319 # never store special .hg* files as largefiles
320 if file == '.hgtags' or file == '.hgignore' or file == '.hgsigs':
320 if file == '.hgtags' or file == '.hgignore' or file == '.hgsigs':
321 return False
321 return False
322 if matcher and matcher(file):
322 if matcher and matcher(file):
323 return True
323 return True
324 try:
324 try:
325 return ctx.filectx(file).size() >= size * 1024 * 1024
325 return ctx.filectx(file).size() >= size * 1024 * 1024
326 except error.LookupError:
326 except error.LookupError:
327 return False
327 return False
328
328
def uploadlfiles(ui, rsrc, rdst, files):
    '''upload largefiles to the central store'''

    if not files:
        return

    store = basestore._openstore(rsrc, rdst, put=True)

    # only transfer hashes the remote store does not already have
    missing = [h for h in files if not store.exists(h)]
    at = 0
    for hash in missing:
        ui.progress(_('uploading largefiles'), at, unit='largefile',
                    total=len(missing))
        source = lfutil.findfile(rsrc, hash)
        if not source:
            raise util.Abort(_('largefile %s missing from store'
                               ' (needs to be uploaded)') % hash)
        # XXX check for errors here
        store.put(source, hash)
        at += 1
    ui.progress(_('uploading largefiles'), None)
350
350
def verifylfiles(ui, repo, all=False, contents=False):
    '''Verify that every big file revision in the current changeset
    exists in the central store. With --contents, also verify that
    the contents of each big file revision are correct (SHA-1 hash
    matches the revision ID). With --all, check every changeset in
    this repository.'''
    # store.verify() needs a concrete list of revisions, not an
    # iterator; '.' means "the working directory parent only"
    revs = ['.']
    if all:
        revs = range(len(repo))

    store = basestore._openstore(repo)
    return store.verify(revs, contents=contents)
366
366
def cachelfiles(ui, repo, node):
    '''cachelfiles ensures that all largefiles needed by the specified revision
    are present in the repository's largefile cache.

    returns a tuple (cached, missing). cached is the list of files downloaded
    by this operation; missing is the list of files that were needed but could
    not be found.'''
    lfiles = lfutil.listlfiles(repo, node)
    toget = []

    for lfile in lfiles:
        # If we are mid-merge, then we have to trust the standin that is in the
        # working copy to have the correct hashvalue. This is because the
        # original hg.merge() already updated the standin as part of the normal
        # merge process -- we just have to update the largefile to match.
        if (getattr(repo, "_ismerging", False) and
            os.path.exists(repo.wjoin(lfutil.standin(lfile)))):
            expectedhash = lfutil.readstandin(repo, lfile)
        else:
            # normal case: the hash recorded in the standin at `node`
            expectedhash = repo[node][lfutil.standin(lfile)].data().strip()

        # if it exists and its hash matches, it might have been locally
        # modified before updating and the user chose 'local'. in this case,
        # it will not be in any store, so don't look for it.
        if ((not os.path.exists(repo.wjoin(lfile)) or
             expectedhash != lfutil.hashfile(repo.wjoin(lfile))) and
            not lfutil.findfile(repo, expectedhash)):
            toget.append((lfile, expectedhash))

    if toget:
        store = basestore._openstore(repo)
        ret = store.get(toget)
        return ret

    # nothing to download and nothing missing
    return ([], [])
402
402
def updatelfiles(ui, repo, filelist=None, printmessage=True):
    '''Bring working-copy largefiles in sync with their standins.

    Caches any largefiles needed by the working directory parent, then
    updates or removes each largefile via _updatelfile and persists the
    largefiles dirstate. filelist, if given, restricts the operation to
    those files; printmessage controls status output on ui. The whole
    operation runs under the repository wlock.'''
    wlock = repo.wlock()
    try:
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        # consider both largefiles known to the repo and those already
        # tracked in the largefiles dirstate
        lfiles = set(lfutil.listlfiles(repo)) | set(lfdirstate)

        if filelist is not None:
            lfiles = [f for f in lfiles if f in filelist]

        printed = False
        if printmessage and lfiles:
            ui.status(_('getting changed largefiles\n'))
            printed = True
        cachelfiles(ui, repo, '.')

        updated, removed = 0, 0
        for i in map(lambda f: _updatelfile(repo, lfdirstate, f), lfiles):
            # increment the appropriate counter according to _updatelfile's
            # return value (1 = updated, -1 = removed, 0/None = no change)
            updated += i > 0 and i or 0
            removed -= i < 0 and i or 0
            if printmessage and (removed or updated) and not printed:
                ui.status(_('getting changed largefiles\n'))
                printed = True

        lfdirstate.write()
        if printed and printmessage:
            ui.status(_('%d largefiles updated, %d removed\n') % (updated,
                removed))
    finally:
        wlock.release()
434
434
def _updatelfile(repo, lfdirstate, lfile):
    '''updates a single largefile and copies the state of its standin from
    the repository's dirstate to its state in the lfdirstate.

    returns 1 if the file was modified, -1 if the file was removed, 0 if the
    file was unchanged, and None if the needed largefile was missing from the
    cache.'''
    ret = 0
    abslfile = repo.wjoin(lfile)
    absstandin = repo.wjoin(lfutil.standin(lfile))
    if os.path.exists(absstandin):
        if os.path.exists(absstandin+'.orig'):
            # a backup of the standin exists (e.g. left by a merge);
            # keep a matching backup of the largefile content as well
            shutil.copyfile(abslfile, abslfile+'.orig')
        expecthash = lfutil.readstandin(repo, lfile)
        # copy from the cache when the largefile is absent or stale
        if (expecthash != '' and
            (not os.path.exists(abslfile) or
             expecthash != lfutil.hashfile(abslfile))):
            if not lfutil.copyfromcache(repo, expecthash, lfile):
                # use normallookup() to allocate entry in largefiles dirstate,
                # because lack of it misleads lfilesrepo.status() into
                # recognition that such cache missing files are REMOVED.
                lfdirstate.normallookup(lfile)
                return None # don't try to set the mode
            ret = 1
        # propagate the standin's permission bits to the largefile
        mode = os.stat(absstandin).st_mode
        if mode != os.stat(abslfile).st_mode:
            os.chmod(abslfile, mode)
            ret = 1
    else:
        # Remove lfiles for which the standin is deleted, unless the
        # lfile is added to the repository again. This happens when a
        # largefile is converted back to a normal file: the standin
        # disappears, but a new (normal) file appears as the lfile.
        if os.path.exists(abslfile) and lfile not in repo[None]:
            util.unlinkpath(abslfile)
            ret = -1
    # mirror the standin's dirstate state into the largefiles dirstate
    state = repo.dirstate[lfutil.standin(lfile)]
    if state == 'n':
        # When rebasing, we need to synchronize the standin and the largefile,
        # because otherwise the largefile will get reverted. But for commit's
        # sake, we have to mark the file as unclean.
        if getattr(repo, "_isrebasing", False):
            lfdirstate.normallookup(lfile)
        else:
            lfdirstate.normal(lfile)
    elif state == 'r':
        lfdirstate.remove(lfile)
    elif state == 'a':
        lfdirstate.add(lfile)
    elif state == '?':
        lfdirstate.drop(lfile)
    return ret
487
487
def catlfile(repo, lfile, rev, filename):
    '''Write the content of largefile lfile at revision rev to filename.

    Downloads the largefile into the user cache first when necessary.
    Returns 0 on success; raises util.Abort when the file is not cached
    and cannot be downloaded from any store.'''
    hash = lfutil.readstandin(repo, lfile, rev)
    if not lfutil.inusercache(repo.ui, hash):
        store = basestore._openstore(repo)
        success, missing = store.get([(lfile, hash)])
        if len(success) != 1:
            raise util.Abort(
                _('largefile %s is not in cache and could not be downloaded')
                % lfile)
    path = lfutil.usercachepath(repo.ui, hash)
    fpout = cmdutil.makefileobj(repo, filename)
    # bug fix: close both file objects even when read/write raises;
    # the original leaked the handles on error
    try:
        fpin = open(path, "rb")
        try:
            fpout.write(fpin.read())
        finally:
            fpin.close()
    finally:
        fpout.close()
    return 0
504
504
505 # -- hg commands declarations ------------------------------------------------
505 # -- hg commands declarations ------------------------------------------------
506
506
# command table consumed by hg: name -> (function, options, synopsis)
cmdtable = {
    'lfconvert': (lfconvert,
                  [('s', 'size', '',
                    _('minimum size (MB) for files to be converted '
                      'as largefiles'),
                    'SIZE'),
                   ('', 'to-normal', False,
                    _('convert from a largefiles repo to a normal repo')),
                   ],
                  _('hg lfconvert SOURCE DEST [FILE ...]')),
    }
@@ -1,3533 +1,3533 b''
1 # mq.py - patch queues for mercurial
1 # mq.py - patch queues for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 '''manage a stack of patches
8 '''manage a stack of patches
9
9
10 This extension lets you work with a stack of patches in a Mercurial
10 This extension lets you work with a stack of patches in a Mercurial
11 repository. It manages two stacks of patches - all known patches, and
11 repository. It manages two stacks of patches - all known patches, and
12 applied patches (subset of known patches).
12 applied patches (subset of known patches).
13
13
14 Known patches are represented as patch files in the .hg/patches
14 Known patches are represented as patch files in the .hg/patches
15 directory. Applied patches are both patch files and changesets.
15 directory. Applied patches are both patch files and changesets.
16
16
17 Common tasks (use :hg:`help command` for more details)::
17 Common tasks (use :hg:`help command` for more details)::
18
18
19 create new patch qnew
19 create new patch qnew
20 import existing patch qimport
20 import existing patch qimport
21
21
22 print patch series qseries
22 print patch series qseries
23 print applied patches qapplied
23 print applied patches qapplied
24
24
25 add known patch to applied stack qpush
25 add known patch to applied stack qpush
26 remove patch from applied stack qpop
26 remove patch from applied stack qpop
27 refresh contents of top applied patch qrefresh
27 refresh contents of top applied patch qrefresh
28
28
29 By default, mq will automatically use git patches when required to
29 By default, mq will automatically use git patches when required to
30 avoid losing file mode changes, copy records, binary files or empty
30 avoid losing file mode changes, copy records, binary files or empty
31 files creations or deletions. This behaviour can be configured with::
31 files creations or deletions. This behaviour can be configured with::
32
32
33 [mq]
33 [mq]
34 git = auto/keep/yes/no
34 git = auto/keep/yes/no
35
35
36 If set to 'keep', mq will obey the [diff] section configuration while
36 If set to 'keep', mq will obey the [diff] section configuration while
37 preserving existing git patches upon qrefresh. If set to 'yes' or
37 preserving existing git patches upon qrefresh. If set to 'yes' or
38 'no', mq will override the [diff] section and always generate git or
38 'no', mq will override the [diff] section and always generate git or
39 regular patches, possibly losing data in the second case.
39 regular patches, possibly losing data in the second case.
40
40
41 It may be desirable for mq changesets to be kept in the secret phase (see
41 It may be desirable for mq changesets to be kept in the secret phase (see
42 :hg:`help phases`), which can be enabled with the following setting::
42 :hg:`help phases`), which can be enabled with the following setting::
43
43
44 [mq]
44 [mq]
45 secret = True
45 secret = True
46
46
47 You will by default be managing a patch queue named "patches". You can
47 You will by default be managing a patch queue named "patches". You can
48 create other, independent patch queues with the :hg:`qqueue` command.
48 create other, independent patch queues with the :hg:`qqueue` command.
49
49
50 If the working directory contains uncommitted files, qpush, qpop and
50 If the working directory contains uncommitted files, qpush, qpop and
51 qgoto abort immediately. If -f/--force is used, the changes are
51 qgoto abort immediately. If -f/--force is used, the changes are
52 discarded. Setting:
52 discarded. Setting:
53
53
54 [mq]
54 [mq]
55 check = True
55 check = True
56
56
57 make them behave as if -c/--check were passed, and non-conflicting
57 make them behave as if -c/--check were passed, and non-conflicting
58 local changes will be tolerated and preserved. If incompatible options
58 local changes will be tolerated and preserved. If incompatible options
59 such as -f/--force or --exact are passed, this setting is ignored.
59 such as -f/--force or --exact are passed, this setting is ignored.
60 '''
60 '''
61
61
62 from mercurial.i18n import _
62 from mercurial.i18n import _
63 from mercurial.node import bin, hex, short, nullid, nullrev
63 from mercurial.node import bin, hex, short, nullid, nullrev
64 from mercurial.lock import release
64 from mercurial.lock import release
65 from mercurial import commands, cmdutil, hg, scmutil, util, revset
65 from mercurial import commands, cmdutil, hg, scmutil, util, revset
66 from mercurial import repair, extensions, url, error, phases
66 from mercurial import repair, extensions, url, error, phases
67 from mercurial import patch as patchmod
67 from mercurial import patch as patchmod
68 import os, re, errno, shutil
68 import os, re, errno, shutil
69
69
# qclone works without a local repository
commands.norepo += " qclone"

# option shared by the commands that can display the patch series
seriesopts = [('s', 'summary', None, _('print first line of patch header'))]

# command table populated by the @command decorator below
cmdtable = {}
command = cmdutil.command(cmdtable)

# Patch names looks like unix-file names.
# They must be joinable with queue directory and result in the patch path.
normname = util.normpath
80
80
class statusentry(object):
    '''One entry of the mq status file: an applied patch, identified by
    the changeset node it produced and the patch's name.'''
    def __init__(self, node, name):
        self.node = node
        self.name = name
    def __repr__(self):
        return '%s:%s' % (hex(self.node), self.name)
86
86
87 class patchheader(object):
87 class patchheader(object):
    def __init__(self, pf, plainmode=False):
        '''Parse the patch file at path pf into header metadata.

        Recognizes both hg export headers ("# HG changeset patch" plus
        "# User"/"# Date"/... lines) and mail-style headers ("From:",
        "Subject:", "Date:"). Populates self.message (commit message
        lines), self.comments (all pre-diff lines), self.user,
        self.date, self.parent, self.nodeid, self.branch,
        self.diffstartline, self.haspatch and self.plainmode.'''
        def eatdiff(lines):
            # strip trailing diff-introduction lines
            while lines:
                l = lines[-1]
                if (l.startswith("diff -") or
                    l.startswith("Index:") or
                    l.startswith("===========")):
                    del lines[-1]
                else:
                    break
        def eatempty(lines):
            # strip trailing blank lines
            while lines:
                if not lines[-1].strip():
                    del lines[-1]
                else:
                    break

        message = []
        comments = []
        user = None
        date = None
        parent = None
        format = None       # parser state: None / "hgpatch" / "tag" / "tagdone"
        subject = None
        branch = None
        nodeid = None
        diffstart = 0       # 0 = no diff seen, 1 = possible start, 2 = confirmed

        for line in file(pf):
            line = line.rstrip()
            # a git diff header, or "+++ " right after "--- ", marks the
            # start of the actual patch body
            if (line.startswith('diff --git')
                or (diffstart and line.startswith('+++ '))):
                diffstart = 2
                break
            diffstart = 0 # reset
            if line.startswith("--- "):
                diffstart = 1
                continue
            elif format == "hgpatch":
                # parse values when importing the result of an hg export
                if line.startswith("# User "):
                    user = line[7:]
                elif line.startswith("# Date "):
                    date = line[7:]
                elif line.startswith("# Parent "):
                    parent = line[9:].lstrip()
                elif line.startswith("# Branch "):
                    branch = line[9:]
                elif line.startswith("# Node ID "):
                    nodeid = line[10:]
                elif not line.startswith("# ") and line:
                    # first non-header line ends the hg header block
                    message.append(line)
                    format = None
            elif line == '# HG changeset patch':
                message = []
                format = "hgpatch"
            elif (format != "tagdone" and (line.startswith("Subject: ") or
                   line.startswith("subject: "))):
                subject = line[9:]
                format = "tag"
            elif (format != "tagdone" and (line.startswith("From: ") or
                   line.startswith("from: "))):
                user = line[6:]
                format = "tag"
            elif (format != "tagdone" and (line.startswith("Date: ") or
                   line.startswith("date: "))):
                date = line[6:]
                format = "tag"
            elif format == "tag" and line == "":
                # when looking for tags (subject: from: etc) they
                # end once you find a blank line in the source
                format = "tagdone"
            elif message or line:
                message.append(line)
            comments.append(line)

        eatdiff(message)
        eatdiff(comments)
        # Remember the exact starting line of the patch diffs before consuming
        # empty lines, for external use by TortoiseHg and others
        self.diffstartline = len(comments)
        eatempty(message)
        eatempty(comments)

        # make sure message isn't empty
        if format and format.startswith("tag") and subject:
            message.insert(0, "")
            message.insert(0, subject)

        self.message = message
        self.comments = comments
        self.user = user
        self.date = date
        self.parent = parent
        # nodeid and branch are for external use by TortoiseHg and others
        self.nodeid = nodeid
        self.branch = branch
        self.haspatch = diffstart > 1
        self.plainmode = plainmode
187
187
188 def setuser(self, user):
188 def setuser(self, user):
189 if not self.updateheader(['From: ', '# User '], user):
189 if not self.updateheader(['From: ', '# User '], user):
190 try:
190 try:
191 patchheaderat = self.comments.index('# HG changeset patch')
191 patchheaderat = self.comments.index('# HG changeset patch')
192 self.comments.insert(patchheaderat + 1, '# User ' + user)
192 self.comments.insert(patchheaderat + 1, '# User ' + user)
193 except ValueError:
193 except ValueError:
194 if self.plainmode or self._hasheader(['Date: ']):
194 if self.plainmode or self._hasheader(['Date: ']):
195 self.comments = ['From: ' + user] + self.comments
195 self.comments = ['From: ' + user] + self.comments
196 else:
196 else:
197 tmp = ['# HG changeset patch', '# User ' + user, '']
197 tmp = ['# HG changeset patch', '# User ' + user, '']
198 self.comments = tmp + self.comments
198 self.comments = tmp + self.comments
199 self.user = user
199 self.user = user
200
200
201 def setdate(self, date):
201 def setdate(self, date):
202 if not self.updateheader(['Date: ', '# Date '], date):
202 if not self.updateheader(['Date: ', '# Date '], date):
203 try:
203 try:
204 patchheaderat = self.comments.index('# HG changeset patch')
204 patchheaderat = self.comments.index('# HG changeset patch')
205 self.comments.insert(patchheaderat + 1, '# Date ' + date)
205 self.comments.insert(patchheaderat + 1, '# Date ' + date)
206 except ValueError:
206 except ValueError:
207 if self.plainmode or self._hasheader(['From: ']):
207 if self.plainmode or self._hasheader(['From: ']):
208 self.comments = ['Date: ' + date] + self.comments
208 self.comments = ['Date: ' + date] + self.comments
209 else:
209 else:
210 tmp = ['# HG changeset patch', '# Date ' + date, '']
210 tmp = ['# HG changeset patch', '# Date ' + date, '']
211 self.comments = tmp + self.comments
211 self.comments = tmp + self.comments
212 self.date = date
212 self.date = date
213
213
214 def setparent(self, parent):
214 def setparent(self, parent):
215 if not self.updateheader(['# Parent '], parent):
215 if not self.updateheader(['# Parent '], parent):
216 try:
216 try:
217 patchheaderat = self.comments.index('# HG changeset patch')
217 patchheaderat = self.comments.index('# HG changeset patch')
218 self.comments.insert(patchheaderat + 1, '# Parent ' + parent)
218 self.comments.insert(patchheaderat + 1, '# Parent ' + parent)
219 except ValueError:
219 except ValueError:
220 pass
220 pass
221 self.parent = parent
221 self.parent = parent
222
222
223 def setmessage(self, message):
223 def setmessage(self, message):
224 if self.comments:
224 if self.comments:
225 self._delmsg()
225 self._delmsg()
226 self.message = [message]
226 self.message = [message]
227 self.comments += self.message
227 self.comments += self.message
228
228
229 def updateheader(self, prefixes, new):
229 def updateheader(self, prefixes, new):
230 '''Update all references to a field in the patch header.
230 '''Update all references to a field in the patch header.
231 Return whether the field is present.'''
231 Return whether the field is present.'''
232 res = False
232 res = False
233 for prefix in prefixes:
233 for prefix in prefixes:
234 for i in xrange(len(self.comments)):
234 for i in xrange(len(self.comments)):
235 if self.comments[i].startswith(prefix):
235 if self.comments[i].startswith(prefix):
236 self.comments[i] = prefix + new
236 self.comments[i] = prefix + new
237 res = True
237 res = True
238 break
238 break
239 return res
239 return res
240
240
241 def _hasheader(self, prefixes):
241 def _hasheader(self, prefixes):
242 '''Check if a header starts with any of the given prefixes.'''
242 '''Check if a header starts with any of the given prefixes.'''
243 for prefix in prefixes:
243 for prefix in prefixes:
244 for comment in self.comments:
244 for comment in self.comments:
245 if comment.startswith(prefix):
245 if comment.startswith(prefix):
246 return True
246 return True
247 return False
247 return False
248
248
249 def __str__(self):
249 def __str__(self):
250 if not self.comments:
250 if not self.comments:
251 return ''
251 return ''
252 return '\n'.join(self.comments) + '\n\n'
252 return '\n'.join(self.comments) + '\n\n'
253
253
254 def _delmsg(self):
254 def _delmsg(self):
255 '''Remove existing message, keeping the rest of the comments fields.
255 '''Remove existing message, keeping the rest of the comments fields.
256 If comments contains 'subject: ', message will prepend
256 If comments contains 'subject: ', message will prepend
257 the field and a blank line.'''
257 the field and a blank line.'''
258 if self.message:
258 if self.message:
259 subj = 'subject: ' + self.message[0].lower()
259 subj = 'subject: ' + self.message[0].lower()
260 for i in xrange(len(self.comments)):
260 for i in xrange(len(self.comments)):
261 if subj == self.comments[i].lower():
261 if subj == self.comments[i].lower():
262 del self.comments[i]
262 del self.comments[i]
263 self.message = self.message[2:]
263 self.message = self.message[2:]
264 break
264 break
265 ci = 0
265 ci = 0
266 for mi in self.message:
266 for mi in self.message:
267 while mi != self.comments[ci]:
267 while mi != self.comments[ci]:
268 ci += 1
268 ci += 1
269 del self.comments[ci]
269 del self.comments[ci]
270
270
def newcommit(repo, phase, *args, **kwargs):
    """helper dedicated to ensure a commit respect mq.secret setting

    It should be used instead of repo.commit inside the mq source for operation
    creating new changeset.
    """
    if phase is None:
        if repo.ui.configbool('mq', 'secret', False):
            phase = phases.secret
    if phase is not None:
        backup = repo.ui.backupconfig('phases', 'new-commit')
    # Marking the repository as committing an mq patch can be used
    # to optimize operations like _branchtags().
    repo._committingpatch = True
    try:
        if phase is not None:
            repo.ui.setconfig('phases', 'new-commit', phase)
        return repo.commit(*args, **kwargs)
    finally:
        repo._committingpatch = False
        if phase is not None:
            repo.ui.restoreconfig(backup)
293
293
class AbortNoCleanup(error.Abort):
    # Raised to abort a patch application without rolling back what has
    # already been applied; queue.apply() catches it and commits the
    # transaction instead of aborting it.
    pass
296
296
297 class queue(object):
297 class queue(object):
298 def __init__(self, ui, path, patchdir=None):
298 def __init__(self, ui, path, patchdir=None):
299 self.basepath = path
299 self.basepath = path
300 try:
300 try:
301 fh = open(os.path.join(path, 'patches.queue'))
301 fh = open(os.path.join(path, 'patches.queue'))
302 cur = fh.read().rstrip()
302 cur = fh.read().rstrip()
303 fh.close()
303 fh.close()
304 if not cur:
304 if not cur:
305 curpath = os.path.join(path, 'patches')
305 curpath = os.path.join(path, 'patches')
306 else:
306 else:
307 curpath = os.path.join(path, 'patches-' + cur)
307 curpath = os.path.join(path, 'patches-' + cur)
308 except IOError:
308 except IOError:
309 curpath = os.path.join(path, 'patches')
309 curpath = os.path.join(path, 'patches')
310 self.path = patchdir or curpath
310 self.path = patchdir or curpath
311 self.opener = scmutil.opener(self.path)
311 self.opener = scmutil.opener(self.path)
312 self.ui = ui
312 self.ui = ui
313 self.applieddirty = False
313 self.applieddirty = False
314 self.seriesdirty = False
314 self.seriesdirty = False
315 self.added = []
315 self.added = []
316 self.seriespath = "series"
316 self.seriespath = "series"
317 self.statuspath = "status"
317 self.statuspath = "status"
318 self.guardspath = "guards"
318 self.guardspath = "guards"
319 self.activeguards = None
319 self.activeguards = None
320 self.guardsdirty = False
320 self.guardsdirty = False
321 # Handle mq.git as a bool with extended values
321 # Handle mq.git as a bool with extended values
322 try:
322 try:
323 gitmode = ui.configbool('mq', 'git', None)
323 gitmode = ui.configbool('mq', 'git', None)
324 if gitmode is None:
324 if gitmode is None:
325 raise error.ConfigError()
325 raise error.ConfigError
326 self.gitmode = gitmode and 'yes' or 'no'
326 self.gitmode = gitmode and 'yes' or 'no'
327 except error.ConfigError:
327 except error.ConfigError:
328 self.gitmode = ui.config('mq', 'git', 'auto').lower()
328 self.gitmode = ui.config('mq', 'git', 'auto').lower()
329 self.plainmode = ui.configbool('mq', 'plain', False)
329 self.plainmode = ui.configbool('mq', 'plain', False)
330
330
331 @util.propertycache
331 @util.propertycache
332 def applied(self):
332 def applied(self):
333 def parselines(lines):
333 def parselines(lines):
334 for l in lines:
334 for l in lines:
335 entry = l.split(':', 1)
335 entry = l.split(':', 1)
336 if len(entry) > 1:
336 if len(entry) > 1:
337 n, name = entry
337 n, name = entry
338 yield statusentry(bin(n), name)
338 yield statusentry(bin(n), name)
339 elif l.strip():
339 elif l.strip():
340 self.ui.warn(_('malformated mq status line: %s\n') % entry)
340 self.ui.warn(_('malformated mq status line: %s\n') % entry)
341 # else we ignore empty lines
341 # else we ignore empty lines
342 try:
342 try:
343 lines = self.opener.read(self.statuspath).splitlines()
343 lines = self.opener.read(self.statuspath).splitlines()
344 return list(parselines(lines))
344 return list(parselines(lines))
345 except IOError, e:
345 except IOError, e:
346 if e.errno == errno.ENOENT:
346 if e.errno == errno.ENOENT:
347 return []
347 return []
348 raise
348 raise
349
349
350 @util.propertycache
350 @util.propertycache
351 def fullseries(self):
351 def fullseries(self):
352 try:
352 try:
353 return self.opener.read(self.seriespath).splitlines()
353 return self.opener.read(self.seriespath).splitlines()
354 except IOError, e:
354 except IOError, e:
355 if e.errno == errno.ENOENT:
355 if e.errno == errno.ENOENT:
356 return []
356 return []
357 raise
357 raise
358
358
359 @util.propertycache
359 @util.propertycache
360 def series(self):
360 def series(self):
361 self.parseseries()
361 self.parseseries()
362 return self.series
362 return self.series
363
363
364 @util.propertycache
364 @util.propertycache
365 def seriesguards(self):
365 def seriesguards(self):
366 self.parseseries()
366 self.parseseries()
367 return self.seriesguards
367 return self.seriesguards
368
368
369 def invalidate(self):
369 def invalidate(self):
370 for a in 'applied fullseries series seriesguards'.split():
370 for a in 'applied fullseries series seriesguards'.split():
371 if a in self.__dict__:
371 if a in self.__dict__:
372 delattr(self, a)
372 delattr(self, a)
373 self.applieddirty = False
373 self.applieddirty = False
374 self.seriesdirty = False
374 self.seriesdirty = False
375 self.guardsdirty = False
375 self.guardsdirty = False
376 self.activeguards = None
376 self.activeguards = None
377
377
378 def diffopts(self, opts={}, patchfn=None):
378 def diffopts(self, opts={}, patchfn=None):
379 diffopts = patchmod.diffopts(self.ui, opts)
379 diffopts = patchmod.diffopts(self.ui, opts)
380 if self.gitmode == 'auto':
380 if self.gitmode == 'auto':
381 diffopts.upgrade = True
381 diffopts.upgrade = True
382 elif self.gitmode == 'keep':
382 elif self.gitmode == 'keep':
383 pass
383 pass
384 elif self.gitmode in ('yes', 'no'):
384 elif self.gitmode in ('yes', 'no'):
385 diffopts.git = self.gitmode == 'yes'
385 diffopts.git = self.gitmode == 'yes'
386 else:
386 else:
387 raise util.Abort(_('mq.git option can be auto/keep/yes/no'
387 raise util.Abort(_('mq.git option can be auto/keep/yes/no'
388 ' got %s') % self.gitmode)
388 ' got %s') % self.gitmode)
389 if patchfn:
389 if patchfn:
390 diffopts = self.patchopts(diffopts, patchfn)
390 diffopts = self.patchopts(diffopts, patchfn)
391 return diffopts
391 return diffopts
392
392
393 def patchopts(self, diffopts, *patches):
393 def patchopts(self, diffopts, *patches):
394 """Return a copy of input diff options with git set to true if
394 """Return a copy of input diff options with git set to true if
395 referenced patch is a git patch and should be preserved as such.
395 referenced patch is a git patch and should be preserved as such.
396 """
396 """
397 diffopts = diffopts.copy()
397 diffopts = diffopts.copy()
398 if not diffopts.git and self.gitmode == 'keep':
398 if not diffopts.git and self.gitmode == 'keep':
399 for patchfn in patches:
399 for patchfn in patches:
400 patchf = self.opener(patchfn, 'r')
400 patchf = self.opener(patchfn, 'r')
401 # if the patch was a git patch, refresh it as a git patch
401 # if the patch was a git patch, refresh it as a git patch
402 for line in patchf:
402 for line in patchf:
403 if line.startswith('diff --git'):
403 if line.startswith('diff --git'):
404 diffopts.git = True
404 diffopts.git = True
405 break
405 break
406 patchf.close()
406 patchf.close()
407 return diffopts
407 return diffopts
408
408
409 def join(self, *p):
409 def join(self, *p):
410 return os.path.join(self.path, *p)
410 return os.path.join(self.path, *p)
411
411
412 def findseries(self, patch):
412 def findseries(self, patch):
413 def matchpatch(l):
413 def matchpatch(l):
414 l = l.split('#', 1)[0]
414 l = l.split('#', 1)[0]
415 return l.strip() == patch
415 return l.strip() == patch
416 for index, l in enumerate(self.fullseries):
416 for index, l in enumerate(self.fullseries):
417 if matchpatch(l):
417 if matchpatch(l):
418 return index
418 return index
419 return None
419 return None
420
420
421 guard_re = re.compile(r'\s?#([-+][^-+# \t\r\n\f][^# \t\r\n\f]*)')
421 guard_re = re.compile(r'\s?#([-+][^-+# \t\r\n\f][^# \t\r\n\f]*)')
422
422
423 def parseseries(self):
423 def parseseries(self):
424 self.series = []
424 self.series = []
425 self.seriesguards = []
425 self.seriesguards = []
426 for l in self.fullseries:
426 for l in self.fullseries:
427 h = l.find('#')
427 h = l.find('#')
428 if h == -1:
428 if h == -1:
429 patch = l
429 patch = l
430 comment = ''
430 comment = ''
431 elif h == 0:
431 elif h == 0:
432 continue
432 continue
433 else:
433 else:
434 patch = l[:h]
434 patch = l[:h]
435 comment = l[h:]
435 comment = l[h:]
436 patch = patch.strip()
436 patch = patch.strip()
437 if patch:
437 if patch:
438 if patch in self.series:
438 if patch in self.series:
439 raise util.Abort(_('%s appears more than once in %s') %
439 raise util.Abort(_('%s appears more than once in %s') %
440 (patch, self.join(self.seriespath)))
440 (patch, self.join(self.seriespath)))
441 self.series.append(patch)
441 self.series.append(patch)
442 self.seriesguards.append(self.guard_re.findall(comment))
442 self.seriesguards.append(self.guard_re.findall(comment))
443
443
444 def checkguard(self, guard):
444 def checkguard(self, guard):
445 if not guard:
445 if not guard:
446 return _('guard cannot be an empty string')
446 return _('guard cannot be an empty string')
447 bad_chars = '# \t\r\n\f'
447 bad_chars = '# \t\r\n\f'
448 first = guard[0]
448 first = guard[0]
449 if first in '-+':
449 if first in '-+':
450 return (_('guard %r starts with invalid character: %r') %
450 return (_('guard %r starts with invalid character: %r') %
451 (guard, first))
451 (guard, first))
452 for c in bad_chars:
452 for c in bad_chars:
453 if c in guard:
453 if c in guard:
454 return _('invalid character in guard %r: %r') % (guard, c)
454 return _('invalid character in guard %r: %r') % (guard, c)
455
455
456 def setactive(self, guards):
456 def setactive(self, guards):
457 for guard in guards:
457 for guard in guards:
458 bad = self.checkguard(guard)
458 bad = self.checkguard(guard)
459 if bad:
459 if bad:
460 raise util.Abort(bad)
460 raise util.Abort(bad)
461 guards = sorted(set(guards))
461 guards = sorted(set(guards))
462 self.ui.debug('active guards: %s\n' % ' '.join(guards))
462 self.ui.debug('active guards: %s\n' % ' '.join(guards))
463 self.activeguards = guards
463 self.activeguards = guards
464 self.guardsdirty = True
464 self.guardsdirty = True
465
465
466 def active(self):
466 def active(self):
467 if self.activeguards is None:
467 if self.activeguards is None:
468 self.activeguards = []
468 self.activeguards = []
469 try:
469 try:
470 guards = self.opener.read(self.guardspath).split()
470 guards = self.opener.read(self.guardspath).split()
471 except IOError, err:
471 except IOError, err:
472 if err.errno != errno.ENOENT:
472 if err.errno != errno.ENOENT:
473 raise
473 raise
474 guards = []
474 guards = []
475 for i, guard in enumerate(guards):
475 for i, guard in enumerate(guards):
476 bad = self.checkguard(guard)
476 bad = self.checkguard(guard)
477 if bad:
477 if bad:
478 self.ui.warn('%s:%d: %s\n' %
478 self.ui.warn('%s:%d: %s\n' %
479 (self.join(self.guardspath), i + 1, bad))
479 (self.join(self.guardspath), i + 1, bad))
480 else:
480 else:
481 self.activeguards.append(guard)
481 self.activeguards.append(guard)
482 return self.activeguards
482 return self.activeguards
483
483
484 def setguards(self, idx, guards):
484 def setguards(self, idx, guards):
485 for g in guards:
485 for g in guards:
486 if len(g) < 2:
486 if len(g) < 2:
487 raise util.Abort(_('guard %r too short') % g)
487 raise util.Abort(_('guard %r too short') % g)
488 if g[0] not in '-+':
488 if g[0] not in '-+':
489 raise util.Abort(_('guard %r starts with invalid char') % g)
489 raise util.Abort(_('guard %r starts with invalid char') % g)
490 bad = self.checkguard(g[1:])
490 bad = self.checkguard(g[1:])
491 if bad:
491 if bad:
492 raise util.Abort(bad)
492 raise util.Abort(bad)
493 drop = self.guard_re.sub('', self.fullseries[idx])
493 drop = self.guard_re.sub('', self.fullseries[idx])
494 self.fullseries[idx] = drop + ''.join([' #' + g for g in guards])
494 self.fullseries[idx] = drop + ''.join([' #' + g for g in guards])
495 self.parseseries()
495 self.parseseries()
496 self.seriesdirty = True
496 self.seriesdirty = True
497
497
498 def pushable(self, idx):
498 def pushable(self, idx):
499 if isinstance(idx, str):
499 if isinstance(idx, str):
500 idx = self.series.index(idx)
500 idx = self.series.index(idx)
501 patchguards = self.seriesguards[idx]
501 patchguards = self.seriesguards[idx]
502 if not patchguards:
502 if not patchguards:
503 return True, None
503 return True, None
504 guards = self.active()
504 guards = self.active()
505 exactneg = [g for g in patchguards if g[0] == '-' and g[1:] in guards]
505 exactneg = [g for g in patchguards if g[0] == '-' and g[1:] in guards]
506 if exactneg:
506 if exactneg:
507 return False, repr(exactneg[0])
507 return False, repr(exactneg[0])
508 pos = [g for g in patchguards if g[0] == '+']
508 pos = [g for g in patchguards if g[0] == '+']
509 exactpos = [g for g in pos if g[1:] in guards]
509 exactpos = [g for g in pos if g[1:] in guards]
510 if pos:
510 if pos:
511 if exactpos:
511 if exactpos:
512 return True, repr(exactpos[0])
512 return True, repr(exactpos[0])
513 return False, ' '.join(map(repr, pos))
513 return False, ' '.join(map(repr, pos))
514 return True, ''
514 return True, ''
515
515
516 def explainpushable(self, idx, all_patches=False):
516 def explainpushable(self, idx, all_patches=False):
517 write = all_patches and self.ui.write or self.ui.warn
517 write = all_patches and self.ui.write or self.ui.warn
518 if all_patches or self.ui.verbose:
518 if all_patches or self.ui.verbose:
519 if isinstance(idx, str):
519 if isinstance(idx, str):
520 idx = self.series.index(idx)
520 idx = self.series.index(idx)
521 pushable, why = self.pushable(idx)
521 pushable, why = self.pushable(idx)
522 if all_patches and pushable:
522 if all_patches and pushable:
523 if why is None:
523 if why is None:
524 write(_('allowing %s - no guards in effect\n') %
524 write(_('allowing %s - no guards in effect\n') %
525 self.series[idx])
525 self.series[idx])
526 else:
526 else:
527 if not why:
527 if not why:
528 write(_('allowing %s - no matching negative guards\n') %
528 write(_('allowing %s - no matching negative guards\n') %
529 self.series[idx])
529 self.series[idx])
530 else:
530 else:
531 write(_('allowing %s - guarded by %s\n') %
531 write(_('allowing %s - guarded by %s\n') %
532 (self.series[idx], why))
532 (self.series[idx], why))
533 if not pushable:
533 if not pushable:
534 if why:
534 if why:
535 write(_('skipping %s - guarded by %s\n') %
535 write(_('skipping %s - guarded by %s\n') %
536 (self.series[idx], why))
536 (self.series[idx], why))
537 else:
537 else:
538 write(_('skipping %s - no matching guards\n') %
538 write(_('skipping %s - no matching guards\n') %
539 self.series[idx])
539 self.series[idx])
540
540
541 def savedirty(self):
541 def savedirty(self):
542 def writelist(items, path):
542 def writelist(items, path):
543 fp = self.opener(path, 'w')
543 fp = self.opener(path, 'w')
544 for i in items:
544 for i in items:
545 fp.write("%s\n" % i)
545 fp.write("%s\n" % i)
546 fp.close()
546 fp.close()
547 if self.applieddirty:
547 if self.applieddirty:
548 writelist(map(str, self.applied), self.statuspath)
548 writelist(map(str, self.applied), self.statuspath)
549 self.applieddirty = False
549 self.applieddirty = False
550 if self.seriesdirty:
550 if self.seriesdirty:
551 writelist(self.fullseries, self.seriespath)
551 writelist(self.fullseries, self.seriespath)
552 self.seriesdirty = False
552 self.seriesdirty = False
553 if self.guardsdirty:
553 if self.guardsdirty:
554 writelist(self.activeguards, self.guardspath)
554 writelist(self.activeguards, self.guardspath)
555 self.guardsdirty = False
555 self.guardsdirty = False
556 if self.added:
556 if self.added:
557 qrepo = self.qrepo()
557 qrepo = self.qrepo()
558 if qrepo:
558 if qrepo:
559 qrepo[None].add(f for f in self.added if f not in qrepo[None])
559 qrepo[None].add(f for f in self.added if f not in qrepo[None])
560 self.added = []
560 self.added = []
561
561
562 def removeundo(self, repo):
562 def removeundo(self, repo):
563 undo = repo.sjoin('undo')
563 undo = repo.sjoin('undo')
564 if not os.path.exists(undo):
564 if not os.path.exists(undo):
565 return
565 return
566 try:
566 try:
567 os.unlink(undo)
567 os.unlink(undo)
568 except OSError, inst:
568 except OSError, inst:
569 self.ui.warn(_('error removing undo: %s\n') % str(inst))
569 self.ui.warn(_('error removing undo: %s\n') % str(inst))
570
570
571 def backup(self, repo, files, copy=False):
571 def backup(self, repo, files, copy=False):
572 # backup local changes in --force case
572 # backup local changes in --force case
573 for f in sorted(files):
573 for f in sorted(files):
574 absf = repo.wjoin(f)
574 absf = repo.wjoin(f)
575 if os.path.lexists(absf):
575 if os.path.lexists(absf):
576 self.ui.note(_('saving current version of %s as %s\n') %
576 self.ui.note(_('saving current version of %s as %s\n') %
577 (f, f + '.orig'))
577 (f, f + '.orig'))
578 if copy:
578 if copy:
579 util.copyfile(absf, absf + '.orig')
579 util.copyfile(absf, absf + '.orig')
580 else:
580 else:
581 util.rename(absf, absf + '.orig')
581 util.rename(absf, absf + '.orig')
582
582
583 def printdiff(self, repo, diffopts, node1, node2=None, files=None,
583 def printdiff(self, repo, diffopts, node1, node2=None, files=None,
584 fp=None, changes=None, opts={}):
584 fp=None, changes=None, opts={}):
585 stat = opts.get('stat')
585 stat = opts.get('stat')
586 m = scmutil.match(repo[node1], files, opts)
586 m = scmutil.match(repo[node1], files, opts)
587 cmdutil.diffordiffstat(self.ui, repo, diffopts, node1, node2, m,
587 cmdutil.diffordiffstat(self.ui, repo, diffopts, node1, node2, m,
588 changes, stat, fp)
588 changes, stat, fp)
589
589
590 def mergeone(self, repo, mergeq, head, patch, rev, diffopts):
590 def mergeone(self, repo, mergeq, head, patch, rev, diffopts):
591 # first try just applying the patch
591 # first try just applying the patch
592 (err, n) = self.apply(repo, [patch], update_status=False,
592 (err, n) = self.apply(repo, [patch], update_status=False,
593 strict=True, merge=rev)
593 strict=True, merge=rev)
594
594
595 if err == 0:
595 if err == 0:
596 return (err, n)
596 return (err, n)
597
597
598 if n is None:
598 if n is None:
599 raise util.Abort(_("apply failed for patch %s") % patch)
599 raise util.Abort(_("apply failed for patch %s") % patch)
600
600
601 self.ui.warn(_("patch didn't work out, merging %s\n") % patch)
601 self.ui.warn(_("patch didn't work out, merging %s\n") % patch)
602
602
603 # apply failed, strip away that rev and merge.
603 # apply failed, strip away that rev and merge.
604 hg.clean(repo, head)
604 hg.clean(repo, head)
605 self.strip(repo, [n], update=False, backup='strip')
605 self.strip(repo, [n], update=False, backup='strip')
606
606
607 ctx = repo[rev]
607 ctx = repo[rev]
608 ret = hg.merge(repo, rev)
608 ret = hg.merge(repo, rev)
609 if ret:
609 if ret:
610 raise util.Abort(_("update returned %d") % ret)
610 raise util.Abort(_("update returned %d") % ret)
611 n = newcommit(repo, None, ctx.description(), ctx.user(), force=True)
611 n = newcommit(repo, None, ctx.description(), ctx.user(), force=True)
612 if n is None:
612 if n is None:
613 raise util.Abort(_("repo commit failed"))
613 raise util.Abort(_("repo commit failed"))
614 try:
614 try:
615 ph = patchheader(mergeq.join(patch), self.plainmode)
615 ph = patchheader(mergeq.join(patch), self.plainmode)
616 except:
616 except:
617 raise util.Abort(_("unable to read %s") % patch)
617 raise util.Abort(_("unable to read %s") % patch)
618
618
619 diffopts = self.patchopts(diffopts, patch)
619 diffopts = self.patchopts(diffopts, patch)
620 patchf = self.opener(patch, "w")
620 patchf = self.opener(patch, "w")
621 comments = str(ph)
621 comments = str(ph)
622 if comments:
622 if comments:
623 patchf.write(comments)
623 patchf.write(comments)
624 self.printdiff(repo, diffopts, head, n, fp=patchf)
624 self.printdiff(repo, diffopts, head, n, fp=patchf)
625 patchf.close()
625 patchf.close()
626 self.removeundo(repo)
626 self.removeundo(repo)
627 return (0, n)
627 return (0, n)
628
628
629 def qparents(self, repo, rev=None):
629 def qparents(self, repo, rev=None):
630 if rev is None:
630 if rev is None:
631 (p1, p2) = repo.dirstate.parents()
631 (p1, p2) = repo.dirstate.parents()
632 if p2 == nullid:
632 if p2 == nullid:
633 return p1
633 return p1
634 if not self.applied:
634 if not self.applied:
635 return None
635 return None
636 return self.applied[-1].node
636 return self.applied[-1].node
637 p1, p2 = repo.changelog.parents(rev)
637 p1, p2 = repo.changelog.parents(rev)
638 if p2 != nullid and p2 in [x.node for x in self.applied]:
638 if p2 != nullid and p2 in [x.node for x in self.applied]:
639 return p2
639 return p2
640 return p1
640 return p1
641
641
642 def mergepatch(self, repo, mergeq, series, diffopts):
642 def mergepatch(self, repo, mergeq, series, diffopts):
643 if not self.applied:
643 if not self.applied:
644 # each of the patches merged in will have two parents. This
644 # each of the patches merged in will have two parents. This
645 # can confuse the qrefresh, qdiff, and strip code because it
645 # can confuse the qrefresh, qdiff, and strip code because it
646 # needs to know which parent is actually in the patch queue.
646 # needs to know which parent is actually in the patch queue.
647 # so, we insert a merge marker with only one parent. This way
647 # so, we insert a merge marker with only one parent. This way
648 # the first patch in the queue is never a merge patch
648 # the first patch in the queue is never a merge patch
649 #
649 #
650 pname = ".hg.patches.merge.marker"
650 pname = ".hg.patches.merge.marker"
651 n = newcommit(repo, None, '[mq]: merge marker', force=True)
651 n = newcommit(repo, None, '[mq]: merge marker', force=True)
652 self.removeundo(repo)
652 self.removeundo(repo)
653 self.applied.append(statusentry(n, pname))
653 self.applied.append(statusentry(n, pname))
654 self.applieddirty = True
654 self.applieddirty = True
655
655
656 head = self.qparents(repo)
656 head = self.qparents(repo)
657
657
658 for patch in series:
658 for patch in series:
659 patch = mergeq.lookup(patch, strict=True)
659 patch = mergeq.lookup(patch, strict=True)
660 if not patch:
660 if not patch:
661 self.ui.warn(_("patch %s does not exist\n") % patch)
661 self.ui.warn(_("patch %s does not exist\n") % patch)
662 return (1, None)
662 return (1, None)
663 pushable, reason = self.pushable(patch)
663 pushable, reason = self.pushable(patch)
664 if not pushable:
664 if not pushable:
665 self.explainpushable(patch, all_patches=True)
665 self.explainpushable(patch, all_patches=True)
666 continue
666 continue
667 info = mergeq.isapplied(patch)
667 info = mergeq.isapplied(patch)
668 if not info:
668 if not info:
669 self.ui.warn(_("patch %s is not applied\n") % patch)
669 self.ui.warn(_("patch %s is not applied\n") % patch)
670 return (1, None)
670 return (1, None)
671 rev = info[1]
671 rev = info[1]
672 err, head = self.mergeone(repo, mergeq, head, patch, rev, diffopts)
672 err, head = self.mergeone(repo, mergeq, head, patch, rev, diffopts)
673 if head:
673 if head:
674 self.applied.append(statusentry(head, patch))
674 self.applied.append(statusentry(head, patch))
675 self.applieddirty = True
675 self.applieddirty = True
676 if err:
676 if err:
677 return (err, head)
677 return (err, head)
678 self.savedirty()
678 self.savedirty()
679 return (0, head)
679 return (0, head)
680
680
681 def patch(self, repo, patchfile):
681 def patch(self, repo, patchfile):
682 '''Apply patchfile to the working directory.
682 '''Apply patchfile to the working directory.
683 patchfile: name of patch file'''
683 patchfile: name of patch file'''
684 files = set()
684 files = set()
685 try:
685 try:
686 fuzz = patchmod.patch(self.ui, repo, patchfile, strip=1,
686 fuzz = patchmod.patch(self.ui, repo, patchfile, strip=1,
687 files=files, eolmode=None)
687 files=files, eolmode=None)
688 return (True, list(files), fuzz)
688 return (True, list(files), fuzz)
689 except Exception, inst:
689 except Exception, inst:
690 self.ui.note(str(inst) + '\n')
690 self.ui.note(str(inst) + '\n')
691 if not self.ui.verbose:
691 if not self.ui.verbose:
692 self.ui.warn(_("patch failed, unable to continue (try -v)\n"))
692 self.ui.warn(_("patch failed, unable to continue (try -v)\n"))
693 self.ui.traceback()
693 self.ui.traceback()
694 return (False, list(files), False)
694 return (False, list(files), False)
695
695
def apply(self, repo, series, list=False, update_status=True,
          strict=False, patchdir=None, merge=None, all_files=None,
          tobackup=None, check=False):
    # Apply the patches in ``series`` inside a single "qpush"
    # transaction while holding both the working-dir and store locks.
    wlock = lock = tr = None
    try:
        wlock = repo.wlock()
        lock = repo.lock()
        tr = repo.transaction("qpush")
        try:
            ret = self._apply(repo, series, list, update_status,
                              strict, patchdir, merge, all_files=all_files,
                              tobackup=tobackup, check=check)
            tr.close()
            self.savedirty()
            return ret
        except AbortNoCleanup:
            # deliberate early stop: keep the work done so far, so the
            # transaction is committed rather than aborted
            tr.close()
            self.savedirty()
            return 2, repo.dirstate.p1()
        except:
            # bare except is intentional: cleanup must also run for
            # KeyboardInterrupt and SystemExit before re-raising
            try:
                tr.abort()
            finally:
                # drop every cached view of the now rolled-back state
                repo.invalidate()
                repo.dirstate.invalidate()
                self.invalidate()
            raise
    finally:
        release(tr, lock, wlock)
        self.removeundo(repo)
726
726
def _apply(self, repo, series, list=False, update_status=True,
           strict=False, patchdir=None, merge=None, all_files=None,
           tobackup=None, check=False):
    """returns (error, hash)

    error = 1 for unable to read, 2 for patch failed, 3 for patch
    fuzz. tobackup is None or a set of files to backup before they
    are modified by a patch.
    """
    # TODO unify with commands.py
    if not patchdir:
        patchdir = self.path
    err = 0
    n = None  # node of the last commit created below
    for patchname in series:
        pushable, reason = self.pushable(patchname)
        if not pushable:
            # guarded patch: explain why it is skipped and move on
            self.explainpushable(patchname, all_patches=True)
            continue
        self.ui.status(_("applying %s\n") % patchname)
        pf = os.path.join(patchdir, patchname)

        try:
            ph = patchheader(self.join(patchname), self.plainmode)
        except IOError:
            self.ui.warn(_("unable to read %s\n") % patchname)
            err = 1
            break

        message = ph.message
        if not message:
            # The commit message should not be translated
            message = "imported patch %s\n" % patchname
        else:
            if list:
                # The commit message should not be translated
                message.append("\nimported patch %s" % patchname)
            message = '\n'.join(message)

        if ph.haspatch:
            if tobackup:
                # back up only the to-be-patched files the caller asked for
                touched = patchmod.changedfiles(self.ui, repo, pf)
                touched = set(touched) & tobackup
                if touched and check:
                    raise AbortNoCleanup(
                        _("local changes found, refresh first"))
                self.backup(repo, touched, copy=True)
                tobackup = tobackup - touched
            (patcherr, files, fuzz) = self.patch(repo, pf)
            if all_files is not None:
                all_files.update(files)
            # self.patch returns success as True; invert to an error flag
            patcherr = not patcherr
        else:
            self.ui.warn(_("patch %s is empty\n") % patchname)
            patcherr, files, fuzz = 0, [], 0

        if merge and files:
            # Mark as removed/merged and update dirstate parent info
            removed = []
            merged = []
            for f in files:
                if os.path.lexists(repo.wjoin(f)):
                    merged.append(f)
                else:
                    removed.append(f)
            for f in removed:
                repo.dirstate.remove(f)
            for f in merged:
                repo.dirstate.merge(f)
            p1, p2 = repo.dirstate.parents()
            repo.setparents(p1, merge)

        match = scmutil.matchfiles(repo, files or [])
        oldtip = repo['tip']
        n = newcommit(repo, None, message, ph.user, ph.date, match=match,
                      force=True)
        if repo['tip'] == oldtip:
            raise util.Abort(_("qpush exactly duplicates child changeset"))
        if n is None:
            raise util.Abort(_("repository commit failed"))

        if update_status:
            self.applied.append(statusentry(n, patchname))

        if patcherr:
            self.ui.warn(_("patch failed, rejects left in working dir\n"))
            err = 2
            break

        if fuzz and strict:
            self.ui.warn(_("fuzz found when applying patch, stopping\n"))
            err = 3
            break
    return (err, n)
821
821
def _cleanup(self, patches, numrevs, keep=False):
    """Remove ``patches`` from the series and drop the first
    ``numrevs`` entries from the applied list.

    Unless ``keep`` is set, the patch files themselves are forgotten
    from the patch repository and deleted.  Returns the list of nodes
    for the applied entries that were removed."""
    if not keep:
        r = self.qrepo()
        if r:
            r[None].forget(patches)
        for p in patches:
            os.unlink(self.join(p))

    qfinished = []
    if numrevs:
        qfinished = self.applied[:numrevs]
        del self.applied[:numrevs]
        self.applieddirty = True

    unknown = []

    # delete by descending series index so earlier deletions do not
    # shift the positions of entries still to be deleted
    for (i, p) in sorted([(self.findseries(p), p) for p in patches],
                         reverse=True):
        if i is not None:
            del self.fullseries[i]
        else:
            unknown.append(p)

    if unknown:
        if numrevs:
            # warn (not abort): the revisions are already finished
            rev = dict((entry.name, entry.node) for entry in qfinished)
            for p in unknown:
                msg = _('revision %s refers to unknown patches: %s\n')
                self.ui.warn(msg % (short(rev[p]), p))
        else:
            msg = _('unknown patches: %s\n')
            raise util.Abort(''.join(msg % p for p in unknown))

    self.parseseries()
    self.seriesdirty = True
    return [entry.node for entry in qfinished]
858
858
def _revpatches(self, repo, revs):
    """Return the names of the applied patches matching ``revs``.

    ``revs`` must line up, in order, with the first applied patches;
    otherwise abort."""
    firstrev = repo[self.applied[0].node].rev()
    patches = []
    for i, rev in enumerate(revs):

        if rev < firstrev:
            raise util.Abort(_('revision %d is not managed') % rev)

        ctx = repo[rev]
        base = self.applied[i].node
        if ctx.node() != base:
            # revs must be a contiguous prefix of the applied stack
            msg = _('cannot delete revision %d above applied patches')
            raise util.Abort(msg % rev)

        patch = self.applied[i].name
        # warn when the changeset still carries an auto-generated
        # message instead of a real commit message
        for fmt in ('[mq]: %s', 'imported patch %s'):
            if ctx.description() == fmt % patch:
                msg = _('patch %s finalized without changeset message\n')
                repo.ui.status(msg % patch)
                break

        patches.append(patch)
    return patches
882
882
def finish(self, repo, revs):
    """Move the applied patches for ``revs`` into regular history
    (qfinish), adjusting changeset phases when mq.secret is set."""
    # Manually trigger phase computation to ensure phasedefaults is
    # executed before we remove the patches.
    repo._phasecache
    patches = self._revpatches(repo, sorted(revs))
    qfinished = self._cleanup(patches, len(patches))
    if qfinished and repo.ui.configbool('mq', 'secret', False):
        # only use this logic when the secret option is added
        oldqbase = repo[qfinished[0]]
        tphase = repo.ui.config('phases', 'new-commit', phases.draft)
        if oldqbase.phase() > tphase and oldqbase.p1().phase() <= tphase:
            phases.advanceboundary(repo, tphase, qfinished)
895
895
def delete(self, repo, patches, opts):
    """Delete unapplied patches by name and/or finished ones by
    revision (qdelete)."""
    if not patches and not opts.get('rev'):
        raise util.Abort(_('qdelete requires at least one revision or '
                           'patch name'))

    realpatches = []
    for patch in patches:
        # resolve names strictly; applied patches cannot be deleted
        patch = self.lookup(patch, strict=True)
        info = self.isapplied(patch)
        if info:
            raise util.Abort(_("cannot delete applied patch %s") % patch)
        if patch not in self.series:
            raise util.Abort(_("patch %s not in series file") % patch)
        if patch not in realpatches:
            realpatches.append(patch)

    numrevs = 0
    if opts.get('rev'):
        if not self.applied:
            raise util.Abort(_('no patches applied'))
        revs = scmutil.revrange(repo, opts.get('rev'))
        # normalize to ascending order before mapping to patches
        if len(revs) > 1 and revs[0] > revs[1]:
            revs.reverse()
        revpatches = self._revpatches(repo, revs)
        realpatches += revpatches
        numrevs = len(revpatches)

    self._cleanup(realpatches, numrevs, opts.get('keep'))
924
924
def checktoppatch(self, repo):
    """Return (node, name) of the topmost applied patch.

    Returns (None, None) when no patch is applied.  Aborts when the
    working directory is not parented on the top patch."""
    if not self.applied:
        return None, None
    entry = self.applied[-1]
    if entry.node not in repo.dirstate.parents():
        raise util.Abort(_("working directory revision is not qtip"))
    return entry.node, entry.name
934
934
def checksubstate(self, repo):
    '''return list of subrepos at a different revision than substate.
    Abort if any subrepos have uncommitted changes.'''
    dirtysubs = []
    wctx = repo[None]
    for name in wctx.substate:
        sub = wctx.sub(name)
        if sub.dirty(True):
            raise util.Abort(
                _("uncommitted changes in subrepository %s") % name)
        if sub.dirty():
            dirtysubs.append(name)
    return dirtysubs
947
947
def localchangesfound(self, refresh=True):
    """Abort because of uncommitted local changes; mention the
    refresh remedy only when ``refresh`` is true."""
    if refresh:
        message = _("local changes found, refresh first")
    else:
        message = _("local changes found")
    raise util.Abort(message)
953
953
def checklocalchanges(self, repo, force=False, refresh=True):
    """Return (modified, added, removed, deleted) for the working dir.

    Unless ``force`` is set, abort via localchangesfound() when any of
    those lists is non-empty."""
    changes = repo.status()[:4]
    if not force and any(changes):
        self.localchangesfound(refresh)
    return changes
959
959
# names that can never be used for a patch file
_reserved = ('series', 'status', 'guards', '.', '..')

def checkreservedname(self, name):
    """Abort if ``name`` is unusable as a patch name: a reserved
    control-file name, a '.hg'/'.mq' prefix, or a '#'/':' character."""
    if name in self._reserved:
        raise util.Abort(_('"%s" cannot be used as the name of a patch')
                         % name)
    for banned_prefix in ('.hg', '.mq'):
        if name.startswith(banned_prefix):
            raise util.Abort(_('patch name cannot begin with "%s"')
                             % banned_prefix)
    for banned_char in ('#', ':'):
        if banned_char in name:
            raise util.Abort(_('"%s" cannot be used in the name of a patch')
                             % banned_char)
973
973
def checkpatchname(self, name, force=False):
    """Abort unless ``name`` is a valid, not-yet-existing patch name.

    With ``force``, only the name itself is validated and an existing
    patch file of that name is tolerated."""
    self.checkreservedname(name)
    if force:
        return
    target = self.join(name)
    if not os.path.exists(target):
        return
    if os.path.isdir(target):
        raise util.Abort(_('"%s" already exists as a directory')
                         % name)
    raise util.Abort(_('patch "%s" already exists') % name)
982
982
def checkforcecheck(self, check, force):
    """Abort when --force and --check were both requested; they are
    mutually exclusive."""
    if not (force and check):
        return
    raise util.Abort(_('cannot use both --force and --check'))
986
986
def new(self, repo, patchfn, *pats, **opts):
    """Create a new patch ``patchfn`` from outstanding working-copy
    changes (qnew).

    options:
    msg: a string or a no-argument function returning a string
    """
    msg = opts.get('msg')
    user = opts.get('user')
    date = opts.get('date')
    if date:
        date = util.parsedate(date)
    diffopts = self.diffopts({'git': opts.get('git')})
    if opts.get('checkname', True):
        self.checkpatchname(patchfn)
    inclsubs = self.checksubstate(repo)
    if inclsubs:
        inclsubs.append('.hgsubstate')
        # remember .hgsubstate's dirstate status so the diff written
        # below can classify it as added/removed/modified
        substatestate = repo.dirstate['.hgsubstate']
    if opts.get('include') or opts.get('exclude') or pats:
        if inclsubs:
            pats = list(pats or []) + inclsubs
        match = scmutil.match(repo[None], pats, opts)
        # detect missing files in pats
        def badfn(f, msg):
            if f != '.hgsubstate': # .hgsubstate is auto-created
                raise util.Abort('%s: %s' % (f, msg))
        match.bad = badfn
        changes = repo.status(match=match)
        m, a, r, d = changes[:4]
    else:
        changes = self.checklocalchanges(repo, force=True)
        m, a, r, d = changes
    match = scmutil.matchfiles(repo, m + a + r + inclsubs)
    if len(repo[None].parents()) > 1:
        raise util.Abort(_('cannot manage merge changesets'))
    commitfiles = m + a + r
    self.checktoppatch(repo)
    insert = self.fullseriesend()
    wlock = repo.wlock()
    try:
        try:
            # if patch file write fails, abort early
            p = self.opener(patchfn, "w")
        except IOError, e:
            raise util.Abort(_('cannot write patch "%s": %s')
                             % (patchfn, e.strerror))
        try:
            # write the patch header, either plain mail-style or
            # "# HG changeset patch" style
            if self.plainmode:
                if user:
                    p.write("From: " + user + "\n")
                    if not date:
                        p.write("\n")
                if date:
                    p.write("Date: %d %d\n\n" % date)
            else:
                p.write("# HG changeset patch\n")
                p.write("# Parent "
                        + hex(repo[None].p1().node()) + "\n")
                if user:
                    p.write("# User " + user + "\n")
                if date:
                    p.write("# Date %s %s\n\n" % date)
            if util.safehasattr(msg, '__call__'):
                msg = msg()
            commitmsg = msg and msg or ("[mq]: %s" % patchfn)
            n = newcommit(repo, None, commitmsg, user, date, match=match,
                          force=True)
            if n is None:
                raise util.Abort(_("repo commit failed"))
            try:
                self.fullseries[insert:insert] = [patchfn]
                self.applied.append(statusentry(n, patchfn))
                self.parseseries()
                self.seriesdirty = True
                self.applieddirty = True
                if msg:
                    msg = msg + "\n\n"
                    p.write(msg)
                if commitfiles:
                    parent = self.qparents(repo, n)
                    if inclsubs:
                        if substatestate in 'a?':
                            changes[1].append('.hgsubstate')
                        elif substatestate in 'r':
                            changes[2].append('.hgsubstate')
                        else: # modified
                            changes[0].append('.hgsubstate')
                    chunks = patchmod.diff(repo, node1=parent, node2=n,
                                           changes=changes, opts=diffopts)
                    for chunk in chunks:
                        p.write(chunk)
                p.close()
                r = self.qrepo()
                if r:
                    r[None].add([patchfn])
            except:
                # the commit succeeded but recording/writing the patch
                # failed: undo the commit before propagating the error
                repo.rollback()
                raise
        except Exception:
            # remove the partially written patch file; an unlink
            # failure is only warned about so the original error
            # stays visible
            patchpath = self.join(patchfn)
            try:
                os.unlink(patchpath)
            except:
                self.ui.warn(_('error unlinking %s\n') % patchpath)
            raise
        self.removeundo(repo)
    finally:
        release(wlock)
1093
1093
def strip(self, repo, revs, update=True, backup="all", force=None):
    # Strip ``revs`` from the repository; with ``update``, first clean
    # the working directory to the parent of the first stripped rev.
    wlock = lock = None
    try:
        wlock = repo.wlock()
        lock = repo.lock()

        if update:
            # refuse (unless forced) when there are local changes that
            # the clean update would discard
            self.checklocalchanges(repo, force=force, refresh=False)
            urev = self.qparents(repo, revs[0])
            hg.clean(repo, urev)
            repo.dirstate.write()

        repair.strip(self.ui, repo, revs, backup)
    finally:
        release(lock, wlock)
1109
1109
def isapplied(self, patch):
    """returns (index, rev, patch)"""
    for index, entry in enumerate(self.applied):
        if entry.name == patch:
            return (index, entry.node, entry.name)
    return None
1116
1116
# if the exact patch name does not exist, we try a few
# variations. If strict is passed, we try only #1
#
# 1) a number (as string) to indicate an offset in the series file
# 2) a unique substring of the patch name was given
# 3) patchname[-+]num to indicate an offset in the series file
def lookup(self, patch, strict=False):
    """Resolve a user-supplied patch identifier to a series entry
    (see the matching rules in the comment above); abort when nothing
    matches."""
    def partialname(s):
        # exact name first, then a unique substring, then the
        # qtip/qbase aliases (only meaningful with applied patches)
        if s in self.series:
            return s
        matches = [x for x in self.series if s in x]
        if len(matches) > 1:
            self.ui.warn(_('patch name "%s" is ambiguous:\n') % s)
            for m in matches:
                self.ui.warn('  %s\n' % m)
            return None
        if matches:
            return matches[0]
        if self.series and self.applied:
            if s == 'qtip':
                return self.series[self.seriesend(True)-1]
            if s == 'qbase':
                return self.series[0]
        return None

    if patch in self.series:
        return patch

    if not os.path.isfile(self.join(patch)):
        # try the identifier as a (possibly negative) series index
        try:
            sno = int(patch)
        except (ValueError, OverflowError):
            pass
        else:
            if -len(self.series) <= sno < len(self.series):
                return self.series[sno]

        if not strict:
            res = partialname(patch)
            if res:
                return res
            # NAME-NUM: NUM entries before NAME in the series
            minus = patch.rfind('-')
            if minus >= 0:
                res = partialname(patch[:minus])
                if res:
                    i = self.series.index(res)
                    try:
                        off = int(patch[minus + 1:] or 1)
                    except (ValueError, OverflowError):
                        pass
                    else:
                        if i - off >= 0:
                            return self.series[i - off]
            # NAME+NUM: NUM entries after NAME in the series
            plus = patch.rfind('+')
            if plus >= 0:
                res = partialname(patch[:plus])
                if res:
                    i = self.series.index(res)
                    try:
                        off = int(patch[plus + 1:] or 1)
                    except (ValueError, OverflowError):
                        pass
                    else:
                        if i + off < len(self.series):
                            return self.series[i + off]
    raise util.Abort(_("patch %s not in series") % patch)
1183
1183
1184 def push(self, repo, patch=None, force=False, list=False, mergeq=None,
1184 def push(self, repo, patch=None, force=False, list=False, mergeq=None,
1185 all=False, move=False, exact=False, nobackup=False, check=False):
1185 all=False, move=False, exact=False, nobackup=False, check=False):
1186 self.checkforcecheck(check, force)
1186 self.checkforcecheck(check, force)
1187 diffopts = self.diffopts()
1187 diffopts = self.diffopts()
1188 wlock = repo.wlock()
1188 wlock = repo.wlock()
1189 try:
1189 try:
1190 heads = []
1190 heads = []
1191 for b, ls in repo.branchmap().iteritems():
1191 for b, ls in repo.branchmap().iteritems():
1192 heads += ls
1192 heads += ls
1193 if not heads:
1193 if not heads:
1194 heads = [nullid]
1194 heads = [nullid]
1195 if repo.dirstate.p1() not in heads and not exact:
1195 if repo.dirstate.p1() not in heads and not exact:
1196 self.ui.status(_("(working directory not at a head)\n"))
1196 self.ui.status(_("(working directory not at a head)\n"))
1197
1197
1198 if not self.series:
1198 if not self.series:
1199 self.ui.warn(_('no patches in series\n'))
1199 self.ui.warn(_('no patches in series\n'))
1200 return 0
1200 return 0
1201
1201
1202 # Suppose our series file is: A B C and the current 'top'
1202 # Suppose our series file is: A B C and the current 'top'
1203 # patch is B. qpush C should be performed (moving forward)
1203 # patch is B. qpush C should be performed (moving forward)
1204 # qpush B is a NOP (no change) qpush A is an error (can't
1204 # qpush B is a NOP (no change) qpush A is an error (can't
1205 # go backwards with qpush)
1205 # go backwards with qpush)
1206 if patch:
1206 if patch:
1207 patch = self.lookup(patch)
1207 patch = self.lookup(patch)
1208 info = self.isapplied(patch)
1208 info = self.isapplied(patch)
1209 if info and info[0] >= len(self.applied) - 1:
1209 if info and info[0] >= len(self.applied) - 1:
1210 self.ui.warn(
1210 self.ui.warn(
1211 _('qpush: %s is already at the top\n') % patch)
1211 _('qpush: %s is already at the top\n') % patch)
1212 return 0
1212 return 0
1213
1213
1214 pushable, reason = self.pushable(patch)
1214 pushable, reason = self.pushable(patch)
1215 if pushable:
1215 if pushable:
1216 if self.series.index(patch) < self.seriesend():
1216 if self.series.index(patch) < self.seriesend():
1217 raise util.Abort(
1217 raise util.Abort(
1218 _("cannot push to a previous patch: %s") % patch)
1218 _("cannot push to a previous patch: %s") % patch)
1219 else:
1219 else:
1220 if reason:
1220 if reason:
1221 reason = _('guarded by %s') % reason
1221 reason = _('guarded by %s') % reason
1222 else:
1222 else:
1223 reason = _('no matching guards')
1223 reason = _('no matching guards')
1224 self.ui.warn(_("cannot push '%s' - %s\n") % (patch, reason))
1224 self.ui.warn(_("cannot push '%s' - %s\n") % (patch, reason))
1225 return 1
1225 return 1
1226 elif all:
1226 elif all:
1227 patch = self.series[-1]
1227 patch = self.series[-1]
1228 if self.isapplied(patch):
1228 if self.isapplied(patch):
1229 self.ui.warn(_('all patches are currently applied\n'))
1229 self.ui.warn(_('all patches are currently applied\n'))
1230 return 0
1230 return 0
1231
1231
1232 # Following the above example, starting at 'top' of B:
1232 # Following the above example, starting at 'top' of B:
1233 # qpush should be performed (pushes C), but a subsequent
1233 # qpush should be performed (pushes C), but a subsequent
1234 # qpush without an argument is an error (nothing to
1234 # qpush without an argument is an error (nothing to
1235 # apply). This allows a loop of "...while hg qpush..." to
1235 # apply). This allows a loop of "...while hg qpush..." to
1236 # work as it detects an error when done
1236 # work as it detects an error when done
1237 start = self.seriesend()
1237 start = self.seriesend()
1238 if start == len(self.series):
1238 if start == len(self.series):
1239 self.ui.warn(_('patch series already fully applied\n'))
1239 self.ui.warn(_('patch series already fully applied\n'))
1240 return 1
1240 return 1
1241 if not force and not check:
1241 if not force and not check:
1242 self.checklocalchanges(repo, refresh=self.applied)
1242 self.checklocalchanges(repo, refresh=self.applied)
1243
1243
1244 if exact:
1244 if exact:
1245 if check:
1245 if check:
1246 raise util.Abort(
1246 raise util.Abort(
1247 _("cannot use --exact and --check together"))
1247 _("cannot use --exact and --check together"))
1248 if move:
1248 if move:
1249 raise util.Abort(_('cannot use --exact and --move '
1249 raise util.Abort(_('cannot use --exact and --move '
1250 'together'))
1250 'together'))
1251 if self.applied:
1251 if self.applied:
1252 raise util.Abort(_('cannot push --exact with applied '
1252 raise util.Abort(_('cannot push --exact with applied '
1253 'patches'))
1253 'patches'))
1254 root = self.series[start]
1254 root = self.series[start]
1255 target = patchheader(self.join(root), self.plainmode).parent
1255 target = patchheader(self.join(root), self.plainmode).parent
1256 if not target:
1256 if not target:
1257 raise util.Abort(
1257 raise util.Abort(
1258 _("%s does not have a parent recorded") % root)
1258 _("%s does not have a parent recorded") % root)
1259 if not repo[target] == repo['.']:
1259 if not repo[target] == repo['.']:
1260 hg.update(repo, target)
1260 hg.update(repo, target)
1261
1261
1262 if move:
1262 if move:
1263 if not patch:
1263 if not patch:
1264 raise util.Abort(_("please specify the patch to move"))
1264 raise util.Abort(_("please specify the patch to move"))
1265 for fullstart, rpn in enumerate(self.fullseries):
1265 for fullstart, rpn in enumerate(self.fullseries):
1266 # strip markers for patch guards
1266 # strip markers for patch guards
1267 if self.guard_re.split(rpn, 1)[0] == self.series[start]:
1267 if self.guard_re.split(rpn, 1)[0] == self.series[start]:
1268 break
1268 break
1269 for i, rpn in enumerate(self.fullseries[fullstart:]):
1269 for i, rpn in enumerate(self.fullseries[fullstart:]):
1270 # strip markers for patch guards
1270 # strip markers for patch guards
1271 if self.guard_re.split(rpn, 1)[0] == patch:
1271 if self.guard_re.split(rpn, 1)[0] == patch:
1272 break
1272 break
1273 index = fullstart + i
1273 index = fullstart + i
1274 assert index < len(self.fullseries)
1274 assert index < len(self.fullseries)
1275 fullpatch = self.fullseries[index]
1275 fullpatch = self.fullseries[index]
1276 del self.fullseries[index]
1276 del self.fullseries[index]
1277 self.fullseries.insert(fullstart, fullpatch)
1277 self.fullseries.insert(fullstart, fullpatch)
1278 self.parseseries()
1278 self.parseseries()
1279 self.seriesdirty = True
1279 self.seriesdirty = True
1280
1280
1281 self.applieddirty = True
1281 self.applieddirty = True
1282 if start > 0:
1282 if start > 0:
1283 self.checktoppatch(repo)
1283 self.checktoppatch(repo)
1284 if not patch:
1284 if not patch:
1285 patch = self.series[start]
1285 patch = self.series[start]
1286 end = start + 1
1286 end = start + 1
1287 else:
1287 else:
1288 end = self.series.index(patch, start) + 1
1288 end = self.series.index(patch, start) + 1
1289
1289
1290 tobackup = set()
1290 tobackup = set()
1291 if (not nobackup and force) or check:
1291 if (not nobackup and force) or check:
1292 m, a, r, d = self.checklocalchanges(repo, force=True)
1292 m, a, r, d = self.checklocalchanges(repo, force=True)
1293 if check:
1293 if check:
1294 tobackup.update(m + a + r + d)
1294 tobackup.update(m + a + r + d)
1295 else:
1295 else:
1296 tobackup.update(m + a)
1296 tobackup.update(m + a)
1297
1297
1298 s = self.series[start:end]
1298 s = self.series[start:end]
1299 all_files = set()
1299 all_files = set()
1300 try:
1300 try:
1301 if mergeq:
1301 if mergeq:
1302 ret = self.mergepatch(repo, mergeq, s, diffopts)
1302 ret = self.mergepatch(repo, mergeq, s, diffopts)
1303 else:
1303 else:
1304 ret = self.apply(repo, s, list, all_files=all_files,
1304 ret = self.apply(repo, s, list, all_files=all_files,
1305 tobackup=tobackup, check=check)
1305 tobackup=tobackup, check=check)
1306 except:
1306 except:
1307 self.ui.warn(_('cleaning up working directory...'))
1307 self.ui.warn(_('cleaning up working directory...'))
1308 node = repo.dirstate.p1()
1308 node = repo.dirstate.p1()
1309 hg.revert(repo, node, None)
1309 hg.revert(repo, node, None)
1310 # only remove unknown files that we know we touched or
1310 # only remove unknown files that we know we touched or
1311 # created while patching
1311 # created while patching
1312 for f in all_files:
1312 for f in all_files:
1313 if f not in repo.dirstate:
1313 if f not in repo.dirstate:
1314 try:
1314 try:
1315 util.unlinkpath(repo.wjoin(f))
1315 util.unlinkpath(repo.wjoin(f))
1316 except OSError, inst:
1316 except OSError, inst:
1317 if inst.errno != errno.ENOENT:
1317 if inst.errno != errno.ENOENT:
1318 raise
1318 raise
1319 self.ui.warn(_('done\n'))
1319 self.ui.warn(_('done\n'))
1320 raise
1320 raise
1321
1321
1322 if not self.applied:
1322 if not self.applied:
1323 return ret[0]
1323 return ret[0]
1324 top = self.applied[-1].name
1324 top = self.applied[-1].name
1325 if ret[0] and ret[0] > 1:
1325 if ret[0] and ret[0] > 1:
1326 msg = _("errors during apply, please fix and refresh %s\n")
1326 msg = _("errors during apply, please fix and refresh %s\n")
1327 self.ui.write(msg % top)
1327 self.ui.write(msg % top)
1328 else:
1328 else:
1329 self.ui.write(_("now at: %s\n") % top)
1329 self.ui.write(_("now at: %s\n") % top)
1330 return ret[0]
1330 return ret[0]
1331
1331
1332 finally:
1332 finally:
1333 wlock.release()
1333 wlock.release()
1334
1334
1335 def pop(self, repo, patch=None, force=False, update=True, all=False,
1335 def pop(self, repo, patch=None, force=False, update=True, all=False,
1336 nobackup=False, check=False):
1336 nobackup=False, check=False):
1337 self.checkforcecheck(check, force)
1337 self.checkforcecheck(check, force)
1338 wlock = repo.wlock()
1338 wlock = repo.wlock()
1339 try:
1339 try:
1340 if patch:
1340 if patch:
1341 # index, rev, patch
1341 # index, rev, patch
1342 info = self.isapplied(patch)
1342 info = self.isapplied(patch)
1343 if not info:
1343 if not info:
1344 patch = self.lookup(patch)
1344 patch = self.lookup(patch)
1345 info = self.isapplied(patch)
1345 info = self.isapplied(patch)
1346 if not info:
1346 if not info:
1347 raise util.Abort(_("patch %s is not applied") % patch)
1347 raise util.Abort(_("patch %s is not applied") % patch)
1348
1348
1349 if not self.applied:
1349 if not self.applied:
1350 # Allow qpop -a to work repeatedly,
1350 # Allow qpop -a to work repeatedly,
1351 # but not qpop without an argument
1351 # but not qpop without an argument
1352 self.ui.warn(_("no patches applied\n"))
1352 self.ui.warn(_("no patches applied\n"))
1353 return not all
1353 return not all
1354
1354
1355 if all:
1355 if all:
1356 start = 0
1356 start = 0
1357 elif patch:
1357 elif patch:
1358 start = info[0] + 1
1358 start = info[0] + 1
1359 else:
1359 else:
1360 start = len(self.applied) - 1
1360 start = len(self.applied) - 1
1361
1361
1362 if start >= len(self.applied):
1362 if start >= len(self.applied):
1363 self.ui.warn(_("qpop: %s is already at the top\n") % patch)
1363 self.ui.warn(_("qpop: %s is already at the top\n") % patch)
1364 return
1364 return
1365
1365
1366 if not update:
1366 if not update:
1367 parents = repo.dirstate.parents()
1367 parents = repo.dirstate.parents()
1368 rr = [x.node for x in self.applied]
1368 rr = [x.node for x in self.applied]
1369 for p in parents:
1369 for p in parents:
1370 if p in rr:
1370 if p in rr:
1371 self.ui.warn(_("qpop: forcing dirstate update\n"))
1371 self.ui.warn(_("qpop: forcing dirstate update\n"))
1372 update = True
1372 update = True
1373 else:
1373 else:
1374 parents = [p.node() for p in repo[None].parents()]
1374 parents = [p.node() for p in repo[None].parents()]
1375 needupdate = False
1375 needupdate = False
1376 for entry in self.applied[start:]:
1376 for entry in self.applied[start:]:
1377 if entry.node in parents:
1377 if entry.node in parents:
1378 needupdate = True
1378 needupdate = True
1379 break
1379 break
1380 update = needupdate
1380 update = needupdate
1381
1381
1382 tobackup = set()
1382 tobackup = set()
1383 if update:
1383 if update:
1384 m, a, r, d = self.checklocalchanges(repo, force=force or check)
1384 m, a, r, d = self.checklocalchanges(repo, force=force or check)
1385 if force:
1385 if force:
1386 if not nobackup:
1386 if not nobackup:
1387 tobackup.update(m + a)
1387 tobackup.update(m + a)
1388 elif check:
1388 elif check:
1389 tobackup.update(m + a + r + d)
1389 tobackup.update(m + a + r + d)
1390
1390
1391 self.applieddirty = True
1391 self.applieddirty = True
1392 end = len(self.applied)
1392 end = len(self.applied)
1393 rev = self.applied[start].node
1393 rev = self.applied[start].node
1394 if update:
1394 if update:
1395 top = self.checktoppatch(repo)[0]
1395 top = self.checktoppatch(repo)[0]
1396
1396
1397 try:
1397 try:
1398 heads = repo.changelog.heads(rev)
1398 heads = repo.changelog.heads(rev)
1399 except error.LookupError:
1399 except error.LookupError:
1400 node = short(rev)
1400 node = short(rev)
1401 raise util.Abort(_('trying to pop unknown node %s') % node)
1401 raise util.Abort(_('trying to pop unknown node %s') % node)
1402
1402
1403 if heads != [self.applied[-1].node]:
1403 if heads != [self.applied[-1].node]:
1404 raise util.Abort(_("popping would remove a revision not "
1404 raise util.Abort(_("popping would remove a revision not "
1405 "managed by this patch queue"))
1405 "managed by this patch queue"))
1406 if not repo[self.applied[-1].node].mutable():
1406 if not repo[self.applied[-1].node].mutable():
1407 raise util.Abort(
1407 raise util.Abort(
1408 _("popping would remove an immutable revision"),
1408 _("popping would remove an immutable revision"),
1409 hint=_('see "hg help phases" for details'))
1409 hint=_('see "hg help phases" for details'))
1410
1410
1411 # we know there are no local changes, so we can make a simplified
1411 # we know there are no local changes, so we can make a simplified
1412 # form of hg.update.
1412 # form of hg.update.
1413 if update:
1413 if update:
1414 qp = self.qparents(repo, rev)
1414 qp = self.qparents(repo, rev)
1415 ctx = repo[qp]
1415 ctx = repo[qp]
1416 m, a, r, d = repo.status(qp, top)[:4]
1416 m, a, r, d = repo.status(qp, top)[:4]
1417 if d:
1417 if d:
1418 raise util.Abort(_("deletions found between repo revs"))
1418 raise util.Abort(_("deletions found between repo revs"))
1419
1419
1420 tobackup = set(a + m + r) & tobackup
1420 tobackup = set(a + m + r) & tobackup
1421 if check and tobackup:
1421 if check and tobackup:
1422 self.localchangesfound()
1422 self.localchangesfound()
1423 self.backup(repo, tobackup)
1423 self.backup(repo, tobackup)
1424
1424
1425 for f in a:
1425 for f in a:
1426 try:
1426 try:
1427 util.unlinkpath(repo.wjoin(f))
1427 util.unlinkpath(repo.wjoin(f))
1428 except OSError, e:
1428 except OSError, e:
1429 if e.errno != errno.ENOENT:
1429 if e.errno != errno.ENOENT:
1430 raise
1430 raise
1431 repo.dirstate.drop(f)
1431 repo.dirstate.drop(f)
1432 for f in m + r:
1432 for f in m + r:
1433 fctx = ctx[f]
1433 fctx = ctx[f]
1434 repo.wwrite(f, fctx.data(), fctx.flags())
1434 repo.wwrite(f, fctx.data(), fctx.flags())
1435 repo.dirstate.normal(f)
1435 repo.dirstate.normal(f)
1436 repo.setparents(qp, nullid)
1436 repo.setparents(qp, nullid)
1437 for patch in reversed(self.applied[start:end]):
1437 for patch in reversed(self.applied[start:end]):
1438 self.ui.status(_("popping %s\n") % patch.name)
1438 self.ui.status(_("popping %s\n") % patch.name)
1439 del self.applied[start:end]
1439 del self.applied[start:end]
1440 self.strip(repo, [rev], update=False, backup='strip')
1440 self.strip(repo, [rev], update=False, backup='strip')
1441 if self.applied:
1441 if self.applied:
1442 self.ui.write(_("now at: %s\n") % self.applied[-1].name)
1442 self.ui.write(_("now at: %s\n") % self.applied[-1].name)
1443 else:
1443 else:
1444 self.ui.write(_("patch queue now empty\n"))
1444 self.ui.write(_("patch queue now empty\n"))
1445 finally:
1445 finally:
1446 wlock.release()
1446 wlock.release()
1447
1447
1448 def diff(self, repo, pats, opts):
1448 def diff(self, repo, pats, opts):
1449 top, patch = self.checktoppatch(repo)
1449 top, patch = self.checktoppatch(repo)
1450 if not top:
1450 if not top:
1451 self.ui.write(_("no patches applied\n"))
1451 self.ui.write(_("no patches applied\n"))
1452 return
1452 return
1453 qp = self.qparents(repo, top)
1453 qp = self.qparents(repo, top)
1454 if opts.get('reverse'):
1454 if opts.get('reverse'):
1455 node1, node2 = None, qp
1455 node1, node2 = None, qp
1456 else:
1456 else:
1457 node1, node2 = qp, None
1457 node1, node2 = qp, None
1458 diffopts = self.diffopts(opts, patch)
1458 diffopts = self.diffopts(opts, patch)
1459 self.printdiff(repo, diffopts, node1, node2, files=pats, opts=opts)
1459 self.printdiff(repo, diffopts, node1, node2, files=pats, opts=opts)
1460
1460
1461 def refresh(self, repo, pats=None, **opts):
1461 def refresh(self, repo, pats=None, **opts):
1462 if not self.applied:
1462 if not self.applied:
1463 self.ui.write(_("no patches applied\n"))
1463 self.ui.write(_("no patches applied\n"))
1464 return 1
1464 return 1
1465 msg = opts.get('msg', '').rstrip()
1465 msg = opts.get('msg', '').rstrip()
1466 newuser = opts.get('user')
1466 newuser = opts.get('user')
1467 newdate = opts.get('date')
1467 newdate = opts.get('date')
1468 if newdate:
1468 if newdate:
1469 newdate = '%d %d' % util.parsedate(newdate)
1469 newdate = '%d %d' % util.parsedate(newdate)
1470 wlock = repo.wlock()
1470 wlock = repo.wlock()
1471
1471
1472 try:
1472 try:
1473 self.checktoppatch(repo)
1473 self.checktoppatch(repo)
1474 (top, patchfn) = (self.applied[-1].node, self.applied[-1].name)
1474 (top, patchfn) = (self.applied[-1].node, self.applied[-1].name)
1475 if repo.changelog.heads(top) != [top]:
1475 if repo.changelog.heads(top) != [top]:
1476 raise util.Abort(_("cannot refresh a revision with children"))
1476 raise util.Abort(_("cannot refresh a revision with children"))
1477 if not repo[top].mutable():
1477 if not repo[top].mutable():
1478 raise util.Abort(_("cannot refresh immutable revision"),
1478 raise util.Abort(_("cannot refresh immutable revision"),
1479 hint=_('see "hg help phases" for details'))
1479 hint=_('see "hg help phases" for details'))
1480
1480
1481 inclsubs = self.checksubstate(repo)
1481 inclsubs = self.checksubstate(repo)
1482
1482
1483 cparents = repo.changelog.parents(top)
1483 cparents = repo.changelog.parents(top)
1484 patchparent = self.qparents(repo, top)
1484 patchparent = self.qparents(repo, top)
1485 ph = patchheader(self.join(patchfn), self.plainmode)
1485 ph = patchheader(self.join(patchfn), self.plainmode)
1486 diffopts = self.diffopts({'git': opts.get('git')}, patchfn)
1486 diffopts = self.diffopts({'git': opts.get('git')}, patchfn)
1487 if msg:
1487 if msg:
1488 ph.setmessage(msg)
1488 ph.setmessage(msg)
1489 if newuser:
1489 if newuser:
1490 ph.setuser(newuser)
1490 ph.setuser(newuser)
1491 if newdate:
1491 if newdate:
1492 ph.setdate(newdate)
1492 ph.setdate(newdate)
1493 ph.setparent(hex(patchparent))
1493 ph.setparent(hex(patchparent))
1494
1494
1495 # only commit new patch when write is complete
1495 # only commit new patch when write is complete
1496 patchf = self.opener(patchfn, 'w', atomictemp=True)
1496 patchf = self.opener(patchfn, 'w', atomictemp=True)
1497
1497
1498 comments = str(ph)
1498 comments = str(ph)
1499 if comments:
1499 if comments:
1500 patchf.write(comments)
1500 patchf.write(comments)
1501
1501
1502 # update the dirstate in place, strip off the qtip commit
1502 # update the dirstate in place, strip off the qtip commit
1503 # and then commit.
1503 # and then commit.
1504 #
1504 #
1505 # this should really read:
1505 # this should really read:
1506 # mm, dd, aa = repo.status(top, patchparent)[:3]
1506 # mm, dd, aa = repo.status(top, patchparent)[:3]
1507 # but we do it backwards to take advantage of manifest/chlog
1507 # but we do it backwards to take advantage of manifest/chlog
1508 # caching against the next repo.status call
1508 # caching against the next repo.status call
1509 mm, aa, dd = repo.status(patchparent, top)[:3]
1509 mm, aa, dd = repo.status(patchparent, top)[:3]
1510 changes = repo.changelog.read(top)
1510 changes = repo.changelog.read(top)
1511 man = repo.manifest.read(changes[0])
1511 man = repo.manifest.read(changes[0])
1512 aaa = aa[:]
1512 aaa = aa[:]
1513 matchfn = scmutil.match(repo[None], pats, opts)
1513 matchfn = scmutil.match(repo[None], pats, opts)
1514 # in short mode, we only diff the files included in the
1514 # in short mode, we only diff the files included in the
1515 # patch already plus specified files
1515 # patch already plus specified files
1516 if opts.get('short'):
1516 if opts.get('short'):
1517 # if amending a patch, we start with existing
1517 # if amending a patch, we start with existing
1518 # files plus specified files - unfiltered
1518 # files plus specified files - unfiltered
1519 match = scmutil.matchfiles(repo, mm + aa + dd + matchfn.files())
1519 match = scmutil.matchfiles(repo, mm + aa + dd + matchfn.files())
1520 # filter with inc/exl options
1520 # filter with inc/exl options
1521 matchfn = scmutil.match(repo[None], opts=opts)
1521 matchfn = scmutil.match(repo[None], opts=opts)
1522 else:
1522 else:
1523 match = scmutil.matchall(repo)
1523 match = scmutil.matchall(repo)
1524 m, a, r, d = repo.status(match=match)[:4]
1524 m, a, r, d = repo.status(match=match)[:4]
1525 mm = set(mm)
1525 mm = set(mm)
1526 aa = set(aa)
1526 aa = set(aa)
1527 dd = set(dd)
1527 dd = set(dd)
1528
1528
1529 # we might end up with files that were added between
1529 # we might end up with files that were added between
1530 # qtip and the dirstate parent, but then changed in the
1530 # qtip and the dirstate parent, but then changed in the
1531 # local dirstate. in this case, we want them to only
1531 # local dirstate. in this case, we want them to only
1532 # show up in the added section
1532 # show up in the added section
1533 for x in m:
1533 for x in m:
1534 if x not in aa:
1534 if x not in aa:
1535 mm.add(x)
1535 mm.add(x)
1536 # we might end up with files added by the local dirstate that
1536 # we might end up with files added by the local dirstate that
1537 # were deleted by the patch. In this case, they should only
1537 # were deleted by the patch. In this case, they should only
1538 # show up in the changed section.
1538 # show up in the changed section.
1539 for x in a:
1539 for x in a:
1540 if x in dd:
1540 if x in dd:
1541 dd.remove(x)
1541 dd.remove(x)
1542 mm.add(x)
1542 mm.add(x)
1543 else:
1543 else:
1544 aa.add(x)
1544 aa.add(x)
1545 # make sure any files deleted in the local dirstate
1545 # make sure any files deleted in the local dirstate
1546 # are not in the add or change column of the patch
1546 # are not in the add or change column of the patch
1547 forget = []
1547 forget = []
1548 for x in d + r:
1548 for x in d + r:
1549 if x in aa:
1549 if x in aa:
1550 aa.remove(x)
1550 aa.remove(x)
1551 forget.append(x)
1551 forget.append(x)
1552 continue
1552 continue
1553 else:
1553 else:
1554 mm.discard(x)
1554 mm.discard(x)
1555 dd.add(x)
1555 dd.add(x)
1556
1556
1557 m = list(mm)
1557 m = list(mm)
1558 r = list(dd)
1558 r = list(dd)
1559 a = list(aa)
1559 a = list(aa)
1560 c = [filter(matchfn, l) for l in (m, a, r)]
1560 c = [filter(matchfn, l) for l in (m, a, r)]
1561 match = scmutil.matchfiles(repo, set(c[0] + c[1] + c[2] + inclsubs))
1561 match = scmutil.matchfiles(repo, set(c[0] + c[1] + c[2] + inclsubs))
1562 chunks = patchmod.diff(repo, patchparent, match=match,
1562 chunks = patchmod.diff(repo, patchparent, match=match,
1563 changes=c, opts=diffopts)
1563 changes=c, opts=diffopts)
1564 for chunk in chunks:
1564 for chunk in chunks:
1565 patchf.write(chunk)
1565 patchf.write(chunk)
1566
1566
1567 try:
1567 try:
1568 if diffopts.git or diffopts.upgrade:
1568 if diffopts.git or diffopts.upgrade:
1569 copies = {}
1569 copies = {}
1570 for dst in a:
1570 for dst in a:
1571 src = repo.dirstate.copied(dst)
1571 src = repo.dirstate.copied(dst)
1572 # during qfold, the source file for copies may
1572 # during qfold, the source file for copies may
1573 # be removed. Treat this as a simple add.
1573 # be removed. Treat this as a simple add.
1574 if src is not None and src in repo.dirstate:
1574 if src is not None and src in repo.dirstate:
1575 copies.setdefault(src, []).append(dst)
1575 copies.setdefault(src, []).append(dst)
1576 repo.dirstate.add(dst)
1576 repo.dirstate.add(dst)
1577 # remember the copies between patchparent and qtip
1577 # remember the copies between patchparent and qtip
1578 for dst in aaa:
1578 for dst in aaa:
1579 f = repo.file(dst)
1579 f = repo.file(dst)
1580 src = f.renamed(man[dst])
1580 src = f.renamed(man[dst])
1581 if src:
1581 if src:
1582 copies.setdefault(src[0], []).extend(
1582 copies.setdefault(src[0], []).extend(
1583 copies.get(dst, []))
1583 copies.get(dst, []))
1584 if dst in a:
1584 if dst in a:
1585 copies[src[0]].append(dst)
1585 copies[src[0]].append(dst)
1586 # we can't copy a file created by the patch itself
1586 # we can't copy a file created by the patch itself
1587 if dst in copies:
1587 if dst in copies:
1588 del copies[dst]
1588 del copies[dst]
1589 for src, dsts in copies.iteritems():
1589 for src, dsts in copies.iteritems():
1590 for dst in dsts:
1590 for dst in dsts:
1591 repo.dirstate.copy(src, dst)
1591 repo.dirstate.copy(src, dst)
1592 else:
1592 else:
1593 for dst in a:
1593 for dst in a:
1594 repo.dirstate.add(dst)
1594 repo.dirstate.add(dst)
1595 # Drop useless copy information
1595 # Drop useless copy information
1596 for f in list(repo.dirstate.copies()):
1596 for f in list(repo.dirstate.copies()):
1597 repo.dirstate.copy(None, f)
1597 repo.dirstate.copy(None, f)
1598 for f in r:
1598 for f in r:
1599 repo.dirstate.remove(f)
1599 repo.dirstate.remove(f)
1600 # if the patch excludes a modified file, mark that
1600 # if the patch excludes a modified file, mark that
1601 # file with mtime=0 so status can see it.
1601 # file with mtime=0 so status can see it.
1602 mm = []
1602 mm = []
1603 for i in xrange(len(m)-1, -1, -1):
1603 for i in xrange(len(m)-1, -1, -1):
1604 if not matchfn(m[i]):
1604 if not matchfn(m[i]):
1605 mm.append(m[i])
1605 mm.append(m[i])
1606 del m[i]
1606 del m[i]
1607 for f in m:
1607 for f in m:
1608 repo.dirstate.normal(f)
1608 repo.dirstate.normal(f)
1609 for f in mm:
1609 for f in mm:
1610 repo.dirstate.normallookup(f)
1610 repo.dirstate.normallookup(f)
1611 for f in forget:
1611 for f in forget:
1612 repo.dirstate.drop(f)
1612 repo.dirstate.drop(f)
1613
1613
1614 if not msg:
1614 if not msg:
1615 if not ph.message:
1615 if not ph.message:
1616 message = "[mq]: %s\n" % patchfn
1616 message = "[mq]: %s\n" % patchfn
1617 else:
1617 else:
1618 message = "\n".join(ph.message)
1618 message = "\n".join(ph.message)
1619 else:
1619 else:
1620 message = msg
1620 message = msg
1621
1621
1622 user = ph.user or changes[1]
1622 user = ph.user or changes[1]
1623
1623
1624 oldphase = repo[top].phase()
1624 oldphase = repo[top].phase()
1625
1625
1626 # assumes strip can roll itself back if interrupted
1626 # assumes strip can roll itself back if interrupted
1627 repo.setparents(*cparents)
1627 repo.setparents(*cparents)
1628 self.applied.pop()
1628 self.applied.pop()
1629 self.applieddirty = True
1629 self.applieddirty = True
1630 self.strip(repo, [top], update=False,
1630 self.strip(repo, [top], update=False,
1631 backup='strip')
1631 backup='strip')
1632 except:
1632 except:
1633 repo.dirstate.invalidate()
1633 repo.dirstate.invalidate()
1634 raise
1634 raise
1635
1635
1636 try:
1636 try:
1637 # might be nice to attempt to roll back strip after this
1637 # might be nice to attempt to roll back strip after this
1638
1638
1639 # Ensure we create a new changeset in the same phase than
1639 # Ensure we create a new changeset in the same phase than
1640 # the old one.
1640 # the old one.
1641 n = newcommit(repo, oldphase, message, user, ph.date,
1641 n = newcommit(repo, oldphase, message, user, ph.date,
1642 match=match, force=True)
1642 match=match, force=True)
1643 # only write patch after a successful commit
1643 # only write patch after a successful commit
1644 patchf.close()
1644 patchf.close()
1645 self.applied.append(statusentry(n, patchfn))
1645 self.applied.append(statusentry(n, patchfn))
1646 except:
1646 except:
1647 ctx = repo[cparents[0]]
1647 ctx = repo[cparents[0]]
1648 repo.dirstate.rebuild(ctx.node(), ctx.manifest())
1648 repo.dirstate.rebuild(ctx.node(), ctx.manifest())
1649 self.savedirty()
1649 self.savedirty()
1650 self.ui.warn(_('refresh interrupted while patch was popped! '
1650 self.ui.warn(_('refresh interrupted while patch was popped! '
1651 '(revert --all, qpush to recover)\n'))
1651 '(revert --all, qpush to recover)\n'))
1652 raise
1652 raise
1653 finally:
1653 finally:
1654 wlock.release()
1654 wlock.release()
1655 self.removeundo(repo)
1655 self.removeundo(repo)
1656
1656
1657 def init(self, repo, create=False):
1657 def init(self, repo, create=False):
1658 if not create and os.path.isdir(self.path):
1658 if not create and os.path.isdir(self.path):
1659 raise util.Abort(_("patch queue directory already exists"))
1659 raise util.Abort(_("patch queue directory already exists"))
1660 try:
1660 try:
1661 os.mkdir(self.path)
1661 os.mkdir(self.path)
1662 except OSError, inst:
1662 except OSError, inst:
1663 if inst.errno != errno.EEXIST or not create:
1663 if inst.errno != errno.EEXIST or not create:
1664 raise
1664 raise
1665 if create:
1665 if create:
1666 return self.qrepo(create=True)
1666 return self.qrepo(create=True)
1667
1667
1668 def unapplied(self, repo, patch=None):
1668 def unapplied(self, repo, patch=None):
1669 if patch and patch not in self.series:
1669 if patch and patch not in self.series:
1670 raise util.Abort(_("patch %s is not in series file") % patch)
1670 raise util.Abort(_("patch %s is not in series file") % patch)
1671 if not patch:
1671 if not patch:
1672 start = self.seriesend()
1672 start = self.seriesend()
1673 else:
1673 else:
1674 start = self.series.index(patch) + 1
1674 start = self.series.index(patch) + 1
1675 unapplied = []
1675 unapplied = []
1676 for i in xrange(start, len(self.series)):
1676 for i in xrange(start, len(self.series)):
1677 pushable, reason = self.pushable(i)
1677 pushable, reason = self.pushable(i)
1678 if pushable:
1678 if pushable:
1679 unapplied.append((i, self.series[i]))
1679 unapplied.append((i, self.series[i]))
1680 self.explainpushable(i)
1680 self.explainpushable(i)
1681 return unapplied
1681 return unapplied
1682
1682
1683 def qseries(self, repo, missing=None, start=0, length=None, status=None,
1683 def qseries(self, repo, missing=None, start=0, length=None, status=None,
1684 summary=False):
1684 summary=False):
1685 def displayname(pfx, patchname, state):
1685 def displayname(pfx, patchname, state):
1686 if pfx:
1686 if pfx:
1687 self.ui.write(pfx)
1687 self.ui.write(pfx)
1688 if summary:
1688 if summary:
1689 ph = patchheader(self.join(patchname), self.plainmode)
1689 ph = patchheader(self.join(patchname), self.plainmode)
1690 msg = ph.message and ph.message[0] or ''
1690 msg = ph.message and ph.message[0] or ''
1691 if self.ui.formatted():
1691 if self.ui.formatted():
1692 width = self.ui.termwidth() - len(pfx) - len(patchname) - 2
1692 width = self.ui.termwidth() - len(pfx) - len(patchname) - 2
1693 if width > 0:
1693 if width > 0:
1694 msg = util.ellipsis(msg, width)
1694 msg = util.ellipsis(msg, width)
1695 else:
1695 else:
1696 msg = ''
1696 msg = ''
1697 self.ui.write(patchname, label='qseries.' + state)
1697 self.ui.write(patchname, label='qseries.' + state)
1698 self.ui.write(': ')
1698 self.ui.write(': ')
1699 self.ui.write(msg, label='qseries.message.' + state)
1699 self.ui.write(msg, label='qseries.message.' + state)
1700 else:
1700 else:
1701 self.ui.write(patchname, label='qseries.' + state)
1701 self.ui.write(patchname, label='qseries.' + state)
1702 self.ui.write('\n')
1702 self.ui.write('\n')
1703
1703
1704 applied = set([p.name for p in self.applied])
1704 applied = set([p.name for p in self.applied])
1705 if length is None:
1705 if length is None:
1706 length = len(self.series) - start
1706 length = len(self.series) - start
1707 if not missing:
1707 if not missing:
1708 if self.ui.verbose:
1708 if self.ui.verbose:
1709 idxwidth = len(str(start + length - 1))
1709 idxwidth = len(str(start + length - 1))
1710 for i in xrange(start, start + length):
1710 for i in xrange(start, start + length):
1711 patch = self.series[i]
1711 patch = self.series[i]
1712 if patch in applied:
1712 if patch in applied:
1713 char, state = 'A', 'applied'
1713 char, state = 'A', 'applied'
1714 elif self.pushable(i)[0]:
1714 elif self.pushable(i)[0]:
1715 char, state = 'U', 'unapplied'
1715 char, state = 'U', 'unapplied'
1716 else:
1716 else:
1717 char, state = 'G', 'guarded'
1717 char, state = 'G', 'guarded'
1718 pfx = ''
1718 pfx = ''
1719 if self.ui.verbose:
1719 if self.ui.verbose:
1720 pfx = '%*d %s ' % (idxwidth, i, char)
1720 pfx = '%*d %s ' % (idxwidth, i, char)
1721 elif status and status != char:
1721 elif status and status != char:
1722 continue
1722 continue
1723 displayname(pfx, patch, state)
1723 displayname(pfx, patch, state)
1724 else:
1724 else:
1725 msng_list = []
1725 msng_list = []
1726 for root, dirs, files in os.walk(self.path):
1726 for root, dirs, files in os.walk(self.path):
1727 d = root[len(self.path) + 1:]
1727 d = root[len(self.path) + 1:]
1728 for f in files:
1728 for f in files:
1729 fl = os.path.join(d, f)
1729 fl = os.path.join(d, f)
1730 if (fl not in self.series and
1730 if (fl not in self.series and
1731 fl not in (self.statuspath, self.seriespath,
1731 fl not in (self.statuspath, self.seriespath,
1732 self.guardspath)
1732 self.guardspath)
1733 and not fl.startswith('.')):
1733 and not fl.startswith('.')):
1734 msng_list.append(fl)
1734 msng_list.append(fl)
1735 for x in sorted(msng_list):
1735 for x in sorted(msng_list):
1736 pfx = self.ui.verbose and ('D ') or ''
1736 pfx = self.ui.verbose and ('D ') or ''
1737 displayname(pfx, x, 'missing')
1737 displayname(pfx, x, 'missing')
1738
1738
1739 def issaveline(self, l):
1739 def issaveline(self, l):
1740 if l.name == '.hg.patches.save.line':
1740 if l.name == '.hg.patches.save.line':
1741 return True
1741 return True
1742
1742
1743 def qrepo(self, create=False):
1743 def qrepo(self, create=False):
1744 ui = self.ui.copy()
1744 ui = self.ui.copy()
1745 ui.setconfig('paths', 'default', '', overlay=False)
1745 ui.setconfig('paths', 'default', '', overlay=False)
1746 ui.setconfig('paths', 'default-push', '', overlay=False)
1746 ui.setconfig('paths', 'default-push', '', overlay=False)
1747 if create or os.path.isdir(self.join(".hg")):
1747 if create or os.path.isdir(self.join(".hg")):
1748 return hg.repository(ui, path=self.path, create=create)
1748 return hg.repository(ui, path=self.path, create=create)
1749
1749
def restore(self, repo, rev, delete=None, qupdate=None):
    """Restore queue state saved by qsave from changeset *rev*.

    The save changeset's description encodes the applied/series state
    (and, optionally, the queue repository's dirstate parents).  This
    parses that description, reinstates the in-memory queue state,
    optionally strips the save changeset (*delete*) and updates the
    versioned queue repository (*qupdate*).  Returns 1 on failure.
    """
    desc = repo[rev].description().strip()
    lines = desc.splitlines()
    i = 0
    datastart = None
    series = []
    applied = []
    qpp = None
    for i, line in enumerate(lines):
        if line == 'Patch Data:':
            # everything after this marker is serialized patch state
            datastart = i + 1
        elif line.startswith('Dirstate:'):
            # 'Dirstate: <hex-p1> <hex-p2>' -> queue repo parents
            l = line.rstrip()
            l = l[10:].split(' ')
            qpp = [bin(x) for x in l]
        elif datastart is not None:
            # applied entries look like '<hex-node>:<name>'; entries
            # with an empty node part belong to the series only.
            # NOTE(review): the leading ':' appears to be kept in the
            # series entry -- verify against save()'s ':%s\n' format.
            l = line.rstrip()
            n, name = l.split(':', 1)
            if n:
                applied.append(statusentry(bin(n), name))
            else:
                series.append(l)
    if datastart is None:
        self.ui.warn(_("No saved patch data found\n"))
        return 1
    self.ui.warn(_("restoring status: %s\n") % lines[0])
    self.fullseries = series
    self.applied = applied
    self.parseseries()
    self.seriesdirty = True
    self.applieddirty = True
    heads = repo.changelog.heads()
    if delete:
        if rev not in heads:
            # stripping would also remove descendants; refuse
            self.ui.warn(_("save entry has children, leaving it alone\n"))
        else:
            self.ui.warn(_("removing save entry %s\n") % short(rev))
            pp = repo.dirstate.parents()
            # only move the working directory if it sits on the
            # changeset being stripped
            if rev in pp:
                update = True
            else:
                update = False
            self.strip(repo, [rev], update=update, backup='strip')
    if qpp:
        self.ui.warn(_("saved queue repository parents: %s %s\n") %
                     (short(qpp[0]), short(qpp[1])))
        if qupdate:
            self.ui.status(_("updating queue directory\n"))
            r = self.qrepo()
            if not r:
                self.ui.warn(_("Unable to load queue repository\n"))
                return 1
            hg.clean(r, qpp[0])
1803
1803
def save(self, repo, msg=None):
    """Checkpoint the current queue state in a 'save' changeset.

    Serializes the applied/series state (plus the queue repository's
    dirstate parents, when one exists) into a commit message, commits
    it, and records the new node as a save line so restore() can find
    it later.  Returns 1 on failure.  The message layout here must
    stay in sync with the parser in restore().
    """
    if not self.applied:
        self.ui.warn(_("save: no patches applied, exiting\n"))
        return 1
    if self.issaveline(self.applied[-1]):
        # topmost entry is already a save line; nothing new to record
        self.ui.warn(_("status is already saved\n"))
        return 1

    if not msg:
        msg = _("hg patches saved state")
    else:
        msg = "hg patches: " + msg.rstrip('\r\n')
    r = self.qrepo()
    if r:
        # remember the queue repo's parents so restore() can put the
        # queue directory back on the right revision
        pp = r.dirstate.parents()
        msg += "\nDirstate: %s %s" % (hex(pp[0]), hex(pp[1]))
    msg += "\n\nPatch Data:\n"
    # applied entries serialize as '<hex-node>:<name>', series-only
    # entries as ':<name>' (see restore())
    msg += ''.join('%s\n' % x for x in self.applied)
    msg += ''.join(':%s\n' % x for x in self.fullseries)
    n = repo.commit(msg, force=True)
    if not n:
        self.ui.warn(_("repo commit failed\n"))
        return 1
    self.applied.append(statusentry(n, '.hg.patches.save.line'))
    self.applieddirty = True
    self.removeundo(repo)
1830
1830
def fullseriesend(self):
    """Return the fullseries index just past the topmost applied patch.

    Returns 0 when no patch is applied, and the full series length when
    the topmost applied patch cannot be located in the series file.
    """
    if not self.applied:
        return 0
    topname = self.applied[-1].name
    pos = self.findseries(topname)
    if pos is None:
        return len(self.fullseries)
    return pos + 1
1839
1839
def seriesend(self, all_patches=False):
    """If all_patches is False, return the index of the next pushable patch
    in the series, or the series length. If all_patches is True, return the
    index of the first patch past the last applied one.
    """
    end = 0
    def next(start):
        # NOTE: this inner helper shadows the builtin next(); kept
        # as-is for compatibility with the surrounding code.
        if all_patches or start >= len(self.series):
            return start
        # scan forward for the first unguarded (pushable) patch
        for i in xrange(start, len(self.series)):
            p, reason = self.pushable(i)
            if p:
                return i
            # tell the user why this guarded patch is being skipped
            self.explainpushable(i)
        return len(self.series)
    if self.applied:
        p = self.applied[-1].name
        try:
            end = self.series.index(p)
        except ValueError:
            # topmost applied patch is no longer in the series file
            return 0
        return next(end + 1)
    return next(end)
1863
1863
def appliedname(self, index):
    """Return the display name of the applied patch at *index*.

    In verbose mode the name is prefixed with its position in the
    series.
    """
    pname = self.applied[index].name
    if self.ui.verbose:
        return str(self.series.index(pname)) + " " + pname
    return pname
1871
1871
def qimport(self, repo, files, patchname=None, rev=None, existing=None,
            force=None, git=False):
    """Import patch files and/or existing revisions into the queue.

    *files* are patch files ('-' reads stdin, which requires
    *patchname*); *rev* places existing changesets under mq control.
    *existing* registers files already inside the patch directory,
    *force* overwrites name clashes, *git* selects git diff format for
    --rev imports.  Returns the list of imported patch names.
    """
    def checkseries(patchname):
        # refuse duplicate series entries
        if patchname in self.series:
            raise util.Abort(_('patch %s is already in the series file')
                             % patchname)

    if rev:
        if files:
            raise util.Abort(_('option "-r" not valid when importing '
                               'files'))
        rev = scmutil.revrange(repo, rev)
        # process newest-first so each revision's parent check works
        rev.sort(reverse=True)
    if (len(files) > 1 or len(rev) > 1) and patchname:
        raise util.Abort(_('option "-n" not valid when importing multiple '
                           'patches'))
    imported = []
    if rev:
        # If mq patches are applied, we can only import revisions
        # that form a linear path to qbase.
        # Otherwise, they should form a linear path to a head.
        heads = repo.changelog.heads(repo.changelog.node(rev[-1]))
        if len(heads) > 1:
            raise util.Abort(_('revision %d is the root of more than one '
                               'branch') % rev[-1])
        if self.applied:
            base = repo.changelog.node(rev[0])
            if base in [n.node for n in self.applied]:
                raise util.Abort(_('revision %d is already managed')
                                 % rev[0])
            if heads != [self.applied[-1].node]:
                raise util.Abort(_('revision %d is not the parent of '
                                   'the queue') % rev[0])
            base = repo.changelog.rev(self.applied[0].node)
            lastparent = repo.changelog.parentrevs(base)[0]
        else:
            if heads != [repo.changelog.node(rev[0])]:
                raise util.Abort(_('revision %d has unmanaged children')
                                 % rev[0])
            lastparent = None

        diffopts = self.diffopts({'git': git})
        for r in rev:
            # secret/immutable changesets cannot be rewritten into
            # patches
            if not repo[r].mutable():
                raise util.Abort(_('revision %d is not mutable') % r,
                                 hint=_('see "hg help phases" for details'))
            p1, p2 = repo.changelog.parentrevs(r)
            n = repo.changelog.node(r)
            if p2 != nullrev:
                raise util.Abort(_('cannot import merge revision %d') % r)
            if lastparent and lastparent != r:
                raise util.Abort(_('revision %d is not the parent of %d')
                                 % (r, lastparent))
            lastparent = p1

            if not patchname:
                patchname = normname('%d.diff' % r)
            checkseries(patchname)
            self.checkpatchname(patchname, force)
            # imported revisions go at the bottom of the stack
            self.fullseries.insert(0, patchname)

            patchf = self.opener(patchname, "w")
            cmdutil.export(repo, [n], fp=patchf, opts=diffopts)
            patchf.close()

            se = statusentry(n, patchname)
            self.applied.insert(0, se)

            self.added.append(patchname)
            imported.append(patchname)
            # only the first import may carry a user-supplied name
            patchname = None
        if rev and repo.ui.configbool('mq', 'secret', False):
            # if we added anything with --rev, we must move the secret root
            phases.retractboundary(repo, phases.secret, [n])
        self.parseseries()
        self.applieddirty = True
        self.seriesdirty = True

    for i, filename in enumerate(files):
        if existing:
            if filename == '-':
                raise util.Abort(_('-e is incompatible with import from -'))
            filename = normname(filename)
            self.checkreservedname(filename)
            originpath = self.join(filename)
            if not os.path.isfile(originpath):
                raise util.Abort(_("patch %s does not exist") % filename)

            if patchname:
                self.checkpatchname(patchname, force)

                self.ui.write(_('renaming %s to %s\n')
                              % (filename, patchname))
                util.rename(originpath, self.join(patchname))
            else:
                patchname = filename

        else:
            if filename == '-' and not patchname:
                raise util.Abort(_('need --name to import a patch from -'))
            elif not patchname:
                patchname = normname(os.path.basename(filename.rstrip('/')))
            self.checkpatchname(patchname, force)
            try:
                if filename == '-':
                    text = self.ui.fin.read()
                else:
                    fp = url.open(self.ui, filename)
                    text = fp.read()
                    fp.close()
            except (OSError, IOError):
                raise util.Abort(_("unable to read file %s") % filename)
            patchf = self.opener(patchname, "w")
            patchf.write(text)
            patchf.close()
        if not force:
            checkseries(patchname)
        if patchname not in self.series:
            # insert after the last applied patch, preserving the
            # order of the imported files
            index = self.fullseriesend() + i
            self.fullseries[index:index] = [patchname]
        self.parseseries()
        self.seriesdirty = True
        self.ui.warn(_("adding %s to series file\n") % patchname)
        self.added.append(patchname)
        imported.append(patchname)
        patchname = None

    self.removeundo(repo)
    return imported
2001
2001
def fixcheckopts(ui, opts):
    """Return *opts*, turning 'check' on when mq.check is configured.

    The options dict is returned untouched when the mq.check setting
    is off, or when --force/--exact were given (those override the
    configuration).  Otherwise a copy with check=True is returned so
    the caller's dict is never mutated.
    """
    configured = ui.configbool('mq', 'check')
    overridden = opts.get('force') or opts.get('exact')
    if not configured or overridden:
        return opts
    newopts = dict(opts)
    newopts['check'] = True
    return newopts
2009
2009
@command("qdelete|qremove|qrm",
         [('k', 'keep', None, _('keep patch file')),
          ('r', 'rev', [],
           _('stop managing a revision (DEPRECATED)'), _('REV'))],
         _('hg qdelete [-k] [PATCH]...'))
def delete(ui, repo, *patches, **opts):
    """remove patches from queue

    The patches must not be applied, and at least one patch is required. Exact
    patch identifiers must be given. With -k/--keep, the patch files are
    preserved in the patch directory.

    To stop managing a patch and move it into permanent history,
    use the :hg:`qfinish` command."""
    q = repo.mq
    # queue.delete validates that the patches are unapplied and updates
    # the in-memory series/status; persist that state before returning.
    q.delete(repo, patches, opts)
    q.savedirty()
    return 0
2028
2028
@command("qapplied",
         [('1', 'last', None, _('show only the preceding applied patch'))
          ] + seriesopts,
         _('hg qapplied [-1] [-s] [PATCH]'))
def applied(ui, repo, patch=None, **opts):
    """print the patches already applied

    Returns 0 on success."""

    q = repo.mq

    # end is one past the last entry to display
    if patch:
        if patch not in q.series:
            raise util.Abort(_("patch %s is not in series file") % patch)
        end = q.series.index(patch) + 1
    else:
        end = q.seriesend(True)

    start = 0
    if opts.get('last'):
        # -1/--last shows only the patch preceding the topmost one,
        # which needs at least two applied patches to exist
        if not end:
            ui.write(_("no patches applied\n"))
            return 1
        if end == 1:
            ui.write(_("only one patch applied\n"))
            return 1
        start = end - 2
        end = 1

    q.qseries(repo, length=end, start=start, status='A',
              summary=opts.get('summary'))
2061
2061
2062
2062
@command("qunapplied",
         [('1', 'first', None, _('show only the first patch'))] + seriesopts,
         _('hg qunapplied [-1] [-s] [PATCH]'))
def unapplied(ui, repo, patch=None, **opts):
    """print the patches not yet applied

    Returns 0 on success."""

    q = repo.mq

    # start is the index of the first entry to display
    if patch:
        if patch not in q.series:
            raise util.Abort(_("patch %s is not in series file") % patch)
        start = q.series.index(patch) + 1
    else:
        start = q.seriesend(True)

    if start == len(q.series) and opts.get('first'):
        ui.write(_("all patches applied\n"))
        return 1

    # -1/--first limits the listing to a single entry
    if opts.get('first'):
        length = 1
    else:
        length = None
    q.qseries(repo, start=start, length=length, status='U',
              summary=opts.get('summary'))
2086
2086
@command("qimport",
         [('e', 'existing', None, _('import file in patch directory')),
          ('n', 'name', '',
           _('name of patch file'), _('NAME')),
          ('f', 'force', None, _('overwrite existing files')),
          ('r', 'rev', [],
           _('place existing revisions under mq control'), _('REV')),
          ('g', 'git', None, _('use git extended diff format')),
          ('P', 'push', None, _('qpush after importing'))],
         _('hg qimport [-e] [-n NAME] [-f] [-g] [-P] [-r REV]... FILE...'))
def qimport(ui, repo, *filename, **opts):
    """import a patch or existing changeset

    The patch is inserted into the series after the last applied
    patch. If no patches have been applied, qimport prepends the patch
    to the series.

    The patch will have the same name as its source file unless you
    give it a new one with -n/--name.

    You can register an existing patch inside the patch directory with
    the -e/--existing flag.

    With -f/--force, an existing patch of the same name will be
    overwritten.

    An existing changeset may be placed under mq control with -r/--rev
    (e.g. qimport --rev tip -n patch will place tip under mq control).
    With -g/--git, patches imported with --rev will use the git diff
    format. See the diffs help topic for information on why this is
    important for preserving rename/copy information and permission
    changes. Use :hg:`qfinish` to remove changesets from mq control.

    To import a patch from standard input, pass - as the patch file.
    When importing from standard input, a patch name must be specified
    using the --name flag.

    To import an existing patch while renaming it::

      hg qimport -e existing-patch -n new-name

    Returns 0 if import succeeded.
    """
    lock = repo.lock() # cause this may move phase
    try:
        q = repo.mq
        try:
            imported = q.qimport(
                repo, filename, patchname=opts.get('name'),
                existing=opts.get('existing'), force=opts.get('force'),
                rev=opts.get('rev'), git=opts.get('git'))
        finally:
            # always persist whatever queue state qimport managed to
            # build, even when it aborted partway through
            q.savedirty()


        if imported and opts.get('push') and not opts.get('rev'):
            # -P/--push: push up to the last imported patch
            return q.push(repo, imported[-1])
    finally:
        lock.release()
    return 0
2147
2147
def qinit(ui, repo, create):
    """initialize a new queue repository

    This command also creates a series file for ordering patches, and
    an mq-specific .hgignore file in the queue repository, to exclude
    the status and guards files (these contain mostly transient state).

    Returns 0 if initialization succeeded."""
    q = repo.mq
    r = q.init(repo, create)
    q.savedirty()
    if not r:
        # no versioned queue repository was created; nothing to seed
        return 0
    if not os.path.exists(r.wjoin('.hgignore')):
        fp = r.wopener('.hgignore', 'w')
        fp.write('^\\.hg\n'
                 '^\\.mq\n'
                 'syntax: glob\n'
                 'status\n'
                 'guards\n')
        fp.close()
    if not os.path.exists(r.wjoin('series')):
        # an empty series file keeps later queue operations happy
        r.wopener('series', 'w').close()
    r[None].add(['.hgignore', 'series'])
    commands.add(ui, r)
    return 0
2173
2173
@command("^qinit",
         [('c', 'create-repo', None, _('create queue repository'))],
         _('hg qinit [-c]'))
def init(ui, repo, **opts):
    """init a new queue repository (DEPRECATED)

    The queue repository is unversioned by default. If
    -c/--create-repo is specified, qinit will create a separate nested
    repository for patches (qinit -c may also be run later to convert
    an unversioned patch repository into a versioned one). You can use
    qcommit to commit changes to this queue repository.

    This command is deprecated. Without -c, it's implied by other relevant
    commands. With -c, use :hg:`init --mq` instead."""
    # thin wrapper; the command-line flag 'create-repo' arrives as the
    # key 'create_repo' after option parsing
    return qinit(ui, repo, create=opts.get('create_repo'))
2189
2189
@command("qclone",
         [('', 'pull', None, _('use pull protocol to copy metadata')),
          ('U', 'noupdate', None,
           _('do not update the new working directories')),
          ('', 'uncompressed', None,
           _('use uncompressed transfer (fast over LAN)')),
          ('p', 'patches', '',
           _('location of source patch repository'), _('REPO')),
         ] + commands.remoteopts,
         _('hg qclone [OPTION]... SOURCE [DEST]'))
def clone(ui, source, dest=None, **opts):
    '''clone main and patch repository at same time

    If source is local, destination will have no patches applied. If
    source is remote, this command can not check if patches are
    applied in source, so cannot guarantee that patches are not
    applied in destination. If you clone remote repository, be sure
    before that it has no patches applied.

    Source patch repository is looked for in <src>/.hg/patches by
    default. Use -p <url> to change.

    The patch directory must be a nested Mercurial repository, as
    would be created by :hg:`init --mq`.

    Return 0 on success.
    '''
    def patchdir(repo):
        """compute a patch repo url from a repo object"""
        url = repo.url()
        if url.endswith('/'):
            url = url[:-1]
        return url + '/.hg/patches'

    # main repo (destination and sources)
    if dest is None:
        dest = hg.defaultdest(source)
    sr = hg.repository(hg.remoteui(ui, opts), ui.expandpath(source))

    # patches repo (source only)
    if opts.get('patches'):
        patchespath = ui.expandpath(opts.get('patches'))
    else:
        patchespath = patchdir(sr)
    try:
        hg.repository(ui, patchespath)
    except error.RepoError:
        raise util.Abort(_('versioned patch repository not found'
                           ' (see init --mq)'))
    qbase, destrev = None, None
    if sr.local():
        # Fix: the phase test previously read sr[qbase] while qbase was
        # still None, so it inspected the working-directory context
        # rather than the oldest applied patch; check that patch's
        # changeset instead.
        if (sr.mq.applied and
            sr[sr.mq.applied[0].node].phase() != phases.secret):
            qbase = sr.mq.applied[0].node
            if not hg.islocal(dest):
                # remote destination: clone only up to qbase's parent
                # so applied patches never leave the source repo
                heads = set(sr.heads())
                destrev = list(heads.difference(sr.heads(qbase)))
                destrev.append(sr.changelog.parents(qbase)[0])
    elif sr.capable('lookup'):
        try:
            qbase = sr.lookup('qbase')
        except error.RepoError:
            pass

    ui.note(_('cloning main repository\n'))
    sr, dr = hg.clone(ui, opts, sr.url(), dest,
                      pull=opts.get('pull'),
                      rev=destrev,
                      update=False,
                      stream=opts.get('uncompressed'))

    ui.note(_('cloning patch repository\n'))
    hg.clone(ui, opts, opts.get('patches') or patchdir(sr), patchdir(dr),
             pull=opts.get('pull'), update=not opts.get('noupdate'),
             stream=opts.get('uncompressed'))

    if dr.local():
        if qbase:
            # a local destination may still contain the applied
            # patches; strip them so it starts with a clean queue
            ui.note(_('stripping applied patches from destination '
                      'repository\n'))
            dr.mq.strip(dr, [qbase], update=False, backup=None)
        if not opts.get('noupdate'):
            ui.note(_('updating destination repository\n'))
            hg.update(dr, dr.changelog.tip())
2273
2273
@command("qcommit|qci",
         commands.table["^commit|ci"][1],
         _('hg qcommit [OPTION]... [FILE]...'))
def commit(ui, repo, *pats, **opts):
    """commit changes in the queue repository (DEPRECATED)

    This command is deprecated; use :hg:`commit --mq` instead."""
    # Commit into the versioned patch repository, not the main repo.
    queuerepo = repo.mq.qrepo()
    if not queuerepo:
        raise util.Abort('no queue repository')
    commands.commit(queuerepo.ui, queuerepo, *pats, **opts)
2286
2286
@command("qseries",
         [('m', 'missing', None, _('print patches not in series')),
          ] + seriesopts,
         _('hg qseries [-ms]'))
def series(ui, repo, **opts):
    """print the entire series file

    Returns 0 on success."""
    q = repo.mq
    # Delegate formatting to the queue object; -m/-s just toggle what is shown.
    q.qseries(repo, missing=opts.get('missing'),
              summary=opts.get('summary'))
    return 0
2298
2298
@command("qtop", seriesopts, _('hg qtop [-s]'))
def top(ui, repo, **opts):
    """print the name of the current patch

    Returns 0 on success."""
    q = repo.mq
    # Index just past the last applied patch; 0 when nothing is applied.
    end = q.applied and q.seriesend(True) or 0
    if not end:
        ui.write(_("no patches applied\n"))
        return 1
    q.qseries(repo, start=end - 1, length=1, status='A',
              summary=opts.get('summary'))
2312
2312
@command("qnext", seriesopts, _('hg qnext [-s]'))
def next(ui, repo, **opts):
    """print the name of the next pushable patch

    Returns 0 on success."""
    q = repo.mq
    pos = q.seriesend()
    # seriesend() == len(series) means every patch is already applied.
    if pos == len(q.series):
        ui.write(_("all patches applied\n"))
        return 1
    q.qseries(repo, start=pos, length=1, summary=opts.get('summary'))
2324
2324
@command("qprev", seriesopts, _('hg qprev [-s]'))
def prev(ui, repo, **opts):
    """print the name of the preceding applied patch

    Returns 0 on success."""
    q = repo.mq
    napplied = len(q.applied)
    # The two guards are mutually exclusive, so their order is irrelevant.
    if not napplied:
        ui.write(_("no patches applied\n"))
        return 1
    if napplied == 1:
        ui.write(_("only one patch applied\n"))
        return 1
    # Second-to-last applied patch is the "previous" one.
    idx = q.series.index(q.applied[-2].name)
    q.qseries(repo, start=idx, length=1, status='A',
              summary=opts.get('summary'))
2341
2341
def setupheaderopts(ui, opts):
    """Resolve -U/--currentuser and -D/--currentdate into concrete values.

    Fills opts['user'] / opts['date'] in place; an explicitly supplied
    -u/--user or -d/--date always wins over the "current" flags.
    """
    if opts.get('currentuser') and not opts.get('user'):
        opts['user'] = ui.username()
    if opts.get('currentdate') and not opts.get('date'):
        opts['date'] = "%d %d" % util.makedate()
2347
2347
@command("^qnew",
         [('e', 'edit', None, _('edit commit message')),
          ('f', 'force', None, _('import uncommitted changes (DEPRECATED)')),
          ('g', 'git', None, _('use git extended diff format')),
          ('U', 'currentuser', None, _('add "From: <current user>" to patch')),
          ('u', 'user', '',
           _('add "From: <USER>" to patch'), _('USER')),
          ('D', 'currentdate', None, _('add "Date: <current date>" to patch')),
          ('d', 'date', '',
           _('add "Date: <DATE>" to patch'), _('DATE'))
          ] + commands.walkopts + commands.commitopts,
         _('hg qnew [-e] [-m TEXT] [-l FILE] PATCH [FILE]...'))
def new(ui, repo, patch, *args, **opts):
    """create a new patch

    qnew creates a new patch on top of the currently-applied patch (if
    any). The patch will be initialized with any outstanding changes
    in the working directory. You may also use -I/--include,
    -X/--exclude, and/or a list of files after the patch name to add
    only changes to matching files to the new patch, leaving the rest
    as uncommitted modifications.

    -u/--user and -d/--date can be used to set the (given) user and
    date, respectively. -U/--currentuser and -D/--currentdate set user
    to current user and date to current date.

    -e/--edit, -m/--message or -l/--logfile set the patch header as
    well as the commit message. If none is specified, the header is
    empty and the commit message is '[mq]: PATCH'.

    Use the -g/--git option to keep the patch in the git extended diff
    format. Read the diffs help topic for more information on why this
    is important for preserving permission changes and copy/rename
    information.

    Returns 0 on successful creation of a new patch.
    """
    msg = cmdutil.logmessage(ui, opts)
    def getmsg():
        # Deferred: only invoked by q.new, so the editor is started after
        # qnew's own validation has passed.
        return ui.edit(msg, opts.get('user') or ui.username())
    q = repo.mq
    # q.new accepts either a message string or a callable producing one.
    # (The original assigned opts['msg'] = msg unconditionally and then
    # immediately reassigned it on both branches; the dead store is removed.)
    if opts.get('edit'):
        opts['msg'] = getmsg
    else:
        opts['msg'] = msg
    setupheaderopts(ui, opts)
    q.new(repo, patch, *args, **opts)
    q.savedirty()
    return 0
2398
2398
@command("^qrefresh",
         [('e', 'edit', None, _('edit commit message')),
          ('g', 'git', None, _('use git extended diff format')),
          ('s', 'short', None,
           _('refresh only files already in the patch and specified files')),
          ('U', 'currentuser', None,
           _('add/update author field in patch with current user')),
          ('u', 'user', '',
           _('add/update author field in patch with given user'), _('USER')),
          ('D', 'currentdate', None,
           _('add/update date field in patch with current date')),
          ('d', 'date', '',
           _('add/update date field in patch with given date'), _('DATE'))
          ] + commands.walkopts + commands.commitopts,
         _('hg qrefresh [-I] [-X] [-e] [-m TEXT] [-l FILE] [-s] [FILE]...'))
def refresh(ui, repo, *pats, **opts):
    """update the current patch

    If any file patterns are provided, the refreshed patch will
    contain only the modifications that match those patterns; the
    remaining modifications will remain in the working directory.

    If -s/--short is specified, files currently included in the patch
    will be refreshed just like matched files and remain in the patch.

    If -e/--edit is specified, Mercurial will start your configured editor for
    you to enter a message. In case qrefresh fails, you will find a backup of
    your message in ``.hg/last-message.txt``.

    hg add/remove/copy/rename work as usual, though you might want to
    use git-style patches (-g/--git or [diff] git=1) to track copies
    and renames. See the diffs help topic for more information on the
    git diff format.

    Returns 0 on success.
    """
    q = repo.mq
    # Message from -m/-l, if any; empty string otherwise.
    message = cmdutil.logmessage(ui, opts)
    if opts.get('edit'):
        if not q.applied:
            # Nothing to refresh, so nothing to edit either.
            ui.write(_("no patches applied\n"))
            return 1
        if message:
            # -e would silently discard the -m/-l text; refuse instead.
            raise util.Abort(_('option "-e" incompatible with "-m" or "-l"'))
        # Seed the editor with the topmost patch's existing header message.
        patch = q.applied[-1].name
        ph = patchheader(q.join(patch), q.plainmode)
        message = ui.edit('\n'.join(ph.message), ph.user or ui.username())
        # We don't want to lose the patch message if qrefresh fails (issue2062)
        repo.savecommitmessage(message)
    # Resolve -U/-D into concrete user/date values before refreshing.
    setupheaderopts(ui, opts)
    # The refresh rewrites the patch file and dirstate; hold the working
    # directory lock for the whole operation.
    wlock = repo.wlock()
    try:
        ret = q.refresh(repo, pats, msg=message, **opts)
        q.savedirty()
        return ret
    finally:
        wlock.release()
2456
2456
@command("^qdiff",
         commands.diffopts + commands.diffopts2 + commands.walkopts,
         _('hg qdiff [OPTION]... [FILE]...'))
def diff(ui, repo, *pats, **opts):
    """diff of the current patch and subsequent modifications

    Shows a diff which includes the current patch as well as any
    changes which have been made in the working directory since the
    last refresh (thus showing what the current patch would become
    after a qrefresh).

    Use :hg:`diff` if you only want to see the changes made since the
    last qrefresh, or :hg:`export qtip` if you want to see changes
    made by the current patch without including changes made since the
    qrefresh.

    Returns 0 on success.
    """
    # All of the work (including the "no patches applied" case) lives in
    # the queue object's diff implementation.
    q = repo.mq
    q.diff(repo, pats, opts)
    return 0
2477
2477
@command('qfold',
         [('e', 'edit', None, _('edit patch header')),
          ('k', 'keep', None, _('keep folded patch files')),
          ] + commands.commitopts,
         _('hg qfold [-e] [-k] [-m TEXT] [-l FILE] PATCH...'))
def fold(ui, repo, *files, **opts):
    """fold the named patches into the current patch

    Patches must not yet be applied. Each patch will be successively
    applied to the current patch in the order given. If all the
    patches apply successfully, the current patch will be refreshed
    with the new cumulative patch, and the folded patches will be
    deleted. With -k/--keep, the folded patch files will not be
    removed afterwards.

    The header for each folded patch will be concatenated with the
    current patch header, separated by a line of ``* * *``.

    Returns 0 on success."""
    q = repo.mq
    if not files:
        raise util.Abort(_('qfold requires at least one patch name'))
    if not q.checktoppatch(repo)[0]:
        raise util.Abort(_('no patches applied'))
    q.checklocalchanges(repo)

    message = cmdutil.logmessage(ui, opts)
    if opts.get('edit'):
        if message:
            # -e would discard the -m/-l message; refuse the combination.
            raise util.Abort(_('option "-e" incompatible with "-m" or "-l"'))

    parent = q.lookup('qtip')
    patches = []
    messages = []
    # Resolve and validate every named patch before touching anything.
    for f in files:
        p = q.lookup(f)
        if p in patches or p == parent:
            # NOTE(review): duplicates are warned about but still appended
            # to `patches` below — apparently intentional best-effort.
            ui.warn(_('Skipping already folded patch %s\n') % p)
        if q.isapplied(p):
            raise util.Abort(_('qfold cannot fold already applied patch %s')
                             % p)
        patches.append(p)

    for p in patches:
        if not message:
            # No explicit message: collect each folded patch's header text
            # so it can be appended to the parent's message later.
            ph = patchheader(q.join(p), q.plainmode)
            if ph.message:
                messages.append(ph.message)
        pf = q.join(p)
        # Apply the patch file on top of the working directory.
        # (This rebinds `files` from the *args tuple to q.patch's file
        # list; the original parameter is no longer needed at this point.)
        (patchsuccess, files, fuzz) = q.patch(repo, pf)
        if not patchsuccess:
            raise util.Abort(_('error folding patch %s') % p)

    if not message:
        # Build the combined message: parent header first, then each folded
        # patch's message separated by '* * *' lines.
        ph = patchheader(q.join(parent), q.plainmode)
        message, user = ph.message, ph.user
        for msg in messages:
            message.append('* * *')
            message.extend(msg)
        message = '\n'.join(message)

    if opts.get('edit'):
        # -e without -m/-l: `user` was bound in the branch above, since
        # `message` was necessarily empty at that point.
        message = ui.edit(message, user or ui.username())

    diffopts = q.patchopts(q.diffopts(), *patches)
    # Refresh the parent with the cumulative changes and drop the folded
    # patches under the working-directory lock.
    wlock = repo.wlock()
    try:
        q.refresh(repo, msg=message, git=diffopts.git)
        q.delete(repo, patches, opts)
        q.savedirty()
    finally:
        wlock.release()
2550
2550
@command("qgoto",
         [('c', 'check', None, _('tolerate non-conflicting local changes')),
          ('f', 'force', None, _('overwrite any local changes')),
          ('', 'no-backup', None, _('do not save backup copies of files'))],
         _('hg qgoto [OPTION]... PATCH'))
def goto(ui, repo, patch, **opts):
    '''push or pop patches until named patch is at top of stack

    Returns 0 on success.'''
    opts = fixcheckopts(ui, opts)
    q = repo.mq
    patch = q.lookup(patch)
    nobackup = opts.get('no_backup')
    check = opts.get('check')
    # If the target is already applied we pop down to it; otherwise we
    # push up to it. Both take the same keyword arguments.
    mover = q.isapplied(patch) and q.pop or q.push
    ret = mover(repo, patch, force=opts.get('force'), nobackup=nobackup,
                check=check)
    q.savedirty()
    return ret
2573
2573
@command("qguard",
         [('l', 'list', None, _('list all patches and guards')),
          ('n', 'none', None, _('drop all guards'))],
         _('hg qguard [-l] [-n] [PATCH] [-- [+GUARD]... [-GUARD]...]'))
def guard(ui, repo, *args, **opts):
    '''set or print guards for a patch

    Guards control whether a patch can be pushed. A patch with no
    guards is always pushed. A patch with a positive guard ("+foo") is
    pushed only if the :hg:`qselect` command has activated it. A patch with
    a negative guard ("-foo") is never pushed if the :hg:`qselect` command
    has activated it.

    With no arguments, print the currently active guards.
    With arguments, set guards for the named patch.

    .. note::
       Specifying negative guards now requires '--'.

    To set guards on another patch::

      hg qguard other.patch -- +2.6.17 -stable

    Returns 0 on success.
    '''
    def status(idx):
        # Print one series entry with its guards, colorized by state.
        # Uses `q` and `applied` from the enclosing scope (bound below,
        # before any call site).
        guards = q.seriesguards[idx] or ['unguarded']
        if q.series[idx] in applied:
            state = 'applied'
        elif q.pushable(idx)[0]:
            state = 'unapplied'
        else:
            state = 'guarded'
        label = 'qguard.patch qguard.%s qseries.%s' % (state, state)
        ui.write('%s: ' % ui.label(q.series[idx], label))

        for i, guard in enumerate(guards):
            if guard.startswith('+'):
                ui.write(guard, label='qguard.positive')
            elif guard.startswith('-'):
                ui.write(guard, label='qguard.negative')
            else:
                ui.write(guard, label='qguard.unguarded')
            if i != len(guards) - 1:
                # Space-separate guards, but not after the last one.
                ui.write(' ')
        ui.write('\n')
    q = repo.mq
    applied = set(p.name for p in q.applied)
    patch = None
    args = list(args)
    if opts.get('list'):
        # -l lists everything and is incompatible with any other usage.
        if args or opts.get('none'):
            raise util.Abort(_('cannot mix -l/--list with options or '
                               'arguments'))
        for i in xrange(len(q.series)):
            status(i)
        return
    # No patch name given (first arg is a +/- guard, or no args at all):
    # operate on the topmost applied patch.
    if not args or args[0][0:1] in '-+':
        if not q.applied:
            raise util.Abort(_('no patches applied'))
        patch = q.applied[-1].name
    # Otherwise the first non-guard argument names the patch.
    if patch is None and args[0][0:1] not in '-+':
        patch = args.pop(0)
    if patch is None:
        raise util.Abort(_('no patch to work with'))
    if args or opts.get('none'):
        # Remaining args are the new guard list (-n clears it to empty).
        idx = q.findseries(patch)
        if idx is None:
            raise util.Abort(_('no patch named %s') % patch)
        q.setguards(idx, args)
        q.savedirty()
    else:
        # No guards supplied: just print the patch's current guards.
        status(q.series.index(q.lookup(patch)))
2647
2647
@command("qheader", [], _('hg qheader [PATCH]'))
def header(ui, repo, patch=None):
    """print the header of the topmost or specified patch

    Returns 0 on success."""
    q = repo.mq

    # Default to the topmost applied patch when none is named.
    if not patch:
        if not q.applied:
            ui.write(_('no patches applied\n'))
            return 1
        name = q.lookup('qtip')
    else:
        name = q.lookup(patch)
    ph = patchheader(q.join(name), q.plainmode)

    ui.write('\n'.join(ph.message) + '\n')
2665
2665
def lastsavename(path):
    """Find the most recent save of ``path``, i.e. the sibling file
    ``<base>.N`` with the largest integer N.

    Returns a ``(fullpath, N)`` tuple, or ``(None, None)`` when no save
    file exists in path's directory.
    """
    (directory, base) = os.path.split(path)
    names = os.listdir(directory)
    # Escape the base name so regex metacharacters in it are matched
    # literally (the original pattern "%s.([0-9]+)" let '.' match any
    # character and could even fail to compile for names containing
    # e.g. '('), and anchor the suffix so "base.1.orig" is not counted.
    namere = re.compile(r"%s\.([0-9]+)$" % re.escape(base))
    maxindex = None
    maxname = None
    for f in names:
        m = namere.match(f)
        if m:
            index = int(m.group(1))
            if maxindex is None or index > maxindex:
                maxindex = index
                maxname = f
    if maxname:
        return (os.path.join(directory, maxname), maxindex)
    return (None, None)
2682
2682
def savename(path):
    """Return the next unused save-file name for ``path`` (``<path>.N``)."""
    prev, idx = lastsavename(path)
    if prev is None:
        # No existing save: start numbering from 1.
        idx = 0
    return path + ".%d" % (idx + 1)
2689
2689
@command("^qpush",
         [('c', 'check', None, _('tolerate non-conflicting local changes')),
          ('f', 'force', None, _('apply on top of local changes')),
          ('e', 'exact', None,
           _('apply the target patch to its recorded parent')),
          ('l', 'list', None, _('list patch name in commit text')),
          ('a', 'all', None, _('apply all patches')),
          ('m', 'merge', None, _('merge from another queue (DEPRECATED)')),
          ('n', 'name', '',
           _('merge queue name (DEPRECATED)'), _('NAME')),
          ('', 'move', None,
           _('reorder patch series and apply only the patch')),
          ('', 'no-backup', None, _('do not save backup copies of files'))],
         _('hg qpush [-f] [-l] [-a] [--move] [PATCH | INDEX]'))
def push(ui, repo, patch=None, **opts):
    """push the next patch onto the stack

    By default, abort if the working directory contains uncommitted
    changes. With -c/--check, abort only if the uncommitted files
    overlap with patched files. With -f/--force, backup and patch over
    uncommitted changes.

    Return 0 on success.
    """
    q = repo.mq
    mergeq = None

    opts = fixcheckopts(ui, opts)
    if opts.get('merge'):
        # Deprecated merge mode: push on top of another (saved) queue.
        queuename = opts.get('name')
        if queuename:
            newpath = repo.join(queuename)
        else:
            newpath, i = lastsavename(q.path)
        if not newpath:
            ui.warn(_("no saved queues found, please use -n\n"))
            return 1
        mergeq = queue(ui, repo.path, newpath)
        ui.warn(_("merging with queue at: %s\n") % mergeq.path)
    # Delegate the actual work to the queue object; its return value is
    # the command's exit status.
    return q.push(repo, patch, force=opts.get('force'),
                  list=opts.get('list'), mergeq=mergeq,
                  all=opts.get('all'), move=opts.get('move'),
                  exact=opts.get('exact'),
                  nobackup=opts.get('no_backup'),
                  check=opts.get('check'))

@command("^qpop",
         [('a', 'all', None, _('pop all patches')),
          ('n', 'name', '',
           _('queue name to pop (DEPRECATED)'), _('NAME')),
          ('c', 'check', None, _('tolerate non-conflicting local changes')),
          ('f', 'force', None, _('forget any local changes to patched files')),
          ('', 'no-backup', None, _('do not save backup copies of files'))],
         _('hg qpop [-a] [-f] [PATCH | INDEX]'))
def pop(ui, repo, patch=None, **opts):
    """pop the current patch off the stack

    Without argument, pops off the top of the patch stack. If given a
    patch name, keeps popping off patches until the named patch is at
    the top of the stack.

    By default, abort if the working directory contains uncommitted
    changes. With -c/--check, abort only if the uncommitted files
    overlap with patched files. With -f/--force, backup and discard
    changes made to such files.

    Return 0 on success.
    """
    opts = fixcheckopts(ui, opts)
    queuename = opts.get('name')
    if queuename:
        # Deprecated: pop from a named (saved) queue; in that case the
        # working directory must not be touched.
        q = queue(ui, repo.path, repo.join(queuename))
        ui.warn(_('using patch queue: %s\n') % q.path)
        localupdate = False
    else:
        q = repo.mq
        localupdate = True
    ret = q.pop(repo, patch, force=opts.get('force'), update=localupdate,
                all=opts.get('all'), nobackup=opts.get('no_backup'),
                check=opts.get('check'))
    q.savedirty()
    return ret

@command("qrename|qmv", [], _('hg qrename PATCH1 [PATCH2]'))
def rename(ui, repo, patch, name=None, **opts):
    """rename a patch

    With one argument, renames the current patch to PATCH1.
    With two arguments, renames PATCH1 to PATCH2.

    Returns 0 on success."""
    q = repo.mq
    if not name:
        # Single argument: rename the topmost applied patch.
        name = patch
        patch = None

    if patch:
        patch = q.lookup(patch)
    else:
        if not q.applied:
            ui.write(_('no patches applied\n'))
            return
        patch = q.lookup('qtip')
    absdest = q.join(name)
    if os.path.isdir(absdest):
        # Renaming into a directory keeps the patch's base name.
        name = normname(os.path.join(name, os.path.basename(patch)))
        absdest = q.join(name)
    q.checkpatchname(name)

    ui.note(_('renaming %s to %s\n') % (patch, name))
    # Rewrite the series entry, carrying over any guards.
    idx = q.findseries(patch)
    guards = q.guard_re.findall(q.fullseries[idx])
    q.fullseries[idx] = name + ''.join(' #' + g for g in guards)
    q.parseseries()
    q.seriesdirty = True

    # If the patch is applied, update the status entry as well.
    info = q.isapplied(patch)
    if info:
        q.applied[info[0]] = statusentry(info[1], name)
        q.applieddirty = True

    destdir = os.path.dirname(absdest)
    if not os.path.isdir(destdir):
        os.makedirs(destdir)
    util.rename(q.join(patch), absdest)
    mqrepo = q.qrepo()
    if mqrepo and patch in mqrepo.dirstate:
        # Record the rename in the versioned patch repository too.
        wctx = mqrepo[None]
        wlock = mqrepo.wlock()
        try:
            if mqrepo.dirstate[patch] == 'a':
                mqrepo.dirstate.drop(patch)
                mqrepo.dirstate.add(name)
            else:
                wctx.copy(patch, name)
                wctx.forget([patch])
        finally:
            wlock.release()

    q.savedirty()

@command("qrestore",
         [('d', 'delete', None, _('delete save entry')),
          ('u', 'update', None, _('update queue working directory'))],
         _('hg qrestore [-d] [-u] REV'))
def restore(ui, repo, rev, **opts):
    """restore the queue state saved by a revision (DEPRECATED)

    This command is deprecated, use :hg:`rebase` instead."""
    q = repo.mq
    # Resolve the user-supplied revision to a node before delegating.
    node = repo.lookup(rev)
    q.restore(repo, node, delete=opts.get('delete'),
              qupdate=opts.get('update'))
    q.savedirty()
    return 0

@command("qsave",
         [('c', 'copy', None, _('copy patch directory')),
          ('n', 'name', '',
           _('copy directory name'), _('NAME')),
          ('e', 'empty', None, _('clear queue status file')),
          ('f', 'force', None, _('force copy'))] + commands.commitopts,
         _('hg qsave [-m TEXT] [-l FILE] [-c] [-n NAME] [-e] [-f]'))
def save(ui, repo, **opts):
    """save current queue state (DEPRECATED)

    This command is deprecated, use :hg:`rebase` instead."""
    q = repo.mq
    message = cmdutil.logmessage(ui, opts)
    ret = q.save(repo, msg=message)
    if ret:
        return ret
    # Persist queue state to .hg/patches before any copy is made.
    q.savedirty()
    if opts.get('copy'):
        path = q.path
        if opts.get('name'):
            newpath = os.path.join(q.basepath, opts.get('name'))
            if os.path.exists(newpath):
                # An existing file is fatal; an existing directory is
                # only overwritten with --force.
                if not os.path.isdir(newpath):
                    raise util.Abort(_('destination %s exists and is not '
                                       'a directory') % newpath)
                if not opts.get('force'):
                    raise util.Abort(_('destination %s exists, '
                                       'use -f to force') % newpath)
        else:
            # No explicit name: pick the next free "path.N" save name.
            newpath = savename(path)
        ui.warn(_("copy %s to %s\n") % (path, newpath))
        util.copyfiles(path, newpath)
    if opts.get('empty'):
        del q.applied[:]
        q.applieddirty = True
        q.savedirty()
    return 0

@command("strip",
         [
          ('r', 'rev', [], _('strip specified revision (optional, '
                             'can specify revisions without this '
                             'option)'), _('REV')),
          ('f', 'force', None, _('force removal of changesets, discard '
                                 'uncommitted changes (no backup)')),
          ('b', 'backup', None, _('bundle only changesets with local revision'
                                  ' number greater than REV which are not'
                                  ' descendants of REV (DEPRECATED)')),
          ('', 'no-backup', None, _('no backups')),
          ('', 'nobackup', None, _('no backups (DEPRECATED)')),
          ('n', '', None, _('ignored (DEPRECATED)')),
          ('k', 'keep', None, _("do not modify working copy during strip"))],
         _('hg strip [-k] [-f] [-n] REV...'))
def strip(ui, repo, *revs, **opts):
    """strip changesets and all their descendants from the repository

    The strip command removes the specified changesets and all their
    descendants. If the working directory has uncommitted changes, the
    operation is aborted unless the --force flag is supplied, in which
    case changes will be discarded.

    If a parent of the working directory is stripped, then the working
    directory will automatically be updated to the most recent
    available ancestor of the stripped parent after the operation
    completes.

    Any stripped changesets are stored in ``.hg/strip-backup`` as a
    bundle (see :hg:`help bundle` and :hg:`help unbundle`). They can
    be restored by running :hg:`unbundle .hg/strip-backup/BUNDLE`,
    where BUNDLE is the bundle file created by the strip. Note that
    the local revision numbers will in general be different after the
    restore.

    Use the --no-backup option to discard the backup bundle once the
    operation completes.

    Return 0 on success.
    """
    # Decide what to back up: everything, only the stripped part
    # (deprecated -b), or nothing.
    if opts.get('backup'):
        backup = 'strip'
    elif opts.get('no_backup') or opts.get('nobackup'):
        backup = 'none'
    else:
        backup = 'all'

    cl = repo.changelog
    # Positional REVs and -r/--rev values are treated the same way.
    revs = set(scmutil.revrange(repo, list(revs) + opts.get('rev')))
    if not revs:
        raise util.Abort(_('empty revision set'))

    descendants = set(cl.descendants(*revs))
    tostrip = revs.union(descendants)
    roots = revs.difference(descendants)

    # If a working-directory parent is being stripped we will need to
    # update away to an earlier revision.
    update = any(p != nullid and cl.rev(p) in tostrip
                 for p in repo.dirstate.parents())

    rootnodes = set(cl.node(r) for r in roots)

    q = repo.mq
    if q.applied:
        # Refresh queue state if we are about to strip applied patches.
        if cl.rev(repo.lookup('qtip')) in tostrip:
            q.applieddirty = True
            start = 0
            end = len(q.applied)
            for i, entry in enumerate(q.applied):
                if entry.node in rootnodes:
                    # A stripped root is itself an applied patch, so
                    # only the tail of the queue is stripped.
                    start = i
                    break
            del q.applied[start:end]
            q.savedirty()

    revs = list(rootnodes)
    if update and opts.get('keep'):
        # --keep: leave the working copy alone and just rebuild the
        # dirstate at the revision that survives the strip.
        wlock = repo.wlock()
        try:
            urev = repo.mq.qparents(repo, revs[0])
            repo.dirstate.rebuild(urev, repo[urev].manifest())
            repo.dirstate.write()
            update = False
        finally:
            wlock.release()

    repo.mq.strip(repo, revs, backup=backup, update=update,
                  force=opts.get('force'))
    return 0

@command("qselect",
         [('n', 'none', None, _('disable all guards')),
          ('s', 'series', None, _('list all guards in series file')),
          ('', 'pop', None, _('pop to before first guarded applied patch')),
          ('', 'reapply', None, _('pop, then reapply patches'))],
         _('hg qselect [OPTION]... [GUARD]...'))
def select(ui, repo, *args, **opts):
    '''set or print guarded patches to push

    Use the :hg:`qguard` command to set or print guards on patch, then use
    qselect to tell mq which guards to use. A patch will be pushed if
    it has no guards or any positive guards match the currently
    selected guard, but will not be pushed if any negative guards
    match the current guard. For example::

        qguard foo.patch -- -stable    (negative guard)
        qguard bar.patch    +stable    (positive guard)
        qselect stable

    This activates the "stable" guard. mq will skip foo.patch (because
    it has a negative match) but push bar.patch (because it has a
    positive match).

    With no arguments, prints the currently active guards.
    With one argument, sets the active guard.

    Use -n/--none to deactivate guards (no other arguments needed).
    When no guards are active, patches with positive guards are
    skipped and patches with negative guards are pushed.

    qselect can change the guards on applied patches. It does not pop
    guarded patches by default. Use --pop to pop back to the last
    applied patch that is not guarded. Use --reapply (which implies
    --pop) to push back to the current patch afterwards, but skip
    guarded patches.

    Use -s/--series to print a list of all guards in the series file
    (no other arguments needed). Use -v for more information.

    Returns 0 on success.'''

    q = repo.mq
    guards = q.active()
    if args or opts.get('none'):
        # Snapshot queue state before changing the active guards so we
        # can report what the change affected.
        prevunapplied = q.unapplied(repo)
        prevguarded = [i for i in xrange(len(q.applied))
                       if not q.pushable(i)[0]]
        q.setactive(args)
        q.savedirty()
        if not args:
            ui.status(_('guards deactivated\n'))
        if not opts.get('pop') and not opts.get('reapply'):
            unapplied = q.unapplied(repo)
            guarded = [i for i in xrange(len(q.applied))
                       if not q.pushable(i)[0]]
            if len(unapplied) != len(prevunapplied):
                ui.status(_('number of unguarded, unapplied patches has '
                            'changed from %d to %d\n') %
                          (len(prevunapplied), len(unapplied)))
            if len(guarded) != len(prevguarded):
                ui.status(_('number of guarded, applied patches has changed '
                            'from %d to %d\n') %
                          (len(prevguarded), len(guarded)))
    elif opts.get('series'):
        # Tally how often each guard occurs in the series file.
        guards = {}
        noguards = 0
        for gs in q.seriesguards:
            if not gs:
                noguards += 1
            for g in gs:
                guards[g] = guards.get(g, 0) + 1
        if ui.verbose:
            guards['NONE'] = noguards
        # Sort by guard name, skipping the first character (presumably
        # the '+'/'-' sign prefix).
        guardlist = sorted(guards.items(), key=lambda x: x[0][1:])
        if guardlist:
            ui.note(_('guards in series file:\n'))
            for guard, count in guardlist:
                ui.note('%2d ' % count)
                ui.write(guard, '\n')
        else:
            ui.note(_('no guards in series file\n'))
    else:
        if guards:
            ui.note(_('active guards:\n'))
            for g in guards:
                ui.write(g, '\n')
        else:
            ui.write(_('no active guards\n'))
    reapply = opts.get('reapply') and q.applied and q.appliedname(-1)
    popped = False
    if opts.get('pop') or opts.get('reapply'):
        # Pop down to just below the first guarded applied patch.
        for i in xrange(len(q.applied)):
            pushable, reason = q.pushable(i)
            if not pushable:
                ui.status(_('popping guarded patches\n'))
                popped = True
                if i == 0:
                    q.pop(repo, all=True)
                else:
                    q.pop(repo, str(i - 1))
                break
    if popped:
        try:
            if reapply:
                ui.status(_('reapplying unguarded patches\n'))
                q.push(repo, reapply)
        finally:
            q.savedirty()

@command("qfinish",
         [('a', 'applied', None, _('finish all applied changesets'))],
         _('hg qfinish [-a] [REV]...'))
def finish(ui, repo, *revrange, **opts):
    """move applied patches into repository history

    Finishes the specified revisions (corresponding to applied
    patches) by moving them out of mq control into regular repository
    history.

    Accepts a revision range or the -a/--applied option. If --applied
    is specified, all applied mq revisions are removed from mq
    control. Otherwise, the given revisions must be at the base of the
    stack of applied patches.

    This can be especially useful if your changes have been applied to
    an upstream repository, or if you are about to push your changes
    to upstream.

    Returns 0 on success.
    """
    if opts.get('applied'):
        # --applied: finish everything between qbase and qtip, plus
        # whatever the user listed explicitly.
        revrange = ('qbase::qtip',) + revrange
    elif not revrange:
        raise util.Abort(_('no revisions specified'))

    q = repo.mq
    if not q.applied:
        ui.status(_('no patches applied\n'))
        return 0

    revs = scmutil.revrange(repo, revrange)
    if repo['.'].rev() in revs and repo[None].files():
        ui.warn(_('warning: uncommitted changes in the working directory\n'))
    # queue.finish may change phases, but it leaves locking the repo to
    # the caller (to avoid a deadlock with wlock), so taking the lock
    # is this command's responsibility.
    lock = repo.lock()
    try:
        q.finish(repo, revs)
        q.savedirty()
    finally:
        lock.release()
    return 0

3135 @command("qqueue",
3135 @command("qqueue",
3136 [('l', 'list', False, _('list all available queues')),
3136 [('l', 'list', False, _('list all available queues')),
3137 ('', 'active', False, _('print name of active queue')),
3137 ('', 'active', False, _('print name of active queue')),
3138 ('c', 'create', False, _('create new queue')),
3138 ('c', 'create', False, _('create new queue')),
3139 ('', 'rename', False, _('rename active queue')),
3139 ('', 'rename', False, _('rename active queue')),
3140 ('', 'delete', False, _('delete reference to queue')),
3140 ('', 'delete', False, _('delete reference to queue')),
3141 ('', 'purge', False, _('delete queue, and remove patch dir')),
3141 ('', 'purge', False, _('delete queue, and remove patch dir')),
3142 ],
3142 ],
3143 _('[OPTION] [QUEUE]'))
3143 _('[OPTION] [QUEUE]'))
3144 def qqueue(ui, repo, name=None, **opts):
3144 def qqueue(ui, repo, name=None, **opts):
3145 '''manage multiple patch queues
3145 '''manage multiple patch queues
3146
3146
3147 Supports switching between different patch queues, as well as creating
3147 Supports switching between different patch queues, as well as creating
3148 new patch queues and deleting existing ones.
3148 new patch queues and deleting existing ones.
3149
3149
3150 Omitting a queue name or specifying -l/--list will show you the registered
3150 Omitting a queue name or specifying -l/--list will show you the registered
3151 queues - by default the "normal" patches queue is registered. The currently
3151 queues - by default the "normal" patches queue is registered. The currently
3152 active queue will be marked with "(active)". Specifying --active will print
3152 active queue will be marked with "(active)". Specifying --active will print
3153 only the name of the active queue.
3153 only the name of the active queue.
3154
3154
3155 To create a new queue, use -c/--create. The queue is automatically made
3155 To create a new queue, use -c/--create. The queue is automatically made
3156 active, except in the case where there are applied patches from the
3156 active, except in the case where there are applied patches from the
3157 currently active queue in the repository. Then the queue will only be
3157 currently active queue in the repository. Then the queue will only be
3158 created and switching will fail.
3158 created and switching will fail.
3159
3159
3160 To delete an existing queue, use --delete. You cannot delete the currently
3160 To delete an existing queue, use --delete. You cannot delete the currently
3161 active queue.
3161 active queue.
3162
3162
3163 Returns 0 on success.
3163 Returns 0 on success.
3164 '''
3164 '''
3165 q = repo.mq
3165 q = repo.mq
3166 _defaultqueue = 'patches'
3166 _defaultqueue = 'patches'
3167 _allqueues = 'patches.queues'
3167 _allqueues = 'patches.queues'
3168 _activequeue = 'patches.queue'
3168 _activequeue = 'patches.queue'
3169
3169
3170 def _getcurrent():
3170 def _getcurrent():
3171 cur = os.path.basename(q.path)
3171 cur = os.path.basename(q.path)
3172 if cur.startswith('patches-'):
3172 if cur.startswith('patches-'):
3173 cur = cur[8:]
3173 cur = cur[8:]
3174 return cur
3174 return cur
3175
3175
3176 def _noqueues():
3176 def _noqueues():
3177 try:
3177 try:
3178 fh = repo.opener(_allqueues, 'r')
3178 fh = repo.opener(_allqueues, 'r')
3179 fh.close()
3179 fh.close()
3180 except IOError:
3180 except IOError:
3181 return True
3181 return True
3182
3182
3183 return False
3183 return False
3184
3184
3185 def _getqueues():
3185 def _getqueues():
3186 current = _getcurrent()
3186 current = _getcurrent()
3187
3187
3188 try:
3188 try:
3189 fh = repo.opener(_allqueues, 'r')
3189 fh = repo.opener(_allqueues, 'r')
3190 queues = [queue.strip() for queue in fh if queue.strip()]
3190 queues = [queue.strip() for queue in fh if queue.strip()]
3191 fh.close()
3191 fh.close()
3192 if current not in queues:
3192 if current not in queues:
3193 queues.append(current)
3193 queues.append(current)
3194 except IOError:
3194 except IOError:
3195 queues = [_defaultqueue]
3195 queues = [_defaultqueue]
3196
3196
3197 return sorted(queues)
3197 return sorted(queues)
3198
3198
3199 def _setactive(name):
3199 def _setactive(name):
3200 if q.applied:
3200 if q.applied:
3201 raise util.Abort(_('patches applied - cannot set new queue active'))
3201 raise util.Abort(_('patches applied - cannot set new queue active'))
3202 _setactivenocheck(name)
3202 _setactivenocheck(name)
3203
3203
3204 def _setactivenocheck(name):
3204 def _setactivenocheck(name):
3205 fh = repo.opener(_activequeue, 'w')
3205 fh = repo.opener(_activequeue, 'w')
3206 if name != 'patches':
3206 if name != 'patches':
3207 fh.write(name)
3207 fh.write(name)
3208 fh.close()
3208 fh.close()
3209
3209
3210 def _addqueue(name):
3210 def _addqueue(name):
3211 fh = repo.opener(_allqueues, 'a')
3211 fh = repo.opener(_allqueues, 'a')
3212 fh.write('%s\n' % (name,))
3212 fh.write('%s\n' % (name,))
3213 fh.close()
3213 fh.close()
3214
3214
3215 def _queuedir(name):
3215 def _queuedir(name):
3216 if name == 'patches':
3216 if name == 'patches':
3217 return repo.join('patches')
3217 return repo.join('patches')
3218 else:
3218 else:
3219 return repo.join('patches-' + name)
3219 return repo.join('patches-' + name)
3220
3220
3221 def _validname(name):
3221 def _validname(name):
3222 for n in name:
3222 for n in name:
3223 if n in ':\\/.':
3223 if n in ':\\/.':
3224 return False
3224 return False
3225 return True
3225 return True
3226
3226
3227 def _delete(name):
3227 def _delete(name):
3228 if name not in existing:
3228 if name not in existing:
3229 raise util.Abort(_('cannot delete queue that does not exist'))
3229 raise util.Abort(_('cannot delete queue that does not exist'))
3230
3230
3231 current = _getcurrent()
3231 current = _getcurrent()
3232
3232
3233 if name == current:
3233 if name == current:
3234 raise util.Abort(_('cannot delete currently active queue'))
3234 raise util.Abort(_('cannot delete currently active queue'))
3235
3235
3236 fh = repo.opener('patches.queues.new', 'w')
3236 fh = repo.opener('patches.queues.new', 'w')
3237 for queue in existing:
3237 for queue in existing:
3238 if queue == name:
3238 if queue == name:
3239 continue
3239 continue
3240 fh.write('%s\n' % (queue,))
3240 fh.write('%s\n' % (queue,))
3241 fh.close()
3241 fh.close()
3242 util.rename(repo.join('patches.queues.new'), repo.join(_allqueues))
3242 util.rename(repo.join('patches.queues.new'), repo.join(_allqueues))
3243
3243
3244 if not name or opts.get('list') or opts.get('active'):
3244 if not name or opts.get('list') or opts.get('active'):
3245 current = _getcurrent()
3245 current = _getcurrent()
3246 if opts.get('active'):
3246 if opts.get('active'):
3247 ui.write('%s\n' % (current,))
3247 ui.write('%s\n' % (current,))
3248 return
3248 return
3249 for queue in _getqueues():
3249 for queue in _getqueues():
3250 ui.write('%s' % (queue,))
3250 ui.write('%s' % (queue,))
3251 if queue == current and not ui.quiet:
3251 if queue == current and not ui.quiet:
3252 ui.write(_(' (active)\n'))
3252 ui.write(_(' (active)\n'))
3253 else:
3253 else:
3254 ui.write('\n')
3254 ui.write('\n')
3255 return
3255 return
3256
3256
3257 if not _validname(name):
3257 if not _validname(name):
3258 raise util.Abort(
3258 raise util.Abort(
3259 _('invalid queue name, may not contain the characters ":\\/."'))
3259 _('invalid queue name, may not contain the characters ":\\/."'))
3260
3260
3261 existing = _getqueues()
3261 existing = _getqueues()
3262
3262
3263 if opts.get('create'):
3263 if opts.get('create'):
3264 if name in existing:
3264 if name in existing:
3265 raise util.Abort(_('queue "%s" already exists') % name)
3265 raise util.Abort(_('queue "%s" already exists') % name)
3266 if _noqueues():
3266 if _noqueues():
3267 _addqueue(_defaultqueue)
3267 _addqueue(_defaultqueue)
3268 _addqueue(name)
3268 _addqueue(name)
3269 _setactive(name)
3269 _setactive(name)
3270 elif opts.get('rename'):
3270 elif opts.get('rename'):
3271 current = _getcurrent()
3271 current = _getcurrent()
3272 if name == current:
3272 if name == current:
3273 raise util.Abort(_('can\'t rename "%s" to its current name') % name)
3273 raise util.Abort(_('can\'t rename "%s" to its current name') % name)
3274 if name in existing:
3274 if name in existing:
3275 raise util.Abort(_('queue "%s" already exists') % name)
3275 raise util.Abort(_('queue "%s" already exists') % name)
3276
3276
3277 olddir = _queuedir(current)
3277 olddir = _queuedir(current)
3278 newdir = _queuedir(name)
3278 newdir = _queuedir(name)
3279
3279
3280 if os.path.exists(newdir):
3280 if os.path.exists(newdir):
3281 raise util.Abort(_('non-queue directory "%s" already exists') %
3281 raise util.Abort(_('non-queue directory "%s" already exists') %
3282 newdir)
3282 newdir)
3283
3283
3284 fh = repo.opener('patches.queues.new', 'w')
3284 fh = repo.opener('patches.queues.new', 'w')
3285 for queue in existing:
3285 for queue in existing:
3286 if queue == current:
3286 if queue == current:
3287 fh.write('%s\n' % (name,))
3287 fh.write('%s\n' % (name,))
3288 if os.path.exists(olddir):
3288 if os.path.exists(olddir):
3289 util.rename(olddir, newdir)
3289 util.rename(olddir, newdir)
3290 else:
3290 else:
3291 fh.write('%s\n' % (queue,))
3291 fh.write('%s\n' % (queue,))
3292 fh.close()
3292 fh.close()
3293 util.rename(repo.join('patches.queues.new'), repo.join(_allqueues))
3293 util.rename(repo.join('patches.queues.new'), repo.join(_allqueues))
3294 _setactivenocheck(name)
3294 _setactivenocheck(name)
3295 elif opts.get('delete'):
3295 elif opts.get('delete'):
3296 _delete(name)
3296 _delete(name)
3297 elif opts.get('purge'):
3297 elif opts.get('purge'):
3298 if name in existing:
3298 if name in existing:
3299 _delete(name)
3299 _delete(name)
3300 qdir = _queuedir(name)
3300 qdir = _queuedir(name)
3301 if os.path.exists(qdir):
3301 if os.path.exists(qdir):
3302 shutil.rmtree(qdir)
3302 shutil.rmtree(qdir)
3303 else:
3303 else:
3304 if name not in existing:
3304 if name not in existing:
3305 raise util.Abort(_('use --create to create a new queue'))
3305 raise util.Abort(_('use --create to create a new queue'))
3306 _setactive(name)
3306 _setactive(name)
3307
3307
3308 def mqphasedefaults(repo, roots):
3308 def mqphasedefaults(repo, roots):
3309 """callback used to set mq changeset as secret when no phase data exists"""
3309 """callback used to set mq changeset as secret when no phase data exists"""
3310 if repo.mq.applied:
3310 if repo.mq.applied:
3311 if repo.ui.configbool('mq', 'secret', False):
3311 if repo.ui.configbool('mq', 'secret', False):
3312 mqphase = phases.secret
3312 mqphase = phases.secret
3313 else:
3313 else:
3314 mqphase = phases.draft
3314 mqphase = phases.draft
3315 qbase = repo[repo.mq.applied[0].node]
3315 qbase = repo[repo.mq.applied[0].node]
3316 roots[mqphase].add(qbase.node())
3316 roots[mqphase].add(qbase.node())
3317 return roots
3317 return roots
3318
3318
3319 def reposetup(ui, repo):
3319 def reposetup(ui, repo):
3320 class mqrepo(repo.__class__):
3320 class mqrepo(repo.__class__):
3321 @util.propertycache
3321 @util.propertycache
3322 def mq(self):
3322 def mq(self):
3323 return queue(self.ui, self.path)
3323 return queue(self.ui, self.path)
3324
3324
3325 def abortifwdirpatched(self, errmsg, force=False):
3325 def abortifwdirpatched(self, errmsg, force=False):
3326 if self.mq.applied and not force:
3326 if self.mq.applied and not force:
3327 parents = self.dirstate.parents()
3327 parents = self.dirstate.parents()
3328 patches = [s.node for s in self.mq.applied]
3328 patches = [s.node for s in self.mq.applied]
3329 if parents[0] in patches or parents[1] in patches:
3329 if parents[0] in patches or parents[1] in patches:
3330 raise util.Abort(errmsg)
3330 raise util.Abort(errmsg)
3331
3331
3332 def commit(self, text="", user=None, date=None, match=None,
3332 def commit(self, text="", user=None, date=None, match=None,
3333 force=False, editor=False, extra={}):
3333 force=False, editor=False, extra={}):
3334 self.abortifwdirpatched(
3334 self.abortifwdirpatched(
3335 _('cannot commit over an applied mq patch'),
3335 _('cannot commit over an applied mq patch'),
3336 force)
3336 force)
3337
3337
3338 return super(mqrepo, self).commit(text, user, date, match, force,
3338 return super(mqrepo, self).commit(text, user, date, match, force,
3339 editor, extra)
3339 editor, extra)
3340
3340
3341 def checkpush(self, force, revs):
3341 def checkpush(self, force, revs):
3342 if self.mq.applied and not force:
3342 if self.mq.applied and not force:
3343 outapplied = [e.node for e in self.mq.applied]
3343 outapplied = [e.node for e in self.mq.applied]
3344 if revs:
3344 if revs:
3345 # Assume applied patches have no non-patch descendants and
3345 # Assume applied patches have no non-patch descendants and
3346 # are not on remote already. Filtering any changeset not
3346 # are not on remote already. Filtering any changeset not
3347 # pushed.
3347 # pushed.
3348 heads = set(revs)
3348 heads = set(revs)
3349 for node in reversed(outapplied):
3349 for node in reversed(outapplied):
3350 if node in heads:
3350 if node in heads:
3351 break
3351 break
3352 else:
3352 else:
3353 outapplied.pop()
3353 outapplied.pop()
3354 # looking for pushed and shared changeset
3354 # looking for pushed and shared changeset
3355 for node in outapplied:
3355 for node in outapplied:
3356 if repo[node].phase() < phases.secret:
3356 if repo[node].phase() < phases.secret:
3357 raise util.Abort(_('source has mq patches applied'))
3357 raise util.Abort(_('source has mq patches applied'))
3358 # no non-secret patches pushed
3358 # no non-secret patches pushed
3359 super(mqrepo, self).checkpush(force, revs)
3359 super(mqrepo, self).checkpush(force, revs)
3360
3360
3361 def _findtags(self):
3361 def _findtags(self):
3362 '''augment tags from base class with patch tags'''
3362 '''augment tags from base class with patch tags'''
3363 result = super(mqrepo, self)._findtags()
3363 result = super(mqrepo, self)._findtags()
3364
3364
3365 q = self.mq
3365 q = self.mq
3366 if not q.applied:
3366 if not q.applied:
3367 return result
3367 return result
3368
3368
3369 mqtags = [(patch.node, patch.name) for patch in q.applied]
3369 mqtags = [(patch.node, patch.name) for patch in q.applied]
3370
3370
3371 try:
3371 try:
3372 self.changelog.rev(mqtags[-1][0])
3372 self.changelog.rev(mqtags[-1][0])
3373 except error.LookupError:
3373 except error.LookupError:
3374 self.ui.warn(_('mq status file refers to unknown node %s\n')
3374 self.ui.warn(_('mq status file refers to unknown node %s\n')
3375 % short(mqtags[-1][0]))
3375 % short(mqtags[-1][0]))
3376 return result
3376 return result
3377
3377
3378 mqtags.append((mqtags[-1][0], 'qtip'))
3378 mqtags.append((mqtags[-1][0], 'qtip'))
3379 mqtags.append((mqtags[0][0], 'qbase'))
3379 mqtags.append((mqtags[0][0], 'qbase'))
3380 mqtags.append((self.changelog.parents(mqtags[0][0])[0], 'qparent'))
3380 mqtags.append((self.changelog.parents(mqtags[0][0])[0], 'qparent'))
3381 tags = result[0]
3381 tags = result[0]
3382 for patch in mqtags:
3382 for patch in mqtags:
3383 if patch[1] in tags:
3383 if patch[1] in tags:
3384 self.ui.warn(_('Tag %s overrides mq patch of the same '
3384 self.ui.warn(_('Tag %s overrides mq patch of the same '
3385 'name\n') % patch[1])
3385 'name\n') % patch[1])
3386 else:
3386 else:
3387 tags[patch[1]] = patch[0]
3387 tags[patch[1]] = patch[0]
3388
3388
3389 return result
3389 return result
3390
3390
3391 def _branchtags(self, partial, lrev):
3391 def _branchtags(self, partial, lrev):
3392 q = self.mq
3392 q = self.mq
3393 cl = self.changelog
3393 cl = self.changelog
3394 qbase = None
3394 qbase = None
3395 if not q.applied:
3395 if not q.applied:
3396 if getattr(self, '_committingpatch', False):
3396 if getattr(self, '_committingpatch', False):
3397 # Committing a new patch, must be tip
3397 # Committing a new patch, must be tip
3398 qbase = len(cl) - 1
3398 qbase = len(cl) - 1
3399 else:
3399 else:
3400 qbasenode = q.applied[0].node
3400 qbasenode = q.applied[0].node
3401 try:
3401 try:
3402 qbase = cl.rev(qbasenode)
3402 qbase = cl.rev(qbasenode)
3403 except error.LookupError:
3403 except error.LookupError:
3404 self.ui.warn(_('mq status file refers to unknown node %s\n')
3404 self.ui.warn(_('mq status file refers to unknown node %s\n')
3405 % short(qbasenode))
3405 % short(qbasenode))
3406 if qbase is None:
3406 if qbase is None:
3407 return super(mqrepo, self)._branchtags(partial, lrev)
3407 return super(mqrepo, self)._branchtags(partial, lrev)
3408
3408
3409 start = lrev + 1
3409 start = lrev + 1
3410 if start < qbase:
3410 if start < qbase:
3411 # update the cache (excluding the patches) and save it
3411 # update the cache (excluding the patches) and save it
3412 ctxgen = (self[r] for r in xrange(lrev + 1, qbase))
3412 ctxgen = (self[r] for r in xrange(lrev + 1, qbase))
3413 self._updatebranchcache(partial, ctxgen)
3413 self._updatebranchcache(partial, ctxgen)
3414 self._writebranchcache(partial, cl.node(qbase - 1), qbase - 1)
3414 self._writebranchcache(partial, cl.node(qbase - 1), qbase - 1)
3415 start = qbase
3415 start = qbase
3416 # if start = qbase, the cache is as updated as it should be.
3416 # if start = qbase, the cache is as updated as it should be.
3417 # if start > qbase, the cache includes (part of) the patches.
3417 # if start > qbase, the cache includes (part of) the patches.
3418 # we might as well use it, but we won't save it.
3418 # we might as well use it, but we won't save it.
3419
3419
3420 # update the cache up to the tip
3420 # update the cache up to the tip
3421 ctxgen = (self[r] for r in xrange(start, len(cl)))
3421 ctxgen = (self[r] for r in xrange(start, len(cl)))
3422 self._updatebranchcache(partial, ctxgen)
3422 self._updatebranchcache(partial, ctxgen)
3423
3423
3424 return partial
3424 return partial
3425
3425
3426 if repo.local():
3426 if repo.local():
3427 repo.__class__ = mqrepo
3427 repo.__class__ = mqrepo
3428
3428
3429 repo._phasedefaults.append(mqphasedefaults)
3429 repo._phasedefaults.append(mqphasedefaults)
3430
3430
3431 def mqimport(orig, ui, repo, *args, **kwargs):
3431 def mqimport(orig, ui, repo, *args, **kwargs):
3432 if (util.safehasattr(repo, 'abortifwdirpatched')
3432 if (util.safehasattr(repo, 'abortifwdirpatched')
3433 and not kwargs.get('no_commit', False)):
3433 and not kwargs.get('no_commit', False)):
3434 repo.abortifwdirpatched(_('cannot import over an applied patch'),
3434 repo.abortifwdirpatched(_('cannot import over an applied patch'),
3435 kwargs.get('force'))
3435 kwargs.get('force'))
3436 return orig(ui, repo, *args, **kwargs)
3436 return orig(ui, repo, *args, **kwargs)
3437
3437
3438 def mqinit(orig, ui, *args, **kwargs):
3438 def mqinit(orig, ui, *args, **kwargs):
3439 mq = kwargs.pop('mq', None)
3439 mq = kwargs.pop('mq', None)
3440
3440
3441 if not mq:
3441 if not mq:
3442 return orig(ui, *args, **kwargs)
3442 return orig(ui, *args, **kwargs)
3443
3443
3444 if args:
3444 if args:
3445 repopath = args[0]
3445 repopath = args[0]
3446 if not hg.islocal(repopath):
3446 if not hg.islocal(repopath):
3447 raise util.Abort(_('only a local queue repository '
3447 raise util.Abort(_('only a local queue repository '
3448 'may be initialized'))
3448 'may be initialized'))
3449 else:
3449 else:
3450 repopath = cmdutil.findrepo(os.getcwd())
3450 repopath = cmdutil.findrepo(os.getcwd())
3451 if not repopath:
3451 if not repopath:
3452 raise util.Abort(_('there is no Mercurial repository here '
3452 raise util.Abort(_('there is no Mercurial repository here '
3453 '(.hg not found)'))
3453 '(.hg not found)'))
3454 repo = hg.repository(ui, repopath)
3454 repo = hg.repository(ui, repopath)
3455 return qinit(ui, repo, True)
3455 return qinit(ui, repo, True)
3456
3456
3457 def mqcommand(orig, ui, repo, *args, **kwargs):
3457 def mqcommand(orig, ui, repo, *args, **kwargs):
3458 """Add --mq option to operate on patch repository instead of main"""
3458 """Add --mq option to operate on patch repository instead of main"""
3459
3459
3460 # some commands do not like getting unknown options
3460 # some commands do not like getting unknown options
3461 mq = kwargs.pop('mq', None)
3461 mq = kwargs.pop('mq', None)
3462
3462
3463 if not mq:
3463 if not mq:
3464 return orig(ui, repo, *args, **kwargs)
3464 return orig(ui, repo, *args, **kwargs)
3465
3465
3466 q = repo.mq
3466 q = repo.mq
3467 r = q.qrepo()
3467 r = q.qrepo()
3468 if not r:
3468 if not r:
3469 raise util.Abort(_('no queue repository'))
3469 raise util.Abort(_('no queue repository'))
3470 return orig(r.ui, r, *args, **kwargs)
3470 return orig(r.ui, r, *args, **kwargs)
3471
3471
3472 def summary(orig, ui, repo, *args, **kwargs):
3472 def summary(orig, ui, repo, *args, **kwargs):
3473 r = orig(ui, repo, *args, **kwargs)
3473 r = orig(ui, repo, *args, **kwargs)
3474 q = repo.mq
3474 q = repo.mq
3475 m = []
3475 m = []
3476 a, u = len(q.applied), len(q.unapplied(repo))
3476 a, u = len(q.applied), len(q.unapplied(repo))
3477 if a:
3477 if a:
3478 m.append(ui.label(_("%d applied"), 'qseries.applied') % a)
3478 m.append(ui.label(_("%d applied"), 'qseries.applied') % a)
3479 if u:
3479 if u:
3480 m.append(ui.label(_("%d unapplied"), 'qseries.unapplied') % u)
3480 m.append(ui.label(_("%d unapplied"), 'qseries.unapplied') % u)
3481 if m:
3481 if m:
3482 ui.write("mq: %s\n" % ', '.join(m))
3482 ui.write("mq: %s\n" % ', '.join(m))
3483 else:
3483 else:
3484 ui.note(_("mq: (empty queue)\n"))
3484 ui.note(_("mq: (empty queue)\n"))
3485 return r
3485 return r
3486
3486
3487 def revsetmq(repo, subset, x):
3487 def revsetmq(repo, subset, x):
3488 """``mq()``
3488 """``mq()``
3489 Changesets managed by MQ.
3489 Changesets managed by MQ.
3490 """
3490 """
3491 revset.getargs(x, 0, 0, _("mq takes no arguments"))
3491 revset.getargs(x, 0, 0, _("mq takes no arguments"))
3492 applied = set([repo[r.node].rev() for r in repo.mq.applied])
3492 applied = set([repo[r.node].rev() for r in repo.mq.applied])
3493 return [r for r in subset if r in applied]
3493 return [r for r in subset if r in applied]
3494
3494
3495 def extsetup(ui):
3495 def extsetup(ui):
3496 revset.symbols['mq'] = revsetmq
3496 revset.symbols['mq'] = revsetmq
3497
3497
3498 # tell hggettext to extract docstrings from these functions:
3498 # tell hggettext to extract docstrings from these functions:
3499 i18nfunctions = [revsetmq]
3499 i18nfunctions = [revsetmq]
3500
3500
3501 def uisetup(ui):
3501 def uisetup(ui):
3502 mqopt = [('', 'mq', None, _("operate on patch repository"))]
3502 mqopt = [('', 'mq', None, _("operate on patch repository"))]
3503
3503
3504 extensions.wrapcommand(commands.table, 'import', mqimport)
3504 extensions.wrapcommand(commands.table, 'import', mqimport)
3505 extensions.wrapcommand(commands.table, 'summary', summary)
3505 extensions.wrapcommand(commands.table, 'summary', summary)
3506
3506
3507 entry = extensions.wrapcommand(commands.table, 'init', mqinit)
3507 entry = extensions.wrapcommand(commands.table, 'init', mqinit)
3508 entry[1].extend(mqopt)
3508 entry[1].extend(mqopt)
3509
3509
3510 nowrap = set(commands.norepo.split(" "))
3510 nowrap = set(commands.norepo.split(" "))
3511
3511
3512 def dotable(cmdtable):
3512 def dotable(cmdtable):
3513 for cmd in cmdtable.keys():
3513 for cmd in cmdtable.keys():
3514 cmd = cmdutil.parsealiases(cmd)[0]
3514 cmd = cmdutil.parsealiases(cmd)[0]
3515 if cmd in nowrap:
3515 if cmd in nowrap:
3516 continue
3516 continue
3517 entry = extensions.wrapcommand(cmdtable, cmd, mqcommand)
3517 entry = extensions.wrapcommand(cmdtable, cmd, mqcommand)
3518 entry[1].extend(mqopt)
3518 entry[1].extend(mqopt)
3519
3519
3520 dotable(commands.table)
3520 dotable(commands.table)
3521
3521
3522 for extname, extmodule in extensions.extensions():
3522 for extname, extmodule in extensions.extensions():
3523 if extmodule.__file__ != __file__:
3523 if extmodule.__file__ != __file__:
3524 dotable(getattr(extmodule, 'cmdtable', {}))
3524 dotable(getattr(extmodule, 'cmdtable', {}))
3525
3525
3526
3526
3527 colortable = {'qguard.negative': 'red',
3527 colortable = {'qguard.negative': 'red',
3528 'qguard.positive': 'yellow',
3528 'qguard.positive': 'yellow',
3529 'qguard.unguarded': 'green',
3529 'qguard.unguarded': 'green',
3530 'qseries.applied': 'blue bold underline',
3530 'qseries.applied': 'blue bold underline',
3531 'qseries.guarded': 'black bold',
3531 'qseries.guarded': 'black bold',
3532 'qseries.missing': 'red bold',
3532 'qseries.missing': 'red bold',
3533 'qseries.unapplied': 'black bold'}
3533 'qseries.unapplied': 'black bold'}
@@ -1,1653 +1,1653 b''
1 # cmdutil.py - help for command processing in mercurial
1 # cmdutil.py - help for command processing in mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import hex, nullid, nullrev, short
8 from node import hex, nullid, nullrev, short
9 from i18n import _
9 from i18n import _
10 import os, sys, errno, re, tempfile
10 import os, sys, errno, re, tempfile
11 import util, scmutil, templater, patch, error, templatekw, revlog, copies
11 import util, scmutil, templater, patch, error, templatekw, revlog, copies
12 import match as matchmod
12 import match as matchmod
13 import subrepo, context, repair, bookmarks
13 import subrepo, context, repair, bookmarks
14
14
15 def parsealiases(cmd):
15 def parsealiases(cmd):
16 return cmd.lstrip("^").split("|")
16 return cmd.lstrip("^").split("|")
17
17
18 def findpossible(cmd, table, strict=False):
18 def findpossible(cmd, table, strict=False):
19 """
19 """
20 Return cmd -> (aliases, command table entry)
20 Return cmd -> (aliases, command table entry)
21 for each matching command.
21 for each matching command.
22 Return debug commands (or their aliases) only if no normal command matches.
22 Return debug commands (or their aliases) only if no normal command matches.
23 """
23 """
24 choice = {}
24 choice = {}
25 debugchoice = {}
25 debugchoice = {}
26
26
27 if cmd in table:
27 if cmd in table:
28 # short-circuit exact matches, "log" alias beats "^log|history"
28 # short-circuit exact matches, "log" alias beats "^log|history"
29 keys = [cmd]
29 keys = [cmd]
30 else:
30 else:
31 keys = table.keys()
31 keys = table.keys()
32
32
33 for e in keys:
33 for e in keys:
34 aliases = parsealiases(e)
34 aliases = parsealiases(e)
35 found = None
35 found = None
36 if cmd in aliases:
36 if cmd in aliases:
37 found = cmd
37 found = cmd
38 elif not strict:
38 elif not strict:
39 for a in aliases:
39 for a in aliases:
40 if a.startswith(cmd):
40 if a.startswith(cmd):
41 found = a
41 found = a
42 break
42 break
43 if found is not None:
43 if found is not None:
44 if aliases[0].startswith("debug") or found.startswith("debug"):
44 if aliases[0].startswith("debug") or found.startswith("debug"):
45 debugchoice[found] = (aliases, table[e])
45 debugchoice[found] = (aliases, table[e])
46 else:
46 else:
47 choice[found] = (aliases, table[e])
47 choice[found] = (aliases, table[e])
48
48
49 if not choice and debugchoice:
49 if not choice and debugchoice:
50 choice = debugchoice
50 choice = debugchoice
51
51
52 return choice
52 return choice
53
53
54 def findcmd(cmd, table, strict=True):
54 def findcmd(cmd, table, strict=True):
55 """Return (aliases, command table entry) for command string."""
55 """Return (aliases, command table entry) for command string."""
56 choice = findpossible(cmd, table, strict)
56 choice = findpossible(cmd, table, strict)
57
57
58 if cmd in choice:
58 if cmd in choice:
59 return choice[cmd]
59 return choice[cmd]
60
60
61 if len(choice) > 1:
61 if len(choice) > 1:
62 clist = choice.keys()
62 clist = choice.keys()
63 clist.sort()
63 clist.sort()
64 raise error.AmbiguousCommand(cmd, clist)
64 raise error.AmbiguousCommand(cmd, clist)
65
65
66 if choice:
66 if choice:
67 return choice.values()[0]
67 return choice.values()[0]
68
68
69 raise error.UnknownCommand(cmd)
69 raise error.UnknownCommand(cmd)
70
70
71 def findrepo(p):
71 def findrepo(p):
72 while not os.path.isdir(os.path.join(p, ".hg")):
72 while not os.path.isdir(os.path.join(p, ".hg")):
73 oldp, p = p, os.path.dirname(p)
73 oldp, p = p, os.path.dirname(p)
74 if p == oldp:
74 if p == oldp:
75 return None
75 return None
76
76
77 return p
77 return p
78
78
79 def bailifchanged(repo):
79 def bailifchanged(repo):
80 if repo.dirstate.p2() != nullid:
80 if repo.dirstate.p2() != nullid:
81 raise util.Abort(_('outstanding uncommitted merge'))
81 raise util.Abort(_('outstanding uncommitted merge'))
82 modified, added, removed, deleted = repo.status()[:4]
82 modified, added, removed, deleted = repo.status()[:4]
83 if modified or added or removed or deleted:
83 if modified or added or removed or deleted:
84 raise util.Abort(_("outstanding uncommitted changes"))
84 raise util.Abort(_("outstanding uncommitted changes"))
85 ctx = repo[None]
85 ctx = repo[None]
86 for s in ctx.substate:
86 for s in ctx.substate:
87 if ctx.sub(s).dirty():
87 if ctx.sub(s).dirty():
88 raise util.Abort(_("uncommitted changes in subrepo %s") % s)
88 raise util.Abort(_("uncommitted changes in subrepo %s") % s)
89
89
90 def logmessage(ui, opts):
90 def logmessage(ui, opts):
91 """ get the log message according to -m and -l option """
91 """ get the log message according to -m and -l option """
92 message = opts.get('message')
92 message = opts.get('message')
93 logfile = opts.get('logfile')
93 logfile = opts.get('logfile')
94
94
95 if message and logfile:
95 if message and logfile:
96 raise util.Abort(_('options --message and --logfile are mutually '
96 raise util.Abort(_('options --message and --logfile are mutually '
97 'exclusive'))
97 'exclusive'))
98 if not message and logfile:
98 if not message and logfile:
99 try:
99 try:
100 if logfile == '-':
100 if logfile == '-':
101 message = ui.fin.read()
101 message = ui.fin.read()
102 else:
102 else:
103 message = '\n'.join(util.readfile(logfile).splitlines())
103 message = '\n'.join(util.readfile(logfile).splitlines())
104 except IOError, inst:
104 except IOError, inst:
105 raise util.Abort(_("can't read commit message '%s': %s") %
105 raise util.Abort(_("can't read commit message '%s': %s") %
106 (logfile, inst.strerror))
106 (logfile, inst.strerror))
107 return message
107 return message
108
108
109 def loglimit(opts):
109 def loglimit(opts):
110 """get the log limit according to option -l/--limit"""
110 """get the log limit according to option -l/--limit"""
111 limit = opts.get('limit')
111 limit = opts.get('limit')
112 if limit:
112 if limit:
113 try:
113 try:
114 limit = int(limit)
114 limit = int(limit)
115 except ValueError:
115 except ValueError:
116 raise util.Abort(_('limit must be a positive integer'))
116 raise util.Abort(_('limit must be a positive integer'))
117 if limit <= 0:
117 if limit <= 0:
118 raise util.Abort(_('limit must be positive'))
118 raise util.Abort(_('limit must be positive'))
119 else:
119 else:
120 limit = None
120 limit = None
121 return limit
121 return limit
122
122
123 def makefilename(repo, pat, node, desc=None,
123 def makefilename(repo, pat, node, desc=None,
124 total=None, seqno=None, revwidth=None, pathname=None):
124 total=None, seqno=None, revwidth=None, pathname=None):
125 node_expander = {
125 node_expander = {
126 'H': lambda: hex(node),
126 'H': lambda: hex(node),
127 'R': lambda: str(repo.changelog.rev(node)),
127 'R': lambda: str(repo.changelog.rev(node)),
128 'h': lambda: short(node),
128 'h': lambda: short(node),
129 'm': lambda: re.sub('[^\w]', '_', str(desc))
129 'm': lambda: re.sub('[^\w]', '_', str(desc))
130 }
130 }
131 expander = {
131 expander = {
132 '%': lambda: '%',
132 '%': lambda: '%',
133 'b': lambda: os.path.basename(repo.root),
133 'b': lambda: os.path.basename(repo.root),
134 }
134 }
135
135
136 try:
136 try:
137 if node:
137 if node:
138 expander.update(node_expander)
138 expander.update(node_expander)
139 if node:
139 if node:
140 expander['r'] = (lambda:
140 expander['r'] = (lambda:
141 str(repo.changelog.rev(node)).zfill(revwidth or 0))
141 str(repo.changelog.rev(node)).zfill(revwidth or 0))
142 if total is not None:
142 if total is not None:
143 expander['N'] = lambda: str(total)
143 expander['N'] = lambda: str(total)
144 if seqno is not None:
144 if seqno is not None:
145 expander['n'] = lambda: str(seqno)
145 expander['n'] = lambda: str(seqno)
146 if total is not None and seqno is not None:
146 if total is not None and seqno is not None:
147 expander['n'] = lambda: str(seqno).zfill(len(str(total)))
147 expander['n'] = lambda: str(seqno).zfill(len(str(total)))
148 if pathname is not None:
148 if pathname is not None:
149 expander['s'] = lambda: os.path.basename(pathname)
149 expander['s'] = lambda: os.path.basename(pathname)
150 expander['d'] = lambda: os.path.dirname(pathname) or '.'
150 expander['d'] = lambda: os.path.dirname(pathname) or '.'
151 expander['p'] = lambda: pathname
151 expander['p'] = lambda: pathname
152
152
153 newname = []
153 newname = []
154 patlen = len(pat)
154 patlen = len(pat)
155 i = 0
155 i = 0
156 while i < patlen:
156 while i < patlen:
157 c = pat[i]
157 c = pat[i]
158 if c == '%':
158 if c == '%':
159 i += 1
159 i += 1
160 c = pat[i]
160 c = pat[i]
161 c = expander[c]()
161 c = expander[c]()
162 newname.append(c)
162 newname.append(c)
163 i += 1
163 i += 1
164 return ''.join(newname)
164 return ''.join(newname)
165 except KeyError, inst:
165 except KeyError, inst:
166 raise util.Abort(_("invalid format spec '%%%s' in output filename") %
166 raise util.Abort(_("invalid format spec '%%%s' in output filename") %
167 inst.args[0])
167 inst.args[0])
168
168
def makefileobj(repo, pat, node=None, desc=None, total=None,
                seqno=None, revwidth=None, mode='wb', pathname=None):
    """Return a file-like object for pat, opened with the given mode.

    An empty pat or '-' maps to the ui's stdout (for writable modes) or
    stdin; a pat that is already a file-like object with the needed
    read/write method is returned as-is.  Anything else is treated as a
    filename pattern and expanded through makefilename() before opening.
    """
    writable = mode not in ('r', 'rb')

    if not pat or pat == '-':
        if writable:
            fp = repo.ui.fout
        else:
            fp = repo.ui.fin
        if util.safehasattr(fp, 'fileno'):
            # hand back a dup so the caller may close it freely
            return os.fdopen(os.dup(fp.fileno()), mode)
        # this fp can't be duped properly, so return a wrapper whose
        # close() is a no-op; everything else is delegated to fp
        class wrappedfileobj(object):
            noop = lambda x: None
            def __init__(self, f):
                self.f = f
            def __getattr__(self, attr):
                if attr == 'close':
                    return self.noop
                return getattr(self.f, attr)
        return wrappedfileobj(fp)

    if writable and util.safehasattr(pat, 'write'):
        return pat
    if 'r' in mode and util.safehasattr(pat, 'read'):
        return pat
    filename = makefilename(repo, pat, node, desc, total, seqno, revwidth,
                            pathname)
    return open(filename, mode)
199
199
def openrevlog(repo, cmd, file_, opts):
    """opens the changelog, manifest, a filelog or a given revlog"""
    cl = opts['changelog']
    mf = opts['manifest']

    # the selector flags are mutually exclusive and (mostly) need a repo
    msg = None
    if cl and mf:
        msg = _('cannot specify --changelog and --manifest at the same time')
    elif cl or mf:
        if file_:
            msg = _('cannot specify filename with --changelog or --manifest')
        elif not repo:
            msg = _('cannot specify --changelog or --manifest '
                    'without a repository')
    if msg:
        raise util.Abort(msg)

    # pick the requested revlog out of the repository, if we have one
    r = None
    if repo:
        if cl:
            r = repo.changelog
        elif mf:
            r = repo.manifest
        elif file_:
            filelog = repo.file(file_)
            if len(filelog):
                r = filelog
    if r:
        return r

    # fall back to opening a revlog file directly from disk
    if not file_:
        raise error.CommandError(cmd, _('invalid arguments'))
    if not os.path.isfile(file_):
        raise util.Abort(_("revlog '%s' not found") % file_)
    return revlog.revlog(scmutil.opener(os.getcwd(), audit=False),
                         file_[:-2] + ".i")
234
234
def copy(ui, repo, pats, opts, rename=False):
    '''Copy (or, when rename=True, move) the files matched by pats[:-1]
    to the destination pats[-1].

    Warnings are emitted per skipped file; returns True if any file
    failed to copy, False otherwise.  With opts["after"], only records
    already-performed copies in the dirstate instead of touching the
    working directory.
    '''
    # called with the repo lock held
    #
    # hgsep => pathname that uses "/" to separate directories
    # ossep => pathname that uses os.sep to separate directories
    cwd = repo.getcwd()
    targets = {}  # abstarget -> abssrc, used for collision detection
    after = opts.get("after")
    dryrun = opts.get("dry_run")
    wctx = repo[None]

    # expand one source pattern into a list of (abs, rel, exact) entries,
    # warning about explicitly-named files that cannot be copied
    def walkpat(pat):
        srcs = []
        # without --after, removed ('r') files cannot be sources either
        badstates = after and '?' or '?r'
        m = scmutil.match(repo[None], [pat], opts, globbed=True)
        for abs in repo.walk(m):
            state = repo.dirstate[abs]
            rel = m.rel(abs)
            exact = m.exact(abs)
            if state in badstates:
                # only complain when the file was named explicitly
                if exact and state == '?':
                    ui.warn(_('%s: not copying - file is not managed\n') % rel)
                if exact and state == 'r':
                    ui.warn(_('%s: not copying - file has been marked for'
                              ' remove\n') % rel)
                continue
            # abs: hgsep
            # rel: ossep
            srcs.append((abs, rel, exact))
        return srcs

    # copy a single file; returns True to report a failure
    # abssrc: hgsep
    # relsrc: ossep
    # otarget: ossep
    def copyfile(abssrc, relsrc, otarget, exact):
        abstarget = scmutil.canonpath(repo.root, cwd, otarget)
        if '/' in abstarget:
            # We cannot normalize abstarget itself, this would prevent
            # case only renames, like a => A.
            abspath, absname = abstarget.rsplit('/', 1)
            abstarget = repo.dirstate.normalize(abspath) + '/' + absname
        reltarget = repo.pathto(abstarget, cwd)
        target = repo.wjoin(abstarget)
        src = repo.wjoin(abssrc)
        state = repo.dirstate[abstarget]

        scmutil.checkportable(ui, abstarget)

        # check for collisions
        prevsrc = targets.get(abstarget)
        if prevsrc is not None:
            ui.warn(_('%s: not overwriting - %s collides with %s\n') %
                    (reltarget, repo.pathto(abssrc, cwd),
                     repo.pathto(prevsrc, cwd)))
            return

        # check for overwrites
        exists = os.path.lexists(target)
        samefile = False
        if exists and abssrc != abstarget:
            # source and target may be the same file under a
            # case-insensitive filesystem (case-only rename)
            if (repo.dirstate.normalize(abssrc) ==
                repo.dirstate.normalize(abstarget)):
                if not rename:
                    ui.warn(_("%s: can't copy - same file\n") % reltarget)
                    return
                exists = False
                samefile = True

        if not after and exists or after and state in 'mn':
            if not opts['force']:
                ui.warn(_('%s: not overwriting - file exists\n') %
                        reltarget)
                return

        if after:
            # --after only records copies that already happened on disk
            if not exists:
                if rename:
                    ui.warn(_('%s: not recording move - %s does not exist\n') %
                            (relsrc, reltarget))
                else:
                    ui.warn(_('%s: not recording copy - %s does not exist\n') %
                            (relsrc, reltarget))
                return
        elif not dryrun:
            try:
                if exists:
                    os.unlink(target)
                targetdir = os.path.dirname(target) or '.'
                if not os.path.isdir(targetdir):
                    os.makedirs(targetdir)
                if samefile:
                    # case-only rename: go through a temporary name so the
                    # filesystem actually changes the case
                    tmp = target + "~hgrename"
                    os.rename(src, tmp)
                    os.rename(tmp, target)
                else:
                    util.copyfile(src, target)
                srcexists = True
            except IOError, inst:
                if inst.errno == errno.ENOENT:
                    ui.warn(_('%s: deleted in working copy\n') % relsrc)
                    srcexists = False
                else:
                    ui.warn(_('%s: cannot copy - %s\n') %
                            (relsrc, inst.strerror))
                    return True # report a failure

        if ui.verbose or not exact:
            if rename:
                ui.status(_('moving %s to %s\n') % (relsrc, reltarget))
            else:
                ui.status(_('copying %s to %s\n') % (relsrc, reltarget))

        targets[abstarget] = abssrc

        # fix up dirstate
        scmutil.dirstatecopy(ui, repo, wctx, abssrc, abstarget,
                             dryrun=dryrun, cwd=cwd)
        if rename and not dryrun:
            if not after and srcexists and not samefile:
                util.unlinkpath(repo.wjoin(abssrc))
            wctx.forget([abssrc])

    # compute target path mapper for the normal (not --after) case
    # pat: ossep
    # dest ossep
    # srcs: list of (hgsep, hgsep, ossep, bool)
    # return: function that takes hgsep and returns ossep
    def targetpathfn(pat, dest, srcs):
        if os.path.isdir(pat):
            abspfx = scmutil.canonpath(repo.root, cwd, pat)
            abspfx = util.localpath(abspfx)
            if destdirexists:
                striplen = len(os.path.split(abspfx)[0])
            else:
                striplen = len(abspfx)
            if striplen:
                striplen += len(os.sep)
            res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
        elif destdirexists:
            res = lambda p: os.path.join(dest,
                                         os.path.basename(util.localpath(p)))
        else:
            res = lambda p: dest
        return res

    # compute target path mapper for the --after case, where the target
    # layout must be inferred from what already exists on disk
    # pat: ossep
    # dest ossep
    # srcs: list of (hgsep, hgsep, ossep, bool)
    # return: function that takes hgsep and returns ossep
    def targetpathafterfn(pat, dest, srcs):
        if matchmod.patkind(pat):
            # a mercurial pattern
            res = lambda p: os.path.join(dest,
                                         os.path.basename(util.localpath(p)))
        else:
            abspfx = scmutil.canonpath(repo.root, cwd, pat)
            if len(abspfx) < len(srcs[0][0]):
                # A directory. Either the target path contains the last
                # component of the source path or it does not.
                def evalpath(striplen):
                    # count how many sources already exist under dest with
                    # this amount of prefix stripped
                    score = 0
                    for s in srcs:
                        t = os.path.join(dest, util.localpath(s[0])[striplen:])
                        if os.path.lexists(t):
                            score += 1
                    return score

                abspfx = util.localpath(abspfx)
                striplen = len(abspfx)
                if striplen:
                    striplen += len(os.sep)
                if os.path.isdir(os.path.join(dest, os.path.split(abspfx)[1])):
                    # pick whichever interpretation matches more files
                    score = evalpath(striplen)
                    striplen1 = len(os.path.split(abspfx)[0])
                    if striplen1:
                        striplen1 += len(os.sep)
                    if evalpath(striplen1) > score:
                        striplen = striplen1
                res = lambda p: os.path.join(dest,
                                             util.localpath(p)[striplen:])
            else:
                # a file
                if destdirexists:
                    res = lambda p: os.path.join(dest,
                                        os.path.basename(util.localpath(p)))
                else:
                    res = lambda p: dest
        return res


    # validate the pattern list: need at least one source and a destination
    pats = scmutil.expandpats(pats)
    if not pats:
        raise util.Abort(_('no source or destination specified'))
    if len(pats) == 1:
        raise util.Abort(_('no destination specified'))
    dest = pats.pop()
    destdirexists = os.path.isdir(dest) and not os.path.islink(dest)
    if not destdirexists:
        if len(pats) > 1 or matchmod.patkind(pats[0]):
            raise util.Abort(_('with multiple sources, destination must be an '
                               'existing directory'))
        if util.endswithsep(dest):
            raise util.Abort(_('destination %s is not a directory') % dest)

    tfn = targetpathfn
    if after:
        tfn = targetpathafterfn
    copylist = []
    for pat in pats:
        srcs = walkpat(pat)
        if not srcs:
            continue
        copylist.append((tfn(pat, dest, srcs), srcs))
    if not copylist:
        raise util.Abort(_('no files to copy'))

    errors = 0
    for targetpath, srcs in copylist:
        for abssrc, relsrc, exact in srcs:
            if copyfile(abssrc, relsrc, targetpath(abssrc), exact):
                errors += 1

    if errors:
        ui.warn(_('(consider using --after)\n'))

    return errors != 0
460
460
def service(opts, parentfn=None, initfn=None, runfn=None, logfile=None,
            runargs=None, appendpid=False):
    '''Run a command as a service.

    With opts['daemon'] set (and no 'daemon_pipefds' yet), re-run the
    current command detached, passing --daemon-pipefds, and wait for the
    child to signal startup by removing a lock file; parentfn, if given,
    is then called with the child's pid.  In the detached child
    (opts['daemon_pipefds'] set), stdio is redirected to logfile or the
    null device before runfn() is invoked.
    '''

    if opts['daemon'] and not opts['daemon_pipefds']:
        # Signal child process startup with file removal
        lockfd, lockpath = tempfile.mkstemp(prefix='hg-service-')
        os.close(lockfd)
        try:
            if not runargs:
                runargs = util.hgcmd() + sys.argv[1:]
            runargs.append('--daemon-pipefds=%s' % lockpath)
            # Don't pass --cwd to the child process, because we've already
            # changed directory.
            for i in xrange(1, len(runargs)):
                if runargs[i].startswith('--cwd='):
                    del runargs[i]
                    break
                elif runargs[i].startswith('--cwd'):
                    # separate-argument form: drop the flag and its value
                    del runargs[i:i + 2]
                    break
            def condfn():
                # the child is up once it has unlinked the lock file
                return not os.path.exists(lockpath)
            pid = util.rundetached(runargs, condfn)
            if pid < 0:
                raise util.Abort(_('child process failed to start'))
        finally:
            try:
                os.unlink(lockpath)
            except OSError, e:
                # the child normally removed it already; only re-raise
                # unexpected failures
                if e.errno != errno.ENOENT:
                    raise
        if parentfn:
            return parentfn(pid)
        else:
            return

    if initfn:
        initfn()

    if opts['pid_file']:
        # record our pid; append rather than overwrite when requested
        mode = appendpid and 'a' or 'w'
        fp = open(opts['pid_file'], mode)
        fp.write(str(os.getpid()) + '\n')
        fp.close()

    if opts['daemon_pipefds']:
        # we are the detached child: start a new session and tell the
        # waiting parent we are up by removing the lock file
        lockpath = opts['daemon_pipefds']
        try:
            os.setsid()
        except AttributeError:
            # setsid is not available everywhere (e.g. Windows)
            pass
        os.unlink(lockpath)
        util.hidewindow()
        sys.stdout.flush()
        sys.stderr.flush()

        # redirect stdin to the null device and stdout/stderr to the log
        # file (or the null device when no logfile was given)
        nullfd = os.open(util.nulldev, os.O_RDWR)
        logfilefd = nullfd
        if logfile:
            logfilefd = os.open(logfile, os.O_RDWR | os.O_CREAT | os.O_APPEND)
        os.dup2(nullfd, 0)
        os.dup2(logfilefd, 1)
        os.dup2(logfilefd, 2)
        # close the originals unless they already landed on fds 0-2
        if nullfd not in (0, 1, 2):
            os.close(nullfd)
        if logfile and logfilefd not in (0, 1, 2):
            os.close(logfilefd)

    if runfn:
        return runfn()
532
532
def export(repo, revs, template='hg-%h.patch', fp=None, switch_parent=False,
           opts=None):
    '''export changesets as hg patches.

    Writes one patch per revision in revs, either to the file-like fp or
    (when fp is None) to a per-revision file object built from template
    via makefileobj().  switch_parent diffs a merge against its second
    parent instead of the first; opts is passed through to patch.diff().
    '''

    # Guard the empty case explicitly: max() below raises ValueError on
    # an empty sequence, and there is nothing to export anyway.
    if not revs:
        return

    total = len(revs)
    revwidth = max([len(str(rev)) for rev in revs])

    def single(rev, seqno, fp):
        # emit one changeset as patch number seqno out of total
        ctx = repo[rev]
        node = ctx.node()
        parents = [p.node() for p in ctx.parents() if p]
        branch = ctx.branch()
        if switch_parent:
            parents.reverse()
        prev = (parents and parents[0]) or nullid

        shouldclose = False
        if not fp:
            desc_lines = ctx.description().rstrip().split('\n')
            desc = desc_lines[0] #Commit always has a first line.
            fp = makefileobj(repo, template, node, desc=desc, total=total,
                             seqno=seqno, revwidth=revwidth, mode='ab')
            # only close files we opened ourselves here
            if fp != template:
                shouldclose = True
            if fp != sys.stdout and util.safehasattr(fp, 'name'):
                repo.ui.note("%s\n" % fp.name)

        # patch header: metadata lines understood by "hg import"
        fp.write("# HG changeset patch\n")
        fp.write("# User %s\n" % ctx.user())
        fp.write("# Date %d %d\n" % ctx.date())
        if branch and branch != 'default':
            fp.write("# Branch %s\n" % branch)
        fp.write("# Node ID %s\n" % hex(node))
        fp.write("# Parent %s\n" % hex(prev))
        if len(parents) > 1:
            fp.write("# Parent %s\n" % hex(parents[1]))
        fp.write(ctx.description().rstrip())
        fp.write("\n\n")

        for chunk in patch.diff(repo, prev, node, opts=opts):
            fp.write(chunk)

        if shouldclose:
            fp.close()

    for seqno, rev in enumerate(revs):
        single(rev, seqno + 1, fp)
580
580
def diffordiffstat(ui, repo, diffopts, node1, node2, match,
                   changes=None, stat=False, fp=None, prefix='',
                   listsubrepos=False):
    '''show diff or diffstat.'''
    # write through the ui (which understands labels) unless an explicit
    # fp was given; the fp wrapper deliberately ignores label keywords
    if fp is None:
        write = ui.write
    else:
        def write(s, **kw):
            fp.write(s)

    if stat:
        # context lines don't show up in diffstat output, so drop them
        diffopts = diffopts.copy(context=0)
        width = 80
        if not ui.plain():
            width = ui.termwidth()
        chunks = patch.diff(repo, node1, node2, match, changes, diffopts,
                            prefix=prefix)
        for chunk, label in patch.diffstatui(util.iterlines(chunks),
                                             width=width,
                                             git=diffopts.git):
            write(chunk, label=label)
    else:
        for chunk, label in patch.diffui(repo, node1, node2, match,
                                         changes, diffopts, prefix=prefix):
            write(chunk, label=label)

    if listsubrepos:
        # recurse into each subrepo present in either context
        ctx1 = repo[node1]
        ctx2 = repo[node2]
        for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
            tempnode2 = node2
            try:
                if node2 is not None:
                    tempnode2 = ctx2.substate[subpath][1]
            except KeyError:
                # A subrepo that existed in node1 was deleted between node1 and
                # node2 (inclusive). Thus, ctx2's substate won't contain that
                # subpath. The best we can do is to ignore it.
                tempnode2 = None
            submatch = matchmod.narrowmatcher(subpath, match)
            sub.diff(diffopts, tempnode2, submatch, changes=changes,
                     stat=stat, fp=fp, prefix=prefix)
623
623
624 class changeset_printer(object):
624 class changeset_printer(object):
625 '''show changeset information when templating not requested.'''
625 '''show changeset information when templating not requested.'''
626
626
627 def __init__(self, ui, repo, patch, diffopts, buffered):
627 def __init__(self, ui, repo, patch, diffopts, buffered):
628 self.ui = ui
628 self.ui = ui
629 self.repo = repo
629 self.repo = repo
630 self.buffered = buffered
630 self.buffered = buffered
631 self.patch = patch
631 self.patch = patch
632 self.diffopts = diffopts
632 self.diffopts = diffopts
633 self.header = {}
633 self.header = {}
634 self.hunk = {}
634 self.hunk = {}
635 self.lastheader = None
635 self.lastheader = None
636 self.footer = None
636 self.footer = None
637
637
638 def flush(self, rev):
638 def flush(self, rev):
639 if rev in self.header:
639 if rev in self.header:
640 h = self.header[rev]
640 h = self.header[rev]
641 if h != self.lastheader:
641 if h != self.lastheader:
642 self.lastheader = h
642 self.lastheader = h
643 self.ui.write(h)
643 self.ui.write(h)
644 del self.header[rev]
644 del self.header[rev]
645 if rev in self.hunk:
645 if rev in self.hunk:
646 self.ui.write(self.hunk[rev])
646 self.ui.write(self.hunk[rev])
647 del self.hunk[rev]
647 del self.hunk[rev]
648 return 1
648 return 1
649 return 0
649 return 0
650
650
651 def close(self):
651 def close(self):
652 if self.footer:
652 if self.footer:
653 self.ui.write(self.footer)
653 self.ui.write(self.footer)
654
654
655 def show(self, ctx, copies=None, matchfn=None, **props):
655 def show(self, ctx, copies=None, matchfn=None, **props):
656 if self.buffered:
656 if self.buffered:
657 self.ui.pushbuffer()
657 self.ui.pushbuffer()
658 self._show(ctx, copies, matchfn, props)
658 self._show(ctx, copies, matchfn, props)
659 self.hunk[ctx.rev()] = self.ui.popbuffer(labeled=True)
659 self.hunk[ctx.rev()] = self.ui.popbuffer(labeled=True)
660 else:
660 else:
661 self._show(ctx, copies, matchfn, props)
661 self._show(ctx, copies, matchfn, props)
662
662
    def _show(self, ctx, copies, matchfn, props):
        '''show a single changeset or file revision'''
        changenode = ctx.node()
        rev = ctx.rev()

        if self.ui.quiet:
            # quiet mode: nothing but "rev:shortnode"
            self.ui.write("%d:%s\n" % (rev, short(changenode)),
                          label='log.node')
            return

        log = self.repo.changelog
        date = util.datestr(ctx.date())

        # full node hashes in debug mode (hex), abbreviated ones otherwise
        hexfunc = self.ui.debugflag and hex or short

        parents = [(p, hexfunc(log.node(p)))
                   for p in self._meaningful_parentrevs(log, rev)]

        self.ui.write(_("changeset: %d:%s\n") % (rev, hexfunc(changenode)),
                      label='log.changeset')

        branch = ctx.branch()
        # don't show the default branch name
        if branch != 'default':
            self.ui.write(_("branch: %s\n") % branch,
                          label='log.branch')
        for bookmark in self.repo.nodebookmarks(changenode):
            self.ui.write(_("bookmark: %s\n") % bookmark,
                          label='log.bookmark')
        for tag in self.repo.nodetags(changenode):
            self.ui.write(_("tag: %s\n") % tag,
                          label='log.tag')
        # only non-default phases (truthy ctx.phase()) are shown, and only
        # when debugging
        if self.ui.debugflag and ctx.phase():
            self.ui.write(_("phase: %s\n") % _(ctx.phasestr()),
                          label='log.phase')
        for parent in parents:
            self.ui.write(_("parent: %d:%s\n") % parent,
                          label='log.parent')

        if self.ui.debugflag:
            mnode = ctx.manifestnode()
            self.ui.write(_("manifest: %d:%s\n") %
                          (self.repo.manifest.rev(mnode), hex(mnode)),
                          label='ui.debug log.manifest')
        self.ui.write(_("user: %s\n") % ctx.user(),
                      label='log.user')
        self.ui.write(_("date: %s\n") % date,
                      label='log.date')

        if self.ui.debugflag:
            # debug: list modified/added/removed files in separate rows
            files = self.repo.status(log.parents(changenode)[0], changenode)[:3]
            for key, value in zip([_("files:"), _("files+:"), _("files-:")],
                                  files):
                if value:
                    self.ui.write("%-12s %s\n" % (key, " ".join(value)),
                                  label='ui.debug log.files')
        elif ctx.files() and self.ui.verbose:
            self.ui.write(_("files: %s\n") % " ".join(ctx.files()),
                          label='ui.note log.files')
        if copies and self.ui.verbose:
            # copies is a sequence of (dest, source) pairs — NOTE(review):
            # inferred from the '%s (%s)' rendering; confirm against callers
            copies = ['%s (%s)' % c for c in copies]
            self.ui.write(_("copies: %s\n") % ' '.join(copies),
                          label='ui.note log.copies')

        extra = ctx.extra()
        if extra and self.ui.debugflag:
            for key, value in sorted(extra.items()):
                self.ui.write(_("extra: %s=%s\n")
                              % (key, value.encode('string_escape')),
                              label='ui.debug log.extra')

        description = ctx.description().strip()
        if description:
            if self.ui.verbose:
                # verbose: the whole description
                self.ui.write(_("description:\n"),
                              label='ui.note log.description')
                self.ui.write(description,
                              label='ui.note log.description')
                self.ui.write("\n\n")
            else:
                # normal: first line only
                self.ui.write(_("summary: %s\n") %
                              description.splitlines()[0],
                              label='log.summary')
        self.ui.write("\n")

        self.showpatch(changenode, matchfn)
749
749
750 def showpatch(self, node, matchfn):
750 def showpatch(self, node, matchfn):
751 if not matchfn:
751 if not matchfn:
752 matchfn = self.patch
752 matchfn = self.patch
753 if matchfn:
753 if matchfn:
754 stat = self.diffopts.get('stat')
754 stat = self.diffopts.get('stat')
755 diff = self.diffopts.get('patch')
755 diff = self.diffopts.get('patch')
756 diffopts = patch.diffopts(self.ui, self.diffopts)
756 diffopts = patch.diffopts(self.ui, self.diffopts)
757 prev = self.repo.changelog.parents(node)[0]
757 prev = self.repo.changelog.parents(node)[0]
758 if stat:
758 if stat:
759 diffordiffstat(self.ui, self.repo, diffopts, prev, node,
759 diffordiffstat(self.ui, self.repo, diffopts, prev, node,
760 match=matchfn, stat=True)
760 match=matchfn, stat=True)
761 if diff:
761 if diff:
762 if stat:
762 if stat:
763 self.ui.write("\n")
763 self.ui.write("\n")
764 diffordiffstat(self.ui, self.repo, diffopts, prev, node,
764 diffordiffstat(self.ui, self.repo, diffopts, prev, node,
765 match=matchfn, stat=False)
765 match=matchfn, stat=False)
766 self.ui.write("\n")
766 self.ui.write("\n")
767
767
768 def _meaningful_parentrevs(self, log, rev):
768 def _meaningful_parentrevs(self, log, rev):
769 """Return list of meaningful (or all if debug) parentrevs for rev.
769 """Return list of meaningful (or all if debug) parentrevs for rev.
770
770
771 For merges (two non-nullrev revisions) both parents are meaningful.
771 For merges (two non-nullrev revisions) both parents are meaningful.
772 Otherwise the first parent revision is considered meaningful if it
772 Otherwise the first parent revision is considered meaningful if it
773 is not the preceding revision.
773 is not the preceding revision.
774 """
774 """
775 parents = log.parentrevs(rev)
775 parents = log.parentrevs(rev)
776 if not self.ui.debugflag and parents[1] == nullrev:
776 if not self.ui.debugflag and parents[1] == nullrev:
777 if parents[0] >= rev - 1:
777 if parents[0] >= rev - 1:
778 parents = []
778 parents = []
779 else:
779 else:
780 parents = [parents[0]]
780 parents = [parents[0]]
781 return parents
781 return parents
782
782
783
783
class changeset_templater(changeset_printer):
    '''format changeset information.'''

    def __init__(self, ui, repo, patch, diffopts, mapfile, buffered):
        """Initialize the templated printer on top of changeset_printer.

        mapfile is a template map file path (or None); buffered selects
        deferred output via the inherited buffering machinery.
        """
        changeset_printer.__init__(self, ui, repo, patch, diffopts, buffered)
        # full node hash in debug mode, 12-char short form otherwise
        formatnode = ui.debugflag and (lambda x: x) or (lambda x: x[:12])
        # built-in fallback templates used when the map file defines none
        defaulttempl = {
            'parent': '{rev}:{node|formatnode} ',
            'manifest': '{rev}:{node|formatnode}',
            'file_copy': '{name} ({source})',
            'extra': '{key}={value|stringescape}'
            }
        # filecopy is preserved for compatibility reasons
        defaulttempl['filecopy'] = defaulttempl['file_copy']
        self.t = templater.templater(mapfile, {'formatnode': formatnode},
                                     cache=defaulttempl)
        # per-displayer cache shared with template keywords via props
        self.cache = {}

    def use_template(self, t):
        '''set template string to use'''
        self.t.cache['changeset'] = t

    def _meaningful_parentrevs(self, ctx):
        """Return list of meaningful (or all if debug) parentrevs for rev.
        """
        parents = ctx.parents()
        if len(parents) > 1:
            return parents
        if self.ui.debugflag:
            # debug: always show two parents, padding with the null rev
            return [parents[0], self.repo['null']]
        if parents[0].rev() >= ctx.rev() - 1:
            # sole parent is the preceding revision: not meaningful
            return []
        return parents

    def _show(self, ctx, copies, matchfn, props):
        '''show a single changeset or file revision'''

        showlist = templatekw.showlist

        # showparents() behaviour depends on ui trace level which
        # causes unexpected behaviours at templating level and makes
        # it harder to extract it in a standalone function. Its
        # behaviour cannot be changed so leave it here for now.
        def showparents(**args):
            ctx = args['ctx']
            parents = [[('rev', p.rev()), ('node', p.hex())]
                       for p in self._meaningful_parentrevs(ctx)]
            return showlist('parent', parents, **args)

        props = props.copy()
        props.update(templatekw.keywords)
        props['parents'] = showparents
        props['templ'] = self.t
        props['ctx'] = ctx
        props['repo'] = self.repo
        props['revcache'] = {'copies': copies}
        props['cache'] = self.cache

        # find correct templates for current mode

        # ordered so that later (more specific) modes override earlier ones;
        # the unconditional (True, None) entry supplies the base names
        tmplmodes = [
            (True, None),
            (self.ui.verbose, 'verbose'),
            (self.ui.quiet, 'quiet'),
            (self.ui.debugflag, 'debug'),
            ]

        types = {'header': '', 'footer':'', 'changeset': 'changeset'}
        for mode, postfix in tmplmodes:
            for type in types:
                # e.g. 'changeset_debug' when postfix is set
                cur = postfix and ('%s_%s' % (type, postfix)) or type
                if mode and cur in self.t:
                    types[type] = cur

        try:

            # write header
            if types['header']:
                h = templater.stringify(self.t(types['header'], **props))
                if self.buffered:
                    self.header[ctx.rev()] = h
                else:
                    # unbuffered: emit the header only when it changes
                    if self.lastheader != h:
                        self.lastheader = h
                        self.ui.write(h)

            # write changeset metadata, then patch if requested
            key = types['changeset']
            self.ui.write(templater.stringify(self.t(key, **props)))
            self.showpatch(ctx.node(), matchfn)

            if types['footer']:
                # footer is rendered once and flushed later by the caller
                if not self.footer:
                    self.footer = templater.stringify(self.t(types['footer'],
                                                             **props))

        except KeyError, inst:
            # a template referenced an unknown key in the map file
            msg = _("%s: no key named '%s'")
            raise util.Abort(msg % (self.t.mapfile, inst.args[0]))
        except SyntaxError, inst:
            raise util.Abort('%s: %s' % (self.t.mapfile, inst.args[0]))
885
885
886 def show_changeset(ui, repo, opts, buffered=False):
886 def show_changeset(ui, repo, opts, buffered=False):
887 """show one changeset using template or regular display.
887 """show one changeset using template or regular display.
888
888
889 Display format will be the first non-empty hit of:
889 Display format will be the first non-empty hit of:
890 1. option 'template'
890 1. option 'template'
891 2. option 'style'
891 2. option 'style'
892 3. [ui] setting 'logtemplate'
892 3. [ui] setting 'logtemplate'
893 4. [ui] setting 'style'
893 4. [ui] setting 'style'
894 If all of these values are either the unset or the empty string,
894 If all of these values are either the unset or the empty string,
895 regular display via changeset_printer() is done.
895 regular display via changeset_printer() is done.
896 """
896 """
897 # options
897 # options
898 patch = False
898 patch = False
899 if opts.get('patch') or opts.get('stat'):
899 if opts.get('patch') or opts.get('stat'):
900 patch = scmutil.matchall(repo)
900 patch = scmutil.matchall(repo)
901
901
902 tmpl = opts.get('template')
902 tmpl = opts.get('template')
903 style = None
903 style = None
904 if tmpl:
904 if tmpl:
905 tmpl = templater.parsestring(tmpl, quoted=False)
905 tmpl = templater.parsestring(tmpl, quoted=False)
906 else:
906 else:
907 style = opts.get('style')
907 style = opts.get('style')
908
908
909 # ui settings
909 # ui settings
910 if not (tmpl or style):
910 if not (tmpl or style):
911 tmpl = ui.config('ui', 'logtemplate')
911 tmpl = ui.config('ui', 'logtemplate')
912 if tmpl:
912 if tmpl:
913 tmpl = templater.parsestring(tmpl)
913 tmpl = templater.parsestring(tmpl)
914 else:
914 else:
915 style = util.expandpath(ui.config('ui', 'style', ''))
915 style = util.expandpath(ui.config('ui', 'style', ''))
916
916
917 if not (tmpl or style):
917 if not (tmpl or style):
918 return changeset_printer(ui, repo, patch, opts, buffered)
918 return changeset_printer(ui, repo, patch, opts, buffered)
919
919
920 mapfile = None
920 mapfile = None
921 if style and not tmpl:
921 if style and not tmpl:
922 mapfile = style
922 mapfile = style
923 if not os.path.split(mapfile)[0]:
923 if not os.path.split(mapfile)[0]:
924 mapname = (templater.templatepath('map-cmdline.' + mapfile)
924 mapname = (templater.templatepath('map-cmdline.' + mapfile)
925 or templater.templatepath(mapfile))
925 or templater.templatepath(mapfile))
926 if mapname:
926 if mapname:
927 mapfile = mapname
927 mapfile = mapname
928
928
929 try:
929 try:
930 t = changeset_templater(ui, repo, patch, opts, mapfile, buffered)
930 t = changeset_templater(ui, repo, patch, opts, mapfile, buffered)
931 except SyntaxError, inst:
931 except SyntaxError, inst:
932 raise util.Abort(inst.args[0])
932 raise util.Abort(inst.args[0])
933 if tmpl:
933 if tmpl:
934 t.use_template(tmpl)
934 t.use_template(tmpl)
935 return t
935 return t
936
936
def finddate(ui, repo, date):
    """Find the tipmost changeset that matches the given date spec"""

    datematch = util.matchdate(date)
    allfiles = scmutil.matchall(repo)
    # rev -> date tuple for every revision whose date matched
    hits = {}

    def prep(ctx, fns):
        when = ctx.date()
        if datematch(when[0]):
            hits[ctx.rev()] = when

    # walkchangerevs calls prep() on each context before yielding it,
    # so hits is populated by the time the context comes back to us
    for ctx in walkchangerevs(repo, allfiles, {'rev': None}, prep):
        rev = ctx.rev()
        if rev in hits:
            ui.status(_("Found revision %s from %s\n") %
                      (rev, util.datestr(hits[rev])))
            return str(rev)

    raise util.Abort(_("revision matching date not found"))
957
957
def walkchangerevs(repo, match, opts, prepare):
    '''Iterate over files and the revs in which they changed.

    Callers most commonly need to iterate backwards over the history
    in which they are interested. Doing so has awful (quadratic-looking)
    performance, so we use iterators in a "windowed" way.

    We walk a window of revisions in the desired order. Within the
    window, we first walk forwards to gather data, then in the desired
    order (usually backwards) to display it.

    This function returns an iterator yielding contexts. Before
    yielding each context, the iterator will first call the prepare
    function on each context in the window in forward order.'''

    def increasing_windows(start, end, windowsize=8, sizelimit=512):
        # Yield (start, length) chunks covering the range from start
        # towards end (either direction), doubling the chunk size each
        # step up to sizelimit.
        if start < end:
            while start < end:
                yield start, min(windowsize, end - start)
                start += windowsize
                if windowsize < sizelimit:
                    windowsize *= 2
        else:
            while start > end:
                yield start, min(windowsize, start - end - 1)
                start -= windowsize
                if windowsize < sizelimit:
                    windowsize *= 2

    follow = opts.get('follow') or opts.get('follow_first')

    if not len(repo):
        # empty repository: nothing to walk
        return []

    # default revision range: working parent back to 0 when following,
    # tip back to 0 otherwise
    if follow:
        defrange = '%s:0' % repo['.'].rev()
    else:
        defrange = '-1:0'
    revs = scmutil.revrange(repo, opts['rev'] or [defrange])
    if not revs:
        return []
    wanted = set()
    # slowpath: patterns (or --removed) force scanning every changeset
    slowpath = match.anypats() or (match.files() and opts.get('removed'))
    fncache = {}
    change = repo.changectx

    # First step is to fill wanted, the set of revisions that we want to yield.
    # When it does not induce extra cost, we also fill fncache for revisions in
    # wanted: a cache of filenames that were changed (ctx.files()) and that
    # match the file filtering conditions.

    if not slowpath and not match.files():
        # No files, no patterns.  Display all revs.
        wanted = set(revs)
    copies = []

    if not slowpath and match.files():
        # We only have to read through the filelog to find wanted revisions

        minrev, maxrev = min(revs), max(revs)
        def filerevgen(filelog, last):
            """
            Only files, no patterns.  Check the history of each file.

            Examines filelog entries within minrev, maxrev linkrev range
            Returns an iterator yielding (linkrev, parentlinkrevs, copied)
            tuples in backwards order
            """
            cl_count = len(repo)
            revs = []
            for j in xrange(0, last + 1):
                linkrev = filelog.linkrev(j)
                if linkrev < minrev:
                    continue
                # only yield rev for which we have the changelog, it can
                # happen while doing "hg log" during a pull or commit
                if linkrev >= cl_count:
                    break

                parentlinkrevs = []
                for p in filelog.parentrevs(j):
                    if p != nullrev:
                        parentlinkrevs.append(filelog.linkrev(p))
                n = filelog.node(j)
                revs.append((linkrev, parentlinkrevs,
                             follow and filelog.renamed(n)))

            return reversed(revs)
        def iterfiles():
            # yield (filename, filenode-or-None) for each explicit file,
            # plus any copy sources discovered while walking
            pctx = repo['.']
            for filename in match.files():
                if follow:
                    if filename not in pctx:
                        raise util.Abort(_('cannot follow file not in parent '
                                           'revision: "%s"') % filename)
                    yield filename, pctx[filename].filenode()
                else:
                    yield filename, None
            for filename_node in copies:
                yield filename_node
        for file_, node in iterfiles():
            filelog = repo.file(file_)
            if not len(filelog):
                if node is None:
                    # A zero count may be a directory or deleted file, so
                    # try to find matching entries on the slow path.
                    if follow:
                        raise util.Abort(
                            _('cannot follow nonexistent file: "%s"') % file_)
                    slowpath = True
                    break
                else:
                    continue

            # start from the given filenode, or the file's tip otherwise
            if node is None:
                last = len(filelog) - 1
            else:
                last = filelog.rev(node)


            # keep track of all ancestors of the file
            ancestors = set([filelog.linkrev(last)])

            # iterate from latest to oldest revision
            for rev, flparentlinkrevs, copied in filerevgen(filelog, last):
                if not follow:
                    if rev > maxrev:
                        continue
                else:
                    # Note that last might not be the first interesting
                    # rev to us:
                    # if the file has been changed after maxrev, we'll
                    # have linkrev(last) > maxrev, and we still need
                    # to explore the file graph
                    if rev not in ancestors:
                        continue
                    # XXX insert 1327 fix here
                    if flparentlinkrevs:
                        ancestors.update(flparentlinkrevs)

                fncache.setdefault(rev, []).append(file_)
                wanted.add(rev)
                if copied:
                    copies.append(copied)
    if slowpath:
        # We have to read the changelog to match filenames against
        # changed files

        if follow:
            raise util.Abort(_('can only follow copies/renames for explicit '
                               'filenames'))

        # The slow path checks files modified in every changeset.
        for i in sorted(revs):
            ctx = change(i)
            matches = filter(match, ctx.files())
            if matches:
                fncache[i] = matches
                wanted.add(i)

    class followfilter(object):
        # Stateful filter accepting revisions connected (as ancestor or
        # descendant, per iteration direction) to the first rev it sees.
        def __init__(self, onlyfirst=False):
            self.startrev = nullrev
            self.roots = set()
            self.onlyfirst = onlyfirst

        def match(self, rev):
            def realparents(rev):
                if self.onlyfirst:
                    return repo.changelog.parentrevs(rev)[0:1]
                else:
                    return filter(lambda x: x != nullrev,
                                  repo.changelog.parentrevs(rev))

            if self.startrev == nullrev:
                # first call anchors the filter at this revision
                self.startrev = rev
                return True

            if rev > self.startrev:
                # forward: all descendants
                if not self.roots:
                    self.roots.add(self.startrev)
                for parent in realparents(rev):
                    if parent in self.roots:
                        self.roots.add(rev)
                        return True
            else:
                # backwards: all parents
                if not self.roots:
                    self.roots.update(realparents(self.startrev))
                if rev in self.roots:
                    self.roots.remove(rev)
                    self.roots.update(realparents(rev))
                    return True

            return False

    # it might be worthwhile to do this in the iterator if the rev range
    # is descending and the prune args are all within that range
    for rev in opts.get('prune', ()):
        rev = repo[rev].rev()
        ff = followfilter()
        stop = min(revs[0], revs[-1])
        for x in xrange(rev, stop - 1, -1):
            if ff.match(x):
                wanted.discard(x)

    # Now that wanted is correctly initialized, we can iterate over the
    # revision range, yielding only revisions in wanted.
    def iterate():
        if follow and not match.files():
            ff = followfilter(onlyfirst=opts.get('follow_first'))
            def want(rev):
                return ff.match(rev) and rev in wanted
        else:
            def want(rev):
                return rev in wanted

        for i, window in increasing_windows(0, len(revs)):
            nrevs = [rev for rev in revs[i:i + window] if want(rev)]
            # prepare() is called in ascending order within the window,
            # then contexts are yielded in the caller's requested order
            for rev in sorted(nrevs):
                fns = fncache.get(rev)
                ctx = change(rev)
                if not fns:
                    def fns_generator():
                        for f in ctx.files():
                            if match(f):
                                yield f
                    fns = fns_generator()
                prepare(ctx, fns)
            for rev in nrevs:
                yield change(rev)
    return iterate()
1191
1191
def add(ui, repo, match, dryrun, listsubrepos, prefix, explicitonly):
    """Schedule files matched by match for addition to the working context.

    prefix is prepended when reporting paths; explicitonly restricts the
    walk to exactly-named files. Returns the list of bad file names
    (rejected by the working context or flagged by the matcher).
    """
    def join(f):
        return os.path.join(prefix, f)

    bad = []
    origbad = match.bad
    # record names the matcher rejects while preserving its old callback
    def badfn(f, msg):
        bad.append(f)
        return origbad(f, msg)
    match.bad = badfn
    names = []
    wctx = repo[None]
    cca = None
    abort, warn = scmutil.checkportabilityalert(ui)
    if abort or warn:
        cca = scmutil.casecollisionauditor(ui, abort, wctx)
    for f in repo.walk(match):
        exact = match.exact(f)
        # take exactly-named files always; others only when not in
        # explicit-only mode and not already tracked
        if not exact and (explicitonly or f in repo.dirstate):
            continue
        if cca:
            cca(f)
        names.append(f)
        if ui.verbose or not exact:
            ui.status(_('adding %s\n') % match.rel(join(f)))

    for subpath in wctx.substate:
        sub = wctx.sub(subpath)
        try:
            submatch = matchmod.narrowmatcher(subpath, match)
            # recurse explicit-only into subrepos unless they were
            # requested via listsubrepos
            bad.extend(sub.add(ui, submatch, dryrun, listsubrepos, prefix,
                               not listsubrepos))
        except error.LookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % join(subpath))

    if not dryrun:
        rejected = wctx.add(names, prefix)
        bad.extend(f for f in rejected if f in match.files())
    return bad
1230
1230
def forget(ui, repo, match, prefix, explicitonly):
    """Stop tracking files matched by match, without deleting them.

    Returns a (bad, forgot) pair: names that could not be forgotten and
    names that were (including subrepo paths prefixed with 'sub/').
    """
    def join(f):
        return os.path.join(prefix, f)

    bad = []
    origbad = match.bad
    # record names the matcher rejects while preserving its old callback
    def badfn(f, msg):
        bad.append(f)
        return origbad(f, msg)
    match.bad = badfn
    wctx = repo[None]
    forgot = []
    st = repo.status(match=match, clean=True)
    # candidates: modified, added, deleted and clean tracked files
    candidates = sorted(st[0] + st[1] + st[3] + st[6])
    if explicitonly:
        candidates = [f for f in candidates if match.exact(f)]

    for subpath in wctx.substate:
        sub = wctx.sub(subpath)
        try:
            submatch = matchmod.narrowmatcher(subpath, match)
            subbad, subforgot = sub.forget(ui, submatch, prefix)
            bad.extend(subpath + '/' + f for f in subbad)
            forgot.extend(subpath + '/' + f for f in subforgot)
        except error.LookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % join(subpath))

    if not explicitonly:
        # explicitly named but untracked paths are reported as bad
        for f in match.files():
            if f in repo.dirstate:
                continue
            rel = match.rel(join(f))
            if os.path.isdir(rel):
                continue
            if f in forgot:
                continue
            if os.path.exists(rel):
                ui.warn(_('not removing %s: '
                          'file is already untracked\n')
                        % rel)
            bad.append(f)

    for f in candidates:
        if ui.verbose or not match.exact(f):
            ui.status(_('removing %s\n') % match.rel(join(f)))

    rejected = wctx.forget(candidates, prefix)
    bad.extend(f for f in rejected if f in match.files())
    forgot.extend(candidates)
    return bad, forgot
1271 return bad, forgot
1272
1272
1273 def duplicatecopies(repo, rev, p1):
1273 def duplicatecopies(repo, rev, p1):
1274 "Reproduce copies found in the source revision in the dirstate for grafts"
1274 "Reproduce copies found in the source revision in the dirstate for grafts"
1275 for dst, src in copies.pathcopies(repo[p1], repo[rev]).iteritems():
1275 for dst, src in copies.pathcopies(repo[p1], repo[rev]).iteritems():
1276 repo.dirstate.copy(src, dst)
1276 repo.dirstate.copy(src, dst)
1277
1277
1278 def commit(ui, repo, commitfunc, pats, opts):
1278 def commit(ui, repo, commitfunc, pats, opts):
1279 '''commit the specified files or all outstanding changes'''
1279 '''commit the specified files or all outstanding changes'''
1280 date = opts.get('date')
1280 date = opts.get('date')
1281 if date:
1281 if date:
1282 opts['date'] = util.parsedate(date)
1282 opts['date'] = util.parsedate(date)
1283 message = logmessage(ui, opts)
1283 message = logmessage(ui, opts)
1284
1284
1285 # extract addremove carefully -- this function can be called from a command
1285 # extract addremove carefully -- this function can be called from a command
1286 # that doesn't support addremove
1286 # that doesn't support addremove
1287 if opts.get('addremove'):
1287 if opts.get('addremove'):
1288 scmutil.addremove(repo, pats, opts)
1288 scmutil.addremove(repo, pats, opts)
1289
1289
1290 return commitfunc(ui, repo, message,
1290 return commitfunc(ui, repo, message,
1291 scmutil.match(repo[None], pats, opts), opts)
1291 scmutil.match(repo[None], pats, opts), opts)
1292
1292
1293 def amend(ui, repo, commitfunc, old, extra, pats, opts):
1293 def amend(ui, repo, commitfunc, old, extra, pats, opts):
1294 ui.note(_('amending changeset %s\n') % old)
1294 ui.note(_('amending changeset %s\n') % old)
1295 base = old.p1()
1295 base = old.p1()
1296
1296
1297 wlock = repo.wlock()
1297 wlock = repo.wlock()
1298 try:
1298 try:
1299 # First, do a regular commit to record all changes in the working
1299 # First, do a regular commit to record all changes in the working
1300 # directory (if there are any)
1300 # directory (if there are any)
1301 node = commit(ui, repo, commitfunc, pats, opts)
1301 node = commit(ui, repo, commitfunc, pats, opts)
1302 ctx = repo[node]
1302 ctx = repo[node]
1303
1303
1304 # Participating changesets:
1304 # Participating changesets:
1305 #
1305 #
1306 # node/ctx o - new (intermediate) commit that contains changes from
1306 # node/ctx o - new (intermediate) commit that contains changes from
1307 # | working dir to go into amending commit (or a workingctx
1307 # | working dir to go into amending commit (or a workingctx
1308 # | if there were no changes)
1308 # | if there were no changes)
1309 # |
1309 # |
1310 # old o - changeset to amend
1310 # old o - changeset to amend
1311 # |
1311 # |
1312 # base o - parent of amending changeset
1312 # base o - parent of amending changeset
1313
1313
1314 # Update extra dict from amended commit (e.g. to preserve graft source)
1314 # Update extra dict from amended commit (e.g. to preserve graft source)
1315 extra.update(old.extra())
1315 extra.update(old.extra())
1316
1316
1317 # Also update it from the intermediate commit or from the wctx
1317 # Also update it from the intermediate commit or from the wctx
1318 extra.update(ctx.extra())
1318 extra.update(ctx.extra())
1319
1319
1320 files = set(old.files())
1320 files = set(old.files())
1321
1321
1322 # Second, we use either the commit we just did, or if there were no
1322 # Second, we use either the commit we just did, or if there were no
1323 # changes the parent of the working directory as the version of the
1323 # changes the parent of the working directory as the version of the
1324 # files in the final amend commit
1324 # files in the final amend commit
1325 if node:
1325 if node:
1326 ui.note(_('copying changeset %s to %s\n') % (ctx, base))
1326 ui.note(_('copying changeset %s to %s\n') % (ctx, base))
1327
1327
1328 user = ctx.user()
1328 user = ctx.user()
1329 date = ctx.date()
1329 date = ctx.date()
1330 message = ctx.description()
1330 message = ctx.description()
1331 # Recompute copies (avoid recording a -> b -> a)
1331 # Recompute copies (avoid recording a -> b -> a)
1332 copied = copies.pathcopies(base, ctx)
1332 copied = copies.pathcopies(base, ctx)
1333
1333
1334 # Prune files which were reverted by the updates: if old introduced
1334 # Prune files which were reverted by the updates: if old introduced
1335 # file X and our intermediate commit, node, renamed that file, then
1335 # file X and our intermediate commit, node, renamed that file, then
1336 # those two files are the same and we can discard X from our list
1336 # those two files are the same and we can discard X from our list
1337 # of files. Likewise if X was deleted, it's no longer relevant
1337 # of files. Likewise if X was deleted, it's no longer relevant
1338 files.update(ctx.files())
1338 files.update(ctx.files())
1339
1339
1340 def samefile(f):
1340 def samefile(f):
1341 if f in ctx.manifest():
1341 if f in ctx.manifest():
1342 a = ctx.filectx(f)
1342 a = ctx.filectx(f)
1343 if f in base.manifest():
1343 if f in base.manifest():
1344 b = base.filectx(f)
1344 b = base.filectx(f)
1345 return (a.data() == b.data()
1345 return (a.data() == b.data()
1346 and a.flags() == b.flags())
1346 and a.flags() == b.flags())
1347 else:
1347 else:
1348 return False
1348 return False
1349 else:
1349 else:
1350 return f not in base.manifest()
1350 return f not in base.manifest()
1351 files = [f for f in files if not samefile(f)]
1351 files = [f for f in files if not samefile(f)]
1352
1352
1353 def filectxfn(repo, ctx_, path):
1353 def filectxfn(repo, ctx_, path):
1354 try:
1354 try:
1355 fctx = ctx[path]
1355 fctx = ctx[path]
1356 flags = fctx.flags()
1356 flags = fctx.flags()
1357 mctx = context.memfilectx(fctx.path(), fctx.data(),
1357 mctx = context.memfilectx(fctx.path(), fctx.data(),
1358 islink='l' in flags,
1358 islink='l' in flags,
1359 isexec='x' in flags,
1359 isexec='x' in flags,
1360 copied=copied.get(path))
1360 copied=copied.get(path))
1361 return mctx
1361 return mctx
1362 except KeyError:
1362 except KeyError:
1363 raise IOError()
1363 raise IOError
1364 else:
1364 else:
1365 ui.note(_('copying changeset %s to %s\n') % (old, base))
1365 ui.note(_('copying changeset %s to %s\n') % (old, base))
1366
1366
1367 # Use version of files as in the old cset
1367 # Use version of files as in the old cset
1368 def filectxfn(repo, ctx_, path):
1368 def filectxfn(repo, ctx_, path):
1369 try:
1369 try:
1370 return old.filectx(path)
1370 return old.filectx(path)
1371 except KeyError:
1371 except KeyError:
1372 raise IOError()
1372 raise IOError
1373
1373
1374 # See if we got a message from -m or -l, if not, open the editor
1374 # See if we got a message from -m or -l, if not, open the editor
1375 # with the message of the changeset to amend
1375 # with the message of the changeset to amend
1376 user = opts.get('user') or old.user()
1376 user = opts.get('user') or old.user()
1377 date = opts.get('date') or old.date()
1377 date = opts.get('date') or old.date()
1378 message = logmessage(ui, opts)
1378 message = logmessage(ui, opts)
1379 if not message:
1379 if not message:
1380 cctx = context.workingctx(repo, old.description(), user, date,
1380 cctx = context.workingctx(repo, old.description(), user, date,
1381 extra,
1381 extra,
1382 repo.status(base.node(), old.node()))
1382 repo.status(base.node(), old.node()))
1383 message = commitforceeditor(repo, cctx, [])
1383 message = commitforceeditor(repo, cctx, [])
1384
1384
1385 new = context.memctx(repo,
1385 new = context.memctx(repo,
1386 parents=[base.node(), nullid],
1386 parents=[base.node(), nullid],
1387 text=message,
1387 text=message,
1388 files=files,
1388 files=files,
1389 filectxfn=filectxfn,
1389 filectxfn=filectxfn,
1390 user=user,
1390 user=user,
1391 date=date,
1391 date=date,
1392 extra=extra)
1392 extra=extra)
1393 newid = repo.commitctx(new)
1393 newid = repo.commitctx(new)
1394 if newid != old.node():
1394 if newid != old.node():
1395 # Reroute the working copy parent to the new changeset
1395 # Reroute the working copy parent to the new changeset
1396 repo.setparents(newid, nullid)
1396 repo.setparents(newid, nullid)
1397
1397
1398 # Move bookmarks from old parent to amend commit
1398 # Move bookmarks from old parent to amend commit
1399 bms = repo.nodebookmarks(old.node())
1399 bms = repo.nodebookmarks(old.node())
1400 if bms:
1400 if bms:
1401 for bm in bms:
1401 for bm in bms:
1402 repo._bookmarks[bm] = newid
1402 repo._bookmarks[bm] = newid
1403 bookmarks.write(repo)
1403 bookmarks.write(repo)
1404
1404
1405 # Strip the intermediate commit (if there was one) and the amended
1405 # Strip the intermediate commit (if there was one) and the amended
1406 # commit
1406 # commit
1407 lock = repo.lock()
1407 lock = repo.lock()
1408 try:
1408 try:
1409 if node:
1409 if node:
1410 ui.note(_('stripping intermediate changeset %s\n') % ctx)
1410 ui.note(_('stripping intermediate changeset %s\n') % ctx)
1411 ui.note(_('stripping amended changeset %s\n') % old)
1411 ui.note(_('stripping amended changeset %s\n') % old)
1412 repair.strip(ui, repo, old.node(), topic='amend-backup')
1412 repair.strip(ui, repo, old.node(), topic='amend-backup')
1413 finally:
1413 finally:
1414 lock.release()
1414 lock.release()
1415 finally:
1415 finally:
1416 wlock.release()
1416 wlock.release()
1417 return newid
1417 return newid
1418
1418
1419 def commiteditor(repo, ctx, subs):
1419 def commiteditor(repo, ctx, subs):
1420 if ctx.description():
1420 if ctx.description():
1421 return ctx.description()
1421 return ctx.description()
1422 return commitforceeditor(repo, ctx, subs)
1422 return commitforceeditor(repo, ctx, subs)
1423
1423
1424 def commitforceeditor(repo, ctx, subs):
1424 def commitforceeditor(repo, ctx, subs):
1425 edittext = []
1425 edittext = []
1426 modified, added, removed = ctx.modified(), ctx.added(), ctx.removed()
1426 modified, added, removed = ctx.modified(), ctx.added(), ctx.removed()
1427 if ctx.description():
1427 if ctx.description():
1428 edittext.append(ctx.description())
1428 edittext.append(ctx.description())
1429 edittext.append("")
1429 edittext.append("")
1430 edittext.append("") # Empty line between message and comments.
1430 edittext.append("") # Empty line between message and comments.
1431 edittext.append(_("HG: Enter commit message."
1431 edittext.append(_("HG: Enter commit message."
1432 " Lines beginning with 'HG:' are removed."))
1432 " Lines beginning with 'HG:' are removed."))
1433 edittext.append(_("HG: Leave message empty to abort commit."))
1433 edittext.append(_("HG: Leave message empty to abort commit."))
1434 edittext.append("HG: --")
1434 edittext.append("HG: --")
1435 edittext.append(_("HG: user: %s") % ctx.user())
1435 edittext.append(_("HG: user: %s") % ctx.user())
1436 if ctx.p2():
1436 if ctx.p2():
1437 edittext.append(_("HG: branch merge"))
1437 edittext.append(_("HG: branch merge"))
1438 if ctx.branch():
1438 if ctx.branch():
1439 edittext.append(_("HG: branch '%s'") % ctx.branch())
1439 edittext.append(_("HG: branch '%s'") % ctx.branch())
1440 edittext.extend([_("HG: subrepo %s") % s for s in subs])
1440 edittext.extend([_("HG: subrepo %s") % s for s in subs])
1441 edittext.extend([_("HG: added %s") % f for f in added])
1441 edittext.extend([_("HG: added %s") % f for f in added])
1442 edittext.extend([_("HG: changed %s") % f for f in modified])
1442 edittext.extend([_("HG: changed %s") % f for f in modified])
1443 edittext.extend([_("HG: removed %s") % f for f in removed])
1443 edittext.extend([_("HG: removed %s") % f for f in removed])
1444 if not added and not modified and not removed:
1444 if not added and not modified and not removed:
1445 edittext.append(_("HG: no files changed"))
1445 edittext.append(_("HG: no files changed"))
1446 edittext.append("")
1446 edittext.append("")
1447 # run editor in the repository root
1447 # run editor in the repository root
1448 olddir = os.getcwd()
1448 olddir = os.getcwd()
1449 os.chdir(repo.root)
1449 os.chdir(repo.root)
1450 text = repo.ui.edit("\n".join(edittext), ctx.user())
1450 text = repo.ui.edit("\n".join(edittext), ctx.user())
1451 text = re.sub("(?m)^HG:.*(\n|$)", "", text)
1451 text = re.sub("(?m)^HG:.*(\n|$)", "", text)
1452 os.chdir(olddir)
1452 os.chdir(olddir)
1453
1453
1454 if not text.strip():
1454 if not text.strip():
1455 raise util.Abort(_("empty commit message"))
1455 raise util.Abort(_("empty commit message"))
1456
1456
1457 return text
1457 return text
1458
1458
1459 def revert(ui, repo, ctx, parents, *pats, **opts):
1459 def revert(ui, repo, ctx, parents, *pats, **opts):
1460 parent, p2 = parents
1460 parent, p2 = parents
1461 node = ctx.node()
1461 node = ctx.node()
1462
1462
1463 mf = ctx.manifest()
1463 mf = ctx.manifest()
1464 if node == parent:
1464 if node == parent:
1465 pmf = mf
1465 pmf = mf
1466 else:
1466 else:
1467 pmf = None
1467 pmf = None
1468
1468
1469 # need all matching names in dirstate and manifest of target rev,
1469 # need all matching names in dirstate and manifest of target rev,
1470 # so have to walk both. do not print errors if files exist in one
1470 # so have to walk both. do not print errors if files exist in one
1471 # but not other.
1471 # but not other.
1472
1472
1473 names = {}
1473 names = {}
1474
1474
1475 wlock = repo.wlock()
1475 wlock = repo.wlock()
1476 try:
1476 try:
1477 # walk dirstate.
1477 # walk dirstate.
1478
1478
1479 m = scmutil.match(repo[None], pats, opts)
1479 m = scmutil.match(repo[None], pats, opts)
1480 m.bad = lambda x, y: False
1480 m.bad = lambda x, y: False
1481 for abs in repo.walk(m):
1481 for abs in repo.walk(m):
1482 names[abs] = m.rel(abs), m.exact(abs)
1482 names[abs] = m.rel(abs), m.exact(abs)
1483
1483
1484 # walk target manifest.
1484 # walk target manifest.
1485
1485
1486 def badfn(path, msg):
1486 def badfn(path, msg):
1487 if path in names:
1487 if path in names:
1488 return
1488 return
1489 if path in ctx.substate:
1489 if path in ctx.substate:
1490 return
1490 return
1491 path_ = path + '/'
1491 path_ = path + '/'
1492 for f in names:
1492 for f in names:
1493 if f.startswith(path_):
1493 if f.startswith(path_):
1494 return
1494 return
1495 ui.warn("%s: %s\n" % (m.rel(path), msg))
1495 ui.warn("%s: %s\n" % (m.rel(path), msg))
1496
1496
1497 m = scmutil.match(ctx, pats, opts)
1497 m = scmutil.match(ctx, pats, opts)
1498 m.bad = badfn
1498 m.bad = badfn
1499 for abs in ctx.walk(m):
1499 for abs in ctx.walk(m):
1500 if abs not in names:
1500 if abs not in names:
1501 names[abs] = m.rel(abs), m.exact(abs)
1501 names[abs] = m.rel(abs), m.exact(abs)
1502
1502
1503 # get the list of subrepos that must be reverted
1503 # get the list of subrepos that must be reverted
1504 targetsubs = [s for s in ctx.substate if m(s)]
1504 targetsubs = [s for s in ctx.substate if m(s)]
1505 m = scmutil.matchfiles(repo, names)
1505 m = scmutil.matchfiles(repo, names)
1506 changes = repo.status(match=m)[:4]
1506 changes = repo.status(match=m)[:4]
1507 modified, added, removed, deleted = map(set, changes)
1507 modified, added, removed, deleted = map(set, changes)
1508
1508
1509 # if f is a rename, also revert the source
1509 # if f is a rename, also revert the source
1510 cwd = repo.getcwd()
1510 cwd = repo.getcwd()
1511 for f in added:
1511 for f in added:
1512 src = repo.dirstate.copied(f)
1512 src = repo.dirstate.copied(f)
1513 if src and src not in names and repo.dirstate[src] == 'r':
1513 if src and src not in names and repo.dirstate[src] == 'r':
1514 removed.add(src)
1514 removed.add(src)
1515 names[src] = (repo.pathto(src, cwd), True)
1515 names[src] = (repo.pathto(src, cwd), True)
1516
1516
1517 def removeforget(abs):
1517 def removeforget(abs):
1518 if repo.dirstate[abs] == 'a':
1518 if repo.dirstate[abs] == 'a':
1519 return _('forgetting %s\n')
1519 return _('forgetting %s\n')
1520 return _('removing %s\n')
1520 return _('removing %s\n')
1521
1521
1522 revert = ([], _('reverting %s\n'))
1522 revert = ([], _('reverting %s\n'))
1523 add = ([], _('adding %s\n'))
1523 add = ([], _('adding %s\n'))
1524 remove = ([], removeforget)
1524 remove = ([], removeforget)
1525 undelete = ([], _('undeleting %s\n'))
1525 undelete = ([], _('undeleting %s\n'))
1526
1526
1527 disptable = (
1527 disptable = (
1528 # dispatch table:
1528 # dispatch table:
1529 # file state
1529 # file state
1530 # action if in target manifest
1530 # action if in target manifest
1531 # action if not in target manifest
1531 # action if not in target manifest
1532 # make backup if in target manifest
1532 # make backup if in target manifest
1533 # make backup if not in target manifest
1533 # make backup if not in target manifest
1534 (modified, revert, remove, True, True),
1534 (modified, revert, remove, True, True),
1535 (added, revert, remove, True, False),
1535 (added, revert, remove, True, False),
1536 (removed, undelete, None, False, False),
1536 (removed, undelete, None, False, False),
1537 (deleted, revert, remove, False, False),
1537 (deleted, revert, remove, False, False),
1538 )
1538 )
1539
1539
1540 for abs, (rel, exact) in sorted(names.items()):
1540 for abs, (rel, exact) in sorted(names.items()):
1541 mfentry = mf.get(abs)
1541 mfentry = mf.get(abs)
1542 target = repo.wjoin(abs)
1542 target = repo.wjoin(abs)
1543 def handle(xlist, dobackup):
1543 def handle(xlist, dobackup):
1544 xlist[0].append(abs)
1544 xlist[0].append(abs)
1545 if (dobackup and not opts.get('no_backup') and
1545 if (dobackup and not opts.get('no_backup') and
1546 os.path.lexists(target)):
1546 os.path.lexists(target)):
1547 bakname = "%s.orig" % rel
1547 bakname = "%s.orig" % rel
1548 ui.note(_('saving current version of %s as %s\n') %
1548 ui.note(_('saving current version of %s as %s\n') %
1549 (rel, bakname))
1549 (rel, bakname))
1550 if not opts.get('dry_run'):
1550 if not opts.get('dry_run'):
1551 util.rename(target, bakname)
1551 util.rename(target, bakname)
1552 if ui.verbose or not exact:
1552 if ui.verbose or not exact:
1553 msg = xlist[1]
1553 msg = xlist[1]
1554 if not isinstance(msg, basestring):
1554 if not isinstance(msg, basestring):
1555 msg = msg(abs)
1555 msg = msg(abs)
1556 ui.status(msg % rel)
1556 ui.status(msg % rel)
1557 for table, hitlist, misslist, backuphit, backupmiss in disptable:
1557 for table, hitlist, misslist, backuphit, backupmiss in disptable:
1558 if abs not in table:
1558 if abs not in table:
1559 continue
1559 continue
1560 # file has changed in dirstate
1560 # file has changed in dirstate
1561 if mfentry:
1561 if mfentry:
1562 handle(hitlist, backuphit)
1562 handle(hitlist, backuphit)
1563 elif misslist is not None:
1563 elif misslist is not None:
1564 handle(misslist, backupmiss)
1564 handle(misslist, backupmiss)
1565 break
1565 break
1566 else:
1566 else:
1567 if abs not in repo.dirstate:
1567 if abs not in repo.dirstate:
1568 if mfentry:
1568 if mfentry:
1569 handle(add, True)
1569 handle(add, True)
1570 elif exact:
1570 elif exact:
1571 ui.warn(_('file not managed: %s\n') % rel)
1571 ui.warn(_('file not managed: %s\n') % rel)
1572 continue
1572 continue
1573 # file has not changed in dirstate
1573 # file has not changed in dirstate
1574 if node == parent:
1574 if node == parent:
1575 if exact:
1575 if exact:
1576 ui.warn(_('no changes needed to %s\n') % rel)
1576 ui.warn(_('no changes needed to %s\n') % rel)
1577 continue
1577 continue
1578 if pmf is None:
1578 if pmf is None:
1579 # only need parent manifest in this unlikely case,
1579 # only need parent manifest in this unlikely case,
1580 # so do not read by default
1580 # so do not read by default
1581 pmf = repo[parent].manifest()
1581 pmf = repo[parent].manifest()
1582 if abs in pmf and mfentry:
1582 if abs in pmf and mfentry:
1583 # if version of file is same in parent and target
1583 # if version of file is same in parent and target
1584 # manifests, do nothing
1584 # manifests, do nothing
1585 if (pmf[abs] != mfentry or
1585 if (pmf[abs] != mfentry or
1586 pmf.flags(abs) != mf.flags(abs)):
1586 pmf.flags(abs) != mf.flags(abs)):
1587 handle(revert, False)
1587 handle(revert, False)
1588 else:
1588 else:
1589 handle(remove, False)
1589 handle(remove, False)
1590
1590
1591 if not opts.get('dry_run'):
1591 if not opts.get('dry_run'):
1592 def checkout(f):
1592 def checkout(f):
1593 fc = ctx[f]
1593 fc = ctx[f]
1594 repo.wwrite(f, fc.data(), fc.flags())
1594 repo.wwrite(f, fc.data(), fc.flags())
1595
1595
1596 audit_path = scmutil.pathauditor(repo.root)
1596 audit_path = scmutil.pathauditor(repo.root)
1597 for f in remove[0]:
1597 for f in remove[0]:
1598 if repo.dirstate[f] == 'a':
1598 if repo.dirstate[f] == 'a':
1599 repo.dirstate.drop(f)
1599 repo.dirstate.drop(f)
1600 continue
1600 continue
1601 audit_path(f)
1601 audit_path(f)
1602 try:
1602 try:
1603 util.unlinkpath(repo.wjoin(f))
1603 util.unlinkpath(repo.wjoin(f))
1604 except OSError:
1604 except OSError:
1605 pass
1605 pass
1606 repo.dirstate.remove(f)
1606 repo.dirstate.remove(f)
1607
1607
1608 normal = None
1608 normal = None
1609 if node == parent:
1609 if node == parent:
1610 # We're reverting to our parent. If possible, we'd like status
1610 # We're reverting to our parent. If possible, we'd like status
1611 # to report the file as clean. We have to use normallookup for
1611 # to report the file as clean. We have to use normallookup for
1612 # merges to avoid losing information about merged/dirty files.
1612 # merges to avoid losing information about merged/dirty files.
1613 if p2 != nullid:
1613 if p2 != nullid:
1614 normal = repo.dirstate.normallookup
1614 normal = repo.dirstate.normallookup
1615 else:
1615 else:
1616 normal = repo.dirstate.normal
1616 normal = repo.dirstate.normal
1617 for f in revert[0]:
1617 for f in revert[0]:
1618 checkout(f)
1618 checkout(f)
1619 if normal:
1619 if normal:
1620 normal(f)
1620 normal(f)
1621
1621
1622 for f in add[0]:
1622 for f in add[0]:
1623 checkout(f)
1623 checkout(f)
1624 repo.dirstate.add(f)
1624 repo.dirstate.add(f)
1625
1625
1626 normal = repo.dirstate.normallookup
1626 normal = repo.dirstate.normallookup
1627 if node == parent and p2 == nullid:
1627 if node == parent and p2 == nullid:
1628 normal = repo.dirstate.normal
1628 normal = repo.dirstate.normal
1629 for f in undelete[0]:
1629 for f in undelete[0]:
1630 checkout(f)
1630 checkout(f)
1631 normal(f)
1631 normal(f)
1632
1632
1633 if targetsubs:
1633 if targetsubs:
1634 # Revert the subrepos on the revert list
1634 # Revert the subrepos on the revert list
1635 for sub in targetsubs:
1635 for sub in targetsubs:
1636 ctx.sub(sub).revert(ui, ctx.substate[sub], *pats, **opts)
1636 ctx.sub(sub).revert(ui, ctx.substate[sub], *pats, **opts)
1637 finally:
1637 finally:
1638 wlock.release()
1638 wlock.release()
1639
1639
1640 def command(table):
1640 def command(table):
1641 '''returns a function object bound to table which can be used as
1641 '''returns a function object bound to table which can be used as
1642 a decorator for populating table as a command table'''
1642 a decorator for populating table as a command table'''
1643
1643
1644 def cmd(name, options, synopsis=None):
1644 def cmd(name, options, synopsis=None):
1645 def decorator(func):
1645 def decorator(func):
1646 if synopsis:
1646 if synopsis:
1647 table[name] = func, options[:], synopsis
1647 table[name] = func, options[:], synopsis
1648 else:
1648 else:
1649 table[name] = func, options[:]
1649 table[name] = func, options[:]
1650 return func
1650 return func
1651 return decorator
1651 return decorator
1652
1652
1653 return cmd
1653 return cmd
@@ -1,238 +1,238 b''
1 # commandserver.py - communicate with Mercurial's API over a pipe
1 # commandserver.py - communicate with Mercurial's API over a pipe
2 #
2 #
3 # Copyright Matt Mackall <mpm@selenic.com>
3 # Copyright Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from i18n import _
8 from i18n import _
9 import struct
9 import struct
10 import sys, os
10 import sys, os
11 import dispatch, encoding, util
11 import dispatch, encoding, util
12
12
13 logfile = None
13 logfile = None
14
14
15 def log(*args):
15 def log(*args):
16 if not logfile:
16 if not logfile:
17 return
17 return
18
18
19 for a in args:
19 for a in args:
20 logfile.write(str(a))
20 logfile.write(str(a))
21
21
22 logfile.flush()
22 logfile.flush()
23
23
24 class channeledoutput(object):
24 class channeledoutput(object):
25 """
25 """
26 Write data from in_ to out in the following format:
26 Write data from in_ to out in the following format:
27
27
28 data length (unsigned int),
28 data length (unsigned int),
29 data
29 data
30 """
30 """
31 def __init__(self, in_, out, channel):
31 def __init__(self, in_, out, channel):
32 self.in_ = in_
32 self.in_ = in_
33 self.out = out
33 self.out = out
34 self.channel = channel
34 self.channel = channel
35
35
36 def write(self, data):
36 def write(self, data):
37 if not data:
37 if not data:
38 return
38 return
39 self.out.write(struct.pack('>cI', self.channel, len(data)))
39 self.out.write(struct.pack('>cI', self.channel, len(data)))
40 self.out.write(data)
40 self.out.write(data)
41 self.out.flush()
41 self.out.flush()
42
42
43 def __getattr__(self, attr):
43 def __getattr__(self, attr):
44 if attr in ('isatty', 'fileno'):
44 if attr in ('isatty', 'fileno'):
45 raise AttributeError, attr
45 raise AttributeError, attr
46 return getattr(self.in_, attr)
46 return getattr(self.in_, attr)
47
47
48 class channeledinput(object):
48 class channeledinput(object):
49 """
49 """
50 Read data from in_.
50 Read data from in_.
51
51
52 Requests for input are written to out in the following format:
52 Requests for input are written to out in the following format:
53 channel identifier - 'I' for plain input, 'L' line based (1 byte)
53 channel identifier - 'I' for plain input, 'L' line based (1 byte)
54 how many bytes to send at most (unsigned int),
54 how many bytes to send at most (unsigned int),
55
55
56 The client replies with:
56 The client replies with:
57 data length (unsigned int), 0 meaning EOF
57 data length (unsigned int), 0 meaning EOF
58 data
58 data
59 """
59 """
60
60
61 maxchunksize = 4 * 1024
61 maxchunksize = 4 * 1024
62
62
63 def __init__(self, in_, out, channel):
63 def __init__(self, in_, out, channel):
64 self.in_ = in_
64 self.in_ = in_
65 self.out = out
65 self.out = out
66 self.channel = channel
66 self.channel = channel
67
67
68 def read(self, size=-1):
68 def read(self, size=-1):
69 if size < 0:
69 if size < 0:
70 # if we need to consume all the clients input, ask for 4k chunks
70 # if we need to consume all the clients input, ask for 4k chunks
71 # so the pipe doesn't fill up risking a deadlock
71 # so the pipe doesn't fill up risking a deadlock
72 size = self.maxchunksize
72 size = self.maxchunksize
73 s = self._read(size, self.channel)
73 s = self._read(size, self.channel)
74 buf = s
74 buf = s
75 while s:
75 while s:
76 s = self._read(size, self.channel)
76 s = self._read(size, self.channel)
77 buf += s
77 buf += s
78
78
79 return buf
79 return buf
80 else:
80 else:
81 return self._read(size, self.channel)
81 return self._read(size, self.channel)
82
82
83 def _read(self, size, channel):
83 def _read(self, size, channel):
84 if not size:
84 if not size:
85 return ''
85 return ''
86 assert size > 0
86 assert size > 0
87
87
88 # tell the client we need at most size bytes
88 # tell the client we need at most size bytes
89 self.out.write(struct.pack('>cI', channel, size))
89 self.out.write(struct.pack('>cI', channel, size))
90 self.out.flush()
90 self.out.flush()
91
91
92 length = self.in_.read(4)
92 length = self.in_.read(4)
93 length = struct.unpack('>I', length)[0]
93 length = struct.unpack('>I', length)[0]
94 if not length:
94 if not length:
95 return ''
95 return ''
96 else:
96 else:
97 return self.in_.read(length)
97 return self.in_.read(length)
98
98
99 def readline(self, size=-1):
99 def readline(self, size=-1):
100 if size < 0:
100 if size < 0:
101 size = self.maxchunksize
101 size = self.maxchunksize
102 s = self._read(size, 'L')
102 s = self._read(size, 'L')
103 buf = s
103 buf = s
104 # keep asking for more until there's either no more or
104 # keep asking for more until there's either no more or
105 # we got a full line
105 # we got a full line
106 while s and s[-1] != '\n':
106 while s and s[-1] != '\n':
107 s = self._read(size, 'L')
107 s = self._read(size, 'L')
108 buf += s
108 buf += s
109
109
110 return buf
110 return buf
111 else:
111 else:
112 return self._read(size, 'L')
112 return self._read(size, 'L')
113
113
114 def __iter__(self):
114 def __iter__(self):
115 return self
115 return self
116
116
117 def next(self):
117 def next(self):
118 l = self.readline()
118 l = self.readline()
119 if not l:
119 if not l:
120 raise StopIteration
120 raise StopIteration
121 return l
121 return l
122
122
123 def __getattr__(self, attr):
123 def __getattr__(self, attr):
124 if attr in ('isatty', 'fileno'):
124 if attr in ('isatty', 'fileno'):
125 raise AttributeError, attr
125 raise AttributeError, attr
126 return getattr(self.in_, attr)
126 return getattr(self.in_, attr)
127
127
128 class server(object):
128 class server(object):
129 """
129 """
130 Listens for commands on stdin, runs them and writes the output on a channel
130 Listens for commands on stdin, runs them and writes the output on a channel
131 based stream to stdout.
131 based stream to stdout.
132 """
132 """
133 def __init__(self, ui, repo, mode):
133 def __init__(self, ui, repo, mode):
134 self.cwd = os.getcwd()
134 self.cwd = os.getcwd()
135
135
136 logpath = ui.config("cmdserver", "log", None)
136 logpath = ui.config("cmdserver", "log", None)
137 if logpath:
137 if logpath:
138 global logfile
138 global logfile
139 if logpath == '-':
139 if logpath == '-':
140 # write log on a special 'd'ebug channel
140 # write log on a special 'd'ebug channel
141 logfile = channeledoutput(sys.stdout, sys.stdout, 'd')
141 logfile = channeledoutput(sys.stdout, sys.stdout, 'd')
142 else:
142 else:
143 logfile = open(logpath, 'a')
143 logfile = open(logpath, 'a')
144
144
145 # the ui here is really the repo ui so take its baseui so we don't end
145 # the ui here is really the repo ui so take its baseui so we don't end
146 # up with its local configuration
146 # up with its local configuration
147 self.ui = repo.baseui
147 self.ui = repo.baseui
148 self.repo = repo
148 self.repo = repo
149 self.repoui = repo.ui
149 self.repoui = repo.ui
150
150
151 if mode == 'pipe':
151 if mode == 'pipe':
152 self.cerr = channeledoutput(sys.stderr, sys.stdout, 'e')
152 self.cerr = channeledoutput(sys.stderr, sys.stdout, 'e')
153 self.cout = channeledoutput(sys.stdout, sys.stdout, 'o')
153 self.cout = channeledoutput(sys.stdout, sys.stdout, 'o')
154 self.cin = channeledinput(sys.stdin, sys.stdout, 'I')
154 self.cin = channeledinput(sys.stdin, sys.stdout, 'I')
155 self.cresult = channeledoutput(sys.stdout, sys.stdout, 'r')
155 self.cresult = channeledoutput(sys.stdout, sys.stdout, 'r')
156
156
157 self.client = sys.stdin
157 self.client = sys.stdin
158 else:
158 else:
159 raise util.Abort(_('unknown mode %s') % mode)
159 raise util.Abort(_('unknown mode %s') % mode)
160
160
161 def _read(self, size):
161 def _read(self, size):
162 if not size:
162 if not size:
163 return ''
163 return ''
164
164
165 data = self.client.read(size)
165 data = self.client.read(size)
166
166
167 # is the other end closed?
167 # is the other end closed?
168 if not data:
168 if not data:
169 raise EOFError()
169 raise EOFError
170
170
171 return data
171 return data
172
172
173 def runcommand(self):
173 def runcommand(self):
174 """ reads a list of \0 terminated arguments, executes
174 """ reads a list of \0 terminated arguments, executes
175 and writes the return code to the result channel """
175 and writes the return code to the result channel """
176
176
177 length = struct.unpack('>I', self._read(4))[0]
177 length = struct.unpack('>I', self._read(4))[0]
178 if not length:
178 if not length:
179 args = []
179 args = []
180 else:
180 else:
181 args = self._read(length).split('\0')
181 args = self._read(length).split('\0')
182
182
183 # copy the uis so changes (e.g. --config or --verbose) don't
183 # copy the uis so changes (e.g. --config or --verbose) don't
184 # persist between requests
184 # persist between requests
185 copiedui = self.ui.copy()
185 copiedui = self.ui.copy()
186 self.repo.baseui = copiedui
186 self.repo.baseui = copiedui
187 self.repo.ui = self.repo.dirstate._ui = self.repoui.copy()
187 self.repo.ui = self.repo.dirstate._ui = self.repoui.copy()
188 self.repo.invalidate()
188 self.repo.invalidate()
189 self.repo.invalidatedirstate()
189 self.repo.invalidatedirstate()
190
190
191 req = dispatch.request(args[:], copiedui, self.repo, self.cin,
191 req = dispatch.request(args[:], copiedui, self.repo, self.cin,
192 self.cout, self.cerr)
192 self.cout, self.cerr)
193
193
194 ret = dispatch.dispatch(req) or 0 # might return None
194 ret = dispatch.dispatch(req) or 0 # might return None
195
195
196 # restore old cwd
196 # restore old cwd
197 if '--cwd' in args:
197 if '--cwd' in args:
198 os.chdir(self.cwd)
198 os.chdir(self.cwd)
199
199
200 self.cresult.write(struct.pack('>i', int(ret)))
200 self.cresult.write(struct.pack('>i', int(ret)))
201
201
202 def getencoding(self):
202 def getencoding(self):
203 """ writes the current encoding to the result channel """
203 """ writes the current encoding to the result channel """
204 self.cresult.write(encoding.encoding)
204 self.cresult.write(encoding.encoding)
205
205
206 def serveone(self):
206 def serveone(self):
207 cmd = self.client.readline()[:-1]
207 cmd = self.client.readline()[:-1]
208 if cmd:
208 if cmd:
209 handler = self.capabilities.get(cmd)
209 handler = self.capabilities.get(cmd)
210 if handler:
210 if handler:
211 handler(self)
211 handler(self)
212 else:
212 else:
213 # clients are expected to check what commands are supported by
213 # clients are expected to check what commands are supported by
214 # looking at the servers capabilities
214 # looking at the servers capabilities
215 raise util.Abort(_('unknown command %s') % cmd)
215 raise util.Abort(_('unknown command %s') % cmd)
216
216
217 return cmd != ''
217 return cmd != ''
218
218
219 capabilities = {'runcommand' : runcommand,
219 capabilities = {'runcommand' : runcommand,
220 'getencoding' : getencoding}
220 'getencoding' : getencoding}
221
221
222 def serve(self):
222 def serve(self):
223 hellomsg = 'capabilities: ' + ' '.join(self.capabilities.keys())
223 hellomsg = 'capabilities: ' + ' '.join(self.capabilities.keys())
224 hellomsg += '\n'
224 hellomsg += '\n'
225 hellomsg += 'encoding: ' + encoding.encoding
225 hellomsg += 'encoding: ' + encoding.encoding
226
226
227 # write the hello msg in -one- chunk
227 # write the hello msg in -one- chunk
228 self.cout.write(hellomsg)
228 self.cout.write(hellomsg)
229
229
230 try:
230 try:
231 while self.serveone():
231 while self.serveone():
232 pass
232 pass
233 except EOFError:
233 except EOFError:
234 # we'll get here if the client disconnected while we were reading
234 # we'll get here if the client disconnected while we were reading
235 # its request
235 # its request
236 return 1
236 return 1
237
237
238 return 0
238 return 0
@@ -1,277 +1,277 b''
1 # dagutil.py - dag utilities for mercurial
1 # dagutil.py - dag utilities for mercurial
2 #
2 #
3 # Copyright 2010 Benoit Boissinot <bboissin@gmail.com>
3 # Copyright 2010 Benoit Boissinot <bboissin@gmail.com>
4 # and Peter Arrenbrecht <peter@arrenbrecht.ch>
4 # and Peter Arrenbrecht <peter@arrenbrecht.ch>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from node import nullrev
9 from node import nullrev
10 from i18n import _
10 from i18n import _
11
11
12
12
13 class basedag(object):
13 class basedag(object):
14 '''generic interface for DAGs
14 '''generic interface for DAGs
15
15
16 terms:
16 terms:
17 "ix" (short for index) identifies a nodes internally,
17 "ix" (short for index) identifies a nodes internally,
18 "id" identifies one externally.
18 "id" identifies one externally.
19
19
20 All params are ixs unless explicitly suffixed otherwise.
20 All params are ixs unless explicitly suffixed otherwise.
21 Pluralized params are lists or sets.
21 Pluralized params are lists or sets.
22 '''
22 '''
23
23
24 def __init__(self):
24 def __init__(self):
25 self._inverse = None
25 self._inverse = None
26
26
27 def nodeset(self):
27 def nodeset(self):
28 '''set of all node idxs'''
28 '''set of all node idxs'''
29 raise NotImplementedError()
29 raise NotImplementedError
30
30
31 def heads(self):
31 def heads(self):
32 '''list of head ixs'''
32 '''list of head ixs'''
33 raise NotImplementedError()
33 raise NotImplementedError
34
34
35 def parents(self, ix):
35 def parents(self, ix):
36 '''list of parents ixs of ix'''
36 '''list of parents ixs of ix'''
37 raise NotImplementedError()
37 raise NotImplementedError
38
38
39 def inverse(self):
39 def inverse(self):
40 '''inverse DAG, where parents becomes children, etc.'''
40 '''inverse DAG, where parents becomes children, etc.'''
41 raise NotImplementedError()
41 raise NotImplementedError
42
42
43 def ancestorset(self, starts, stops=None):
43 def ancestorset(self, starts, stops=None):
44 '''
44 '''
45 set of all ancestors of starts (incl), but stop walk at stops (excl)
45 set of all ancestors of starts (incl), but stop walk at stops (excl)
46 '''
46 '''
47 raise NotImplementedError()
47 raise NotImplementedError
48
48
49 def descendantset(self, starts, stops=None):
49 def descendantset(self, starts, stops=None):
50 '''
50 '''
51 set of all descendants of starts (incl), but stop walk at stops (excl)
51 set of all descendants of starts (incl), but stop walk at stops (excl)
52 '''
52 '''
53 return self.inverse().ancestorset(starts, stops)
53 return self.inverse().ancestorset(starts, stops)
54
54
55 def headsetofconnecteds(self, ixs):
55 def headsetofconnecteds(self, ixs):
56 '''
56 '''
57 subset of connected list of ixs so that no node has a descendant in it
57 subset of connected list of ixs so that no node has a descendant in it
58
58
59 By "connected list" we mean that if an ancestor and a descendant are in
59 By "connected list" we mean that if an ancestor and a descendant are in
60 the list, then so is at least one path connecting them.
60 the list, then so is at least one path connecting them.
61 '''
61 '''
62 raise NotImplementedError()
62 raise NotImplementedError
63
63
64 def externalize(self, ix):
64 def externalize(self, ix):
65 '''return a list of (or set if given a set) of node ids'''
65 '''return a list of (or set if given a set) of node ids'''
66 return self._externalize(ix)
66 return self._externalize(ix)
67
67
68 def externalizeall(self, ixs):
68 def externalizeall(self, ixs):
69 '''return a list of (or set if given a set) of node ids'''
69 '''return a list of (or set if given a set) of node ids'''
70 ids = self._externalizeall(ixs)
70 ids = self._externalizeall(ixs)
71 if isinstance(ixs, set):
71 if isinstance(ixs, set):
72 return set(ids)
72 return set(ids)
73 return list(ids)
73 return list(ids)
74
74
75 def internalize(self, id):
75 def internalize(self, id):
76 '''return a list of (or set if given a set) of node ixs'''
76 '''return a list of (or set if given a set) of node ixs'''
77 return self._internalize(id)
77 return self._internalize(id)
78
78
79 def internalizeall(self, ids, filterunknown=False):
79 def internalizeall(self, ids, filterunknown=False):
80 '''return a list of (or set if given a set) of node ids'''
80 '''return a list of (or set if given a set) of node ids'''
81 ixs = self._internalizeall(ids, filterunknown)
81 ixs = self._internalizeall(ids, filterunknown)
82 if isinstance(ids, set):
82 if isinstance(ids, set):
83 return set(ixs)
83 return set(ixs)
84 return list(ixs)
84 return list(ixs)
85
85
86
86
87 class genericdag(basedag):
87 class genericdag(basedag):
88 '''generic implementations for DAGs'''
88 '''generic implementations for DAGs'''
89
89
90 def ancestorset(self, starts, stops=None):
90 def ancestorset(self, starts, stops=None):
91 stops = stops and set(stops) or set()
91 stops = stops and set(stops) or set()
92 seen = set()
92 seen = set()
93 pending = list(starts)
93 pending = list(starts)
94 while pending:
94 while pending:
95 n = pending.pop()
95 n = pending.pop()
96 if n not in seen and n not in stops:
96 if n not in seen and n not in stops:
97 seen.add(n)
97 seen.add(n)
98 pending.extend(self.parents(n))
98 pending.extend(self.parents(n))
99 return seen
99 return seen
100
100
101 def headsetofconnecteds(self, ixs):
101 def headsetofconnecteds(self, ixs):
102 hds = set(ixs)
102 hds = set(ixs)
103 if not hds:
103 if not hds:
104 return hds
104 return hds
105 for n in ixs:
105 for n in ixs:
106 for p in self.parents(n):
106 for p in self.parents(n):
107 hds.discard(p)
107 hds.discard(p)
108 assert hds
108 assert hds
109 return hds
109 return hds
110
110
111
111
112 class revlogbaseddag(basedag):
112 class revlogbaseddag(basedag):
113 '''generic dag interface to a revlog'''
113 '''generic dag interface to a revlog'''
114
114
115 def __init__(self, revlog, nodeset):
115 def __init__(self, revlog, nodeset):
116 basedag.__init__(self)
116 basedag.__init__(self)
117 self._revlog = revlog
117 self._revlog = revlog
118 self._heads = None
118 self._heads = None
119 self._nodeset = nodeset
119 self._nodeset = nodeset
120
120
121 def nodeset(self):
121 def nodeset(self):
122 return self._nodeset
122 return self._nodeset
123
123
124 def heads(self):
124 def heads(self):
125 if self._heads is None:
125 if self._heads is None:
126 self._heads = self._getheads()
126 self._heads = self._getheads()
127 return self._heads
127 return self._heads
128
128
129 def _externalize(self, ix):
129 def _externalize(self, ix):
130 return self._revlog.index[ix][7]
130 return self._revlog.index[ix][7]
131 def _externalizeall(self, ixs):
131 def _externalizeall(self, ixs):
132 idx = self._revlog.index
132 idx = self._revlog.index
133 return [idx[i][7] for i in ixs]
133 return [idx[i][7] for i in ixs]
134
134
135 def _internalize(self, id):
135 def _internalize(self, id):
136 ix = self._revlog.rev(id)
136 ix = self._revlog.rev(id)
137 if ix == nullrev:
137 if ix == nullrev:
138 raise LookupError(id, self._revlog.indexfile, _('nullid'))
138 raise LookupError(id, self._revlog.indexfile, _('nullid'))
139 return ix
139 return ix
140 def _internalizeall(self, ids, filterunknown):
140 def _internalizeall(self, ids, filterunknown):
141 rl = self._revlog
141 rl = self._revlog
142 if filterunknown:
142 if filterunknown:
143 return [r for r in map(rl.nodemap.get, ids)
143 return [r for r in map(rl.nodemap.get, ids)
144 if r is not None and r != nullrev]
144 if r is not None and r != nullrev]
145 return map(self._internalize, ids)
145 return map(self._internalize, ids)
146
146
147
147
148 class revlogdag(revlogbaseddag):
148 class revlogdag(revlogbaseddag):
149 '''dag interface to a revlog'''
149 '''dag interface to a revlog'''
150
150
151 def __init__(self, revlog):
151 def __init__(self, revlog):
152 revlogbaseddag.__init__(self, revlog, set(xrange(len(revlog))))
152 revlogbaseddag.__init__(self, revlog, set(xrange(len(revlog))))
153
153
154 def _getheads(self):
154 def _getheads(self):
155 return [r for r in self._revlog.headrevs() if r != nullrev]
155 return [r for r in self._revlog.headrevs() if r != nullrev]
156
156
157 def parents(self, ix):
157 def parents(self, ix):
158 rlog = self._revlog
158 rlog = self._revlog
159 idx = rlog.index
159 idx = rlog.index
160 revdata = idx[ix]
160 revdata = idx[ix]
161 prev = revdata[5]
161 prev = revdata[5]
162 if prev != nullrev:
162 if prev != nullrev:
163 prev2 = revdata[6]
163 prev2 = revdata[6]
164 if prev2 == nullrev:
164 if prev2 == nullrev:
165 return [prev]
165 return [prev]
166 return [prev, prev2]
166 return [prev, prev2]
167 prev2 = revdata[6]
167 prev2 = revdata[6]
168 if prev2 != nullrev:
168 if prev2 != nullrev:
169 return [prev2]
169 return [prev2]
170 return []
170 return []
171
171
172 def inverse(self):
172 def inverse(self):
173 if self._inverse is None:
173 if self._inverse is None:
174 self._inverse = inverserevlogdag(self)
174 self._inverse = inverserevlogdag(self)
175 return self._inverse
175 return self._inverse
176
176
177 def ancestorset(self, starts, stops=None):
177 def ancestorset(self, starts, stops=None):
178 rlog = self._revlog
178 rlog = self._revlog
179 idx = rlog.index
179 idx = rlog.index
180 stops = stops and set(stops) or set()
180 stops = stops and set(stops) or set()
181 seen = set()
181 seen = set()
182 pending = list(starts)
182 pending = list(starts)
183 while pending:
183 while pending:
184 rev = pending.pop()
184 rev = pending.pop()
185 if rev not in seen and rev not in stops:
185 if rev not in seen and rev not in stops:
186 seen.add(rev)
186 seen.add(rev)
187 revdata = idx[rev]
187 revdata = idx[rev]
188 for i in [5, 6]:
188 for i in [5, 6]:
189 prev = revdata[i]
189 prev = revdata[i]
190 if prev != nullrev:
190 if prev != nullrev:
191 pending.append(prev)
191 pending.append(prev)
192 return seen
192 return seen
193
193
194 def headsetofconnecteds(self, ixs):
194 def headsetofconnecteds(self, ixs):
195 if not ixs:
195 if not ixs:
196 return set()
196 return set()
197 rlog = self._revlog
197 rlog = self._revlog
198 idx = rlog.index
198 idx = rlog.index
199 headrevs = set(ixs)
199 headrevs = set(ixs)
200 for rev in ixs:
200 for rev in ixs:
201 revdata = idx[rev]
201 revdata = idx[rev]
202 for i in [5, 6]:
202 for i in [5, 6]:
203 prev = revdata[i]
203 prev = revdata[i]
204 if prev != nullrev:
204 if prev != nullrev:
205 headrevs.discard(prev)
205 headrevs.discard(prev)
206 assert headrevs
206 assert headrevs
207 return headrevs
207 return headrevs
208
208
209 def linearize(self, ixs):
209 def linearize(self, ixs):
210 '''linearize and topologically sort a list of revisions
210 '''linearize and topologically sort a list of revisions
211
211
212 The linearization process tries to create long runs of revs where
212 The linearization process tries to create long runs of revs where
213 a child rev comes immediately after its first parent. This is done by
213 a child rev comes immediately after its first parent. This is done by
214 visiting the heads of the given revs in inverse topological order,
214 visiting the heads of the given revs in inverse topological order,
215 and for each visited rev, visiting its second parent, then its first
215 and for each visited rev, visiting its second parent, then its first
216 parent, then adding the rev itself to the output list.
216 parent, then adding the rev itself to the output list.
217 '''
217 '''
218 sorted = []
218 sorted = []
219 visit = list(self.headsetofconnecteds(ixs))
219 visit = list(self.headsetofconnecteds(ixs))
220 visit.sort(reverse=True)
220 visit.sort(reverse=True)
221 finished = set()
221 finished = set()
222
222
223 while visit:
223 while visit:
224 cur = visit.pop()
224 cur = visit.pop()
225 if cur < 0:
225 if cur < 0:
226 cur = -cur - 1
226 cur = -cur - 1
227 if cur not in finished:
227 if cur not in finished:
228 sorted.append(cur)
228 sorted.append(cur)
229 finished.add(cur)
229 finished.add(cur)
230 else:
230 else:
231 visit.append(-cur - 1)
231 visit.append(-cur - 1)
232 visit += [p for p in self.parents(cur)
232 visit += [p for p in self.parents(cur)
233 if p in ixs and p not in finished]
233 if p in ixs and p not in finished]
234 assert len(sorted) == len(ixs)
234 assert len(sorted) == len(ixs)
235 return sorted
235 return sorted
236
236
237
237
238 class inverserevlogdag(revlogbaseddag, genericdag):
238 class inverserevlogdag(revlogbaseddag, genericdag):
239 '''inverse of an existing revlog dag; see revlogdag.inverse()'''
239 '''inverse of an existing revlog dag; see revlogdag.inverse()'''
240
240
241 def __init__(self, orig):
241 def __init__(self, orig):
242 revlogbaseddag.__init__(self, orig._revlog, orig._nodeset)
242 revlogbaseddag.__init__(self, orig._revlog, orig._nodeset)
243 self._orig = orig
243 self._orig = orig
244 self._children = {}
244 self._children = {}
245 self._roots = []
245 self._roots = []
246 self._walkfrom = len(self._revlog) - 1
246 self._walkfrom = len(self._revlog) - 1
247
247
248 def _walkto(self, walkto):
248 def _walkto(self, walkto):
249 rev = self._walkfrom
249 rev = self._walkfrom
250 cs = self._children
250 cs = self._children
251 roots = self._roots
251 roots = self._roots
252 idx = self._revlog.index
252 idx = self._revlog.index
253 while rev >= walkto:
253 while rev >= walkto:
254 data = idx[rev]
254 data = idx[rev]
255 isroot = True
255 isroot = True
256 for prev in [data[5], data[6]]: # parent revs
256 for prev in [data[5], data[6]]: # parent revs
257 if prev != nullrev:
257 if prev != nullrev:
258 cs.setdefault(prev, []).append(rev)
258 cs.setdefault(prev, []).append(rev)
259 isroot = False
259 isroot = False
260 if isroot:
260 if isroot:
261 roots.append(rev)
261 roots.append(rev)
262 rev -= 1
262 rev -= 1
263 self._walkfrom = rev
263 self._walkfrom = rev
264
264
265 def _getheads(self):
265 def _getheads(self):
266 self._walkto(nullrev)
266 self._walkto(nullrev)
267 return self._roots
267 return self._roots
268
268
269 def parents(self, ix):
269 def parents(self, ix):
270 if ix is None:
270 if ix is None:
271 return []
271 return []
272 if ix <= self._walkfrom:
272 if ix <= self._walkfrom:
273 self._walkto(ix)
273 self._walkto(ix)
274 return self._children.get(ix, [])
274 return self._children.get(ix, [])
275
275
276 def inverse(self):
276 def inverse(self):
277 return self._orig
277 return self._orig
@@ -1,186 +1,186 b''
1 # hgweb/common.py - Utility functions needed by hgweb_mod and hgwebdir_mod
1 # hgweb/common.py - Utility functions needed by hgweb_mod and hgwebdir_mod
2 #
2 #
3 # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
3 # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
4 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 import errno, mimetypes, os
9 import errno, mimetypes, os
10
10
11 HTTP_OK = 200
11 HTTP_OK = 200
12 HTTP_NOT_MODIFIED = 304
12 HTTP_NOT_MODIFIED = 304
13 HTTP_BAD_REQUEST = 400
13 HTTP_BAD_REQUEST = 400
14 HTTP_UNAUTHORIZED = 401
14 HTTP_UNAUTHORIZED = 401
15 HTTP_FORBIDDEN = 403
15 HTTP_FORBIDDEN = 403
16 HTTP_NOT_FOUND = 404
16 HTTP_NOT_FOUND = 404
17 HTTP_METHOD_NOT_ALLOWED = 405
17 HTTP_METHOD_NOT_ALLOWED = 405
18 HTTP_SERVER_ERROR = 500
18 HTTP_SERVER_ERROR = 500
19
19
20
20
21 def checkauthz(hgweb, req, op):
21 def checkauthz(hgweb, req, op):
22 '''Check permission for operation based on request data (including
22 '''Check permission for operation based on request data (including
23 authentication info). Return if op allowed, else raise an ErrorResponse
23 authentication info). Return if op allowed, else raise an ErrorResponse
24 exception.'''
24 exception.'''
25
25
26 user = req.env.get('REMOTE_USER')
26 user = req.env.get('REMOTE_USER')
27
27
28 deny_read = hgweb.configlist('web', 'deny_read')
28 deny_read = hgweb.configlist('web', 'deny_read')
29 if deny_read and (not user or deny_read == ['*'] or user in deny_read):
29 if deny_read and (not user or deny_read == ['*'] or user in deny_read):
30 raise ErrorResponse(HTTP_UNAUTHORIZED, 'read not authorized')
30 raise ErrorResponse(HTTP_UNAUTHORIZED, 'read not authorized')
31
31
32 allow_read = hgweb.configlist('web', 'allow_read')
32 allow_read = hgweb.configlist('web', 'allow_read')
33 result = (not allow_read) or (allow_read == ['*'])
33 result = (not allow_read) or (allow_read == ['*'])
34 if not (result or user in allow_read):
34 if not (result or user in allow_read):
35 raise ErrorResponse(HTTP_UNAUTHORIZED, 'read not authorized')
35 raise ErrorResponse(HTTP_UNAUTHORIZED, 'read not authorized')
36
36
37 if op == 'pull' and not hgweb.allowpull:
37 if op == 'pull' and not hgweb.allowpull:
38 raise ErrorResponse(HTTP_UNAUTHORIZED, 'pull not authorized')
38 raise ErrorResponse(HTTP_UNAUTHORIZED, 'pull not authorized')
39 elif op == 'pull' or op is None: # op is None for interface requests
39 elif op == 'pull' or op is None: # op is None for interface requests
40 return
40 return
41
41
42 # enforce that you can only push using POST requests
42 # enforce that you can only push using POST requests
43 if req.env['REQUEST_METHOD'] != 'POST':
43 if req.env['REQUEST_METHOD'] != 'POST':
44 msg = 'push requires POST request'
44 msg = 'push requires POST request'
45 raise ErrorResponse(HTTP_METHOD_NOT_ALLOWED, msg)
45 raise ErrorResponse(HTTP_METHOD_NOT_ALLOWED, msg)
46
46
47 # require ssl by default for pushing, auth info cannot be sniffed
47 # require ssl by default for pushing, auth info cannot be sniffed
48 # and replayed
48 # and replayed
49 scheme = req.env.get('wsgi.url_scheme')
49 scheme = req.env.get('wsgi.url_scheme')
50 if hgweb.configbool('web', 'push_ssl', True) and scheme != 'https':
50 if hgweb.configbool('web', 'push_ssl', True) and scheme != 'https':
51 raise ErrorResponse(HTTP_OK, 'ssl required')
51 raise ErrorResponse(HTTP_OK, 'ssl required')
52
52
53 deny = hgweb.configlist('web', 'deny_push')
53 deny = hgweb.configlist('web', 'deny_push')
54 if deny and (not user or deny == ['*'] or user in deny):
54 if deny and (not user or deny == ['*'] or user in deny):
55 raise ErrorResponse(HTTP_UNAUTHORIZED, 'push not authorized')
55 raise ErrorResponse(HTTP_UNAUTHORIZED, 'push not authorized')
56
56
57 allow = hgweb.configlist('web', 'allow_push')
57 allow = hgweb.configlist('web', 'allow_push')
58 result = allow and (allow == ['*'] or user in allow)
58 result = allow and (allow == ['*'] or user in allow)
59 if not result:
59 if not result:
60 raise ErrorResponse(HTTP_UNAUTHORIZED, 'push not authorized')
60 raise ErrorResponse(HTTP_UNAUTHORIZED, 'push not authorized')
61
61
62 # Hooks for hgweb permission checks; extensions can add hooks here.
62 # Hooks for hgweb permission checks; extensions can add hooks here.
63 # Each hook is invoked like this: hook(hgweb, request, operation),
63 # Each hook is invoked like this: hook(hgweb, request, operation),
64 # where operation is either read, pull or push. Hooks should either
64 # where operation is either read, pull or push. Hooks should either
65 # raise an ErrorResponse exception, or just return.
65 # raise an ErrorResponse exception, or just return.
66 #
66 #
67 # It is possible to do both authentication and authorization through
67 # It is possible to do both authentication and authorization through
68 # this.
68 # this.
69 permhooks = [checkauthz]
69 permhooks = [checkauthz]
70
70
71
71
72 class ErrorResponse(Exception):
72 class ErrorResponse(Exception):
73 def __init__(self, code, message=None, headers=[]):
73 def __init__(self, code, message=None, headers=[]):
74 if message is None:
74 if message is None:
75 message = _statusmessage(code)
75 message = _statusmessage(code)
76 Exception.__init__(self)
76 Exception.__init__(self)
77 self.code = code
77 self.code = code
78 self.message = message
78 self.message = message
79 self.headers = headers
79 self.headers = headers
80 def __str__(self):
80 def __str__(self):
81 return self.message
81 return self.message
82
82
83 class continuereader(object):
83 class continuereader(object):
84 def __init__(self, f, write):
84 def __init__(self, f, write):
85 self.f = f
85 self.f = f
86 self._write = write
86 self._write = write
87 self.continued = False
87 self.continued = False
88
88
89 def read(self, amt=-1):
89 def read(self, amt=-1):
90 if not self.continued:
90 if not self.continued:
91 self.continued = True
91 self.continued = True
92 self._write('HTTP/1.1 100 Continue\r\n\r\n')
92 self._write('HTTP/1.1 100 Continue\r\n\r\n')
93 return self.f.read(amt)
93 return self.f.read(amt)
94
94
95 def __getattr__(self, attr):
95 def __getattr__(self, attr):
96 if attr in ('close', 'readline', 'readlines', '__iter__'):
96 if attr in ('close', 'readline', 'readlines', '__iter__'):
97 return getattr(self.f, attr)
97 return getattr(self.f, attr)
98 raise AttributeError()
98 raise AttributeError
99
99
100 def _statusmessage(code):
100 def _statusmessage(code):
101 from BaseHTTPServer import BaseHTTPRequestHandler
101 from BaseHTTPServer import BaseHTTPRequestHandler
102 responses = BaseHTTPRequestHandler.responses
102 responses = BaseHTTPRequestHandler.responses
103 return responses.get(code, ('Error', 'Unknown error'))[0]
103 return responses.get(code, ('Error', 'Unknown error'))[0]
104
104
105 def statusmessage(code, message=None):
105 def statusmessage(code, message=None):
106 return '%d %s' % (code, message or _statusmessage(code))
106 return '%d %s' % (code, message or _statusmessage(code))
107
107
108 def get_stat(spath):
108 def get_stat(spath):
109 """stat changelog if it exists, spath otherwise"""
109 """stat changelog if it exists, spath otherwise"""
110 cl_path = os.path.join(spath, "00changelog.i")
110 cl_path = os.path.join(spath, "00changelog.i")
111 if os.path.exists(cl_path):
111 if os.path.exists(cl_path):
112 return os.stat(cl_path)
112 return os.stat(cl_path)
113 else:
113 else:
114 return os.stat(spath)
114 return os.stat(spath)
115
115
116 def get_mtime(spath):
116 def get_mtime(spath):
117 return get_stat(spath).st_mtime
117 return get_stat(spath).st_mtime
118
118
119 def staticfile(directory, fname, req):
119 def staticfile(directory, fname, req):
120 """return a file inside directory with guessed Content-Type header
120 """return a file inside directory with guessed Content-Type header
121
121
122 fname always uses '/' as directory separator and isn't allowed to
122 fname always uses '/' as directory separator and isn't allowed to
123 contain unusual path components.
123 contain unusual path components.
124 Content-Type is guessed using the mimetypes module.
124 Content-Type is guessed using the mimetypes module.
125 Return an empty string if fname is illegal or file not found.
125 Return an empty string if fname is illegal or file not found.
126
126
127 """
127 """
128 parts = fname.split('/')
128 parts = fname.split('/')
129 for part in parts:
129 for part in parts:
130 if (part in ('', os.curdir, os.pardir) or
130 if (part in ('', os.curdir, os.pardir) or
131 os.sep in part or os.altsep is not None and os.altsep in part):
131 os.sep in part or os.altsep is not None and os.altsep in part):
132 return ""
132 return ""
133 fpath = os.path.join(*parts)
133 fpath = os.path.join(*parts)
134 if isinstance(directory, str):
134 if isinstance(directory, str):
135 directory = [directory]
135 directory = [directory]
136 for d in directory:
136 for d in directory:
137 path = os.path.join(d, fpath)
137 path = os.path.join(d, fpath)
138 if os.path.exists(path):
138 if os.path.exists(path):
139 break
139 break
140 try:
140 try:
141 os.stat(path)
141 os.stat(path)
142 ct = mimetypes.guess_type(path)[0] or "text/plain"
142 ct = mimetypes.guess_type(path)[0] or "text/plain"
143 req.respond(HTTP_OK, ct, length = os.path.getsize(path))
143 req.respond(HTTP_OK, ct, length = os.path.getsize(path))
144 fp = open(path, 'rb')
144 fp = open(path, 'rb')
145 data = fp.read()
145 data = fp.read()
146 fp.close()
146 fp.close()
147 return data
147 return data
148 except TypeError:
148 except TypeError:
149 raise ErrorResponse(HTTP_SERVER_ERROR, 'illegal filename')
149 raise ErrorResponse(HTTP_SERVER_ERROR, 'illegal filename')
150 except OSError, err:
150 except OSError, err:
151 if err.errno == errno.ENOENT:
151 if err.errno == errno.ENOENT:
152 raise ErrorResponse(HTTP_NOT_FOUND)
152 raise ErrorResponse(HTTP_NOT_FOUND)
153 else:
153 else:
154 raise ErrorResponse(HTTP_SERVER_ERROR, err.strerror)
154 raise ErrorResponse(HTTP_SERVER_ERROR, err.strerror)
155
155
156 def paritygen(stripecount, offset=0):
156 def paritygen(stripecount, offset=0):
157 """count parity of horizontal stripes for easier reading"""
157 """count parity of horizontal stripes for easier reading"""
158 if stripecount and offset:
158 if stripecount and offset:
159 # account for offset, e.g. due to building the list in reverse
159 # account for offset, e.g. due to building the list in reverse
160 count = (stripecount + offset) % stripecount
160 count = (stripecount + offset) % stripecount
161 parity = (stripecount + offset) / stripecount & 1
161 parity = (stripecount + offset) / stripecount & 1
162 else:
162 else:
163 count = 0
163 count = 0
164 parity = 0
164 parity = 0
165 while True:
165 while True:
166 yield parity
166 yield parity
167 count += 1
167 count += 1
168 if stripecount and count >= stripecount:
168 if stripecount and count >= stripecount:
169 parity = 1 - parity
169 parity = 1 - parity
170 count = 0
170 count = 0
171
171
172 def get_contact(config):
172 def get_contact(config):
173 """Return repo contact information or empty string.
173 """Return repo contact information or empty string.
174
174
175 web.contact is the primary source, but if that is not set, try
175 web.contact is the primary source, but if that is not set, try
176 ui.username or $EMAIL as a fallback to display something useful.
176 ui.username or $EMAIL as a fallback to display something useful.
177 """
177 """
178 return (config("web", "contact") or
178 return (config("web", "contact") or
179 config("ui", "username") or
179 config("ui", "username") or
180 os.environ.get("EMAIL") or "")
180 os.environ.get("EMAIL") or "")
181
181
182 def caching(web, req):
182 def caching(web, req):
183 tag = str(web.mtime)
183 tag = str(web.mtime)
184 if req.env.get('HTTP_IF_NONE_MATCH') == tag:
184 if req.env.get('HTTP_IF_NONE_MATCH') == tag:
185 raise ErrorResponse(HTTP_NOT_MODIFIED)
185 raise ErrorResponse(HTTP_NOT_MODIFIED)
186 req.headers.append(('ETag', tag))
186 req.headers.append(('ETag', tag))
@@ -1,764 +1,764 b''
1 # This library is free software; you can redistribute it and/or
1 # This library is free software; you can redistribute it and/or
2 # modify it under the terms of the GNU Lesser General Public
2 # modify it under the terms of the GNU Lesser General Public
3 # License as published by the Free Software Foundation; either
3 # License as published by the Free Software Foundation; either
4 # version 2.1 of the License, or (at your option) any later version.
4 # version 2.1 of the License, or (at your option) any later version.
5 #
5 #
6 # This library is distributed in the hope that it will be useful,
6 # This library is distributed in the hope that it will be useful,
7 # but WITHOUT ANY WARRANTY; without even the implied warranty of
7 # but WITHOUT ANY WARRANTY; without even the implied warranty of
8 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
8 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
9 # Lesser General Public License for more details.
9 # Lesser General Public License for more details.
10 #
10 #
11 # You should have received a copy of the GNU Lesser General Public
11 # You should have received a copy of the GNU Lesser General Public
12 # License along with this library; if not, see
12 # License along with this library; if not, see
13 # <http://www.gnu.org/licenses/>.
13 # <http://www.gnu.org/licenses/>.
14
14
15 # This file is part of urlgrabber, a high-level cross-protocol url-grabber
15 # This file is part of urlgrabber, a high-level cross-protocol url-grabber
16 # Copyright 2002-2004 Michael D. Stenner, Ryan Tomayko
16 # Copyright 2002-2004 Michael D. Stenner, Ryan Tomayko
17
17
18 # Modified by Benoit Boissinot:
18 # Modified by Benoit Boissinot:
19 # - fix for digest auth (inspired from urllib2.py @ Python v2.4)
19 # - fix for digest auth (inspired from urllib2.py @ Python v2.4)
20 # Modified by Dirkjan Ochtman:
20 # Modified by Dirkjan Ochtman:
21 # - import md5 function from a local util module
21 # - import md5 function from a local util module
22 # Modified by Martin Geisler:
22 # Modified by Martin Geisler:
23 # - moved md5 function from local util module to this module
23 # - moved md5 function from local util module to this module
24 # Modified by Augie Fackler:
24 # Modified by Augie Fackler:
25 # - add safesend method and use it to prevent broken pipe errors
25 # - add safesend method and use it to prevent broken pipe errors
26 # on large POST requests
26 # on large POST requests
27
27
28 """An HTTP handler for urllib2 that supports HTTP 1.1 and keepalive.
28 """An HTTP handler for urllib2 that supports HTTP 1.1 and keepalive.
29
29
30 >>> import urllib2
30 >>> import urllib2
31 >>> from keepalive import HTTPHandler
31 >>> from keepalive import HTTPHandler
32 >>> keepalive_handler = HTTPHandler()
32 >>> keepalive_handler = HTTPHandler()
33 >>> opener = urllib2.build_opener(keepalive_handler)
33 >>> opener = urllib2.build_opener(keepalive_handler)
34 >>> urllib2.install_opener(opener)
34 >>> urllib2.install_opener(opener)
35 >>>
35 >>>
36 >>> fo = urllib2.urlopen('http://www.python.org')
36 >>> fo = urllib2.urlopen('http://www.python.org')
37
37
38 If a connection to a given host is requested, and all of the existing
38 If a connection to a given host is requested, and all of the existing
39 connections are still in use, another connection will be opened. If
39 connections are still in use, another connection will be opened. If
40 the handler tries to use an existing connection but it fails in some
40 the handler tries to use an existing connection but it fails in some
41 way, it will be closed and removed from the pool.
41 way, it will be closed and removed from the pool.
42
42
43 To remove the handler, simply re-run build_opener with no arguments, and
43 To remove the handler, simply re-run build_opener with no arguments, and
44 install that opener.
44 install that opener.
45
45
46 You can explicitly close connections by using the close_connection()
46 You can explicitly close connections by using the close_connection()
47 method of the returned file-like object (described below) or you can
47 method of the returned file-like object (described below) or you can
48 use the handler methods:
48 use the handler methods:
49
49
50 close_connection(host)
50 close_connection(host)
51 close_all()
51 close_all()
52 open_connections()
52 open_connections()
53
53
54 NOTE: using the close_connection and close_all methods of the handler
54 NOTE: using the close_connection and close_all methods of the handler
55 should be done with care when using multiple threads.
55 should be done with care when using multiple threads.
56 * there is nothing that prevents another thread from creating new
56 * there is nothing that prevents another thread from creating new
57 connections immediately after connections are closed
57 connections immediately after connections are closed
58 * no checks are done to prevent in-use connections from being closed
58 * no checks are done to prevent in-use connections from being closed
59
59
60 >>> keepalive_handler.close_all()
60 >>> keepalive_handler.close_all()
61
61
62 EXTRA ATTRIBUTES AND METHODS
62 EXTRA ATTRIBUTES AND METHODS
63
63
64 Upon a status of 200, the object returned has a few additional
64 Upon a status of 200, the object returned has a few additional
65 attributes and methods, which should not be used if you want to
65 attributes and methods, which should not be used if you want to
66 remain consistent with the normal urllib2-returned objects:
66 remain consistent with the normal urllib2-returned objects:
67
67
68 close_connection() - close the connection to the host
68 close_connection() - close the connection to the host
69 readlines() - you know, readlines()
69 readlines() - you know, readlines()
70 status - the return status (ie 404)
70 status - the return status (ie 404)
71 reason - english translation of status (ie 'File not found')
71 reason - english translation of status (ie 'File not found')
72
72
73 If you want the best of both worlds, use this inside an
73 If you want the best of both worlds, use this inside an
74 AttributeError-catching try:
74 AttributeError-catching try:
75
75
76 >>> try: status = fo.status
76 >>> try: status = fo.status
77 >>> except AttributeError: status = None
77 >>> except AttributeError: status = None
78
78
79 Unfortunately, these are ONLY there if status == 200, so it's not
79 Unfortunately, these are ONLY there if status == 200, so it's not
80 easy to distinguish between non-200 responses. The reason is that
80 easy to distinguish between non-200 responses. The reason is that
81 urllib2 tries to do clever things with error codes 301, 302, 401,
81 urllib2 tries to do clever things with error codes 301, 302, 401,
82 and 407, and it wraps the object upon return.
82 and 407, and it wraps the object upon return.
83
83
84 For python versions earlier than 2.4, you can avoid this fancy error
84 For python versions earlier than 2.4, you can avoid this fancy error
85 handling by setting the module-level global HANDLE_ERRORS to zero.
85 handling by setting the module-level global HANDLE_ERRORS to zero.
86 You see, prior to 2.4, it's the HTTP Handler's job to determine what
86 You see, prior to 2.4, it's the HTTP Handler's job to determine what
87 to handle specially, and what to just pass up. HANDLE_ERRORS == 0
87 to handle specially, and what to just pass up. HANDLE_ERRORS == 0
88 means "pass everything up". In python 2.4, however, this job no
88 means "pass everything up". In python 2.4, however, this job no
89 longer belongs to the HTTP Handler and is now done by a NEW handler,
89 longer belongs to the HTTP Handler and is now done by a NEW handler,
90 HTTPErrorProcessor. Here's the bottom line:
90 HTTPErrorProcessor. Here's the bottom line:
91
91
92 python version < 2.4
92 python version < 2.4
93 HANDLE_ERRORS == 1 (default) pass up 200, treat the rest as
93 HANDLE_ERRORS == 1 (default) pass up 200, treat the rest as
94 errors
94 errors
95 HANDLE_ERRORS == 0 pass everything up, error processing is
95 HANDLE_ERRORS == 0 pass everything up, error processing is
96 left to the calling code
96 left to the calling code
97 python version >= 2.4
97 python version >= 2.4
98 HANDLE_ERRORS == 1 pass up 200, treat the rest as errors
98 HANDLE_ERRORS == 1 pass up 200, treat the rest as errors
99 HANDLE_ERRORS == 0 (default) pass everything up, let the
99 HANDLE_ERRORS == 0 (default) pass everything up, let the
100 other handlers (specifically,
100 other handlers (specifically,
101 HTTPErrorProcessor) decide what to do
101 HTTPErrorProcessor) decide what to do
102
102
103 In practice, setting the variable either way makes little difference
103 In practice, setting the variable either way makes little difference
104 in python 2.4, so for the most consistent behavior across versions,
104 in python 2.4, so for the most consistent behavior across versions,
105 you probably just want to use the defaults, which will give you
105 you probably just want to use the defaults, which will give you
106 exceptions on errors.
106 exceptions on errors.
107
107
108 """
108 """
109
109
110 # $Id: keepalive.py,v 1.14 2006/04/04 21:00:32 mstenner Exp $
110 # $Id: keepalive.py,v 1.14 2006/04/04 21:00:32 mstenner Exp $
111
111
112 import errno
112 import errno
113 import httplib
113 import httplib
114 import socket
114 import socket
115 import thread
115 import thread
116 import urllib2
116 import urllib2
117
117
# set to a logger-like object to get debug output from this module
DEBUG = None

import sys
# prior to Python 2.4 the HTTP handler itself must treat non-200
# responses as errors; from 2.4 on that job belongs to
# HTTPErrorProcessor, so everything is passed up by default
# (see the module docstring for the full story)
if sys.version_info < (2, 4):
    HANDLE_ERRORS = 1
else: HANDLE_ERRORS = 0
124
124
class ConnectionManager(object):
    """Thread-safe registry of open connections, grouped by host.

    Tracks every connection handed out by the handler, which host each
    one belongs to, and whether it is currently free for reuse.
    """
    def __init__(self):
        self._lock = thread.allocate_lock()
        # host -> list of connections open to that host
        self._hostmap = {}
        # connection -> host it is connected to
        self._connmap = {}
        # connection -> ready flag (true when free for reuse)
        self._readymap = {}

    def add(self, host, connection, ready):
        """Register *connection* for *host* with the given ready state."""
        self._lock.acquire()
        try:
            self._hostmap.setdefault(host, []).append(connection)
            self._connmap[connection] = host
            self._readymap[connection] = ready
        finally:
            self._lock.release()

    def remove(self, connection):
        """Forget *connection* entirely; unknown connections are a no-op."""
        self._lock.acquire()
        try:
            if connection in self._connmap:
                host = self._connmap.pop(connection)
                del self._readymap[connection]
                self._hostmap[host].remove(connection)
                if not self._hostmap[host]:
                    del self._hostmap[host]
        finally:
            self._lock.release()

    def set_ready(self, connection, ready):
        """Record the ready flag for *connection*."""
        # note: dict assignment cannot actually raise KeyError; the guard
        # is kept from the original code for faithfulness
        try:
            self._readymap[connection] = ready
        except KeyError:
            pass

    def get_ready_conn(self, host):
        """Claim and return a ready connection to *host*, or None.

        The returned connection is marked busy (ready flag cleared).
        """
        self._lock.acquire()
        try:
            for candidate in self._hostmap.get(host, []):
                if self._readymap[candidate]:
                    self._readymap[candidate] = 0
                    return candidate
            return None
        finally:
            self._lock.release()

    def get_all(self, host=None):
        """Return a copy: the connection list for *host*, or the whole
        host -> connections mapping when no host is given."""
        if host:
            return list(self._hostmap.get(host, []))
        return dict(self._hostmap)
187
187
class KeepAliveHandler(object):
    """Core of the keep-alive urllib2 handlers: reuses HTTP connections.

    Connections are tracked in a ConnectionManager.  A request first
    tries a free pooled connection to the target host, discarding dead
    ones, and only opens a new connection when none works.  Responses
    are annotated so that closing them returns the connection to the
    pool for the next request.
    """
    def __init__(self):
        # shared registry of open connections, keyed by host
        self._cm = ConnectionManager()

    #### Connection Management
    def open_connections(self):
        """return a list of connected hosts and the number of connections
        to each.  [('foo.com:80', 2), ('bar.org', 1)]"""
        return [(host, len(li)) for (host, li) in self._cm.get_all().items()]

    def close_connection(self, host):
        """close connection(s) to <host>
        host is the host:port spec, as in 'www.cnn.com:8080' as passed in.
        no error occurs if there is no connection to that host."""
        for h in self._cm.get_all(host):
            self._cm.remove(h)
            h.close()

    def close_all(self):
        """close all open connections"""
        for host, conns in self._cm.get_all().iteritems():
            for h in conns:
                self._cm.remove(h)
                h.close()

    def _request_closed(self, request, host, connection):
        """tells us that this request is now closed and that the
        connection is ready for another request"""
        self._cm.set_ready(connection, 1)

    def _remove_connection(self, host, connection, close=0):
        """Drop *connection* from the pool, optionally closing it first."""
        if close:
            connection.close()
        self._cm.remove(connection)

    #### Transaction Execution
    def http_open(self, req):
        """urllib2 entry point for http:// requests."""
        return self.do_open(HTTPConnection, req)

    def do_open(self, http_class, req):
        """Dispatch *req*, preferring a pooled connection to the host.

        Each ready pooled connection is tried in turn and dead ones are
        discarded; when none works (or none exists) a fresh connection
        is opened via *http_class*.  The response is annotated with the
        handler, host, url and connection so its close() can return the
        connection to the pool.  Raises urllib2.URLError on socket or
        HTTP-protocol failures.
        """
        host = req.get_host()
        if not host:
            raise urllib2.URLError('no host given')

        try:
            h = self._cm.get_ready_conn(host)
            while h:
                r = self._reuse_connection(h, req, host)

                # if this response is non-None, then it worked and we're
                # done.  Break out, skipping the else block.
                if r:
                    break

                # connection is bad - possibly closed by server
                # discard it and ask for the next free connection
                h.close()
                self._cm.remove(h)
                h = self._cm.get_ready_conn(host)
            else:
                # no (working) free connections were found.  Create a new one.
                h = http_class(host)
                if DEBUG:
                    DEBUG.info("creating new connection to %s (%d)",
                               host, id(h))
                self._cm.add(host, h, 0)
                self._start_transaction(h, req)
                r = h.getresponse()
        except (socket.error, httplib.HTTPException), err:
            raise urllib2.URLError(err)

        # if not a persistent connection, don't try to reuse it
        if r.will_close:
            self._cm.remove(h)

        if DEBUG:
            DEBUG.info("STATUS: %s, %s", r.status, r.reason)
        # annotate the response so close() can notify us, and remap the
        # httplib attributes onto the urllib2-style ones callers expect
        r._handler = self
        r._host = host
        r._url = req.get_full_url()
        r._connection = h
        r.code = r.status
        r.headers = r.msg
        r.msg = r.reason

        if r.status == 200 or not HANDLE_ERRORS:
            return r
        else:
            # hand non-200 responses to urllib2's error machinery
            # (pre-2.4 behavior, see HANDLE_ERRORS)
            return self.parent.error('http', req, r,
                                     r.status, r.msg, r.headers)

    def _reuse_connection(self, h, req, host):
        """start the transaction with a re-used connection
        return a response object (r) upon success or None on failure.
        This DOES not close or remove bad connections in cases where
        it returns.  However, if an unexpected exception occurs, it
        will close and remove the connection before re-raising.
        """
        try:
            self._start_transaction(h, req)
            r = h.getresponse()
            # note: just because we got something back doesn't mean it
            # worked.  We'll check the version below, too.
        except (socket.error, httplib.HTTPException):
            r = None
        except:
            # adding this block just in case we've missed
            # something we will still raise the exception, but
            # lets try and close the connection and remove it
            # first.  We previously got into a nasty loop
            # where an exception was uncaught, and so the
            # connection stayed open.  On the next try, the
            # same exception was raised, etc.  The tradeoff is
            # that it's now possible this call will raise
            # a DIFFERENT exception
            if DEBUG:
                DEBUG.error("unexpected exception - closing "
                            "connection to %s (%d)", host, id(h))
            self._cm.remove(h)
            h.close()
            raise

        if r is None or r.version == 9:
            # httplib falls back to assuming HTTP 0.9 if it gets a
            # bad header back.  This is most likely to happen if
            # the socket has been closed by the server since we
            # last used the connection.
            if DEBUG:
                DEBUG.info("failed to re-use connection to %s (%d)",
                           host, id(h))
            r = None
        else:
            if DEBUG:
                DEBUG.info("re-using connection to %s (%d)", host, id(h))

        return r

    def _start_transaction(self, h, req):
        """Send the request line, headers and body of *req* over *h*.

        Raises urllib2.URLError on socket errors while sending.
        """
        # What follows mostly reimplements HTTPConnection.request()
        # except it adds self.parent.addheaders in the mix.
        headers = req.headers.copy()
        if sys.version_info >= (2, 4):
            headers.update(req.unredirected_hdrs)
        headers.update(self.parent.addheaders)
        headers = dict((n.lower(), v) for n, v in headers.items())
        # tell httplib not to auto-generate Host/Accept-Encoding when the
        # request already carries them
        skipheaders = {}
        for n in ('host', 'accept-encoding'):
            if n in headers:
                skipheaders['skip_' + n.replace('-', '_')] = 1
        try:
            if req.has_data():
                data = req.get_data()
                h.putrequest('POST', req.get_selector(), **skipheaders)
                if 'content-type' not in headers:
                    h.putheader('Content-type',
                                'application/x-www-form-urlencoded')
                if 'content-length' not in headers:
                    h.putheader('Content-length', '%d' % len(data))
            else:
                h.putrequest('GET', req.get_selector(), **skipheaders)
        except (socket.error), err:
            raise urllib2.URLError(err)
        for k, v in headers.items():
            h.putheader(k, v)
        h.endheaders()
        if req.has_data():
            h.send(data)
355
355
class HTTPHandler(KeepAliveHandler, urllib2.HTTPHandler):
    # urllib2-pluggable HTTP handler with keep-alive support; all of the
    # work happens in KeepAliveHandler, this just mixes it into urllib2
    pass
358
358
359 class HTTPResponse(httplib.HTTPResponse):
359 class HTTPResponse(httplib.HTTPResponse):
360 # we need to subclass HTTPResponse in order to
360 # we need to subclass HTTPResponse in order to
361 # 1) add readline() and readlines() methods
361 # 1) add readline() and readlines() methods
362 # 2) add close_connection() methods
362 # 2) add close_connection() methods
363 # 3) add info() and geturl() methods
363 # 3) add info() and geturl() methods
364
364
365 # in order to add readline(), read must be modified to deal with a
365 # in order to add readline(), read must be modified to deal with a
366 # buffer. example: readline must read a buffer and then spit back
366 # buffer. example: readline must read a buffer and then spit back
367 # one line at a time. The only real alternative is to read one
367 # one line at a time. The only real alternative is to read one
368 # BYTE at a time (ick). Once something has been read, it can't be
368 # BYTE at a time (ick). Once something has been read, it can't be
369 # put back (ok, maybe it can, but that's even uglier than this),
369 # put back (ok, maybe it can, but that's even uglier than this),
370 # so if you THEN do a normal read, you must first take stuff from
370 # so if you THEN do a normal read, you must first take stuff from
371 # the buffer.
371 # the buffer.
372
372
    # the read method wraps the original to accommodate buffering,
374 # although read() never adds to the buffer.
374 # although read() never adds to the buffer.
375 # Both readline and readlines have been stolen with almost no
375 # Both readline and readlines have been stolen with almost no
376 # modification from socket.py
376 # modification from socket.py
377
377
378
378
    def __init__(self, sock, debuglevel=0, strict=0, method=None):
        """Wrap httplib.HTTPResponse, adding a read-ahead buffer and
        slots for the keep-alive handler's bookkeeping.

        'strict' is accepted for signature compatibility but is not
        forwarded to httplib here.
        """
        if method: # the httplib in python 2.3 uses the method arg
            httplib.HTTPResponse.__init__(self, sock, debuglevel, method)
        else: # 2.2 doesn't
            httplib.HTTPResponse.__init__(self, sock, debuglevel)
        self.fileno = sock.fileno
        self.code = None
        # read-ahead buffer backing readline()/readlines(); read() drains
        # it but never refills it
        self._rbuf = ''
        self._rbufsize = 8096
        self._handler = None # inserted by the handler later
        self._host = None # (same)
        self._url = None # (same)
        self._connection = None # (same)
392
392
    # keep a reference to the raw httplib read so the buffering read(),
    # readline() and readlines() below can delegate to it
    _raw_read = httplib.HTTPResponse.read
394
394
395 def close(self):
395 def close(self):
396 if self.fp:
396 if self.fp:
397 self.fp.close()
397 self.fp.close()
398 self.fp = None
398 self.fp = None
399 if self._handler:
399 if self._handler:
400 self._handler._request_closed(self, self._host,
400 self._handler._request_closed(self, self._host,
401 self._connection)
401 self._connection)
402
402
    def close_connection(self):
        """Tear down the underlying connection entirely (not just this
        response), removing it from the pool, then close the response."""
        self._handler._remove_connection(self._host, self._connection, close=1)
        self.close()
406
406
    def info(self):
        """Return the response headers object (urllib2 file-object API)."""
        return self.headers
409
409
    def geturl(self):
        """Return the URL this response came from (urllib2 file-object API)."""
        return self._url
412
412
413 def read(self, amt=None):
413 def read(self, amt=None):
414 # the _rbuf test is only in this first if for speed. It's not
414 # the _rbuf test is only in this first if for speed. It's not
415 # logically necessary
415 # logically necessary
416 if self._rbuf and not amt is None:
416 if self._rbuf and not amt is None:
417 L = len(self._rbuf)
417 L = len(self._rbuf)
418 if amt > L:
418 if amt > L:
419 amt -= L
419 amt -= L
420 else:
420 else:
421 s = self._rbuf[:amt]
421 s = self._rbuf[:amt]
422 self._rbuf = self._rbuf[amt:]
422 self._rbuf = self._rbuf[amt:]
423 return s
423 return s
424
424
425 s = self._rbuf + self._raw_read(amt)
425 s = self._rbuf + self._raw_read(amt)
426 self._rbuf = ''
426 self._rbuf = ''
427 return s
427 return s
428
428
    # stolen from Python SVN #68532 to fix issue1088
    def _read_chunked(self, amt):
        """Read from a response that uses chunked transfer encoding.

        Returns up to *amt* bytes of decoded chunk data, or everything
        up to the terminating zero-length chunk if *amt* is None.  The
        position inside a partially-consumed chunk is kept in
        self.chunk_left between calls.  Raises httplib.IncompleteRead
        if a chunk-size line cannot be parsed.
        """
        chunk_left = self.chunk_left
        value = ''

        # XXX This accumulates chunks by repeated string concatenation,
        # which is not efficient as the number or size of chunks gets big.
        while True:
            if chunk_left is None:
                # at a chunk boundary: parse the next chunk-size line
                line = self.fp.readline()
                i = line.find(';')
                if i >= 0:
                    line = line[:i] # strip chunk-extensions
                try:
                    chunk_left = int(line, 16)
                except ValueError:
                    # close the connection as protocol synchronisation is
                    # probably lost
                    self.close()
                    raise httplib.IncompleteRead(value)
                if chunk_left == 0:
                    # a zero-sized chunk marks the end of the body
                    break
            if amt is None:
                value += self._safe_read(chunk_left)
            elif amt < chunk_left:
                # caller wants less than this chunk holds; remember what
                # is left of the chunk for the next call
                value += self._safe_read(amt)
                self.chunk_left = chunk_left - amt
                return value
            elif amt == chunk_left:
                value += self._safe_read(amt)
                self._safe_read(2) # toss the CRLF at the end of the chunk
                self.chunk_left = None
                return value
            else:
                value += self._safe_read(chunk_left)
                amt -= chunk_left

            # we read the whole chunk, get another
            self._safe_read(2) # toss the CRLF at the end of the chunk
            chunk_left = None

        # read and discard trailer up to the CRLF terminator
        ### note: we shouldn't have any trailers!
        while True:
            line = self.fp.readline()
            if not line:
                # a vanishingly small number of sites EOF without
                # sending the trailer
                break
            if line == '\r\n':
                break

        # we read everything; close the "file"
        self.close()

        return value
485
485
    def readline(self, limit=-1):
        """Read and return one line (up to *limit* bytes if limit > 0),
        buffering raw reads in self._rbuf between calls.

        Adapted with almost no modification from socket.py (see the
        class-level comments above).
        """
        i = self._rbuf.find('\n')
        # keep pulling data until the buffer holds a newline, EOF is
        # reached, or the buffer already satisfies the byte limit
        while i < 0 and not (0 < limit <= len(self._rbuf)):
            new = self._raw_read(self._rbufsize)
            if not new:
                break
            i = new.find('\n')
            if i >= 0:
                # newline found in the fresh data; re-index it relative
                # to the start of the full buffer
                i = i + len(self._rbuf)
            self._rbuf = self._rbuf + new
        if i < 0:
            # no newline anywhere: return the whole buffer (EOF case)
            i = len(self._rbuf)
        else:
            # include the newline itself in the returned line
            i = i + 1
        if 0 <= limit < len(self._rbuf):
            # honor the byte limit even when a newline sits beyond it
            i = limit
        data, self._rbuf = self._rbuf[:i], self._rbuf[i:]
        return data
504
504
def readlines(self, sizehint=0):
    """Return a list of lines, stopping early once `sizehint` bytes
    have been collected (0 means read to EOF)."""
    lines = []
    collected = 0
    while True:
        line = self.readline()
        if not line:
            return lines
        lines.append(line)
        collected += len(line)
        if sizehint and collected >= sizehint:
            return lines
517
517
518 def safesend(self, str):
518 def safesend(self, str):
519 """Send `str' to the server.
519 """Send `str' to the server.
520
520
521 Shamelessly ripped off from httplib to patch a bad behavior.
521 Shamelessly ripped off from httplib to patch a bad behavior.
522 """
522 """
523 # _broken_pipe_resp is an attribute we set in this function
523 # _broken_pipe_resp is an attribute we set in this function
524 # if the socket is closed while we're sending data but
524 # if the socket is closed while we're sending data but
525 # the server sent us a response before hanging up.
525 # the server sent us a response before hanging up.
526 # In that case, we want to pretend to send the rest of the
526 # In that case, we want to pretend to send the rest of the
527 # outgoing data, and then let the user use getresponse()
527 # outgoing data, and then let the user use getresponse()
528 # (which we wrap) to get this last response before
528 # (which we wrap) to get this last response before
529 # opening a new socket.
529 # opening a new socket.
530 if getattr(self, '_broken_pipe_resp', None) is not None:
530 if getattr(self, '_broken_pipe_resp', None) is not None:
531 return
531 return
532
532
533 if self.sock is None:
533 if self.sock is None:
534 if self.auto_open:
534 if self.auto_open:
535 self.connect()
535 self.connect()
536 else:
536 else:
537 raise httplib.NotConnected()
537 raise httplib.NotConnected
538
538
539 # send the data to the server. if we get a broken pipe, then close
539 # send the data to the server. if we get a broken pipe, then close
540 # the socket. we want to reconnect when somebody tries to send again.
540 # the socket. we want to reconnect when somebody tries to send again.
541 #
541 #
542 # NOTE: we DO propagate the error, though, because we cannot simply
542 # NOTE: we DO propagate the error, though, because we cannot simply
543 # ignore the error... the caller will know if they can retry.
543 # ignore the error... the caller will know if they can retry.
544 if self.debuglevel > 0:
544 if self.debuglevel > 0:
545 print "send:", repr(str)
545 print "send:", repr(str)
546 try:
546 try:
547 blocksize = 8192
547 blocksize = 8192
548 read = getattr(str, 'read', None)
548 read = getattr(str, 'read', None)
549 if read is not None:
549 if read is not None:
550 if self.debuglevel > 0:
550 if self.debuglevel > 0:
551 print "sendIng a read()able"
551 print "sendIng a read()able"
552 data = read(blocksize)
552 data = read(blocksize)
553 while data:
553 while data:
554 self.sock.sendall(data)
554 self.sock.sendall(data)
555 data = read(blocksize)
555 data = read(blocksize)
556 else:
556 else:
557 self.sock.sendall(str)
557 self.sock.sendall(str)
558 except socket.error, v:
558 except socket.error, v:
559 reraise = True
559 reraise = True
560 if v[0] == errno.EPIPE: # Broken pipe
560 if v[0] == errno.EPIPE: # Broken pipe
561 if self._HTTPConnection__state == httplib._CS_REQ_SENT:
561 if self._HTTPConnection__state == httplib._CS_REQ_SENT:
562 self._broken_pipe_resp = None
562 self._broken_pipe_resp = None
563 self._broken_pipe_resp = self.getresponse()
563 self._broken_pipe_resp = self.getresponse()
564 reraise = False
564 reraise = False
565 self.close()
565 self.close()
566 if reraise:
566 if reraise:
567 raise
567 raise
568
568
def wrapgetresponse(cls):
    """Wraps getresponse in cls with a broken-pipe sane version.
    """
    def safegetresponse(self):
        # safesend() may have stashed a response captured while the
        # socket was dying; hand that back instead of reading a closed
        # socket. Otherwise defer to the wrapped class.
        cached = getattr(self, '_broken_pipe_resp', None)
        if cached is None:
            return cls.getresponse(self)
        return cached
    safegetresponse.__doc__ = cls.getresponse.__doc__
    return safegetresponse
583
583
class HTTPConnection(httplib.HTTPConnection):
    # Patched connection: keepalive-aware response class, a send() that
    # survives broken pipes, and a getresponse() that replays any
    # response captured during a failed send.
    send = safesend
    response_class = HTTPResponse
    getresponse = wrapgetresponse(httplib.HTTPConnection)
589
589
590
590
591 #########################################################################
591 #########################################################################
592 ##### TEST FUNCTIONS
592 ##### TEST FUNCTIONS
593 #########################################################################
593 #########################################################################
594
594
595 def error_handler(url):
595 def error_handler(url):
596 global HANDLE_ERRORS
596 global HANDLE_ERRORS
597 orig = HANDLE_ERRORS
597 orig = HANDLE_ERRORS
598 keepalive_handler = HTTPHandler()
598 keepalive_handler = HTTPHandler()
599 opener = urllib2.build_opener(keepalive_handler)
599 opener = urllib2.build_opener(keepalive_handler)
600 urllib2.install_opener(opener)
600 urllib2.install_opener(opener)
601 pos = {0: 'off', 1: 'on'}
601 pos = {0: 'off', 1: 'on'}
602 for i in (0, 1):
602 for i in (0, 1):
603 print " fancy error handling %s (HANDLE_ERRORS = %i)" % (pos[i], i)
603 print " fancy error handling %s (HANDLE_ERRORS = %i)" % (pos[i], i)
604 HANDLE_ERRORS = i
604 HANDLE_ERRORS = i
605 try:
605 try:
606 fo = urllib2.urlopen(url)
606 fo = urllib2.urlopen(url)
607 fo.read()
607 fo.read()
608 fo.close()
608 fo.close()
609 try:
609 try:
610 status, reason = fo.status, fo.reason
610 status, reason = fo.status, fo.reason
611 except AttributeError:
611 except AttributeError:
612 status, reason = None, None
612 status, reason = None, None
613 except IOError, e:
613 except IOError, e:
614 print " EXCEPTION: %s" % e
614 print " EXCEPTION: %s" % e
615 raise
615 raise
616 else:
616 else:
617 print " status = %s, reason = %s" % (status, reason)
617 print " status = %s, reason = %s" % (status, reason)
618 HANDLE_ERRORS = orig
618 HANDLE_ERRORS = orig
619 hosts = keepalive_handler.open_connections()
619 hosts = keepalive_handler.open_connections()
620 print "open connections:", hosts
620 print "open connections:", hosts
621 keepalive_handler.close_all()
621 keepalive_handler.close_all()
622
622
def md5(s):
    """Hash `s` with MD5, lazily locating the real implementation.

    On first call the proper constructor is found (hashlib on modern
    Pythons, the legacy md5 module otherwise) and this shim replaces
    itself with it at module level.
    """
    global md5
    try:
        from hashlib import md5 as _md5
    except ImportError:
        from md5 import md5 as _md5
    md5 = _md5
    return _md5(s)
631
631
632 def continuity(url):
632 def continuity(url):
633 format = '%25s: %s'
633 format = '%25s: %s'
634
634
635 # first fetch the file with the normal http handler
635 # first fetch the file with the normal http handler
636 opener = urllib2.build_opener()
636 opener = urllib2.build_opener()
637 urllib2.install_opener(opener)
637 urllib2.install_opener(opener)
638 fo = urllib2.urlopen(url)
638 fo = urllib2.urlopen(url)
639 foo = fo.read()
639 foo = fo.read()
640 fo.close()
640 fo.close()
641 m = md5.new(foo)
641 m = md5.new(foo)
642 print format % ('normal urllib', m.hexdigest())
642 print format % ('normal urllib', m.hexdigest())
643
643
644 # now install the keepalive handler and try again
644 # now install the keepalive handler and try again
645 opener = urllib2.build_opener(HTTPHandler())
645 opener = urllib2.build_opener(HTTPHandler())
646 urllib2.install_opener(opener)
646 urllib2.install_opener(opener)
647
647
648 fo = urllib2.urlopen(url)
648 fo = urllib2.urlopen(url)
649 foo = fo.read()
649 foo = fo.read()
650 fo.close()
650 fo.close()
651 m = md5.new(foo)
651 m = md5.new(foo)
652 print format % ('keepalive read', m.hexdigest())
652 print format % ('keepalive read', m.hexdigest())
653
653
654 fo = urllib2.urlopen(url)
654 fo = urllib2.urlopen(url)
655 foo = ''
655 foo = ''
656 while True:
656 while True:
657 f = fo.readline()
657 f = fo.readline()
658 if f:
658 if f:
659 foo = foo + f
659 foo = foo + f
660 else: break
660 else: break
661 fo.close()
661 fo.close()
662 m = md5.new(foo)
662 m = md5.new(foo)
663 print format % ('keepalive readline', m.hexdigest())
663 print format % ('keepalive readline', m.hexdigest())
664
664
665 def comp(N, url):
665 def comp(N, url):
666 print ' making %i connections to:\n %s' % (N, url)
666 print ' making %i connections to:\n %s' % (N, url)
667
667
668 sys.stdout.write(' first using the normal urllib handlers')
668 sys.stdout.write(' first using the normal urllib handlers')
669 # first use normal opener
669 # first use normal opener
670 opener = urllib2.build_opener()
670 opener = urllib2.build_opener()
671 urllib2.install_opener(opener)
671 urllib2.install_opener(opener)
672 t1 = fetch(N, url)
672 t1 = fetch(N, url)
673 print ' TIME: %.3f s' % t1
673 print ' TIME: %.3f s' % t1
674
674
675 sys.stdout.write(' now using the keepalive handler ')
675 sys.stdout.write(' now using the keepalive handler ')
676 # now install the keepalive handler and try again
676 # now install the keepalive handler and try again
677 opener = urllib2.build_opener(HTTPHandler())
677 opener = urllib2.build_opener(HTTPHandler())
678 urllib2.install_opener(opener)
678 urllib2.install_opener(opener)
679 t2 = fetch(N, url)
679 t2 = fetch(N, url)
680 print ' TIME: %.3f s' % t2
680 print ' TIME: %.3f s' % t2
681 print ' improvement factor: %.2f' % (t1 / t2)
681 print ' improvement factor: %.2f' % (t1 / t2)
682
682
683 def fetch(N, url, delay=0):
683 def fetch(N, url, delay=0):
684 import time
684 import time
685 lens = []
685 lens = []
686 starttime = time.time()
686 starttime = time.time()
687 for i in range(N):
687 for i in range(N):
688 if delay and i > 0:
688 if delay and i > 0:
689 time.sleep(delay)
689 time.sleep(delay)
690 fo = urllib2.urlopen(url)
690 fo = urllib2.urlopen(url)
691 foo = fo.read()
691 foo = fo.read()
692 fo.close()
692 fo.close()
693 lens.append(len(foo))
693 lens.append(len(foo))
694 diff = time.time() - starttime
694 diff = time.time() - starttime
695
695
696 j = 0
696 j = 0
697 for i in lens[1:]:
697 for i in lens[1:]:
698 j = j + 1
698 j = j + 1
699 if not i == lens[0]:
699 if not i == lens[0]:
700 print "WARNING: inconsistent length on read %i: %i" % (j, i)
700 print "WARNING: inconsistent length on read %i: %i" % (j, i)
701
701
702 return diff
702 return diff
703
703
704 def test_timeout(url):
704 def test_timeout(url):
705 global DEBUG
705 global DEBUG
706 dbbackup = DEBUG
706 dbbackup = DEBUG
707 class FakeLogger(object):
707 class FakeLogger(object):
708 def debug(self, msg, *args):
708 def debug(self, msg, *args):
709 print msg % args
709 print msg % args
710 info = warning = error = debug
710 info = warning = error = debug
711 DEBUG = FakeLogger()
711 DEBUG = FakeLogger()
712 print " fetching the file to establish a connection"
712 print " fetching the file to establish a connection"
713 fo = urllib2.urlopen(url)
713 fo = urllib2.urlopen(url)
714 data1 = fo.read()
714 data1 = fo.read()
715 fo.close()
715 fo.close()
716
716
717 i = 20
717 i = 20
718 print " waiting %i seconds for the server to close the connection" % i
718 print " waiting %i seconds for the server to close the connection" % i
719 while i > 0:
719 while i > 0:
720 sys.stdout.write('\r %2i' % i)
720 sys.stdout.write('\r %2i' % i)
721 sys.stdout.flush()
721 sys.stdout.flush()
722 time.sleep(1)
722 time.sleep(1)
723 i -= 1
723 i -= 1
724 sys.stderr.write('\r')
724 sys.stderr.write('\r')
725
725
726 print " fetching the file a second time"
726 print " fetching the file a second time"
727 fo = urllib2.urlopen(url)
727 fo = urllib2.urlopen(url)
728 data2 = fo.read()
728 data2 = fo.read()
729 fo.close()
729 fo.close()
730
730
731 if data1 == data2:
731 if data1 == data2:
732 print ' data are identical'
732 print ' data are identical'
733 else:
733 else:
734 print ' ERROR: DATA DIFFER'
734 print ' ERROR: DATA DIFFER'
735
735
736 DEBUG = dbbackup
736 DEBUG = dbbackup
737
737
738
738
739 def test(url, N=10):
739 def test(url, N=10):
740 print "checking error hander (do this on a non-200)"
740 print "checking error hander (do this on a non-200)"
741 try: error_handler(url)
741 try: error_handler(url)
742 except IOError:
742 except IOError:
743 print "exiting - exception will prevent further tests"
743 print "exiting - exception will prevent further tests"
744 sys.exit()
744 sys.exit()
745 print
745 print
746 print "performing continuity test (making sure stuff isn't corrupted)"
746 print "performing continuity test (making sure stuff isn't corrupted)"
747 continuity(url)
747 continuity(url)
748 print
748 print
749 print "performing speed comparison"
749 print "performing speed comparison"
750 comp(N, url)
750 comp(N, url)
751 print
751 print
752 print "performing dropped-connection check"
752 print "performing dropped-connection check"
753 test_timeout(url)
753 test_timeout(url)
754
754
755 if __name__ == '__main__':
755 if __name__ == '__main__':
756 import time
756 import time
757 import sys
757 import sys
758 try:
758 try:
759 N = int(sys.argv[1])
759 N = int(sys.argv[1])
760 url = sys.argv[2]
760 url = sys.argv[2]
761 except:
761 except:
762 print "%s <integer> <url>" % sys.argv[0]
762 print "%s <integer> <url>" % sys.argv[0]
763 else:
763 else:
764 test(url, N)
764 test(url, N)
@@ -1,341 +1,341 b''
1 # match.py - filename matching
1 # match.py - filename matching
2 #
2 #
3 # Copyright 2008, 2009 Matt Mackall <mpm@selenic.com> and others
3 # Copyright 2008, 2009 Matt Mackall <mpm@selenic.com> and others
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 import re
8 import re
9 import scmutil, util, fileset
9 import scmutil, util, fileset
10 from i18n import _
10 from i18n import _
11
11
12 def _expandsets(pats, ctx):
12 def _expandsets(pats, ctx):
13 '''convert set: patterns into a list of files in the given context'''
13 '''convert set: patterns into a list of files in the given context'''
14 fset = set()
14 fset = set()
15 other = []
15 other = []
16
16
17 for kind, expr in pats:
17 for kind, expr in pats:
18 if kind == 'set':
18 if kind == 'set':
19 if not ctx:
19 if not ctx:
20 raise util.Abort("fileset expression with no context")
20 raise util.Abort("fileset expression with no context")
21 s = fileset.getfileset(ctx, expr)
21 s = fileset.getfileset(ctx, expr)
22 fset.update(s)
22 fset.update(s)
23 continue
23 continue
24 other.append((kind, expr))
24 other.append((kind, expr))
25 return fset, other
25 return fset, other
26
26
class match(object):
    def __init__(self, root, cwd, patterns, include=[], exclude=[],
                 default='glob', exact=False, auditor=None, ctx=None):
        """build an object to match a set of file patterns

        arguments:
        root - the canonical root of the tree you're matching against
        cwd - the current working directory, if relevant
        patterns - patterns to find
        include - patterns to include
        exclude - patterns to exclude
        default - if a pattern in names has no explicit type, assume this one
        exact - patterns are actually literals

        a pattern is one of:
        'glob:<glob>' - a glob relative to cwd
        're:<regexp>' - a regular expression
        'path:<path>' - a path relative to canonroot
        'relglob:<glob>' - an unrooted glob (*.c matches C files in all dirs)
        'relpath:<path>' - a path relative to cwd
        'relre:<regexp>' - a regexp that needn't match the start of a name
        'set:<fileset>' - a fileset expression
        '<something>' - a pattern of the specified default type
        """

        self._root = root
        self._cwd = cwd
        self._files = []
        self._anypats = bool(include or exclude)
        self._ctx = ctx

        im = em = pm = None
        if include:
            kindpats = _normalize(include, 'glob', root, cwd, auditor)
            self.includepat, im = _buildmatch(ctx, kindpats, '(?:/|$)')
        if exclude:
            kindpats = _normalize(exclude, 'glob', root, cwd, auditor)
            self.excludepat, em = _buildmatch(ctx, kindpats, '(?:/|$)')
        if exact:
            self._files = patterns
            pm = self.exact
        elif patterns:
            kindpats = _normalize(patterns, default, root, cwd, auditor)
            self._files = _roots(kindpats)
            self._anypats = self._anypats or _anypats(kindpats)
            self.patternspat, pm = _buildmatch(ctx, kindpats, '$')

        # Compose the final predicate from whichever of include,
        # exclude and pattern matchers are active.
        if patterns or exact:
            if include and exclude:
                m = lambda f: im(f) and not em(f) and pm(f)
            elif include:
                m = lambda f: im(f) and pm(f)
            elif exclude:
                m = lambda f: not em(f) and pm(f)
            else:
                m = pm
        elif include and exclude:
            m = lambda f: im(f) and not em(f)
        elif include:
            m = im
        elif exclude:
            m = lambda f: not em(f)
        else:
            m = lambda f: True

        self.matchfn = m
        self._fmap = set(self._files)

    def __call__(self, fn):
        return self.matchfn(fn)

    def __iter__(self):
        return iter(self._files)

    def bad(self, f, msg):
        '''callback for each explicit file that can't be
        found/accessed, with an error message
        '''
        pass

    def dir(self, f):
        pass

    def missing(self, f):
        pass

    def exact(self, f):
        return f in self._fmap

    def rel(self, f):
        return util.pathto(self._root, self._cwd, f)

    def files(self):
        return self._files

    def anypats(self):
        return self._anypats

    def always(self):
        return False
123
123
class exact(match):
    """Matcher accepting exactly the given files and nothing else."""
    def __init__(self, root, cwd, files):
        match.__init__(self, root, cwd, files, exact=True)
127
127
class always(match):
    """Matcher that accepts every file."""
    def __init__(self, root, cwd):
        match.__init__(self, root, cwd, [])

    def always(self):
        return True
133
133
class narrowmatcher(match):
    """Adapt a matcher to work on a subdirectory only.

    The paths are remapped to remove/insert the path as needed:

    >>> m1 = match('root', '', ['a.txt', 'sub/b.txt'])
    >>> m2 = narrowmatcher('sub', m1)
    >>> bool(m2('a.txt'))
    False
    >>> bool(m2('b.txt'))
    True
    >>> bool(m2.matchfn('a.txt'))
    False
    >>> bool(m2.matchfn('b.txt'))
    True
    >>> m2.files()
    ['b.txt']
    >>> m2.exact('b.txt')
    True
    >>> m2.rel('b.txt')
    'b.txt'
    >>> def bad(f, msg):
    ...     print "%s: %s" % (f, msg)
    >>> m1.bad = bad
    >>> m2.bad('x.txt', 'No such file')
    sub/x.txt: No such file
    """

    def __init__(self, path, matcher):
        self._root = matcher._root
        self._cwd = matcher._cwd
        self._path = path
        self._matcher = matcher

        # Keep only files under `path`, stripped of the prefix.
        prefix = path + "/"
        self._files = [f[len(prefix):] for f in matcher._files
                       if f.startswith(prefix)]
        self._anypats = matcher._anypats
        # Re-prepend the subdirectory before consulting the wrapped matcher.
        self.matchfn = lambda fn: matcher.matchfn(self._path + "/" + fn)
        self._fmap = set(self._files)

    def bad(self, f, msg):
        self._matcher.bad(self._path + "/" + f, msg)
176
176
def patkind(pat):
    """Return the kind prefix of `pat`, or None when it has none."""
    kind, unused = _patsplit(pat, None)
    return kind
179
179
180 def _patsplit(pat, default):
180 def _patsplit(pat, default):
181 """Split a string into an optional pattern kind prefix and the
181 """Split a string into an optional pattern kind prefix and the
182 actual pattern."""
182 actual pattern."""
183 if ':' in pat:
183 if ':' in pat:
184 kind, val = pat.split(':', 1)
184 kind, val = pat.split(':', 1)
185 if kind in ('re', 'glob', 'path', 'relglob', 'relpath', 'relre',
185 if kind in ('re', 'glob', 'path', 'relglob', 'relpath', 'relre',
186 'listfile', 'listfile0', 'set'):
186 'listfile', 'listfile0', 'set'):
187 return kind, val
187 return kind, val
188 return default, pat
188 return default, pat
189
189
190 def _globre(pat):
190 def _globre(pat):
191 "convert a glob pattern into a regexp"
191 "convert a glob pattern into a regexp"
192 i, n = 0, len(pat)
192 i, n = 0, len(pat)
193 res = ''
193 res = ''
194 group = 0
194 group = 0
195 escape = re.escape
195 escape = re.escape
196 def peek():
196 def peek():
197 return i < n and pat[i]
197 return i < n and pat[i]
198 while i < n:
198 while i < n:
199 c = pat[i]
199 c = pat[i]
200 i += 1
200 i += 1
201 if c not in '*?[{},\\':
201 if c not in '*?[{},\\':
202 res += escape(c)
202 res += escape(c)
203 elif c == '*':
203 elif c == '*':
204 if peek() == '*':
204 if peek() == '*':
205 i += 1
205 i += 1
206 res += '.*'
206 res += '.*'
207 else:
207 else:
208 res += '[^/]*'
208 res += '[^/]*'
209 elif c == '?':
209 elif c == '?':
210 res += '.'
210 res += '.'
211 elif c == '[':
211 elif c == '[':
212 j = i
212 j = i
213 if j < n and pat[j] in '!]':
213 if j < n and pat[j] in '!]':
214 j += 1
214 j += 1
215 while j < n and pat[j] != ']':
215 while j < n and pat[j] != ']':
216 j += 1
216 j += 1
217 if j >= n:
217 if j >= n:
218 res += '\\['
218 res += '\\['
219 else:
219 else:
220 stuff = pat[i:j].replace('\\','\\\\')
220 stuff = pat[i:j].replace('\\','\\\\')
221 i = j + 1
221 i = j + 1
222 if stuff[0] == '!':
222 if stuff[0] == '!':
223 stuff = '^' + stuff[1:]
223 stuff = '^' + stuff[1:]
224 elif stuff[0] == '^':
224 elif stuff[0] == '^':
225 stuff = '\\' + stuff
225 stuff = '\\' + stuff
226 res = '%s[%s]' % (res, stuff)
226 res = '%s[%s]' % (res, stuff)
227 elif c == '{':
227 elif c == '{':
228 group += 1
228 group += 1
229 res += '(?:'
229 res += '(?:'
230 elif c == '}' and group:
230 elif c == '}' and group:
231 res += ')'
231 res += ')'
232 group -= 1
232 group -= 1
233 elif c == ',' and group:
233 elif c == ',' and group:
234 res += '|'
234 res += '|'
235 elif c == '\\':
235 elif c == '\\':
236 p = peek()
236 p = peek()
237 if p:
237 if p:
238 i += 1
238 i += 1
239 res += escape(p)
239 res += escape(p)
240 else:
240 else:
241 res += escape(c)
241 res += escape(c)
242 else:
242 else:
243 res += escape(c)
243 res += escape(c)
244 return res
244 return res
245
245
246 def _regex(kind, name, tail):
246 def _regex(kind, name, tail):
247 '''convert a pattern into a regular expression'''
247 '''convert a pattern into a regular expression'''
248 if not name:
248 if not name:
249 return ''
249 return ''
250 if kind == 're':
250 if kind == 're':
251 return name
251 return name
252 elif kind == 'path':
252 elif kind == 'path':
253 return '^' + re.escape(name) + '(?:/|$)'
253 return '^' + re.escape(name) + '(?:/|$)'
254 elif kind == 'relglob':
254 elif kind == 'relglob':
255 return '(?:|.*/)' + _globre(name) + tail
255 return '(?:|.*/)' + _globre(name) + tail
256 elif kind == 'relpath':
256 elif kind == 'relpath':
257 return re.escape(name) + '(?:/|$)'
257 return re.escape(name) + '(?:/|$)'
258 elif kind == 'relre':
258 elif kind == 'relre':
259 if name.startswith('^'):
259 if name.startswith('^'):
260 return name
260 return name
261 return '.*' + name
261 return '.*' + name
262 return _globre(name) + tail
262 return _globre(name) + tail
263
263
def _buildmatch(ctx, pats, tail):
    """Return (pattern-string, match-function) for a list of patterns,
    folding any fileset results into the match."""
    fset, pats = _expandsets(pats, ctx)
    if not pats:
        # pure fileset: membership test only
        return "", fset.__contains__

    pat, mf = _buildregexmatch(pats, tail)
    if not fset:
        return pat, mf
    return pat, lambda f: f in fset or mf(f)
273
273
def _buildregexmatch(pats, tail):
    """build a matching function from a set of patterns"""
    try:
        pat = '(?:%s)' % '|'.join([_regex(k, p, tail) for (k, p) in pats])
        if len(pat) > 20000:
            raise OverflowError
        return pat, re.compile(pat).match
    except OverflowError:
        # A tiny regex engine choked on the combined pattern: bisect
        # the pattern list until each half compiles, matching against
        # the union of the halves.
        count = len(pats)
        if count < 2:
            raise
        pata, mfa = _buildregexmatch(pats[:count // 2], tail)
        patb, mfb = _buildregexmatch(pats[count // 2:], tail)
        return pat, lambda s: mfa(s) or mfb(s)
    except re.error:
        # Find which individual pattern is broken for a useful message.
        for k, p in pats:
            try:
                re.compile('(?:%s)' % _regex(k, p, tail))
            except re.error:
                raise util.Abort(_("invalid pattern (%s): %s") % (k, p))
        raise util.Abort(_("invalid pattern"))
298
298
def _normalize(names, default, root, cwd, auditor):
    """Expand and canonicalize raw patterns into (kind, name) pairs."""
    pats = []
    for p in names:
        kind, name = _patsplit(p, default)
        if kind in ('glob', 'relpath'):
            # cwd-relative kinds are canonicalized against the repo root
            name = scmutil.canonpath(root, cwd, name, auditor)
        elif kind in ('relglob', 'path'):
            name = util.normpath(name)
        elif kind in ('listfile', 'listfile0'):
            # the pattern names a file containing more patterns: recurse
            try:
                data = util.readfile(name)
                if kind == 'listfile0':
                    entries = data.split('\0')
                else:
                    entries = data.splitlines()
                entries = [e for e in entries if e]
            except EnvironmentError:
                raise util.Abort(_("unable to read file list (%s)") % name)
            pats += _normalize(entries, default, root, cwd, auditor)
            continue

        pats.append((kind, name))
    return pats
321
321
322 def _roots(patterns):
322 def _roots(patterns):
323 r = []
323 r = []
324 for kind, name in patterns:
324 for kind, name in patterns:
325 if kind == 'glob': # find the non-glob prefix
325 if kind == 'glob': # find the non-glob prefix
326 root = []
326 root = []
327 for p in name.split('/'):
327 for p in name.split('/'):
328 if '[' in p or '{' in p or '*' in p or '?' in p:
328 if '[' in p or '{' in p or '*' in p or '?' in p:
329 break
329 break
330 root.append(p)
330 root.append(p)
331 r.append('/'.join(root) or '.')
331 r.append('/'.join(root) or '.')
332 elif kind in ('relpath', 'path'):
332 elif kind in ('relpath', 'path'):
333 r.append(name or '.')
333 r.append(name or '.')
334 elif kind == 'relglob':
334 elif kind == 'relglob':
335 r.append('.')
335 r.append('.')
336 return r
336 return r
337
337
338 def _anypats(patterns):
338 def _anypats(patterns):
339 for kind, name in patterns:
339 for kind, name in patterns:
340 if kind in ('glob', 're', 'relglob', 'relre', 'set'):
340 if kind in ('glob', 're', 'relglob', 'relre', 'set'):
341 return True
341 return True
@@ -1,1890 +1,1890 b''
1 # patch.py - patch file parsing routines
1 # patch.py - patch file parsing routines
2 #
2 #
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 import cStringIO, email.Parser, os, errno, re
9 import cStringIO, email.Parser, os, errno, re
10 import tempfile, zlib, shutil
10 import tempfile, zlib, shutil
11
11
12 from i18n import _
12 from i18n import _
13 from node import hex, nullid, short
13 from node import hex, nullid, short
14 import base85, mdiff, scmutil, util, diffhelpers, copies, encoding, error
14 import base85, mdiff, scmutil, util, diffhelpers, copies, encoding, error
15 import context
15 import context
16
16
17 gitre = re.compile('diff --git a/(.*) b/(.*)')
17 gitre = re.compile('diff --git a/(.*) b/(.*)')
18
18
19 class PatchError(Exception):
19 class PatchError(Exception):
20 pass
20 pass
21
21
22
22
23 # public functions
23 # public functions
24
24
25 def split(stream):
25 def split(stream):
26 '''return an iterator of individual patches from a stream'''
26 '''return an iterator of individual patches from a stream'''
27 def isheader(line, inheader):
27 def isheader(line, inheader):
28 if inheader and line[0] in (' ', '\t'):
28 if inheader and line[0] in (' ', '\t'):
29 # continuation
29 # continuation
30 return True
30 return True
31 if line[0] in (' ', '-', '+'):
31 if line[0] in (' ', '-', '+'):
32 # diff line - don't check for header pattern in there
32 # diff line - don't check for header pattern in there
33 return False
33 return False
34 l = line.split(': ', 1)
34 l = line.split(': ', 1)
35 return len(l) == 2 and ' ' not in l[0]
35 return len(l) == 2 and ' ' not in l[0]
36
36
37 def chunk(lines):
37 def chunk(lines):
38 return cStringIO.StringIO(''.join(lines))
38 return cStringIO.StringIO(''.join(lines))
39
39
40 def hgsplit(stream, cur):
40 def hgsplit(stream, cur):
41 inheader = True
41 inheader = True
42
42
43 for line in stream:
43 for line in stream:
44 if not line.strip():
44 if not line.strip():
45 inheader = False
45 inheader = False
46 if not inheader and line.startswith('# HG changeset patch'):
46 if not inheader and line.startswith('# HG changeset patch'):
47 yield chunk(cur)
47 yield chunk(cur)
48 cur = []
48 cur = []
49 inheader = True
49 inheader = True
50
50
51 cur.append(line)
51 cur.append(line)
52
52
53 if cur:
53 if cur:
54 yield chunk(cur)
54 yield chunk(cur)
55
55
56 def mboxsplit(stream, cur):
56 def mboxsplit(stream, cur):
57 for line in stream:
57 for line in stream:
58 if line.startswith('From '):
58 if line.startswith('From '):
59 for c in split(chunk(cur[1:])):
59 for c in split(chunk(cur[1:])):
60 yield c
60 yield c
61 cur = []
61 cur = []
62
62
63 cur.append(line)
63 cur.append(line)
64
64
65 if cur:
65 if cur:
66 for c in split(chunk(cur[1:])):
66 for c in split(chunk(cur[1:])):
67 yield c
67 yield c
68
68
69 def mimesplit(stream, cur):
69 def mimesplit(stream, cur):
70 def msgfp(m):
70 def msgfp(m):
71 fp = cStringIO.StringIO()
71 fp = cStringIO.StringIO()
72 g = email.Generator.Generator(fp, mangle_from_=False)
72 g = email.Generator.Generator(fp, mangle_from_=False)
73 g.flatten(m)
73 g.flatten(m)
74 fp.seek(0)
74 fp.seek(0)
75 return fp
75 return fp
76
76
77 for line in stream:
77 for line in stream:
78 cur.append(line)
78 cur.append(line)
79 c = chunk(cur)
79 c = chunk(cur)
80
80
81 m = email.Parser.Parser().parse(c)
81 m = email.Parser.Parser().parse(c)
82 if not m.is_multipart():
82 if not m.is_multipart():
83 yield msgfp(m)
83 yield msgfp(m)
84 else:
84 else:
85 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
85 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
86 for part in m.walk():
86 for part in m.walk():
87 ct = part.get_content_type()
87 ct = part.get_content_type()
88 if ct not in ok_types:
88 if ct not in ok_types:
89 continue
89 continue
90 yield msgfp(part)
90 yield msgfp(part)
91
91
92 def headersplit(stream, cur):
92 def headersplit(stream, cur):
93 inheader = False
93 inheader = False
94
94
95 for line in stream:
95 for line in stream:
96 if not inheader and isheader(line, inheader):
96 if not inheader and isheader(line, inheader):
97 yield chunk(cur)
97 yield chunk(cur)
98 cur = []
98 cur = []
99 inheader = True
99 inheader = True
100 if inheader and not isheader(line, inheader):
100 if inheader and not isheader(line, inheader):
101 inheader = False
101 inheader = False
102
102
103 cur.append(line)
103 cur.append(line)
104
104
105 if cur:
105 if cur:
106 yield chunk(cur)
106 yield chunk(cur)
107
107
108 def remainder(cur):
108 def remainder(cur):
109 yield chunk(cur)
109 yield chunk(cur)
110
110
111 class fiter(object):
111 class fiter(object):
112 def __init__(self, fp):
112 def __init__(self, fp):
113 self.fp = fp
113 self.fp = fp
114
114
115 def __iter__(self):
115 def __iter__(self):
116 return self
116 return self
117
117
118 def next(self):
118 def next(self):
119 l = self.fp.readline()
119 l = self.fp.readline()
120 if not l:
120 if not l:
121 raise StopIteration
121 raise StopIteration
122 return l
122 return l
123
123
124 inheader = False
124 inheader = False
125 cur = []
125 cur = []
126
126
127 mimeheaders = ['content-type']
127 mimeheaders = ['content-type']
128
128
129 if not util.safehasattr(stream, 'next'):
129 if not util.safehasattr(stream, 'next'):
130 # http responses, for example, have readline but not next
130 # http responses, for example, have readline but not next
131 stream = fiter(stream)
131 stream = fiter(stream)
132
132
133 for line in stream:
133 for line in stream:
134 cur.append(line)
134 cur.append(line)
135 if line.startswith('# HG changeset patch'):
135 if line.startswith('# HG changeset patch'):
136 return hgsplit(stream, cur)
136 return hgsplit(stream, cur)
137 elif line.startswith('From '):
137 elif line.startswith('From '):
138 return mboxsplit(stream, cur)
138 return mboxsplit(stream, cur)
139 elif isheader(line, inheader):
139 elif isheader(line, inheader):
140 inheader = True
140 inheader = True
141 if line.split(':', 1)[0].lower() in mimeheaders:
141 if line.split(':', 1)[0].lower() in mimeheaders:
142 # let email parser handle this
142 # let email parser handle this
143 return mimesplit(stream, cur)
143 return mimesplit(stream, cur)
144 elif line.startswith('--- ') and inheader:
144 elif line.startswith('--- ') and inheader:
145 # No evil headers seen by diff start, split by hand
145 # No evil headers seen by diff start, split by hand
146 return headersplit(stream, cur)
146 return headersplit(stream, cur)
147 # Not enough info, keep reading
147 # Not enough info, keep reading
148
148
149 # if we are here, we have a very plain patch
149 # if we are here, we have a very plain patch
150 return remainder(cur)
150 return remainder(cur)
151
151
152 def extract(ui, fileobj):
152 def extract(ui, fileobj):
153 '''extract patch from data read from fileobj.
153 '''extract patch from data read from fileobj.
154
154
155 patch can be a normal patch or contained in an email message.
155 patch can be a normal patch or contained in an email message.
156
156
157 return tuple (filename, message, user, date, branch, node, p1, p2).
157 return tuple (filename, message, user, date, branch, node, p1, p2).
158 Any item in the returned tuple can be None. If filename is None,
158 Any item in the returned tuple can be None. If filename is None,
159 fileobj did not contain a patch. Caller must unlink filename when done.'''
159 fileobj did not contain a patch. Caller must unlink filename when done.'''
160
160
161 # attempt to detect the start of a patch
161 # attempt to detect the start of a patch
162 # (this heuristic is borrowed from quilt)
162 # (this heuristic is borrowed from quilt)
163 diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
163 diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
164 r'retrieving revision [0-9]+(\.[0-9]+)*$|'
164 r'retrieving revision [0-9]+(\.[0-9]+)*$|'
165 r'---[ \t].*?^\+\+\+[ \t]|'
165 r'---[ \t].*?^\+\+\+[ \t]|'
166 r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)
166 r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)
167
167
168 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
168 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
169 tmpfp = os.fdopen(fd, 'w')
169 tmpfp = os.fdopen(fd, 'w')
170 try:
170 try:
171 msg = email.Parser.Parser().parse(fileobj)
171 msg = email.Parser.Parser().parse(fileobj)
172
172
173 subject = msg['Subject']
173 subject = msg['Subject']
174 user = msg['From']
174 user = msg['From']
175 if not subject and not user:
175 if not subject and not user:
176 # Not an email, restore parsed headers if any
176 # Not an email, restore parsed headers if any
177 subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
177 subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
178
178
179 gitsendmail = 'git-send-email' in msg.get('X-Mailer', '')
179 gitsendmail = 'git-send-email' in msg.get('X-Mailer', '')
180 # should try to parse msg['Date']
180 # should try to parse msg['Date']
181 date = None
181 date = None
182 nodeid = None
182 nodeid = None
183 branch = None
183 branch = None
184 parents = []
184 parents = []
185
185
186 if subject:
186 if subject:
187 if subject.startswith('[PATCH'):
187 if subject.startswith('[PATCH'):
188 pend = subject.find(']')
188 pend = subject.find(']')
189 if pend >= 0:
189 if pend >= 0:
190 subject = subject[pend + 1:].lstrip()
190 subject = subject[pend + 1:].lstrip()
191 subject = re.sub(r'\n[ \t]+', ' ', subject)
191 subject = re.sub(r'\n[ \t]+', ' ', subject)
192 ui.debug('Subject: %s\n' % subject)
192 ui.debug('Subject: %s\n' % subject)
193 if user:
193 if user:
194 ui.debug('From: %s\n' % user)
194 ui.debug('From: %s\n' % user)
195 diffs_seen = 0
195 diffs_seen = 0
196 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
196 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
197 message = ''
197 message = ''
198 for part in msg.walk():
198 for part in msg.walk():
199 content_type = part.get_content_type()
199 content_type = part.get_content_type()
200 ui.debug('Content-Type: %s\n' % content_type)
200 ui.debug('Content-Type: %s\n' % content_type)
201 if content_type not in ok_types:
201 if content_type not in ok_types:
202 continue
202 continue
203 payload = part.get_payload(decode=True)
203 payload = part.get_payload(decode=True)
204 m = diffre.search(payload)
204 m = diffre.search(payload)
205 if m:
205 if m:
206 hgpatch = False
206 hgpatch = False
207 hgpatchheader = False
207 hgpatchheader = False
208 ignoretext = False
208 ignoretext = False
209
209
210 ui.debug('found patch at byte %d\n' % m.start(0))
210 ui.debug('found patch at byte %d\n' % m.start(0))
211 diffs_seen += 1
211 diffs_seen += 1
212 cfp = cStringIO.StringIO()
212 cfp = cStringIO.StringIO()
213 for line in payload[:m.start(0)].splitlines():
213 for line in payload[:m.start(0)].splitlines():
214 if line.startswith('# HG changeset patch') and not hgpatch:
214 if line.startswith('# HG changeset patch') and not hgpatch:
215 ui.debug('patch generated by hg export\n')
215 ui.debug('patch generated by hg export\n')
216 hgpatch = True
216 hgpatch = True
217 hgpatchheader = True
217 hgpatchheader = True
218 # drop earlier commit message content
218 # drop earlier commit message content
219 cfp.seek(0)
219 cfp.seek(0)
220 cfp.truncate()
220 cfp.truncate()
221 subject = None
221 subject = None
222 elif hgpatchheader:
222 elif hgpatchheader:
223 if line.startswith('# User '):
223 if line.startswith('# User '):
224 user = line[7:]
224 user = line[7:]
225 ui.debug('From: %s\n' % user)
225 ui.debug('From: %s\n' % user)
226 elif line.startswith("# Date "):
226 elif line.startswith("# Date "):
227 date = line[7:]
227 date = line[7:]
228 elif line.startswith("# Branch "):
228 elif line.startswith("# Branch "):
229 branch = line[9:]
229 branch = line[9:]
230 elif line.startswith("# Node ID "):
230 elif line.startswith("# Node ID "):
231 nodeid = line[10:]
231 nodeid = line[10:]
232 elif line.startswith("# Parent "):
232 elif line.startswith("# Parent "):
233 parents.append(line[9:].lstrip())
233 parents.append(line[9:].lstrip())
234 elif not line.startswith("# "):
234 elif not line.startswith("# "):
235 hgpatchheader = False
235 hgpatchheader = False
236 elif line == '---' and gitsendmail:
236 elif line == '---' and gitsendmail:
237 ignoretext = True
237 ignoretext = True
238 if not hgpatchheader and not ignoretext:
238 if not hgpatchheader and not ignoretext:
239 cfp.write(line)
239 cfp.write(line)
240 cfp.write('\n')
240 cfp.write('\n')
241 message = cfp.getvalue()
241 message = cfp.getvalue()
242 if tmpfp:
242 if tmpfp:
243 tmpfp.write(payload)
243 tmpfp.write(payload)
244 if not payload.endswith('\n'):
244 if not payload.endswith('\n'):
245 tmpfp.write('\n')
245 tmpfp.write('\n')
246 elif not diffs_seen and message and content_type == 'text/plain':
246 elif not diffs_seen and message and content_type == 'text/plain':
247 message += '\n' + payload
247 message += '\n' + payload
248 except:
248 except:
249 tmpfp.close()
249 tmpfp.close()
250 os.unlink(tmpname)
250 os.unlink(tmpname)
251 raise
251 raise
252
252
253 if subject and not message.startswith(subject):
253 if subject and not message.startswith(subject):
254 message = '%s\n%s' % (subject, message)
254 message = '%s\n%s' % (subject, message)
255 tmpfp.close()
255 tmpfp.close()
256 if not diffs_seen:
256 if not diffs_seen:
257 os.unlink(tmpname)
257 os.unlink(tmpname)
258 return None, message, user, date, branch, None, None, None
258 return None, message, user, date, branch, None, None, None
259 p1 = parents and parents.pop(0) or None
259 p1 = parents and parents.pop(0) or None
260 p2 = parents and parents.pop(0) or None
260 p2 = parents and parents.pop(0) or None
261 return tmpname, message, user, date, branch, nodeid, p1, p2
261 return tmpname, message, user, date, branch, nodeid, p1, p2
262
262
263 class patchmeta(object):
263 class patchmeta(object):
264 """Patched file metadata
264 """Patched file metadata
265
265
266 'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
266 'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
267 or COPY. 'path' is patched file path. 'oldpath' is set to the
267 or COPY. 'path' is patched file path. 'oldpath' is set to the
268 origin file when 'op' is either COPY or RENAME, None otherwise. If
268 origin file when 'op' is either COPY or RENAME, None otherwise. If
269 file mode is changed, 'mode' is a tuple (islink, isexec) where
269 file mode is changed, 'mode' is a tuple (islink, isexec) where
270 'islink' is True if the file is a symlink and 'isexec' is True if
270 'islink' is True if the file is a symlink and 'isexec' is True if
271 the file is executable. Otherwise, 'mode' is None.
271 the file is executable. Otherwise, 'mode' is None.
272 """
272 """
273 def __init__(self, path):
273 def __init__(self, path):
274 self.path = path
274 self.path = path
275 self.oldpath = None
275 self.oldpath = None
276 self.mode = None
276 self.mode = None
277 self.op = 'MODIFY'
277 self.op = 'MODIFY'
278 self.binary = False
278 self.binary = False
279
279
280 def setmode(self, mode):
280 def setmode(self, mode):
281 islink = mode & 020000
281 islink = mode & 020000
282 isexec = mode & 0100
282 isexec = mode & 0100
283 self.mode = (islink, isexec)
283 self.mode = (islink, isexec)
284
284
285 def copy(self):
285 def copy(self):
286 other = patchmeta(self.path)
286 other = patchmeta(self.path)
287 other.oldpath = self.oldpath
287 other.oldpath = self.oldpath
288 other.mode = self.mode
288 other.mode = self.mode
289 other.op = self.op
289 other.op = self.op
290 other.binary = self.binary
290 other.binary = self.binary
291 return other
291 return other
292
292
293 def _ispatchinga(self, afile):
293 def _ispatchinga(self, afile):
294 if afile == '/dev/null':
294 if afile == '/dev/null':
295 return self.op == 'ADD'
295 return self.op == 'ADD'
296 return afile == 'a/' + (self.oldpath or self.path)
296 return afile == 'a/' + (self.oldpath or self.path)
297
297
298 def _ispatchingb(self, bfile):
298 def _ispatchingb(self, bfile):
299 if bfile == '/dev/null':
299 if bfile == '/dev/null':
300 return self.op == 'DELETE'
300 return self.op == 'DELETE'
301 return bfile == 'b/' + self.path
301 return bfile == 'b/' + self.path
302
302
303 def ispatching(self, afile, bfile):
303 def ispatching(self, afile, bfile):
304 return self._ispatchinga(afile) and self._ispatchingb(bfile)
304 return self._ispatchinga(afile) and self._ispatchingb(bfile)
305
305
306 def __repr__(self):
306 def __repr__(self):
307 return "<patchmeta %s %r>" % (self.op, self.path)
307 return "<patchmeta %s %r>" % (self.op, self.path)
308
308
309 def readgitpatch(lr):
309 def readgitpatch(lr):
310 """extract git-style metadata about patches from <patchname>"""
310 """extract git-style metadata about patches from <patchname>"""
311
311
312 # Filter patch for git information
312 # Filter patch for git information
313 gp = None
313 gp = None
314 gitpatches = []
314 gitpatches = []
315 for line in lr:
315 for line in lr:
316 line = line.rstrip(' \r\n')
316 line = line.rstrip(' \r\n')
317 if line.startswith('diff --git'):
317 if line.startswith('diff --git'):
318 m = gitre.match(line)
318 m = gitre.match(line)
319 if m:
319 if m:
320 if gp:
320 if gp:
321 gitpatches.append(gp)
321 gitpatches.append(gp)
322 dst = m.group(2)
322 dst = m.group(2)
323 gp = patchmeta(dst)
323 gp = patchmeta(dst)
324 elif gp:
324 elif gp:
325 if line.startswith('--- '):
325 if line.startswith('--- '):
326 gitpatches.append(gp)
326 gitpatches.append(gp)
327 gp = None
327 gp = None
328 continue
328 continue
329 if line.startswith('rename from '):
329 if line.startswith('rename from '):
330 gp.op = 'RENAME'
330 gp.op = 'RENAME'
331 gp.oldpath = line[12:]
331 gp.oldpath = line[12:]
332 elif line.startswith('rename to '):
332 elif line.startswith('rename to '):
333 gp.path = line[10:]
333 gp.path = line[10:]
334 elif line.startswith('copy from '):
334 elif line.startswith('copy from '):
335 gp.op = 'COPY'
335 gp.op = 'COPY'
336 gp.oldpath = line[10:]
336 gp.oldpath = line[10:]
337 elif line.startswith('copy to '):
337 elif line.startswith('copy to '):
338 gp.path = line[8:]
338 gp.path = line[8:]
339 elif line.startswith('deleted file'):
339 elif line.startswith('deleted file'):
340 gp.op = 'DELETE'
340 gp.op = 'DELETE'
341 elif line.startswith('new file mode '):
341 elif line.startswith('new file mode '):
342 gp.op = 'ADD'
342 gp.op = 'ADD'
343 gp.setmode(int(line[-6:], 8))
343 gp.setmode(int(line[-6:], 8))
344 elif line.startswith('new mode '):
344 elif line.startswith('new mode '):
345 gp.setmode(int(line[-6:], 8))
345 gp.setmode(int(line[-6:], 8))
346 elif line.startswith('GIT binary patch'):
346 elif line.startswith('GIT binary patch'):
347 gp.binary = True
347 gp.binary = True
348 if gp:
348 if gp:
349 gitpatches.append(gp)
349 gitpatches.append(gp)
350
350
351 return gitpatches
351 return gitpatches
352
352
353 class linereader(object):
353 class linereader(object):
354 # simple class to allow pushing lines back into the input stream
354 # simple class to allow pushing lines back into the input stream
355 def __init__(self, fp):
355 def __init__(self, fp):
356 self.fp = fp
356 self.fp = fp
357 self.buf = []
357 self.buf = []
358
358
359 def push(self, line):
359 def push(self, line):
360 if line is not None:
360 if line is not None:
361 self.buf.append(line)
361 self.buf.append(line)
362
362
363 def readline(self):
363 def readline(self):
364 if self.buf:
364 if self.buf:
365 l = self.buf[0]
365 l = self.buf[0]
366 del self.buf[0]
366 del self.buf[0]
367 return l
367 return l
368 return self.fp.readline()
368 return self.fp.readline()
369
369
370 def __iter__(self):
370 def __iter__(self):
371 while True:
371 while True:
372 l = self.readline()
372 l = self.readline()
373 if not l:
373 if not l:
374 break
374 break
375 yield l
375 yield l
376
376
377 class abstractbackend(object):
377 class abstractbackend(object):
378 def __init__(self, ui):
378 def __init__(self, ui):
379 self.ui = ui
379 self.ui = ui
380
380
381 def getfile(self, fname):
381 def getfile(self, fname):
382 """Return target file data and flags as a (data, (islink,
382 """Return target file data and flags as a (data, (islink,
383 isexec)) tuple.
383 isexec)) tuple.
384 """
384 """
385 raise NotImplementedError
385 raise NotImplementedError
386
386
387 def setfile(self, fname, data, mode, copysource):
387 def setfile(self, fname, data, mode, copysource):
388 """Write data to target file fname and set its mode. mode is a
388 """Write data to target file fname and set its mode. mode is a
389 (islink, isexec) tuple. If data is None, the file content should
389 (islink, isexec) tuple. If data is None, the file content should
390 be left unchanged. If the file is modified after being copied,
390 be left unchanged. If the file is modified after being copied,
391 copysource is set to the original file name.
391 copysource is set to the original file name.
392 """
392 """
393 raise NotImplementedError
393 raise NotImplementedError
394
394
395 def unlink(self, fname):
395 def unlink(self, fname):
396 """Unlink target file."""
396 """Unlink target file."""
397 raise NotImplementedError
397 raise NotImplementedError
398
398
399 def writerej(self, fname, failed, total, lines):
399 def writerej(self, fname, failed, total, lines):
400 """Write rejected lines for fname. total is the number of hunks
400 """Write rejected lines for fname. total is the number of hunks
401 which failed to apply and total the total number of hunks for this
401 which failed to apply and total the total number of hunks for this
402 files.
402 files.
403 """
403 """
404 pass
404 pass
405
405
406 def exists(self, fname):
406 def exists(self, fname):
407 raise NotImplementedError
407 raise NotImplementedError
408
408
409 class fsbackend(abstractbackend):
409 class fsbackend(abstractbackend):
410 def __init__(self, ui, basedir):
410 def __init__(self, ui, basedir):
411 super(fsbackend, self).__init__(ui)
411 super(fsbackend, self).__init__(ui)
412 self.opener = scmutil.opener(basedir)
412 self.opener = scmutil.opener(basedir)
413
413
414 def _join(self, f):
414 def _join(self, f):
415 return os.path.join(self.opener.base, f)
415 return os.path.join(self.opener.base, f)
416
416
417 def getfile(self, fname):
417 def getfile(self, fname):
418 path = self._join(fname)
418 path = self._join(fname)
419 if os.path.islink(path):
419 if os.path.islink(path):
420 return (os.readlink(path), (True, False))
420 return (os.readlink(path), (True, False))
421 isexec = False
421 isexec = False
422 try:
422 try:
423 isexec = os.lstat(path).st_mode & 0100 != 0
423 isexec = os.lstat(path).st_mode & 0100 != 0
424 except OSError, e:
424 except OSError, e:
425 if e.errno != errno.ENOENT:
425 if e.errno != errno.ENOENT:
426 raise
426 raise
427 return (self.opener.read(fname), (False, isexec))
427 return (self.opener.read(fname), (False, isexec))
428
428
429 def setfile(self, fname, data, mode, copysource):
429 def setfile(self, fname, data, mode, copysource):
430 islink, isexec = mode
430 islink, isexec = mode
431 if data is None:
431 if data is None:
432 util.setflags(self._join(fname), islink, isexec)
432 util.setflags(self._join(fname), islink, isexec)
433 return
433 return
434 if islink:
434 if islink:
435 self.opener.symlink(data, fname)
435 self.opener.symlink(data, fname)
436 else:
436 else:
437 self.opener.write(fname, data)
437 self.opener.write(fname, data)
438 if isexec:
438 if isexec:
439 util.setflags(self._join(fname), False, True)
439 util.setflags(self._join(fname), False, True)
440
440
441 def unlink(self, fname):
441 def unlink(self, fname):
442 try:
442 try:
443 util.unlinkpath(self._join(fname))
443 util.unlinkpath(self._join(fname))
444 except OSError, inst:
444 except OSError, inst:
445 if inst.errno != errno.ENOENT:
445 if inst.errno != errno.ENOENT:
446 raise
446 raise
447
447
448 def writerej(self, fname, failed, total, lines):
448 def writerej(self, fname, failed, total, lines):
449 fname = fname + ".rej"
449 fname = fname + ".rej"
450 self.ui.warn(
450 self.ui.warn(
451 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
451 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
452 (failed, total, fname))
452 (failed, total, fname))
453 fp = self.opener(fname, 'w')
453 fp = self.opener(fname, 'w')
454 fp.writelines(lines)
454 fp.writelines(lines)
455 fp.close()
455 fp.close()
456
456
457 def exists(self, fname):
457 def exists(self, fname):
458 return os.path.lexists(self._join(fname))
458 return os.path.lexists(self._join(fname))
459
459
460 class workingbackend(fsbackend):
460 class workingbackend(fsbackend):
461 def __init__(self, ui, repo, similarity):
461 def __init__(self, ui, repo, similarity):
462 super(workingbackend, self).__init__(ui, repo.root)
462 super(workingbackend, self).__init__(ui, repo.root)
463 self.repo = repo
463 self.repo = repo
464 self.similarity = similarity
464 self.similarity = similarity
465 self.removed = set()
465 self.removed = set()
466 self.changed = set()
466 self.changed = set()
467 self.copied = []
467 self.copied = []
468
468
469 def _checkknown(self, fname):
469 def _checkknown(self, fname):
470 if self.repo.dirstate[fname] == '?' and self.exists(fname):
470 if self.repo.dirstate[fname] == '?' and self.exists(fname):
471 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
471 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
472
472
473 def setfile(self, fname, data, mode, copysource):
473 def setfile(self, fname, data, mode, copysource):
474 self._checkknown(fname)
474 self._checkknown(fname)
475 super(workingbackend, self).setfile(fname, data, mode, copysource)
475 super(workingbackend, self).setfile(fname, data, mode, copysource)
476 if copysource is not None:
476 if copysource is not None:
477 self.copied.append((copysource, fname))
477 self.copied.append((copysource, fname))
478 self.changed.add(fname)
478 self.changed.add(fname)
479
479
480 def unlink(self, fname):
480 def unlink(self, fname):
481 self._checkknown(fname)
481 self._checkknown(fname)
482 super(workingbackend, self).unlink(fname)
482 super(workingbackend, self).unlink(fname)
483 self.removed.add(fname)
483 self.removed.add(fname)
484 self.changed.add(fname)
484 self.changed.add(fname)
485
485
486 def close(self):
486 def close(self):
487 wctx = self.repo[None]
487 wctx = self.repo[None]
488 addremoved = set(self.changed)
488 addremoved = set(self.changed)
489 for src, dst in self.copied:
489 for src, dst in self.copied:
490 scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
490 scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
491 if self.removed:
491 if self.removed:
492 wctx.forget(sorted(self.removed))
492 wctx.forget(sorted(self.removed))
493 for f in self.removed:
493 for f in self.removed:
494 if f not in self.repo.dirstate:
494 if f not in self.repo.dirstate:
495 # File was deleted and no longer belongs to the
495 # File was deleted and no longer belongs to the
496 # dirstate, it was probably marked added then
496 # dirstate, it was probably marked added then
497 # deleted, and should not be considered by
497 # deleted, and should not be considered by
498 # addremove().
498 # addremove().
499 addremoved.discard(f)
499 addremoved.discard(f)
500 if addremoved:
500 if addremoved:
501 cwd = self.repo.getcwd()
501 cwd = self.repo.getcwd()
502 if cwd:
502 if cwd:
503 addremoved = [util.pathto(self.repo.root, cwd, f)
503 addremoved = [util.pathto(self.repo.root, cwd, f)
504 for f in addremoved]
504 for f in addremoved]
505 scmutil.addremove(self.repo, addremoved, similarity=self.similarity)
505 scmutil.addremove(self.repo, addremoved, similarity=self.similarity)
506 return sorted(self.changed)
506 return sorted(self.changed)
507
507
508 class filestore(object):
508 class filestore(object):
509 def __init__(self, maxsize=None):
509 def __init__(self, maxsize=None):
510 self.opener = None
510 self.opener = None
511 self.files = {}
511 self.files = {}
512 self.created = 0
512 self.created = 0
513 self.maxsize = maxsize
513 self.maxsize = maxsize
514 if self.maxsize is None:
514 if self.maxsize is None:
515 self.maxsize = 4*(2**20)
515 self.maxsize = 4*(2**20)
516 self.size = 0
516 self.size = 0
517 self.data = {}
517 self.data = {}
518
518
519 def setfile(self, fname, data, mode, copied=None):
519 def setfile(self, fname, data, mode, copied=None):
520 if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
520 if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
521 self.data[fname] = (data, mode, copied)
521 self.data[fname] = (data, mode, copied)
522 self.size += len(data)
522 self.size += len(data)
523 else:
523 else:
524 if self.opener is None:
524 if self.opener is None:
525 root = tempfile.mkdtemp(prefix='hg-patch-')
525 root = tempfile.mkdtemp(prefix='hg-patch-')
526 self.opener = scmutil.opener(root)
526 self.opener = scmutil.opener(root)
527 # Avoid filename issues with these simple names
527 # Avoid filename issues with these simple names
528 fn = str(self.created)
528 fn = str(self.created)
529 self.opener.write(fn, data)
529 self.opener.write(fn, data)
530 self.created += 1
530 self.created += 1
531 self.files[fname] = (fn, mode, copied)
531 self.files[fname] = (fn, mode, copied)
532
532
533 def getfile(self, fname):
533 def getfile(self, fname):
534 if fname in self.data:
534 if fname in self.data:
535 return self.data[fname]
535 return self.data[fname]
536 if not self.opener or fname not in self.files:
536 if not self.opener or fname not in self.files:
537 raise IOError()
537 raise IOError
538 fn, mode, copied = self.files[fname]
538 fn, mode, copied = self.files[fname]
539 return self.opener.read(fn), mode, copied
539 return self.opener.read(fn), mode, copied
540
540
541 def close(self):
541 def close(self):
542 if self.opener:
542 if self.opener:
543 shutil.rmtree(self.opener.base)
543 shutil.rmtree(self.opener.base)
544
544
class repobackend(abstractbackend):
    """Backend that patches against a changectx, staging results in a store.

    Nothing is written to the working directory; close() reports which
    files the patch touched so the caller can commit them.
    """

    def __init__(self, ui, repo, ctx, store):
        super(repobackend, self).__init__(ui)
        self.repo = repo
        self.ctx = ctx
        self.store = store
        # bookkeeping of everything the patch touched, reported by close()
        self.changed = set()
        self.removed = set()
        self.copied = {}

    def _checkknown(self, fname):
        # patching an untracked file is a hard error, not a missing file
        if fname in self.ctx:
            return
        raise PatchError(_('cannot patch %s: file is not tracked') % fname)

    def getfile(self, fname):
        """Return (data, (islink, isexec)) for fname in the base context."""
        try:
            fctx = self.ctx[fname]
        except error.LookupError:
            # callers expect IOError to mean "no such file"
            raise IOError
        flagstr = fctx.flags()
        islink = 'l' in flagstr
        isexec = 'x' in flagstr
        return fctx.data(), (islink, isexec)

    def setfile(self, fname, data, mode, copysource):
        """Stage new content for fname; data=None keeps the old content."""
        if copysource:
            self._checkknown(copysource)
        if data is None:
            data = self.ctx[fname].data()
        self.store.setfile(fname, data, mode, copysource)
        self.changed.add(fname)
        if copysource:
            self.copied[fname] = copysource

    def unlink(self, fname):
        """Record fname as removed by the patch."""
        self._checkknown(fname)
        self.removed.add(fname)

    def exists(self, fname):
        return fname in self.ctx

    def close(self):
        """Return the set of all files the patch modified or removed."""
        return self.removed | self.changed
586
586
# @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
# Raw strings: the patterns previously relied on Python passing unknown
# escapes (\d, \+, \*) through unchanged, which is deprecated behavior.
unidesc = re.compile(r'@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
# context-diff range line: "--- start[,end] ----" / "*** start[,end] ****"
contextdesc = re.compile(r'(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
# line-ending handling modes accepted by patchfile (see eolmode parameter)
eolmodes = ['strict', 'crlf', 'lf', 'auto']
591
591
class patchfile(object):
    """Applies the hunks of one patched file.

    Content is read through `backend`, hunks are applied against an
    in-memory list of lines, and results (plus any rejects) are written
    back by close().
    """

    def __init__(self, ui, gp, backend, store, eolmode='strict'):
        self.fname = gp.path
        self.eolmode = eolmode
        # EOL style detected from the first existing line ('\r\n', '\n', None)
        self.eol = None
        self.backend = backend
        self.ui = ui
        self.lines = []
        self.exists = False
        self.missing = True
        self.mode = gp.mode
        self.copysource = gp.oldpath
        self.create = gp.op in ('ADD', 'COPY', 'RENAME')
        self.remove = gp.op == 'DELETE'
        try:
            if self.copysource is None:
                data, mode = backend.getfile(self.fname)
                self.exists = True
            else:
                # copies/renames take their base content from the patch store
                data, mode = store.getfile(self.copysource)[:2]
                self.exists = backend.exists(self.fname)
            self.missing = False
            if data:
                self.lines = mdiff.splitnewlines(data)
            if self.mode is None:
                self.mode = mode
            if self.lines:
                # Normalize line endings
                if self.lines[0].endswith('\r\n'):
                    self.eol = '\r\n'
                elif self.lines[0].endswith('\n'):
                    self.eol = '\n'
                if eolmode != 'strict':
                    nlines = []
                    for l in self.lines:
                        if l.endswith('\r\n'):
                            l = l[:-2] + '\n'
                        nlines.append(l)
                    self.lines = nlines
        except IOError:
            # target missing is fine when the patch creates the file
            if self.create:
                self.missing = False
            if self.mode is None:
                self.mode = (False, False)
        if self.missing:
            self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)

        # hunk-application bookkeeping
        self.hash = {}        # line content -> list of line numbers (findlines)
        self.dirty = 0
        self.offset = 0       # cumulative line-count drift from applied hunks
        self.skew = 0         # drift discovered while searching for context
        self.rej = []         # hunks that could not be applied
        self.fileprinted = False
        self.printfile(False)
        self.hunks = 0

    def writelines(self, fname, lines, mode):
        """Write lines to the backend, converting EOLs per self.eolmode."""
        if self.eolmode == 'auto':
            eol = self.eol
        elif self.eolmode == 'crlf':
            eol = '\r\n'
        else:
            eol = '\n'

        if self.eolmode != 'strict' and eol and eol != '\n':
            rawlines = []
            for l in lines:
                if l and l[-1] == '\n':
                    l = l[:-1] + eol
                rawlines.append(l)
            lines = rawlines

        self.backend.setfile(fname, ''.join(lines), mode, self.copysource)

    def printfile(self, warn):
        """Emit 'patching file X' once, as warning or verbose note."""
        if self.fileprinted:
            return
        if warn or self.ui.verbose:
            self.fileprinted = True
        s = _("patching file %s\n") % self.fname
        if warn:
            self.ui.warn(s)
        else:
            self.ui.note(s)


    def findlines(self, l, linenum):
        # looks through the hash and finds candidate lines.  The
        # result is a list of line numbers sorted based on distance
        # from linenum

        cand = self.hash.get(l, [])
        if len(cand) > 1:
            # resort our list of potentials forward then back.
            cand.sort(key=lambda x: abs(x - linenum))
        return cand

    def write_rej(self):
        # our rejects are a little different from patch(1).  This always
        # creates rejects in the same form as the original patch.  A file
        # header is inserted so that you can run the reject through patch again
        # without having to type the filename.
        if not self.rej:
            return
        base = os.path.basename(self.fname)
        lines = ["--- %s\n+++ %s\n" % (base, base)]
        for x in self.rej:
            for l in x.hunk:
                lines.append(l)
                if l[-1] != '\n':
                    lines.append("\n\ No newline at end of file\n")
        self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)

    def apply(self, h):
        """Apply one hunk; return 0 on clean apply, fuzz level used, or -1
        when the hunk was rejected (it is then queued in self.rej)."""
        if not h.complete():
            raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
                            (h.number, h.desc, len(h.a), h.lena, len(h.b),
                            h.lenb))

        self.hunks += 1

        if self.missing:
            self.rej.append(h)
            return -1

        if self.exists and self.create:
            if self.copysource:
                # NOTE(review): the '%' is applied inside _(), so the
                # already-formatted string is looked up for translation;
                # presumably should be _("...") % self.fname -- confirm.
                self.ui.warn(_("cannot create %s: destination already "
                               "exists\n" % self.fname))
            else:
                self.ui.warn(_("file %s already exists\n") % self.fname)
            self.rej.append(h)
            return -1

        if isinstance(h, binhunk):
            # binary hunks replace the whole content, no context matching
            if self.remove:
                self.backend.unlink(self.fname)
            else:
                self.lines[:] = h.new()
                self.offset += len(h.new())
                self.dirty = True
            return 0

        horig = h
        if (self.eolmode in ('crlf', 'lf')
            or self.eolmode == 'auto' and self.eol):
            # If new eols are going to be normalized, then normalize
            # hunk data before patching. Otherwise, preserve input
            # line-endings.
            h = h.getnormalized()

        # fast case first, no offsets, no fuzz
        old, oldstart, new, newstart = h.fuzzit(0, False)
        oldstart += self.offset
        orig_start = oldstart
        # if there's skew we want to emit the "(offset %d lines)" even
        # when the hunk cleanly applies at start + skew, so skip the
        # fast case code
        if (self.skew == 0 and
            diffhelpers.testhunk(old, self.lines, oldstart) == 0):
            if self.remove:
                self.backend.unlink(self.fname)
            else:
                self.lines[oldstart:oldstart + len(old)] = new
                self.offset += len(new) - len(old)
                self.dirty = True
            return 0

        # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
        self.hash = {}
        for x, s in enumerate(self.lines):
            self.hash.setdefault(s, []).append(x)

        # try increasing fuzz (dropping context lines), top-only first
        for fuzzlen in xrange(3):
            for toponly in [True, False]:
                old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
                oldstart = oldstart + self.offset + self.skew
                oldstart = min(oldstart, len(self.lines))
                if old:
                    cand = self.findlines(old[0][1:], oldstart)
                else:
                    # Only adding lines with no or fuzzed context, just
                    # take the skew in account
                    cand = [oldstart]

                for l in cand:
                    if not old or diffhelpers.testhunk(old, self.lines, l) == 0:
                        self.lines[l : l + len(old)] = new
                        self.offset += len(new) - len(old)
                        self.skew = l - orig_start
                        self.dirty = True
                        offset = l - orig_start - fuzzlen
                        if fuzzlen:
                            msg = _("Hunk #%d succeeded at %d "
                                    "with fuzz %d "
                                    "(offset %d lines).\n")
                            self.printfile(True)
                            self.ui.warn(msg %
                                (h.number, l + 1, fuzzlen, offset))
                        else:
                            msg = _("Hunk #%d succeeded at %d "
                                    "(offset %d lines).\n")
                            self.ui.note(msg % (h.number, l + 1, offset))
                        return fuzzlen
        self.printfile(True)
        self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
        self.rej.append(horig)
        return -1

    def close(self):
        """Flush applied changes and rejects; return the reject count."""
        if self.dirty:
            self.writelines(self.fname, self.lines, self.mode)
        self.write_rej()
        return len(self.rej)
806
806
class hunk(object):
    """One text hunk of a patch: parses the unified or context form into
    old lines (self.a, with -/space prefixes) and new lines (self.b)."""

    def __init__(self, desc, num, lr, context):
        self.number = num
        self.desc = desc
        self.hunk = [desc]    # raw hunk lines, starting with the @@ header
        self.a = []           # old-side lines, each prefixed '-' or ' '
        self.b = []           # new-side lines, unprefixed
        self.starta = self.lena = None
        self.startb = self.lenb = None
        if lr is not None:
            if context:
                self.read_context_hunk(lr)
            else:
                self.read_unified_hunk(lr)

    def getnormalized(self):
        """Return a copy with line endings normalized to LF."""

        def normalize(lines):
            nlines = []
            for line in lines:
                if line.endswith('\r\n'):
                    line = line[:-2] + '\n'
                nlines.append(line)
            return nlines

        # Dummy object, it is rebuilt manually
        nh = hunk(self.desc, self.number, None, None)
        nh.number = self.number
        nh.desc = self.desc
        nh.hunk = self.hunk
        nh.a = normalize(self.a)
        nh.b = normalize(self.b)
        nh.starta = self.starta
        nh.startb = self.startb
        nh.lena = self.lena
        nh.lenb = self.lenb
        return nh

    def read_unified_hunk(self, lr):
        """Parse a unified-diff hunk ('@@ -s,l +s,l @@') from linereader lr."""
        m = unidesc.match(self.desc)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.starta, self.lena, self.startb, self.lenb = m.groups()
        # a missing length in the range means 1 line
        if self.lena is None:
            self.lena = 1
        else:
            self.lena = int(self.lena)
        if self.lenb is None:
            self.lenb = 1
        else:
            self.lenb = int(self.lenb)
        self.starta = int(self.starta)
        self.startb = int(self.startb)
        diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a,
                             self.b)
        # if we hit eof before finishing out the hunk, the last line will
        # be zero length. Lets try to fix it up.
        while len(self.hunk[-1]) == 0:
            del self.hunk[-1]
            del self.a[-1]
            del self.b[-1]
            self.lena -= 1
            self.lenb -= 1
        self._fixnewline(lr)

    def read_context_hunk(self, lr):
        """Parse a context-diff hunk ('*** s,e ****' / '--- s,e ----'),
        converting it to the unified representation in self.a/self.b."""
        self.desc = lr.readline()
        m = contextdesc.match(self.desc)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.starta, aend = m.groups()
        self.starta = int(self.starta)
        if aend is None:
            aend = self.starta
        self.lena = int(aend) - self.starta
        if self.starta:
            self.lena += 1
        # old side: '- ' and '! ' become '-', '  ' stays context
        for x in xrange(self.lena):
            l = lr.readline()
            if l.startswith('---'):
                # lines addition, old block is empty
                lr.push(l)
                break
            s = l[2:]
            if l.startswith('- ') or l.startswith('! '):
                u = '-' + s
            elif l.startswith('  '):
                u = ' ' + s
            else:
                raise PatchError(_("bad hunk #%d old text line %d") %
                                 (self.number, x))
            self.a.append(u)
            self.hunk.append(u)

        l = lr.readline()
        if l.startswith('\ '):
            # '\ No newline at end of file': strip the trailing newline
            s = self.a[-1][:-1]
            self.a[-1] = s
            self.hunk[-1] = s
            l = lr.readline()
        m = contextdesc.match(l)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.startb, bend = m.groups()
        self.startb = int(self.startb)
        if bend is None:
            bend = self.startb
        self.lenb = int(bend) - self.startb
        if self.startb:
            self.lenb += 1
        hunki = 1
        # new side: '+ ' and '! ' become '+', merged into self.hunk in order
        for x in xrange(self.lenb):
            l = lr.readline()
            if l.startswith('\ '):
                # XXX: the only way to hit this is with an invalid line range.
                # The no-eol marker is not counted in the line range, but I
                # guess there are diff(1) out there which behave differently.
                s = self.b[-1][:-1]
                self.b[-1] = s
                self.hunk[hunki - 1] = s
                continue
            if not l:
                # line deletions, new block is empty and we hit EOF
                lr.push(l)
                break
            s = l[2:]
            if l.startswith('+ ') or l.startswith('! '):
                u = '+' + s
            elif l.startswith('  '):
                u = ' ' + s
            elif len(self.b) == 0:
                # line deletions, new block is empty
                lr.push(l)
                break
            else:
                # NOTE(review): message says 'old text' but this parses the
                # new side -- looks like a copy-paste of the message above.
                raise PatchError(_("bad hunk #%d old text line %d") %
                                 (self.number, x))
            self.b.append(s)
            # interleave the new line into self.hunk after matching context
            while True:
                if hunki >= len(self.hunk):
                    h = ""
                else:
                    h = self.hunk[hunki]
                hunki += 1
                if h == u:
                    break
                elif h.startswith('-'):
                    continue
                else:
                    self.hunk.insert(hunki - 1, u)
                    break

        if not self.a:
            # this happens when lines were only added to the hunk
            for x in self.hunk:
                if x.startswith('-') or x.startswith(' '):
                    self.a.append(x)
        if not self.b:
            # this happens when lines were only deleted from the hunk
            for x in self.hunk:
                if x.startswith('+') or x.startswith(' '):
                    self.b.append(x[1:])
        # @@ -start,len +start,len @@
        self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
                                               self.startb, self.lenb)
        self.hunk[0] = self.desc
        self._fixnewline(lr)

    def _fixnewline(self, lr):
        # consume a trailing '\ No newline at end of file' marker, if any
        l = lr.readline()
        if l.startswith('\ '):
            diffhelpers.fix_newline(self.hunk, self.a, self.b)
        else:
            lr.push(l)

    def complete(self):
        """True when the parsed line counts match the declared ranges."""
        return len(self.a) == self.lena and len(self.b) == self.lenb

    def _fuzzit(self, old, new, fuzz, toponly):
        # this removes context lines from the top and bottom of list 'l'.  It
        # checks the hunk to make sure only context lines are removed, and then
        # returns a new shortened list of lines.
        fuzz = min(fuzz, len(old))
        if fuzz:
            top = 0
            bot = 0
            hlen = len(self.hunk)
            for x in xrange(hlen - 1):
                # the hunk starts with the @@ line, so use x+1
                if self.hunk[x + 1][0] == ' ':
                    top += 1
                else:
                    break
            if not toponly:
                for x in xrange(hlen - 1):
                    if self.hunk[hlen - bot - 1][0] == ' ':
                        bot += 1
                    else:
                        break

            bot = min(fuzz, bot)
            top = min(fuzz, top)
            return old[top:len(old)-bot], new[top:len(new)-bot], top
        return old, new, 0

    def fuzzit(self, fuzz, toponly):
        """Return (old, oldstart, new, newstart) with up to `fuzz` context
        lines dropped; starts are converted to 0-based line indices."""
        old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
        oldstart = self.starta + top
        newstart = self.startb + top
        # zero length hunk ranges already have their start decremented
        if self.lena and oldstart > 0:
            oldstart -= 1
        if self.lenb and newstart > 0:
            newstart -= 1
        return old, oldstart, new, newstart
1023
1023
class binhunk(object):
    'A binary patch file. Only understands literals so far.'
    def __init__(self, lr, fname):
        self.text = None      # decoded file content, set by _read
        self.hunk = ['GIT binary patch\n']
        self._fname = fname
        self._read(lr)

    def complete(self):
        return self.text is not None

    def new(self):
        # whole-content replacement: one "line" holding the full payload
        return [self.text]

    def _read(self, lr):
        """Parse a git 'literal' binary hunk: base85 lines, zlib payload."""
        def getline(lr, hunk):
            l = lr.readline()
            hunk.append(l)
            return l.rstrip('\r\n')

        # skip ahead to the 'literal <size>' header
        while True:
            line = getline(lr, self.hunk)
            if not line:
                raise PatchError(_('could not extract "%s" binary data')
                                 % self._fname)
            if line.startswith('literal '):
                break
        size = int(line[8:].rstrip())
        dec = []
        line = getline(lr, self.hunk)
        while len(line) > 1:
            # first char encodes the decoded byte count: A-Z -> 1..26,
            # a-z -> 27..52 (git binary patch line format)
            l = line[0]
            if l <= 'Z' and l >= 'A':
                l = ord(l) - ord('A') + 1
            else:
                l = ord(l) - ord('a') + 27
            try:
                dec.append(base85.b85decode(line[1:])[:l])
            except ValueError, e:
                raise PatchError(_('could not decode "%s" binary patch: %s')
                                 % (self._fname, str(e)))
            line = getline(lr, self.hunk)
        text = zlib.decompress(''.join(dec))
        # the 'literal' header declares the uncompressed size; verify it
        if len(text) != size:
            raise PatchError(_('"%s" length is %d bytes, should be %d')
                             % (self._fname, len(text), size))
        self.text = text
1071
1071
def parsefilename(str):
    """Extract the filename from a '--- filename<TAB|space>stuff' line.

    NOTE: the parameter shadows the builtin 'str'; kept for interface
    compatibility.
    """
    name = str[4:].rstrip('\r\n')
    # a tab separator takes precedence over a space
    for sep in ('\t', ' '):
        cut = name.find(sep)
        if cut >= 0:
            return name[:cut]
    return name
1081
1081
def pathstrip(path, strip):
    """Split `path` after removing `strip` leading directory components.

    Returns (stripped-prefix, remainder). Consecutive slashes count as a
    single separator. Raises PatchError when there are fewer than `strip`
    components.
    """
    if strip == 0:
        return '', path.rstrip()
    end = len(path)
    pos = 0
    remaining = strip
    while remaining > 0:
        nxt = path.find('/', pos)
        if nxt == -1:
            raise PatchError(_("unable to strip away %d of %d dirs from %s") %
                             (remaining, strip, path))
        pos = nxt + 1
        # consume '//' in the path
        while pos < end - 1 and path[pos] == '/':
            pos += 1
        remaining -= 1
    return path[:pos].lstrip(), path[pos:].rstrip()
1099
1099
def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip):
    """Build a patchmeta for a plain (non-git) patch from its file header.

    Decides which file to patch from the ---/+++ names and the first hunk,
    and whether the patch creates or deletes the file.
    """
    nulla = afile_orig == "/dev/null"
    nullb = bfile_orig == "/dev/null"
    # a zero-length zero-start old range means file creation; same for removal
    create = nulla and hunk.starta == 0 and hunk.lena == 0
    remove = nullb and hunk.startb == 0 and hunk.lenb == 0
    abase, afile = pathstrip(afile_orig, strip)
    gooda = not nulla and backend.exists(afile)
    bbase, bfile = pathstrip(bfile_orig, strip)
    if afile == bfile:
        goodb = gooda
    else:
        goodb = not nullb and backend.exists(bfile)
    missing = not goodb and not gooda and not create

    # some diff programs apparently produce patches where the afile is
    # not /dev/null, but afile starts with bfile
    abasedir = afile[:afile.rfind('/') + 1]
    bbasedir = bfile[:bfile.rfind('/') + 1]
    if (missing and abasedir == bbasedir and afile.startswith(bfile)
        and hunk.starta == 0 and hunk.lena == 0):
        create = True
        missing = False

    # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
    # diff is between a file and its backup. In this case, the original
    # file should be patched (see original mpatch code).
    isbackup = (abase == bbase and bfile.startswith(afile))
    fname = None
    if not missing:
        if gooda and goodb:
            fname = isbackup and afile or bfile
        elif gooda:
            fname = afile

    if not fname:
        if not nullb:
            fname = isbackup and afile or bfile
        elif not nulla:
            fname = afile
        else:
            raise PatchError(_("undefined source and destination files"))

    gp = patchmeta(fname)
    if create:
        gp.op = 'ADD'
    elif remove:
        gp.op = 'DELETE'
    return gp
1148
1148
def scangitpatch(lr, firstline):
    """Pre-scan a git patch for copy/rename metadata.

    Git patches can emit:
    - rename a to b
    - change b
    - copy a to c
    - change c

    Such a sequence cannot be applied as it is read: the renamed 'a'
    would not be found because it was already renamed, and copying from
    'b' would pick up 'b' after its modification.  So the whole stream
    is scanned up front for copy and rename commands, allowing the
    copies to be performed ahead of time.
    """
    pos = 0
    try:
        # seekable input: remember the current offset so it can be
        # restored after the scan
        pos = lr.fp.tell()
        src = lr.fp
    except IOError:
        # unseekable stream (e.g. a pipe): buffer it all in memory
        src = cStringIO.StringIO(lr.fp.read())
    scanner = linereader(src)
    scanner.push(firstline)
    patches = readgitpatch(scanner)
    src.seek(pos)
    return patches
1174
1174
def iterhunks(fp):
    """Read a patch and yield the following events:
    - ("file", afile, bfile, firsthunk): select a new target file.
    - ("hunk", hunk): a new hunk is ready to be applied, follows a
    "file" event.
    - ("git", gitchanges): current diff is in git format, gitchanges
    maps filenames to gitpatch records. Unique event.
    """
    afile = ""
    bfile = ""
    state = None
    hunknum = 0
    emitfile = newfile = False
    # None until the first 'diff --git' line is seen; afterwards a
    # reversed stack of pending git patchmeta entries
    gitpatches = None

    # our states
    BFILE = 1
    # context is None while the diff flavor is unknown, then True for
    # context diffs and False for unified diffs
    context = None
    lr = linereader(fp)

    while True:
        x = lr.readline()
        if not x:
            break
        # past a file header, a hunk starts at a unified hunk marker
        # ('@'), a context hunk separator, or a git binary patch
        if state == BFILE and (
            (not context and x[0] == '@')
            or (context is not False and x.startswith('***************'))
            or x.startswith('GIT binary patch')):
            gp = None
            if (gitpatches and
                gitpatches[-1].ispatching(afile, bfile)):
                gp = gitpatches.pop()
            if x.startswith('GIT binary patch'):
                h = binhunk(lr, gp.path)
            else:
                if context is None and x.startswith('***************'):
                    context = True
                h = hunk(x, hunknum + 1, lr, context)
            hunknum += 1
            if emitfile:
                # first hunk of a new target: announce the file first
                emitfile = False
                yield 'file', (afile, bfile, h, gp and gp.copy() or None)
            yield 'hunk', h
        elif x.startswith('diff --git'):
            m = gitre.match(x.rstrip(' \r\n'))
            if not m:
                continue
            if gitpatches is None:
                # scan whole input for git metadata
                gitpatches = scangitpatch(lr, x)
                yield 'git', [g.copy() for g in gitpatches
                              if g.op in ('COPY', 'RENAME')]
                gitpatches.reverse()
            afile = 'a/' + m.group(1)
            bfile = 'b/' + m.group(2)
            # flush hunkless metadata entries preceding the current file
            while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
                gp = gitpatches.pop()
                yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
            if not gitpatches:
                raise PatchError(_('failed to synchronize metadata for "%s"')
                                 % afile[2:])
            gp = gitpatches[-1]
            newfile = True
        elif x.startswith('---'):
            # check for a unified diff
            l2 = lr.readline()
            if not l2.startswith('+++'):
                lr.push(l2)
                continue
            newfile = True
            context = False
            afile = parsefilename(x)
            bfile = parsefilename(l2)
        elif x.startswith('***'):
            # check for a context diff
            l2 = lr.readline()
            if not l2.startswith('---'):
                lr.push(l2)
                continue
            l3 = lr.readline()
            lr.push(l3)
            if not l3.startswith("***************"):
                lr.push(l2)
                continue
            newfile = True
            context = True
            afile = parsefilename(x)
            bfile = parsefilename(l2)

        if newfile:
            newfile = False
            emitfile = True
            state = BFILE
            hunknum = 0

    # emit any remaining hunkless git metadata entries
    while gitpatches:
        gp = gitpatches.pop()
        yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1273
1273
def applydiff(ui, fp, backend, store, strip=1, eolmode='strict'):
    """Reads a patch from fp and tries to apply it.

    Returns 0 for a clean patch, -1 if any rejects were found and 1 if
    there was any fuzz.

    If 'eolmode' is 'strict', the patch content and patched file are
    read in binary mode. Otherwise, line endings are ignored when
    patching then normalized according to 'eolmode'.
    """
    # delegate to _applydiff with the default hunk applier (patchfile)
    return _applydiff(
        ui, fp, patchfile, backend, store, strip=strip, eolmode=eolmode)
1286
1286
def _applydiff(ui, fp, patcher, backend, store, strip=1,
               eolmode='strict'):
    # Core patch application loop: consumes iterhunks() events and drives
    # 'patcher' (a patchfile-like factory) against 'backend'.

    def pstrip(p):
        # strip leading path components ('-p' semantics); strip - 1
        # because iterhunks already removed the 'a/'/'b/' prefixes
        return pathstrip(p, strip - 1)[1]

    rejects = 0
    err = 0
    # file currently being patched, or None between files
    current_file = None

    for state, values in iterhunks(fp):
        if state == 'hunk':
            if not current_file:
                continue
            ret = current_file.apply(values)
            if ret > 0:
                # hunk applied with fuzz
                err = 1
        elif state == 'file':
            # finish the previous file before switching targets
            if current_file:
                rejects += current_file.close()
                current_file = None
            afile, bfile, first_hunk, gp = values
            if gp:
                gp.path = pstrip(gp.path)
                if gp.oldpath:
                    gp.oldpath = pstrip(gp.oldpath)
            else:
                # plain (non-git) patch: synthesize metadata
                gp = makepatchmeta(backend, afile, bfile, first_hunk, strip)
            if gp.op == 'RENAME':
                backend.unlink(gp.oldpath)
            if not first_hunk:
                # metadata-only entry: deletion, copy/rename or mode change
                if gp.op == 'DELETE':
                    backend.unlink(gp.path)
                    continue
                data, mode = None, None
                if gp.op in ('RENAME', 'COPY'):
                    data, mode = store.getfile(gp.oldpath)[:2]
                if gp.mode:
                    mode = gp.mode
                if gp.op == 'ADD':
                    # Added files without content have no hunk and
                    # must be created
                    data = ''
                if data or mode:
                    if (gp.op in ('ADD', 'RENAME', 'COPY')
                        and backend.exists(gp.path)):
                        raise PatchError(_("cannot create %s: destination "
                                           "already exists") % gp.path)
                    backend.setfile(gp.path, data, mode, gp.oldpath)
                continue
            try:
                current_file = patcher(ui, gp, backend, store,
                                       eolmode=eolmode)
            except PatchError, inst:
                # record the failure but keep processing remaining files
                ui.warn(str(inst) + '\n')
                current_file = None
                rejects += 1
                continue
        elif state == 'git':
            # preserve copy/rename sources before they are modified
            for gp in values:
                path = pstrip(gp.oldpath)
                data, mode = backend.getfile(path)
                store.setfile(path, data, mode)
        else:
            raise util.Abort(_('unsupported parser state: %s') % state)

    if current_file:
        rejects += current_file.close()

    # rejects take precedence over fuzz in the return code
    if rejects:
        return -1
    return err
1359
1359
def _externalpatch(ui, repo, patcher, patchname, strip, files,
                   similarity):
    """use <patcher> to apply <patchname> to the working directory.
    returns whether patch was applied with fuzz factor."""

    fuzz = False
    args = []
    cwd = repo.root
    if cwd:
        args.append('-d %s' % util.shellquote(cwd))
    fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
                                         util.shellquote(patchname)))
    # 'pf' is the file currently being patched, 'printed_file' tracks
    # whether its name was already shown alongside a warning.  Both are
    # initialized before the loop: a patcher emitting a 'with fuzz' or
    # 'FAILED' line before any 'patching file' line used to hit an
    # UnboundLocalError here.
    pf = ''
    printed_file = False
    try:
        for line in fp:
            line = line.rstrip()
            ui.note(line + '\n')
            if line.startswith('patching file '):
                pf = util.parsepatchoutput(line)
                printed_file = False
                files.add(pf)
            elif line.find('with fuzz') >= 0:
                fuzz = True
                if not printed_file:
                    ui.warn(pf + '\n')
                    printed_file = True
                ui.warn(line + '\n')
            elif line.find('saving rejects to file') >= 0:
                ui.warn(line + '\n')
            elif line.find('FAILED') >= 0:
                if not printed_file:
                    ui.warn(pf + '\n')
                    printed_file = True
                ui.warn(line + '\n')
    finally:
        # register added/removed files even on failure so the dirstate
        # reflects whatever the external tool managed to do
        if files:
            cfiles = list(files)
            cwd = repo.getcwd()
            if cwd:
                cfiles = [util.pathto(repo.root, cwd, f)
                          for f in cfiles]
            scmutil.addremove(repo, cfiles, similarity=similarity)
    code = fp.close()
    if code:
        raise PatchError(_("patch command failed: %s") %
                         util.explainexit(code)[0])
    return fuzz
1406
1406
def patchbackend(ui, backend, patchobj, strip, files=None, eolmode='strict'):
    """Apply 'patchobj' (a path or an open file object) through 'backend'.

    Returns True when the patch applied with fuzz, False on a clean
    apply; raises PatchError when the patch failed to apply.
    """
    if files is None:
        files = set()
    if eolmode is None:
        eolmode = ui.config('patch', 'eol', 'strict')
    if eolmode.lower() not in eolmodes:
        raise util.Abort(_('unsupported line endings type: %s') % eolmode)
    eolmode = eolmode.lower()

    store = filestore()
    # accept either a filename or a file-like object
    try:
        fp = open(patchobj, 'rb')
    except TypeError:
        fp = patchobj
    try:
        rc = applydiff(ui, fp, backend, store, strip=strip,
                       eolmode=eolmode)
    finally:
        # only close the file we opened ourselves
        if fp != patchobj:
            fp.close()
        files.update(backend.close())
        store.close()
    if rc < 0:
        raise PatchError(_('patch failed to apply'))
    return rc > 0
1432
1432
def internalpatch(ui, repo, patchobj, strip, files=None, eolmode='strict',
                  similarity=0):
    """use builtin patch to apply <patchobj> to the working directory.
    returns whether patch was applied with fuzz factor."""
    # patch through a workingbackend so the working directory and
    # dirstate are updated together
    return patchbackend(ui, workingbackend(ui, repo, similarity),
                        patchobj, strip, files, eolmode)
1439
1439
def patchrepo(ui, repo, ctx, store, patchobj, strip, files=None,
              eolmode='strict'):
    """Apply <patchobj> against repository data via a repobackend."""
    return patchbackend(ui, repobackend(ui, repo, ctx, store),
                        patchobj, strip, files, eolmode)
1444
1444
def makememctx(repo, parents, text, user, date, branch, files, store,
               editor=None):
    """Build an in-memory commit context whose file data comes from 'store'.

    File contents, link/exec flags and copy sources are read back from
    'store'; if 'editor' is given it is invoked to rewrite the commit
    message.
    """
    extra = {}
    if branch:
        extra['branch'] = encoding.fromlocal(branch)

    def getfilectx(repo, memctx, path):
        data, (islink, isexec), copied = store.getfile(path)
        return context.memfilectx(path, data, islink=islink, isexec=isexec,
                                  copied=copied)

    ctx = context.memctx(repo, parents, text, files, getfilectx, user,
                         date, extra)
    if editor:
        ctx._text = editor(repo, ctx, [])
    return ctx
1459
1459
def patch(ui, repo, patchname, strip=1, files=None, eolmode='strict',
          similarity=0):
    """Apply <patchname> to the working directory.

    'eolmode' specifies how end of lines should be handled. It can be:
    - 'strict': inputs are read in binary mode, EOLs are preserved
    - 'crlf': EOLs are ignored when patching and reset to CRLF
    - 'lf': EOLs are ignored when patching and reset to LF
    - None: get it from user settings, default to 'strict'
    'eolmode' is ignored when using an external patcher program.

    Returns whether patch was applied with fuzz factor.
    """
    # a configured ui.patch tool takes precedence over the builtin
    patcher = ui.config('ui', 'patch')
    if files is None:
        files = set()
    try:
        if patcher:
            return _externalpatch(ui, repo, patcher, patchname, strip,
                                  files, similarity)
        return internalpatch(ui, repo, patchname, strip, files, eolmode,
                             similarity)
    except PatchError, err:
        # surface patch failures as a user-level abort
        raise util.Abort(str(err))
1484
1484
def changedfiles(ui, repo, patchpath, strip=1):
    """Return the set of file names touched by the patch at 'patchpath'."""
    backend = fsbackend(ui, repo.root)
    fp = open(patchpath, 'rb')
    try:
        touched = set()
        for state, values in iterhunks(fp):
            if state in ('hunk', 'git'):
                # hunk contents and copy metadata carry no new names
                continue
            if state != 'file':
                raise util.Abort(_('unsupported parser state: %s') % state)
            afile, bfile, first_hunk, gp = values
            if gp:
                # git patch: adjust the recorded paths for 'strip'
                gp.path = pathstrip(gp.path, strip - 1)[1]
                if gp.oldpath:
                    gp.oldpath = pathstrip(gp.oldpath, strip - 1)[1]
            else:
                gp = makepatchmeta(backend, afile, bfile, first_hunk, strip)
            touched.add(gp.path)
            if gp.op == 'RENAME':
                touched.add(gp.oldpath)
        return touched
    finally:
        fp.close()
1507
1507
def b85diff(to, tn):
    '''print base85-encoded binary diff'''

    def gitindex(text):
        # git blob id: sha1 of "blob <size>\0<content>"
        if not text:
            return hex(nullid)
        digest = util.sha1('blob %d\0' % len(text))
        digest.update(text)
        return digest.hexdigest()

    def fmtline(line):
        # git encodes the chunk length as a single letter:
        # 'A'-'Z' for 1..26 bytes, 'a'-'z' for 27..52 bytes
        n = len(line)
        if n <= 26:
            lenchar = chr(ord('A') + n - 1)
        else:
            lenchar = chr(n - 26 + ord('a') - 1)
        return '%c%s\n' % (lenchar, base85.b85encode(line, True))

    def chunk(text, csize=52):
        for start in range(0, len(text), csize):
            yield text[start:start + csize]

    tohash = gitindex(to)
    tnhash = gitindex(tn)
    if tohash == tnhash:
        # identical content: nothing to emit
        return ""

    # TODO: deltas
    pieces = ['index %s..%s\nGIT binary patch\nliteral %s\n' %
              (tohash, tnhash, len(tn))]
    for line in chunk(zlib.compress(tn)):
        pieces.append(fmtline(line))
    pieces.append('\n')
    return ''.join(pieces)
1545
1545
class GitDiffRequired(Exception):
    """Signals that the diff must be regenerated in git format."""
1548
1548
def diffopts(ui, opts=None, untrusted=False, section='diff'):
    """Build mdiff.diffopts from command options and ui configuration.

    Explicitly passed options take precedence over values read from
    the given configuration 'section'.
    """
    def fetch(key, name=None, getter=ui.configbool):
        # command option first, then the configuration file
        return ((opts and opts.get(key)) or
                getter(section, name or key, None, untrusted=untrusted))
    return mdiff.diffopts(
        text=opts and opts.get('text'),
        git=fetch('git'),
        nodates=fetch('nodates'),
        showfunc=fetch('show_function', 'showfunc'),
        ignorews=fetch('ignore_all_space', 'ignorews'),
        ignorewsamount=fetch('ignore_space_change', 'ignorewsamount'),
        ignoreblanklines=fetch('ignore_blank_lines', 'ignoreblanklines'),
        context=fetch('unified', getter=ui.config))
1562
1562
def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None,
         losedatafn=None, prefix=''):
    '''yields diff of changes to files between two nodes, or node and
    working directory.

    if node1 is None, use first dirstate parent instead.
    if node2 is None, compare node1 with working directory.

    losedatafn(**kwarg) is a callable run when opts.upgrade=True and
    every time some change cannot be represented with the current
    patch format. Return False to upgrade to git patch format, True to
    accept the loss or raise an exception to abort the diff. It is
    called with the name of current file being diffed as 'fn'. If set
    to None, patches will always be upgraded to git format when
    necessary.

    prefix is a filename prefix that is prepended to all filenames on
    display (used for subrepos).
    '''

    if opts is None:
        opts = mdiff.defaultopts

    if not node1 and not node2:
        node1 = repo.dirstate.p1()

    def lrugetfilectx():
        # small LRU cache (20 entries) of filelogs keyed by file name,
        # to avoid reopening the same filelog for repeated lookups
        cache = {}
        order = []
        def getfilectx(f, ctx):
            fctx = ctx.filectx(f, filelog=cache.get(f))
            if f not in cache:
                if len(cache) > 20:
                    # evict the least recently used entry
                    del cache[order.pop(0)]
                cache[f] = fctx.filelog()
            else:
                order.remove(f)
            order.append(f)
            return fctx
        return getfilectx
    getfilectx = lrugetfilectx()

    ctx1 = repo[node1]
    ctx2 = repo[node2]

    if not changes:
        changes = repo.status(ctx1, ctx2, match=match)
    modified, added, removed = changes[:3]

    if not modified and not added and not removed:
        return []

    revs = None
    if not repo.ui.quiet:
        hexfunc = repo.ui.debugflag and hex or short
        revs = [hexfunc(node) for node in [node1, node2] if node]

    # copy information is only needed for git diffs or when we may
    # upgrade to one
    copy = {}
    if opts.git or opts.upgrade:
        copy = copies.pathcopies(ctx1, ctx2)

    difffn = (lambda opts, losedata:
                  trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
                          copy, getfilectx, opts, losedata, prefix))
    if opts.upgrade and not opts.git:
        try:
            def losedata(fn):
                # caller vetoes the data loss (or is absent): switch to
                # git format by unwinding via GitDiffRequired
                if not losedatafn or not losedatafn(fn=fn):
                    raise GitDiffRequired
            # Buffer the whole output until we are sure it can be generated
            return list(difffn(opts.copy(git=False), losedata))
        except GitDiffRequired:
            return difffn(opts.copy(git=True), None)
    else:
        return difffn(opts, None)
1638
1638
def difflabel(func, *args, **kw):
    '''yields 2-tuples of (output, label) based on the output of func()'''
    headprefixes = [('diff', 'diff.diffline'),
                    ('copy', 'diff.extended'),
                    ('rename', 'diff.extended'),
                    ('old', 'diff.extended'),
                    ('new', 'diff.extended'),
                    ('deleted', 'diff.extended'),
                    ('---', 'diff.file_a'),
                    ('+++', 'diff.file_b')]
    textprefixes = [('@', 'diff.hunk'),
                    ('-', 'diff.deleted'),
                    ('+', 'diff.inserted')]
    inheader = False
    for chunk in func(*args, **kw):
        for idx, line in enumerate(chunk.split('\n')):
            if idx != 0:
                # re-emit the newline swallowed by split()
                yield ('\n', '')
            # a per-file header starts at any line that does not begin
            # with a diff body marker and ends at the first hunk line
            if inheader:
                if line.startswith('@'):
                    inheader = False
            elif line and line[0] not in ' +-@\\':
                inheader = True
            shown = line
            if not inheader and line and line[0] in '+-':
                # highlight trailing whitespace, but only in changed lines
                shown = line.rstrip()
            prefixes = headprefixes if inheader else textprefixes
            for prefix, label in prefixes:
                if shown.startswith(prefix):
                    yield (shown, label)
                    break
            else:
                yield (line, '')
            if line != shown:
                yield (line[len(shown):], 'diff.trailingwhitespace')
1679
1679
1680 def diffui(*args, **kw):
1680 def diffui(*args, **kw):
1681 '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
1681 '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
1682 return difflabel(diff, *args, **kw)
1682 return difflabel(diff, *args, **kw)
1683
1683
1684
1684
1685 def _addmodehdr(header, omode, nmode):
1685 def _addmodehdr(header, omode, nmode):
1686 if omode != nmode:
1686 if omode != nmode:
1687 header.append('old mode %s\n' % omode)
1687 header.append('old mode %s\n' % omode)
1688 header.append('new mode %s\n' % nmode)
1688 header.append('new mode %s\n' % nmode)
1689
1689
1690 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
1690 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
1691 copy, getfilectx, opts, losedatafn, prefix):
1691 copy, getfilectx, opts, losedatafn, prefix):
1692
1692
1693 def join(f):
1693 def join(f):
1694 return os.path.join(prefix, f)
1694 return os.path.join(prefix, f)
1695
1695
1696 date1 = util.datestr(ctx1.date())
1696 date1 = util.datestr(ctx1.date())
1697 man1 = ctx1.manifest()
1697 man1 = ctx1.manifest()
1698
1698
1699 gone = set()
1699 gone = set()
1700 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
1700 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
1701
1701
1702 copyto = dict([(v, k) for k, v in copy.items()])
1702 copyto = dict([(v, k) for k, v in copy.items()])
1703
1703
1704 if opts.git:
1704 if opts.git:
1705 revs = None
1705 revs = None
1706
1706
1707 for f in sorted(modified + added + removed):
1707 for f in sorted(modified + added + removed):
1708 to = None
1708 to = None
1709 tn = None
1709 tn = None
1710 dodiff = True
1710 dodiff = True
1711 header = []
1711 header = []
1712 if f in man1:
1712 if f in man1:
1713 to = getfilectx(f, ctx1).data()
1713 to = getfilectx(f, ctx1).data()
1714 if f not in removed:
1714 if f not in removed:
1715 tn = getfilectx(f, ctx2).data()
1715 tn = getfilectx(f, ctx2).data()
1716 a, b = f, f
1716 a, b = f, f
1717 if opts.git or losedatafn:
1717 if opts.git or losedatafn:
1718 if f in added:
1718 if f in added:
1719 mode = gitmode[ctx2.flags(f)]
1719 mode = gitmode[ctx2.flags(f)]
1720 if f in copy or f in copyto:
1720 if f in copy or f in copyto:
1721 if opts.git:
1721 if opts.git:
1722 if f in copy:
1722 if f in copy:
1723 a = copy[f]
1723 a = copy[f]
1724 else:
1724 else:
1725 a = copyto[f]
1725 a = copyto[f]
1726 omode = gitmode[man1.flags(a)]
1726 omode = gitmode[man1.flags(a)]
1727 _addmodehdr(header, omode, mode)
1727 _addmodehdr(header, omode, mode)
1728 if a in removed and a not in gone:
1728 if a in removed and a not in gone:
1729 op = 'rename'
1729 op = 'rename'
1730 gone.add(a)
1730 gone.add(a)
1731 else:
1731 else:
1732 op = 'copy'
1732 op = 'copy'
1733 header.append('%s from %s\n' % (op, join(a)))
1733 header.append('%s from %s\n' % (op, join(a)))
1734 header.append('%s to %s\n' % (op, join(f)))
1734 header.append('%s to %s\n' % (op, join(f)))
1735 to = getfilectx(a, ctx1).data()
1735 to = getfilectx(a, ctx1).data()
1736 else:
1736 else:
1737 losedatafn(f)
1737 losedatafn(f)
1738 else:
1738 else:
1739 if opts.git:
1739 if opts.git:
1740 header.append('new file mode %s\n' % mode)
1740 header.append('new file mode %s\n' % mode)
1741 elif ctx2.flags(f):
1741 elif ctx2.flags(f):
1742 losedatafn(f)
1742 losedatafn(f)
1743 # In theory, if tn was copied or renamed we should check
1743 # In theory, if tn was copied or renamed we should check
1744 # if the source is binary too but the copy record already
1744 # if the source is binary too but the copy record already
1745 # forces git mode.
1745 # forces git mode.
1746 if util.binary(tn):
1746 if util.binary(tn):
1747 if opts.git:
1747 if opts.git:
1748 dodiff = 'binary'
1748 dodiff = 'binary'
1749 else:
1749 else:
1750 losedatafn(f)
1750 losedatafn(f)
1751 if not opts.git and not tn:
1751 if not opts.git and not tn:
1752 # regular diffs cannot represent new empty file
1752 # regular diffs cannot represent new empty file
1753 losedatafn(f)
1753 losedatafn(f)
1754 elif f in removed:
1754 elif f in removed:
1755 if opts.git:
1755 if opts.git:
1756 # have we already reported a copy above?
1756 # have we already reported a copy above?
1757 if ((f in copy and copy[f] in added
1757 if ((f in copy and copy[f] in added
1758 and copyto[copy[f]] == f) or
1758 and copyto[copy[f]] == f) or
1759 (f in copyto and copyto[f] in added
1759 (f in copyto and copyto[f] in added
1760 and copy[copyto[f]] == f)):
1760 and copy[copyto[f]] == f)):
1761 dodiff = False
1761 dodiff = False
1762 else:
1762 else:
1763 header.append('deleted file mode %s\n' %
1763 header.append('deleted file mode %s\n' %
1764 gitmode[man1.flags(f)])
1764 gitmode[man1.flags(f)])
1765 elif not to or util.binary(to):
1765 elif not to or util.binary(to):
1766 # regular diffs cannot represent empty file deletion
1766 # regular diffs cannot represent empty file deletion
1767 losedatafn(f)
1767 losedatafn(f)
1768 else:
1768 else:
1769 oflag = man1.flags(f)
1769 oflag = man1.flags(f)
1770 nflag = ctx2.flags(f)
1770 nflag = ctx2.flags(f)
1771 binary = util.binary(to) or util.binary(tn)
1771 binary = util.binary(to) or util.binary(tn)
1772 if opts.git:
1772 if opts.git:
1773 _addmodehdr(header, gitmode[oflag], gitmode[nflag])
1773 _addmodehdr(header, gitmode[oflag], gitmode[nflag])
1774 if binary:
1774 if binary:
1775 dodiff = 'binary'
1775 dodiff = 'binary'
1776 elif binary or nflag != oflag:
1776 elif binary or nflag != oflag:
1777 losedatafn(f)
1777 losedatafn(f)
1778 if opts.git:
1778 if opts.git:
1779 header.insert(0, mdiff.diffline(revs, join(a), join(b), opts))
1779 header.insert(0, mdiff.diffline(revs, join(a), join(b), opts))
1780
1780
1781 if dodiff:
1781 if dodiff:
1782 if dodiff == 'binary':
1782 if dodiff == 'binary':
1783 text = b85diff(to, tn)
1783 text = b85diff(to, tn)
1784 else:
1784 else:
1785 text = mdiff.unidiff(to, date1,
1785 text = mdiff.unidiff(to, date1,
1786 # ctx2 date may be dynamic
1786 # ctx2 date may be dynamic
1787 tn, util.datestr(ctx2.date()),
1787 tn, util.datestr(ctx2.date()),
1788 join(a), join(b), revs, opts=opts)
1788 join(a), join(b), revs, opts=opts)
1789 if header and (text or len(header) > 1):
1789 if header and (text or len(header) > 1):
1790 yield ''.join(header)
1790 yield ''.join(header)
1791 if text:
1791 if text:
1792 yield text
1792 yield text
1793
1793
1794 def diffstatsum(stats):
1794 def diffstatsum(stats):
1795 maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False
1795 maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False
1796 for f, a, r, b in stats:
1796 for f, a, r, b in stats:
1797 maxfile = max(maxfile, encoding.colwidth(f))
1797 maxfile = max(maxfile, encoding.colwidth(f))
1798 maxtotal = max(maxtotal, a + r)
1798 maxtotal = max(maxtotal, a + r)
1799 addtotal += a
1799 addtotal += a
1800 removetotal += r
1800 removetotal += r
1801 binary = binary or b
1801 binary = binary or b
1802
1802
1803 return maxfile, maxtotal, addtotal, removetotal, binary
1803 return maxfile, maxtotal, addtotal, removetotal, binary
1804
1804
1805 def diffstatdata(lines):
1805 def diffstatdata(lines):
1806 diffre = re.compile('^diff .*-r [a-z0-9]+\s(.*)$')
1806 diffre = re.compile('^diff .*-r [a-z0-9]+\s(.*)$')
1807
1807
1808 results = []
1808 results = []
1809 filename, adds, removes, isbinary = None, 0, 0, False
1809 filename, adds, removes, isbinary = None, 0, 0, False
1810
1810
1811 def addresult():
1811 def addresult():
1812 if filename:
1812 if filename:
1813 results.append((filename, adds, removes, isbinary))
1813 results.append((filename, adds, removes, isbinary))
1814
1814
1815 for line in lines:
1815 for line in lines:
1816 if line.startswith('diff'):
1816 if line.startswith('diff'):
1817 addresult()
1817 addresult()
1818 # set numbers to 0 anyway when starting new file
1818 # set numbers to 0 anyway when starting new file
1819 adds, removes, isbinary = 0, 0, False
1819 adds, removes, isbinary = 0, 0, False
1820 if line.startswith('diff --git'):
1820 if line.startswith('diff --git'):
1821 filename = gitre.search(line).group(1)
1821 filename = gitre.search(line).group(1)
1822 elif line.startswith('diff -r'):
1822 elif line.startswith('diff -r'):
1823 # format: "diff -r ... -r ... filename"
1823 # format: "diff -r ... -r ... filename"
1824 filename = diffre.search(line).group(1)
1824 filename = diffre.search(line).group(1)
1825 elif line.startswith('+') and not line.startswith('+++ '):
1825 elif line.startswith('+') and not line.startswith('+++ '):
1826 adds += 1
1826 adds += 1
1827 elif line.startswith('-') and not line.startswith('--- '):
1827 elif line.startswith('-') and not line.startswith('--- '):
1828 removes += 1
1828 removes += 1
1829 elif (line.startswith('GIT binary patch') or
1829 elif (line.startswith('GIT binary patch') or
1830 line.startswith('Binary file')):
1830 line.startswith('Binary file')):
1831 isbinary = True
1831 isbinary = True
1832 addresult()
1832 addresult()
1833 return results
1833 return results
1834
1834
1835 def diffstat(lines, width=80, git=False):
1835 def diffstat(lines, width=80, git=False):
1836 output = []
1836 output = []
1837 stats = diffstatdata(lines)
1837 stats = diffstatdata(lines)
1838 maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)
1838 maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)
1839
1839
1840 countwidth = len(str(maxtotal))
1840 countwidth = len(str(maxtotal))
1841 if hasbinary and countwidth < 3:
1841 if hasbinary and countwidth < 3:
1842 countwidth = 3
1842 countwidth = 3
1843 graphwidth = width - countwidth - maxname - 6
1843 graphwidth = width - countwidth - maxname - 6
1844 if graphwidth < 10:
1844 if graphwidth < 10:
1845 graphwidth = 10
1845 graphwidth = 10
1846
1846
1847 def scale(i):
1847 def scale(i):
1848 if maxtotal <= graphwidth:
1848 if maxtotal <= graphwidth:
1849 return i
1849 return i
1850 # If diffstat runs out of room it doesn't print anything,
1850 # If diffstat runs out of room it doesn't print anything,
1851 # which isn't very useful, so always print at least one + or -
1851 # which isn't very useful, so always print at least one + or -
1852 # if there were at least some changes.
1852 # if there were at least some changes.
1853 return max(i * graphwidth // maxtotal, int(bool(i)))
1853 return max(i * graphwidth // maxtotal, int(bool(i)))
1854
1854
1855 for filename, adds, removes, isbinary in stats:
1855 for filename, adds, removes, isbinary in stats:
1856 if isbinary:
1856 if isbinary:
1857 count = 'Bin'
1857 count = 'Bin'
1858 else:
1858 else:
1859 count = adds + removes
1859 count = adds + removes
1860 pluses = '+' * scale(adds)
1860 pluses = '+' * scale(adds)
1861 minuses = '-' * scale(removes)
1861 minuses = '-' * scale(removes)
1862 output.append(' %s%s | %*s %s%s\n' %
1862 output.append(' %s%s | %*s %s%s\n' %
1863 (filename, ' ' * (maxname - encoding.colwidth(filename)),
1863 (filename, ' ' * (maxname - encoding.colwidth(filename)),
1864 countwidth, count, pluses, minuses))
1864 countwidth, count, pluses, minuses))
1865
1865
1866 if stats:
1866 if stats:
1867 output.append(_(' %d files changed, %d insertions(+), '
1867 output.append(_(' %d files changed, %d insertions(+), '
1868 '%d deletions(-)\n')
1868 '%d deletions(-)\n')
1869 % (len(stats), totaladds, totalremoves))
1869 % (len(stats), totaladds, totalremoves))
1870
1870
1871 return ''.join(output)
1871 return ''.join(output)
1872
1872
1873 def diffstatui(*args, **kw):
1873 def diffstatui(*args, **kw):
1874 '''like diffstat(), but yields 2-tuples of (output, label) for
1874 '''like diffstat(), but yields 2-tuples of (output, label) for
1875 ui.write()
1875 ui.write()
1876 '''
1876 '''
1877
1877
1878 for line in diffstat(*args, **kw).splitlines():
1878 for line in diffstat(*args, **kw).splitlines():
1879 if line and line[-1] in '+-':
1879 if line and line[-1] in '+-':
1880 name, graph = line.rsplit(' ', 1)
1880 name, graph = line.rsplit(' ', 1)
1881 yield (name + ' ', '')
1881 yield (name + ' ', '')
1882 m = re.search(r'\++', graph)
1882 m = re.search(r'\++', graph)
1883 if m:
1883 if m:
1884 yield (m.group(0), 'diffstat.inserted')
1884 yield (m.group(0), 'diffstat.inserted')
1885 m = re.search(r'-+', graph)
1885 m = re.search(r'-+', graph)
1886 if m:
1886 if m:
1887 yield (m.group(0), 'diffstat.deleted')
1887 yield (m.group(0), 'diffstat.deleted')
1888 else:
1888 else:
1889 yield (line, '')
1889 yield (line, '')
1890 yield ('\n', '')
1890 yield ('\n', '')
@@ -1,453 +1,453 b''
1 # Copyright (C) 2004, 2005 Canonical Ltd
1 # Copyright (C) 2004, 2005 Canonical Ltd
2 #
2 #
3 # This program is free software; you can redistribute it and/or modify
3 # This program is free software; you can redistribute it and/or modify
4 # it under the terms of the GNU General Public License as published by
4 # it under the terms of the GNU General Public License as published by
5 # the Free Software Foundation; either version 2 of the License, or
5 # the Free Software Foundation; either version 2 of the License, or
6 # (at your option) any later version.
6 # (at your option) any later version.
7 #
7 #
8 # This program is distributed in the hope that it will be useful,
8 # This program is distributed in the hope that it will be useful,
9 # but WITHOUT ANY WARRANTY; without even the implied warranty of
9 # but WITHOUT ANY WARRANTY; without even the implied warranty of
10 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 # GNU General Public License for more details.
11 # GNU General Public License for more details.
12 #
12 #
13 # You should have received a copy of the GNU General Public License
13 # You should have received a copy of the GNU General Public License
14 # along with this program; if not, see <http://www.gnu.org/licenses/>.
14 # along with this program; if not, see <http://www.gnu.org/licenses/>.
15
15
16 # mbp: "you know that thing where cvs gives you conflict markers?"
16 # mbp: "you know that thing where cvs gives you conflict markers?"
17 # s: "i hate that."
17 # s: "i hate that."
18
18
19 from i18n import _
19 from i18n import _
20 import scmutil, util, mdiff
20 import scmutil, util, mdiff
21 import sys, os
21 import sys, os
22
22
23 class CantReprocessAndShowBase(Exception):
23 class CantReprocessAndShowBase(Exception):
24 pass
24 pass
25
25
26 def intersect(ra, rb):
26 def intersect(ra, rb):
27 """Given two ranges return the range where they intersect or None.
27 """Given two ranges return the range where they intersect or None.
28
28
29 >>> intersect((0, 10), (0, 6))
29 >>> intersect((0, 10), (0, 6))
30 (0, 6)
30 (0, 6)
31 >>> intersect((0, 10), (5, 15))
31 >>> intersect((0, 10), (5, 15))
32 (5, 10)
32 (5, 10)
33 >>> intersect((0, 10), (10, 15))
33 >>> intersect((0, 10), (10, 15))
34 >>> intersect((0, 9), (10, 15))
34 >>> intersect((0, 9), (10, 15))
35 >>> intersect((0, 9), (7, 15))
35 >>> intersect((0, 9), (7, 15))
36 (7, 9)
36 (7, 9)
37 """
37 """
38 assert ra[0] <= ra[1]
38 assert ra[0] <= ra[1]
39 assert rb[0] <= rb[1]
39 assert rb[0] <= rb[1]
40
40
41 sa = max(ra[0], rb[0])
41 sa = max(ra[0], rb[0])
42 sb = min(ra[1], rb[1])
42 sb = min(ra[1], rb[1])
43 if sa < sb:
43 if sa < sb:
44 return sa, sb
44 return sa, sb
45 else:
45 else:
46 return None
46 return None
47
47
48 def compare_range(a, astart, aend, b, bstart, bend):
48 def compare_range(a, astart, aend, b, bstart, bend):
49 """Compare a[astart:aend] == b[bstart:bend], without slicing.
49 """Compare a[astart:aend] == b[bstart:bend], without slicing.
50 """
50 """
51 if (aend - astart) != (bend - bstart):
51 if (aend - astart) != (bend - bstart):
52 return False
52 return False
53 for ia, ib in zip(xrange(astart, aend), xrange(bstart, bend)):
53 for ia, ib in zip(xrange(astart, aend), xrange(bstart, bend)):
54 if a[ia] != b[ib]:
54 if a[ia] != b[ib]:
55 return False
55 return False
56 else:
56 else:
57 return True
57 return True
58
58
59 class Merge3Text(object):
59 class Merge3Text(object):
60 """3-way merge of texts.
60 """3-way merge of texts.
61
61
62 Given strings BASE, OTHER, THIS, tries to produce a combined text
62 Given strings BASE, OTHER, THIS, tries to produce a combined text
63 incorporating the changes from both BASE->OTHER and BASE->THIS."""
63 incorporating the changes from both BASE->OTHER and BASE->THIS."""
64 def __init__(self, basetext, atext, btext, base=None, a=None, b=None):
64 def __init__(self, basetext, atext, btext, base=None, a=None, b=None):
65 self.basetext = basetext
65 self.basetext = basetext
66 self.atext = atext
66 self.atext = atext
67 self.btext = btext
67 self.btext = btext
68 if base is None:
68 if base is None:
69 base = mdiff.splitnewlines(basetext)
69 base = mdiff.splitnewlines(basetext)
70 if a is None:
70 if a is None:
71 a = mdiff.splitnewlines(atext)
71 a = mdiff.splitnewlines(atext)
72 if b is None:
72 if b is None:
73 b = mdiff.splitnewlines(btext)
73 b = mdiff.splitnewlines(btext)
74 self.base = base
74 self.base = base
75 self.a = a
75 self.a = a
76 self.b = b
76 self.b = b
77
77
78 def merge_lines(self,
78 def merge_lines(self,
79 name_a=None,
79 name_a=None,
80 name_b=None,
80 name_b=None,
81 name_base=None,
81 name_base=None,
82 start_marker='<<<<<<<',
82 start_marker='<<<<<<<',
83 mid_marker='=======',
83 mid_marker='=======',
84 end_marker='>>>>>>>',
84 end_marker='>>>>>>>',
85 base_marker=None,
85 base_marker=None,
86 reprocess=False):
86 reprocess=False):
87 """Return merge in cvs-like form.
87 """Return merge in cvs-like form.
88 """
88 """
89 self.conflicts = False
89 self.conflicts = False
90 newline = '\n'
90 newline = '\n'
91 if len(self.a) > 0:
91 if len(self.a) > 0:
92 if self.a[0].endswith('\r\n'):
92 if self.a[0].endswith('\r\n'):
93 newline = '\r\n'
93 newline = '\r\n'
94 elif self.a[0].endswith('\r'):
94 elif self.a[0].endswith('\r'):
95 newline = '\r'
95 newline = '\r'
96 if base_marker and reprocess:
96 if base_marker and reprocess:
97 raise CantReprocessAndShowBase()
97 raise CantReprocessAndShowBase
98 if name_a:
98 if name_a:
99 start_marker = start_marker + ' ' + name_a
99 start_marker = start_marker + ' ' + name_a
100 if name_b:
100 if name_b:
101 end_marker = end_marker + ' ' + name_b
101 end_marker = end_marker + ' ' + name_b
102 if name_base and base_marker:
102 if name_base and base_marker:
103 base_marker = base_marker + ' ' + name_base
103 base_marker = base_marker + ' ' + name_base
104 merge_regions = self.merge_regions()
104 merge_regions = self.merge_regions()
105 if reprocess is True:
105 if reprocess is True:
106 merge_regions = self.reprocess_merge_regions(merge_regions)
106 merge_regions = self.reprocess_merge_regions(merge_regions)
107 for t in merge_regions:
107 for t in merge_regions:
108 what = t[0]
108 what = t[0]
109 if what == 'unchanged':
109 if what == 'unchanged':
110 for i in range(t[1], t[2]):
110 for i in range(t[1], t[2]):
111 yield self.base[i]
111 yield self.base[i]
112 elif what == 'a' or what == 'same':
112 elif what == 'a' or what == 'same':
113 for i in range(t[1], t[2]):
113 for i in range(t[1], t[2]):
114 yield self.a[i]
114 yield self.a[i]
115 elif what == 'b':
115 elif what == 'b':
116 for i in range(t[1], t[2]):
116 for i in range(t[1], t[2]):
117 yield self.b[i]
117 yield self.b[i]
118 elif what == 'conflict':
118 elif what == 'conflict':
119 self.conflicts = True
119 self.conflicts = True
120 yield start_marker + newline
120 yield start_marker + newline
121 for i in range(t[3], t[4]):
121 for i in range(t[3], t[4]):
122 yield self.a[i]
122 yield self.a[i]
123 if base_marker is not None:
123 if base_marker is not None:
124 yield base_marker + newline
124 yield base_marker + newline
125 for i in range(t[1], t[2]):
125 for i in range(t[1], t[2]):
126 yield self.base[i]
126 yield self.base[i]
127 yield mid_marker + newline
127 yield mid_marker + newline
128 for i in range(t[5], t[6]):
128 for i in range(t[5], t[6]):
129 yield self.b[i]
129 yield self.b[i]
130 yield end_marker + newline
130 yield end_marker + newline
131 else:
131 else:
132 raise ValueError(what)
132 raise ValueError(what)
133
133
134 def merge_annotated(self):
134 def merge_annotated(self):
135 """Return merge with conflicts, showing origin of lines.
135 """Return merge with conflicts, showing origin of lines.
136
136
137 Most useful for debugging merge.
137 Most useful for debugging merge.
138 """
138 """
139 for t in self.merge_regions():
139 for t in self.merge_regions():
140 what = t[0]
140 what = t[0]
141 if what == 'unchanged':
141 if what == 'unchanged':
142 for i in range(t[1], t[2]):
142 for i in range(t[1], t[2]):
143 yield 'u | ' + self.base[i]
143 yield 'u | ' + self.base[i]
144 elif what == 'a' or what == 'same':
144 elif what == 'a' or what == 'same':
145 for i in range(t[1], t[2]):
145 for i in range(t[1], t[2]):
146 yield what[0] + ' | ' + self.a[i]
146 yield what[0] + ' | ' + self.a[i]
147 elif what == 'b':
147 elif what == 'b':
148 for i in range(t[1], t[2]):
148 for i in range(t[1], t[2]):
149 yield 'b | ' + self.b[i]
149 yield 'b | ' + self.b[i]
150 elif what == 'conflict':
150 elif what == 'conflict':
151 yield '<<<<\n'
151 yield '<<<<\n'
152 for i in range(t[3], t[4]):
152 for i in range(t[3], t[4]):
153 yield 'A | ' + self.a[i]
153 yield 'A | ' + self.a[i]
154 yield '----\n'
154 yield '----\n'
155 for i in range(t[5], t[6]):
155 for i in range(t[5], t[6]):
156 yield 'B | ' + self.b[i]
156 yield 'B | ' + self.b[i]
157 yield '>>>>\n'
157 yield '>>>>\n'
158 else:
158 else:
159 raise ValueError(what)
159 raise ValueError(what)
160
160
161 def merge_groups(self):
161 def merge_groups(self):
162 """Yield sequence of line groups. Each one is a tuple:
162 """Yield sequence of line groups. Each one is a tuple:
163
163
164 'unchanged', lines
164 'unchanged', lines
165 Lines unchanged from base
165 Lines unchanged from base
166
166
167 'a', lines
167 'a', lines
168 Lines taken from a
168 Lines taken from a
169
169
170 'same', lines
170 'same', lines
171 Lines taken from a (and equal to b)
171 Lines taken from a (and equal to b)
172
172
173 'b', lines
173 'b', lines
174 Lines taken from b
174 Lines taken from b
175
175
176 'conflict', base_lines, a_lines, b_lines
176 'conflict', base_lines, a_lines, b_lines
177 Lines from base were changed to either a or b and conflict.
177 Lines from base were changed to either a or b and conflict.
178 """
178 """
179 for t in self.merge_regions():
179 for t in self.merge_regions():
180 what = t[0]
180 what = t[0]
181 if what == 'unchanged':
181 if what == 'unchanged':
182 yield what, self.base[t[1]:t[2]]
182 yield what, self.base[t[1]:t[2]]
183 elif what == 'a' or what == 'same':
183 elif what == 'a' or what == 'same':
184 yield what, self.a[t[1]:t[2]]
184 yield what, self.a[t[1]:t[2]]
185 elif what == 'b':
185 elif what == 'b':
186 yield what, self.b[t[1]:t[2]]
186 yield what, self.b[t[1]:t[2]]
187 elif what == 'conflict':
187 elif what == 'conflict':
188 yield (what,
188 yield (what,
189 self.base[t[1]:t[2]],
189 self.base[t[1]:t[2]],
190 self.a[t[3]:t[4]],
190 self.a[t[3]:t[4]],
191 self.b[t[5]:t[6]])
191 self.b[t[5]:t[6]])
192 else:
192 else:
193 raise ValueError(what)
193 raise ValueError(what)
194
194
195 def merge_regions(self):
195 def merge_regions(self):
196 """Return sequences of matching and conflicting regions.
196 """Return sequences of matching and conflicting regions.
197
197
198 This returns tuples, where the first value says what kind we
198 This returns tuples, where the first value says what kind we
199 have:
199 have:
200
200
201 'unchanged', start, end
201 'unchanged', start, end
202 Take a region of base[start:end]
202 Take a region of base[start:end]
203
203
204 'same', astart, aend
204 'same', astart, aend
205 b and a are different from base but give the same result
205 b and a are different from base but give the same result
206
206
207 'a', start, end
207 'a', start, end
208 Non-clashing insertion from a[start:end]
208 Non-clashing insertion from a[start:end]
209
209
210 Method is as follows:
210 Method is as follows:
211
211
212 The two sequences align only on regions which match the base
212 The two sequences align only on regions which match the base
213 and both descendants. These are found by doing a two-way diff
213 and both descendants. These are found by doing a two-way diff
214 of each one against the base, and then finding the
214 of each one against the base, and then finding the
215 intersections between those regions. These "sync regions"
215 intersections between those regions. These "sync regions"
216 are by definition unchanged in both and easily dealt with.
216 are by definition unchanged in both and easily dealt with.
217
217
218 The regions in between can be in any of three cases:
218 The regions in between can be in any of three cases:
219 conflicted, or changed on only one side.
219 conflicted, or changed on only one side.
220 """
220 """
221
221
222 # section a[0:ia] has been disposed of, etc
222 # section a[0:ia] has been disposed of, etc
223 iz = ia = ib = 0
223 iz = ia = ib = 0
224
224
225 for region in self.find_sync_regions():
225 for region in self.find_sync_regions():
226 zmatch, zend, amatch, aend, bmatch, bend = region
226 zmatch, zend, amatch, aend, bmatch, bend = region
227 #print 'match base [%d:%d]' % (zmatch, zend)
227 #print 'match base [%d:%d]' % (zmatch, zend)
228
228
229 matchlen = zend - zmatch
229 matchlen = zend - zmatch
230 assert matchlen >= 0
230 assert matchlen >= 0
231 assert matchlen == (aend - amatch)
231 assert matchlen == (aend - amatch)
232 assert matchlen == (bend - bmatch)
232 assert matchlen == (bend - bmatch)
233
233
234 len_a = amatch - ia
234 len_a = amatch - ia
235 len_b = bmatch - ib
235 len_b = bmatch - ib
236 len_base = zmatch - iz
236 len_base = zmatch - iz
237 assert len_a >= 0
237 assert len_a >= 0
238 assert len_b >= 0
238 assert len_b >= 0
239 assert len_base >= 0
239 assert len_base >= 0
240
240
241 #print 'unmatched a=%d, b=%d' % (len_a, len_b)
241 #print 'unmatched a=%d, b=%d' % (len_a, len_b)
242
242
243 if len_a or len_b:
243 if len_a or len_b:
244 # try to avoid actually slicing the lists
244 # try to avoid actually slicing the lists
245 equal_a = compare_range(self.a, ia, amatch,
245 equal_a = compare_range(self.a, ia, amatch,
246 self.base, iz, zmatch)
246 self.base, iz, zmatch)
247 equal_b = compare_range(self.b, ib, bmatch,
247 equal_b = compare_range(self.b, ib, bmatch,
248 self.base, iz, zmatch)
248 self.base, iz, zmatch)
249 same = compare_range(self.a, ia, amatch,
249 same = compare_range(self.a, ia, amatch,
250 self.b, ib, bmatch)
250 self.b, ib, bmatch)
251
251
252 if same:
252 if same:
253 yield 'same', ia, amatch
253 yield 'same', ia, amatch
254 elif equal_a and not equal_b:
254 elif equal_a and not equal_b:
255 yield 'b', ib, bmatch
255 yield 'b', ib, bmatch
256 elif equal_b and not equal_a:
256 elif equal_b and not equal_a:
257 yield 'a', ia, amatch
257 yield 'a', ia, amatch
258 elif not equal_a and not equal_b:
258 elif not equal_a and not equal_b:
259 yield 'conflict', iz, zmatch, ia, amatch, ib, bmatch
259 yield 'conflict', iz, zmatch, ia, amatch, ib, bmatch
260 else:
260 else:
261 raise AssertionError("can't handle a=b=base but unmatched")
261 raise AssertionError("can't handle a=b=base but unmatched")
262
262
263 ia = amatch
263 ia = amatch
264 ib = bmatch
264 ib = bmatch
265 iz = zmatch
265 iz = zmatch
266
266
267 # if the same part of the base was deleted on both sides
267 # if the same part of the base was deleted on both sides
268 # that's OK, we can just skip it.
268 # that's OK, we can just skip it.
269
269
270
270
271 if matchlen > 0:
271 if matchlen > 0:
272 assert ia == amatch
272 assert ia == amatch
273 assert ib == bmatch
273 assert ib == bmatch
274 assert iz == zmatch
274 assert iz == zmatch
275
275
276 yield 'unchanged', zmatch, zend
276 yield 'unchanged', zmatch, zend
277 iz = zend
277 iz = zend
278 ia = aend
278 ia = aend
279 ib = bend
279 ib = bend
280
280
281 def reprocess_merge_regions(self, merge_regions):
281 def reprocess_merge_regions(self, merge_regions):
282 """Where there are conflict regions, remove the agreed lines.
282 """Where there are conflict regions, remove the agreed lines.
283
283
284 Lines where both A and B have made the same changes are
284 Lines where both A and B have made the same changes are
285 eliminated.
285 eliminated.
286 """
286 """
287 for region in merge_regions:
287 for region in merge_regions:
288 if region[0] != "conflict":
288 if region[0] != "conflict":
289 yield region
289 yield region
290 continue
290 continue
291 type, iz, zmatch, ia, amatch, ib, bmatch = region
291 type, iz, zmatch, ia, amatch, ib, bmatch = region
292 a_region = self.a[ia:amatch]
292 a_region = self.a[ia:amatch]
293 b_region = self.b[ib:bmatch]
293 b_region = self.b[ib:bmatch]
294 matches = mdiff.get_matching_blocks(''.join(a_region),
294 matches = mdiff.get_matching_blocks(''.join(a_region),
295 ''.join(b_region))
295 ''.join(b_region))
296 next_a = ia
296 next_a = ia
297 next_b = ib
297 next_b = ib
298 for region_ia, region_ib, region_len in matches[:-1]:
298 for region_ia, region_ib, region_len in matches[:-1]:
299 region_ia += ia
299 region_ia += ia
300 region_ib += ib
300 region_ib += ib
301 reg = self.mismatch_region(next_a, region_ia, next_b,
301 reg = self.mismatch_region(next_a, region_ia, next_b,
302 region_ib)
302 region_ib)
303 if reg is not None:
303 if reg is not None:
304 yield reg
304 yield reg
305 yield 'same', region_ia, region_len + region_ia
305 yield 'same', region_ia, region_len + region_ia
306 next_a = region_ia + region_len
306 next_a = region_ia + region_len
307 next_b = region_ib + region_len
307 next_b = region_ib + region_len
308 reg = self.mismatch_region(next_a, amatch, next_b, bmatch)
308 reg = self.mismatch_region(next_a, amatch, next_b, bmatch)
309 if reg is not None:
309 if reg is not None:
310 yield reg
310 yield reg
311
311
312 def mismatch_region(next_a, region_ia, next_b, region_ib):
312 def mismatch_region(next_a, region_ia, next_b, region_ib):
313 if next_a < region_ia or next_b < region_ib:
313 if next_a < region_ia or next_b < region_ib:
314 return 'conflict', None, None, next_a, region_ia, next_b, region_ib
314 return 'conflict', None, None, next_a, region_ia, next_b, region_ib
315 mismatch_region = staticmethod(mismatch_region)
315 mismatch_region = staticmethod(mismatch_region)
316
316
317 def find_sync_regions(self):
317 def find_sync_regions(self):
318 """Return a list of sync regions, where both descendants match the base.
318 """Return a list of sync regions, where both descendants match the base.
319
319
320 Generates a list of (base1, base2, a1, a2, b1, b2). There is
320 Generates a list of (base1, base2, a1, a2, b1, b2). There is
321 always a zero-length sync region at the end of all the files.
321 always a zero-length sync region at the end of all the files.
322 """
322 """
323
323
324 ia = ib = 0
324 ia = ib = 0
325 amatches = mdiff.get_matching_blocks(self.basetext, self.atext)
325 amatches = mdiff.get_matching_blocks(self.basetext, self.atext)
326 bmatches = mdiff.get_matching_blocks(self.basetext, self.btext)
326 bmatches = mdiff.get_matching_blocks(self.basetext, self.btext)
327 len_a = len(amatches)
327 len_a = len(amatches)
328 len_b = len(bmatches)
328 len_b = len(bmatches)
329
329
330 sl = []
330 sl = []
331
331
332 while ia < len_a and ib < len_b:
332 while ia < len_a and ib < len_b:
333 abase, amatch, alen = amatches[ia]
333 abase, amatch, alen = amatches[ia]
334 bbase, bmatch, blen = bmatches[ib]
334 bbase, bmatch, blen = bmatches[ib]
335
335
336 # there is an unconflicted block at i; how long does it
336 # there is an unconflicted block at i; how long does it
337 # extend? until whichever one ends earlier.
337 # extend? until whichever one ends earlier.
338 i = intersect((abase, abase + alen), (bbase, bbase + blen))
338 i = intersect((abase, abase + alen), (bbase, bbase + blen))
339 if i:
339 if i:
340 intbase = i[0]
340 intbase = i[0]
341 intend = i[1]
341 intend = i[1]
342 intlen = intend - intbase
342 intlen = intend - intbase
343
343
344 # found a match of base[i[0], i[1]]; this may be less than
344 # found a match of base[i[0], i[1]]; this may be less than
345 # the region that matches in either one
345 # the region that matches in either one
346 assert intlen <= alen
346 assert intlen <= alen
347 assert intlen <= blen
347 assert intlen <= blen
348 assert abase <= intbase
348 assert abase <= intbase
349 assert bbase <= intbase
349 assert bbase <= intbase
350
350
351 asub = amatch + (intbase - abase)
351 asub = amatch + (intbase - abase)
352 bsub = bmatch + (intbase - bbase)
352 bsub = bmatch + (intbase - bbase)
353 aend = asub + intlen
353 aend = asub + intlen
354 bend = bsub + intlen
354 bend = bsub + intlen
355
355
356 assert self.base[intbase:intend] == self.a[asub:aend], \
356 assert self.base[intbase:intend] == self.a[asub:aend], \
357 (self.base[intbase:intend], self.a[asub:aend])
357 (self.base[intbase:intend], self.a[asub:aend])
358
358
359 assert self.base[intbase:intend] == self.b[bsub:bend]
359 assert self.base[intbase:intend] == self.b[bsub:bend]
360
360
361 sl.append((intbase, intend,
361 sl.append((intbase, intend,
362 asub, aend,
362 asub, aend,
363 bsub, bend))
363 bsub, bend))
364
364
365 # advance whichever one ends first in the base text
365 # advance whichever one ends first in the base text
366 if (abase + alen) < (bbase + blen):
366 if (abase + alen) < (bbase + blen):
367 ia += 1
367 ia += 1
368 else:
368 else:
369 ib += 1
369 ib += 1
370
370
371 intbase = len(self.base)
371 intbase = len(self.base)
372 abase = len(self.a)
372 abase = len(self.a)
373 bbase = len(self.b)
373 bbase = len(self.b)
374 sl.append((intbase, intbase, abase, abase, bbase, bbase))
374 sl.append((intbase, intbase, abase, abase, bbase, bbase))
375
375
376 return sl
376 return sl
377
377
378 def find_unconflicted(self):
378 def find_unconflicted(self):
379 """Return a list of ranges in base that are not conflicted."""
379 """Return a list of ranges in base that are not conflicted."""
380 am = mdiff.get_matching_blocks(self.basetext, self.atext)
380 am = mdiff.get_matching_blocks(self.basetext, self.atext)
381 bm = mdiff.get_matching_blocks(self.basetext, self.btext)
381 bm = mdiff.get_matching_blocks(self.basetext, self.btext)
382
382
383 unc = []
383 unc = []
384
384
385 while am and bm:
385 while am and bm:
386 # there is an unconflicted block at i; how long does it
386 # there is an unconflicted block at i; how long does it
387 # extend? until whichever one ends earlier.
387 # extend? until whichever one ends earlier.
388 a1 = am[0][0]
388 a1 = am[0][0]
389 a2 = a1 + am[0][2]
389 a2 = a1 + am[0][2]
390 b1 = bm[0][0]
390 b1 = bm[0][0]
391 b2 = b1 + bm[0][2]
391 b2 = b1 + bm[0][2]
392 i = intersect((a1, a2), (b1, b2))
392 i = intersect((a1, a2), (b1, b2))
393 if i:
393 if i:
394 unc.append(i)
394 unc.append(i)
395
395
396 if a2 < b2:
396 if a2 < b2:
397 del am[0]
397 del am[0]
398 else:
398 else:
399 del bm[0]
399 del bm[0]
400
400
401 return unc
401 return unc
402
402
403 def simplemerge(ui, local, base, other, **opts):
403 def simplemerge(ui, local, base, other, **opts):
404 def readfile(filename):
404 def readfile(filename):
405 f = open(filename, "rb")
405 f = open(filename, "rb")
406 text = f.read()
406 text = f.read()
407 f.close()
407 f.close()
408 if util.binary(text):
408 if util.binary(text):
409 msg = _("%s looks like a binary file.") % filename
409 msg = _("%s looks like a binary file.") % filename
410 if not opts.get('quiet'):
410 if not opts.get('quiet'):
411 ui.warn(_('warning: %s\n') % msg)
411 ui.warn(_('warning: %s\n') % msg)
412 if not opts.get('text'):
412 if not opts.get('text'):
413 raise util.Abort(msg)
413 raise util.Abort(msg)
414 return text
414 return text
415
415
416 name_a = local
416 name_a = local
417 name_b = other
417 name_b = other
418 labels = opts.get('label', [])
418 labels = opts.get('label', [])
419 if labels:
419 if labels:
420 name_a = labels.pop(0)
420 name_a = labels.pop(0)
421 if labels:
421 if labels:
422 name_b = labels.pop(0)
422 name_b = labels.pop(0)
423 if labels:
423 if labels:
424 raise util.Abort(_("can only specify two labels."))
424 raise util.Abort(_("can only specify two labels."))
425
425
426 try:
426 try:
427 localtext = readfile(local)
427 localtext = readfile(local)
428 basetext = readfile(base)
428 basetext = readfile(base)
429 othertext = readfile(other)
429 othertext = readfile(other)
430 except util.Abort:
430 except util.Abort:
431 return 1
431 return 1
432
432
433 local = os.path.realpath(local)
433 local = os.path.realpath(local)
434 if not opts.get('print'):
434 if not opts.get('print'):
435 opener = scmutil.opener(os.path.dirname(local))
435 opener = scmutil.opener(os.path.dirname(local))
436 out = opener(os.path.basename(local), "w", atomictemp=True)
436 out = opener(os.path.basename(local), "w", atomictemp=True)
437 else:
437 else:
438 out = sys.stdout
438 out = sys.stdout
439
439
440 reprocess = not opts.get('no_minimal')
440 reprocess = not opts.get('no_minimal')
441
441
442 m3 = Merge3Text(basetext, localtext, othertext)
442 m3 = Merge3Text(basetext, localtext, othertext)
443 for line in m3.merge_lines(name_a=name_a, name_b=name_b,
443 for line in m3.merge_lines(name_a=name_a, name_b=name_b,
444 reprocess=reprocess):
444 reprocess=reprocess):
445 out.write(line)
445 out.write(line)
446
446
447 if not opts.get('print'):
447 if not opts.get('print'):
448 out.close()
448 out.close()
449
449
450 if m3.conflicts:
450 if m3.conflicts:
451 if not opts.get('quiet'):
451 if not opts.get('quiet'):
452 ui.warn(_("warning: conflicts during merge.\n"))
452 ui.warn(_("warning: conflicts during merge.\n"))
453 return 1
453 return 1
@@ -1,452 +1,452 b''
1 # win32.py - utility functions that use win32 API
1 # win32.py - utility functions that use win32 API
2 #
2 #
3 # Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
3 # Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 import encoding
8 import encoding
9 import ctypes, errno, os, struct, subprocess, random
9 import ctypes, errno, os, struct, subprocess, random
10
10
11 _kernel32 = ctypes.windll.kernel32
11 _kernel32 = ctypes.windll.kernel32
12 _advapi32 = ctypes.windll.advapi32
12 _advapi32 = ctypes.windll.advapi32
13 _user32 = ctypes.windll.user32
13 _user32 = ctypes.windll.user32
14
14
15 _BOOL = ctypes.c_long
15 _BOOL = ctypes.c_long
16 _WORD = ctypes.c_ushort
16 _WORD = ctypes.c_ushort
17 _DWORD = ctypes.c_ulong
17 _DWORD = ctypes.c_ulong
18 _UINT = ctypes.c_uint
18 _UINT = ctypes.c_uint
19 _LONG = ctypes.c_long
19 _LONG = ctypes.c_long
20 _LPCSTR = _LPSTR = ctypes.c_char_p
20 _LPCSTR = _LPSTR = ctypes.c_char_p
21 _HANDLE = ctypes.c_void_p
21 _HANDLE = ctypes.c_void_p
22 _HWND = _HANDLE
22 _HWND = _HANDLE
23
23
24 _INVALID_HANDLE_VALUE = _HANDLE(-1).value
24 _INVALID_HANDLE_VALUE = _HANDLE(-1).value
25
25
26 # GetLastError
26 # GetLastError
27 _ERROR_SUCCESS = 0
27 _ERROR_SUCCESS = 0
28 _ERROR_INVALID_PARAMETER = 87
28 _ERROR_INVALID_PARAMETER = 87
29 _ERROR_INSUFFICIENT_BUFFER = 122
29 _ERROR_INSUFFICIENT_BUFFER = 122
30
30
31 # WPARAM is defined as UINT_PTR (unsigned type)
31 # WPARAM is defined as UINT_PTR (unsigned type)
32 # LPARAM is defined as LONG_PTR (signed type)
32 # LPARAM is defined as LONG_PTR (signed type)
33 if ctypes.sizeof(ctypes.c_long) == ctypes.sizeof(ctypes.c_void_p):
33 if ctypes.sizeof(ctypes.c_long) == ctypes.sizeof(ctypes.c_void_p):
34 _WPARAM = ctypes.c_ulong
34 _WPARAM = ctypes.c_ulong
35 _LPARAM = ctypes.c_long
35 _LPARAM = ctypes.c_long
36 elif ctypes.sizeof(ctypes.c_longlong) == ctypes.sizeof(ctypes.c_void_p):
36 elif ctypes.sizeof(ctypes.c_longlong) == ctypes.sizeof(ctypes.c_void_p):
37 _WPARAM = ctypes.c_ulonglong
37 _WPARAM = ctypes.c_ulonglong
38 _LPARAM = ctypes.c_longlong
38 _LPARAM = ctypes.c_longlong
39
39
40 class _FILETIME(ctypes.Structure):
40 class _FILETIME(ctypes.Structure):
41 _fields_ = [('dwLowDateTime', _DWORD),
41 _fields_ = [('dwLowDateTime', _DWORD),
42 ('dwHighDateTime', _DWORD)]
42 ('dwHighDateTime', _DWORD)]
43
43
44 class _BY_HANDLE_FILE_INFORMATION(ctypes.Structure):
44 class _BY_HANDLE_FILE_INFORMATION(ctypes.Structure):
45 _fields_ = [('dwFileAttributes', _DWORD),
45 _fields_ = [('dwFileAttributes', _DWORD),
46 ('ftCreationTime', _FILETIME),
46 ('ftCreationTime', _FILETIME),
47 ('ftLastAccessTime', _FILETIME),
47 ('ftLastAccessTime', _FILETIME),
48 ('ftLastWriteTime', _FILETIME),
48 ('ftLastWriteTime', _FILETIME),
49 ('dwVolumeSerialNumber', _DWORD),
49 ('dwVolumeSerialNumber', _DWORD),
50 ('nFileSizeHigh', _DWORD),
50 ('nFileSizeHigh', _DWORD),
51 ('nFileSizeLow', _DWORD),
51 ('nFileSizeLow', _DWORD),
52 ('nNumberOfLinks', _DWORD),
52 ('nNumberOfLinks', _DWORD),
53 ('nFileIndexHigh', _DWORD),
53 ('nFileIndexHigh', _DWORD),
54 ('nFileIndexLow', _DWORD)]
54 ('nFileIndexLow', _DWORD)]
55
55
56 # CreateFile
56 # CreateFile
57 _FILE_SHARE_READ = 0x00000001
57 _FILE_SHARE_READ = 0x00000001
58 _FILE_SHARE_WRITE = 0x00000002
58 _FILE_SHARE_WRITE = 0x00000002
59 _FILE_SHARE_DELETE = 0x00000004
59 _FILE_SHARE_DELETE = 0x00000004
60
60
61 _OPEN_EXISTING = 3
61 _OPEN_EXISTING = 3
62
62
63 # SetFileAttributes
63 # SetFileAttributes
64 _FILE_ATTRIBUTE_NORMAL = 0x80
64 _FILE_ATTRIBUTE_NORMAL = 0x80
65 _FILE_ATTRIBUTE_NOT_CONTENT_INDEXED = 0x2000
65 _FILE_ATTRIBUTE_NOT_CONTENT_INDEXED = 0x2000
66
66
67 # Process Security and Access Rights
67 # Process Security and Access Rights
68 _PROCESS_QUERY_INFORMATION = 0x0400
68 _PROCESS_QUERY_INFORMATION = 0x0400
69
69
70 # GetExitCodeProcess
70 # GetExitCodeProcess
71 _STILL_ACTIVE = 259
71 _STILL_ACTIVE = 259
72
72
73 # registry
73 # registry
74 _HKEY_CURRENT_USER = 0x80000001L
74 _HKEY_CURRENT_USER = 0x80000001L
75 _HKEY_LOCAL_MACHINE = 0x80000002L
75 _HKEY_LOCAL_MACHINE = 0x80000002L
76 _KEY_READ = 0x20019
76 _KEY_READ = 0x20019
77 _REG_SZ = 1
77 _REG_SZ = 1
78 _REG_DWORD = 4
78 _REG_DWORD = 4
79
79
80 class _STARTUPINFO(ctypes.Structure):
80 class _STARTUPINFO(ctypes.Structure):
81 _fields_ = [('cb', _DWORD),
81 _fields_ = [('cb', _DWORD),
82 ('lpReserved', _LPSTR),
82 ('lpReserved', _LPSTR),
83 ('lpDesktop', _LPSTR),
83 ('lpDesktop', _LPSTR),
84 ('lpTitle', _LPSTR),
84 ('lpTitle', _LPSTR),
85 ('dwX', _DWORD),
85 ('dwX', _DWORD),
86 ('dwY', _DWORD),
86 ('dwY', _DWORD),
87 ('dwXSize', _DWORD),
87 ('dwXSize', _DWORD),
88 ('dwYSize', _DWORD),
88 ('dwYSize', _DWORD),
89 ('dwXCountChars', _DWORD),
89 ('dwXCountChars', _DWORD),
90 ('dwYCountChars', _DWORD),
90 ('dwYCountChars', _DWORD),
91 ('dwFillAttribute', _DWORD),
91 ('dwFillAttribute', _DWORD),
92 ('dwFlags', _DWORD),
92 ('dwFlags', _DWORD),
93 ('wShowWindow', _WORD),
93 ('wShowWindow', _WORD),
94 ('cbReserved2', _WORD),
94 ('cbReserved2', _WORD),
95 ('lpReserved2', ctypes.c_char_p),
95 ('lpReserved2', ctypes.c_char_p),
96 ('hStdInput', _HANDLE),
96 ('hStdInput', _HANDLE),
97 ('hStdOutput', _HANDLE),
97 ('hStdOutput', _HANDLE),
98 ('hStdError', _HANDLE)]
98 ('hStdError', _HANDLE)]
99
99
100 class _PROCESS_INFORMATION(ctypes.Structure):
100 class _PROCESS_INFORMATION(ctypes.Structure):
101 _fields_ = [('hProcess', _HANDLE),
101 _fields_ = [('hProcess', _HANDLE),
102 ('hThread', _HANDLE),
102 ('hThread', _HANDLE),
103 ('dwProcessId', _DWORD),
103 ('dwProcessId', _DWORD),
104 ('dwThreadId', _DWORD)]
104 ('dwThreadId', _DWORD)]
105
105
106 _DETACHED_PROCESS = 0x00000008
106 _DETACHED_PROCESS = 0x00000008
107 _STARTF_USESHOWWINDOW = 0x00000001
107 _STARTF_USESHOWWINDOW = 0x00000001
108 _SW_HIDE = 0
108 _SW_HIDE = 0
109
109
110 class _COORD(ctypes.Structure):
110 class _COORD(ctypes.Structure):
111 _fields_ = [('X', ctypes.c_short),
111 _fields_ = [('X', ctypes.c_short),
112 ('Y', ctypes.c_short)]
112 ('Y', ctypes.c_short)]
113
113
114 class _SMALL_RECT(ctypes.Structure):
114 class _SMALL_RECT(ctypes.Structure):
115 _fields_ = [('Left', ctypes.c_short),
115 _fields_ = [('Left', ctypes.c_short),
116 ('Top', ctypes.c_short),
116 ('Top', ctypes.c_short),
117 ('Right', ctypes.c_short),
117 ('Right', ctypes.c_short),
118 ('Bottom', ctypes.c_short)]
118 ('Bottom', ctypes.c_short)]
119
119
120 class _CONSOLE_SCREEN_BUFFER_INFO(ctypes.Structure):
120 class _CONSOLE_SCREEN_BUFFER_INFO(ctypes.Structure):
121 _fields_ = [('dwSize', _COORD),
121 _fields_ = [('dwSize', _COORD),
122 ('dwCursorPosition', _COORD),
122 ('dwCursorPosition', _COORD),
123 ('wAttributes', _WORD),
123 ('wAttributes', _WORD),
124 ('srWindow', _SMALL_RECT),
124 ('srWindow', _SMALL_RECT),
125 ('dwMaximumWindowSize', _COORD)]
125 ('dwMaximumWindowSize', _COORD)]
126
126
127 _STD_ERROR_HANDLE = _DWORD(-12).value
127 _STD_ERROR_HANDLE = _DWORD(-12).value
128
128
129 # types of parameters of C functions used (required by pypy)
129 # types of parameters of C functions used (required by pypy)
130
130
131 _kernel32.CreateFileA.argtypes = [_LPCSTR, _DWORD, _DWORD, ctypes.c_void_p,
131 _kernel32.CreateFileA.argtypes = [_LPCSTR, _DWORD, _DWORD, ctypes.c_void_p,
132 _DWORD, _DWORD, _HANDLE]
132 _DWORD, _DWORD, _HANDLE]
133 _kernel32.CreateFileA.restype = _HANDLE
133 _kernel32.CreateFileA.restype = _HANDLE
134
134
135 _kernel32.GetFileInformationByHandle.argtypes = [_HANDLE, ctypes.c_void_p]
135 _kernel32.GetFileInformationByHandle.argtypes = [_HANDLE, ctypes.c_void_p]
136 _kernel32.GetFileInformationByHandle.restype = _BOOL
136 _kernel32.GetFileInformationByHandle.restype = _BOOL
137
137
138 _kernel32.CloseHandle.argtypes = [_HANDLE]
138 _kernel32.CloseHandle.argtypes = [_HANDLE]
139 _kernel32.CloseHandle.restype = _BOOL
139 _kernel32.CloseHandle.restype = _BOOL
140
140
141 try:
141 try:
142 _kernel32.CreateHardLinkA.argtypes = [_LPCSTR, _LPCSTR, ctypes.c_void_p]
142 _kernel32.CreateHardLinkA.argtypes = [_LPCSTR, _LPCSTR, ctypes.c_void_p]
143 _kernel32.CreateHardLinkA.restype = _BOOL
143 _kernel32.CreateHardLinkA.restype = _BOOL
144 except AttributeError:
144 except AttributeError:
145 pass
145 pass
146
146
147 _kernel32.SetFileAttributesA.argtypes = [_LPCSTR, _DWORD]
147 _kernel32.SetFileAttributesA.argtypes = [_LPCSTR, _DWORD]
148 _kernel32.SetFileAttributesA.restype = _BOOL
148 _kernel32.SetFileAttributesA.restype = _BOOL
149
149
150 _kernel32.OpenProcess.argtypes = [_DWORD, _BOOL, _DWORD]
150 _kernel32.OpenProcess.argtypes = [_DWORD, _BOOL, _DWORD]
151 _kernel32.OpenProcess.restype = _HANDLE
151 _kernel32.OpenProcess.restype = _HANDLE
152
152
153 _kernel32.GetExitCodeProcess.argtypes = [_HANDLE, ctypes.c_void_p]
153 _kernel32.GetExitCodeProcess.argtypes = [_HANDLE, ctypes.c_void_p]
154 _kernel32.GetExitCodeProcess.restype = _BOOL
154 _kernel32.GetExitCodeProcess.restype = _BOOL
155
155
156 _kernel32.GetLastError.argtypes = []
156 _kernel32.GetLastError.argtypes = []
157 _kernel32.GetLastError.restype = _DWORD
157 _kernel32.GetLastError.restype = _DWORD
158
158
159 _kernel32.GetModuleFileNameA.argtypes = [_HANDLE, ctypes.c_void_p, _DWORD]
159 _kernel32.GetModuleFileNameA.argtypes = [_HANDLE, ctypes.c_void_p, _DWORD]
160 _kernel32.GetModuleFileNameA.restype = _DWORD
160 _kernel32.GetModuleFileNameA.restype = _DWORD
161
161
162 _kernel32.CreateProcessA.argtypes = [_LPCSTR, _LPCSTR, ctypes.c_void_p,
162 _kernel32.CreateProcessA.argtypes = [_LPCSTR, _LPCSTR, ctypes.c_void_p,
163 ctypes.c_void_p, _BOOL, _DWORD, ctypes.c_void_p, _LPCSTR, ctypes.c_void_p,
163 ctypes.c_void_p, _BOOL, _DWORD, ctypes.c_void_p, _LPCSTR, ctypes.c_void_p,
164 ctypes.c_void_p]
164 ctypes.c_void_p]
165 _kernel32.CreateProcessA.restype = _BOOL
165 _kernel32.CreateProcessA.restype = _BOOL
166
166
167 _kernel32.ExitProcess.argtypes = [_UINT]
167 _kernel32.ExitProcess.argtypes = [_UINT]
168 _kernel32.ExitProcess.restype = None
168 _kernel32.ExitProcess.restype = None
169
169
170 _kernel32.GetCurrentProcessId.argtypes = []
170 _kernel32.GetCurrentProcessId.argtypes = []
171 _kernel32.GetCurrentProcessId.restype = _DWORD
171 _kernel32.GetCurrentProcessId.restype = _DWORD
172
172
173 _SIGNAL_HANDLER = ctypes.WINFUNCTYPE(_BOOL, _DWORD)
173 _SIGNAL_HANDLER = ctypes.WINFUNCTYPE(_BOOL, _DWORD)
174 _kernel32.SetConsoleCtrlHandler.argtypes = [_SIGNAL_HANDLER, _BOOL]
174 _kernel32.SetConsoleCtrlHandler.argtypes = [_SIGNAL_HANDLER, _BOOL]
175 _kernel32.SetConsoleCtrlHandler.restype = _BOOL
175 _kernel32.SetConsoleCtrlHandler.restype = _BOOL
176
176
177 _kernel32.GetStdHandle.argtypes = [_DWORD]
177 _kernel32.GetStdHandle.argtypes = [_DWORD]
178 _kernel32.GetStdHandle.restype = _HANDLE
178 _kernel32.GetStdHandle.restype = _HANDLE
179
179
180 _kernel32.GetConsoleScreenBufferInfo.argtypes = [_HANDLE, ctypes.c_void_p]
180 _kernel32.GetConsoleScreenBufferInfo.argtypes = [_HANDLE, ctypes.c_void_p]
181 _kernel32.GetConsoleScreenBufferInfo.restype = _BOOL
181 _kernel32.GetConsoleScreenBufferInfo.restype = _BOOL
182
182
183 _advapi32.RegOpenKeyExA.argtypes = [_HANDLE, _LPCSTR, _DWORD, _DWORD,
183 _advapi32.RegOpenKeyExA.argtypes = [_HANDLE, _LPCSTR, _DWORD, _DWORD,
184 ctypes.c_void_p]
184 ctypes.c_void_p]
185 _advapi32.RegOpenKeyExA.restype = _LONG
185 _advapi32.RegOpenKeyExA.restype = _LONG
186
186
187 _advapi32.RegQueryValueExA.argtypes = [_HANDLE, _LPCSTR, ctypes.c_void_p,
187 _advapi32.RegQueryValueExA.argtypes = [_HANDLE, _LPCSTR, ctypes.c_void_p,
188 ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
188 ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
189 _advapi32.RegQueryValueExA.restype = _LONG
189 _advapi32.RegQueryValueExA.restype = _LONG
190
190
191 _advapi32.RegCloseKey.argtypes = [_HANDLE]
191 _advapi32.RegCloseKey.argtypes = [_HANDLE]
192 _advapi32.RegCloseKey.restype = _LONG
192 _advapi32.RegCloseKey.restype = _LONG
193
193
194 _advapi32.GetUserNameA.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
194 _advapi32.GetUserNameA.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
195 _advapi32.GetUserNameA.restype = _BOOL
195 _advapi32.GetUserNameA.restype = _BOOL
196
196
197 _user32.GetWindowThreadProcessId.argtypes = [_HANDLE, ctypes.c_void_p]
197 _user32.GetWindowThreadProcessId.argtypes = [_HANDLE, ctypes.c_void_p]
198 _user32.GetWindowThreadProcessId.restype = _DWORD
198 _user32.GetWindowThreadProcessId.restype = _DWORD
199
199
200 _user32.ShowWindow.argtypes = [_HANDLE, ctypes.c_int]
200 _user32.ShowWindow.argtypes = [_HANDLE, ctypes.c_int]
201 _user32.ShowWindow.restype = _BOOL
201 _user32.ShowWindow.restype = _BOOL
202
202
203 _WNDENUMPROC = ctypes.WINFUNCTYPE(_BOOL, _HWND, _LPARAM)
203 _WNDENUMPROC = ctypes.WINFUNCTYPE(_BOOL, _HWND, _LPARAM)
204 _user32.EnumWindows.argtypes = [_WNDENUMPROC, _LPARAM]
204 _user32.EnumWindows.argtypes = [_WNDENUMPROC, _LPARAM]
205 _user32.EnumWindows.restype = _BOOL
205 _user32.EnumWindows.restype = _BOOL
206
206
207 def _raiseoserror(name):
207 def _raiseoserror(name):
208 err = ctypes.WinError()
208 err = ctypes.WinError()
209 raise OSError(err.errno, '%s: %s' % (name, err.strerror))
209 raise OSError(err.errno, '%s: %s' % (name, err.strerror))
210
210
211 def _getfileinfo(name):
211 def _getfileinfo(name):
212 fh = _kernel32.CreateFileA(name, 0,
212 fh = _kernel32.CreateFileA(name, 0,
213 _FILE_SHARE_READ | _FILE_SHARE_WRITE | _FILE_SHARE_DELETE,
213 _FILE_SHARE_READ | _FILE_SHARE_WRITE | _FILE_SHARE_DELETE,
214 None, _OPEN_EXISTING, 0, None)
214 None, _OPEN_EXISTING, 0, None)
215 if fh == _INVALID_HANDLE_VALUE:
215 if fh == _INVALID_HANDLE_VALUE:
216 _raiseoserror(name)
216 _raiseoserror(name)
217 try:
217 try:
218 fi = _BY_HANDLE_FILE_INFORMATION()
218 fi = _BY_HANDLE_FILE_INFORMATION()
219 if not _kernel32.GetFileInformationByHandle(fh, ctypes.byref(fi)):
219 if not _kernel32.GetFileInformationByHandle(fh, ctypes.byref(fi)):
220 _raiseoserror(name)
220 _raiseoserror(name)
221 return fi
221 return fi
222 finally:
222 finally:
223 _kernel32.CloseHandle(fh)
223 _kernel32.CloseHandle(fh)
224
224
225 def oslink(src, dst):
225 def oslink(src, dst):
226 try:
226 try:
227 if not _kernel32.CreateHardLinkA(dst, src, None):
227 if not _kernel32.CreateHardLinkA(dst, src, None):
228 _raiseoserror(src)
228 _raiseoserror(src)
229 except AttributeError: # Wine doesn't support this function
229 except AttributeError: # Wine doesn't support this function
230 _raiseoserror(src)
230 _raiseoserror(src)
231
231
232 def nlinks(name):
232 def nlinks(name):
233 '''return number of hardlinks for the given file'''
233 '''return number of hardlinks for the given file'''
234 return _getfileinfo(name).nNumberOfLinks
234 return _getfileinfo(name).nNumberOfLinks
235
235
236 def samefile(fpath1, fpath2):
236 def samefile(fpath1, fpath2):
237 '''Returns whether fpath1 and fpath2 refer to the same file. This is only
237 '''Returns whether fpath1 and fpath2 refer to the same file. This is only
238 guaranteed to work for files, not directories.'''
238 guaranteed to work for files, not directories.'''
239 res1 = _getfileinfo(fpath1)
239 res1 = _getfileinfo(fpath1)
240 res2 = _getfileinfo(fpath2)
240 res2 = _getfileinfo(fpath2)
241 return (res1.dwVolumeSerialNumber == res2.dwVolumeSerialNumber
241 return (res1.dwVolumeSerialNumber == res2.dwVolumeSerialNumber
242 and res1.nFileIndexHigh == res2.nFileIndexHigh
242 and res1.nFileIndexHigh == res2.nFileIndexHigh
243 and res1.nFileIndexLow == res2.nFileIndexLow)
243 and res1.nFileIndexLow == res2.nFileIndexLow)
244
244
245 def samedevice(fpath1, fpath2):
245 def samedevice(fpath1, fpath2):
246 '''Returns whether fpath1 and fpath2 are on the same device. This is only
246 '''Returns whether fpath1 and fpath2 are on the same device. This is only
247 guaranteed to work for files, not directories.'''
247 guaranteed to work for files, not directories.'''
248 res1 = _getfileinfo(fpath1)
248 res1 = _getfileinfo(fpath1)
249 res2 = _getfileinfo(fpath2)
249 res2 = _getfileinfo(fpath2)
250 return res1.dwVolumeSerialNumber == res2.dwVolumeSerialNumber
250 return res1.dwVolumeSerialNumber == res2.dwVolumeSerialNumber
251
251
252 def testpid(pid):
252 def testpid(pid):
253 '''return True if pid is still running or unable to
253 '''return True if pid is still running or unable to
254 determine, False otherwise'''
254 determine, False otherwise'''
255 h = _kernel32.OpenProcess(_PROCESS_QUERY_INFORMATION, False, pid)
255 h = _kernel32.OpenProcess(_PROCESS_QUERY_INFORMATION, False, pid)
256 if h:
256 if h:
257 try:
257 try:
258 status = _DWORD()
258 status = _DWORD()
259 if _kernel32.GetExitCodeProcess(h, ctypes.byref(status)):
259 if _kernel32.GetExitCodeProcess(h, ctypes.byref(status)):
260 return status.value == _STILL_ACTIVE
260 return status.value == _STILL_ACTIVE
261 finally:
261 finally:
262 _kernel32.CloseHandle(h)
262 _kernel32.CloseHandle(h)
263 return _kernel32.GetLastError() != _ERROR_INVALID_PARAMETER
263 return _kernel32.GetLastError() != _ERROR_INVALID_PARAMETER
264
264
265 def lookupreg(key, valname=None, scope=None):
265 def lookupreg(key, valname=None, scope=None):
266 ''' Look up a key/value name in the Windows registry.
266 ''' Look up a key/value name in the Windows registry.
267
267
268 valname: value name. If unspecified, the default value for the key
268 valname: value name. If unspecified, the default value for the key
269 is used.
269 is used.
270 scope: optionally specify scope for registry lookup, this can be
270 scope: optionally specify scope for registry lookup, this can be
271 a sequence of scopes to look up in order. Default (CURRENT_USER,
271 a sequence of scopes to look up in order. Default (CURRENT_USER,
272 LOCAL_MACHINE).
272 LOCAL_MACHINE).
273 '''
273 '''
274 byref = ctypes.byref
274 byref = ctypes.byref
275 if scope is None:
275 if scope is None:
276 scope = (_HKEY_CURRENT_USER, _HKEY_LOCAL_MACHINE)
276 scope = (_HKEY_CURRENT_USER, _HKEY_LOCAL_MACHINE)
277 elif not isinstance(scope, (list, tuple)):
277 elif not isinstance(scope, (list, tuple)):
278 scope = (scope,)
278 scope = (scope,)
279 for s in scope:
279 for s in scope:
280 kh = _HANDLE()
280 kh = _HANDLE()
281 res = _advapi32.RegOpenKeyExA(s, key, 0, _KEY_READ, ctypes.byref(kh))
281 res = _advapi32.RegOpenKeyExA(s, key, 0, _KEY_READ, ctypes.byref(kh))
282 if res != _ERROR_SUCCESS:
282 if res != _ERROR_SUCCESS:
283 continue
283 continue
284 try:
284 try:
285 size = _DWORD(600)
285 size = _DWORD(600)
286 type = _DWORD()
286 type = _DWORD()
287 buf = ctypes.create_string_buffer(size.value + 1)
287 buf = ctypes.create_string_buffer(size.value + 1)
288 res = _advapi32.RegQueryValueExA(kh.value, valname, None,
288 res = _advapi32.RegQueryValueExA(kh.value, valname, None,
289 byref(type), buf, byref(size))
289 byref(type), buf, byref(size))
290 if res != _ERROR_SUCCESS:
290 if res != _ERROR_SUCCESS:
291 continue
291 continue
292 if type.value == _REG_SZ:
292 if type.value == _REG_SZ:
293 # never let a Unicode string escape into the wild
293 # never let a Unicode string escape into the wild
294 return encoding.tolocal(buf.value.encode('UTF-8'))
294 return encoding.tolocal(buf.value.encode('UTF-8'))
295 elif type.value == _REG_DWORD:
295 elif type.value == _REG_DWORD:
296 fmt = '<L'
296 fmt = '<L'
297 s = ctypes.string_at(byref(buf), struct.calcsize(fmt))
297 s = ctypes.string_at(byref(buf), struct.calcsize(fmt))
298 return struct.unpack(fmt, s)[0]
298 return struct.unpack(fmt, s)[0]
299 finally:
299 finally:
300 _advapi32.RegCloseKey(kh.value)
300 _advapi32.RegCloseKey(kh.value)
301
301
302 def executablepath():
302 def executablepath():
303 '''return full path of hg.exe'''
303 '''return full path of hg.exe'''
304 size = 600
304 size = 600
305 buf = ctypes.create_string_buffer(size + 1)
305 buf = ctypes.create_string_buffer(size + 1)
306 len = _kernel32.GetModuleFileNameA(None, ctypes.byref(buf), size)
306 len = _kernel32.GetModuleFileNameA(None, ctypes.byref(buf), size)
307 if len == 0:
307 if len == 0:
308 raise ctypes.WinError()
308 raise ctypes.WinError
309 elif len == size:
309 elif len == size:
310 raise ctypes.WinError(_ERROR_INSUFFICIENT_BUFFER)
310 raise ctypes.WinError(_ERROR_INSUFFICIENT_BUFFER)
311 return buf.value
311 return buf.value
312
312
313 def getuser():
313 def getuser():
314 '''return name of current user'''
314 '''return name of current user'''
315 size = _DWORD(300)
315 size = _DWORD(300)
316 buf = ctypes.create_string_buffer(size.value + 1)
316 buf = ctypes.create_string_buffer(size.value + 1)
317 if not _advapi32.GetUserNameA(ctypes.byref(buf), ctypes.byref(size)):
317 if not _advapi32.GetUserNameA(ctypes.byref(buf), ctypes.byref(size)):
318 raise ctypes.WinError()
318 raise ctypes.WinError
319 return buf.value
319 return buf.value
320
320
321 _signalhandler = []
321 _signalhandler = []
322
322
323 def setsignalhandler():
323 def setsignalhandler():
324 '''Register a termination handler for console events including
324 '''Register a termination handler for console events including
325 CTRL+C. python signal handlers do not work well with socket
325 CTRL+C. python signal handlers do not work well with socket
326 operations.
326 operations.
327 '''
327 '''
328 def handler(event):
328 def handler(event):
329 _kernel32.ExitProcess(1)
329 _kernel32.ExitProcess(1)
330
330
331 if _signalhandler:
331 if _signalhandler:
332 return # already registered
332 return # already registered
333 h = _SIGNAL_HANDLER(handler)
333 h = _SIGNAL_HANDLER(handler)
334 _signalhandler.append(h) # needed to prevent garbage collection
334 _signalhandler.append(h) # needed to prevent garbage collection
335 if not _kernel32.SetConsoleCtrlHandler(h, True):
335 if not _kernel32.SetConsoleCtrlHandler(h, True):
336 raise ctypes.WinError()
336 raise ctypes.WinError
337
337
338 def hidewindow():
338 def hidewindow():
339
339
340 def callback(hwnd, pid):
340 def callback(hwnd, pid):
341 wpid = _DWORD()
341 wpid = _DWORD()
342 _user32.GetWindowThreadProcessId(hwnd, ctypes.byref(wpid))
342 _user32.GetWindowThreadProcessId(hwnd, ctypes.byref(wpid))
343 if pid == wpid.value:
343 if pid == wpid.value:
344 _user32.ShowWindow(hwnd, _SW_HIDE)
344 _user32.ShowWindow(hwnd, _SW_HIDE)
345 return False # stop enumerating windows
345 return False # stop enumerating windows
346 return True
346 return True
347
347
348 pid = _kernel32.GetCurrentProcessId()
348 pid = _kernel32.GetCurrentProcessId()
349 _user32.EnumWindows(_WNDENUMPROC(callback), pid)
349 _user32.EnumWindows(_WNDENUMPROC(callback), pid)
350
350
351 def termwidth():
351 def termwidth():
352 # cmd.exe does not handle CR like a unix console, the CR is
352 # cmd.exe does not handle CR like a unix console, the CR is
353 # counted in the line length. On 80 columns consoles, if 80
353 # counted in the line length. On 80 columns consoles, if 80
354 # characters are written, the following CR won't apply on the
354 # characters are written, the following CR won't apply on the
355 # current line but on the new one. Keep room for it.
355 # current line but on the new one. Keep room for it.
356 width = 79
356 width = 79
357 # Query stderr to avoid problems with redirections
357 # Query stderr to avoid problems with redirections
358 screenbuf = _kernel32.GetStdHandle(
358 screenbuf = _kernel32.GetStdHandle(
359 _STD_ERROR_HANDLE) # don't close the handle returned
359 _STD_ERROR_HANDLE) # don't close the handle returned
360 if screenbuf is None or screenbuf == _INVALID_HANDLE_VALUE:
360 if screenbuf is None or screenbuf == _INVALID_HANDLE_VALUE:
361 return width
361 return width
362 csbi = _CONSOLE_SCREEN_BUFFER_INFO()
362 csbi = _CONSOLE_SCREEN_BUFFER_INFO()
363 if not _kernel32.GetConsoleScreenBufferInfo(
363 if not _kernel32.GetConsoleScreenBufferInfo(
364 screenbuf, ctypes.byref(csbi)):
364 screenbuf, ctypes.byref(csbi)):
365 return width
365 return width
366 width = csbi.srWindow.Right - csbi.srWindow.Left
366 width = csbi.srWindow.Right - csbi.srWindow.Left
367 return width
367 return width
368
368
369 def spawndetached(args):
369 def spawndetached(args):
370 # No standard library function really spawns a fully detached
370 # No standard library function really spawns a fully detached
371 # process under win32 because they allocate pipes or other objects
371 # process under win32 because they allocate pipes or other objects
372 # to handle standard streams communications. Passing these objects
372 # to handle standard streams communications. Passing these objects
373 # to the child process requires handle inheritance to be enabled
373 # to the child process requires handle inheritance to be enabled
374 # which makes really detached processes impossible.
374 # which makes really detached processes impossible.
375 si = _STARTUPINFO()
375 si = _STARTUPINFO()
376 si.cb = ctypes.sizeof(_STARTUPINFO)
376 si.cb = ctypes.sizeof(_STARTUPINFO)
377 si.dwFlags = _STARTF_USESHOWWINDOW
377 si.dwFlags = _STARTF_USESHOWWINDOW
378 si.wShowWindow = _SW_HIDE
378 si.wShowWindow = _SW_HIDE
379
379
380 pi = _PROCESS_INFORMATION()
380 pi = _PROCESS_INFORMATION()
381
381
382 env = ''
382 env = ''
383 for k in os.environ:
383 for k in os.environ:
384 env += "%s=%s\0" % (k, os.environ[k])
384 env += "%s=%s\0" % (k, os.environ[k])
385 if not env:
385 if not env:
386 env = '\0'
386 env = '\0'
387 env += '\0'
387 env += '\0'
388
388
389 args = subprocess.list2cmdline(args)
389 args = subprocess.list2cmdline(args)
390 # Not running the command in shell mode makes python26 hang when
390 # Not running the command in shell mode makes python26 hang when
391 # writing to hgweb output socket.
391 # writing to hgweb output socket.
392 comspec = os.environ.get("COMSPEC", "cmd.exe")
392 comspec = os.environ.get("COMSPEC", "cmd.exe")
393 args = comspec + " /c " + args
393 args = comspec + " /c " + args
394
394
395 res = _kernel32.CreateProcessA(
395 res = _kernel32.CreateProcessA(
396 None, args, None, None, False, _DETACHED_PROCESS,
396 None, args, None, None, False, _DETACHED_PROCESS,
397 env, os.getcwd(), ctypes.byref(si), ctypes.byref(pi))
397 env, os.getcwd(), ctypes.byref(si), ctypes.byref(pi))
398 if not res:
398 if not res:
399 raise ctypes.WinError()
399 raise ctypes.WinError
400
400
401 return pi.dwProcessId
401 return pi.dwProcessId
402
402
403 def unlink(f):
403 def unlink(f):
404 '''try to implement POSIX' unlink semantics on Windows'''
404 '''try to implement POSIX' unlink semantics on Windows'''
405
405
406 # POSIX allows to unlink and rename open files. Windows has serious
406 # POSIX allows to unlink and rename open files. Windows has serious
407 # problems with doing that:
407 # problems with doing that:
408 # - Calling os.unlink (or os.rename) on a file f fails if f or any
408 # - Calling os.unlink (or os.rename) on a file f fails if f or any
409 # hardlinked copy of f has been opened with Python's open(). There is no
409 # hardlinked copy of f has been opened with Python's open(). There is no
410 # way such a file can be deleted or renamed on Windows (other than
410 # way such a file can be deleted or renamed on Windows (other than
411 # scheduling the delete or rename for the next reboot).
411 # scheduling the delete or rename for the next reboot).
412 # - Calling os.unlink on a file that has been opened with Mercurial's
412 # - Calling os.unlink on a file that has been opened with Mercurial's
413 # posixfile (or comparable methods) will delay the actual deletion of
413 # posixfile (or comparable methods) will delay the actual deletion of
414 # the file for as long as the file is held open. The filename is blocked
414 # the file for as long as the file is held open. The filename is blocked
415 # during that time and cannot be used for recreating a new file under
415 # during that time and cannot be used for recreating a new file under
416 # that same name ("zombie file"). Directories containing such zombie files
416 # that same name ("zombie file"). Directories containing such zombie files
417 # cannot be removed or moved.
417 # cannot be removed or moved.
418 # A file that has been opened with posixfile can be renamed, so we rename
418 # A file that has been opened with posixfile can be renamed, so we rename
419 # f to a random temporary name before calling os.unlink on it. This allows
419 # f to a random temporary name before calling os.unlink on it. This allows
420 # callers to recreate f immediately while having other readers do their
420 # callers to recreate f immediately while having other readers do their
421 # implicit zombie filename blocking on a temporary name.
421 # implicit zombie filename blocking on a temporary name.
422
422
423 for tries in xrange(10):
423 for tries in xrange(10):
424 temp = '%s-%08x' % (f, random.randint(0, 0xffffffff))
424 temp = '%s-%08x' % (f, random.randint(0, 0xffffffff))
425 try:
425 try:
426 os.rename(f, temp) # raises OSError EEXIST if temp exists
426 os.rename(f, temp) # raises OSError EEXIST if temp exists
427 break
427 break
428 except OSError, e:
428 except OSError, e:
429 if e.errno != errno.EEXIST:
429 if e.errno != errno.EEXIST:
430 raise
430 raise
431 else:
431 else:
432 raise IOError, (errno.EEXIST, "No usable temporary filename found")
432 raise IOError, (errno.EEXIST, "No usable temporary filename found")
433
433
434 try:
434 try:
435 os.unlink(temp)
435 os.unlink(temp)
436 except OSError:
436 except OSError:
437 # The unlink might have failed because the READONLY attribute may heave
437 # The unlink might have failed because the READONLY attribute may heave
438 # been set on the original file. Rename works fine with READONLY set,
438 # been set on the original file. Rename works fine with READONLY set,
439 # but not os.unlink. Reset all attributes and try again.
439 # but not os.unlink. Reset all attributes and try again.
440 _kernel32.SetFileAttributesA(temp, _FILE_ATTRIBUTE_NORMAL)
440 _kernel32.SetFileAttributesA(temp, _FILE_ATTRIBUTE_NORMAL)
441 try:
441 try:
442 os.unlink(temp)
442 os.unlink(temp)
443 except OSError:
443 except OSError:
444 # The unlink might have failed due to some very rude AV-Scanners.
444 # The unlink might have failed due to some very rude AV-Scanners.
445 # Leaking a tempfile is the lesser evil than aborting here and
445 # Leaking a tempfile is the lesser evil than aborting here and
446 # leaving some potentially serious inconsistencies.
446 # leaving some potentially serious inconsistencies.
447 pass
447 pass
448
448
449 def makedir(path, notindexed):
449 def makedir(path, notindexed):
450 os.mkdir(path)
450 os.mkdir(path)
451 if notindexed:
451 if notindexed:
452 _kernel32.SetFileAttributesA(path, _FILE_ATTRIBUTE_NOT_CONTENT_INDEXED)
452 _kernel32.SetFileAttributesA(path, _FILE_ATTRIBUTE_NOT_CONTENT_INDEXED)
@@ -1,319 +1,319 b''
1 # windows.py - Windows utility function implementations for Mercurial
1 # windows.py - Windows utility function implementations for Mercurial
2 #
2 #
3 # Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
3 # Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from i18n import _
8 from i18n import _
9 import osutil
9 import osutil
10 import errno, msvcrt, os, re, sys
10 import errno, msvcrt, os, re, sys
11
11
12 import win32
12 import win32
13 executablepath = win32.executablepath
13 executablepath = win32.executablepath
14 getuser = win32.getuser
14 getuser = win32.getuser
15 hidewindow = win32.hidewindow
15 hidewindow = win32.hidewindow
16 lookupreg = win32.lookupreg
16 lookupreg = win32.lookupreg
17 makedir = win32.makedir
17 makedir = win32.makedir
18 nlinks = win32.nlinks
18 nlinks = win32.nlinks
19 oslink = win32.oslink
19 oslink = win32.oslink
20 samedevice = win32.samedevice
20 samedevice = win32.samedevice
21 samefile = win32.samefile
21 samefile = win32.samefile
22 setsignalhandler = win32.setsignalhandler
22 setsignalhandler = win32.setsignalhandler
23 spawndetached = win32.spawndetached
23 spawndetached = win32.spawndetached
24 termwidth = win32.termwidth
24 termwidth = win32.termwidth
25 testpid = win32.testpid
25 testpid = win32.testpid
26 unlink = win32.unlink
26 unlink = win32.unlink
27
27
28 nulldev = 'NUL:'
28 nulldev = 'NUL:'
29 umask = 0022
29 umask = 0022
30
30
31 # wrap osutil.posixfile to provide friendlier exceptions
31 # wrap osutil.posixfile to provide friendlier exceptions
32 def posixfile(name, mode='r', buffering=-1):
32 def posixfile(name, mode='r', buffering=-1):
33 try:
33 try:
34 return osutil.posixfile(name, mode, buffering)
34 return osutil.posixfile(name, mode, buffering)
35 except WindowsError, err:
35 except WindowsError, err:
36 raise IOError(err.errno, '%s: %s' % (name, err.strerror))
36 raise IOError(err.errno, '%s: %s' % (name, err.strerror))
37 posixfile.__doc__ = osutil.posixfile.__doc__
37 posixfile.__doc__ = osutil.posixfile.__doc__
38
38
39 class winstdout(object):
39 class winstdout(object):
40 '''stdout on windows misbehaves if sent through a pipe'''
40 '''stdout on windows misbehaves if sent through a pipe'''
41
41
42 def __init__(self, fp):
42 def __init__(self, fp):
43 self.fp = fp
43 self.fp = fp
44
44
45 def __getattr__(self, key):
45 def __getattr__(self, key):
46 return getattr(self.fp, key)
46 return getattr(self.fp, key)
47
47
48 def close(self):
48 def close(self):
49 try:
49 try:
50 self.fp.close()
50 self.fp.close()
51 except IOError:
51 except IOError:
52 pass
52 pass
53
53
54 def write(self, s):
54 def write(self, s):
55 try:
55 try:
56 # This is workaround for "Not enough space" error on
56 # This is workaround for "Not enough space" error on
57 # writing large size of data to console.
57 # writing large size of data to console.
58 limit = 16000
58 limit = 16000
59 l = len(s)
59 l = len(s)
60 start = 0
60 start = 0
61 self.softspace = 0
61 self.softspace = 0
62 while start < l:
62 while start < l:
63 end = start + limit
63 end = start + limit
64 self.fp.write(s[start:end])
64 self.fp.write(s[start:end])
65 start = end
65 start = end
66 except IOError, inst:
66 except IOError, inst:
67 if inst.errno != 0:
67 if inst.errno != 0:
68 raise
68 raise
69 self.close()
69 self.close()
70 raise IOError(errno.EPIPE, 'Broken pipe')
70 raise IOError(errno.EPIPE, 'Broken pipe')
71
71
72 def flush(self):
72 def flush(self):
73 try:
73 try:
74 return self.fp.flush()
74 return self.fp.flush()
75 except IOError, inst:
75 except IOError, inst:
76 if inst.errno != errno.EINVAL:
76 if inst.errno != errno.EINVAL:
77 raise
77 raise
78 self.close()
78 self.close()
79 raise IOError(errno.EPIPE, 'Broken pipe')
79 raise IOError(errno.EPIPE, 'Broken pipe')
80
80
81 sys.__stdout__ = sys.stdout = winstdout(sys.stdout)
81 sys.__stdout__ = sys.stdout = winstdout(sys.stdout)
82
82
83 def _is_win_9x():
83 def _is_win_9x():
84 '''return true if run on windows 95, 98 or me.'''
84 '''return true if run on windows 95, 98 or me.'''
85 try:
85 try:
86 return sys.getwindowsversion()[3] == 1
86 return sys.getwindowsversion()[3] == 1
87 except AttributeError:
87 except AttributeError:
88 return 'command' in os.environ.get('comspec', '')
88 return 'command' in os.environ.get('comspec', '')
89
89
90 def openhardlinks():
90 def openhardlinks():
91 return not _is_win_9x()
91 return not _is_win_9x()
92
92
93 def parsepatchoutput(output_line):
93 def parsepatchoutput(output_line):
94 """parses the output produced by patch and returns the filename"""
94 """parses the output produced by patch and returns the filename"""
95 pf = output_line[14:]
95 pf = output_line[14:]
96 if pf[0] == '`':
96 if pf[0] == '`':
97 pf = pf[1:-1] # Remove the quotes
97 pf = pf[1:-1] # Remove the quotes
98 return pf
98 return pf
99
99
100 def sshargs(sshcmd, host, user, port):
100 def sshargs(sshcmd, host, user, port):
101 '''Build argument list for ssh or Plink'''
101 '''Build argument list for ssh or Plink'''
102 pflag = 'plink' in sshcmd.lower() and '-P' or '-p'
102 pflag = 'plink' in sshcmd.lower() and '-P' or '-p'
103 args = user and ("%s@%s" % (user, host)) or host
103 args = user and ("%s@%s" % (user, host)) or host
104 return port and ("%s %s %s" % (args, pflag, port)) or args
104 return port and ("%s %s %s" % (args, pflag, port)) or args
105
105
106 def setflags(f, l, x):
106 def setflags(f, l, x):
107 pass
107 pass
108
108
109 def copymode(src, dst, mode=None):
109 def copymode(src, dst, mode=None):
110 pass
110 pass
111
111
112 def checkexec(path):
112 def checkexec(path):
113 return False
113 return False
114
114
115 def checklink(path):
115 def checklink(path):
116 return False
116 return False
117
117
118 def setbinary(fd):
118 def setbinary(fd):
119 # When run without console, pipes may expose invalid
119 # When run without console, pipes may expose invalid
120 # fileno(), usually set to -1.
120 # fileno(), usually set to -1.
121 fno = getattr(fd, 'fileno', None)
121 fno = getattr(fd, 'fileno', None)
122 if fno is not None and fno() >= 0:
122 if fno is not None and fno() >= 0:
123 msvcrt.setmode(fno(), os.O_BINARY)
123 msvcrt.setmode(fno(), os.O_BINARY)
124
124
125 def pconvert(path):
125 def pconvert(path):
126 return path.replace(os.sep, '/')
126 return path.replace(os.sep, '/')
127
127
128 def localpath(path):
128 def localpath(path):
129 return path.replace('/', '\\')
129 return path.replace('/', '\\')
130
130
131 def normpath(path):
131 def normpath(path):
132 return pconvert(os.path.normpath(path))
132 return pconvert(os.path.normpath(path))
133
133
134 encodinglower = None
134 encodinglower = None
135 encodingupper = None
135 encodingupper = None
136
136
137 def normcase(path):
137 def normcase(path):
138 return encodingupper(path)
138 return encodingupper(path)
139
139
140 def realpath(path):
140 def realpath(path):
141 '''
141 '''
142 Returns the true, canonical file system path equivalent to the given
142 Returns the true, canonical file system path equivalent to the given
143 path.
143 path.
144 '''
144 '''
145 # TODO: There may be a more clever way to do this that also handles other,
145 # TODO: There may be a more clever way to do this that also handles other,
146 # less common file systems.
146 # less common file systems.
147 return os.path.normpath(normcase(os.path.realpath(path)))
147 return os.path.normpath(normcase(os.path.realpath(path)))
148
148
149 def samestat(s1, s2):
149 def samestat(s1, s2):
150 return False
150 return False
151
151
152 # A sequence of backslashes is special iff it precedes a double quote:
152 # A sequence of backslashes is special iff it precedes a double quote:
153 # - if there's an even number of backslashes, the double quote is not
153 # - if there's an even number of backslashes, the double quote is not
154 # quoted (i.e. it ends the quoted region)
154 # quoted (i.e. it ends the quoted region)
155 # - if there's an odd number of backslashes, the double quote is quoted
155 # - if there's an odd number of backslashes, the double quote is quoted
156 # - in both cases, every pair of backslashes is unquoted into a single
156 # - in both cases, every pair of backslashes is unquoted into a single
157 # backslash
157 # backslash
158 # (See http://msdn2.microsoft.com/en-us/library/a1y7w461.aspx )
158 # (See http://msdn2.microsoft.com/en-us/library/a1y7w461.aspx )
159 # So, to quote a string, we must surround it in double quotes, double
159 # So, to quote a string, we must surround it in double quotes, double
160 # the number of backslashes that preceed double quotes and add another
160 # the number of backslashes that preceed double quotes and add another
161 # backslash before every double quote (being careful with the double
161 # backslash before every double quote (being careful with the double
162 # quote we've appended to the end)
162 # quote we've appended to the end)
163 _quotere = None
163 _quotere = None
164 def shellquote(s):
164 def shellquote(s):
165 global _quotere
165 global _quotere
166 if _quotere is None:
166 if _quotere is None:
167 _quotere = re.compile(r'(\\*)("|\\$)')
167 _quotere = re.compile(r'(\\*)("|\\$)')
168 return '"%s"' % _quotere.sub(r'\1\1\\\2', s)
168 return '"%s"' % _quotere.sub(r'\1\1\\\2', s)
169
169
170 def quotecommand(cmd):
170 def quotecommand(cmd):
171 """Build a command string suitable for os.popen* calls."""
171 """Build a command string suitable for os.popen* calls."""
172 if sys.version_info < (2, 7, 1):
172 if sys.version_info < (2, 7, 1):
173 # Python versions since 2.7.1 do this extra quoting themselves
173 # Python versions since 2.7.1 do this extra quoting themselves
174 return '"' + cmd + '"'
174 return '"' + cmd + '"'
175 return cmd
175 return cmd
176
176
177 def popen(command, mode='r'):
177 def popen(command, mode='r'):
178 # Work around "popen spawned process may not write to stdout
178 # Work around "popen spawned process may not write to stdout
179 # under windows"
179 # under windows"
180 # http://bugs.python.org/issue1366
180 # http://bugs.python.org/issue1366
181 command += " 2> %s" % nulldev
181 command += " 2> %s" % nulldev
182 return os.popen(quotecommand(command), mode)
182 return os.popen(quotecommand(command), mode)
183
183
184 def explainexit(code):
184 def explainexit(code):
185 return _("exited with status %d") % code, code
185 return _("exited with status %d") % code, code
186
186
187 # if you change this stub into a real check, please try to implement the
187 # if you change this stub into a real check, please try to implement the
188 # username and groupname functions above, too.
188 # username and groupname functions above, too.
189 def isowner(st):
189 def isowner(st):
190 return True
190 return True
191
191
192 def findexe(command):
192 def findexe(command):
193 '''Find executable for command searching like cmd.exe does.
193 '''Find executable for command searching like cmd.exe does.
194 If command is a basename then PATH is searched for command.
194 If command is a basename then PATH is searched for command.
195 PATH isn't searched if command is an absolute or relative path.
195 PATH isn't searched if command is an absolute or relative path.
196 An extension from PATHEXT is found and added if not present.
196 An extension from PATHEXT is found and added if not present.
197 If command isn't found None is returned.'''
197 If command isn't found None is returned.'''
198 pathext = os.environ.get('PATHEXT', '.COM;.EXE;.BAT;.CMD')
198 pathext = os.environ.get('PATHEXT', '.COM;.EXE;.BAT;.CMD')
199 pathexts = [ext for ext in pathext.lower().split(os.pathsep)]
199 pathexts = [ext for ext in pathext.lower().split(os.pathsep)]
200 if os.path.splitext(command)[1].lower() in pathexts:
200 if os.path.splitext(command)[1].lower() in pathexts:
201 pathexts = ['']
201 pathexts = ['']
202
202
203 def findexisting(pathcommand):
203 def findexisting(pathcommand):
204 'Will append extension (if needed) and return existing file'
204 'Will append extension (if needed) and return existing file'
205 for ext in pathexts:
205 for ext in pathexts:
206 executable = pathcommand + ext
206 executable = pathcommand + ext
207 if os.path.exists(executable):
207 if os.path.exists(executable):
208 return executable
208 return executable
209 return None
209 return None
210
210
211 if os.sep in command:
211 if os.sep in command:
212 return findexisting(command)
212 return findexisting(command)
213
213
214 for path in os.environ.get('PATH', '').split(os.pathsep):
214 for path in os.environ.get('PATH', '').split(os.pathsep):
215 executable = findexisting(os.path.join(path, command))
215 executable = findexisting(os.path.join(path, command))
216 if executable is not None:
216 if executable is not None:
217 return executable
217 return executable
218 return findexisting(os.path.expanduser(os.path.expandvars(command)))
218 return findexisting(os.path.expanduser(os.path.expandvars(command)))
219
219
220 def statfiles(files):
220 def statfiles(files):
221 '''Stat each file in files and yield stat or None if file does not exist.
221 '''Stat each file in files and yield stat or None if file does not exist.
222 Cluster and cache stat per directory to minimize number of OS stat calls.'''
222 Cluster and cache stat per directory to minimize number of OS stat calls.'''
223 dircache = {} # dirname -> filename -> status | None if file does not exist
223 dircache = {} # dirname -> filename -> status | None if file does not exist
224 for nf in files:
224 for nf in files:
225 nf = normcase(nf)
225 nf = normcase(nf)
226 dir, base = os.path.split(nf)
226 dir, base = os.path.split(nf)
227 if not dir:
227 if not dir:
228 dir = '.'
228 dir = '.'
229 cache = dircache.get(dir, None)
229 cache = dircache.get(dir, None)
230 if cache is None:
230 if cache is None:
231 try:
231 try:
232 dmap = dict([(normcase(n), s)
232 dmap = dict([(normcase(n), s)
233 for n, k, s in osutil.listdir(dir, True)])
233 for n, k, s in osutil.listdir(dir, True)])
234 except OSError, err:
234 except OSError, err:
235 # handle directory not found in Python version prior to 2.5
235 # handle directory not found in Python version prior to 2.5
236 # Python <= 2.4 returns native Windows code 3 in errno
236 # Python <= 2.4 returns native Windows code 3 in errno
237 # Python >= 2.5 returns ENOENT and adds winerror field
237 # Python >= 2.5 returns ENOENT and adds winerror field
238 # EINVAL is raised if dir is not a directory.
238 # EINVAL is raised if dir is not a directory.
239 if err.errno not in (3, errno.ENOENT, errno.EINVAL,
239 if err.errno not in (3, errno.ENOENT, errno.EINVAL,
240 errno.ENOTDIR):
240 errno.ENOTDIR):
241 raise
241 raise
242 dmap = {}
242 dmap = {}
243 cache = dircache.setdefault(dir, dmap)
243 cache = dircache.setdefault(dir, dmap)
244 yield cache.get(base, None)
244 yield cache.get(base, None)
245
245
246 def username(uid=None):
246 def username(uid=None):
247 """Return the name of the user with the given uid.
247 """Return the name of the user with the given uid.
248
248
249 If uid is None, return the name of the current user."""
249 If uid is None, return the name of the current user."""
250 return None
250 return None
251
251
252 def groupname(gid=None):
252 def groupname(gid=None):
253 """Return the name of the group with the given gid.
253 """Return the name of the group with the given gid.
254
254
255 If gid is None, return the name of the current group."""
255 If gid is None, return the name of the current group."""
256 return None
256 return None
257
257
258 def _removedirs(name):
258 def _removedirs(name):
259 """special version of os.removedirs that does not remove symlinked
259 """special version of os.removedirs that does not remove symlinked
260 directories or junction points if they actually contain files"""
260 directories or junction points if they actually contain files"""
261 if osutil.listdir(name):
261 if osutil.listdir(name):
262 return
262 return
263 os.rmdir(name)
263 os.rmdir(name)
264 head, tail = os.path.split(name)
264 head, tail = os.path.split(name)
265 if not tail:
265 if not tail:
266 head, tail = os.path.split(head)
266 head, tail = os.path.split(head)
267 while head and tail:
267 while head and tail:
268 try:
268 try:
269 if osutil.listdir(head):
269 if osutil.listdir(head):
270 return
270 return
271 os.rmdir(head)
271 os.rmdir(head)
272 except (ValueError, OSError):
272 except (ValueError, OSError):
273 break
273 break
274 head, tail = os.path.split(head)
274 head, tail = os.path.split(head)
275
275
276 def unlinkpath(f):
276 def unlinkpath(f):
277 """unlink and remove the directory if it is empty"""
277 """unlink and remove the directory if it is empty"""
278 unlink(f)
278 unlink(f)
279 # try removing directories that might now be empty
279 # try removing directories that might now be empty
280 try:
280 try:
281 _removedirs(os.path.dirname(f))
281 _removedirs(os.path.dirname(f))
282 except OSError:
282 except OSError:
283 pass
283 pass
284
284
285 def rename(src, dst):
285 def rename(src, dst):
286 '''atomically rename file src to dst, replacing dst if it exists'''
286 '''atomically rename file src to dst, replacing dst if it exists'''
287 try:
287 try:
288 os.rename(src, dst)
288 os.rename(src, dst)
289 except OSError, e:
289 except OSError, e:
290 if e.errno != errno.EEXIST:
290 if e.errno != errno.EEXIST:
291 raise
291 raise
292 unlink(dst)
292 unlink(dst)
293 os.rename(src, dst)
293 os.rename(src, dst)
294
294
295 def gethgcmd():
295 def gethgcmd():
296 return [sys.executable] + sys.argv[:1]
296 return [sys.executable] + sys.argv[:1]
297
297
298 def termwidth():
298 def termwidth():
299 # cmd.exe does not handle CR like a unix console, the CR is
299 # cmd.exe does not handle CR like a unix console, the CR is
300 # counted in the line length. On 80 columns consoles, if 80
300 # counted in the line length. On 80 columns consoles, if 80
301 # characters are written, the following CR won't apply on the
301 # characters are written, the following CR won't apply on the
302 # current line but on the new one. Keep room for it.
302 # current line but on the new one. Keep room for it.
303 return 79
303 return 79
304
304
305 def groupmembers(name):
305 def groupmembers(name):
306 # Don't support groups on Windows for now
306 # Don't support groups on Windows for now
307 raise KeyError()
307 raise KeyError
308
308
309 def isexec(f):
309 def isexec(f):
310 return False
310 return False
311
311
312 class cachestat(object):
312 class cachestat(object):
313 def __init__(self, path):
313 def __init__(self, path):
314 pass
314 pass
315
315
316 def cacheable(self):
316 def cacheable(self):
317 return False
317 return False
318
318
319 expandglobs = True
319 expandglobs = True
@@ -1,260 +1,260 b''
1 import sys, os, struct, subprocess, cStringIO, re, shutil
1 import sys, os, struct, subprocess, cStringIO, re, shutil
2
2
3 def connect(path=None):
3 def connect(path=None):
4 cmdline = ['hg', 'serve', '--cmdserver', 'pipe']
4 cmdline = ['hg', 'serve', '--cmdserver', 'pipe']
5 if path:
5 if path:
6 cmdline += ['-R', path]
6 cmdline += ['-R', path]
7
7
8 server = subprocess.Popen(cmdline, stdin=subprocess.PIPE,
8 server = subprocess.Popen(cmdline, stdin=subprocess.PIPE,
9 stdout=subprocess.PIPE)
9 stdout=subprocess.PIPE)
10
10
11 return server
11 return server
12
12
13 def writeblock(server, data):
13 def writeblock(server, data):
14 server.stdin.write(struct.pack('>I', len(data)))
14 server.stdin.write(struct.pack('>I', len(data)))
15 server.stdin.write(data)
15 server.stdin.write(data)
16 server.stdin.flush()
16 server.stdin.flush()
17
17
18 def readchannel(server):
18 def readchannel(server):
19 data = server.stdout.read(5)
19 data = server.stdout.read(5)
20 if not data:
20 if not data:
21 raise EOFError()
21 raise EOFError
22 channel, length = struct.unpack('>cI', data)
22 channel, length = struct.unpack('>cI', data)
23 if channel in 'IL':
23 if channel in 'IL':
24 return channel, length
24 return channel, length
25 else:
25 else:
26 return channel, server.stdout.read(length)
26 return channel, server.stdout.read(length)
27
27
28 def runcommand(server, args, output=sys.stdout, error=sys.stderr, input=None):
28 def runcommand(server, args, output=sys.stdout, error=sys.stderr, input=None):
29 print ' runcommand', ' '.join(args)
29 print ' runcommand', ' '.join(args)
30 sys.stdout.flush()
30 sys.stdout.flush()
31 server.stdin.write('runcommand\n')
31 server.stdin.write('runcommand\n')
32 writeblock(server, '\0'.join(args))
32 writeblock(server, '\0'.join(args))
33
33
34 if not input:
34 if not input:
35 input = cStringIO.StringIO()
35 input = cStringIO.StringIO()
36
36
37 while True:
37 while True:
38 ch, data = readchannel(server)
38 ch, data = readchannel(server)
39 if ch == 'o':
39 if ch == 'o':
40 output.write(data)
40 output.write(data)
41 output.flush()
41 output.flush()
42 elif ch == 'e':
42 elif ch == 'e':
43 error.write(data)
43 error.write(data)
44 error.flush()
44 error.flush()
45 elif ch == 'I':
45 elif ch == 'I':
46 writeblock(server, input.read(data))
46 writeblock(server, input.read(data))
47 elif ch == 'L':
47 elif ch == 'L':
48 writeblock(server, input.readline(data))
48 writeblock(server, input.readline(data))
49 elif ch == 'r':
49 elif ch == 'r':
50 return struct.unpack('>i', data)[0]
50 return struct.unpack('>i', data)[0]
51 else:
51 else:
52 print "unexpected channel %c: %r" % (ch, data)
52 print "unexpected channel %c: %r" % (ch, data)
53 if ch.isupper():
53 if ch.isupper():
54 return
54 return
55
55
56 def check(func, repopath=None):
56 def check(func, repopath=None):
57 print
57 print
58 print 'testing %s:' % func.__name__
58 print 'testing %s:' % func.__name__
59 print
59 print
60 sys.stdout.flush()
60 sys.stdout.flush()
61 server = connect(repopath)
61 server = connect(repopath)
62 try:
62 try:
63 return func(server)
63 return func(server)
64 finally:
64 finally:
65 server.stdin.close()
65 server.stdin.close()
66 server.wait()
66 server.wait()
67
67
68 def unknowncommand(server):
68 def unknowncommand(server):
69 server.stdin.write('unknowncommand\n')
69 server.stdin.write('unknowncommand\n')
70
70
71 def hellomessage(server):
71 def hellomessage(server):
72 ch, data = readchannel(server)
72 ch, data = readchannel(server)
73 # escaping python tests output not supported
73 # escaping python tests output not supported
74 print '%c, %r' % (ch, re.sub('encoding: [a-zA-Z0-9-]+', 'encoding: ***',
74 print '%c, %r' % (ch, re.sub('encoding: [a-zA-Z0-9-]+', 'encoding: ***',
75 data))
75 data))
76
76
77 # run an arbitrary command to make sure the next thing the server sends
77 # run an arbitrary command to make sure the next thing the server sends
78 # isn't part of the hello message
78 # isn't part of the hello message
79 runcommand(server, ['id'])
79 runcommand(server, ['id'])
80
80
81 def checkruncommand(server):
81 def checkruncommand(server):
82 # hello block
82 # hello block
83 readchannel(server)
83 readchannel(server)
84
84
85 # no args
85 # no args
86 runcommand(server, [])
86 runcommand(server, [])
87
87
88 # global options
88 # global options
89 runcommand(server, ['id', '--quiet'])
89 runcommand(server, ['id', '--quiet'])
90
90
91 # make sure global options don't stick through requests
91 # make sure global options don't stick through requests
92 runcommand(server, ['id'])
92 runcommand(server, ['id'])
93
93
94 # --config
94 # --config
95 runcommand(server, ['id', '--config', 'ui.quiet=True'])
95 runcommand(server, ['id', '--config', 'ui.quiet=True'])
96
96
97 # make sure --config doesn't stick
97 # make sure --config doesn't stick
98 runcommand(server, ['id'])
98 runcommand(server, ['id'])
99
99
100 def inputeof(server):
100 def inputeof(server):
101 readchannel(server)
101 readchannel(server)
102 server.stdin.write('runcommand\n')
102 server.stdin.write('runcommand\n')
103 # close stdin while server is waiting for input
103 # close stdin while server is waiting for input
104 server.stdin.close()
104 server.stdin.close()
105
105
106 # server exits with 1 if the pipe closed while reading the command
106 # server exits with 1 if the pipe closed while reading the command
107 print 'server exit code =', server.wait()
107 print 'server exit code =', server.wait()
108
108
109 def serverinput(server):
109 def serverinput(server):
110 readchannel(server)
110 readchannel(server)
111
111
112 patch = """
112 patch = """
113 # HG changeset patch
113 # HG changeset patch
114 # User test
114 # User test
115 # Date 0 0
115 # Date 0 0
116 # Node ID c103a3dec114d882c98382d684d8af798d09d857
116 # Node ID c103a3dec114d882c98382d684d8af798d09d857
117 # Parent 0000000000000000000000000000000000000000
117 # Parent 0000000000000000000000000000000000000000
118 1
118 1
119
119
120 diff -r 000000000000 -r c103a3dec114 a
120 diff -r 000000000000 -r c103a3dec114 a
121 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
121 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
122 +++ b/a Thu Jan 01 00:00:00 1970 +0000
122 +++ b/a Thu Jan 01 00:00:00 1970 +0000
123 @@ -0,0 +1,1 @@
123 @@ -0,0 +1,1 @@
124 +1
124 +1
125 """
125 """
126
126
127 runcommand(server, ['import', '-'], input=cStringIO.StringIO(patch))
127 runcommand(server, ['import', '-'], input=cStringIO.StringIO(patch))
128 runcommand(server, ['log'])
128 runcommand(server, ['log'])
129
129
def cwd(server):
    """Check that --cwd applies to a single request only."""
    readchannel(server)
    os.mkdir('foo')
    with open('foo/bar', 'wb') as fp:
        fp.write('a')
    # first request runs inside foo/, second must be back at the repo root
    runcommand(server, ['--cwd', 'foo', 'st', 'bar'])
    runcommand(server, ['st', 'foo/bar'])
    os.remove('foo/bar')
def localhgrc(server):
    """Check that the cached repo's local config is not inherited via -R."""
    readchannel(server)

    # the cached repo's .hg/hgrc sets ui.foo=bar, so showconfig reports it
    runcommand(server, ['showconfig'])

    # a fresh repository addressed with -R must not see that setting
    runcommand(server, ['init', 'foo'])
    runcommand(server, ['-R', 'foo', 'showconfig', 'ui', 'defaults'])
    shutil.rmtree('foo')
def hook(**args):
    # In-process hook installed by hookoutput() below; shows that a
    # python: hook can both write output and read the server's stdin.
    print 'hook talking'
    print 'now try to read something: %r' % sys.stdin.read()
def hookoutput(server):
    """Run 'id' with a python pre-identify hook, feeding the hook stdin data."""
    readchannel(server)
    hookcfg = 'hooks.pre-identify=python:test-commandserver.hook'
    runcommand(server, ['--config', hookcfg, 'id'],
               input=cStringIO.StringIO('some input'))
def outsidechanges(server):
    """Working-copy and history changes made outside the server must show up."""
    readchannel(server)
    with open('a', 'ab') as fp:
        fp.write('a\n')
    runcommand(server, ['status'])
    # commit behind the server's back, then query it again
    os.system('hg ci -Am2')
    runcommand(server, ['tip'])
    runcommand(server, ['status'])
def bookmarks(server):
    """Bookmark changes made by outside processes must be picked up."""
    readchannel(server)
    runcommand(server, ['bookmarks'])

    # outside processes rewrite .hg/bookmarks
    os.system('hg bookmark -i bm1')
    os.system('hg bookmark -i bm2')
    runcommand(server, ['bookmarks'])

    # an outside update rewrites .hg/bookmarks.current
    os.system('hg upd bm1 -q')
    runcommand(server, ['bookmarks'])

    # committing through the server should advance the active bookmark
    runcommand(server, ['bookmarks', 'bm3'])
    with open('a', 'ab') as fp:
        fp.write('a\n')
    runcommand(server, ['commit', '-Amm'])
    runcommand(server, ['bookmarks'])
def tagscache(server):
    """Tags added by an outside process must show up on the next request."""
    readchannel(server)
    showtags = ['id', '-t', '-r', '0']
    runcommand(server, showtags)
    os.system('hg tag -r 0 foo')
    runcommand(server, showtags)
def setphase(server):
    """Phase changes made by an outside process must be picked up."""
    readchannel(server)
    showphase = ['phase', '-r', '.']
    runcommand(server, showphase)
    os.system('hg phase -r . -p')
    runcommand(server, showphase)
def rollback(server):
    """Exercise rollback of a commit made through the server."""
    readchannel(server)
    runcommand(server, ['phase', '-r', '.', '-p'])
    with open('a', 'ab') as fp:
        fp.write('a\n')
    runcommand(server, ['commit', '-Am.'])
    # rolling back must also restore the pre-commit phase
    runcommand(server, ['rollback'])
    runcommand(server, ['phase', '-r', '.'])
def branch(server):
    """A branch switch done by an outside process must be visible."""
    readchannel(server)
    runcommand(server, ['branch'])
    os.system('hg branch foo')
    runcommand(server, ['branch'])
    # restore the default branch so later checks start clean
    os.system('hg branch default')
def hgignore(server):
    """Edits to .hgignore must take effect without restarting the server."""
    readchannel(server)
    with open('.hgignore', 'ab') as fp:
        fp.write('')
    runcommand(server, ['commit', '-Am.'])
    # create a file, then ignore it after the fact
    with open('ignored-file', 'ab') as fp:
        fp.write('')
    with open('.hgignore', 'ab') as fp:
        fp.write('ignored-file')
    runcommand(server, ['status', '-i', '-u'])
if __name__ == '__main__':
    os.system('hg init')

    # checks that want a pristine repository configuration
    for case in (hellomessage, unknowncommand, checkruncommand,
                 inputeof, serverinput, cwd):
        check(case)

    # the remaining checks expect ui.foo=bar in the repo-local hgrc
    with open('.hg/hgrc', 'a') as hgrc:
        hgrc.write('[ui]\nfoo=bar\n')
    for case in (localhgrc, hookoutput, outsidechanges, bookmarks,
                 tagscache, setphase, rollback, branch, hgignore):
        check(case)
General Comments 0
You need to be logged in to leave comments. Login now