typos: "it's" -> "its"
Martin Geisler
r9087:f48454a2 default
@@ -1,549 +1,549
1 # record.py
2 #
3 # Copyright 2007 Bryan O'Sullivan <bos@serpentine.com>
4 #
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2, incorporated herein by reference.
7
8 '''commands to interactively select changes for commit/qrefresh'''
9
10 from mercurial.i18n import gettext, _
11 from mercurial import cmdutil, commands, extensions, hg, mdiff, patch
12 from mercurial import util
13 import copy, cStringIO, errno, operator, os, re, tempfile
14
15 lines_re = re.compile(r'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
16
17 def scanpatch(fp):
18 """like patch.iterhunks, but yield different events
19
20 - ('file', [header_lines + fromfile + tofile])
21 - ('context', [context_lines])
22 - ('hunk', [hunk_lines])
23 - ('range', (-start,len, +start,len, diffp))
24 """
25 lr = patch.linereader(fp)
26
27 def scanwhile(first, p):
28 """scan lr while predicate holds"""
29 lines = [first]
30 while True:
31 line = lr.readline()
32 if not line:
33 break
34 if p(line):
35 lines.append(line)
36 else:
37 lr.push(line)
38 break
39 return lines
40
41 while True:
42 line = lr.readline()
43 if not line:
44 break
45 if line.startswith('diff --git a/'):
46 def notheader(line):
47 s = line.split(None, 1)
48 return not s or s[0] not in ('---', 'diff')
49 header = scanwhile(line, notheader)
50 fromfile = lr.readline()
51 if fromfile.startswith('---'):
52 tofile = lr.readline()
53 header += [fromfile, tofile]
54 else:
55 lr.push(fromfile)
56 yield 'file', header
57 elif line[0] == ' ':
58 yield 'context', scanwhile(line, lambda l: l[0] in ' \\')
59 elif line[0] in '-+':
60 yield 'hunk', scanwhile(line, lambda l: l[0] in '-+\\')
61 else:
62 m = lines_re.match(line)
63 if m:
64 yield 'range', m.groups()
65 else:
66 raise patch.PatchError('unknown patch content: %r' % line)
67
68 class header(object):
69 """patch header
70
71 XXX shoudn't we move this to mercurial/patch.py ?
72 """
73 diff_re = re.compile('diff --git a/(.*) b/(.*)$')
74 allhunks_re = re.compile('(?:index|new file|deleted file) ')
75 pretty_re = re.compile('(?:new file|deleted file) ')
76 special_re = re.compile('(?:index|new|deleted|copy|rename) ')
77
78 def __init__(self, header):
79 self.header = header
80 self.hunks = []
81
82 def binary(self):
83 for h in self.header:
84 if h.startswith('index '):
85 return True
86
87 def pretty(self, fp):
88 for h in self.header:
89 if h.startswith('index '):
90 fp.write(_('this modifies a binary file (all or nothing)\n'))
91 break
92 if self.pretty_re.match(h):
93 fp.write(h)
94 if self.binary():
95 fp.write(_('this is a binary file\n'))
96 break
97 if h.startswith('---'):
98 fp.write(_('%d hunks, %d lines changed\n') %
99 (len(self.hunks),
100 sum([h.added + h.removed for h in self.hunks])))
101 break
102 fp.write(h)
103
104 def write(self, fp):
105 fp.write(''.join(self.header))
106
107 def allhunks(self):
108 for h in self.header:
109 if self.allhunks_re.match(h):
110 return True
111
112 def files(self):
113 fromfile, tofile = self.diff_re.match(self.header[0]).groups()
114 if fromfile == tofile:
115 return [fromfile]
116 return [fromfile, tofile]
117
118 def filename(self):
119 return self.files()[-1]
120
121 def __repr__(self):
122 return '<header %s>' % (' '.join(map(repr, self.files())))
123
124 def special(self):
125 for h in self.header:
126 if self.special_re.match(h):
127 return True
128
129 def countchanges(hunk):
130 """hunk -> (n+,n-)"""
131 add = len([h for h in hunk if h[0] == '+'])
132 rem = len([h for h in hunk if h[0] == '-'])
133 return add, rem
134
135 class hunk(object):
136 """patch hunk
137
138 XXX shouldn't we merge this with patch.hunk ?
139 """
140 maxcontext = 3
141
142 def __init__(self, header, fromline, toline, proc, before, hunk, after):
143 def trimcontext(number, lines):
144 delta = len(lines) - self.maxcontext
145 if False and delta > 0:
146 return number + delta, lines[:self.maxcontext]
147 return number, lines
148
149 self.header = header
150 self.fromline, self.before = trimcontext(fromline, before)
151 self.toline, self.after = trimcontext(toline, after)
152 self.proc = proc
153 self.hunk = hunk
154 self.added, self.removed = countchanges(self.hunk)
155
156 def write(self, fp):
157 delta = len(self.before) + len(self.after)
158 if self.after and self.after[-1] == '\\ No newline at end of file\n':
159 delta -= 1
160 fromlen = delta + self.removed
161 tolen = delta + self.added
162 fp.write('@@ -%d,%d +%d,%d @@%s\n' %
163 (self.fromline, fromlen, self.toline, tolen,
164 self.proc and (' ' + self.proc)))
165 fp.write(''.join(self.before + self.hunk + self.after))
166
167 pretty = write
168
169 def filename(self):
170 return self.header.filename()
171
172 def __repr__(self):
173 return '<hunk %r@%d>' % (self.filename(), self.fromline)
174
175 def parsepatch(fp):
176 """patch -> [] of hunks """
177 class parser(object):
178 """patch parsing state machine"""
179 def __init__(self):
180 self.fromline = 0
181 self.toline = 0
182 self.proc = ''
183 self.header = None
184 self.context = []
185 self.before = []
186 self.hunk = []
187 self.stream = []
188
189 def addrange(self, (fromstart, fromend, tostart, toend, proc)):
190 self.fromline = int(fromstart)
191 self.toline = int(tostart)
192 self.proc = proc
193
194 def addcontext(self, context):
195 if self.hunk:
196 h = hunk(self.header, self.fromline, self.toline, self.proc,
197 self.before, self.hunk, context)
198 self.header.hunks.append(h)
199 self.stream.append(h)
200 self.fromline += len(self.before) + h.removed
201 self.toline += len(self.before) + h.added
202 self.before = []
203 self.hunk = []
204 self.proc = ''
205 self.context = context
206
207 def addhunk(self, hunk):
208 if self.context:
209 self.before = self.context
210 self.context = []
211 self.hunk = hunk
212
213 def newfile(self, hdr):
214 self.addcontext([])
215 h = header(hdr)
216 self.stream.append(h)
217 self.header = h
218
219 def finished(self):
220 self.addcontext([])
221 return self.stream
222
223 transitions = {
224 'file': {'context': addcontext,
225 'file': newfile,
226 'hunk': addhunk,
227 'range': addrange},
228 'context': {'file': newfile,
229 'hunk': addhunk,
230 'range': addrange},
231 'hunk': {'context': addcontext,
232 'file': newfile,
233 'range': addrange},
234 'range': {'context': addcontext,
235 'hunk': addhunk},
236 }
237
238 p = parser()
239
240 state = 'context'
241 for newstate, data in scanpatch(fp):
242 try:
243 p.transitions[state][newstate](p, data)
244 except KeyError:
245 raise patch.PatchError('unhandled transition: %s -> %s' %
246 (state, newstate))
247 state = newstate
248 return p.finished()
249
250 def filterpatch(ui, chunks):
251 """Interactively filter patch chunks into applied-only chunks"""
252 chunks = list(chunks)
253 chunks.reverse()
254 seen = set()
255 def consumefile():
256 """fetch next portion from chunks until a 'header' is seen
257 NB: header == new-file mark
258 """
259 consumed = []
260 while chunks:
261 if isinstance(chunks[-1], header):
262 break
263 else:
264 consumed.append(chunks.pop())
265 return consumed
266
267 resp_all = [None] # this two are changed from inside prompt,
268 resp_file = [None] # so can't be usual variables
269 applied = {} # 'filename' -> [] of chunks
270 def prompt(query):
271 """prompt query, and process base inputs
272
273 - y/n for the rest of file
274 - y/n for the rest
275 - ? (help)
276 - q (quit)
277
278 else, input is returned to the caller.
279 """
280 if resp_all[0] is not None:
281 return resp_all[0]
282 if resp_file[0] is not None:
283 return resp_file[0]
284 while True:
285 resps = _('[Ynsfdaq?]')
286 choices = (_('&Yes, record this change'),
287 _('&No, skip this change'),
288 _('&Skip remaining changes to this file'),
289 _('Record remaining changes to this &file'),
290 _('&Done, skip remaining changes and files'),
291 _('Record &all changes to all remaining files'),
292 _('&Quit, recording no changes'),
293 _('&?'))
294 r = ui.promptchoice("%s %s " % (query, resps), choices)
295 if r == 7: # ?
296 doc = gettext(record.__doc__)
297 c = doc.find(_('y - record this change'))
298 for l in doc[c:].splitlines():
299 if l: ui.write(l.strip(), '\n')
300 continue
301 elif r == 0: # yes
302 ret = 'y'
303 elif r == 1: # no
304 ret = 'n'
305 elif r == 2: # Skip
306 ret = resp_file[0] = 'n'
307 elif r == 3: # file (Record remaining)
308 ret = resp_file[0] = 'y'
309 elif r == 4: # done, skip remaining
310 ret = resp_all[0] = 'n'
311 elif r == 5: # all
312 ret = resp_all[0] = 'y'
313 elif r == 6: # quit
314 raise util.Abort(_('user quit'))
315 return ret
316 pos, total = 0, len(chunks) - 1
317 while chunks:
318 chunk = chunks.pop()
319 if isinstance(chunk, header):
320 # new-file mark
321 resp_file = [None]
322 fixoffset = 0
323 hdr = ''.join(chunk.header)
324 if hdr in seen:
325 consumefile()
326 continue
327 seen.add(hdr)
328 if resp_all[0] is None:
329 chunk.pretty(ui)
330 r = prompt(_('examine changes to %s?') %
331 _(' and ').join(map(repr, chunk.files())))
332 if r == _('y'):
333 applied[chunk.filename()] = [chunk]
334 if chunk.allhunks():
335 applied[chunk.filename()] += consumefile()
336 else:
337 consumefile()
338 else:
339 # new hunk
340 if resp_file[0] is None and resp_all[0] is None:
341 chunk.pretty(ui)
342 r = total == 1 and prompt(_('record this change to %r?') %
343 chunk.filename()) \
344 or prompt(_('record change %d/%d to %r?') %
345 (pos, total, chunk.filename()))
346 if r == _('y'):
347 if fixoffset:
348 chunk = copy.copy(chunk)
349 chunk.toline += fixoffset
350 applied[chunk.filename()].append(chunk)
351 else:
352 fixoffset += chunk.removed - chunk.added
353 pos = pos + 1
354 return reduce(operator.add, [h for h in applied.itervalues()
355 if h[0].special() or len(h) > 1], [])
356
357 def record(ui, repo, *pats, **opts):
358 '''interactively select changes to commit
359
360 If a list of files is omitted, all changes reported by "hg status" will be
361 candidates for recording.
362
363 See 'hg help dates' for a list of formats valid for -d/--date.
364
365 You will be prompted for whether to record changes to each modified file,
366 and for files with multiple changes, for each change to use. For each
367 query, the following responses are possible:
368
369 y - record this change
370 n - skip this change
371
372 s - skip remaining changes to this file
373 f - record remaining changes to this file
374
375 d - done, skip remaining changes and files
376 a - record all changes to all remaining files
377 q - quit, recording no changes
378
379 ? - display help'''
380
381 def record_committer(ui, repo, pats, opts):
382 commands.commit(ui, repo, *pats, **opts)
383
384 dorecord(ui, repo, record_committer, *pats, **opts)
385
386
387 def qrecord(ui, repo, patch, *pats, **opts):
388 '''interactively record a new patch
389
390 See 'hg help qnew' & 'hg help record' for more information and usage.
391 '''
392
393 try:
394 mq = extensions.find('mq')
395 except KeyError:
396 raise util.Abort(_("'mq' extension not loaded"))
397
398 def qrecord_committer(ui, repo, pats, opts):
399 mq.new(ui, repo, patch, *pats, **opts)
400
401 opts = opts.copy()
402 opts['force'] = True # always 'qnew -f'
403 dorecord(ui, repo, qrecord_committer, *pats, **opts)
404
405
406 def dorecord(ui, repo, committer, *pats, **opts):
407 if not ui.interactive():
408 raise util.Abort(_('running non-interactively, use commit instead'))
409
410 def recordfunc(ui, repo, message, match, opts):
411 """This is generic record driver.
412
- 413 It's job is to interactively filter local changes, and accordingly
+ 413 Its job is to interactively filter local changes, and accordingly
414 prepare working dir into a state, where the job can be delegated to
415 non-interactive commit command such as 'commit' or 'qrefresh'.
416
417 After the actual job is done by non-interactive command, working dir
418 state is restored to original.
419
420 In the end we'll record intresting changes, and everything else will be
421 left in place, so the user can continue his work.
422 """
423
424 changes = repo.status(match=match)[:3]
425 diffopts = mdiff.diffopts(git=True, nodates=True)
426 chunks = patch.diff(repo, changes=changes, opts=diffopts)
427 fp = cStringIO.StringIO()
428 fp.write(''.join(chunks))
429 fp.seek(0)
430
431 # 1. filter patch, so we have intending-to apply subset of it
432 chunks = filterpatch(ui, parsepatch(fp))
433 del fp
434
435 contenders = set()
436 for h in chunks:
437 try: contenders.update(set(h.files()))
438 except AttributeError: pass
439
440 changed = changes[0] + changes[1] + changes[2]
441 newfiles = [f for f in changed if f in contenders]
442 if not newfiles:
443 ui.status(_('no changes to record\n'))
444 return 0
445
446 modified = set(changes[0])
447
448 # 2. backup changed files, so we can restore them in the end
449 backups = {}
450 backupdir = repo.join('record-backups')
451 try:
452 os.mkdir(backupdir)
453 except OSError, err:
454 if err.errno != errno.EEXIST:
455 raise
456 try:
457 # backup continues
458 for f in newfiles:
459 if f not in modified:
460 continue
461 fd, tmpname = tempfile.mkstemp(prefix=f.replace('/', '_')+'.',
462 dir=backupdir)
463 os.close(fd)
464 ui.debug(_('backup %r as %r\n') % (f, tmpname))
465 util.copyfile(repo.wjoin(f), tmpname)
466 backups[f] = tmpname
467
468 fp = cStringIO.StringIO()
469 for c in chunks:
470 if c.filename() in backups:
471 c.write(fp)
472 dopatch = fp.tell()
473 fp.seek(0)
474
475 # 3a. apply filtered patch to clean repo (clean)
476 if backups:
477 hg.revert(repo, repo.dirstate.parents()[0], backups.has_key)
478
479 # 3b. (apply)
480 if dopatch:
481 try:
482 ui.debug(_('applying patch\n'))
483 ui.debug(fp.getvalue())
484 pfiles = {}
485 patch.internalpatch(fp, ui, 1, repo.root, files=pfiles,
486 eolmode=None)
487 patch.updatedir(ui, repo, pfiles)
488 except patch.PatchError, err:
489 s = str(err)
490 if s:
491 raise util.Abort(s)
492 else:
493 raise util.Abort(_('patch failed to apply'))
494 del fp
495
496 # 4. We prepared working directory according to filtered patch.
497 # Now is the time to delegate the job to commit/qrefresh or the like!
498
499 # it is important to first chdir to repo root -- we'll call a
500 # highlevel command with list of pathnames relative to repo root
501 cwd = os.getcwd()
502 os.chdir(repo.root)
503 try:
504 committer(ui, repo, newfiles, opts)
505 finally:
506 os.chdir(cwd)
507
508 return 0
509 finally:
510 # 5. finally restore backed-up files
511 try:
512 for realname, tmpname in backups.iteritems():
513 ui.debug(_('restoring %r to %r\n') % (tmpname, realname))
514 util.copyfile(tmpname, repo.wjoin(realname))
515 os.unlink(tmpname)
516 os.rmdir(backupdir)
517 except OSError:
518 pass
519 return cmdutil.commit(ui, repo, recordfunc, pats, opts)
520
521 cmdtable = {
522 "record":
523 (record,
524
525 # add commit options
526 commands.table['^commit|ci'][1],
527
528 _('hg record [OPTION]... [FILE]...')),
529 }
530
531
532 def extsetup():
533 try:
534 mq = extensions.find('mq')
535 except KeyError:
536 return
537
538 qcmdtable = {
539 "qrecord":
540 (qrecord,
541
542 # add qnew options, except '--force'
543 [opt for opt in mq.cmdtable['qnew'][1] if opt[1] != 'force'],
544
545 _('hg qrecord [OPTION]... PATCH [FILE]...')),
546 }
547
548 cmdtable.update(qcmdtable)
549
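For orientation, the pipeline that record.py assembles from the pieces shown above is: generate a git-style diff, parse it into header/hunk objects, let the user filter the hunks interactively, and re-emit only the selected ones. Below is a minimal sketch of that flow; it is not part of this changeset, and it assumes `ui` is an existing Mercurial ui object and `difftext` already holds diff text such as the output of `patch.diff`.

```python
import cStringIO

# Parse a git-style diff into header/hunk objects, then let the user
# pick hunks interactively, mirroring what dorecord() does above.
fp = cStringIO.StringIO(difftext)         # difftext: assumed pre-built diff text
chunks = filterpatch(ui, parsepatch(fp))  # prompts y/n/s/f/d/a/q per hunk

# Re-emit only the selected headers and hunks as a new patch.
out = cStringIO.StringIO()
for c in chunks:
    c.write(out)                          # header.write / hunk.write from above
print out.getvalue()
```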
@@ -1,140 +1,140
1 # changegroup.py - Mercurial changegroup manipulation functions
2 #
3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
4 #
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2, incorporated herein by reference.
7
8 from i18n import _
9 import util
10 import struct, os, bz2, zlib, tempfile
11
12 def getchunk(source):
13 """get a chunk from a changegroup"""
14 d = source.read(4)
15 if not d:
16 return ""
17 l = struct.unpack(">l", d)[0]
18 if l <= 4:
19 return ""
20 d = source.read(l - 4)
21 if len(d) < l - 4:
22 raise util.Abort(_("premature EOF reading chunk"
23 " (got %d bytes, expected %d)")
24 % (len(d), l - 4))
25 return d
26
27 def chunkiter(source):
28 """iterate through the chunks in source"""
29 while 1:
30 c = getchunk(source)
31 if not c:
32 break
33 yield c
34
35 def chunkheader(length):
36 """build a changegroup chunk header"""
37 return struct.pack(">l", length + 4)
38
39 def closechunk():
40 return struct.pack(">l", 0)
41
42 class nocompress(object):
43 def compress(self, x):
44 return x
45 def flush(self):
46 return ""
47
48 bundletypes = {
49 "": ("", nocompress),
50 "HG10UN": ("HG10UN", nocompress),
51 "HG10BZ": ("HG10", lambda: bz2.BZ2Compressor()),
52 "HG10GZ": ("HG10GZ", lambda: zlib.compressobj()),
53 }
54
- 55 # hgweb uses this list to communicate it's preferred type
+ 55 # hgweb uses this list to communicate its preferred type
56 bundlepriority = ['HG10GZ', 'HG10BZ', 'HG10UN']
57
58 def writebundle(cg, filename, bundletype):
59 """Write a bundle file and return its filename.
60
61 Existing files will not be overwritten.
62 If no filename is specified, a temporary file is created.
63 bz2 compression can be turned off.
64 The bundle file will be deleted in case of errors.
65 """
66
67 fh = None
68 cleanup = None
69 try:
70 if filename:
71 fh = open(filename, "wb")
72 else:
73 fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg")
74 fh = os.fdopen(fd, "wb")
75 cleanup = filename
76
77 header, compressor = bundletypes[bundletype]
78 fh.write(header)
79 z = compressor()
80
81 # parse the changegroup data, otherwise we will block
82 # in case of sshrepo because we don't know the end of the stream
83
84 # an empty chunkiter is the end of the changegroup
85 # a changegroup has at least 2 chunkiters (changelog and manifest).
86 # after that, an empty chunkiter is the end of the changegroup
87 empty = False
88 count = 0
89 while not empty or count <= 2:
90 empty = True
91 count += 1
92 for chunk in chunkiter(cg):
93 empty = False
94 fh.write(z.compress(chunkheader(len(chunk))))
95 pos = 0
96 while pos < len(chunk):
97 next = pos + 2**20
98 fh.write(z.compress(chunk[pos:next]))
99 pos = next
100 fh.write(z.compress(closechunk()))
101 fh.write(z.flush())
102 cleanup = None
103 return filename
104 finally:
105 if fh is not None:
106 fh.close()
107 if cleanup is not None:
108 os.unlink(cleanup)
109
110 def unbundle(header, fh):
111 if header == 'HG10UN':
112 return fh
113 elif not header.startswith('HG'):
114 # old client with uncompressed bundle
115 def generator(f):
116 yield header
117 for chunk in f:
118 yield chunk
119 elif header == 'HG10GZ':
120 def generator(f):
121 zd = zlib.decompressobj()
122 for chunk in f:
123 yield zd.decompress(chunk)
124 elif header == 'HG10BZ':
125 def generator(f):
126 zd = bz2.BZ2Decompressor()
127 zd.decompress("BZ")
128 for chunk in util.filechunkiter(f, 4096):
129 yield zd.decompress(chunk)
130 return util.chunkbuffer(generator(fh))
131
132 def readbundle(fh, fname):
133 header = fh.read(6)
134 if not header.startswith('HG'):
135 raise util.Abort(_('%s: not a Mercurial bundle file') % fname)
136 if not header.startswith('HG10'):
137 raise util.Abort(_('%s: unknown bundle version') % fname)
138 elif header not in bundletypes:
139 raise util.Abort(_('%s: unknown bundle compression type') % fname)
140 return unbundle(header, fh)
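For context, the helpers above compose as follows when writing a bundle and reading it back. This is a hedged sketch rather than part of the commit: it assumes `cg` is an already-built changegroup stream obtained elsewhere, and it only uses functions defined in this module.

```python
# Write a bz2-compressed bundle, then read it back as an uncompressed
# chunk stream; writebundle, readbundle and chunkiter are defined above.
fname = writebundle(cg, 'changes.hg', 'HG10BZ')   # cg: assumed changegroup stream

f = open(fname, 'rb')
try:
    source = readbundle(f, fname)     # checks the 6-byte header, decompresses
    for chunk in chunkiter(source):   # yields raw chunks until an empty one
        pass                          # process each changegroup chunk here
finally:
    f.close()
```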